| code (string, lengths 2 to 1.05M) | repo_name (string, lengths 5 to 104) | path (string, lengths 4 to 251) | language (string, 1 value) | license (string, 15 values) | size (int32, 2 to 1.05M) |
|---|---|---|---|---|---|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
from mongoengine import NotUniqueError
from st2common import log as logging
from st2common.exceptions.db import StackStormDBObjectConflictError
from st2common.models.system.common import ResourceReference
from st2common.transport.reactor import TriggerDispatcher
__all__ = [
'Access',
'ContentPackResource',
'StatusBasedResource'
]
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Access(object):
impl = None
publisher = None
dispatcher = None
# ModelAPI class for this resource
api_model_cls = None
# A list of operations for which we should dispatch a trigger
dispatch_trigger_for_operations = []
# Maps model operation name (e.g. create, update, delete) to the trigger reference which is
# used when dispatching a trigger
operation_to_trigger_ref_map = {}
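# Illustrative sketch (the class name and trigger refs below are hypothetical,
# not defined in this module): a concrete subclass would typically configure
# the attributes above like so:
#
#     class WidgetAccess(Access):
#         api_model_cls = WidgetAPI
#         dispatch_trigger_for_operations = ['create', 'update', 'delete']
#         operation_to_trigger_ref_map = {
#             'create': 'core.st2.widget.create',
#             'update': 'core.st2.widget.update',
#             'delete': 'core.st2.widget.delete'
#         }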
@classmethod
@abc.abstractmethod
def _get_impl(cls):
pass
@classmethod
@abc.abstractmethod
def _get_publisher(cls):
return None
@classmethod
def _get_dispatcher(cls):
"""
Return a dispatcher instance which is used for dispatching triggers.
"""
if not cls.dispatcher:
cls.dispatcher = TriggerDispatcher(LOG)
return cls.dispatcher
@classmethod
@abc.abstractmethod
def _get_by_object(cls, object):
return None
@classmethod
def get_by_name(cls, value):
return cls._get_impl().get_by_name(value)
@classmethod
def get_by_id(cls, value):
return cls._get_impl().get_by_id(value)
@classmethod
def get_by_ref(cls, value):
return cls._get_impl().get_by_ref(value)
@classmethod
def get(cls, *args, **kwargs):
return cls._get_impl().get(*args, **kwargs)
@classmethod
def get_all(cls, *args, **kwargs):
return cls._get_impl().get_all(*args, **kwargs)
@classmethod
def count(cls, *args, **kwargs):
return cls._get_impl().count(*args, **kwargs)
@classmethod
def query(cls, *args, **kwargs):
return cls._get_impl().query(*args, **kwargs)
@classmethod
def distinct(cls, *args, **kwargs):
return cls._get_impl().distinct(*args, **kwargs)
@classmethod
def aggregate(cls, *args, **kwargs):
return cls._get_impl().aggregate(*args, **kwargs)
@classmethod
def insert(cls, model_object, publish=True, dispatch_trigger=True,
log_not_unique_error_as_debug=False):
if model_object.id:
raise ValueError('id for object %s was unexpected.' % model_object)
try:
model_object = cls._get_impl().insert(model_object)
except NotUniqueError as e:
if log_not_unique_error_as_debug:
LOG.debug('Conflict while trying to save in DB.', exc_info=True)
else:
LOG.exception('Conflict while trying to save in DB.')
# On a conflict determine the conflicting object and return its id in
# the raised exception.
conflict_object = cls._get_by_object(model_object)
conflict_id = str(conflict_object.id) if conflict_object else None
message = str(e)
raise StackStormDBObjectConflictError(message=message, conflict_id=conflict_id,
model_object=model_object)
# Publish internal event on the message bus
if publish:
try:
cls.publish_create(model_object)
except Exception:
LOG.exception('Publish failed.')
# Dispatch trigger
if dispatch_trigger:
try:
cls.dispatch_create_trigger(model_object)
except Exception:
LOG.exception('Trigger dispatch failed.')
return model_object
@classmethod
def add_or_update(cls, model_object, publish=True, dispatch_trigger=True,
log_not_unique_error_as_debug=False):
pre_persist_id = model_object.id
try:
model_object = cls._get_impl().add_or_update(model_object)
except NotUniqueError as e:
if log_not_unique_error_as_debug:
LOG.debug('Conflict while trying to save in DB.', exc_info=True)
else:
LOG.exception('Conflict while trying to save in DB.')
# On a conflict determine the conflicting object and return its id in
# the raised exception.
conflict_object = cls._get_by_object(model_object)
conflict_id = str(conflict_object.id) if conflict_object else None
message = str(e)
raise StackStormDBObjectConflictError(message=message, conflict_id=conflict_id,
model_object=model_object)
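# A new document has no id until it is saved, so an unchanged id after the save
# means an existing document was updated rather than a new one created.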
is_update = str(pre_persist_id) == str(model_object.id)
# Publish internal event on the message bus
if publish:
try:
if is_update:
cls.publish_update(model_object)
else:
cls.publish_create(model_object)
except Exception:
LOG.exception('Publish failed.')
# Dispatch trigger
if dispatch_trigger:
try:
if is_update:
cls.dispatch_update_trigger(model_object)
else:
cls.dispatch_create_trigger(model_object)
except Exception:
LOG.exception('Trigger dispatch failed.')
return model_object
@classmethod
def update(cls, model_object, publish=True, dispatch_trigger=True, **kwargs):
"""
Use this method when -
* upsert=False is desired
* special operators like push, push_all are to be used.
"""
cls._get_impl().update(model_object, **kwargs)
# update() does not return the object but a flag (likely success/failure); the
# docs are not very clear on this, so we ignore the return value and explicitly
# fetch the object from the DB and return it.
model_object = cls.get_by_id(model_object.id)
# Publish internal event on the message bus
if publish:
try:
cls.publish_update(model_object)
except Exception:
LOG.exception('Publish failed.')
# Dispatch trigger
if dispatch_trigger:
try:
cls.dispatch_update_trigger(model_object)
except Exception:
LOG.exception('Trigger dispatch failed.')
return model_object
@classmethod
def delete(cls, model_object, publish=True, dispatch_trigger=True):
persisted_object = cls._get_impl().delete(model_object)
# Publish internal event on the message bus
if publish:
try:
cls.publish_delete(model_object)
except Exception:
LOG.exception('Publish failed.')
# Dispatch trigger
if dispatch_trigger:
try:
cls.dispatch_delete_trigger(model_object)
except Exception:
LOG.exception('Trigger dispatch failed.')
return persisted_object
####################################################
# Internal event bus message publish related methods
####################################################
@classmethod
def publish_create(cls, model_object):
publisher = cls._get_publisher()
if publisher:
publisher.publish_create(model_object)
@classmethod
def publish_update(cls, model_object):
publisher = cls._get_publisher()
if publisher:
publisher.publish_update(model_object)
@classmethod
def publish_delete(cls, model_object):
publisher = cls._get_publisher()
if publisher:
publisher.publish_delete(model_object)
############################################
# Internal trigger dispatch related methods
###########################################
@classmethod
def dispatch_create_trigger(cls, model_object):
"""
Dispatch a resource-specific trigger which indicates a new resource has been created.
"""
return cls._dispatch_operation_trigger(operation='create', model_object=model_object)
@classmethod
def dispatch_update_trigger(cls, model_object):
"""
Dispatch a resource-specific trigger which indicates an existing resource has been updated.
"""
return cls._dispatch_operation_trigger(operation='update', model_object=model_object)
@classmethod
def dispatch_delete_trigger(cls, model_object):
"""
Dispatch a resource-specific trigger which indicates an existing resource has been
deleted.
"""
return cls._dispatch_operation_trigger(operation='delete', model_object=model_object)
@classmethod
def _get_trigger_ref_for_operation(cls, operation):
trigger_ref = cls.operation_to_trigger_ref_map.get(operation, None)
if not trigger_ref:
raise ValueError('Trigger ref not specified for operation: %s' % (operation))
return trigger_ref
@classmethod
def _dispatch_operation_trigger(cls, operation, model_object):
if operation not in cls.dispatch_trigger_for_operations:
return
trigger = cls._get_trigger_ref_for_operation(operation=operation)
object_payload = cls.api_model_cls.from_model(model_object, mask_secrets=True).__json__()
payload = {
'object': object_payload
}
return cls._dispatch_trigger(operation=operation, trigger=trigger, payload=payload)
@classmethod
def _dispatch_trigger(cls, operation, trigger, payload):
if operation not in cls.dispatch_trigger_for_operations:
return
dispatcher = cls._get_dispatcher()
return dispatcher.dispatch(trigger=trigger, payload=payload)
class ContentPackResource(Access):
@classmethod
def get_by_ref(cls, ref):
if not ref:
return None
ref_obj = ResourceReference.from_string_reference(ref=ref)
result = cls.query(name=ref_obj.name,
pack=ref_obj.pack).first()
return result
@classmethod
def _get_by_object(cls, object):
# For a resource in a content pack, the (pack, name) combination is unique.
name = getattr(object, 'name', '')
pack = getattr(object, 'pack', '')
return cls.get_by_ref(ResourceReference.to_string_reference(pack=pack, name=name))
class StatusBasedResource(Access):
"""Persistence layer for models that needs to publish status to the message queue."""
@classmethod
def publish_status(cls, model_object):
"""Publish the object status to the message queue.
Publish the instance of the model as payload with the status
as routing key to the message queue via the StatePublisher.
:param model_object: An instance of the model.
:type model_object: ``object``
"""
publisher = cls._get_publisher()
if publisher:
publisher.publish_state(model_object, getattr(model_object, 'status', None))
| dennybaa/st2 | st2common/st2common/persistence/base.py | Python | apache-2.0 | 12,025 |
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
import webob
from nova.api.openstack.compute.contrib import server_external_events
from nova import context
from nova import exception
from nova.objects import instance as instance_obj
from nova import test
fake_instances = {
'00000000-0000-0000-0000-000000000001': instance_obj.Instance(
uuid='00000000-0000-0000-0000-000000000001', host='host1'),
'00000000-0000-0000-0000-000000000002': instance_obj.Instance(
uuid='00000000-0000-0000-0000-000000000002', host='host1'),
'00000000-0000-0000-0000-000000000003': instance_obj.Instance(
uuid='00000000-0000-0000-0000-000000000003', host='host2'),
'00000000-0000-0000-0000-000000000004': instance_obj.Instance(
uuid='00000000-0000-0000-0000-000000000004', host=None),
}
fake_instance_uuids = sorted(fake_instances.keys())
MISSING_UUID = '00000000-0000-0000-0000-000000000005'
@classmethod
def fake_get_by_uuid(cls, context, uuid):
try:
return fake_instances[uuid]
except KeyError:
raise exception.InstanceNotFound(instance_id=uuid)
@mock.patch('nova.objects.instance.Instance.get_by_uuid', fake_get_by_uuid)
class ServerExternalEventsTest(test.NoDBTestCase):
def setUp(self):
super(ServerExternalEventsTest, self).setUp()
self.api = server_external_events.ServerExternalEventsController()
self.context = context.get_admin_context()
self.event_1 = {'name': 'network-vif-plugged',
'tag': 'foo',
'server_uuid': fake_instance_uuids[0]}
self.event_2 = {'name': 'network-changed',
'server_uuid': fake_instance_uuids[1]}
self.default_body = {'events': [self.event_1, self.event_2]}
self.resp_event_1 = dict(self.event_1)
self.resp_event_1['code'] = 200
self.resp_event_1['status'] = 'completed'
self.resp_event_2 = dict(self.event_2)
self.resp_event_2['code'] = 200
self.resp_event_2['status'] = 'completed'
self.default_resp_body = {'events': [self.resp_event_1,
self.resp_event_2]}
def _create_req(self, body):
req = webob.Request.blank('/v2/fake/os-server-external-events')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
req.body = json.dumps(body)
return req
def _assert_call(self, req, body, expected_uuids, expected_events):
with mock.patch.object(self.api.compute_api,
'external_instance_event') as api_method:
response = self.api.create(req, body)
result = response.obj
code = response._code
self.assertEqual(1, api_method.call_count)
for inst in api_method.call_args_list[0][0][1]:
expected_uuids.remove(inst.uuid)
self.assertEqual([], expected_uuids)
for event in api_method.call_args_list[0][0][2]:
expected_events.remove(event.name)
self.assertEqual([], expected_events)
return result, code
def test_create(self):
req = self._create_req(self.default_body)
result, code = self._assert_call(req, self.default_body,
fake_instance_uuids[:2],
['network-vif-plugged',
'network-changed'])
self.assertEqual(self.default_resp_body, result)
self.assertEqual(200, code)
def test_create_one_bad_instance(self):
body = self.default_body
body['events'][1]['server_uuid'] = MISSING_UUID
req = self._create_req(body)
result, code = self._assert_call(req, body, [fake_instance_uuids[0]],
['network-vif-plugged'])
self.assertEqual('failed', result['events'][1]['status'])
self.assertEqual(200, result['events'][0]['code'])
self.assertEqual(404, result['events'][1]['code'])
self.assertEqual(207, code)
def test_create_event_instance_has_no_host(self):
body = self.default_body
body['events'][0]['server_uuid'] = fake_instance_uuids[-1]
req = self._create_req(body)
# the instance without a host should not be passed to the compute layer
result, code = self._assert_call(req, body,
[fake_instance_uuids[1]],
['network-changed'])
self.assertEqual(422, result['events'][0]['code'])
self.assertEqual('failed', result['events'][0]['status'])
self.assertEqual(200, result['events'][1]['code'])
self.assertEqual(207, code)
def test_create_no_good_instances(self):
body = self.default_body
body['events'][0]['server_uuid'] = MISSING_UUID
body['events'][1]['server_uuid'] = MISSING_UUID
req = self._create_req(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.api.create, req, body)
def test_create_bad_status(self):
body = self.default_body
body['events'][1]['status'] = 'foo'
req = self._create_req(body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.api.create, req, body)
def test_create_extra_gorp(self):
body = self.default_body
body['events'][0]['foobar'] = 'bad stuff'
req = self._create_req(body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.api.create, req, body)
def test_create_bad_events(self):
body = {'events': 'foo'}
req = self._create_req(body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.api.create, req, body)
def test_create_bad_body(self):
body = {'foo': 'bar'}
req = self._create_req(body)
self.assertRaises(webob.exc.HTTPBadRequest,
self.api.create, req, body)
| luogangyi/bcec-nova | nova/tests/api/openstack/compute/contrib/test_server_external_events.py | Python | apache-2.0 | 6,660 |
#-----------------------------------------------------------------------
#
# tinyfont.py -- a very small font
#
#-----------------------------------------------------------------------
#
# Description:
#
# The tiny font is a 3x5 pixel monospaced font where each character
# is placed in a box of 4x6 pixels. At the native 160x120 Gigatron
# resolution we can display 40x20 characters.
#
# The encoding has 3x5 bits for the pixels and 1 shift bit to indicate
# an overall shift down for characters g, j, p, q and y. The lowercase
# j needs special handling. This scheme gives 2 bytes per character,
# for a total of 192 bytes.
#
# References:
#
# http://vt100.tarunz.org
# VT100 Terminal for Pilot (Brian J. Swetland)
#
# https://robey.lag.net/2010/01/23/tiny-monospace-font.html
# A very tiny, monospace, bitmap font (Robey Pointer)
#
# https://fonts2u.com/small-5x3-regular.font
# Small 5X3 regular (soxhead2000)
#
# https://github.com/olikraus/u8g2
# U8glib library for monochrome displays, version 2
#
# History:
#
# 2018-08-01 (marcelk) Initial version inspired by open source examples
# 2018-08-xx (marcelk) Update of @corz
#
#-----------------------------------------------------------------------
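# Illustrative decoder (not part of the original script) for the words emitted
# by convert() below: it mirrors the bit layout described above, where bit
# 14 - y - 5*x holds pixel (x, y) of the 3x5 glyph and negative values mark
# glyphs that are shifted one pixel down (g, j, p, q, y).
def decode(value):
    shift = 1 if value < 0 else 0                 # sign carries the shift bit
    word = value + 0x8000 if value < 0 else value
    rows = []
    for y in range(5):                            # five glyph rows, top to bottom
        row = ''
        for x in range(3):                        # three columns, left to right
            row += '@' if word & (1 << (14 - y - 5 * x)) else '.'
        rows.append(row)
    return rows, shift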
def convert(font):
for c in range(96):
i, j = c % 16, c / 16
word, shift = 0, 0
for y in range(6):
for x in range(3):
p = ((j * 6 + y) * 16 + i) * 4 + x + 1
if font[p] == '@':
if y == 5 and shift == 0:
shift, word = 1, word << 1
word |= 1 << (5*(3-x) - y - 1 + shift)
if shift == 0:
print ' 0x%04x,' % word,
else:
print '-0x%04x,' % (0x8000 - word),
print '// %s' % repr(chr(32+c))
tinyfont =\
'......@..@.@.@.@..@@.@....@...@....@.@..........................'\
'......@..@.@.@@@.@@....@.@.@..@...@...@..@.@..@................@'\
'......@......@.@..@@..@...@.......@...@...@..@@@.....@@@......@.'\
'.............@@@.@@..@...@.@......@...@..@.@..@...@..........@..'\
'......@......@.@..@....@..@@.......@.@...........@........@.....'\
'................................................................'\
'..@@..@..@@..@@..@.@.@@@..@@.@@@.@@@.@@@...........@.....@...@@@'\
'.@.@.@@....@...@.@.@.@...@.....@.@.@.@.@..@...@...@..@@@..@....@'\
'.@.@..@...@..@@..@@@.@@..@@@..@..@@@.@@@.........@.........@..@.'\
'.@.@..@..@.....@...@...@.@.@.@...@.@...@..@...@...@..@@@..@.....'\
'.@@..@@@.@@@.@@....@.@@..@@@.@...@@@.@@......@.....@.....@....@.'\
'................................................................'\
'..@@..@..@@...@@.@@..@@@.@@@..@@.@.@.@@@..@@.@.@.@...@.@.@.@.@@@'\
'.@.@.@.@.@.@.@...@.@.@...@...@...@.@..@....@.@.@.@...@@@.@@@.@.@'\
'.@.@.@@@.@@..@...@.@.@@..@@..@.@.@@@..@....@.@@..@...@@@.@@@.@.@'\
'.@...@.@.@.@.@...@.@.@...@...@.@.@.@..@..@.@.@.@.@...@.@.@@@.@.@'\
'..@@.@.@.@@...@@.@@..@@@.@...@@@.@.@.@@@..@..@.@.@@@.@.@.@.@.@@@'\
'................................................................'\
'.@@...@..@@...@@.@@@.@.@.@.@.@.@.@.@.@.@.@@@..@@.....@@...@.....'\
'.@.@.@.@.@.@.@....@..@.@.@.@.@.@.@.@.@.@...@..@..@....@..@.@....'\
'.@.@.@.@.@@@..@...@..@.@.@.@.@@@..@..@@@..@...@...@...@.........'\
'.@@..@@@.@@....@..@..@.@.@.@.@@@.@.@..@..@....@....@..@.........'\
'.@....@@.@.@.@@...@..@@@..@..@.@.@.@..@..@@@..@@.....@@......@@@'\
'................................................................'\
'.@.......@.........@.......@.....@....@....O.@...@@.............'\
'..@...@@.@@...@@..@@..@@..@...@@.@@..........@.@..@..@@@.@@...@.'\
'.....@.@.@.@.@...@.@.@.@.@@@.@.@.@.@.@@...@@.@@...@..@@@.@.@.@.@'\
'.....@.@.@.@.@...@.@.@@...@..@@@.@.@..@....@.@.@..@..@@@.@.@.@.@'\
'.....@@@.@@...@@..@@..@@..@....@.@.@.@@@.@.@.@.@.@@@.@.@.@.@..@.'\
'..............................@.......... @.....................'\
'..................@...........................@@..@..@@...@@.@@@'\
'.@@...@@.@.@..@@.@@@.@.@.@.@.@.@.@.@.@.@.@@@..@...@...@..@@..@@@'\
'.@.@.@.@.@@..@@...@..@.@.@.@.@@@..@..@.@...@.@@...@...@@.....@@@'\
'.@.@.@.@.@....@@..@..@.@.@@@.@@@..@...@@..@...@...@...@......@@@'\
'.@@...@@.@...@@...@@..@@..@..@@@.@.@...@.@@@..@@..@..@@......@@@'\
'.@.....@..............................@.........................'
print '// Generated by tinyfont.py'
convert(tinyfont)
| kervinck/gigatron-rom | Contrib/pkupper/BabelFish/tinyfont.py | Python | bsd-2-clause | 4,452 |
from unittest import mock
import os
import pytest
from django.conf import settings
from waffle.testutils import override_switch
from olympia import amo
from olympia.addons.tasks import (
recreate_theme_previews,
update_addon_average_daily_users,
update_addon_hotness,
update_addon_weekly_downloads,
)
from olympia.amo.tests import addon_factory, root_storage
from olympia.versions.models import VersionPreview
@pytest.mark.django_db
def test_recreate_theme_previews():
xpi_path = os.path.join(
settings.ROOT, 'src/olympia/devhub/tests/addons/mozilla_static_theme.zip'
)
addon_without_previews = addon_factory(type=amo.ADDON_STATICTHEME)
root_storage.copy_stored_file(
xpi_path, addon_without_previews.current_version.file.file_path
)
addon_with_previews = addon_factory(type=amo.ADDON_STATICTHEME)
root_storage.copy_stored_file(
xpi_path, addon_with_previews.current_version.file.file_path
)
VersionPreview.objects.create(
version=addon_with_previews.current_version,
sizes={'image': [123, 456], 'thumbnail': [34, 45]},
)
assert addon_without_previews.current_previews.count() == 0
assert addon_with_previews.current_previews.count() == 1
recreate_theme_previews([addon_without_previews.id, addon_with_previews.id])
assert addon_without_previews.reload().current_previews.count() == 2
assert addon_with_previews.reload().current_previews.count() == 2
sizes = addon_without_previews.current_previews.values_list('sizes', flat=True)
renderings = amo.THEME_PREVIEW_RENDERINGS
assert list(sizes) == [
{
'image': list(renderings['firefox']['full']),
'thumbnail': list(renderings['firefox']['thumbnail']),
'image_format': renderings['firefox']['image_format'],
'thumbnail_format': renderings['firefox']['thumbnail_format'],
},
{
'image': list(renderings['amo']['full']),
'thumbnail': list(renderings['amo']['thumbnail']),
'image_format': renderings['amo']['image_format'],
'thumbnail_format': renderings['amo']['thumbnail_format'],
},
]
PATCH_PATH = 'olympia.addons.tasks'
@pytest.mark.django_db
@mock.patch(f'{PATCH_PATH}.parse_addon')
def test_create_missing_theme_previews(parse_addon_mock):
parse_addon_mock.return_value = {}
theme = addon_factory(type=amo.ADDON_STATICTHEME)
amo_preview = VersionPreview.objects.create(
version=theme.current_version,
sizes={
'image': amo.THEME_PREVIEW_RENDERINGS['amo']['full'],
'thumbnail': amo.THEME_PREVIEW_RENDERINGS['amo']['thumbnail'],
'thumbnail_format': amo.THEME_PREVIEW_RENDERINGS['amo']['thumbnail_format'],
'image_format': amo.THEME_PREVIEW_RENDERINGS['amo']['image_format'],
},
)
firefox_preview = VersionPreview.objects.create(
version=theme.current_version,
sizes={
'image': amo.THEME_PREVIEW_RENDERINGS['firefox']['full'],
'thumbnail': amo.THEME_PREVIEW_RENDERINGS['firefox']['thumbnail'],
},
)
# add another extra preview size that should be ignored
extra_preview = VersionPreview.objects.create(
version=theme.current_version,
sizes={'image': [123, 456], 'thumbnail': [34, 45]},
)
# addon has all the complete previews already so skip when only_missing=True
assert VersionPreview.objects.count() == 3
with mock.patch(
f'{PATCH_PATH}.generate_static_theme_preview.apply_async'
) as gen_preview, mock.patch(f'{PATCH_PATH}.resize_image') as resize:
recreate_theme_previews([theme.id], only_missing=True)
assert gen_preview.call_count == 0
assert resize.call_count == 0
recreate_theme_previews([theme.id], only_missing=False)
assert gen_preview.call_count == 1
assert resize.call_count == 0
# If the add-on is missing a preview, we call generate_static_theme_preview
VersionPreview.objects.get(id=amo_preview.id).delete()
firefox_preview.save()
extra_preview.save()
assert VersionPreview.objects.count() == 2
with mock.patch(
f'{PATCH_PATH}.generate_static_theme_preview.apply_async'
) as gen_preview, mock.patch(f'{PATCH_PATH}.resize_image') as resize:
recreate_theme_previews([theme.id], only_missing=True)
assert gen_preview.call_count == 1
assert resize.call_count == 0
# Preview is correct dimensions but wrong format, call generate_static_theme_preview
amo_preview.sizes['image_format'] = 'foo'
amo_preview.save()
firefox_preview.save()
extra_preview.save()
assert VersionPreview.objects.count() == 3
with mock.patch(
f'{PATCH_PATH}.generate_static_theme_preview.apply_async'
) as gen_preview, mock.patch(f'{PATCH_PATH}.resize_image') as resize:
recreate_theme_previews([theme.id], only_missing=True)
assert gen_preview.call_count == 1
assert resize.call_count == 0
# But we don't do the full regeneration to just get new thumbnail sizes or formats
amo_preview.sizes['thumbnail'] = [666, 444]
amo_preview.sizes['image_format'] = 'svg'
amo_preview.save()
assert amo_preview.thumbnail_dimensions == [666, 444]
firefox_preview.sizes['thumbnail_format'] = 'gif'
firefox_preview.save()
assert firefox_preview.get_format('thumbnail') == 'gif'
extra_preview.save()
assert VersionPreview.objects.count() == 3
with mock.patch(
f'{PATCH_PATH}.generate_static_theme_preview.apply_async'
) as gen_preview, mock.patch(f'{PATCH_PATH}.resize_image') as resize:
recreate_theme_previews([theme.id], only_missing=True)
assert gen_preview.call_count == 0 # not called
assert resize.call_count == 2
amo_preview.reload()
assert amo_preview.thumbnail_dimensions == [720, 92]
firefox_preview.reload()
assert firefox_preview.get_format('thumbnail') == 'png'
assert VersionPreview.objects.count() == 3
@pytest.mark.django_db
def test_update_addon_average_daily_users():
addon = addon_factory(average_daily_users=0)
count = 123
data = [(addon.guid, count)]
assert addon.average_daily_users == 0
update_addon_average_daily_users(data)
addon.refresh_from_db()
assert addon.average_daily_users == count
@pytest.mark.django_db
@override_switch('local-statistics-processing', active=True)
def test_update_deleted_addon_average_daily_users():
addon = addon_factory(average_daily_users=0)
addon.delete()
count = 123
data = [(addon.guid, count)]
assert addon.average_daily_users == 0
update_addon_average_daily_users(data)
addon.refresh_from_db()
assert addon.average_daily_users == count
@pytest.mark.django_db
def test_update_addon_hotness():
addon1 = addon_factory(hotness=0, status=amo.STATUS_APPROVED)
addon2 = addon_factory(hotness=123, status=amo.STATUS_APPROVED)
addon3 = addon_factory(hotness=123, status=amo.STATUS_AWAITING_REVIEW)
averages = {
addon1.guid: {'avg_this_week': 213467, 'avg_three_weeks_before': 123467},
addon2.guid: {
'avg_this_week': 1,
'avg_three_weeks_before': 1,
},
addon3.guid: {'avg_this_week': 213467, 'avg_three_weeks_before': 123467},
}
update_addon_hotness(averages=averages.items())
addon1.refresh_from_db()
addon2.refresh_from_db()
addon3.refresh_from_db()
assert addon1.hotness > 0
# Too low averages so we set the hotness to 0.
assert addon2.hotness == 0
# We shouldn't have processed this add-on.
assert addon3.hotness == 123
def test_update_addon_weekly_downloads():
addon = addon_factory(weekly_downloads=0)
count = 123
data = [(addon.addonguid.hashed_guid, count)]
assert addon.weekly_downloads == 0
update_addon_weekly_downloads(data)
addon.refresh_from_db()
assert addon.weekly_downloads == count
def test_update_addon_weekly_downloads_ignores_deleted_addons():
guid = 'some@guid'
deleted_addon = addon_factory(guid=guid)
deleted_addon.delete()
deleted_addon.update(guid=None)
addon = addon_factory(guid=guid, weekly_downloads=0)
count = 123
data = [(addon.addonguid.hashed_guid, count)]
assert addon.weekly_downloads == 0
update_addon_weekly_downloads(data)
addon.refresh_from_db()
assert addon.weekly_downloads == count
def test_update_addon_weekly_downloads_skips_non_existent_addons():
addon = addon_factory(weekly_downloads=0)
count = 123
invalid_hashed_guid = 'does.not@exist'
data = [(invalid_hashed_guid, 0), (addon.addonguid.hashed_guid, count)]
assert addon.weekly_downloads == 0
update_addon_weekly_downloads(data)
addon.refresh_from_db()
assert addon.weekly_downloads == count
| mozilla/addons-server | src/olympia/addons/tests/test_tasks.py | Python | bsd-3-clause | 8,920 |
from __future__ import unicode_literals
from django.test import SimpleTestCase
from localflavor.is_.forms import (ISIdNumberField, ISPhoneNumberField,
ISPostalCodeSelect)
class ISLocalFlavorTests(SimpleTestCase):
def test_ISPostalCodeSelect(self):
f = ISPostalCodeSelect()
out = '''<select name="foo">
<option value="101">101 Reykjav\xedk</option>
<option value="103">103 Reykjav\xedk</option>
<option value="104">104 Reykjav\xedk</option>
<option value="105">105 Reykjav\xedk</option>
<option value="107">107 Reykjav\xedk</option>
<option value="108">108 Reykjav\xedk</option>
<option value="109">109 Reykjav\xedk</option>
<option value="110">110 Reykjav\xedk</option>
<option value="111">111 Reykjav\xedk</option>
<option value="112">112 Reykjav\xedk</option>
<option value="113">113 Reykjav\xedk</option>
<option value="116">116 Kjalarnes</option>
<option value="121">121 Reykjav\xedk</option>
<option value="123">123 Reykjav\xedk</option>
<option value="124">124 Reykjav\xedk</option>
<option value="125">125 Reykjav\xedk</option>
<option value="127">127 Reykjav\xedk</option>
<option value="128">128 Reykjav\xedk</option>
<option value="129">129 Reykjav\xedk</option>
<option value="130">130 Reykjav\xedk</option>
<option value="132">132 Reykjav\xedk</option>
<option value="150">150 Reykjav\xedk</option>
<option value="155">155 Reykjav\xedk</option>
<option value="170">170 Seltjarnarnes</option>
<option value="172">172 Seltjarnarnes</option>
<option value="190">190 Vogar</option>
<option value="200">200 K\xf3pavogur</option>
<option value="201">201 K\xf3pavogur</option>
<option value="202">202 K\xf3pavogur</option>
<option value="203">203 K\xf3pavogur</option>
<option value="210">210 Gar\xf0ab\xe6r</option>
<option value="212">212 Gar\xf0ab\xe6r</option>
<option value="220">220 Hafnarfj\xf6r\xf0ur</option>
<option value="221">221 Hafnarfj\xf6r\xf0ur</option>
<option value="222">222 Hafnarfj\xf6r\xf0ur</option>
<option value="225">225 \xc1lftanes</option>
<option value="230">230 Reykjanesb\xe6r</option>
<option value="232">232 Reykjanesb\xe6r</option>
<option value="233">233 Reykjanesb\xe6r</option>
<option value="235">235 Keflav\xedkurflugv\xf6llur</option>
<option value="240">240 Grindav\xedk</option>
<option value="245">245 Sandger\xf0i</option>
<option value="250">250 Gar\xf0ur</option>
<option value="260">260 Reykjanesb\xe6r</option>
<option value="270">270 Mosfellsb\xe6r</option>
<option value="271">271 Mosfellsb\xe6r</option>
<option value="276">276 Mosfellsb\xe6r</option>
<option value="300">300 Akranes</option>
<option value="301">301 Akranes</option>
<option value="302">302 Akranes</option>
<option value="310">310 Borgarnes</option>
<option value="311">311 Borgarnes</option>
<option value="320">320 Reykholt \xed Borgarfir\xf0i</option>
<option value="340">340 Stykkish\xf3lmur</option>
<option value="345">345 Flatey \xe1 Brei\xf0afir\xf0i</option>
<option value="350">350 Grundarfj\xf6r\xf0ur</option>
<option value="355">355 \xd3lafsv\xedk</option>
<option value="356">356 Sn\xe6fellsb\xe6r</option>
<option value="360">360 Hellissandur</option>
<option value="370">370 B\xfa\xf0ardalur</option>
<option value="371">371 B\xfa\xf0ardalur</option>
<option value="380">380 Reykh\xf3lahreppur</option>
<option value="400">400 \xcdsafj\xf6r\xf0ur</option>
<option value="401">401 \xcdsafj\xf6r\xf0ur</option>
<option value="410">410 Hn\xedfsdalur</option>
<option value="415">415 Bolungarv\xedk</option>
<option value="420">420 S\xfa\xf0av\xedk</option>
<option value="425">425 Flateyri</option>
<option value="430">430 Su\xf0ureyri</option>
<option value="450">450 Patreksfj\xf6r\xf0ur</option>
<option value="451">451 Patreksfj\xf6r\xf0ur</option>
<option value="460">460 T\xe1lknafj\xf6r\xf0ur</option>
<option value="465">465 B\xedldudalur</option>
<option value="470">470 \xdeingeyri</option>
<option value="471">471 \xdeingeyri</option>
<option value="500">500 Sta\xf0ur</option>
<option value="510">510 H\xf3lmav\xedk</option>
<option value="512">512 H\xf3lmav\xedk</option>
<option value="520">520 Drangsnes</option>
<option value="522">522 Kj\xf6rvogur</option>
<option value="523">523 B\xe6r</option>
<option value="524">524 Nor\xf0urfj\xf6r\xf0ur</option>
<option value="530">530 Hvammstangi</option>
<option value="531">531 Hvammstangi</option>
<option value="540">540 Bl\xf6ndu\xf3s</option>
<option value="541">541 Bl\xf6ndu\xf3s</option>
<option value="545">545 Skagastr\xf6nd</option>
<option value="550">550 Sau\xf0\xe1rkr\xf3kur</option>
<option value="551">551 Sau\xf0\xe1rkr\xf3kur</option>
<option value="560">560 Varmahl\xed\xf0</option>
<option value="565">565 Hofs\xf3s</option>
<option value="566">566 Hofs\xf3s</option>
<option value="570">570 Flj\xf3t</option>
<option value="580">580 Siglufj\xf6r\xf0ur</option>
<option value="600">600 Akureyri</option>
<option value="601">601 Akureyri</option>
<option value="602">602 Akureyri</option>
<option value="603">603 Akureyri</option>
<option value="610">610 Greniv\xedk</option>
<option value="611">611 Gr\xedmsey</option>
<option value="620">620 Dalv\xedk</option>
<option value="621">621 Dalv\xedk</option>
<option value="625">625 \xd3lafsfj\xf6r\xf0ur</option>
<option value="630">630 Hr\xedsey</option>
<option value="640">640 H\xfasav\xedk</option>
<option value="641">641 H\xfasav\xedk</option>
<option value="645">645 Fossh\xf3ll</option>
<option value="650">650 Laugar</option>
<option value="660">660 M\xfdvatn</option>
<option value="670">670 K\xf3pasker</option>
<option value="671">671 K\xf3pasker</option>
<option value="675">675 Raufarh\xf6fn</option>
<option value="680">680 \xde\xf3rsh\xf6fn</option>
<option value="681">681 \xde\xf3rsh\xf6fn</option>
<option value="685">685 Bakkafj\xf6r\xf0ur</option>
<option value="690">690 Vopnafj\xf6r\xf0ur</option>
<option value="700">700 Egilssta\xf0ir</option>
<option value="701">701 Egilssta\xf0ir</option>
<option value="710">710 Sey\xf0isfj\xf6r\xf0ur</option>
<option value="715">715 Mj\xf3ifj\xf6r\xf0ur</option>
<option value="720">720 Borgarfj\xf6r\xf0ur eystri</option>
<option value="730">730 Rey\xf0arfj\xf6r\xf0ur</option>
<option value="735">735 Eskifj\xf6r\xf0ur</option>
<option value="740">740 Neskaupsta\xf0ur</option>
<option value="750">750 F\xe1skr\xfa\xf0sfj\xf6r\xf0ur</option>
<option value="755">755 St\xf6\xf0varfj\xf6r\xf0ur</option>
<option value="760">760 Brei\xf0dalsv\xedk</option>
<option value="765">765 Dj\xfapivogur</option>
<option value="780">780 H\xf6fn \xed Hornafir\xf0i</option>
<option value="781">781 H\xf6fn \xed Hornafir\xf0i</option>
<option value="785">785 \xd6r\xe6fi</option>
<option value="800">800 Selfoss</option>
<option value="801">801 Selfoss</option>
<option value="802">802 Selfoss</option>
<option value="810">810 Hverager\xf0i</option>
<option value="815">815 \xdeorl\xe1ksh\xf6fn</option>
<option value="816">816 \xd6lfus</option>
<option value="820">820 Eyrarbakki</option>
<option value="825">825 Stokkseyri</option>
<option value="840">840 Laugarvatn</option>
<option value="845">845 Fl\xfa\xf0ir</option>
<option value="850">850 Hella</option>
<option value="851">851 Hella</option>
<option value="860">860 Hvolsv\xf6llur</option>
<option value="861">861 Hvolsv\xf6llur</option>
<option value="870">870 V\xedk</option>
<option value="871">871 V\xedk</option>
<option value="880">880 Kirkjub\xe6jarklaustur</option>
<option value="900">900 Vestmannaeyjar</option>
<option value="902">902 Vestmannaeyjar</option>
</select>'''
self.assertHTMLEqual(f.render('foo', 'bar'), out)
def test_ISIdNumberField(self):
error_atleast = ['Ensure this value has at least 10 characters (it has 9).']
error_invalid = ['Enter a valid Icelandic identification number. The format is XXXXXX-XXXX.']
error_atmost = ['Ensure this value has at most 11 characters (it has 12).']
error_notvalid = ['The Icelandic identification number is not valid.']
valid = {
'2308803449': '230880-3449',
'230880-3449': '230880-3449',
'230880 3449': '230880-3449',
'2308803440': '230880-3440',
}
invalid = {
'230880343': error_atleast + error_invalid,
'230880343234': error_atmost + error_invalid,
'abcdefghijk': error_invalid,
'2308803439': error_notvalid,
}
self.assertFieldOutput(ISIdNumberField, valid, invalid)
def test_ISPhoneNumberField(self):
error_invalid = ['Enter a valid value.']
error_atleast = ['Ensure this value has at least 7 characters (it has 6).']
error_atmost = ['Ensure this value has at most 8 characters (it has 9).']
valid = {
'1234567': '1234567',
'123 4567': '1234567',
'123-4567': '1234567',
}
invalid = {
'123-456': error_invalid,
'123456': error_atleast + error_invalid,
'123456555': error_atmost + error_invalid,
'abcdefg': error_invalid,
' 1234567 ': error_atmost + error_invalid,
' 12367 ': error_invalid
}
self.assertFieldOutput(ISPhoneNumberField, valid, invalid)
| M157q/django-localflavor | tests/test_is.py | Python | bsd-3-clause | 9,213 |
default_app_config = 'hs_tracking.apps.HSTrackingAppConfig'
| ResearchSoftwareInstitute/MyHPOM | hs_tracking/__init__.py | Python | bsd-3-clause | 60 |
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions, MockTool
class CommandsTest(unittest.TestCase):
def assert_execute_outputs(self, command, args=[], expected_stdout="", expected_stderr="", expected_exception=None, expected_logs=None, options=MockOptions(), tool=MockTool()):
options.blocks = None
options.cc = 'MOCK cc'
options.component = 'MOCK component'
options.confirm = True
options.email = 'MOCK email'
options.git_commit = 'MOCK git commit'
options.obsolete_patches = True
options.open_bug = True
options.port = 'MOCK port'
options.update_changelogs = False
options.quiet = True
options.reviewer = 'MOCK reviewer'
command.bind_to_tool(tool)
OutputCapture().assert_outputs(self, command.execute, [options, args, tool], expected_stdout=expected_stdout,
expected_stderr=expected_stderr, expected_exception=expected_exception, expected_logs=expected_logs)
| axinging/chromium-crosswalk | third_party/WebKit/Tools/Scripts/webkitpy/tool/commands/commandtest.py | Python | bsd-3-clause | 2,622 |
"""
.. todo::
WRITEME
"""
import os
import gc
import warnings
try:
import tables
except ImportError:
warnings.warn("Couldn't import tables, so far SVHN is "
"only supported with PyTables")
import numpy
from theano import config
from pylearn2.datasets import dense_design_matrix
from pylearn2.utils.serial import load
from pylearn2.utils.string_utils import preprocess
from pylearn2.utils.rng import make_np_rng
class SVHN(dense_design_matrix.DenseDesignMatrixPyTables):
"""
Only for faster access there is a copy of the hdf5 file in
PYLEARN2_DATA_PATH, but it is meant to be read-only. If you wish to
modify the data, you should pass a local copy to the path argument.
Parameters
----------
which_set : WRITEME
path : WRITEME
center : WRITEME
scale : WRITEME
start : WRITEME
stop : WRITEME
axes : WRITEME
preprocessor : WRITEME
"""
mapper = {'train': 0, 'test': 1, 'extra': 2, 'train_all': 3,
'splitted_train': 4, 'valid': 5}
data_path = '${PYLEARN2_DATA_PATH}/SVHN/format2/'
def __init__(self, which_set, path = None, center = False, scale = False,
start = None, stop = None, axes = ('b', 0, 1, 'c'),
preprocessor = None):
assert which_set in self.mapper.keys()
self.__dict__.update(locals())
del self.self
if path is None:
path = '${PYLEARN2_DATA_PATH}/SVHN/format2/'
mode = 'r'
else:
mode = 'r+'
warnings.warn("Because path is not same as PYLEARN2_DATA_PATH "
"be aware that data might have been "
"modified or pre-processed.")
if mode == 'r' and (scale or center or (start != None) or
(stop != None)):
raise ValueError("Only for speed there is a copy of hdf5 " +\
"file in PYLEARN2_DATA_PATH but it meant to be only " +\
"readable. If you wish to modify the data, you should " +\
"pass a local copy to the path argument.")
# load data
path = preprocess(path)
file_n = "{0}_32x32.h5".format(os.path.join(path, "h5", which_set))
if os.path.isfile(file_n):
make_new = False
else:
make_new = True
warnings.warn("Over riding existing file: {0}".format(file_n))
# if hdf5 file does not exist make them
if make_new:
self.filters = tables.Filters(complib='blosc', complevel=5)
self.make_data(which_set, path)
self.h5file = tables.openFile(file_n, mode = mode)
data = self.h5file.getNode('/', "Data")
if start != None or stop != None:
self.h5file, data = self.resize(self.h5file, start, stop)
# rescale or center if permitted
if center and scale:
data.X[:] -= 127.5
data.X[:] /= 127.5
elif center:
data.X[:] -= 127.5
elif scale:
data.X[:] /= 255.
view_converter = dense_design_matrix.DefaultViewConverter((32, 32, 3),
axes)
super(SVHN, self).__init__(X = data.X, y = data.y,
view_converter = view_converter)
if preprocessor:
if which_set in ['train', 'train_all', 'splitted_train']:
can_fit = True
preprocessor.apply(self, can_fit)
self.h5file.flush()
def get_test_set(self):
"""
.. todo::
WRITEME
"""
return SVHN(which_set = 'test', path = self.path,
center = self.center, scale = self.scale,
start = self.start, stop = self.stop,
axes = self.axes, preprocessor = self.preprocessor)
def make_data(self, which_set, path, shuffle = True):
"""
.. todo::
WRITEME
"""
sizes = {'train': 73257, 'test': 26032, 'extra': 531131,
'train_all': 604388, 'valid': 6000, 'splitted_train' : 598388}
image_size = 32 * 32 * 3
h_file_n = "{0}_32x32.h5".format(os.path.join(path, "h5", which_set))
h5file, node = self.init_hdf5(h_file_n, ([sizes[which_set],
image_size], [sizes[which_set], 10]))
# For consistency between experiments it is better to make a new random stream
rng = make_np_rng(None, 322, which_method="shuffle")
def design_matrix_view(data_x, data_y):
"""reshape data_x to deisng matrix view
and data_y to one_hot
"""
data_x = numpy.transpose(data_x, axes = [3, 2, 0, 1])
data_x = data_x.reshape((data_x.shape[0], 32 * 32 * 3))
# TODO assuming one_hot as default for now
one_hot = numpy.zeros((data_y.shape[0], 10), dtype = config.floatX)
for i in xrange(data_y.shape[0]):
one_hot[i, data_y[i] - 1] = 1.
return data_x, one_hot
def load_data(path):
"Loads data from mat files"
data = load(path)
data_x = numpy.cast[config.floatX](data['X'])
data_y = data['y']
del data
gc.collect()
return design_matrix_view(data_x, data_y)
def split_train_valid(path, num_valid_train = 400,
num_valid_extra = 200):
"""
Extract a number of class-balanced samples from the train and extra
sets for validation, and regard the remainder as the new train set.
Parameters
----------
num_valid_train : int, optional
Number of samples per class from train
num_valid_extra : int, optional
Number of samples per class from extra
"""
# load difficult train
data = load("{0}train_32x32.mat".format(SVHN.data_path))
valid_index = []
for i in xrange(1, 11):
index = numpy.nonzero(data['y'] == i)[0]
index.flags.writeable = 1
rng.shuffle(index)
valid_index.append(index[:num_valid_train])
valid_index = set(numpy.concatenate(valid_index))
train_index = set(numpy.arange(data['X'].shape[3])) - valid_index
valid_index = list(valid_index)
train_index = list(train_index)
train_x = data['X'][:, :, :, train_index]
train_y = data['y'][train_index, :]
valid_x = data['X'][:, :, :, valid_index]
valid_y = data['y'][valid_index, :]
train_size = data['X'].shape[3]
assert train_x.shape[3] == train_size - num_valid_train * 10
assert train_y.shape[0] == train_size - num_valid_train * 10
assert valid_x.shape[3] == num_valid_train * 10
assert valid_y.shape[0] == num_valid_train * 10
del data
gc.collect()
# load extra train
data = load("{0}extra_32x32.mat".format(SVHN.data_path))
valid_index = []
for i in xrange(1, 11):
index = numpy.nonzero(data['y'] == i)[0]
index.flags.writeable = 1
rng.shuffle(index)
valid_index.append(index[:num_valid_extra])
valid_index = set(numpy.concatenate(valid_index))
train_index = set(numpy.arange(data['X'].shape[3])) - valid_index
valid_index = list(valid_index)
train_index = list(train_index)
train_x = numpy.concatenate((train_x,
data['X'][:, :, :, train_index]), axis = 3)
train_y = numpy.concatenate((train_y, data['y'][train_index, :]))
valid_x = numpy.concatenate((valid_x,
data['X'][:, :, :, valid_index]), axis = 3)
valid_y = numpy.concatenate((valid_y, data['y'][valid_index, :]))
extra_size = data['X'].shape[3]
sizes['valid'] = (num_valid_train + num_valid_extra) * 10
sizes['splitted_train'] = train_size + extra_size - sizes['valid']
assert train_x.shape[3] == sizes['splitted_train']
assert train_y.shape[0] == sizes['splitted_train']
assert valid_x.shape[3] == sizes['valid']
assert valid_y.shape[0] == sizes['valid']
del data
gc.collect()
train_x = numpy.cast[config.floatX](train_x)
valid_x = numpy.cast[config.floatX](valid_x)
return design_matrix_view(train_x, train_y),\
design_matrix_view(valid_x, valid_y)
# The original splits
if which_set in ['train', 'test']:
data_x, data_y = load_data("{0}{1}_32x32.mat".format(path,
which_set))
# Train valid splits
elif which_set in ['splitted_train', 'valid']:
train_data, valid_data = split_train_valid(path)
if which_set == 'splitted_train':
data_x, data_y = train_data
else:
data_x, data_y = valid_data
del train_data
# extra data
elif which_set in ['train_all', 'extra']:
data_x, data_y = load_data("{0}extra_32x32.mat".format(path))
if which_set == 'train_all':
train_x, train_y = load_data("{0}train_32x32.mat".format(path))
data_x = numpy.concatenate((data_x, train_x))
data_y = numpy.concatenate((data_y, train_y))
if shuffle:
index = range(data_x.shape[0])
rng.shuffle(index)
data_x = data_x[index, :]
data_y = data_y[index, :]
assert data_x.shape[0] == sizes[which_set]
assert data_y.shape[0] == sizes[which_set]
SVHN.fill_hdf5(h5file, data_x, data_y, node)
h5file.close()
class SVHN_On_Memory(dense_design_matrix.DenseDesignMatrix):
"""
A version of the SVHN dataset that loads everything into memory instead of
using PyTables.
Parameters
----------
which_set : WRITEME
center : WRITEME
scale : WRITEME
start : WRITEME
stop : WRITEME
axes : WRITEME
preprocessor : WRITEME
"""
mapper = {'train': 0, 'test': 1, 'extra': 2, 'train_all': 3,
'splitted_train': 4, 'valid': 5}
def __init__(self, which_set, center = False, scale = False,
start = None, stop = None, axes = ('b', 0, 1, 'c'),
preprocessor = None):
assert which_set in self.mapper.keys()
self.__dict__.update(locals())
del self.self
path = '${PYLEARN2_DATA_PATH}/SVHN/format2/'
# load data
path = preprocess(path)
data_x, data_y = self.make_data(which_set, path)
# rescale or center if permitted
if center and scale:
data_x -= 127.5
data_x /= 127.5
elif center:
data_x -= 127.5
elif scale:
data_x /= 255.
view_converter = dense_design_matrix.DefaultViewConverter((32, 32, 3),
axes)
super(SVHN_On_Memory, self).__init__(X = data_x, y = data_y,
view_converter = view_converter)
if preprocessor:
if which_set in ['train', 'train_all', 'splitted_train']:
can_fit = True
else:
can_fit = False
preprocessor.apply(self, can_fit)
del data_x, data_y
gc.collect()
def get_test_set(self):
"""
.. todo::
WRITEME
"""
return SVHN_On_Memory(which_set = 'test',
center = self.center, scale = self.scale,
start = self.start, stop = self.stop,
axes = self.axes, preprocessor = self.preprocessor)
def make_data(self, which_set, path, shuffle = True):
"""
.. todo::
WRITEME
"""
sizes = {'train': 73257, 'test': 26032, 'extra': 531131,
'train_all': 604388, 'valid': 6000, 'splitted_train' : 598388}
image_size = 32 * 32 * 3
# For consistency between experiments it is better to make a new random stream
rng = make_np_rng(None, 322, which_method="shuffle")
def design_matrix_view(data_x, data_y):
"""reshape data_x to deisng matrix view
and data_y to one_hot
"""
data_x = numpy.transpose(data_x, axes = [3, 2, 0, 1])
data_x = data_x.reshape((data_x.shape[0], 32 * 32 * 3))
# TODO assuming one_hot as default for now
one_hot = numpy.zeros((data_y.shape[0], 10), dtype = config.floatX)
for i in xrange(data_y.shape[0]):
one_hot[i, data_y[i] - 1] = 1.
return data_x, one_hot
def load_data(path):
"Loads data from mat files"
data = load(path)
data_x = numpy.cast[config.floatX](data['X'])
data_y = data['y']
del data
gc.collect()
return design_matrix_view(data_x, data_y)
def split_train_valid(path, num_valid_train = 400,
num_valid_extra = 200):
"""
Extract a number of class-balanced samples from the train and extra
sets for validation, and regard the remainder as the new train set.
Parameters
----------
num_valid_train : int, optional
Number of samples per class from train
num_valid_extra : int, optional
Number of samples per class from extra
"""
# load difficult train
data = load("{0}train_32x32.mat".format(path))
valid_index = []
for i in xrange(1, 11):
index = numpy.nonzero(data['y'] == i)[0]
index.flags.writeable = 1
rng.shuffle(index)
valid_index.append(index[:num_valid_train])
valid_index = set(numpy.concatenate(valid_index))
train_index = set(numpy.arange(data['X'].shape[3])) - valid_index
valid_index = list(valid_index)
train_index = list(train_index)
train_x = data['X'][:, :, :, train_index]
train_y = data['y'][train_index, :]
valid_x = data['X'][:, :, :, valid_index]
valid_y = data['y'][valid_index, :]
train_size = data['X'].shape[3]
assert train_x.shape[3] == train_size - num_valid_train * 10
assert train_y.shape[0] == train_size - num_valid_train * 10
assert valid_x.shape[3] == num_valid_train * 10
assert valid_y.shape[0] == num_valid_train * 10
del data
gc.collect()
# load extra train
data = load("{0}extra_32x32.mat".format(path))
valid_index = []
for i in xrange(1, 11):
index = numpy.nonzero(data['y'] == i)[0]
index.flags.writeable = 1
rng.shuffle(index)
valid_index.append(index[:num_valid_extra])
valid_index = set(numpy.concatenate(valid_index))
train_index = set(numpy.arange(data['X'].shape[3])) - valid_index
valid_index = list(valid_index)
train_index = list(train_index)
train_x = numpy.concatenate((train_x,
data['X'][:, :, :, train_index]), axis = 3)
train_y = numpy.concatenate((train_y, data['y'][train_index, :]))
valid_x = numpy.concatenate((valid_x,
data['X'][:, :, :, valid_index]), axis = 3)
valid_y = numpy.concatenate((valid_y, data['y'][valid_index, :]))
extra_size = data['X'].shape[3]
sizes['valid'] = (num_valid_train + num_valid_extra) * 10
sizes['splitted_train'] = train_size + extra_size - sizes['valid']
assert train_x.shape[3] == sizes['splitted_train']
assert train_y.shape[0] == sizes['splitted_train']
assert valid_x.shape[3] == sizes['valid']
assert valid_y.shape[0] == sizes['valid']
del data
gc.collect()
train_x = numpy.cast[config.floatX](train_x)
valid_x = numpy.cast[config.floatX](valid_x)
return design_matrix_view(train_x, train_y),\
design_matrix_view(valid_x, valid_y)
# The original splits
if which_set in ['train', 'test']:
data_x, data_y = load_data("{0}{1}_32x32.mat".format(path,
which_set))
# Train valid splits
elif which_set in ['splitted_train', 'valid']:
train_data, valid_data = split_train_valid(path)
if which_set == 'splitted_train':
data_x, data_y = train_data
else:
data_x, data_y = valid_data
del train_data
# extra data
elif which_set in ['train_all', 'extra']:
data_x, data_y = load_data("{0}extra_32x32.mat".format(path))
if which_set == 'train_all':
train_x, train_y = load_data("{0}train_32x32.mat".format(path))
data_x = numpy.concatenate((data_x, train_x))
data_y = numpy.concatenate((data_y, train_y))
if shuffle:
index = range(data_x.shape[0])
rng.shuffle(index)
data_x = data_x[index, :]
data_y = data_y[index, :]
assert data_x.shape[0] == sizes[which_set]
assert data_y.shape[0] == sizes[which_set]
return data_x, data_y
| skearnes/pylearn2 | pylearn2/datasets/svhn.py | Python | bsd-3-clause | 18,086 |
"""
This module contains query handlers responsible for calculus queries:
infinitesimal, bounded, etc.
"""
from __future__ import print_function, division
from sympy.logic.boolalg import conjuncts
from sympy.assumptions import Q, ask
from sympy.assumptions.handlers import CommonHandler
class AskInfinitesimalHandler(CommonHandler):
"""
Handler for key 'infinitesimal'
Test that a given expression is equivalent to an infinitesimal
number
"""
@staticmethod
def _number(expr, assumptions):
# helper method
return expr.evalf() == 0
@staticmethod
def Basic(expr, assumptions):
if expr.is_number:
return AskInfinitesimalHandler._number(expr, assumptions)
@staticmethod
def Mul(expr, assumptions):
"""
Infinitesimal*Bounded -> Infinitesimal
"""
if expr.is_number:
return AskInfinitesimalHandler._number(expr, assumptions)
result = False
for arg in expr.args:
if ask(Q.infinitesimal(arg), assumptions):
result = True
elif ask(Q.bounded(arg), assumptions):
continue
else:
break
else:
return result
Add, Pow = [Mul]*2
@staticmethod
def Number(expr, assumptions):
return expr == 0
NumberSymbol = Number
ImaginaryUnit = staticmethod(CommonHandler.AlwaysFalse)
class AskBoundedHandler(CommonHandler):
"""
Handler for key 'bounded'.
Test that an expression is bounded with respect to all its variables.
Examples of usage:
>>> from sympy import Symbol, Q
>>> from sympy.assumptions.handlers.calculus import AskBoundedHandler
>>> from sympy.abc import x
>>> a = AskBoundedHandler()
>>> a.Symbol(x, Q.positive(x)) == None
True
>>> a.Symbol(x, Q.bounded(x))
True
"""
@staticmethod
def Symbol(expr, assumptions):
"""
Handles Symbol.
Examples:
>>> from sympy import Symbol, Q
>>> from sympy.assumptions.handlers.calculus import AskBoundedHandler
>>> from sympy.abc import x
>>> a = AskBoundedHandler()
>>> a.Symbol(x, Q.positive(x)) == None
True
>>> a.Symbol(x, Q.bounded(x))
True
"""
if Q.bounded(expr) in conjuncts(assumptions):
return True
return None
@staticmethod
def Add(expr, assumptions):
"""
Return True if expr is bounded, False if not and None if unknown.
Truth Table:
+-------+-----+-----------+-----------+
| | | | |
| | B | U | ? |
| | | | |
+-------+-----+---+---+---+---+---+---+
| | | | | | | | |
| | |'+'|'-'|'x'|'+'|'-'|'x'|
| | | | | | | | |
+-------+-----+---+---+---+---+---+---+
| | | | |
| B | B | U | ? |
| | | | |
+---+---+-----+---+---+---+---+---+---+
| | | | | | | | | |
| |'+'| | U | ? | ? | U | ? | ? |
| | | | | | | | | |
| +---+-----+---+---+---+---+---+---+
| | | | | | | | | |
| U |'-'| | ? | U | ? | ? | U | ? |
| | | | | | | | | |
| +---+-----+---+---+---+---+---+---+
| | | | | |
| |'x'| | ? | ? |
| | | | | |
+---+---+-----+---+---+---+---+---+---+
| | | | |
| ? | | | ? |
| | | | |
+-------+-----+-----------+---+---+---+
* 'B' = Bounded
* 'U' = Unbounded
* '?' = unknown boundedness
* '+' = positive sign
* '-' = negative sign
* 'x' = sign unknown
|
* All Bounded -> True
* 1 Unbounded and the rest Bounded -> False
* >1 Unbounded, all with same known sign -> False
* Any Unknown and unknown sign -> None
* Else -> None
When the signs are not the same you can have an undefined
result as in oo - oo, hence 'bounded' is also undefined.
"""
sign = -1 # sign of unknown or unbounded
result = True
for arg in expr.args:
_bounded = ask(Q.bounded(arg), assumptions)
if _bounded:
continue
s = ask(Q.positive(arg), assumptions)
# if there has been more than one sign or if the sign of this arg
# is None and Bounded is None or there was already
# an unknown sign, return None
if sign != -1 and s != sign or \
s is None and (s == _bounded or s == sign):
return None
else:
sign = s
# once False, do not change
if result is not False:
result = _bounded
return result
@staticmethod
def Mul(expr, assumptions):
"""
Return True if expr is bounded, False if not and None if unknown.
Truth Table:
+---+---+---+--------+
| | | | |
| | B | U | ? |
| | | | |
+---+---+---+---+----+
| | | | | |
| | | | s | /s |
| | | | | |
+---+---+---+---+----+
| | | | |
| B | B | U | ? |
| | | | |
+---+---+---+---+----+
| | | | | |
| U | | U | U | ? |
| | | | | |
+---+---+---+---+----+
| | | | |
| ? | | | ? |
| | | | |
+---+---+---+---+----+
* B = Bounded
* U = Unbounded
* ? = unknown boundedness
* s = signed (hence nonzero)
* /s = not signed
"""
result = True
for arg in expr.args:
_bounded = ask(Q.bounded(arg), assumptions)
if _bounded:
continue
elif _bounded is None:
if result is None:
return None
if ask(Q.nonzero(arg), assumptions) is None:
return None
if result is not False:
result = None
else:
result = False
return result
@staticmethod
def Pow(expr, assumptions):
"""
Unbounded ** NonZero -> Unbounded
Bounded ** Bounded -> Bounded
Abs()<=1 ** Positive -> Bounded
Abs()>=1 ** Negative -> Bounded
Otherwise unknown
"""
base_bounded = ask(Q.bounded(expr.base), assumptions)
exp_bounded = ask(Q.bounded(expr.exp), assumptions)
if base_bounded is None and exp_bounded is None: # Common Case
return None
if base_bounded is False and ask(Q.nonzero(expr.exp), assumptions):
return False
if base_bounded and exp_bounded:
return True
if (abs(expr.base) <= 1) is True and ask(Q.positive(expr.exp), assumptions):
return True
if (abs(expr.base) >= 1) is True and ask(Q.negative(expr.exp), assumptions):
return True
if (abs(expr.base) >= 1) is True and exp_bounded is False:
return False
return None
@staticmethod
def log(expr, assumptions):
return ask(Q.bounded(expr.args[0]), assumptions)
exp = log
cos, sin, Number, Pi, Exp1, GoldenRatio, ImaginaryUnit, sign = \
[staticmethod(CommonHandler.AlwaysTrue)]*8
Infinity, NegativeInfinity = [staticmethod(CommonHandler.AlwaysFalse)]*2
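# Illustrative check of the Add/Mul truth tables documented above (a sketch,
# not part of the original module; assumes this SymPy version exposes ask and
# Q at the top level, as in the doctests above).
if __name__ == '__main__':
    from sympy import Symbol, ask, Q
    x, y = Symbol('x'), Symbol('y')
    print(ask(Q.bounded(x + y), Q.bounded(x) & Q.bounded(y)))  # expected True: all terms bounded
    print(ask(Q.bounded(x + y), Q.bounded(x)))                 # expected None: one term unknown
    print(ask(Q.bounded(2*x), Q.bounded(x)))                   # expected True: bounded * bounded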
|
hrashk/sympy
|
sympy/assumptions/handlers/calculus.py
|
Python
|
bsd-3-clause
| 8,090
|
# Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from StringIO import StringIO
from webkitpy.common.system.environment import Environment
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.platforminfo_mock import MockPlatformInfo
from webkitpy.common.system.user_mock import MockUser
from webkitpy.common.system.workspace_mock import MockWorkspace
class MockSystemHost(object):
def __init__(self, log_executive=False, executive_throws_when_run=None, os_name=None, os_version=None, executive=None, filesystem=None):
self.executable = 'python'
self.executive = executive or MockExecutive(should_log=log_executive, should_throw_when_run=executive_throws_when_run)
self.filesystem = filesystem or MockFileSystem()
self.user = MockUser()
self.platform = MockPlatformInfo()
if os_name:
self.platform.os_name = os_name
if os_version:
self.platform.os_version = os_version
# FIXME: Should this take pointers to the filesystem and the executive?
self.workspace = MockWorkspace()
self.stdin = StringIO()
self.stdout = StringIO()
self.stderr = StringIO()
def copy_current_environment(self):
return Environment({"MOCK_ENVIRON_COPY": '1'})
def print_(self, *args, **kwargs):
sep = kwargs.get('sep', ' ')
end = kwargs.get('end', '\n')
stream = kwargs.get('stream', self.stdout)
stream.write(sep.join([str(arg) for arg in args]) + end)
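# Illustrative usage (a sketch, not part of the original module; assumes
# webkitpy and its mock modules are importable, and Python 2 because of the
# StringIO import above).
if __name__ == '__main__':
    host = MockSystemHost(os_name='linux', os_version='lucid')
    host.print_('hello', 'world', sep=', ')
    assert host.stdout.getvalue() == 'hello, world\n'
    assert host.platform.os_name == 'linux'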
|
axinging/chromium-crosswalk
|
third_party/WebKit/Tools/Scripts/webkitpy/common/system/systemhost_mock.py
|
Python
|
bsd-3-clause
| 3,090
|
from nbgrader import utils
from nbgrader.preprocessors import NbGraderPreprocessor
class ComputeChecksums(NbGraderPreprocessor):
"""A preprocessor to compute checksums of grade cells."""
def preprocess_cell(self, cell, resources, cell_index):
# compute checksums of grade cell and solution cells
if utils.is_grade(cell) or utils.is_solution(cell) or utils.is_locked(cell):
checksum = utils.compute_checksum(cell)
cell.metadata.nbgrader['checksum'] = checksum
if utils.is_grade(cell) or utils.is_solution(cell):
self.log.debug(
"Checksum for '%s' is %s",
cell.metadata.nbgrader['grade_id'],
checksum)
return cell, resources
|
alope107/nbgrader
|
nbgrader/preprocessors/computechecksums.py
|
Python
|
bsd-3-clause
| 771
|
#!/usr/bin/env python
NAME = 'F5 Trafficshield'
def is_waf(self):
for hv in [['cookie', '^ASINFO='], ['server', 'F5-TrafficShield']]:
r = self.matchheader(hv)
if r is None:
return
elif r:
return r
# the following based on nmap's http-waf-fingerprint.nse
if self.matchheader(('server', 'F5-TrafficShield')):
return True
return False
|
thinksabin/wafw00f
|
wafw00f/plugins/f5trafficshield.py
|
Python
|
bsd-3-clause
| 408
|
from django.conf import settings
from django.contrib.comments.managers import CommentManager
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core import urlresolvers
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
COMMENT_MAX_LENGTH = getattr(settings, 'COMMENT_MAX_LENGTH', 3000)
class BaseCommentAbstractModel(models.Model):
"""
An abstract base class that any custom comment models probably should
subclass.
"""
# Content-object field
content_type = models.ForeignKey(ContentType,
verbose_name=_('content type'),
related_name="content_type_set_for_%(class)s")
object_pk = models.TextField(_('object ID'))
content_object = generic.GenericForeignKey(ct_field="content_type", fk_field="object_pk")
# Metadata about the comment
site = models.ForeignKey(Site)
class Meta:
abstract = True
def get_content_object_url(self):
"""
Get a URL suitable for redirecting to the content object.
"""
return urlresolvers.reverse(
"comments-url-redirect",
args=(self.content_type_id, self.object_pk)
)
@python_2_unicode_compatible
class Comment(BaseCommentAbstractModel):
"""
A user comment about some object.
"""
# Who posted this comment? If ``user`` is set then it was an authenticated
# user; otherwise at least user_name should have been set and the comment
# was posted by a non-authenticated user.
user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('user'),
blank=True, null=True, related_name="%(class)s_comments")
user_name = models.CharField(_("user's name"), max_length=50, blank=True)
user_email = models.EmailField(_("user's email address"), blank=True)
user_url = models.URLField(_("user's URL"), blank=True)
comment = models.TextField(_('comment'), max_length=COMMENT_MAX_LENGTH)
# Metadata about the comment
submit_date = models.DateTimeField(_('date/time submitted'), default=None)
ip_address = models.IPAddressField(_('IP address'), blank=True, null=True)
is_public = models.BooleanField(_('is public'), default=True,
help_text=_('Uncheck this box to make the comment effectively ' \
'disappear from the site.'))
is_removed = models.BooleanField(_('is removed'), default=False,
help_text=_('Check this box if the comment is inappropriate. ' \
'A "This comment has been removed" message will ' \
'be displayed instead.'))
# Manager
objects = CommentManager()
class Meta:
db_table = "django_comments"
ordering = ('submit_date',)
permissions = [("can_moderate", "Can moderate comments")]
verbose_name = _('comment')
verbose_name_plural = _('comments')
def __str__(self):
return "%s: %s..." % (self.name, self.comment[:50])
def save(self, *args, **kwargs):
if self.submit_date is None:
self.submit_date = timezone.now()
super(Comment, self).save(*args, **kwargs)
def _get_userinfo(self):
"""
Get a dictionary that pulls together information about the poster
safely for both authenticated and non-authenticated comments.
This dict will have ``name``, ``email``, and ``url`` fields.
"""
if not hasattr(self, "_userinfo"):
userinfo = {
"name" : self.user_name,
"email" : self.user_email,
"url" : self.user_url
}
if self.user_id:
u = self.user
if u.email:
userinfo["email"] = u.email
# If the user has a full name, use that for the user name.
# However, a given user_name overrides the raw user.username,
# so only use that if this comment has no associated name.
if u.get_full_name():
userinfo["name"] = self.user.get_full_name()
elif not self.user_name:
userinfo["name"] = u.username
self._userinfo = userinfo
return self._userinfo
userinfo = property(_get_userinfo, doc=_get_userinfo.__doc__)
def _get_name(self):
return self.userinfo["name"]
def _set_name(self, val):
if self.user_id:
raise AttributeError(_("This comment was posted by an authenticated "\
"user and thus the name is read-only."))
self.user_name = val
name = property(_get_name, _set_name, doc="The name of the user who posted this comment")
def _get_email(self):
return self.userinfo["email"]
def _set_email(self, val):
if self.user_id:
raise AttributeError(_("This comment was posted by an authenticated "\
"user and thus the email is read-only."))
self.user_email = val
email = property(_get_email, _set_email, doc="The email of the user who posted this comment")
def _get_url(self):
return self.userinfo["url"]
def _set_url(self, val):
self.user_url = val
url = property(_get_url, _set_url, doc="The URL given by the user who posted this comment")
def get_absolute_url(self, anchor_pattern="#c%(id)s"):
return self.get_content_object_url() + (anchor_pattern % self.__dict__)
def get_as_text(self):
"""
Return this comment as plain text. Useful for emails.
"""
d = {
'user': self.user or self.name,
'date': self.submit_date,
'comment': self.comment,
'domain': self.site.domain,
'url': self.get_absolute_url()
}
return _('Posted by %(user)s at %(date)s\n\n%(comment)s\n\nhttp://%(domain)s%(url)s') % d
@python_2_unicode_compatible
class CommentFlag(models.Model):
"""
Records a flag on a comment. This is intentionally flexible; right now, a
flag could be:
* A "removal suggestion" -- where a user suggests a comment for (potential) removal.
* A "moderator deletion" -- used when a moderator deletes a comment.
You can (ab)use this model to add other flags, if needed. However, by
design users are only allowed to flag a comment with a given flag once;
if you want rating look elsewhere.
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('user'), related_name="comment_flags")
comment = models.ForeignKey(Comment, verbose_name=_('comment'), related_name="flags")
flag = models.CharField(_('flag'), max_length=30, db_index=True)
flag_date = models.DateTimeField(_('date'), default=None)
# Constants for flag types
SUGGEST_REMOVAL = "removal suggestion"
MODERATOR_DELETION = "moderator deletion"
MODERATOR_APPROVAL = "moderator approval"
class Meta:
db_table = 'django_comment_flags'
unique_together = [('user', 'comment', 'flag')]
verbose_name = _('comment flag')
verbose_name_plural = _('comment flags')
def __str__(self):
return "%s flag of comment ID %s by %s" % \
(self.flag, self.comment_id, self.user.username)
def save(self, *args, **kwargs):
if self.flag_date is None:
self.flag_date = timezone.now()
super(CommentFlag, self).save(*args, **kwargs)
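# Illustrative behaviour of the name property defined on Comment above (a
# sketch, not part of the original module; shown as comments because
# instantiating models requires a configured Django project):
#
#     c = Comment(user=None, user_name="guest", comment="Nice post!")
#     c.name              # -> "guest"
#     c.name = "visitor"  # allowed: the comment is not tied to a user
#     c.user = some_user  # hypothetical authenticated user with a primary key
#     c.name = "other"    # raises AttributeError: read-only once user_id is set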
|
RichardLitt/wyrd-django-dev
|
django/contrib/comments/models.py
|
Python
|
bsd-3-clause
| 7,766
|
import unittest
from rmgpy.molecule.molecule import Molecule
from .parser import *
class ParserTest(unittest.TestCase):
def test_fromAugmentedInChI(self):
aug_inchi = 'InChI=1S/CH4/h1H4'
mol = fromAugmentedInChI(Molecule(), aug_inchi)
self.assertTrue(not mol.InChI == '')
aug_inchi = 'InChI=1/CH4/h1H4'
mol = fromAugmentedInChI(Molecule(), aug_inchi)
self.assertTrue(not mol.InChI == '')
def test_toRDKitMol(self):
"""
Test that toRDKitMol returns correct indices and atom mappings.
"""
bondOrderDict = {'SINGLE':'S','DOUBLE':'D','TRIPLE':'T','AROMATIC':'B'}
mol = fromSMILES(Molecule(), 'C1CCC=C1C=O')
rdkitmol, rdAtomIndices = mol.toRDKitMol(removeHs=False, returnMapping=True, sanitize=True)
for atom in mol.atoms:
# Check that all atoms are found in mapping
self.assertTrue(atom in rdAtomIndices)
# Check that all bonds are in rdkitmol with correct mapping and order
for connectedAtom, bond in atom.bonds.iteritems():
bondType = str(rdkitmol.GetBondBetweenAtoms(rdAtomIndices[atom],rdAtomIndices[connectedAtom]).GetBondType())
rdkitBondOrder = bondOrderDict[bondType]
self.assertEqual(bond.order, rdkitBondOrder)
# Test for removeHs = True
rdkitmol2, rdAtomIndices2 = mol.toRDKitMol(removeHs=True, returnMapping=True, sanitize=True)
for atom in mol.atoms:
# Check that all non-hydrogen atoms are found in mapping
if atom.symbol != 'H':
self.assertTrue(atom in rdAtomIndices)
# Check that all bonds connected to non-hydrogen have the correct mapping and order
for connectedAtom, bond in atom.bonds.iteritems():
if connectedAtom.symbol != 'H':
bondType = str(rdkitmol.GetBondBetweenAtoms(rdAtomIndices[atom],rdAtomIndices[connectedAtom]).GetBondType())
rdkitBondOrder = bondOrderDict[bondType]
self.assertEqual(bond.order, rdkitBondOrder)
class ResetLonePairsTest(unittest.TestCase):
def test_Methane(self):
smi = 'C'
mol = Molecule().fromSMILES(smi)
p_indices = []
reset_lone_pairs(mol, p_indices)
for at in mol.atoms:
self.assertEquals(at.lonePairs, 0)
def test_SingletMethylene(self):
adjlist = """
multiplicity 1
1 C u0 p1 c0 {2,S} {3,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}
"""
mol = Molecule().fromAdjacencyList(adjlist)
p_indices = [1]
reset_lone_pairs(mol, p_indices)
for at in mol.atoms:
if at.symbol == 'C':
self.assertEquals(at.lonePairs, 1)
else:
self.assertEquals(at.lonePairs, 0)
|
chatelak/RMG-Py
|
rmgpy/molecule/parserTest.py
|
Python
|
mit
| 2,890
|
import copy
from collections import OrderedDict
from collections.abc import Mapping
class OrderedSet:
"""
A set which keeps the ordering of the inserted items.
Currently backs onto OrderedDict.
"""
def __init__(self, iterable=None):
self.dict = OrderedDict.fromkeys(iterable or ())
def add(self, item):
self.dict[item] = None
def remove(self, item):
del self.dict[item]
def discard(self, item):
try:
self.remove(item)
except KeyError:
pass
def __iter__(self):
return iter(self.dict)
def __contains__(self, item):
return item in self.dict
def __bool__(self):
return bool(self.dict)
def __len__(self):
return len(self.dict)
class MultiValueDictKeyError(KeyError):
pass
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the
same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.getlist('doesnotexist')
[]
>>> d.getlist('doesnotexist', ['Adrian', 'Simon'])
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
super().__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, super().__repr__())
def __getitem__(self, key):
"""
Return the last data value for this key, or [] if it's an empty list;
raise KeyError if not found.
"""
try:
list_ = super().__getitem__(key)
except KeyError:
raise MultiValueDictKeyError(key)
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super().__setitem__(key, [value])
def __copy__(self):
return self.__class__([
(k, v[:])
for k, v in self.lists()
])
def __deepcopy__(self, memo):
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
return result
def __getstate__(self):
return {**self.__dict__, '_data': {k: self._getlist(k) for k in self}}
def __setstate__(self, obj_dict):
data = obj_dict.pop('_data', {})
for k, v in data.items():
self.setlist(k, v)
self.__dict__.update(obj_dict)
def get(self, key, default=None):
"""
Return the last data value for the passed key. If key doesn't exist
or value is an empty list, return `default`.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def _getlist(self, key, default=None, force_list=False):
"""
Return a list of values for the key.
Used internally to manipulate values list. If force_list is True,
return a new copy of values.
"""
try:
values = super().__getitem__(key)
except KeyError:
if default is None:
return []
return default
else:
if force_list:
values = list(values) if values is not None else None
return values
def getlist(self, key, default=None):
"""
Return the list of values for the key. If key doesn't exist, return a
default value.
"""
return self._getlist(key, default, force_list=True)
def setlist(self, key, list_):
super().__setitem__(key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
# Do not return default here because __setitem__() may store
# another value -- QueryDict.__setitem__() does. Look it up.
return self[key]
def setlistdefault(self, key, default_list=None):
if key not in self:
if default_list is None:
default_list = []
self.setlist(key, default_list)
# Do not return default_list here because setlist() may store
# another value -- QueryDict.setlist() does. Look it up.
return self._getlist(key)
def appendlist(self, key, value):
"""Append an item to the internal list associated with key."""
self.setlistdefault(key).append(value)
def items(self):
"""
Yield (key, value) pairs, where value is the last item in the list
associated with the key.
"""
for key in self:
yield key, self[key]
def lists(self):
"""Yield (key, list) pairs."""
return iter(super().items())
def values(self):
"""Yield the last value on every key list."""
for key in self:
yield self[key]
def copy(self):
"""Return a shallow copy of this object."""
return copy.copy(self)
def update(self, *args, **kwargs):
"""Extend rather than replace existing key lists."""
if len(args) > 1:
raise TypeError("update expected at most 1 argument, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in kwargs.items():
self.setlistdefault(key).append(value)
def dict(self):
"""Return current object as a dict with singular values."""
return {key: self[key] for key in self}
class ImmutableList(tuple):
"""
A tuple-like object that raises useful errors when it is asked to mutate.
Example::
>>> a = ImmutableList(range(5), warning="You cannot mutate this.")
>>> a[3] = '4'
Traceback (most recent call last):
...
AttributeError: You cannot mutate this.
"""
def __new__(cls, *args, warning='ImmutableList object is immutable.', **kwargs):
self = tuple.__new__(cls, *args, **kwargs)
self.warning = warning
return self
def complain(self, *wargs, **kwargs):
if isinstance(self.warning, Exception):
raise self.warning
else:
raise AttributeError(self.warning)
# All list mutation functions complain.
__delitem__ = complain
__delslice__ = complain
__iadd__ = complain
__imul__ = complain
__setitem__ = complain
__setslice__ = complain
append = complain
extend = complain
insert = complain
pop = complain
remove = complain
sort = complain
reverse = complain
class DictWrapper(dict):
"""
Wrap accesses to a dictionary so that certain values (those starting with
the specified prefix) are passed through a function before being returned.
The prefix is removed before looking up the real value.
Used by the SQL construction code to ensure that values are correctly
quoted before being used.
"""
def __init__(self, data, func, prefix):
super().__init__(data)
self.func = func
self.prefix = prefix
def __getitem__(self, key):
"""
Retrieve the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
"""
use_func = key.startswith(self.prefix)
if use_func:
key = key[len(self.prefix):]
value = super().__getitem__(key)
if use_func:
return self.func(value)
return value
def _destruct_iterable_mapping_values(data):
for i, elem in enumerate(data):
if len(elem) != 2:
raise ValueError(
'dictionary update sequence element #{} has '
'length {}; 2 is required.'.format(i, len(elem))
)
if not isinstance(elem[0], str):
raise ValueError('Element key %r invalid, only strings are allowed' % elem[0])
yield tuple(elem)
class CaseInsensitiveMapping(Mapping):
"""
Mapping allowing case-insensitive key lookups. Original case of keys is
preserved for iteration and string representation.
Example::
>>> ci_map = CaseInsensitiveMapping({'name': 'Jane'})
>>> ci_map['Name']
Jane
>>> ci_map['NAME']
Jane
>>> ci_map['name']
Jane
>>> ci_map # original case preserved
{'name': 'Jane'}
"""
def __init__(self, data):
if not isinstance(data, Mapping):
data = {k: v for k, v in _destruct_iterable_mapping_values(data)}
self._store = {k.lower(): (k, v) for k, v in data.items()}
def __getitem__(self, key):
return self._store[key.lower()][1]
def __len__(self):
return len(self._store)
def __eq__(self, other):
return isinstance(other, Mapping) and {
k.lower(): v for k, v in self.items()
} == {
k.lower(): v for k, v in other.items()
}
def __iter__(self):
return (original_key for original_key, value in self._store.values())
def __repr__(self):
return repr({key: value for key, value in self._store.values()})
def copy(self):
return self
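# Illustrative usage (a sketch, not part of the original module): a quick
# check of the MultiValueDict and CaseInsensitiveMapping behaviour documented
# in the docstrings above.
if __name__ == '__main__':
    d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
    assert d['name'] == 'Simon'                      # __getitem__ returns the last value
    assert d.getlist('name') == ['Adrian', 'Simon']
    d.appendlist('name', 'Jacob')
    assert d.getlist('name') == ['Adrian', 'Simon', 'Jacob']
    headers = CaseInsensitiveMapping({'Content-Type': 'text/html'})
    assert headers['content-type'] == 'text/html'    # lookups ignore case
    assert list(headers) == ['Content-Type']         # original case preserved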
|
sametmax/Django--an-app-at-a-time
|
ignore_this_directory/django/utils/datastructures.py
|
Python
|
mit
| 10,194
|
"""SCons.Tool.sgicc
Tool-specific initialization for MIPSPro cc on SGI.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
from . import cc
def generate(env):
"""Add Builders and construction variables for gcc to an Environment."""
cc.generate(env)
env['CXX'] = 'CC'
env['SHOBJSUFFIX'] = '.o'
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
def exists(env):
return env.Detect('cc')
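# Illustrative SConstruct usage (a sketch, not part of the original module):
# listing this tool by name makes SCons.Tool.Tool() import it and call
# generate() above on the construction environment.
#
#     env = Environment(tools=['default', 'sgicc'])
#     env.Program('hello.c')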
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
timj/scons
|
src/engine/SCons/Tool/sgicc.py
|
Python
|
mit
| 1,780
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import urlparse
import kodi
import log_utils # @UnusedImport
import dom_parser2
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
from salts_lib.utils2 import i18n
import scraper
BASE_URL = 'http://treasureen.com'
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'treasureen'
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if not source_url or source_url == FORCE_NO_MATCH: return hosters
url = scraper_utils.urljoin(self.base_url, source_url)
html = self._http_get(url, require_debrid=False, cache_limit=.5)
title = dom_parser2.parse_dom(html, 'meta', {'property': 'og:title'}, req='content')
meta = scraper_utils.parse_movie_link(title[0].attrs['content']) if title else {}
fragment = dom_parser2.parse_dom(html, 'p', {'class': 'download_message'})
if fragment:
for attrs, _content in dom_parser2.parse_dom(fragment[0].content, 'a', req='href'):
source = attrs['href']
if scraper_utils.excluded_link(source): continue
host = urlparse.urlparse(source).hostname
quality = scraper_utils.height_get_quality(meta.get('height', 480))
hoster = {'multi-part': False, 'host': host, 'class': self, 'views': None, 'url': source, 'rating': None, 'quality': quality, 'direct': True}
if 'format' in meta: hoster['format'] = meta['format']
hosters.append(hoster)
return hosters
def get_url(self, video):
return self._blog_get_url(video)
@classmethod
def get_settings(cls):
settings = super(cls, cls).get_settings()
settings = scraper_utils.disable_sub_check(settings)
name = cls.get_name()
settings.append(' <setting id="%s-filter" type="slider" range="0,180" option="int" label=" %s" default="30" visible="eq(-3,true)"/>' % (name, i18n('filter_results_days')))
settings.append(' <setting id="%s-select" type="enum" label=" %s" lvalues="30636|30637" default="0" visible="eq(-4,true)"/>' % (name, i18n('auto_select')))
return settings
def search(self, video_type, title, year, season=''): # @UnusedVariable
html = self._http_get(self.base_url, params={'s': title}, require_debrid=False, cache_limit=8)
post_pattern = 'class="post-thumbnail">\s*<a[^>]+href="(?P<url>[^"]+)[^>]*[^>]+title="(?P<post_title>[^"]+).*?datetime="(?P<date>[^"]+)'
date_format = '%Y-%m-%d %H:%M:%S'
return self._blog_proc_results(html, post_pattern, date_format, video_type, title, year)
|
mrquim/mrquimrepo
|
repo/plugin.video.salts/scrapers/treasureen_scraper.py
|
Python
|
gpl-2.0
| 3,727
|
from pyjsparserdata import *
class BaseNode:
def finish(self):
pass
def finishArrayExpression(self, elements):
self.type = Syntax.ArrayExpression
self.elements = elements
self.finish()
return self
def finishArrayPattern(self, elements):
self.type = Syntax.ArrayPattern
self.elements = elements
self.finish()
return self
def finishArrowFunctionExpression(self, params, defaults, body, expression):
self.type = Syntax.ArrowFunctionExpression
self.id = None
self.params = params
self.defaults = defaults
self.body = body
self.generator = False
self.expression = expression
self.finish()
return self
def finishAssignmentExpression(self, operator, left, right):
self.type = Syntax.AssignmentExpression
self.operator = operator
self.left = left
self.right = right
self.finish()
return self
def finishAssignmentPattern(self, left, right):
self.type = Syntax.AssignmentPattern
self.left = left
self.right = right
self.finish()
return self
def finishBinaryExpression(self, operator, left, right):
self.type = Syntax.LogicalExpression if (operator == '||' or operator == '&&') else Syntax.BinaryExpression
self.operator = operator
self.left = left
self.right = right
self.finish()
return self
def finishBlockStatement(self, body):
self.type = Syntax.BlockStatement
self.body = body
self.finish()
return self
def finishBreakStatement(self, label):
self.type = Syntax.BreakStatement
self.label = label
self.finish()
return self
def finishCallExpression(self, callee, args):
self.type = Syntax.CallExpression
self.callee = callee
self.arguments = args
self.finish()
return self
def finishCatchClause(self, param, body):
self.type = Syntax.CatchClause
self.param = param
self.body = body
self.finish()
return self
def finishClassBody(self, body):
self.type = Syntax.ClassBody
self.body = body
self.finish()
return self
def finishClassDeclaration(self, id, superClass, body):
self.type = Syntax.ClassDeclaration
self.id = id
self.superClass = superClass
self.body = body
self.finish()
return self
def finishClassExpression(self, id, superClass, body):
self.type = Syntax.ClassExpression
self.id = id
self.superClass = superClass
self.body = body
self.finish()
return self
def finishConditionalExpression(self, test, consequent, alternate):
self.type = Syntax.ConditionalExpression
self.test = test
self.consequent = consequent
self.alternate = alternate
self.finish()
return self
def finishContinueStatement(self, label):
self.type = Syntax.ContinueStatement
self.label = label
self.finish()
return self
def finishDebuggerStatement(self, ):
self.type = Syntax.DebuggerStatement
self.finish()
return self
def finishDoWhileStatement(self, body, test):
self.type = Syntax.DoWhileStatement
self.body = body
self.test = test
self.finish()
return self
def finishEmptyStatement(self, ):
self.type = Syntax.EmptyStatement
self.finish()
return self
def finishExpressionStatement(self, expression):
self.type = Syntax.ExpressionStatement
self.expression = expression
self.finish()
return self
def finishForStatement(self, init, test, update, body):
self.type = Syntax.ForStatement
self.init = init
self.test = test
self.update = update
self.body = body
self.finish()
return self
def finishForInStatement(self, left, right, body):
self.type = Syntax.ForInStatement
self.left = left
self.right = right
self.body = body
self.each = False
self.finish()
return self
def finishFunctionDeclaration(self, id, params, defaults, body):
self.type = Syntax.FunctionDeclaration
self.id = id
self.params = params
self.defaults = defaults
self.body = body
self.generator = False
self.expression = False
self.finish()
return self
def finishFunctionExpression(self, id, params, defaults, body):
self.type = Syntax.FunctionExpression
self.id = id
self.params = params
self.defaults = defaults
self.body = body
self.generator = False
self.expression = False
self.finish()
return self
def finishIdentifier(self, name):
self.type = Syntax.Identifier
self.name = name
self.finish()
return self
def finishIfStatement(self, test, consequent, alternate):
self.type = Syntax.IfStatement
self.test = test
self.consequent = consequent
self.alternate = alternate
self.finish()
return self
def finishLabeledStatement(self, label, body):
self.type = Syntax.LabeledStatement
self.label = label
self.body = body
self.finish()
return self
def finishLiteral(self, token):
self.type = Syntax.Literal
self.value = token['value']
self.raw = None # todo fix it?
if token.get('regex'):
self.regex = token['regex']
self.finish()
return self
def finishMemberExpression(self, accessor, object, property):
self.type = Syntax.MemberExpression
self.computed = accessor == '['
self.object = object
self.property = property
self.finish()
return self
def finishNewExpression(self, callee, args):
self.type = Syntax.NewExpression
self.callee = callee
self.arguments = args
self.finish()
return self
def finishObjectExpression(self, properties):
self.type = Syntax.ObjectExpression
self.properties = properties
self.finish()
return self
def finishObjectPattern(self, properties):
self.type = Syntax.ObjectPattern
self.properties = properties
self.finish()
return self
def finishPostfixExpression(self, operator, argument):
self.type = Syntax.UpdateExpression
self.operator = operator
self.argument = argument
self.prefix = False
self.finish()
return self
def finishProgram(self, body):
self.type = Syntax.Program
self.body = body
self.finish()
return self
def finishPyimport(self, imp):
self.type = 'PyimportStatement'
self.imp = imp
self.finish()
return self
def finishProperty(self, kind, key, computed, value, method, shorthand):
self.type = Syntax.Property
self.key = key
self.computed = computed
self.value = value
self.kind = kind
self.method = method
self.shorthand = shorthand
self.finish()
return self
def finishRestElement(self, argument):
self.type = Syntax.RestElement
self.argument = argument
self.finish()
return self
def finishReturnStatement(self, argument):
self.type = Syntax.ReturnStatement
self.argument = argument
self.finish()
return self
def finishSequenceExpression(self, expressions):
self.type = Syntax.SequenceExpression
self.expressions = expressions
self.finish()
return self
def finishSpreadElement(self, argument):
self.type = Syntax.SpreadElement
self.argument = argument
self.finish()
return self
def finishSwitchCase(self, test, consequent):
self.type = Syntax.SwitchCase
self.test = test
self.consequent = consequent
self.finish()
return self
def finishSuper(self, ):
self.type = Syntax.Super
self.finish()
return self
def finishSwitchStatement(self, discriminant, cases):
self.type = Syntax.SwitchStatement
self.discriminant = discriminant
self.cases = cases
self.finish()
return self
def finishTaggedTemplateExpression(self, tag, quasi):
self.type = Syntax.TaggedTemplateExpression
self.tag = tag
self.quasi = quasi
self.finish()
return self
def finishTemplateElement(self, value, tail):
self.type = Syntax.TemplateElement
self.value = value
self.tail = tail
self.finish()
return self
def finishTemplateLiteral(self, quasis, expressions):
self.type = Syntax.TemplateLiteral
self.quasis = quasis
self.expressions = expressions
self.finish()
return self
def finishThisExpression(self, ):
self.type = Syntax.ThisExpression
self.finish()
return self
def finishThrowStatement(self, argument):
self.type = Syntax.ThrowStatement
self.argument = argument
self.finish()
return self
def finishTryStatement(self, block, handler, finalizer):
self.type = Syntax.TryStatement
self.block = block
self.guardedHandlers = []
self.handlers = [handler] if handler else []
self.handler = handler
self.finalizer = finalizer
self.finish()
return self
def finishUnaryExpression(self, operator, argument):
self.type = Syntax.UpdateExpression if (operator == '++' or operator == '--') else Syntax.UnaryExpression
self.operator = operator
self.argument = argument
self.prefix = True
self.finish()
return self
def finishVariableDeclaration(self, declarations):
self.type = Syntax.VariableDeclaration
self.declarations = declarations
self.kind = 'var'
self.finish()
return self
def finishLexicalDeclaration(self, declarations, kind):
self.type = Syntax.VariableDeclaration
self.declarations = declarations
self.kind = kind
self.finish()
return self
def finishVariableDeclarator(self, id, init):
self.type = Syntax.VariableDeclarator
self.id = id
self.init = init
self.finish()
return self
def finishWhileStatement(self, test, body):
self.type = Syntax.WhileStatement
self.test = test
self.body = body
self.finish()
return self
def finishWithStatement(self, object, body):
self.type = Syntax.WithStatement
self.object = object
self.body = body
self.finish()
return self
def finishExportSpecifier(self, local, exported):
self.type = Syntax.ExportSpecifier
self.exported = exported or local
self.local = local
self.finish()
return self
def finishImportDefaultSpecifier(self, local):
self.type = Syntax.ImportDefaultSpecifier
self.local = local
self.finish()
return self
def finishImportNamespaceSpecifier(self, local):
self.type = Syntax.ImportNamespaceSpecifier
self.local = local
self.finish()
return self
def finishExportNamedDeclaration(self, declaration, specifiers, src):
self.type = Syntax.ExportNamedDeclaration
self.declaration = declaration
self.specifiers = specifiers
self.source = src
self.finish()
return self
def finishExportDefaultDeclaration(self, declaration):
self.type = Syntax.ExportDefaultDeclaration
self.declaration = declaration
self.finish()
return self
def finishExportAllDeclaration(self, src):
self.type = Syntax.ExportAllDeclaration
self.source = src
self.finish()
return self
def finishImportSpecifier(self, local, imported):
self.type = Syntax.ImportSpecifier
self.local = local or imported
self.imported = imported
self.finish()
return self
def finishImportDeclaration(self, specifiers, src):
self.type = Syntax.ImportDeclaration
self.specifiers = specifiers
self.source = src
self.finish()
return self
def __getitem__(self, item):
return getattr(self, item)
def __setitem__(self, key, value):
setattr(self, key, value)
class Node(BaseNode):
pass
class WrappingNode(BaseNode):
def __init__(self, startToken=None):
pass
def node_to_dict(node):
if isinstance(node, list):
return [node_to_dict(e) for e in node]
elif isinstance(node, dict):
return {k:node_to_dict(v) for k,v in node.iteritems()}
elif not isinstance(node, BaseNode):
return node
return {k:node_to_dict(v) for k, v in node.__dict__.iteritems()}
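# Illustrative usage (a sketch, not part of the original module; assumes the
# sibling pyjsparserdata module is importable, and Python 2 because
# node_to_dict relies on dict.iteritems).
if __name__ == '__main__':
    one = Node().finishLiteral({'value': 1})
    two = Node().finishLiteral({'value': 2})
    expr = Node().finishBinaryExpression('+', one, two)
    print(node_to_dict(expr))  # e.g. {'type': 'BinaryExpression', 'operator': '+', ...}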
|
AMOboxTV/AMOBox.LegoBuild
|
script.module.liveresolver/lib/js2py/translators/std_nodes.py
|
Python
|
gpl-2.0
| 15,390
|
import logging
from ..util import properties
from ..util import XDG
from ..backend import KeyringBackend
from ..errors import (InitError, PasswordDeleteError,
ExceptionRaisedContext)
try:
import secretstorage
import secretstorage.exceptions as exceptions
except ImportError:
pass
log = logging.getLogger(__name__)
class Keyring(KeyringBackend):
"""Secret Service Keyring"""
@properties.ClassProperty
@classmethod
@XDG.Preference('Gnome')
def priority(cls):
with ExceptionRaisedContext() as exc:
secretstorage.__name__
if exc:
raise RuntimeError("SecretStorage required")
if not hasattr(secretstorage, 'get_default_collection'):
raise RuntimeError("SecretStorage 1.0 or newer required")
try:
bus = secretstorage.dbus_init()
list(secretstorage.get_all_collections(bus))
except exceptions.SecretServiceNotAvailableException as e:
raise RuntimeError(
"Unable to initialize SecretService: %s" % e)
return 5
def get_default_collection(self):
bus = secretstorage.dbus_init()
try:
collection = secretstorage.get_default_collection(bus)
except exceptions.SecretStorageException as e:
raise InitError("Failed to create the collection: %s." % e)
if collection.is_locked():
collection.unlock()
if collection.is_locked(): # User dismissed the prompt
raise InitError("Failed to unlock the collection!")
return collection
def get_password(self, service, username):
"""Get password of the username for the service
"""
collection = self.get_default_collection()
items = collection.search_items(
{"username": username, "service": service})
for item in items:
return item.get_secret().decode('utf-8')
def set_password(self, service, username, password):
"""Set password for the username of the service
"""
collection = self.get_default_collection()
attributes = {
"application": "python-keyring",
"service": service,
"username": username
}
label = "Password for '%s' on '%s'" % (username, service)
collection.create_item(label, attributes, password, replace=True)
def delete_password(self, service, username):
"""Delete the stored password (only the first one)
"""
collection = self.get_default_collection()
items = collection.search_items(
{"username": username, "service": service})
for item in items:
return item.delete()
raise PasswordDeleteError("No such password!")
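# Illustrative usage (a sketch, not part of the original module): requires the
# secretstorage package and a running Secret Service daemon, so it is shown as
# comments rather than executable code.
#
#     kr = Keyring()
#     kr.set_password('example-service', 'alice', 's3cret')
#     assert kr.get_password('example-service', 'alice') == 's3cret'
#     kr.delete_password('example-service', 'alice')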
|
ruuk/script.module.password.storage
|
lib/keyring/backends/SecretService.py
|
Python
|
gpl-2.0
| 2,786
|
# -*- coding: utf-8 -*-
#
# GObject-Introspection documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 16 15:34:52 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GObject-Introspection'
copyright = u'2013, Dieter Verfaillie'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GObject-Introspectiondoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'GObject-Introspection.tex', u'GObject-Introspection Documentation',
u'Dieter Verfaillie', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gobject-introspection', u'GObject-Introspection Documentation',
[u'Dieter Verfaillie'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'GObject-Introspection', u'GObject-Introspection Documentation',
u'Dieter Verfaillie', 'GObject-Introspection', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
davibe/gobject-introspection
|
docs/giscanner/conf.py
|
Python
|
gpl-2.0
| 8,112
|
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
from __future__ import print_function
import six
import socket
if six.PY3:
from base64 import encodebytes as base64encode
else:
from base64 import encodestring as base64encode
import struct
import threading
# websocket modules
from ._exceptions import *
from ._abnf import *
from ._socket import *
from ._utils import *
from ._url import *
from ._logging import *
from ._http import *
from ._handshake import *
from ._ssl_compat import *
"""
websocket python client.
=========================
This version supports only hybi-13.
Please see http://tools.ietf.org/html/rfc6455 for protocol.
"""
def create_connection(url, timeout=None, **options):
"""
Connect to url and return a WebSocket object.
Passing the optional timeout parameter will set the timeout on the socket.
If no timeout is supplied,
the global default timeout setting returned by getdefaulttimeout() is used.
You can customize using 'options'.
If you set "header" list object, you can set your own custom header.
>>> conn = create_connection("ws://echo.websocket.org/",
... header=["User-Agent: MyProgram",
... "x-custom: header"])
timeout: socket timeout time. This value is an integer.
If you set None for this value,
it means "use the default_timeout value".
options: "header" -> custom http header list or dict.
"cookie" -> cookie value.
"origin" -> custom origin url.
"host" -> custom host header string.
"http_proxy_host" - http proxy host name.
"http_proxy_port" - http proxy port. If not set, set to 80.
"http_no_proxy" - host names, which doesn't use proxy.
"http_proxy_auth" - http proxy auth infomation.
tuple of username and password.
default is None
"enable_multithread" -> enable lock for multithread.
"sockopt" -> socket options
"sslopt" -> ssl option
"subprotocols" - array of available sub protocols.
default is None.
"skip_utf8_validation" - skip utf8 validation.
"""
sockopt = options.get("sockopt", [])
sslopt = options.get("sslopt", {})
fire_cont_frame = options.get("fire_cont_frame", False)
enable_multithread = options.get("enable_multithread", False)
skip_utf8_validation = options.get("skip_utf8_validation", False)
websock = WebSocket(sockopt=sockopt, sslopt=sslopt,
fire_cont_frame=fire_cont_frame,
enable_multithread=enable_multithread,
skip_utf8_validation=skip_utf8_validation)
websock.settimeout(timeout if timeout is not None else getdefaulttimeout())
websock.connect(url, **options)
return websock
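# Illustrative usage of create_connection() (a sketch, not part of the
# original module; echo.websocket.org is the example host used in the
# docstrings of this module and may not be reachable, so this is shown as
# comments rather than executable code):
#
#     ws = create_connection("ws://echo.websocket.org/")
#     ws.send("Hello, Server")
#     print(ws.recv())
#     ws.close()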
class WebSocket(object):
"""
Low level WebSocket interface.
This class is based on
The WebSocket protocol draft-hixie-thewebsocketprotocol-76
http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
We can connect to the websocket server and send/receive data.
The following example is an echo client.
>>> import websocket
>>> ws = websocket.WebSocket()
>>> ws.connect("ws://echo.websocket.org")
>>> ws.send("Hello, Server")
>>> ws.recv()
'Hello, Server'
>>> ws.close()
get_mask_key: a callable to produce new mask keys, see the set_mask_key
function's docstring for more details
sockopt: values for socket.setsockopt.
sockopt must be a tuple and each element is an argument of sock.setsockopt.
sslopt: dict object for ssl socket option.
fire_cont_frame: fire recv event for each cont frame. default is False
enable_multithread: if set to True, lock send method.
skip_utf8_validation: skip utf8 validation.
"""
def __init__(self, get_mask_key=None, sockopt=None, sslopt=None,
fire_cont_frame=False, enable_multithread=False,
skip_utf8_validation=False):
"""
Initialize a WebSocket object.
"""
self.sock_opt = sock_opt(sockopt, sslopt)
self.handshake_response = None
self.sock = None
self.connected = False
self.get_mask_key = get_mask_key
# These buffer over the build-up of a single frame.
self.frame_buffer = frame_buffer(self._recv, skip_utf8_validation)
self.cont_frame = continuous_frame(fire_cont_frame, skip_utf8_validation)
if enable_multithread:
self.lock = threading.Lock()
else:
self.lock = NoLock()
def __iter__(self):
"""
Allow iteration over websocket, implying sequential `recv` executions.
"""
while True:
yield self.recv()
def __next__(self):
return self.recv()
def next(self):
return self.__next__()
def fileno(self):
return self.sock.fileno()
def set_mask_key(self, func):
"""
Set the function used to create mask keys. You can customize the mask key
generator. Mainly, this is for testing purposes.
func: callable object. The function must take one integer argument,
the length of the mask key, and return a string (byte array)
of that length.
"""
self.get_mask_key = func
def gettimeout(self):
"""
Get the websocket timeout(second).
"""
return self.sock_opt.timeout
def settimeout(self, timeout):
"""
Set the timeout to the websocket.
timeout: timeout time(second).
"""
self.sock_opt.timeout = timeout
if self.sock:
self.sock.settimeout(timeout)
timeout = property(gettimeout, settimeout)
def getsubprotocol(self):
"""
get subprotocol
"""
if self.handshake_response:
return self.handshake_response.subprotocol
else:
return None
subprotocol = property(getsubprotocol)
def getstatus(self):
"""
get handshake status
"""
if self.handshake_response:
return self.handshake_response.status
else:
return None
status = property(getstatus)
def getheaders(self):
"""
get handshake response header
"""
if self.handshake_response:
return self.handshake_response.headers
else:
return None
headers = property(getheaders)
def connect(self, url, **options):
"""
Connect to url. url is websocket url scheme.
ie. ws://host:port/resource
You can customize using 'options'.
If you set "header" list object, you can set your own custom header.
>>> ws = WebSocket()
>>> ws.connect("ws://echo.websocket.org/",
... header=["User-Agent: MyProgram",
... "x-custom: header"])
timeout: socket timeout time. This value is an integer.
If you set None for this value,
it means "use the default_timeout value".
options: "header" -> custom http header list or dict.
"cookie" -> cookie value.
"origin" -> custom origin url.
"host" -> custom host header string.
"http_proxy_host" - http proxy host name.
"http_proxy_port" - http proxy port. If not set, set to 80.
"http_no_proxy" - host names, which doesn't use proxy.
"http_proxy_auth" - http proxy auth infomation.
tuple of username and password.
defualt is None
"subprotocols" - array of available sub protocols.
default is None.
"""
self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options))
try:
self.handshake_response = handshake(self.sock, *addrs, **options)
self.connected = True
except:
if self.sock:
self.sock.close()
self.sock = None
raise
def send(self, payload, opcode=ABNF.OPCODE_TEXT):
"""
Send the data as string.
payload: Payload must be utf-8 string or unicode,
if the opcode is OPCODE_TEXT.
Otherwise, it must be string(byte array)
opcode: operation code to send. Please see OPCODE_XXX.
"""
frame = ABNF.create_frame(payload, opcode)
return self.send_frame(frame)
def send_frame(self, frame):
"""
Send the data frame.
frame: frame data created by ABNF.create_frame
>>> ws = create_connection("ws://echo.websocket.org/")
>>> frame = ABNF.create_frame("Hello", ABNF.OPCODE_TEXT)
>>> ws.send_frame(frame)
>>> cont_frame = ABNF.create_frame("My name is ", ABNF.OPCODE_CONT, 0)
>>> ws.send_frame(cont_frame)
>>> cont_frame = ABNF.create_frame("Foo Bar", ABNF.OPCODE_CONT, 1)
>>> ws.send_frame(cont_frame)
"""
if self.get_mask_key:
frame.get_mask_key = self.get_mask_key
data = frame.format()
length = len(data)
trace("send: " + repr(data))
with self.lock:
while data:
l = self._send(data)
data = data[l:]
return length
def send_binary(self, payload):
return self.send(payload, ABNF.OPCODE_BINARY)
def ping(self, payload=""):
"""
send ping data.
payload: data payload to send server.
"""
if isinstance(payload, six.text_type):
payload = payload.encode("utf-8")
self.send(payload, ABNF.OPCODE_PING)
def pong(self, payload):
"""
send pong data.
payload: data payload to send server.
"""
if isinstance(payload, six.text_type):
payload = payload.encode("utf-8")
self.send(payload, ABNF.OPCODE_PONG)
def recv(self):
"""
Receive string data(byte array) from the server.
return value: string(byte array) value.
"""
opcode, data = self.recv_data()
if six.PY3 and opcode == ABNF.OPCODE_TEXT:
return data.decode("utf-8")
elif opcode == ABNF.OPCODE_TEXT or opcode == ABNF.OPCODE_BINARY:
return data
else:
return ''
def recv_data(self, control_frame=False):
"""
Receive data with the operation code.
control_frame: a boolean flag indicating whether to return control frame
data, defaults to False
return value: tuple of operation code and string(byte array) value.
"""
opcode, frame = self.recv_data_frame(control_frame)
return opcode, frame.data
def recv_data_frame(self, control_frame=False):
"""
Receive data with the operation code.
control_frame: a boolean flag indicating whether to return control frame
data, defaults to False
return value: tuple of operation code and string(byte array) value.
"""
while True:
frame = self.recv_frame()
if not frame:
# handle error:
# 'NoneType' object has no attribute 'opcode'
raise WebSocketProtocolException("Not a valid frame %s" % frame)
elif frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY, ABNF.OPCODE_CONT):
self.cont_frame.validate(frame)
self.cont_frame.add(frame)
if self.cont_frame.is_fire(frame):
return self.cont_frame.extract(frame)
elif frame.opcode == ABNF.OPCODE_CLOSE:
self.send_close()
return (frame.opcode, frame)
elif frame.opcode == ABNF.OPCODE_PING:
if len(frame.data) < 126:
self.pong(frame.data)
else:
raise WebSocketProtocolException("Ping message is too long")
if control_frame:
return (frame.opcode, frame)
elif frame.opcode == ABNF.OPCODE_PONG:
if control_frame:
return (frame.opcode, frame)
def recv_frame(self):
"""
Receive data as a frame from the server.
return value: ABNF frame object.
"""
return self.frame_buffer.recv_frame()
def send_close(self, status=STATUS_NORMAL, reason=six.b("")):
"""
send close data to the server.
status: status code to send. see STATUS_XXX.
reason: the reason to close. This must be string or bytes.
"""
if status < 0 or status >= ABNF.LENGTH_16:
raise ValueError("code is invalid range")
self.connected = False
self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
def close(self, status=STATUS_NORMAL, reason=six.b(""), timeout=3):
"""
Close Websocket object
status: status code to send. see STATUS_XXX.
reason: the reason to close. This must be string.
timeout: timeout until a close frame is received.
If None, it will wait forever until a close frame is received.
"""
if self.connected:
if status < 0 or status >= ABNF.LENGTH_16:
raise ValueError("code is invalid range")
try:
self.connected = False
self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
sock_timeout = self.sock.gettimeout()
self.sock.settimeout(timeout)
try:
frame = self.recv_frame()
if isEnabledForError():
recv_status = struct.unpack("!H", frame.data)[0]
if recv_status != STATUS_NORMAL:
error("close status: " + repr(recv_status))
except:
pass
self.sock.settimeout(sock_timeout)
self.sock.shutdown(socket.SHUT_RDWR)
except:
pass
self.shutdown()
def abort(self):
"""
Low-level asynchronous abort; wakes up other threads that are waiting in recv_*.
"""
if self.connected:
self.sock.shutdown(socket.SHUT_RDWR)
def shutdown(self):
"close socket, immediately."
if self.sock:
self.sock.close()
self.sock = None
self.connected = False
def _send(self, data):
return send(self.sock, data)
def _recv(self, bufsize):
try:
return recv(self.sock, bufsize)
except WebSocketConnectionClosedException:
if self.sock:
self.sock.close()
self.sock = None
self.connected = False
raise
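# --- Usage sketch (not part of the original module; added for illustration) ---
# A minimal, hedged example of driving the WebSocket class defined above, using
# only methods documented in this file. The echo URL comes from the docstring
# examples and is a placeholder, not a guaranteed live endpoint.
if __name__ == "__main__":
    ws = WebSocket()
    ws.settimeout(10)                      # seconds, via the timeout property setter
    ws.connect("ws://echo.websocket.org/")
    ws.send("Hello, World")                # sent as an OPCODE_TEXT frame
    print(ws.recv())                       # blocks until the echoed message arrives
    ws.close()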
|
gw-sd-2016/Codir
|
codirSublime/SocketIO/websocket/_core.py
|
Python
|
gpl-2.0
| 16,014
|
# Copyright 1998-2004 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Id: output.py,v 1.1 2006/03/06 18:13:31 henrique Exp $
import os
import sys
import re
havecolor = 1
dotitles = 1
spinpos = 0
spinner = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
esc_seq = "\x1b["
g_attr = {}
g_attr["normal"] = 0
g_attr["bold"] = 1
g_attr["faint"] = 2
g_attr["standout"] = 3
g_attr["underline"] = 4
g_attr["blink"] = 5
g_attr["overline"] = 6 # Why is overline actually useful?
g_attr["reverse"] = 7
g_attr["invisible"] = 8
g_attr["no-attr"] = 22
g_attr["no-standout"] = 23
g_attr["no-underline"] = 24
g_attr["no-blink"] = 25
g_attr["no-overline"] = 26
g_attr["no-reverse"] = 27
# 28 isn't defined?
# 29 isn't defined?
g_attr["black"] = 30
g_attr["red"] = 31
g_attr["green"] = 32
g_attr["yellow"] = 33
g_attr["blue"] = 34
g_attr["magenta"] = 35
g_attr["cyan"] = 36
g_attr["white"] = 37
# 38 isn't defined?
g_attr["default"] = 39
g_attr["bg_black"] = 40
g_attr["bg_red"] = 41
g_attr["bg_green"] = 42
g_attr["bg_yellow"] = 43
g_attr["bg_blue"] = 44
g_attr["bg_magenta"] = 45
g_attr["bg_cyan"] = 46
g_attr["bg_white"] = 47
g_attr["bg_default"] = 49
# make_seq("blue", "black", "normal")
def color(fg, bg="default", attr=["normal"]):
mystr = esc_seq[:] + "%02d" % g_attr[fg]
for x in [bg] + attr:
mystr += ";%02d" % g_attr[x]
return mystr + "m"
codes = {}
codes["reset"] = esc_seq + "39;49;00m"
codes["bold"] = esc_seq + "01m"
codes["faint"] = esc_seq + "02m"
codes["standout"] = esc_seq + "03m"
codes["underline"] = esc_seq + "04m"
codes["blink"] = esc_seq + "05m"
codes["overline"] = esc_seq + "06m" # Who made this up? Seriously.
codes["teal"] = esc_seq + "36m"
codes["turquoise"] = esc_seq + "36;01m"
codes["fuchsia"] = esc_seq + "35;01m"
codes["purple"] = esc_seq + "35m"
codes["blue"] = esc_seq + "34;01m"
codes["darkblue"] = esc_seq + "34m"
codes["green"] = esc_seq + "32;01m"
codes["darkgreen"] = esc_seq + "32m"
codes["yellow"] = esc_seq + "33;01m"
codes["brown"] = esc_seq + "33m"
codes["red"] = esc_seq + "31;01m"
codes["darkred"] = esc_seq + "31m"
def nc_len(mystr):
tmp = re.sub(esc_seq + "^m]+m", "", mystr)
return len(tmp)
def xtermTitle(mystr):
if havecolor and dotitles and "TERM" in os.environ and sys.stderr.isatty():
myt = os.environ["TERM"]
legal_terms = [
"xterm", "Eterm", "aterm", "rxvt", "screen", "kterm", "rxvt-unicode"]
for term in legal_terms:
if myt.startswith(term):
sys.stderr.write("\x1b]2;" + str(mystr) + "\x07")
sys.stderr.flush()
break
def xtermTitleReset():
if havecolor and dotitles and "TERM" in os.environ:
myt = os.environ["TERM"]
xtermTitle(os.environ["TERM"])
def notitles():
"turn off title setting"
global dotitles
dotitles = 0
def nocolor():
"turn off colorization"
global havecolor
havecolor = 0
for x in codes.keys():
codes[x] = ""
def resetColor():
return codes["reset"]
def ctext(color, text):
return codes[color] + text + codes["reset"]
def bold(text):
return codes["bold"] + text + codes["reset"]
def faint(text):
return codes["faint"] + text + codes["reset"]
def white(text):
return bold(text)
def teal(text):
return codes["teal"] + text + codes["reset"]
def turquoise(text):
return codes["turquoise"] + text + codes["reset"]
def darkteal(text):
return turquoise(text)
def fuscia(text): # Don't use this one. It's spelled wrong!
return codes["fuchsia"] + text + codes["reset"]
def fuchsia(text):
return codes["fuchsia"] + text + codes["reset"]
def purple(text):
return codes["purple"] + text + codes["reset"]
def blue(text):
return codes["blue"] + text + codes["reset"]
def darkblue(text):
return codes["darkblue"] + text + codes["reset"]
def green(text):
return codes["green"] + text + codes["reset"]
def darkgreen(text):
return codes["darkgreen"] + text + codes["reset"]
def yellow(text):
return codes["yellow"] + text + codes["reset"]
def brown(text):
return codes["brown"] + text + codes["reset"]
def darkyellow(text):
return brown(text)
def red(text):
return codes["red"] + text + codes["reset"]
def darkred(text):
return codes["darkred"] + text + codes["reset"]
def update_basic_spinner():
global spinner, spinpos
spinpos = (spinpos + 1) % 500
if (spinpos % 100) == 0:
if spinpos == 0:
sys.stdout.write(". ")
else:
sys.stdout.write(".")
sys.stdout.flush()
def update_scroll_spinner():
global spinner, spinpos
if(spinpos >= len(spinner)):
sys.stdout.write(
darkgreen(" \b\b\b" + spinner[len(spinner) - 1 - (spinpos % len(spinner))]))
else:
sys.stdout.write(green("\b " + spinner[spinpos]))
sys.stdout.flush()
spinpos = (spinpos + 1) % (2 * len(spinner))
def update_spinner():
global spinner, spinpos
spinpos = (spinpos + 1) % len(spinner)
sys.stdout.write("\b\b " + spinner[spinpos])
sys.stdout.flush()
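# --- Usage sketch (not part of the original module; added for illustration) ---
# Exercises a few of the colorization helpers defined above and shows that
# nocolor() empties the escape codes so later calls degrade to plain text.
if __name__ == "__main__":
    print(green("ok") + " " + red("failed") + " " + bold("important"))
    nocolor()
    print(green("after nocolor() this renders without escape sequences"))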
|
ankanaan/chimera
|
src/chimera/util/output.py
|
Python
|
gpl-2.0
| 5,104
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
"""Utilities for managing GeoNode resource metadata
"""
# Standard Modules
import logging
import datetime
from lxml import etree
# Geonode functionality
from geonode import GeoNodeException
# OWSLib functionality
from owslib.csw import CswRecord
from owslib.iso import MD_Metadata
from owslib.fgdc import Metadata
from django.utils import timezone
LOGGER = logging.getLogger(__name__)
def set_metadata(xml):
"""Generate dict of model properties based on XML metadata"""
# check if document is XML
try:
exml = etree.fromstring(xml)
except Exception as err:
raise GeoNodeException(
'Uploaded XML document is not XML: %s' % str(err))
# check if document is an accepted XML metadata format
tagname = get_tagname(exml)
if tagname == 'GetRecordByIdResponse': # strip CSW element
LOGGER.info('stripping CSW root element')
exml = exml.getchildren()[0]
tagname = get_tagname(exml)
if tagname == 'MD_Metadata': # ISO
identifier, vals, regions, keywords = iso2dict(exml)
elif tagname == 'metadata': # FGDC
identifier, vals, regions, keywords = fgdc2dict(exml)
elif tagname == 'Record': # Dublin Core
identifier, vals, regions, keywords = dc2dict(exml)
else:
raise RuntimeError('Unsupported metadata format')
if not vals.get("date"):
vals["date"] = datetime.datetime.now(timezone.get_current_timezone()).strftime("%Y-%m-%dT%H:%M:%S")
return [identifier, vals, regions, keywords]
def iso2dict(exml):
"""generate dict of properties from gmd:MD_Metadata"""
vals = {}
regions = []
keywords = []
mdata = MD_Metadata(exml)
identifier = mdata.identifier
vals['language'] = mdata.language or mdata.languagecode or 'eng'
vals['spatial_representation_type'] = mdata.hierarchy
vals['date'] = sniff_date(mdata.datestamp)
if hasattr(mdata, 'identification'):
vals['title'] = mdata.identification.title
vals['abstract'] = mdata.identification.abstract
vals['purpose'] = mdata.identification.purpose
if mdata.identification.supplementalinformation is not None:
vals['supplemental_information'] = \
mdata.identification.supplementalinformation
vals['temporal_extent_start'] = \
mdata.identification.temporalextent_start
vals['temporal_extent_end'] = \
mdata.identification.temporalextent_end
if len(mdata.identification.topiccategory) > 0:
vals['topic_category'] = mdata.identification.topiccategory[0]
if (hasattr(mdata.identification, 'keywords') and
len(mdata.identification.keywords) > 0):
for kw in mdata.identification.keywords:
if kw['type'] == "place":
regions.extend(kw['keywords'])
else:
keywords.extend(kw['keywords'])
if len(mdata.identification.otherconstraints) > 0:
vals['constraints_other'] = \
mdata.identification.otherconstraints[0]
vals['purpose'] = mdata.identification.purpose
if mdata.dataquality is not None:
vals['data_quality_statement'] = mdata.dataquality.lineage
return [identifier, vals, regions, keywords]
def fgdc2dict(exml):
"""generate dict of properties from FGDC metadata"""
vals = {}
regions = []
keywords = []
mdata = Metadata(exml)
identifier = mdata.idinfo.datasetid
if hasattr(mdata.idinfo, 'citation'):
if hasattr(mdata.idinfo.citation, 'citeinfo'):
vals['spatial_representation_type'] = \
mdata.idinfo.citation.citeinfo['geoform']
vals['title'] = mdata.idinfo.citation.citeinfo['title']
if hasattr(mdata.idinfo, 'descript'):
vals['abstract'] = mdata.idinfo.descript.abstract
vals['purpose'] = mdata.idinfo.descript.purpose
if mdata.idinfo.descript.supplinf is not None:
vals['supplemental_information'] = mdata.idinfo.descript.supplinf
if hasattr(mdata.idinfo, 'keywords'):
if mdata.idinfo.keywords.theme:
for theme in mdata.idinfo.keywords.theme:
if theme['themekt'] is not None:
lowered_themekt = theme['themekt'].lower()
# Owslib doesn't support extracting the Topic Category
# from FGDC. So we add support here.
# http://www.fgdc.gov/metadata/geospatial-metadata-standards
if all(
ss in lowered_themekt for ss in [
'iso',
'19115',
'topic']) and any(
ss in lowered_themekt for ss in [
'category',
'categories']):
vals['topic_category'] = theme['themekey'][0]
keywords.extend(theme['themekey'])
if mdata.idinfo.keywords.place:
for place in mdata.idinfo.keywords.place:
if 'placekey' in place:
regions.extend(place['placekey'])
if hasattr(mdata.idinfo.timeperd, 'timeinfo'):
if hasattr(mdata.idinfo.timeperd.timeinfo, 'rngdates'):
vals['temporal_extent_start'] = \
sniff_date(mdata.idinfo.timeperd.timeinfo.rngdates.begdate)
vals['temporal_extent_end'] = \
sniff_date(mdata.idinfo.timeperd.timeinfo.rngdates.enddate)
vals['constraints_other'] = mdata.idinfo.useconst
raw_date = mdata.metainfo.metd
if raw_date is not None:
vals['date'] = sniff_date(raw_date)
return [identifier, vals, regions, keywords]
def dc2dict(exml):
"""generate dict of properties from csw:Record"""
vals = {}
regions = []
keywords = []
mdata = CswRecord(exml)
identifier = mdata.identifier
vals['language'] = mdata.language
vals['spatial_representation_type'] = mdata.type
keywords = mdata.subjects
regions = [mdata.spatial]
vals['temporal_extent_start'] = mdata.temporal
vals['temporal_extent_end'] = mdata.temporal
vals['constraints_other'] = mdata.license
vals['date'] = sniff_date(mdata.modified)
vals['title'] = mdata.title
vals['abstract'] = mdata.abstract
return [identifier, vals, regions, keywords]
def sniff_date(datestr):
"""
Attempt to parse date into datetime.datetime object
Possible inputs:
'20001122'
'2000-11-22'
'2000-11-22T11:11:11Z'
'2000-11-22T'
"""
dateformats = ('%Y%m%d', '%Y-%m-%d', '%Y-%m-%dT%H:%M:%SZ',
'%Y-%m-%dT', '%Y/%m/%d')
for dfmt in dateformats:
try:
return datetime.datetime.strptime(datestr.strip(), dfmt)
except (ValueError, AttributeError):
pass
return ""
def get_tagname(element):
"""get tagname without namespace"""
try:
tagname = element.tag.split('}')[1]
except IndexError:
tagname = element.tag
return tagname
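# --- Usage sketch (not part of the original module; added for illustration) ---
# sniff_date() tries the date layouts listed in its docstring and returns a
# datetime.datetime on success, or "" when nothing matches. The inputs below
# are arbitrary examples.
if __name__ == "__main__":
    for raw in ('20001122', '2000-11-22', '2000-11-22T11:11:11Z', 'not-a-date'):
        print('%s -> %r' % (raw, sniff_date(raw)))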
|
timlinux/geonode
|
geonode/layers/metadata.py
|
Python
|
gpl-3.0
| 7,943
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: git
author:
- "Ansible Core Team"
- "Michael DeHaan"
version_added: "0.0.1"
short_description: Deploy software (or files) from git checkouts
description:
- Manage I(git) checkouts of repositories to deploy files or software.
options:
repo:
required: true
aliases: [ name ]
description:
- git, SSH, or HTTP(S) protocol address of the git repository.
dest:
required: true
description:
- Absolute path of where the repository should be checked out to.
This parameter is required, unless C(clone) is set to C(no).
This change was made in version 1.8.3. Prior to this version,
the C(dest) parameter was always required.
version:
required: false
default: "HEAD"
description:
- What version of the repository to check out. This can be
the literal string C(HEAD), a branch name, or a tag name.
It can also be a I(SHA-1) hash, in which case C(refspec) needs
to be specified if the given revision is not already available.
accept_hostkey:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.5"
description:
- if C(yes), ensure that "-o StrictHostKeyChecking=no" is
present as an ssh option.
ssh_opts:
required: false
default: None
version_added: "1.5"
description:
- Creates a wrapper script and exports the path as GIT_SSH
which git then automatically uses to override ssh arguments.
An example value could be "-o StrictHostKeyChecking=no"
key_file:
required: false
default: None
version_added: "1.5"
description:
- Specify an optional private key file to use for the checkout.
reference:
required: false
default: null
version_added: "1.4"
description:
- Reference repository (see "git clone --reference ...")
remote:
required: false
default: "origin"
description:
- Name of the remote.
refspec:
required: false
default: null
version_added: "1.9"
description:
- Add an additional refspec to be fetched.
If version is set to a I(SHA-1) not reachable from any branch
or tag, this option may be necessary to specify the ref containing
the I(SHA-1).
Uses the same syntax as the 'git fetch' command.
An example value could be "refs/meta/config".
force:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "0.7"
description:
- If C(yes), any modified files in the working
repository will be discarded. Prior to 0.7, this was always
'yes' and could not be disabled. Prior to 1.9, the default was
C(yes).
depth:
required: false
default: null
version_added: "1.2"
description:
- Create a shallow clone with a history truncated to the specified
number of revisions. The minimum possible value is C(1), otherwise
ignored. Needs I(git>=1.9.1) to work correctly.
clone:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "1.9"
description:
- If C(no), do not clone the repository if it does not exist locally
update:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "1.2"
description:
- If C(no), do not retrieve new revisions from the origin repository
executable:
required: false
default: null
version_added: "1.4"
description:
- Path to git executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
bare:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.4"
description:
- if C(yes), repository will be created as a bare repo, otherwise
it will be a standard repo with a workspace.
umask:
required: false
default: null
version_added: "2.2"
description:
- The umask to set before doing any checkouts, or any other
repository maintenance.
recursive:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "1.6"
description:
- if C(no), repository will be cloned without the --recursive
option, skipping sub-modules.
track_submodules:
required: false
default: "no"
choices: ["yes", "no"]
version_added: "1.8"
description:
- if C(yes), submodules will track the latest commit on their
master branch (or other branch specified in .gitmodules). If
C(no), submodules will be kept at the revision specified by the
main project. This is equivalent to specifying the --remote flag
to git submodule update.
verify_commit:
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.0"
description:
- if C(yes), when cloning or checking out a C(version) verify the
signature of a GPG signed commit. This requires C(git) version>=2.1.0
to be installed. The commit MUST be signed and the public key MUST
be present in the GPG keyring.
archive:
required: false
version_added: "2.4"
description:
- Specify archive file path with extension. If specified, creates an
archive file of the specified format containing the tree structure
for the source tree.
Allowed archive formats ["zip", "tar.gz", "tar", "tgz"]
requirements:
- git>=1.7.1 (the command line tool)
notes:
- "If the task seems to be hanging, first verify remote host is in C(known_hosts).
SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
one solution is to use the option accept_hostkey. Another solution is to
add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts."
'''
EXAMPLES = '''
# Example git checkout from Ansible Playbooks
- git:
repo: 'https://foosball.example.org/path/to/repo.git'
dest: /srv/checkout
version: release-0.22
# Example read-write git checkout from github
- git:
repo: ssh://git@github.com/mylogin/hello.git
dest: /home/mylogin/hello
# Example just ensuring the repo checkout exists
- git:
repo: 'https://foosball.example.org/path/to/repo.git'
dest: /srv/checkout
update: no
# Example just get information about the repository whether or not it has
# already been cloned locally.
- git:
repo: 'https://foosball.example.org/path/to/repo.git'
dest: /srv/checkout
clone: no
update: no
# Example checkout a github repo and use refspec to fetch all pull requests
- git:
repo: https://github.com/ansible/ansible-examples.git
dest: /src/ansible-examples
refspec: '+refs/pull/*:refs/heads/*'
# Example Create git archive from repo
- git:
repo: https://github.com/ansible/ansible-examples.git
dest: /src/ansible-examples
archive: /tmp/ansible-examples.zip
'''
RETURN = '''
after:
description: last commit revision of the repository retrieved during the update
returned: success
type: string
sample: 4c020102a9cd6fe908c9a4a326a38f972f63a903
before:
description: commit revision before the repository was updated, "null" for new repository
returned: success
type: string
sample: 67c04ebe40a003bda0efb34eacfb93b0cafdf628
remote_url_changed:
description: Indicates whether or not the remote URL was changed.
returned: success
type: boolean
sample: True
warnings:
description: List of warnings if requested features were not available due to a git version that is too old.
returned: error
type: string
sample: Your git version is too old to fully support the depth argument. Falling back to full checkouts.
'''
import filecmp
import os
import re
import shlex
import stat
import sys
import shutil
import tempfile
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, get_module_path
from ansible.module_utils.basic import get_exception
from ansible.module_utils.six import b, string_types
from ansible.module_utils._text import to_native
def head_splitter(headfile, remote, module=None, fail_on_error=False):
'''Extract the head reference'''
# https://github.com/ansible/ansible-modules-core/pull/907
res = None
if os.path.exists(headfile):
rawdata = None
try:
f = open(headfile, 'r')
rawdata = f.readline()
f.close()
except:
if fail_on_error and module:
module.fail_json(msg="Unable to read %s" % headfile)
if rawdata:
try:
rawdata = rawdata.replace('refs/remotes/%s' % remote, '', 1)
refparts = rawdata.split(' ')
newref = refparts[-1]
nrefparts = newref.split('/', 2)
res = nrefparts[-1].rstrip('\n')
except:
if fail_on_error and module:
module.fail_json(msg="Unable to split head from '%s'" % rawdata)
return res
def unfrackgitpath(path):
if path is None:
return None
# copied from ansible.utils.path
return os.path.normpath(os.path.realpath(os.path.expanduser(os.path.expandvars(path))))
def get_submodule_update_params(module, git_path, cwd):
# or: git submodule [--quiet] update [--init] [-N|--no-fetch]
# [-f|--force] [--rebase] [--reference <repository>] [--merge]
# [--recursive] [--] [<path>...]
params = []
# run a bad submodule command to get valid params
cmd = "%s submodule update --help" % (git_path)
rc, stdout, stderr = module.run_command(cmd, cwd=cwd)
lines = stderr.split('\n')
update_line = None
for line in lines:
if 'git submodule [--quiet] update ' in line:
update_line = line
if update_line:
update_line = update_line.replace('[', '')
update_line = update_line.replace(']', '')
update_line = update_line.replace('|', ' ')
parts = shlex.split(update_line)
for part in parts:
if part.startswith('--'):
part = part.replace('--', '')
params.append(part)
return params
def write_ssh_wrapper():
module_dir = get_module_path()
try:
# make sure we have full permission to the module_dir, which
# may not be the case if we're sudo'ing to a non-root user
if os.access(module_dir, os.W_OK | os.R_OK | os.X_OK):
fd, wrapper_path = tempfile.mkstemp(prefix=module_dir + '/')
else:
raise OSError
except (IOError, OSError):
fd, wrapper_path = tempfile.mkstemp()
fh = os.fdopen(fd, 'w+b')
template = b("""#!/bin/sh
if [ -z "$GIT_SSH_OPTS" ]; then
BASEOPTS=""
else
BASEOPTS=$GIT_SSH_OPTS
fi
# Let ssh fail rather than prompt
BASEOPTS="$BASEOPTS -o BatchMode=yes"
if [ -z "$GIT_KEY" ]; then
ssh $BASEOPTS "$@"
else
ssh -i "$GIT_KEY" -o IdentitiesOnly=yes $BASEOPTS "$@"
fi
""")
fh.write(template)
fh.close()
st = os.stat(wrapper_path)
os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC)
return wrapper_path
def set_git_ssh(ssh_wrapper, key_file, ssh_opts):
if os.environ.get("GIT_SSH"):
del os.environ["GIT_SSH"]
os.environ["GIT_SSH"] = ssh_wrapper
if os.environ.get("GIT_KEY"):
del os.environ["GIT_KEY"]
if key_file:
os.environ["GIT_KEY"] = key_file
if os.environ.get("GIT_SSH_OPTS"):
del os.environ["GIT_SSH_OPTS"]
if ssh_opts:
os.environ["GIT_SSH_OPTS"] = ssh_opts
def get_version(module, git_path, dest, ref="HEAD"):
''' samples the version of the git repo '''
cmd = "%s rev-parse %s" % (git_path, ref)
rc, stdout, stderr = module.run_command(cmd, cwd=dest)
sha = to_native(stdout).rstrip('\n')
return sha
def get_submodule_versions(git_path, module, dest, version='HEAD'):
cmd = [git_path, 'submodule', 'foreach', git_path, 'rev-parse', version]
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(
msg='Unable to determine hashes of submodules',
stdout=out,
stderr=err,
rc=rc)
submodules = {}
subm_name = None
for line in out.splitlines():
if line.startswith("Entering '"):
subm_name = line[10:-1]
elif len(line.strip()) == 40:
if subm_name is None:
module.fail_json()
submodules[subm_name] = line.strip()
subm_name = None
else:
module.fail_json(msg='Unable to parse submodule hash line: %s' % line.strip())
if subm_name is not None:
module.fail_json(msg='Unable to find hash for submodule: %s' % subm_name)
return submodules
def clone(git_path, module, repo, dest, remote, depth, version, bare,
reference, refspec, verify_commit):
''' makes a new git repo if it does not already exist '''
dest_dirname = os.path.dirname(dest)
try:
os.makedirs(dest_dirname)
except:
pass
cmd = [git_path, 'clone']
if bare:
cmd.append('--bare')
else:
cmd.extend(['--origin', remote])
if depth:
if version == 'HEAD' or refspec:
cmd.extend(['--depth', str(depth)])
elif is_remote_branch(git_path, module, dest, repo, version) \
or is_remote_tag(git_path, module, dest, repo, version):
cmd.extend(['--depth', str(depth)])
cmd.extend(['--branch', version])
else:
# only use depth if the remote object is branch or tag (i.e. fetchable)
module.warn("Ignoring depth argument. "
"Shallow clones are only available for "
"HEAD, branches, tags or in combination with refspec.")
if reference:
cmd.extend(['--reference', str(reference)])
cmd.extend([repo, dest])
module.run_command(cmd, check_rc=True, cwd=dest_dirname)
if bare:
if remote != 'origin':
module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest)
if refspec:
cmd = [git_path, 'fetch']
if depth:
cmd.extend(['--depth', str(depth)])
cmd.extend([remote, refspec])
module.run_command(cmd, check_rc=True, cwd=dest)
if verify_commit:
verify_commit_sign(git_path, module, dest, version)
def has_local_mods(module, git_path, dest, bare):
if bare:
return False
cmd = "%s status --porcelain" % (git_path)
rc, stdout, stderr = module.run_command(cmd, cwd=dest)
lines = stdout.splitlines()
lines = list(filter(lambda c: not re.search('^\\?\\?.*$', c), lines))
return len(lines) > 0
def reset(git_path, module, dest):
'''
Resets the index and working tree to HEAD.
Discards any changes to tracked files in working
tree since that commit.
'''
cmd = "%s reset --hard HEAD" % (git_path,)
return module.run_command(cmd, check_rc=True, cwd=dest)
def get_diff(module, git_path, dest, repo, remote, depth, bare, before, after):
''' Return the difference between 2 versions '''
if before is None:
return {'prepared': '>> Newly checked out %s' % after}
elif before != after:
# Ensure we have the object we are referring to during git diff !
git_version_used = git_version(git_path, module)
fetch(git_path, module, repo, dest, after, remote, depth, bare, '', git_version_used)
cmd = '%s diff %s %s' % (git_path, before, after)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc == 0 and out:
return {'prepared': out}
elif rc == 0:
return {'prepared': '>> No visual differences between %s and %s' % (before, after)}
elif err:
return {'prepared': '>> Failed to get proper diff between %s and %s:\n>> %s' % (before, after, err)}
else:
return {'prepared': '>> Failed to get proper diff between %s and %s' % (before, after)}
return {}
def get_remote_head(git_path, module, dest, version, remote, bare):
cloning = False
cwd = None
tag = False
if remote == module.params['repo']:
cloning = True
else:
cwd = dest
if version == 'HEAD':
if cloning:
# cloning the repo, just get the remote's HEAD version
cmd = '%s ls-remote %s -h HEAD' % (git_path, remote)
else:
head_branch = get_head_branch(git_path, module, dest, remote, bare)
cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, head_branch)
elif is_remote_branch(git_path, module, dest, remote, version):
cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
elif is_remote_tag(git_path, module, dest, remote, version):
tag = True
cmd = '%s ls-remote %s -t refs/tags/%s*' % (git_path, remote, version)
else:
# appears to be a sha1. Return it as-is, since we cannot
# check for a specific sha1 on the remote.
return version
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=cwd)
if len(out) < 1:
module.fail_json(msg="Could not determine remote revision for %s" % version, stdout=out, stderr=err, rc=rc)
out = to_native(out)
if tag:
# Find the dereferenced tag if this is an annotated tag.
for tag in out.split('\n'):
if tag.endswith(version + '^{}'):
out = tag
break
elif tag.endswith(version):
out = tag
rev = out.split()[0]
return rev
def is_remote_tag(git_path, module, dest, remote, version):
cmd = '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version)
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if to_native(version, errors='surrogate_or_strict') in out:
return True
else:
return False
def get_branches(git_path, module, dest):
branches = []
cmd = '%s branch --no-color -a' % (git_path,)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Could not determine branch data - received %s" % out, stdout=out, stderr=err)
for line in out.split('\n'):
if line.strip():
branches.append(line.strip())
return branches
def get_annotated_tags(git_path, module, dest):
tags = []
cmd = [git_path, 'for-each-ref', 'refs/tags/', '--format', '%(objecttype):%(refname:short)']
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Could not determine tag data - received %s" % out, stdout=out, stderr=err)
for line in to_native(out).split('\n'):
if line.strip():
tagtype, tagname = line.strip().split(':')
if tagtype == 'tag':
tags.append(tagname)
return tags
def is_remote_branch(git_path, module, dest, remote, version):
cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if to_native(version, errors='surrogate_or_strict') in out:
return True
else:
return False
def is_local_branch(git_path, module, dest, branch):
branches = get_branches(git_path, module, dest)
lbranch = '%s' % branch
if lbranch in branches:
return True
elif '* %s' % branch in branches:
return True
else:
return False
def is_not_a_branch(git_path, module, dest):
branches = get_branches(git_path, module, dest)
for branch in branches:
if branch.startswith('* ') and ('no branch' in branch or 'detached from' in branch):
return True
return False
def get_head_branch(git_path, module, dest, remote, bare=False):
'''
Determine what branch HEAD is associated with. This is partly
taken from lib/ansible/utils/__init__.py. It finds the correct
path to .git/HEAD and reads from that file the branch that HEAD is
associated with. In the case of a detached HEAD, this will look
up the branch in .git/refs/remotes/<remote>/HEAD.
'''
if bare:
repo_path = dest
else:
repo_path = os.path.join(dest, '.git')
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
git_conf = open(repo_path, 'rb')
for line in git_conf:
config_val = line.split(b(':'), 1)
if config_val[0].strip() == b('gitdir'):
gitdir = to_native(config_val[1].strip(), errors='surrogate_or_strict')
break
else:
# No repo path found
return ''
# The .git file may contain an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path.split('.git')[0], gitdir)
except (IOError, AttributeError):
# No repo path found
return ''
# Read .git/HEAD for the name of the branch.
# If we're in a detached HEAD state, look up the branch associated with
# the remote HEAD in .git/refs/remotes/<remote>/HEAD
headfile = os.path.join(repo_path, "HEAD")
if is_not_a_branch(git_path, module, dest):
headfile = os.path.join(repo_path, 'refs', 'remotes', remote, 'HEAD')
branch = head_splitter(headfile, remote, module=module, fail_on_error=True)
return branch
def get_remote_url(git_path, module, dest, remote):
'''Return URL of remote source for repo.'''
command = [git_path, 'ls-remote', '--get-url', remote]
(rc, out, err) = module.run_command(command, cwd=dest)
if rc != 0:
# There was an issue getting the remote URL, most likely because
# the command is not available in this version of Git.
return None
return to_native(out).rstrip('\n')
def set_remote_url(git_path, module, repo, dest, remote):
''' updates repo from remote sources '''
# Return if remote URL isn't changing.
remote_url = get_remote_url(git_path, module, dest, remote)
if remote_url == repo or unfrackgitpath(remote_url) == unfrackgitpath(repo):
return False
command = [git_path, 'remote', 'set-url', remote, repo]
(rc, out, err) = module.run_command(command, cwd=dest)
if rc != 0:
label = "set a new url %s for %s" % (repo, remote)
module.fail_json(msg="Failed to %s: %s %s" % (label, out, err))
# Return False if remote_url is None to maintain previous behavior
# for Git versions prior to 1.7.5 that lack required functionality.
return remote_url is not None
def fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used):
''' updates repo from remote sources '''
set_remote_url(git_path, module, repo, dest, remote)
commands = []
fetch_str = 'download remote objects and refs'
fetch_cmd = [git_path, 'fetch']
refspecs = []
if depth:
# try to find the minimal set of refs we need to fetch to get a
# successful checkout
currenthead = get_head_branch(git_path, module, dest, remote)
if refspec:
refspecs.append(refspec)
elif version == 'HEAD':
refspecs.append(currenthead)
elif is_remote_branch(git_path, module, dest, repo, version):
if currenthead != version:
# this workaround is only needed for older git versions
# 1.8.3 is broken, 1.9.x works
# ensure that remote branch is available as both local and remote ref
refspecs.append('+refs/heads/%s:refs/heads/%s' % (version, version))
refspecs.append('+refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version))
else:
refspecs.append(version)
elif is_remote_tag(git_path, module, dest, repo, version):
refspecs.append('+refs/tags/' + version + ':refs/tags/' + version)
if refspecs:
# refspecs is non-empty, so we can do a shallow fetch of just those refs.
# If it were empty (version is neither a head nor a tag, i.e. most likely
# a commit hash), we fall back to a full fetch below, otherwise we might
# not be able to check out the requested version.
fetch_cmd.extend(['--depth', str(depth)])
if not depth or not refspecs:
# don't try to be minimalistic but do a full clone
# also do this if depth is given, but version is something that can't be fetched directly
if bare:
refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*']
else:
# ensure all tags are fetched
if git_version_used >= LooseVersion('1.9'):
fetch_cmd.append('--tags')
else:
# old git versions have a bug in --tags that prevents updating existing tags
commands.append((fetch_str, fetch_cmd + [remote]))
refspecs = ['+refs/tags/*:refs/tags/*']
if refspec:
refspecs.append(refspec)
fetch_cmd.extend([remote])
commands.append((fetch_str, fetch_cmd + refspecs))
for (label, command) in commands:
(rc, out, err) = module.run_command(command, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to %s: %s %s" % (label, out, err), cmd=command)
def submodules_fetch(git_path, module, remote, track_submodules, dest):
changed = False
if not os.path.exists(os.path.join(dest, '.gitmodules')):
# no submodules
return changed
gitmodules_file = open(os.path.join(dest, '.gitmodules'), 'r')
for line in gitmodules_file:
# Check for new submodules
if not changed and line.strip().startswith('path'):
path = line.split('=', 1)[1].strip()
# Check that dest/path/.git exists
if not os.path.exists(os.path.join(dest, path, '.git')):
changed = True
# Check for updates to existing modules
if not changed:
# Fetch updates
begin = get_submodule_versions(git_path, module, dest)
cmd = [git_path, 'submodule', 'foreach', git_path, 'fetch']
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to fetch submodules: %s" % out + err)
if track_submodules:
# Compare against submodule HEAD
# FIXME: determine this from .gitmodules
version = 'master'
after = get_submodule_versions(git_path, module, dest, '%s/%s' % (remote, version))
if begin != after:
changed = True
else:
# Compare against the superproject's expectation
cmd = [git_path, 'submodule', 'status']
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if rc != 0:
module.fail_json(msg='Failed to retrieve submodule status: %s' % out + err)
for line in out.splitlines():
if line[0] != ' ':
changed = True
break
return changed
def submodule_update(git_path, module, dest, track_submodules, force=False):
''' init and update any submodules '''
# get the valid submodule params
params = get_submodule_update_params(module, git_path, dest)
# skip submodule commands if .gitmodules is not present
if not os.path.exists(os.path.join(dest, '.gitmodules')):
return (0, '', '')
cmd = [git_path, 'submodule', 'sync']
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if 'remote' in params and track_submodules:
cmd = [git_path, 'submodule', 'update', '--init', '--recursive', '--remote']
else:
cmd = [git_path, 'submodule', 'update', '--init', '--recursive']
if force:
cmd.append('--force')
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to init/update submodules: %s" % out + err)
return (rc, out, err)
def set_remote_branch(git_path, module, dest, remote, version, depth):
"""set refs for the remote branch version
This assumes the branch does not yet exist locally and is therefore also not checked out.
Can't use git remote set-branches, as it is not available in git 1.7.1 (centos6)
"""
branchref = "+refs/heads/%s:refs/heads/%s" % (version, version)
branchref += ' +refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version)
cmd = "%s fetch --depth=%s %s %s" % (git_path, depth, remote, branchref)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to fetch branch from remote: %s" % version, stdout=out, stderr=err, rc=rc)
def switch_version(git_path, module, dest, remote, version, verify_commit, depth):
cmd = ''
if version == 'HEAD':
branch = get_head_branch(git_path, module, dest, remote)
(rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to checkout branch %s" % branch,
stdout=out, stderr=err, rc=rc)
cmd = "%s reset --hard %s/%s --" % (git_path, remote, branch)
else:
# FIXME check for local_branch first, should have been fetched already
if is_remote_branch(git_path, module, dest, remote, version):
if depth and not is_local_branch(git_path, module, dest, version):
# git clone --depth implies --single-branch, which makes
# the checkout fail if the version changes
# fetch the remote branch, to be able to check it out next
set_remote_branch(git_path, module, dest, remote, version, depth)
if not is_local_branch(git_path, module, dest, version):
cmd = "%s checkout --track -b %s %s/%s" % (git_path, version, remote, version)
else:
(rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version), cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to checkout branch %s" % version, stdout=out, stderr=err, rc=rc)
cmd = "%s reset --hard %s/%s" % (git_path, remote, version)
else:
cmd = "%s checkout --force %s" % (git_path, version)
(rc, out1, err1) = module.run_command(cmd, cwd=dest)
if rc != 0:
if version != 'HEAD':
module.fail_json(msg="Failed to checkout %s" % (version),
stdout=out1, stderr=err1, rc=rc, cmd=cmd)
else:
module.fail_json(msg="Failed to checkout branch %s" % (branch),
stdout=out1, stderr=err1, rc=rc, cmd=cmd)
if verify_commit:
verify_commit_sign(git_path, module, dest, version)
return (rc, out1, err1)
def verify_commit_sign(git_path, module, dest, version):
if version in get_annotated_tags(git_path, module, dest):
git_sub = "verify-tag"
else:
git_sub = "verify-commit"
cmd = "%s %s %s" % (git_path, git_sub, version)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg='Failed to verify GPG signature of commit/tag "%s"' % version, stdout=out, stderr=err, rc=rc)
return (rc, out, err)
def git_version(git_path, module):
"""return the installed version of git"""
cmd = "%s --version" % git_path
(rc, out, err) = module.run_command(cmd)
if rc != 0:
# one could fail_json here, but the version info is not that important,
# so let's try to fail only on actual git commands
return None
rematch = re.search('git version (.*)$', to_native(out))
if not rematch:
return None
return LooseVersion(rematch.groups()[0])
def git_archive(git_path, module, dest, archive, archive_fmt, version):
""" Create git archive in given source directory """
cmd = "%s archive --format=%s --output=%s %s" \
% (git_path, archive_fmt, archive, version)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to perform archive operation",
details="Git archive command failed to create "
"archive %s using %s directory."
"Error: %s" % (archive, dest, err))
return rc, out, err
def create_archive(git_path, module, dest, archive, version, repo, result):
""" Helper function for creating archive using git_archive """
all_archive_fmt = {'.zip': 'zip', '.gz': 'tar.gz', '.tar': 'tar',
'.tgz': 'tgz'}
_, archive_ext = os.path.splitext(archive)
archive_fmt = all_archive_fmt.get(archive_ext, None)
if archive_fmt is None:
module.fail_json(msg="Unable to get file extension from "
"archive file name : %s" % archive,
details="Please specify archive as filename with "
"extension. File extension can be one "
"of ['tar', 'tar.gz', 'zip', 'tgz']")
repo_name = repo.split("/")[-1].replace(".git", "")
if os.path.exists(archive):
# If a git archive file already exists, compare it with the newly created one:
# if they match, do nothing;
# if they differ, replace the existing file with the temporary archive.
tempdir = tempfile.mkdtemp()
new_archive_dest = os.path.join(tempdir, repo_name)
new_archive = new_archive_dest + '.' + archive_fmt
git_archive(git_path, module, dest, new_archive, archive_fmt, version)
# filecmp is supposed to be more efficient than an md5sum checksum
if filecmp.cmp(new_archive, archive):
result.update(changed=False)
# Cleanup before exiting
try:
shutil.rmtree(tempdir)
except OSError:
pass
else:
try:
shutil.move(new_archive, archive)
shutil.rmtree(tempdir)
result.update(changed=True)
except OSError:
exception = get_exception()
module.fail_json(msg="Failed to move %s to %s" %
(new_archive, archive),
details="Error occured while moving : %s"
% exception)
else:
# Perform archive from local directory
git_archive(git_path, module, dest, archive, archive_fmt, version)
result.update(changed=True)
# ===========================================
def main():
module = AnsibleModule(
argument_spec=dict(
dest=dict(type='path'),
repo=dict(required=True, aliases=['name']),
version=dict(default='HEAD'),
remote=dict(default='origin'),
refspec=dict(default=None),
reference=dict(default=None),
force=dict(default='no', type='bool'),
depth=dict(default=None, type='int'),
clone=dict(default='yes', type='bool'),
update=dict(default='yes', type='bool'),
verify_commit=dict(default='no', type='bool'),
accept_hostkey=dict(default='no', type='bool'),
key_file=dict(default=None, type='path', required=False),
ssh_opts=dict(default=None, required=False),
executable=dict(default=None, type='path'),
bare=dict(default='no', type='bool'),
recursive=dict(default='yes', type='bool'),
track_submodules=dict(default='no', type='bool'),
umask=dict(default=None, type='raw'),
archive=dict(type='path'),
),
supports_check_mode=True
)
dest = module.params['dest']
repo = module.params['repo']
version = module.params['version']
remote = module.params['remote']
refspec = module.params['refspec']
force = module.params['force']
depth = module.params['depth']
update = module.params['update']
allow_clone = module.params['clone']
bare = module.params['bare']
verify_commit = module.params['verify_commit']
reference = module.params['reference']
git_path = module.params['executable'] or module.get_bin_path('git', True)
key_file = module.params['key_file']
ssh_opts = module.params['ssh_opts']
umask = module.params['umask']
archive = module.params['archive']
result = dict(changed=False, warnings=list())
if module.params['accept_hostkey']:
if ssh_opts is not None:
if "-o StrictHostKeyChecking=no" not in ssh_opts:
ssh_opts += " -o StrictHostKeyChecking=no"
else:
ssh_opts = "-o StrictHostKeyChecking=no"
# evaluate and set the umask before doing anything else
if umask is not None:
if not isinstance(umask, string_types):
module.fail_json(msg="umask must be defined as a quoted octal integer")
try:
umask = int(umask, 8)
except:
module.fail_json(msg="umask must be an octal integer",
details=str(sys.exc_info()[1]))
os.umask(umask)
# Certain features such as depth require a file:/// protocol for path based urls
# so force a protocol here ...
if repo.startswith('/'):
repo = 'file://' + repo
# We screenscrape a huge amount of git commands so use C locale anytime we
# call run_command()
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
gitconfig = None
if not dest and allow_clone:
module.fail_json(msg="the destination directory must be specified unless clone=no")
elif dest:
dest = os.path.abspath(dest)
if bare:
gitconfig = os.path.join(dest, 'config')
else:
gitconfig = os.path.join(dest, '.git', 'config')
# create a wrapper script and export
# GIT_SSH=<path> as an environment variable
# for git to use the wrapper script
ssh_wrapper = write_ssh_wrapper()
set_git_ssh(ssh_wrapper, key_file, ssh_opts)
module.add_cleanup_file(path=ssh_wrapper)
git_version_used = git_version(git_path, module)
if depth is not None and git_version_used < LooseVersion('1.9.1'):
result['warnings'].append("Your git version is too old to fully support the depth argument. Falling back to full checkouts.")
depth = None
recursive = module.params['recursive']
track_submodules = module.params['track_submodules']
result.update(before=None)
local_mods = False
need_fetch = True
if (dest and not os.path.exists(gitconfig)) or (not dest and not allow_clone):
# if there is no git configuration, do a clone operation unless:
# * the user requested no clone (they just want info)
# * we're doing a check mode test
# In those cases we do an ls-remote
if module.check_mode or not allow_clone:
remote_head = get_remote_head(git_path, module, dest, version, repo, bare)
result.update(changed=True, after=remote_head)
if module._diff:
diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
if diff:
result['diff'] = diff
module.exit_json(**result)
# there's no git config, so clone
clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec, verify_commit)
need_fetch = False
elif not update:
# Just return, having found a repo already in the dest path;
# this does not check that the repo is actually the one requested.
result['before'] = get_version(module, git_path, dest)
result.update(after=result['before'])
module.exit_json(**result)
else:
# else do a pull
local_mods = has_local_mods(module, git_path, dest, bare)
result['before'] = get_version(module, git_path, dest)
if local_mods:
# failure should happen regardless of check mode
if not force:
module.fail_json(msg="Local modifications exist in repository (force=no).", **result)
# if force and in non-check mode, do a reset
if not module.check_mode:
reset(git_path, module, dest)
result.update(changed=True, msg='Local modifications exist.')
# exit if already at desired sha version
if module.check_mode:
remote_url = get_remote_url(git_path, module, dest, remote)
remote_url_changed = remote_url and remote_url != repo and unfrackgitpath(remote_url) != unfrackgitpath(repo)
else:
remote_url_changed = set_remote_url(git_path, module, repo, dest, remote)
result.update(remote_url_changed=remote_url_changed)
if module.check_mode:
remote_head = get_remote_head(git_path, module, dest, version, remote, bare)
result.update(changed=(result['before'] != remote_head or remote_url_changed), after=remote_head)
# FIXME: This diff should fail since the new remote_head is not fetched yet?!
if module._diff:
diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
if diff:
result['diff'] = diff
module.exit_json(**result)
else:
fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used)
result['after'] = get_version(module, git_path, dest)
# switch to version specified regardless of whether
# we got new revisions from the repository
if not bare:
switch_version(git_path, module, dest, remote, version, verify_commit, depth)
# Deal with submodules
submodules_updated = False
if recursive and not bare:
submodules_updated = submodules_fetch(git_path, module, remote, track_submodules, dest)
if submodules_updated:
result.update(submodules_changed=submodules_updated)
if module.check_mode:
result.update(changed=True, after=remote_head)
module.exit_json(**result)
# Switch to version specified
submodule_update(git_path, module, dest, track_submodules, force=force)
# determine if we changed anything
result['after'] = get_version(module, git_path, dest)
if result['before'] != result['after'] or local_mods or submodules_updated or remote_url_changed:
result.update(changed=True)
if module._diff:
diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
if diff:
result['diff'] = diff
if archive:
# Git archive is not supported by all git servers, so
# we will first clone and perform git archive from local directory
if module.check_mode:
result.update(changed=True)
module.exit_json(**result)
create_archive(git_path, module, dest, archive, version, repo, result)
# cleanup the wrapper script
if ssh_wrapper:
try:
os.remove(ssh_wrapper)
except OSError:
# No need to fail if the file already doesn't exist
pass
module.exit_json(**result)
if __name__ == '__main__':
main()
|
sonaht/ansible
|
lib/ansible/modules/source_control/git.py
|
Python
|
gpl-3.0
| 45,468
|
# -*- coding: utf-8 -*-
from odoo.addons.account_edi.tests.common import AccountEdiTestCommon
from odoo.tests import tagged
from odoo import Command
@tagged('post_install_l10n', 'post_install', '-at_install')
class TestL10nBeEdi(AccountEdiTestCommon):
@classmethod
def setUpClass(cls, chart_template_ref='l10n_be.l10nbe_chart_template', edi_format_ref='l10n_be_edi.edi_efff_1'):
super().setUpClass(chart_template_ref=chart_template_ref, edi_format_ref=edi_format_ref)
# ==== Init ====
cls.tax_10_include = cls.env['account.tax'].create({
'name': 'tax_10_include',
'amount_type': 'percent',
'amount': 10,
'type_tax_use': 'sale',
'price_include': True,
'include_base_amount': True,
'sequence': 10,
})
cls.tax_20 = cls.env['account.tax'].create({
'name': 'tax_20',
'amount_type': 'percent',
'amount': 20,
'invoice_repartition_line_ids': [
(0, 0, {'factor_percent': 100.0, 'repartition_type': 'base'}),
(0, 0, {'factor_percent': 40.0, 'repartition_type': 'tax'}),
(0, 0, {'factor_percent': 60.0, 'repartition_type': 'tax'}),
],
'refund_repartition_line_ids': [
(0, 0, {'factor_percent': 100.0, 'repartition_type': 'base'}),
(0, 0, {'factor_percent': 40.0, 'repartition_type': 'tax'}),
(0, 0, {'factor_percent': 60.0, 'repartition_type': 'tax'}),
],
'type_tax_use': 'sale',
'sequence': 20,
})
cls.tax_group = cls.env['account.tax'].create({
'name': 'tax_group',
'amount_type': 'group',
'amount': 0.0,
'type_tax_use': 'sale',
'children_tax_ids': [(6, 0, (cls.tax_10_include + cls.tax_20).ids)],
})
cls.partner_a.vat = 'BE0477472701'
# ==== Invoice ====
cls.invoice = cls.env['account.move'].create({
'move_type': 'out_invoice',
'journal_id': cls.journal.id,
'partner_id': cls.partner_b.id,
'invoice_date': '2017-01-01',
'date': '2017-01-01',
'currency_id': cls.currency_data['currency'].id,
'invoice_line_ids': [(0, 0, {
'product_id': cls.product_a.id,
'product_uom_id': cls.env.ref('uom.product_uom_dozen').id,
'price_unit': 275.0,
'quantity': 5,
'discount': 20.0,
'tax_ids': [(6, 0, cls.tax_20.ids)],
})],
})
cls.expected_invoice_efff_values = '''
<Invoice>
<UBLVersionID>2.0</UBLVersionID>
<ID>INV/2017/00001</ID>
<IssueDate>2017-01-01</IssueDate>
<InvoiceTypeCode>380</InvoiceTypeCode>
<DocumentCurrencyCode>Gol</DocumentCurrencyCode>
<AccountingSupplierParty>
<Party>
<PartyName>
<Name>company_1_data</Name>
</PartyName>
<Language>
<LocaleCode>en_US</LocaleCode>
</Language>
<PostalAddress/>
<Contact>
<Name>company_1_data</Name>
</Contact>
</Party>
</AccountingSupplierParty>
<AccountingCustomerParty>
<Party>
<PartyName>
<Name>partner_b</Name>
</PartyName>
<Language>
<LocaleCode>en_US</LocaleCode>
</Language>
<PostalAddress/>
<Contact>
<Name>partner_b</Name>
</Contact>
</Party>
</AccountingCustomerParty>
<PaymentMeans>
<PaymentMeansCode listID="UN/ECE 4461">31</PaymentMeansCode>
<PaymentDueDate>2017-01-01</PaymentDueDate>
<InstructionID>INV/2017/00001</InstructionID>
</PaymentMeans>
<TaxTotal>
<TaxAmount currencyID="Gol">220.000</TaxAmount>
</TaxTotal>
<LegalMonetaryTotal>
<LineExtensionAmount currencyID="Gol">1100.000</LineExtensionAmount>
<TaxExclusiveAmount currencyID="Gol">1100.000</TaxExclusiveAmount>
<TaxInclusiveAmount currencyID="Gol">1320.000</TaxInclusiveAmount>
<PrepaidAmount currencyID="Gol">0.000</PrepaidAmount>
<PayableAmount currencyID="Gol">1320.000</PayableAmount>
</LegalMonetaryTotal>
<InvoiceLine>
<ID>___ignore___</ID>
<Note>Discount (20.0 %)</Note>
<InvoicedQuantity>5.0</InvoicedQuantity>
<LineExtensionAmount currencyID="Gol">1100.000</LineExtensionAmount>
<TaxTotal>
<TaxAmount currencyID="Gol">220.000</TaxAmount>
</TaxTotal>
<Item>
<Description>product_a</Description>
<Name>product_a</Name>
</Item>
<Price>
<PriceAmount currencyID="Gol">275.000</PriceAmount>
</Price>
</InvoiceLine>
</Invoice>
'''
####################################################
# Test export
####################################################
def test_efff_simple_case(self):
        ''' Test the generated E-FFF EDI attachment without any modification of the invoice. '''
self.assert_generated_file_equal(self.invoice, self.expected_invoice_efff_values)
def test_efff_group_of_taxes(self):
self.invoice.write({
'invoice_line_ids': [(1, self.invoice.invoice_line_ids.id, {'tax_ids': [Command.set(self.tax_group.ids)]})],
})
applied_xpath = '''
<xpath expr="//TaxTotal/TaxAmount" position="replace">
<TaxAmount currencyID="Gol">320.000</TaxAmount>
</xpath>
<xpath expr="//LegalMonetaryTotal/LineExtensionAmount" position="replace">
<LineExtensionAmount currencyID="Gol">1000.000</LineExtensionAmount>
</xpath>
<xpath expr="//LegalMonetaryTotal/TaxExclusiveAmount" position="replace">
<TaxExclusiveAmount currencyID="Gol">1000.000</TaxExclusiveAmount>
</xpath>
<xpath expr="//InvoiceLine/LineExtensionAmount" position="replace">
<LineExtensionAmount currencyID="Gol">1000.000</LineExtensionAmount>
</xpath>
<xpath expr="//InvoiceLine/TaxTotal" position="replace">
<TaxTotal>
<TaxAmount currencyID="Gol">100.000</TaxAmount>
</TaxTotal>
<TaxTotal>
<TaxAmount currencyID="Gol">220.000</TaxAmount>
</TaxTotal>
</xpath>
'''
self.assert_generated_file_equal(self.invoice, self.expected_invoice_efff_values, applied_xpath)
####################################################
# Test import
####################################################
def test_invoice_edi_xml_update(self):
invoice = self._create_empty_vendor_bill()
invoice_count = len(self.env['account.move'].search([]))
self.update_invoice_from_file('l10n_be_edi', 'test_xml_file', 'efff_test.xml', invoice)
self.assertEqual(len(self.env['account.move'].search([])), invoice_count)
self.assertEqual(invoice.amount_total, 666.50)
self.assertEqual(invoice.amount_tax, 115.67)
self.assertEqual(invoice.partner_id, self.partner_a)
def test_invoice_edi_xml_create(self):
invoice_count = len(self.env['account.move'].search([]))
invoice = self.create_invoice_from_file('l10n_be_edi', 'test_xml_file', 'efff_test.xml')
self.assertEqual(len(self.env['account.move'].search([])), invoice_count + 1)
self.assertEqual(invoice.amount_total, 666.50)
self.assertEqual(invoice.amount_tax, 115.67)
self.assertEqual(invoice.partner_id, self.partner_a)
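The setUpClass above mixes raw x2many command tuples -- (0, 0, vals) to create, (1, id, vals) to update, (6, 0, ids) to replace the set -- with the Command helpers used in test_efff_group_of_taxes. A minimal sketch of the equivalence (illustrative values only, not part of the test module):

from odoo import Command

vals = {'name': 'demo line'}
old_style = [
    (0, 0, vals),        # create a new line
    (1, 42, vals),       # update the line with id 42
    (6, 0, [7, 8, 9]),   # replace the whole set with these ids
]
new_style = [
    Command.create(vals),
    Command.update(42, vals),
    Command.set([7, 8, 9]),  # same effects as the tuples above
]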
| jeremiahyan/odoo | addons/l10n_be_edi/tests/test_ubl.py | Python | gpl-3.0 | 8,636 |
# -*- coding: utf-8 -*-
# Automatic provisioning of Azure DNS zones.
import os
import azure
import json
from requests import Request
try:
    from urllib import quote  # Python 2
except ImportError:
    from urllib.parse import quote  # Python 3
from nixops.util import attr_property
from nixops.azure_common import ResourceDefinition, ResourceState, ResId
from azure.common import AzureHttpError
from azure.mgmt.network import *
class AzureDNSZoneDefinition(ResourceDefinition):
"""Definition of an Azure DNS Zone"""
@classmethod
def get_type(cls):
return "azure-dns-zone"
@classmethod
def get_resource_type(cls):
return "azureDNSZones"
def __init__(self, xml):
ResourceDefinition.__init__(self, xml)
self.dns_zone_name = self.get_option_value(xml, 'name', str)
self.copy_option(xml, 'resourceGroup', 'resource')
self.copy_tags(xml)
def show_type(self):
return self.get_type()
class AzureDNSZoneState(ResourceState):
"""State of an Azure DNS Zone"""
dns_zone_name = attr_property("azure.name", None)
resource_group = attr_property("azure.resourceGroup", None)
tags = attr_property("azure.tags", {}, 'json')
@classmethod
def get_type(cls):
return "azure-dns-zone"
@property
def resource_id(self):
return self.dns_zone_name
@property
def full_name(self):
return "Azure DNS zone '{0}'".format(self.resource_id)
def is_settled(self, resource):
return True
def get_resource_url(self):
return ("https://management.azure.com/subscriptions/{0}"
"/resourceGroups/{1}/providers/Microsoft.Network"
"/dnsZones/{2}?api-version=2015-05-04-preview"
.format(quote(self.subscription_id),
quote(self.resource_group),
quote(self.dns_zone_name)))
def mk_request(self, method):
http_request = Request()
http_request.url = self.get_resource_url()
http_request.method = method
http_request.headers['Content-Type'] = 'application/json'
return http_request
def get_resource(self):
response = self.nrpc().send_request(self.mk_request('GET'))
if response.status_code == 200:
return json.loads(response.content.decode())
else:
return None
def destroy_resource(self):
response = self.nrpc().send_request(self.mk_request('DELETE'))
if response.status_code != 200:
raise AzureHttpError(response.content, response.status_code)
defn_properties = [ 'tags' ]
def _create_or_update(self, defn):
info = {
"location": "global",
"tags": defn.tags,
"properties": { }
}
http_request = self.mk_request('PUT')
http_request.data = json.dumps(info)
http_request.headers['Content-Length'] = len(http_request.data)
response = self.nrpc().send_request(http_request)
if response.status_code not in [200, 201]:
raise AzureHttpError(response.content, response.status_code)
self.state = self.UP
self.copy_properties(defn)
def create(self, defn, check, allow_reboot, allow_recreate):
self.no_subscription_id_change(defn)
self.no_property_change(defn, 'resource_group')
self.copy_mgmt_credentials(defn)
self.dns_zone_name = defn.dns_zone_name
self.resource_group = defn.resource_group
if check:
zone = self.get_settled_resource()
if not zone:
self.warn_missing_resource()
elif self.state == self.UP:
self.handle_changed_property('tags', zone['tags'])
else:
self.warn_not_supposed_to_exist()
self.confirm_destroy()
if self.state != self.UP:
if self.get_settled_resource():
raise Exception("tried creating a DNS zone that already exists; "
"please run 'deploy --check' to fix this")
self.log("creating {0}...".format(self.full_name))
self._create_or_update(defn)
if self.properties_changed(defn):
self.log("updating properties of {0}...".format(self.full_name))
self.get_settled_resource_assert_exists()
self._create_or_update(defn)
def create_after(self, resources, defn):
from nixops.resources.azure_resource_group import AzureResourceGroupState
return {r for r in resources
if isinstance(r, AzureResourceGroupState) }
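As a rough sketch (not part of the original resource), the PUT issued by _create_or_update() can be assembled by hand as below; the subscription, resource group, zone name and tags are hypothetical placeholders.

import json
try:
    from urllib import quote  # Python 2
except ImportError:
    from urllib.parse import quote  # Python 3

def example_dns_zone_put(subscription_id, resource_group, zone_name, tags):
    # Mirrors get_resource_url() and the request body built in _create_or_update().
    url = ("https://management.azure.com/subscriptions/{0}"
           "/resourceGroups/{1}/providers/Microsoft.Network"
           "/dnsZones/{2}?api-version=2015-05-04-preview"
           .format(quote(subscription_id),
                   quote(resource_group),
                   quote(zone_name)))
    body = json.dumps({"location": "global", "tags": tags, "properties": {}})
    headers = {"Content-Type": "application/json",
               "Content-Length": str(len(body))}
    return url, headers, body

# e.g. example_dns_zone_put("00000000-0000-0000-0000-000000000000",
#                           "my-resource-group", "example.com", {"env": "test"})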
| coreyoconnor/nixops | nixops/resources/azure_dns_zone.py | Python | lgpl-3.0 | 4,631 |
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The ActionChains implementation.
"""
import time
from selenium.webdriver.remote.command import Command
from .utils import keys_to_typing
from .actions.action_builder import ActionBuilder
class ActionChains(object):
"""
ActionChains are a way to automate low level interactions such as
mouse movements, mouse button actions, key press, and context menu interactions.
This is useful for doing more complex actions like hover over and drag and drop.
Generate user actions.
When you call methods for actions on the ActionChains object,
the actions are stored in a queue in the ActionChains object.
When you call perform(), the events are fired in the order they
are queued up.
ActionChains can be used in a chain pattern::
menu = driver.find_element(By.CSS_SELECTOR, ".nav")
hidden_submenu = driver.find_element(By.CSS_SELECTOR, ".nav #submenu1")
ActionChains(driver).move_to_element(menu).click(hidden_submenu).perform()
Or actions can be queued up one by one, then performed.::
menu = driver.find_element(By.CSS_SELECTOR, ".nav")
hidden_submenu = driver.find_element(By.CSS_SELECTOR, ".nav #submenu1")
actions = ActionChains(driver)
actions.move_to_element(menu)
actions.click(hidden_submenu)
actions.perform()
Either way, the actions are performed in the order they are called, one after
another.
"""
def __init__(self, driver):
"""
Creates a new ActionChains.
:Args:
- driver: The WebDriver instance which performs user actions.
"""
self._driver = driver
self._actions = []
if self._driver.w3c:
self.w3c_actions = ActionBuilder(driver)
def perform(self):
"""
Performs all stored actions.
"""
if self._driver.w3c:
self.w3c_actions.perform()
else:
for action in self._actions:
action()
def reset_actions(self):
"""
Clears actions that are already stored locally and on the remote end
"""
if self._driver.w3c:
self.w3c_actions.clear_actions()
for device in self.w3c_actions.devices:
device.clear_actions()
self._actions = []
def click(self, on_element=None):
"""
Clicks an element.
:Args:
- on_element: The element to click.
If None, clicks on current mouse position.
"""
if on_element:
self.move_to_element(on_element)
if self._driver.w3c:
self.w3c_actions.pointer_action.click()
self.w3c_actions.key_action.pause()
self.w3c_actions.key_action.pause()
else:
self._actions.append(lambda: self._driver.execute(
Command.CLICK, {'button': 0}))
return self
def click_and_hold(self, on_element=None):
"""
Holds down the left mouse button on an element.
:Args:
- on_element: The element to mouse down.
If None, clicks on current mouse position.
"""
if on_element:
self.move_to_element(on_element)
if self._driver.w3c:
self.w3c_actions.pointer_action.click_and_hold()
self.w3c_actions.key_action.pause()
else:
self._actions.append(lambda: self._driver.execute(
Command.MOUSE_DOWN, {}))
return self
def context_click(self, on_element=None):
"""
Performs a context-click (right click) on an element.
:Args:
- on_element: The element to context-click.
If None, clicks on current mouse position.
"""
if on_element:
self.move_to_element(on_element)
if self._driver.w3c:
self.w3c_actions.pointer_action.context_click()
self.w3c_actions.key_action.pause()
self.w3c_actions.key_action.pause()
else:
self._actions.append(lambda: self._driver.execute(
Command.CLICK, {'button': 2}))
return self
def double_click(self, on_element=None):
"""
Double-clicks an element.
:Args:
- on_element: The element to double-click.
If None, clicks on current mouse position.
"""
if on_element:
self.move_to_element(on_element)
if self._driver.w3c:
self.w3c_actions.pointer_action.double_click()
for _ in range(4):
self.w3c_actions.key_action.pause()
else:
self._actions.append(lambda: self._driver.execute(
Command.DOUBLE_CLICK, {}))
return self
def drag_and_drop(self, source, target):
"""
Holds down the left mouse button on the source element,
then moves to the target element and releases the mouse button.
:Args:
- source: The element to mouse down.
- target: The element to mouse up.
"""
self.click_and_hold(source)
self.release(target)
return self
def drag_and_drop_by_offset(self, source, xoffset, yoffset):
"""
Holds down the left mouse button on the source element,
then moves to the target offset and releases the mouse button.
:Args:
- source: The element to mouse down.
- xoffset: X offset to move to.
- yoffset: Y offset to move to.
"""
self.click_and_hold(source)
self.move_by_offset(xoffset, yoffset)
self.release()
return self
def key_down(self, value, element=None):
"""
Sends a key press only, without releasing it.
Should only be used with modifier keys (Control, Alt and Shift).
:Args:
- value: The modifier key to send. Values are defined in `Keys` class.
- element: The element to send keys.
If None, sends a key to current focused element.
Example, pressing ctrl+c::
ActionChains(driver).key_down(Keys.CONTROL).send_keys('c').key_up(Keys.CONTROL).perform()
"""
if element:
self.click(element)
if self._driver.w3c:
self.w3c_actions.key_action.key_down(value)
self.w3c_actions.pointer_action.pause()
else:
self._actions.append(lambda: self._driver.execute(
Command.SEND_KEYS_TO_ACTIVE_ELEMENT,
{"value": keys_to_typing(value)}))
return self
def key_up(self, value, element=None):
"""
Releases a modifier key.
:Args:
- value: The modifier key to send. Values are defined in Keys class.
- element: The element to send keys.
If None, sends a key to current focused element.
Example, pressing ctrl+c::
ActionChains(driver).key_down(Keys.CONTROL).send_keys('c').key_up(Keys.CONTROL).perform()
"""
if element:
self.click(element)
if self._driver.w3c:
self.w3c_actions.key_action.key_up(value)
self.w3c_actions.pointer_action.pause()
else:
self._actions.append(lambda: self._driver.execute(
Command.SEND_KEYS_TO_ACTIVE_ELEMENT,
{"value": keys_to_typing(value)}))
return self
def move_by_offset(self, xoffset, yoffset):
"""
Moving the mouse to an offset from current mouse position.
:Args:
- xoffset: X offset to move to, as a positive or negative integer.
- yoffset: Y offset to move to, as a positive or negative integer.
"""
if self._driver.w3c:
self.w3c_actions.pointer_action.move_by(xoffset, yoffset)
self.w3c_actions.key_action.pause()
else:
self._actions.append(lambda: self._driver.execute(
Command.MOVE_TO, {
'xoffset': int(xoffset),
'yoffset': int(yoffset)}))
return self
def move_to_element(self, to_element):
"""
Moving the mouse to the middle of an element.
:Args:
- to_element: The WebElement to move to.
"""
if self._driver.w3c:
self.w3c_actions.pointer_action.move_to(to_element)
self.w3c_actions.key_action.pause()
else:
self._actions.append(lambda: self._driver.execute(
Command.MOVE_TO, {'element': to_element.id}))
return self
def move_to_element_with_offset(self, to_element, xoffset, yoffset):
"""
Move the mouse by an offset of the specified element.
Offsets are relative to the top-left corner of the element.
:Args:
- to_element: The WebElement to move to.
- xoffset: X offset to move to.
- yoffset: Y offset to move to.
"""
if self._driver.w3c:
self.w3c_actions.pointer_action.move_to(to_element,
int(xoffset),
int(yoffset))
self.w3c_actions.key_action.pause()
else:
self._actions.append(
lambda: self._driver.execute(Command.MOVE_TO, {
'element': to_element.id,
'xoffset': int(xoffset),
'yoffset': int(yoffset)}))
return self
def pause(self, seconds):
""" Pause all inputs for the specified duration in seconds """
if self._driver.w3c:
self.w3c_actions.pointer_action.pause(seconds)
self.w3c_actions.key_action.pause(seconds)
else:
self._actions.append(lambda: time.sleep(seconds))
return self
def release(self, on_element=None):
"""
Releasing a held mouse button on an element.
:Args:
- on_element: The element to mouse up.
If None, releases on current mouse position.
"""
if on_element:
self.move_to_element(on_element)
if self._driver.w3c:
self.w3c_actions.pointer_action.release()
self.w3c_actions.key_action.pause()
else:
self._actions.append(lambda: self._driver.execute(Command.MOUSE_UP, {}))
return self
def send_keys(self, *keys_to_send):
"""
Sends keys to current focused element.
:Args:
- keys_to_send: The keys to send. Modifier keys constants can be found in the
'Keys' class.
"""
typing = keys_to_typing(keys_to_send)
if self._driver.w3c:
for key in typing:
self.key_down(key)
self.key_up(key)
else:
self._actions.append(lambda: self._driver.execute(
Command.SEND_KEYS_TO_ACTIVE_ELEMENT, {'value': typing}))
return self
def send_keys_to_element(self, element, *keys_to_send):
"""
Sends keys to an element.
:Args:
- element: The element to send keys.
- keys_to_send: The keys to send. Modifier keys constants can be found in the
'Keys' class.
"""
self.click(element)
self.send_keys(*keys_to_send)
return self
    # Context manager so ActionChains can be used in a 'with .. as' statement.
def __enter__(self):
return self # Return created instance of self.
def __exit__(self, _type, _value, _traceback):
pass # Do nothing, does not require additional cleanup.
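Illustrative usage only (not part of the module above): since __enter__/__exit__ are defined, ActionChains can also be driven from a 'with' statement; __exit__ does no implicit perform(), so perform() is still called explicitly. The driver and CSS selectors below are hypothetical.

if __name__ == '__main__':
    from selenium import webdriver
    from selenium.webdriver.common.by import By

    driver = webdriver.Firefox()
    menu = driver.find_element(By.CSS_SELECTOR, ".nav")
    hidden_submenu = driver.find_element(By.CSS_SELECTOR, ".nav #submenu1")
    with ActionChains(driver) as actions:
        actions.move_to_element(menu).click(hidden_submenu)
    actions.perform()
    driver.quit()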
| asolntsev/selenium | py/selenium/webdriver/common/action_chains.py | Python | apache-2.0 | 12,537 |
# Copyright 2011 Denali Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import json
from lxml import etree
import webob
from nova.api.openstack.compute.contrib import volumes
from nova import context
from nova import exception
from nova import flags
from nova import log as logging
from nova import test
from nova import volume
from nova.tests.api.openstack import fakes
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
_last_param = {}
def _get_default_snapshot_param():
return {
'id': 123,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
'display_description': 'Default description',
}
def stub_snapshot_create(self, context, volume_id, name, description):
global _last_param
snapshot = _get_default_snapshot_param()
snapshot['volume_id'] = volume_id
snapshot['display_name'] = name
snapshot['display_description'] = description
LOG.debug(_("_create: %s"), snapshot)
_last_param = snapshot
return snapshot
def stub_snapshot_delete(self, context, snapshot):
global _last_param
_last_param = snapshot
LOG.debug(_("_delete: %s"), locals())
if snapshot['id'] != '123':
raise exception.NotFound
def stub_snapshot_get(self, context, snapshot_id):
global _last_param
_last_param = dict(snapshot_id=snapshot_id)
LOG.debug(_("_get: %s"), locals())
if snapshot_id != '123':
raise exception.NotFound
param = _get_default_snapshot_param()
param['id'] = snapshot_id
return param
def stub_snapshot_get_all(self, context):
LOG.debug(_("_get_all: %s"), locals())
param = _get_default_snapshot_param()
param['id'] = 123
return [param]
class SnapshotApiTest(test.TestCase):
def setUp(self):
super(SnapshotApiTest, self).setUp()
fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_auth(self.stubs)
self.stubs.Set(volume.api.API, "create_snapshot", stub_snapshot_create)
self.stubs.Set(volume.api.API, "create_snapshot_force",
stub_snapshot_create)
self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete)
self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
self.stubs.Set(volume.api.API, "get_all_snapshots",
stub_snapshot_get_all)
self.stubs.Set(volume.api.API, "get", fakes.stub_volume_get)
self.context = context.get_admin_context()
def test_snapshot_create(self):
global _last_param
_last_param = {}
snapshot = {"volume_id": 12,
"force": False,
"display_name": "Snapshot Test Name",
"display_description": "Snapshot Test Desc"}
body = dict(snapshot=snapshot)
req = webob.Request.blank('/v2/fake/os-snapshots')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(fakes.wsgi_app())
LOG.debug(_("test_snapshot_create: param=%s"), _last_param)
self.assertEqual(resp.status_int, 200)
# Compare if parameters were correctly passed to stub
self.assertEqual(_last_param['display_name'], "Snapshot Test Name")
self.assertEqual(_last_param['display_description'],
"Snapshot Test Desc")
resp_dict = json.loads(resp.body)
LOG.debug(_("test_snapshot_create: resp_dict=%s"), resp_dict)
self.assertTrue('snapshot' in resp_dict)
self.assertEqual(resp_dict['snapshot']['displayName'],
snapshot['display_name'])
self.assertEqual(resp_dict['snapshot']['displayDescription'],
snapshot['display_description'])
def test_snapshot_create_force(self):
global _last_param
_last_param = {}
snapshot = {"volume_id": 12,
"force": True,
"display_name": "Snapshot Test Name",
"display_description": "Snapshot Test Desc"}
body = dict(snapshot=snapshot)
req = webob.Request.blank('/v2/fake/os-snapshots')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(fakes.wsgi_app())
LOG.debug(_("test_snapshot_create_force: param=%s"), _last_param)
self.assertEqual(resp.status_int, 200)
# Compare if parameters were correctly passed to stub
self.assertEqual(_last_param['display_name'], "Snapshot Test Name")
self.assertEqual(_last_param['display_description'],
"Snapshot Test Desc")
resp_dict = json.loads(resp.body)
LOG.debug(_("test_snapshot_create_force: resp_dict=%s"), resp_dict)
self.assertTrue('snapshot' in resp_dict)
self.assertEqual(resp_dict['snapshot']['displayName'],
snapshot['display_name'])
self.assertEqual(resp_dict['snapshot']['displayDescription'],
snapshot['display_description'])
def test_snapshot_delete(self):
global _last_param
_last_param = {}
snapshot_id = 123
req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
req.method = 'DELETE'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 202)
self.assertEqual(str(_last_param['id']), str(snapshot_id))
def test_snapshot_delete_invalid_id(self):
global _last_param
_last_param = {}
snapshot_id = 234
req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
req.method = 'DELETE'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 404)
self.assertEqual(str(_last_param['snapshot_id']), str(snapshot_id))
def test_snapshot_show(self):
global _last_param
_last_param = {}
snapshot_id = 123
req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
req.method = 'GET'
resp = req.get_response(fakes.wsgi_app())
LOG.debug(_("test_snapshot_show: resp=%s"), resp)
self.assertEqual(resp.status_int, 200)
self.assertEqual(str(_last_param['snapshot_id']), str(snapshot_id))
resp_dict = json.loads(resp.body)
self.assertTrue('snapshot' in resp_dict)
self.assertEqual(resp_dict['snapshot']['id'], str(snapshot_id))
def test_snapshot_show_invalid_id(self):
global _last_param
_last_param = {}
snapshot_id = 234
req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
req.method = 'GET'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 404)
self.assertEqual(str(_last_param['snapshot_id']), str(snapshot_id))
def test_snapshot_detail(self):
req = webob.Request.blank('/v2/fake/os-snapshots/detail')
req.method = 'GET'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 200)
resp_dict = json.loads(resp.body)
LOG.debug(_("test_snapshot_detail: resp_dict=%s"), resp_dict)
self.assertTrue('snapshots' in resp_dict)
resp_snapshots = resp_dict['snapshots']
self.assertEqual(len(resp_snapshots), 1)
resp_snapshot = resp_snapshots.pop()
self.assertEqual(resp_snapshot['id'], 123)
class SnapshotSerializerTest(test.TestCase):
def _verify_snapshot(self, snap, tree):
self.assertEqual(tree.tag, 'snapshot')
for attr in ('id', 'status', 'size', 'createdAt',
'displayName', 'displayDescription', 'volumeId'):
self.assertEqual(str(snap[attr]), tree.get(attr))
def test_snapshot_show_create_serializer(self):
serializer = volumes.SnapshotTemplate()
raw_snapshot = dict(
id='snap_id',
status='snap_status',
size=1024,
createdAt=datetime.datetime.now(),
displayName='snap_name',
displayDescription='snap_desc',
volumeId='vol_id',
)
text = serializer.serialize(dict(snapshot=raw_snapshot))
print text
tree = etree.fromstring(text)
self._verify_snapshot(raw_snapshot, tree)
def test_snapshot_index_detail_serializer(self):
serializer = volumes.SnapshotsTemplate()
raw_snapshots = [dict(
id='snap1_id',
status='snap1_status',
size=1024,
createdAt=datetime.datetime.now(),
displayName='snap1_name',
displayDescription='snap1_desc',
volumeId='vol1_id',
),
dict(
id='snap2_id',
status='snap2_status',
size=1024,
createdAt=datetime.datetime.now(),
displayName='snap2_name',
displayDescription='snap2_desc',
volumeId='vol2_id',
)]
text = serializer.serialize(dict(snapshots=raw_snapshots))
print text
tree = etree.fromstring(text)
self.assertEqual('snapshots', tree.tag)
self.assertEqual(len(raw_snapshots), len(tree))
for idx, child in enumerate(tree):
self._verify_snapshot(raw_snapshots[idx], child)
| sileht/deb-openstack-nova | nova/tests/api/openstack/compute/contrib/test_snapshots.py | Python | apache-2.0 | 10,216 |
# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cinder import context
from cinder import exception
from cinder import quota_utils
from cinder import test
from keystoneclient import exceptions
from oslo_config import cfg
from oslo_config import fixture as config_fixture
CONF = cfg.CONF
class QuotaUtilsTest(test.TestCase):
class FakeProject(object):
def __init__(self, id='foo', parent_id=None):
self.id = id
self.parent_id = parent_id
self.subtree = None
self.parents = None
self.domain_id = 'default'
def setUp(self):
super(QuotaUtilsTest, self).setUp()
self.auth_url = 'http://localhost:5000'
self.context = context.RequestContext('fake_user', 'fake_proj_id')
self.fixture = self.useFixture(config_fixture.Config(CONF))
self.fixture.config(auth_uri=self.auth_url, group='keystone_authtoken')
@mock.patch('keystoneclient.client.Client')
@mock.patch('keystoneclient.session.Session')
def test_keystone_client_instantiation(self, ksclient_session,
ksclient_class):
quota_utils._keystone_client(self.context)
ksclient_class.assert_called_once_with(auth_url=self.auth_url,
session=ksclient_session(),
version=(3, 0))
@mock.patch('keystoneclient.client.Client')
def test_get_project_keystoneclient_v2(self, ksclient_class):
keystoneclient = ksclient_class.return_value
keystoneclient.version = 'v2.0'
expected_project = quota_utils.GenericProjectInfo(
self.context.project_id, 'v2.0')
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id)
self.assertEqual(expected_project.__dict__, project.__dict__)
@mock.patch('keystoneclient.client.Client')
def test_get_project_keystoneclient_v3(self, ksclient_class):
keystoneclient = ksclient_class.return_value
keystoneclient.version = 'v3'
returned_project = self.FakeProject(self.context.project_id, 'bar')
del returned_project.subtree
keystoneclient.projects.get.return_value = returned_project
expected_project = quota_utils.GenericProjectInfo(
self.context.project_id, 'v3', 'bar')
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id)
self.assertEqual(expected_project.__dict__, project.__dict__)
@mock.patch('keystoneclient.client.Client')
def test_get_project_keystoneclient_v3_with_subtree(self, ksclient_class):
keystoneclient = ksclient_class.return_value
keystoneclient.version = 'v3'
returned_project = self.FakeProject(self.context.project_id, 'bar')
subtree_dict = {'baz': {'quux': None}}
returned_project.subtree = subtree_dict
keystoneclient.projects.get.return_value = returned_project
expected_project = quota_utils.GenericProjectInfo(
self.context.project_id, 'v3', 'bar', subtree_dict)
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id, subtree_as_ids=True)
keystoneclient.projects.get.assert_called_once_with(
self.context.project_id, parents_as_ids=False, subtree_as_ids=True)
self.assertEqual(expected_project.__dict__, project.__dict__)
def _setup_mock_ksclient(self, mock_client, version='v3',
subtree=None, parents=None):
keystoneclient = mock_client.return_value
keystoneclient.version = version
proj = self.FakeProject(self.context.project_id)
proj.subtree = subtree
if parents:
proj.parents = parents
proj.parent_id = next(iter(parents.keys()))
keystoneclient.projects.get.return_value = proj
@mock.patch('keystoneclient.client.Client')
def test__filter_domain_id_from_parents_domain_as_parent(
self, mock_client):
# Test with a top level project (domain is direct parent)
self._setup_mock_ksclient(mock_client, parents={'default': None})
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id, parents_as_ids=True)
self.assertIsNone(project.parent_id)
self.assertIsNone(project.parents)
@mock.patch('keystoneclient.client.Client')
def test__filter_domain_id_from_parents_domain_as_grandparent(
self, mock_client):
# Test with a child project (domain is more than a parent)
self._setup_mock_ksclient(mock_client,
parents={'bar': {'default': None}})
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id, parents_as_ids=True)
self.assertEqual('bar', project.parent_id)
self.assertEqual({'bar': None}, project.parents)
@mock.patch('keystoneclient.client.Client')
def test__filter_domain_id_from_parents_no_domain_in_parents(
self, mock_client):
# Test that if top most parent is not a domain (to simulate an older
# keystone version) nothing gets removed from the tree
parents = {'bar': {'foo': None}}
self._setup_mock_ksclient(mock_client, parents=parents)
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id, parents_as_ids=True)
self.assertEqual('bar', project.parent_id)
self.assertEqual(parents, project.parents)
@mock.patch('keystoneclient.client.Client')
def test__filter_domain_id_from_parents_no_parents(
self, mock_client):
        # Test that if no parents are present (to simulate an older
# keystone version) things don't blow up
self._setup_mock_ksclient(mock_client)
project = quota_utils.get_project_hierarchy(
self.context, self.context.project_id, parents_as_ids=True)
self.assertIsNone(project.parent_id)
self.assertIsNone(project.parents)
@mock.patch('cinder.quota_utils._keystone_client')
def test_validate_nested_projects_with_keystone_v2(self, _keystone_client):
_keystone_client.side_effect = exceptions.VersionNotAvailable
self.assertRaises(exception.CinderException,
quota_utils.validate_setup_for_nested_quota_use,
self.context, [], None)
@mock.patch('cinder.quota_utils._keystone_client')
def test_validate_nested_projects_non_cloud_admin(self, _keystone_client):
# Covers not cloud admin or using old policy.json
_keystone_client.side_effect = exceptions.Forbidden
self.assertRaises(exception.CinderException,
quota_utils.validate_setup_for_nested_quota_use,
self.context, [], None)
def _process_reserve_over_quota(self, overs, usages, quotas,
expected_ex,
resource='volumes'):
ctxt = context.get_admin_context()
ctxt.project_id = 'fake'
size = 1
kwargs = {'overs': overs,
'usages': usages,
'quotas': quotas}
exc = exception.OverQuota(**kwargs)
self.assertRaises(expected_ex,
quota_utils.process_reserve_over_quota,
ctxt, exc,
resource=resource,
size=size)
def test_volume_size_exceed_quota(self):
overs = ['gigabytes']
usages = {'gigabytes': {'reserved': 1, 'in_use': 9}}
quotas = {'gigabytes': 10, 'snapshots': 10}
self._process_reserve_over_quota(
overs, usages, quotas,
exception.VolumeSizeExceedsAvailableQuota)
def test_snapshot_limit_exceed_quota(self):
overs = ['snapshots']
usages = {'snapshots': {'reserved': 1, 'in_use': 9}}
quotas = {'gigabytes': 10, 'snapshots': 10}
self._process_reserve_over_quota(
overs, usages, quotas,
exception.SnapshotLimitExceeded,
resource='snapshots')
def test_backup_gigabytes_exceed_quota(self):
overs = ['backup_gigabytes']
usages = {'backup_gigabytes': {'reserved': 1, 'in_use': 9}}
quotas = {'backup_gigabytes': 10}
self._process_reserve_over_quota(
overs, usages, quotas,
exception.VolumeBackupSizeExceedsAvailableQuota,
resource='backups')
def test_backup_limit_quota(self):
overs = ['backups']
usages = {'backups': {'reserved': 1, 'in_use': 9}}
quotas = {'backups': 9}
self._process_reserve_over_quota(
overs, usages, quotas,
exception.BackupLimitExceeded,
resource='backups')
def test_volumes_limit_quota(self):
overs = ['volumes']
usages = {'volumes': {'reserved': 1, 'in_use': 9}}
quotas = {'volumes': 9}
self._process_reserve_over_quota(
overs, usages, quotas,
exception.VolumeLimitExceeded)
def test_unknown_quota(self):
overs = ['unknown']
usages = {'volumes': {'reserved': 1, 'in_use': 9}}
quotas = {'volumes': 9}
self._process_reserve_over_quota(
overs, usages, quotas,
exception.UnexpectedOverQuota)
def test_unknown_quota2(self):
overs = ['volumes']
usages = {'volumes': {'reserved': 1, 'in_use': 9}}
quotas = {'volumes': 9}
self._process_reserve_over_quota(
overs, usages, quotas,
exception.UnexpectedOverQuota,
resource='snapshots')
| bswartz/cinder | cinder/tests/unit/test_quota_utils.py | Python | apache-2.0 | 10,416 |
# Copyright (c) 2010-2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from os.path import join
import random
import time
import itertools
from collections import defaultdict
import six.moves.cPickle as pickle
import shutil
from eventlet import (GreenPile, GreenPool, Timeout, sleep, hubs, tpool,
spawn)
from eventlet.support.greenlets import GreenletExit
from swift import gettext_ as _
from swift.common.utils import (
whataremyips, unlink_older_than, compute_eta, get_logger,
dump_recon_cache, mkdirs, config_true_value, list_from_csv, get_hub,
tpool_reraise, GreenAsyncPile, Timestamp, remove_file)
from swift.common.swob import HeaderKeyDict
from swift.common.bufferedhttp import http_connect
from swift.common.daemon import Daemon
from swift.common.ring.utils import is_local_device
from swift.obj.ssync_sender import Sender as ssync_sender
from swift.common.http import HTTP_OK, HTTP_NOT_FOUND, \
HTTP_INSUFFICIENT_STORAGE
from swift.obj.diskfile import DiskFileRouter, get_data_dir, \
get_tmp_dir
from swift.common.storage_policy import POLICIES, EC_POLICY
from swift.common.exceptions import ConnectionTimeout, DiskFileError, \
SuffixSyncError
SYNC, REVERT = ('sync_only', 'sync_revert')
hubs.use_hub(get_hub())
def _get_partners(frag_index, part_nodes):
"""
Returns the left and right partners of the node whose index is
equal to the given frag_index.
:param frag_index: a fragment index
:param part_nodes: a list of primary nodes
:returns: [<node-to-left>, <node-to-right>]
"""
return [
part_nodes[(frag_index - 1) % len(part_nodes)],
part_nodes[(frag_index + 1) % len(part_nodes)],
]
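# Illustrative sanity check only (not part of the original module): the modular
# arithmetic above wraps around both ends of the primary node list.
def _get_partners_example():
    part_nodes = [{'id': 0}, {'id': 1}, {'id': 2}, {'id': 3}]
    # frag_index 0 partners with the last node (left) and node 1 (right)
    assert _get_partners(0, part_nodes) == [{'id': 3}, {'id': 1}]
    # frag_index 3 wraps around on the right back to node 0
    assert _get_partners(3, part_nodes) == [{'id': 2}, {'id': 0}]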
class RebuildingECDiskFileStream(object):
"""
    This class wraps the reconstructed fragment archive data and
metadata in the DiskFile interface for ssync.
"""
def __init__(self, datafile_metadata, frag_index, rebuilt_fragment_iter):
# start with metadata from a participating FA
self.datafile_metadata = datafile_metadata
# the new FA is going to have the same length as others in the set
self._content_length = self.datafile_metadata['Content-Length']
# update the FI and delete the ETag, the obj server will
# recalc on the other side...
self.datafile_metadata['X-Object-Sysmeta-Ec-Frag-Index'] = frag_index
for etag_key in ('ETag', 'Etag'):
self.datafile_metadata.pop(etag_key, None)
self.frag_index = frag_index
self.rebuilt_fragment_iter = rebuilt_fragment_iter
def get_metadata(self):
return self.datafile_metadata
def get_datafile_metadata(self):
return self.datafile_metadata
@property
def content_length(self):
return self._content_length
def reader(self):
for chunk in self.rebuilt_fragment_iter:
yield chunk
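# Illustrative only (not part of the original module): roughly how the wrapper
# above behaves once reconstruct_fa() hands it to ssync -- the ETag is dropped
# and the fragment index is rewritten for the rebuilt archive. The metadata
# values are placeholders.
def _rebuilding_stream_example():
    metadata = {'Content-Length': 4, 'ETag': 'deadbeef',
                'X-Object-Sysmeta-Ec-Frag-Index': 0}
    stream = RebuildingECDiskFileStream(metadata, frag_index=3,
                                        rebuilt_fragment_iter=iter(['frag']))
    assert 'ETag' not in stream.get_metadata()
    assert stream.get_metadata()['X-Object-Sysmeta-Ec-Frag-Index'] == 3
    assert stream.content_length == 4
    assert list(stream.reader()) == ['frag']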
class ObjectReconstructor(Daemon):
"""
Reconstruct objects using erasure code. And also rebalance EC Fragment
Archive objects off handoff nodes.
Encapsulates most logic and data needed by the object reconstruction
process. Each call to .reconstruct() performs one pass. It's up to the
caller to do this in a loop.
"""
def __init__(self, conf, logger=None):
"""
:param conf: configuration object obtained from ConfigParser
:param logger: logging object
"""
self.conf = conf
self.logger = logger or get_logger(
conf, log_route='object-reconstructor')
self.devices_dir = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.bind_ip = conf.get('bind_ip', '0.0.0.0')
self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
self.port = None if self.servers_per_port else \
int(conf.get('bind_port', 6000))
self.concurrency = int(conf.get('concurrency', 1))
self.stats_interval = int(conf.get('stats_interval', '300'))
self.ring_check_interval = int(conf.get('ring_check_interval', 15))
self.next_check = time.time() + self.ring_check_interval
self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
self.partition_times = []
self.interval = int(conf.get('interval') or
conf.get('run_pause') or 30)
self.http_timeout = int(conf.get('http_timeout', 60))
self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = os.path.join(self.recon_cache_path, "object.recon")
# defaults subject to change after beta
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.node_timeout = float(conf.get('node_timeout', 10))
self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
self.headers = {
'Content-Length': '0',
'user-agent': 'obj-reconstructor %s' % os.getpid()}
self.handoffs_first = config_true_value(conf.get('handoffs_first',
False))
self._df_router = DiskFileRouter(conf, self.logger)
def load_object_ring(self, policy):
"""
Make sure the policy's rings are loaded.
:param policy: the StoragePolicy instance
:returns: appropriate ring object
"""
policy.load_ring(self.swift_dir)
return policy.object_ring
def check_ring(self, object_ring):
"""
Check to see if the ring has been updated
:param object_ring: the ring to check
        :returns: False if the ring has changed and should be reloaded,
            True otherwise
"""
if time.time() > self.next_check:
self.next_check = time.time() + self.ring_check_interval
if object_ring.has_changed():
return False
return True
def _full_path(self, node, part, path, policy):
return '%(replication_ip)s:%(replication_port)s' \
'/%(device)s/%(part)s%(path)s ' \
'policy#%(policy)d frag#%(frag_index)s' % {
'replication_ip': node['replication_ip'],
'replication_port': node['replication_port'],
'device': node['device'],
'part': part, 'path': path,
'policy': policy,
'frag_index': node.get('index', 'handoff'),
}
def _get_response(self, node, part, path, headers, policy):
"""
Helper method for reconstruction that GETs a single EC fragment
archive
:param node: the node to GET from
:param part: the partition
:param path: full path of the desired EC archive
:param headers: the headers to send
:param policy: an instance of
:class:`~swift.common.storage_policy.BaseStoragePolicy`
:returns: response
"""
resp = None
try:
with ConnectionTimeout(self.conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'],
part, 'GET', path, headers=headers)
with Timeout(self.node_timeout):
resp = conn.getresponse()
if resp.status not in [HTTP_OK, HTTP_NOT_FOUND]:
self.logger.warning(
_("Invalid response %(resp)s from %(full_path)s"),
{'resp': resp.status,
'full_path': self._full_path(node, part, path, policy)})
resp = None
elif resp.status == HTTP_NOT_FOUND:
resp = None
except (Exception, Timeout):
self.logger.exception(
_("Trying to GET %(full_path)s"), {
'full_path': self._full_path(node, part, path, policy)})
return resp
def reconstruct_fa(self, job, node, datafile_metadata):
"""
Reconstructs a fragment archive - this method is called from ssync
after a remote node responds that is missing this object - the local
diskfile is opened to provide metadata - but to reconstruct the
missing fragment archive we must connect to multiple object servers.
:param job: job from ssync_sender
:param node: node that we're rebuilding to
:param datafile_metadata: the datafile metadata to attach to
the rebuilt fragment archive
:returns: a DiskFile like class for use by ssync
:raises DiskFileError: if the fragment archive cannot be reconstructed
"""
part_nodes = job['policy'].object_ring.get_part_nodes(
job['partition'])
part_nodes.remove(node)
# the fragment index we need to reconstruct is the position index
# of the node we're rebuilding to within the primary part list
fi_to_rebuild = node['index']
# KISS send out connection requests to all nodes, see what sticks
headers = self.headers.copy()
headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
pile = GreenAsyncPile(len(part_nodes))
path = datafile_metadata['name']
for node in part_nodes:
pile.spawn(self._get_response, node, job['partition'],
path, headers, job['policy'])
responses = []
etag = None
for resp in pile:
if not resp:
continue
resp.headers = HeaderKeyDict(resp.getheaders())
if str(fi_to_rebuild) == \
resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index'):
continue
if resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index') in set(
r.headers.get('X-Object-Sysmeta-Ec-Frag-Index')
for r in responses):
continue
responses.append(resp)
etag = sorted(responses, reverse=True,
key=lambda r: Timestamp(
r.headers.get('X-Backend-Timestamp')
))[0].headers.get('X-Object-Sysmeta-Ec-Etag')
responses = [r for r in responses if
r.headers.get('X-Object-Sysmeta-Ec-Etag') == etag]
if len(responses) >= job['policy'].ec_ndata:
break
else:
self.logger.error(
'Unable to get enough responses (%s/%s) '
'to reconstruct %s with ETag %s' % (
len(responses), job['policy'].ec_ndata,
self._full_path(node, job['partition'],
datafile_metadata['name'], job['policy']),
etag))
raise DiskFileError('Unable to reconstruct EC archive')
rebuilt_fragment_iter = self.make_rebuilt_fragment_iter(
responses[:job['policy'].ec_ndata], path, job['policy'],
fi_to_rebuild)
return RebuildingECDiskFileStream(datafile_metadata, fi_to_rebuild,
rebuilt_fragment_iter)
def _reconstruct(self, policy, fragment_payload, frag_index):
return policy.pyeclib_driver.reconstruct(fragment_payload,
[frag_index])[0]
def make_rebuilt_fragment_iter(self, responses, path, policy, frag_index):
"""
Turn a set of connections from backend object servers into a generator
that yields up the rebuilt fragment archive for frag_index.
"""
def _get_one_fragment(resp):
buff = ''
remaining_bytes = policy.fragment_size
while remaining_bytes:
chunk = resp.read(remaining_bytes)
if not chunk:
break
remaining_bytes -= len(chunk)
buff += chunk
return buff
def fragment_payload_iter():
            # We need a fragment from each connection, so best to
# use a GreenPile to keep them ordered and in sync
pile = GreenPile(len(responses))
while True:
for resp in responses:
pile.spawn(_get_one_fragment, resp)
try:
with Timeout(self.node_timeout):
fragment_payload = [fragment for fragment in pile]
except (Exception, Timeout):
self.logger.exception(
_("Error trying to rebuild %(path)s "
"policy#%(policy)d frag#%(frag_index)s"),
{'path': path,
'policy': policy,
'frag_index': frag_index,
})
break
if not all(fragment_payload):
break
rebuilt_fragment = self._reconstruct(
policy, fragment_payload, frag_index)
yield rebuilt_fragment
return fragment_payload_iter()
def stats_line(self):
"""
Logs various stats for the currently running reconstruction pass.
"""
if (self.device_count and self.part_count and
self.reconstruction_device_count):
elapsed = (time.time() - self.start) or 0.000001
rate = self.reconstruction_part_count / elapsed
total_part_count = (self.part_count *
self.device_count /
self.reconstruction_device_count)
self.logger.info(
_("%(reconstructed)d/%(total)d (%(percentage).2f%%)"
" partitions of %(device)d/%(dtotal)d "
"(%(dpercentage).2f%%) devices"
" reconstructed in %(time).2fs "
"(%(rate).2f/sec, %(remaining)s remaining)"),
{'reconstructed': self.reconstruction_part_count,
'total': self.part_count,
'percentage':
self.reconstruction_part_count * 100.0 / self.part_count,
'device': self.reconstruction_device_count,
'dtotal': self.device_count,
'dpercentage':
self.reconstruction_device_count * 100.0 / self.device_count,
'time': time.time() - self.start, 'rate': rate,
'remaining': '%d%s' %
compute_eta(self.start,
self.reconstruction_part_count,
total_part_count)})
if self.suffix_count and self.partition_times:
self.logger.info(
_("%(checked)d suffixes checked - "
"%(hashed).2f%% hashed, %(synced).2f%% synced"),
{'checked': self.suffix_count,
'hashed': (self.suffix_hash * 100.0) / self.suffix_count,
'synced': (self.suffix_sync * 100.0) / self.suffix_count})
self.partition_times.sort()
self.logger.info(
_("Partition times: max %(max).4fs, "
"min %(min).4fs, med %(med).4fs"),
{'max': self.partition_times[-1],
'min': self.partition_times[0],
'med': self.partition_times[
len(self.partition_times) // 2]})
else:
self.logger.info(
_("Nothing reconstructed for %s seconds."),
(time.time() - self.start))
def kill_coros(self):
"""Utility function that kills all coroutines currently running."""
for coro in list(self.run_pool.coroutines_running):
try:
coro.kill(GreenletExit)
except GreenletExit:
pass
def heartbeat(self):
"""
Loop that runs in the background during reconstruction. It
periodically logs progress.
"""
while True:
sleep(self.stats_interval)
self.stats_line()
def detect_lockups(self):
"""
In testing, the pool.waitall() call very occasionally failed to return.
This is an attempt to make sure the reconstructor finishes its
reconstruction pass in some eventuality.
"""
while True:
sleep(self.lockup_timeout)
if self.reconstruction_count == self.last_reconstruction_count:
self.logger.error(_("Lockup detected.. killing live coros."))
self.kill_coros()
self.last_reconstruction_count = self.reconstruction_count
def _get_hashes(self, policy, path, recalculate=None, do_listdir=False):
df_mgr = self._df_router[policy]
hashed, suffix_hashes = tpool_reraise(
df_mgr._get_hashes, path, recalculate=recalculate,
do_listdir=do_listdir, reclaim_age=self.reclaim_age)
self.logger.update_stats('suffix.hashes', hashed)
return suffix_hashes
def get_suffix_delta(self, local_suff, local_index,
remote_suff, remote_index):
"""
Compare the local suffix hashes with the remote suffix hashes
for the given local and remote fragment indexes. Return those
suffixes which should be synced.
:param local_suff: the local suffix hashes (from _get_hashes)
:param local_index: the local fragment index for the job
:param remote_suff: the remote suffix hashes (from remote
REPLICATE request)
:param remote_index: the remote fragment index for the job
:returns: a list of strings, the suffix dirs to sync
"""
suffixes = []
for suffix, sub_dict_local in local_suff.items():
sub_dict_remote = remote_suff.get(suffix, {})
if (sub_dict_local.get(None) != sub_dict_remote.get(None) or
sub_dict_local.get(local_index) !=
sub_dict_remote.get(remote_index)):
suffixes.append(suffix)
return suffixes
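    # Worked example (illustrative only): with
    #   local_suff  = {'abc': {None: 'h0', 1: 'h1'}, 'def': {None: 'h0', 1: 'h1'}}
    #   remote_suff = {'abc': {None: 'h0', 2: 'h1'}, 'def': {None: 'hX', 2: 'h1'}}
    # get_suffix_delta(local_suff, 1, remote_suff, 2) returns ['def']: only the
    # suffix whose None-keyed hash or per-index hash differs is queued for sync.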
def rehash_remote(self, node, job, suffixes):
try:
with Timeout(self.http_timeout):
conn = http_connect(
node['replication_ip'], node['replication_port'],
node['device'], job['partition'], 'REPLICATE',
'/' + '-'.join(sorted(suffixes)),
headers=self.headers)
conn.getresponse().read()
except (Exception, Timeout):
self.logger.exception(
_("Trying to sync suffixes with %s") % self._full_path(
node, job['partition'], '', job['policy']))
def _get_suffixes_to_sync(self, job, node):
"""
For SYNC jobs we need to make a remote REPLICATE request to get
the remote node's current suffix's hashes and then compare to our
local suffix's hashes to decide which suffixes (if any) are out
of sync.
        :param job: the job dict, with the keys defined in ``_get_part_jobs``
:param node: the remote node dict
:returns: a (possibly empty) list of strings, the suffixes to be
synced with the remote node.
"""
# get hashes from the remote node
remote_suffixes = None
try:
with Timeout(self.http_timeout):
resp = http_connect(
node['replication_ip'], node['replication_port'],
node['device'], job['partition'], 'REPLICATE',
'', headers=self.headers).getresponse()
if resp.status == HTTP_INSUFFICIENT_STORAGE:
self.logger.error(
_('%s responded as unmounted'),
self._full_path(node, job['partition'], '',
job['policy']))
elif resp.status != HTTP_OK:
full_path = self._full_path(node, job['partition'], '',
job['policy'])
self.logger.error(
_("Invalid response %(resp)s from %(full_path)s"),
{'resp': resp.status, 'full_path': full_path})
else:
remote_suffixes = pickle.loads(resp.read())
except (Exception, Timeout):
# all exceptions are logged here so that our caller can
# safely catch our exception and continue to the next node
# without logging
self.logger.exception('Unable to get remote suffix hashes '
'from %r' % self._full_path(
node, job['partition'], '',
job['policy']))
if remote_suffixes is None:
raise SuffixSyncError('Unable to get remote suffix hashes')
suffixes = self.get_suffix_delta(job['hashes'],
job['frag_index'],
remote_suffixes,
node['index'])
# now recalculate local hashes for suffixes that don't
# match so we're comparing the latest
local_suff = self._get_hashes(job['policy'], job['path'],
recalculate=suffixes)
suffixes = self.get_suffix_delta(local_suff,
job['frag_index'],
remote_suffixes,
node['index'])
self.suffix_count += len(suffixes)
return suffixes
def delete_reverted_objs(self, job, objects, frag_index):
"""
For EC we can potentially revert only some of a partition
so we'll delete reverted objects here. Note that we delete
the fragment index of the file we sent to the remote node.
:param job: the job being processed
:param objects: a dict of objects to be deleted, each entry maps
hash=>timestamp
:param frag_index: (int) the fragment index of data files to be deleted
"""
df_mgr = self._df_router[job['policy']]
for object_hash, timestamps in objects.items():
try:
df = df_mgr.get_diskfile_from_hash(
job['local_dev']['device'], job['partition'],
object_hash, job['policy'],
frag_index=frag_index)
df.purge(timestamps['ts_data'], frag_index)
except DiskFileError:
self.logger.exception(
'Unable to purge DiskFile (%r %r %r)',
object_hash, timestamps['ts_data'], frag_index)
continue
def process_job(self, job):
"""
Sync the local partition with the remote node(s) according to
the parameters of the job. For primary nodes, the SYNC job type
will define both left and right hand sync_to nodes to ssync with
as defined by this primary nodes index in the node list based on
the fragment index found in the partition. For non-primary
nodes (either handoff revert, or rebalance) the REVERT job will
define a single node in sync_to which is the proper/new home for
the fragment index.
N.B. ring rebalancing can be time consuming and handoff nodes'
        fragment indexes do not have a stable order, so it's possible to
have more than one REVERT job for a partition, and in some rare
failure conditions there may even also be a SYNC job for the
same partition - but each one will be processed separately
because each job will define a separate list of node(s) to
'sync_to'.
        :param job: the job dict, with the keys defined in ``_get_job_info``
"""
self.headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
begin = time.time()
if job['job_type'] == REVERT:
self._revert(job, begin)
else:
self._sync(job, begin)
self.partition_times.append(time.time() - begin)
self.reconstruction_count += 1
def _sync(self, job, begin):
"""
Process a SYNC job.
"""
self.logger.increment(
'partition.update.count.%s' % (job['local_dev']['device'],))
# after our left and right partners, if there's some sort of
# failure we'll continue onto the remaining primary nodes and
# make sure they're in sync - or potentially rebuild missing
# fragments we find
dest_nodes = itertools.chain(
job['sync_to'],
# I think we could order these based on our index to better
# protect against a broken chain
[
n for n in
job['policy'].object_ring.get_part_nodes(job['partition'])
if n['id'] != job['local_dev']['id'] and
n['id'] not in (m['id'] for m in job['sync_to'])
],
)
syncd_with = 0
for node in dest_nodes:
if syncd_with >= len(job['sync_to']):
# success!
break
try:
suffixes = self._get_suffixes_to_sync(job, node)
except SuffixSyncError:
continue
if not suffixes:
syncd_with += 1
continue
# ssync any out-of-sync suffixes with the remote node
success, _ = ssync_sender(
self, node, job, suffixes)()
            # let remote end know to rehash its suffixes
self.rehash_remote(node, job, suffixes)
# update stats for this attempt
self.suffix_sync += len(suffixes)
self.logger.update_stats('suffix.syncs', len(suffixes))
if success:
syncd_with += 1
self.logger.timing_since('partition.update.timing', begin)
def _revert(self, job, begin):
"""
Process a REVERT job.
"""
self.logger.increment(
'partition.delete.count.%s' % (job['local_dev']['device'],))
        # we'd desperately like to push this partition back to its
# primary location, but if that node is down, the next best thing
# is one of the handoff locations - which *might* be us already!
dest_nodes = itertools.chain(
job['sync_to'],
job['policy'].object_ring.get_more_nodes(job['partition']),
)
syncd_with = 0
reverted_objs = {}
for node in dest_nodes:
if syncd_with >= len(job['sync_to']):
break
if node['id'] == job['local_dev']['id']:
# this is as good a place as any for this data for now
break
success, in_sync_objs = ssync_sender(
self, node, job, job['suffixes'])()
self.rehash_remote(node, job, job['suffixes'])
if success:
syncd_with += 1
reverted_objs.update(in_sync_objs)
if syncd_with >= len(job['sync_to']):
self.delete_reverted_objs(
job, reverted_objs, job['frag_index'])
self.logger.timing_since('partition.delete.timing', begin)
def _get_part_jobs(self, local_dev, part_path, partition, policy):
"""
Helper function to build jobs for a partition, this method will
read the suffix hashes and create job dictionaries to describe
the needed work. There will be one job for each fragment index
discovered in the partition.
For a fragment index which corresponds to this node's ring
index, a job with job_type SYNC will be created to ensure that
the left and right hand primary ring nodes for the part have the
corresponding left and right hand fragment archives.
A fragment index (or entire partition) for which this node is
not the primary corresponding node, will create job(s) with
job_type REVERT to ensure that fragment archives are pushed to
the correct node and removed from this one.
A partition may result in multiple jobs. Potentially many
REVERT jobs, and zero or one SYNC job.
:param local_dev: the local device
:param part_path: full path to partition
:param partition: partition number
:param policy: the policy
:returns: a list of dicts of job info
"""
# find all the fi's in the part, and which suffixes have them
hashes = self._get_hashes(policy, part_path, do_listdir=True)
non_data_fragment_suffixes = []
data_fi_to_suffixes = defaultdict(list)
for suffix, fi_hash in hashes.items():
if not fi_hash:
# this is for sanity and clarity, normally an empty
# suffix would get del'd from the hashes dict, but an
# OSError trying to re-hash the suffix could leave the
# value empty - it will log the exception; but there's
# no way to properly address this suffix at this time.
continue
data_frag_indexes = [f for f in fi_hash if f is not None]
if not data_frag_indexes:
non_data_fragment_suffixes.append(suffix)
else:
for fi in data_frag_indexes:
data_fi_to_suffixes[fi].append(suffix)
# helper to ensure consistent structure of jobs
def build_job(job_type, frag_index, suffixes, sync_to):
return {
'job_type': job_type,
'frag_index': frag_index,
'suffixes': suffixes,
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': hashes,
'policy': policy,
'local_dev': local_dev,
# ssync likes to have it handy
'device': local_dev['device'],
}
# aggregate jobs for all the fragment index in this part
jobs = []
# check the primary nodes - to see if the part belongs here
part_nodes = policy.object_ring.get_part_nodes(partition)
for node in part_nodes:
if node['id'] == local_dev['id']:
# this partition belongs here, we'll need a sync job
frag_index = node['index']
try:
suffixes = data_fi_to_suffixes.pop(frag_index)
except KeyError:
suffixes = []
sync_job = build_job(
job_type=SYNC,
frag_index=frag_index,
suffixes=suffixes,
sync_to=_get_partners(frag_index, part_nodes),
)
# ssync callback to rebuild missing fragment_archives
sync_job['sync_diskfile_builder'] = self.reconstruct_fa
jobs.append(sync_job)
break
# assign remaining data fragment suffixes to revert jobs
ordered_fis = sorted((len(suffixes), fi) for fi, suffixes
in data_fi_to_suffixes.items())
for count, fi in ordered_fis:
revert_job = build_job(
job_type=REVERT,
frag_index=fi,
suffixes=data_fi_to_suffixes[fi],
sync_to=[part_nodes[fi]],
)
jobs.append(revert_job)
# now we need to assign suffixes that have no data fragments
if non_data_fragment_suffixes:
if jobs:
# the first job will be either the sync_job, or the
# revert_job for the fragment index that is most common
# among the suffixes
jobs[0]['suffixes'].extend(non_data_fragment_suffixes)
else:
# this is an unfortunate situation, we need a revert job to
# push partitions off this node, but none of the suffixes
# have any data fragments to hint at which node would be a
# good candidate to receive the tombstones.
jobs.append(build_job(
job_type=REVERT,
frag_index=None,
suffixes=non_data_fragment_suffixes,
# this is super safe
sync_to=part_nodes,
# something like this would probably be better
# sync_to=random.sample(part_nodes, 3),
))
# return a list of jobs for this part
return jobs
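# Illustrative note (added, not from the original source): each job dict
# produced by build_job() above carries the partition, path and hashes plus
# either job_type=SYNC with sync_to set to the left/right ring partners and a
# 'sync_diskfile_builder' callback for rebuilding missing fragment archives,
# or job_type=REVERT with sync_to pointing at the primary node(s) that should
# receive this node's fragment archives before they are deleted locally.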
def collect_parts(self, override_devices=None,
override_partitions=None):
"""
Helper for yielding partitions in the top level reconstructor
"""
override_devices = override_devices or []
override_partitions = override_partitions or []
ips = whataremyips(self.bind_ip)
for policy in POLICIES:
if policy.policy_type != EC_POLICY:
continue
self._diskfile_mgr = self._df_router[policy]
self.load_object_ring(policy)
data_dir = get_data_dir(policy)
local_devices = list(itertools.ifilter(
lambda dev: dev and is_local_device(
ips, self.port,
dev['replication_ip'], dev['replication_port']),
policy.object_ring.devs))
if override_devices:
self.device_count = len(override_devices)
else:
self.device_count = len(local_devices)
for local_dev in local_devices:
if override_devices and (local_dev['device'] not in
override_devices):
continue
self.reconstruction_device_count += 1
dev_path = self._df_router[policy].get_dev_path(
local_dev['device'])
if not dev_path:
self.logger.warn(_('%s is not mounted'),
local_dev['device'])
continue
obj_path = join(dev_path, data_dir)
tmp_path = join(dev_path, get_tmp_dir(int(policy)))
unlink_older_than(tmp_path, time.time() -
self.reclaim_age)
if not os.path.exists(obj_path):
try:
mkdirs(obj_path)
except Exception:
self.logger.exception(
'Unable to create %s' % obj_path)
continue
try:
partitions = os.listdir(obj_path)
except OSError:
self.logger.exception(
'Unable to list partitions in %r' % obj_path)
continue
self.part_count += len(partitions)
for partition in partitions:
part_path = join(obj_path, partition)
if not (partition.isdigit() and
os.path.isdir(part_path)):
self.logger.warning(
'Unexpected entity in data dir: %r' % part_path)
remove_file(part_path)
self.reconstruction_part_count += 1
continue
partition = int(partition)
if override_partitions and (partition not in
override_partitions):
continue
part_info = {
'local_dev': local_dev,
'policy': policy,
'partition': partition,
'part_path': part_path,
}
yield part_info
self.reconstruction_part_count += 1
def build_reconstruction_jobs(self, part_info):
"""
Helper function for collect_jobs to build jobs for reconstruction
using EC style storage policy
"""
jobs = self._get_part_jobs(**part_info)
random.shuffle(jobs)
if self.handoffs_first:
# Move the handoff revert jobs to the front of the list
jobs.sort(key=lambda job: job['job_type'], reverse=True)
self.job_count += len(jobs)
return jobs
def _reset_stats(self):
self.start = time.time()
self.job_count = 0
self.part_count = 0
self.device_count = 0
self.suffix_count = 0
self.suffix_sync = 0
self.suffix_hash = 0
self.reconstruction_count = 0
self.reconstruction_part_count = 0
self.reconstruction_device_count = 0
self.last_reconstruction_count = -1
def delete_partition(self, path):
self.logger.info(_("Removing partition: %s"), path)
tpool.execute(shutil.rmtree, path, ignore_errors=True)
def reconstruct(self, **kwargs):
"""Run a reconstruction pass"""
self._reset_stats()
self.partition_times = []
stats = spawn(self.heartbeat)
lockup_detector = spawn(self.detect_lockups)
sleep() # Give spawns a cycle
try:
self.run_pool = GreenPool(size=self.concurrency)
for part_info in self.collect_parts(**kwargs):
if not self.check_ring(part_info['policy'].object_ring):
self.logger.info(_("Ring change detected. Aborting "
"current reconstruction pass."))
return
jobs = self.build_reconstruction_jobs(part_info)
if not jobs:
# If this part belongs on this node, _get_part_jobs
# will *always* build a sync_job - even if there are
# no suffixes in the partition that need to sync.
# If there are any suffixes in the partition then our
# job list would have *at least* one revert job.
# Therefore we know this part a) doesn't belong on
# this node and b) doesn't have any suffixes in it.
self.run_pool.spawn(self.delete_partition,
part_info['part_path'])
for job in jobs:
self.run_pool.spawn(self.process_job, job)
with Timeout(self.lockup_timeout):
self.run_pool.waitall()
except (Exception, Timeout):
self.logger.exception(_("Exception in top-level"
"reconstruction loop"))
self.kill_coros()
finally:
stats.kill()
lockup_detector.kill()
self.stats_line()
def run_once(self, *args, **kwargs):
start = time.time()
self.logger.info(_("Running object reconstructor in script mode."))
override_devices = list_from_csv(kwargs.get('devices'))
override_partitions = [int(p) for p in
list_from_csv(kwargs.get('partitions'))]
self.reconstruct(
override_devices=override_devices,
override_partitions=override_partitions)
total = (time.time() - start) / 60
self.logger.info(
_("Object reconstruction complete (once). (%.02f minutes)"), total)
if not (override_partitions or override_devices):
dump_recon_cache({'object_reconstruction_time': total,
'object_reconstruction_last': time.time()},
self.rcache, self.logger)
def run_forever(self, *args, **kwargs):
self.logger.info(_("Starting object reconstructor in daemon mode."))
# Run the reconstructor continually
while True:
start = time.time()
self.logger.info(_("Starting object reconstruction pass."))
# Run the reconstructor
self.reconstruct()
total = (time.time() - start) / 60
self.logger.info(
_("Object reconstruction complete. (%.02f minutes)"), total)
dump_recon_cache({'object_reconstruction_time': total,
'object_reconstruction_last': time.time()},
self.rcache, self.logger)
self.logger.debug('reconstruction sleeping for %s seconds.',
self.interval)
sleep(self.interval)
|
hbhdytf/mac
|
swift/obj/reconstructor.py
|
Python
|
apache-2.0
| 41,578
|
#!/usr/bin/env python
# Copyright 2012 Red Hat
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from neutronclient.neutron.v2_0 import floatingip as fip
from neutronclient.tests.unit import test_cli20
class CLITestV20FloatingIpsJSON(test_cli20.CLITestV20Base):
non_admin_status_resources = ['floatingip']
def test_create_floatingip(self):
# Create floatingip: fip1.
resource = 'floatingip'
cmd = fip.CreateFloatingIP(test_cli20.MyApp(sys.stdout), None)
name = 'fip1'
myid = 'myid'
args = [name, '--description', 'floats like a butterfly']
position_names = ['floating_network_id']
position_values = [name]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
description='floats like a butterfly')
def test_create_floatingip_and_port(self):
# Create floatingip: fip1.
resource = 'floatingip'
cmd = fip.CreateFloatingIP(test_cli20.MyApp(sys.stdout), None)
name = 'fip1'
myid = 'myid'
pid = 'mypid'
args = [name, '--port_id', pid]
position_names = ['floating_network_id', 'port_id']
position_values = [name, pid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = [name, '--port-id', pid]
position_names = ['floating_network_id', 'port_id']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_floatingip_and_port_and_address(self):
# Create floatingip: fip1 with a given port and address.
resource = 'floatingip'
cmd = fip.CreateFloatingIP(test_cli20.MyApp(sys.stdout), None)
name = 'fip1'
myid = 'myid'
pid = 'mypid'
addr = '10.0.0.99'
args = [name, '--port_id', pid, '--fixed_ip_address', addr]
position_names = ['floating_network_id', 'port_id', 'fixed_ip_address']
position_values = [name, pid, addr]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = [name, '--port-id', pid, '--fixed-ip-address', addr]
position_names = ['floating_network_id', 'port_id', 'fixed_ip_address']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_floatingip_with_ip_address_of_floating_ip(self):
# Create floatingip: fip1 with a given IP address of floating IP.
resource = 'floatingip'
cmd = fip.CreateFloatingIP(test_cli20.MyApp(sys.stdout), None)
name = 'fip1'
myid = 'myid'
addr = '10.0.0.99'
args = [name, '--floating-ip-address', addr]
position_values = [name, addr]
position_names = ['floating_network_id', 'floating_ip_address']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_floatingip_with_subnet_id(self):
# Create floatingip: fip1 on a given subnet id.
resource = 'floatingip'
cmd = fip.CreateFloatingIP(test_cli20.MyApp(sys.stdout), None)
name = 'fip1'
myid = 'myid'
subnet_id = 'mysubnetid'
args = [name, '--subnet', subnet_id]
position_values = [name, subnet_id]
position_names = ['floating_network_id', 'subnet_id']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_floatingip_with_subnet_id_and_port(self):
# Create floatingip: fip1 on a given subnet id and port.
resource = 'floatingip'
cmd = fip.CreateFloatingIP(test_cli20.MyApp(sys.stdout), None)
name = 'fip1'
myid = 'myid'
pid = 'mypid'
subnet_id = 'mysubnetid'
args = [name, '--subnet', subnet_id, '--port-id', pid]
position_values = [name, subnet_id, pid]
position_names = ['floating_network_id', 'subnet_id', 'port_id']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_floatingip_with_dns_name_and_dns_domain(self):
# Create floatingip: fip1 with dns name and dns domain.
resource = 'floatingip'
cmd = fip.CreateFloatingIP(test_cli20.MyApp(sys.stdout), None)
name = 'fip1'
myid = 'myid'
dns_name_name = 'my-floatingip'
dns_domain_name = 'my-domain.org.'
args = [name, '--dns-name', dns_name_name, '--dns-domain',
dns_domain_name]
position_names = ['floating_network_id', 'dns_name', 'dns_domain']
position_values = [name, dns_name_name, dns_domain_name]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_list_floatingips(self):
# list floatingips: -D.
resources = 'floatingips'
cmd = fip.ListFloatingIP(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_floatingips_pagination(self):
resources = 'floatingips'
cmd = fip.ListFloatingIP(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_floatingips_sort(self):
# list floatingips:
# --sort-key name --sort-key id --sort-dir asc --sort-dir desc
resources = 'floatingips'
cmd = fip.ListFloatingIP(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_floatingips_limit(self):
# list floatingips: -P.
resources = 'floatingips'
cmd = fip.ListFloatingIP(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_delete_floatingip(self):
# Delete floatingip: fip1.
resource = 'floatingip'
cmd = fip.DeleteFloatingIP(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
def test_show_floatingip(self):
# Show floatingip: --fields id.
resource = 'floatingip'
cmd = fip.ShowFloatingIP(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id'])
def test_disassociate_ip(self):
# Disassociate floating IP: myid.
resource = 'floatingip'
cmd = fip.DisassociateFloatingIP(test_cli20.MyApp(sys.stdout), None)
args = ['myid']
self._test_update_resource(resource, cmd, 'myid',
args, {"port_id": None}
)
def test_associate_ip(self):
# Associate floating IP: myid portid.
resource = 'floatingip'
cmd = fip.AssociateFloatingIP(test_cli20.MyApp(sys.stdout), None)
args = ['myid', 'portid']
self._test_update_resource(resource, cmd, 'myid',
args, {"port_id": "portid"}
)
|
rackerlabs/rackspace-python-neutronclient
|
neutronclient/tests/unit/test_cli20_floatingips.py
|
Python
|
apache-2.0
| 8,263
|
from alembic import context
from sqlalchemy import create_engine, pool
from warehouse import db
def run_migrations_offline():
"""
Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = context.config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""
Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
options = context.config.get_section(context.config.config_ini_section)
url = options.pop("url")
engine = create_engine(url, poolclass=pool.NullPool)
connection = engine.connect()
context.configure(connection=connection, target_metadata=db.metadata)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
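# Illustrative usage (added note, not part of the original file): Alembic
# imports this env.py itself, so migrations are normally driven from the CLI,
# e.g.
#   alembic upgrade head          # online mode, connects via sqlalchemy.url
#   alembic upgrade head --sql    # offline mode, emits the SQL as a script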
|
chopmann/warehouse
|
warehouse/migrations/env.py
|
Python
|
apache-2.0
| 1,309
|
"""Tests for the Linky config flow."""
from pylinky.exceptions import (
PyLinkyAccessException,
PyLinkyEnedisException,
PyLinkyException,
PyLinkyWrongLoginException,
)
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.linky.const import DEFAULT_TIMEOUT, DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_USER
from homeassistant.const import CONF_PASSWORD, CONF_TIMEOUT, CONF_USERNAME
from homeassistant.helpers.typing import HomeAssistantType
from tests.async_mock import Mock, patch
from tests.common import MockConfigEntry
USERNAME = "username@hotmail.fr"
USERNAME_2 = "username@free.fr"
PASSWORD = "password"
TIMEOUT = 20
@pytest.fixture(name="login")
def mock_controller_login():
"""Mock a successful login."""
with patch(
"homeassistant.components.linky.config_flow.LinkyClient"
) as service_mock:
service_mock.return_value.login = Mock(return_value=True)
service_mock.return_value.close_session = Mock(return_value=None)
yield service_mock
@pytest.fixture(name="fetch_data")
def mock_controller_fetch_data():
"""Mock a successful get data."""
with patch(
"homeassistant.components.linky.config_flow.LinkyClient"
) as service_mock:
service_mock.return_value.fetch_data = Mock(return_value={})
service_mock.return_value.close_session = Mock(return_value=None)
yield service_mock
async def test_user(hass: HomeAssistantType, login, fetch_data):
"""Test user config."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=None
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# test with all provided
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == USERNAME
assert result["title"] == USERNAME
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_TIMEOUT] == DEFAULT_TIMEOUT
async def test_import(hass: HomeAssistantType, login, fetch_data):
"""Test import step."""
# import with username and password
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == USERNAME
assert result["title"] == USERNAME
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_TIMEOUT] == DEFAULT_TIMEOUT
# import with all
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_USERNAME: USERNAME_2,
CONF_PASSWORD: PASSWORD,
CONF_TIMEOUT: TIMEOUT,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == USERNAME_2
assert result["title"] == USERNAME_2
assert result["data"][CONF_USERNAME] == USERNAME_2
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_TIMEOUT] == TIMEOUT
async def test_abort_if_already_setup(hass: HomeAssistantType, login, fetch_data):
"""Test we abort if Linky is already setup."""
MockConfigEntry(
domain=DOMAIN,
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
unique_id=USERNAME,
).add_to_hass(hass)
# Should fail, same USERNAME (import)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
# Should fail, same USERNAME (flow)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_login_failed(hass: HomeAssistantType, login):
"""Test when we have errors during login."""
login.return_value.login.side_effect = PyLinkyAccessException()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "access"}
hass.config_entries.flow.async_abort(result["flow_id"])
login.return_value.login.side_effect = PyLinkyWrongLoginException()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "wrong_login"}
hass.config_entries.flow.async_abort(result["flow_id"])
async def test_fetch_failed(hass: HomeAssistantType, login):
"""Test when we have errors during fetch."""
login.return_value.fetch_data.side_effect = PyLinkyAccessException()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "access"}
hass.config_entries.flow.async_abort(result["flow_id"])
login.return_value.fetch_data.side_effect = PyLinkyEnedisException()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "enedis"}
hass.config_entries.flow.async_abort(result["flow_id"])
login.return_value.fetch_data.side_effect = PyLinkyException()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "unknown"}
hass.config_entries.flow.async_abort(result["flow_id"])
|
nkgilley/home-assistant
|
tests/components/linky/test_config_flow.py
|
Python
|
apache-2.0
| 6,904
|
import logging
import os
import subprocess
import threading
import util
logger = logging.getLogger('rt.decoder')
class Decoder:
def close(self, force=False):
if not force:
self.lock.acquire()
self.decoder.stdin.close()
self.decoder.wait()
if not force:
self.lock.release()
def decode(self, sentence, grammar=None):
'''Threadsafe, FIFO'''
self.lock.acquire()
input = '<seg grammar="{g}">{s}</seg>\n'.format(s=sentence, g=grammar) if grammar else '{}\n'.format(sentence)
self.decoder.stdin.write(input)
hyp = self.decoder.stdout.readline().strip()
self.lock.release()
return hyp
class CdecDecoder(Decoder):
def __init__(self, config, weights):
cdec_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
decoder = os.path.join(cdec_root, 'decoder', 'cdec')
decoder_cmd = [decoder, '-c', config, '-w', weights]
logger.info('Executing: {}'.format(' '.join(decoder_cmd)))
self.decoder = util.popen_io(decoder_cmd)
self.lock = util.FIFOLock()
class MIRADecoder(Decoder):
def __init__(self, config, weights, metric='ibm_bleu'):
cdec_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
mira = os.path.join(cdec_root, 'training', 'mira', 'kbest_cut_mira')
# optimizer=2 step=0.001 best=500, k=500, uniq, stream, metric
mira_cmd = [mira, '-c', config, '-w', weights, '-o', '2', '-C', '0.001', '-b', '500', '-k', '500', '-u', '-t', '-m', metric]
logger.info('Executing: {}'.format(' '.join(mira_cmd)))
self.decoder = util.popen_io(mira_cmd)
self.lock = util.FIFOLock()
def get_weights(self):
'''Threadsafe, FIFO'''
self.lock.acquire()
self.decoder.stdin.write('WEIGHTS ||| WRITE\n')
weights = self.decoder.stdout.readline().strip()
self.lock.release()
return weights
def set_weights(self, w_line):
'''Threadsafe, FIFO'''
self.lock.acquire()
try:
# Check validity
for w_str in w_line.split():
(k, v) = w_str.split('=')
float(v)
self.decoder.stdin.write('WEIGHTS ||| {}\n'.format(w_line))
self.lock.release()
except:
self.lock.release()
raise Exception('Invalid weights line: {}'.format(w_line))
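# Note (added for clarity, not in the original source): set_weights() expects
# a space-separated "name=value" line, e.g. "LanguageModel=0.5 WordPenalty=-1.2"
# (feature names here are only illustrative); every value must parse as a
# float or the line is rejected by the validation loop above.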
def update(self, sentence, grammar, reference):
'''Threadsafe, FIFO'''
self.lock.acquire()
input = 'LEARN ||| <seg grammar="{g}">{s}</seg> ||| {r}\n'.format(s=sentence, g=grammar, r=reference)
self.decoder.stdin.write(input)
log = self.decoder.stdout.readline().strip()
self.lock.release()
return log
|
veer66/cdec
|
realtime/rt/decoder.py
|
Python
|
apache-2.0
| 2,899
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import time
from oslo.config import cfg
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
periodic_opts = [
cfg.BoolOpt('run_external_periodic_tasks',
default=True,
help=('Some periodic tasks can be run in a separate process. '
'Should we run them here?')),
]
CONF = cfg.CONF
CONF.register_opts(periodic_opts)
LOG = logging.getLogger(__name__)
DEFAULT_INTERVAL = 60.0
class InvalidPeriodicTaskArg(Exception):
message = _("Unexpected argument for periodic task creation: %(arg)s.")
def periodic_task(*args, **kwargs):
"""Decorator to indicate that a method is a periodic task.
This decorator can be used in two ways:
1. Without arguments '@periodic_task', this will be run on every cycle
of the periodic scheduler.
2. With arguments:
@periodic_task(spacing=N [, run_immediately=[True|False]])
this will be run on approximately every N seconds. If this number is
negative the periodic task will be disabled. If the run_immediately
argument is provided and has a value of 'True', the first run of the
task will be shortly after the task scheduler starts. If
run_immediately is omitted or set to 'False', the first time the
task runs will be approximately N seconds after the task scheduler
starts.
"""
def decorator(f):
# Test for old style invocation
if 'ticks_between_runs' in kwargs:
raise InvalidPeriodicTaskArg(arg='ticks_between_runs')
# Control if run at all
f._periodic_task = True
f._periodic_external_ok = kwargs.pop('external_process_ok', False)
if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
f._periodic_enabled = False
else:
f._periodic_enabled = kwargs.pop('enabled', True)
# Control frequency
f._periodic_spacing = kwargs.pop('spacing', 0)
f._periodic_immediate = kwargs.pop('run_immediately', False)
if f._periodic_immediate:
f._periodic_last_run = None
else:
f._periodic_last_run = timeutils.utcnow()
return f
# NOTE(sirp): The `if` is necessary to allow the decorator to be used with
# and without parens.
#
# In the 'with-parens' case (with kwargs present), this function needs to
# return a decorator function since the interpreter will invoke it like:
#
# periodic_task(*args, **kwargs)(f)
#
# In the 'without-parens' case, the original function will be passed
# in as the first argument, like:
#
# periodic_task(f)
if kwargs:
return decorator
else:
return decorator(args[0])
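# Illustrative usage sketch (added, not part of the original module), based on
# the docstring above:
#
#   class MyManager(PeriodicTasks):
#       @periodic_task
#       def _run_every_cycle(self, context):
#           ...
#
#       @periodic_task(spacing=600, run_immediately=True)
#       def _run_every_ten_minutes(self, context):
#           ...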
class _PeriodicTasksMeta(type):
def __init__(cls, names, bases, dict_):
"""Metaclass that allows us to collect decorated periodic tasks."""
super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)
# NOTE(sirp): if the attribute is not present then we must be the base
# class, so go ahead and initialize it. If the attribute is present,
# then we're a subclass so make a copy of it so we don't step on our
# parent's toes.
try:
cls._periodic_tasks = cls._periodic_tasks[:]
except AttributeError:
cls._periodic_tasks = []
try:
cls._periodic_last_run = cls._periodic_last_run.copy()
except AttributeError:
cls._periodic_last_run = {}
try:
cls._periodic_spacing = cls._periodic_spacing.copy()
except AttributeError:
cls._periodic_spacing = {}
for value in cls.__dict__.values():
if getattr(value, '_periodic_task', False):
task = value
name = task.__name__
if task._periodic_spacing < 0:
LOG.info(_('Skipping periodic task %(task)s because '
'its interval is negative'),
{'task': name})
continue
if not task._periodic_enabled:
LOG.info(_('Skipping periodic task %(task)s because '
'it is disabled'),
{'task': name})
continue
# A periodic spacing of zero indicates that this task should
# be run every pass
if task._periodic_spacing == 0:
task._periodic_spacing = None
cls._periodic_tasks.append((name, task))
cls._periodic_spacing[name] = task._periodic_spacing
cls._periodic_last_run[name] = task._periodic_last_run
class PeriodicTasks(object):
__metaclass__ = _PeriodicTasksMeta
def run_periodic_tasks(self, context, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
idle_for = DEFAULT_INTERVAL
for task_name, task in self._periodic_tasks:
full_task_name = '.'.join([self.__class__.__name__, task_name])
now = timeutils.utcnow()
spacing = self._periodic_spacing[task_name]
last_run = self._periodic_last_run[task_name]
# If a periodic task is _nearly_ due, then we'll run it early
if spacing is not None and last_run is not None:
due = last_run + datetime.timedelta(seconds=spacing)
if not timeutils.is_soon(due, 0.2):
idle_for = min(idle_for, timeutils.delta_seconds(now, due))
continue
if spacing is not None:
idle_for = min(idle_for, spacing)
LOG.debug(_("Running periodic task %(full_task_name)s"), locals())
self._periodic_last_run[task_name] = timeutils.utcnow()
try:
task(self, context)
except Exception as e:
if raise_on_error:
raise
LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
locals())
time.sleep(0)
return idle_for
|
ntt-sic/cinder
|
cinder/openstack/common/periodic_task.py
|
Python
|
apache-2.0
| 6,920
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for local response normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class LRNOpTest(test.TestCase):
def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0,
beta=0.5):
"""Compute expected result."""
output = copy.deepcopy(input_image)
batch_size = input_image.shape[0]
rows = input_image.shape[1]
cols = input_image.shape[2]
depth = input_image.shape[3]
for b in range(batch_size):
for r in range(rows):
for c in range(cols):
for d in range(depth):
begin = max(0, d - lrn_depth_radius)
end = min(depth, d + lrn_depth_radius + 1)
patch = input_image[b, r, c, begin:end]
output[b, r, c, d] /= (
np.power(bias + alpha * np.sum(patch * patch), beta))
return output
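# Descriptive note (added, not original): the loops above implement
#   output[b, r, c, d] = input[b, r, c, d] /
#       (bias + alpha * sum(input[b, r, c, begin:end] ** 2)) ** beta
# where the sum runs over a window of at most 2 * lrn_depth_radius + 1
# adjacent channels.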
def _RunAndVerify(self, dtype):
with self.cached_session():
# random shape
shape = np.random.randint(1, 16, size=4)
# Make depth at least 2 to make it meaningful
shape[3] += 1
p = array_ops.placeholder(dtype, shape=shape)
# random depth_radius, bias, alpha, beta. cuDNN requires depth_radius to
# be in [1, 7].
lrn_depth_radius = np.random.randint(1, min(8, shape[3]))
bias = 1.0 + np.random.rand()
alpha = 2.0 * np.random.rand()
# cuDNN requires beta >= 0.01.
beta = 0.01 + 2.0 * np.random.rand()
lrn_t = nn.local_response_normalization(
p,
name="lrn",
depth_radius=lrn_depth_radius,
bias=bias,
alpha=alpha,
beta=beta)
params = {p: np.random.rand(*shape).astype("f")}
result = lrn_t.eval(feed_dict=params)
expected = self._LRN(
params[p],
lrn_depth_radius=lrn_depth_radius,
bias=bias,
alpha=alpha,
beta=beta)
err = np.amax(np.abs(result - expected))
print("LRN error for bias ", bias, "alpha ", alpha, " beta ", beta, " is ",
err)
if dtype == dtypes.float32:
self.assertTrue(err < 1e-4)
else:
self.assertTrue(err < 1e-2)
self.assertShapeEqual(expected, lrn_t)
@test_util.run_deprecated_v1
def testCompute(self):
for _ in range(2):
self._RunAndVerify(dtypes.float32)
# Enable when LRN supports tf.float16 on GPU.
if not test.is_gpu_available():
self._RunAndVerify(dtypes.float16)
@test_util.run_deprecated_v1
def testGradientsZeroInput(self):
with self.session():
shape = [4, 4, 4, 4]
p = array_ops.placeholder(dtypes.float32, shape=shape)
inp_array = np.zeros(shape).astype("f")
lrn_op = nn.local_response_normalization(p, 2, 1.0, 0.0, 1.0, name="lrn")
grad = gradients_impl.gradients([lrn_op], [p])[0]
params = {p: inp_array}
r = grad.eval(feed_dict=params)
expected = np.ones(shape).astype("f")
self.assertAllClose(r, expected)
self.assertShapeEqual(expected, grad)
def _RunAndVerifyGradients(self, dtype):
with self.cached_session():
# random shape
shape = np.random.randint(1, 5, size=4)
# Make depth at least 2 to make it meaningful
shape[3] += 1
# random depth_radius, bias, alpha, beta. cuDNN requires depth_radius to
# be in [1, 7].
lrn_depth_radius = np.random.randint(1, min(8, shape[3]))
bias = 1.0 + np.random.rand()
alpha = 1.0 * np.random.rand()
# cuDNN requires beta >= 0.01.
beta = 0.01 + 1.0 * np.random.rand()
if dtype == dtypes.float32:
inp_array = np.random.rand(*shape).astype(np.float32)
else:
inp_array = np.random.rand(*shape).astype(np.float16)
inp = constant_op.constant(
list(inp_array.ravel(order="C")), shape=shape, dtype=dtype)
lrn_op = nn.local_response_normalization(
inp,
name="lrn",
depth_radius=lrn_depth_radius,
bias=bias,
alpha=alpha,
beta=beta)
err = gradient_checker.compute_gradient_error(inp, shape, lrn_op, shape)
print("LRN Gradient error for bias ", bias, "alpha ", alpha, " beta ", beta,
" is ", err)
if dtype == dtypes.float32:
self.assertLess(err, 1e-4)
else:
self.assertLess(err, 1.0)
@test_util.run_deprecated_v1
def testGradients(self):
for _ in range(2):
self._RunAndVerifyGradients(dtypes.float32)
# Enable when LRN supports tf.float16 on GPU.
if not test.is_gpu_available():
self._RunAndVerifyGradients(dtypes.float16)
if __name__ == "__main__":
test.main()
|
annarev/tensorflow
|
tensorflow/python/kernel_tests/lrn_op_test.py
|
Python
|
apache-2.0
| 5,781
|
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
import unittest
from io import BytesIO, StringIO
from decimal import Decimal
import threading
from importlib import import_module
from ijson import common
from ijson.backends.python import basic_parse, Lexer
from ijson.compat import IS_PY2
JSON = b'''
{
"docs": [
{
"null": null,
"boolean": false,
"true": true,
"integer": 0,
"double": 0.5,
"exponent": 1.0e+2,
"long": 10000000000,
"string": "\\u0441\\u0442\\u0440\\u043e\\u043a\\u0430 - \xd1\x82\xd0\xb5\xd1\x81\xd1\x82"
},
{
"meta": [[1], {}]
},
{
"meta": {"key": "value"}
},
{
"meta": null
}
]
}
'''
JSON_EVENTS = [
('start_map', None),
('map_key', 'docs'),
('start_array', None),
('start_map', None),
('map_key', 'null'),
('null', None),
('map_key', 'boolean'),
('boolean', False),
('map_key', 'true'),
('boolean', True),
('map_key', 'integer'),
('number', 0),
('map_key', 'double'),
('number', Decimal('0.5')),
('map_key', 'exponent'),
('number', 100),
('map_key', 'long'),
('number', 10000000000),
('map_key', 'string'),
('string', 'строка - тест'),
('end_map', None),
('start_map', None),
('map_key', 'meta'),
('start_array', None),
('start_array', None),
('number', 1),
('end_array', None),
('start_map', None),
('end_map', None),
('end_array', None),
('end_map', None),
('start_map', None),
('map_key', 'meta'),
('start_map', None),
('map_key', 'key'),
('string', 'value'),
('end_map', None),
('end_map', None),
('start_map', None),
('map_key', 'meta'),
('null', None),
('end_map', None),
('end_array', None),
('end_map', None),
]
SCALAR_JSON = b'0'
INVALID_JSONS = [
b'["key", "value",]', # trailing comma
b'["key" "value"]', # no comma
b'{"key": "value",}', # trailing comma
b'{"key": "value" "key"}', # no comma
b'{"key" "value"}', # no colon
b'invalid', # unknown lexeme
b'[1, 2] dangling junk' # dangling junk
]
YAJL1_PASSING_INVALID = INVALID_JSONS[6]
INCOMPLETE_JSONS = [
b'',
b'"test',
b'[',
b'[1',
b'[1,',
b'{',
b'{"key"',
b'{"key":',
b'{"key": "value"',
b'{"key": "value",',
]
STRINGS_JSON = br'''
{
"str1": "",
"str2": "\"",
"str3": "\\",
"str4": "\\\\",
"special\t": "\b\f\n\r\t"
}
'''
NUMBERS_JSON = b'[1, 1.0, 1E2]'
SURROGATE_PAIRS_JSON = b'"\uD83D\uDCA9"'
class Parse(object):
'''
Base class for parsing tests that is used to create test cases for each
available backend.
'''
def test_basic_parse(self):
events = list(self.backend.basic_parse(BytesIO(JSON)))
self.assertEqual(events, JSON_EVENTS)
def test_basic_parse_threaded(self):
thread = threading.Thread(target=self.test_basic_parse)
thread.start()
thread.join()
def test_scalar(self):
events = list(self.backend.basic_parse(BytesIO(SCALAR_JSON)))
self.assertEqual(events, [('number', 0)])
def test_strings(self):
events = list(self.backend.basic_parse(BytesIO(STRINGS_JSON)))
strings = [value for event, value in events if event == 'string']
self.assertEqual(strings, ['', '"', '\\', '\\\\', '\b\f\n\r\t'])
self.assertTrue(('map_key', 'special\t') in events)
def test_surrogate_pairs(self):
event = next(self.backend.basic_parse(BytesIO(SURROGATE_PAIRS_JSON)))
parsed_string = event[1]
self.assertEqual(parsed_string, '💩')
def test_numbers(self):
events = list(self.backend.basic_parse(BytesIO(NUMBERS_JSON)))
types = [type(value) for event, value in events if event == 'number']
self.assertEqual(types, [int, Decimal, Decimal])
def test_invalid(self):
for json in INVALID_JSONS:
# Yajl1 doesn't complain about additional data after the end
# of a parsed object. Skipping this test.
if self.__class__.__name__ == 'YajlParse' and json == YAJL1_PASSING_INVALID:
continue
with self.assertRaises(common.JSONError) as cm:
list(self.backend.basic_parse(BytesIO(json)))
def test_incomplete(self):
for json in INCOMPLETE_JSONS:
with self.assertRaises(common.IncompleteJSONError):
list(self.backend.basic_parse(BytesIO(json)))
def test_utf8_split(self):
buf_size = JSON.index(b'\xd1') + 1
try:
events = list(self.backend.basic_parse(BytesIO(JSON), buf_size=buf_size))
except UnicodeDecodeError:
self.fail('UnicodeDecodeError raised')
def test_lazy(self):
# shouldn't fail since iterator is not exhausted
self.backend.basic_parse(BytesIO(INVALID_JSONS[0]))
self.assertTrue(True)
def test_boundary_lexeme(self):
buf_size = JSON.index(b'false') + 1
events = list(self.backend.basic_parse(BytesIO(JSON), buf_size=buf_size))
self.assertEqual(events, JSON_EVENTS)
def test_boundary_whitespace(self):
buf_size = JSON.index(b' ') + 1
events = list(self.backend.basic_parse(BytesIO(JSON), buf_size=buf_size))
self.assertEqual(events, JSON_EVENTS)
def test_api(self):
self.assertTrue(list(self.backend.items(BytesIO(JSON), '')))
self.assertTrue(list(self.backend.parse(BytesIO(JSON))))
# Generating real TestCase classes for each importable backend
for name in ['python', 'yajl', 'yajl2', 'yajl2_cffi']:
try:
classname = '%sParse' % ''.join(p.capitalize() for p in name.split('_'))
if IS_PY2:
classname = classname.encode('ascii')
locals()[classname] = type(
classname,
(unittest.TestCase, Parse),
{'backend': import_module('ijson.backends.%s' % name)},
)
except ImportError:
pass
class Common(unittest.TestCase):
'''
Backend independent tests. They all use basic_parse imported explicitly from
the python backend to generate parsing events.
'''
def test_object_builder(self):
builder = common.ObjectBuilder()
for event, value in basic_parse(BytesIO(JSON)):
builder.event(event, value)
self.assertEqual(builder.value, {
'docs': [
{
'string': 'строка - тест',
'null': None,
'boolean': False,
'true': True,
'integer': 0,
'double': Decimal('0.5'),
'exponent': 100,
'long': 10000000000,
},
{
'meta': [[1], {}],
},
{
'meta': {'key': 'value'},
},
{
'meta': None,
},
],
})
def test_scalar_builder(self):
builder = common.ObjectBuilder()
for event, value in basic_parse(BytesIO(SCALAR_JSON)):
builder.event(event, value)
self.assertEqual(builder.value, 0)
def test_parse(self):
events = common.parse(basic_parse(BytesIO(JSON)))
events = [value
for prefix, event, value in events
if prefix == 'docs.item.meta.item.item'
]
self.assertEqual(events, [1])
def test_items(self):
events = basic_parse(BytesIO(JSON))
meta = list(common.items(common.parse(events), 'docs.item.meta'))
self.assertEqual(meta, [
[[1], {}],
{'key': 'value'},
None,
])
class Stream(unittest.TestCase):
def test_bytes(self):
l = Lexer(BytesIO(JSON))
self.assertEqual(next(l)[1], '{')
def test_string(self):
l = Lexer(StringIO(JSON.decode('utf-8')))
self.assertEqual(next(l)[1], '{')
if __name__ == '__main__':
unittest.main()
|
catapult-project/catapult
|
third_party/ijson/tests.py
|
Python
|
bsd-3-clause
| 8,608
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tests/client/gtk_builder_lint.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import xml.etree.ElementTree as ElementTree
from king_phisher import find
from king_phisher import testing
GOBJECT_TOP_REGEX = r'^[A-Z][a-zA-Z0-9]+$'
class ClientGtkBuilderLint(testing.KingPhisherTestCase):
def setUp(self):
find.data_path_append('data/client')
builder_xml = find.data_file('king-phisher-client.ui')
self.xml_tree = ElementTree.parse(builder_xml)
self.xml_root = self.xml_tree.getroot()
def test_object_ids_are_valid(self):
for child in self.xml_root:
if child.tag != 'object':
continue
gobject_id = child.attrib['id']
self.assertRegex(gobject_id, GOBJECT_TOP_REGEX, "invalid gobject id '{0}'".format(gobject_id))
if __name__ == '__main__':
unittest.main()
|
securestate/king-phisher
|
tests/client/gtk_builder.py
|
Python
|
bsd-3-clause
| 2,305
|
"""
====================================================
Compute envelope correlations in volume source space
====================================================
Compute envelope correlations of orthogonalized activity
:footcite:`HippEtAl2012,KhanEtAl2018` in source
space using resting state CTF data in a volume source space.
"""
# Authors: Eric Larson <larson.eric.d@gmail.com>
# Sheraz Khan <sheraz@khansheraz.com>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import mne
from mne.beamformer import make_lcmv, apply_lcmv_epochs
from mne.connectivity import envelope_correlation
from mne.preprocessing import compute_proj_ecg, compute_proj_eog
data_path = mne.datasets.brainstorm.bst_resting.data_path()
subjects_dir = op.join(data_path, 'subjects')
subject = 'bst_resting'
trans = op.join(data_path, 'MEG', 'bst_resting', 'bst_resting-trans.fif')
bem = op.join(subjects_dir, subject, 'bem', subject + '-5120-bem-sol.fif')
raw_fname = op.join(data_path, 'MEG', 'bst_resting',
'subj002_spontaneous_20111102_01_AUX.ds')
crop_to = 60.
##############################################################################
# Here we do some things in the name of speed, such as crop (which will
# hurt SNR) and downsample. Then we compute SSP projectors and apply them.
raw = mne.io.read_raw_ctf(raw_fname, verbose='error')
raw.crop(0, crop_to).pick_types(meg=True, eeg=False).load_data().resample(80)
raw.apply_gradient_compensation(3)
projs_ecg, _ = compute_proj_ecg(raw, n_grad=1, n_mag=2)
projs_eog, _ = compute_proj_eog(raw, n_grad=1, n_mag=2, ch_name='MLT31-4407')
raw.info['projs'] += projs_ecg
raw.info['projs'] += projs_eog
raw.apply_proj()
cov = mne.compute_raw_covariance(raw) # compute before band-pass of interest
##############################################################################
# Now we band-pass filter our data and create epochs.
raw.filter(14, 30)
events = mne.make_fixed_length_events(raw, duration=5.)
epochs = mne.Epochs(raw, events=events, tmin=0, tmax=5.,
baseline=None, reject=dict(mag=8e-13), preload=True)
del raw
##############################################################################
# Compute the forward and inverse
# -------------------------------
# This source space is really far too coarse, but we do this for speed
# considerations here
pos = 15. # 1.5 cm is very broad, done here for speed!
src = mne.setup_volume_source_space('bst_resting', pos, bem=bem,
subjects_dir=subjects_dir, verbose=True)
fwd = mne.make_forward_solution(epochs.info, trans, src, bem)
data_cov = mne.compute_covariance(epochs)
filters = make_lcmv(epochs.info, fwd, data_cov, 0.05, cov,
pick_ori='max-power', weight_norm='nai')
del fwd
##############################################################################
# Compute label time series and do envelope correlation
# -----------------------------------------------------
epochs.apply_hilbert() # faster to do in sensor space
stcs = apply_lcmv_epochs(epochs, filters, return_generator=True)
corr = envelope_correlation(stcs, verbose=True)
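# Note (added, not in the original example): envelope_correlation returns a
# source-by-source connectivity matrix combined over epochs; the next section
# thresholds it (keeping roughly the strongest 15% of connections) to compute
# the degree of each source.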
##############################################################################
# Compute the degree and plot it
# ------------------------------
degree = mne.connectivity.degree(corr, 0.15)
stc = mne.VolSourceEstimate(degree, [src[0]['vertno']], 0, 1, 'bst_resting')
brain = stc.plot(
src, clim=dict(kind='percent', lims=[75, 85, 95]), colormap='gnuplot',
subjects_dir=subjects_dir, mode='glass_brain')
##############################################################################
# References
# ----------
# .. footbibliography::
|
olafhauk/mne-python
|
examples/connectivity/plot_mne_inverse_envelope_correlation_volume.py
|
Python
|
bsd-3-clause
| 3,736
|
import sys
import re
import codecs
import snowballstemmer
def usage():
print('''usage: %s [-l <language>] [-i <input file>] [-o <output file>] [-c <character encoding>] [-p[2]] [-h]
The input file consists of a list of words to be stemmed, one per
line. Words should be in lower case, but (for English) A-Z letters
are mapped to their a-z equivalents anyway. If omitted, stdin is
used.
If -c is given, the argument is the character encoding of the input
and output files. If it is omitted, the UTF-8 encoding is used.
If -p is given the output file consists of each word of the input
file followed by \"->\" followed by its stemmed equivalent.
If -p2 is given the output file is a two column layout containing
the input words in the first column and the stemmed equivalents in
the second column.
Otherwise, the output file consists of the stemmed words, one per
line.
-h displays this help''' % sys.argv[0])
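# Example invocation (illustrative, not part of the original script):
#   python stemwords.py -l english -i vocabulary.txt -o stemmed.txt -c utf_8 -p2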
def main():
argv = sys.argv[1:]
if len(argv) < 5:
usage()
else:
pretty = 0
input = ''
output = ''
encoding = 'utf_8'
language = 'English'
show_help = False
while len(argv):
arg = argv[0]
argv = argv[1:]
if arg == '-h':
show_help = True
break
elif arg == "-p":
pretty = 1
elif arg == "-p2":
pretty = 2
elif arg == "-l":
if len(argv) == 0:
show_help = True
break
language = argv[0]
argv = argv[1:]
elif arg == "-i":
if len(argv) == 0:
show_help = True
break
input = argv[0]
argv = argv[1:]
elif arg == "-o":
if len(argv) == 0:
show_help = True
break
output = argv[0]
argv = argv[1:]
elif arg == "-c":
if len(argv) == 0:
show_help = True
break
encoding = argv[0]
if show_help or input == '' or output == '':
usage()
else:
stemming(language, input, output, encoding, pretty)
def stemming(lang, input, output, encoding, pretty):
stemmer = snowballstemmer.stemmer(lang)
outfile = codecs.open(output, "w", encoding)
for original in codecs.open(input, "r", encoding).readlines():
original = original.strip()
# Convert only ASCII-letters to lowercase, to match C behavior
original = ''.join((c.lower() if 'A' <= c <= 'Z' else c for c in original))
stemmed = stemmer.stemWord(original)
if pretty == 0:
if stemmed != "":
outfile.write(stemmed)
elif pretty == 1:
outfile.write(original + " -> " + stemmed)
elif pretty == 2:
outfile.write(original)
if len(original) < 30:
outfile.write(" " * (30 - len(original)))
else:
outfile.write("\n")
outfile.write(" " * 30)
outfile.write(stemmed)
outfile.write('\n')
outfile.close()
main()
|
assem-ch/snowball
|
python/stemwords.py
|
Python
|
bsd-3-clause
| 3,277
|
''' Fvtk module implements simple visualization functions using VTK.
The main idea is the following:
A window can have one or more renderers. A renderer can have zero, one or more actors. Examples of actors are a sphere, line, point, etc.
You basically add actors to a renderer and in that way you can visualize the aforementioned objects, e.g. sphere, line ...
Examples
---------
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> a=fvtk.axes()
>>> fvtk.add(r,a)
>>> #fvtk.show(r)
For more information on VTK there are many neat examples at
http://www.vtk.org/Wiki/VTK/Tutorials/External_Tutorials
'''
from __future__ import division, print_function, absolute_import
from dipy.utils.six.moves import xrange
import types
import numpy as np
from dipy.core.ndindex import ndindex
# Conditional import machinery for vtk
from ..utils.optpkg import optional_package
# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package('vtk')
colors, have_vtk_colors, _ = optional_package('vtk.util.colors')
cm, have_matplotlib, _ = optional_package('matplotlib.cm')
if have_matplotlib:
get_cmap = cm.get_cmap
else:
from dipy.data import get_cmap
# a track buffer used only with picking tracks
track_buffer = []
# indices buffer for the tracks
ind_buffer = []
# temporary renderer used only with picking tracks
tmp_ren = None
if have_vtk:
major_version = vtk.vtkVersion.GetVTKMajorVersion()
# Create a text mapper and actor to display the results of picking.
textMapper = vtk.vtkTextMapper()
tprop = textMapper.GetTextProperty()
tprop.SetFontFamilyToArial()
tprop.SetFontSize(10)
# tprop.BoldOn()
# tprop.ShadowOn()
tprop.SetColor(1, 0, 0)
textActor = vtk.vtkActor2D()
textActor.VisibilityOff()
textActor.SetMapper(textMapper)
# Create a cell picker.
picker = vtk.vtkCellPicker()
def ren():
'''Create a renderer.
Returns
-------
v : vtkRenderer() object
Renderer.
Examples
--------
>>> from dipy.viz import fvtk
>>> import numpy as np
>>> r=fvtk.ren()
>>> lines=[np.random.rand(10,3)]
>>> c=fvtk.line(lines, fvtk.colors.red)
>>> fvtk.add(r,c)
>>> #fvtk.show(r)
'''
return vtk.vtkRenderer()
def add(ren, a):
''' Add a specific actor
'''
if isinstance(a, vtk.vtkVolume):
ren.AddVolume(a)
else:
ren.AddActor(a)
def rm(ren, a):
''' Remove a specific actor
'''
ren.RemoveActor(a)
def clear(ren):
''' Remove all actors from the renderer
'''
ren.RemoveAllViewProps()
def rm_all(ren):
''' Remove all actors from the renderer
'''
clear(ren)
def _arrow(pos=(0, 0, 0), color=(1, 0, 0), scale=(1, 1, 1), opacity=1):
''' Internal function for generating arrow actors.
'''
arrow = vtk.vtkArrowSource()
# arrow.SetTipLength(length)
arrowm = vtk.vtkPolyDataMapper()
if major_version <= 5:
arrowm.SetInput(arrow.GetOutput())
else:
arrowm.SetInputData(arrow.GetOutput())
arrowa = vtk.vtkActor()
arrowa.SetMapper(arrowm)
arrowa.GetProperty().SetColor(color)
arrowa.GetProperty().SetOpacity(opacity)
arrowa.SetScale(scale)
return arrowa
def axes(scale=(1, 1, 1), colorx=(1, 0, 0), colory=(0, 1, 0), colorz=(0, 0, 1),
opacity=1):
""" Create an actor with the coordinate's system axes where
red = x, green = y, blue =z.
Parameters
----------
scale : tuple (3,)
axes size e.g. (100, 100, 100)
colorx : tuple (3,)
x-axis color. Default red.
colory : tuple (3,)
y-axis color. Default green.
colorz : tuple (3,)
z-axis color. Default blue.
Returns
-------
vtkAssembly
"""
arrowx = _arrow(color=colorx, scale=scale, opacity=opacity)
arrowy = _arrow(color=colory, scale=scale, opacity=opacity)
arrowz = _arrow(color=colorz, scale=scale, opacity=opacity)
arrowy.RotateZ(90)
arrowz.RotateY(-90)
ass = vtk.vtkAssembly()
ass.AddPart(arrowx)
ass.AddPart(arrowy)
ass.AddPart(arrowz)
return ass
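# Note (added for clarity, not in the original source): vtkArrowSource points
# along +x by default, so axes() reuses the same arrow for y and z by rotating
# the actors 90 degrees about z and -90 degrees about y respectively.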
def _lookup(colors):
''' Internal function
Creates a lookup table with given colors.
Parameters
------------
colors : array, shape (N,3)
Colormap where every triplet is encoding red, green and blue e.g.
::
r1,g1,b1
r2,g2,b2
...
rN,gN,bN
where
::
0 <= r <= 1,
0 <= g <= 1,
0 <= b <= 1,
Returns
----------
vtkLookupTable
'''
colors = np.asarray(colors, dtype=np.float32)
if colors.ndim > 2:
raise ValueError('Incorrect shape of array in colors')
if colors.ndim == 1:
N = 1
if colors.ndim == 2:
N = colors.shape[0]
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(N)
lut.Build()
if colors.ndim == 2:
scalar = 0
for (r, g, b) in colors:
lut.SetTableValue(scalar, r, g, b, 1.0)
scalar += 1
if colors.ndim == 1:
lut.SetTableValue(0, colors[0], colors[1], colors[2], 1.0)
return lut
def streamtube(lines, colors, opacity=1, linewidth=0.15, tube_sides=8,
lod=True, lod_points=10 ** 4, lod_points_size=5):
""" Uses streamtubes to visualize polylines
Parameters
----------
lines : list
list of N curves represented as 2D ndarrays
colors : array (N, 3) or tuple (3,)
opacity : float
linewidth : float
tube_sides : int
lod : bool
use vtkLODActor rather than vtkActor
lod_points : int
number of points to be used when LOD is in effect
lod_points_size : int
size of points when lod is in effect
Examples
--------
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> lines=[np.random.rand(10, 3), np.random.rand(20, 3)]
>>> colors=np.random.rand(2, 3)
>>> c=fvtk.streamtube(lines, colors)
>>> fvtk.add(r,c)
>>> #fvtk.show(r)
Notes
-----
Streamtubes can be heavy on the GPU when loading many streamlines; therefore,
you may experience slow rendering times depending on your system's GPU. A solution
to this problem is to reduce the number of points in each streamline. In Dipy
we provide an algorithm that will reduce the number of points on the straighter
parts of the streamline but keep more points on the curvier parts. This can
be used in the following way
from dipy.tracking.distances import approx_polygon_track
lines = [approx_polygon_track(line, 0.2) for line in lines]
"""
points = vtk.vtkPoints()
colors = np.asarray(colors)
if colors.ndim == 1:
colors = np.tile(colors, (len(lines), 1))
# Create the polyline.
streamlines = vtk.vtkCellArray()
cols = vtk.vtkUnsignedCharArray()
cols.SetName("Cols")
cols.SetNumberOfComponents(3)
len_lines = len(lines)
prior_line_shape = 0
for i in range(len_lines):
line = lines[i]
streamlines.InsertNextCell(line.shape[0])
for j in range(line.shape[0]):
points.InsertNextPoint(*line[j])
streamlines.InsertCellPoint(j + prior_line_shape)
color = (255 * colors[i]).astype('ubyte')
cols.InsertNextTuple3(*color)
prior_line_shape += line.shape[0]
profileData = vtk.vtkPolyData()
profileData.SetPoints(points)
profileData.SetLines(streamlines)
profileData.GetPointData().AddArray(cols)
# Add thickness to the resulting line.
profileTubes = vtk.vtkTubeFilter()
profileTubes.SetNumberOfSides(tube_sides)
if major_version <= 5:
profileTubes.SetInput(profileData)
else:
profileTubes.SetInputData(profileData)
#profileTubes.SetInput(profileData)
profileTubes.SetRadius(linewidth)
profileMapper = vtk.vtkPolyDataMapper()
profileMapper.SetInputConnection(profileTubes.GetOutputPort())
profileMapper.ScalarVisibilityOn()
profileMapper.SetScalarModeToUsePointFieldData()
profileMapper.SelectColorArray("Cols")
profileMapper.GlobalImmediateModeRenderingOn()
if lod:
profile = vtk.vtkLODActor()
profile.SetNumberOfCloudPoints(lod_points)
profile.GetProperty().SetPointSize(lod_points_size)
else:
profile = vtk.vtkActor()
profile.SetMapper(profileMapper)
profile.GetProperty().SetAmbient(0) # .3
profile.GetProperty().SetSpecular(0) # .3
profile.GetProperty().SetSpecularPower(10)
profile.GetProperty().SetInterpolationToGouraud()
profile.GetProperty().BackfaceCullingOn()
profile.GetProperty().SetOpacity(opacity)
return profile
def line(lines, colors, opacity=1, linewidth=1):
''' Create an actor for one or more lines.
Parameters
------------
lines : list of arrays representing lines as 3d points, for example
lines=[np.random.rand(10,3),np.random.rand(20,3)]
represents 2 lines, the first with 10 points and the second with 20 points, in x,y,z coordinates.
colors : array, shape (N,3)
Colormap where every triplet is encoding red, green and blue e.g.
::
r1,g1,b1
r2,g2,b2
...
rN,gN,bN
where
::
0 <= r <= 1,
0 <= g <= 1,
0 <= b <= 1
opacity : float, optional
``0 <= opacity <= 1``
linewidth : float, optional
Line thickness.
Returns
----------
v : vtkActor object
Line.
Examples
----------
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> lines=[np.random.rand(10,3), np.random.rand(20,3)]
>>> colors=np.random.rand(2,3)
>>> c=fvtk.line(lines, colors)
>>> fvtk.add(r,c)
>>> #fvtk.show(r)
'''
if not isinstance(lines, list):
lines = [lines]
points = vtk.vtkPoints()
lines_ = vtk.vtkCellArray()
linescalars = vtk.vtkFloatArray()
# lookuptable=vtk.vtkLookupTable()
lookuptable = _lookup(colors)
scalarmin = 0
colors = np.asarray(colors)
if colors.ndim == 2:
scalarmax = colors.shape[0] - 1
if colors.ndim == 1:
scalarmax = 0
curPointID = 0
m = (0.0, 0.0, 0.0)
n = (1.0, 0.0, 0.0)
scalar = 0
# many colors
if colors.ndim == 2:
for Line in lines:
inw = True
mit = iter(Line)
nit = iter(Line)
next(nit)
while(inw):
try:
m = next(mit)
n = next(nit)
# scalar=sp.rand(1)
linescalars.SetNumberOfComponents(1)
points.InsertNextPoint(m)
linescalars.InsertNextTuple1(scalar)
points.InsertNextPoint(n)
linescalars.InsertNextTuple1(scalar)
lines_.InsertNextCell(2)
lines_.InsertCellPoint(curPointID)
lines_.InsertCellPoint(curPointID + 1)
curPointID += 2
except StopIteration:
break
scalar += 1
# one color only
if colors.ndim == 1:
for Line in lines:
inw = True
mit = iter(Line)
nit = iter(Line)
next(nit)
while(inw):
try:
m = next(mit)
n = next(nit)
# scalar=sp.rand(1)
linescalars.SetNumberOfComponents(1)
points.InsertNextPoint(m)
linescalars.InsertNextTuple1(scalar)
points.InsertNextPoint(n)
linescalars.InsertNextTuple1(scalar)
lines_.InsertNextCell(2)
lines_.InsertCellPoint(curPointID)
lines_.InsertCellPoint(curPointID + 1)
curPointID += 2
except StopIteration:
break
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.SetLines(lines_)
polydata.GetPointData().SetScalars(linescalars)
mapper = vtk.vtkPolyDataMapper()
if major_version <= 5:
mapper.SetInput(polydata)
else:
mapper.SetInputData(polydata)
mapper.SetLookupTable(lookuptable)
mapper.SetColorModeToMapScalars()
mapper.SetScalarRange(scalarmin, scalarmax)
mapper.SetScalarModeToUsePointData()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetLineWidth(linewidth)
actor.GetProperty().SetOpacity(opacity)
return actor
def dots(points, color=(1, 0, 0), opacity=1, dot_size=5):
""" Create one or more 3d points
Parameters
----------
points : ndarray, (N, 3)
color : tuple (3,)
opacity : float
dot_size : int
Returns
--------
vtkActor
See Also
---------
dipy.viz.fvtk.point
"""
if points.ndim == 2:
points_no = points.shape[0]
else:
points_no = 1
polyVertexPoints = vtk.vtkPoints()
polyVertexPoints.SetNumberOfPoints(points_no)
aPolyVertex = vtk.vtkPolyVertex()
aPolyVertex.GetPointIds().SetNumberOfIds(points_no)
cnt = 0
if points.ndim > 1:
for point in points:
polyVertexPoints.InsertPoint(cnt, point[0], point[1], point[2])
aPolyVertex.GetPointIds().SetId(cnt, cnt)
cnt += 1
else:
polyVertexPoints.InsertPoint(cnt, points[0], points[1], points[2])
aPolyVertex.GetPointIds().SetId(cnt, cnt)
cnt += 1
aPolyVertexGrid = vtk.vtkUnstructuredGrid()
aPolyVertexGrid.Allocate(1, 1)
aPolyVertexGrid.InsertNextCell(aPolyVertex.GetCellType(),
aPolyVertex.GetPointIds())
aPolyVertexGrid.SetPoints(polyVertexPoints)
aPolyVertexMapper = vtk.vtkDataSetMapper()
if major_version <= 5:
aPolyVertexMapper.SetInput(aPolyVertexGrid)
else:
aPolyVertexMapper.SetInputData(aPolyVertexGrid)
aPolyVertexActor = vtk.vtkActor()
aPolyVertexActor.SetMapper(aPolyVertexMapper)
aPolyVertexActor.GetProperty().SetColor(color)
aPolyVertexActor.GetProperty().SetOpacity(opacity)
aPolyVertexActor.GetProperty().SetPointSize(dot_size)
return aPolyVertexActor
def point(points, colors, opacity=1, point_radius=0.1, theta=8, phi=8):
""" Visualize points as sphere glyphs
Parameters
----------
points : ndarray, shape (N, 3)
colors : ndarray (N,3) or tuple (3,)
point_radius : float
theta : int
phi : int
Returns
-------
vtkActor
Examples
--------
>>> from dipy.viz import fvtk
>>> ren = fvtk.ren()
>>> pts = np.random.rand(5, 3)
>>> point_actor = fvtk.point(pts, fvtk.colors.coral)
>>> fvtk.add(ren, point_actor)
>>> #fvtk.show(ren)
"""
if np.array(colors).ndim == 1:
# return dots(points,colors,opacity)
colors = np.tile(colors, (len(points), 1))
scalars = vtk.vtkUnsignedCharArray()
scalars.SetNumberOfComponents(3)
pts = vtk.vtkPoints()
cnt_colors = 0
for p in points:
pts.InsertNextPoint(p[0], p[1], p[2])
scalars.InsertNextTuple3(
round(255 * colors[cnt_colors][0]), round(255 * colors[cnt_colors][1]), round(255 * colors[cnt_colors][2]))
cnt_colors += 1
src = vtk.vtkSphereSource()
src.SetRadius(point_radius)
src.SetThetaResolution(theta)
src.SetPhiResolution(phi)
polyData = vtk.vtkPolyData()
polyData.SetPoints(pts)
polyData.GetPointData().SetScalars(scalars)
glyph = vtk.vtkGlyph3D()
glyph.SetSourceConnection(src.GetOutputPort())
if major_version <= 5:
glyph.SetInput(polyData)
else:
glyph.SetInputData(polyData)
glyph.SetColorModeToColorByScalar()
glyph.SetScaleModeToDataScalingOff()
mapper = vtk.vtkPolyDataMapper()
if major_version <= 5:
mapper.SetInput(glyph.GetOutput())
else:
mapper.SetInputData(glyph.GetOutput())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetOpacity(opacity)
return actor
def label(ren, text='Origin', pos=(0, 0, 0), scale=(0.2, 0.2, 0.2),
color=(1, 1, 1)):
''' Create a label actor.
This actor will always face the camera
Parameters
----------
ren : vtkRenderer() object
Renderer as returned by ``ren()``.
text : str
Text for the label.
pos : (3,) array_like, optional
Lower-left position of the label.
scale : (3,) array_like
Changes the size of the label.
color : (3,) array_like
Label color as ``(r,g,b)`` tuple.
Returns
-------
l : vtkActor object
Label.
Examples
--------
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> l=fvtk.label(r)
>>> fvtk.add(r,l)
>>> #fvtk.show(r)
'''
atext = vtk.vtkVectorText()
atext.SetText(text)
textm = vtk.vtkPolyDataMapper()
if major_version <= 5:
textm.SetInput(atext.GetOutput())
else:
textm.SetInputData(atext.GetOutput())
texta = vtk.vtkFollower()
texta.SetMapper(textm)
texta.SetScale(scale)
texta.GetProperty().SetColor(color)
texta.SetPosition(pos)
ren.AddActor(texta)
texta.SetCamera(ren.GetActiveCamera())
return texta
def volume(vol, voxsz=(1.0, 1.0, 1.0), affine=None, center_origin=1,
info=0, maptype=0, trilinear=1, iso=0, iso_thr=100,
opacitymap=None, colormap=None):
''' Create a volume and return a volumetric actor using volumetric
rendering.
This function has many different interesting capabilities. The maptype,
opacitymap and colormap are the most crucial parameters here.
Parameters
----------
vol : array, shape (N, M, K), dtype uint8
An array representing the volumetric dataset that we want to visualize
using volumetric rendering.
voxsz : (3,) array_like
Voxel size.
affine : (4, 4) ndarray
As given by volumeimages.
center_origin : int {0,1}
If 1, the center of the volume is considered to be the point
``(-vol.shape[0]/2.0+0.5,-vol.shape[1]/2.0+0.5,-vol.shape[2]/2.0+0.5)``.
info : int {0,1}
If 1 it prints out some info about the volume, the method and the
dataset.
trilinear : int {0,1}
Use trilinear interpolation, default 1, gives smoother rendering. If
you want faster interpolation use 0 (Nearest).
maptype : int {0,1}
Determines which raycasting technique is used for the rendering.
The options are:
If 0 then vtkVolumeTextureMapper2D is used.
If 1 then vtkVolumeRayCastFunction is used.
iso : int {0,1}
If iso is 1 and maptype is 1 then we use
``vtkVolumeRayCastIsosurfaceFunction`` which generates an isosurface at
the predefined iso_thr value. If iso is 0 and maptype is 1
``vtkVolumeRayCastCompositeFunction`` is used.
iso_thr : int
If iso is 1 then this threshold in the volume defines the value
which will be used to create the isosurface.
opacitymap : (N, 2) ndarray
The opacity map assigns a transparency coefficient to every point in
the volume. The default value uses the histogram of the volume to
calculate the opacitymap.
colormap : (N, 4) ndarray
The color map assigns a color value to every point in the volume.
When None, a red-blue colormap derived from the volume's histogram is used.
Returns
-------
v : vtkVolume
Volume.
Notes
--------
What is the difference between TextureMapper2D and RayCastFunction? Coming
soon... See the VTK user's guide [book], The Visualization Toolkit [book] and
VTK's online documentation.
What is the difference between RayCastIsosurfaceFunction and
RayCastCompositeFunction? Coming soon... See the same references.
What about trilinear interpolation?
Coming soon... well, when time permits really ... :-)
Examples
--------
First example random points.
>>> from dipy.viz import fvtk
>>> import numpy as np
>>> vol=100*np.random.rand(100,100,100)
>>> vol=vol.astype('uint8')
>>> vol.min(), vol.max()
(0, 99)
>>> r = fvtk.ren()
>>> v = fvtk.volume(vol)
>>> fvtk.add(r,v)
>>> #fvtk.show(r)
Second example with a more complicated function
>>> from dipy.viz import fvtk
>>> import numpy as np
>>> x, y, z = np.ogrid[-10:10:20j, -10:10:20j, -10:10:20j]
>>> s = np.sin(x*y*z)/(x*y*z)
>>> r = fvtk.ren()
>>> v = fvtk.volume(s)
>>> fvtk.add(r,v)
>>> #fvtk.show(r)
If you find this function too complicated you can always use mayavi.
Please do not forget to use the -wthread switch in ipython if you are
running mayavi.
from enthought.mayavi import mlab
import numpy as np
x, y, z = np.ogrid[-10:10:20j, -10:10:20j, -10:10:20j]
s = np.sin(x*y*z)/(x*y*z)
mlab.pipeline.volume(mlab.pipeline.scalar_field(s))
mlab.show()
More mayavi demos are available here:
http://code.enthought.com/projects/mayavi/docs/development/html/mayavi/mlab.html
'''
if vol.ndim != 3:
raise ValueError('3d numpy arrays only please')
if info:
print('Datatype', vol.dtype, 'converted to uint8')
vol = np.interp(vol, [vol.min(), vol.max()], [0, 255])
vol = vol.astype('uint8')
if opacitymap is None:
bin, res = np.histogram(vol.ravel())
res2 = np.interp(res, [vol.min(), vol.max()], [0, 1])
opacitymap = np.vstack((res, res2)).T
opacitymap = opacitymap.astype('float32')
'''
opacitymap=np.array([[ 0.0, 0.0],
[50.0, 0.9]])
'''
if info:
print('opacitymap', opacitymap)
if colormap is None:
bin, res = np.histogram(vol.ravel())
res2 = np.interp(res, [vol.min(), vol.max()], [0, 1])
zer = np.zeros(res2.shape)
colormap = np.vstack((res, res2, zer, res2[::-1])).T
colormap = colormap.astype('float32')
'''
colormap=np.array([[0.0, 0.5, 0.0, 0.0],
[64.0, 1.0, 0.5, 0.5],
[128.0, 0.9, 0.2, 0.3],
[196.0, 0.81, 0.27, 0.1],
[255.0, 0.5, 0.5, 0.5]])
'''
if info:
print('colormap', colormap)
im = vtk.vtkImageData()
if major_version <= 5:
im.SetScalarTypeToUnsignedChar()
im.SetDimensions(vol.shape[0], vol.shape[1], vol.shape[2])
# im.SetOrigin(0,0,0)
# im.SetSpacing(voxsz[2],voxsz[0],voxsz[1])
if major_version <= 5:
im.AllocateScalars()
else:
im.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 3)
for i in range(vol.shape[0]):
for j in range(vol.shape[1]):
for k in range(vol.shape[2]):
im.SetScalarComponentFromFloat(i, j, k, 0, vol[i, j, k])
if affine is not None:
aff = vtk.vtkMatrix4x4()
aff.DeepCopy((affine[0, 0], affine[0, 1], affine[0, 2], affine[0, 3], affine[1, 0], affine[1, 1], affine[1, 2], affine[1, 3], affine[2, 0], affine[
2, 1], affine[2, 2], affine[2, 3], affine[3, 0], affine[3, 1], affine[3, 2], affine[3, 3]))
# aff.DeepCopy((affine[0,0],affine[0,1],affine[0,2],0,affine[1,0],affine[1,1],affine[1,2],0,affine[2,0],affine[2,1],affine[2,2],0,affine[3,0],affine[3,1],affine[3,2],1))
# aff.DeepCopy((affine[0,0],affine[0,1],affine[0,2],127.5,affine[1,0],affine[1,1],affine[1,2],-127.5,affine[2,0],affine[2,1],affine[2,2],-127.5,affine[3,0],affine[3,1],affine[3,2],1))
reslice = vtk.vtkImageReslice()
if major_version <= 5:
reslice.SetInput(im)
else:
reslice.SetInputData(im)
# reslice.SetOutputDimensionality(2)
# reslice.SetOutputOrigin(127,-145,147)
reslice.SetResliceAxes(aff)
# reslice.SetOutputOrigin(-127,-127,-127)
# reslice.SetOutputExtent(-127,128,-127,128,-127,128)
# reslice.SetResliceAxesOrigin(0,0,0)
# print 'Get Reslice Axes Origin ', reslice.GetResliceAxesOrigin()
# reslice.SetOutputSpacing(1.0,1.0,1.0)
reslice.SetInterpolationModeToLinear()
# reslice.UpdateWholeExtent()
# print 'reslice GetOutputOrigin', reslice.GetOutputOrigin()
# print 'reslice GetOutputExtent',reslice.GetOutputExtent()
# print 'reslice GetOutputSpacing',reslice.GetOutputSpacing()
changeFilter = vtk.vtkImageChangeInformation()
if major_version <= 5:
changeFilter.SetInput(reslice.GetOutput())
else:
changeFilter.SetInputData(reslice.GetOutput())
# changeFilter.SetInput(im)
if center_origin:
changeFilter.SetOutputOrigin(
-vol.shape[0] / 2.0 + 0.5, -vol.shape[1] / 2.0 + 0.5, -vol.shape[2] / 2.0 + 0.5)
print('ChangeFilter ', changeFilter.GetOutputOrigin())
opacity = vtk.vtkPiecewiseFunction()
for i in range(opacitymap.shape[0]):
opacity.AddPoint(opacitymap[i, 0], opacitymap[i, 1])
color = vtk.vtkColorTransferFunction()
for i in range(colormap.shape[0]):
color.AddRGBPoint(
colormap[i, 0], colormap[i, 1], colormap[i, 2], colormap[i, 3])
if(maptype == 0):
property = vtk.vtkVolumeProperty()
property.SetColor(color)
property.SetScalarOpacity(opacity)
if trilinear:
property.SetInterpolationTypeToLinear()
else:
property.SetInterpolationTypeToNearest()
if info:
print('mapper VolumeTextureMapper2D')
mapper = vtk.vtkVolumeTextureMapper2D()
if affine is None:
if major_version <= 5:
mapper.SetInput(im)
else:
mapper.SetInputData(im)
else:
if major_version <= 5:
mapper.SetInput(changeFilter.GetOutput())
else:
mapper.SetInputData(changeFilter.GetOutput())
if (maptype == 1):
property = vtk.vtkVolumeProperty()
property.SetColor(color)
property.SetScalarOpacity(opacity)
property.ShadeOn()
if trilinear:
property.SetInterpolationTypeToLinear()
else:
property.SetInterpolationTypeToNearest()
if iso:
isofunc = vtk.vtkVolumeRayCastIsosurfaceFunction()
isofunc.SetIsoValue(iso_thr)
else:
compositeFunction = vtk.vtkVolumeRayCastCompositeFunction()
if info:
print('mapper VolumeRayCastMapper')
mapper = vtk.vtkVolumeRayCastMapper()
if iso:
mapper.SetVolumeRayCastFunction(isofunc)
if info:
print('Isosurface')
else:
mapper.SetVolumeRayCastFunction(compositeFunction)
# mapper.SetMinimumImageSampleDistance(0.2)
if info:
print('Composite')
if affine is None:
if major_version <= 5:
mapper.SetInput(im)
else:
mapper.SetInputData(im)
else:
# mapper.SetInput(reslice.GetOutput())
if major_version <= 5:
mapper.SetInput(changeFilter.GetOutput())
else:
mapper.SetInputData(changeFilter.GetOutput())
# Return mid position in world space
# im2=reslice.GetOutput()
# index=im2.FindPoint(vol.shape[0]/2.0,vol.shape[1]/2.0,vol.shape[2]/2.0)
# print 'Image Getpoint ' , im2.GetPoint(index)
volum = vtk.vtkVolume()
volum.SetMapper(mapper)
volum.SetProperty(property)
if info:
print('Origin', volum.GetOrigin())
print('Orientation', volum.GetOrientation())
print('OrientationW', volum.GetOrientationWXYZ())
print('Position', volum.GetPosition())
print('Center', volum.GetCenter())
print('Get XRange', volum.GetXRange())
print('Get YRange', volum.GetYRange())
print('Get ZRange', volum.GetZRange())
print('Volume data type', vol.dtype)
return volum
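# --- Illustrative sketch (assumption, not part of the original module) ------
# ``volume`` derives its opacity and color transfer functions from the
# volume's histogram by default. The hypothetical helper below shows how an
# explicit opacitymap/colormap could be passed instead; the values mirror the
# commented-out arrays inside ``volume`` itself.
def _volume_custom_maps_example(vol):
    """Return a vtkVolume for ``vol`` using hand-picked transfer functions."""
    # (value, opacity) pairs
    opacitymap = np.array([[0.0, 0.0],
                           [50.0, 0.9]])
    # (value, r, g, b) rows
    colormap = np.array([[0.0, 0.5, 0.0, 0.0],
                         [64.0, 1.0, 0.5, 0.5],
                         [128.0, 0.9, 0.2, 0.3],
                         [196.0, 0.81, 0.27, 0.1],
                         [255.0, 0.5, 0.5, 0.5]])
    return volume(vol, opacitymap=opacitymap, colormap=colormap)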
def contour(vol, voxsz=(1.0, 1.0, 1.0), affine=None, levels=[50],
colors=[np.array([1.0, 0.0, 0.0])], opacities=[0.5]):
""" Take a volume and draw surface contours for any any number of
thresholds (levels) where every contour has its own color and opacity
Parameters
----------
vol : (N, M, K) ndarray
An array representing the volumetric dataset for which we will draw
some beautiful contours.
voxsz : (3,) array_like
Voxel size.
affine : None
Not used.
levels : array_like
Sequence of thresholds for the contours, taken from the image values;
must be of the same datatype as `vol`.
colors : (N, 3) ndarray
RGB values in [0,1].
opacities : array_like
Opacities of contours.
Returns
-------
vtkAssembly
Examples
--------
>>> import numpy as np
>>> from dipy.viz import fvtk
>>> A=np.zeros((10,10,10))
>>> A[3:-3,3:-3,3:-3]=1
>>> r=fvtk.ren()
>>> fvtk.add(r,fvtk.contour(A,levels=[1]))
>>> #fvtk.show(r)
"""
im = vtk.vtkImageData()
if major_version <= 5:
im.SetScalarTypeToUnsignedChar()
im.SetDimensions(vol.shape[0], vol.shape[1], vol.shape[2])
# im.SetOrigin(0,0,0)
# im.SetSpacing(voxsz[2],voxsz[0],voxsz[1])
if major_version <= 5:
im.AllocateScalars()
else:
im.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 3)
for i in range(vol.shape[0]):
for j in range(vol.shape[1]):
for k in range(vol.shape[2]):
im.SetScalarComponentFromFloat(i, j, k, 0, vol[i, j, k])
ass = vtk.vtkAssembly()
# ass=[]
for (i, l) in enumerate(levels):
# print levels
skinExtractor = vtk.vtkContourFilter()
if major_version <= 5:
skinExtractor.SetInput(im)
else:
skinExtractor.SetInputData(im)
skinExtractor.SetValue(0, l)
skinNormals = vtk.vtkPolyDataNormals()
skinNormals.SetInputConnection(skinExtractor.GetOutputPort())
skinNormals.SetFeatureAngle(60.0)
skinMapper = vtk.vtkPolyDataMapper()
skinMapper.SetInputConnection(skinNormals.GetOutputPort())
skinMapper.ScalarVisibilityOff()
skin = vtk.vtkActor()
skin.SetMapper(skinMapper)
skin.GetProperty().SetOpacity(opacities[i])
# print colors[i]
skin.GetProperty().SetColor(colors[i][0], colors[i][1], colors[i][2])
# skin.Update()
ass.AddPart(skin)
del skin
del skinMapper
del skinExtractor
return ass
lowercase_cm_name = {'blues':'Blues', 'accent':'Accent'}
def create_colormap(v, name='jet', auto=True):
"""Create colors from a specific colormap and return it
as an array of shape (N,3) where every row gives the corresponding
r,g,b value. The colormaps we use are similar to those of pylab.
Parameters
----------
v : (N,) array
vector of values to be mapped in RGB colors according to colormap
name : str
Name of the colormap. Currently implemented: 'jet', 'blues',
'accent', 'bone' and matplotlib colormaps if you have matplotlib
installed.
auto : bool,
if auto is True then v is interpolated to [0, 1] from v.min()
to v.max()
Notes
-----
Dipy supports a few colormaps for those who do not use Matplotlib; for
more colormaps consider installing Matplotlib.
"""
if v.ndim > 1:
msg = 'This function works only with 1d arrays. Use ravel()'
raise ValueError(msg)
if auto:
v = np.interp(v, [v.min(), v.max()], [0, 1])
else:
v = np.clip(v, 0, 1)
# For backwards compatibility with lowercase names
newname = lowercase_cm_name.get(name) or name
colormap = get_cmap(newname)
if colormap is None:
e_s = "Colormap '%s' is not yet implemented " % name
raise ValueError(e_s)
rgba = colormap(v)
rgb = rgba[:, :3].copy()
return rgb
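# --- Illustrative sketch (assumption) ----------------------------------------
# ``create_colormap`` pairs naturally with actors that accept per-item RGB
# colors. The hypothetical helper below colors one sphere glyph per value by
# mapping a 1d array of values through the 'jet' colormap and passing the
# resulting (N, 3) array to ``point``.
def _colormap_points_example(points_xyz, values):
    """Return a point actor whose glyph colors encode ``values``."""
    cols = create_colormap(np.ravel(values), name='jet')
    return point(points_xyz, cols)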
def _makeNd(array, ndim):
"""Pads as many 1s at the beginning of array's shape as are need to give
array ndim dimensions."""
new_shape = (1,) * (ndim - array.ndim) + array.shape
return array.reshape(new_shape)
def sphere_funcs(sphere_values, sphere, image=None, colormap='jet',
scale=2.2, norm=True, radial_scale=True):
"""Plot many morphed spherical functions simultaneously.
Parameters
----------
sphere_values : (M,) or (X, M) or (X, Y, M) or (X, Y, Z, M) ndarray
Values on the sphere.
sphere : Sphere
image : None,
Not yet supported.
colormap : None or 'jet'
If None then no color is used.
scale : float,
Distance between spheres.
norm : bool,
Normalize `sphere_values`.
radial_scale : bool,
Scale sphere points according to odf values.
Returns
-------
actor : vtkActor
Spheres.
Examples
--------
>>> from dipy.viz import fvtk
>>> r = fvtk.ren()
>>> odfs = np.ones((5, 5, 724))
>>> odfs[..., 0] = 2.
>>> from dipy.data import get_sphere
>>> sphere = get_sphere('symmetric724')
>>> fvtk.add(r, fvtk.sphere_funcs(odfs, sphere))
>>> #fvtk.show(r)
"""
sphere_values = np.asarray(sphere_values)
if sphere_values.ndim > 4:
raise ValueError("Wrong shape")
sphere_values = _makeNd(sphere_values, 4)
grid_shape = np.array(sphere_values.shape[:3])
faces = np.asarray(sphere.faces, dtype=int)
vertices = sphere.vertices
if sphere_values.shape[-1] != sphere.vertices.shape[0]:
msg = 'Sphere.vertices.shape[0] should be the same as the '
msg += 'last dimension of sphere_values, i.e. sphere_values.shape[-1]'
raise ValueError(msg)
list_sq = []
list_cols = []
for ijk in np.ndindex(*grid_shape):
m = sphere_values[ijk].copy()
if norm:
m /= abs(m).max()
if radial_scale:
xyz = vertices.T * m
else:
xyz = vertices.T.copy()
xyz += scale * (ijk - grid_shape / 2.)[:, None]
xyz = xyz.T
list_sq.append(xyz)
if colormap is not None:
cols = create_colormap(m, colormap)
cols = np.interp(cols, [0, 1], [0, 255]).astype('ubyte')
list_cols.append(cols)
points = vtk.vtkPoints()
triangles = vtk.vtkCellArray()
if colormap is not None:
colors = vtk.vtkUnsignedCharArray()
colors.SetNumberOfComponents(3)
colors.SetName("Colors")
for k in xrange(len(list_sq)):
xyz = list_sq[k]
if colormap is not None:
cols = list_cols[k]
for i in xrange(xyz.shape[0]):
points.InsertNextPoint(*xyz[i])
if colormap is not None:
colors.InsertNextTuple3(*cols[i])
for j in xrange(faces.shape[0]):
triangle = vtk.vtkTriangle()
triangle.GetPointIds().SetId(0, faces[j, 0] + k * xyz.shape[0])
triangle.GetPointIds().SetId(1, faces[j, 1] + k * xyz.shape[0])
triangle.GetPointIds().SetId(2, faces[j, 2] + k * xyz.shape[0])
triangles.InsertNextCell(triangle)
del triangle
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.SetPolys(triangles)
if colormap is not None:
polydata.GetPointData().SetScalars(colors)
polydata.Modified()
mapper = vtk.vtkPolyDataMapper()
if major_version <= 5:
mapper.SetInput(polydata)
else:
mapper.SetInputData(polydata)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
return actor
def peaks(peaks_dirs, peaks_values=None, scale=2.2, colors=(1, 0, 0)):
""" Visualize peak directions as given from ``peaks_from_model``
Parameters
----------
peaks_dirs : ndarray
Peak directions. The shape of the array can be (M, 3) or (X, M, 3) or
(X, Y, M, 3) or (X, Y, Z, M, 3)
peaks_values : ndarray
Peak values. The shape of the array can be (M, ) or (X, M) or
(X, Y, M) or (X, Y, Z, M)
scale : float
Distance between spheres
colors : ndarray or tuple
Peak colors
Returns
-------
vtkActor
See Also
--------
dipy.viz.fvtk.sphere_funcs
"""
peaks_dirs = np.asarray(peaks_dirs)
if peaks_dirs.ndim > 5:
raise ValueError("Wrong shape")
peaks_dirs = _makeNd(peaks_dirs, 5)
if peaks_values is not None:
peaks_values = _makeNd(peaks_values, 4)
grid_shape = np.array(peaks_dirs.shape[:3])
list_dirs = []
for ijk in np.ndindex(*grid_shape):
xyz = scale * (ijk - grid_shape / 2.)[:, None]
xyz = xyz.T
for i in range(peaks_dirs.shape[-2]):
if peaks_values is not None:
pv = peaks_values[ijk][i]
else:
pv = 1.
symm = np.vstack((-peaks_dirs[ijk][i] * pv + xyz,
peaks_dirs[ijk][i] * pv + xyz))
list_dirs.append(symm)
return line(list_dirs, colors)
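# --- Illustrative sketch (assumption) ----------------------------------------
# A minimal single-voxel example for ``peaks``: two hypothetical peak
# directions with matching peak values, rendered as a symmetric line actor.
def _peaks_single_voxel_example():
    """Return a line actor showing two peaks in one voxel."""
    dirs = np.array([[[[[1., 0., 0.],
                        [0., 1., 0.]]]]])  # shape (1, 1, 1, 2, 3)
    vals = np.array([[[[1.0, 0.5]]]])      # shape (1, 1, 1, 2)
    return peaks(dirs, vals, colors=(1, 0, 0))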
def tensor(evals, evecs, scalar_colors=None, sphere=None, scale=2.2, norm=True):
"""Plot many tensors as ellipsoids simultaneously.
Parameters
----------
evals : (3,) or (X, 3) or (X, Y, 3) or (X, Y, Z, 3) ndarray
eigenvalues
evecs : (3, 3) or (X, 3, 3) or (X, Y, 3, 3) or (X, Y, Z, 3, 3) ndarray
eigenvectors
scalar_colors : (3,) or (X, 3) or (X, Y, 3) or (X, Y, Z, 3) ndarray
RGB colors used to show the tensors
Default None, color the ellipsoids using ``color_fa``
sphere : Sphere,
this sphere will be transformed to the tensor ellipsoid
Default is None which uses a symmetric sphere with 724 points.
scale : float,
distance between ellipsoids.
norm : boolean,
Normalize `evals`.
Returns
-------
actor : vtkActor
Ellipsoids
Examples
--------
>>> from dipy.viz import fvtk
>>> r = fvtk.ren()
>>> evals = np.array([1.4, .35, .35]) * 10 ** (-3)
>>> evecs = np.eye(3)
>>> from dipy.data import get_sphere
>>> sphere = get_sphere('symmetric724')
>>> fvtk.add(r, fvtk.tensor(evals, evecs, sphere=sphere))
>>> #fvtk.show(r)
"""
evals = np.asarray(evals)
if evals.ndim > 4:
raise ValueError("Wrong shape")
evals = _makeNd(evals, 4)
evecs = _makeNd(evecs, 5)
grid_shape = np.array(evals.shape[:3])
if sphere is None:
from dipy.data import get_sphere
sphere = get_sphere('symmetric724')
faces = np.asarray(sphere.faces, dtype=int)
vertices = sphere.vertices
colors = vtk.vtkUnsignedCharArray()
colors.SetNumberOfComponents(3)
colors.SetName("Colors")
if scalar_colors is None:
from dipy.reconst.dti import color_fa, fractional_anisotropy
cfa = color_fa(fractional_anisotropy(evals), evecs)
else:
cfa = _makeNd(scalar_colors, 4)
list_sq = []
list_cols = []
for ijk in ndindex(grid_shape):
ea = evals[ijk]
if norm:
ea /= ea.max()
ea = np.diag(ea.copy())
ev = evecs[ijk].copy()
xyz = np.dot(ev, np.dot(ea, vertices.T))
xyz += scale * (ijk - grid_shape / 2.)[:, None]
xyz = xyz.T
list_sq.append(xyz)
acolor = np.zeros(xyz.shape)
acolor[:, :] = np.interp(cfa[ijk], [0, 1], [0, 255])
list_cols.append(acolor.astype('ubyte'))
points = vtk.vtkPoints()
triangles = vtk.vtkCellArray()
for k in xrange(len(list_sq)):
xyz = list_sq[k]
cols = list_cols[k]
for i in xrange(xyz.shape[0]):
points.InsertNextPoint(*xyz[i])
colors.InsertNextTuple3(*cols[i])
for j in xrange(faces.shape[0]):
triangle = vtk.vtkTriangle()
triangle.GetPointIds().SetId(0, faces[j, 0] + k * xyz.shape[0])
triangle.GetPointIds().SetId(1, faces[j, 1] + k * xyz.shape[0])
triangle.GetPointIds().SetId(2, faces[j, 2] + k * xyz.shape[0])
triangles.InsertNextCell(triangle)
del triangle
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.SetPolys(triangles)
polydata.GetPointData().SetScalars(colors)
polydata.Modified()
mapper = vtk.vtkPolyDataMapper()
if major_version <= 5:
mapper.SetInput(polydata)
else:
mapper.SetInputData(polydata)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
return actor
def slicer(vol, voxsz=(1.0, 1.0, 1.0), plane_i=[0], plane_j=None,
plane_k=None, outline=True):
""" Slice a 3D volume
Parameters
----------
vol : array, shape (N, M, K)
An array representing the volumetric dataset that we want to slice
voxsz : sequence of 3 floats
Voxel size.
plane_i : sequence of ints
show plane or planes along the first dimension
plane_j : sequence of ints
show plane or planes along the second dimension
plane_k : sequence of ints
show plane or planes along the third(last) dimension
outline : bool
if True (default) a small outline is drawn around the slices
Examples
--------
>>> import numpy as np
>>> from dipy.viz import fvtk
>>> x, y, z = np.ogrid[-10:10:80j, -10:10:80j, -10:10:80j]
>>> s = np.sin(x * y * z) / (x * y * z)
>>> r = fvtk.ren()
>>> fvtk.add(r, fvtk.slicer(s, plane_i=[0, 5]))
>>> #fvtk.show(r)
"""
if plane_i is None:
plane_i = []
if plane_j is None:
plane_j = []
if plane_k is None:
plane_k = []
if vol.ndim != 3:
raise ValueError("vol has to be a 3d array")
vol = np.interp(vol, xp=[vol.min(), vol.max()], fp=[0, 255])
vol = vol.astype('uint8')
im = vtk.vtkImageData()
if major_version <= 5:
im.SetScalarTypeToUnsignedChar()
I, J, K = vol.shape[:3]
im.SetDimensions(I, J, K)
# im.SetOrigin(0,0,0)
im.SetSpacing(voxsz[2], voxsz[0], voxsz[1])
if major_version <= 5:
im.AllocateScalars()
else:
im.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 3)
# copy data
for i in range(vol.shape[0]):
for j in range(vol.shape[1]):
for k in range(vol.shape[2]):
im.SetScalarComponentFromFloat(i, j, k, 0, vol[i, j, k])
# An outline provides context around the data.
outlineData = vtk.vtkOutlineFilter()
if major_version <= 5:
outlineData.SetInput(im)
else:
outlineData.SetInputData(im)
mapOutline = vtk.vtkPolyDataMapper()
mapOutline.SetInputConnection(outlineData.GetOutputPort())
outline_ = vtk.vtkActor()
outline_.SetMapper(mapOutline)
outline_.GetProperty().SetColor(1, 0, 0)
# Now we are creating three orthogonal planes passing through the
# volume. Each plane uses a different texture map and therefore has
# different coloration.
# Start by creating a black/white lookup table.
lut = vtk.vtkLookupTable()
lut.SetTableRange(vol.min(), vol.max())
lut.SetSaturationRange(0, 0)
lut.SetHueRange(0, 0)
lut.SetValueRange(0, 1)
lut.SetRampToLinear()
lut.Build()
x1, x2, y1, y2, z1, z2 = im.GetExtent()
# print x1,x2,y1,y2,z1,z2
# Create the first of the three planes. The filter vtkImageMapToColors
# maps the data through the corresponding lookup table created above.
# The vtkImageActor is a type of vtkProp and conveniently displays an
# image on a single quadrilateral plane. It does this using texture
# mapping and as a result is quite fast. (Note: the input image has to
# be unsigned char values, which the vtkImageMapToColors produces.)
# Note also that by specifying the DisplayExtent, the pipeline
# requests data of this extent and the vtkImageMapToColors only
# processes a slice of data.
planeColors = vtk.vtkImageMapToColors()
# saggitalColors.SetInputConnection(im.GetOutputPort())
if major_version <= 5:
planeColors.SetInput(im)
else:
planeColors.SetInputData(im)
planeColors.SetLookupTable(lut)
planeColors.Update()
saggitals = []
for x in plane_i:
saggital = vtk.vtkImageActor()
if major_version <= 5:
saggital.SetInput(planeColors.GetOutput())
else:
saggital.SetInputData(planeColors.GetOutput())
saggital.SetDisplayExtent(x, x, y1, y2, z1, z2)
saggitals.append(saggital)
axials = []
for z in plane_k:
axial = vtk.vtkImageActor()
if major_version <= 5:
axial.SetInput(planeColors.GetOutput())
else:
axial.SetInputData(planeColors.GetOutput())
axial.SetDisplayExtent(x1, x2, y1, y2, z, z)
axials.append(axial)
coronals = []
for y in plane_j:
coronal = vtk.vtkImageActor()
if major_version <= 5:
coronal.SetInput(planeColors.GetOutput())
else:
coronal.SetInputData(planeColors.GetOutput())
coronal.SetDisplayExtent(x1, x2, y, y, z1, z2)
coronals.append(coronal)
assem = vtk.vtkAssembly()
for sag in saggitals:
assem.AddPart(sag)
for ax in axials:
assem.AddPart(ax)
for cor in coronals:
assem.AddPart(cor)
if outline:
assem.AddPart(outline_)
return assem
def camera(ren, pos=None, focal=None, viewup=None, verbose=True):
""" Change the active camera
Parameters
----------
ren : vtkRenderer
pos : tuple
(x, y, z) position of the camera
focal : tuple
(x, y, z) focal point
viewup : tuple
(x, y, z) viewup vector
verbose : bool
show information about the camera
Returns
-------
vtkCamera
"""
cam = ren.GetActiveCamera()
if verbose:
print('Camera Position (%.2f,%.2f,%.2f)' % cam.GetPosition())
print('Camera Focal Point (%.2f,%.2f,%.2f)' % cam.GetFocalPoint())
print('Camera View Up (%.2f,%.2f,%.2f)' % cam.GetViewUp())
if pos is not None:
ren.GetActiveCamera().SetPosition(*pos)
if focal is not None:
ren.GetActiveCamera().SetFocalPoint(*focal)
if viewup is not None:
ren.GetActiveCamera().SetViewUp(*viewup)
cam = ren.GetActiveCamera()
if pos is not None or focal is not None or viewup is not None:
if verbose:
print('-------------------------------------')
print('Camera New Position (%.2f,%.2f,%.2f)' % cam.GetPosition())
print('Camera New Focal Point (%.2f,%.2f,%.2f)' %
cam.GetFocalPoint())
print('Camera New View Up (%.2f,%.2f,%.2f)' % cam.GetViewUp())
return cam
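# --- Illustrative sketch (assumption) ----------------------------------------
# Typical use of ``camera``: place the active camera on the +z axis, look at
# the origin and keep y as the up vector. ``ren`` is whatever ``ren()``
# returned.
def _camera_example(ren):
    """Point the camera at the origin from (0, 0, 10)."""
    return camera(ren, pos=(0, 0, 10), focal=(0, 0, 0),
                  viewup=(0, 1, 0), verbose=False)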
def show(ren, title='Dipy', size=(300, 300), png_magnify=1):
""" Show window
Notes
-----
To save a screenshot press 's' and check your current directory
for ``fvtk.png``.
Parameters
------------
ren : vtkRenderer() object
As returned from function ``ren()``.
title : string
A string for the window title bar.
size : (int, int)
``(width, height)`` of the window
png_magnify : int
Number of times to magnify the screenshot.
Notes
-----
If you want to:
* navigate in the 3d world use the left, middle and right mouse buttons
* reset the screen press 'r'
* save a screenshot press 's'
* quit press 'q'
See also
---------
dipy.viz.fvtk.record
Examples
----------
>>> import numpy as np
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> lines=[np.random.rand(10,3),np.random.rand(20,3)]
>>> colors=np.array([[0.2,0.2,0.2],[0.8,0.8,0.8]])
>>> c=fvtk.line(lines,colors)
>>> fvtk.add(r,c)
>>> l=fvtk.label(r)
>>> fvtk.add(r,l)
>>> #fvtk.show(r)
"""
ren.ResetCamera()
window = vtk.vtkRenderWindow()
window.AddRenderer(ren)
# window.SetAAFrames(6)
window.SetWindowName(title)
window.SetSize(size[0], size[1])
style = vtk.vtkInteractorStyleTrackballCamera()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(window)
iren.SetPicker(picker)
def key_press(obj, event):
key = obj.GetKeySym()
if key == 's' or key == 'S':
print('Saving image...')
renderLarge = vtk.vtkRenderLargeImage()
if major_version <= 5:
renderLarge.SetInput(ren)
else:
renderLarge.SetInputData(ren)
renderLarge.SetMagnification(png_magnify)
renderLarge.Update()
writer = vtk.vtkPNGWriter()
writer.SetInputConnection(renderLarge.GetOutputPort())
writer.SetFileName('fvtk.png')
writer.Write()
print('Look for fvtk.png in your current working directory.')
iren.AddObserver('KeyPressEvent', key_press)
iren.SetInteractorStyle(style)
iren.Initialize()
picker.Pick(85, 126, 0, ren)
window.Render()
iren.Start()
# window.RemoveAllObservers()
# ren.SetRenderWindow(None)
window.RemoveRenderer(ren)
ren.SetRenderWindow(None)
def record(ren=None, cam_pos=None, cam_focal=None, cam_view=None,
out_path=None, path_numbering=False, n_frames=1, az_ang=10,
magnification=1, size=(300, 300), verbose=False):
''' Record a video of your scene.
The scene is saved as a series of ``.png`` files, rotating the
azimuth angle ``az_ang`` in every frame.
Parameters
-----------
ren : vtkRenderer() object
As returned from :func:`ren`.
cam_pos : None or sequence (3,), optional
Camera position.
cam_focal : None or sequence (3,), optional
Camera focal point.
cam_view : None or sequence (3,), optional
Camera view up.
out_path : str, optional
Output directory for the frames
path_numbering : bool, optional
When True, out_path is extended with the frame number, i.e.
out_path + str(frame number). Numbering is always applied when n_frames is larger than 1.
n_frames : int, optional
number of frames to save. Default: 1
az_ang : float, optional
Azimuthal angle of camera rotation (degrees). Default: 10.
magnification : int, optional
How much to magnify the saved frame. Default: 1 (no magnification).
Examples
---------
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> a=fvtk.axes()
>>> fvtk.add(r,a)
>>> #uncomment below to record
>>> #fvtk.record(r)
>>> #check for new images in current directory
'''
if ren is None:
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(size[0], size[1])
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# ren.GetActiveCamera().Azimuth(180)
ren.ResetCamera()
renderLarge = vtk.vtkRenderLargeImage()
renderLarge.SetInput(ren)
renderLarge.SetMagnification(magnification)
renderLarge.Update()
writer = vtk.vtkPNGWriter()
ang = 0
if cam_pos is not None:
cx, cy, cz = cam_pos
ren.GetActiveCamera().SetPosition(cx, cy, cz)
if cam_focal is not None:
fx, fy, fz = cam_focal
ren.GetActiveCamera().SetFocalPoint(fx, fy, fz)
if cam_view is not None:
ux, uy, uz = cam_view
ren.GetActiveCamera().SetViewUp(ux, uy, uz)
cam = ren.GetActiveCamera()
if verbose:
print('Camera Position (%.2f,%.2f,%.2f)' % cam.GetPosition())
print('Camera Focal Point (%.2f,%.2f,%.2f)' % cam.GetFocalPoint())
print('Camera View Up (%.2f,%.2f,%.2f)' % cam.GetViewUp())
for i in range(n_frames):
ren.GetActiveCamera().Azimuth(ang)
renderLarge = vtk.vtkRenderLargeImage()
renderLarge.SetInput(ren)
renderLarge.SetMagnification(magnification)
renderLarge.Update()
writer.SetInputConnection(renderLarge.GetOutputPort())
# filename='/tmp/'+str(3000000+i)+'.png'
if n_frames > 1 or path_numbering:
if out_path is None:
filename = str(1000000 + i) + '.png'
else:
filename = out_path + str(1000000 + i) + '.png'
else:
filename = out_path
writer.SetFileName(filename)
writer.Write()
ang = +az_ang
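# --- Illustrative sketch (assumption) ----------------------------------------
# Recording a full turn of a scene with ``record``: 36 frames, rotating the
# azimuth by 10 degrees per frame. With out_path left as None and
# path_numbering on, the frames are written as 1000000.png ... 1000035.png in
# the current directory.
def _record_full_turn_example(ren):
    """Save 36 numbered png frames, one per 10 degree azimuth step."""
    record(ren, n_frames=36, az_ang=10, path_numbering=True, size=(300, 300))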
if __name__ == "__main__":
pass
|
jyeatman/dipy
|
dipy/viz/fvtk.py
|
Python
|
bsd-3-clause
| 53,175
|
#!/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
__version__='''$Id: test_geomutils.py 3355 2009-01-08 14:58:44Z jonas $'''
__doc__="""Tests for geometry utility functions."""
import unittest
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses
setOutDir(__name__)
class GeomTestCase(unittest.TestCase):
def test_padding(self):
"Test reportlab.lib.boxstuff.normalizePadding."
from reportlab.lib.geomutils import normalizeTRBL
paddings = (
(4, (4, 4, 4, 4)),
((0, 1), (0, 1, 0, 1)),
((0, 1, 2), (0, 1, 2, 1)),
((0, 1, 2, 3), (0, 1, 2, 3)),
)
for pin, pout in paddings:
pres = normalizeTRBL(pin)
assert pres == pout, "normalizeTRBL(%s) returned %s, expected %s" % (pin, pres, pout)
def makeSuite():
return makeSuiteForClasses(GeomTestCase)
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
|
ejucovy/reportlab
|
tests/test_geomutils.py
|
Python
|
bsd-3-clause
| 1,009
|
# -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2018 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""PID minters for drafts."""
from __future__ import absolute_import, print_function
import uuid
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
def cap_deposit_minter(record_uuid, data):
"""Mint deposit's identifier."""
try:
pid_value = data['_deposit']['id']
except KeyError:
pid_value = uuid.uuid4().hex
pid = PersistentIdentifier.create(
'depid',
pid_value,
object_type='rec',
object_uuid=record_uuid,
status=PIDStatus.REGISTERED
)
data['_deposit'] = {
'id': pid.pid_value,
'status': 'draft',
}
return pid
|
cernanalysispreservation/analysis-preservation.cern.ch
|
cap/modules/deposit/minters.py
|
Python
|
gpl-2.0
| 1,769
|
#
# Copyright 2014 Red Hat, Inc
#
# Author: Chris Dent <chdent@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for producing IPMI sample messages from notification events.
"""
import mock
from oslotest import base
from ceilometer.ipmi.notifications import ironic as ipmi
from ceilometer import sample
from ceilometer.tests.ipmi.notifications import ipmi_test_data
class TestNotifications(base.BaseTestCase):
def test_ipmi_temperature_notification(self):
"""Test IPMI Temperature sensor data.
Based on the above ipmi_test_data the expected sample for a single
temperature reading has::
* a resource_id composed from the node_uuid Sensor ID
* a name composed from 'hardware.ipmi.' and 'temperature'
* a volume from the first chunk of the Sensor Reading
* a unit from the last chunk of the Sensor Reading
* some readings are skipped if the value is 'Disabled'
* metadata with the node id
"""
processor = ipmi.TemperatureSensorNotification(None)
counters = dict([(counter.resource_id, counter) for counter in
processor.process_notification(
ipmi_test_data.SENSOR_DATA)])
self.assertEqual(10, len(counters),
'expected 10 temperature readings')
resource_id = (
'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-dimm_gh_vr_temp_(0x3b)'
)
test_counter = counters[resource_id]
self.assertEqual(26.0, test_counter.volume)
self.assertEqual('C', test_counter.unit)
self.assertEqual(sample.TYPE_GAUGE, test_counter.type)
self.assertEqual('hardware.ipmi.temperature', test_counter.name)
self.assertEqual('hardware.ipmi.metrics.update',
test_counter.resource_metadata['event_type'])
self.assertEqual('f4982fd2-2f2b-4bb5-9aff-48aac801d1ad',
test_counter.resource_metadata['node'])
def test_ipmi_current_notification(self):
"""Test IPMI Current sensor data.
A single current reading is effectively the same as temperature,
modulo "current".
"""
processor = ipmi.CurrentSensorNotification(None)
counters = dict([(counter.resource_id, counter) for counter in
processor.process_notification(
ipmi_test_data.SENSOR_DATA)])
self.assertEqual(1, len(counters), 'expected 1 current reading')
resource_id = (
'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-avg_power_(0x2e)'
)
test_counter = counters[resource_id]
self.assertEqual(130.0, test_counter.volume)
self.assertEqual('W', test_counter.unit)
self.assertEqual(sample.TYPE_GAUGE, test_counter.type)
self.assertEqual('hardware.ipmi.current', test_counter.name)
def test_ipmi_fan_notification(self):
"""Test IPMI Fan sensor data.
A single fan reading is effectively the same as temperature,
modulo "fan".
"""
processor = ipmi.FanSensorNotification(None)
counters = dict([(counter.resource_id, counter) for counter in
processor.process_notification(
ipmi_test_data.SENSOR_DATA)])
self.assertEqual(12, len(counters), 'expected 12 fan readings')
resource_id = (
'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-fan_4a_tach_(0x46)'
)
test_counter = counters[resource_id]
self.assertEqual(6900.0, test_counter.volume)
self.assertEqual('RPM', test_counter.unit)
self.assertEqual(sample.TYPE_GAUGE, test_counter.type)
self.assertEqual('hardware.ipmi.fan', test_counter.name)
def test_ipmi_voltage_notification(self):
"""Test IPMI Voltage sensor data.
A single voltage reading is effectively the same as temperature,
modulo "voltage".
"""
processor = ipmi.VoltageSensorNotification(None)
counters = dict([(counter.resource_id, counter) for counter in
processor.process_notification(
ipmi_test_data.SENSOR_DATA)])
self.assertEqual(4, len(counters), 'expected 4 voltage readings')
resource_id = (
'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-planar_vbat_(0x1c)'
)
test_counter = counters[resource_id]
self.assertEqual(3.137, test_counter.volume)
self.assertEqual('V', test_counter.unit)
self.assertEqual(sample.TYPE_GAUGE, test_counter.type)
self.assertEqual('hardware.ipmi.voltage', test_counter.name)
def test_disabled_skips_metric(self):
"""Test that a meter with a disabled volume is skipped."""
processor = ipmi.TemperatureSensorNotification(None)
counters = dict([(counter.resource_id, counter) for counter in
processor.process_notification(
ipmi_test_data.SENSOR_DATA)])
self.assertEqual(10, len(counters),
'expected 10 temperature readings')
resource_id = (
'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-mezz_card_temp_(0x35)'
)
self.assertNotIn(resource_id, counters)
def test_empty_payload_no_metrics_success(self):
processor = ipmi.TemperatureSensorNotification(None)
counters = dict([(counter.resource_id, counter) for counter in
processor.process_notification(
ipmi_test_data.EMPTY_PAYLOAD)])
self.assertEqual(0, len(counters), 'expected 0 readings')
@mock.patch('ceilometer.ipmi.notifications.ironic.LOG')
def test_missing_sensor_data(self, mylog):
processor = ipmi.TemperatureSensorNotification(None)
messages = []
mylog.warn = lambda *args: messages.extend(args)
list(processor.process_notification(ipmi_test_data.MISSING_SENSOR))
self.assertEqual(
'invalid sensor data for '
'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pci_riser_1_temp_(0x33): '
"missing 'Sensor Reading' in payload",
messages[0]
)
@mock.patch('ceilometer.ipmi.notifications.ironic.LOG')
def test_sensor_data_malformed(self, mylog):
processor = ipmi.TemperatureSensorNotification(None)
messages = []
mylog.warn = lambda *args: messages.extend(args)
list(processor.process_notification(ipmi_test_data.BAD_SENSOR))
self.assertEqual(
'invalid sensor data for '
'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pci_riser_1_temp_(0x33): '
'unable to parse sensor reading: some bad stuff',
messages[0]
)
@mock.patch('ceilometer.ipmi.notifications.ironic.LOG')
def test_missing_node_uuid(self, mylog):
"""Test for desired error message when 'node_uuid' missing.
Presumably this will never happen given the way the data
is created, but better defensive than dead.
"""
processor = ipmi.TemperatureSensorNotification(None)
messages = []
mylog.warn = lambda *args: messages.extend(args)
list(processor.process_notification(ipmi_test_data.NO_NODE_ID))
self.assertEqual(
'invalid sensor data for missing id: missing key in payload: '
"'node_uuid'",
messages[0]
)
@mock.patch('ceilometer.ipmi.notifications.ironic.LOG')
def test_missing_sensor_id(self, mylog):
"""Test for desired error message when 'Sensor ID' missing."""
processor = ipmi.TemperatureSensorNotification(None)
messages = []
mylog.warn = lambda *args: messages.extend(args)
list(processor.process_notification(ipmi_test_data.NO_SENSOR_ID))
self.assertEqual(
'invalid sensor data for missing id: missing key in payload: '
"'Sensor ID'",
messages[0]
)
|
ChinaMassClouds/copenstack-server
|
openstack/src/ceilometer-2014.2.2/ceilometer/tests/ipmi/notifications/test_ironic.py
|
Python
|
gpl-2.0
| 8,529
|
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import config, sys
hooks = {}
# Datastructures and functions needed before plugins can be loaded
loaded_with_language = False
# Load all login plugins
def load_plugins():
global loaded_with_language
if loaded_with_language == current_language:
return
# Cleanup all registered hooks. They need to be renewed by load_plugins()
# of the other modules
unregister()
# This must be set after plugin loading to make broken plugins raise
# exceptions all the time and not only the first time (when the plugins
# are loaded).
loaded_with_language = current_language
def unregister():
global hooks
hooks = {}
def register(name, func):
hooks.setdefault(name, []).append(func)
def get(name):
return hooks.get(name, [])
def registered(name):
""" Returns True if at least one function is registered for the given hook """
return hooks.get(name, []) != []
def call(name, *args):
n = 0
for hk in hooks.get(name, []):
n += 1
try:
hk(*args)
except Exception, e:
if config.debug:
import traceback, StringIO
txt = StringIO.StringIO()
t, v, tb = sys.exc_info()
traceback.print_exception(t, v, tb, None, txt)
html.show_error("<h1>" + _("Error executing hook") + " %s #%d: %s</h1>"
"<pre>%s</pre>" % (name, n, e, txt.getvalue()))
raise
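# --- Illustrative sketch (assumption, not part of Check_MK) -----------------
# How a plugin module could register a callback for a hypothetical
# "users-saved" hook and how core code would then invoke it through call().
def _example_hook_usage():
    seen = []
    register("users-saved", lambda users: seen.append(len(users)))
    if registered("users-saved"):
        call("users-saved", ["alice", "bob"])
    return seen  # -> [2]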
|
alberts/check_mk
|
web/htdocs/hooks.py
|
Python
|
gpl-2.0
| 2,947
|
print ("Hello World!")
|
hacktoberfest17/programming
|
hello_world/python/hello_world_py3.py
|
Python
|
gpl-3.0
| 23
|
#!/bin/env python
""" Extract and tag References from a PDF.
Created on Mar 1, 2010
@author: John Harrison
Usage: headings.py OPTIONS FILEPATH
OPTIONS:
--help, -h Print help and exit
--noxml Do not tag individual headings with XML tags.
Default is to include tagging.
--title Only print title then exit
--author Only print author then exit
"""
import sys, getopt
from lxml import etree
from utils import UsageError, ConfigError, mean, median
from pdf2xml import pdf2etree
def pdf2heads(opts, args):
xmltag = True
highlight = False
titleonly = False
authonly = False
for o, a in opts:
if (o == '--noxml'):
xmltag = False
elif (o == '--highlight'):
highlight = True
if (o == '--title'):
titleonly = True
elif (o == '--author'):
authonly = True
tree = pdf2etree(args)
# find title
page = 1
block = 1
title_node = None
while True:
try: title_node = tree.xpath("//PAGE[{0}]//BLOCK[{1}]".format(page, block))[0]
except IndexError: page+=1
else: break
if page > 2:
# probably not going to find it now
break
# find author
page = 1
block = 2
auth_node = None
while True:
try: auth_node = tree.xpath("//PAGE[{0}]//BLOCK[{1}]".format(page, block))[0]
except IndexError: block+=1
else: break
if block > 4:
# probably not going to find it now
break
font_sizes = tree.xpath('//TOKEN/@font-size')
mean_font_size = mean(font_sizes)
median_font_size = median(font_sizes)
#print "Median Font Size (i.e. body text):", median_font_size
font_colors = tree.xpath('//TOKEN/@font-color')
font_color_hash = {}
for fc in font_colors:
try:
font_color_hash[fc]+=1
except KeyError:
font_color_hash[fc] = 1
sortlist = [(v,k) for k,v in font_color_hash.iteritems()]
sortlist.sort(reverse=True)
main_font_color = sortlist[0][1]
head_txts = []
stop = False
for page_node in tree.xpath('//PAGE'):
for block_node in page_node.xpath('.//BLOCK'):
if xmltag:
if block_node == title_node:
st = "<title>"
et = "</title>"
elif block_node == auth_node:
st = "<author>"
et = "</author>"
else:
st = "<heading>"
et = "</heading>"
if highlight:
st = "\033[0;32m{0}\033[0m".format(st)
et = "\033[0;32m{0}\033[0m".format(et)
else:
st = et = ""
if block_node == title_node and authonly:
continue
headers = block_node.xpath(".//TOKEN[@font-size > {0} or @bold = 'yes' or @font-color != '{1}']".format(mean_font_size*1.05, main_font_color))
head_txt = ' '.join([etree.tostring(el, method='text', encoding="UTF-8") for el in headers])
if len(head_txt):
head_txts.append("{0}{1}{2}".format(st, head_txt, et))
if block_node == title_node and titleonly:
stop = True
break
elif block_node == auth_node and authonly:
stop = True
break
if stop:
break
for txt in head_txts:
sys.stdout.writelines([txt, '\n'])
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
try:
try:
opts, args = getopt.getopt(argv, "ht", ["help", "test", "noxml", "highlight", "title", "author"])
except getopt.error as msg:
raise UsageError(msg)
for o, a in opts:
if (o in ['-h', '--help']):
# print help and exit
sys.stdout.write(__doc__)
sys.stdout.flush()
return 0
pdf2heads(opts, args)
except UsageError as err:
print >>sys.stderr, err.msg
print >>sys.stderr, "for help use --help"
return 2
except ConfigError as err:
sys.stderr.writelines([str(err.msg),'\n'])
sys.stderr.flush()
return 1
if __name__ == '__main__':
sys.exit(main())
|
valentinedwv/pdfssa4met
|
headings.py
|
Python
|
gpl-3.0
| 4,413
|
test_records = [
[{
"doctype": "Country",
"country_name": "_Test Country"
}]
]
|
gangadhar-kadam/mtn-erpnext
|
setup/doctype/country/test_country.py
|
Python
|
agpl-3.0
| 84
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.26 on 2019-12-13 07:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('programs', '0012_auto_20170419_0018'),
]
operations = [
migrations.CreateModel(
name='CustomProgramsConfig',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('arguments', models.TextField(blank=True, default='', help_text='Useful for manually running a Jenkins job. Specify like "--usernames A B --program-uuids X Y".')),
('changed_by', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='Changed by')),
],
options={
'verbose_name': 'backpopulate_program_credentials argument',
},
),
]
|
stvstnfrd/edx-platform
|
openedx/core/djangoapps/programs/migrations/0013_customprogramsconfig.py
|
Python
|
agpl-3.0
| 1,294
|
#!/usr/bin/python -tt
#
# Copyright (C) 2005-2017 Erik de Castro Lopo <erikd@mega-nerd.com>
#
# Released under the 2 clause BSD license.
"""
This program checks C code for compliance to coding standards used in
libsndfile and other projects I run.
"""
import re
import sys
class Preprocessor:
"""
Preprocess lines of C code to make it easier for the CStyleChecker class to
test for correctness. Preprocessing works on a single line at a time but
maintains state between consecutive lines so it can preprocessess multi-line
comments.
Preprocessing involves:
- Strip C++ style comments from a line.
- Strip C comments from a series of lines. When a C comment starts and
ends on the same line it will be replaced with 'comment'.
- Replace arbitrary C strings with the zero length string.
- Replace '#define f(x)' with '#define f (x)' (The C #define requires that
there be no space between defined macro name and the open paren of the
argument list).
Used by the CStyleChecker class.
"""
def __init__ (self):
self.comment_nest = 0
self.leading_space_re = re.compile ('^(\t+| )')
self.trailing_space_re = re.compile ('(\t+| )$')
self.define_hack_re = re.compile ("(#\s*define\s+[a-zA-Z0-9_]+)\(")
def comment_nesting (self):
"""
Return the current comment nesting. At the start and end of the file,
this value should be zero. Inside C comments it should be 1 or
(possibly) more.
"""
return self.comment_nest
def __call__ (self, line):
"""
Strip the provided line of C and C++ comments. Stripping of multi-line
C comments works as expected.
"""
line = self.define_hack_re.sub (r'\1 (', line)
line = self.process_strings (line)
# Strip C++ style comments.
if self.comment_nest == 0:
line = re.sub ("( |\t*)//.*", '', line)
# Strip C style comments.
open_comment = line.find ('/*')
close_comment = line.find ('*/')
if self.comment_nest > 0 and close_comment < 0:
# Inside a comment block that does not close on this line.
return ""
if open_comment >= 0 and close_comment < 0:
# A comment begins on this line but doesn't close on this line.
self.comment_nest += 1
return self.trailing_space_re.sub ('', line [:open_comment])
if open_comment < 0 and close_comment >= 0:
# Currently open comment ends on this line.
self.comment_nest -= 1
return self.trailing_space_re.sub ('', line [close_comment + 2:])
if open_comment >= 0 and close_comment > 0 and self.comment_nest == 0:
# Comment begins and ends on this line. Replace it with 'comment'
# so we don't need to check whitespace before and after the comment
# we're removing.
newline = line [:open_comment] + "comment" + line [close_comment + 2:]
return self.__call__ (newline)
return line
def process_strings (self, line):
"""
Given a line of C code, return a string where all literal C strings have
been replaced with the empty string literal "".
"""
for k in range (0, len (line)):
if line [k] == '"':
start = k
for k in range (start + 1, len (line)):
if line [k] == '"' and line [k - 1] != '\\':
return line [:start + 1] + '"' + self.process_strings (line [k + 1:])
return line
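# --- Illustrative sketch (assumption, not part of the original script) -------
# Feeding a multi-line C comment through Preprocessor one line at a time: the
# nesting counter rises while the comment is open and drops back to zero once
# the comment closes, and the commented-out text is stripped from the output.
def _example_preprocess ():
    pre = Preprocessor ()
    stripped = [pre (line) for line in
                ["int a ; /* start of a", "   still inside", "   done */ int b ;"]]
    return pre.comment_nesting (), stripped   # nesting is back to 0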
class CStyleChecker:
"""
A class for checking the whitespace and layout of a C code.
"""
def __init__ (self, debug):
self.debug = debug
self.filename = None
self.error_count = 0
self.line_num = 1
self.orig_line = ''
self.trailing_newline_re = re.compile ('[\r\n]+$')
self.indent_re = re.compile ("^\s*")
self.last_line_indent = ""
self.last_line_indent_curly = False
self.re_checks = \
[ ( re.compile (" "), "multiple space instead of tab" )
, ( re.compile ("\t "), "space after tab" )
, ( re.compile ("[^ ];"), "missing space before semi-colon" )
, ( re.compile ("{[^\s}]"), "missing space after open brace" )
, ( re.compile ("[^{\s]}"), "missing space before close brace" )
, ( re.compile ("[ \t]+$"), "contains trailing whitespace" )
, ( re.compile (",[^\s\n]"), "missing space after comma" )
, ( re.compile (";[^\s]"), "missing space after semi-colon" )
, ( re.compile ("=[^\s\"'=]"), "missing space after assignment" )
# Open and close parenthesis.
, ( re.compile ("[^\s\(\[\*&']\("), "missing space before open parenthesis" )
, ( re.compile ("\)(-[^>]|[^,'\s\n\)\]-])"), "missing space after close parenthesis" )
, ( re.compile ("\s(do|for|if|when)\s.*{$"), "trailing open parenthesis at end of line" )
, ( re.compile ("\( [^;]"), "space after open parenthesis" )
, ( re.compile ("[^;] \)"), "space before close parenthesis" )
# Open and close square brace.
, ( re.compile ("[^\s\(\]]\["), "missing space before open square brace" )
, ( re.compile ("\][^,\)\]\[\s\.-]"), "missing space after close square brace" )
, ( re.compile ("\[ "), "space after open square brace" )
, ( re.compile (" \]"), "space before close square brace" )
# Space around operators.
, ( re.compile ("[^\s][\*/%+-][=][^\s]"), "missing space around opassign" )
, ( re.compile ("[^\s][<>!=^/][=]{1,2}[^\s]"), "missing space around comparison" )
# Parens around single argument to return.
, ( re.compile ("\s+return\s+\([a-zA-Z0-9_]+\)\s+;"), "parens around return value" )
# Parens around single case argument.
, ( re.compile ("\s+case\s+\([a-zA-Z0-9_]+\)\s+:"), "parens around single case argument" )
# Open curly at end of line.
, ( re.compile ("\)\s*{\s*$"), "open curly brace at end of line" )
		# Pre and post increment/decrement.
, ( re.compile ("[^\(\[][+-]{2}[a-zA-Z0-9_]"), "space after pre increment/decrement" )
, ( re.compile ("[a-zA-Z0-9_][+-]{2}[^\)\,]]"), "space before post increment/decrement" )
]
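		# For example (illustrative, not part of the original script), a line such
		# as 'x=1;' would be reported for a missing space after the assignment, a
		# missing space before the semi-colon, and (by the operator check in
		# line_checks below) a missing space around the '=' operator.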
def get_error_count (self):
"""
Return the current error count for this CStyleChecker object.
"""
return self.error_count
def check_files (self, files):
"""
Run the style checker on all the specified files.
"""
for filename in files:
self.check_file (filename)
def check_file (self, filename):
"""
Run the style checker on the specified file.
"""
self.filename = filename
cfile = open (filename, "r")
self.line_num = 1
preprocess = Preprocessor ()
while 1:
line = cfile.readline ()
if not line:
break
line = self.trailing_newline_re.sub ('', line)
self.orig_line = line
self.line_checks (preprocess (line))
self.line_num += 1
cfile.close ()
self.filename = None
# Check for errors finding comments.
if preprocess.comment_nesting () != 0:
print ("Weird, comments nested incorrectly.")
sys.exit (1)
return
def line_checks (self, line):
"""
Run the style checker on provided line of text, but within the context
of how the line fits within the file.
"""
indent = len (self.indent_re.search (line).group ())
if re.search ("^\s+}", line):
if not self.last_line_indent_curly and indent != self.last_line_indent:
				pass # self.error ("bad indent on close curly brace")
self.last_line_indent_curly = True
else:
self.last_line_indent_curly = False
# Now all the regex checks.
for (check_re, msg) in self.re_checks:
if check_re.search (line):
self.error (msg)
if re.search ("[a-zA-Z0-9][<>!=^/&\|]{1,2}[a-zA-Z0-9]", line):
if not re.search (".*#include.*[a-zA-Z0-9]/[a-zA-Z]", line):
self.error ("missing space around operator")
self.last_line_indent = indent
return
def error (self, msg):
"""
Print an error message and increment the error count.
"""
print ("%s (%d) : %s" % (self.filename, self.line_num, msg))
if self.debug:
print ("'" + self.orig_line + "'")
self.error_count += 1
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
if len (sys.argv) < 2:
	print ("Usage : cstyle.py [-d | --debug] <C source files>")
	sys.exit (1)
# Create a new CStyleChecker object
if sys.argv [1] == '-d' or sys.argv [1] == '--debug':
cstyle = CStyleChecker (True)
cstyle.check_files (sys.argv [2:])
else:
cstyle = CStyleChecker (False)
cstyle.check_files (sys.argv [1:])
if cstyle.get_error_count ():
sys.exit (1)
sys.exit (0)
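# Example invocation (illustrative, not part of the original script):
#     python cstyle.py --debug src/*.c
# Each violation is printed as "<file> (<line>) : <message>" and the script
# exits non-zero if any violations were found.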
|
jniemann66/ReSampler
|
android/src/libsndfile/Scripts/cstyle.py
|
Python
|
lgpl-2.1
| 8,137
|
# -*- Mode:Python -*-
##########################################################################
# #
# This file is part of AVANGO. #
# #
# Copyright 1997 - 2009 Fraunhofer-Gesellschaft zur Foerderung der #
# angewandten Forschung (FhG), Munich, Germany. #
# #
# AVANGO is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Lesser General Public License as #
# published by the Free Software Foundation, version 3. #
# #
# AVANGO is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. #
# #
##########################################################################
import avango.osg
import avango.osg.simpleviewer
import avango.script
from elasticnodes import *
import sys
import random
from PySide import QtCore, QtGui
#from PyQt4 import QtCore, QtGui
class TreeItem:
def __init__(self, data, parent=None):
self.parentItem = parent
self.itemData = data
self.childItems = []
def appendChild(self, item):
self.childItems.append(item)
def child(self, row):
return self.childItems[row]
def childCount(self):
return len(self.childItems)
def columnCount(self):
return len(self.itemData)
def data(self, column):
return self.itemData[column]
def parent(self):
return self.parentItem
def row(self):
if self.parentItem:
return self.parentItem.childItems.index(self)
return 0
class TreeModel(QtCore.QAbstractItemModel):
def __init__(self, columnDesc, parent=None):
QtCore.QAbstractItemModel.__init__(self, parent)
self.columnDesc = []
for desc in columnDesc:
self.columnDesc.append(QtCore.QVariant(desc))
self.rootItem = TreeItem(self.columnDesc)
def clear(self):
self.rootItem = TreeItem(self.columnDesc)
def columnCount(self, parent):
if parent.isValid():
return parent.internalPointer().columnCount()
else:
return self.rootItem.columnCount()
def data(self, index, role):
if not index.isValid():
return QtCore.QVariant()
if role != QtCore.Qt.DisplayRole:
return QtCore.QVariant()
item = index.internalPointer()
return QtCore.QVariant(item.data(index.column()))
def flags(self, index):
if not index.isValid():
return QtCore.Qt.ItemIsEnabled
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
def headerData(self, section, orientation, role):
if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
return self.rootItem.data(section)
return QtCore.QVariant()
def index(self, row, column, parent):
if row < 0 or column < 0 or row >= self.rowCount(parent) or column >= self.columnCount(parent):
return QtCore.QModelIndex()
if not parent.isValid():
parentItem = self.rootItem
else:
parentItem = parent.internalPointer()
childItem = parentItem.child(row)
if childItem:
return self.createIndex(row, column, childItem)
else:
return QtCore.QModelIndex()
def parent(self, index):
if not index.isValid():
return QtCore.QModelIndex()
childItem = index.internalPointer()
parentItem = childItem.parent()
if parentItem == self.rootItem:
return QtCore.QModelIndex()
return self.createIndex(parentItem.row(), 0, parentItem)
def rowCount(self, parent):
if parent.column() > 0:
return 0
if not parent.isValid():
parentItem = self.rootItem
else:
parentItem = parent.internalPointer()
return parentItem.childCount()
# def setupModelData(self, rootNode):
# print "setupModelData"
#
# def update_tree_model(node,tree_item_parent):
# for i in xrange(node._get_num_fields()):
# name = node._get_field_name(i)
# field = node._get_field(i)
# value = field.value
# if name != "Children":
# continue
#
# for child in value:
# new_parent_node = TreeItem([child.Name.value, name],tree_item_parent)
# tree_item_parent.appendChild(new_parent_node)
# update_tree_model(child,new_parent_node)
#
#
# for child in rootNode.value:
# new_parent_node = TreeItem([child.Name.value, "RootChild"],self.rootItem)
# self.rootItem.appendChild(new_parent_node)
# update_tree_model(child, new_parent_node)
#
# self.reset()
class NodeTreeModel(TreeModel):
def __init__(self, columnDesc, parent=None):
super(NodeTreeModel,self).__init__(columnDesc,parent)
def setupModelData(self, rootNode):
print "setupModelData"
def update_tree_model(node,tree_item_parent):
for i in xrange(node._get_num_fields()):
name = node._get_field_name(i)
field = node._get_field(i)
value = field.value
if name != "Children":
continue
for child in value:
new_parent_node = TreeItem([child.Name.value, name],tree_item_parent)
tree_item_parent.appendChild(new_parent_node)
update_tree_model(child,new_parent_node)
for child in rootNode.value:
new_parent_node = TreeItem([child.Name.value, "RootChild"],self.rootItem)
self.rootItem.appendChild(new_parent_node)
update_tree_model(child, new_parent_node)
self.reset()
def createGraphNodes(rootNode,graphWidget):
def update_tree_model(node, tree_item_parent, graphWidget, items):
for i in xrange(node._get_num_fields()):
name = node._get_field_name(i)
field = node._get_field(i)
value = field.value
if name != "Children":
continue
for child in value:
#create new node
actual_node = Node(graphWidget, child.Name.value, child)
random.seed(42)
x = random.randint(0,75)
y = random.randint(0,75)
actual_node.setPos(x, y)
#add to list
items.append(actual_node)
items.append(Edge(tree_item_parent, actual_node))
update_tree_model(child,actual_node,graphWidget,items)
items = []
parent_node = Node(graphWidget,"Root",rootNode)
parent_node.setPos(50, 50)
items.append(parent_node)
for child in rootNode.value:
update_tree_model(child, parent_node, graphWidget, items)
return items
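# Usage sketch (illustrative, not part of the original file): Node, Edge and
# GraphWidget appear to come from the elasticnodes star-import above, so the
# items produced here are expected to be handed to a GraphWidget roughly like
# this (variable names are hypothetical):
#
#     graph_widget = GraphWidget()
#     items = createGraphNodes(root_children_field, graph_widget)
#     graph_widget.addItems(items)
#
# which is how ScenegraphTreeNodeWidget.updateNodeTreeWidget below uses it.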
class ScenegraphTreeNodeWidget(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.createNodeWidgets()
self.fieldsWidget = QtGui.QWidget()
self.fieldsLayout = QtGui.QVBoxLayout()
self.fieldsWidget.setLayout(self.fieldsLayout)
self.createFieldWidgets()
self.globalLayout = QtGui.QHBoxLayout()
self.globalLayout.addWidget(self.nodesWidget)
self.globalLayout.addWidget(self.fieldsWidget)
self.setLayout(self.globalLayout)
def createNodeWidgets(self):
self.nodeTabWidget = QtGui.QTabWidget()
self.nodesWidget = QtGui.QWidget()
self.nodesLayout = QtGui.QVBoxLayout()
self.nodesWidget.setLayout(self.nodesLayout)
self.nodeRefresh = QtGui.QPushButton("Refresh", self.nodesWidget)
self.nodeGraphView = QtGui.QTreeView()
self.nodeModel = NodeTreeModel(["Nodes"])
self.nodeGraphView.setModel(self.nodeModel)
self.graphWidget = GraphWidget()
QtGui.QWidget.connect(self.graphWidget,
QtCore.SIGNAL("nodeSelected"),
self.updateFieldWidget)
#add widgets to the tab widget
self.nodeTabWidget.addTab(self.graphWidget,"Spring graph view")
self.nodeTabWidget.addTab(self.nodeGraphView,"Tree view")
self.nodesLayout.addWidget(self.nodeTabWidget)
self.nodesLayout.addWidget(self.nodeRefresh)
#self.nodesLayout.addWidget(self.graphWidget)
self.scene = self.graphWidget.scene
def createFieldWidgets(self):
self.fieldsGraphView = QtGui.QTreeView()
self.fieldsModel = QtGui.QStandardItemModel(0, 2)
self.fieldsModel.setHeaderData(0, QtCore.Qt.Horizontal, QtCore.QVariant("Field"))
self.fieldsModel.setHeaderData(1, QtCore.Qt.Horizontal, QtCore.QVariant("Value"))
self.fieldsGraphView.setModel(self.fieldsModel)
self.fieldsGraphView.setWindowTitle("Fields")
self.fieldsLayout.addWidget(self.fieldsGraphView)
def updateNodeTreeWidget(self, rootNode):
#update the tree view
self.nodeModel.clear()
self.nodeModel.setupModelData(rootNode)
self.nodeGraphView.expandAll()
#update the graph view
self.graphWidget.removeAllItems()
#add the new items
items = createGraphNodes(rootNode,self.graphWidget)
self.graphWidget.addItems(items)
def updateFieldWidget(self, node):
self.fieldsModel.removeRows(0,self.fieldsModel.rowCount())
self.fieldsModel.setRowCount(node._get_num_fields())
for i in xrange(node._get_num_fields()):
name = node._get_field_name(i)
field = node._get_field(i)
value = field.value
self.fieldsModel.setData(self.fieldsModel.index(i, 0), QtCore.QVariant(str(name)))
self.fieldsModel.setData(self.fieldsModel.index(i, 1), QtCore.QVariant(str(value)))
class EditWidget(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.textDisplayWidget = QtGui.QTextEdit()
self.textDisplayWidget.setReadOnly(True)
self.textEditWidget = QtGui.QLineEdit()
self.mainLayout = QtGui.QVBoxLayout()
self.setLayout(self.mainLayout)
self.mainLayout.addWidget(self.textDisplayWidget)
self.mainLayout.addWidget(self.textEditWidget)
class QTInspector(avango.script.Script):
Children = avango.MFContainer()
def __init__(self):
self.always_evaluate(True)
self.app = QtGui.QApplication(sys.argv)
self.mainWidget = QtGui.QWidget()
self.mainLayout = QtGui.QVBoxLayout()
self.mainWidget.setLayout(self.mainLayout)
self.sceneGraphWidget = ScenegraphTreeNodeWidget()
self.editWidget = EditWidget()
self.mainLayout.addWidget(self.sceneGraphWidget)
self.mainLayout.addWidget(self.editWidget)
#Refresh nodes
QtGui.QWidget.connect(self.sceneGraphWidget.nodeRefresh,
QtCore.SIGNAL("clicked()"),
self.updateTreeNodeModel)
#Node selected
QtGui.QWidget.connect(self.sceneGraphWidget.nodeGraphView,
QtCore.SIGNAL("activated(QModelIndex)"),
self.updateFieldModel)
self.mainWindow = QtGui.QMainWindow()
self.mainWindow.setCentralWidget(self.mainWidget)
self.mainWindow.show()
@avango.script.field_has_changed(Children)
def children_changed(self):
self.updateTreeNodeModel()
def evaluate(self):
if self.app.hasPendingEvents():
self.app.processEvents()
def updateTreeNodeModel(self):
print "updateTreeNodeModel"
self.sceneGraphWidget.updateNodeTreeWidget(self.Children)
def updateFieldModel(self,index):
print "activate index: " + str(index.row()) + " " + str(index.data())
root = avango.osg.nodes.Group(Name="RootNode")
sphere1 = avango.osg.nodes.Sphere(Name="RedSphere", Color=avango.osg.Vec4(1., 0., 0., 1))
sphere1.Matrix.value = avango.osg.make_trans_mat(1, 0, 0)
sphere1b = avango.osg.nodes.Sphere(Name="RedGreenSphere", Color=avango.osg.Vec4(1., -1., 0., 1))
sphere1b.Matrix.value = avango.osg.make_trans_mat(-1, 0, 0)
sphere1.Children.value = [ sphere1b]
sphere2 = avango.osg.nodes.Sphere(Name="GreenSphere", Color=avango.osg.Vec4(0., 1., 0., 1))
sphere2.Matrix.value = avango.osg.make_trans_mat(-1, 0, 0)
root.Children.value = [ sphere1, sphere2 ]
inspector = QTInspector(Children=[root])
avango.osg.simpleviewer.run(root)
|
vrsys/avangong
|
examples/inspector/inspector_qt.py
|
Python
|
lgpl-3.0
| 13,470
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Represents a lexographic range of namespaces."""
# pylint: disable=g-bad-name
__all__ = [
'NAMESPACE_CHARACTERS',
'MAX_NAMESPACE_LENGTH',
'MAX_NAMESPACE',
'MIN_NAMESPACE',
'NAMESPACE_BATCH_SIZE',
'NamespaceRange',
'get_namespace_keys',
]
import itertools
import string
from google.appengine.api import datastore
from google.appengine.ext import db
from google.appengine.ext.db import metadata
NAMESPACE_CHARACTERS = ''.join(sorted(string.digits +
string.lowercase +
string.uppercase +
'._-'))
MAX_NAMESPACE_LENGTH = 100
MIN_NAMESPACE = ''
NAMESPACE_BATCH_SIZE = 50
def _setup_constants(alphabet=NAMESPACE_CHARACTERS,
max_length=MAX_NAMESPACE_LENGTH,
batch_size=NAMESPACE_BATCH_SIZE):
"""Calculate derived constant values. Only useful for testing."""
global NAMESPACE_CHARACTERS
global MAX_NAMESPACE_LENGTH
global MAX_NAMESPACE
global _LEX_DISTANCE
global NAMESPACE_BATCH_SIZE
NAMESPACE_CHARACTERS = alphabet
MAX_NAMESPACE_LENGTH = max_length
MAX_NAMESPACE = NAMESPACE_CHARACTERS[-1] * MAX_NAMESPACE_LENGTH
NAMESPACE_BATCH_SIZE = batch_size
# _LEX_DISTANCE will contain the lexical distance between two adjacent
# characters in NAMESPACE_CHARACTERS at each character index. This is used
# to calculate the ordinal for each string. Example:
# NAMESPACE_CHARACTERS = 'ab'
# MAX_NAMESPACE_LENGTH = 3
# _LEX_DISTANCE = [1, 3, 7]
# '' => 0
# 'a' => 1
# 'aa' => 2
# 'aaa' => 3
# 'aab' => 4 - Distance between 'aaa' and 'aab' is 1.
# 'ab' => 5 - Distance between 'aa' and 'ab' is 3.
# 'aba' => 6
# 'abb' => 7
# 'b' => 8 - Distance between 'a' and 'b' is 7.
# 'ba' => 9
# 'baa' => 10
# 'bab' => 11
# ...
# _namespace_to_ord('bab') = (1 * 7 + 1) + (0 * 3 + 1) + (1 * 1 + 1) = 11
_LEX_DISTANCE = [1]
for i in range(1, MAX_NAMESPACE_LENGTH):
_LEX_DISTANCE.append(
_LEX_DISTANCE[i-1] * len(NAMESPACE_CHARACTERS) + 1)
del i
_setup_constants()
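# Illustrative check (not part of the original module): the toy alphabet from
# the comment above can be used to verify the ordinal arithmetic with the
# helpers defined below, e.g.
#
#     _setup_constants(alphabet='ab', max_length=3)
#     _namespace_to_ord('bab')   # == 11
#     _ord_to_namespace(11)      # == 'bab'
#     _setup_constants()         # restore the real constants afterwards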
def _ord_to_namespace(n, _max_length=None):
"""Convert a namespace ordinal to a namespace string.
  Converts an int, representing the sequence number of a namespace ordered
  lexicographically, into a namespace string.
>>> _ord_to_namespace(0)
''
>>> _ord_to_namespace(1)
'-'
>>> _ord_to_namespace(2)
'--'
>>> _ord_to_namespace(3)
'---'
Args:
    n: A number representing the lexicographical ordering of a namespace.
  Returns:
    A string representing the nth namespace in lexicographical order.
"""
if _max_length is None:
_max_length = MAX_NAMESPACE_LENGTH
length = _LEX_DISTANCE[_max_length - 1]
if n == 0:
return ''
n -= 1
return (NAMESPACE_CHARACTERS[n / length] +
_ord_to_namespace(n % length, _max_length - 1))
def _namespace_to_ord(namespace):
"""Converts a namespace string into an int representing its lexographic order.
>>> _namespace_to_ord('')
''
>>> _namespace_to_ord('_')
1
>>> _namespace_to_ord('__')
2
Args:
namespace: A namespace string.
Returns:
    An int representing the lexicographical order of the given namespace string.
"""
n = 0
for i, c in enumerate(namespace):
n += (_LEX_DISTANCE[MAX_NAMESPACE_LENGTH - i- 1] *
NAMESPACE_CHARACTERS.index(c)
+ 1)
return n
def _key_for_namespace(namespace, app):
"""Return the __namespace__ key for a namespace.
Args:
namespace: The namespace whose key is requested.
app: The id of the application that the key belongs to.
Returns:
A db.Key representing the namespace.
"""
if namespace:
return db.Key.from_path(metadata.Namespace.KIND_NAME,
namespace,
_app=app)
else:
return db.Key.from_path(metadata.Namespace.KIND_NAME,
metadata.Namespace.EMPTY_NAMESPACE_ID,
_app=app)
class NamespaceRange(object):
"""An inclusive lexographical range of namespaces.
This class is immutable.
"""
def __init__(self,
namespace_start=None,
namespace_end=None,
_app=None):
"""Initializes a NamespaceRange instance.
Args:
namespace_start: A string representing the start of the namespace range.
namespace_start is included in the range. If namespace_start is None
          then the lexicographically first namespace is used.
namespace_end: A string representing the end of the namespace range.
namespace_end is included in the range and must be >= namespace_start.
          If namespace_end is None then the lexicographically last namespace is
used.
Raises:
ValueError: if namespace_start > namespace_end.
"""
if namespace_start is None:
namespace_start = MIN_NAMESPACE
if namespace_end is None:
namespace_end = MAX_NAMESPACE
if namespace_start > namespace_end:
raise ValueError('namespace_start (%r) > namespace_end (%r)' % (
namespace_start, namespace_end))
self.__namespace_start = namespace_start
self.__namespace_end = namespace_end
self.__app = _app
@property
def app(self):
return self.__app
@property
def namespace_start(self):
return self.__namespace_start
@property
def namespace_end(self):
return self.__namespace_end
@property
def is_single_namespace(self):
"""True if the namespace range only includes a single namespace."""
return self.namespace_start == self.namespace_end
def split_range(self):
"""Splits the NamespaceRange into two nearly equal-sized ranges.
Returns:
If this NamespaceRange contains a single namespace then a list containing
this NamespaceRange is returned. Otherwise a two-element list containing
two NamespaceRanges whose total range is identical to this
NamespaceRange's is returned.
"""
if self.is_single_namespace:
return [self]
mid_point = (_namespace_to_ord(self.namespace_start) +
_namespace_to_ord(self.namespace_end)) // 2
return [NamespaceRange(self.namespace_start,
_ord_to_namespace(mid_point),
_app=self.app),
NamespaceRange(_ord_to_namespace(mid_point+1),
self.namespace_end,
_app=self.app)]
def __copy__(self):
return self.__class__(self.__namespace_start,
self.__namespace_end,
self.__app)
def __eq__(self, o):
return (self.namespace_start == o.namespace_start and
self.namespace_end == o.namespace_end)
def __hash__(self):
return hash((self.namespace_start, self.namespace_end, self.app))
def __repr__(self):
if self.app is None:
return 'NamespaceRange(namespace_start=%r, namespace_end=%r)' % (
self.namespace_start, self.namespace_end)
else:
return 'NamespaceRange(namespace_start=%r, namespace_end=%r, _app=%r)' % (
self.namespace_start, self.namespace_end, self.app)
def with_start_after(self, after_namespace):
"""Returns a copy of this NamespaceName with a new namespace_start.
Args:
after_namespace: A namespace string.
Returns:
      A NamespaceRange object whose namespace_start is the lexicographically next
namespace after the given namespace string.
Raises:
ValueError: if the NamespaceRange includes only a single namespace.
"""
namespace_start = _ord_to_namespace(_namespace_to_ord(after_namespace) + 1)
return NamespaceRange(namespace_start, self.namespace_end, _app=self.app)
def make_datastore_query(self, cursor=None):
"""Returns a datastore.Query that generates all namespaces in the range.
Args:
cursor: start cursor for the query.
Returns:
A datastore.Query instance that generates db.Keys for each namespace in
the NamespaceRange.
"""
filters = {}
filters['__key__ >= '] = _key_for_namespace(
self.namespace_start, self.app)
filters['__key__ <= '] = _key_for_namespace(
self.namespace_end, self.app)
return datastore.Query('__namespace__',
filters=filters,
keys_only=True,
cursor=cursor,
_app=self.app)
def normalized_start(self):
"""Returns a NamespaceRange with leading non-existant namespaces removed.
Returns:
A copy of this NamespaceRange whose namespace_start is adjusted to exclude
the portion of the range that contains no actual namespaces in the
datastore. None is returned if the NamespaceRange contains no actual
namespaces in the datastore.
"""
namespaces_after_key = list(self.make_datastore_query().Run(limit=1))
if not namespaces_after_key:
return None
namespace_after_key = namespaces_after_key[0].name() or ''
return NamespaceRange(namespace_after_key,
self.namespace_end,
_app=self.app)
def to_json_object(self):
"""Returns a dict representation that can be serialized to JSON."""
obj_dict = dict(namespace_start=self.namespace_start,
namespace_end=self.namespace_end)
if self.app is not None:
obj_dict['app'] = self.app
return obj_dict
@classmethod
def from_json_object(cls, json):
"""Returns a NamespaceRange from an object deserialized from JSON."""
return cls(json['namespace_start'],
json['namespace_end'],
_app=json.get('app'))
# TODO(user): Implement an option where the returned namespace range is
# not normalized using with_start_after to support consistent namespace
# queries.
@classmethod
def split(cls,
n,
contiguous,
can_query=itertools.chain(itertools.repeat(True, 50),
itertools.repeat(False)).next,
_app=None):
"""Splits the complete NamespaceRange into n equally-sized NamespaceRanges.
Args:
n: The maximum number of NamespaceRanges to return. Fewer than n
namespaces may be returned.
contiguous: If True then the returned NamespaceRanges will cover the
entire space of possible namespaces (i.e. from MIN_NAMESPACE to
MAX_NAMESPACE) without gaps. If False then the returned
NamespaceRanges may exclude namespaces that don't appear in the
datastore.
can_query: A function that returns True if split() can query the datastore
to generate more fair namespace range splits, and False otherwise.
If not set then split() is allowed to make 50 datastore queries.
Returns:
A list of at most n NamespaceRanges representing a near-equal distribution
      of actual existing datastore namespaces. The returned list will be sorted
      lexicographically.
Raises:
ValueError: if n is < 1.
"""
if n < 1:
raise ValueError('n must be >= 1')
ns_range = NamespaceRange(_app=_app)
if can_query():
ns_range = ns_range.normalized_start()
if ns_range is None:
if contiguous:
return [NamespaceRange(_app=_app)]
else:
return []
ranges = [ns_range]
singles = []
while ranges and (len(ranges) + len(singles)) < n:
namespace_range = ranges.pop(0)
if namespace_range.is_single_namespace:
singles.append(namespace_range)
else:
left, right = namespace_range.split_range()
if can_query():
right = right.normalized_start()
if right is not None:
ranges.append(right)
ranges.append(left)
ns_ranges = sorted(singles + ranges,
key=lambda ns_range: ns_range.namespace_start)
if contiguous:
if not ns_ranges:
# This condition is possible if every namespace was deleted after the
# first call to ns_range.normalized_start().
return [NamespaceRange(_app=_app)]
continuous_ns_ranges = []
for i in range(len(ns_ranges)):
if i == 0:
namespace_start = MIN_NAMESPACE
else:
namespace_start = ns_ranges[i].namespace_start
if i == len(ns_ranges) - 1:
namespace_end = MAX_NAMESPACE
else:
namespace_end = _ord_to_namespace(
_namespace_to_ord(ns_ranges[i+1].namespace_start) - 1)
continuous_ns_ranges.append(NamespaceRange(namespace_start,
namespace_end,
_app=_app))
return continuous_ns_ranges
else:
return ns_ranges
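  # Usage sketch (illustrative, not part of the original module):
  #
  #     ranges = NamespaceRange.split(n=4, contiguous=True)
  #
  # returns at most four NamespaceRange objects that together cover the whole
  # MIN_NAMESPACE..MAX_NAMESPACE space, issuing up to 50 metadata queries (the
  # default can_query budget) to bias the split points towards namespaces that
  # actually exist in the datastore.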
def __iter__(self):
"""Iterate over all the namespaces within this range."""
cursor = None
while True:
query = self.make_datastore_query(cursor=cursor)
count = 0
for ns_key in query.Run(limit=NAMESPACE_BATCH_SIZE):
count += 1
yield ns_key.name() or ''
if count < NAMESPACE_BATCH_SIZE:
break
cursor = query.GetCursor()
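# Iteration sketch (illustrative, not part of the original module): a
# NamespaceRange is itself iterable, so listing every namespace in the default
# full range is simply:
#
#     for name in NamespaceRange():
#         print name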
def get_namespace_keys(app, limit):
"""Get namespace keys."""
ns_query = datastore.Query('__namespace__', keys_only=True, _app=app)
return list(ns_query.Run(limit=limit))
|
rolepoint/appengine-mapreduce
|
python/src/mapreduce/namespace_range.py
|
Python
|
apache-2.0
| 14,002
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ops used with embeddings."""
import itertools
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import compat
def _AsLong(array):
"""Casts arrays elements to long type. Used to convert from numpy tf."""
return [int(x) for x in array]
class ScatterAddSubTest(test.TestCase):
def _TestCase(self, shape, indices, scatter_op=state_ops.scatter_add):
"""Run a random test case with the given shape and indices.
Args:
shape: Shape of the parameters array.
indices: One-dimensional array of ints, the indices of the last dimension
of the parameters to update.
scatter_op: ScatterAdd or ScatterSub.
"""
super(ScatterAddSubTest, self).setUp()
with self.cached_session(use_gpu=False):
# Create a random parameter array of given shape
p_init = np.random.rand(*shape).astype("f")
# Create the shape of the update array. All dimensions except the last
# match the parameter array, the last dimension equals the # of indices.
vals_shape = [len(indices)] + shape[1:]
vals_init = np.random.rand(*vals_shape).astype("f")
v_i = [float(x) for x in vals_init.ravel()]
p = variables.Variable(p_init)
vals = constant_op.constant(v_i, shape=vals_shape, name="vals")
ind = constant_op.constant(indices, dtype=dtypes.int32)
p2 = scatter_op(p, ind, vals, name="updated_p")
# p = init
self.evaluate(variables.global_variables_initializer())
# p += vals
result = self.evaluate(p2)
# Compute the expected 'p' using numpy operations.
for i, ind in enumerate(indices):
if scatter_op == state_ops.scatter_add:
p_init.reshape(shape[0], -1)[ind, :] += (vals_init.reshape(
vals_shape[0], -1)[i, :])
else:
p_init.reshape(shape[0], -1)[ind, :] -= (vals_init.reshape(
vals_shape[0], -1)[i, :])
self.assertTrue(all((p_init == result).ravel()))
@test_util.run_deprecated_v1
def testNoRepetitions(self):
self._TestCase([2, 2], [1])
self._TestCase([4, 4, 4], [2, 0])
self._TestCase([43, 20, 10, 10], [42, 5, 6, 1, 3, 5, 7, 9])
@test_util.run_deprecated_v1
def testWithRepetitions(self):
self._TestCase([2, 2], [1, 1])
self._TestCase([5, 3, 9, 5], [2, 0, 4, 1, 3, 1, 4, 0, 4, 3])
self._TestCase([32, 4, 4], [31] * 8)
@test_util.run_deprecated_v1
def testRandom(self):
# Random shapes of rank 4, random indices
for _ in range(5):
shape = np.random.randint(1, 20, size=4)
indices = np.random.randint(shape[0], size=2 * shape[0])
self._TestCase(_AsLong(list(shape)), list(indices))
@test_util.run_deprecated_v1
def testSubRandom(self):
# Random shapes of rank 4, random indices
for _ in range(5):
shape = np.random.randint(1, 20, size=4)
indices = np.random.randint(shape[0], size=2 * shape[0])
self._TestCase(_AsLong(list(shape)), list(indices), state_ops.scatter_sub)
@test_util.run_deprecated_v1
def testWrongShape(self):
# Indices and values mismatch.
var = variables.Variable(
array_ops.zeros(shape=[1024, 64, 64], dtype=dtypes.float32))
indices = array_ops.placeholder(dtypes.int32, shape=[32])
values = array_ops.placeholder(dtypes.float32, shape=[33, 64, 64])
with self.assertRaises(ValueError):
state_ops.scatter_add(var, indices, values)
# Var and values mismatch.
values = array_ops.placeholder(dtypes.float32, shape=[32, 64, 63])
with self.assertRaises(ValueError):
state_ops.scatter_add(var, indices, values)
def _PName(param_id):
return "p" + str(param_id)
def _EmbeddingParams(num_shards,
vocab_size,
dtype=dtypes.float32,
shape=None,
use_shapeless_placeholder=False):
p = []
params = {}
feed_dict = {}
if not shape:
shape = [10]
for i in range(num_shards):
shard_shape = [vocab_size // num_shards] + shape
if i < vocab_size % num_shards: # Excess goes evenly on the first shards
shard_shape[0] += 1
param_name = _PName(i)
if use_shapeless_placeholder:
param = array_ops.placeholder(dtype, shape=None, name=param_name)
else:
param = constant_op.constant(
1.0, shape=shard_shape, dtype=dtype, name=param_name)
p.append(param)
np_type = "f" if dtype == dtypes.float32 else "d"
val = (np.random.rand(*shard_shape).astype(np_type)) + 1
params[param_name + ":0"] = val
feed_dict[param.name] = val
return p, params, feed_dict
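# Shape sketch (illustrative, not part of the original test file): with the
# defaults above, _EmbeddingParams(num_shards=3, vocab_size=10) builds three
# shards of shapes [4, 10], [3, 10] and [3, 10] (the one excess row goes to the
# first shard) and returns random feed values keyed by the tensor names
# "p0:0", "p1:0" and "p2:0".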
def _EmbeddingParamsAsPartitionedVariable(num_shards,
vocab_size,
dtype=dtypes.float32,
shape=None,
use_resource=False):
p, params, feed_dict = _EmbeddingParams(
num_shards, vocab_size, dtype=dtype, shape=shape)
shape = shape or [10]
partitioned_variable = variable_scope.get_variable(
"p",
shape=[vocab_size] + shape,
initializer=array_ops.concat([params[p_i.name] for p_i in p], 0),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=num_shards, min_slice_size=1),
use_resource=use_resource)
return p, partitioned_variable, params, feed_dict
def _EmbeddingResult(params,
id_vals,
num_shards,
vocab_size,
partition_strategy="mod",
weight_vals=None):
if weight_vals is None:
weight_vals = np.copy(id_vals)
weight_vals.fill(1)
values = []
weights = []
weights_squared = []
for ids, wts in zip(id_vals, weight_vals):
value_aggregation = None
weight_aggregation = None
squared_weight_aggregation = None
if isinstance(ids, compat.integral_types):
ids = [ids]
wts = [wts]
for i, weight_value in zip(ids, wts):
if partition_strategy == "mod":
val = np.copy(params[_PName(i % num_shards) + ":0"][
i // num_shards, :]) * weight_value
elif partition_strategy == "div":
ids_per_partition, extras = divmod(vocab_size, num_shards)
threshold = extras * (ids_per_partition + 1)
if i < threshold:
partition = i // (ids_per_partition + 1)
offset = i % (ids_per_partition + 1)
else:
partition = extras + (i - threshold) // ids_per_partition
offset = (i - threshold) % ids_per_partition
val = np.copy(
params[_PName(partition) + ":0"][offset, :]) * weight_value
else:
assert False
if value_aggregation is None:
assert weight_aggregation is None
assert squared_weight_aggregation is None
value_aggregation = val
weight_aggregation = weight_value
squared_weight_aggregation = weight_value * weight_value
else:
assert weight_aggregation is not None
assert squared_weight_aggregation is not None
value_aggregation += val
weight_aggregation += weight_value
squared_weight_aggregation += weight_value * weight_value
values.append(value_aggregation)
weights.append(weight_aggregation)
weights_squared.append(squared_weight_aggregation)
values = np.array(values).astype(np.float32)
weights = np.array(weights).astype(np.float32)
weights_squared = np.array(weights_squared).astype(np.float32)
return values, weights, weights_squared
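# Partitioning sketch (illustrative, not part of the original test file): for
# vocab_size=5 and num_shards=2 the two strategies mirrored above place ids as
#
#     "mod": id i lives in shard i % 2 at row i // 2
#            -> shard 0 holds ids 0, 2, 4 and shard 1 holds ids 1, 3
#     "div": contiguous blocks, with the excess on the first shards
#            -> shard 0 holds ids 0, 1, 2 and shard 1 holds ids 3, 4
#
# which matches what embedding_ops.embedding_lookup does for the corresponding
# partition_strategy argument.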
class EmbeddingLookupTest(test.TestCase):
# This test looks up [0, 0] in a parameter matrix sharded 2 ways. Since
# both the ids are in the first shard, one of the resulting lookup
# vector is going to be empty. The subsequent DivOp fails because of that.
# TODO(keveman): Disabling the test until the underlying problem is fixed.
@test_util.run_deprecated_v1
def testSimpleSharded(self):
with self.cached_session():
num_shards = 2
vocab_size = 4
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
id_vals = np.array([0, 0])
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
print("Construct ids", ids.get_shape())
embedding = embedding_ops.embedding_lookup(p, ids)
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testMaxNorm(self):
with self.cached_session():
embeddings = constant_op.constant([[2.0]])
ids = constant_op.constant([0], dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup(
[embeddings], ids, max_norm=1.0)
self.assertAllEqual(embedding, [[1.0]])
@test_util.run_deprecated_v1
def testMaxNormNontrivial(self):
with self.cached_session():
embeddings = constant_op.constant([[2.0, 4.0], [3.0, 1.0]])
ids = constant_op.constant([0, 1], dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup(
[embeddings], ids, max_norm=2.0)
norms = math_ops.sqrt(
math_ops.reduce_sum(embeddings * embeddings, axis=1))
normalized = embeddings / array_ops.stack([norms, norms], axis=1)
self.assertAllClose(embedding, 2 * self.evaluate(normalized))
@test_util.run_deprecated_v1
def testSimpleShardedPartitionedVariable(self):
with self.cached_session() as sess:
num_shards = 2
vocab_size = 4
p, p_variable, params, feed_dict = _EmbeddingParamsAsPartitionedVariable(
num_shards, vocab_size)
id_vals = np.array([0, 0])
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
print("Construct ids", ids.get_shape())
embedding = embedding_ops.embedding_lookup(p_variable, ids)
self.evaluate(variables.global_variables_initializer())
params_values = [params[p_i.name] for p_i in p]
# Test that the PartitionedVariable components equal the list in p
p_var_val = self.evaluate(list(p_variable))
# Actual test
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(params_values, p_var_val)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testSimpleShardedPartitionedResourceVariable(self):
with self.cached_session() as sess:
num_shards = 2
vocab_size = 4
p, p_variable, params, _ = _EmbeddingParamsAsPartitionedVariable(
num_shards, vocab_size, use_resource=True)
id_vals = np.array([0, 0])
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
print("Construct ids", ids.get_shape())
embedding = embedding_ops.embedding_lookup(p_variable, ids)
self.evaluate(variables.global_variables_initializer())
params_values = [params[p_i.name] for p_i in p]
# Test that the PartitionedVariable components equal the list in p
p_var_val = self.evaluate(list(p_variable))
# Actual test
print(ops.get_default_graph().as_graph_def())
tf_result = self.evaluate(embedding)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(params_values, p_var_val)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedModPartitioningInt32Ids(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup(p, ids)
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedModPartitioningInt64Ids(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
embedding = embedding_ops.embedding_lookup(p, ids)
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedDivPartitioningInt32Ids(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup(
p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedDivPartitioningInt32IdsPartitionedVariable(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
_, p_variable, params, feed_dict = _EmbeddingParamsAsPartitionedVariable(
num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
self.evaluate(variables.global_variables_initializer())
embedding = embedding_ops.embedding_lookup(
p_variable, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedDivPartitioningInt64Ids(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
embedding = embedding_ops.embedding_lookup(
p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedDivPartitioningUnknownParamShape(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
# We clear parameter shapes, to test when shape is not statically known.
p, params, feed_dict = _EmbeddingParams(
num_shards, vocab_size, use_shapeless_placeholder=True)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
embedding = embedding_ops.embedding_lookup(
p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
@test_util.run_deprecated_v1
def testGradientsEmbeddingLookup(self):
vocab_size = 9
num_ids = 10
id_vals = list(np.random.randint(vocab_size, size=num_ids))
tf_logging.vlog(1, id_vals)
for ids_shape in [(10,), (2, 5)]:
for num_shards in [1, 3]:
with self.cached_session():
ids = constant_op.constant(
id_vals, shape=ids_shape, dtype=dtypes.int32)
x, params, _ = _EmbeddingParams(num_shards, vocab_size, shape=[2])
y = embedding_ops.embedding_lookup(x, ids)
y_shape = ids_shape + tuple(params[_PName(0) + ":0"].shape[1:])
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
err = gradient_checker.compute_gradient_error(
x, x_shape, y, y_shape, x_init_value=x_init_value)
self.assertLess(err, 1e-4)
@test_util.run_deprecated_v1
def testGradientsEmbeddingLookupWithComputedParams(self):
vocab_size = 9
num_ids = 5
id_vals = list(np.random.randint(vocab_size, size=num_ids))
tf_logging.vlog(1, id_vals)
for num_shards in [1, 3]:
with self.cached_session():
ids = constant_op.constant(id_vals, dtype=dtypes.int32)
x, params, _ = _EmbeddingParams(num_shards, vocab_size, shape=[2])
# This will force a conversion from IndexedSlices to Tensor.
x_squared = [math_ops.square(elem) for elem in x]
y = embedding_ops.embedding_lookup(x_squared, ids)
y_shape = [num_ids] + list(params[_PName(0) + ":0"].shape[1:])
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
err = gradient_checker.compute_gradient_error(
x, x_shape, y, y_shape, x_init_value=x_init_value)
self.assertLess(err, 1e-3)
def testConstructionNonSharded(self):
with ops.Graph().as_default():
p = variables.Variable(
array_ops.zeros(shape=[100, 100], dtype=dtypes.float32))
ids = constant_op.constant([0, 1, 1, 7], dtype=dtypes.int32)
embedding_ops.embedding_lookup([p], ids)
def testConstructionSharded(self):
with ops.Graph().as_default():
p = []
for _ in range(2):
p += [
variables.Variable(
array_ops.zeros(shape=[100, 100], dtype=dtypes.float32))
]
ids = constant_op.constant([0, 1, 1, 17], dtype=dtypes.int32)
embedding_ops.embedding_lookup(p, ids)
@test_util.run_deprecated_v1
def testHigherRank(self):
np.random.seed(8)
with self.cached_session():
for params_shape in (12,), (6, 3):
params = np.random.randn(*params_shape)
for ids_shape in (3, 2), (4, 3):
ids = np.random.randint(
params.shape[0], size=np.prod(ids_shape)).reshape(ids_shape)
# Compare nonsharded to gather
simple = embedding_ops.embedding_lookup(params, ids)
self.assertAllEqual(simple, array_ops.gather(params, ids))
# Run a few random sharded versions
for procs in 1, 2, 3:
stride = procs * math_ops.range(params.shape[0] // procs)
split_params = [
array_ops.gather(params, stride + p) for p in range(procs)
]
sharded = embedding_ops.embedding_lookup(split_params, ids)
self.assertAllEqual(simple, sharded)
@test_util.run_deprecated_v1
def testHigherRankMaxNorm(self):
np.random.seed(8)
with self.cached_session():
for params_shape in (12,), (6, 3), (6, 2, 3):
# Test embedding rank 0, 1, 2.
# Note: the first dimension must be a common multiple of procs below.
params = 2 * np.ones(params_shape)
params_norm = params / np.sqrt(
np.sum(
params * params, tuple(range(params.ndim)[1:]), keepdims=True))
for ids_shape in (), (3), (4, 3), (2, 3, 4):
ids = np.random.randint(
params.shape[0], size=np.prod(ids_shape,
dtype=np.int64)).reshape(ids_shape)
# Compare nonsharded to gather
simple = embedding_ops.embedding_lookup(params, ids, max_norm=1.0)
# assertAllClose is used here as different implementations of sqrt may
# be used to compute each of the values being compared. For example,
# on AVX512 builds the embedding operation makes use of Eigen's fast
# vectorized square root algorithm for doubles. These different
# implementations of sqrt are not guaranteed to produce exactly the
# same results. Therefore, an exact comparison cannot be made.
self.assertAllClose(simple, array_ops.gather(params_norm, ids))
# Run a few different sharded versions.
for procs in 1, 2, 3:
stride = procs * math_ops.range(params.shape[0] // procs)
split_params = [
array_ops.gather(params, stride + p) for p in range(procs)
]
sharded = embedding_ops.embedding_lookup(
split_params, ids, max_norm=1.0)
self.assertAllEqual(simple, sharded)
@test_util.run_deprecated_v1
def testTransform(self):
# This tests all combinations of:
# - ids rank 0, 1, >1
# - params sharded/unsharded
# It always applies max_norm.
np.random.seed(8)
l2_norm = 2.
with self.cached_session():
# Param values are in [l2_norm, l2_norm+1) so it will always clip.
params = np.random.rand(6, 3) + l2_norm
params_norm = l2_norm * params / np.sqrt(
np.sum(params * params, axis=1, keepdims=True))
# Compute the norm of each embedding. This will change the embedding
# rank to 0.
params_norm = np.linalg.norm(params_norm, axis=1)
transform = lambda x: linalg_ops.norm(x, axis=1)
for ids_shape in (), (3), (4, 3), (2, 3, 4):
# Test ids rank 0, 1, 2, 3.
ids = np.random.randint(
params.shape[0], size=np.prod(ids_shape,
dtype=np.int64)).reshape(ids_shape)
# Compare nonsharded to gather.
simple = embedding_ops._embedding_lookup_and_transform(
params, ids, max_norm=l2_norm, transform_fn=transform)
self.assertAllClose(simple, array_ops.gather(params_norm, ids))
# Run a few different sharded versions.
for procs in 1, 2, 3:
stride = procs * math_ops.range(params.shape[0] // procs)
split_params = [
array_ops.gather(params, stride + p) for p in range(procs)
]
sharded = embedding_ops._embedding_lookup_and_transform(
split_params, ids, max_norm=l2_norm, transform_fn=transform)
# assertAllClose is used here as different implementations of sqrt may
# be used to compute each of the values being compared. For example,
# on AVX512 builds the embedding operation makes use of Eigen's fast
# vectorized square root algorithm for doubles. These different
# implementations of sqrt are not guaranteed to produce exactly the
# same results. Therefore, an exact comparison cannot be made.
self.assertAllClose(simple, sharded)
def testRaggedMaxNorm(self):
embeddings = constant_op.constant([[2.0]])
ids = ragged_factory_ops.constant([[0, 0], [0]], dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup([embeddings], ids, max_norm=1.0)
self.assertAllEqual(embedding, [[[1.0], [1.0]], [[1.0]]])
class EmbeddingLookupSparseTest(test.TestCase):
def _RandomIdsAndWeights(self, batch_size, vocab_size):
max_val_per_entry = 6
vals_per_batch_entry = np.random.randint(
1, max_val_per_entry, size=batch_size)
num_vals = np.sum(vals_per_batch_entry)
ids = np.random.randint(vocab_size, size=num_vals)
weights = 1 + np.random.rand(num_vals)
indices = []
for batch_entry, num_val in enumerate(vals_per_batch_entry):
for val_index in range(num_val):
indices.append([batch_entry, val_index])
shape = [batch_size, max_val_per_entry]
sp_ids = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int32),
constant_op.constant(shape, dtypes.int64))
sp_weights = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sp_ids, sp_weights, ids, weights, vals_per_batch_entry
def _GroupByBatchEntry(self, vals, vals_per_batch_entry):
grouped_vals = []
index = 0
for num_val in vals_per_batch_entry:
grouped_vals.append(list(vals[index:(index + num_val)]))
index += num_val
return grouped_vals
@test_util.run_deprecated_v1
def testEmbeddingLookupSparse(self):
vocab_size = 13
batch_size = 10
param_shape = [2, 5]
expected_lookup_result_shape = [None] + param_shape
sp_ids, sp_weights, ids, weights, vals_per_batch_entry = (
self._RandomIdsAndWeights(batch_size, vocab_size))
grouped_ids = self._GroupByBatchEntry(ids, vals_per_batch_entry)
grouped_weights = self._GroupByBatchEntry(weights, vals_per_batch_entry)
grouped_ignored_weights = self._GroupByBatchEntry(
np.ones(np.sum(vals_per_batch_entry)), vals_per_batch_entry)
for num_shards, combiner, dtype, ignore_weights in itertools.product(
[1, 5], ["sum", "mean", "sqrtn"],
[dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64],
[True, False]):
with self.cached_session():
p, params, feed_dict = _EmbeddingParams(
num_shards, vocab_size, shape=param_shape, dtype=dtype)
embedding_sum = embedding_ops.embedding_lookup_sparse(
p,
sp_ids,
None if ignore_weights else sp_weights,
combiner=combiner)
self.assertEqual(embedding_sum.get_shape().as_list(),
expected_lookup_result_shape)
self.assertEqual(embedding_sum.dtype, dtype)
tf_embedding_sum = embedding_sum.eval(feed_dict=feed_dict)
np_embedding_sum, np_weight_sum, np_weight_sq_sum = _EmbeddingResult(
params,
grouped_ids,
num_shards,
vocab_size,
weight_vals=grouped_ignored_weights
if ignore_weights else grouped_weights)
if combiner == "mean":
np_embedding_sum /= np.reshape(np_weight_sum, (batch_size, 1, 1))
if combiner == "sqrtn":
np_embedding_sum /= np.reshape(
np.sqrt(np_weight_sq_sum), (batch_size, 1, 1))
rtol = 1e-6
if dtype == dtypes.bfloat16:
rtol = 1e-2
elif dtype == dtypes.float16:
rtol = 1e-3
atol = rtol
self.assertAllClose(np_embedding_sum, tf_embedding_sum, rtol, atol)
def testMissingInSparseIds(self):
# Github issue, 36359
with self.test_session():
x = array_ops.ones((4, 5))
sp_ids = sparse_tensor.SparseTensor(
constant_op.constant([[1, 0], [3, 0]], dtypes.int64),
constant_op.constant([0, 2], dtypes.int32),
constant_op.constant([4, 1], dtypes.int64))
sp_weights = sparse_tensor.SparseTensor(
constant_op.constant([[1, 0], [3, 0]], dtypes.int64),
constant_op.constant([1, 1], dtypes.float32),
constant_op.constant([4, 1], dtypes.int64))
for combiner in ["sum", "mean", "sqrtn"]:
embedding_sum = embedding_ops.embedding_lookup_sparse(
x, sp_ids, sp_weights, combiner=combiner)
tf_embedding_sum = ops.convert_to_tensor(embedding_sum)
self.assertAllClose(tf_embedding_sum[0], np.zeros(5))
self.assertAllClose(tf_embedding_sum[1], np.ones(5))
self.assertAllClose(tf_embedding_sum[2], np.zeros(5))
self.assertAllClose(tf_embedding_sum[3], np.ones(5))
@test_util.run_deprecated_v1
def testGradientsEmbeddingLookupSparse(self):
vocab_size = 12
batch_size = 4
param_shape = [2, 3]
sp_ids, sp_weights, _, _, _ = (self._RandomIdsAndWeights(
batch_size, vocab_size))
for num_shards, combiner, dtype, ignore_weights in itertools.product(
[1, 3], ["sum", "mean", "sqrtn"], [dtypes.float32,
dtypes.float64], [True, False]):
with self.cached_session():
x, params, _ = _EmbeddingParams(
num_shards, vocab_size, shape=param_shape, dtype=dtype)
y = embedding_ops.embedding_lookup_sparse(
x,
sp_ids,
None if ignore_weights else sp_weights,
combiner=combiner)
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
y_shape = [batch_size] + list(params[_PName(0) + ":0"].shape[1:])
err = gradient_checker.compute_gradient_error(
x, x_shape, y, y_shape, x_init_value=x_init_value)
self.assertLess(err, 1e-5 if dtype == dtypes.float64 else 2e-3)
@test_util.run_deprecated_v1
def testIncompatibleShapes(self):
with self.cached_session():
x, _, _ = _EmbeddingParams(1, 10, dtype=dtypes.float32)
sp_ids = sparse_tensor.SparseTensor(
constant_op.constant([[0, 0], [0, 1], [1, 0]], dtypes.int64),
constant_op.constant([0, 1, 2], dtypes.int32),
constant_op.constant([2, 2], dtypes.int64))
sp_weights = sparse_tensor.SparseTensor(
constant_op.constant([[0, 0], [0, 1]], dtypes.int64),
constant_op.constant([12.0, 5.0], dtypes.float32),
constant_op.constant([1, 2], dtypes.int64))
with self.assertRaises(ValueError):
embedding_ops.embedding_lookup_sparse(
x, sp_ids, sp_weights, combiner="mean")
class SafeEmbeddingLookupSparseTest(test.TestCase):
def _random_weights(self, vocab_size=4, embed_dim=4, num_shards=1):
assert vocab_size > 0
assert embed_dim > 0
assert num_shards > 0
assert num_shards <= vocab_size
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1.0 / math.sqrt(vocab_size), dtype=dtypes.float32)
embedding_weights = list(variable_scope.get_variable(
name="embedding_weights",
shape=[vocab_size, embed_dim],
partitioner=partitioned_variables.fixed_size_partitioner(num_shards),
initializer=initializer))
for w in embedding_weights:
self.evaluate(w.initializer)
embedding_weights = [self.evaluate(w) for w in embedding_weights]
return embedding_weights
def _ids_and_weights_2d(self):
# Each row demonstrates a test case:
# Row 0: multiple valid ids, 1 invalid id, weighted mean
# Row 1: all ids are invalid (leaving no valid ids after pruning)
# Row 2: no ids to begin with
# Row 3: single id
# Row 4: all ids have <=0 weight
indices = [[0, 0], [0, 1], [0, 2], [1, 0], [3, 0], [4, 0], [4, 1]]
ids = [0, 1, -1, -1, 2, 0, 1]
weights = [1.0, 2.0, 1.0, 1.0, 3.0, 0.0, -0.5]
shape = [5, 4]
sparse_ids = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int64),
constant_op.constant(shape, dtypes.int64))
sparse_weights = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sparse_ids, sparse_weights
def _ids_and_weights_3d(self):
# Each (2-D) index demonstrates a test case:
# Index 0, 0: multiple valid ids, 1 invalid id, weighted mean
# Index 0, 1: all ids are invalid (leaving no valid ids after pruning)
# Index 0, 2: no ids to begin with
# Index 1, 0: single id
# Index 1, 1: all ids have <=0 weight
# Index 1, 2: no ids to begin with
indices = [[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 1, 0], [1, 0, 0], [1, 1, 0],
[1, 1, 1]]
ids = [0, 1, -1, -1, 2, 0, 1]
weights = [1.0, 2.0, 1.0, 1.0, 3.0, 0.0, -0.5]
shape = [2, 3, 4]
sparse_ids = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int64),
constant_op.constant(shape, dtypes.int64))
sparse_weights = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sparse_ids, sparse_weights
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_return_zero_vector(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_2d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
sparse_ids,
sparse_weights))
self.assertAllClose(
embedding_lookup_result,
[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
3.0, [0] * 4, [0] * 4, embedding_weights[0][2], [0] * 4])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_return_special_vector(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_2d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(
embedding_weights, sparse_ids, sparse_weights, default_id=3))
self.assertAllClose(
embedding_lookup_result,
[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
3.0, embedding_weights[0][3], embedding_weights[0][3],
embedding_weights[0][2], embedding_weights[0][3]])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_no_weights(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, _ = self._ids_and_weights_2d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
sparse_ids, None))
self.assertAllClose(
embedding_lookup_result,
[(embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4,
[0] * 4, embedding_weights[0][2], (
embedding_weights[0][0] + embedding_weights[0][1]) / 2.0])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_partitioned(self):
with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, _ = self._ids_and_weights_2d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
sparse_ids, None))
embedding_weights = list(itertools.chain(*embedding_weights))
self.assertAllClose(embedding_lookup_result,
[(embedding_weights[0] + embedding_weights[1]) / 2.0,
[0] * 4, [0] * 4, embedding_weights[2],
(embedding_weights[0] + embedding_weights[1]) / 2.0])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_partitioned_inconsistent_weights(self):
with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, sparse_weights = self._ids_and_weights_2d()
embedding_weights[1] = embedding_weights[1].astype(np.float64)
self.assertRaises(TypeError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids)
embedding_weights = [
constant_op.constant(w, dtype=dtypes.float64)
for w in embedding_weights
]
self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids, sparse_weights)
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_3d_return_zero_vector(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_3d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
sparse_ids,
sparse_weights))
self.assertAllClose(embedding_lookup_result, [[
(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) / 3.0,
[0] * 4, [0] * 4
], [embedding_weights[0][2], [0] * 4, [0] * 4]])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_3d_return_special_vector(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, sparse_weights = self._ids_and_weights_3d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(
embedding_weights, sparse_ids, sparse_weights, default_id=3))
self.assertAllClose(
embedding_lookup_result,
[[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
3.0, embedding_weights[0][3], embedding_weights[0][3]], [
embedding_weights[0][2], embedding_weights[0][3],
embedding_weights[0][3]
]])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_3d_no_weights(self):
with self.cached_session():
embedding_weights = self._random_weights()
sparse_ids, _ = self._ids_and_weights_3d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
sparse_ids, None))
self.assertAllClose(embedding_lookup_result, [[(
embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4, [
0
] * 4], [
embedding_weights[0][2],
(embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4
]])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_3d_partitioned(self):
with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, _ = self._ids_and_weights_3d()
embedding_lookup_result = (
embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
sparse_ids, None))
embedding_weights = list(itertools.chain(*embedding_weights))
self.assertAllClose(embedding_lookup_result, [[
(embedding_weights[0] + embedding_weights[1]) / 2.0, [0] * 4, [0] * 4
], [
embedding_weights[2],
(embedding_weights[0] + embedding_weights[1]) / 2.0, [0] * 4
]])
@test_util.run_deprecated_v1
def test_safe_embedding_lookup_sparse_3d_partitioned_inconsistent_weights(
self):
with self.cached_session():
embedding_weights = self._random_weights(num_shards=3)
sparse_ids, sparse_weights = self._ids_and_weights_3d()
embedding_weights[1] = embedding_weights[1].astype(np.float64)
self.assertRaises(TypeError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids)
embedding_weights = [
constant_op.constant(w, dtype=dtypes.float64)
for w in embedding_weights
]
self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
embedding_weights, sparse_ids, sparse_weights)
class DynamicStitchOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testCint32Cpu(self):
with self.session(use_gpu=False):
indices = [
ops.convert_to_tensor([0, 1, 2]),
ops.convert_to_tensor([2, 3])
]
values = [
ops.convert_to_tensor([12, 23, 34]),
ops.convert_to_tensor([1, 2])
]
self.assertAllEqual(
data_flow_ops.dynamic_stitch(indices, values), [12, 23, 1, 2])
@test_util.run_deprecated_v1
def testCint32Gpu(self):
with self.session():
indices = [
ops.convert_to_tensor([0, 1, 2]),
ops.convert_to_tensor([2, 3])
]
values = [
ops.convert_to_tensor([12, 23, 34]),
ops.convert_to_tensor([1, 2])
]
self.assertAllEqual(
data_flow_ops.dynamic_stitch(indices, values), [12, 23, 1, 2])
@test_util.run_deprecated_v1
def testInt32Cpu(self):
with self.session(use_gpu=False):
indices = [
ops.convert_to_tensor([0, 1, 2]),
ops.convert_to_tensor([2, 3])
]
values = [
ops.convert_to_tensor([12, 23, 34]),
ops.convert_to_tensor([1, 2])
]
self.assertAllEqual(
data_flow_ops.dynamic_stitch(indices, values), [12, 23, 1, 2])
@test_util.run_deprecated_v1
def testInt32Gpu(self):
with self.session():
indices = [
ops.convert_to_tensor([0, 1, 2]),
ops.convert_to_tensor([2, 3])
]
values = [
ops.convert_to_tensor([12, 23, 34]),
ops.convert_to_tensor([1, 2])
]
self.assertAllEqual(
data_flow_ops.dynamic_stitch(indices, values), [12, 23, 1, 2])
@test_util.run_deprecated_v1
def testSumGradArgs(self):
with self.session(use_gpu=False):
indices = [
ops.convert_to_tensor([0, 1, 2, 3]),
ops.convert_to_tensor([2, 3])
]
values = [
ops.convert_to_tensor([2, 3, 5, 7]),
ops.convert_to_tensor([1, 1])
]
self.assertAllEqual(
data_flow_ops.dynamic_stitch(indices, values), [2, 3, 1, 1])
# We expect that the values are merged in order.
@test_util.run_deprecated_v1
def testStitchOrder(self):
with self.cached_session():
indices = []
np_values = []
values = []
for _ in range(10):
indices.extend([ops.convert_to_tensor(np.arange(100).astype(np.int32))])
np_values.extend([np.random.uniform(size=100)])
values.extend([ops.convert_to_tensor(np_values[-1])])
stitched = data_flow_ops.dynamic_stitch(indices, values)
self.assertAllEqual(np_values[-1], stitched)
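  # Illustrative note, not part of the original test: dynamic_stitch resolves
  # duplicate indices by keeping the value from the last input that supplies
  # them, which is what the assertion above relies on. For example (assuming
  # an active session or eager execution):
  #   data_flow_ops.dynamic_stitch(
  #       [ops.convert_to_tensor([0, 1]), ops.convert_to_tensor([1])],
  #       [ops.convert_to_tensor([10, 20]), ops.convert_to_tensor([30])])
  #   # -> [10, 30], because index 1 is written last by the second input.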
class ParallelDynamicStitchOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testCint32Cpu(self):
with self.session(use_gpu=False):
indices = [
ops.convert_to_tensor([0, 1, 4, 6]),
ops.convert_to_tensor([2, 3, 5])
]
values = [
ops.convert_to_tensor([12, 23, 34, 45]),
ops.convert_to_tensor([1, 2, 3])
]
self.assertAllEqual(
data_flow_ops.parallel_dynamic_stitch(indices, values),
[12, 23, 1, 2, 34, 3, 45])
@test_util.run_deprecated_v1
def testInt32Cpu(self):
with self.session(use_gpu=False):
indices = [
ops.convert_to_tensor([0, 1, 5, 6, 7]),
ops.convert_to_tensor([2, 4, 3])
]
values = [
ops.convert_to_tensor([12, 23, 34, 45, 56]),
ops.convert_to_tensor([1, 3, 2])
]
self.assertAllEqual(
data_flow_ops.parallel_dynamic_stitch(indices, values),
[12, 23, 1, 2, 3, 34, 45, 56])
@test_util.run_deprecated_v1
def testSimple(self):
with self.session(use_gpu=False):
indices = [ops.convert_to_tensor([0, 1]), ops.convert_to_tensor([2, 3])]
values = [ops.convert_to_tensor([2, 3]), ops.convert_to_tensor([1, 1])]
self.assertAllEqual(
data_flow_ops.parallel_dynamic_stitch(indices, values), [2, 3, 1, 1])
if __name__ == "__main__":
test.main()
| tensorflow/tensorflow | tensorflow/python/kernel_tests/nn_ops/embedding_ops_test.py | Python | apache-2.0 | 47,859 |
"""Support for Matrix notifications."""
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_MESSAGE,
ATTR_TARGET,
PLATFORM_SCHEMA,
BaseNotificationService,
)
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN, SERVICE_SEND_MESSAGE
CONF_DEFAULT_ROOM = "default_room"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_DEFAULT_ROOM): cv.string})
def get_service(hass, config, discovery_info=None):
"""Get the Matrix notification service."""
return MatrixNotificationService(config[CONF_DEFAULT_ROOM])
class MatrixNotificationService(BaseNotificationService):
"""Send notifications to a Matrix room."""
def __init__(self, default_room):
"""Set up the Matrix notification service."""
self._default_room = default_room
def send_message(self, message="", **kwargs):
"""Send the message to the Matrix server."""
target_rooms = kwargs.get(ATTR_TARGET) or [self._default_room]
service_data = {ATTR_TARGET: target_rooms, ATTR_MESSAGE: message}
if (data := kwargs.get(ATTR_DATA)) is not None:
service_data[ATTR_DATA] = data
return self.hass.services.call(
DOMAIN, SERVICE_SEND_MESSAGE, service_data=service_data
)
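# Illustrative usage sketch, not part of the integration; the room ids are
# placeholders. With default_room "#announcements:example.org", calling
#   service.send_message("Build finished", target=["#ci:example.org"])
# forwards {"target": ["#ci:example.org"], "message": "Build finished"} to the
# matrix domain's send_message service, and any notify `data` payload is
# passed through under ATTR_DATA.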
| jawilson/home-assistant | homeassistant/components/matrix/notify.py | Python | apache-2.0 | 1,313 |
from zerver.lib.test_classes import WebhookTestCase
class LidarrHookTests(WebhookTestCase):
STREAM_NAME = "lidarr"
URL_TEMPLATE = "/api/v1/external/lidarr?api_key={api_key}&stream={stream}"
WEBHOOK_DIR_NAME = "lidarr"
def test_lidarr_test(self) -> None:
"""
Tests if lidarr test payload is handled correctly
"""
expected_topic = "Lidarr - Test"
expected_message = "Lidarr webhook has been successfully configured."
self.check_webhook("lidarr_test", expected_topic, expected_message)
def test_lidarr_tracks_renamed(self) -> None:
"""
Tests if lidarr tracks renamed payload is handled correctly
"""
expected_topic = "Little Mix"
expected_message = "The artist Little Mix has had its tracks renamed."
self.check_webhook("lidarr_tracks_renamed", expected_topic, expected_message)
def test_lidarr_tracks_retagged(self) -> None:
"""
Tests if lidarr tracks retagged payload is handled correctly
"""
expected_topic = "Little Mix"
expected_message = "The artist Little Mix has had its tracks retagged."
self.check_webhook("lidarr_tracks_retagged", expected_topic, expected_message)
def test_lidarr_tracks_imported(self) -> None:
"""
Tests if lidarr tracks imported payload is handled correctly
"""
expected_topic = "UB40"
expected_message = """
The following tracks by UB40 have been imported:
* Cherry Oh Baby
* Keep On Moving
* Please Don't Make Me Cry
* Sweet Sensation
* Johnny Too Bad
* Red Red Wine
* Guilty
* She Caught the Train
* Version Girl
* Many Rivers to Cross
""".strip()
self.check_webhook("lidarr_tracks_imported", expected_topic, expected_message)
def test_lidarr_tracks_imported_upgrade(self) -> None:
"""
Tests if lidarr tracks imported upgrade payload is handled correctly
"""
expected_topic = "Little Mix"
expected_message = """
The following tracks by Little Mix have been imported due to upgrade:
* The National Manthem
* Woman Like Me
* Think About Us
* Strip
* Monster in Me
* Joan of Arc
* Love a Girl Right
* American Boy
* Told You So
* Wasabi
* More Than Words
* Motivate
* Notice
* The Cure
* Forget You Not
* Woman’s World
* The Cure (stripped)
* Only You
""".strip()
self.check_webhook("lidarr_tracks_imported_upgrade", expected_topic, expected_message)
def test_lidarr_album_grabbed(self) -> None:
"""
Tests if lidarr album grabbed payload is handled correctly
"""
expected_topic = "UB40"
expected_message = "The album Labour of Love by UB40 has been grabbed."
self.check_webhook("lidarr_album_grabbed", expected_topic, expected_message)
def test_lidarr_tracks_imported_over_limit(self) -> None:
"""
Tests if lidarr tracks imported over limit payload is handled correctly
"""
expected_topic = "Michael Jackson"
expected_message = """
The following tracks by Michael Jackson have been imported:
* Scream
* Billie Jean
* The Way You Make Me Feel
* They Don’t Care About Us
* Stranger in Moscow
* Black or White
* This Time Around
* Rock With You
* Earth Song
* She’s Out of My Life
* D.S.
* Bad
* Money
* I Just Can’t Stop Loving You
* Man in the Mirror
* Come Together
* Thriller
* You Are Not Alone
* Beat It
* Childhood (theme from “Free Willy 2”)
[and 10 more tracks(s)]
""".strip()
self.check_webhook("lidarr_tracks_imported_over_limit", expected_topic, expected_message)
def test_lidarr_tracks_imported_upgrade_over_limit(self) -> None:
"""
Tests if lidarr tracks imported upgrade over limit payload is handled correctly
"""
expected_topic = "Michael Jackson"
expected_message = """
The following tracks by Michael Jackson have been imported due to upgrade:
* Scream
* Billie Jean
* The Way You Make Me Feel
* They Don’t Care About Us
* Stranger in Moscow
* Black or White
* This Time Around
* Rock With You
* Earth Song
* She’s Out of My Life
* D.S.
* Bad
* Money
* I Just Can’t Stop Loving You
* Man in the Mirror
* Come Together
* Thriller
* You Are Not Alone
* Beat It
* Childhood (theme from “Free Willy 2”)
[and 10 more tracks(s)]
""".strip()
self.check_webhook(
"lidarr_tracks_imported_upgrade_over_limit", expected_topic, expected_message
)
| rht/zulip | zerver/webhooks/lidarr/tests.py | Python | apache-2.0 | 4,458 |
"""Translate generators test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tarfile
import tensorflow as tf
from data_generators import text_problems
from data_generators import translate
class TranslateTest(tf.test.TestCase):
DATASETS = [
["data1.tgz", ("train1.en", "train1.de")],
["data2.tgz", ("train2.en", "train2.de")],
["data3.tgz", ("train3.en", "train3.de")],
]
@classmethod
def setUpClass(cls):
tmp_dir = tf.test.get_temp_dir()
compressed_dir = os.path.join(tmp_dir, "compressed")
shutil.rmtree(tmp_dir)
tf.gfile.MakeDirs(compressed_dir)
en_data = [str(i) for i in range(10, 40)]
de_data = [str(i) for i in range(100, 130)]
data = list(zip(en_data, de_data))
for i, dataset in enumerate(cls.DATASETS):
tar_file = dataset[0]
en_file, de_file = [
os.path.join(compressed_dir, name) for name in dataset[1]
]
with tf.gfile.Open(en_file, "w") as en_f:
with tf.gfile.Open(de_file, "w") as de_f:
start = i * 10
end = start + 10
for en_line, de_line in data[start:end]:
en_f.write(en_line)
en_f.write("\n")
de_f.write(de_line)
de_f.write("\n")
with tarfile.open(os.path.join(tmp_dir, tar_file), "w:gz") as tar_f:
tar_f.add(en_file, os.path.basename(en_file))
tar_f.add(de_file, os.path.basename(de_file))
cls.tmp_dir = tmp_dir
cls.data = data
def testCompileData(self):
filename = "out"
filepath = os.path.join(self.tmp_dir, filename)
translate.compile_data(self.tmp_dir, self.DATASETS, filename)
count = 0
for i, example in enumerate(
text_problems.text2text_txt_iterator(filepath + ".lang1",
filepath + ".lang2")):
expected = self.data[i]
self.assertEqual(list(expected), [example["inputs"], example["targets"]])
count += 1
self.assertEqual(count, len(self.data))
if __name__ == "__main__":
tf.test.main()
| mlperf/training_results_v0.6 | Google/benchmarks/transformer/implementations/tpu-v3-512-transformer/transformer/data_generators/translate_test.py | Python | apache-2.0 | 2,128 |
"""
Upload handlers to test the upload API.
"""
from django.core.files.uploadhandler import (
FileUploadHandler, StopUpload, TemporaryFileUploadHandler,
)
class QuotaUploadHandler(FileUploadHandler):
"""
This test upload handler terminates the connection if more than a quota
(5MB) is uploaded.
"""
QUOTA = 5 * 2 ** 20 # 5 MB
def __init__(self, request=None):
super().__init__(request)
self.total_upload = 0
def receive_data_chunk(self, raw_data, start):
self.total_upload += len(raw_data)
if self.total_upload >= self.QUOTA:
raise StopUpload(connection_reset=True)
return raw_data
def file_complete(self, file_size):
return None
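# Illustrative sketch (an assumption, not part of this module): a view under
# test can activate the quota handler before the upload body is read, e.g.
#   request.upload_handlers.insert(0, QuotaUploadHandler())
# after which any upload larger than QUOTA aborts the request via
# StopUpload(connection_reset=True).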
class StopUploadTemporaryFileHandler(TemporaryFileUploadHandler):
"""A handler that raises a StopUpload exception."""
def receive_data_chunk(self, raw_data, start):
raise StopUpload()
class CustomUploadError(Exception):
pass
class ErroringUploadHandler(FileUploadHandler):
"""A handler that raises an exception."""
def receive_data_chunk(self, raw_data, start):
raise CustomUploadError("Oops!")
| elena/django | tests/file_uploads/uploadhandler.py | Python | bsd-3-clause | 1,171 |
from __future__ import absolute_import
import logging
import requests
from six.moves.urllib.parse import quote
from sentry import options
logger = logging.getLogger(__name__)
def sms_available():
return bool(options.get('sms.twilio-account'))
def send_sms(body, to, from_=None):
account = options.get('sms.twilio-account')
if not account:
raise RuntimeError('SMS backend is not configured.')
if account[:2] != 'AC':
account = 'AC' + account
url = 'https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json' % \
quote(account)
rv = requests.post(url, auth=(account,
options.get('sms.twilio-token')), data={
'To': to,
'From': options.get('sms.twilio-number'),
'Body': body,
})
if not rv.ok:
        logger.error('Failed to send text message to %s: (%s) %s', to,
                     rv.status_code, rv.content)
return False
return True
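# Illustrative usage sketch, assuming the sms.twilio-* options above are set;
# the phone number is a placeholder:
#   from sentry.utils.sms import sms_available, send_sms
#   if sms_available():
#       send_sms('Your Sentry verification code is 123456', to='+15555550123')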
| JamesMura/sentry | src/sentry/utils/sms.py | Python | bsd-3-clause | 976 |
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Translates gRPC's client-side API into gRPC's client-side Beta API."""
import grpc
from grpc import _common
from grpc._cython import cygrpc
from grpc.beta import interfaces
from grpc.framework.common import cardinality
from grpc.framework.foundation import future
from grpc.framework.interfaces.face import face
_STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS = {
grpc.StatusCode.CANCELLED: (face.Abortion.Kind.CANCELLED,
face.CancellationError),
grpc.StatusCode.UNKNOWN: (face.Abortion.Kind.REMOTE_FAILURE,
face.RemoteError),
grpc.StatusCode.DEADLINE_EXCEEDED: (face.Abortion.Kind.EXPIRED,
face.ExpirationError),
grpc.StatusCode.UNIMPLEMENTED: (face.Abortion.Kind.LOCAL_FAILURE,
face.LocalError),
}
def _effective_metadata(metadata, metadata_transformer):
non_none_metadata = () if metadata is None else metadata
if metadata_transformer is None:
return non_none_metadata
else:
return metadata_transformer(non_none_metadata)
def _credentials(grpc_call_options):
return None if grpc_call_options is None else grpc_call_options.credentials
def _abortion(rpc_error_call):
code = rpc_error_call.code()
pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code)
error_kind = face.Abortion.Kind.LOCAL_FAILURE if pair is None else pair[0]
return face.Abortion(error_kind,
rpc_error_call.initial_metadata(),
rpc_error_call.trailing_metadata(), code,
rpc_error_call.details())
def _abortion_error(rpc_error_call):
code = rpc_error_call.code()
pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code)
exception_class = face.AbortionError if pair is None else pair[1]
return exception_class(rpc_error_call.initial_metadata(),
rpc_error_call.trailing_metadata(), code,
rpc_error_call.details())
class _InvocationProtocolContext(interfaces.GRPCInvocationContext):
def disable_next_request_compression(self):
pass # TODO(https://github.com/grpc/grpc/issues/4078): design, implement.
class _Rendezvous(future.Future, face.Call):
def __init__(self, response_future, response_iterator, call):
self._future = response_future
self._iterator = response_iterator
self._call = call
def cancel(self):
return self._call.cancel()
def cancelled(self):
return self._future.cancelled()
def running(self):
return self._future.running()
def done(self):
return self._future.done()
def result(self, timeout=None):
try:
return self._future.result(timeout=timeout)
except grpc.RpcError as rpc_error_call:
raise _abortion_error(rpc_error_call)
except grpc.FutureTimeoutError:
raise future.TimeoutError()
except grpc.FutureCancelledError:
raise future.CancelledError()
def exception(self, timeout=None):
try:
rpc_error_call = self._future.exception(timeout=timeout)
if rpc_error_call is None:
return None
else:
return _abortion_error(rpc_error_call)
except grpc.FutureTimeoutError:
raise future.TimeoutError()
except grpc.FutureCancelledError:
raise future.CancelledError()
def traceback(self, timeout=None):
try:
return self._future.traceback(timeout=timeout)
except grpc.FutureTimeoutError:
raise future.TimeoutError()
except grpc.FutureCancelledError:
raise future.CancelledError()
def add_done_callback(self, fn):
self._future.add_done_callback(lambda ignored_callback: fn(self))
def __iter__(self):
return self
def _next(self):
try:
return next(self._iterator)
except grpc.RpcError as rpc_error_call:
raise _abortion_error(rpc_error_call)
def __next__(self):
return self._next()
def next(self):
return self._next()
def is_active(self):
return self._call.is_active()
def time_remaining(self):
return self._call.time_remaining()
def add_abortion_callback(self, abortion_callback):
def done_callback():
if self.code() is not grpc.StatusCode.OK:
abortion_callback(_abortion(self._call))
registered = self._call.add_callback(done_callback)
return None if registered else done_callback()
def protocol_context(self):
return _InvocationProtocolContext()
def initial_metadata(self):
return self._call.initial_metadata()
def terminal_metadata(self):
return self._call.terminal_metadata()
def code(self):
return self._call.code()
def details(self):
return self._call.details()
def _blocking_unary_unary(channel, group, method, timeout, with_call,
protocol_options, metadata, metadata_transformer,
request, request_serializer, response_deserializer):
try:
multi_callable = channel.unary_unary(
_common.fully_qualified_method(group, method),
request_serializer=request_serializer,
response_deserializer=response_deserializer)
effective_metadata = _effective_metadata(metadata, metadata_transformer)
if with_call:
response, call = multi_callable.with_call(
request,
timeout=timeout,
metadata=effective_metadata,
credentials=_credentials(protocol_options))
return response, _Rendezvous(None, None, call)
else:
return multi_callable(
request,
timeout=timeout,
metadata=effective_metadata,
credentials=_credentials(protocol_options))
except grpc.RpcError as rpc_error_call:
raise _abortion_error(rpc_error_call)
def _future_unary_unary(channel, group, method, timeout, protocol_options,
metadata, metadata_transformer, request,
request_serializer, response_deserializer):
multi_callable = channel.unary_unary(
_common.fully_qualified_method(group, method),
request_serializer=request_serializer,
response_deserializer=response_deserializer)
effective_metadata = _effective_metadata(metadata, metadata_transformer)
response_future = multi_callable.future(
request,
timeout=timeout,
metadata=effective_metadata,
credentials=_credentials(protocol_options))
return _Rendezvous(response_future, None, response_future)
def _unary_stream(channel, group, method, timeout, protocol_options, metadata,
metadata_transformer, request, request_serializer,
response_deserializer):
multi_callable = channel.unary_stream(
_common.fully_qualified_method(group, method),
request_serializer=request_serializer,
response_deserializer=response_deserializer)
effective_metadata = _effective_metadata(metadata, metadata_transformer)
response_iterator = multi_callable(
request,
timeout=timeout,
metadata=effective_metadata,
credentials=_credentials(protocol_options))
return _Rendezvous(None, response_iterator, response_iterator)
def _blocking_stream_unary(channel, group, method, timeout, with_call,
protocol_options, metadata, metadata_transformer,
request_iterator, request_serializer,
response_deserializer):
try:
multi_callable = channel.stream_unary(
_common.fully_qualified_method(group, method),
request_serializer=request_serializer,
response_deserializer=response_deserializer)
effective_metadata = _effective_metadata(metadata, metadata_transformer)
if with_call:
response, call = multi_callable.with_call(
request_iterator,
timeout=timeout,
metadata=effective_metadata,
credentials=_credentials(protocol_options))
return response, _Rendezvous(None, None, call)
else:
return multi_callable(
request_iterator,
timeout=timeout,
metadata=effective_metadata,
credentials=_credentials(protocol_options))
except grpc.RpcError as rpc_error_call:
raise _abortion_error(rpc_error_call)
def _future_stream_unary(channel, group, method, timeout, protocol_options,
metadata, metadata_transformer, request_iterator,
request_serializer, response_deserializer):
multi_callable = channel.stream_unary(
_common.fully_qualified_method(group, method),
request_serializer=request_serializer,
response_deserializer=response_deserializer)
effective_metadata = _effective_metadata(metadata, metadata_transformer)
response_future = multi_callable.future(
request_iterator,
timeout=timeout,
metadata=effective_metadata,
credentials=_credentials(protocol_options))
return _Rendezvous(response_future, None, response_future)
def _stream_stream(channel, group, method, timeout, protocol_options, metadata,
metadata_transformer, request_iterator, request_serializer,
response_deserializer):
multi_callable = channel.stream_stream(
_common.fully_qualified_method(group, method),
request_serializer=request_serializer,
response_deserializer=response_deserializer)
effective_metadata = _effective_metadata(metadata, metadata_transformer)
response_iterator = multi_callable(
request_iterator,
timeout=timeout,
metadata=effective_metadata,
credentials=_credentials(protocol_options))
return _Rendezvous(None, response_iterator, response_iterator)
class _UnaryUnaryMultiCallable(face.UnaryUnaryMultiCallable):
def __init__(self, channel, group, method, metadata_transformer,
request_serializer, response_deserializer):
self._channel = channel
self._group = group
self._method = method
self._metadata_transformer = metadata_transformer
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
def __call__(self,
request,
timeout,
metadata=None,
with_call=False,
protocol_options=None):
return _blocking_unary_unary(
self._channel, self._group, self._method, timeout, with_call,
protocol_options, metadata, self._metadata_transformer, request,
self._request_serializer, self._response_deserializer)
def future(self, request, timeout, metadata=None, protocol_options=None):
return _future_unary_unary(
self._channel, self._group, self._method, timeout, protocol_options,
metadata, self._metadata_transformer, request,
self._request_serializer, self._response_deserializer)
def event(self,
request,
receiver,
abortion_callback,
timeout,
metadata=None,
protocol_options=None):
raise NotImplementedError()
class _UnaryStreamMultiCallable(face.UnaryStreamMultiCallable):
def __init__(self, channel, group, method, metadata_transformer,
request_serializer, response_deserializer):
self._channel = channel
self._group = group
self._method = method
self._metadata_transformer = metadata_transformer
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
def __call__(self, request, timeout, metadata=None, protocol_options=None):
return _unary_stream(
self._channel, self._group, self._method, timeout, protocol_options,
metadata, self._metadata_transformer, request,
self._request_serializer, self._response_deserializer)
def event(self,
request,
receiver,
abortion_callback,
timeout,
metadata=None,
protocol_options=None):
raise NotImplementedError()
class _StreamUnaryMultiCallable(face.StreamUnaryMultiCallable):
def __init__(self, channel, group, method, metadata_transformer,
request_serializer, response_deserializer):
self._channel = channel
self._group = group
self._method = method
self._metadata_transformer = metadata_transformer
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
def __call__(self,
request_iterator,
timeout,
metadata=None,
with_call=False,
protocol_options=None):
return _blocking_stream_unary(
self._channel, self._group, self._method, timeout, with_call,
protocol_options, metadata, self._metadata_transformer,
request_iterator, self._request_serializer,
self._response_deserializer)
def future(self,
request_iterator,
timeout,
metadata=None,
protocol_options=None):
return _future_stream_unary(
self._channel, self._group, self._method, timeout, protocol_options,
metadata, self._metadata_transformer, request_iterator,
self._request_serializer, self._response_deserializer)
def event(self,
receiver,
abortion_callback,
timeout,
metadata=None,
protocol_options=None):
raise NotImplementedError()
class _StreamStreamMultiCallable(face.StreamStreamMultiCallable):
def __init__(self, channel, group, method, metadata_transformer,
request_serializer, response_deserializer):
self._channel = channel
self._group = group
self._method = method
self._metadata_transformer = metadata_transformer
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
def __call__(self,
request_iterator,
timeout,
metadata=None,
protocol_options=None):
return _stream_stream(
self._channel, self._group, self._method, timeout, protocol_options,
metadata, self._metadata_transformer, request_iterator,
self._request_serializer, self._response_deserializer)
def event(self,
receiver,
abortion_callback,
timeout,
metadata=None,
protocol_options=None):
raise NotImplementedError()
class _GenericStub(face.GenericStub):
def __init__(self, channel, metadata_transformer, request_serializers,
response_deserializers):
self._channel = channel
self._metadata_transformer = metadata_transformer
self._request_serializers = request_serializers or {}
self._response_deserializers = response_deserializers or {}
def blocking_unary_unary(self,
group,
method,
request,
timeout,
metadata=None,
with_call=None,
protocol_options=None):
request_serializer = self._request_serializers.get((
group,
method,))
response_deserializer = self._response_deserializers.get((
group,
method,))
return _blocking_unary_unary(self._channel, group, method, timeout,
with_call, protocol_options, metadata,
self._metadata_transformer, request,
request_serializer, response_deserializer)
def future_unary_unary(self,
group,
method,
request,
timeout,
metadata=None,
protocol_options=None):
request_serializer = self._request_serializers.get((
group,
method,))
response_deserializer = self._response_deserializers.get((
group,
method,))
return _future_unary_unary(self._channel, group, method, timeout,
protocol_options, metadata,
self._metadata_transformer, request,
request_serializer, response_deserializer)
def inline_unary_stream(self,
group,
method,
request,
timeout,
metadata=None,
protocol_options=None):
request_serializer = self._request_serializers.get((
group,
method,))
response_deserializer = self._response_deserializers.get((
group,
method,))
return _unary_stream(self._channel, group, method, timeout,
protocol_options, metadata,
self._metadata_transformer, request,
request_serializer, response_deserializer)
def blocking_stream_unary(self,
group,
method,
request_iterator,
timeout,
metadata=None,
with_call=None,
protocol_options=None):
request_serializer = self._request_serializers.get((
group,
method,))
response_deserializer = self._response_deserializers.get((
group,
method,))
return _blocking_stream_unary(
self._channel, group, method, timeout, with_call, protocol_options,
metadata, self._metadata_transformer, request_iterator,
request_serializer, response_deserializer)
def future_stream_unary(self,
group,
method,
request_iterator,
timeout,
metadata=None,
protocol_options=None):
request_serializer = self._request_serializers.get((
group,
method,))
response_deserializer = self._response_deserializers.get((
group,
method,))
return _future_stream_unary(
self._channel, group, method, timeout, protocol_options, metadata,
self._metadata_transformer, request_iterator, request_serializer,
response_deserializer)
def inline_stream_stream(self,
group,
method,
request_iterator,
timeout,
metadata=None,
protocol_options=None):
request_serializer = self._request_serializers.get((
group,
method,))
response_deserializer = self._response_deserializers.get((
group,
method,))
return _stream_stream(self._channel, group, method, timeout,
protocol_options, metadata,
self._metadata_transformer, request_iterator,
request_serializer, response_deserializer)
def event_unary_unary(self,
group,
method,
request,
receiver,
abortion_callback,
timeout,
metadata=None,
protocol_options=None):
raise NotImplementedError()
def event_unary_stream(self,
group,
method,
request,
receiver,
abortion_callback,
timeout,
metadata=None,
protocol_options=None):
raise NotImplementedError()
def event_stream_unary(self,
group,
method,
receiver,
abortion_callback,
timeout,
metadata=None,
protocol_options=None):
raise NotImplementedError()
def event_stream_stream(self,
group,
method,
receiver,
abortion_callback,
timeout,
metadata=None,
protocol_options=None):
raise NotImplementedError()
def unary_unary(self, group, method):
request_serializer = self._request_serializers.get((
group,
method,))
response_deserializer = self._response_deserializers.get((
group,
method,))
return _UnaryUnaryMultiCallable(
self._channel, group, method, self._metadata_transformer,
request_serializer, response_deserializer)
def unary_stream(self, group, method):
request_serializer = self._request_serializers.get((
group,
method,))
response_deserializer = self._response_deserializers.get((
group,
method,))
return _UnaryStreamMultiCallable(
self._channel, group, method, self._metadata_transformer,
request_serializer, response_deserializer)
def stream_unary(self, group, method):
request_serializer = self._request_serializers.get((
group,
method,))
response_deserializer = self._response_deserializers.get((
group,
method,))
return _StreamUnaryMultiCallable(
self._channel, group, method, self._metadata_transformer,
request_serializer, response_deserializer)
def stream_stream(self, group, method):
request_serializer = self._request_serializers.get((
group,
method,))
response_deserializer = self._response_deserializers.get((
group,
method,))
return _StreamStreamMultiCallable(
self._channel, group, method, self._metadata_transformer,
request_serializer, response_deserializer)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
class _DynamicStub(face.DynamicStub):
def __init__(self, generic_stub, group, cardinalities):
self._generic_stub = generic_stub
self._group = group
self._cardinalities = cardinalities
def __getattr__(self, attr):
method_cardinality = self._cardinalities.get(attr)
if method_cardinality is cardinality.Cardinality.UNARY_UNARY:
return self._generic_stub.unary_unary(self._group, attr)
elif method_cardinality is cardinality.Cardinality.UNARY_STREAM:
return self._generic_stub.unary_stream(self._group, attr)
elif method_cardinality is cardinality.Cardinality.STREAM_UNARY:
return self._generic_stub.stream_unary(self._group, attr)
elif method_cardinality is cardinality.Cardinality.STREAM_STREAM:
return self._generic_stub.stream_stream(self._group, attr)
else:
raise AttributeError('_DynamicStub object has no attribute "%s"!' %
attr)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
def generic_stub(channel, host, metadata_transformer, request_serializers,
response_deserializers):
return _GenericStub(channel, metadata_transformer, request_serializers,
response_deserializers)
def dynamic_stub(channel, service, cardinalities, host, metadata_transformer,
request_serializers, response_deserializers):
return _DynamicStub(
_GenericStub(channel, metadata_transformer, request_serializers,
response_deserializers), service, cardinalities)
| soltanmm-google/grpc | src/python/grpcio/grpc/beta/_client_adaptations.py | Python | bsd-3-clause | 26,840 |
from __future__ import absolute_import
from qinfer.tomography.distributions import *
from qinfer.tomography.models import *
from qinfer.tomography.plotting_tools import *
from qinfer.tomography.bases import *
from qinfer.tomography.expdesign import *
| QInfer/python-qinfer | src/qinfer/tomography/__init__.py | Python | bsd-3-clause | 251 |
from __future__ import absolute_import
from future.standard_library import suspend_hooks
from future.utils import PY3
if PY3:
from urllib.request import *
    # These aren't in __all__:
from urllib.request import (getproxies,
pathname2url,
proxy_bypass,
quote,
request_host,
thishost,
unquote,
url2pathname,
urlcleanup,
urljoin,
urlopen,
urlparse,
urlretrieve,
urlsplit,
urlunparse)
from urllib.parse import (splitattr,
splithost,
splitpasswd,
splitport,
splitquery,
splittag,
splittype,
splituser,
splitvalue,
to_bytes,
unwrap)
else:
__future_module__ = True
with suspend_hooks():
from urllib import *
from urllib2 import *
from urlparse import *
# Rename:
from urllib import toBytes # missing from __all__ on Py2.6
to_bytes = toBytes
# from urllib import (pathname2url,
# url2pathname,
# getproxies,
# urlretrieve,
# urlcleanup,
# URLopener,
# FancyURLopener,
# proxy_bypass)
# from urllib2 import (
# AbstractBasicAuthHandler,
# AbstractDigestAuthHandler,
# BaseHandler,
# CacheFTPHandler,
# FileHandler,
# FTPHandler,
# HTTPBasicAuthHandler,
# HTTPCookieProcessor,
# HTTPDefaultErrorHandler,
# HTTPDigestAuthHandler,
# HTTPErrorProcessor,
# HTTPHandler,
# HTTPPasswordMgr,
# HTTPPasswordMgrWithDefaultRealm,
# HTTPRedirectHandler,
# HTTPSHandler,
# URLError,
# build_opener,
# install_opener,
# OpenerDirector,
# ProxyBasicAuthHandler,
# ProxyDigestAuthHandler,
# ProxyHandler,
# Request,
# UnknownHandler,
# urlopen,
# )
# from urlparse import (
# urldefrag
# urljoin,
# urlparse,
# urlunparse,
# urlsplit,
# urlunsplit,
# parse_qs,
#     parse_qsl,
# )
| PythonCharmers/python-future | src/future/moves/urllib/request.py | Python | mit | 3,496 |
# -*- coding: utf-8 -*-
# Copyright 2012 Vincent Jacques
# vincent@vincent-jacques.net
# This file is part of PyGithub. http://vincent-jacques.net/PyGithub
# PyGithub is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with PyGithub. If not, see <http://www.gnu.org/licenses/>.
import GithubObject
import Commit
import File
class Comparison(GithubObject.GithubObject):
@property
def ahead_by(self):
self._completeIfNotSet(self._ahead_by)
return self._NoneIfNotSet(self._ahead_by)
@property
def base_commit(self):
self._completeIfNotSet(self._base_commit)
return self._NoneIfNotSet(self._base_commit)
@property
def behind_by(self):
self._completeIfNotSet(self._behind_by)
return self._NoneIfNotSet(self._behind_by)
@property
def commits(self):
self._completeIfNotSet(self._commits)
return self._NoneIfNotSet(self._commits)
@property
def diff_url(self):
self._completeIfNotSet(self._diff_url)
return self._NoneIfNotSet(self._diff_url)
@property
def files(self):
self._completeIfNotSet(self._files)
return self._NoneIfNotSet(self._files)
@property
def html_url(self):
self._completeIfNotSet(self._html_url)
return self._NoneIfNotSet(self._html_url)
@property
def patch_url(self):
self._completeIfNotSet(self._patch_url)
return self._NoneIfNotSet(self._patch_url)
@property
def permalink_url(self):
self._completeIfNotSet(self._permalink_url)
return self._NoneIfNotSet(self._permalink_url)
@property
def status(self):
self._completeIfNotSet(self._status)
return self._NoneIfNotSet(self._status)
@property
def total_commits(self):
self._completeIfNotSet(self._total_commits)
return self._NoneIfNotSet(self._total_commits)
@property
def url(self):
self._completeIfNotSet(self._url)
return self._NoneIfNotSet(self._url)
def _initAttributes(self):
self._ahead_by = GithubObject.NotSet
self._base_commit = GithubObject.NotSet
self._behind_by = GithubObject.NotSet
self._commits = GithubObject.NotSet
self._diff_url = GithubObject.NotSet
self._files = GithubObject.NotSet
self._html_url = GithubObject.NotSet
self._patch_url = GithubObject.NotSet
self._permalink_url = GithubObject.NotSet
self._status = GithubObject.NotSet
self._total_commits = GithubObject.NotSet
self._url = GithubObject.NotSet
def _useAttributes(self, attributes):
if "ahead_by" in attributes: # pragma no branch
assert attributes["ahead_by"] is None or isinstance(attributes["ahead_by"], (int, long)), attributes["ahead_by"]
self._ahead_by = attributes["ahead_by"]
if "base_commit" in attributes: # pragma no branch
assert attributes["base_commit"] is None or isinstance(attributes["base_commit"], dict), attributes["base_commit"]
self._base_commit = None if attributes["base_commit"] is None else Commit.Commit(self._requester, attributes["base_commit"], completed=False)
if "behind_by" in attributes: # pragma no branch
assert attributes["behind_by"] is None or isinstance(attributes["behind_by"], (int, long)), attributes["behind_by"]
self._behind_by = attributes["behind_by"]
if "commits" in attributes: # pragma no branch
assert attributes["commits"] is None or all(isinstance(element, dict) for element in attributes["commits"]), attributes["commits"]
self._commits = None if attributes["commits"] is None else [
Commit.Commit(self._requester, element, completed=False)
for element in attributes["commits"]
]
if "diff_url" in attributes: # pragma no branch
assert attributes["diff_url"] is None or isinstance(attributes["diff_url"], (str, unicode)), attributes["diff_url"]
self._diff_url = attributes["diff_url"]
if "files" in attributes: # pragma no branch
assert attributes["files"] is None or all(isinstance(element, dict) for element in attributes["files"]), attributes["files"]
self._files = None if attributes["files"] is None else [
File.File(self._requester, element, completed=False)
for element in attributes["files"]
]
if "html_url" in attributes: # pragma no branch
assert attributes["html_url"] is None or isinstance(attributes["html_url"], (str, unicode)), attributes["html_url"]
self._html_url = attributes["html_url"]
if "patch_url" in attributes: # pragma no branch
assert attributes["patch_url"] is None or isinstance(attributes["patch_url"], (str, unicode)), attributes["patch_url"]
self._patch_url = attributes["patch_url"]
if "permalink_url" in attributes: # pragma no branch
assert attributes["permalink_url"] is None or isinstance(attributes["permalink_url"], (str, unicode)), attributes["permalink_url"]
self._permalink_url = attributes["permalink_url"]
if "status" in attributes: # pragma no branch
assert attributes["status"] is None or isinstance(attributes["status"], (str, unicode)), attributes["status"]
self._status = attributes["status"]
if "total_commits" in attributes: # pragma no branch
assert attributes["total_commits"] is None or isinstance(attributes["total_commits"], (int, long)), attributes["total_commits"]
self._total_commits = attributes["total_commits"]
if "url" in attributes: # pragma no branch
assert attributes["url"] is None or isinstance(attributes["url"], (str, unicode)), attributes["url"]
self._url = attributes["url"]
| azumimuo/family-xbmc-addon | plugin.video.dragon.sports/lib/utils/github/Comparison.py | Python | gpl-2.0 | 6,448 |
#!/usr/bin/python
#
# Reporter: pixdamix
#
# What steps will reproduce the problem?
# ======================================
#
# 1. Given three packages A, B, C
# A depends on B
# A failed postinst and is in unpacked state
# C depends on B
#
# 2. Upgrade to a new version of C which does not depend on B anymore, and use
# --autoremove
#
#
# What is the expected output? What do you see instead?
# =====================================================
#
# B should not be removed, but opkg uninstalls it.
#
#
# Status
# ======
#
# Fixed in r625.
import os
import opk, cfg, opkgcl
opk.regress_init()
o = opk.OpkGroup()
o.add(Package="a", Version="1.0", Depends="b")
o.add(Package="b", Version="1.0")
o.add(Package="c", Version="1.0", Depends="b")
o.write_opk()
o.write_list()
opkgcl.update()
opkgcl.install("a")
opkgcl.install("c")
opkgcl.flag_unpacked("a")
o = opk.OpkGroup()
o.add(Package="a", Version="1.0", Depends="b")
o.add(Package="b", Version="1.0")
o.add(Package="c", Version="2.0")
o.write_opk()
o.write_list()
opkgcl.update()
opkgcl.upgrade("--autoremove")
if not opkgcl.is_installed("b", "1.0"):
opk.fail("b has been removed even though a still depends on it")
| nrclark/opkg | tests/regress/issue79.py | Python | gpl-2.0 | 1,182 |
from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
log = CPLog(__name__)
class ClientScript(Plugin):
urls = {
'style': {},
'script': {},
}
html = {
'style': '<link rel="stylesheet" href="%s" type="text/css">',
'script': '<script type="text/javascript" src="%s"></script>',
}
def __init__(self):
addEvent('register_style', self.registerStyle)
addEvent('register_script', self.registerScript)
addEvent('clientscript.get_styles', self.getStyles)
addEvent('clientscript.get_scripts', self.getScripts)
def getStyles(self, *args, **kwargs):
return self.get('style', *args, **kwargs)
def getScripts(self, *args, **kwargs):
return self.get('script', *args, **kwargs)
def get(self, type, as_html = False, location = 'head'):
data = '' if as_html else []
try:
return self.urls[type][location]
except Exception, e:
log.error(e)
return data
def registerStyle(self, path, position = 'head'):
self.register(path, 'style', position)
def registerScript(self, path, position = 'head'):
self.register(path, 'script', position)
def register(self, filepath, type, location):
if not self.urls[type].get(location):
self.urls[type][location] = []
filePath = filepath
self.urls[type][location].append(filePath)
| jayme-github/CouchPotatoServer | couchpotato/core/_base/clientscript/main.py | Python | gpl-3.0 | 1,521 |
import unittest
from _test_lbfgs_cpp import *
from _test_lj_cpp import *
from _test_lj_interaction_list import *
from _test_frozen_atoms import *
from _test_bljcut import *
if __name__ == "__main__":
unittest.main()
| khs26/pele | playground/native_code/tests/__init__.py | Python | gpl-3.0 | 206 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# test_cancel.py - unit test for query cancellation
#
# Copyright (C) 2010-2011 Jan Urbański <wulczer@wulczer.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import time
import threading
import psycopg2
import psycopg2.extensions
from psycopg2 import extras
from testconfig import dsn
from testutils import unittest, ConnectingTestCase, skip_before_postgres, slow
class CancelTests(ConnectingTestCase):
def setUp(self):
ConnectingTestCase.setUp(self)
cur = self.conn.cursor()
cur.execute('''
CREATE TEMPORARY TABLE table1 (
id int PRIMARY KEY
)''')
self.conn.commit()
def test_empty_cancel(self):
self.conn.cancel()
@slow
@skip_before_postgres(8, 2)
def test_cancel(self):
errors = []
def neverending(conn):
cur = conn.cursor()
try:
self.assertRaises(psycopg2.extensions.QueryCanceledError,
cur.execute, "select pg_sleep(60)")
# make sure the connection still works
conn.rollback()
cur.execute("select 1")
self.assertEqual(cur.fetchall(), [(1, )])
except Exception, e:
errors.append(e)
raise
def canceller(conn):
cur = conn.cursor()
try:
conn.cancel()
except Exception, e:
errors.append(e)
raise
del cur
thread1 = threading.Thread(target=neverending, args=(self.conn, ))
# wait a bit to make sure that the other thread is already in
# pg_sleep -- ugly and racy, but the chances are ridiculously low
thread2 = threading.Timer(0.3, canceller, args=(self.conn, ))
thread1.start()
thread2.start()
thread1.join()
thread2.join()
self.assertEqual(errors, [])
@slow
@skip_before_postgres(8, 2)
def test_async_cancel(self):
async_conn = psycopg2.connect(dsn, async_=True)
self.assertRaises(psycopg2.OperationalError, async_conn.cancel)
extras.wait_select(async_conn)
cur = async_conn.cursor()
cur.execute("select pg_sleep(10)")
time.sleep(1)
self.assertTrue(async_conn.isexecuting())
async_conn.cancel()
self.assertRaises(psycopg2.extensions.QueryCanceledError,
extras.wait_select, async_conn)
cur.execute("select 1")
extras.wait_select(async_conn)
self.assertEqual(cur.fetchall(), [(1, )])
def test_async_connection_cancel(self):
async_conn = psycopg2.connect(dsn, async_=True)
async_conn.close()
self.assertTrue(async_conn.closed)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
| nwokeo/supysonic | venv/lib/python2.7/site-packages/psycopg2/tests/test_cancel.py | Python | agpl-3.0 | 3,785 |
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Mathieu Jourdan
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from datetime import date
from weboob.deprecated.browser import Page
from weboob.capabilities.bill import Subscription
class LoginPage(Page):
def login(self, login, password):
self.browser.select_form('symConnexionForm')
self.browser["portlet_login_plein_page_3{pageFlow.mForm.login}"] = unicode(login)
self.browser["portlet_login_plein_page_3{pageFlow.mForm.password}"] = unicode(password)
self.browser.submit()
class HomePage(Page):
def on_loaded(self):
pass
class AccountPage(Page):
def get_subscription_list(self):
table = self.document.xpath('//table[@id="ensemble_contrat_N0"]')[0]
if len(table) > 0:
# some clients may have subscriptions to gas and electricity,
# but they receive a single bill
# to avoid "boobill details" and "boobill bills" returning the same
# table twice, we could return only one subscription for both.
# We do not, and "boobill details" will take care of parsing only the
# relevant section in the bill files.
for line in table[0].xpath('//tbody/tr'):
cells = line.xpath('td')
snumber = cells[2].attrib['id'].replace('Contrat_', '')
slabel = cells[0].xpath('a')[0].text.replace('offre', '').strip()
d = unicode(cells[3].xpath('strong')[0].text.strip())
sdate = date(*reversed([int(x) for x in d.split("/")]))
sub = Subscription(snumber)
sub._id = snumber
sub.label = slabel
sub.subscriber = unicode(cells[1])
sub.renewdate = sdate
yield sub
class TimeoutPage(Page):
def on_loaded(self):
pass
| sputnick-dev/weboob | modules/gdfsuez/pages/homepage.py | Python | agpl-3.0 | 2,508 |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_EXT_vertex_shader'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_EXT_vertex_shader',error_checker=_errors._error_checker)
GL_CURRENT_VERTEX_EXT=_C('GL_CURRENT_VERTEX_EXT',0x87E2)
GL_FULL_RANGE_EXT=_C('GL_FULL_RANGE_EXT',0x87E1)
GL_INVARIANT_DATATYPE_EXT=_C('GL_INVARIANT_DATATYPE_EXT',0x87EB)
GL_INVARIANT_EXT=_C('GL_INVARIANT_EXT',0x87C2)
GL_INVARIANT_VALUE_EXT=_C('GL_INVARIANT_VALUE_EXT',0x87EA)
GL_LOCAL_CONSTANT_DATATYPE_EXT=_C('GL_LOCAL_CONSTANT_DATATYPE_EXT',0x87ED)
GL_LOCAL_CONSTANT_EXT=_C('GL_LOCAL_CONSTANT_EXT',0x87C3)
GL_LOCAL_CONSTANT_VALUE_EXT=_C('GL_LOCAL_CONSTANT_VALUE_EXT',0x87EC)
GL_LOCAL_EXT=_C('GL_LOCAL_EXT',0x87C4)
GL_MATRIX_EXT=_C('GL_MATRIX_EXT',0x87C0)
GL_MAX_OPTIMIZED_VERTEX_SHADER_INSTRUCTIONS_EXT=_C('GL_MAX_OPTIMIZED_VERTEX_SHADER_INSTRUCTIONS_EXT',0x87CA)
GL_MAX_OPTIMIZED_VERTEX_SHADER_INVARIANTS_EXT=_C('GL_MAX_OPTIMIZED_VERTEX_SHADER_INVARIANTS_EXT',0x87CD)
GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCALS_EXT=_C('GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCALS_EXT',0x87CE)
GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCAL_CONSTANTS_EXT=_C('GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCAL_CONSTANTS_EXT',0x87CC)
GL_MAX_OPTIMIZED_VERTEX_SHADER_VARIANTS_EXT=_C('GL_MAX_OPTIMIZED_VERTEX_SHADER_VARIANTS_EXT',0x87CB)
GL_MAX_VERTEX_SHADER_INSTRUCTIONS_EXT=_C('GL_MAX_VERTEX_SHADER_INSTRUCTIONS_EXT',0x87C5)
GL_MAX_VERTEX_SHADER_INVARIANTS_EXT=_C('GL_MAX_VERTEX_SHADER_INVARIANTS_EXT',0x87C7)
GL_MAX_VERTEX_SHADER_LOCALS_EXT=_C('GL_MAX_VERTEX_SHADER_LOCALS_EXT',0x87C9)
GL_MAX_VERTEX_SHADER_LOCAL_CONSTANTS_EXT=_C('GL_MAX_VERTEX_SHADER_LOCAL_CONSTANTS_EXT',0x87C8)
GL_MAX_VERTEX_SHADER_VARIANTS_EXT=_C('GL_MAX_VERTEX_SHADER_VARIANTS_EXT',0x87C6)
GL_MVP_MATRIX_EXT=_C('GL_MVP_MATRIX_EXT',0x87E3)
GL_NEGATIVE_ONE_EXT=_C('GL_NEGATIVE_ONE_EXT',0x87DF)
GL_NEGATIVE_W_EXT=_C('GL_NEGATIVE_W_EXT',0x87DC)
GL_NEGATIVE_X_EXT=_C('GL_NEGATIVE_X_EXT',0x87D9)
GL_NEGATIVE_Y_EXT=_C('GL_NEGATIVE_Y_EXT',0x87DA)
GL_NEGATIVE_Z_EXT=_C('GL_NEGATIVE_Z_EXT',0x87DB)
GL_NORMALIZED_RANGE_EXT=_C('GL_NORMALIZED_RANGE_EXT',0x87E0)
GL_ONE_EXT=_C('GL_ONE_EXT',0x87DE)
GL_OP_ADD_EXT=_C('GL_OP_ADD_EXT',0x8787)
GL_OP_CLAMP_EXT=_C('GL_OP_CLAMP_EXT',0x878E)
GL_OP_CROSS_PRODUCT_EXT=_C('GL_OP_CROSS_PRODUCT_EXT',0x8797)
GL_OP_DOT3_EXT=_C('GL_OP_DOT3_EXT',0x8784)
GL_OP_DOT4_EXT=_C('GL_OP_DOT4_EXT',0x8785)
GL_OP_EXP_BASE_2_EXT=_C('GL_OP_EXP_BASE_2_EXT',0x8791)
GL_OP_FLOOR_EXT=_C('GL_OP_FLOOR_EXT',0x878F)
GL_OP_FRAC_EXT=_C('GL_OP_FRAC_EXT',0x8789)
GL_OP_INDEX_EXT=_C('GL_OP_INDEX_EXT',0x8782)
GL_OP_LOG_BASE_2_EXT=_C('GL_OP_LOG_BASE_2_EXT',0x8792)
GL_OP_MADD_EXT=_C('GL_OP_MADD_EXT',0x8788)
GL_OP_MAX_EXT=_C('GL_OP_MAX_EXT',0x878A)
GL_OP_MIN_EXT=_C('GL_OP_MIN_EXT',0x878B)
GL_OP_MOV_EXT=_C('GL_OP_MOV_EXT',0x8799)
GL_OP_MULTIPLY_MATRIX_EXT=_C('GL_OP_MULTIPLY_MATRIX_EXT',0x8798)
GL_OP_MUL_EXT=_C('GL_OP_MUL_EXT',0x8786)
GL_OP_NEGATE_EXT=_C('GL_OP_NEGATE_EXT',0x8783)
GL_OP_POWER_EXT=_C('GL_OP_POWER_EXT',0x8793)
GL_OP_RECIP_EXT=_C('GL_OP_RECIP_EXT',0x8794)
GL_OP_RECIP_SQRT_EXT=_C('GL_OP_RECIP_SQRT_EXT',0x8795)
GL_OP_ROUND_EXT=_C('GL_OP_ROUND_EXT',0x8790)
GL_OP_SET_GE_EXT=_C('GL_OP_SET_GE_EXT',0x878C)
GL_OP_SET_LT_EXT=_C('GL_OP_SET_LT_EXT',0x878D)
GL_OP_SUB_EXT=_C('GL_OP_SUB_EXT',0x8796)
GL_OUTPUT_COLOR0_EXT=_C('GL_OUTPUT_COLOR0_EXT',0x879B)
GL_OUTPUT_COLOR1_EXT=_C('GL_OUTPUT_COLOR1_EXT',0x879C)
GL_OUTPUT_FOG_EXT=_C('GL_OUTPUT_FOG_EXT',0x87BD)
GL_OUTPUT_TEXTURE_COORD0_EXT=_C('GL_OUTPUT_TEXTURE_COORD0_EXT',0x879D)
GL_OUTPUT_TEXTURE_COORD10_EXT=_C('GL_OUTPUT_TEXTURE_COORD10_EXT',0x87A7)
GL_OUTPUT_TEXTURE_COORD11_EXT=_C('GL_OUTPUT_TEXTURE_COORD11_EXT',0x87A8)
GL_OUTPUT_TEXTURE_COORD12_EXT=_C('GL_OUTPUT_TEXTURE_COORD12_EXT',0x87A9)
GL_OUTPUT_TEXTURE_COORD13_EXT=_C('GL_OUTPUT_TEXTURE_COORD13_EXT',0x87AA)
GL_OUTPUT_TEXTURE_COORD14_EXT=_C('GL_OUTPUT_TEXTURE_COORD14_EXT',0x87AB)
GL_OUTPUT_TEXTURE_COORD15_EXT=_C('GL_OUTPUT_TEXTURE_COORD15_EXT',0x87AC)
GL_OUTPUT_TEXTURE_COORD16_EXT=_C('GL_OUTPUT_TEXTURE_COORD16_EXT',0x87AD)
GL_OUTPUT_TEXTURE_COORD17_EXT=_C('GL_OUTPUT_TEXTURE_COORD17_EXT',0x87AE)
GL_OUTPUT_TEXTURE_COORD18_EXT=_C('GL_OUTPUT_TEXTURE_COORD18_EXT',0x87AF)
GL_OUTPUT_TEXTURE_COORD19_EXT=_C('GL_OUTPUT_TEXTURE_COORD19_EXT',0x87B0)
GL_OUTPUT_TEXTURE_COORD1_EXT=_C('GL_OUTPUT_TEXTURE_COORD1_EXT',0x879E)
GL_OUTPUT_TEXTURE_COORD20_EXT=_C('GL_OUTPUT_TEXTURE_COORD20_EXT',0x87B1)
GL_OUTPUT_TEXTURE_COORD21_EXT=_C('GL_OUTPUT_TEXTURE_COORD21_EXT',0x87B2)
GL_OUTPUT_TEXTURE_COORD22_EXT=_C('GL_OUTPUT_TEXTURE_COORD22_EXT',0x87B3)
GL_OUTPUT_TEXTURE_COORD23_EXT=_C('GL_OUTPUT_TEXTURE_COORD23_EXT',0x87B4)
GL_OUTPUT_TEXTURE_COORD24_EXT=_C('GL_OUTPUT_TEXTURE_COORD24_EXT',0x87B5)
GL_OUTPUT_TEXTURE_COORD25_EXT=_C('GL_OUTPUT_TEXTURE_COORD25_EXT',0x87B6)
GL_OUTPUT_TEXTURE_COORD26_EXT=_C('GL_OUTPUT_TEXTURE_COORD26_EXT',0x87B7)
GL_OUTPUT_TEXTURE_COORD27_EXT=_C('GL_OUTPUT_TEXTURE_COORD27_EXT',0x87B8)
GL_OUTPUT_TEXTURE_COORD28_EXT=_C('GL_OUTPUT_TEXTURE_COORD28_EXT',0x87B9)
GL_OUTPUT_TEXTURE_COORD29_EXT=_C('GL_OUTPUT_TEXTURE_COORD29_EXT',0x87BA)
GL_OUTPUT_TEXTURE_COORD2_EXT=_C('GL_OUTPUT_TEXTURE_COORD2_EXT',0x879F)
GL_OUTPUT_TEXTURE_COORD30_EXT=_C('GL_OUTPUT_TEXTURE_COORD30_EXT',0x87BB)
GL_OUTPUT_TEXTURE_COORD31_EXT=_C('GL_OUTPUT_TEXTURE_COORD31_EXT',0x87BC)
GL_OUTPUT_TEXTURE_COORD3_EXT=_C('GL_OUTPUT_TEXTURE_COORD3_EXT',0x87A0)
GL_OUTPUT_TEXTURE_COORD4_EXT=_C('GL_OUTPUT_TEXTURE_COORD4_EXT',0x87A1)
GL_OUTPUT_TEXTURE_COORD5_EXT=_C('GL_OUTPUT_TEXTURE_COORD5_EXT',0x87A2)
GL_OUTPUT_TEXTURE_COORD6_EXT=_C('GL_OUTPUT_TEXTURE_COORD6_EXT',0x87A3)
GL_OUTPUT_TEXTURE_COORD7_EXT=_C('GL_OUTPUT_TEXTURE_COORD7_EXT',0x87A4)
GL_OUTPUT_TEXTURE_COORD8_EXT=_C('GL_OUTPUT_TEXTURE_COORD8_EXT',0x87A5)
GL_OUTPUT_TEXTURE_COORD9_EXT=_C('GL_OUTPUT_TEXTURE_COORD9_EXT',0x87A6)
GL_OUTPUT_VERTEX_EXT=_C('GL_OUTPUT_VERTEX_EXT',0x879A)
GL_SCALAR_EXT=_C('GL_SCALAR_EXT',0x87BE)
GL_VARIANT_ARRAY_EXT=_C('GL_VARIANT_ARRAY_EXT',0x87E8)
GL_VARIANT_ARRAY_POINTER_EXT=_C('GL_VARIANT_ARRAY_POINTER_EXT',0x87E9)
GL_VARIANT_ARRAY_STRIDE_EXT=_C('GL_VARIANT_ARRAY_STRIDE_EXT',0x87E6)
GL_VARIANT_ARRAY_TYPE_EXT=_C('GL_VARIANT_ARRAY_TYPE_EXT',0x87E7)
GL_VARIANT_DATATYPE_EXT=_C('GL_VARIANT_DATATYPE_EXT',0x87E5)
GL_VARIANT_EXT=_C('GL_VARIANT_EXT',0x87C1)
GL_VARIANT_VALUE_EXT=_C('GL_VARIANT_VALUE_EXT',0x87E4)
GL_VECTOR_EXT=_C('GL_VECTOR_EXT',0x87BF)
GL_VERTEX_SHADER_BINDING_EXT=_C('GL_VERTEX_SHADER_BINDING_EXT',0x8781)
GL_VERTEX_SHADER_EXT=_C('GL_VERTEX_SHADER_EXT',0x8780)
GL_VERTEX_SHADER_INSTRUCTIONS_EXT=_C('GL_VERTEX_SHADER_INSTRUCTIONS_EXT',0x87CF)
GL_VERTEX_SHADER_INVARIANTS_EXT=_C('GL_VERTEX_SHADER_INVARIANTS_EXT',0x87D1)
GL_VERTEX_SHADER_LOCALS_EXT=_C('GL_VERTEX_SHADER_LOCALS_EXT',0x87D3)
GL_VERTEX_SHADER_LOCAL_CONSTANTS_EXT=_C('GL_VERTEX_SHADER_LOCAL_CONSTANTS_EXT',0x87D2)
GL_VERTEX_SHADER_OPTIMIZED_EXT=_C('GL_VERTEX_SHADER_OPTIMIZED_EXT',0x87D4)
GL_VERTEX_SHADER_VARIANTS_EXT=_C('GL_VERTEX_SHADER_VARIANTS_EXT',0x87D0)
GL_W_EXT=_C('GL_W_EXT',0x87D8)
GL_X_EXT=_C('GL_X_EXT',0x87D5)
GL_Y_EXT=_C('GL_Y_EXT',0x87D6)
GL_ZERO_EXT=_C('GL_ZERO_EXT',0x87DD)
GL_Z_EXT=_C('GL_Z_EXT',0x87D7)
@_f
@_p.types(None,)
def glBeginVertexShaderEXT():pass
@_f
@_p.types(_cs.GLuint,_cs.GLenum,_cs.GLenum)
def glBindLightParameterEXT(light,value):pass
@_f
@_p.types(_cs.GLuint,_cs.GLenum,_cs.GLenum)
def glBindMaterialParameterEXT(face,value):pass
@_f
@_p.types(_cs.GLuint,_cs.GLenum)
def glBindParameterEXT(value):pass
@_f
@_p.types(_cs.GLuint,_cs.GLenum,_cs.GLenum,_cs.GLenum)
def glBindTexGenParameterEXT(unit,coord,value):pass
@_f
@_p.types(_cs.GLuint,_cs.GLenum,_cs.GLenum)
def glBindTextureUnitParameterEXT(unit,value):pass
@_f
@_p.types(None,_cs.GLuint)
def glBindVertexShaderEXT(id):pass
@_f
@_p.types(None,_cs.GLuint)
def glDeleteVertexShaderEXT(id):pass
@_f
@_p.types(None,_cs.GLuint)
def glDisableVariantClientStateEXT(id):pass
@_f
@_p.types(None,_cs.GLuint)
def glEnableVariantClientStateEXT(id):pass
@_f
@_p.types(None,)
def glEndVertexShaderEXT():pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glExtractComponentEXT(res,src,num):pass
@_f
@_p.types(_cs.GLuint,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLuint)
def glGenSymbolsEXT(datatype,storagetype,range,components):pass
@_f
@_p.types(_cs.GLuint,_cs.GLuint)
def glGenVertexShadersEXT(range):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLbooleanArray)
def glGetInvariantBooleanvEXT(id,value,data):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLfloatArray)
def glGetInvariantFloatvEXT(id,value,data):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetInvariantIntegervEXT(id,value,data):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLbooleanArray)
def glGetLocalConstantBooleanvEXT(id,value,data):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLfloatArray)
def glGetLocalConstantFloatvEXT(id,value,data):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetLocalConstantIntegervEXT(id,value,data):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLbooleanArray)
def glGetVariantBooleanvEXT(id,value,data):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLfloatArray)
def glGetVariantFloatvEXT(id,value,data):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetVariantIntegervEXT(id,value,data):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,arrays.GLvoidpArray)
def glGetVariantPointervEXT(id,value,data):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glInsertComponentEXT(res,src,num):pass
@_f
@_p.types(_cs.GLboolean,_cs.GLuint,_cs.GLenum)
def glIsVariantEnabledEXT(id,cap):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,ctypes.c_void_p)
def glSetInvariantEXT(id,type,addr):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,ctypes.c_void_p)
def glSetLocalConstantEXT(id,type,addr):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint)
def glShaderOp1EXT(op,res,arg1):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glShaderOp2EXT(op,res,arg1,arg2):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glShaderOp3EXT(op,res,arg1,arg2,arg3):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum)
def glSwizzleEXT(res,in_,outX,outY,outZ,outW):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLuint,ctypes.c_void_p)
def glVariantPointerEXT(id,type,stride,addr):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLbyteArray)
def glVariantbvEXT(id,addr):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLdoubleArray)
def glVariantdvEXT(id,addr):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLfloatArray)
def glVariantfvEXT(id,addr):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLintArray)
def glVariantivEXT(id,addr):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLshortArray)
def glVariantsvEXT(id,addr):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLubyteArray)
def glVariantubvEXT(id,addr):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLuintArray)
def glVariantuivEXT(id,addr):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLushortArray)
def glVariantusvEXT(id,addr):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum)
def glWriteMaskEXT(res,in_,outX,outY,outZ,outW):pass
|
stack-of-tasks/rbdlpy
|
tutorial/lib/python2.7/site-packages/OpenGL/raw/GL/EXT/vertex_shader.py
|
Python
|
lgpl-3.0
| 11,362
|
import pilas
def test_todos_los_objetos_de_interfaz_se_pueden_crear():
pilas.iniciar()
deslizador = pilas.interfaz.Deslizador()
assert deslizador
assert deslizador.progreso == 0
boton = pilas.interfaz.Boton()
assert boton
ingreso = pilas.interfaz.IngresoDeTexto()
assert ingreso
try:
pilas.interfaz.ListaSeleccion()
except TypeError:
assert True  # This exception is expected, because one argument is required
lista = pilas.interfaz.ListaSeleccion([('uno')])
assert lista
try:
pilas.interfaz.Selector()
except TypeError:
assert True  # the texto (text) argument is required.
selector = pilas.interfaz.Selector("hola")
assert selector
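# Note (illustrative, assumes the suite is run with pytest): the try/except
# blocks above still pass when no TypeError is raised at all. A stricter
# equivalent sketch would be:
#
#   import pytest
#   with pytest.raises(TypeError):
#       pilas.interfaz.ListaSeleccion()
#   with pytest.raises(TypeError):
#       pilas.interfaz.Selector()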
|
irvingprog/pilas
|
pilas/test/test_interface.py
|
Python
|
lgpl-3.0
| 736
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training.input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import itertools
import tensorflow.python.platform
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class MatchFilenamesOnceTest(tf.test.TestCase):
def test(self):
temp_dir = self.get_temp_dir()
filenames = [os.path.join(temp_dir, n) for n in os.listdir(temp_dir)]
additional = [os.path.join(self.get_temp_dir(), "match_filenames.%d" % i)
for i in range(3)]
for name in additional:
open(name, "w").write("Some contents")
filenames = list(set(filenames + additional))
with self.test_session():
star = tf.train.match_filenames_once(
os.path.join(self.get_temp_dir(), "*"))
question = tf.train.match_filenames_once(
os.path.join(self.get_temp_dir(), "match_filenames.?"))
one = tf.train.match_filenames_once(additional[1])
tf.initialize_all_variables().run()
self.assertItemsEqual(map(tf.compat.as_bytes, filenames), star.eval())
self.assertItemsEqual(map(tf.compat.as_bytes, additional),
question.eval())
self.assertItemsEqual([tf.compat.as_bytes(additional[1])], one.eval())
class LimitEpochsTest(tf.test.TestCase):
def testNoLimit(self):
with self.test_session():
seven = tf.constant(7)
seven_forever = tf.train.limit_epochs(seven)
tf.initialize_all_variables().run()
for i in range(100):
self.assertEqual(7, seven_forever.eval())
def testLimit(self):
with self.test_session():
love_me = tf.constant("Love Me")
love_me_two_times = tf.train.limit_epochs(love_me, num_epochs=2)
tf.initialize_all_variables().run()
self.assertEqual(b"Love Me", love_me_two_times.eval())
self.assertEqual(b"Love Me", love_me_two_times.eval())
with self.assertRaises(tf.errors.OutOfRangeError):
love_me_two_times.eval()
class StringInputProducerTest(tf.test.TestCase):
def testNoShuffle(self):
with self.test_session():
strings = [b"to", b"be", b"or", b"not", b"to", b"be"]
num_epochs = 3
queue = tf.train.string_input_producer(
strings, num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(len(strings) * num_epochs)
dequeue = queue.dequeue()
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
# No randomness, so just see repeated copies of the input.
output = dequeue_many.eval()
self.assertAllEqual(strings * num_epochs, output)
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
dequeue.eval()
for thread in threads:
thread.join()
def testShuffle(self):
with self.test_session():
strings = [b"a", b"b", b"c"]
num_epochs = 600
queue = tf.train.string_input_producer(
strings, num_epochs=num_epochs, shuffle=True, seed=271828)
dequeue_many = queue.dequeue_many(len(strings))
dequeue = queue.dequeue()
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
# Validate that we only shuffle the strings within an epoch and
# count how often each possible order appears.
expected = [b"abc", b"acb", b"bac", b"bca", b"cab", b"cba"]
frequency = {}
for e in expected:
frequency[e] = 0
for _ in range(num_epochs):
output = dequeue_many.eval()
key = b"".join(output)
self.assertIn(key, expected)
frequency[key] += 1
# Expect an approximately even distribution over all possible orders.
expected_frequency = num_epochs / len(expected)
margin = expected_frequency * 0.4
tf.logging.info("Observed counts: %s", frequency)
for key in expected:
value = frequency[key]
self.assertGreater(value, expected_frequency - margin)
self.assertLess(value, expected_frequency + margin)
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
dequeue.eval()
for thread in threads:
thread.join()
def testNullStringPython(self):
# Graph-construction time check for empty string list:
with self.test_session():
with self.assertRaises(ValueError):
_ = tf.train.string_input_producer([])
def testNullString(self):
# Runtime check for empty string list. This is slightly oblique:
# The queue runner should die with an assertion error on the null
# input tensor, causing the dequeue to fail with an OutOfRangeError.
with self.test_session():
coord = tf.train.Coordinator()
queue = tf.train.string_input_producer(tf.constant([], dtype=tf.string))
dequeue = queue.dequeue()
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners(coord=coord)
with self.assertRaises(tf.errors.OutOfRangeError):
dequeue.eval()
coord.request_stop()
for thread in threads:
thread.join()
class RangeInputProducerTest(tf.test.TestCase):
def testNoShuffle(self):
with self.test_session():
num_epochs = 3
range_size = 5
queue = tf.train.range_input_producer(
range_size, num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(range_size * num_epochs)
dequeue = queue.dequeue()
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
# No randomness, so just see repeated copies of the input.
output = dequeue_many.eval()
self.assertAllEqual(list(xrange(range_size)) * num_epochs, output)
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
dequeue.eval()
for thread in threads:
thread.join()
def testShuffle(self):
with self.test_session():
num_epochs = 200
range_size = 2
queue = tf.train.range_input_producer(
range_size, num_epochs=num_epochs, shuffle=True, seed=314159)
dequeue_many = queue.dequeue_many(range_size)
dequeue = queue.dequeue()
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
# Validate that we only shuffle the integers within an epoch and
# count how often each possible order appears.
expected = [12, 21]
frequency = {}
for e in expected:
frequency[e] = 0
for _ in range(num_epochs):
output = dequeue_many.eval()
key = 10 * (output[0] + 1) + (output[1] + 1)
self.assertIn(key, expected)
frequency[key] += 1
# Expect an approximately even distribution over all possible orders.
expected_frequency = num_epochs / len(expected)
margin = expected_frequency * 0.4
tf.logging.info("Observed counts: %s", frequency)
for key in expected:
value = frequency[key]
self.assertGreater(value, expected_frequency - margin)
self.assertLess(value, expected_frequency + margin)
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
dequeue.eval()
for thread in threads:
thread.join()
class SliceInputProducerTest(tf.test.TestCase):
def testNoShuffle(self):
with self.test_session() as sess:
num_epochs = 3
source_strings = [b"Alpha", b"Beta", b"Delta", b"Gamma"]
source_ints = [2, 3, 5, 7]
slices = tf.train.slice_input_producer(
[source_strings, source_ints], num_epochs=num_epochs, shuffle=False)
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
# No randomness, so just see repeated copies of the input.
num_items = len(source_strings) * num_epochs
output = [sess.run(slices) for _ in range(num_items)]
out_strings, out_ints = zip(*output)
self.assertAllEqual(source_strings * num_epochs, out_strings)
self.assertAllEqual(source_ints * num_epochs, out_ints)
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(slices)
for thread in threads:
thread.join()
def testShuffle(self):
with self.test_session() as sess:
num_epochs = 1200
source_strings = ["A", "B", "D", "G"]
source_ints = [7, 3, 5, 2]
slices = tf.train.slice_input_producer(
[source_strings, source_ints], num_epochs=num_epochs, shuffle=True,
seed=161803)
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
# Validate that we only shuffle the integers within an epoch and
# count how often each possible order appears.
expected = [b",".join(x) for x in
itertools.permutations([b"A7", b"B3", b"D5", b"G2"])]
frequency = {}
for e in expected:
frequency[e] = 0
for _ in range(num_epochs):
output = [sess.run(slices) for _ in range(len(source_strings))]
key = b",".join([s + tf.compat.as_bytes(str(i)) for s, i in output])
self.assertIn(key, expected)
frequency[key] += 1
# Expect an approximately even distribution over all possible orders.
expected_frequency = num_epochs / len(expected)
margin = expected_frequency * 0.4
tf.logging.info("Observed counts: %s", frequency)
for key in expected:
value = frequency[key]
self.assertGreater(value, expected_frequency - margin)
self.assertLess(value, expected_frequency + margin)
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(slices)
for thread in threads:
thread.join()
class BatchTest(tf.test.TestCase):
def testOneThread(self):
with self.test_session() as sess:
batch_size = 10
num_batches = 3
zero64 = tf.constant(0, dtype=tf.int64)
examples = tf.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = tf.train.batch([counter, "string"], batch_size=batch_size)
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
for i in range(num_batches):
results = sess.run(batched)
self.assertAllEqual(results[0], np.arange(i * batch_size,
(i + 1) * batch_size))
self.assertAllEqual(results[1], [b"string"] * batch_size)
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testOneThreadEnqueueMany(self):
with self.test_session() as sess:
batch_size = 10
num_batches = 3
zero64 = tf.constant(0, dtype=tf.int64)
examples = tf.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
pre_batched = tf.train.batch([counter, "string"], batch_size=2)
batched = tf.train.batch(pre_batched, enqueue_many=True,
batch_size=batch_size)
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
for i in range(num_batches):
results = sess.run(batched)
self.assertAllEqual(results[0], np.arange(i * batch_size,
(i + 1) * batch_size))
self.assertAllEqual(results[1], [b"string"] * batch_size)
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testManyThreads(self):
with self.test_session() as sess:
batch_size = 10
num_batches = 3
zero64 = tf.constant(0, dtype=tf.int64)
examples = tf.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = tf.train.batch([counter, "string"], batch_size=batch_size,
num_threads=4)
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = sess.run(batched)
tf.logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
all_counts.extend(results[0])
self.assertAllEqual(results[1], [b"string"] * batch_size)
self.assertItemsEqual(all_counts, range(num_batches * batch_size))
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
class BatchJoinTest(tf.test.TestCase):
def testTwoThreads(self):
with self.test_session() as sess:
# Two threads, the first generates (0..69, "a").
num_a = 70
zero64 = tf.constant(0, dtype=tf.int64)
examples = tf.Variable(zero64)
counter = examples.count_up_to(num_a)
# The second generates (99, "b") 90 times and then stops.
num_b = 90
ninety_nine = tf.train.limit_epochs(
tf.constant(99, dtype=tf.int64), num_b)
# These get joined together and grouped into batches of 5.
batch_size = 5
batched = tf.train.batch_join([[counter, "a"], [ninety_nine, "b"]],
batch_size=batch_size)
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = sess.run(batched)
tf.logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[1]), batch_size)
which_a = [i for i, s in enumerate(results[1]) if s == b"a"]
which_b = [i for i, s in enumerate(results[1]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if len(which_a) > 0 and len(which_b) > 0: saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# Some minimum level of mixing of the results of both threads.
self.assertGreater(saw_both, 1)
# Verify the order of results from "a" were preserved.
self.assertAllEqual(all_a, np.arange(num_a))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
class ShuffleBatchTest(tf.test.TestCase):
def testOneThread(self):
with self.test_session() as sess:
batch_size = 10
num_batches = 3
zero64 = tf.constant(0, dtype=tf.int64)
examples = tf.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = tf.train.shuffle_batch(
[counter, "string"], batch_size=batch_size, capacity=32,
min_after_dequeue=16, seed=141421)
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = sess.run(batched)
self.assertEqual(len(results[0]), batch_size)
all_counts.extend(results[0])
self.assertAllEqual(results[1], [b"string"] * batch_size)
# Results scrambled, but include all the expected numbers.
deltas = [all_counts[i + 1] - all_counts[i]
for i in range(len(all_counts) - 1)]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertItemsEqual(all_counts, range(num_batches * batch_size))
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testManyThreads(self):
with self.test_session() as sess:
batch_size = 10
num_batches = 3
zero64 = tf.constant(0, dtype=tf.int64)
examples = tf.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = tf.train.shuffle_batch(
[counter, "string"], batch_size=batch_size, capacity=32,
min_after_dequeue=16, seed=173205, num_threads=4)
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = sess.run(batched)
tf.logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
all_counts.extend(results[0])
self.assertAllEqual(results[1], [b"string"] * batch_size)
# Results scrambled, but include all the expected numbers.
deltas = [all_counts[i + 1] - all_counts[i]
for i in range(len(all_counts) - 1)]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertItemsEqual(all_counts, range(num_batches * batch_size))
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
class ShuffleBatchJoinTest(tf.test.TestCase):
def testTwoThreads(self):
with self.test_session() as sess:
# Two threads, the first generates (0..24, "a").
num_a = 25
zero64 = tf.constant(0, dtype=tf.int64)
examples = tf.Variable(zero64)
counter = examples.count_up_to(num_a)
# The second generates (99, "b") 35 times and then stops.
num_b = 35
ninety_nine = tf.train.limit_epochs(
tf.constant(99, dtype=tf.int64), num_b)
# These get joined together and grouped into batches of 5.
batch_size = 5
batched = tf.train.shuffle_batch_join(
[[counter, "a"], [ninety_nine, "b"]], batch_size=batch_size,
capacity=32, min_after_dequeue=16, seed=223607)
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = sess.run(batched)
tf.logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[1]), batch_size)
which_a = [i for i, s in enumerate(results[1]) if s == b"a"]
which_b = [i for i, s in enumerate(results[1]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if len(which_a) > 0 and len(which_b) > 0: saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# Some minimum level of mixing of the results of both threads.
self.assertGreater(saw_both, 1)
# Saw all the items from "a", but scrambled.
self.assertItemsEqual(all_a, range(num_a))
deltas = [all_a[i + 1] - all_a[i]
for i in range(len(all_a) - 1)]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
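# Sketch (illustrative, not part of the original test suite): the pattern these
# tests exercise is producer -> batch -> queue runners, all driven by a session:
#
#   with tf.Session() as sess:
#       queue = tf.train.string_input_producer([b"a", b"b"], num_epochs=1, shuffle=False)
#       batched = tf.train.batch([queue.dequeue()], batch_size=2)
#       tf.initialize_all_variables().run()
#       threads = tf.train.start_queue_runners(sess=sess)
#       print(sess.run(batched)[0])   # [b"a" b"b"]
#       for thread in threads:
#           thread.join()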
if __name__ == "__main__":
tf.test.main()
|
DeepThoughtTeam/tensorflow
|
tensorflow/python/training/input_test.py
|
Python
|
apache-2.0
| 20,244
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
FORMAT = """%(asctime)s
:%(name)s:
%(levelname)s
:%(module)s
:%(funcName)s
:%(lineno)s
:%(message)s"""
SELENIUM_REMOTE_CONNECTION = "selenium.webdriver.remote.remote_connection"
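# Illustration (not part of the original module): FORMAT uses stdlib %-style
# logging placeholders and is presumably meant to be passed to the logging
# configuration, e.g.:
#
#   import logging
#   logging.basicConfig(format=FORMAT, level=logging.DEBUG)
#   logging.getLogger(SELENIUM_REMOTE_CONNECTION).setLevel(logging.INFO)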
|
NejcZupec/ggrc-core
|
test/selenium/src/lib/constants/log.py
|
Python
|
apache-2.0
| 344
|
# Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from itertools import count
from threading import Event
from base import benchmark, BenchmarkThread
from six.moves import range
log = logging.getLogger(__name__)
sentinel = object()
class Runner(BenchmarkThread):
def __init__(self, *args, **kwargs):
BenchmarkThread.__init__(self, *args, **kwargs)
self.num_started = count()
self.num_finished = count()
self.event = Event()
def insert_next(self, previous_result=sentinel):
if previous_result is not sentinel:
if isinstance(previous_result, BaseException):
log.error("Error on insert: %r", previous_result)
if next(self.num_finished) >= self.num_queries:
self.event.set()
if next(self.num_started) <= self.num_queries:
future = self.session.execute_async(self.query, self.values, timeout=None)
future.add_callbacks(self.insert_next, self.insert_next)
def run(self):
self.start_profile()
if self.protocol_version >= 3:
concurrency = 1000
else:
concurrency = 100
for _ in range(min(concurrency, self.num_queries)):
self.insert_next()
self.event.wait()
self.finish_profile()
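# Descriptive note (not part of the original benchmark): insert_next() is
# registered as both the success and the error callback of each async request,
# so every completed query immediately schedules the next one. num_started and
# num_finished are itertools.count() iterators used as monotonically increasing
# counters, and the Event is set once num_queries results have come back, which
# is what run() waits on after priming `concurrency` requests.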
if __name__ == "__main__":
benchmark(Runner)
|
bbirand/python-driver
|
benchmarks/callback_full_pipeline.py
|
Python
|
apache-2.0
| 1,906
|
from types import *
from robot import utils
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.common.exceptions import NoSuchWindowException
class WindowManager(object):
def __init__(self):
self._strategies = {
'title': self._select_by_title,
'name': self._select_by_name,
'url': self._select_by_url,
None: self._select_by_default
}
def get_window_ids(self, browser):
return [ window_info[1] for window_info in self._get_window_infos(browser) ]
def get_window_names(self, browser):
return [ window_info[2] for window_info in self._get_window_infos(browser) ]
def get_window_titles(self, browser):
return [ window_info[3] for window_info in self._get_window_infos(browser) ]
def select(self, browser, locator):
assert browser is not None
if locator is not None:
if isinstance(locator, list):
self._select_by_excludes(browser, locator)
return
if locator.lower() == "self" or locator.lower() == "current":
return
if locator.lower() == "new" or locator.lower() == "popup":
self._select_by_last_index(browser)
return
(prefix, criteria) = self._parse_locator(locator)
strategy = self._strategies.get(prefix)
if strategy is None:
raise ValueError("Window locator with prefix '" + prefix + "' is not supported")
return strategy(browser, criteria)
# Strategy routines, private
def _select_by_title(self, browser, criteria):
self._select_matching(
browser,
lambda window_info: window_info[3].strip().lower() == criteria.lower(),
"Unable to locate window with title '" + criteria + "'")
def _select_by_name(self, browser, criteria):
self._select_matching(
browser,
lambda window_info: window_info[2].strip().lower() == criteria.lower(),
"Unable to locate window with name '" + criteria + "'")
def _select_by_url(self, browser, criteria):
self._select_matching(
browser,
lambda window_info: window_info[4].strip().lower() == criteria.lower(),
"Unable to locate window with URL '" + criteria + "'")
def _select_by_default(self, browser, criteria):
if criteria is None or len(criteria) == 0 or criteria.lower() == "null":
handles = browser.get_window_handles()
browser.switch_to_window(handles[0])
return
try:
starting_handle = browser.get_current_window_handle()
except NoSuchWindowException:
starting_handle = None
for handle in browser.get_window_handles():
browser.switch_to_window(handle)
if criteria == handle:
return
for item in browser.get_current_window_info()[2:4]:
if item.strip().lower() == criteria.lower():
return
if starting_handle:
browser.switch_to_window(starting_handle)
raise ValueError("Unable to locate window with handle or name or title or URL '" + criteria + "'")
def _select_by_last_index(self, browser):
handles = browser.get_window_handles()
try:
if handles[-1] == browser.get_current_window_handle():
raise AssertionError("No new window at last index. Please use '@{ex}= | List Windows' + new window trigger + 'Select Window | ${ex}' to find it.")
except IndexError:
raise AssertionError("No window found")
except NoSuchWindowException:
raise AssertionError("Currently no focus window. where are you making a popup window?")
browser.switch_to_window(handles[-1])
def _select_by_excludes(self, browser, excludes):
for handle in browser.get_window_handles():
if handle not in excludes:
browser.switch_to_window(handle)
return
raise ValueError("Unable to locate new window")
# Private
def _parse_locator(self, locator):
prefix = None
criteria = locator
if locator is not None and len(locator) > 0:
locator_parts = locator.partition('=')
if len(locator_parts[1]) > 0:
prefix = locator_parts[0].strip().lower()
criteria = locator_parts[2].strip()
if prefix is None or prefix == 'name':
if criteria is None or criteria.lower() == 'main':
criteria = ''
return (prefix, criteria)
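# Examples of what _parse_locator() returns (descriptive note, not part of the
# original library):
#   _parse_locator("title=My Window")  -> ("title", "My Window")
#   _parse_locator("My Window")        -> (None, "My Window")
#   _parse_locator("name=main")        -> ("name", "")   # 'main' collapses to empty criteria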
def _get_window_infos(self, browser):
window_infos = []
try:
starting_handle = browser.get_current_window_handle()
except NoSuchWindowException:
starting_handle = None
try:
for handle in browser.get_window_handles():
browser.switch_to_window(handle)
window_infos.append(browser.get_current_window_info())
finally:
if starting_handle:
browser.switch_to_window(starting_handle)
return window_infos
def _select_matching(self, browser, matcher, error):
try:
starting_handle = browser.get_current_window_handle()
except NoSuchWindowException:
starting_handle = None
for handle in browser.get_window_handles():
browser.switch_to_window(handle)
if matcher(browser.get_current_window_info()):
return
if starting_handle:
browser.switch_to_window(starting_handle)
raise ValueError(error)
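# Usage sketch (hypothetical, not part of the library; assumes a Selenium2Library
# browser wrapper exposing get_window_handles() and get_current_window_info()):
#
#   manager = WindowManager()
#   manager.select(browser, "title=Checkout")   # switch to the window titled "Checkout"
#   manager.select(browser, "new")              # switch to the most recently opened window
#   manager.select(browser, None)               # fall back to the first/main window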
|
gotcha/Selenium2Library
|
src/Selenium2Library/locators/windowmanager.py
|
Python
|
apache-2.0
| 5,724
|
import pytest
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from osf.models import AbstractNode, NodeLog
from osf.utils import permissions
from osf.utils.sanitize import strip_html
from osf_tests.factories import (
NodeFactory,
ProjectFactory,
OSFGroupFactory,
RegistrationFactory,
AuthUserFactory,
PrivateLinkFactory,
)
from tests.base import fake
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.mark.django_db
class TestNodeChildrenList:
@pytest.fixture()
def private_project(self, user):
private_project = ProjectFactory()
private_project.add_contributor(
user,
permissions=permissions.WRITE
)
private_project.save()
return private_project
@pytest.fixture()
def component(self, user, private_project):
return NodeFactory(parent=private_project, creator=user)
@pytest.fixture()
def pointer(self):
return ProjectFactory()
@pytest.fixture()
def private_project_url(self, private_project):
return '/{}nodes/{}/children/'.format(API_BASE, private_project._id)
@pytest.fixture()
def public_project(self, user):
return ProjectFactory(is_public=True, creator=user)
@pytest.fixture()
def public_component(self, user, public_project):
return NodeFactory(parent=public_project, creator=user, is_public=True)
@pytest.fixture()
def public_project_url(self, user, public_project):
return '/{}nodes/{}/children/'.format(API_BASE, public_project._id)
@pytest.fixture()
def view_only_link(self, private_project):
view_only_link = PrivateLinkFactory(name='node_view_only_link')
view_only_link.nodes.add(private_project)
view_only_link.save()
return view_only_link
def test_return_public_node_children_list(
self, app, public_component,
public_project_url):
# test_return_public_node_children_list_logged_out
res = app.get(public_project_url)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == public_component._id
# test_return_public_node_children_list_logged_in
non_contrib = AuthUserFactory()
res = app.get(public_project_url, auth=non_contrib.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == public_component._id
def test_return_private_node_children_list(
self, app, user, component, private_project, private_project_url):
# test_return_private_node_children_list_logged_out
res = app.get(private_project_url, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_return_private_node_children_list_logged_in_non_contributor
non_contrib = AuthUserFactory()
res = app.get(
private_project_url,
auth=non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
# test_return_private_node_children_list_logged_in_contributor
res = app.get(private_project_url, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == component._id
# test_return_private_node_children_osf_group_member_admin
group_mem = AuthUserFactory()
group = OSFGroupFactory(creator=group_mem)
private_project.add_osf_group(group, permissions.ADMIN)
res = app.get(private_project_url, auth=group_mem.auth)
assert res.status_code == 200
# Can view node children for which you have implicit admin permissions
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == component._id
def test_node_children_list_does_not_include_pointers(
self, app, user, component, private_project_url):
res = app.get(private_project_url, auth=user.auth)
assert len(res.json['data']) == 1
def test_node_children_list_does_not_include_unauthorized_projects(
self, app, user, component, private_project, private_project_url):
NodeFactory(parent=private_project)
res = app.get(private_project_url, auth=user.auth)
assert len(res.json['data']) == 1
def test_node_children_list_does_not_include_deleted(
self, app, user, public_project, public_component,
component, public_project_url):
child_project = NodeFactory(parent=public_project, creator=user)
child_project.save()
res = app.get(public_project_url, auth=user.auth)
assert res.status_code == 200
ids = [node['id'] for node in res.json['data']]
assert child_project._id in ids
assert 2 == len(ids)
child_project.is_deleted = True
child_project.save()
res = app.get(public_project_url, auth=user.auth)
assert res.status_code == 200
ids = [node['id'] for node in res.json['data']]
assert child_project._id not in ids
assert 1 == len(ids)
def test_node_children_list_does_not_include_node_links(
self, app, user, public_project, public_component,
public_project_url):
pointed_to = ProjectFactory(is_public=True)
public_project.add_pointer(
pointed_to,
auth=Auth(public_project.creator)
)
res = app.get(public_project_url, auth=user.auth)
ids = [node['id'] for node in res.json['data']]
assert public_component._id in ids # sanity check
assert pointed_to._id not in ids
# Regression test for https://openscience.atlassian.net/browse/EMB-593
# Duplicates returned in child count
def test_node_children_related_counts_duplicate_query_results(self, app, user, public_project,
private_project, public_project_url):
user_2 = AuthUserFactory()
# Adding a child component
child = NodeFactory(parent=public_project, creator=user, is_public=True, category='software')
child.add_contributor(user_2, permissions.WRITE, save=True)
# Adding a grandchild
NodeFactory(parent=child, creator=user, is_public=True)
# Adding a node link
public_project.add_pointer(
private_project,
auth=Auth(public_project.creator)
)
# Assert NodeChildrenList returns one result
res = app.get(public_project_url, auth=user.auth)
assert len(res.json['data']) == 1
assert res.json['data'][0]['id'] == child._id
project_url = '/{}nodes/{}/?related_counts=children'.format(API_BASE, public_project._id)
res = app.get(project_url, auth=user.auth)
assert res.status_code == 200
# Verifying related_counts match direct children count (grandchildren not included, pointers not included)
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1
def test_node_children_related_counts(self, app, user, public_project):
parent = ProjectFactory(creator=user, is_public=False)
user_2 = AuthUserFactory()
parent.add_contributor(user_2, permissions.ADMIN)
child = NodeFactory(parent=parent, creator=user_2, is_public=False, category='software')
NodeFactory(parent=child, creator=user_2, is_public=False)
# child has one component. `user` can view due to implicit admin perms
component_url = '/{}nodes/{}/children/'.format(API_BASE, child._id, auth=user.auth)
res = app.get(component_url, auth=user.auth)
assert len(res.json['data']) == 1
project_url = '/{}nodes/{}/?related_counts=children'.format(API_BASE, child._id)
res = app.get(project_url, auth=user.auth)
assert res.status_code == 200
# Nodes with implicit admin perms are also included in the count
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1
def test_child_counts_permissions(self, app, user, public_project):
NodeFactory(parent=public_project, creator=user)
url = '/{}nodes/{}/?related_counts=children'.format(API_BASE, public_project._id)
user_two = AuthUserFactory()
# Unauthorized
res = app.get(url)
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0
# Logged in noncontrib
res = app.get(url, auth=user_two.auth)
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0
# Logged in contrib
res = app.get(url, auth=user.auth)
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1
def test_private_node_children_with_view_only_link(self, user, app, private_project,
component, view_only_link, private_project_url):
# get node related_counts with vol before vol is attached to components
node_url = '/{}nodes/{}/?related_counts=children&view_only={}'.format(API_BASE,
private_project._id, view_only_link.key)
res = app.get(node_url)
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0
# view only link is not attached to components
view_only_link_url = '{}?view_only={}'.format(private_project_url, view_only_link.key)
res = app.get(view_only_link_url)
ids = [node['id'] for node in res.json['data']]
assert res.status_code == 200
assert len(ids) == 0
assert component._id not in ids
# view only link is attached to components
view_only_link.nodes.add(component)
res = app.get(view_only_link_url)
ids = [node['id'] for node in res.json['data']]
assert res.status_code == 200
assert component._id in ids
assert 'contributors' in res.json['data'][0]['relationships']
assert 'implicit_contributors' in res.json['data'][0]['relationships']
assert 'bibliographic_contributors' in res.json['data'][0]['relationships']
# get node related_counts with vol once vol is attached to components
res = app.get(node_url)
assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1
# make private vol anonymous
view_only_link.anonymous = True
view_only_link.save()
res = app.get(view_only_link_url)
assert 'contributors' not in res.json['data'][0]['relationships']
assert 'implicit_contributors' not in res.json['data'][0]['relationships']
assert 'bibliographic_contributors' not in res.json['data'][0]['relationships']
# delete vol
view_only_link.is_deleted = True
view_only_link.save()
res = app.get(view_only_link_url, expect_errors=True)
assert res.status_code == 401
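# Descriptive note (not part of the original tests): the related_counts
# assertions above read the child count from the JSON:API relationship payload,
# i.e. res.json['data']['relationships']['children']['links']['related']['meta']['count'],
# while the children themselves are listed at '/{}nodes/<node_id>/children/'.format(API_BASE).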
@pytest.mark.django_db
class TestNodeChildrenListFiltering:
def test_node_child_filtering(self, app, user):
project = ProjectFactory(creator=user)
title_one, title_two = fake.bs(), fake.bs()
component = NodeFactory(title=title_one, parent=project)
component_two = NodeFactory(title=title_two, parent=project)
url = '/{}nodes/{}/children/?filter[title]={}'.format(
API_BASE,
project._id,
title_one
)
res = app.get(url, auth=user.auth)
ids = [node['id'] for node in res.json['data']]
assert component._id in ids
assert component_two._id not in ids
@pytest.mark.django_db
class TestNodeChildCreate:
@pytest.fixture()
def project(self, user):
return ProjectFactory(creator=user, is_public=True)
@pytest.fixture()
def url(self, project):
return '/{}nodes/{}/children/'.format(API_BASE, project._id)
@pytest.fixture()
def child(self):
return {
'data': {
'type': 'nodes',
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project'
}
}
}
def test_creates_child(self, app, user, project, child, url):
# test_creates_child_logged_out_user
res = app.post_json_api(url, child, expect_errors=True)
assert res.status_code == 401
project.reload()
assert len(project.nodes) == 0
# test_creates_child_logged_in_read_contributor
read_contrib = AuthUserFactory()
project.add_contributor(
read_contrib,
permissions=permissions.READ,
auth=Auth(user), save=True
)
res = app.post_json_api(
url, child, auth=read_contrib.auth,
expect_errors=True
)
assert res.status_code == 403
project.reload()
assert len(project.nodes) == 0
# test_creates_child_logged_in_non_contributor
non_contrib = AuthUserFactory()
res = app.post_json_api(
url, child, auth=non_contrib.auth,
expect_errors=True
)
assert res.status_code == 403
project.reload()
assert len(project.nodes) == 0
# test_creates_child_group_member_read
group_mem = AuthUserFactory()
group = OSFGroupFactory(creator=group_mem)
project.add_osf_group(group, permissions.READ)
res = app.post_json_api(
url, child, auth=group_mem.auth,
expect_errors=True
)
assert res.status_code == 403
project.update_osf_group(group, permissions.WRITE)
res = app.post_json_api(
url, child, auth=group_mem.auth,
expect_errors=True
)
assert res.status_code == 201
# test_creates_child_no_type
child = {
'data': {
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}
}
}
res = app.post_json_api(url, child, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/type'
# test_creates_child_incorrect_type
child = {
'data': {
'type': 'Wrong type.',
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}
}
}
res = app.post_json_api(url, child, auth=user.auth, expect_errors=True)
assert res.status_code == 409
assert res.json['errors'][0]['detail'] == 'This resource has a type of "nodes", but you set the json body\'s type field to "Wrong type.". You probably need to change the type field to match the resource\'s type.'
# test_creates_child_properties_not_nested
child = {
'data': {
'attributes': {
'title': 'child',
'description': 'this is a child project'
},
'category': 'project'
}
}
res = app.post_json_api(url, child, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/type'
assert res.json['errors'][1]['detail'] == 'This field is required.'
assert res.json['errors'][1]['source']['pointer'] == '/data/attributes/category'
def test_creates_child_logged_in_write_contributor(
self, app, user, project, child, url):
write_contrib = AuthUserFactory()
project.add_contributor(
write_contrib,
permissions=permissions.WRITE,
auth=Auth(user),
save=True)
res = app.post_json_api(url, child, auth=write_contrib.auth)
assert res.status_code == 201
assert res.json['data']['attributes']['title'] == child['data']['attributes']['title']
assert res.json['data']['attributes']['description'] == child['data']['attributes']['description']
assert res.json['data']['attributes']['category'] == child['data']['attributes']['category']
project.reload()
child_id = res.json['data']['id']
assert child_id == project.nodes[0]._id
assert AbstractNode.load(child_id).logs.latest(
).action == NodeLog.PROJECT_CREATED
def test_creates_child_logged_in_owner(
self, app, user, project, child, url):
res = app.post_json_api(url, child, auth=user.auth)
assert res.status_code == 201
assert res.json['data']['attributes']['title'] == child['data']['attributes']['title']
assert res.json['data']['attributes']['description'] == child['data']['attributes']['description']
assert res.json['data']['attributes']['category'] == child['data']['attributes']['category']
project.reload()
assert res.json['data']['id'] == project.nodes[0]._id
assert project.nodes[0].logs.latest().action == NodeLog.PROJECT_CREATED
def test_creates_child_creates_child_and_sanitizes_html_logged_in_owner(
self, app, user, project, url):
title = '<em>Reasonable</em> <strong>Project</strong>'
description = 'An <script>alert("even reasonabler")</script> child'
res = app.post_json_api(url, {
'data': {
'type': 'nodes',
'attributes': {
'title': title,
'description': description,
'category': 'project',
'public': True
}
}
}, auth=user.auth)
child_id = res.json['data']['id']
assert res.status_code == 201
url = '/{}nodes/{}/'.format(API_BASE, child_id)
res = app.get(url, auth=user.auth)
assert res.json['data']['attributes']['title'] == strip_html(title)
assert res.json['data']['attributes']['description'] == strip_html(
description)
assert res.json['data']['attributes']['category'] == 'project'
project.reload()
child_id = res.json['data']['id']
assert child_id == project.nodes[0]._id
assert AbstractNode.load(child_id).logs.latest(
).action == NodeLog.PROJECT_CREATED
def test_cannot_create_child_on_a_registration(self, app, user, project):
registration = RegistrationFactory(project=project, creator=user)
url = '/{}nodes/{}/children/'.format(API_BASE, registration._id)
res = app.post_json_api(url, {
'data': {
'type': 'nodes',
'attributes': {
'title': fake.catch_phrase(),
'description': fake.bs(),
'category': 'project',
'public': True,
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 404
@pytest.mark.django_db
class TestNodeChildrenBulkCreate:
@pytest.fixture()
def project(self, user):
return ProjectFactory(creator=user, is_public=True)
@pytest.fixture()
def url(self, project):
return '/{}nodes/{}/children/'.format(API_BASE, project._id)
@pytest.fixture()
def child_one(self):
return {
'type': 'nodes',
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project'
}
}
@pytest.fixture()
def child_two(self):
return {
'type': 'nodes',
'attributes': {
'title': 'second child',
'description': 'this is my hypothesis',
'category': 'hypothesis'
}
}
def test_bulk_children_create_blank_request(self, app, user, url):
res = app.post_json_api(
url, auth=user.auth,
expect_errors=True, bulk=True)
assert res.status_code == 400
def test_bulk_creates_children_limits(self, app, user, child_one, url):
res = app.post_json_api(
url, {'data': [child_one] * 101},
auth=user.auth, expect_errors=True, bulk=True
)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Bulk operation limit is 100, got 101.'
assert res.json['errors'][0]['source']['pointer'] == '/data'
def test_bulk_creates_children_auth_errors(
self, app, user, project, child_one, child_two, url):
# test_bulk_creates_children_logged_out_user
res = app.post_json_api(
url,
{'data': [child_one, child_two]},
expect_errors=True, bulk=True
)
assert res.status_code == 401
project.reload()
assert len(project.nodes) == 0
# test_bulk_creates_children_logged_in_read_contributor
read_contrib = AuthUserFactory()
project.add_contributor(
read_contrib,
permissions=permissions.READ,
auth=Auth(user),
save=True)
res = app.post_json_api(
url,
{'data': [child_one, child_two]},
auth=read_contrib.auth,
expect_errors=True, bulk=True)
assert res.status_code == 403
project.reload()
assert len(project.nodes) == 0
# test_bulk_creates_children_logged_in_non_contributor
non_contrib = AuthUserFactory()
res = app.post_json_api(
url,
{'data': [child_one, child_two]},
auth=non_contrib.auth,
expect_errors=True, bulk=True)
assert res.status_code == 403
project.reload()
assert len(project.nodes) == 0
def test_bulk_creates_children_logged_in_owner(
self, app, user, project, child_one, child_two, url):
res = app.post_json_api(
url,
{'data': [child_one, child_two]},
auth=user.auth, bulk=True)
assert res.status_code == 201
assert res.json['data'][0]['attributes']['title'] == child_one['attributes']['title']
assert res.json['data'][0]['attributes']['description'] == child_one['attributes']['description']
assert res.json['data'][0]['attributes']['category'] == child_one['attributes']['category']
assert res.json['data'][1]['attributes']['title'] == child_two['attributes']['title']
assert res.json['data'][1]['attributes']['description'] == child_two['attributes']['description']
assert res.json['data'][1]['attributes']['category'] == child_two['attributes']['category']
project.reload()
nodes = project.nodes
assert res.json['data'][0]['id'] == nodes[0]._id
assert res.json['data'][1]['id'] == nodes[1]._id
assert nodes[0].logs.latest().action == NodeLog.PROJECT_CREATED
assert nodes[1].logs.latest().action == NodeLog.PROJECT_CREATED
def test_bulk_creates_children_child_logged_in_write_contributor(
self, app, user, project, child_one, child_two, url):
write_contrib = AuthUserFactory()
project.add_contributor(
write_contrib,
permissions=permissions.WRITE,
auth=Auth(user),
save=True)
res = app.post_json_api(
url,
{'data': [child_one, child_two]},
auth=write_contrib.auth, bulk=True)
assert res.status_code == 201
assert res.json['data'][0]['attributes']['title'] == child_one['attributes']['title']
assert res.json['data'][0]['attributes']['description'] == child_one['attributes']['description']
assert res.json['data'][0]['attributes']['category'] == child_one['attributes']['category']
assert res.json['data'][1]['attributes']['title'] == child_two['attributes']['title']
assert res.json['data'][1]['attributes']['description'] == child_two['attributes']['description']
assert res.json['data'][1]['attributes']['category'] == child_two['attributes']['category']
project.reload()
child_id = res.json['data'][0]['id']
child_two_id = res.json['data'][1]['id']
nodes = project.nodes
assert child_id == nodes[0]._id
assert child_two_id == nodes[1]._id
assert AbstractNode.load(child_id).logs.latest(
).action == NodeLog.PROJECT_CREATED
assert nodes[1].logs.latest().action == NodeLog.PROJECT_CREATED
def test_bulk_creates_children_and_sanitizes_html_logged_in_owner(
self, app, user, project, url):
title = '<em>Reasoning</em> <strong>Aboot Projects</strong>'
description = 'A <script>alert("super reasonable")</script> child'
res = app.post_json_api(url, {
'data': [{
'type': 'nodes',
'attributes': {
'title': title,
'description': description,
'category': 'project',
'public': True
}
}]
}, auth=user.auth, bulk=True)
child_id = res.json['data'][0]['id']
assert res.status_code == 201
url = '/{}nodes/{}/'.format(API_BASE, child_id)
res = app.get(url, auth=user.auth)
assert res.json['data']['attributes']['title'] == strip_html(title)
assert res.json['data']['attributes']['description'] == strip_html(
description)
assert res.json['data']['attributes']['category'] == 'project'
project.reload()
child_id = res.json['data']['id']
assert child_id == project.nodes[0]._id
assert AbstractNode.load(child_id).logs.latest(
).action == NodeLog.PROJECT_CREATED
def test_cannot_bulk_create_children_on_a_registration(
self, app, user, project, child_two):
registration = RegistrationFactory(project=project, creator=user)
url = '/{}nodes/{}/children/'.format(API_BASE, registration._id)
res = app.post_json_api(url, {
'data': [child_two, {
'type': 'nodes',
'attributes': {
'title': fake.catch_phrase(),
'description': fake.bs(),
'category': 'project',
'public': True,
}
}]
}, auth=user.auth, expect_errors=True, bulk=True)
assert res.status_code == 404
project.reload()
assert len(project.nodes) == 0
def test_bulk_creates_children_payload_errors(
self, app, user, project, child_two, url):
# def test_bulk_creates_children_no_type(self, app, user, project,
# child_two, url):
child = {
'data': [child_two, {
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}
}]
}
res = app.post_json_api(
url, child, auth=user.auth,
expect_errors=True, bulk=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/1/type'
project.reload()
assert len(project.nodes) == 0
# def test_bulk_creates_children_incorrect_type(self, app, user, project,
# child_two, url):
child = {
'data': [child_two, {
'type': 'Wrong type.',
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}
}]
}
res = app.post_json_api(
url, child, auth=user.auth,
expect_errors=True, bulk=True)
assert res.status_code == 409
assert res.json['errors'][0]['detail'] == 'This resource has a type of "nodes", but you set the json body\'s type field to "Wrong type.". You probably need to change the type field to match the resource\'s type.'
project.reload()
assert len(project.nodes) == 0
# def test_bulk_creates_children_properties_not_nested(self, app, user,
# project, child_two, url):
child = {
'data': [child_two, {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}]
}
res = app.post_json_api(
url, child, auth=user.auth,
expect_errors=True, bulk=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/1/type'
assert res.json['errors'][1]['detail'] == 'This field is required.'
assert res.json['errors'][1]['source']['pointer'] == '/data/1/attributes/title'
assert res.json['errors'][2]['detail'] == 'This field is required.'
assert res.json['errors'][2]['source']['pointer'] == '/data/1/attributes/category'
project.reload()
assert len(project.nodes) == 0
|
Johnetordoff/osf.io
|
api_tests/nodes/views/test_node_children_list.py
|
Python
|
apache-2.0
| 30,028
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import md5
import api.resources.impala_engine as ImpalaEngine
import api.resources.hdfs_client as HDFSClient
from hdfs.util import HdfsError
import api.resources.configurator as Configuration
from collections import defaultdict
import json
import os
"""
--------------------------------------------------------------------------
Return list(dict) of all the connections related to a request name in one hour
--------------------------------------------------------------------------
"""
def suspicious_requests(date,uri=None,ip=None,limit=250):
db = Configuration.db()
proxy_query = ("""
SELECT STRAIGHT_JOIN
ps.tdate,ps.time,ps.clientip,ps.host,ps.reqmethod,ps.useragent,
ps.resconttype,ps.duration,ps.username,ps.webcat,ps.referer,
ps.respcode,ps.uriport,ps.uripath,ps.uriquery,ps.serverip,ps.scbytes,
ps.csbytes,ps.fulluri,ps.ml_score,ps.uri_rep,ps.respcode_name,
ps.network_context
FROM
{0}.proxy_scores ps
LEFT JOIN
{0}.proxy_threat_investigation pt
ON (ps.fulluri = pt.fulluri)
WHERE
ps.y={1} AND ps.m={2} AND ps.d={3}
AND (pt.fulluri is NULL)
""").format(db,date.year,date.month,date.day)
p_filter = ""
p_filter += " AND ps.fulluri LIKE '%{0}%'".format(uri) if uri else ""
p_filter += " AND ps.clientip = '{0}'".format(ip) if ip else ""
p_filter += " ORDER BY ps.ml_score limit {0}".format(limit)
proxy_query = proxy_query + p_filter
return ImpalaEngine.execute_query_as_list(proxy_query)
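# Illustrative usage (a sketch only, not part of the module; assumes Impala and
# the proxy_scores table are reachable, and that execute_query_as_list returns
# dicts keyed by the selected column names -- the date and URI are hypothetical):
#
#   import datetime
#   hits = suspicious_requests(datetime.date(2016, 7, 8), uri="example.com", limit=50)
#   for hit in hits:
#       print hit.get("fulluri"), hit.get("ml_score")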
"""
--------------------------------------------------------------------------
Return list(dict) of all the connection details for one request.
--------------------------------------------------------------------------
"""
def details(date,uri,ip):
if not uri and not ip:
return None
db = Configuration.db()
p_details = ("""
SELECT
tdate,time,clientIp,host,webcat,respcode,respcode_name
,reqmethod,useragent,resconttype,referer,uriport,serverip
,scbytes,csbytes,fulluri,hh
FROM
{0}.proxy_edge
WHERE
y={1} AND m={2} AND d={3} AND
(fulluri='{4}' AND clientIp='{5}')
""").format(db,date.year,date.month,date.day,uri.replace("'","//'"),ip)
return ImpalaEngine.execute_query_as_list(p_details)
"""
--------------------------------------------------------------------------
Score a request
--------------------------------------------------------------------------
"""
def score_request(date,score,uri):
if not score and not uri:
return None
db = Configuration.db()
p_query = ("""
SELECT
tdate,time,clientip,host,reqmethod,useragent,resconttype
,duration,username,webcat,referer,respcode,uriport
,uripath,uriquery,serverip,scbytes,csbytes,fulluri
,word,ml_score,uri_rep,respcode_name,network_context
FROM
{0}.proxy_scores
WHERE
y={1} and m={2} and d={3}
AND fulluri = '{4}'
""").format(db,date.year,date.month,date.day,uri)
connections = ImpalaEngine.execute_query(p_query)
# add score to connections
insert_command = ("""
INSERT INTO {0}.proxy_threat_investigation PARTITION (y={1},m={2},d={3})
VALUES (""") \
.format(db,date.year,date.month,date.day)
fb_data = []
first = True
num_rows = 0
for row in connections:
cip_index = row[2]
uri_index = row[18]
        tme_index = row[1]  # time column; only the hour is used in the hash below
hash_field = [str( md5.new(str(cip_index) + str(uri_index)).hexdigest() \
+ str((tme_index.split(":"))[0]) )]
threat_data = (row[0],row[18],score)
fb_data.append([row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7] \
,row[8],row[9],row[10],row[11],row[12],row[13],row[14],row[15] \
,row[16],row[17],row[18],row[19],score,row[20],row[21],row[22], \
row[23],hash_field])
insert_command += "{0}{1}".format("," if not first else "", threat_data)
first = False
num_rows += 1
insert_command += ")"
if num_rows > 0: ImpalaEngine.execute_query(insert_command)
# create feedback file.
app_path = Configuration.spot()
feedback_path = "{0}/proxy/scored_results/{1}{2}{3}/feedback"\
.format(app_path,date.year,str(date.month).zfill(2),str(date.day).zfill(2))
ap_file = True
if len(HDFSClient.list_dir(feedback_path)) == 0:
fb_data.insert(0,["p_date","p_time","clientip","host","reqmethod",\
"useragent","resconttype","duration","username","webcat","referer",\
"respcode","uriport","uripath","uriquery","serverip","scbytes","csbytes",\
"fulluri","word","score","uri_rep","uri_sev","respcode_name",\
"network_context","hash"])
ap_file = False
HDFSClient.put_file_csv(fb_data,feedback_path,"ml_feedback.csv",append_file=ap_file)
return True
"""
--------------------------------------------------------------------------
Get expanded search from raw table.
--------------------------------------------------------------------------
"""
def expanded_search(date,uri):
db = Configuration.db()
expanded_query = ("""
SELECT p_date, p_time, clientip, username, duration, fulluri,\
webcat, respcode, reqmethod,useragent, resconttype,\
referer, uriport, serverip, scbytes, csbytes
FROM {0}.proxy
WHERE y='{1}' AND m='{2}' AND d='{3}'
AND (fulluri='{4}' OR referer ='{4}')
ORDER BY p_time
""")\
.format(db,date.year,str(date.month).zfill(2),str(date.day).zfill(2),uri)
return ImpalaEngine.execute_query_as_list(expanded_query)
"""
--------------------------------------------------------------------------
Get scored request from threat investigation.
--------------------------------------------------------------------------
"""
def get_scored_requests(date):
db = Configuration.db()
sc_query = ("""
SELECT
tdate,fulluri,uri_sev
FROM
{0}.proxy_threat_investigation
WHERE
y={1} AND m={2} AND d={3}
""").format(db,date.year,date.month,date.day)
return ImpalaEngine.execute_query_as_list(sc_query)
"""
--------------------------------------------------------------------------
Create storyboard.
Migrated from IPython Notebooks
--------------------------------------------------------------------------
"""
def create_storyboard(uri,date,title,text,expanded_search,top_results):
clientips = defaultdict(int)
reqmethods = defaultdict(int)
rescontype = defaultdict(int)
referers = defaultdict(int)
refered = defaultdict(int)
requests = []
for row in expanded_search:
clientips[row['clientIp']]+=1
reqmethods[row['requestMethod']]+=1
rescontype[row['responseContentType']]+=1
if row['uri'] == uri:
#Source URI's that refered the user to the threat
referers[row['referer']]+=1
requests += [{'clientip':row['clientIp'], 'referer':row['referer'],'reqmethod':row['requestMethod'], 'resconttype':row['responseContentType']}]
else:
#Destination URI's refered by the threat
refered[row['uri']]+=1
create_incident_progression(uri,requests,refered,date)
create_timeline(uri,clientips,date,top_results)
save_comments(uri,title,text,date)
return True
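# Illustrative call (a sketch; the row keys mirror what create_storyboard reads
# -- clientIp, requestMethod, responseContentType, referer, uri -- and every
# value below is hypothetical):
#
#   rows = [{'clientIp': '10.0.0.1', 'requestMethod': 'GET',
#            'responseContentType': 'text/html', 'referer': 'portal.example.com',
#            'uri': 'threat.example.com'}]
#   create_storyboard('threat.example.com', datetime.date(2016, 7, 8),
#                     'Suspicious URI', 'Analyst notes', rows, 20)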
"""
--------------------------------------------------------------------------
Create timeline for storyboard
--------------------------------------------------------------------------
"""
def create_timeline(anchor,clientips,date,top_results):
response = ""
susp_ips = []
if clientips:
srtlist = sorted(list(clientips.items()), key=lambda x: x[1], reverse=True)
for val in srtlist[:top_results]:
susp_ips.append(val[0])
if anchor != "":
db = Configuration.db()
time_line_query = ("""
SELECT p_threat,tstart,tend,duration,clientip,respcode,respcodename
FROM {0}.proxy_timeline
WHERE
y={1} AND m={2} AND d={3} AND p_threat != '{4}'
""").format(db,date.year,date.month,date.day,anchor.replace("'","//'"))
tmp_timeline_data = ImpalaEngine.execute_query_as_list(time_line_query)
imp_query = ("""
INSERT INTO TABLE {0}.proxy_timeline
PARTITION (y={2}, m={3},d={4})
SELECT
'{7}' as p_threat, concat(cast(p_date as string),
' ', cast(MIN(p_time) as string)) AS tstart,
concat(cast(p_date as string), ' ',
cast(MAX(p_time) as string)) AS tend,
SUM(duration) AS duration,
clientip, respcode,"respCodeName" as respCodeName
FROM {0}.proxy
WHERE fulluri='{1}' AND clientip IN ({5})
AND y='{2}' AND m='{3}' AND d='{4}'
GROUP BY clientip, p_time, respcode, p_date
LIMIT {6}
""")\
.format(db,anchor,date.year,str(date.month).zfill(2),\
str(date.day).zfill(2),("'" + "','".join(susp_ips) + "'")\
,top_results,anchor)
app_path = Configuration.spot()
old_file = "{0}/proxy/hive/oa/timeline/y={1}/m={2}/d={3}"\
.format(app_path,date.year,date.month,date.day)
HDFSClient.delete_folder(old_file,"impala")
ImpalaEngine.execute_query("invalidate metadata")
#Insert temporary values
for item in tmp_timeline_data:
insert_query = ("""
INSERT INTO {0}.proxy_timeline PARTITION(y={1} , m={2} ,d={3})
VALUES ('{4}', '{5}', '{6}',{7},'{8}','{9}','{10}')
""")\
.format(db,date.year,date.month,date.day,\
item["p_threat"],item["tstart"],item["tend"],item["duration"],item["clientip"],item["respcode"],item["respcodename"])
ImpalaEngine.execute_query(insert_query)
ImpalaEngine.execute_query(imp_query)
response = "Timeline successfully saved"
else:
response = "Timeline couldn't be created"
"""
--------------------------------------------------------------------------
Create incident progression for storyboard.
--------------------------------------------------------------------------
"""
def create_incident_progression(anchor,requests,referers,date):
hash_name = md5.new(str(anchor)).hexdigest()
file_name = "incident-progression-{0}.json".format(hash_name)
app_path = Configuration.spot()
hdfs_path = "{0}/proxy/oa/storyboard/{1}/{2}/{3}"\
.format(app_path,date.year,date.month,date.day)
data = {'fulluri':anchor, 'requests':requests,'referer_for':referers.keys()}
if HDFSClient.put_file_json(data,hdfs_path,file_name,overwrite_file=True) :
response = "Incident progression successfuly created"
else:
return False
"""
--------------------------------------------------------------------------
Save comments for storyboard.
--------------------------------------------------------------------------
"""
def save_comments(uri,title,text,date):
db = Configuration.db()
sb_query = ("""
SELECT
p_threat,title,text
FROM
{0}.proxy_storyboard
WHERE
y = {1} AND m= {2} AND d={3}
""").format(db,date.year,date.month,date.day)
sb_data = ImpalaEngine.execute_query_as_list(sb_query)
# find value if already exists.
saved = False
for item in sb_data:
if item["p_threat"] == uri:
item["title"] = title
item["text"] = text
saved = True
if not saved:
sb_data.append({'text': text, 'p_threat': str(uri), 'title': title})
#remove old file.
app_path = Configuration.spot()
old_file = "{0}/proxy/hive/oa/storyboard/y={1}/m={2}/d={3}/"\
.format(app_path,date.year,date.month,date.day)
HDFSClient.delete_folder(old_file,"impala")
ImpalaEngine.execute_query("invalidate metadata")
for item in sb_data:
insert_query = ("""
INSERT INTO {0}.proxy_storyboard PARTITION(y={1} , m={2} ,d={3})
VALUES ( '{4}', '{5}', '{6}')
""")\
.format(db,date.year,date.month,date.day,\
item["p_threat"],item["title"],item["text"])
ImpalaEngine.execute_query(insert_query)
"""
--------------------------------------------------------------------------
Get storyboard comments.
--------------------------------------------------------------------------
"""
def story_board(date):
db = Configuration.db()
sb_query= ("""
SELECT
p_threat,title,text
FROM
{0}.proxy_storyboard
WHERE
y={1} AND m={2} AND d={3}
""").format(db,date.year,date.month,date.day)
results = ImpalaEngine.execute_query_as_list(sb_query)
for row in results:
row["text"] = row["text"].replace("\n","\\n")
return results
"""
--------------------------------------------------------------------------
Get timeline for storyboard.
--------------------------------------------------------------------------
"""
def time_line(date,uri):
db = Configuration.db()
time_line_query = ("""
SELECT
p_threat,tstart,tend,duration,clientip,respcode,respcodename
FROM {0}.proxy_timeline
WHERE
y={1} AND m={2} AND d={3}
AND p_threat = '{4}'
""").format(db,date.year,date.month,date.day,uri)
return ImpalaEngine.execute_query_as_list(time_line_query)
"""
--------------------------------------------------------------------------
Get incident progression for storyboard.
--------------------------------------------------------------------------
"""
def incident_progression(date,uri):
app_path = Configuration.spot()
hdfs_path = "{0}/proxy/oa/storyboard/{1}/{2}/{3}".format(app_path,\
date.year,date.month,date.day)
hash_name = md5.new(str(uri)).hexdigest()
file_name = "incident-progression-{0}.json".format(hash_name)
if HDFSClient.file_exists(hdfs_path,file_name):
return json.loads(HDFSClient.get_file("{0}/{1}"\
.format(hdfs_path,file_name)))
else:
return {}
"""
Return a list(dict) with all the data ingested during the time frame provided.
"""
def ingest_summary(start_date,end_date):
db = Configuration.db()
is_query = ("""
SELECT
tdate,total
FROM {0}.proxy_ingest_summary
WHERE
( y >= {1} and y <= {2}) AND
( m >= {3} and m <= {4}) AND
( d >= {5} and d <= {6})
""")\
.format(db,start_date.year,end_date.year,start_date.month,end_date.month, start_date.day, end_date.day)
return ImpalaEngine.execute_query_as_list(is_query)
"""
--------------------------------------------------------------------------
Reset scored connections.
--------------------------------------------------------------------------
"""
def reset_scored_connections(date):
proxy_storyboard = "proxy/hive/oa/storyboard"
proxy_threat_investigation = "dns_threat_dendro/hive/oa/timeline"
proxy_timeline = "proxy/hive/oa/threat_investigation"
app_path = Configuration.spot()
try:
# remove parquet files manually to allow the comments update.
HDFSClient.delete_folder("{0}/{1}/y={2}/m={3}/d={4}/".format( \
app_path,proxy_storyboard,date.year,date.month,date.day) , "impala")
HDFSClient.delete_folder("{0}/{1}/y={2}/m={3}/d={4}/".format( \
app_path,proxy_threat_investigation,date.year,date.month,date.day), "impala")
HDFSClient.delete_folder("{0}/{1}/y={2}/m={3}/d={4}/".format( \
app_path,proxy_timeline,date.year,date.month,date.day), "impala")
ImpalaEngine.execute_query("invalidate metadata")
return True
except HdfsError:
return False
|
LedaLima/incubator-spot
|
spot-oa/api/resources/proxy.py
|
Python
|
apache-2.0
| 17,096
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
# TODO(berrange): Remove NovaObjectDictCompat
class Migration(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: String attributes updated to support unicode
VERSION = '1.1'
fields = {
'id': fields.IntegerField(),
'source_compute': fields.StringField(nullable=True),
'dest_compute': fields.StringField(nullable=True),
'source_node': fields.StringField(nullable=True),
'dest_node': fields.StringField(nullable=True),
'dest_host': fields.StringField(nullable=True),
'old_instance_type_id': fields.IntegerField(nullable=True),
'new_instance_type_id': fields.IntegerField(nullable=True),
'instance_uuid': fields.StringField(nullable=True),
'status': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(context, migration, db_migration):
for key in migration.fields:
migration[key] = db_migration[key]
migration._context = context
migration.obj_reset_changes()
return migration
@base.remotable_classmethod
def get_by_id(cls, context, migration_id):
db_migration = db.migration_get(context, migration_id)
return cls._from_db_object(context, cls(), db_migration)
@base.remotable_classmethod
def get_by_instance_and_status(cls, context, instance_uuid, status):
db_migration = db.migration_get_by_instance_and_status(
context, instance_uuid, status)
return cls._from_db_object(context, cls(), db_migration)
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self.obj_get_changes()
db_migration = db.migration_create(self._context, updates)
self._from_db_object(self._context, self, db_migration)
@base.remotable
def save(self):
updates = self.obj_get_changes()
updates.pop('id', None)
db_migration = db.migration_update(self._context, self.id, updates)
self._from_db_object(self._context, self, db_migration)
self.obj_reset_changes()
@property
def instance(self):
return objects.Instance.get_by_uuid(self._context, self.instance_uuid)
class MigrationList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Migration <= 1.1
# Version 1.1: Added use_slave to get_unconfirmed_by_dest_compute
VERSION = '1.1'
fields = {
'objects': fields.ListOfObjectsField('Migration'),
}
child_versions = {
'1.0': '1.1',
# NOTE(danms): Migration was at 1.1 before we added this
'1.1': '1.1',
}
@base.remotable_classmethod
def get_unconfirmed_by_dest_compute(cls, context, confirm_window,
dest_compute, use_slave=False):
db_migrations = db.migration_get_unconfirmed_by_dest_compute(
context, confirm_window, dest_compute, use_slave=use_slave)
return base.obj_make_list(context, cls(context), objects.Migration,
db_migrations)
@base.remotable_classmethod
def get_in_progress_by_host_and_node(cls, context, host, node):
db_migrations = db.migration_get_in_progress_by_host_and_node(
context, host, node)
return base.obj_make_list(context, cls(context), objects.Migration,
db_migrations)
@base.remotable_classmethod
def get_by_filters(cls, context, filters):
db_migrations = db.migration_get_all_by_filters(context, filters)
return base.obj_make_list(context, cls(context), objects.Migration,
db_migrations)
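# Illustrative usage (a sketch; assumes a RequestContext and an existing
# migration row -- the id, status and host/node names are hypothetical):
#
#   migration = Migration.get_by_id(context, 42)
#   migration.status = 'finished'
#   migration.save()
#
#   in_progress = MigrationList.get_in_progress_by_host_and_node(
#       context, 'compute-1', 'node-1')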
|
petrutlucian94/nova
|
nova/objects/migration.py
|
Python
|
apache-2.0
| 4,655
|
#! /usr/bin/env python
import os
import sys
from barf.barf import BARF
if __name__ == "__main__":
#
# Open file
#
try:
filename = os.path.abspath("../../samples/toy/arm/branch4")
barf = BARF(filename)
except Exception as err:
print err
print "[-] Error opening file : %s" % filename
sys.exit(1)
#
# Translate to REIL
#
print("[+] Translating: x86 -> REIL -> SMT...")
for addr, asm_instr, reil_instrs in barf.translate():
print("0x{0:08x} : {1}".format(addr, asm_instr))
for reil_instr in reil_instrs:
print("{0:14}{1}".format("", reil_instr))
try:
                # Some instructions cannot be translated to SMT, i.e.,
# UNKN, UNDEF, JCC. In those cases, an exception is
# raised.
smt_exprs = barf.smt_translator.translate(reil_instr)
for smt_expr in smt_exprs:
print("{0:16}{1}".format("", smt_expr))
except:
pass
|
ignaeche/barf-project
|
barf/examples/arm/translate_smt.py
|
Python
|
bsd-2-clause
| 1,055
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import posixpath
import traceback
from app_yaml_helper import AppYamlHelper
from appengine_wrappers import IsDeadlineExceededError, logservice
from branch_utility import BranchUtility
from compiled_file_system import CompiledFileSystem
from data_source_registry import CreateDataSources
from environment import GetAppVersion, IsDevServer
from extensions_paths import EXAMPLES, PUBLIC_TEMPLATES, STATIC_DOCS
from file_system_util import CreateURLsFromPaths
from future import Future
from gcs_file_system_provider import CloudStorageFileSystemProvider
from github_file_system_provider import GithubFileSystemProvider
from host_file_system_provider import HostFileSystemProvider
from object_store_creator import ObjectStoreCreator
from render_servlet import RenderServlet
from server_instance import ServerInstance
from servlet import Servlet, Request, Response
from special_paths import SITE_VERIFICATION_FILE
from timer import Timer, TimerClosure
class _SingletonRenderServletDelegate(RenderServlet.Delegate):
def __init__(self, server_instance):
self._server_instance = server_instance
def CreateServerInstance(self):
return self._server_instance
class _CronLogger(object):
'''Wraps the logging.* methods to prefix them with 'cron' and flush
immediately. The flushing is important because often these cron runs time
out and we lose the logs.
'''
def info(self, msg, *args): self._log(logging.info, msg, args)
def warning(self, msg, *args): self._log(logging.warning, msg, args)
def error(self, msg, *args): self._log(logging.error, msg, args)
def _log(self, logfn, msg, args):
try:
logfn('cron: %s' % msg, *args)
finally:
logservice.flush()
_cronlog = _CronLogger()
def _RequestEachItem(title, items, request_callback):
'''Runs a task |request_callback| named |title| for each item in |items|.
|request_callback| must take an item and return a servlet response.
Returns true if every item was successfully run, false if any return a
non-200 response or raise an exception.
'''
_cronlog.info('%s: starting', title)
success_count, failure_count = 0, 0
timer = Timer()
try:
for i, item in enumerate(items):
def error_message(detail):
return '%s: error rendering %s (%s of %s): %s' % (
title, item, i + 1, len(items), detail)
try:
response = request_callback(item)
if response.status == 200:
success_count += 1
else:
_cronlog.error(error_message('response status %s' % response.status))
failure_count += 1
except Exception as e:
_cronlog.error(error_message(traceback.format_exc()))
failure_count += 1
if IsDeadlineExceededError(e): raise
finally:
_cronlog.info('%s: rendered %s of %s with %s failures in %s',
title, success_count, len(items), failure_count,
timer.Stop().FormatElapsed())
return success_count == len(items)
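# Illustrative usage (a sketch; the title and items are hypothetical, and
# `render` stands in for a callback such as the render() closure built in
# CronServlet._GetImpl() below):
#
#   success = _RequestEachItem('render static docs',
#                              ['static/css/site.css', 'static/js/site.js'],
#                              render)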
class CronServlet(Servlet):
'''Servlet which runs a cron job.
'''
def __init__(self, request, delegate_for_test=None):
Servlet.__init__(self, request)
self._delegate = delegate_for_test or CronServlet.Delegate()
class Delegate(object):
'''CronServlet's runtime dependencies. Override for testing.
'''
def CreateBranchUtility(self, object_store_creator):
return BranchUtility.Create(object_store_creator)
def CreateHostFileSystemProvider(self,
object_store_creator,
max_trunk_revision=None):
return HostFileSystemProvider(object_store_creator,
max_trunk_revision=max_trunk_revision)
def CreateGithubFileSystemProvider(self, object_store_creator):
return GithubFileSystemProvider(object_store_creator)
def CreateGCSFileSystemProvider(self, object_store_creator):
return CloudStorageFileSystemProvider(object_store_creator)
def GetAppVersion(self):
return GetAppVersion()
def Get(self):
# Crons often time out, and if they do we need to make sure to flush the
# logs before the process gets killed (Python gives us a couple of
# seconds).
#
# So, manually flush logs at the end of the cron run. However, sometimes
# even that isn't enough, which is why in this file we use _cronlog and
    # make it flush the log every time it's used.
logservice.AUTOFLUSH_ENABLED = False
try:
return self._GetImpl()
except BaseException:
_cronlog.error('Caught top-level exception! %s', traceback.format_exc())
finally:
logservice.flush()
def _GetImpl(self):
# Cron strategy:
#
# Find all public template files and static files, and render them. Most of
# the time these won't have changed since the last cron run, so it's a
# little wasteful, but hopefully rendering is really fast (if it isn't we
# have a problem).
_cronlog.info('starting')
# This is returned every time RenderServlet wants to create a new
# ServerInstance.
#
# TODO(kalman): IMPORTANT. This sometimes throws an exception, breaking
# everything. Need retry logic at the fetcher level.
server_instance = self._GetSafeServerInstance()
trunk_fs = server_instance.host_file_system_provider.GetTrunk()
def render(path):
request = Request(path, self._request.host, self._request.headers)
delegate = _SingletonRenderServletDelegate(server_instance)
return RenderServlet(request, delegate).Get()
def request_files_in_dir(path, prefix='', strip_ext=None):
'''Requests every file found under |path| in this host file system, with
a request prefix of |prefix|. |strip_ext| is an optional list of file
extensions that should be stripped from paths before requesting.
'''
def maybe_strip_ext(name):
if name == SITE_VERIFICATION_FILE or not strip_ext:
return name
base, ext = posixpath.splitext(name)
return base if ext in strip_ext else name
files = [maybe_strip_ext(name)
for name, _ in CreateURLsFromPaths(trunk_fs, path, prefix)]
return _RequestEachItem(path, files, render)
results = []
try:
# Start running the hand-written Cron methods first; they can be run in
# parallel. They are resolved at the end.
def run_cron_for_future(target):
title = target.__class__.__name__
future, init_timer = TimerClosure(target.Cron)
assert isinstance(future, Future), (
'%s.Cron() did not return a Future' % title)
def resolve():
resolve_timer = Timer()
try:
future.Get()
except Exception as e:
_cronlog.error('%s: error %s' % (title, traceback.format_exc()))
results.append(False)
if IsDeadlineExceededError(e): raise
finally:
resolve_timer.Stop()
_cronlog.info('%s took %s: %s to initialize and %s to resolve' %
(title,
init_timer.With(resolve_timer).FormatElapsed(),
init_timer.FormatElapsed(),
resolve_timer.FormatElapsed()))
return Future(callback=resolve)
targets = (CreateDataSources(server_instance).values() +
[server_instance.content_providers,
server_instance.api_models])
title = 'initializing %s parallel Cron targets' % len(targets)
_cronlog.info(title)
timer = Timer()
try:
cron_futures = [run_cron_for_future(target) for target in targets]
finally:
_cronlog.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
# Samples are too expensive to run on the dev server, where there is no
# parallel fetch.
#
# XXX(kalman): Currently samples are *always* too expensive to fetch, so
# disabling them for now. It won't break anything so long as we're still
# not enforcing that everything gets cached for normal instances.
if False: # should be "not IsDevServer()":
# Fetch each individual sample file.
results.append(request_files_in_dir(EXAMPLES,
prefix='extensions/examples'))
# Resolve the hand-written Cron method futures.
title = 'resolving %s parallel Cron targets' % len(targets)
_cronlog.info(title)
timer = Timer()
try:
for future in cron_futures:
future.Get()
finally:
_cronlog.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
except:
results.append(False)
# This should never actually happen (each cron step does its own
# conservative error checking), so re-raise no matter what it is.
_cronlog.error('uncaught error: %s' % traceback.format_exc())
raise
finally:
success = all(results)
_cronlog.info('finished (%s)', 'success' if success else 'FAILED')
return (Response.Ok('Success') if success else
Response.InternalError('Failure'))
def _GetSafeServerInstance(self):
'''Returns a ServerInstance with a host file system at a safe revision,
meaning the last revision that the current running version of the server
existed.
'''
delegate = self._delegate
# IMPORTANT: Get a ServerInstance pinned to the most recent revision, not
# HEAD. These cron jobs take a while and run very frequently such that
# there is usually one running at any given time, and eventually a file
# that we're dealing with will change underneath it, putting the server in
# an undefined state.
server_instance_near_head = self._CreateServerInstance(
self._GetMostRecentRevision())
app_yaml_handler = AppYamlHelper(
server_instance_near_head.object_store_creator,
server_instance_near_head.host_file_system_provider)
if app_yaml_handler.IsUpToDate(delegate.GetAppVersion()):
return server_instance_near_head
# The version in app.yaml is greater than the currently running app's.
# The safe version is the one before it changed.
safe_revision = app_yaml_handler.GetFirstRevisionGreaterThan(
delegate.GetAppVersion()) - 1
_cronlog.info('app version %s is out of date, safe is %s',
delegate.GetAppVersion(), safe_revision)
return self._CreateServerInstance(safe_revision)
def _GetMostRecentRevision(self):
'''Gets the revision of the most recent patch submitted to the host file
system. This is similar to HEAD but it's a concrete revision so won't
change as the cron runs.
'''
head_fs = (
self._CreateServerInstance(None).host_file_system_provider.GetTrunk())
return head_fs.Stat('').version
def _CreateServerInstance(self, revision):
'''Creates a ServerInstance pinned to |revision|, or HEAD if None.
NOTE: If passed None it's likely that during the cron run patches will be
submitted at HEAD, which may change data underneath the cron run.
'''
object_store_creator = ObjectStoreCreator(start_empty=True)
branch_utility = self._delegate.CreateBranchUtility(object_store_creator)
host_file_system_provider = self._delegate.CreateHostFileSystemProvider(
object_store_creator, max_trunk_revision=revision)
github_file_system_provider = self._delegate.CreateGithubFileSystemProvider(
object_store_creator)
gcs_file_system_provider = self._delegate.CreateGCSFileSystemProvider(
object_store_creator)
return ServerInstance(object_store_creator,
CompiledFileSystem.Factory(object_store_creator),
branch_utility,
host_file_system_provider,
github_file_system_provider,
gcs_file_system_provider)
|
TeamEOS/external_chromium_org
|
chrome/common/extensions/docs/server2/cron_servlet.py
|
Python
|
bsd-3-clause
| 12,019
|
"""Convert Wavefront OBJ / MTL files into Three.js (JSON model version, to be used with ascii / binary loader)
-------------------------
How to use this converter
-------------------------
python convert_obj_three.py -i infile.obj -o outfile.js [-m "morphfiles*.obj"] [-c "morphcolors*.obj"] [-a center|centerxz|top|bottom|none] [-s smooth|flat] [-t ascii|binary] [-d invert|normal] [-b] [-e]
Notes:
- flags
-i infile.obj input OBJ file
-o outfile.js output JS file
-m "morphfiles*.obj" morph OBJ files (can use wildcards, enclosed in quotes multiple patterns separate by space)
-c "morphcolors*.obj" morph colors OBJ files (can use wildcards, enclosed in quotes multiple patterns separate by space)
-a center|centerxz|top|bottom|none model alignment
-s smooth|flat smooth = export vertex normals, flat = no normals (face normals computed in loader)
-t ascii|binary export ascii or binary format (ascii has more features, binary just supports vertices, faces, normals, uvs and materials)
-d invert|normal invert transparency
-b bake material colors into face colors
-x 10.0 scale and truncate
-f 2 morph frame sampling step
- by default:
use smooth shading (if there were vertex normals in the original model)
will be in ASCII format
original model is assumed to use non-inverted transparency / dissolve (0.0 fully transparent, 1.0 fully opaque)
no face colors baking
no scale and truncate
morph frame step = 1 (all files will be processed)
- binary conversion will create two files:
outfile.js (materials)
outfile.bin (binary buffers)
--------------------------------------------------
How to use generated JS file in your HTML document
--------------------------------------------------
<script type="text/javascript" src="Three.js"></script>
...
<script type="text/javascript">
...
// load ascii model
var jsonLoader = new THREE.JSONLoader();
jsonLoader.load( "Model_ascii.js", function( geometry ) { createScene( geometry ) } );
// load binary model
var binLoader = new THREE.BinaryLoader();
binLoader.load( "Model_bin.js", function( geometry ) { createScene( geometry) } );
function createScene( geometry ) {
var mesh = new THREE.Mesh( geometry, new THREE.MeshFaceMaterial() );
}
...
</script>
-------------------------------------
Parsers based on formats descriptions
-------------------------------------
http://en.wikipedia.org/wiki/Obj
http://en.wikipedia.org/wiki/Material_Template_Library
-------------------
Current limitations
-------------------
- for the moment, only diffuse color and texture are used
(will need to extend shaders / renderers / materials in Three)
- texture coordinates can be wrong in canvas renderer
(there is crude normalization, but it doesn't
work for all cases)
- smoothing can be turned on/off only for the whole mesh
----------------------------------------------
How to get proper OBJ + MTL files with Blender
----------------------------------------------
0. Remove default cube (press DEL and ENTER)
1. Import / create model
2. Select all meshes (Select -> Select All by Type -> Mesh)
3. Export to OBJ (File -> Export -> Wavefront .obj)
- enable following options in exporter
Material Groups
Rotate X90
Apply Modifiers
High Quality Normals
Copy Images
Selection Only
Objects as OBJ Objects
UVs
Normals
Materials
- select empty folder
- give your exported file name with "obj" extension
- click on "Export OBJ" button
4. Your model is now all files in this folder (OBJ, MTL, number of images)
- this converter assumes all files stay in the same folder,
(OBJ / MTL files use relative paths)
- for WebGL, textures must be power of 2 sized
------
Author
------
AlteredQualia http://alteredqualia.com
"""
import fileinput
import operator
import random
import os.path
import getopt
import sys
import struct
import math
import glob
# #####################################################
# Configuration
# #####################################################
ALIGN = "none" # center centerxz bottom top none
SHADING = "smooth" # smooth flat
TYPE = "ascii" # ascii binary
TRANSPARENCY = "normal" # normal invert
TRUNCATE = False
SCALE = 1.0
FRAMESTEP = 1
BAKE_COLORS = False
# default colors for debugging (each material gets one distinct color):
# white, red, green, blue, yellow, cyan, magenta
COLORS = [0xeeeeee, 0xee0000, 0x00ee00, 0x0000ee, 0xeeee00, 0x00eeee, 0xee00ee]
# #####################################################
# Templates
# #####################################################
TEMPLATE_FILE_ASCII = u"""\
{
"metadata" :
{
"formatVersion" : 3,
"sourceFile" : "%(fname)s",
"generatedBy" : "OBJConverter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"colors" : %(ncolor)d,
"uvs" : %(nuv)d,
"materials" : %(nmaterial)d
},
"scale" : %(scale)f,
"materials": [%(materials)s],
"vertices": [%(vertices)s],
"morphTargets": [%(morphTargets)s],
"morphColors": [%(morphColors)s],
"normals": [%(normals)s],
"colors": [%(colors)s],
"uvs": [[%(uvs)s]],
"faces": [%(faces)s]
}
"""
TEMPLATE_FILE_BIN = u"""\
{
"metadata" :
{
"formatVersion" : 3,
"sourceFile" : "%(fname)s",
"generatedBy" : "OBJConverter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"uvs" : %(nuv)d,
"materials" : %(nmaterial)d
},
"materials": [%(materials)s],
"buffers": "%(buffers)s"
}
"""
TEMPLATE_VERTEX = "%f,%f,%f"
TEMPLATE_VERTEX_TRUNCATE = "%d,%d,%d"
TEMPLATE_N = "%.5g,%.5g,%.5g"
TEMPLATE_UV = "%.5g,%.5g"
TEMPLATE_COLOR = "%.3g,%.3g,%.3g"
TEMPLATE_COLOR_DEC = "%d"
TEMPLATE_MORPH_VERTICES = '\t{ "name": "%s", "vertices": [%s] }'
TEMPLATE_MORPH_COLORS = '\t{ "name": "%s", "colors": [%s] }'
# #####################################################
# Utils
# #####################################################
def file_exists(filename):
"""Return true if file exists and is accessible for reading.
Should be safer than just testing for existence due to links and
permissions magic on Unix filesystems.
@rtype: boolean
"""
try:
f = open(filename, 'r')
f.close()
return True
except IOError:
return False
def get_name(fname):
"""Create model name based of filename ("path/fname.js" -> "fname").
"""
return os.path.splitext(os.path.basename(fname))[0]
def bbox(vertices):
"""Compute bounding box of vertex array.
"""
if len(vertices)>0:
minx = maxx = vertices[0][0]
miny = maxy = vertices[0][1]
minz = maxz = vertices[0][2]
for v in vertices[1:]:
if v[0]<minx:
minx = v[0]
elif v[0]>maxx:
maxx = v[0]
if v[1]<miny:
miny = v[1]
elif v[1]>maxy:
maxy = v[1]
if v[2]<minz:
minz = v[2]
elif v[2]>maxz:
maxz = v[2]
return { 'x':[minx,maxx], 'y':[miny,maxy], 'z':[minz,maxz] }
else:
return { 'x':[0,0], 'y':[0,0], 'z':[0,0] }
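# For example (a sketch): bbox([[0, 1, 2], [3, -1, 5]]) returns
# { 'x': [0, 3], 'y': [-1, 1], 'z': [2, 5] }.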
def translate(vertices, t):
"""Translate array of vertices by vector t.
"""
for i in xrange(len(vertices)):
vertices[i][0] += t[0]
vertices[i][1] += t[1]
vertices[i][2] += t[2]
def center(vertices):
"""Center model (middle of bounding box).
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][0] + (bb['y'][1] - bb['y'][0])/2.0
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def top(vertices):
"""Align top of the model with the floor (Y-axis) and center it around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][1]
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def bottom(vertices):
"""Align bottom of the model with the floor (Y-axis) and center it around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][0]
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def centerxz(vertices):
"""Center model around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = 0
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def normalize(v):
"""Normalize 3d vector"""
l = math.sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2])
if l:
v[0] /= l
v[1] /= l
v[2] /= l
def veckey3(v):
return round(v[0], 6), round(v[1], 6), round(v[2], 6)
# #####################################################
# MTL parser
# #####################################################
def texture_relative_path(fullpath):
texture_file = os.path.basename(fullpath)
return texture_file
def parse_mtl(fname):
"""Parse MTL file.
"""
materials = {}
for line in fileinput.input(fname):
chunks = line.split()
if len(chunks) > 0:
# Material start
# newmtl identifier
if chunks[0] == "newmtl" and len(chunks) == 2:
identifier = chunks[1]
if not identifier in materials:
materials[identifier] = {}
# Diffuse color
# Kd 1.000 1.000 1.000
if chunks[0] == "Kd" and len(chunks) == 4:
materials[identifier]["colorDiffuse"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Ambient color
# Ka 1.000 1.000 1.000
if chunks[0] == "Ka" and len(chunks) == 4:
materials[identifier]["colorAmbient"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Specular color
# Ks 1.000 1.000 1.000
if chunks[0] == "Ks" and len(chunks) == 4:
materials[identifier]["colorSpecular"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Specular coefficient
# Ns 154.000
if chunks[0] == "Ns" and len(chunks) == 2:
materials[identifier]["specularCoef"] = float(chunks[1])
# Transparency
# Tr 0.9 or d 0.9
if (chunks[0] == "Tr" or chunks[0] == "d") and len(chunks) == 2:
if TRANSPARENCY == "invert":
materials[identifier]["transparency"] = 1.0 - float(chunks[1])
else:
materials[identifier]["transparency"] = float(chunks[1])
# Optical density
# Ni 1.0
if chunks[0] == "Ni" and len(chunks) == 2:
materials[identifier]["opticalDensity"] = float(chunks[1])
# Diffuse texture
# map_Kd texture_diffuse.jpg
if chunks[0] == "map_Kd" and len(chunks) == 2:
materials[identifier]["mapDiffuse"] = texture_relative_path(chunks[1])
# Ambient texture
# map_Ka texture_ambient.jpg
if chunks[0] == "map_Ka" and len(chunks) == 2:
materials[identifier]["mapAmbient"] = texture_relative_path(chunks[1])
# Specular texture
# map_Ks texture_specular.jpg
if chunks[0] == "map_Ks" and len(chunks) == 2:
materials[identifier]["mapSpecular"] = texture_relative_path(chunks[1])
# Alpha texture
# map_d texture_alpha.png
if chunks[0] == "map_d" and len(chunks) == 2:
materials[identifier]["mapAlpha"] = texture_relative_path(chunks[1])
# Bump texture
# map_bump texture_bump.jpg or bump texture_bump.jpg
if (chunks[0] == "map_bump" or chunks[0] == "bump") and len(chunks) == 2:
materials[identifier]["mapBump"] = texture_relative_path(chunks[1])
# Illumination
# illum 2
#
# 0. Color on and Ambient off
# 1. Color on and Ambient on
# 2. Highlight on
# 3. Reflection on and Ray trace on
# 4. Transparency: Glass on, Reflection: Ray trace on
# 5. Reflection: Fresnel on and Ray trace on
# 6. Transparency: Refraction on, Reflection: Fresnel off and Ray trace on
# 7. Transparency: Refraction on, Reflection: Fresnel on and Ray trace on
# 8. Reflection on and Ray trace off
# 9. Transparency: Glass on, Reflection: Ray trace off
# 10. Casts shadows onto invisible surfaces
if chunks[0] == "illum" and len(chunks) == 2:
materials[identifier]["illumination"] = int(chunks[1])
return materials
# #####################################################
# OBJ parser
# #####################################################
def parse_vertex(text):
"""Parse text chunk specifying single vertex.
Possible formats:
vertex index
vertex index / texture index
vertex index / texture index / normal index
vertex index / / normal index
"""
v = 0
t = 0
n = 0
chunks = text.split("/")
v = int(chunks[0])
if len(chunks) > 1:
if chunks[1]:
t = int(chunks[1])
if len(chunks) > 2:
if chunks[2]:
n = int(chunks[2])
return { 'v':v, 't':t, 'n':n }
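# For example (a sketch): parse_vertex("5/7/3") returns {'v': 5, 't': 7, 'n': 3},
# and parse_vertex("5//3") returns {'v': 5, 't': 0, 'n': 3}.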
def parse_obj(fname):
"""Parse OBJ file.
"""
vertices = []
normals = []
uvs = []
faces = []
materials = {}
mcounter = 0
mcurrent = 0
mtllib = ""
# current face state
group = 0
object = 0
smooth = 0
for line in fileinput.input(fname):
chunks = line.split()
if len(chunks) > 0:
# Vertices as (x,y,z) coordinates
# v 0.123 0.234 0.345
if chunks[0] == "v" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
vertices.append([x,y,z])
# Normals in (x,y,z) form; normals might not be unit
# vn 0.707 0.000 0.707
if chunks[0] == "vn" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
normals.append([x,y,z])
# Texture coordinates in (u,v[,w]) coordinates, w is optional
# vt 0.500 -1.352 [0.234]
if chunks[0] == "vt" and len(chunks) >= 3:
u = float(chunks[1])
v = float(chunks[2])
w = 0
if len(chunks)>3:
w = float(chunks[3])
uvs.append([u,v,w])
# Face
if chunks[0] == "f" and len(chunks) >= 4:
vertex_index = []
uv_index = []
normal_index = []
for v in chunks[1:]:
vertex = parse_vertex(v)
if vertex['v']:
vertex_index.append(vertex['v'])
if vertex['t']:
uv_index.append(vertex['t'])
if vertex['n']:
normal_index.append(vertex['n'])
faces.append({
'vertex':vertex_index,
'uv':uv_index,
'normal':normal_index,
'material':mcurrent,
'group':group,
'object':object,
'smooth':smooth,
})
# Group
if chunks[0] == "g" and len(chunks) == 2:
group = chunks[1]
# Object
if chunks[0] == "o" and len(chunks) == 2:
object = chunks[1]
# Materials definition
if chunks[0] == "mtllib" and len(chunks) == 2:
mtllib = chunks[1]
# Material
if chunks[0] == "usemtl" and len(chunks) == 2:
material = chunks[1]
if not material in materials:
mcurrent = mcounter
materials[material] = mcounter
mcounter += 1
else:
mcurrent = materials[material]
# Smooth shading
if chunks[0] == "s" and len(chunks) == 2:
smooth = chunks[1]
return faces, vertices, uvs, normals, materials, mtllib
# #####################################################
# Generator - faces
# #####################################################
def setBit(value, position, on):
if on:
mask = 1 << position
return (value | mask)
else:
mask = ~(1 << position)
return (value & mask)
def generate_face(f, fc):
isTriangle = ( len(f['vertex']) == 3 )
if isTriangle:
nVertices = 3
else:
nVertices = 4
hasMaterial = True # for the moment OBJs without materials get default material
hasFaceUvs = False # not supported in OBJ
hasFaceVertexUvs = ( len(f['uv']) >= nVertices )
hasFaceNormals = False # don't export any face normals (as they are computed in engine)
hasFaceVertexNormals = ( len(f["normal"]) >= nVertices and SHADING == "smooth" )
hasFaceColors = BAKE_COLORS
hasFaceVertexColors = False # not supported in OBJ
faceType = 0
faceType = setBit(faceType, 0, not isTriangle)
faceType = setBit(faceType, 1, hasMaterial)
faceType = setBit(faceType, 2, hasFaceUvs)
faceType = setBit(faceType, 3, hasFaceVertexUvs)
faceType = setBit(faceType, 4, hasFaceNormals)
faceType = setBit(faceType, 5, hasFaceVertexNormals)
faceType = setBit(faceType, 6, hasFaceColors)
faceType = setBit(faceType, 7, hasFaceVertexColors)
faceData = []
# order is important, must match order in JSONLoader
# face type
# vertex indices
# material index
# face uvs index
# face vertex uvs indices
# face normal index
# face vertex normals indices
# face color index
# face vertex colors indices
faceData.append(faceType)
    # must clamp in case of polygons bigger than quads
for i in xrange(nVertices):
index = f['vertex'][i] - 1
faceData.append(index)
faceData.append( f['material'] )
if hasFaceVertexUvs:
for i in xrange(nVertices):
index = f['uv'][i] - 1
faceData.append(index)
if hasFaceVertexNormals:
for i in xrange(nVertices):
index = f['normal'][i] - 1
faceData.append(index)
if hasFaceColors:
index = fc['material']
faceData.append(index)
return ",".join( map(str, faceData) )
# #####################################################
# Generator - chunks
# #####################################################
def hexcolor(c):
return ( int(c[0] * 255) << 16 ) + ( int(c[1] * 255) << 8 ) + int(c[2] * 255)
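# For example (a sketch): hexcolor([1.0, 0.5, 0.0]) == 0xff7f00 (16744192).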
def generate_vertex(v, option_vertices_truncate, scale):
if not option_vertices_truncate:
return TEMPLATE_VERTEX % (v[0], v[1], v[2])
else:
return TEMPLATE_VERTEX_TRUNCATE % (scale * v[0], scale * v[1], scale * v[2])
def generate_normal(n):
return TEMPLATE_N % (n[0], n[1], n[2])
def generate_uv(uv):
return TEMPLATE_UV % (uv[0], 1.0 - uv[1])
def generate_color_rgb(c):
return TEMPLATE_COLOR % (c[0], c[1], c[2])
def generate_color_decimal(c):
return TEMPLATE_COLOR_DEC % hexcolor(c)
# #####################################################
# Morphs
# #####################################################
def generate_morph_vertex(name, vertices):
vertex_string = ",".join(generate_vertex(v, TRUNCATE, SCALE) for v in vertices)
return TEMPLATE_MORPH_VERTICES % (name, vertex_string)
def generate_morph_color(name, colors):
color_string = ",".join(generate_color_rgb(c) for c in colors)
return TEMPLATE_MORPH_COLORS % (name, color_string)
def extract_material_colors(materials, mtlfilename, basename):
"""Extract diffuse colors from MTL materials
"""
if not materials:
materials = { 'default': 0 }
mtl = create_materials(materials, mtlfilename, basename)
mtlColorArraySrt = []
for m in mtl:
if m in materials:
index = materials[m]
color = mtl[m].get("colorDiffuse", [1,0,0])
mtlColorArraySrt.append([index, color])
mtlColorArraySrt.sort()
mtlColorArray = [x[1] for x in mtlColorArraySrt]
return mtlColorArray
def extract_face_colors(faces, material_colors):
"""Extract colors from materials and assign them to faces
"""
faceColors = []
for face in faces:
material_index = face['material']
faceColors.append(material_colors[material_index])
return faceColors
def generate_morph_targets(morphfiles, n_vertices, infile):
skipOriginalMorph = False
norminfile = os.path.normpath(infile)
morphVertexData = []
for mfilepattern in morphfiles.split():
matches = glob.glob(mfilepattern)
matches.sort()
indices = range(0, len(matches), FRAMESTEP)
for i in indices:
path = matches[i]
normpath = os.path.normpath(path)
if normpath != norminfile or not skipOriginalMorph:
name = os.path.basename(normpath)
morphFaces, morphVertices, morphUvs, morphNormals, morphMaterials, morphMtllib = parse_obj(normpath)
n_morph_vertices = len(morphVertices)
if n_vertices != n_morph_vertices:
print "WARNING: skipping morph [%s] with different number of vertices [%d] than the original model [%d]" % (name, n_morph_vertices, n_vertices)
else:
if ALIGN == "center":
center(morphVertices)
elif ALIGN == "centerxz":
centerxz(morphVertices)
elif ALIGN == "bottom":
bottom(morphVertices)
elif ALIGN == "top":
top(morphVertices)
morphVertexData.append((get_name(name), morphVertices))
print "adding [%s] with %d vertices" % (name, n_morph_vertices)
morphTargets = ""
if len(morphVertexData):
morphTargets = "\n%s\n\t" % ",\n".join(generate_morph_vertex(name, vertices) for name, vertices in morphVertexData)
return morphTargets
def generate_morph_colors(colorfiles, n_vertices, n_faces):
morphColorData = []
colorFaces = []
materialColors = []
for mfilepattern in colorfiles.split():
matches = glob.glob(mfilepattern)
matches.sort()
for path in matches:
normpath = os.path.normpath(path)
name = os.path.basename(normpath)
morphFaces, morphVertices, morphUvs, morphNormals, morphMaterials, morphMtllib = parse_obj(normpath)
n_morph_vertices = len(morphVertices)
n_morph_faces = len(morphFaces)
if n_vertices != n_morph_vertices:
print "WARNING: skipping morph color map [%s] with different number of vertices [%d] than the original model [%d]" % (name, n_morph_vertices, n_vertices)
elif n_faces != n_morph_faces:
print "WARNING: skipping morph color map [%s] with different number of faces [%d] than the original model [%d]" % (name, n_morph_faces, n_faces)
else:
morphMaterialColors = extract_material_colors(morphMaterials, morphMtllib, normpath)
morphFaceColors = extract_face_colors(morphFaces, morphMaterialColors)
morphColorData.append((get_name(name), morphFaceColors))
# take first color map for baking into face colors
if len(colorFaces) == 0:
colorFaces = morphFaces
materialColors = morphMaterialColors
print "adding [%s] with %d face colors" % (name, len(morphFaceColors))
morphColors = ""
if len(morphColorData):
morphColors = "\n%s\n\t" % ",\n".join(generate_morph_color(name, colors) for name, colors in morphColorData)
return morphColors, colorFaces, materialColors
# #####################################################
# Materials
# #####################################################
def generate_color(i):
"""Generate hex color corresponding to integer.
Colors should have well defined ordering.
First N colors are hardcoded, then colors are random
(must seed random number generator with deterministic value
before getting colors).
"""
if i < len(COLORS):
#return "0x%06x" % COLORS[i]
return COLORS[i]
else:
#return "0x%06x" % int(0xffffff * random.random())
return int(0xffffff * random.random())
def value2string(v):
if type(v)==str and v[0:2] != "0x":
return '"%s"' % v
elif type(v) == bool:
return str(v).lower()
return str(v)
def generate_materials(mtl, materials):
"""Generate JS array of materials objects
JS material objects are basically prettified one-to-one
mappings of MTL properties in JSON format.
"""
mtl_array = []
for m in mtl:
if m in materials:
index = materials[m]
# add debug information
# materials should be sorted according to how
# they appeared in OBJ file (for the first time)
            # this index is the identifier used in face definitions
mtl[m]['DbgName'] = m
mtl[m]['DbgIndex'] = index
mtl[m]['DbgColor'] = generate_color(index)
if BAKE_COLORS:
mtl[m]['vertexColors'] = "face"
mtl_raw = ",\n".join(['\t"%s" : %s' % (n, value2string(v)) for n,v in sorted(mtl[m].items())])
mtl_string = "\t{\n%s\n\t}" % mtl_raw
mtl_array.append([index, mtl_string])
return ",\n\n".join([m for i,m in sorted(mtl_array)])
def generate_mtl(materials):
"""Generate dummy materials (if there is no MTL file).
"""
mtl = {}
for m in materials:
index = materials[m]
mtl[m] = {
'DbgName': m,
'DbgIndex': index,
'DbgColor': generate_color(index)
}
return mtl
def generate_materials_string(materials, mtlfilename, basename):
"""Generate final materials string.
"""
if not materials:
materials = { 'default': 0 }
mtl = create_materials(materials, mtlfilename, basename)
return generate_materials(mtl, materials)
def create_materials(materials, mtlfilename, basename):
"""Parse MTL file and create mapping between its materials and OBJ materials.
Eventual edge cases are handled here (missing materials, missing MTL file).
"""
random.seed(42) # to get well defined color order for debug colors
# default materials with debug colors for when
# there is no specified MTL / MTL loading failed,
# or if there were no materials / null materials
mtl = generate_mtl(materials)
if mtlfilename:
# create full pathname for MTL (included from OBJ)
path = os.path.dirname(basename)
fname = os.path.join(path, mtlfilename)
if file_exists(fname):
# override default materials with real ones from MTL
# (where they exist, otherwise keep defaults)
mtl.update(parse_mtl(fname))
else:
print "Couldn't find [%s]" % fname
return mtl
# #####################################################
# Faces
# #####################################################
def is_triangle_flat(f):
return len(f['vertex'])==3 and not (f["normal"] and SHADING == "smooth") and not f['uv']
def is_triangle_flat_uv(f):
return len(f['vertex'])==3 and not (f["normal"] and SHADING == "smooth") and len(f['uv'])==3
def is_triangle_smooth(f):
return len(f['vertex'])==3 and f["normal"] and SHADING == "smooth" and not f['uv']
def is_triangle_smooth_uv(f):
return len(f['vertex'])==3 and f["normal"] and SHADING == "smooth" and len(f['uv'])==3
def is_quad_flat(f):
return len(f['vertex'])==4 and not (f["normal"] and SHADING == "smooth") and not f['uv']
def is_quad_flat_uv(f):
return len(f['vertex'])==4 and not (f["normal"] and SHADING == "smooth") and len(f['uv'])==4
def is_quad_smooth(f):
return len(f['vertex'])==4 and f["normal"] and SHADING == "smooth" and not f['uv']
def is_quad_smooth_uv(f):
return len(f['vertex'])==4 and f["normal"] and SHADING == "smooth" and len(f['uv'])==4
def sort_faces(faces):
data = {
'triangles_flat': [],
'triangles_flat_uv': [],
'triangles_smooth': [],
'triangles_smooth_uv': [],
'quads_flat': [],
'quads_flat_uv': [],
'quads_smooth': [],
'quads_smooth_uv': []
}
for f in faces:
if is_triangle_flat(f):
data['triangles_flat'].append(f)
elif is_triangle_flat_uv(f):
data['triangles_flat_uv'].append(f)
elif is_triangle_smooth(f):
data['triangles_smooth'].append(f)
elif is_triangle_smooth_uv(f):
data['triangles_smooth_uv'].append(f)
elif is_quad_flat(f):
data['quads_flat'].append(f)
elif is_quad_flat_uv(f):
data['quads_flat_uv'].append(f)
elif is_quad_smooth(f):
data['quads_smooth'].append(f)
elif is_quad_smooth_uv(f):
data['quads_smooth_uv'].append(f)
return data
# #####################################################
# API - ASCII converter
# #####################################################
def convert_ascii(infile, morphfiles, colorfiles, outfile):
"""Convert infile.obj to outfile.js
Here is where everything happens. If you need to automate conversions,
just import this file as Python module and call this method.
"""
if not file_exists(infile):
print "Couldn't find [%s]" % infile
return
# parse OBJ / MTL files
faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
n_vertices = len(vertices)
n_faces = len(faces)
# align model
if ALIGN == "center":
center(vertices)
elif ALIGN == "centerxz":
centerxz(vertices)
elif ALIGN == "bottom":
bottom(vertices)
elif ALIGN == "top":
top(vertices)
# generate normals string
nnormal = 0
normals_string = ""
if SHADING == "smooth":
normals_string = ",".join(generate_normal(n) for n in normals)
nnormal = len(normals)
# extract morph vertices
morphTargets = generate_morph_targets(morphfiles, n_vertices, infile)
# extract morph colors
morphColors, colorFaces, materialColors = generate_morph_colors(colorfiles, n_vertices, n_faces)
# generate colors string
ncolor = 0
colors_string = ""
if len(colorFaces) < len(faces):
colorFaces = faces
materialColors = extract_material_colors(materials, mtllib, infile)
if BAKE_COLORS:
colors_string = ",".join(generate_color_decimal(c) for c in materialColors)
ncolor = len(materialColors)
# generate ascii model string
text = TEMPLATE_FILE_ASCII % {
"name" : get_name(outfile),
"fname" : os.path.basename(infile),
"nvertex" : len(vertices),
"nface" : len(faces),
"nuv" : len(uvs),
"nnormal" : nnormal,
"ncolor" : ncolor,
"nmaterial" : len(materials),
"materials" : generate_materials_string(materials, mtllib, infile),
"normals" : normals_string,
"colors" : colors_string,
"uvs" : ",".join(generate_uv(uv) for uv in uvs),
"vertices" : ",".join(generate_vertex(v, TRUNCATE, SCALE) for v in vertices),
"morphTargets" : morphTargets,
"morphColors" : morphColors,
"faces" : ",".join(generate_face(f, fc) for f, fc in zip(faces, colorFaces)),
"scale" : SCALE
}
out = open(outfile, "w")
out.write(text)
out.close()
print "%d vertices, %d faces, %d materials" % (len(vertices), len(faces), len(materials))
# #############################################################################
# API - Binary converter
# #############################################################################
def dump_materials_to_buffer(faces, buffer):
for f in faces:
data = struct.pack('<H',
f['material'])
buffer.append(data)
def dump_vertices3_to_buffer(faces, buffer):
for f in faces:
vi = f['vertex']
data = struct.pack('<III',
vi[0]-1, vi[1]-1, vi[2]-1)
buffer.append(data)
def dump_vertices4_to_buffer(faces, buffer):
for f in faces:
vi = f['vertex']
data = struct.pack('<IIII',
vi[0]-1, vi[1]-1, vi[2]-1, vi[3]-1)
buffer.append(data)
def dump_normals3_to_buffer(faces, buffer):
for f in faces:
ni = f['normal']
data = struct.pack('<III',
ni[0]-1, ni[1]-1, ni[2]-1)
buffer.append(data)
def dump_normals4_to_buffer(faces, buffer):
for f in faces:
ni = f['normal']
data = struct.pack('<IIII',
ni[0]-1, ni[1]-1, ni[2]-1, ni[3]-1)
buffer.append(data)
def dump_uvs3_to_buffer(faces, buffer):
for f in faces:
ui = f['uv']
data = struct.pack('<III',
ui[0]-1, ui[1]-1, ui[2]-1)
buffer.append(data)
def dump_uvs4_to_buffer(faces, buffer):
for f in faces:
ui = f['uv']
data = struct.pack('<IIII',
ui[0]-1, ui[1]-1, ui[2]-1, ui[3]-1)
buffer.append(data)
def add_padding(buffer, n):
if n % 4:
for i in range(4 - n % 4):
data = struct.pack('<B', 0)
buffer.append(data)
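# For example, add_padding(buffer, 6) appends two zero bytes so the next chunk
# starts on a 4-byte boundary, while add_padding(buffer, 8) appends nothing.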
def convert_binary(infile, outfile):
"""Convert infile.obj to outfile.js + outfile.bin
"""
if not file_exists(infile):
print "Couldn't find [%s]" % infile
return
binfile = get_name(outfile) + ".bin"
faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
if ALIGN == "center":
center(vertices)
elif ALIGN == "centerxz":
centerxz(vertices)
elif ALIGN == "bottom":
bottom(vertices)
elif ALIGN == "top":
top(vertices)
sfaces = sort_faces(faces)
if SHADING == "smooth":
nnormals = len(normals)
else:
nnormals = 0
# ###################
# generate JS file
# ###################
text = TEMPLATE_FILE_BIN % {
"name" : get_name(outfile),
"materials" : generate_materials_string(materials, mtllib, infile),
"buffers" : binfile,
"fname" : os.path.basename(infile),
"nvertex" : len(vertices),
"nface" : len(faces),
"nmaterial" : len(materials),
"nnormal" : nnormals,
"nuv" : len(uvs)
}
out = open(outfile, "w")
out.write(text)
out.close()
# ###################
# generate BIN file
# ###################
buffer = []
# header
# ------
header_bytes = struct.calcsize('<12s')
header_bytes += struct.calcsize('<BBBBBBBB')
header_bytes += struct.calcsize('<IIIIIIIIIII')
# signature
signature = struct.pack('<12s', 'Three.js 003')
# metadata (all data is little-endian)
vertex_coordinate_bytes = 4
normal_coordinate_bytes = 1
uv_coordinate_bytes = 4
vertex_index_bytes = 4
normal_index_bytes = 4
uv_index_bytes = 4
material_index_bytes = 2
# header_bytes unsigned char 1
# vertex_coordinate_bytes unsigned char 1
# normal_coordinate_bytes unsigned char 1
# uv_coordinate_bytes unsigned char 1
# vertex_index_bytes unsigned char 1
# normal_index_bytes unsigned char 1
# uv_index_bytes unsigned char 1
# material_index_bytes unsigned char 1
bdata = struct.pack('<BBBBBBBB', header_bytes,
vertex_coordinate_bytes,
normal_coordinate_bytes,
uv_coordinate_bytes,
vertex_index_bytes,
normal_index_bytes,
uv_index_bytes,
material_index_bytes)
ntri_flat = len(sfaces['triangles_flat'])
ntri_smooth = len(sfaces['triangles_smooth'])
ntri_flat_uv = len(sfaces['triangles_flat_uv'])
ntri_smooth_uv = len(sfaces['triangles_smooth_uv'])
nquad_flat = len(sfaces['quads_flat'])
nquad_smooth = len(sfaces['quads_smooth'])
nquad_flat_uv = len(sfaces['quads_flat_uv'])
nquad_smooth_uv = len(sfaces['quads_smooth_uv'])
# nvertices unsigned int 4
# nnormals unsigned int 4
# nuvs unsigned int 4
# ntri_flat unsigned int 4
# ntri_smooth unsigned int 4
# ntri_flat_uv unsigned int 4
# ntri_smooth_uv unsigned int 4
# nquad_flat unsigned int 4
# nquad_smooth unsigned int 4
# nquad_flat_uv unsigned int 4
# nquad_smooth_uv unsigned int 4
ndata = struct.pack('<IIIIIIIIIII', len(vertices),
nnormals,
len(uvs),
ntri_flat,
ntri_smooth,
ntri_flat_uv,
ntri_smooth_uv,
nquad_flat,
nquad_smooth,
nquad_flat_uv,
nquad_smooth_uv)
buffer.append(signature)
buffer.append(bdata)
buffer.append(ndata)
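    # At this point the header occupies 12 (signature) + 8 (byte sizes) +
    # 44 (11 unsigned ints) = 64 bytes, matching header_bytes computed above.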
# 1. vertices
# ------------
# x float 4
# y float 4
# z float 4
for v in vertices:
data = struct.pack('<fff', v[0], v[1], v[2])
buffer.append(data)
# 2. normals
# ---------------
# x signed char 1
# y signed char 1
# z signed char 1
if SHADING == "smooth":
for n in normals:
normalize(n)
data = struct.pack('<bbb', math.floor(n[0]*127+0.5),
math.floor(n[1]*127+0.5),
math.floor(n[2]*127+0.5))
buffer.append(data)
add_padding(buffer, nnormals * 3)
# 3. uvs
# -----------
# u float 4
# v float 4
for uv in uvs:
data = struct.pack('<ff', uv[0], 1.0-uv[1])
buffer.append(data)
# padding
#data = struct.pack('<BB', 0, 0)
#buffer.append(data)
# 4. flat triangles (vertices + materials)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# ------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_flat'], buffer)
dump_materials_to_buffer(sfaces['triangles_flat'], buffer)
add_padding(buffer, ntri_flat * 2)
# 5. smooth triangles (vertices + materials + normals)
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# -------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# -------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_smooth'], buffer)
dump_normals3_to_buffer(sfaces['triangles_smooth'], buffer)
dump_materials_to_buffer(sfaces['triangles_smooth'], buffer)
add_padding(buffer, ntri_smooth * 2)
# 6. flat triangles uv (vertices + materials + uvs)
# --------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_flat_uv'], buffer)
dump_uvs3_to_buffer(sfaces['triangles_flat_uv'], buffer)
dump_materials_to_buffer(sfaces['triangles_flat_uv'], buffer)
add_padding(buffer, ntri_flat_uv * 2)
# 7. smooth triangles uv (vertices + materials + normals + uvs)
# ----------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_normals3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_uvs3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_materials_to_buffer(sfaces['triangles_smooth_uv'], buffer)
add_padding(buffer, ntri_smooth_uv * 2)
# 8. flat quads (vertices + materials)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_flat'], buffer)
dump_materials_to_buffer(sfaces['quads_flat'], buffer)
add_padding(buffer, nquad_flat * 2)
# 9. smooth quads (vertices + materials + normals)
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# nd unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_smooth'], buffer)
dump_normals4_to_buffer(sfaces['quads_smooth'], buffer)
dump_materials_to_buffer(sfaces['quads_smooth'], buffer)
add_padding(buffer, nquad_smooth * 2)
# 10. flat quads uv (vertices + materials + uvs)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# ud unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_flat_uv'], buffer)
dump_uvs4_to_buffer(sfaces['quads_flat_uv'], buffer)
dump_materials_to_buffer(sfaces['quads_flat_uv'], buffer)
add_padding(buffer, nquad_flat_uv * 2)
# 11. smooth quads uv
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# nd unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# ud unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_normals4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_uvs4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_materials_to_buffer(sfaces['quads_smooth_uv'], buffer)
add_padding(buffer, nquad_smooth_uv * 2)
path = os.path.dirname(outfile)
fname = os.path.join(path, binfile)
out = open(fname, "wb")
out.write("".join(buffer))
out.close()
# #############################################################################
# Helpers
# #############################################################################
def usage():
print "Usage: %s -i filename.obj -o filename.js [-m morphfiles*.obj] [-c morphcolors*.obj] [-a center|top|bottom] [-s flat|smooth] [-t binary|ascii] [-d invert|normal]" % os.path.basename(sys.argv[0])
# #####################################################
# Main
# #####################################################
if __name__ == "__main__":
# get parameters from the command line
try:
opts, args = getopt.getopt(sys.argv[1:], "hbi:m:c:b:o:a:s:t:d:x:f:", ["help", "bakecolors", "input=", "morphs=", "colors=", "output=", "align=", "shading=", "type=", "dissolve=", "truncatescale=", "framestep="])
except getopt.GetoptError:
usage()
sys.exit(2)
infile = outfile = ""
morphfiles = ""
colorfiles = ""
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-i", "--input"):
infile = a
elif o in ("-m", "--morphs"):
morphfiles = a
elif o in ("-c", "--colors"):
colorfiles = a
elif o in ("-o", "--output"):
outfile = a
elif o in ("-a", "--align"):
if a in ("top", "bottom", "center", "centerxz", "none"):
ALIGN = a
elif o in ("-s", "--shading"):
if a in ("flat", "smooth"):
SHADING = a
elif o in ("-t", "--type"):
if a in ("binary", "ascii"):
TYPE = a
elif o in ("-d", "--dissolve"):
if a in ("normal", "invert"):
TRANSPARENCY = a
elif o in ("-b", "--bakecolors"):
BAKE_COLORS = True
elif o in ("-x", "--truncatescale"):
TRUNCATE = True
SCALE = float(a)
elif o in ("-f", "--framestep"):
FRAMESTEP = int(a)
if infile == "" or outfile == "":
usage()
sys.exit(2)
print "Converting [%s] into [%s] ..." % (infile, outfile)
if morphfiles:
print "Morphs [%s]" % morphfiles
if colorfiles:
print "Colors [%s]" % colorfiles
if TYPE == "ascii":
convert_ascii(infile, morphfiles, colorfiles, outfile)
elif TYPE == "binary":
convert_binary(infile, outfile)
|
css-umsetzung/three.js
|
utils/exporters/convert_obj_three.py
|
Python
|
mit
| 46,781
|
###########################################################
#
# Copyright (c) 2005-2009, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
from ui_playground_panel_wdg import *
from font_palettes_example_wdg import *
from panning_scroll_example_wdg import *
from menu_examples_wdg import *
from event_examples_wdg import *
from misc_examples_wdg import *
from fx_anim_examples_wdg import *
from keyboard_handler_examples_wdg import *
from search_class_tag_examples_wdg import *
from efficient_table_example_wdg import *
from dev_sandbox_01_wdg import *
from dev_sandbox_02_wdg import *
from dev_sandbox_03_wdg import *
|
Southpaw-TACTIC/TACTIC
|
src/tactic/ui/examples/__init__.py
|
Python
|
epl-1.0
| 823
|
"""SCons.Environment
Base class for construction Environments. These are
the primary objects used to communicate dependency and
construction information to the build engine.
Keyword arguments supplied when the construction Environment
is created are construction variables used to initialize the
Environment.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Environment.py 2014/03/02 14:18:15 garyo"
import copy
import os
import sys
import re
import shlex
from collections import UserDict
import SCons.Action
import SCons.Builder
import SCons.Debug
from SCons.Debug import logInstanceCreation
import SCons.Defaults
import SCons.Errors
import SCons.Memoize
import SCons.Node
import SCons.Node.Alias
import SCons.Node.FS
import SCons.Node.Python
import SCons.Platform
import SCons.SConf
import SCons.SConsign
import SCons.Subst
import SCons.Tool
import SCons.Util
import SCons.Warnings
class _Null(object):
pass
_null = _Null
_warn_copy_deprecated = True
_warn_source_signatures_deprecated = True
_warn_target_signatures_deprecated = True
CleanTargets = {}
CalculatorArgs = {}
semi_deepcopy = SCons.Util.semi_deepcopy
semi_deepcopy_dict = SCons.Util.semi_deepcopy_dict
# Pull UserError into the global name space for the benefit of
# Environment().SourceSignatures(), which has some import statements
# which seem to mess up its ability to reference SCons directly.
UserError = SCons.Errors.UserError
def alias_builder(env, target, source):
pass
AliasBuilder = SCons.Builder.Builder(action = alias_builder,
target_factory = SCons.Node.Alias.default_ans.Alias,
source_factory = SCons.Node.FS.Entry,
multi = 1,
is_explicit = None,
name='AliasBuilder')
def apply_tools(env, tools, toolpath):
# Store the toolpath in the Environment.
if toolpath is not None:
env['toolpath'] = toolpath
if not tools:
return
# Filter out null tools from the list.
for tool in [_f for _f in tools if _f]:
if SCons.Util.is_List(tool) or isinstance(tool, tuple):
toolname = tool[0]
toolargs = tool[1] # should be a dict of kw args
tool = env.Tool(toolname, **toolargs)
else:
env.Tool(tool)
# These names are (or will be) controlled by SCons; users should never
# set or override them. This warning can optionally be turned off,
# but scons will still ignore the illegal variable names even if it's off.
reserved_construction_var_names = [
'CHANGED_SOURCES',
'CHANGED_TARGETS',
'SOURCE',
'SOURCES',
'TARGET',
'TARGETS',
'UNCHANGED_SOURCES',
'UNCHANGED_TARGETS',
]
future_reserved_construction_var_names = [
#'HOST_OS',
#'HOST_ARCH',
#'HOST_CPU',
]
def copy_non_reserved_keywords(dict):
result = semi_deepcopy(dict)
for k in result.keys():
if k in reserved_construction_var_names:
msg = "Ignoring attempt to set reserved variable `$%s'"
SCons.Warnings.warn(SCons.Warnings.ReservedVariableWarning, msg % k)
del result[k]
return result
def _set_reserved(env, key, value):
msg = "Ignoring attempt to set reserved variable `$%s'"
SCons.Warnings.warn(SCons.Warnings.ReservedVariableWarning, msg % key)
def _set_future_reserved(env, key, value):
env._dict[key] = value
msg = "`$%s' will be reserved in a future release and setting it will become ignored"
SCons.Warnings.warn(SCons.Warnings.FutureReservedVariableWarning, msg % key)
def _set_BUILDERS(env, key, value):
try:
bd = env._dict[key]
for k in bd.keys():
del bd[k]
except KeyError:
        bd = BuilderDict({}, env)  # no existing BUILDERS entry; start from an empty dict
env._dict[key] = bd
for k, v in value.items():
if not SCons.Builder.is_a_Builder(v):
raise SCons.Errors.UserError('%s is not a Builder.' % repr(v))
bd.update(value)
def _del_SCANNERS(env, key):
del env._dict[key]
env.scanner_map_delete()
def _set_SCANNERS(env, key, value):
env._dict[key] = value
env.scanner_map_delete()
def _delete_duplicates(l, keep_last):
"""Delete duplicates from a sequence, keeping the first or last."""
seen={}
result=[]
if keep_last: # reverse in & out, then keep first
l.reverse()
for i in l:
try:
if i not in seen:
result.append(i)
seen[i]=1
except TypeError:
# probably unhashable. Just keep it.
result.append(i)
if keep_last:
result.reverse()
return result
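# For example:
#   _delete_duplicates(['a', 'b', 'a'], keep_last=0)   # -> ['a', 'b']
#   _delete_duplicates(['a', 'b', 'a'], keep_last=1)   # -> ['b', 'a']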
# The following is partly based on code in a comment added by Peter
# Shannon at the following page (there called the "transplant" class):
#
# ASPN : Python Cookbook : Dynamically added methods to a class
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/81732
#
# We had independently been using the idiom as BuilderWrapper, but
# factoring out the common parts into this base class, and making
# BuilderWrapper a subclass that overrides __call__() to enforce specific
# Builder calling conventions, simplified some of our higher-layer code.
class MethodWrapper(object):
"""
A generic Wrapper class that associates a method (which can
actually be any callable) with an object. As part of creating this
    MethodWrapper object an attribute with the specified name (by default,
the name of the supplied method) is added to the underlying object.
When that new "method" is called, our __call__() method adds the
object as the first argument, simulating the Python behavior of
supplying "self" on method calls.
We hang on to the name by which the method was added to the underlying
base class so that we can provide a method to "clone" ourselves onto
a new underlying object being copied (without which we wouldn't need
to save that info).
"""
def __init__(self, object, method, name=None):
if name is None:
name = method.__name__
self.object = object
self.method = method
self.name = name
setattr(self.object, name, self)
def __call__(self, *args, **kwargs):
nargs = (self.object,) + args
return self.method(*nargs, **kwargs)
def clone(self, new_object):
"""
Returns an object that re-binds the underlying "method" to
the specified new object.
"""
return self.__class__(new_object, self.method, self.name)
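# A rough sketch of how MethodWrapper is meant to be used (hypothetical names):
#
#   def hello(obj, who):
#       return "hello %s" % who
#   MethodWrapper(some_object, hello, 'Hello')
#   some_object.Hello('world')   # calls hello(some_object, 'world')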
class BuilderWrapper(MethodWrapper):
"""
    A MethodWrapper subclass that associates an environment with
a Builder.
This mainly exists to wrap the __call__() function so that all calls
to Builders can have their argument lists massaged in the same way
(treat a lone argument as the source, treat two arguments as target
then source, make sure both target and source are lists) without
having to have cut-and-paste code to do it.
As a bit of obsessive backwards compatibility, we also intercept
attempts to get or set the "env" or "builder" attributes, which were
the names we used before we put the common functionality into the
MethodWrapper base class. We'll keep this around for a while in case
people shipped Tool modules that reached into the wrapper (like the
    Tool/qt.py module does, or did). There shouldn't be a lot of attribute
fetching or setting on these, so a little extra work shouldn't hurt.
"""
def __call__(self, target=None, source=_null, *args, **kw):
if source is _null:
source = target
target = None
if target is not None and not SCons.Util.is_List(target):
target = [target]
if source is not None and not SCons.Util.is_List(source):
source = [source]
return MethodWrapper.__call__(self, target, source, *args, **kw)
def __repr__(self):
return '<BuilderWrapper %s>' % repr(self.name)
def __str__(self):
return self.__repr__()
def __getattr__(self, name):
if name == 'env':
return self.object
elif name == 'builder':
return self.method
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if name == 'env':
self.object = value
elif name == 'builder':
self.method = value
else:
self.__dict__[name] = value
# This allows a Builder to be executed directly
# through the Environment to which it's attached.
# In practice, we shouldn't need this, because
# builders actually get executed through a Node.
# But we do have a unit test for this, and can't
# yet rule out that it would be useful in the
# future, so leave it for now.
#def execute(self, **kw):
# kw['env'] = self.env
# self.builder.execute(**kw)
class BuilderDict(UserDict):
"""This is a dictionary-like class used by an Environment to hold
the Builders. We need to do this because every time someone changes
the Builders in the Environment's BUILDERS dictionary, we must
update the Environment's attributes."""
def __init__(self, dict, env):
# Set self.env before calling the superclass initialization,
# because it will end up calling our other methods, which will
# need to point the values in this dictionary to self.env.
self.env = env
UserDict.__init__(self, dict)
def __semi_deepcopy__(self):
# These cannot be copied since they would both modify the same builder object, and indeed
# just copying would modify the original builder
raise TypeError( 'cannot semi_deepcopy a BuilderDict' )
def __setitem__(self, item, val):
try:
method = getattr(self.env, item).method
except AttributeError:
pass
else:
self.env.RemoveMethod(method)
UserDict.__setitem__(self, item, val)
BuilderWrapper(self.env, val, item)
def __delitem__(self, item):
UserDict.__delitem__(self, item)
delattr(self.env, item)
def update(self, dict):
for i, v in dict.items():
self.__setitem__(i, v)
_is_valid_var = re.compile(r'[_a-zA-Z]\w*$')
def is_valid_construction_var(varstr):
"""Return if the specified string is a legitimate construction
variable.
"""
return _is_valid_var.match(varstr)
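# For example, is_valid_construction_var('CCFLAGS') returns a match object,
# while is_valid_construction_var('2BAD') and is_valid_construction_var('A-B')
# both return None.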
class SubstitutionEnvironment(object):
"""Base class for different flavors of construction environments.
    This class contains a minimal set of methods that handle construction
variable expansion and conversion of strings to Nodes, which may or
may not be actually useful as a stand-alone class. Which methods
ended up in this class is pretty arbitrary right now. They're
basically the ones which we've empirically determined are common to
the different construction environment subclasses, and most of the
others that use or touch the underlying dictionary of construction
variables.
Eventually, this class should contain all the methods that we
determine are necessary for a "minimal" interface to the build engine.
A full "native Python" SCons environment has gotten pretty heavyweight
with all of the methods and Tools and construction variables we've
jammed in there, so it would be nice to have a lighter weight
alternative for interfaces that don't need all of the bells and
whistles. (At some point, we'll also probably rename this class
"Base," since that more reflects what we want this class to become,
but because we've released comments that tell people to subclass
Environment.Base to create their own flavors of construction
environment, we'll save that for a future refactoring when this
class actually becomes useful.)
"""
if SCons.Memoize.use_memoizer:
__metaclass__ = SCons.Memoize.Memoized_Metaclass
def __init__(self, **kw):
"""Initialization of an underlying SubstitutionEnvironment class.
"""
if SCons.Debug.track_instances: logInstanceCreation(self, 'Environment.SubstitutionEnvironment')
self.fs = SCons.Node.FS.get_default_fs()
self.ans = SCons.Node.Alias.default_ans
self.lookup_list = SCons.Node.arg2nodes_lookups
self._dict = kw.copy()
self._init_special()
self.added_methods = []
#self._memo = {}
def _init_special(self):
"""Initial the dispatch tables for special handling of
special construction variables."""
self._special_del = {}
self._special_del['SCANNERS'] = _del_SCANNERS
self._special_set = {}
for key in reserved_construction_var_names:
self._special_set[key] = _set_reserved
for key in future_reserved_construction_var_names:
self._special_set[key] = _set_future_reserved
self._special_set['BUILDERS'] = _set_BUILDERS
self._special_set['SCANNERS'] = _set_SCANNERS
# Freeze the keys of self._special_set in a list for use by
# methods that need to check. (Empirically, list scanning has
# gotten better than dict.has_key() in Python 2.5.)
self._special_set_keys = list(self._special_set.keys())
def __cmp__(self, other):
return cmp(self._dict, other._dict)
def __delitem__(self, key):
special = self._special_del.get(key)
if special:
special(self, key)
else:
del self._dict[key]
def __getitem__(self, key):
return self._dict[key]
def __setitem__(self, key, value):
# This is heavily used. This implementation is the best we have
# according to the timings in bench/env.__setitem__.py.
#
# The "key in self._special_set_keys" test here seems to perform
# pretty well for the number of keys we have. A hard-coded
# list works a little better in Python 2.5, but that has the
# disadvantage of maybe getting out of sync if we ever add more
# variable names. Using self._special_set.has_key() works a
# little better in Python 2.4, but is worse than this test.
# So right now it seems like a good trade-off, but feel free to
# revisit this with bench/env.__setitem__.py as needed (and
# as newer versions of Python come out).
if key in self._special_set_keys:
self._special_set[key](self, key, value)
else:
# If we already have the entry, then it's obviously a valid
# key and we don't need to check. If we do check, using a
# global, pre-compiled regular expression directly is more
# efficient than calling another function or a method.
if key not in self._dict \
and not _is_valid_var.match(key):
raise SCons.Errors.UserError("Illegal construction variable `%s'" % key)
self._dict[key] = value
def get(self, key, default=None):
"""Emulates the get() method of dictionaries."""
return self._dict.get(key, default)
def has_key(self, key):
return key in self._dict
def __contains__(self, key):
return self._dict.__contains__(key)
def items(self):
return list(self._dict.items())
def arg2nodes(self, args, node_factory=_null, lookup_list=_null, **kw):
if node_factory is _null:
node_factory = self.fs.File
if lookup_list is _null:
lookup_list = self.lookup_list
if not args:
return []
args = SCons.Util.flatten(args)
nodes = []
for v in args:
if SCons.Util.is_String(v):
n = None
for l in lookup_list:
n = l(v)
if n is not None:
break
if n is not None:
if SCons.Util.is_String(n):
# n = self.subst(n, raw=1, **kw)
kw['raw'] = 1
n = self.subst(n, **kw)
if node_factory:
n = node_factory(n)
if SCons.Util.is_List(n):
nodes.extend(n)
else:
nodes.append(n)
elif node_factory:
# v = node_factory(self.subst(v, raw=1, **kw))
kw['raw'] = 1
v = node_factory(self.subst(v, **kw))
if SCons.Util.is_List(v):
nodes.extend(v)
else:
nodes.append(v)
else:
nodes.append(v)
return nodes
def gvars(self):
return self._dict
def lvars(self):
return {}
def subst(self, string, raw=0, target=None, source=None, conv=None, executor=None):
"""Recursively interpolates construction variables from the
Environment into the specified string, returning the expanded
result. Construction variables are specified by a $ prefix
in the string and begin with an initial underscore or
alphabetic character followed by any number of underscores
or alphanumeric characters. The construction variable names
may be surrounded by curly braces to separate the name from
trailing characters.
"""
gvars = self.gvars()
lvars = self.lvars()
lvars['__env__'] = self
if executor:
lvars.update(executor.get_lvars())
return SCons.Subst.scons_subst(string, self, raw, target, source, gvars, lvars, conv)
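    # For instance, with env['CC'] set to 'gcc', env.subst('$CC -c $CCFLAGS')
    # expands $CC to 'gcc' and $CCFLAGS to whatever that variable currently
    # holds (illustrative values only).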
def subst_kw(self, kw, raw=0, target=None, source=None):
nkw = {}
for k, v in kw.items():
k = self.subst(k, raw, target, source)
if SCons.Util.is_String(v):
v = self.subst(v, raw, target, source)
nkw[k] = v
return nkw
def subst_list(self, string, raw=0, target=None, source=None, conv=None, executor=None):
"""Calls through to SCons.Subst.scons_subst_list(). See
the documentation for that function."""
gvars = self.gvars()
lvars = self.lvars()
lvars['__env__'] = self
if executor:
lvars.update(executor.get_lvars())
return SCons.Subst.scons_subst_list(string, self, raw, target, source, gvars, lvars, conv)
def subst_path(self, path, target=None, source=None):
"""Substitute a path list, turning EntryProxies into Nodes
and leaving Nodes (and other objects) as-is."""
if not SCons.Util.is_List(path):
path = [path]
def s(obj):
"""This is the "string conversion" routine that we have our
substitutions use to return Nodes, not strings. This relies
on the fact that an EntryProxy object has a get() method that
returns the underlying Node that it wraps, which is a bit of
architectural dependence that we might need to break or modify
in the future in response to additional requirements."""
try:
get = obj.get
except AttributeError:
obj = SCons.Util.to_String_for_subst(obj)
else:
obj = get()
return obj
r = []
for p in path:
if SCons.Util.is_String(p):
p = self.subst(p, target=target, source=source, conv=s)
if SCons.Util.is_List(p):
if len(p) == 1:
p = p[0]
else:
# We have an object plus a string, or multiple
# objects that we need to smush together. No choice
# but to make them into a string.
p = ''.join(map(SCons.Util.to_String_for_subst, p))
else:
p = s(p)
r.append(p)
return r
subst_target_source = subst
def backtick(self, command):
import subprocess
# common arguments
kw = { 'stdin' : 'devnull',
'stdout' : subprocess.PIPE,
'stderr' : subprocess.PIPE,
'universal_newlines' : True,
}
# if the command is a list, assume it's been quoted
        # otherwise force a shell
if not SCons.Util.is_List(command): kw['shell'] = True
# run constructed command
p = SCons.Action._subproc(self, command, **kw)
out,err = p.communicate()
status = p.wait()
if err:
sys.stderr.write(unicode(err))
if status:
raise OSError("'%s' exited %d" % (command, status))
return out
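    # For example, env.backtick('pkg-config --cflags glib-2.0') would run the
    # command in a shell and return its stdout as a string (hypothetical
    # command; it must exist on the system and exit successfully).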
def AddMethod(self, function, name=None):
"""
Adds the specified function as a method of this construction
environment with the specified name. If the name is omitted,
the default name is the name of the function itself.
"""
method = MethodWrapper(self, function, name)
self.added_methods.append(method)
def RemoveMethod(self, function):
"""
Removes the specified function's MethodWrapper from the
added_methods list, so we don't re-bind it when making a clone.
"""
self.added_methods = [dm for dm in self.added_methods if not dm.method is function]
def Override(self, overrides):
"""
        Produce a modified environment whose variables are overridden by
        the overrides dictionary. "overrides" is a dictionary that
will override the variables of this environment.
This function is much more efficient than Clone() or creating
a new Environment because it doesn't copy the construction
environment dictionary, it just wraps the underlying construction
environment, and doesn't even create a wrapper object if there
are no overrides.
"""
if not overrides: return self
o = copy_non_reserved_keywords(overrides)
if not o: return self
overrides = {}
merges = None
for key, value in o.items():
if key == 'parse_flags':
merges = value
else:
overrides[key] = SCons.Subst.scons_subst_once(value, self, key)
env = OverrideEnvironment(self, overrides)
if merges: env.MergeFlags(merges)
return env
def ParseFlags(self, *flags):
"""
Parse the set of flags and return a dict with the flags placed
in the appropriate entry. The flags are treated as a typical
set of command-line flags for a GNU-like toolchain and used to
populate the entries in the dict immediately below. If one of
the flag strings begins with a bang (exclamation mark), it is
assumed to be a command and the rest of the string is executed;
the result of that evaluation is then added to the dict.
"""
dict = {
'ASFLAGS' : SCons.Util.CLVar(''),
'CFLAGS' : SCons.Util.CLVar(''),
'CCFLAGS' : SCons.Util.CLVar(''),
'CXXFLAGS' : SCons.Util.CLVar(''),
'CPPDEFINES' : [],
'CPPFLAGS' : SCons.Util.CLVar(''),
'CPPPATH' : [],
'FRAMEWORKPATH' : SCons.Util.CLVar(''),
'FRAMEWORKS' : SCons.Util.CLVar(''),
'LIBPATH' : [],
'LIBS' : [],
'LINKFLAGS' : SCons.Util.CLVar(''),
'RPATH' : [],
}
def do_parse(arg):
# if arg is a sequence, recurse with each element
if not arg:
return
if not SCons.Util.is_String(arg):
for t in arg: do_parse(t)
return
# if arg is a command, execute it
if arg[0] == '!':
arg = self.backtick(arg[1:])
# utility function to deal with -D option
def append_define(name, dict = dict):
t = name.split('=')
if len(t) == 1:
dict['CPPDEFINES'].append(name)
else:
dict['CPPDEFINES'].append([t[0], '='.join(t[1:])])
# Loop through the flags and add them to the appropriate option.
# This tries to strike a balance between checking for all possible
# flags and keeping the logic to a finite size, so it doesn't
            # check for some that don't occur often. In particular, if the
# flag is not known to occur in a config script and there's a way
# of passing the flag to the right place (by wrapping it in a -W
# flag, for example) we don't check for it. Note that most
# preprocessor options are not handled, since unhandled options
# are placed in CCFLAGS, so unless the preprocessor is invoked
# separately, these flags will still get to the preprocessor.
# Other options not currently handled:
            # -iquote dir (preprocessor search path)
# -u symbol (linker undefined symbol)
# -s (linker strip files)
# -static* (linker static binding)
# -shared* (linker dynamic binding)
# -symbolic (linker global binding)
# -R dir (deprecated linker rpath)
# IBM compilers may also accept -qframeworkdir=foo
params = shlex.split(arg)
append_next_arg_to = None # for multi-word args
for arg in params:
if append_next_arg_to:
if append_next_arg_to == 'CPPDEFINES':
append_define(arg)
elif append_next_arg_to == '-include':
t = ('-include', self.fs.File(arg))
dict['CCFLAGS'].append(t)
elif append_next_arg_to == '-isysroot':
t = ('-isysroot', arg)
dict['CCFLAGS'].append(t)
dict['LINKFLAGS'].append(t)
elif append_next_arg_to == '-arch':
t = ('-arch', arg)
dict['CCFLAGS'].append(t)
dict['LINKFLAGS'].append(t)
else:
dict[append_next_arg_to].append(arg)
append_next_arg_to = None
elif not arg[0] in ['-', '+']:
dict['LIBS'].append(self.fs.File(arg))
elif arg == '-dylib_file':
dict['LINKFLAGS'].append(arg)
append_next_arg_to = 'LINKFLAGS'
elif arg[:2] == '-L':
if arg[2:]:
dict['LIBPATH'].append(arg[2:])
else:
append_next_arg_to = 'LIBPATH'
elif arg[:2] == '-l':
if arg[2:]:
dict['LIBS'].append(arg[2:])
else:
append_next_arg_to = 'LIBS'
elif arg[:2] == '-I':
if arg[2:]:
dict['CPPPATH'].append(arg[2:])
else:
append_next_arg_to = 'CPPPATH'
elif arg[:4] == '-Wa,':
dict['ASFLAGS'].append(arg[4:])
dict['CCFLAGS'].append(arg)
elif arg[:4] == '-Wl,':
if arg[:11] == '-Wl,-rpath=':
dict['RPATH'].append(arg[11:])
elif arg[:7] == '-Wl,-R,':
dict['RPATH'].append(arg[7:])
elif arg[:6] == '-Wl,-R':
dict['RPATH'].append(arg[6:])
else:
dict['LINKFLAGS'].append(arg)
elif arg[:4] == '-Wp,':
dict['CPPFLAGS'].append(arg)
elif arg[:2] == '-D':
if arg[2:]:
append_define(arg[2:])
else:
append_next_arg_to = 'CPPDEFINES'
elif arg == '-framework':
append_next_arg_to = 'FRAMEWORKS'
elif arg[:14] == '-frameworkdir=':
dict['FRAMEWORKPATH'].append(arg[14:])
elif arg[:2] == '-F':
if arg[2:]:
dict['FRAMEWORKPATH'].append(arg[2:])
else:
append_next_arg_to = 'FRAMEWORKPATH'
elif arg in ['-mno-cygwin',
'-pthread',
'-openmp',
'-fopenmp']:
dict['CCFLAGS'].append(arg)
dict['LINKFLAGS'].append(arg)
elif arg == '-mwindows':
dict['LINKFLAGS'].append(arg)
elif arg[:5] == '-std=':
if arg[5:].find('++')!=-1:
key='CXXFLAGS'
else:
key='CFLAGS'
dict[key].append(arg)
elif arg[0] == '+':
dict['CCFLAGS'].append(arg)
dict['LINKFLAGS'].append(arg)
elif arg in ['-include', '-isysroot', '-arch']:
append_next_arg_to = arg
else:
dict['CCFLAGS'].append(arg)
for arg in flags:
do_parse(arg)
return dict
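    # For example, env.ParseFlags('-I/usr/include -DFOO -lm') returns a dict
    # whose CPPPATH is ['/usr/include'], CPPDEFINES is ['FOO'] and LIBS is
    # ['m'] (illustrative flags).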
def MergeFlags(self, args, unique=1, dict=None):
"""
Merge the dict in args into the construction variables of this
env, or the passed-in dict. If args is not a dict, it is
converted into a dict using ParseFlags. If unique is not set,
the flags are appended rather than merged.
"""
if dict is None:
dict = self
if not SCons.Util.is_Dict(args):
args = self.ParseFlags(args)
if not unique:
self.Append(**args)
return self
for key, value in args.items():
if not value:
continue
try:
orig = self[key]
except KeyError:
orig = value
else:
if not orig:
orig = value
elif value:
# Add orig and value. The logic here was lifted from
# part of env.Append() (see there for a lot of comments
# about the order in which things are tried) and is
# used mainly to handle coercion of strings to CLVar to
# "do the right thing" given (e.g.) an original CCFLAGS
# string variable like '-pipe -Wall'.
try:
orig = orig + value
except (KeyError, TypeError):
try:
add_to_orig = orig.append
except AttributeError:
value.insert(0, orig)
orig = value
else:
add_to_orig(value)
t = []
if key[-4:] == 'PATH':
                ### keep left-most occurrence
for v in orig:
if v not in t:
t.append(v)
else:
                ### keep right-most occurrence
orig.reverse()
for v in orig:
if v not in t:
t.insert(0, v)
self[key] = t
return self
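    # For example, env.MergeFlags('-O2 -I/opt/include') parses the string via
    # ParseFlags and then merges '-O2' into CCFLAGS and '/opt/include' into
    # CPPPATH, dropping duplicate values (illustrative flags).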
# def MergeShellPaths(self, args, prepend=1):
# """
# Merge the dict in args into the shell environment in env['ENV'].
# Shell path elements are appended or prepended according to prepend.
# Uses Pre/AppendENVPath, so it always appends or prepends uniquely.
# Example: env.MergeShellPaths({'LIBPATH': '/usr/local/lib'})
# prepends /usr/local/lib to env['ENV']['LIBPATH'].
# """
# for pathname, pathval in args.items():
# if not pathval:
# continue
# if prepend:
# self.PrependENVPath(pathname, pathval)
# else:
# self.AppendENVPath(pathname, pathval)
def default_decide_source(dependency, target, prev_ni):
f = SCons.Defaults.DefaultEnvironment().decide_source
return f(dependency, target, prev_ni)
def default_decide_target(dependency, target, prev_ni):
f = SCons.Defaults.DefaultEnvironment().decide_target
return f(dependency, target, prev_ni)
def default_copy_from_cache(src, dst):
f = SCons.Defaults.DefaultEnvironment().copy_from_cache
return f(src, dst)
class Base(SubstitutionEnvironment):
"""Base class for "real" construction Environments. These are the
primary objects used to communicate dependency and construction
information to the build engine.
Keyword arguments supplied when the construction Environment
is created are construction variables used to initialize the
Environment.
"""
memoizer_counters = []
#######################################################################
# This is THE class for interacting with the SCons build engine,
# and it contains a lot of stuff, so we're going to try to keep this
# a little organized by grouping the methods.
#######################################################################
#######################################################################
# Methods that make an Environment act like a dictionary. These have
# the expected standard names for Python mapping objects. Note that
# we don't actually make an Environment a subclass of UserDict for
# performance reasons. Note also that we only supply methods for
# dictionary functionality that we actually need and use.
#######################################################################
def __init__(self,
platform=None,
tools=None,
toolpath=None,
variables=None,
parse_flags = None,
**kw):
"""
Initialization of a basic SCons construction environment,
including setting up special construction variables like BUILDER,
PLATFORM, etc., and searching for and applying available Tools.
Note that we do *not* call the underlying base class
        (SubstitutionEnvironment) initialization, because we need to
initialize things in a very specific order that doesn't work
with the much simpler base class initialization.
"""
if SCons.Debug.track_instances: logInstanceCreation(self, 'Environment.Base')
self._memo = {}
self.fs = SCons.Node.FS.get_default_fs()
self.ans = SCons.Node.Alias.default_ans
self.lookup_list = SCons.Node.arg2nodes_lookups
self._dict = semi_deepcopy(SCons.Defaults.ConstructionEnvironment)
self._init_special()
self.added_methods = []
# We don't use AddMethod, or define these as methods in this
# class, because we *don't* want these functions to be bound
# methods. They need to operate independently so that the
# settings will work properly regardless of whether a given
# target ends up being built with a Base environment or an
# OverrideEnvironment or what have you.
self.decide_target = default_decide_target
self.decide_source = default_decide_source
self.copy_from_cache = default_copy_from_cache
self._dict['BUILDERS'] = BuilderDict(self._dict['BUILDERS'], self)
if platform is None:
platform = self._dict.get('PLATFORM', None)
if platform is None:
platform = SCons.Platform.Platform()
if SCons.Util.is_String(platform):
platform = SCons.Platform.Platform(platform)
self._dict['PLATFORM'] = str(platform)
platform(self)
self._dict['HOST_OS'] = self._dict.get('HOST_OS',None)
self._dict['HOST_ARCH'] = self._dict.get('HOST_ARCH',None)
# Now set defaults for TARGET_{OS|ARCH}
self._dict['TARGET_OS'] = self._dict.get('TARGET_OS',None)
self._dict['TARGET_ARCH'] = self._dict.get('TARGET_ARCH',None)
# Apply the passed-in and customizable variables to the
# environment before calling the tools, because they may use
# some of them during initialization.
if 'options' in kw:
            # Backwards compatibility: they may still be using the
# old "options" keyword.
variables = kw['options']
del kw['options']
self.Replace(**kw)
keys = list(kw.keys())
if variables:
keys = keys + list(variables.keys())
variables.Update(self)
save = {}
for k in keys:
try:
save[k] = self._dict[k]
except KeyError:
# No value may have been set if they tried to pass in a
# reserved variable name like TARGETS.
pass
SCons.Tool.Initializers(self)
if tools is None:
tools = self._dict.get('TOOLS', None)
if tools is None:
tools = ['default']
apply_tools(self, tools, toolpath)
# Now restore the passed-in and customized variables
# to the environment, since the values the user set explicitly
# should override any values set by the tools.
for key, val in save.items():
self._dict[key] = val
# Finally, apply any flags to be merged in
if parse_flags: self.MergeFlags(parse_flags)
#######################################################################
# Utility methods that are primarily for internal use by SCons.
# These begin with lower-case letters.
#######################################################################
def get_builder(self, name):
"""Fetch the builder with the specified name from the environment.
"""
try:
return self._dict['BUILDERS'][name]
except KeyError:
return None
def get_CacheDir(self):
try:
path = self._CacheDir_path
except AttributeError:
path = SCons.Defaults.DefaultEnvironment()._CacheDir_path
try:
if path == self._last_CacheDir_path:
return self._last_CacheDir
except AttributeError:
pass
cd = SCons.CacheDir.CacheDir(path)
self._last_CacheDir_path = path
self._last_CacheDir = cd
return cd
def get_factory(self, factory, default='File'):
"""Return a factory function for creating Nodes for this
construction environment.
"""
name = default
try:
is_node = issubclass(factory, SCons.Node.FS.Base)
except TypeError:
# The specified factory isn't a Node itself--it's
# most likely None, or possibly a callable.
pass
else:
if is_node:
# The specified factory is a Node (sub)class. Try to
# return the FS method that corresponds to the Node's
# name--that is, we return self.fs.Dir if they want a Dir,
# self.fs.File for a File, etc.
try: name = factory.__name__
except AttributeError: pass
else: factory = None
if not factory:
# They passed us None, or we picked up a name from a specified
# class, so return the FS method. (Note that we *don't*
# use our own self.{Dir,File} methods because that would
# cause env.subst() to be called twice on the file name,
# interfering with files that have $$ in them.)
factory = getattr(self.fs, name)
return factory
memoizer_counters.append(SCons.Memoize.CountValue('_gsm'))
def _gsm(self):
try:
return self._memo['_gsm']
except KeyError:
pass
result = {}
try:
scanners = self._dict['SCANNERS']
except KeyError:
pass
else:
# Reverse the scanner list so that, if multiple scanners
# claim they can scan the same suffix, earlier scanners
# in the list will overwrite later scanners, so that
# the result looks like a "first match" to the user.
if not SCons.Util.is_List(scanners):
scanners = [scanners]
else:
scanners = scanners[:] # copy so reverse() doesn't mod original
scanners.reverse()
for scanner in scanners:
for k in scanner.get_skeys(self):
if k and self['PLATFORM'] == 'win32':
k = k.lower()
result[k] = scanner
self._memo['_gsm'] = result
return result
def get_scanner(self, skey):
"""Find the appropriate scanner given a key (usually a file suffix).
"""
if skey and self['PLATFORM'] == 'win32':
skey = skey.lower()
return self._gsm().get(skey)
def scanner_map_delete(self, kw=None):
"""Delete the cached scanner map (if we need to).
"""
try:
del self._memo['_gsm']
except KeyError:
pass
def _update(self, dict):
"""Update an environment's values directly, bypassing the normal
checks that occur when users try to set items.
"""
self._dict.update(dict)
def get_src_sig_type(self):
try:
return self.src_sig_type
except AttributeError:
t = SCons.Defaults.DefaultEnvironment().src_sig_type
self.src_sig_type = t
return t
def get_tgt_sig_type(self):
try:
return self.tgt_sig_type
except AttributeError:
t = SCons.Defaults.DefaultEnvironment().tgt_sig_type
self.tgt_sig_type = t
return t
#######################################################################
# Public methods for manipulating an Environment. These begin with
# upper-case letters. The essential characteristic of methods in
# this section is that they do *not* have corresponding same-named
# global functions. For example, a stand-alone Append() function
# makes no sense, because Append() is all about appending values to
# an Environment's construction variables.
#######################################################################
def Append(self, **kw):
"""Append values to existing construction variables
in an Environment.
"""
kw = copy_non_reserved_keywords(kw)
for key, val in kw.items():
# It would be easier on the eyes to write this using
# "continue" statements whenever we finish processing an item,
# but Python 1.5.2 apparently doesn't let you use "continue"
# within try:-except: blocks, so we have to nest our code.
try:
if key == 'CPPDEFINES' and SCons.Util.is_String(self._dict[key]):
self._dict[key] = [self._dict[key]]
orig = self._dict[key]
except KeyError:
# No existing variable in the environment, so just set
# it to the new value.
if key == 'CPPDEFINES' and SCons.Util.is_String(val):
self._dict[key] = [val]
else:
self._dict[key] = val
else:
try:
# Check if the original looks like a dictionary.
# If it is, we can't just try adding the value because
# dictionaries don't have __add__() methods, and
# things like UserList will incorrectly coerce the
# original dict to a list (which we don't want).
update_dict = orig.update
except AttributeError:
try:
# Most straightforward: just try to add them
# together. This will work in most cases, when the
# original and new values are of compatible types.
self._dict[key] = orig + val
except (KeyError, TypeError):
try:
# Check if the original is a list.
add_to_orig = orig.append
except AttributeError:
# The original isn't a list, but the new
# value is (by process of elimination),
# so insert the original in the new value
# (if there's one to insert) and replace
# the variable with it.
if orig:
val.insert(0, orig)
self._dict[key] = val
else:
# The original is a list, so append the new
# value to it (if there's a value to append).
if val:
add_to_orig(val)
else:
# The original looks like a dictionary, so update it
# based on what we think the value looks like.
if SCons.Util.is_List(val):
if key == 'CPPDEFINES':
orig = orig.items()
orig += val
self._dict[key] = orig
else:
for v in val:
orig[v] = None
else:
try:
update_dict(val)
except (AttributeError, TypeError, ValueError):
if SCons.Util.is_Dict(val):
for k, v in val.items():
orig[k] = v
else:
orig[val] = None
self.scanner_map_delete(kw)
# allow Dirs and strings beginning with # for top-relative
# Note this uses the current env's fs (in self).
def _canonicalize(self, path):
if not SCons.Util.is_String(path): # typically a Dir
path = str(path)
if path and path[0] == '#':
path = str(self.fs.Dir(path))
return path
def AppendENVPath(self, name, newpath, envname = 'ENV',
sep = os.pathsep, delete_existing=1):
"""Append path elements to the path 'name' in the 'ENV'
dictionary for this environment. Will only add any particular
path once, and will normpath and normcase all paths to help
assure this. This can also handle the case where the env
variable is a list instead of a string.
If delete_existing is 0, a newpath which is already in the path
will not be moved to the end (it will be left where it is).
"""
orig = ''
if envname in self._dict and name in self._dict[envname]:
orig = self._dict[envname][name]
nv = SCons.Util.AppendPath(orig, newpath, sep, delete_existing,
canonicalize=self._canonicalize)
if envname not in self._dict:
self._dict[envname] = {}
self._dict[envname][name] = nv
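    # For example, env.AppendENVPath('PATH', '/usr/local/bin') appends that
    # directory to env['ENV']['PATH'] unless an equivalent entry is already
    # present (hypothetical path).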
def AppendUnique(self, delete_existing=0, **kw):
"""Append values to existing construction variables
in an Environment, if they're not already there.
If delete_existing is 1, removes existing values first, so
values move to end.
"""
kw = copy_non_reserved_keywords(kw)
for key, val in kw.items():
if SCons.Util.is_List(val):
val = _delete_duplicates(val, delete_existing)
if key not in self._dict or self._dict[key] in ('', None):
self._dict[key] = val
elif SCons.Util.is_Dict(self._dict[key]) and \
SCons.Util.is_Dict(val):
self._dict[key].update(val)
elif SCons.Util.is_List(val):
dk = self._dict[key]
if key == 'CPPDEFINES':
tmp = []
for i in val:
if SCons.Util.is_List(i):
if len(i) >= 2:
tmp.append((i[0], i[1]))
else:
tmp.append((i[0],))
elif SCons.Util.is_Tuple(i):
tmp.append(i)
else:
tmp.append((i,))
val = tmp
if SCons.Util.is_Dict(dk):
dk = dk.items()
elif SCons.Util.is_String(dk):
dk = [(dk,)]
else:
tmp = []
for i in dk:
if SCons.Util.is_List(i):
if len(i) >= 2:
tmp.append((i[0], i[1]))
else:
tmp.append((i[0],))
elif SCons.Util.is_Tuple(i):
tmp.append(i)
else:
tmp.append((i,))
dk = tmp
else:
if not SCons.Util.is_List(dk):
dk = [dk]
if delete_existing:
dk = [x for x in dk if x not in val]
else:
val = [x for x in val if x not in dk]
self._dict[key] = dk + val
else:
dk = self._dict[key]
if SCons.Util.is_List(dk):
if key == 'CPPDEFINES':
tmp = []
for i in dk:
if SCons.Util.is_List(i):
if len(i) >= 2:
tmp.append((i[0], i[1]))
else:
tmp.append((i[0],))
elif SCons.Util.is_Tuple(i):
tmp.append(i)
else:
tmp.append((i,))
dk = tmp
if SCons.Util.is_Dict(val):
val = val.items()
elif SCons.Util.is_String(val):
val = [(val,)]
if delete_existing:
dk = filter(lambda x, val=val: x not in val, dk)
self._dict[key] = dk + val
else:
dk = [x for x in dk if x not in val]
self._dict[key] = dk + val
else:
# By elimination, val is not a list. Since dk is a
# list, wrap val in a list first.
if delete_existing:
dk = filter(lambda x, val=val: x not in val, dk)
self._dict[key] = dk + [val]
else:
if not val in dk:
self._dict[key] = dk + [val]
else:
if key == 'CPPDEFINES':
if SCons.Util.is_String(dk):
dk = [dk]
elif SCons.Util.is_Dict(dk):
dk = dk.items()
if SCons.Util.is_String(val):
if val in dk:
val = []
else:
val = [val]
elif SCons.Util.is_Dict(val):
tmp = []
for i,j in val.iteritems():
if j is not None:
tmp.append((i,j))
else:
tmp.append(i)
val = tmp
if delete_existing:
dk = [x for x in dk if x not in val]
self._dict[key] = dk + val
self.scanner_map_delete(kw)
def Clone(self, tools=[], toolpath=None, parse_flags = None, **kw):
"""Return a copy of a construction Environment. The
copy is like a Python "deep copy"--that is, independent
        copies are made recursively of each object--except that
a reference is copied when an object is not deep-copyable
(like a function). There are no references to any mutable
objects in the original Environment.
"""
try:
builders = self._dict['BUILDERS']
except KeyError:
pass
clone = copy.copy(self)
# BUILDERS is not safe to do a simple copy
clone._dict = semi_deepcopy_dict(self._dict, ['BUILDERS'])
clone._dict['BUILDERS'] = BuilderDict(builders, clone)
# Check the methods added via AddMethod() and re-bind them to
# the cloned environment. Only do this if the attribute hasn't
# been overwritten by the user explicitly and still points to
# the added method.
clone.added_methods = []
for mw in self.added_methods:
if mw == getattr(self, mw.name):
clone.added_methods.append(mw.clone(clone))
clone._memo = {}
# Apply passed-in variables before the tools
# so the tools can use the new variables
kw = copy_non_reserved_keywords(kw)
new = {}
for key, value in kw.items():
new[key] = SCons.Subst.scons_subst_once(value, self, key)
clone.Replace(**new)
apply_tools(clone, tools, toolpath)
# apply them again in case the tools overwrote them
clone.Replace(**new)
# Finally, apply any flags to be merged in
if parse_flags: clone.MergeFlags(parse_flags)
if SCons.Debug.track_instances: logInstanceCreation(self, 'Environment.EnvironmentClone')
return clone
def Copy(self, *args, **kw):
global _warn_copy_deprecated
if _warn_copy_deprecated:
msg = "The env.Copy() method is deprecated; use the env.Clone() method instead."
SCons.Warnings.warn(SCons.Warnings.DeprecatedCopyWarning, msg)
_warn_copy_deprecated = False
return self.Clone(*args, **kw)
def _changed_build(self, dependency, target, prev_ni):
if dependency.changed_state(target, prev_ni):
return 1
return self.decide_source(dependency, target, prev_ni)
def _changed_content(self, dependency, target, prev_ni):
return dependency.changed_content(target, prev_ni)
def _changed_source(self, dependency, target, prev_ni):
target_env = dependency.get_build_env()
type = target_env.get_tgt_sig_type()
if type == 'source':
return target_env.decide_source(dependency, target, prev_ni)
else:
return target_env.decide_target(dependency, target, prev_ni)
def _changed_timestamp_then_content(self, dependency, target, prev_ni):
return dependency.changed_timestamp_then_content(target, prev_ni)
def _changed_timestamp_newer(self, dependency, target, prev_ni):
return dependency.changed_timestamp_newer(target, prev_ni)
def _changed_timestamp_match(self, dependency, target, prev_ni):
return dependency.changed_timestamp_match(target, prev_ni)
def _copy_from_cache(self, src, dst):
return self.fs.copy(src, dst)
def _copy2_from_cache(self, src, dst):
return self.fs.copy2(src, dst)
def Decider(self, function):
copy_function = self._copy2_from_cache
if function in ('MD5', 'content'):
if not SCons.Util.md5:
raise UserError("MD5 signatures are not available in this version of Python.")
function = self._changed_content
elif function == 'MD5-timestamp':
function = self._changed_timestamp_then_content
elif function in ('timestamp-newer', 'make'):
function = self._changed_timestamp_newer
copy_function = self._copy_from_cache
elif function == 'timestamp-match':
function = self._changed_timestamp_match
elif not callable(function):
raise UserError("Unknown Decider value %s" % repr(function))
# We don't use AddMethod because we don't want to turn the
# function, which only expects three arguments, into a bound
# method, which would add self as an initial, fourth argument.
self.decide_target = function
self.decide_source = function
self.copy_from_cache = copy_function
def Detect(self, progs):
"""Return the first available program in progs.
"""
if not SCons.Util.is_List(progs):
progs = [ progs ]
for prog in progs:
path = self.WhereIs(prog)
if path: return prog
return None
def Dictionary(self, *args):
if not args:
return self._dict
dlist = [self._dict[x] for x in args]
if len(dlist) == 1:
dlist = dlist[0]
return dlist
def Dump(self, key = None):
"""
Using the standard Python pretty printer, dump the contents of the
scons build environment to stdout.
If the key passed in is anything other than None, then that will
be used as an index into the build environment dictionary and
whatever is found there will be fed into the pretty printer. Note
that this key is case sensitive.
"""
import pprint
pp = pprint.PrettyPrinter(indent=2)
if key:
dict = self.Dictionary(key)
else:
dict = self.Dictionary()
return pp.pformat(dict)
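# A minimal usage sketch (illustrative comment only; assumes an Environment
# named `env` in an SConstruct):
#
#     print(env.Dump())            # pretty-print every construction variable
#     print(env.Dump('CCFLAGS'))   # pretty-print a single, case-sensitive key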
def FindIxes(self, paths, prefix, suffix):
"""
Search a list of paths for something that matches the prefix and suffix.
paths - the list of paths or nodes.
prefix - construction variable for the prefix.
suffix - construction variable for the suffix.
"""
suffix = self.subst('$'+suffix)
prefix = self.subst('$'+prefix)
for path in paths:
dir,name = os.path.split(str(path))
if name[:len(prefix)] == prefix and name[-len(suffix):] == suffix:
return path
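# A minimal usage sketch (illustrative comment only): pick the shared library
# out of a mixed list of build products using the construction variables that
# hold its prefix and suffix:
#
#     paths = ['libfoo.a', 'libfoo.so']
#     shlib = env.FindIxes(paths, 'SHLIBPREFIX', 'SHLIBSUFFIX')   # -> 'libfoo.so'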
def ParseConfig(self, command, function=None, unique=1):
"""
Use the specified function to parse the output of the command
in order to modify the current environment. The 'command' can
be a string or a list of strings representing a command and
its arguments. 'Function' is an optional argument that takes
the environment, the output of the command, and the unique flag.
If no function is specified, MergeFlags, which treats the output
as the result of a typical 'X-config' command (e.g. gtk-config),
will merge the output into the appropriate variables.
"""
if function is None:
def parse_conf(env, cmd, unique=unique):
return env.MergeFlags(cmd, unique)
function = parse_conf
if SCons.Util.is_List(command):
command = ' '.join(command)
command = self.subst(command)
return function(self, self.backtick(command))
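# A minimal usage sketch (illustrative comment only; the pkg-config invocation
# is just an example command):
#
#     env.ParseConfig('pkg-config --cflags --libs gtk+-2.0')
#
# With no function argument the command's output goes through MergeFlags(),
# so -I, -L, -l and similar options land in CPPPATH, LIBPATH, LIBS, etc.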
def ParseDepends(self, filename, must_exist=None, only_one=0):
"""
Parse a mkdep-style file for explicit dependencies. This is
completely abusable, and should be unnecessary in the "normal"
case of proper SCons configuration, but it may help make
the transition from a Make hierarchy easier for some people
to swallow. It can also be genuinely useful when using a tool
that can write a .d file, but for which writing a scanner would
be too complicated.
"""
filename = self.subst(filename)
try:
fp = open(filename, 'r')
except IOError:
if must_exist:
raise
return
lines = SCons.Util.LogicalLines(fp).readlines()
lines = [l for l in lines if l[0] != '#']
tdlist = []
for line in lines:
try:
target, depends = line.split(':', 1)
except (AttributeError, ValueError):
# Throws AttributeError if line isn't a string. Can throw
# ValueError if line doesn't split into two or more elements.
pass
else:
tdlist.append((target.split(), depends.split()))
if only_one:
targets = []
for td in tdlist:
targets.extend(td[0])
if len(targets) > 1:
raise SCons.Errors.UserError(
"More than one dependency target found in `%s': %s"
% (filename, targets))
for target, depends in tdlist:
self.Depends(target, depends)
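# A minimal usage sketch (illustrative comment only; 'foo.d' is a hypothetical
# compiler-generated dependency file):
#
#     env.ParseDepends('foo.d')
#
# Each "target: dependency ..." line in the file becomes an explicit
# env.Depends(target, dependencies) relationship.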
def Platform(self, platform):
platform = self.subst(platform)
return SCons.Platform.Platform(platform)(self)
def Prepend(self, **kw):
"""Prepend values to existing construction variables
in an Environment.
"""
kw = copy_non_reserved_keywords(kw)
for key, val in kw.items():
# It would be easier on the eyes to write this using
# "continue" statements whenever we finish processing an item,
# but Python 1.5.2 apparently doesn't let you use "continue"
# within try:-except: blocks, so we have to nest our code.
try:
orig = self._dict[key]
except KeyError:
# No existing variable in the environment, so just set
# it to the new value.
self._dict[key] = val
else:
try:
# Check if the original looks like a dictionary.
# If it is, we can't just try adding the value because
# dictionaries don't have __add__() methods, and
# things like UserList will incorrectly coerce the
# original dict to a list (which we don't want).
update_dict = orig.update
except AttributeError:
try:
# Most straightforward: just try to add them
# together. This will work in most cases, when the
# original and new values are of compatible types.
self._dict[key] = val + orig
except (KeyError, TypeError):
try:
# Check if the added value is a list.
add_to_val = val.append
except AttributeError:
# The added value isn't a list, but the
# original is (by process of elimination),
# so insert the new value in the original
# (if there's one to insert).
if val:
orig.insert(0, val)
else:
# The added value is a list, so append
# the original to it (if there's a value
# to append).
if orig:
add_to_val(orig)
self._dict[key] = val
else:
# The original looks like a dictionary, so update it
# based on what we think the value looks like.
if SCons.Util.is_List(val):
for v in val:
orig[v] = None
else:
try:
update_dict(val)
except (AttributeError, TypeError, ValueError):
if SCons.Util.is_Dict(val):
for k, v in val.items():
orig[k] = v
else:
orig[val] = None
self.scanner_map_delete(kw)
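# A minimal usage sketch (illustrative comment only):
#
#     env.Prepend(CPPPATH=['#/include'], CPPDEFINES=['DEBUG'])
#
# List values are placed in front of any existing value, so the newly
# prepended entries are seen first when the variable is expanded.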
def PrependENVPath(self, name, newpath, envname = 'ENV', sep = os.pathsep,
delete_existing=1):
"""Prepend path elements to the path 'name' in the 'ENV'
dictionary for this environment. Will only add any particular
path once, and will normpath and normcase all paths to help
assure this. This can also handle the case where the env
variable is a list instead of a string.
If delete_existing is 0, a newpath which is already in the path
will not be moved to the front (it will be left where it is).
"""
orig = ''
if envname in self._dict and name in self._dict[envname]:
orig = self._dict[envname][name]
nv = SCons.Util.PrependPath(orig, newpath, sep, delete_existing,
canonicalize=self._canonicalize)
if envname not in self._dict:
self._dict[envname] = {}
self._dict[envname][name] = nv
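# A minimal usage sketch (illustrative comment only; the path is hypothetical):
#
#     env.PrependENVPath('PATH', '/opt/mytoolchain/bin')
#
# The new element ends up at the front of env['ENV']['PATH']; because
# delete_existing defaults to 1, an element already on the path is moved
# to the front rather than duplicated.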
def PrependUnique(self, delete_existing=0, **kw):
"""Prepend values to existing construction variables
in an Environment, if they're not already there.
If delete_existing is 1, removes existing values first, so
values move to front.
"""
kw = copy_non_reserved_keywords(kw)
for key, val in kw.items():
if SCons.Util.is_List(val):
val = _delete_duplicates(val, not delete_existing)
if key not in self._dict or self._dict[key] in ('', None):
self._dict[key] = val
elif SCons.Util.is_Dict(self._dict[key]) and \
SCons.Util.is_Dict(val):
self._dict[key].update(val)
elif SCons.Util.is_List(val):
dk = self._dict[key]
if not SCons.Util.is_List(dk):
dk = [dk]
if delete_existing:
dk = [x for x in dk if x not in val]
else:
val = [x for x in val if x not in dk]
self._dict[key] = val + dk
else:
dk = self._dict[key]
if SCons.Util.is_List(dk):
# By elimination, val is not a list. Since dk is a
# list, wrap val in a list first.
if delete_existing:
dk = [x for x in dk if x not in val]
self._dict[key] = [val] + dk
else:
if not val in dk:
self._dict[key] = [val] + dk
else:
if delete_existing:
dk = [x for x in dk if x not in val]
self._dict[key] = val + dk
self.scanner_map_delete(kw)
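# A minimal usage sketch (illustrative comment only):
#
#     env.PrependUnique(LIBS=['m', 'pthread'])
#
# Values already present in env['LIBS'] stay where they are unless
# delete_existing=1 is passed, in which case they are moved to the front.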
def Replace(self, **kw):
"""Replace existing construction variables in an Environment
with new construction variables and/or values.
"""
try:
kwbd = kw['BUILDERS']
except KeyError:
pass
else:
kwbd = BuilderDict(kwbd,self)
del kw['BUILDERS']
self.__setitem__('BUILDERS', kwbd)
kw = copy_non_reserved_keywords(kw)
self._update(semi_deepcopy(kw))
self.scanner_map_delete(kw)
def ReplaceIxes(self, path, old_prefix, old_suffix, new_prefix, new_suffix):
"""
Replace old_prefix with new_prefix and old_suffix with new_suffix.
env - Environment used to interpolate variables.
path - the path that will be modified.
old_prefix - construction variable for the old prefix.
old_suffix - construction variable for the old suffix.
new_prefix - construction variable for the new prefix.
new_suffix - construction variable for the new suffix.
"""
old_prefix = self.subst('$'+old_prefix)
old_suffix = self.subst('$'+old_suffix)
new_prefix = self.subst('$'+new_prefix)
new_suffix = self.subst('$'+new_suffix)
dir,name = os.path.split(str(path))
if name[:len(old_prefix)] == old_prefix:
name = name[len(old_prefix):]
if name[-len(old_suffix):] == old_suffix:
name = name[:-len(old_suffix)]
return os.path.join(dir, new_prefix+name+new_suffix)
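# A minimal usage sketch (illustrative comment only; result shown for a
# POSIX-style platform): turn a shared-library name into the corresponding
# static-library name using the prefix/suffix construction variables:
#
#     env.ReplaceIxes('libfoo.so', 'SHLIBPREFIX', 'SHLIBSUFFIX',
#                     'LIBPREFIX', 'LIBSUFFIX')    # -> 'libfoo.a'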
def SetDefault(self, **kw):
for k in kw.keys():
if k in self._dict:
del kw[k]
self.Replace(**kw)
def _find_toolpath_dir(self, tp):
return self.fs.Dir(self.subst(tp)).srcnode().abspath
def Tool(self, tool, toolpath=None, **kw):
if SCons.Util.is_String(tool):
tool = self.subst(tool)
if toolpath is None:
toolpath = self.get('toolpath', [])
toolpath = list(map(self._find_toolpath_dir, toolpath))
tool = SCons.Tool.Tool(tool, toolpath, **kw)
tool(self)
def WhereIs(self, prog, path=None, pathext=None, reject=[]):
"""Find prog in the path.
"""
if path is None:
try:
path = self['ENV']['PATH']
except KeyError:
pass
elif SCons.Util.is_String(path):
path = self.subst(path)
if pathext is None:
try:
pathext = self['ENV']['PATHEXT']
except KeyError:
pass
elif SCons.Util.is_String(pathext):
pathext = self.subst(pathext)
prog = self.subst(prog)
path = SCons.Util.WhereIs(prog, path, pathext, reject)
if path: return path
return None
#######################################################################
# Public methods for doing real "SCons stuff" (manipulating
# dependencies, setting attributes on targets, etc.). These begin
# with upper-case letters. The essential characteristic of methods
# in this section is that they all *should* have corresponding
# same-named global functions.
#######################################################################
def Action(self, *args, **kw):
def subst_string(a, self=self):
if SCons.Util.is_String(a):
a = self.subst(a)
return a
nargs = list(map(subst_string, args))
nkw = self.subst_kw(kw)
return SCons.Action.Action(*nargs, **nkw)
def AddPreAction(self, files, action):
nodes = self.arg2nodes(files, self.fs.Entry)
action = SCons.Action.Action(action)
uniq = {}
for executor in [n.get_executor() for n in nodes]:
uniq[executor] = 1
for executor in uniq.keys():
executor.add_pre_action(action)
return nodes
def AddPostAction(self, files, action):
nodes = self.arg2nodes(files, self.fs.Entry)
action = SCons.Action.Action(action)
uniq = {}
for executor in [n.get_executor() for n in nodes]:
uniq[executor] = 1
for executor in uniq.keys():
executor.add_post_action(action)
return nodes
def Alias(self, target, source=[], action=None, **kw):
tlist = self.arg2nodes(target, self.ans.Alias)
if not SCons.Util.is_List(source):
source = [source]
source = [_f for _f in source if _f]
if not action:
if not source:
# There are no source files and no action, so just
# return a target list of classic Alias Nodes, without
# any builder. The externally visible effect is that
# this will make the wrapping Script.BuildTask class
# say that there's "Nothing to be done" for this Alias,
# instead of that it's "up to date."
return tlist
# No action, but there are sources. Re-call all the target
# builders to add the sources to each target.
result = []
for t in tlist:
bld = t.get_builder(AliasBuilder)
result.extend(bld(self, t, source))
return result
nkw = self.subst_kw(kw)
nkw.update({
'action' : SCons.Action.Action(action),
'source_factory' : self.fs.Entry,
'multi' : 1,
'is_explicit' : None,
})
bld = SCons.Builder.Builder(**nkw)
# Apply the Builder separately to each target so that the Aliases
# stay separate. If we did one "normal" Builder call with the
# whole target list, then all of the target Aliases would be
# associated under a single Executor.
result = []
for t in tlist:
# Calling the convert() method will cause a new Executor to be
# created from scratch, so we have to explicitly initialize
# it with the target's existing sources, plus our new ones,
# so nothing gets lost.
b = t.get_builder()
if b is None or b is AliasBuilder:
b = bld
else:
nkw['action'] = b.action + action
b = SCons.Builder.Builder(**nkw)
t.convert()
result.extend(b(self, t, t.sources + source))
return result
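# A minimal usage sketch (illustrative comment only; names and paths are
# hypothetical):
#
#     prog = env.Program('hello', 'hello.c')
#     env.Alias('install', env.Install('/usr/local/bin', prog))
#
# Running `scons install` then builds everything the 'install' Alias
# depends on.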
def AlwaysBuild(self, *targets):
tlist = []
for t in targets:
tlist.extend(self.arg2nodes(t, self.fs.Entry))
for t in tlist:
t.set_always_build()
return tlist
def BuildDir(self, *args, **kw):
msg = """BuildDir() and the build_dir keyword have been deprecated;\n\tuse VariantDir() and the variant_dir keyword instead."""
SCons.Warnings.warn(SCons.Warnings.DeprecatedBuildDirWarning, msg)
if 'build_dir' in kw:
kw['variant_dir'] = kw['build_dir']
del kw['build_dir']
return self.VariantDir(*args, **kw)
def Builder(self, **kw):
nkw = self.subst_kw(kw)
return SCons.Builder.Builder(**nkw)
def CacheDir(self, path):
import SCons.CacheDir
if path is not None:
path = self.subst(path)
self._CacheDir_path = path
def Clean(self, targets, files):
global CleanTargets
tlist = self.arg2nodes(targets, self.fs.Entry)
flist = self.arg2nodes(files, self.fs.Entry)
for t in tlist:
try:
CleanTargets[t].extend(flist)
except KeyError:
CleanTargets[t] = flist
def Configure(self, *args, **kw):
nargs = [self]
if args:
nargs = nargs + self.subst_list(args)[0]
nkw = self.subst_kw(kw)
nkw['_depth'] = kw.get('_depth', 0) + 1
try:
nkw['custom_tests'] = self.subst_kw(nkw['custom_tests'])
except KeyError:
pass
return SCons.SConf.SConf(*nargs, **nkw)
def Command(self, target, source, action, **kw):
"""Builds the supplied target files from the supplied
source files using the supplied action. Action may
be any type that the Builder constructor will accept
for an action."""
bkw = {
'action' : action,
'target_factory' : self.fs.Entry,
'source_factory' : self.fs.Entry,
}
try: bkw['source_scanner'] = kw['source_scanner']
except KeyError: pass
else: del kw['source_scanner']
bld = SCons.Builder.Builder(**bkw)
return bld(self, target, source, **kw)
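# A minimal usage sketch (illustrative comment only; the action string is just
# an example):
#
#     env.Command('version.h', 'version.in',
#                 'sed s/@VERSION@/1.0/ < $SOURCE > $TARGET')
#
# Any action accepted by the Builder constructor works here, including a
# Python function taking (target, source, env).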
def Depends(self, target, dependency):
"""Explicity specify that 'target's depend on 'dependency'."""
tlist = self.arg2nodes(target, self.fs.Entry)
dlist = self.arg2nodes(dependency, self.fs.Entry)
for t in tlist:
t.add_dependency(dlist)
return tlist
def Dir(self, name, *args, **kw):
"""
"""
s = self.subst(name)
if SCons.Util.is_Sequence(s):
result=[]
for e in s:
result.append(self.fs.Dir(e, *args, **kw))
return result
return self.fs.Dir(s, *args, **kw)
def NoClean(self, *targets):
"""Tags a target so that it will not be cleaned by -c"""
tlist = []
for t in targets:
tlist.extend(self.arg2nodes(t, self.fs.Entry))
for t in tlist:
t.set_noclean()
return tlist
def NoCache(self, *targets):
"""Tags a target so that it will not be cached"""
tlist = []
for t in targets:
tlist.extend(self.arg2nodes(t, self.fs.Entry))
for t in tlist:
t.set_nocache()
return tlist
def Entry(self, name, *args, **kw):
"""
"""
s = self.subst(name)
if SCons.Util.is_Sequence(s):
result=[]
for e in s:
result.append(self.fs.Entry(e, *args, **kw))
return result
return self.fs.Entry(s, *args, **kw)
def Environment(self, **kw):
return SCons.Environment.Environment(**self.subst_kw(kw))
def Execute(self, action, *args, **kw):
"""Directly execute an action through an Environment
"""
action = self.Action(action, *args, **kw)
result = action([], [], self)
if isinstance(result, SCons.Errors.BuildError):
errstr = result.errstr
if result.filename:
errstr = result.filename + ': ' + errstr
sys.stderr.write("scons: *** %s\n" % errstr)
return result.status
else:
return result
def File(self, name, *args, **kw):
"""
"""
s = self.subst(name)
if SCons.Util.is_Sequence(s):
result=[]
for e in s:
result.append(self.fs.File(e, *args, **kw))
return result
return self.fs.File(s, *args, **kw)
def FindFile(self, file, dirs):
file = self.subst(file)
nodes = self.arg2nodes(dirs, self.fs.Dir)
return SCons.Node.FS.find_file(file, tuple(nodes))
def Flatten(self, sequence):
return SCons.Util.flatten(sequence)
def GetBuildPath(self, files):
result = list(map(str, self.arg2nodes(files, self.fs.Entry)))
if SCons.Util.is_List(files):
return result
else:
return result[0]
def Glob(self, pattern, ondisk=True, source=False, strings=False):
return self.fs.Glob(self.subst(pattern), ondisk, source, strings)
def Ignore(self, target, dependency):
"""Ignore a dependency."""
tlist = self.arg2nodes(target, self.fs.Entry)
dlist = self.arg2nodes(dependency, self.fs.Entry)
for t in tlist:
t.add_ignore(dlist)
return tlist
def Literal(self, string):
return SCons.Subst.Literal(string)
def Local(self, *targets):
ret = []
for targ in targets:
if isinstance(targ, SCons.Node.Node):
targ.set_local()
ret.append(targ)
else:
for t in self.arg2nodes(targ, self.fs.Entry):
t.set_local()
ret.append(t)
return ret
def Precious(self, *targets):
tlist = []
for t in targets:
tlist.extend(self.arg2nodes(t, self.fs.Entry))
for t in tlist:
t.set_precious()
return tlist
def Pseudo(self, *targets):
tlist = []
for t in targets:
tlist.extend(self.arg2nodes(t, self.fs.Entry))
for t in tlist:
t.set_pseudo()
return tlist
def Repository(self, *dirs, **kw):
dirs = self.arg2nodes(list(dirs), self.fs.Dir)
self.fs.Repository(*dirs, **kw)
def Requires(self, target, prerequisite):
"""Specify that 'prerequisite' must be built before 'target',
(but 'target' does not actually depend on 'prerequisite'
and need not be rebuilt if it changes)."""
tlist = self.arg2nodes(target, self.fs.Entry)
plist = self.arg2nodes(prerequisite, self.fs.Entry)
for t in tlist:
t.add_prerequisite(plist)
return tlist
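# A minimal usage sketch (illustrative comment only; file names are
# hypothetical): ensure a generated header exists before foo.o is built,
# without making foo.o rebuild whenever that header changes:
#
#     env.Requires('foo.o', 'version-stamp.h')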
def Scanner(self, *args, **kw):
nargs = []
for arg in args:
if SCons.Util.is_String(arg):
arg = self.subst(arg)
nargs.append(arg)
nkw = self.subst_kw(kw)
return SCons.Scanner.Base(*nargs, **nkw)
def SConsignFile(self, name=".sconsign", dbm_module=None):
if name is not None:
name = self.subst(name)
if not os.path.isabs(name):
name = os.path.join(str(self.fs.SConstruct_dir), name)
if name:
name = os.path.normpath(name)
sconsign_dir = os.path.dirname(name)
if sconsign_dir and not os.path.exists(sconsign_dir):
self.Execute(SCons.Defaults.Mkdir(sconsign_dir))
SCons.SConsign.File(name, dbm_module)
def SideEffect(self, side_effect, target):
"""Tell scons that side_effects are built as side
effects of building targets."""
side_effects = self.arg2nodes(side_effect, self.fs.Entry)
targets = self.arg2nodes(target, self.fs.Entry)
for side_effect in side_effects:
if side_effect.multiple_side_effect_has_builder():
raise SCons.Errors.UserError("Multiple ways to build the same target were specified for: %s" % str(side_effect))
side_effect.add_source(targets)
side_effect.side_effect = 1
self.Precious(side_effect)
for target in targets:
target.side_effects.append(side_effect)
return side_effects
def SourceCode(self, entry, builder):
"""Arrange for a source code builder for (part of) a tree."""
msg = """SourceCode() has been deprecated and there is no replacement.
\tIf you need this function, please contact dev@scons.tigris.org."""
SCons.Warnings.warn(SCons.Warnings.DeprecatedSourceCodeWarning, msg)
entries = self.arg2nodes(entry, self.fs.Entry)
for entry in entries:
entry.set_src_builder(builder)
return entries
def SourceSignatures(self, type):
global _warn_source_signatures_deprecated
if _warn_source_signatures_deprecated:
msg = "The env.SourceSignatures() method is deprecated;\n" + \
"\tconvert your build to use the env.Decider() method instead."
SCons.Warnings.warn(SCons.Warnings.DeprecatedSourceSignaturesWarning, msg)
_warn_source_signatures_deprecated = False
type = self.subst(type)
self.src_sig_type = type
if type == 'MD5':
if not SCons.Util.md5:
raise UserError("MD5 signatures are not available in this version of Python.")
self.decide_source = self._changed_content
elif type == 'timestamp':
self.decide_source = self._changed_timestamp_match
else:
raise UserError("Unknown source signature type '%s'" % type)
def Split(self, arg):
"""This function converts a string or list into a list of strings
or Nodes. This makes things easier for users by allowing files to
be specified as a white-space separated list to be split.
The input rules are:
- A single string containing names separated by spaces. These will be
split apart at the spaces.
- A single Node instance
- A list containing either strings or Node instances. Any strings
in the list are not split at spaces.
In all cases, the function returns a list of Nodes and strings."""
if SCons.Util.is_List(arg):
return list(map(self.subst, arg))
elif SCons.Util.is_String(arg):
return self.subst(arg).split()
else:
return [self.subst(arg)]
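# A minimal usage sketch (illustrative comment only):
#
#     env.Split('foo.c bar.c baz.c')      # -> ['foo.c', 'bar.c', 'baz.c']
#     env.Split(['foo.c', 'bar baz.c'])   # -> ['foo.c', 'bar baz.c'] (list items are not split)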
def TargetSignatures(self, type):
global _warn_target_signatures_deprecated
if _warn_target_signatures_deprecated:
msg = "The env.TargetSignatures() method is deprecated;\n" + \
"\tconvert your build to use the env.Decider() method instead."
SCons.Warnings.warn(SCons.Warnings.DeprecatedTargetSignaturesWarning, msg)
_warn_target_signatures_deprecated = False
type = self.subst(type)
self.tgt_sig_type = type
if type in ('MD5', 'content'):
if not SCons.Util.md5:
raise UserError("MD5 signatures are not available in this version of Python.")
self.decide_target = self._changed_content
elif type == 'timestamp':
self.decide_target = self._changed_timestamp_match
elif type == 'build':
self.decide_target = self._changed_build
elif type == 'source':
self.decide_target = self._changed_source
else:
raise UserError("Unknown target signature type '%s'"%type)
def Value(self, value, built_value=None):
"""
"""
return SCons.Node.Python.Value(value, built_value)
def VariantDir(self, variant_dir, src_dir, duplicate=1):
variant_dir = self.arg2nodes(variant_dir, self.fs.Dir)[0]
src_dir = self.arg2nodes(src_dir, self.fs.Dir)[0]
self.fs.VariantDir(variant_dir, src_dir, duplicate)
def FindSourceFiles(self, node='.'):
""" returns a list of all source files.
"""
node = self.arg2nodes(node, self.fs.Entry)[0]
sources = []
def build_source(ss):
for s in ss:
if isinstance(s, SCons.Node.FS.Dir):
build_source(s.all_children())
elif s.has_builder():
build_source(s.sources)
elif isinstance(s.disambiguate(), SCons.Node.FS.File):
sources.append(s)
build_source(node.all_children())
def final_source(node):
while (node != node.srcnode()):
node = node.srcnode()
return node
sources = map(final_source, sources)
# remove duplicates
return list(set(sources))
def FindInstalledFiles(self):
""" returns the list of all targets of the Install and InstallAs Builder.
"""
from SCons.Tool import install
if install._UNIQUE_INSTALLED_FILES is None:
install._UNIQUE_INSTALLED_FILES = SCons.Util.uniquer_hashables(install._INSTALLED_FILES)
return install._UNIQUE_INSTALLED_FILES
class OverrideEnvironment(Base):
"""A proxy that overrides variables in a wrapped construction
environment by returning values from an overrides dictionary in
preference to values from the underlying subject environment.
This is a lightweight (I hope) proxy that passes through most use of
attributes to the underlying Environment.Base class, but has just
enough additional methods defined to act like a real construction
environment with overridden values. It can wrap either a Base
construction environment, or another OverrideEnvironment, which
can in turn nest arbitrary OverrideEnvironments...
Note that we do *not* call the underlying base class
(SubstitutionEnvironment) initialization, because we get most of those
from proxying the attributes of the subject construction environment.
But because we subclass SubstitutionEnvironment, this class also
has inherited arg2nodes() and subst*() methods; those methods can't
be proxied because they need *this* object's methods to fetch the
values from the overrides dictionary.
"""
def __init__(self, subject, overrides={}):
if SCons.Debug.track_instances: logInstanceCreation(self, 'Environment.OverrideEnvironment')
self.__dict__['__subject'] = subject
self.__dict__['overrides'] = overrides
# Methods that make this class act like a proxy.
def __getattr__(self, name):
return getattr(self.__dict__['__subject'], name)
def __setattr__(self, name, value):
setattr(self.__dict__['__subject'], name, value)
# Methods that make this class act like a dictionary.
def __getitem__(self, key):
try:
return self.__dict__['overrides'][key]
except KeyError:
return self.__dict__['__subject'].__getitem__(key)
def __setitem__(self, key, value):
if not is_valid_construction_var(key):
raise SCons.Errors.UserError("Illegal construction variable `%s'" % key)
self.__dict__['overrides'][key] = value
def __delitem__(self, key):
try:
del self.__dict__['overrides'][key]
except KeyError:
deleted = 0
else:
deleted = 1
try:
result = self.__dict__['__subject'].__delitem__(key)
except KeyError:
if not deleted:
raise
result = None
return result
def get(self, key, default=None):
"""Emulates the get() method of dictionaries."""
try:
return self.__dict__['overrides'][key]
except KeyError:
return self.__dict__['__subject'].get(key, default)
def has_key(self, key):
try:
self.__dict__['overrides'][key]
return 1
except KeyError:
return key in self.__dict__['__subject']
def __contains__(self, key):
if self.__dict__['overrides'].__contains__(key):
return 1
return self.__dict__['__subject'].__contains__(key)
def Dictionary(self):
"""Emulates the items() method of dictionaries."""
d = self.__dict__['__subject'].Dictionary().copy()
d.update(self.__dict__['overrides'])
return d
def items(self):
"""Emulates the items() method of dictionaries."""
return list(self.Dictionary().items())
# Overridden private construction environment methods.
def _update(self, dict):
"""Update an environment's values directly, bypassing the normal
checks that occur when users try to set items.
"""
self.__dict__['overrides'].update(dict)
def gvars(self):
return self.__dict__['__subject'].gvars()
def lvars(self):
lvars = self.__dict__['__subject'].lvars()
lvars.update(self.__dict__['overrides'])
return lvars
# Overridden public construction environment methods.
def Replace(self, **kw):
kw = copy_non_reserved_keywords(kw)
self.__dict__['overrides'].update(semi_deepcopy(kw))
# The entry point that will be used by the external world
# to refer to a construction environment. This allows the wrapper
# interface to extend a construction environment for its own purposes
# by subclassing SCons.Environment.Base and then assigning the
# class to SCons.Environment.Environment.
Environment = Base
# An entry point for returning a proxy subclass instance that overrides
# the subst*() methods so they don't actually perform construction
# variable substitution. This is specifically intended to be the shim
# layer in between global function calls (which don't want construction
# variable substitution) and the DefaultEnvironment() (which would
# substitute variables if left to its own devices).
#
# We have to wrap this in a function that allows us to delay definition of
# the class until it's necessary, so that when it subclasses Environment
# it will pick up whatever Environment subclass the wrapper interface
# might have assigned to SCons.Environment.Environment.
def NoSubstitutionProxy(subject):
class _NoSubstitutionProxy(Environment):
def __init__(self, subject):
self.__dict__['__subject'] = subject
def __getattr__(self, name):
return getattr(self.__dict__['__subject'], name)
def __setattr__(self, name, value):
return setattr(self.__dict__['__subject'], name, value)
def executor_to_lvars(self, kwdict):
if 'executor' in kwdict:
kwdict['lvars'] = kwdict['executor'].get_lvars()
del kwdict['executor']
else:
kwdict['lvars'] = {}
def raw_to_mode(self, dict):
try:
raw = dict['raw']
except KeyError:
pass
else:
del dict['raw']
dict['mode'] = raw
def subst(self, string, *args, **kwargs):
return string
def subst_kw(self, kw, *args, **kwargs):
return kw
def subst_list(self, string, *args, **kwargs):
nargs = (string, self,) + args
nkw = kwargs.copy()
nkw['gvars'] = {}
self.executor_to_lvars(nkw)
self.raw_to_mode(nkw)
return SCons.Subst.scons_subst_list(*nargs, **nkw)
def subst_target_source(self, string, *args, **kwargs):
nargs = (string, self,) + args
nkw = kwargs.copy()
nkw['gvars'] = {}
self.executor_to_lvars(nkw)
self.raw_to_mode(nkw)
return SCons.Subst.scons_subst(*nargs, **nkw)
return _NoSubstitutionProxy(subject)
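# A minimal usage sketch (illustrative comment only): wrap an environment so
# that strings pass through unexpanded, which is what the global SConscript
# functions rely on:
#
#     proxy = NoSubstitutionProxy(env)
#     proxy.subst('$CCFLAGS')    # -> '$CCFLAGS', returned untouched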
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| h4ck3rm1k3/OpenWrt-Firefly-SDK | staging_dir/host/lib/scons-2.3.1/SCons/Environment.py | Python | gpl-2.0 | 96,203 |
"""SCons.Tool.Perforce.py
Tool-specific initialization for Perforce Source Code Management system.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/Perforce.py 2014/03/02 14:18:15 garyo"
import os
import SCons.Action
import SCons.Builder
import SCons.Node.FS
import SCons.Util
# This function should maybe be moved to SCons.Util?
from SCons.Tool.PharLapCommon import addPathIfNotExists
# Variables that we want to import from the base OS environment.
_import_env = [ 'P4PORT', 'P4CLIENT', 'P4USER', 'USER', 'USERNAME', 'P4PASSWD',
'P4CHARSET', 'P4LANGUAGE', 'SystemRoot' ]
PerforceAction = SCons.Action.Action('$P4COM', '$P4COMSTR')
def generate(env):
"""Add a Builder factory function and construction variables for
Perforce to an Environment."""
def PerforceFactory(env=env):
""" """
import SCons.Warnings as W
W.warn(W.DeprecatedSourceCodeWarning, """The Perforce() factory is deprecated and there is no replacement.""")
return SCons.Builder.Builder(action = PerforceAction, env = env)
#setattr(env, 'Perforce', PerforceFactory)
env.Perforce = PerforceFactory
env['P4'] = 'p4'
env['P4FLAGS'] = SCons.Util.CLVar('')
env['P4COM'] = '$P4 $P4FLAGS sync $TARGET'
try:
environ = env['ENV']
except KeyError:
environ = {}
env['ENV'] = environ
# Perforce seems to use the PWD environment variable rather than
# calling getcwd() for itself, which is odd. If no PWD variable
# is present, p4 WILL call getcwd, but this seems to cause problems
# with good ol' Windows's tilde-mangling for long file names.
environ['PWD'] = env.Dir('#').get_abspath()
for var in _import_env:
v = os.environ.get(var)
if v:
environ[var] = v
if SCons.Util.can_read_reg:
# If we can read the registry, add the path to Perforce to our environment.
try:
k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
'Software\\Perforce\\environment')
val, tok = SCons.Util.RegQueryValueEx(k, 'P4INSTROOT')
addPathIfNotExists(environ, 'PATH', val)
except SCons.Util.RegError:
# Can't detect where Perforce is, hope the user has it set in the
# PATH.
pass
def exists(env):
return env.Detect('p4')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| michalliu/OpenWrt-Firefly-Libraries | staging_dir/host/lib/scons-2.3.1/SCons/Tool/Perforce.py | Python | gpl-2.0 | 3,834 |
callback_classes = [
['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'const ns3::Address &', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'const ns3::Address &', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<const ns3::MobilityModel>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<const ns3::Packet>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Packet>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'std::list<ns3::Ptr<ns3::LteControlMessage>, std::allocator<ns3::Ptr<ns3::LteControlMessage> > >', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'ns3::Ptr<ns3::SpectrumValue>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::DlInfoListElement_s', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::UlInfoListElement_s', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<const ns3::PacketBurst>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::PhyReceptionStatParameters', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned short', 'double', 'unsigned char', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::PhyTransmissionStatParameters', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned short', 'double', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned short', 'ns3::LteUePhy::State', 'ns3::LteUePhy::State', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned short', 'double', 'double', 'unsigned char', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned short', 'double', 'double', 'bool', 'unsigned char', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Socket>', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Packet>', 'const ns3::Address &', 'const ns3::Address &', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::EpcUeNas::State', 'ns3::EpcUeNas::State', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'const ns3::SpectrumValue &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned char', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned char', 'unsigned int', 'unsigned long', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned long', 'unsigned short', 'unsigned short', 'ns3::UeManager::State', 'ns3::UeManager::State', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned short', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned long', 'unsigned short', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned long', 'unsigned short', 'unsigned short', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned long', 'unsigned short', 'unsigned short', 'ns3::LteRrcSap::MeasurementReport', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::DlSchedulingCallbackInfo', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned int', 'unsigned int', 'unsigned short', 'unsigned char', 'unsigned short', 'unsigned char', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned long', 'unsigned short', 'unsigned short', 'ns3::LteUeRrc::State', 'ns3::LteUeRrc::State', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'unsigned long', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
| pradeepnazareth/NS-3-begining | src/lte/bindings/callbacks_list.py | Python | gpl-2.0 | 6,324 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for nacl_utils.py."""
import fileinput
import mox
import nacl_utils
import os
import sys
import unittest
def TestMock(file_path, open_func):
temp_file = open_func(file_path)
temp_file.close()
class TestNaClUtils(unittest.TestCase):
"""Class for test cases to cover globally declared helper functions."""
def setUp(self):
self.script_dir = os.path.abspath(os.path.dirname(__file__))
self.mock_factory = mox.Mox()
self.InitializeResourceMocks()
def InitializeResourceMocks(self):
"""Can be called multiple times if multiple functions need to be tested."""
self.fileinput_mock = self.mock_factory.CreateMock(fileinput)
self.os_mock = self.mock_factory.CreateMock(os)
self.sys_mock = self.mock_factory.CreateMock(sys)
def testToolchainPath(self):
output = nacl_utils.ToolchainPath('nacl_sdk_root')
head, tail = os.path.split(output)
base, toolchain = os.path.split(head)
self.assertEqual('nacl_sdk_root', base)
self.assertEqual('toolchain', toolchain)
self.assertRaises(ValueError,
nacl_utils.ToolchainPath,
'nacl_sdk_root',
arch='nosucharch')
self.assertRaises(ValueError,
nacl_utils.ToolchainPath,
'nacl_sdk_root',
variant='nosuchvariant')
def testGetJSONFromNexeSpec(self):
valid_empty_json = '{\n "program": {\n }\n}\n'
null_json = nacl_utils.GetJSONFromNexeSpec(None)
self.assertEqual(null_json, valid_empty_json)
empty_json = nacl_utils.GetJSONFromNexeSpec({})
self.assertEqual(empty_json, valid_empty_json)
nexes = {'x86-32': 'nacl_x86_32.nexe',
'x86-64': 'nacl_x86_64.nexe',
'arm': 'nacl_ARM.nexe'}
json = nacl_utils.GetJSONFromNexeSpec(nexes)
# Assert that the resulting JSON has all the right parts: the "nexes"
# dict, followed by one entry for each architecture. Also make sure that
# the last entry doesn't have a trailing ','
json_lines = json.splitlines()
self.assertEqual(len(json_lines), 7)
self.assertEqual(json_lines[0], '{')
self.assertEqual(json_lines[1], ' "program": {')
self.assertTrue(json_lines[2].endswith(','))
self.assertTrue(json_lines[3].endswith(','))
self.assertFalse(json_lines[4].endswith(','))
self.assertEqual(json_lines[5], ' }')
self.assertEqual(json_lines[6], '}')
# Assert that the key-value pair lines have the right form. The order
# of the keys doesn't matter. Note that the key values are enclosed in
# "" (e.g. "x86-32") - this is intentional.
valid_arch_keys = ['"x86-32"', '"x86-64"', '"arm"']
for line in json_lines[2:4]:
key_value = line.split(':')
self.assertEqual(len(key_value), 3)
self.assertTrue(key_value[0].lstrip().rstrip() in valid_arch_keys)
def testGenerateNmf(self):
# Assert that failure cases properly fail.
self.assertRaises(ValueError, nacl_utils.GenerateNmf, None, None, None)
self.assertRaises(ValueError, nacl_utils.GenerateNmf, [], [], {})
def testGetArchFromSpec(self):
default_arch, default_subarch = nacl_utils.GetArchFromSpec(None)
self.assertEqual(default_arch, nacl_utils.DEFAULT_ARCH)
self.assertEqual(default_subarch, nacl_utils.DEFAULT_SUBARCH)
default_arch, subarch = nacl_utils.GetArchFromSpec({'subarch': '64'})
self.assertEqual(default_arch, nacl_utils.DEFAULT_ARCH)
self.assertEqual(subarch, '64')
arch, default_subarch = nacl_utils.GetArchFromSpec({'arch': 'x86'})
self.assertEqual(arch, 'x86')
self.assertEqual(default_subarch, nacl_utils.DEFAULT_SUBARCH)
arch, subarch = nacl_utils.GetArchFromSpec({'arch': 'x86', 'subarch': '64'})
self.assertEqual(arch, 'x86')
self.assertEqual(subarch, '64')
def RunTests():
return_value = 1
test_suite = unittest.TestLoader().loadTestsFromTestCase(TestNaClUtils)
test_results = unittest.TextTestRunner(verbosity=2).run(test_suite)
if test_results.wasSuccessful():
return_value = 0
return return_value
if __name__ == '__main__':
sys.exit(RunTests())
| aYukiSekiguchi/ACCESS-Chromium | native_client_sdk/src/build_tools/nacl_sdk_scons/nacl_utils_test.py | Python | bsd-3-clause | 4,315 |
'''tzinfo timezone information for Africa/Asmera.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Asmera(DstTzInfo):
'''Africa/Asmera timezone definition. See datetime.tzinfo for details'''
zone = 'Africa/Asmera'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1936,5,4,21,24,40),
]
_transition_info = [
i(9300,0,'ADMT'),
i(10800,0,'EAT'),
]
Asmera = Asmera()
| newvem/pytz | pytz/zoneinfo/Africa/Asmera.py | Python | mit | 483 |
from django.db import models
class Foo(models.Model):
name = models.CharField(max_length=100)
bar = models.ForeignKey('Bar', related_name='bar_fk')
def __unicode__(self):
return self.name
class Bar(models.Model):
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
| spookylukey/django-autocomplete-light | test_project/admin_test_autocomplete/models.py | Python | mit | 335 |
#
# sublimelinter.py
# Part of SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Ryan Hileman and Aparajita Fishman
#
# Project: https://github.com/SublimeLinter/SublimeLinter3
# License: MIT
#
"""This module provides the SublimeLinter plugin class and supporting methods."""
import os
import re
import sublime
import sublime_plugin
from .lint.linter import Linter
from .lint.highlight import HighlightSet
from .lint.queue import queue
from .lint import persist, util
def plugin_loaded():
"""The ST3 entry point for plugins."""
persist.plugin_is_loaded = True
persist.settings.load()
persist.printf('debug mode:', 'on' if persist.debug_mode() else 'off')
util.create_tempdir()
for linter in persist.linter_classes.values():
linter.initialize()
plugin = SublimeLinter.shared_plugin()
queue.start(plugin.lint)
util.generate_menus()
util.generate_color_scheme(from_reload=False)
util.install_syntaxes()
persist.settings.on_update_call(SublimeLinter.on_settings_updated)
# This ensures we lint the active view on a fresh install
window = sublime.active_window()
if window:
plugin.on_activated(window.active_view())
class SublimeLinter(sublime_plugin.EventListener):
"""The main ST3 plugin class."""
# We use this to match linter settings filenames.
LINTER_SETTINGS_RE = re.compile(r'^SublimeLinter(-.+?)?\.sublime-settings')
shared_instance = None
@classmethod
def shared_plugin(cls):
"""Return the plugin instance."""
return cls.shared_instance
def __init__(self, *args, **kwargs):
"""Initialize a new instance."""
super().__init__(*args, **kwargs)
# Keeps track of which views we have assigned linters to
self.loaded_views = set()
# Keeps track of which views have actually been linted
self.linted_views = set()
# A mapping between view ids and syntax names
self.view_syntax = {}
self.__class__.shared_instance = self
@classmethod
def lint_all_views(cls):
"""Simulate a modification of all views, which will trigger a relint."""
def apply(view):
if view.id() in persist.view_linters:
cls.shared_instance.hit(view)
util.apply_to_all_views(apply)
def lint(self, view_id, hit_time=None, callback=None):
"""
Lint the view with the given id.
This method is called asynchronously by persist.Daemon when a lint
request is pulled off the queue, or called synchronously when the
Lint command is executed or a file is saved and Show Errors on Save
is enabled.
If provided, hit_time is the time at which the lint request was added
to the queue. It is used to determine if the view has been modified
since the lint request was queued. If so, the lint is aborted, since
another lint request is already in the queue.
callback is the method to call when the lint is finished. If not
provided, it defaults to highlight().
"""
# If the view has been modified since the lint was triggered,
# don't lint again.
if hit_time is not None and persist.last_hit_times.get(view_id, 0) > hit_time:
return
view = Linter.get_view(view_id)
if view is None:
return
filename = view.file_name()
code = Linter.text(view)
callback = callback or self.highlight
Linter.lint_view(view, filename, code, hit_time, callback)
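# The hit_time comparison above is a simple debounce: a queued request is
# dropped when the view has been touched again after the request was
# enqueued. A stripped-down sketch of the same idea (illustrative comment
# only, not part of the plugin):
#
#     def should_run(view_id, hit_time, last_hit_times):
#         # skip stale requests; a newer edit has already queued another lint
#         return hit_time is None or last_hit_times.get(view_id, 0) <= hit_time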
def highlight(self, view, linters, hit_time):
"""
Highlight any errors found during a lint of the given view.
This method is called by Linter.lint_view after linting is finished.
linters is a list of the linters that ran. hit_time has the same meaning
as in lint(), and if the view was modified since the lint request was
made, this method aborts drawing marks.
If the view has not been modified since hit_time, all of the marks and
errors from the list of linters are aggregated and drawn, and the status
is updated.
"""
vid = view.id()
# If the view has been modified since the lint was triggered,
# don't draw marks.
if hit_time is not None and persist.last_hit_times.get(vid, 0) > hit_time:
return
errors = {}
highlights = persist.highlights[vid] = HighlightSet()
for linter in linters:
if linter.highlight:
highlights.add(linter.highlight)
if linter.errors:
for line, errs in linter.errors.items():
errors.setdefault(line, []).extend(errs)
# Keep track of one view in each window that shares view's buffer
window_views = {}
buffer_id = view.buffer_id()
for window in sublime.windows():
wid = window.id()
for other_view in window.views():
if other_view.buffer_id() == buffer_id:
vid = other_view.id()
persist.highlights[vid] = highlights
highlights.clear(other_view)
highlights.draw(other_view)
persist.errors[vid] = errors
if window_views.get(wid) is None:
window_views[wid] = other_view
for view in window_views.values():
self.on_selection_modified_async(view)
def hit(self, view):
"""Record an activity that could trigger a lint and enqueue a desire to lint."""
vid = view.id()
self.check_syntax(view)
self.linted_views.add(vid)
if view.size() == 0:
for linter in Linter.get_linters(vid):
linter.clear()
return
persist.last_hit_times[vid] = queue.hit(view)
def check_syntax(self, view):
"""
Check and return if view's syntax has changed.
If the syntax has changed, a new linter is assigned.
"""
vid = view.id()
syntax = persist.get_syntax(view)
# Syntax either has never been set or just changed
if vid not in self.view_syntax or self.view_syntax[vid] != syntax:
self.view_syntax[vid] = syntax
Linter.assign(view, reset=True)
self.clear(view)
return True
else:
return False
def clear(self, view):
"""Clear all marks, errors and status from the given view."""
Linter.clear_view(view)
def is_scratch(self, view):
"""
Return whether a view is effectively scratch.
There is a bug (or feature) in the current ST3 where the Find panel
is not marked scratch but has no window.
There is also a bug where settings files opened from within .sublime-package
files are not marked scratch during the initial on_modified event, so we have
to check that a view with a filename actually exists on disk if the file
being opened is in the Sublime Text packages directory.
"""
if view.is_scratch() or view.is_read_only() or view.window() is None or view.settings().get("repl") is not None:
return True
elif (
view.file_name() and
view.file_name().startswith(sublime.packages_path() + os.path.sep) and
not os.path.exists(view.file_name())
):
return True
else:
return False
def view_has_file_only_linter(self, vid):
"""Return True if any linters for the given view are file-only."""
for lint in persist.view_linters.get(vid, []):
if lint.tempfile_suffix == '-':
return True
return False
# sublime_plugin.EventListener event handlers
def on_modified(self, view):
"""Called when a view is modified."""
if self.is_scratch(view):
return
if view.id() not in persist.view_linters:
syntax_changed = self.check_syntax(view)
if not syntax_changed:
return
else:
syntax_changed = False
if syntax_changed or persist.settings.get('lint_mode', 'background') == 'background':
self.hit(view)
else:
self.clear(view)
def on_activated(self, view):
"""Called when a view gains input focus."""
if self.is_scratch(view):
return
# Reload the plugin settings.
persist.settings.load()
self.check_syntax(view)
view_id = view.id()
if view_id not in self.linted_views:
if view_id not in self.loaded_views:
self.on_new(view)
if persist.settings.get('lint_mode', 'background') in ('background', 'load/save'):
self.hit(view)
self.on_selection_modified_async(view)
def on_open_settings(self, view):
"""
Called when any settings file is opened.
view is the view that contains the text of the settings file.
"""
if self.is_settings_file(view, user_only=True):
persist.settings.save(view=view)
def is_settings_file(self, view, user_only=False):
"""Return True if view is a SublimeLinter settings file."""
filename = view.file_name()
if not filename:
return False
if not filename.startswith(sublime.packages_path()):
return False
dirname, filename = os.path.split(filename)
dirname = os.path.basename(dirname)
if self.LINTER_SETTINGS_RE.match(filename):
if user_only:
return dirname == 'User'
else:
return dirname in (persist.PLUGIN_DIRECTORY, 'User')
@classmethod
def on_settings_updated(cls, relint=False):
"""Callback triggered when the settings are updated."""
if relint:
cls.lint_all_views()
else:
Linter.redraw_all()
def on_new(self, view):
"""Called when a new buffer is created."""
self.on_open_settings(view)
if self.is_scratch(view):
return
vid = view.id()
self.loaded_views.add(vid)
self.view_syntax[vid] = persist.get_syntax(view)
def get_focused_view_id(self, view):
"""
Return the focused view which shares view's buffer.
When updating the status, we want to make sure we get
the selection of the focused view, since multiple views
into the same buffer may be open.
"""
active_view = view.window().active_view()
for view in view.window().views():
if view == active_view:
return view
def on_selection_modified_async(self, view):
"""Called when the selection changes (cursor moves or text selected)."""
if self.is_scratch(view):
return
view = self.get_focused_view_id(view)
if view is None:
return
vid = view.id()
# Get the line number of the first line of the first selection.
try:
lineno = view.rowcol(view.sel()[0].begin())[0]
except IndexError:
lineno = -1
if vid in persist.errors:
errors = persist.errors[vid]
if errors:
lines = sorted(list(errors))
counts = [len(errors[line]) for line in lines]
count = sum(counts)
plural = 's' if count > 1 else ''
if lineno in errors:
# Sort the errors by column
line_errors = sorted(errors[lineno], key=lambda error: error[0])
line_errors = [error[1] for error in line_errors]
if plural:
# Sum the errors before the first error on this line
index = lines.index(lineno)
first = sum(counts[0:index]) + 1
if len(line_errors) > 1:
last = first + len(line_errors) - 1
status = '{}-{} of {} errors: '.format(first, last, count)
else:
status = '{} of {} errors: '.format(first, count)
else:
status = 'Error: '
status += '; '.join(line_errors)
else:
status = '%i error%s' % (count, plural)
view.set_status('sublimelinter', status)
else:
view.erase_status('sublimelinter')
def on_pre_save(self, view):
"""
Called before view is saved.
If a settings file is the active view and is saved,
copy the current settings first so we can compare post-save.
"""
if view.window().active_view() == view and self.is_settings_file(view):
persist.settings.copy()
def on_post_save(self, view):
"""Called after view is saved."""
if self.is_scratch(view):
return
# First check to see if the project settings changed
if view.window().project_file_name() == view.file_name():
self.lint_all_views()
else:
# Now see if a .sublimelinterrc has changed
filename = os.path.basename(view.file_name())
if filename == '.sublimelinterrc':
# If a .sublimelinterrc has changed, to be safe
# clear the rc cache and relint.
util.get_rc_settings.cache_clear()
self.lint_all_views()
# If a file other than one of our settings files changed,
# check if the syntax changed or if we need to show errors.
elif filename != 'SublimeLinter.sublime-settings':
self.file_was_saved(view)
def file_was_saved(self, view):
"""Check if the syntax changed or if we need to show errors."""
syntax_changed = self.check_syntax(view)
vid = view.id()
mode = persist.settings.get('lint_mode', 'background')
show_errors = persist.settings.get('show_errors_on_save', False)
if syntax_changed:
self.clear(view)
if vid in persist.view_linters:
if mode != 'manual':
self.lint(vid)
else:
show_errors = False
else:
show_errors = False
else:
if (
show_errors or
mode in ('load/save', 'save only') or
mode == 'background' and self.view_has_file_only_linter(vid)
):
self.lint(vid)
elif mode == 'manual':
show_errors = False
if show_errors and vid in persist.errors and persist.errors[vid]:
view.run_command('sublimelinter_show_all_errors')
def on_close(self, view):
"""Called after view is closed."""
if self.is_scratch(view):
return
vid = view.id()
if vid in self.loaded_views:
self.loaded_views.remove(vid)
if vid in self.linted_views:
self.linted_views.remove(vid)
if vid in self.view_syntax:
del self.view_syntax[vid]
persist.view_did_close(vid)
class SublimelinterEditCommand(sublime_plugin.TextCommand):
"""A plugin command used to generate an edit object for a view."""
def run(self, edit):
"""Run the command."""
persist.edit(self.view.id(), edit)
| yubchen/Qlinter | sublimelinter.py | Python | mit | 15,716 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lambda
short_description: Manage AWS Lambda functions
description:
- Allows for the management of Lambda functions.
version_added: '2.2'
requirements: [ boto3 ]
options:
name:
description:
- The name you want to assign to the function you are uploading. Cannot be changed.
required: true
state:
description:
- Create or delete Lambda function.
default: present
choices: [ 'present', 'absent' ]
runtime:
description:
- The runtime environment for the Lambda function you are uploading.
- Required when creating a function. Uses parameters as described in boto3 docs.
- Required when C(state=present).
- For supported list of runtimes, see U(https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html).
role:
description:
      - The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS)
        resources. You may pass the bare role name instead of a full ARN if the role belongs to the same AWS account; the module expands it to an ARN.
- Required when C(state=present).
handler:
description:
- The function within your code that Lambda calls to begin execution.
zip_file:
description:
      - A .zip file containing your deployment package.
- If C(state=present) then either zip_file or s3_bucket must be present.
aliases: [ 'src' ]
s3_bucket:
description:
- Amazon S3 bucket name where the .zip file containing your deployment package is stored.
- If C(state=present) then either zip_file or s3_bucket must be present.
- C(s3_bucket) and C(s3_key) are required together.
s3_key:
description:
- The Amazon S3 object (the deployment package) key name you want to upload.
- C(s3_bucket) and C(s3_key) are required together.
s3_object_version:
description:
- The Amazon S3 object (the deployment package) version you want to upload.
description:
description:
- A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit.
timeout:
description:
- The function maximum execution time in seconds after which Lambda should terminate the function.
default: 3
memory_size:
description:
- The amount of memory, in MB, your Lambda function is given.
default: 128
vpc_subnet_ids:
description:
- List of subnet IDs to run Lambda function in. Use this option if you need to access resources in your VPC. Leave empty if you don't want to run
the function in a VPC.
vpc_security_group_ids:
description:
- List of VPC security group IDs to associate with the Lambda function. Required when vpc_subnet_ids is used.
environment_variables:
description:
- A dictionary of environment variables the Lambda function is given.
aliases: [ 'environment' ]
version_added: "2.3"
dead_letter_arn:
description:
- The parent object that contains the target Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.
version_added: "2.3"
tags:
description:
      - Tag dict to apply to the function (requires botocore 1.5.40 or above).
version_added: "2.5"
author:
- 'Steyn Huizinga (@steynovich)'
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create Lambda functions
- name: looped creation
lambda:
name: '{{ item.name }}'
state: present
zip_file: '{{ item.zip_file }}'
runtime: 'python2.7'
role: 'arn:aws:iam::987654321012:role/lambda_basic_execution'
handler: 'hello_python.my_handler'
vpc_subnet_ids:
- subnet-123abcde
- subnet-edcba321
vpc_security_group_ids:
- sg-123abcde
- sg-edcba321
environment_variables: '{{ item.env_vars }}'
tags:
key1: 'value1'
loop:
- name: HelloWorld
zip_file: hello-code.zip
env_vars:
key1: "first"
key2: "second"
- name: ByeBye
zip_file: bye-code.zip
env_vars:
key1: "1"
key2: "2"
# To remove previously added tags pass an empty dict
- name: remove tags
lambda:
name: 'Lambda function'
state: present
zip_file: 'code.zip'
runtime: 'python2.7'
role: 'arn:aws:iam::987654321012:role/lambda_basic_execution'
handler: 'hello_python.my_handler'
tags: {}
# Basic Lambda function deletion
- name: Delete Lambda functions HelloWorld and ByeBye
lambda:
name: '{{ item }}'
state: absent
loop:
- HelloWorld
- ByeBye
'''
RETURN = '''
code:
description: the lambda function location returned by get_function in boto3
returned: success
type: dict
sample:
{
'location': 'a presigned S3 URL',
'repository_type': 'S3',
}
configuration:
description: the lambda function metadata returned by get_function in boto3
returned: success
type: dict
sample:
{
'code_sha256': 'SHA256 hash',
'code_size': 123,
'description': 'My function',
'environment': {
'variables': {
'key': 'value'
}
},
'function_arn': 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1',
'function_name': 'myFunction',
'handler': 'index.handler',
'last_modified': '2017-08-01T00:00:00.000+0000',
'memory_size': 128,
'role': 'arn:aws:iam::123456789012:role/lambda_basic_execution',
'runtime': 'nodejs6.10',
'timeout': 3,
'version': '1',
'vpc_config': {
'security_group_ids': [],
'subnet_ids': []
}
}
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict
from ansible.module_utils.ec2 import compare_aws_tags
import base64
import hashlib
import traceback
import re
try:
from botocore.exceptions import ClientError, BotoCoreError, ValidationError, ParamValidationError
except ImportError:
pass # protected by AnsibleAWSModule
def get_account_info(module, region=None, endpoint=None, **aws_connect_kwargs):
"""return the account information (account id and partition) we are currently working on
    get_account_info tries to find out the account that we are working
on. It's not guaranteed that this will be easy so we try in
several different ways. Giving either IAM or STS privileges to
the account should be enough to permit this.
"""
account_id = None
partition = None
try:
sts_client = boto3_conn(module, conn_type='client', resource='sts',
region=region, endpoint=endpoint, **aws_connect_kwargs)
caller_id = sts_client.get_caller_identity()
account_id = caller_id.get('Account')
partition = caller_id.get('Arn').split(':')[1]
except ClientError:
try:
iam_client = boto3_conn(module, conn_type='client', resource='iam',
region=region, endpoint=endpoint, **aws_connect_kwargs)
arn, partition, service, reg, account_id, resource = iam_client.get_user()['User']['Arn'].split(':')
except ClientError as e:
if (e.response['Error']['Code'] == 'AccessDenied'):
                except_msg = to_native(e)
                m = re.search(r"arn:(aws(-([a-z\-]+))?):iam::([0-9]{12,32}):\w+/", except_msg)
                if m:
                    account_id = m.group(4)
                    partition = m.group(1)
if account_id is None:
module.fail_json_aws(e, msg="getting account information")
if partition is None:
module.fail_json_aws(e, msg="getting account information: partition")
except Exception as e:
module.fail_json_aws(e, msg="getting account information")
return account_id, partition
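# Worked example (illustrative only; the ARN below is made up): an IAM user ARN
# such as 'arn:aws:iam::123456789012:user/deploy' splits on ':' into
# ['arn', 'aws', 'iam', '', '123456789012', 'user/deploy'], so index 1 gives the
# partition ('aws') and index 4 the 12-digit account id. Both the IAM lookup and
# the AccessDenied regex fallback in get_account_info() rely on that layout.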
def get_current_function(connection, function_name, qualifier=None):
try:
if qualifier is not None:
return connection.get_function(FunctionName=function_name, Qualifier=qualifier)
return connection.get_function(FunctionName=function_name)
except ClientError as e:
try:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
return None
except (KeyError, AttributeError):
pass
raise e
def sha256sum(filename):
hasher = hashlib.sha256()
with open(filename, 'rb') as f:
hasher.update(f.read())
code_hash = hasher.digest()
code_b64 = base64.b64encode(code_hash)
hex_digest = code_b64.decode('utf-8')
return hex_digest
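# Clarifying note on the helper above: despite the local variable name, the value
# returned is the base64-encoded SHA-256 digest (not hex), which is the format
# Lambda reports in CodeSha256 and why it can be compared directly against
# current_config['CodeSha256'] further below. Illustrative call (file name made up):
#
#     sha256sum('lambda.zip')   # -> a 44-character base64 string such as 'Ab3...='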
def set_tag(client, module, tags, function):
if not hasattr(client, "list_tags"):
module.fail_json(msg="Using tags requires botocore 1.5.40 or above")
changed = False
arn = function['Configuration']['FunctionArn']
try:
current_tags = client.list_tags(Resource=arn).get('Tags', {})
except ClientError as e:
module.fail_json(msg="Unable to list tags: {0}".format(to_native(e)),
exception=traceback.format_exc())
tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags=True)
try:
if tags_to_remove:
client.untag_resource(
Resource=arn,
TagKeys=tags_to_remove
)
changed = True
if tags_to_add:
client.tag_resource(
Resource=arn,
Tags=tags_to_add
)
changed = True
except ClientError as e:
module.fail_json(msg="Unable to tag resource {0}: {1}".format(arn,
to_native(e)), exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
except BotoCoreError as e:
module.fail_json(msg="Unable to tag resource {0}: {1}".format(arn,
to_native(e)), exception=traceback.format_exc())
return changed
def main():
argument_spec = dict(
name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
runtime=dict(),
role=dict(),
handler=dict(),
zip_file=dict(aliases=['src']),
s3_bucket=dict(),
s3_key=dict(),
s3_object_version=dict(),
description=dict(default=''),
timeout=dict(type='int', default=3),
memory_size=dict(type='int', default=128),
vpc_subnet_ids=dict(type='list'),
vpc_security_group_ids=dict(type='list'),
environment_variables=dict(type='dict'),
dead_letter_arn=dict(),
tags=dict(type='dict'),
)
mutually_exclusive = [['zip_file', 's3_key'],
['zip_file', 's3_bucket'],
['zip_file', 's3_object_version']]
required_together = [['s3_key', 's3_bucket'],
['vpc_subnet_ids', 'vpc_security_group_ids']]
required_if = [['state', 'present', ['runtime', 'handler', 'role']]]
module = AnsibleAWSModule(argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=mutually_exclusive,
required_together=required_together,
required_if=required_if)
name = module.params.get('name')
state = module.params.get('state').lower()
runtime = module.params.get('runtime')
role = module.params.get('role')
handler = module.params.get('handler')
s3_bucket = module.params.get('s3_bucket')
s3_key = module.params.get('s3_key')
s3_object_version = module.params.get('s3_object_version')
zip_file = module.params.get('zip_file')
description = module.params.get('description')
timeout = module.params.get('timeout')
memory_size = module.params.get('memory_size')
vpc_subnet_ids = module.params.get('vpc_subnet_ids')
vpc_security_group_ids = module.params.get('vpc_security_group_ids')
environment_variables = module.params.get('environment_variables')
dead_letter_arn = module.params.get('dead_letter_arn')
tags = module.params.get('tags')
check_mode = module.check_mode
changed = False
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg='region must be specified')
try:
client = boto3_conn(module, conn_type='client', resource='lambda',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
except (ClientError, ValidationError) as e:
module.fail_json_aws(e, msg="Trying to connect to AWS")
if state == 'present':
if re.match(r'^arn:aws(-([a-z\-]+))?:iam', role):
role_arn = role
else:
# get account ID and assemble ARN
account_id, partition = get_account_info(module, region=region, endpoint=ec2_url, **aws_connect_kwargs)
role_arn = 'arn:{0}:iam::{1}:role/{2}'.format(partition, account_id, role)
    # Get function configuration if present, None otherwise
current_function = get_current_function(client, name)
# Update existing Lambda function
if state == 'present' and current_function:
# Get current state
current_config = current_function['Configuration']
current_version = None
# Update function configuration
func_kwargs = {'FunctionName': name}
# Update configuration if needed
if role_arn and current_config['Role'] != role_arn:
func_kwargs.update({'Role': role_arn})
if handler and current_config['Handler'] != handler:
func_kwargs.update({'Handler': handler})
if description and current_config['Description'] != description:
func_kwargs.update({'Description': description})
if timeout and current_config['Timeout'] != timeout:
func_kwargs.update({'Timeout': timeout})
if memory_size and current_config['MemorySize'] != memory_size:
func_kwargs.update({'MemorySize': memory_size})
if (environment_variables is not None) and (current_config.get(
'Environment', {}).get('Variables', {}) != environment_variables):
func_kwargs.update({'Environment': {'Variables': environment_variables}})
if dead_letter_arn is not None:
if current_config.get('DeadLetterConfig'):
if current_config['DeadLetterConfig']['TargetArn'] != dead_letter_arn:
func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
else:
if dead_letter_arn != "":
func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
# Check for unsupported mutation
if current_config['Runtime'] != runtime:
module.fail_json(msg='Cannot change runtime. Please recreate the function')
# If VPC configuration is desired
if vpc_subnet_ids or vpc_security_group_ids:
if not vpc_subnet_ids or not vpc_security_group_ids:
module.fail_json(msg='vpc connectivity requires at least one security group and one subnet')
if 'VpcConfig' in current_config:
# Compare VPC config with current config
current_vpc_subnet_ids = current_config['VpcConfig']['SubnetIds']
current_vpc_security_group_ids = current_config['VpcConfig']['SecurityGroupIds']
subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(current_vpc_subnet_ids)
vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids)
if 'VpcConfig' not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed:
new_vpc_config = {'SubnetIds': vpc_subnet_ids,
'SecurityGroupIds': vpc_security_group_ids}
func_kwargs.update({'VpcConfig': new_vpc_config})
else:
# No VPC configuration is desired, assure VPC config is empty when present in current config
if 'VpcConfig' in current_config and current_config['VpcConfig'].get('VpcId'):
func_kwargs.update({'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []}})
# Upload new configuration if configuration has changed
if len(func_kwargs) > 1:
try:
if not check_mode:
response = client.update_function_configuration(**func_kwargs)
current_version = response['Version']
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to update lambda configuration")
# Update code configuration
code_kwargs = {'FunctionName': name, 'Publish': True}
# Update S3 location
if s3_bucket and s3_key:
# If function is stored on S3 always update
code_kwargs.update({'S3Bucket': s3_bucket, 'S3Key': s3_key})
# If S3 Object Version is given
if s3_object_version:
code_kwargs.update({'S3ObjectVersion': s3_object_version})
# Compare local checksum, update remote code when different
elif zip_file:
local_checksum = sha256sum(zip_file)
remote_checksum = current_config['CodeSha256']
# Only upload new code when local code is different compared to the remote code
if local_checksum != remote_checksum:
try:
with open(zip_file, 'rb') as f:
encoded_zip = f.read()
code_kwargs.update({'ZipFile': encoded_zip})
except IOError as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
# Tag Function
if tags is not None:
if set_tag(client, module, tags, current_function):
changed = True
# Upload new code if needed (e.g. code checksum has changed)
if len(code_kwargs) > 2:
try:
if not check_mode:
response = client.update_function_code(**code_kwargs)
current_version = response['Version']
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to upload new code")
# Describe function code and configuration
response = get_current_function(client, name, qualifier=current_version)
if not response:
module.fail_json(msg='Unable to get function information after updating')
# We're done
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
    # Function doesn't exist, create new Lambda function
elif state == 'present':
if s3_bucket and s3_key:
# If function is stored on S3
code = {'S3Bucket': s3_bucket,
'S3Key': s3_key}
if s3_object_version:
code.update({'S3ObjectVersion': s3_object_version})
elif zip_file:
# If function is stored in local zipfile
try:
with open(zip_file, 'rb') as f:
zip_content = f.read()
code = {'ZipFile': zip_content}
except IOError as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
else:
module.fail_json(msg='Either S3 object or path to zipfile required')
func_kwargs = {'FunctionName': name,
'Publish': True,
'Runtime': runtime,
'Role': role_arn,
'Code': code,
'Timeout': timeout,
'MemorySize': memory_size,
}
if description is not None:
func_kwargs.update({'Description': description})
if handler is not None:
func_kwargs.update({'Handler': handler})
if environment_variables:
func_kwargs.update({'Environment': {'Variables': environment_variables}})
if dead_letter_arn:
func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
# If VPC configuration is given
if vpc_subnet_ids or vpc_security_group_ids:
if not vpc_subnet_ids or not vpc_security_group_ids:
module.fail_json(msg='vpc connectivity requires at least one security group and one subnet')
func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids,
'SecurityGroupIds': vpc_security_group_ids}})
# Finally try to create function
current_version = None
try:
if not check_mode:
response = client.create_function(**func_kwargs)
current_version = response['Version']
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to create function")
# Tag Function
if tags is not None:
if set_tag(client, module, tags, get_current_function(client, name)):
changed = True
response = get_current_function(client, name, qualifier=current_version)
if not response:
module.fail_json(msg='Unable to get function information after creating')
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
# Delete existing Lambda function
if state == 'absent' and current_function:
try:
if not check_mode:
client.delete_function(FunctionName=name)
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to delete Lambda function")
module.exit_json(changed=changed)
# Function already absent, do nothing
elif state == 'absent':
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
|
thaim/ansible
|
lib/ansible/modules/cloud/amazon/lambda.py
|
Python
|
mit
| 23,103
|
#!/usr/bin/env cctools_python
# CCTOOLS_PYTHON_VERSION 2.7 2.6
import random
import math
#-------------------------------Constants-----------------------------------
DEFAULT_MONTE_CARLO_STEPS = 100
DEFAULT_OUTPUT_PATH = '.'
DEFAULT_MDSTEPS = 10000
DEFAULT_BOUNDARY_CONDITIONS = "Vacuum"
DEFAULT_OUTPUT_FREQ = 10000
DEFAULT_PHYSICAL_TEMP = 300
EXECUTABLE = "ProtoMol"
#-----------------------------Global Data-------------------------------------
pdb_file = ""
psf_file = ""
par_file = ""
xyz_file_name = ""
dcd_file_name = ""
boundary_conditions = DEFAULT_BOUNDARY_CONDITIONS
monte_carlo_steps = DEFAULT_MONTE_CARLO_STEPS
md_steps = DEFAULT_MDSTEPS
output_freq = DEFAULT_OUTPUT_FREQ
output_path = DEFAULT_OUTPUT_PATH
replica_list = []
#------------------------Initialize random generator----------------------------
random.seed()
#-------------------------Global functions---------------------------------
#Function to parse the file name from a string holding its location.
def parse_file_name(file_name):
split_name = file_name.split('/')
return split_name[len(split_name)-1]
#Function to parse the file name and leave out its extension.
def remove_trailing_dots(file_name):
split_name = file_name.split('.')
return split_name[0]
#-------------------------Define Replica Object---------------------------------
class Replica(object):
def __init__(self, id, temp):
self.id = id
self.temp = temp
self.exchgd_replica_id = -1
self.potential_energy = None
self.prev_temp = None
self.exch_steps = []
self.running = 0
self.last_seen_step = -1
def __str__(self):
return "Replica %d at temp %f" % (self.id, self.temp)
#Function to generate a config file to send to workqueue. It returns the generated config file name.
def generate_config(output_path, pdb_file, psf_file, par_file, monte_carlo_step, md_steps, output_freq, replica_obj, generate_xyz = False, generate_dcd = False):
#initialize the config file name based on the replica id.
cfg_file_name = "%s/%s/%s/%d/%d-%d.cfg" % ( output_path, "simfiles", "config", replica_obj.id, replica_obj.id, monte_carlo_step)
cfg_file_stream = open(cfg_file_name, "w")
#initialize string that will hold the config file values
write_str = ""
#Parse supplied files so only actual file name is passed, not full path of the file name
input_pdb = "%s.%d-%d.pdb" % (remove_trailing_dots(parse_file_name(pdb_file)), replica_obj.id, monte_carlo_step)
parsed_psf_file = parse_file_name(psf_file)
parsed_par_file = parse_file_name(par_file)
#Start writing the config file parameters and values
write_str += "randomtype 1\n"
write_str += "numsteps %d\n" % md_steps
write_str += "outputfreq %d\n" % output_freq
write_str += "posfile %s\n" % input_pdb
write_str += "psffile %s\n" % parsed_psf_file
write_str += "parfile %s\n" % parsed_par_file
if monte_carlo_step > 0:
write_str += "velfile %s.%d-%d.vel\n" % (remove_trailing_dots(parse_file_name(pdb_file)), replica_obj.id, monte_carlo_step)
write_str += "dofinPDBPosFile true\n"
write_str += "finPDBPosFile %s.%d-%d.pdb\n" % (remove_trailing_dots(parse_file_name(pdb_file)), replica_obj.id, monte_carlo_step+1)
write_str += "finXYZVelFile %s.%d-%d.vel\n" % (remove_trailing_dots(parse_file_name(pdb_file)), replica_obj.id, monte_carlo_step+1)
write_str += "temperature %f\n" % replica_obj.temp
write_str += "boundaryConditions %s\n" % boundary_conditions
write_str += "cellManager Cubic\n"
write_str += "cellsize 69\n"
if generate_xyz:
write_str += "XYZPosFile %d.xyz\n" % replica_obj.id
write_str += "XYZPosFileOutputFreq %d\n" % md_steps
if generate_dcd:
write_str += "DCDFile %d.dcd\n" % replica_obj.id
write_str += "DCDFileOutputFreq %d\n" % output_freq
write_str += "allEnergiesFile %d.eng\n" % replica_obj.id
write_str += "allEnergiesFileOutputFreq %d\n" % output_freq
write_str += "seed %d\n" % random.randint(1, 1000000)
write_str += "shake on\n"
write_str += "integrator {\n"
write_str += "level 0 langevinImpulse {\n"
write_str += "temperature %f\n" % replica_obj.temp
write_str += "gamma 5\n"
write_str += "timestep 2\n"
write_str += "force bond\n"
write_str += "force angle\n"
write_str += "force dihedral\n"
write_str += "force improper\n"
write_str += "force LennardJones Coulomb\n"
write_str += " -switchingFunction C2 -switchingFunction C1 -algorithm NonbondedCutoff\n"
write_str += " -switchon 10\n"
write_str += " -cutoff 12\n"
write_str += " -cutoff 12\n"
write_str += " -cutoff 12\n"
write_str += "}\n}"
#Write to the config file
    cfg_file_stream.write(write_str)
    cfg_file_stream.close()
    return cfg_file_name
#Function to perform the Metropolis criterion check for two replicas.
def metropolis( u_i, u_j, t_i, t_j ):
# Metropolis for replica i with potential energy u_i, temp t_i
# and replica j with potential energy u_j, temp t_j
K_b = 0.001987191 #Constants.boltzmann()
    deltaE = (1 / (K_b * t_i) - 1 / (K_b * t_j)) * (u_j - u_i)
if( deltaE < 0 ):
return True
acceptProb = math.exp(-deltaE)
randNum = random.random()
if( randNum < acceptProb ):
return True
else:
return False
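#Illustrative check of the criterion above (not part of the original script;
#the numbers are invented). Two replicas at 300 K and 320 K with potential
#energies -1200 and -1180 kcal/mol:
#    beta_i = 1 / (0.001987191 * 300.0)
#    beta_j = 1 / (0.001987191 * 320.0)
#    delta  = (beta_i - beta_j) * (-1180.0 - (-1200.0))   # ~2.1 > 0
#    accept = math.exp(-delta)                            # swap accepted ~12% of the time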
|
charleszheng44/cctools
|
apps/wq_replica_exchange/protomol_functions.py
|
Python
|
gpl-2.0
| 5,145
|
"""Module containing tools that are useful when benchmarking algorithms
"""
from math import hypot, sqrt
from functools import wraps
from itertools import repeat
try:
import numpy
except ImportError:
numpy = False
class translate(object):
"""Decorator for evaluation functions, it translates the objective
function by *vector* which should be the same length as the individual
size. When called the decorated function should take as first argument the
individual to be evaluated. The inverse translation vector is actually
applied to the individual and the resulting list is given to the
evaluation function. Thus, the evaluation function shall not be expecting
an individual as it will receive a plain list.
This decorator adds a :func:`translate` method to the decorated function.
"""
def __init__(self, vector):
self.vector = vector
def __call__(self, func):
# wraps is used to combine stacked decorators that would add functions
@wraps(func)
def wrapper(individual, *args, **kargs):
# A subtraction is applied since the translation is applied to the
# individual and not the function
return func([v - t for v, t in zip(individual, self.vector)],
*args, **kargs)
wrapper.translate = self.translate
return wrapper
def translate(self, vector):
"""Set the current translation to *vector*. After decorating the
evaluation function, this function will be available directly from
the function object. ::
@translate([0.25, 0.5, ..., 0.1])
def evaluate(individual):
return sum(individual),
# This will cancel the translation
evaluate.translate([0.0, 0.0, ..., 0.0])
"""
self.vector = vector
class rotate(object):
"""Decorator for evaluation functions, it rotates the objective function
by *matrix* which should be a valid orthogonal NxN rotation matrix, with N
the length of an individual. When called the decorated function should
take as first argument the individual to be evaluated. The inverse
rotation matrix is actually applied to the individual and the resulting
list is given to the evaluation function. Thus, the evaluation function
shall not be expecting an individual as it will receive a plain list
(numpy.array). The multiplication is done using numpy.
This decorator adds a :func:`rotate` method to the decorated function.
.. note::
A random orthogonal matrix Q can be created via QR decomposition. ::
A = numpy.random.random((n,n))
Q, _ = numpy.linalg.qr(A)
"""
def __init__(self, matrix):
if not numpy:
raise RuntimeError("Numpy is required for using the rotation "
"decorator")
        # The inverse is taken since the rotation is applied to the individual
        # and not to the function
self.matrix = numpy.linalg.inv(matrix)
def __call__(self, func):
# wraps is used to combine stacked decorators that would add functions
@wraps(func)
def wrapper(individual, *args, **kargs):
return func(numpy.dot(self.matrix, individual), *args, **kargs)
wrapper.rotate = self.rotate
return wrapper
def rotate(self, matrix):
"""Set the current rotation to *matrix*. After decorating the
evaluation function, this function will be available directly from
the function object. ::
# Create a random orthogonal matrix
A = numpy.random.random((n,n))
Q, _ = numpy.linalg.qr(A)
@rotate(Q)
def evaluate(individual):
return sum(individual),
# This will reset rotation to identity
evaluate.rotate(numpy.identity(n))
"""
self.matrix = numpy.linalg.inv(matrix)
class noise(object):
"""Decorator for evaluation functions, it evaluates the objective function
and adds noise by calling the function(s) provided in the *noise*
argument. The noise functions are called without any argument, consider
using the :class:`~deap.base.Toolbox` or Python's
:func:`functools.partial` to provide any required argument. If a single
function is provided it is applied to all objectives of the evaluation
function. If a list of noise functions is provided, it must be of length
    equal to the number of objectives. The noise argument also accepts
:obj:`None`, which will leave the objective without noise.
This decorator adds a :func:`noise` method to the decorated
function.
"""
def __init__(self, noise):
try:
self.rand_funcs = tuple(noise)
except TypeError:
self.rand_funcs = repeat(noise)
def __call__(self, func):
# wraps is used to combine stacked decorators that would add functions
@wraps(func)
def wrapper(individual, *args, **kargs):
result = func(individual, *args, **kargs)
noisy = list()
for r, f in zip(result, self.rand_funcs):
if f is None:
noisy.append(r)
else:
noisy.append(r + f())
return tuple(noisy)
wrapper.noise = self.noise
return wrapper
def noise(self, noise):
"""Set the current noise to *noise*. After decorating the
evaluation function, this function will be available directly from
the function object. ::
prand = functools.partial(random.gauss, mu=0.0, sigma=1.0)
@noise(prand)
def evaluate(individual):
return sum(individual),
# This will remove noise from the evaluation function
evaluate.noise(None)
"""
try:
self.rand_funcs = tuple(noise)
except TypeError:
self.rand_funcs = repeat(noise)
class scale(object):
"""Decorator for evaluation functions, it scales the objective function by
*factor* which should be the same length as the individual size. When
called the decorated function should take as first argument the individual
to be evaluated. The inverse factor vector is actually applied to the
individual and the resulting list is given to the evaluation function.
Thus, the evaluation function shall not be expecting an individual as it
will receive a plain list.
This decorator adds a :func:`scale` method to the decorated function.
"""
def __init__(self, factor):
        # Factor is inverted since it is applied to the individual and not the
# objective function
self.factor = tuple(1.0/f for f in factor)
def __call__(self, func):
# wraps is used to combine stacked decorators that would add functions
@wraps(func)
def wrapper(individual, *args, **kargs):
return func([v * f for v, f in zip(individual, self.factor)],
*args, **kargs)
wrapper.scale = self.scale
return wrapper
def scale(self, factor):
"""Set the current scale to *factor*. After decorating the
evaluation function, this function will be available directly from
the function object. ::
@scale([0.25, 2.0, ..., 0.1])
def evaluate(individual):
return sum(individual),
# This will cancel the scaling
evaluate.scale([1.0, 1.0, ..., 1.0])
"""
        # Factor is inverted since it is applied to the individual and not the
# objective function
self.factor = tuple(1.0/f for f in factor)
class bound(object):
"""Decorator for crossover and mutation functions, it changes the
individuals after the modification is done to bring it back in the allowed
*bounds*. The *bounds* are functions taking individual and returning
wheter of not the variable is allowed. You can provide one or multiple such
functions. In the former case, the function is used on all dimensions and
in the latter case, the number of functions must be greater or equal to
the number of dimension of the individuals.
The *type* determines how the attributes are brought back into the valid
    range.
This decorator adds a :func:`bound` method to the decorated function.
"""
def _clip(self, individual):
return individual
def _wrap(self, individual):
return individual
def _mirror(self, individual):
return individual
def __call__(self, func):
@wraps(func)
def wrapper(*args, **kargs):
individuals = func(*args, **kargs)
return self.bound(individuals)
wrapper.bound = self.bound
return wrapper
def __init__(self, bounds, type):
try:
self.bounds = tuple(bounds)
except TypeError:
            self.bounds = repeat(bounds)
if type == "mirror":
self.bound = self._mirror
elif type == "wrap":
self.bound = self._wrap
elif type == "clip":
self.bound = self._clip
def diversity(first_front, first, last):
"""Given a Pareto front `first_front` and the two extreme points of the
optimal Pareto front, this function returns a metric of the diversity
of the front as explained in the original NSGA-II article by K. Deb.
The smaller the value is, the better the front is.
"""
df = hypot(first_front[0].fitness.values[0] - first[0],
first_front[0].fitness.values[1] - first[1])
dl = hypot(first_front[-1].fitness.values[0] - last[0],
first_front[-1].fitness.values[1] - last[1])
dt = [hypot(first.fitness.values[0] - second.fitness.values[0],
first.fitness.values[1] - second.fitness.values[1])
for first, second in zip(first_front[:-1], first_front[1:])]
if len(first_front) == 1:
return df + dl
dm = sum(dt)/len(dt)
di = sum(abs(d_i - dm) for d_i in dt)
delta = (df + dl + di)/(df + dl + len(dt) * dm )
return delta
def convergence(first_front, optimal_front):
"""Given a Pareto front `first_front` and the optimal Pareto front,
this function returns a metric of convergence
of the front as explained in the original NSGA-II article by K. Deb.
The smaller the value is, the closer the front is to the optimal one.
"""
distances = []
for ind in first_front:
distances.append(float("inf"))
for opt_ind in optimal_front:
dist = 0.
for i in xrange(len(opt_ind)):
dist += (ind.fitness.values[i] - opt_ind[i])**2
if dist < distances[-1]:
distances[-1] = dist
distances[-1] = sqrt(distances[-1])
return sum(distances) / len(distances)
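# Minimal usage sketch (added for illustration; ``my_vector``, ``my_matrix`` and
# ``my_factors`` are placeholders, not names defined in this module). The
# decorators above stack, each one transforming the individual before handing a
# plain list to the wrapped evaluation function:
#
#     @translate(my_vector)
#     @rotate(my_matrix)        # requires numpy
#     @scale(my_factors)
#     def evaluate(individual):
#         return sum(individual),
#
# diversity() and convergence() each take a first Pareto front (individuals with
# ``fitness.values``) plus reference data and return a single float, smaller
# being better.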
|
CG-F16-24-Rutgers/steersuite-rutgers
|
steerstats/tools/deap/benchmarks/tools.py
|
Python
|
gpl-3.0
| 11,120
|
#!/usr/bin/env python
from setuptools import setup
with open('README.txt') as f:
readme = f.read()
setup(
name="cppcheck",
description='Python script to parse the XML (version 2) output of ' +
'cppcheck and generate an HTML report using Pygments for syntax ' +
'highlighting.',
long_description=readme,
author='Henrik Nilsson',
url='https://github.com/danmar/cppcheck',
license='GPL',
scripts=[
"cppcheck-htmlreport",
],
install_requires=['Pygments']
)
|
geminy/aidear
|
snippets/cppfunc/cppcheck/cppcheck-1.80/htmlreport/setup.py
|
Python
|
gpl-3.0
| 536
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Helper functions for reports testing.
Please /do not/ import this file by default, but only explicitly call it
through the code of yaml tests.
"""
import openerp.netsvc as netsvc
import openerp.tools as tools
import logging
import openerp.pooler as pooler
from openerp.tools.safe_eval import safe_eval
from subprocess import Popen, PIPE
import os
import tempfile
_logger = logging.getLogger(__name__)
def try_report(cr, uid, rname, ids, data=None, context=None, our_module=None):
""" Try to render a report <rname> with contents of ids
This function should also check for common pitfalls of reports.
"""
if data is None:
data = {}
if context is None:
context = {}
if rname.startswith('report.'):
rname_s = rname[7:]
else:
rname_s = rname
_logger.log(netsvc.logging.TEST, " - Trying %s.create(%r)", rname, ids)
res = netsvc.LocalService(rname).create(cr, uid, ids, data, context)
if not isinstance(res, tuple):
raise RuntimeError("Result of %s.create() should be a (data,format) tuple, now it is a %s" % \
(rname, type(res)))
(res_data, res_format) = res
if not res_data:
raise ValueError("Report %s produced an empty result!" % rname)
if tools.config['test_report_directory']:
file(os.path.join(tools.config['test_report_directory'], rname+ '.'+res_format), 'wb+').write(res_data)
_logger.debug("Have a %s report for %s, will examine it", res_format, rname)
if res_format == 'pdf':
if res_data[:5] != '%PDF-':
raise ValueError("Report %s produced a non-pdf header, %r" % (rname, res_data[:10]))
res_text = False
try:
fd, rfname = tempfile.mkstemp(suffix=res_format)
os.write(fd, res_data)
os.close(fd)
fp = Popen(['pdftotext', '-enc', 'UTF-8', '-nopgbrk', rfname, '-'], shell=False, stdout=PIPE).stdout
res_text = tools.ustr(fp.read())
os.unlink(rfname)
except Exception:
_logger.debug("Unable to parse PDF report: install pdftotext to perform automated tests.")
if res_text is not False:
for line in res_text.split('\n'):
if ('[[' in line) or ('[ [' in line):
_logger.error("Report %s may have bad expression near: \"%s\".", rname, line[80:])
# TODO more checks, what else can be a sign of a faulty report?
elif res_format == 'foobar':
# TODO
pass
else:
_logger.warning("Report %s produced a \"%s\" chunk, cannot examine it", rname, res_format)
return False
_logger.log(netsvc.logging.TEST, " + Report %s produced correctly.", rname)
return True
def try_report_action(cr, uid, action_id, active_model=None, active_ids=None,
wiz_data=None, wiz_buttons=None,
context=None, our_module=None):
"""Take an ir.action.act_window and follow it until a report is produced
:param action_id: the integer id of an action, or a reference to xml id
                      of the act_window (can search [our_module.]xml_id)
:param active_model, active_ids: call the action as if it had been launched
from that model+ids (tree/form view action)
:param wiz_data: a dictionary of values to use in the wizard, if needed.
They will override (or complete) the default values of the
wizard form.
:param wiz_buttons: a list of button names, or button icon strings, which
should be preferred to press during the wizard.
Eg. 'OK' or 'gtk-print'
:param our_module: the name of the calling module (string), like 'account'
"""
if not our_module and isinstance(action_id, basestring):
if '.' in action_id:
our_module = action_id.split('.', 1)[0]
if context is None:
context = {}
else:
context = context.copy() # keep it local
# TODO context fill-up
pool = pooler.get_pool(cr.dbname)
def log_test(msg, *args):
_logger.log(netsvc.logging.TEST, " - " + msg, *args)
datas = {}
if active_model:
datas['model'] = active_model
if active_ids:
datas['ids'] = active_ids
if not wiz_buttons:
wiz_buttons = []
if isinstance(action_id, basestring):
if '.' in action_id:
act_module, act_xmlid = action_id.split('.', 1)
else:
if not our_module:
                raise ValueError('You cannot specify action_id "%s" without a module name' % action_id)
act_module = our_module
act_xmlid = action_id
act_model, act_id = pool.get('ir.model.data').get_object_reference(cr, uid, act_module, act_xmlid)
else:
assert isinstance(action_id, (long, int))
        act_model = 'ir.actions.act_window'  # assume that
act_id = action_id
act_xmlid = '<%s>' % act_id
def _exec_action(action, datas, context):
# taken from client/modules/action/main.py:84 _exec_action()
if isinstance(action, bool) or 'type' not in action:
return
# Updating the context : Adding the context of action in order to use it on Views called from buttons
if datas.get('id',False):
context.update( {'active_id': datas.get('id',False), 'active_ids': datas.get('ids',[]), 'active_model': datas.get('model',False)})
context.update(safe_eval(action.get('context','{}'), context.copy()))
if action['type'] in ['ir.actions.act_window', 'ir.actions.submenu']:
for key in ('res_id', 'res_model', 'view_type', 'view_mode',
'limit', 'auto_refresh', 'search_view', 'auto_search', 'search_view_id'):
datas[key] = action.get(key, datas.get(key, None))
view_id = False
if action.get('views', []):
if isinstance(action['views'],list):
view_id = action['views'][0][0]
datas['view_mode']= action['views'][0][1]
else:
if action.get('view_id', False):
view_id = action['view_id'][0]
elif action.get('view_id', False):
view_id = action['view_id'][0]
assert datas['res_model'], "Cannot use the view without a model"
# Here, we have a view that we need to emulate
log_test("will emulate a %s view: %s#%s",
action['view_type'], datas['res_model'], view_id or '?')
view_res = pool.get(datas['res_model']).fields_view_get(cr, uid, view_id, action['view_type'], context)
assert view_res and view_res.get('arch'), "Did not return any arch for the view"
view_data = {}
if view_res.get('fields',{}).keys():
view_data = pool.get(datas['res_model']).default_get(cr, uid, view_res['fields'].keys(), context)
if datas.get('form'):
view_data.update(datas.get('form'))
if wiz_data:
view_data.update(wiz_data)
_logger.debug("View data is: %r", view_data)
for fk, field in view_res.get('fields',{}).items():
# Default fields returns list of int, while at create()
# we need to send a [(6,0,[int,..])]
if field['type'] in ('one2many', 'many2many') \
and view_data.get(fk, False) \
and isinstance(view_data[fk], list) \
and not isinstance(view_data[fk][0], tuple) :
view_data[fk] = [(6, 0, view_data[fk])]
action_name = action.get('name')
try:
from xml.dom import minidom
cancel_found = False
buttons = []
dom_doc = minidom.parseString(view_res['arch'])
if not action_name:
action_name = dom_doc.documentElement.getAttribute('name')
for button in dom_doc.getElementsByTagName('button'):
button_weight = 0
if button.getAttribute('special') == 'cancel':
cancel_found = True
continue
if button.getAttribute('icon') == 'gtk-cancel':
cancel_found = True
continue
if button.getAttribute('default_focus') == '1':
button_weight += 20
if button.getAttribute('string') in wiz_buttons:
button_weight += 30
elif button.getAttribute('icon') in wiz_buttons:
button_weight += 10
string = button.getAttribute('string') or '?%s' % len(buttons)
buttons.append( { 'name': button.getAttribute('name'),
'string': string,
'type': button.getAttribute('type'),
'weight': button_weight,
})
except Exception, e:
_logger.warning("Cannot resolve the view arch and locate the buttons!", exc_info=True)
raise AssertionError(e.args[0])
if not datas['res_id']:
# it is probably an orm_memory object, we need to create
# an instance
datas['res_id'] = pool.get(datas['res_model']).create(cr, uid, view_data, context)
if not buttons:
raise AssertionError("view form doesn't have any buttons to press!")
buttons.sort(key=lambda b: b['weight'])
_logger.debug('Buttons are: %s', ', '.join([ '%s: %d' % (b['string'], b['weight']) for b in buttons]))
res = None
while buttons and not res:
b = buttons.pop()
log_test("in the \"%s\" form, I will press the \"%s\" button.", action_name, b['string'])
if not b['type']:
log_test("the \"%s\" button has no type, cannot use it", b['string'])
continue
if b['type'] == 'object':
#there we are! press the button!
fn = getattr(pool.get(datas['res_model']), b['name'])
if not fn:
_logger.error("The %s model doesn't have a %s attribute!", datas['res_model'], b['name'])
continue
res = fn(cr, uid, [datas['res_id'],], context)
break
else:
_logger.warning("in the \"%s\" form, the \"%s\" button has unknown type %s",
action_name, b['string'], b['type'])
return res
elif action['type']=='ir.actions.report.xml':
if 'window' in datas:
del datas['window']
if not datas:
datas = action.get('datas',{})
datas = datas.copy()
ids = datas.get('ids')
if 'ids' in datas:
del datas['ids']
res = try_report(cr, uid, 'report.'+action['report_name'], ids, datas, context, our_module=our_module)
return res
else:
raise Exception("Cannot handle action of type %s" % act_model)
log_test("will be using %s action %s #%d", act_model, act_xmlid, act_id)
action = pool.get(act_model).read(cr, uid, act_id, context=context)
assert action, "Could not read action %s[%s]" %(act_model, act_id)
loop = 0
while action:
loop += 1
# This part tries to emulate the loop of the Gtk client
if loop > 100:
_logger.error("Passed %d loops, giving up", loop)
raise Exception("Too many loops at action")
log_test("it is an %s action at loop #%d", action.get('type', 'unknown'), loop)
result = _exec_action(action, datas, context)
if not isinstance(result, dict):
break
datas = result.get('datas', {})
if datas:
del result['datas']
action = result
return True
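# Illustrative call patterns (not part of the original helpers; the report and
# action names below are placeholders):
#
#     try_report(cr, uid, 'report.sale.order', [order_id])
#     try_report_action(cr, uid, 'account.action_wizard_multi_charts_accounts',
#                       wiz_data={'code_digits': 6}, our_module='account')
#
# Both helpers return True when the rendered report or emulated wizard flow
# completes, and raise (or log an error) when something looks wrong.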
#eof
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
crmccreary/openerp_server
|
openerp/tools/test_reports.py
|
Python
|
agpl-3.0
| 13,310
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
def convert_catalog(from_file, to_file, size=220):
return __convert(from_file, to_file, size)
def convert(from_file, to_file):
    return __convert(from_file, to_file, size=95)
def __convert(from_file, to_file, size=95):
from PIL import Image, ImageDraw, ImageFilter
im = Image.open(from_file)
    if float(im.size[1]) / im.size[0] > 2:
im = im.resize((im.size[0]*size/im.size[1], size))
else:
im = im.resize((size,im.size[1]*size/im.size[0]))
newimg = Image.new('RGB', (im.size[0]+8,im.size[1]+8), (255,255,255) )
draw = ImageDraw.Draw(newimg)
draw.rectangle((6, im.size[1]-5, im.size[0], im.size[1]+5), fill=(90,90,90))
draw.rectangle((im.size[0]-5, 6, im.size[0]+5, im.size[1]), fill=(90,90,90))
del draw
newimg = newimg.filter(ImageFilter.BLUR)
newimg = newimg.filter(ImageFilter.BLUR)
newimg = newimg.filter(ImageFilter.BLUR)
newimg.paste(im, (0,0))
draw = ImageDraw.Draw(newimg)
draw.rectangle((0, 0, im.size[0], im.size[1]), outline=(0,0,0))
del draw
to_fp = file(to_file, 'wb')
newimg.save(to_fp, "JPEG")
to_fp.close()
res = newimg.size
del im
del newimg
return res
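# Usage sketch (illustrative; the file names are invented): convert() writes a
# roughly 95px thumbnail with the drawn drop shadow, convert_catalog() a roughly
# 220px catalogue image, and convert_catalog() returns the (width, height) of
# the padded JPEG it saved.
#
#     convert('product.jpg', 'product_thumb.jpg')
#     convert_catalog('product.jpg', 'product_catalog.jpg')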
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Johnzero/erp
|
openerp/addons/auction/report/photo_shadow.py
|
Python
|
agpl-3.0
| 2,248
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import timeutils
from nova import db
from nova import exception
from nova.objects import aggregate
from nova.tests.unit import fake_notifier
from nova.tests.unit.objects import test_objects
from nova.tests import uuidsentinel
NOW = timeutils.utcnow().replace(microsecond=0)
fake_aggregate = {
'created_at': NOW,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'id': 123,
'uuid': uuidsentinel.fake_aggregate,
'name': 'fake-aggregate',
'hosts': ['foo', 'bar'],
'metadetails': {'this': 'that'},
}
SUBS = {'metadata': 'metadetails'}
class _TestAggregateObject(object):
def test_get_by_id(self):
self.mox.StubOutWithMock(db, 'aggregate_get')
db.aggregate_get(self.context, 123).AndReturn(fake_aggregate)
self.mox.ReplayAll()
agg = aggregate.Aggregate.get_by_id(self.context, 123)
self.compare_obj(agg, fake_aggregate, subs=SUBS)
@mock.patch('nova.objects.Aggregate.save')
@mock.patch('nova.db.aggregate_get')
def test_load_allocates_uuid(self, mock_get, mock_save):
fake_agg = dict(fake_aggregate)
del fake_agg['uuid']
mock_get.return_value = fake_agg
uuid = uuidsentinel.aggregate
with mock.patch('oslo_utils.uuidutils.generate_uuid') as mock_g:
mock_g.return_value = uuid
obj = aggregate.Aggregate.get_by_id(self.context, 123)
mock_g.assert_called_once_with()
self.assertEqual(uuid, obj.uuid)
mock_save.assert_called_once_with()
def test_create(self):
self.mox.StubOutWithMock(db, 'aggregate_create')
db.aggregate_create(self.context, {'name': 'foo',
'uuid': uuidsentinel.fake_agg},
metadata={'one': 'two'}).AndReturn(fake_aggregate)
self.mox.ReplayAll()
agg = aggregate.Aggregate(context=self.context)
agg.name = 'foo'
agg.metadata = {'one': 'two'}
agg.uuid = uuidsentinel.fake_agg
agg.create()
self.compare_obj(agg, fake_aggregate, subs=SUBS)
def test_recreate_fails(self):
self.mox.StubOutWithMock(db, 'aggregate_create')
db.aggregate_create(self.context, {'name': 'foo',
'uuid': uuidsentinel.fake_agg},
metadata={'one': 'two'}).AndReturn(fake_aggregate)
self.mox.ReplayAll()
agg = aggregate.Aggregate(context=self.context)
agg.name = 'foo'
agg.metadata = {'one': 'two'}
agg.uuid = uuidsentinel.fake_agg
agg.create()
self.assertRaises(exception.ObjectActionError, agg.create)
def test_save(self):
self.mox.StubOutWithMock(db, 'aggregate_update')
db.aggregate_update(self.context, 123, {'name': 'baz'}).AndReturn(
fake_aggregate)
self.mox.ReplayAll()
agg = aggregate.Aggregate(context=self.context)
agg.id = 123
agg.name = 'baz'
agg.save()
self.compare_obj(agg, fake_aggregate, subs=SUBS)
def test_save_and_create_no_hosts(self):
agg = aggregate.Aggregate(context=self.context)
agg.id = 123
agg.hosts = ['foo', 'bar']
self.assertRaises(exception.ObjectActionError,
agg.create)
self.assertRaises(exception.ObjectActionError,
agg.save)
def test_update_metadata(self):
self.mox.StubOutWithMock(db, 'aggregate_metadata_delete')
self.mox.StubOutWithMock(db, 'aggregate_metadata_add')
db.aggregate_metadata_delete(self.context, 123, 'todelete')
db.aggregate_metadata_add(self.context, 123, {'toadd': 'myval'})
self.mox.ReplayAll()
fake_notifier.NOTIFICATIONS = []
agg = aggregate.Aggregate()
agg._context = self.context
agg.id = 123
agg.metadata = {'foo': 'bar'}
agg.obj_reset_changes()
agg.update_metadata({'todelete': None, 'toadd': 'myval'})
self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('aggregate.updatemetadata.start', msg.event_type)
self.assertEqual({'todelete': None, 'toadd': 'myval'},
msg.payload['meta_data'])
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual('aggregate.updatemetadata.end', msg.event_type)
self.assertEqual({'todelete': None, 'toadd': 'myval'},
msg.payload['meta_data'])
self.assertEqual({'foo': 'bar', 'toadd': 'myval'}, agg.metadata)
def test_destroy(self):
self.mox.StubOutWithMock(db, 'aggregate_delete')
db.aggregate_delete(self.context, 123)
self.mox.ReplayAll()
agg = aggregate.Aggregate(context=self.context)
agg.id = 123
agg.destroy()
def test_add_host(self):
self.mox.StubOutWithMock(db, 'aggregate_host_add')
db.aggregate_host_add(self.context, 123, 'bar'
).AndReturn({'host': 'bar'})
self.mox.ReplayAll()
agg = aggregate.Aggregate()
agg.id = 123
agg.hosts = ['foo']
agg._context = self.context
agg.add_host('bar')
self.assertEqual(agg.hosts, ['foo', 'bar'])
def test_delete_host(self):
self.mox.StubOutWithMock(db, 'aggregate_host_delete')
db.aggregate_host_delete(self.context, 123, 'foo')
self.mox.ReplayAll()
agg = aggregate.Aggregate()
agg.id = 123
agg.hosts = ['foo', 'bar']
agg._context = self.context
agg.delete_host('foo')
self.assertEqual(agg.hosts, ['bar'])
def test_availability_zone(self):
agg = aggregate.Aggregate()
agg.metadata = {'availability_zone': 'foo'}
self.assertEqual('foo', agg.availability_zone)
def test_get_all(self):
self.mox.StubOutWithMock(db, 'aggregate_get_all')
db.aggregate_get_all(self.context).AndReturn([fake_aggregate])
self.mox.ReplayAll()
aggs = aggregate.AggregateList.get_all(self.context)
self.assertEqual(1, len(aggs))
self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
def test_by_host(self):
self.mox.StubOutWithMock(db, 'aggregate_get_by_host')
db.aggregate_get_by_host(self.context, 'fake-host', key=None,
).AndReturn([fake_aggregate])
self.mox.ReplayAll()
aggs = aggregate.AggregateList.get_by_host(self.context, 'fake-host')
self.assertEqual(1, len(aggs))
self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
@mock.patch('nova.db.aggregate_get_by_metadata_key')
def test_get_by_metadata_key(self, get_by_metadata_key):
get_by_metadata_key.return_value = [fake_aggregate]
aggs = aggregate.AggregateList.get_by_metadata_key(
self.context, 'this')
self.assertEqual(1, len(aggs))
self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
@mock.patch('nova.db.aggregate_get_by_metadata_key')
def test_get_by_metadata_key_and_hosts_no_match(self, get_by_metadata_key):
get_by_metadata_key.return_value = [fake_aggregate]
aggs = aggregate.AggregateList.get_by_metadata_key(
self.context, 'this', hosts=['baz'])
self.assertEqual(0, len(aggs))
@mock.patch('nova.db.aggregate_get_by_metadata_key')
def test_get_by_metadata_key_and_hosts_match(self, get_by_metadata_key):
get_by_metadata_key.return_value = [fake_aggregate]
aggs = aggregate.AggregateList.get_by_metadata_key(
self.context, 'this', hosts=['foo', 'bar'])
self.assertEqual(1, len(aggs))
self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
class TestAggregateObject(test_objects._LocalTest,
_TestAggregateObject):
pass
class TestRemoteAggregateObject(test_objects._RemoteTest,
_TestAggregateObject):
pass
|
HybridF5/nova
|
nova/tests/unit/objects/test_aggregate.py
|
Python
|
apache-2.0
| 8,667
|
# Copyright 2006 James Tauber and contributors
# Copyright (C) 2009 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
# Copyright (C) 2010 Serge Tarkovski <serge.tarkovski@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyjd
from pyjamas import DOM
from pyjamas import Window
from pyjamas import Factory
from __pyjamas__ import JS, doc
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.Widget import Widget
from pyjamas.ui.MouseListener import MouseHandler
from pyjamas.ui.RootPanel import RootPanel
mousecapturer = None
def getMouseCapturer(**kwargs):
global mousecapturer
if mousecapturer is None:
mousecapturer = GlassWidget(**kwargs)
# if mousecapturer has been overloaded with something
# other than a GlassWidget (as in IE override)
# just return None
elif not isinstance(mousecapturer, GlassWidget):
return None
return mousecapturer
def show(mousetarget, **kwargs):
global mousecapturer
mc = getMouseCapturer(**kwargs)
mc.mousetarget = mousetarget
if isinstance(mousetarget, MouseHandler):
mc.mousehandler = True
mc.show()
def hide():
global mousecapturer
mousecapturer.hide()
class GlassWidget(Widget, MouseHandler):
def __init__(self, **kwargs):
self.glassListeners = []
self.showing = False
self.mousehandler = False
if not 'StyleName' in kwargs:
kwargs['StyleName'] = "gwt-GlassWidget"
if 'Element' in kwargs:
element = kwargs.pop('Element')
else:
element = DOM.createDiv()
self.setElement(element)
Widget.__init__(self, **kwargs)
MouseHandler.__init__(self)
self.setzIndex(1000000)
self.addMouseListener(self)
def addGlassListener(self, listener):
self.glassListeners.append(listener)
def hide(self, autoClosed=False):
self.showing = False
self.hideGlass()
DOM.removeEventPreview(self)
RootPanel().remove(self)
self.onHideImpl(self.getElement())
DOM.releaseCapture(self.getElement())
for listener in self.glassListeners:
if hasattr(listener, 'onGlassHide'):
listener.onGlassHide(self, autoClosed)
else:
listener(self, autoClosed)
def _event_targets_popup(self, event):
target = DOM.eventGetTarget(event)
return target and DOM.isOrHasChild(self.getElement(), target)
def onEventPreview(self, event):
etype = DOM.eventGetType(event)
if etype == "mousedown" or etype == "blur":
if DOM.getCaptureElement() is not None:
return True
elif etype == "mouseup" or etype == "click" or \
etype == "mousemove" or etype == "dblclick":
if DOM.getCaptureElement() is not None:
return True
return self._event_targets_popup(event)
def onHideImpl(self, popup):
pass
def onShowImpl(self, popup):
pass
def removeGlassListener(self, listener):
self.glassListeners.remove(listener)
def setGlassPosition(self):
top = Window.getScrollTop()
left = Window.getScrollLeft()
height = Window.getClientHeight()
width = Window.getClientWidth()
el = self.getElement()
DOM.setStyleAttribute(el, "position", "absolute")
DOM.setStyleAttribute(el, "left",
"%s" % left if left == 0 else "%spx" % left)
DOM.setStyleAttribute(el, "top",
"%s" % top if top == 0 else "%spx" % top)
DOM.setStyleAttribute(el, "height", "%spx" % (top + height))
DOM.setStyleAttribute(el, "width", "%spx" % (left + width))
# under pyjd glasswidget cannot be transparent,
# otherwise it drops the mousecapture, so we have
# to give it a 1% opaque background color
if pyjd.is_desktop:
# pyjd uses IE style opacity
DOM.setStyleAttribute(el, "filter", "alpha(opacity=1)")
# this is the Moz form of transparency
DOM.setStyleAttribute(el, "background", "rgba(255,255,255,0.1)")
def showGlass(self):
self.setGlassPosition()
doc().body.appendChild(self.getElement())
Window.addWindowResizeListener(self)
def hideGlass(self):
Window.removeWindowResizeListener(self)
doc().body.removeChild(self.getElement())
def onWindowResized(self, width, height):
self.setGlassPosition()
def show(self):
if self.showing:
return
self.showing = True
self.showGlass()
DOM.addEventPreview(self)
RootPanel().add(self)
self.onShowImpl(self.getElement())
DOM.setCapture(self.getElement())
def adjustMousePos(self, x, y):
x += self.getAbsoluteLeft() - self.mousetarget.getAbsoluteLeft()
y += self.getAbsoluteTop() - self.mousetarget.getAbsoluteTop()
return x, y
def onMouseDown(self, sender, x, y):
x, y = self.adjustMousePos(x, y)
if self.mousehandler:
self.mousetarget.onBrowserEvent(DOM.eventGetCurrentEvent())
else:
self.mousetarget.onMouseDown(sender, x, y)
def onMouseEnter(self, sender):
self.mousetarget.onMouseGlassEnter(sender)
def onMouseLeave(self, sender):
self.mousetarget.onMouseGlassLeave(sender)
def onMouseMove(self, sender, x, y):
x, y = self.adjustMousePos(x, y)
if self.mousehandler:
self.mousetarget.onBrowserEvent(DOM.eventGetCurrentEvent())
else:
self.mousetarget.onMouseMove(sender, x, y)
def onMouseUp(self, sender, x, y):
x, y = self.adjustMousePos(x, y)
if self.mousehandler:
self.mousetarget.onBrowserEvent(DOM.eventGetCurrentEvent())
else:
self.mousetarget.onMouseUp(sender, x, y)
Factory.registerClass('pyjamas.ui.GlassWidget', 'GlassWidget', GlassWidget)
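# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original file): roughly how
# the module-level show()/hide() helpers above are meant to be driven from a
# widget that runs a drag operation. The Dragger class and the exact import
# paths are assumptions; only the GlassWidget show()/hide() calls come from
# this module.
#
#   from pyjamas.ui.Widget import Widget
#   from pyjamas.ui.MouseListener import MouseHandler
#   from pyjamas.ui import GlassWidget
#
#   class Dragger(Widget, MouseHandler):
#       def __init__(self, **kwargs):
#           Widget.__init__(self, **kwargs)
#           MouseHandler.__init__(self)
#           self.addMouseListener(self)
#           self.dragging = False
#
#       def onMouseDown(self, sender, x, y):
#           self.dragging = True
#           GlassWidget.show(self)    # glass covers the page; events reach us
#
#       def onMouseMove(self, sender, x, y):
#           if self.dragging:
#               pass                  # update the drag position here
#
#       def onMouseUp(self, sender, x, y):
#           if self.dragging:
#               self.dragging = False
#               GlassWidget.hide()    # drop the glass and the mouse capture
# ---------------------------------------------------------------------------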
|
Hasimir/pyjs
|
pyjswidgets/pyjamas/ui/GlassWidget.py
|
Python
|
apache-2.0
| 6,537
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/structured_output | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
class TestModule(tf.Module):
# The fNNNN name prefixes in this file are such that the sorted order of the
  # functions in the resulting MLIR output matches the order in the source file,
  # allowing us to conveniently co-locate the CHECKs with the code they are
# checking.
#
# Note: CHECK-DAG doesn't work with CHECK-SAME/CHECK-NEXT.
# Check index paths for results.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = []})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0000_single_return"]
@tf.function(input_signature=[])
def f0000_single_return(self):
return tf.constant(1.0, shape=[1])
# Check index paths for results with multiple return values.
# Note that semantically in Python, multiple return values are equivalent
# to returning a tuple/list.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0001_multiple_results_no_punctuation"]
@tf.function(input_signature=[])
def f0001_multiple_results_no_punctuation(self):
return tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2])
# Check index paths for results written explicitly with parentheses.
# This is semantically equivalent to the earlier test without parentheses,
# but this test serves as documentation of this behavior for the purposes
# of tf_saved_model users.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0002_multiple_results_parentheses"]
@tf.function(input_signature=[])
def f0002_multiple_results_parentheses(self):
return (tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2]))
# Check index paths for results written explicitly with brackets.
# This is semantically equivalent to the earlier test without parentheses,
# but this test serves as documentation of this behavior for the purposes
# of tf_saved_model users.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0003_multiple_results_brackets"]
@tf.function(input_signature=[])
def f0003_multiple_results_brackets(self):
return [tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2])]
# Check index paths for lists.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0, 0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [0, 1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0004_list_2_elements"]
@tf.function(input_signature=[])
def f0004_list_2_elements(self):
return [[tf.constant(1.0, shape=[1]), tf.constant(1.0, shape=[2])]]
# Check index paths for dicts.
# Keys are linearized in sorted order, matching `tf.nest.flatten`.
# More thorough testing of this is in structured_input.py. The underlying code
# path for linearization is shared, so no need to replicate that testing here.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = ["x"]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = ["y"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0005_dict_2_keys"]
@tf.function(input_signature=[])
def f0005_dict_2_keys(self):
return {
'x': tf.constant(1.0, shape=[1]),
'y': tf.constant(1.0, shape=[2]),
}
# Check index paths for outputs are correctly handled in the presence of
# multiple return statements.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<f32> {tf._user_specified_name = "x", tf_saved_model.index_path = [0]}
# CHECK-SAME: ) -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = ["x"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0006_multiple_return_statements"]
@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
def f0006_multiple_return_statements(self, x):
if x > 3.:
return {'x': tf.constant(1.0, shape=[1])}
else:
return {'x': tf.constant(1.0, shape=[1])}
if __name__ == '__main__':
common.do_test(TestModule)
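# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original test): the index paths checked
# above follow the linearization performed by tf.nest.flatten, with dict keys
# visited in sorted order. A quick way to observe that ordering:
#
#   import tensorflow.compat.v2 as tf
#   structured = {'y': tf.constant(1.0, shape=[2]),
#                 'x': tf.constant(1.0, shape=[1])}
#   flat = tf.nest.flatten(structured)
#   # flat[0] is the 'x' tensor and flat[1] is the 'y' tensor: keys are
#   # flattened in sorted order, regardless of insertion order.
# ---------------------------------------------------------------------------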
|
karllessard/tensorflow
|
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/structured_output.py
|
Python
|
apache-2.0
| 5,614
|
# Natural Language Toolkit: Interface to Weka Classifiers
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: naivebayes.py 2063 2004-07-17 21:02:24Z edloper $
"""
Classifiers that make use of the external 'Weka' package.
"""
import time, tempfile, os, os.path, subprocess, re
from api import *
from nltk.probability import *
from nltk.internals import java, config_java
_weka_classpath = None
_weka_search = ['.',
'/usr/share/weka',
'/usr/local/share/weka',
'/usr/lib/weka',
'/usr/local/lib/weka',]
def config_weka(classpath=None):
global _weka_classpath
# Make sure java's configured first.
config_java()
if classpath is not None:
_weka_classpath = classpath
if _weka_classpath is None:
        # Copy the default search path so repeated calls do not mutate the
        # module-level list.
        searchpath = list(_weka_search)
        if 'WEKAHOME' in os.environ:
            searchpath.insert(0, os.environ['WEKAHOME'])
for path in searchpath:
if os.path.exists(os.path.join(path, 'weka.jar')):
_weka_classpath = os.path.join(path, 'weka.jar')
print '[Found Weka: %s]' % _weka_classpath
if _weka_classpath is None:
raise LookupError('Unable to find weka.jar! Use config_weka() '
'or set the WEKAHOME environment variable. '
'For more information about Weka, please see '
'http://www.cs.waikato.ac.nz/ml/weka/')
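# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): if weka.jar is not on one
# of the default search paths, callers can point config_weka() at it directly
# (the path below is hypothetical):
#
#   config_weka(classpath='/opt/weka/weka.jar')
#
# Setting the WEKAHOME environment variable to the directory containing
# weka.jar before the first call has the same effect.
# ---------------------------------------------------------------------------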
class WekaClassifier(ClassifierI):
def __init__(self, formatter, model_filename):
self._formatter = formatter
self._model = model_filename
def batch_prob_classify(self, featuresets):
return self._batch_classify(featuresets, ['-p', '0', '-distribution'])
def batch_classify(self, featuresets):
return self._batch_classify(featuresets, ['-p', '0'])
def _batch_classify(self, featuresets, options):
# Make sure we can find java & weka.
config_weka()
temp_dir = tempfile.mkdtemp()
try:
# Write the test data file.
test_filename = os.path.join(temp_dir, 'test.arff')
self._formatter.write(test_filename, featuresets)
# Call weka to classify the data.
cmd = ['weka.classifiers.bayes.NaiveBayes',
'-l', self._model, '-T', test_filename] + options
(stdout, stderr) = java(cmd, classpath=_weka_classpath,
stdout=subprocess.PIPE)
# Parse weka's output.
return self.parse_weka_output(stdout.split('\n'))
finally:
for f in os.listdir(temp_dir):
os.remove(os.path.join(temp_dir, f))
os.rmdir(temp_dir)
def parse_weka_distribution(self, s):
probs = [float(v) for v in re.split('[*,]+', s) if v.strip()]
probs = dict(zip(self._formatter.labels(), probs))
return DictionaryProbDist(probs)
def parse_weka_output(self, lines):
if lines[0].split() == ['inst#', 'actual', 'predicted',
'error', 'prediction']:
return [line.split()[2].split(':')[1]
for line in lines[1:] if line.strip()]
elif lines[0].split() == ['inst#', 'actual', 'predicted',
'error', 'distribution']:
return [self.parse_weka_distribution(line.split()[-1])
for line in lines[1:] if line.strip()]
else:
for line in lines[:10]: print line
raise ValueError('Unhandled output format -- your version '
'of weka may not be supported.\n'
' Header: %s' % lines[0])
@staticmethod
def train(model_filename, featuresets, quiet=True):
# Make sure we can find java & weka.
config_weka()
# Build an ARFF formatter.
formatter = ARFF_Formatter.from_train(featuresets)
temp_dir = tempfile.mkdtemp()
try:
# Write the training data file.
train_filename = os.path.join(temp_dir, 'train.arff')
formatter.write(train_filename, featuresets)
# Train the weka model.
cmd = ['weka.classifiers.bayes.NaiveBayes',
'-d', model_filename, '-t', train_filename]
if quiet: stdout = subprocess.PIPE
else: stdout = None
java(cmd, classpath=_weka_classpath, stdout=stdout)
# Return the new classifier.
return WekaClassifier(formatter, model_filename)
finally:
for f in os.listdir(temp_dir):
os.remove(os.path.join(temp_dir, f))
os.rmdir(temp_dir)
class ARFF_Formatter:
"""
Converts featuresets and labeled featuresets to ARFF-formatted
strings, appropriate for input into Weka.
"""
def __init__(self, labels, features):
"""
@param labels: A list of all labels that can be generated.
@param features: A list of feature specifications, where
            each feature specification is a tuple (fname, ftype),
and ftype is an ARFF type string such as NUMERIC or
STRING.
"""
self._labels = labels
self._features = features
def format(self, tokens):
return self.header_section() + self.data_section(tokens)
def labels(self):
return list(self._labels)
def write(self, filename, tokens):
f = open(filename, 'w')
f.write(self.format(tokens))
f.close()
@staticmethod
def from_train(tokens):
# Find the set of all attested labels.
labels = set(label for (tok,label) in tokens)
# Determine the types of all features.
features = {}
for tok, label in tokens:
for (fname, fval) in tok.items():
if issubclass(type(fval), bool):
ftype = '{True, False}'
elif issubclass(type(fval), (int, float, long, bool)):
ftype = 'NUMERIC'
elif issubclass(type(fval), basestring):
ftype = 'STRING'
elif fval is None:
continue # can't tell the type.
else:
                    raise ValueError('Unsupported value type %r' % fval)
if features.get(fname, ftype) != ftype:
raise ValueError('Inconsistent type for %s' % fname)
features[fname] = ftype
features = sorted(features.items())
return ARFF_Formatter(labels, features)
def header_section(self):
# Header comment.
s = ('% Weka ARFF file\n' +
'% Generated automatically by NLTK\n' +
'%% %s\n\n' % time.ctime())
# Relation name
s += '@RELATION rel\n\n'
# Input attribute specifications
for fname, ftype in self._features:
s += '@ATTRIBUTE %-30r %s\n' % (fname, ftype)
# Label attribute specification
s += '@ATTRIBUTE %-30r {%s}\n' % ('-label-', ','.join(self._labels))
return s
def data_section(self, tokens, labeled=None):
"""
@param labeled: Indicates whether the given tokens are labeled
or not. If C{None}, then the tokens will be assumed to be
labeled if the first token's value is a tuple or list.
"""
# Check if the tokens are labeled or unlabeled. If unlabeled,
# then use 'None'
if labeled is None:
labeled = tokens and isinstance(tokens[0], (tuple, list))
if not labeled:
tokens = [(tok, None) for tok in tokens]
# Data section
s = '\n@DATA\n'
for (tok, label) in tokens:
for fname, ftype in self._features:
s += '%s,' % self._fmt_arff_val(tok.get(fname))
s += '%s\n' % self._fmt_arff_val(label)
return s
def _fmt_arff_val(self, fval):
if fval is None:
return '?'
elif isinstance(fval, (bool, int, long)):
return '%s' % fval
elif isinstance(fval, float):
return '%r' % fval
else:
return '%r' % fval
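# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): a rough picture of what the
# formatter emits for a tiny labeled featureset (the feature names and values
# here are hypothetical):
#
#   toks = [({'last_letter': 'a', 'length': 5}, 'female'),
#           ({'last_letter': 'k', 'length': 4}, 'male')]
#   fmt = ARFF_Formatter.from_train(toks)
#   print fmt.format(toks)
#
# This prints an ARFF header with one @ATTRIBUTE line per feature (typed
# STRING or NUMERIC as inferred in from_train()), a '-label-' attribute
# listing the labels, and a @DATA section with one comma-separated row per
# token, ending in its label.
# ---------------------------------------------------------------------------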
if __name__ == '__main__':
    from nltk.classify.util import names_demo, binary_names_demo_features
    def make_classifier(featuresets):
        return WekaClassifier.train('/tmp/name.model', featuresets)
    classifier = names_demo(make_classifier, binary_names_demo_features)
|
hectormartinez/rougexstem
|
taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/classify/weka.py
|
Python
|
apache-2.0
| 8,796
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PipelineRunner, an abstract base runner object."""
from __future__ import absolute_import
import logging
import os
import shelve
import shutil
import tempfile
__all__ = ['PipelineRunner', 'PipelineState', 'PipelineResult']
def _get_runner_map(runner_names, module_path):
"""Create a map of runner name in lower case to full import path to the
runner class.
"""
return {runner_name.lower(): module_path + runner_name
for runner_name in runner_names}
_DIRECT_RUNNER_PATH = 'apache_beam.runners.direct.direct_runner.'
_DATAFLOW_RUNNER_PATH = (
'apache_beam.runners.dataflow.dataflow_runner.')
_TEST_RUNNER_PATH = 'apache_beam.runners.test.'
_KNOWN_DIRECT_RUNNERS = ('DirectRunner', 'EagerRunner')
_KNOWN_DATAFLOW_RUNNERS = ('DataflowRunner',)
_KNOWN_TEST_RUNNERS = ('TestDataflowRunner',)
_RUNNER_MAP = {}
_RUNNER_MAP.update(_get_runner_map(_KNOWN_DIRECT_RUNNERS,
_DIRECT_RUNNER_PATH))
_RUNNER_MAP.update(_get_runner_map(_KNOWN_DATAFLOW_RUNNERS,
_DATAFLOW_RUNNER_PATH))
_RUNNER_MAP.update(_get_runner_map(_KNOWN_TEST_RUNNERS,
_TEST_RUNNER_PATH))
_ALL_KNOWN_RUNNERS = (
_KNOWN_DIRECT_RUNNERS + _KNOWN_DATAFLOW_RUNNERS + _KNOWN_TEST_RUNNERS)
def create_runner(runner_name):
"""For internal use only; no backwards-compatibility guarantees.
Creates a runner instance from a runner class name.
Args:
runner_name: Name of the pipeline runner. Possible values are:
DirectRunner, DataflowRunner and TestDataflowRunner.
Returns:
A runner object.
Raises:
RuntimeError: if an invalid runner name is used.
"""
# Get the qualified runner name by using the lower case runner name. If that
# fails try appending the name with 'runner' and check if it matches.
# If that also fails, use the given runner name as is.
runner_name = _RUNNER_MAP.get(
runner_name.lower(),
_RUNNER_MAP.get(runner_name.lower() + 'runner', runner_name))
if '.' in runner_name:
module, runner = runner_name.rsplit('.', 1)
try:
return getattr(__import__(module, {}, {}, [runner], -1), runner)()
except ImportError:
if runner_name in _KNOWN_DATAFLOW_RUNNERS:
raise ImportError(
'Google Cloud Dataflow runner not available, '
'please install apache_beam[gcp]')
else:
raise
else:
raise ValueError(
'Unexpected pipeline runner: %s. Valid values are %s '
'or the fully qualified name of a PipelineRunner subclass.' % (
runner_name, ', '.join(_ALL_KNOWN_RUNNERS)))
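# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): create_runner() accepts the
# short names listed in _ALL_KNOWN_RUNNERS as well as fully qualified class
# paths, for example:
#
#   runner = create_runner('DirectRunner')
#   # equivalent, using the fully qualified name:
#   runner = create_runner(
#       'apache_beam.runners.direct.direct_runner.DirectRunner')
#
# A name that is neither known nor dotted raises ValueError, as above.
# ---------------------------------------------------------------------------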
class PipelineRunner(object):
"""A runner of a pipeline object.
The base runner provides a run() method for visiting every node in the
pipeline's DAG and executing the transforms computing the PValue in the node.
A custom runner will typically provide implementations for some of the
transform methods (ParDo, GroupByKey, Create, etc.). It may also
provide a new implementation for clear_pvalue(), which is used to wipe out
materialized values in order to reduce footprint.
"""
def run(self, pipeline):
"""Execute the entire pipeline or the sub-DAG reachable from a node."""
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.pipeline import PipelineVisitor
class RunVisitor(PipelineVisitor):
def __init__(self, runner):
self.runner = runner
def visit_transform(self, transform_node):
try:
self.runner.run_transform(transform_node)
except:
logging.error('Error while visiting %s', transform_node.full_label)
raise
pipeline.visit(RunVisitor(self))
def apply(self, transform, input):
"""Runner callback for a pipeline.apply call.
Args:
transform: the transform to apply.
input: transform's input (typically a PCollection).
A concrete implementation of the Runner class may want to do custom
pipeline construction for a given transform. To override the behavior
for a transform class Xyz, implement an apply_Xyz method with this same
signature.
"""
for cls in transform.__class__.mro():
m = getattr(self, 'apply_%s' % cls.__name__, None)
if m:
return m(transform, input)
raise NotImplementedError(
'Execution of [%s] not implemented in runner %s.' % (transform, self))
def apply_PTransform(self, transform, input):
# The base case of apply is to call the transform's expand.
return transform.expand(input)
def run_transform(self, transform_node):
"""Runner callback for a pipeline.run call.
Args:
transform_node: transform node for the transform to run.
A concrete implementation of the Runner class must implement run_Abc for
some class Abc in the method resolution order for every non-composite
transform Xyz in the pipeline.
"""
for cls in transform_node.transform.__class__.mro():
m = getattr(self, 'run_%s' % cls.__name__, None)
if m:
return m(transform_node)
raise NotImplementedError(
'Execution of [%s] not implemented in runner %s.' % (
transform_node.transform, self))
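# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): run_transform() and apply()
# above dispatch on the transform class's method resolution order, so a
# concrete runner only needs methods named run_<TransformClass> (or
# apply_<TransformClass>). A hypothetical minimal runner:
#
#   class ToyRunner(PipelineRunner):
#       def run_ParDo(self, transform_node):
#           # handle ParDo transform nodes here
#           pass
#       def run_GroupByKey(self, transform_node):
#           # handle GroupByKey transform nodes here
#           pass
#
# Transforms whose class (and base classes) have no matching run_* method fall
# through to the NotImplementedError raised above.
# ---------------------------------------------------------------------------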
class PValueCache(object):
"""For internal use only; no backwards-compatibility guarantees.
Local cache for arbitrary information computed for PValue objects."""
def __init__(self, use_disk_backed_cache=False):
# Cache of values computed while a runner executes a pipeline. This is a
# dictionary of PValues and their computed values. Note that in principle
# the runner could contain PValues from several pipelines without clashes
# since a PValue is associated with one and only one pipeline. The keys of
# the dictionary are tuple of PValue instance addresses obtained using id()
# and tag names converted to strings.
self._use_disk_backed_cache = use_disk_backed_cache
if use_disk_backed_cache:
self._tempdir = tempfile.mkdtemp()
self._cache = shelve.open(os.path.join(self._tempdir, 'shelve'))
else:
self._cache = {}
def __del__(self):
if self._use_disk_backed_cache:
self._cache.close()
shutil.rmtree(self._tempdir)
def __len__(self):
return len(self._cache)
def to_cache_key(self, transform, tag):
return transform.full_label, tag
def _ensure_pvalue_has_real_producer(self, pvalue):
"""Ensure the passed-in PValue has the real_producer attribute.
Args:
pvalue: A PValue instance whose cached value is requested.
During the runner's execution only the results of the primitive transforms
are cached. Whenever we are looking for a PValue that is the output of a
composite transform we need to find the output of its rightmost transform
part.
"""
if not hasattr(pvalue, 'real_producer'):
real_producer = pvalue.producer
while real_producer.parts:
real_producer = real_producer.parts[-1]
pvalue.real_producer = real_producer
def is_cached(self, pobj):
from apache_beam.pipeline import AppliedPTransform
if isinstance(pobj, AppliedPTransform):
transform = pobj
tag = None
else:
self._ensure_pvalue_has_real_producer(pobj)
transform = pobj.real_producer
tag = pobj.tag
return self.to_cache_key(transform, tag) in self._cache
def cache_output(self, transform, tag_or_value, value=None):
if value is None:
value = tag_or_value
tag = None
else:
tag = tag_or_value
self._cache[
self.to_cache_key(transform, tag)] = [value, transform.refcounts[tag]]
def get_pvalue(self, pvalue):
"""Gets the value associated with a PValue from the cache."""
self._ensure_pvalue_has_real_producer(pvalue)
try:
value_with_refcount = self._cache[self.key(pvalue)]
value_with_refcount[1] -= 1
logging.debug('PValue computed by %s (tag %s): refcount: %d => %d',
pvalue.real_producer.full_label, self.key(pvalue)[1],
value_with_refcount[1] + 1, value_with_refcount[1])
if value_with_refcount[1] <= 0:
self.clear_pvalue(pvalue)
return value_with_refcount[0]
except KeyError:
if (pvalue.tag is not None
and self.to_cache_key(pvalue.real_producer, None) in self._cache):
# This is an undeclared, empty output of a DoFn executed
# in the local runner before this output was referenced.
return []
else:
raise
def get_unwindowed_pvalue(self, pvalue):
return [v.value for v in self.get_pvalue(pvalue)]
def clear_pvalue(self, pvalue):
"""Removes a PValue from the cache."""
if self.is_cached(pvalue):
del self._cache[self.key(pvalue)]
def key(self, pobj):
self._ensure_pvalue_has_real_producer(pobj)
return self.to_cache_key(pobj.real_producer, pobj.tag)
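# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): the cache stores each value
# alongside a reference count taken from transform.refcounts[tag]; every
# get_pvalue() call decrements that count and evicts the entry once all
# consumers have read it. Schematically (the names are hypothetical):
#
#   cache = PValueCache()
#   cache.cache_output(applied_transform, pcoll_value)   # tag defaults to None
#   ...
#   value = cache.get_pvalue(pcoll)   # decrements the refcount
#   # when the refcount drops to zero, the entry is removed via clear_pvalue()
# ---------------------------------------------------------------------------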
class PipelineState(object):
"""State of the Pipeline, as returned by PipelineResult.state.
This is meant to be the union of all the states any runner can put a
pipeline in. Currently, it represents the values of the dataflow
API JobState enum.
"""
UNKNOWN = 'UNKNOWN' # not specified
STOPPED = 'STOPPED' # paused or not yet started
RUNNING = 'RUNNING' # currently running
DONE = 'DONE' # successfully completed (terminal state)
FAILED = 'FAILED' # failed (terminal state)
CANCELLED = 'CANCELLED' # explicitly cancelled (terminal state)
UPDATED = 'UPDATED' # replaced by another job (terminal state)
DRAINING = 'DRAINING' # still processing, no longer reading data
DRAINED = 'DRAINED' # draining completed (terminal state)
class PipelineResult(object):
"""A PipelineResult provides access to info about a pipeline."""
def __init__(self, state):
self._state = state
@property
def state(self):
"""Return the current state of the pipeline execution."""
return self._state
def wait_until_finish(self, duration=None):
"""Waits until the pipeline finishes and returns the final status.
Args:
      duration: The time to wait (in milliseconds) for the job to finish. If it
        is set to None, it will wait indefinitely until the job is finished.
Raises:
IOError: If there is a persistent problem getting job information.
NotImplementedError: If the runner does not support this operation.
Returns:
The final state of the pipeline, or None on timeout.
"""
raise NotImplementedError
def cancel(self):
"""Cancels the pipeline execution.
Raises:
IOError: If there is a persistent problem getting job information.
NotImplementedError: If the runner does not support this operation.
Returns:
The final state of the pipeline.
"""
raise NotImplementedError
def metrics(self):
"""Returns MetricsResult object to query metrics from the runner.
Raises:
NotImplementedError: If the runner does not support this operation.
"""
raise NotImplementedError
# pylint: disable=unused-argument
def aggregated_values(self, aggregator_or_name):
"""Return a dict of step names to values of the Aggregator."""
logging.warn('%s does not implement aggregated_values',
self.__class__.__name__)
return {}
|
dhalperi/beam
|
sdks/python/apache_beam/runners/runner.py
|
Python
|
apache-2.0
| 12,102
|
from collections import namedtuple
# Structure returned by DatabaseIntrospection.get_table_list()
TableInfo = namedtuple('TableInfo', ['name', 'type'])
# Structure returned by the DB-API cursor.description interface (PEP 249)
FieldInfo = namedtuple('FieldInfo', 'name type_code display_size internal_size precision scale null_ok default')
class BaseDatabaseIntrospection:
"""
This class encapsulates all backend-specific introspection utilities
"""
data_types_reverse = {}
def __init__(self, connection):
self.connection = connection
def get_field_type(self, data_type, description):
"""Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example."""
return self.data_types_reverse[data_type]
def table_name_converter(self, name):
"""Apply a conversion to the name for the purposes of comparison.
The default table name converter is for case sensitive comparison.
"""
return name
def column_name_converter(self, name):
"""
Apply a conversion to the column name for the purposes of comparison.
Uses table_name_converter() by default.
"""
return self.table_name_converter(name)
def table_names(self, cursor=None, include_views=False):
"""
Returns a list of names of all tables that exist in the database.
The returned table list is sorted by Python's default sorting. We
        do NOT use the database's ORDER BY here to avoid subtle differences
in sorting order between databases.
"""
def get_names(cursor):
return sorted(ti.name for ti in self.get_table_list(cursor)
if include_views or ti.type == 't')
if cursor is None:
with self.connection.cursor() as cursor:
return get_names(cursor)
return get_names(cursor)
def get_table_list(self, cursor):
"""
Returns an unsorted list of TableInfo named tuples of all tables and
views that exist in the database.
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_table_list() method')
def django_table_names(self, only_existing=False, include_views=True):
"""
Returns a list of all table names that have associated Django models and
are in INSTALLED_APPS.
If only_existing is True, the resulting list will only include the tables
that actually exist in the database.
"""
from django.apps import apps
from django.db import router
tables = set()
for app_config in apps.get_app_configs():
for model in router.get_migratable_models(app_config, self.connection.alias):
if not model._meta.managed:
continue
tables.add(model._meta.db_table)
tables.update(
f.m2m_db_table() for f in model._meta.local_many_to_many
if f.remote_field.through._meta.managed
)
tables = list(tables)
if only_existing:
existing_tables = self.table_names(include_views=include_views)
tables = [
t
for t in tables
if self.table_name_converter(t) in existing_tables
]
return tables
def installed_models(self, tables):
"Returns a set of all models represented by the provided list of table names."
from django.apps import apps
from django.db import router
all_models = []
for app_config in apps.get_app_configs():
all_models.extend(router.get_migratable_models(app_config, self.connection.alias))
tables = list(map(self.table_name_converter, tables))
return {
m for m in all_models
if self.table_name_converter(m._meta.db_table) in tables
}
def sequence_list(self):
"Returns a list of information about all DB sequences for all models in all apps."
from django.apps import apps
from django.db import models, router
sequence_list = []
for app_config in apps.get_app_configs():
for model in router.get_migratable_models(app_config, self.connection.alias):
if not model._meta.managed:
continue
if model._meta.swapped:
continue
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
sequence_list.append({'table': model._meta.db_table, 'column': f.column})
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.local_many_to_many:
# If this is an m2m using an intermediate table,
# we don't need to reset the sequence.
if f.remote_field.through is None:
sequence_list.append({'table': f.m2m_db_table(), 'column': None})
return sequence_list
def get_key_columns(self, cursor, table_name):
"""
Backends can override this to return a list of (column_name, referenced_table_name,
referenced_column_name) for all key columns in given table.
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_key_columns() method')
def get_primary_key_column(self, cursor, table_name):
"""
Returns the name of the primary key column for the given table.
"""
for constraint in self.get_constraints(cursor, table_name).values():
if constraint['primary_key']:
return constraint['columns'][0]
return None
def get_indexes(self, cursor, table_name):
"""
Deprecated in Django 1.11, use get_constraints instead.
Returns a dictionary of indexed fieldname -> infodict for the given
table, where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
Only single-column indexes are introspected.
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_indexes() method')
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index)
across one or more columns.
Returns a dict mapping constraint names to their attributes,
where attributes is a dict with keys:
* columns: List of columns this covers
* primary_key: True if primary key, False otherwise
* unique: True if this is a unique constraint, False otherwise
* foreign_key: (table, column) of target, or None
* check: True if check constraint, False otherwise
* index: True if index, False otherwise.
* orders: The order (ASC/DESC) defined for the columns of indexes
* type: The type of the index (btree, hash, etc.)
Some backends may return special constraint names that don't exist
if they don't name constraints of a certain type (e.g. SQLite)
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_constraints() method')
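# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): a concrete backend fills in
# the NotImplementedError hooks above. A minimal, hypothetical SQLite-flavoured
# subclass could implement get_table_list() like this:
#
#   class MyBackendIntrospection(BaseDatabaseIntrospection):
#       def get_table_list(self, cursor):
#           cursor.execute(
#               "SELECT name, type FROM sqlite_master "
#               "WHERE type IN ('table', 'view')")
#           return [TableInfo(row[0], 't' if row[1] == 'table' else 'v')
#                   for row in cursor.fetchall()]
#
# With just this hook, table_names(), django_table_names() and
# installed_models() work unchanged; key and index introspection additionally
# needs get_constraints().
# ---------------------------------------------------------------------------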
|
mattseymour/django
|
django/db/backends/base/introspection.py
|
Python
|
bsd-3-clause
| 7,643
|
"""
GravMag: Use the DipoleMagDir class to estimate the magnetization direction
of dipoles with known centers
"""
import numpy
from fatiando import mesher, gridder
from fatiando.utils import ang2vec, vec2ang, contaminate
from fatiando.gravmag import sphere
from fatiando.vis import mpl
from fatiando.gravmag.magdir import DipoleMagDir
from fatiando.constants import CM
# Make noise-corrupted synthetic data
inc, dec = -10.0, -15.0 # inclination and declination of the Geomagnetic Field
model = [mesher.Sphere(3000, 3000, 1000, 1000,
{'magnetization': ang2vec(6.0, -20.0, -10.0)}),
mesher.Sphere(7000, 7000, 1000, 1000,
{'magnetization': ang2vec(10.0, 3.0, -67.0)})]
area = (0, 10000, 0, 10000)
x, y, z = gridder.scatter(area, 1000, z=-150, seed=0)
tf = contaminate(sphere.tf(x, y, z, model, inc, dec), 5.0, seed=0)
# Give the centers of the dipoles
centers = [[3000, 3000, 1000], [7000, 7000, 1000]]
# Estimate the magnetization vectors
solver = DipoleMagDir(x, y, z, tf, inc, dec, centers).fit()
# Print the estimated dipole moments, inclinations and declinations
print 'Estimated magnetization (intensity, inclination, declination)'
for e in solver.estimate_:
print e
# Plot the fit and the normalized histogram of the residuals
mpl.figure(figsize=(14, 5))
mpl.subplot(1, 2, 1)
mpl.title("Total Field Anomaly (nT)", fontsize=14)
mpl.axis('scaled')
nlevels = mpl.contour(y, x, tf, (50, 50), 15, interp=True, color='r',
label='Observed', linewidth=2.0)
mpl.contour(y, x, solver.predicted(), (50, 50), nlevels, interp=True,
color='b', label='Predicted', style='dashed', linewidth=2.0)
mpl.legend(loc='upper left', shadow=True, prop={'size': 13})
mpl.xlabel('East y (m)', fontsize=14)
mpl.ylabel('North x (m)', fontsize=14)
mpl.subplot(1, 2, 2)
residuals_mean = numpy.mean(solver.residuals())
residuals_std = numpy.std(solver.residuals())
# Each residual is subtracted from the mean and the resulting
# difference is divided by the standard deviation
s = (solver.residuals() - residuals_mean) / residuals_std
mpl.hist(s, bins=21, range=None, normed=True, weights=None,
cumulative=False, bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False,
color=None, label=None)
mpl.xlim(-4, 4)
mpl.title("mean = %.3f std = %.3f" % (residuals_mean, residuals_std),
fontsize=14)
mpl.ylabel("P(z)", fontsize=14)
mpl.xlabel("z", fontsize=14)
mpl.show()
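# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original script): for comparison with the
# estimates printed above, the true (intensity, inclination, declination)
# triplets can be recovered from the model's magnetization vectors with
# vec2ang, the inverse of ang2vec (assuming each sphere stores the vector in
# its 'magnetization' property, as set up at the top of this script):
#
#   print 'True magnetization (intensity, inclination, declination)'
#   for body in model:
#       print vec2ang(body.props['magnetization'])
# ---------------------------------------------------------------------------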
|
eusoubrasileiro/fatiando_seismic
|
cookbook/gravmag_magdir_dipolemagdir.py
|
Python
|
bsd-3-clause
| 2,519
|