repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
nuobit/odoo-addons | refs/heads/11.0 | sale_order_invoice_date/models/__init__.py | 2 | # Copyright NuoBiT Solutions, S.L. (<https://www.nuobit.com>)
# Eric Antones <eantones@nuobit.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from . import sale
|
a10networks/a10sdk-python | refs/heads/master | a10sdk/core/cgnv6/cgnv6_fixed_nat_alg_tftp.py | 2 | from a10sdk.common.A10BaseClass import A10BaseClass
class SamplingEnable(A10BaseClass):
    """Counter-sampling sub-object of the fixed-NAT TFTP ALG configuration.

    This class does not support CRUD operations; use the parent object.

    :param counters1: one of "all" (all), "session-created" (TFTP Client
        Sessions Created) or "placeholder-debug" (Placeholder Debug).
    :param DeviceProxy: device proxy for REST operations and session
        handling; refer to ``common/device_proxy.py``.
    """

    def __init__(self, **kwargs):
        """Set the object's defaults, then overlay any keyword arguments."""
        self.ERROR_MSG = ""
        self.b_key = "sampling-enable"
        self.DeviceProxy = ""
        self.counters1 = ""
        # Any keyword argument overrides the matching default attribute.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class Tftp(A10BaseClass):
    """Change Fixed NAT TFTP ALG Settings.

    This is the ``"PARENT"`` class for this module; it supports CRUD
    operations and inherits from ``common/A10BaseClass``.

    URL for this object::

        https://<Hostname|Ip address>//axapi/v3/cgnv6/fixed-nat/alg/tftp

    :param sampling_enable: list of counter-sampling sub-objects.
    :param DeviceProxy: device proxy for REST operations and session
        handling; refer to ``common/device_proxy.py``.
    """

    def __init__(self, **kwargs):
        """Set the object's defaults, then overlay any keyword arguments."""
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "tftp"
        self.a10_url = "/axapi/v3/cgnv6/fixed-nat/alg/tftp"
        self.DeviceProxy = ""
        self.sampling_enable = []
        # Keyword arguments override the defaults set above.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
|
liyitest/rr | refs/heads/master | openstack_dashboard/dashboards/project/access_and_security/floating_ips/tables.py | 56 | # Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django import shortcuts
from django.utils.http import urlencode
from django.utils.translation import pgettext_lazy
from django.utils.translation import string_concat # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import messages
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.usage import quotas
from openstack_dashboard.utils import filters
LOG = logging.getLogger(__name__)

# Optional policy hook: deployments may supply POLICY_CHECK_FUNCTION in
# settings; when absent, every policy check passes (permissive default).
POLICY_CHECK = getattr(settings, "POLICY_CHECK_FUNCTION", lambda p, r: True)
class AllocateIP(tables.LinkAction):
    """Table action that opens the "allocate floating IP" modal form."""

    name = "allocate"
    verbose_name = _("Allocate IP To Project")
    classes = ("ajax-modal",)
    icon = "link"
    url = "horizon:project:access_and_security:floating_ips:allocate"

    def single(self, data_table, request, *args):
        # Allocation itself happens in the modal; afterwards return to index.
        return shortcuts.redirect('horizon:project:access_and_security:index')

    def allowed(self, request, fip=None):
        usages = quotas.tenant_quota_usages(request)
        if usages['floating_ips']['available'] <= 0:
            # Quota exhausted: grey the button out and explain why, but
            # keep it visible so the user understands what is unavailable.
            if "disabled" not in self.classes:
                self.classes = list(self.classes) + ['disabled']
                self.verbose_name = string_concat(self.verbose_name, ' ',
                                                  _("(Quota exceeded)"))
        else:
            # Quota available again: restore the original label and classes.
            self.verbose_name = _("Allocate IP To Project")
            self.classes = [c for c in self.classes if c != "disabled"]
        if api.base.is_service_enabled(request, "network"):
            policy = (("network", "create_floatingip"),)
        else:
            policy = (("compute", "compute_extension:floating_ips"),
                      ("compute", "network:allocate_floating_ip"),)
        return POLICY_CHECK(policy, request)
class ReleaseIPs(tables.BatchAction):
    """Batch action that releases floating IPs back to their pool."""

    name = "release"
    classes = ('btn-danger',)
    icon = "unlink"
    help_text = _("Once a floating IP is released, there is"
                  " no guarantee the same IP can be allocated again.")

    @staticmethod
    def action_present(count):
        # Button label, pluralized by the number of selected rows.
        return ungettext_lazy(
            u"Release Floating IP",
            u"Release Floating IPs",
            count
        )

    @staticmethod
    def action_past(count):
        # Past-tense message shown once the batch has completed.
        return ungettext_lazy(
            u"Released Floating IP",
            u"Released Floating IPs",
            count
        )

    def allowed(self, request, fip=None):
        if api.base.is_service_enabled(request, "network"):
            policy = (("network", "delete_floatingip"),)
        else:
            policy = (("compute", "compute_extension:floating_ips"),
                      ("compute", "network:release_floating_ip"),)
        return POLICY_CHECK(policy, request)

    def action(self, request, obj_id):
        # Release a single IP; the batch machinery calls this per row.
        api.network.tenant_floating_ip_release(request, obj_id)
class AssociateIP(tables.LinkAction):
    """Row action linking to the "associate floating IP" modal."""

    name = "associate"
    verbose_name = _("Associate")
    url = "horizon:project:access_and_security:floating_ips:associate"
    classes = ("ajax-modal",)
    icon = "link"

    def allowed(self, request, fip):
        if api.base.is_service_enabled(request, "network"):
            policy = (("network", "update_floatingip"),)
        else:
            policy = (("compute", "compute_extension:floating_ips"),
                      ("compute", "network:associate_floating_ip"),)
        # Only IPs not already attached to a port may be associated.
        return not fip.port_id and POLICY_CHECK(policy, request)

    def get_link_url(self, datum):
        # Pre-select the clicked IP in the association form via querystring.
        params = urlencode({"ip_id": self.table.get_object_id(datum)})
        return "?".join([reverse(self.url), params])
class DisassociateIP(tables.Action):
    """Row action that detaches a floating IP from its current port."""

    name = "disassociate"
    verbose_name = _("Disassociate")
    classes = ("btn-disassociate", "btn-danger")
    icon = "unlink"

    def allowed(self, request, fip):
        if api.base.is_service_enabled(request, "network"):
            policy = (("network", "update_floatingip"),)
        else:
            policy = (("compute", "compute_extension:floating_ips"),
                      ("compute", "network:disassociate_floating_ip"),)
        # Only meaningful for IPs currently associated with a port.
        return fip.port_id and POLICY_CHECK(policy, request)

    def single(self, table, request, obj_id):
        try:
            fip = table.get_object_by_id(filters.get_int_or_uuid(obj_id))
            api.network.floating_ip_disassociate(request, fip.id)
            # Pass obj_id as a lazy %-style argument instead of eagerly
            # interpolating it into the message (logging best practice).
            LOG.info('Disassociating Floating IP "%s".', obj_id)
            messages.success(request,
                             _('Successfully disassociated Floating IP: %s')
                             % fip.ip)
        except Exception:
            exceptions.handle(request,
                              _('Unable to disassociate floating IP.'))
        return shortcuts.redirect('horizon:project:access_and_security:index')
def get_instance_info(fip):
    """Return a display string for whatever the floating IP is mapped to.

    Compute instances get "name fixed-ip", load balancers get a VIP label,
    any other mapped type falls back to the bare fixed IP, and an unmapped
    IP yields None.
    """
    itype = fip.instance_type
    if itype == 'compute':
        return (_("%(instance_name)s %(fixed_ip)s")
                % {'instance_name': getattr(fip, "instance_name", ''),
                   'fixed_ip': fip.fixed_ip})
    if itype == 'loadbalancer':
        return _("Load Balancer VIP %s") % fip.fixed_ip
    return fip.fixed_ip if itype else None
def get_instance_link(datum):
    """Return the instance detail-page URL for compute mappings, else None."""
    if datum.instance_type != 'compute':
        return None
    return reverse("horizon:project:instances:detail",
                   args=(datum.instance_id,))
# Human-readable (translated) labels for the raw status values; consumed by
# the status column of FloatingIPsTable below.
STATUS_DISPLAY_CHOICES = (
    ("active", pgettext_lazy("Current status of a Floating IP", u"Active")),
    ("down", pgettext_lazy("Current status of a Floating IP", u"Down")),
    ("error", pgettext_lazy("Current status of a Floating IP", u"Error")),
)
class FloatingIPsTable(tables.DataTable):
    """Table of the project's floating IPs with allocate/associate actions."""

    # Maps raw status value -> whether it is a stable (non-transitional)
    # state for the table's row-status machinery.
    STATUS_CHOICES = (
        ("active", True),
        ("down", True),
        ("error", False)
    )
    ip = tables.Column("ip",
                       verbose_name=_("IP Address"),
                       attrs={'data-type': "ip"})
    fixed_ip = tables.Column(get_instance_info,
                             link=get_instance_link,
                             verbose_name=_("Mapped Fixed IP Address"))
    pool = tables.Column("pool_name",
                         verbose_name=_("Pool"))
    status = tables.Column("status",
                           verbose_name=_("Status"),
                           status=True,
                           status_choices=STATUS_CHOICES,
                           display_choices=STATUS_DISPLAY_CHOICES)

    def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):
        super(FloatingIPsTable, self).__init__(
            request, data=data, needs_form_wrapper=needs_form_wrapper,
            **kwargs)
        # The status column is only shown when the network service is
        # enabled; otherwise it is removed from the rendered table.
        if not api.base.is_service_enabled(request, 'network'):
            del self.columns['status']

    def sanitize_id(self, obj_id):
        # Row IDs may be plain ints or UUIDs depending on the backend.
        return filters.get_int_or_uuid(obj_id)

    def get_object_display(self, datum):
        # Refer to a row by its IP address in messages.
        return datum.ip

    class Meta(object):
        name = "floating_ips"
        verbose_name = _("Floating IPs")
        table_actions = (AllocateIP, ReleaseIPs)
        row_actions = (AssociateIP, DisassociateIP, ReleaseIPs)
|
IronLanguages/ironpython2 | refs/heads/master | Src/StdLib/Lib/ctypes/test/test_anon.py | 4 | import unittest
from test.support import cpython_only
from ctypes import *
class AnonTest(unittest.TestCase):
    """Tests for ctypes anonymous (``_anonymous_``) struct/union fields."""

    def test_anon(self):
        # Fields of an anonymous union are reachable directly on the
        # containing structure, at the union's own offset.
        class Inner(Union):
            _fields_ = [("a", c_int),
                        ("b", c_int)]

        class Wrapper(Structure):
            _fields_ = [("x", c_int),
                        ("_", Inner),
                        ("y", c_int)]
            _anonymous_ = ["_"]

        self.assertEqual(Wrapper.a.offset, sizeof(c_int))
        self.assertEqual(Wrapper.b.offset, sizeof(c_int))
        # Inside the union itself, both members start at zero.
        self.assertEqual(Inner.a.offset, 0)
        self.assertEqual(Inner.b.offset, 0)

    def test_anon_nonseq(self):
        # TypeError: _anonymous_ must be a sequence
        with self.assertRaises(TypeError):
            type(Structure)("Name",
                            (Structure,),
                            {"_fields_": [], "_anonymous_": 42})

    def test_anon_nonmember(self):
        # AttributeError: type object 'Name' has no attribute 'x'
        with self.assertRaises(AttributeError):
            type(Structure)("Name",
                            (Structure,),
                            {"_fields_": [],
                             "_anonymous_": ["x"]})

    @cpython_only
    def test_issue31490(self):
        # There shouldn't be an assertion failure in case the class has an
        # attribute whose name is specified in _anonymous_ but not in
        # _fields_ -- just a plain AttributeError.
        with self.assertRaises(AttributeError):
            class Name(Structure):
                _fields_ = []
                _anonymous_ = ["x"]
                x = 42

    def test_nested(self):
        # Anonymous members promote transitively through nested levels.
        class Leaf(Structure):
            _fields_ = [("a", c_int)]

        class Mid(Union):
            _fields_ = [("_", Leaf),
                        ("b", c_int)]
            _anonymous_ = ["_"]

        class Top(Structure):
            _fields_ = [("x", c_int),
                        ("_", Mid),
                        ("y", c_int)]
            _anonymous_ = ["_"]

        int_size = sizeof(c_int)
        self.assertEqual(Top.x.offset, 0)
        self.assertEqual(Top.a.offset, int_size)
        self.assertEqual(Top.b.offset, int_size)
        self.assertEqual(Top._.offset, int_size)
        self.assertEqual(Top.y.offset, int_size * 2)
# Allow running this test file directly: ``python test_anon.py``.
if __name__ == "__main__":
    unittest.main()
|
mortada/tensorflow | refs/heads/master | tensorflow/contrib/quantization/python/math_ops.py | 179 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantized Math Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=unused-import,wildcard-import
|
DinoCow/airflow | refs/heads/master | airflow/providers/google/suite/operators/sheets.py | 7 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, Dict, Optional, Sequence, Union
from airflow.models import BaseOperator
from airflow.providers.google.suite.hooks.sheets import GSheetsHook
from airflow.utils.decorators import apply_defaults
class GoogleSheetsCreateSpreadsheetOperator(BaseOperator):
    """
    Create a new spreadsheet and publish its id and URL via XCom.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:GoogleSheetsCreateSpreadsheetOperator`

    :param spreadsheet: an instance of Spreadsheet
        https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet
    :type spreadsheet: Dict[str, Any]
    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    template_fields = [
        "spreadsheet",
        "impersonation_chain",
    ]

    @apply_defaults
    def __init__(
        self,
        *,
        spreadsheet: Dict[str, Any],
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Store configuration verbatim; the hook is built lazily in execute().
        self.spreadsheet = spreadsheet
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain

    def execute(self, context: Any) -> Dict[str, Any]:
        sheets_hook = GSheetsHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        created = sheets_hook.create_spreadsheet(spreadsheet=self.spreadsheet)
        # Expose the new sheet's id and URL to downstream tasks via XCom.
        self.xcom_push(context, "spreadsheet_id", created["spreadsheetId"])
        self.xcom_push(context, "spreadsheet_url", created["spreadsheetUrl"])
        return created
|
BitWriters/Zenith_project | refs/heads/master | zango/lib/python3.5/site-packages/django/db/backends/mysql/base.py | 103 | """
MySQL database backend for Django.
Requires mysqlclient: https://pypi.python.org/pypi/mysqlclient/
MySQLdb is supported for Python 2 only: http://sourceforge.net/projects/mysql-python
"""
from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
from django.conf import settings
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils import six, timezone
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
try:
import MySQLdb as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
from MySQLdb.constants import CLIENT, FIELD_TYPE # isort:skip
from MySQLdb.converters import Thing2Literal, conversions # isort:skip
# Some of these import MySQLdb, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
from .validation import DatabaseValidation # isort:skip
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1, 2, 1) or (version[:3] == (1, 2, 1) and
        (len(version) < 5 or version[3] != 'final' or version[4] < 2))):
    from django.core.exceptions import ImproperlyConfigured
    raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)

# Re-export the driver's exception classes under local names so callers can
# catch them without importing MySQLdb directly.
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError

# It's impossible to import datetime_or_None directly from MySQLdb.times
parse_datetime = conversions[FIELD_TYPE.DATETIME]
def parse_datetime_with_timezone_support(value):
    """Parse a DATETIME column value, attaching UTC when USE_TZ is active."""
    parsed = parse_datetime(value)
    # Only decorate naive datetimes, and only when time-zone support is on;
    # confirm naivety first so an aware value is never clobbered.
    if parsed is not None and settings.USE_TZ and timezone.is_naive(parsed):
        return parsed.replace(tzinfo=timezone.utc)
    return parsed
def adapt_datetime_with_timezone_support(value, conv):
    """Serialize a datetime for raw SQL.

    Equivalent to DateTimeField.get_db_prep_value; used only by raw SQL.
    Under USE_TZ the value is normalized to naive UTC before formatting.
    """
    if not settings.USE_TZ:
        return Thing2Literal(value.strftime("%Y-%m-%d %H:%M:%S.%f"), conv)
    if timezone.is_naive(value):
        warnings.warn("MySQL received a naive datetime (%s)"
                      " while time zone support is active." % value,
                      RuntimeWarning)
        value = timezone.make_aware(value, timezone.get_default_timezone())
    # Normalize to naive UTC for storage.
    value = value.astimezone(timezone.utc).replace(tzinfo=None)
    return Thing2Literal(value.strftime("%Y-%m-%d %H:%M:%S.%f"), conv)
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeText and SafeBytes as MySQLdb's type
# checking is too tight to catch those (see Django ticket #6052).
# Finally, MySQLdb always returns naive datetime objects. However, when
# timezone support is active, Django expects timezone-aware datetime objects.
django_conversions = conversions.copy()
django_conversions.update({
    FIELD_TYPE.TIME: backend_utils.typecast_time,
    FIELD_TYPE.DECIMAL: backend_utils.typecast_decimal,
    FIELD_TYPE.NEWDECIMAL: backend_utils.typecast_decimal,
    # Inbound DATETIME values and outbound Python datetimes both go through
    # the timezone-aware helpers defined above.
    FIELD_TYPE.DATETIME: parse_datetime_with_timezone_support,
    datetime.datetime: adapt_datetime_with_timezone_support,
})

# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of version
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warning module, and this is setup when the connection is created, and the
# standard backend_utils.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
    """
    A thin wrapper around MySQLdb's normal cursor class so that we can catch
    particular exception instances and reraise them with the right types.

    Implemented as a wrapper, rather than a subclass, so that we aren't stuck
    to the particular underlying representation returned by Connection.cursor().
    """
    # MySQL error codes that really signal integrity violations
    # (1048: "Column cannot be null").
    codes_for_integrityerror = (1048,)

    def __init__(self, cursor):
        self.cursor = cursor

    def execute(self, query, args=None):
        """Run a single statement; args is None means no string interpolation."""
        try:
            return self.cursor.execute(query, args)
        except Database.OperationalError as e:
            # Map some error codes to IntegrityError, since they seem to be
            # misclassified and Django would prefer the more logical place.
            if e.args[0] in self.codes_for_integrityerror:
                six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
            raise

    def executemany(self, query, args):
        """Run a statement for each parameter set, with the same error mapping."""
        try:
            return self.cursor.executemany(query, args)
        except Database.OperationalError as e:
            # Map some error codes to IntegrityError, since they seem to be
            # misclassified and Django would prefer the more logical place.
            if e.args[0] in self.codes_for_integrityerror:
                six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
            raise

    def __getattr__(self, attr):
        # __getattr__ is only invoked after normal lookup (which already
        # consults self.__dict__) has failed, so the previous
        # ``if attr in self.__dict__`` branch here was unreachable dead
        # code; delegate straight to the wrapped cursor.
        return getattr(self.cursor, attr)

    def __iter__(self):
        return iter(self.cursor)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Ticket #17671 - Close instead of passing thru to avoid backend
        # specific behavior.
        self.close()
class DatabaseWrapper(BaseDatabaseWrapper):
    """Django database backend wrapper for MySQL via the MySQLdb driver."""

    vendor = 'mysql'
    # This dictionary maps Field objects to their associated MySQL column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    _data_types = {
        'AutoField': 'integer AUTO_INCREMENT',
        'BinaryField': 'longblob',
        'BooleanField': 'bool',
        'CharField': 'varchar(%(max_length)s)',
        'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
        'DateField': 'date',
        'DateTimeField': 'datetime',
        'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
        'DurationField': 'bigint',
        'FileField': 'varchar(%(max_length)s)',
        'FilePathField': 'varchar(%(max_length)s)',
        'FloatField': 'double precision',
        'IntegerField': 'integer',
        'BigIntegerField': 'bigint',
        'IPAddressField': 'char(15)',
        'GenericIPAddressField': 'char(39)',
        'NullBooleanField': 'bool',
        'OneToOneField': 'integer',
        'PositiveIntegerField': 'integer UNSIGNED',
        'PositiveSmallIntegerField': 'smallint UNSIGNED',
        'SlugField': 'varchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField': 'longtext',
        'TimeField': 'time',
        'UUIDField': 'char(32)',
    }

    @cached_property
    def data_types(self):
        """Return column types, upgraded to microsecond precision if supported."""
        if self.features.supports_microsecond_precision:
            return dict(self._data_types, DateTimeField='datetime(6)', TimeField='time(6)')
        else:
            return self._data_types

    operators = {
        'exact': '= %s',
        'iexact': 'LIKE %s',
        'contains': 'LIKE BINARY %s',
        'icontains': 'LIKE %s',
        'regex': 'REGEXP BINARY %s',
        'iregex': 'REGEXP %s',
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': 'LIKE BINARY %s',
        'endswith': 'LIKE BINARY %s',
        'istartswith': 'LIKE %s',
        'iendswith': 'LIKE %s',
    }

    # The patterns below are used to generate SQL pattern lookup clauses when
    # the right-hand side of the lookup isn't a raw string (it might be an expression
    # or the result of a bilateral transformation).
    # In those cases, special characters for LIKE operators (e.g. \, *, _) should be
    # escaped on database side.
    #
    # Note: we use str.format() here for readability as '%' is used as a wildcard for
    # the LIKE operator.
    pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\\', '\\\\'), '%%', '\%%'), '_', '\_')"
    pattern_ops = {
        'contains': "LIKE BINARY CONCAT('%%', {}, '%%')",
        'icontains': "LIKE CONCAT('%%', {}, '%%')",
        'startswith': "LIKE BINARY CONCAT({}, '%%')",
        'istartswith': "LIKE CONCAT({}, '%%')",
        'endswith': "LIKE BINARY CONCAT('%%', {})",
        'iendswith': "LIKE CONCAT('%%', {})",
    }

    Database = Database
    SchemaEditorClass = DatabaseSchemaEditor

    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # Wire up the backend's component objects (features, ops, etc.).
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = DatabaseValidation(self)

    def get_connection_params(self):
        """Build the kwargs dict passed to MySQLdb.connect() from settings."""
        kwargs = {
            'conv': django_conversions,
            'charset': 'utf8',
        }
        if six.PY2:
            kwargs['use_unicode'] = True
        settings_dict = self.settings_dict
        if settings_dict['USER']:
            kwargs['user'] = settings_dict['USER']
        if settings_dict['NAME']:
            kwargs['db'] = settings_dict['NAME']
        if settings_dict['PASSWORD']:
            kwargs['passwd'] = force_str(settings_dict['PASSWORD'])
        # A HOST beginning with a slash is interpreted as a socket path.
        if settings_dict['HOST'].startswith('/'):
            kwargs['unix_socket'] = settings_dict['HOST']
        elif settings_dict['HOST']:
            kwargs['host'] = settings_dict['HOST']
        if settings_dict['PORT']:
            kwargs['port'] = int(settings_dict['PORT'])
        # We need the number of potentially affected rows after an
        # "UPDATE", not the number of changed rows.
        kwargs['client_flag'] = CLIENT.FOUND_ROWS
        kwargs.update(settings_dict['OPTIONS'])
        return kwargs

    def get_new_connection(self, conn_params):
        """Open a driver connection and register encoders for safe strings."""
        conn = Database.connect(**conn_params)
        conn.encoders[SafeText] = conn.encoders[six.text_type]
        conn.encoders[SafeBytes] = conn.encoders[bytes]
        return conn

    def init_connection_state(self):
        with self.cursor() as cursor:
            # SQL_AUTO_IS_NULL in MySQL controls whether an AUTO_INCREMENT column
            # on a recently-inserted row will return when the field is tested for
            # NULL. Disabling this value brings this aspect of MySQL in line with
            # SQL standards.
            cursor.execute('SET SQL_AUTO_IS_NULL = 0')

    def create_cursor(self):
        """Return a driver cursor wrapped for error-type translation."""
        cursor = self.connection.cursor()
        return CursorWrapper(cursor)

    def _rollback(self):
        # The driver raises NotSupportedError for rollback on some setups;
        # swallow that specific case only.
        try:
            BaseDatabaseWrapper._rollback(self)
        except Database.NotSupportedError:
            pass

    def _set_autocommit(self, autocommit):
        with self.wrap_database_errors:
            self.connection.autocommit(autocommit)

    def disable_constraint_checking(self):
        """
        Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True,
        to indicate constraint checks need to be re-enabled.
        """
        self.cursor().execute('SET foreign_key_checks=0')
        return True

    def enable_constraint_checking(self):
        """
        Re-enable foreign key checks after they have been disabled.
        """
        # Override needs_rollback in case constraint_checks_disabled is
        # nested inside transaction.atomic.
        self.needs_rollback, needs_rollback = False, self.needs_rollback
        try:
            self.cursor().execute('SET foreign_key_checks=1')
        finally:
            self.needs_rollback = needs_rollback

    def check_constraints(self, table_names=None):
        """
        Checks each table name in `table_names` for rows with invalid foreign
        key references. This method is intended to be used in conjunction with
        `disable_constraint_checking()` and `enable_constraint_checking()`, to
        determine if rows with invalid references were entered while constraint
        checks were off.

        Raises an IntegrityError on the first invalid foreign key reference
        encountered (if any) and provides detailed information about the
        invalid reference in the error message.

        Backends can override this method if they can more directly apply
        constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE")
        """
        cursor = self.cursor()
        if table_names is None:
            table_names = self.introspection.table_names(cursor)
        for table_name in table_names:
            # Tables without a single-column primary key are skipped.
            primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
            if not primary_key_column_name:
                continue
            key_columns = self.introspection.get_key_columns(cursor, table_name)
            for column_name, referenced_table_name, referenced_column_name in key_columns:
                cursor.execute("""
                    SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
                    LEFT JOIN `%s` as REFERRED
                    ON (REFERRING.`%s` = REFERRED.`%s`)
                    WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
                    % (primary_key_column_name, column_name, table_name, referenced_table_name,
                    column_name, referenced_column_name, column_name, referenced_column_name))
                for bad_row in cursor.fetchall():
                    raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
                        "foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
                        % (table_name, bad_row[0],
                        table_name, column_name, bad_row[1],
                        referenced_table_name, referenced_column_name))

    def is_usable(self):
        """Return True if the connection still answers a ping."""
        try:
            self.connection.ping()
        except Database.Error:
            return False
        else:
            return True

    @cached_property
    def mysql_version(self):
        """Return the server version as a tuple of ints, e.g. (5, 6, 24)."""
        with self.temporary_connection():
            server_info = self.connection.get_server_info()
        match = server_version_re.match(server_info)
        if not match:
            raise Exception('Unable to determine MySQL version from version string %r' % server_info)
        return tuple(int(x) for x in match.groups())
|
daxm/fmcapi | refs/heads/master | fmcapi/api_objects/device_group_services/__init__.py | 1 | """Device Group Services Classes."""
import logging
from .devicegrouprecords import DeviceGroupRecords
logging.debug("In the device_group_services __init__.py file.")

# BUG FIX: the original read ``__all__: [...]`` — a bare annotation that
# evaluates the list but never binds the name, so ``__all__`` was undefined
# and wildcard imports ignored it. Use a real assignment.
__all__ = [
    "DeviceGroupRecords",
]
|
hybrideagle/django | refs/heads/master | tests/migrations/migrations_test_apps/unspecified_app_with_conflict/migrations/0002_conflicting_second.py | 425 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # NOTE(review): judging by its path (0002_conflicting_second in
    # unspecified_app_with_conflict), this is a test fixture that shares a
    # dependency with another "second" migration to create a deliberate
    # conflict for the migration loader — confirm against the sibling file.

    dependencies = [("unspecified_app_with_conflict", "0001_initial")]

    operations = [
        migrations.CreateModel(
            "Something",
            [
                ("id", models.AutoField(primary_key=True)),
            ],
        )
    ]
|
Korkki/django | refs/heads/master | tests/migrations/test_deprecated_fields.py | 504 | from django.core.management import call_command
from django.test import override_settings
from .test_base import MigrationTestBase
class Tests(MigrationTestBase):
    """
    Deprecated model fields should still be usable in historic migrations.
    """

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.deprecated_field_migrations"})
    def test_migrate(self):
        # Round-trip: migrating forward creates the table that uses the
        # deprecated IPAddressField; migrating back to zero removes it.
        # Make sure no tables are created
        self.assertTableNotExists("migrations_ipaddressfield")
        # Run migration
        call_command("migrate", verbosity=0)
        # Make sure the right tables exist
        self.assertTableExists("migrations_ipaddressfield")
        # Unmigrate everything
        call_command("migrate", "migrations", "zero", verbosity=0)
        # Make sure it's all gone
        self.assertTableNotExists("migrations_ipaddressfield")
|
CJ8664/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pytest/testing/test_collection.py | 187 | import pytest, py
from _pytest.main import Session, EXIT_NOTESTSCOLLECTED
class TestCollector:
    """Behavioral checks for pytest's Collector/Item node hierarchy."""

    def test_collect_versus_item(self):
        # Collector and Item are disjoint roles, not subclasses of each other.
        from pytest import Collector, Item
        assert not issubclass(Collector, Item)
        assert not issubclass(Item, Collector)

    def test_compat_attributes(self, testdir, recwarn):
        # Legacy aliases on a module collector still resolve to pytest types.
        modcol = testdir.getmodulecol("""
            def test_pass(): pass
            def test_fail(): assert 0
        """)
        recwarn.clear()
        assert modcol.Module == pytest.Module
        assert modcol.Class == pytest.Class
        assert modcol.Item == pytest.Item
        assert modcol.File == pytest.File
        assert modcol.Function == pytest.Function

    def test_check_equality(self, testdir):
        # Collecting the same test twice yields equal (and equally hashed)
        # Function nodes; different tests and non-node values compare unequal.
        modcol = testdir.getmodulecol("""
            def test_pass(): pass
            def test_fail(): assert 0
        """)
        fn1 = testdir.collect_by_name(modcol, "test_pass")
        assert isinstance(fn1, pytest.Function)
        fn2 = testdir.collect_by_name(modcol, "test_pass")
        assert isinstance(fn2, pytest.Function)
        assert fn1 == fn2
        assert fn1 != modcol
        if py.std.sys.version_info < (3, 0):
            assert cmp(fn1, fn2) == 0
        assert hash(fn1) == hash(fn2)
        fn3 = testdir.collect_by_name(modcol, "test_fail")
        assert isinstance(fn3, pytest.Function)
        assert not (fn1 == fn3)
        assert fn1 != fn3
        for fn in fn1,fn2,fn3:
            assert fn != 3
            assert fn != modcol
            assert fn != [1,2,3]
            assert [1,2,3] != fn
            assert modcol != fn

    def test_getparent(self, testdir):
        # getparent() walks up to the nearest ancestor of the requested type
        # (or returns the node itself when it already matches).
        modcol = testdir.getmodulecol("""
            class TestClass:
                 def test_foo():
                     pass
        """)
        cls = testdir.collect_by_name(modcol, "TestClass")
        fn = testdir.collect_by_name(
            testdir.collect_by_name(cls, "()"), "test_foo")
        parent = fn.getparent(pytest.Module)
        assert parent is modcol
        parent = fn.getparent(pytest.Function)
        assert parent is fn
        parent = fn.getparent(pytest.Class)
        assert parent is cls

    def test_getcustomfile_roundtrip(self, testdir):
        # A custom File subclass returned from pytest_collect_file survives
        # a collect -> nodeid -> perform_collect round trip.
        hello = testdir.makefile(".xxx", hello="world")
        testdir.makepyfile(conftest="""
            import pytest
            class CustomFile(pytest.File):
                pass
            def pytest_collect_file(path, parent):
                if path.ext == ".xxx":
                    return CustomFile(path, parent=parent)
        """)
        node = testdir.getpathnode(hello)
        assert isinstance(node, pytest.File)
        assert node.name == "hello.xxx"
        nodes = node.session.perform_collect([node.nodeid], genitems=False)
        assert len(nodes) == 1
        assert isinstance(nodes[0], pytest.File)
class TestCollectFS:
    """Filesystem traversal rules applied during collection."""
    def test_ignored_certain_directories(self, testdir):
        """VCS and hidden directories are never descended into."""
        tmpdir = testdir.tmpdir
        tmpdir.ensure("_darcs", 'test_notfound.py')
        tmpdir.ensure("CVS", 'test_notfound.py')
        tmpdir.ensure("{arch}", 'test_notfound.py')
        tmpdir.ensure(".whatever", 'test_notfound.py')
        tmpdir.ensure(".bzr", 'test_notfound.py')
        tmpdir.ensure("normal", 'test_found.py')
        for x in tmpdir.visit("test_*.py"):
            x.write("def test_hello(): pass")
        result = testdir.runpytest("--collect-only")
        s = result.stdout.str()
        assert "test_notfound" not in s
        assert "test_found" in s
    def test_custom_norecursedirs(self, testdir):
        """norecursedirs prunes matching directories, but an explicit
        command-line path still overrides the pruning."""
        testdir.makeini("""
            [pytest]
            norecursedirs = mydir xyz*
        """)
        tmpdir = testdir.tmpdir
        tmpdir.ensure("mydir", "test_hello.py").write("def test_1(): pass")
        tmpdir.ensure("xyz123", "test_2.py").write("def test_2(): 0/0")
        tmpdir.ensure("xy", "test_ok.py").write("def test_3(): pass")
        rec = testdir.inline_run()
        rec.assertoutcome(passed=1)
        rec = testdir.inline_run("xyz123/test_2.py")
        rec.assertoutcome(failed=1)
    def test_testpaths_ini(self, testdir, monkeypatch):
        """testpaths restricts default collection to the listed directories
        only when invoked from the rootdir without arguments."""
        testdir.makeini("""
            [pytest]
            testpaths = gui uts
        """)
        tmpdir = testdir.tmpdir
        tmpdir.ensure("env", "test_1.py").write("def test_env(): pass")
        tmpdir.ensure("gui", "test_2.py").write("def test_gui(): pass")
        tmpdir.ensure("uts", "test_3.py").write("def test_uts(): pass")
        # executing from rootdir only tests from `testpaths` directories
        # are collected
        items, reprec = testdir.inline_genitems('-v')
        assert [x.name for x in items] == ['test_gui', 'test_uts']
        # check that explicitly passing directories in the command-line
        # collects the tests
        for dirname in ('env', 'gui', 'uts'):
            items, reprec = testdir.inline_genitems(tmpdir.join(dirname))
            assert [x.name for x in items] == ['test_%s' % dirname]
        # changing cwd to each subdirectory and running pytest without
        # arguments collects the tests in that directory normally
        for dirname in ('env', 'gui', 'uts'):
            monkeypatch.chdir(testdir.tmpdir.join(dirname))
            items, reprec = testdir.inline_genitems()
            assert [x.name for x in items] == ['test_%s' % dirname]
class TestCollectPluginHookRelay:
    """Plugin hooks are invoked for files and directories during collection."""
    def test_pytest_collect_file(self, testdir):
        """pytest_collect_file fires once for each encountered file."""
        wascalled = []
        class Plugin:
            def pytest_collect_file(self, path, parent):
                wascalled.append(path)
        testdir.makefile(".abc", "xyz")
        pytest.main([testdir.tmpdir], plugins=[Plugin()])
        assert len(wascalled) == 1
        assert wascalled[0].ext == '.abc'
    def test_pytest_collect_directory(self, testdir):
        """pytest_collect_directory fires for each traversed directory."""
        wascalled = []
        class Plugin:
            def pytest_collect_directory(self, path, parent):
                wascalled.append(path.basename)
        testdir.mkdir("hello")
        testdir.mkdir("world")
        pytest.main(testdir.tmpdir, plugins=[Plugin()])
        assert "hello" in wascalled
        assert "world" in wascalled
class TestPrunetraceback:
    """Collection-error tracebacks are pruned and customizable."""
    def test_collection_error(self, testdir):
        """An import error during collection reports a short traceback."""
        p = testdir.makepyfile("""
            import not_exists
        """)
        result = testdir.runpytest(p)
        assert "__import__" not in result.stdout.str(), "too long traceback"
        result.stdout.fnmatch_lines([
            "*ERROR collecting*",
            "*mport*not_exists*"
        ])
    def test_custom_repr_failure(self, testdir):
        """A collector may override repr_failure() to customize error output."""
        p = testdir.makepyfile("""
            import not_exists
        """)
        testdir.makeconftest("""
            import pytest
            def pytest_collect_file(path, parent):
                return MyFile(path, parent)
            class MyError(Exception):
                pass
            class MyFile(pytest.File):
                def collect(self):
                    raise MyError()
                def repr_failure(self, excinfo):
                    if excinfo.errisinstance(MyError):
                        return "hello world"
                    return pytest.File.repr_failure(self, excinfo)
        """)
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "*ERROR collecting*",
            "*hello world*",
        ])
    @pytest.mark.xfail(reason="other mechanism for adding to reporting needed")
    def test_collect_report_postprocessing(self, testdir):
        """Plugins should be able to post-process collect reports (xfail)."""
        p = testdir.makepyfile("""
            import not_exists
        """)
        testdir.makeconftest("""
            import pytest
            def pytest_make_collect_report(__multicall__):
                rep = __multicall__.execute()
                rep.headerlines += ["header1"]
                return rep
        """)
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "*ERROR collecting*",
            "*header1*",
        ])
class TestCustomConftests:
    """Conftest hooks that customize or suppress which files get collected."""
    def test_ignore_collect_path(self, testdir):
        """pytest_ignore_collect skips matching files/dirs (even broken ones)."""
        testdir.makeconftest("""
            def pytest_ignore_collect(path, config):
                return path.basename.startswith("x") or \
                       path.basename == "test_one.py"
        """)
        sub = testdir.mkdir("xy123")
        sub.ensure("test_hello.py").write("syntax error")
        sub.join("conftest.py").write("syntax error")
        testdir.makepyfile("def test_hello(): pass")
        testdir.makepyfile(test_one="syntax error")
        result = testdir.runpytest("--fulltrace")
        assert result.ret == 0
        result.stdout.fnmatch_lines(["*1 passed*"])
    def test_ignore_collect_not_called_on_argument(self, testdir):
        """A path given explicitly on the command line bypasses ignore_collect."""
        testdir.makeconftest("""
            def pytest_ignore_collect(path, config):
                return True
        """)
        p = testdir.makepyfile("def test_hello(): pass")
        result = testdir.runpytest(p)
        assert result.ret == 0
        result.stdout.fnmatch_lines("*1 passed*")
        result = testdir.runpytest()
        assert result.ret == EXIT_NOTESTSCOLLECTED
        result.stdout.fnmatch_lines("*collected 0 items*")
    def test_collectignore_exclude_on_option(self, testdir):
        """collect_ignore can be emptied at configure time based on an option."""
        testdir.makeconftest("""
            collect_ignore = ['hello', 'test_world.py']
            def pytest_addoption(parser):
                parser.addoption("--XX", action="store_true", default=False)
            def pytest_configure(config):
                if config.getvalue("XX"):
                    collect_ignore[:] = []
        """)
        testdir.mkdir("hello")
        testdir.makepyfile(test_world="def test_hello(): pass")
        result = testdir.runpytest()
        assert result.ret == EXIT_NOTESTSCOLLECTED
        assert "passed" not in result.stdout.str()
        result = testdir.runpytest("--XX")
        assert result.ret == 0
        assert "passed" in result.stdout.str()
    def test_pytest_fs_collect_hooks_are_seen(self, testdir):
        """A conftest-provided Module subclass is used to collect .py files."""
        testdir.makeconftest("""
            import pytest
            class MyModule(pytest.Module):
                pass
            def pytest_collect_file(path, parent):
                if path.ext == ".py":
                    return MyModule(path, parent)
        """)
        testdir.mkdir("sub")
        testdir.makepyfile("def test_x(): pass")
        result = testdir.runpytest("--collect-only")
        result.stdout.fnmatch_lines([
            "*MyModule*",
            "*test_x*"
        ])
    def test_pytest_collect_file_from_sister_dir(self, testdir):
        """Each sibling directory's conftest collector applies only there."""
        sub1 = testdir.mkpydir("sub1")
        sub2 = testdir.mkpydir("sub2")
        conf1 = testdir.makeconftest("""
            import pytest
            class MyModule1(pytest.Module):
                pass
            def pytest_collect_file(path, parent):
                if path.ext == ".py":
                    return MyModule1(path, parent)
        """)
        conf1.move(sub1.join(conf1.basename))
        conf2 = testdir.makeconftest("""
            import pytest
            class MyModule2(pytest.Module):
                pass
            def pytest_collect_file(path, parent):
                if path.ext == ".py":
                    return MyModule2(path, parent)
        """)
        conf2.move(sub2.join(conf2.basename))
        p = testdir.makepyfile("def test_x(): pass")
        p.copy(sub1.join(p.basename))
        p.copy(sub2.join(p.basename))
        result = testdir.runpytest("--collect-only")
        result.stdout.fnmatch_lines([
            "*MyModule1*",
            "*MyModule2*",
            "*test_x*"
        ])
class TestSession:
    """Session-level collection protocol: argument parsing, id-based
    collection, hook call ordering and node-id round trips."""
    def test_parsearg(self, testdir):
        """Session._parsearg resolves relative paths and ``::`` selectors."""
        p = testdir.makepyfile("def test_func(): pass")
        subdir = testdir.mkdir("sub")
        subdir.ensure("__init__.py")
        target = subdir.join(p.basename)
        p.move(target)
        subdir.chdir()
        config = testdir.parseconfig(p.basename)
        rcol = Session(config=config)
        assert rcol.fspath == subdir
        parts = rcol._parsearg(p.basename)
        assert parts[0] == target
        assert len(parts) == 1
        parts = rcol._parsearg(p.basename + "::test_func")
        assert parts[0] == target
        assert parts[1] == "test_func"
        assert len(parts) == 2
    def test_collect_topdir(self, testdir):
        """Collecting the session's own nodeid yields the module collector."""
        p = testdir.makepyfile("def test_func(): pass")
        id = "::".join([p.basename, "test_func"])
        # XXX migrate to collectonly? (see below)
        config = testdir.parseconfig(id)
        topdir = testdir.tmpdir
        rcol = Session(config)
        assert topdir == rcol.fspath
        #rootid = rcol.nodeid
        #root2 = rcol.perform_collect([rcol.nodeid], genitems=False)[0]
        #assert root2 == rcol, rootid
        colitems = rcol.perform_collect([rcol.nodeid], genitems=False)
        assert len(colitems) == 1
        assert colitems[0].fspath == p
    def test_collect_protocol_single_function(self, testdir):
        """Selecting one function by id triggers the expected hook sequence."""
        p = testdir.makepyfile("def test_func(): pass")
        id = "::".join([p.basename, "test_func"])
        items, hookrec = testdir.inline_genitems(id)
        item, = items
        assert item.name == "test_func"
        newid = item.nodeid
        assert newid == id
        py.std.pprint.pprint(hookrec.calls)
        topdir = testdir.tmpdir # noqa
        hookrec.assert_contains([
            ("pytest_collectstart", "collector.fspath == topdir"),
            ("pytest_make_collect_report", "collector.fspath == topdir"),
            ("pytest_collectstart", "collector.fspath == p"),
            ("pytest_make_collect_report", "collector.fspath == p"),
            ("pytest_pycollect_makeitem", "name == 'test_func'"),
            ("pytest_collectreport", "report.nodeid.startswith(p.basename)"),
            ("pytest_collectreport", "report.nodeid == ''")
        ])
    def test_collect_protocol_method(self, testdir):
        """All id spellings of a method resolve to the same normalized id."""
        p = testdir.makepyfile("""
            class TestClass:
                def test_method(self):
                    pass
        """)
        normid = p.basename + "::TestClass::()::test_method"
        for id in [p.basename,
                   p.basename + "::TestClass",
                   p.basename + "::TestClass::()",
                   normid,
                   ]:
            items, hookrec = testdir.inline_genitems(id)
            assert len(items) == 1
            assert items[0].name == "test_method"
            newid = items[0].nodeid
            assert newid == normid
    def test_collect_custom_nodes_multi_id(self, testdir):
        """A custom file collector and the default Module both collect the
        same file, producing two items for one path."""
        p = testdir.makepyfile("def test_func(): pass")
        testdir.makeconftest("""
            import pytest
            class SpecialItem(pytest.Item):
                def runtest(self):
                    return # ok
            class SpecialFile(pytest.File):
                def collect(self):
                    return [SpecialItem(name="check", parent=self)]
            def pytest_collect_file(path, parent):
                if path.basename == %r:
                    return SpecialFile(fspath=path, parent=parent)
        """ % p.basename)
        id = p.basename
        items, hookrec = testdir.inline_genitems(id)
        py.std.pprint.pprint(hookrec.calls)
        assert len(items) == 2
        hookrec.assert_contains([
            ("pytest_collectstart",
                "collector.fspath == collector.session.fspath"),
            ("pytest_collectstart",
                "collector.__class__.__name__ == 'SpecialFile'"),
            ("pytest_collectstart",
                "collector.__class__.__name__ == 'Module'"),
            ("pytest_pycollect_makeitem", "name == 'test_func'"),
            ("pytest_collectreport", "report.nodeid.startswith(p.basename)"),
            #("pytest_collectreport",
            #    "report.fspath == %r" % str(rcol.fspath)),
        ])
    def test_collect_subdir_event_ordering(self, testdir):
        """Hooks fire for a file moved into a package subdir, with sane ids."""
        p = testdir.makepyfile("def test_func(): pass")
        aaa = testdir.mkpydir("aaa")
        test_aaa = aaa.join("test_aaa.py")
        p.move(test_aaa)
        items, hookrec = testdir.inline_genitems()
        assert len(items) == 1
        py.std.pprint.pprint(hookrec.calls)
        hookrec.assert_contains([
            ("pytest_collectstart", "collector.fspath == test_aaa"),
            ("pytest_pycollect_makeitem", "name == 'test_func'"),
            ("pytest_collectreport",
                "report.nodeid.startswith('aaa/test_aaa.py')"),
        ])
    def test_collect_two_commandline_args(self, testdir):
        """Two directory arguments are collected in order with correct ids."""
        p = testdir.makepyfile("def test_func(): pass")
        aaa = testdir.mkpydir("aaa")
        bbb = testdir.mkpydir("bbb")
        test_aaa = aaa.join("test_aaa.py")
        p.copy(test_aaa)
        test_bbb = bbb.join("test_bbb.py")
        p.move(test_bbb)
        id = "."
        items, hookrec = testdir.inline_genitems(id)
        assert len(items) == 2
        py.std.pprint.pprint(hookrec.calls)
        hookrec.assert_contains([
            ("pytest_collectstart", "collector.fspath == test_aaa"),
            ("pytest_pycollect_makeitem", "name == 'test_func'"),
            ("pytest_collectreport", "report.nodeid == 'aaa/test_aaa.py'"),
            ("pytest_collectstart", "collector.fspath == test_bbb"),
            ("pytest_pycollect_makeitem", "name == 'test_func'"),
            ("pytest_collectreport", "report.nodeid == 'bbb/test_bbb.py'"),
        ])
    def test_serialization_byid(self, testdir):
        """An item's nodeid re-collects an equivalent item."""
        testdir.makepyfile("def test_func(): pass")
        items, hookrec = testdir.inline_genitems()
        assert len(items) == 1
        item, = items
        items2, hookrec = testdir.inline_genitems(item.nodeid)
        item2, = items2
        assert item2.name == item.name
        assert item2.fspath == item.fspath
    def test_find_byid_without_instance_parents(self, testdir):
        """Ids written without the '()' instance part still resolve."""
        p = testdir.makepyfile("""
            class TestClass:
                def test_method(self):
                    pass
        """)
        arg = p.basename + ("::TestClass::test_method")
        items, hookrec = testdir.inline_genitems(arg)
        assert len(items) == 1
        item, = items
        assert item.nodeid.endswith("TestClass::()::test_method")
class Test_getinitialnodes:
    """Initial node creation for files given on the command line."""
    def test_global_file(self, testdir, tmpdir):
        """A toplevel file maps to a Module whose parent chain ends at the
        session root, all sharing the same config."""
        x = tmpdir.ensure("x.py")
        config = testdir.parseconfigure(x)
        col = testdir.getnode(config, x)
        assert isinstance(col, pytest.Module)
        assert col.name == 'x.py'
        assert col.parent.name == testdir.tmpdir.basename
        assert col.parent.parent is None
        for col in col.listchain():
            assert col.config is config
    def test_pkgfile(self, testdir):
        """A file inside a package still roots its parent chain at the session."""
        tmpdir = testdir.tmpdir
        subdir = tmpdir.join("subdir")
        x = subdir.ensure("x.py")
        subdir.ensure("__init__.py")
        config = testdir.parseconfigure(x)
        col = testdir.getnode(config, x)
        assert isinstance(col, pytest.Module)
        assert col.name == 'x.py'
        assert col.parent.parent is None
        for col in col.listchain():
            assert col.config is config
class Test_genitems:
    """Item generation: hashing, naming, and pattern-based discovery."""
    def test_check_collect_hashes(self, testdir):
        """Distinct collected items never share a hash nor compare equal."""
        p = testdir.makepyfile("""
            def test_1():
                pass
            def test_2():
                pass
        """)
        p.copy(p.dirpath(p.purebasename + "2" + ".py"))
        items, reprec = testdir.inline_genitems(p.dirpath())
        assert len(items) == 4
        for numi, i in enumerate(items):
            for numj, j in enumerate(items):
                if numj != numi:
                    assert hash(i) != hash(j)
                    assert i != j
    def test_example_items1(self, testdir):
        """Functions and inherited methods are collected; getmodpath()
        reflects the defining class for each occurrence."""
        p = testdir.makepyfile('''
            def testone():
                pass
            class TestX:
                def testmethod_one(self):
                    pass
            class TestY(TestX):
                pass
        ''')
        items, reprec = testdir.inline_genitems(p)
        assert len(items) == 3
        assert items[0].name == 'testone'
        assert items[1].name == 'testmethod_one'
        assert items[2].name == 'testmethod_one'
        # let's also test getmodpath here
        assert items[0].getmodpath() == "testone"
        assert items[1].getmodpath() == "TestX.testmethod_one"
        assert items[2].getmodpath() == "TestY.testmethod_one"
        s = items[0].getmodpath(stopatmodule=False)
        assert s.endswith("test_example_items1.testone")
        print(s)
    def test_class_and_functions_discovery_using_glob(self, testdir):
        """
        tests that python_classes and python_functions config options work
        as prefixes and glob-like patterns (issue #600).
        """
        testdir.makeini("""
            [pytest]
            python_classes = *Suite Test
            python_functions = *_test test
        """)
        p = testdir.makepyfile('''
            class MyTestSuite:
                def x_test(self):
                    pass
            class TestCase:
                def test_y(self):
                    pass
        ''')
        items, reprec = testdir.inline_genitems(p)
        ids = [x.getmodpath() for x in items]
        assert ids == ['MyTestSuite.x_test', 'TestCase.test_y']
def test_matchnodes_two_collections_same_file(testdir):
    """Two collectors claiming the same file both contribute items, and an
    item from the second collector remains addressable by its node id."""
    testdir.makeconftest("""
        import pytest
        def pytest_configure(config):
            config.pluginmanager.register(Plugin2())
        class Plugin2:
            def pytest_collect_file(self, path, parent):
                if path.ext == ".abc":
                    return MyFile2(path, parent)
        def pytest_collect_file(path, parent):
            if path.ext == ".abc":
                return MyFile1(path, parent)
        class MyFile1(pytest.Item, pytest.File):
            def runtest(self):
                pass
        class MyFile2(pytest.File):
            def collect(self):
                return [Item2("hello", parent=self)]
        class Item2(pytest.Item):
            def runtest(self):
                pass
    """)
    p = testdir.makefile(".abc", "")
    result = testdir.runpytest()
    assert result.ret == 0
    result.stdout.fnmatch_lines([
        "*2 passed*",
    ])
    res = testdir.runpytest("%s::hello" % p.basename)
    res.stdout.fnmatch_lines([
        "*1 passed*",
    ])
class TestNodekeywords:
    """Keyword exposure on nodes and -k matching edge cases."""
    def test_no_under(self, testdir):
        """Node keywords include the module name and no private names."""
        modcol = testdir.getmodulecol("""
            def test_pass(): pass
            def test_fail(): assert 0
        """)
        l = list(modcol.keywords)
        assert modcol.name in l
        for x in l:
            assert not x.startswith("_")
        assert modcol.name in repr(modcol.keywords)
    def test_issue345(self, testdir):
        """-k matching must not be confused by a test named test___repr__."""
        testdir.makepyfile("""
            def test_should_not_be_selected():
                assert False, 'I should not have been selected to run'
            def test___repr__():
                pass
        """)
        reprec = testdir.inline_run("-k repr")
        reprec.assertoutcome(passed=1, failed=0)
|
xzturn/tensorflow | refs/heads/master | tensorflow/python/keras/regularizers.py | 8 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in regularizers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.regularizers.Regularizer')
class Regularizer(object):
  """Regularizer base class.
  Regularizers allow you to apply penalties on layer parameters or layer
  activity during optimization. These penalties are summed into the loss
  function that the network optimizes.
  Regularization penalties are applied on a per-layer basis. The exact API will
  depend on the layer, but many layers (e.g. `Dense`, `Conv1D`, `Conv2D` and
  `Conv3D`) have a unified API.
  These layers expose 3 keyword arguments:
  - `kernel_regularizer`: Regularizer to apply a penalty on the layer's kernel
  - `bias_regularizer`: Regularizer to apply a penalty on the layer's bias
  - `activity_regularizer`: Regularizer to apply a penalty on the layer's output
  All layers (including custom layers) expose `activity_regularizer` as a
  settable property, whether or not it is in the constructor arguments.
  The value returned by the `activity_regularizer` is divided by the input
  batch size so that the relative weighting between the weight regularizers and
  the activity regularizers does not change with the batch size.
  You can access a layer's regularization penalties by calling `layer.losses`
  after calling the layer on inputs.
  ## Example
  >>> layer = tf.keras.layers.Dense(
  ...     5, input_dim=5,
  ...     kernel_initializer='ones',
  ...     kernel_regularizer=tf.keras.regularizers.l1(0.01),
  ...     activity_regularizer=tf.keras.regularizers.l2(0.01))
  >>> tensor = tf.ones(shape=(5, 5)) * 2.0
  >>> out = layer(tensor)
  >>> # The kernel regularization term is 0.25
  >>> # The activity regularization term (after dividing by the batch size) is 5
  >>> tf.math.reduce_sum(layer.losses)
  <tf.Tensor: shape=(), dtype=float32, numpy=5.25>
  ## Available penalties
  ```python
  tf.keras.regularizers.l1(0.3)  # L1 Regularization Penalty
  tf.keras.regularizers.l2(0.1)  # L2 Regularization Penalty
  tf.keras.regularizers.l1_l2(l1=0.01, l2=0.01)  # L1 + L2 penalties
  ```
  ## Directly calling a regularizer
  Compute a regularization loss on a tensor by directly calling a regularizer
  as if it is a one-argument function.
  E.g.
  >>> regularizer = tf.keras.regularizers.l2(2.)
  >>> tensor = tf.ones(shape=(5, 5))
  >>> regularizer(tensor)
  <tf.Tensor: shape=(), dtype=float32, numpy=50.0>
  ## Developing new regularizers
  Any function that takes in a weight matrix and returns a scalar
  tensor can be used as a regularizer, e.g.:
  >>> @tf.keras.utils.register_keras_serializable(package='Custom', name='l1')
  ... def l1_reg(weight_matrix):
  ...    return 0.01 * tf.math.reduce_sum(tf.math.abs(weight_matrix))
  ...
  >>> layer = tf.keras.layers.Dense(5, input_dim=5,
  ...     kernel_initializer='ones', kernel_regularizer=l1_reg)
  >>> tensor = tf.ones(shape=(5, 5))
  >>> out = layer(tensor)
  >>> layer.losses
  [<tf.Tensor: shape=(), dtype=float32, numpy=0.25>]
  Alternatively, you can write your custom regularizers in an
  object-oriented way by extending this regularizer base class, e.g.:
  >>> @tf.keras.utils.register_keras_serializable(package='Custom', name='l2')
  ... class L2Regularizer(tf.keras.regularizers.Regularizer):
  ...   def __init__(self, l2=0.):  # pylint: disable=redefined-outer-name
  ...     self.l2 = l2
  ...
  ...   def __call__(self, x):
  ...     return self.l2 * tf.math.reduce_sum(tf.math.square(x))
  ...
  ...   def get_config(self):
  ...     return {'l2': float(self.l2)}
  ...
  >>> layer = tf.keras.layers.Dense(
  ...   5, input_dim=5, kernel_initializer='ones',
  ...   kernel_regularizer=L2Regularizer(l2=0.5))
  >>> tensor = tf.ones(shape=(5, 5))
  >>> out = layer(tensor)
  >>> layer.losses
  [<tf.Tensor: shape=(), dtype=float32, numpy=12.5>]
  ### A note on serialization and deserialization:
  Registering the regularizers as serializable is optional if you are just
  training and executing models, exporting to and from SavedModels, or saving
  and loading weight checkpoints.
  Registration is required for Keras `model_to_estimator`, saving and
  loading models to HDF5 formats, Keras model cloning, some visualization
  utilities, and exporting models to and from JSON. If using this functionality,
  you must make sure any python process running your model has also defined
  and registered your custom regularizer.
  `tf.keras.utils.register_keras_serializable` is only available in TF 2.1 and
  beyond. In earlier versions of TensorFlow you must pass your custom
  regularizer to the `custom_objects` argument of methods that expect custom
  regularizers to be registered as serializable.
  """
  def __call__(self, x):
    """Compute a regularization penalty from an input tensor."""
    # Neutral default: the base class contributes no penalty; concrete
    # regularizers override this to return a scalar tensor computed from x.
    return 0.
  @classmethod
  def from_config(cls, config):
    """Creates a regularizer from its config.
    This method is the reverse of `get_config`,
    capable of instantiating the same regularizer from the config
    dictionary.
    This method is used by Keras `model_to_estimator`, saving and
    loading models to HDF5 formats, Keras model cloning, some visualization
    utilities, and exporting models to and from JSON.
    Arguments:
        config: A Python dictionary, typically the output of get_config.
    Returns:
        A regularizer instance.
    """
    return cls(**config)
  def get_config(self):
    """Returns the config of the regularizer.
    An regularizer config is a Python dictionary (serializable)
    containing all configuration parameters of the regularizer.
    The same regularizer can be reinstantiated later
    (without any saved state) from this configuration.
    This method is optional if you are just training and executing models,
    exporting to and from SavedModels, or using weight checkpoints.
    This method is required for Keras `model_to_estimator`, saving and
    loading models to HDF5 formats, Keras model cloning, some visualization
    utilities, and exporting models to and from JSON.
    Returns:
        Python dictionary.
    """
    # Deliberately unimplemented: subclasses that want serialization
    # support must override this.
    raise NotImplementedError(str(self) + ' does not implement get_config()')
@keras_export('keras.regularizers.L1L2')
class L1L2(Regularizer):
  r"""Regularizer applying both an L1 and an L2 penalty.
  The L1 penalty is computed as
  $$\ell_1\,\,penalty =\ell_1\sum_{i=0}^n|x_i|$$
  and the L2 penalty as
  $$\ell_2\,\,penalty =\ell_2\sum_{i=0}^nx_i^2$$
  Attributes:
    l1: Float; weight of the L1 penalty.
    l2: Float; weight of the L2 penalty.
  """
  def __init__(self, l1=0., l2=0.):  # pylint: disable=redefined-outer-name
    # Store both factors in the backend float type so graph ops stay typed.
    self.l1 = K.cast_to_floatx(l1)
    self.l2 = K.cast_to_floatx(l2)
  def __call__(self, x):
    """Return the combined penalty for tensor `x` as a scalar tensor."""
    if not (self.l1 or self.l2):
      # Both factors disabled: contribute a constant zero.
      return K.constant(0.)
    penalty = 0.
    if self.l1:
      penalty = penalty + self.l1 * math_ops.reduce_sum(math_ops.abs(x))
    if self.l2:
      penalty = penalty + self.l2 * math_ops.reduce_sum(math_ops.square(x))
    return penalty
  def get_config(self):
    """Serialize the two penalty factors as plain Python floats."""
    return {'l1': float(self.l1), 'l2': float(self.l2)}
# Aliases.
@keras_export('keras.regularizers.l1')
def l1(l=0.01):
  r"""Build a regularizer with only the L1 term active.
  The penalty applied is
  $$\ell_1\,\,penalty =\ell_1\sum_{i=0}^n|x_i|$$
  Arguments:
    l: Float; L1 regularization factor.
  Returns:
    An L1 Regularizer with the given regularization factor.
  """
  return L1L2(l1=l)
@keras_export('keras.regularizers.l2')
def l2(l=0.01):
  r"""Build a regularizer with only the L2 term active.
  The penalty applied is
  $$\ell_2\,\,penalty =\ell_2\sum_{i=0}^nx_i^2$$
  Arguments:
    l: Float; L2 regularization factor.
  Returns:
    An L2 Regularizer with the given regularization factor.
  """
  return L1L2(l2=l)
@keras_export('keras.regularizers.l1_l2')
def l1_l2(l1=0.01, l2=0.01):  # pylint: disable=redefined-outer-name
  r"""Build a regularizer combining the L1 and L2 penalties.
  The L1 penalty is
  $$\ell_1\,\,penalty =\ell_1\sum_{i=0}^n|x_i|$$
  and the L2 penalty is
  $$\ell_2\,\,penalty =\ell_2\sum_{i=0}^nx_i^2$$
  Arguments:
    l1: Float; L1 regularization factor.
    l2: Float; L2 regularization factor.
  Returns:
    An L1L2 Regularizer with the given regularization factors.
  """
  return L1L2(l1=l1, l2=l2)
@keras_export('keras.regularizers.serialize')
def serialize(regularizer):
  """Serialize a regularizer instance into a Keras config structure."""
  return serialize_keras_object(regularizer)
@keras_export('keras.regularizers.deserialize')
def deserialize(config, custom_objects=None):
  """Instantiate a regularizer from a Keras config dict or class name.

  Looks up classes in this module's globals, falling back to
  `custom_objects` for user-registered regularizers.
  """
  return deserialize_keras_object(
      config,
      module_objects=globals(),
      custom_objects=custom_objects,
      printable_module_name='regularizer')
@keras_export('keras.regularizers.get')
def get(identifier):
  """Retrieve a regularizer instance from a dict, string, or callable."""
  if identifier is None:
    return None
  if isinstance(identifier, dict):
    return deserialize(identifier)
  if isinstance(identifier, six.string_types):
    name = str(identifier)
    # 'l1', 'l2' and 'l1_l2' are functions that return classes, so the bare
    # names are treated as class names with an empty config.
    # TODO(omalleyt): Turn these into classes or class aliases.
    if name in ('l1', 'l2', 'l1_l2'):
      return deserialize({'class_name': name, 'config': {}})
    return deserialize(name)
  if callable(identifier):
    return identifier
  raise ValueError('Could not interpret regularizer identifier:', identifier)
|
liorvh/raspberry_pwn | refs/heads/master | src/pentest/grabber/xss.py | 8 | #!/usr/bin/env python
"""
Cross-Site Scripting Module for Grabber v0.1
Copyright (C) 2006 - Romain Gaucher - http://rgaucher.info
"""
import sys
from grabber import getContent_POST, getContent_GET
from grabber import getContentDirectURL_GET, getContentDirectURL_POST
from grabber import single_urlencode, partially_in, unescape
def detect_xss(instance, output):
    """Return True when the unescaped payload shows up in *output*,
    either verbatim or as a partial match."""
    payload = unescape(instance)
    if payload in output:
        return True
    return partially_in(payload, output)
def generateOutput(url, gParam, instance, method, type):
    """Build the <xss> XML report fragment for a single injected parameter.

    For GET injections the concrete attack URL is included as <result>.
    """
    payload = str(instance)
    parts = ["<xss>\n\t<method>%s</method>\n\t<url>%s</url>\n\t<parameter name='%s'>%s</parameter>\n\t<type name='XSS Injection Type'>%s</type>" % (method, url, gParam, payload, type)]
    if method in ("get", "GET"):
        # print the real URL that triggers the injection
        parts.append("\n\t<result>%s</result>" % (url + "?" + gParam + "=" + single_urlencode(payload)))
    parts.append("\n</xss>\n")
    return "".join(parts)
def generateOutputLong(url, urlString, method, type, allParams=None):
    """Build the <xss> XML report fragment for a multi-parameter attack.

    For GET attacks the full attack URL (url + "?" + urlString) is reported
    as <result>; for any other method each injected parameter/value pair
    from *allParams* is listed under <parameters> instead.

    Args:
        url: base URL that was attacked.
        urlString: pre-encoded query string (only used for GET attacks).
        method: HTTP method name, e.g. "GET" or "POST".
        type: name of the XSS injection type being reported.
        allParams: mapping of parameter name -> injected value (non-GET).

    Returns:
        The XML fragment as a string.
    """
    # BUGFIX: the original signature used a mutable default (allParams={});
    # it was never mutated here, but None-plus-fallback is the safe idiom.
    if allParams is None:
        allParams = {}
    astr = "<xss>\n\t<method>%s</method>\n\t<url>%s</url>\n\t<type name='XSS Injection Type'>%s</type>" % (method, url, type)
    if method in ("get", "GET"):
        # print the real URL
        astr += "\n\t<result>%s</result>" % (url + "?" + urlString)
    else:
        astr += "\n\t<parameters>"
        for k in allParams:
            astr += "\n\t\t<parameter name='%s'>%s</parameter>" % (k, allParams[k])
        astr += "\n\t</parameters>"
    astr += "\n</xss>\n"
    return astr
def permutations(L):
    """Yield every permutation of list *L* as a new list.

    An empty input yields nothing; recursion builds each permutation by
    inserting the head element into every position of each tail permutation.
    """
    if len(L) == 1:
        yield [L[0]]
    elif len(L) >= 2:
        head, tail = L[0:1], L[1:]
        for sub in permutations(tail):
            for pos in range(len(sub) + 1):
                yield sub[:pos] + head + sub[pos:]
def process(urlGlobal, database, attack_list):
plop = open('results/xss_GrabberAttacks.xml','w')
plop.write("<xssAttacks>\n")
for u in database.keys():
if len(database[u]['GET']):
print "Method = GET ", u
for gParam in database[u]['GET']:
for typeOfInjection in attack_list:
for instance in attack_list[typeOfInjection]:
if instance != "See Below":
handle = getContent_GET(u,gParam,instance)
if handle != None:
output = handle.read()
header = handle.info()
if detect_xss(str(instance),output):
# generate the info...
plop.write(generateOutput(u,gParam,instance,"GET",typeOfInjection))
# see the permutations
if len(database[u]['GET'].keys()) > 1:
for typeOfInjection in attack_list:
for instance in attack_list[typeOfInjection]:
url = ""
for gParam in database[u]['GET']:
url += ("%s=%s&" % (gParam, single_urlencode(str(instance))))
handle = getContentDirectURL_GET(u,url)
if handle != None:
output = handle.read()
if detect_xss(str(instance),output):
# generate the info...
plop.write(generateOutputLong(u,url,"GET",typeOfInjection))
if len(database[u]['POST']):
print "Method = POST ", u
for gParam in database[u]['POST']:
for typeOfInjection in attack_list:
for instance in attack_list[typeOfInjection]:
if instance != "See Below":
handle = getContent_POST(u,gParam,instance)
if handle != None:
output = handle.read()
header = handle.info()
if detect_xss(str(instance),output):
# generate the info...
plop.write(generateOutput(u,gParam,instance,"POST",typeOfInjection))
# see the permutations
if len(database[u]['POST'].keys()) > 1:
for typeOfInjection in attack_list:
for instance in attack_list[typeOfInjection]:
allParams = {}
for gParam in database[u]['POST']:
allParams[gParam] = str(instance)
handle = getContentDirectURL_POST(u,allParams)
if handle != None:
output = handle.read()
if detect_xss(str(instance), output):
# generate the info...
plop.write(generateOutputLong(u,url,"POST",typeOfInjection, allParams))
plop.write("\n</xssAttacks>\n")
plop.close()
return "" |
sakurahilljp/dojo | refs/heads/master | Facade/amplifier.py | 1 | # -*- coding: utf-8 -*-
class Amplifier(object):
    """Toy amplifier subsystem (Python 2): tracks a volume level and logs
    every operation to stdout; typically driven through a Facade."""
    def __init__(self):
        # Volume always starts at the fixed default of 10.
        self.__volume = 10
        print 'Initializing %s' % self
        print ' default volume is %d' % self.__volume
    def __repr__(self):
        return 'Amplifier()'
    def power_on(self):
        """Switch the amplifier on (log only; no state change)."""
        print '%s : power on' % (self,)
    def power_off(self):
        """Switch the amplifier off (log only; no state change)."""
        print '%s : power off' % (self,)
    def volume_up(self, step=1):
        """Raise the volume by *step* (no upper bound is enforced)."""
        self.__volume += step
        print '%s : volume is up to %d' % (self, self.__volume)
    def volume_down(self, step=1):
        """Lower the volume by *step* (no lower bound is enforced)."""
        self.__volume -= step
        print '%s : volume is down to %d' % (self, self.__volume)
if __name__ == '__main__':
    # Demo: exercise each Amplifier operation exactly once.
    device = Amplifier()
    device.power_on()
    device.power_off()
    device.volume_up(10)
    device.volume_down(5)
|
jelugbo/tundex | refs/heads/master | lms/djangoapps/shoppingcart/processors/tests/__init__.py | 12133432 | |
tastynoodle/django | refs/heads/master | tests/admin_docs/__init__.py | 12133432 | |
willthames/ansible | refs/heads/devel | lib/ansible/modules/network/panos/panos_loadcfg.py | 78 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_loadcfg
short_description: load configuration on PAN-OS device
description:
- Load configuration on PAN-OS device
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
file:
description:
- configuration file to load
required: false
default: None
commit:
description:
- commit if changed
required: false
default: true
'''
EXAMPLES = '''
# Import and load config file from URL
- name: import configuration
panos_import:
ip_address: "192.168.1.1"
password: "admin"
url: "{{ConfigURL}}"
category: "configuration"
register: result
- name: load configuration
panos_loadcfg:
ip_address: "192.168.1.1"
password: "admin"
file: "{{result.filename}}"
'''
RETURN='''
# Default return values
'''
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
def load_cfgfile(xapi, module, ip_address, file_):
    """Load a previously imported configuration file on the device.

    Issues a `load config from <file>` operational command via the
    given pan.xapi handle and reports a change.

    NOTE(review): `module` and `ip_address` are unused here but are kept
    for interface compatibility with the caller.
    """
    command = '<load><config><from>{0}</from></config></load>'.format(file_)
    xapi.op(cmd=command)
    return True
def main():
    """Ansible entry point: parse module args, load the config file,
    optionally commit, and exit with the change status."""
    argument_spec = dict(
        ip_address=dict(required=True),
        password=dict(required=True, no_log=True),
        username=dict(default='admin'),
        file=dict(),
        commit=dict(type='bool', default=True)
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    # pan-python is an optional import at module scope; fail cleanly here.
    if not HAS_LIB:
        module.fail_json(msg='pan-python is required for this module')
    ip_address = module.params["ip_address"]
    password = module.params["password"]
    username = module.params['username']
    file_ = module.params['file']
    commit = module.params['commit']
    # One XML-API session per invocation.
    xapi = pan.xapi.PanXapi(
        hostname=ip_address,
        api_username=username,
        api_password=password
    )
    # load_cfgfile always returns True, so `changed` is effectively
    # "the load command did not raise".
    changed = load_cfgfile(xapi, module, ip_address, file_)
    if changed and commit:
        # Synchronous commit: poll every second until the job finishes.
        xapi.commit(cmd="<commit></commit>", sync=True, interval=1)
    module.exit_json(changed=changed, msg="okey dokey")
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
uranusjr/django | refs/heads/master | django/contrib/redirects/middleware.py | 110 | from django.apps import apps
from django.conf import settings
from django.contrib.redirects.models import Redirect
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponseGone, HttpResponsePermanentRedirect
from django.utils.deprecation import MiddlewareMixin
class RedirectFallbackMiddleware(MiddlewareMixin):
    """Rewrite 404 responses using the Redirect table for the current site.

    A matching Redirect with an empty new_path yields a 410 Gone; any
    other match yields a permanent redirect to new_path.
    """

    # Class-level attributes so subclasses can swap the response types.
    response_gone_class = HttpResponseGone
    response_redirect_class = HttpResponsePermanentRedirect

    def __init__(self, get_response=None):
        # The redirects app is useless without the sites framework.
        if not apps.is_installed('django.contrib.sites'):
            raise ImproperlyConfigured(
                "You cannot use RedirectFallbackMiddleware when "
                "django.contrib.sites is not installed."
            )
        super().__init__(get_response)

    def process_response(self, request, response):
        # Only 404s are candidates for a fallback redirect.
        if response.status_code != 404:
            return response

        site = get_current_site(request)

        # Try the path as requested; optionally retry with a trailing
        # slash, mirroring APPEND_SLASH behaviour.
        candidate_paths = [request.get_full_path()]
        if settings.APPEND_SLASH and not request.path.endswith('/'):
            candidate_paths.append(request.get_full_path(force_append_slash=True))

        redirect = None
        for old_path in candidate_paths:
            try:
                redirect = Redirect.objects.get(site=site, old_path=old_path)
                break
            except Redirect.DoesNotExist:
                continue

        if redirect is None:
            # No redirect was found. Return the response.
            return response
        if redirect.new_path == '':
            return self.response_gone_class()
        return self.response_redirect_class(redirect.new_path)
|
tasercake/Crypto_Algotrader | refs/heads/master | crizzle/services/base/__init__.py | 1 | from crizzle.services.base.service import Service
from crizzle.services.base.error import EnvironmentException
|
DrDrake/osgocean | refs/heads/master | scripts/convert_shaders.py | 8 | # This script is use to convert the shaders found
# within the resources/shaders directory to the native
# .inl format for compiling into the osgOcean source.
# After conversion, it will copy the .inl files into the
# include/osgOcean/shaders/ directory.
import os
import time
###############################################
def readCopyright():
    """Return the lines of 'copyright_notice.txt' (newlines preserved).

    Uses a context manager so the file handle is closed even if
    readlines() raises (the original leaked the handle in that case).
    """
    with open("copyright_notice.txt", "r") as copyrightFile:
        return copyrightFile.readlines()
###############################################
def shaderVarName( shaderName ):
    """Turn a shader file name into a C identifier ('.' becomes '_')."""
    return shaderName.replace(".", "_")
###############################################
def isCurrent( shader, header ):
    """Return True when `header` exists and is strictly newer than `shader`.

    A missing header, or a header whose mtime is not later than the
    shader's, means the header needs regenerating (False).
    """
    if not os.path.isfile(header):
        return False
    return os.path.getmtime(shader) < os.path.getmtime(header)
###############################################
def createInlShader( shaderFile, shaderVar, headerFile, copyright_lines=None ):
    """Convert a GLSL shader source file into a .inl C-string header.

    Parameters:
        shaderFile: path of the shader source to read.
        shaderVar: name of the generated `static const char[]` variable.
        headerFile: path of the .inl file to write.
        copyright_lines: optional list of header lines to prepend;
            defaults to the module-level `copyright` read at script
            start (keeps the original call sites working unchanged).

    Both files are opened with `with` so the handles are always closed
    (the original leaked them on error and relied on explicit close).
    """
    if copyright_lines is None:
        copyright_lines = copyright  # module global, set before the main loop

    with open(shaderFile, 'r') as src:
        lines = src.readlines()

    with open(headerFile, "w") as oFile:
        for line in copyright_lines:
            oFile.write(line)

        oFile.write("\nstatic const char " + shaderVar + "[] =")

        for line in lines:
            # Strip real line endings, then embed an escaped '\n' so the
            # generated C string keeps the shader's line structure.
            newLine = line.replace("\n", "").replace("\r", "")
            oFile.write('\n\t"' + newLine + '\\n"')

        oFile.write(";\n")
##############################################
# Interactive entry point: confirm with the user, then regenerate any
# .inl headers whose shader source is newer than the existing header.
print("\nThis script is used to convert the osgOcean shaders")
print("found within the resources/shaders directory to the")
print("native .inl format for compiling into the osgOcean")
print("source.\n")
print("Once converted, the .inl files will be copied into the")
print("include/osgOcean/shaders/ directory overwriting")
print("existing files.\n")
# NOTE(review): raw_input is Python-2 only; the rest of the script uses
# py3-compatible print calls — confirm the intended interpreter.
confirm = raw_input("Continue? [y/n]: ")
if confirm == 'n' or confirm == 'N':
    exit()
# Paths are relative to the scripts/ directory this file lives in.
shaderPath = "../resources/shaders/"
headerPath = "../include/osgOcean/shaders/"
shaderList = os.listdir( shaderPath )
skipped = 0
created = 0
print("\nProcessing shader files")
print("--------------------------------\n")
# Module-level copyright lines consumed by createInlShader().
copyright = readCopyright()
for shader in shaderList:
    # Only osgOcean_*.vert / *.frag files are converted.
    if shader.find("osgOcean_") > -1:
        if shader.rfind(".vert") > -1 or shader.rfind(".frag") > -1:
            sVar = shaderVarName(shader)
            hName = sVar + ".inl"
            hFile = headerPath + hName
            sFile = shaderPath + shader
            # Skip headers that are already newer than their source.
            if isCurrent(sFile,hFile) == True:
                skipped += 1
                print("[skipped] " + sVar)
            else:
                createInlShader( sFile, sVar, hFile )
                created += 1
                print("[CREATED] " + sVar )
print("\n--------------------------------")
print(str(created)+"\tCreated")
print(str(skipped)+"\tUp to date")
print(str(skipped+created)+"\tTotal")
print("--------------------------------")
|
elingg/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/barrier_ops_test.py | 56 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for barrier ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
class BarrierTest(test.TestCase):
  """Tests for data_flow_ops.Barrier.

  Covers construction, insert_many/take_many (including partial and
  parallel inserts), close/cancel semantics, small-batch takes, and
  shared-name compatibility checks.
  """

  def testConstructorWithShapes(self):
    # The barrier op's NodeDef should carry the component types/shapes.
    with ops.Graph().as_default():
      b = data_flow_ops.Barrier(
          (dtypes.float32, dtypes.float32),
          shapes=((1, 2, 3), (8,)),
          shared_name="B",
          name="B")
      self.assertTrue(isinstance(b.barrier_ref, ops.Tensor))
      self.assertProtoEquals("""
        name:'B' op:'Barrier'
        attr {
          key: "capacity"
          value {
            i: -1
          }
        }
        attr { key: 'component_types'
               value { list { type: DT_FLOAT type: DT_FLOAT } } }
        attr {
          key: 'shapes'
          value {
            list {
              shape {
                dim { size: 1 } dim { size: 2 } dim { size: 3 }
              }
              shape {
                dim { size: 8 }
              }
            }
          }
        }
        attr { key: 'container' value { s: "" } }
        attr { key: 'shared_name' value: { s: 'B' } }
        """, b.barrier_ref.op.node_def)

  def testInsertMany(self):
    # An element only becomes "ready" once every component is inserted.
    with self.test_session():
      b = data_flow_ops.Barrier(
          (dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
      size_t = b.ready_size()
      self.assertEqual([], size_t.get_shape())
      keys = [b"a", b"b", b"c"]
      insert_0_op = b.insert_many(0, keys, [10.0, 20.0, 30.0])
      insert_1_op = b.insert_many(1, keys, [100.0, 200.0, 300.0])
      self.assertEquals(size_t.eval(), [0])
      insert_0_op.run()
      self.assertEquals(size_t.eval(), [0])
      insert_1_op.run()
      self.assertEquals(size_t.eval(), [3])

  def testInsertManyEmptyTensor(self):
    with self.test_session():
      error_message = ("Empty tensors are not supported, but received shape "
                       r"\'\(0,\)\' at index 1")
      with self.assertRaisesRegexp(ValueError, error_message):
        data_flow_ops.Barrier(
            (dtypes.float32, dtypes.float32), shapes=((1,), (0,)), name="B")

  def testInsertManyEmptyTensorUnknown(self):
    with self.test_session():
      b = data_flow_ops.Barrier((dtypes.float32, dtypes.float32), name="B")
      size_t = b.ready_size()
      self.assertEqual([], size_t.get_shape())
      keys = [b"a", b"b", b"c"]
      insert_0_op = b.insert_many(0, keys, np.array([[], [], []], np.float32))
      self.assertEquals(size_t.eval(), [0])
      with self.assertRaisesOpError(
          ".*Tensors with no elements are not supported.*"):
        insert_0_op.run()

  def testTakeMany(self):
    with self.test_session() as sess:
      b = data_flow_ops.Barrier(
          (dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
      size_t = b.ready_size()
      keys = [b"a", b"b", b"c"]
      values_0 = [10.0, 20.0, 30.0]
      values_1 = [100.0, 200.0, 300.0]
      insert_0_op = b.insert_many(0, keys, values_0)
      insert_1_op = b.insert_many(1, keys, values_1)
      take_t = b.take_many(3)
      insert_0_op.run()
      insert_1_op.run()
      self.assertEquals(size_t.eval(), [3])
      indices_val, keys_val, values_0_val, values_1_val = sess.run(
          [take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
      # Indices start at int64 minimum (-2**63) and count upwards.
      self.assertAllEqual(indices_val, [-2**63] * 3)
      for k, v0, v1 in zip(keys, values_0, values_1):
        idx = keys_val.tolist().index(k)
        self.assertEqual(values_0_val[idx], v0)
        self.assertEqual(values_1_val[idx], v1)

  def testTakeManySmallBatch(self):
    # allow_small_batch=True lets take_many return fewer elements once
    # the barrier is closed.
    with self.test_session() as sess:
      b = data_flow_ops.Barrier(
          (dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
      size_t = b.ready_size()
      size_i = b.incomplete_size()
      keys = [b"a", b"b", b"c", b"d"]
      values_0 = [10.0, 20.0, 30.0, 40.0]
      values_1 = [100.0, 200.0, 300.0, 400.0]
      insert_0_op = b.insert_many(0, keys, values_0)
      # Split adding of the second component into two independent operations.
      # After insert_1_1_op, we'll have two ready elements in the barrier,
      # 2 will still be incomplete.
      insert_1_1_op = b.insert_many(1, keys[0:2], values_1[0:2])  # add "a", "b"
      insert_1_2_op = b.insert_many(1, keys[2:3], values_1[2:3])  # add "c"
      insert_1_3_op = b.insert_many(1, keys[3:], values_1[3:])  # add "d"
      insert_empty_op = b.insert_many(0, [], [])
      close_op = b.close()
      close_op_final = b.close(cancel_pending_enqueues=True)
      index_t, key_t, value_list_t = b.take_many(3, allow_small_batch=True)
      insert_0_op.run()
      insert_1_1_op.run()
      close_op.run()
      # Now we have a closed barrier with 2 ready elements. Running take_t
      # should return a reduced batch with 2 elements only.
      self.assertEquals(size_i.eval(), [2])  # assert that incomplete size = 2
      self.assertEquals(size_t.eval(), [2])  # assert that ready size = 2
      _, keys_val, values_0_val, values_1_val = sess.run(
          [index_t, key_t, value_list_t[0], value_list_t[1]])
      # Check that correct values have been returned.
      for k, v0, v1 in zip(keys[0:2], values_0[0:2], values_1[0:2]):
        idx = keys_val.tolist().index(k)
        self.assertEqual(values_0_val[idx], v0)
        self.assertEqual(values_1_val[idx], v1)
      # The next insert completes the element with key "c". The next take_t
      # should return a batch with just 1 element.
      insert_1_2_op.run()
      self.assertEquals(size_i.eval(), [1])  # assert that incomplete size = 1
      self.assertEquals(size_t.eval(), [1])  # assert that ready size = 1
      _, keys_val, values_0_val, values_1_val = sess.run(
          [index_t, key_t, value_list_t[0], value_list_t[1]])
      # Check that correct values have been returned.
      for k, v0, v1 in zip(keys[2:3], values_0[2:3], values_1[2:3]):
        idx = keys_val.tolist().index(k)
        self.assertEqual(values_0_val[idx], v0)
        self.assertEqual(values_1_val[idx], v1)
      # Adding nothing ought to work, even if the barrier is closed.
      insert_empty_op.run()
      # currently keys "a" and "b" are not in the barrier, adding them
      # again after it has been closed, ought to cause failure.
      with self.assertRaisesOpError("is closed"):
        insert_1_1_op.run()
      close_op_final.run()
      # These ops should fail because the barrier has now been closed with
      # cancel_pending_enqueues = True.
      with self.assertRaisesOpError("is closed"):
        insert_empty_op.run()
      with self.assertRaisesOpError("is closed"):
        insert_1_3_op.run()

  def testUseBarrierWithShape(self):
    with self.test_session() as sess:
      b = data_flow_ops.Barrier(
          (dtypes.float32, dtypes.float32), shapes=((2, 2), (8,)), name="B")
      size_t = b.ready_size()
      keys = [b"a", b"b", b"c"]
      values_0 = np.array(
          [[[10.0] * 2] * 2, [[20.0] * 2] * 2, [[30.0] * 2] * 2], np.float32)
      values_1 = np.array([[100.0] * 8, [200.0] * 8, [300.0] * 8], np.float32)
      insert_0_op = b.insert_many(0, keys, values_0)
      insert_1_op = b.insert_many(1, keys, values_1)
      take_t = b.take_many(3)
      insert_0_op.run()
      insert_1_op.run()
      self.assertEquals(size_t.eval(), [3])
      indices_val, keys_val, values_0_val, values_1_val = sess.run(
          [take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
      self.assertAllEqual(indices_val, [-2**63] * 3)
      self.assertShapeEqual(keys_val, take_t[1])
      self.assertShapeEqual(values_0_val, take_t[2][0])
      self.assertShapeEqual(values_1_val, take_t[2][1])
      for k, v0, v1 in zip(keys, values_0, values_1):
        idx = keys_val.tolist().index(k)
        self.assertAllEqual(values_0_val[idx], v0)
        self.assertAllEqual(values_1_val[idx], v1)

  def testParallelInsertMany(self):
    with self.test_session() as sess:
      b = data_flow_ops.Barrier(dtypes.float32, shapes=())
      size_t = b.ready_size()
      keys = [str(x).encode("ascii") for x in range(10)]
      values = [float(x) for x in range(10)]
      insert_ops = [b.insert_many(0, [k], [v]) for k, v in zip(keys, values)]
      take_t = b.take_many(10)
      sess.run(insert_ops)
      self.assertEquals(size_t.eval(), [10])
      indices_val, keys_val, values_val = sess.run(
          [take_t[0], take_t[1], take_t[2][0]])
      self.assertAllEqual(indices_val, [-2**63 + x for x in range(10)])
      for k, v in zip(keys, values):
        idx = keys_val.tolist().index(k)
        self.assertEqual(values_val[idx], v)

  def testParallelTakeMany(self):
    with self.test_session() as sess:
      b = data_flow_ops.Barrier(dtypes.float32, shapes=())
      size_t = b.ready_size()
      keys = [str(x).encode("ascii") for x in range(10)]
      values = [float(x) for x in range(10)]
      insert_op = b.insert_many(0, keys, values)
      take_t = [b.take_many(1) for _ in keys]
      insert_op.run()
      self.assertEquals(size_t.eval(), [10])
      index_fetches = []
      key_fetches = []
      value_fetches = []
      for ix_t, k_t, v_t in take_t:
        index_fetches.append(ix_t)
        key_fetches.append(k_t)
        value_fetches.append(v_t[0])
      vals = sess.run(index_fetches + key_fetches + value_fetches)
      index_vals = vals[:len(keys)]
      key_vals = vals[len(keys):2 * len(keys)]
      value_vals = vals[2 * len(keys):]
      taken_elems = []
      for k, v in zip(key_vals, value_vals):
        taken_elems.append((k[0], v[0]))
      self.assertAllEqual(np.hstack(index_vals), [-2**63] * 10)
      self.assertItemsEqual(
          zip(keys, values), [(k[0], v[0]) for k, v in zip(key_vals, value_vals)])

  def testBlockingTakeMany(self):
    # take_many must block until all inserts complete the elements.
    with self.test_session() as sess:
      b = data_flow_ops.Barrier(dtypes.float32, shapes=())
      keys = [str(x).encode("ascii") for x in range(10)]
      values = [float(x) for x in range(10)]
      insert_ops = [b.insert_many(0, [k], [v]) for k, v in zip(keys, values)]
      take_t = b.take_many(10)

      def take():
        indices_val, keys_val, values_val = sess.run(
            [take_t[0], take_t[1], take_t[2][0]])
        self.assertAllEqual(indices_val,
                            [int(x.decode("ascii")) - 2**63 for x in keys_val])
        self.assertItemsEqual(zip(keys, values), zip(keys_val, values_val))

      t = self.checkedThread(target=take)
      t.start()
      time.sleep(0.1)
      for insert_op in insert_ops:
        insert_op.run()
      t.join()

  def testParallelInsertManyTakeMany(self):
    with self.test_session() as sess:
      b = data_flow_ops.Barrier(
          (dtypes.float32, dtypes.int64), shapes=((), (2,)))
      num_iterations = 100
      keys = [str(x) for x in range(10)]
      values_0 = np.asarray(range(10), dtype=np.float32)
      values_1 = np.asarray([[x + 1, x + 2] for x in range(10)], dtype=np.int64)
      keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
      insert_0_ops = [
          b.insert_many(0, keys_i(i), values_0 + i)
          for i in range(num_iterations)
      ]
      insert_1_ops = [
          b.insert_many(1, keys_i(i), values_1 + i)
          for i in range(num_iterations)
      ]
      take_ops = [b.take_many(10) for _ in range(num_iterations)]

      def take(sess, i, taken):
        indices_val, keys_val, values_0_val, values_1_val = sess.run([
            take_ops[i][0], take_ops[i][1], take_ops[i][2][0], take_ops[i][2][1]
        ])
        taken.append({
            "indices": indices_val,
            "keys": keys_val,
            "values_0": values_0_val,
            "values_1": values_1_val
        })

      def insert(sess, i):
        sess.run([insert_0_ops[i], insert_1_ops[i]])

      taken = []

      take_threads = [
          self.checkedThread(
              target=take, args=(sess, i, taken)) for i in range(num_iterations)
      ]
      insert_threads = [
          self.checkedThread(
              target=insert, args=(sess, i)) for i in range(num_iterations)
      ]

      for t in take_threads:
        t.start()
      time.sleep(0.1)
      for t in insert_threads:
        t.start()
      for t in take_threads:
        t.join()
      for t in insert_threads:
        t.join()

      self.assertEquals(len(taken), num_iterations)
      flatten = lambda l: [item for sublist in l for item in sublist]
      all_indices = sorted(flatten([t_i["indices"] for t_i in taken]))
      all_keys = sorted(flatten([t_i["keys"] for t_i in taken]))

      expected_keys = sorted(
          flatten([keys_i(i) for i in range(num_iterations)]))
      expected_indices = sorted(
          flatten([-2**63 + j] * 10 for j in range(num_iterations)))

      self.assertAllEqual(all_indices, expected_indices)
      self.assertAllEqual(all_keys, expected_keys)

      for taken_i in taken:
        outer_indices_from_keys = np.array(
            [int(k.decode("ascii").split(":")[0]) for k in taken_i["keys"]])
        inner_indices_from_keys = np.array(
            [int(k.decode("ascii").split(":")[1]) for k in taken_i["keys"]])
        self.assertAllEqual(taken_i["values_0"],
                            outer_indices_from_keys + inner_indices_from_keys)
        expected_values_1 = np.vstack(
            (1 + outer_indices_from_keys + inner_indices_from_keys,
             2 + outer_indices_from_keys + inner_indices_from_keys)).T
        self.assertAllEqual(taken_i["values_1"], expected_values_1)

  def testClose(self):
    # close() without cancellation still allows pending inserts and
    # takes of completed elements.
    with self.test_session() as sess:
      b = data_flow_ops.Barrier(
          (dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
      size_t = b.ready_size()
      incomplete_t = b.incomplete_size()
      keys = [b"a", b"b", b"c"]
      values_0 = [10.0, 20.0, 30.0]
      values_1 = [100.0, 200.0, 300.0]
      insert_0_op = b.insert_many(0, keys, values_0)
      insert_1_op = b.insert_many(1, keys, values_1)
      close_op = b.close()
      fail_insert_op = b.insert_many(0, ["f"], [60.0])
      take_t = b.take_many(3)
      take_too_many_t = b.take_many(4)

      self.assertEquals(size_t.eval(), [0])
      self.assertEquals(incomplete_t.eval(), [0])
      insert_0_op.run()
      self.assertEquals(size_t.eval(), [0])
      self.assertEquals(incomplete_t.eval(), [3])
      close_op.run()

      # This op should fail because the barrier is closed.
      with self.assertRaisesOpError("is closed"):
        fail_insert_op.run()

      # This op should succeed because the barrier has not cancelled
      # pending enqueues
      insert_1_op.run()
      self.assertEquals(size_t.eval(), [3])
      self.assertEquals(incomplete_t.eval(), [0])

      # This op should fail because the barrier is closed.
      with self.assertRaisesOpError("is closed"):
        fail_insert_op.run()

      # This op should fail because we requested more elements than are
      # available in incomplete + ready queue.
      with self.assertRaisesOpError(r"is closed and has insufficient elements "
                                    r"\(requested 4, total size 3\)"):
        sess.run(take_too_many_t[0])  # Sufficient to request just the indices

      # This op should succeed because there are still completed elements
      # to process.
      indices_val, keys_val, values_0_val, values_1_val = sess.run(
          [take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
      self.assertAllEqual(indices_val, [-2**63] * 3)
      for k, v0, v1 in zip(keys, values_0, values_1):
        idx = keys_val.tolist().index(k)
        self.assertEqual(values_0_val[idx], v0)
        self.assertEqual(values_1_val[idx], v1)

      # This op should fail because there are no more completed elements and
      # the queue is closed.
      with self.assertRaisesOpError("is closed and has insufficient elements"):
        sess.run(take_t[0])

  def testCancel(self):
    # close(cancel_pending_enqueues=True) rejects pending inserts too.
    with self.test_session() as sess:
      b = data_flow_ops.Barrier(
          (dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
      size_t = b.ready_size()
      incomplete_t = b.incomplete_size()
      keys = [b"a", b"b", b"c"]
      values_0 = [10.0, 20.0, 30.0]
      values_1 = [100.0, 200.0, 300.0]
      insert_0_op = b.insert_many(0, keys, values_0)
      insert_1_op = b.insert_many(1, keys[0:2], values_1[0:2])
      insert_2_op = b.insert_many(1, keys[2:], values_1[2:])
      cancel_op = b.close(cancel_pending_enqueues=True)
      fail_insert_op = b.insert_many(0, ["f"], [60.0])
      take_t = b.take_many(2)
      take_too_many_t = b.take_many(3)

      self.assertEquals(size_t.eval(), [0])
      insert_0_op.run()
      insert_1_op.run()
      self.assertEquals(size_t.eval(), [2])
      self.assertEquals(incomplete_t.eval(), [1])
      cancel_op.run()

      # This op should fail because the queue is closed.
      with self.assertRaisesOpError("is closed"):
        fail_insert_op.run()

      # This op should fail because the queue is cancelled.
      with self.assertRaisesOpError("is closed"):
        insert_2_op.run()

      # This op should fail because we requested more elements than are
      # available in incomplete + ready queue.
      with self.assertRaisesOpError(r"is closed and has insufficient elements "
                                    r"\(requested 3, total size 2\)"):
        sess.run(take_too_many_t[0])  # Sufficient to request just the indices

      # This op should succeed because there are still completed elements
      # to process.
      indices_val, keys_val, values_0_val, values_1_val = sess.run(
          [take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
      self.assertAllEqual(indices_val, [-2**63] * 2)
      for k, v0, v1 in zip(keys[0:2], values_0[0:2], values_1[0:2]):
        idx = keys_val.tolist().index(k)
        self.assertEqual(values_0_val[idx], v0)
        self.assertEqual(values_1_val[idx], v1)

      # This op should fail because there are no more completed elements and
      # the queue is closed.
      with self.assertRaisesOpError("is closed and has insufficient elements"):
        sess.run(take_t[0])

  def _testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(self, cancel):
    with self.test_session() as sess:
      b = data_flow_ops.Barrier(
          (dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
      take_t = b.take_many(1, allow_small_batch=True)
      sess.run(b.close(cancel))
      with self.assertRaisesOpError("is closed and has insufficient elements"):
        sess.run(take_t)

  def testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(self):
    self._testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(cancel=False)
    self._testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(cancel=True)

  def _testParallelInsertManyTakeManyCloseHalfwayThrough(self, cancel):
    # Close the barrier while half the inserts are still outstanding;
    # outstanding takes either complete or raise OutOfRange/Cancelled.
    with self.test_session() as sess:
      b = data_flow_ops.Barrier(
          (dtypes.float32, dtypes.int64), shapes=((), (2,)))
      num_iterations = 50
      keys = [str(x) for x in range(10)]
      values_0 = np.asarray(range(10), dtype=np.float32)
      values_1 = np.asarray([[x + 1, x + 2] for x in range(10)], dtype=np.int64)
      keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
      insert_0_ops = [
          b.insert_many(0, keys_i(i), values_0 + i)
          for i in range(num_iterations)
      ]
      insert_1_ops = [
          b.insert_many(1, keys_i(i), values_1 + i)
          for i in range(num_iterations)
      ]
      take_ops = [b.take_many(10) for _ in range(num_iterations)]
      close_op = b.close(cancel_pending_enqueues=cancel)

      def take(sess, i, taken):
        try:
          indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run([
              take_ops[i][0], take_ops[i][1], take_ops[i][2][0],
              take_ops[i][2][1]
          ])
          taken.append(len(indices_val))
        except errors_impl.OutOfRangeError:
          taken.append(0)

      def insert(sess, i):
        try:
          sess.run([insert_0_ops[i], insert_1_ops[i]])
        except errors_impl.CancelledError:
          pass

      taken = []

      take_threads = [
          self.checkedThread(
              target=take, args=(sess, i, taken)) for i in range(num_iterations)
      ]
      insert_threads = [
          self.checkedThread(
              target=insert, args=(sess, i)) for i in range(num_iterations)
      ]

      first_half_insert_threads = insert_threads[:num_iterations // 2]
      second_half_insert_threads = insert_threads[num_iterations // 2:]

      for t in take_threads:
        t.start()
      for t in first_half_insert_threads:
        t.start()
      for t in first_half_insert_threads:
        t.join()

      close_op.run()

      for t in second_half_insert_threads:
        t.start()
      for t in take_threads:
        t.join()
      for t in second_half_insert_threads:
        t.join()

      self.assertEqual(
          sorted(taken),
          [0] * (num_iterations // 2) + [10] * (num_iterations // 2))

  def testParallelInsertManyTakeManyCloseHalfwayThrough(self):
    self._testParallelInsertManyTakeManyCloseHalfwayThrough(cancel=False)

  def testParallelInsertManyTakeManyCancelHalfwayThrough(self):
    self._testParallelInsertManyTakeManyCloseHalfwayThrough(cancel=True)

  def _testParallelPartialInsertManyTakeManyCloseHalfwayThrough(self, cancel):
    # Only component 0 is inserted before close; component 1 inserts
    # arrive after close, so the behaviour depends on `cancel`.
    with self.test_session() as sess:
      b = data_flow_ops.Barrier(
          (dtypes.float32, dtypes.int64), shapes=((), (2,)))
      num_iterations = 100
      keys = [str(x) for x in range(10)]
      values_0 = np.asarray(range(10), dtype=np.float32)
      values_1 = np.asarray([[x + 1, x + 2] for x in range(10)], dtype=np.int64)
      keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
      insert_0_ops = [
          b.insert_many(
              0, keys_i(i), values_0 + i, name="insert_0_%d" % i)
          for i in range(num_iterations)
      ]
      close_op = b.close(cancel_pending_enqueues=cancel)
      take_ops = [
          b.take_many(
              10, name="take_%d" % i) for i in range(num_iterations)
      ]
      # insert_1_ops will only run after closure
      insert_1_ops = [
          b.insert_many(
              1, keys_i(i), values_1 + i, name="insert_1_%d" % i)
          for i in range(num_iterations)
      ]

      def take(sess, i, taken):
        if cancel:
          try:
            indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run(
                [
                    take_ops[i][0], take_ops[i][1], take_ops[i][2][0],
                    take_ops[i][2][1]
                ])
            taken.append(len(indices_val))
          except errors_impl.OutOfRangeError:
            taken.append(0)
        else:
          indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run([
              take_ops[i][0], take_ops[i][1], take_ops[i][2][0],
              take_ops[i][2][1]
          ])
          taken.append(len(indices_val))

      def insert_0(sess, i):
        insert_0_ops[i].run(session=sess)

      def insert_1(sess, i):
        if cancel:
          try:
            insert_1_ops[i].run(session=sess)
          except errors_impl.CancelledError:
            pass
        else:
          insert_1_ops[i].run(session=sess)

      taken = []

      take_threads = [
          self.checkedThread(
              target=take, args=(sess, i, taken)) for i in range(num_iterations)
      ]
      insert_0_threads = [
          self.checkedThread(
              target=insert_0, args=(sess, i)) for i in range(num_iterations)
      ]
      insert_1_threads = [
          self.checkedThread(
              target=insert_1, args=(sess, i)) for i in range(num_iterations)
      ]

      for t in insert_0_threads:
        t.start()
      for t in insert_0_threads:
        t.join()
      for t in take_threads:
        t.start()

      close_op.run()

      for t in insert_1_threads:
        t.start()
      for t in take_threads:
        t.join()
      for t in insert_1_threads:
        t.join()

      if cancel:
        self.assertEqual(taken, [0] * num_iterations)
      else:
        self.assertEqual(taken, [10] * num_iterations)

  def testParallelPartialInsertManyTakeManyCloseHalfwayThrough(self):
    self._testParallelPartialInsertManyTakeManyCloseHalfwayThrough(cancel=False)

  def testParallelPartialInsertManyTakeManyCancelHalfwayThrough(self):
    self._testParallelPartialInsertManyTakeManyCloseHalfwayThrough(cancel=True)

  def testIncompatibleSharedBarrierErrors(self):
    # Re-opening a shared barrier with different types/shapes must fail.
    with self.test_session():
      # Do component types and shapes.
      b_a_1 = data_flow_ops.Barrier(
          (dtypes.float32,), shapes=(()), shared_name="b_a")
      b_a_2 = data_flow_ops.Barrier(
          (dtypes.int32,), shapes=(()), shared_name="b_a")
      b_a_1.barrier_ref.eval()
      with self.assertRaisesOpError("component types"):
        b_a_2.barrier_ref.eval()

      b_b_1 = data_flow_ops.Barrier(
          (dtypes.float32,), shapes=(()), shared_name="b_b")
      b_b_2 = data_flow_ops.Barrier(
          (dtypes.float32, dtypes.int32), shapes=((), ()), shared_name="b_b")
      b_b_1.barrier_ref.eval()
      with self.assertRaisesOpError("component types"):
        b_b_2.barrier_ref.eval()

      b_c_1 = data_flow_ops.Barrier(
          (dtypes.float32, dtypes.float32),
          shapes=((2, 2), (8,)),
          shared_name="b_c")
      b_c_2 = data_flow_ops.Barrier(
          (dtypes.float32, dtypes.float32), shared_name="b_c")
      b_c_1.barrier_ref.eval()
      with self.assertRaisesOpError("component shapes"):
        b_c_2.barrier_ref.eval()

      b_d_1 = data_flow_ops.Barrier(
          (dtypes.float32, dtypes.float32), shapes=((), ()), shared_name="b_d")
      b_d_2 = data_flow_ops.Barrier(
          (dtypes.float32, dtypes.float32),
          shapes=((2, 2), (8,)),
          shared_name="b_d")
      b_d_1.barrier_ref.eval()
      with self.assertRaisesOpError("component shapes"):
        b_d_2.barrier_ref.eval()

      b_e_1 = data_flow_ops.Barrier(
          (dtypes.float32, dtypes.float32),
          shapes=((2, 2), (8,)),
          shared_name="b_e")
      b_e_2 = data_flow_ops.Barrier(
          (dtypes.float32, dtypes.float32),
          shapes=((2, 5), (8,)),
          shared_name="b_e")
      b_e_1.barrier_ref.eval()
      with self.assertRaisesOpError("component shapes"):
        b_e_2.barrier_ref.eval()
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
ryukiri/android_kernel_samsung_kona | refs/heads/master | scripts/rt-tester/rt-tester.py | 11005 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
# quiet: suppress progress output; test: syntax-check only (no sysfs
# writes); comments: echo test-file comments after the first command.
quiet = 0
test = 0
comments = 0
# sysfs paths of the kernel rt-mutex tester devices; per-thread status
# and command files hang off sysfsprefix + thread id.
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
# Maps symbolic command names from the test file to the numeric opcode
# strings written to the kernel's command file.
cmd_opcodes = {
    "schedother"    : "1",
    "schedfifo"     : "2",
    "lock"          : "3",
    "locknowait"    : "4",
    "lockint"       : "5",
    "lockintnowait" : "6",
    "lockcont"      : "7",
    "unlock"        : "8",
    "signal"        : "11",
    "resetevent"    : "98",
    "reset"         : "99",
    }
# Maps symbolic test names to [status-field, comparison, fixed-argument]
# triples consumed by analyse(); a None argument means the value comes
# from the test line itself.
test_opcodes = {
    "prioeq"        : ["P" , "eq" , None],
    "priolt"        : ["P" , "lt" , None],
    "priogt"        : ["P" , "gt" , None],
    "nprioeq"       : ["N" , "eq" , None],
    "npriolt"       : ["N" , "lt" , None],
    "npriogt"       : ["N" , "gt" , None],
    "unlocked"      : ["M" , "eq" , 0],
    "trylock"       : ["M" , "eq" , 1],
    "blocked"       : ["M" , "eq" , 2],
    "blockedwake"   : ["M" , "eq" , 3],
    "locked"        : ["M" , "eq" , 4],
    "opcodeeq"      : ["O" , "eq" , None],
    "opcodelt"      : ["O" , "lt" , None],
    "opcodegt"      : ["O" , "gt" , None],
    "eventeq"       : ["E" , "eq" , None],
    "eventlt"       : ["E" , "lt" , None],
    "eventgt"       : ["E" , "gt" , None],
    }
# Print usage information
def usage():
    # Plain usage dump to stdout.  Kept as Python 2 print statements to match
    # the rest of this Python 2 script.
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c display comments after first command"
    print " -h help"
    print " -q quiet mode"
    print " -t test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    # NOTE(review): the parameter shadows the builtin 'str'; harmless here,
    # but worth renaming if this script is ever touched again.
    if not quiet:
        print str
def analyse(val, top, arg):
    """Compare one status value against a test opcode.

    val: raw status value string read from the rttest sysfs status file.
    top: test opcode triple [field, relation, fixed-arg] from test_opcodes.
    arg: argument column of the test line -- a digit position for field "M",
         a (possibly symbolic) opcode for field "O", or a literal number.
    Returns 1 if the comparison holds, 0 otherwise.
    """
    intval = int(val)
    if top[0] == "M":
        # Mutex state: pick out the decimal digit selected by 'arg'.
        # Floor division keeps the original Python 2 integer semantics and
        # is also correct under Python 3 (plain '/' would yield a float).
        intval = intval // (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode field: 'arg' may be a symbolic command name.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)
    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Main interpreter loop of the rt-mutex test script (Python 2).
# Test lines look like  cmd:opcode:threadid:data  and either issue a command
# to the kernel tester ("c") or test/wait on a status value ("t"/"w").

# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    usage()
    sys.exit(1)

# Parse commandline options
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)

# Select the input source
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin

linenr = 0

# Read the test patterns
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break
    line = line.strip()
    parts = line.split(":")
    if not parts or len(parts) < 1:
        continue
    if len(parts[0]) == 0:
        continue
    if parts[0].startswith("#"):
        # Comment line; echo it only in -c mode after the first command.
        if comments > 1:
            progress(line)
        continue
    if comments == 1:
        comments = 2
    progress(line)
    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()
    try:
        # Test or wait for a status value
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]
            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                print fname
                continue
            # "t" checks the status once; "w" polls until it matches.
            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                if query or cmd == "t":
                    break
                progress(" " + status)
            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)
        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()
    except Exception,ex:
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
            sys.exit(1)

# Normal exit pass
print "Pass"
sys.exit(0)
|
ayes/bsmapp | refs/heads/master | allauth/exceptions.py | 91 | class ImmediateHttpResponse(Exception):
"""
This exception is used to interrupt the flow of processing to immediately
return a custom HttpResponse.
"""
    def __init__(self, response):
        # Stash the HttpResponse so the middleware that catches this
        # exception can unwrap and return it directly.
        self.response = response
|
rmmh/kubernetes-test-infra | refs/heads/master | jenkins/docker_diff.py | 10 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Output the differences between two Docker images.
Usage:
python docker_diff.py [--deep=path] <image_1> <image_2>
"""
import argparse
import json
import logging
import os
import shutil
import subprocess
import tarfile
import tempfile
def call(cmd, **kwargs):
    """Log *cmd*, run it with subprocess.call, and return its exit status."""
    pretty = ' '.join(cmd)
    logging.info('exec %s', pretty)
    status = subprocess.call(cmd, **kwargs)
    return status
def check_call(cmd):
    """Log *cmd* and run it; raises CalledProcessError on nonzero exit."""
    pretty = ' '.join(cmd)
    logging.info('exec %s', pretty)
    return subprocess.check_call(cmd)
def dockerfile_layers(tarball):
    '''Given a `docker save` tarball, return the layer metadata in order.

    Each layer's JSON blob lives at "<id>/json" and names its parent layer,
    so the ordered chain is rebuilt by following parent pointers starting
    from the base image (whose parent is absent, i.e. None).
    '''
    by_parent = {}
    for member in tarball.getmembers():
        if not member.name.endswith('/json'):
            continue
        meta = json.load(tarball.extractfile(member))
        by_parent[meta.get('parent')] = meta
    ordered = []
    current = None  # the base image has no parent
    while current in by_parent:
        node = by_parent[current]
        ordered.append(node)
        current = node['id']
    return ordered
def is_whiteout(fname):
    """Return True when *fname* is a whiteout entry (a deletion marker)."""
    if fname.startswith('.wh.'):
        return True
    return '/.wh.' in fname
def extract_layers(tarball, layers, outdir):
    '''Extract docker layers to a specific directory (fake a union mount).

    Layers are applied in order, later layers overwriting earlier ones,
    which approximates Docker's union-filesystem view on plain disk.
    '''
    for layer in layers:
        obj = tarball.extractfile('%s/layer.tar' % layer['id'])
        with tarfile.open(fileobj=obj) as fp:
            # Complication: .wh. files indicate deletions.
            # https://github.com/docker/docker/blob/master/image/spec/v1.md
            members = fp.getmembers()
            members_good = [m for m in members if not is_whiteout(m.name)]
            fp.extractall(outdir, members_good)
            # Each whiteout entry removes the same-named path that an
            # earlier layer created.
            for member in members:
                name = member.name
                if is_whiteout(name):
                    path = os.path.join(outdir, name.replace('.wh.', ''))
                    if os.path.isdir(path):
                        shutil.rmtree(path)
                    elif os.path.exists(path):
                        os.unlink(path)
def docker_diff(image_a, image_b, tmpdir, deep):
    """Diff two docker images.

    Dumps both images with `docker save` into tmpdir, unpacks only the
    layers that differ, and shells out to `diff` to report the changes.
    deep: optional subdirectory for which a full unified diff is shown.
    """
    # dump images for inspection
    tf_a_path = '%s/a.tar' % tmpdir
    tf_b_path = '%s/b.tar' % tmpdir
    check_call(['docker', 'save', '-o', tf_a_path, image_a])
    check_call(['docker', 'save', '-o', tf_b_path, image_b])
    tf_a = tarfile.open(tf_a_path)
    tf_b = tarfile.open(tf_b_path)
    # find layers in order
    layers_a = dockerfile_layers(tf_a)
    layers_b = dockerfile_layers(tf_b)
    # minor optimization: skip identical layers
    common = len(os.path.commonprefix([layers_a, layers_b]))
    tf_a_out = '%s/a' % tmpdir
    tf_b_out = '%s/b' % tmpdir
    extract_layers(tf_a, layers_a[common:], tf_a_out)
    extract_layers(tf_b, layers_b[common:], tf_b_out)
    # actually compare the resulting directories
    # just show whether something changed (OS upgrades change a lot)
    call(['diff', '-qr', 'a', 'b'], cwd=tmpdir)
    if deep:
        # if requested, do a more in-depth content diff as well.
        call([
            'diff', '-rU5',
            os.path.join('a', deep),
            os.path.join('b', deep)],
            cwd=tmpdir)
def main():
    """Run docker_diff: parse CLI args and diff the two named images."""
    logging.basicConfig(level=logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument('--deep', help='Show full differences for specific directory')
    parser.add_argument('image_a')
    parser.add_argument('image_b')
    options = parser.parse_args()
    tmpdir = tempfile.mkdtemp(prefix='docker_diff_')
    try:
        docker_diff(options.image_a, options.image_b, tmpdir, options.deep)
    finally:
        # Always clean up the scratch directory, even on failure.
        shutil.rmtree(tmpdir)

if __name__ == '__main__':
    main()
|
MDS-PBSCB/mds | refs/heads/master | teme/migrations/0001_initial.py | 2 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
    """Initial schema: Teacher, Course (FK -> Teacher), Rating (FK -> Course).

    Rating.grade is constrained to the 1..5 range via validators.
    Auto-generated migration -- operations intentionally left verbatim.
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Course',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Rating',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('grade', models.IntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5)])),
                ('course', models.ForeignKey(to='teme.Course')),
            ],
        ),
        migrations.CreateModel(
            name='Teacher',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.AddField(
            model_name='course',
            name='teacher',
            field=models.ForeignKey(to='teme.Teacher'),
        ),
    ]
|
hooting/show-me-the-code-python | refs/heads/master | ddkangfu/0004/0004.py | 40 | #coding=utf-8
import collections
import re
"""
0004, 任一个英文的纯文本文件,统计其中的单词出现的个数。
"""
def count_word(file_name):
    """Count word frequencies in the text file *file_name*.

    Words are matched case-insensitively with the regex \\w+ (so digits and
    underscores count as word characters, matching the original behaviour).
    Returns a collections.Counter mapping word -> occurrence count.
    """
    word_counter = collections.Counter()
    # 'with' guarantees the handle is closed even if reading raises;
    # the original open()/readline()/close() version leaked it on error.
    with open(file_name) as f:
        for line in f:
            word_counter.update(re.findall(r"\w+", line.lower()))
    return word_counter
if __name__ == '__main__':
    # Demo: count the words of the bundled sample file (Python 2 print).
    print count_word('english.txt')
Alberto-Beralix/Beralix | refs/heads/master | i386-squashfs-root/usr/share/pyshared/gwibber/microblog/uploader/imageshack/chuncked_upload.py | 1 | #!/usr/bin/env python
'''
Client API library for chuncked video uploading to imageshack.us
Using "Streaming upload API" as described here:
http://code.google.com/p/imageshackapi/wiki/StreamingAPI
'''
import os
import urllib
import httplib
import urllib2
from urlparse import urlparse
from os.path import exists
from urlparse import urlsplit
from mimetypes import guess_type
from xml.dom.minidom import parse
from xml.dom.minidom import parseString
BLOCK_SIZE=1024  # bytes sent per chunk in Uploader.upload_range()
SERVER='render.imageshack.us'
PATH='/renderapi'
ENDPOINT='http://'+SERVER+PATH  # base URL of the streaming-upload API
class UploadException(Exception):
    ''' Exceptions of this class are raised for various upload based errors '''
    # Client-side failures: connection problems, local file I/O errors, etc.
    pass
class ServerException(Exception):
    '''Raised when the ImageShack server reports an upload error.

    Carries the server-supplied error code and message so callers can
    inspect them individually as well as via str().
    '''

    def __init__(self, code, message):
        self.code = code
        self.message = message

    def __str__(self):
        return "ServerException:{0}:{1}".format(self.code, self.message)
class Uploader:
    '''Client for the ImageShack streaming ("chunked") upload API.

    Holds the developer key plus optional user credentials/cookie and
    exposes start/resume/upload methods built on upload_range().
    NOTE(review): this class is Python 2 code (print statements, urllib /
    urllib2 / httplib); it must be ported as a whole, not piecemeal.
    '''
    def __init__(self, dev_key, cookie=None, username=None, password=None):
        '''Creates uploader object.
        Args:
        dev_key: developer key (mandatory)
        cookie: imagesack user cookie (optional)
        username,password: imageshack user account credentials (optional)
        '''
        self.cookie = cookie
        self.username = username
        self.password = password
        self.dev_key = dev_key

    def start(self, filename, tags = [], public = None):
        '''Request file upload URL from server.

        tags: list of tags.  NOTE(review): the shared [] default is never
              mutated here so it is harmless, but it is still the classic
              mutable-default anti-pattern -- use None on the next touch.
        public: visibility (True/False; None leaves the account default)
        Returns (putURL, getlengthURL).
        '''
        data = {'filename' : filename}
        data['key'] = self.dev_key
        if self.cookie is not None:
            data['cookie'] = self.cookie
        if tags:
            data['tags'] = ','.join(tags)
        if public in (True, False):
            data['public'] = "yes" if public else "no"
        if self.username is not None:
            data['a_username'] = self.username
        if self.password is not None:
            data['a_password'] = self.password
        # NOTE(review): debug print leaks credentials to stdout.
        print data
        try:
            req = urllib2.urlopen(ENDPOINT+'/start', urllib.urlencode(data))
            xml = req.read()
        except:
            raise UploadException('Could not connect to server')
        try:
            dom = parseString(xml)
            url = dom.documentElement.getAttribute('putURL')
            getlenurl = dom.documentElement.getAttribute('getlengthURL')
        except:
            # NOTE(review): ServerException.__init__ takes (code, message);
            # this one-argument raise would itself fail with a TypeError.
            raise ServerException('Wrong server response')
        dom.unlink()
        req.close()
        return (url, getlenurl)

    def get_length(self, url):
        '''Return the byte count already stored server-side.
        Args:
        url: getlengthURL of start output
        returns int byte count
        '''
        try: size = urllib.urlopen(url).read()
        except: raise UploadException('Could not connect to server')
        try: size = int(size)
        # NOTE(review): one-argument ServerException -- see start().
        except: raise ServerException('Wrong server response')
        return size

    def upload_file(self, filename, tags = [], public = True, end = -1):
        '''Upload file to ImageShack using streaming API.
        Args:
        tags: list of tags
        public: visibility (True, False or None)
        end: last byte number that will be uploaded.
        If end is -1, file will be uploaded to the end.
        NOTE(review): 'end' is currently ignored -- upload_range is always
        called with -1 (whole file).
        '''
        url = self.start(filename, tags, public)[0]
        return self.upload_range(filename, url, 0, -1)

    def resume_upload(self, filename, url, getlenurl, end = -1):
        '''Resumes file upload from the server-reported byte count.
        Args:
        url: putURL from start output
        getlenurl: getlenURL from start output
        end: last byte number to upload (-1 for all file)
        '''
        size = self.get_length(getlenurl)
        return self.upload_range(filename, url, size, end)

    def upload_range(self, filename, url, begin = 0, end = -1):
        '''Upload bytes [begin, end) of the file to the server via HTTP PUT.
        Args:
        url: upload url (get one using start method)
        begin: first byte number
        end: last byte number to upload (-1 for all file)
        Returns (HTTP status, reason phrase, response body).
        '''
        purl = urlparse(url)
        current_byte = begin
        filelen = os.path.getsize(filename)
        # Clamp 'end' to the actual file size.
        if end == -1: end = filelen
        if end > filelen: end = filelen
        try:
            conn = httplib.HTTPConnection(purl.netloc)
            conn.connect()
            conn.putrequest('PUT', purl.path)
            range_str="bytes %d-%d/%d" % (begin, end, filelen)
            conn.putheader('Content-range', range_str)
            conn.putheader('Content-type', 'application/octet-stream')
            conn.putheader('Content-length', (end - begin))
            conn.endheaders()
        except:
            raise UploadException('Could not connect to server')
        try: fileobj = open(filename, 'rb')
        except: raise UploadException('Could not open file')
        try: fileobj.seek(begin)
        except: raise UploadException('Could not seek file')
        # Stream the requested range in BLOCK_SIZE chunks.
        while current_byte < end:
            try:
                data = fileobj.read(BLOCK_SIZE)
                print 'sending %d bytes' % len(data)
            except: raise UploadException('File I/O error')
            try: conn.send(data)
            except: raise UploadException('Could not send data')
            current_byte += len(data)
            print 'sent data'
        fileobj.close()
        try:
            print 'waiting for response'
            resp = conn.getresponse()
            print 'reading response'
            res = resp.read()
        except:
            raise UploadException('Could not get server response')
        return (resp.status, resp.reason, res)
|
vuntz/glance | refs/heads/master | glance/db/sqlalchemy/migrate_repo/versions/025_placeholder.py | 57 | # Copyright 2013 OpenStack Foundation
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def upgrade(migrate_engine):
    # Placeholder migration: intentionally a no-op, kept so that the
    # migration version numbering stays contiguous.
    pass
def downgrade(migration_engine):
    # Placeholder no-op, mirror of upgrade().  NOTE(review): the parameter
    # is spelled 'migration_engine' here but 'migrate_engine' in upgrade().
    pass
|
TrailingDots/easy_py_messaging | refs/heads/master | easy_py_messaging/logFilter.py | 3 | #!/bin/env python
import sys
import json
import utils
"""
Read a log file, apply user provided function for filtering.
With this base class CSV and JSON can result.
Further filtering such as WARNING and higher levels could
be filtered from the cruft.
"""
# The parameters controlling log filtering.
# All entries are optional; LogFilter.normalize_config() supplies defaults
# for anything missing (stdin/stdout, epoch..now window, DEBUG level).
LogFilters = {
    # Most commonly used settings
    # 'level': 'DEBUG',      # Default: All logs entries
    # 'out_format': 'JSON',  # Default format
    # 'out_file': None,      # Printable output filename
    # 'in_file': None,       # printable intput filename
    # 'start': None,         # start and end dates.
    # 'end': None,
}
class LogFilter(object):
    """
    Base class for user-provided log filters.

    Parses one log line -- ISO8601 date, level and payload separated by
    tabs -- into a dict.  Special keys: 'date' (the ISO8601 timestamp) and
    'level' (DEBUG, INFO, ...); the remaining key=value pairs come from the
    payload.  Entries below the configured 'level' or outside the
    start/end time window are dropped.

    filter_fcn, when supplied, is called as
        filter_fcn(log_entry_dict, line_number)
    and may return a (possibly modified) dict to keep the entry, or None
    to drop it entirely.
    """
    def __init__(self, log_filters, filter_fcn=None):
        self.log_filters = log_filters  # configuration dict (see LogFilters)
        self.filter_fcn = filter_fcn    # optional per-entry user hook
        self.filter_dict = {}           # levels that pass the threshold
        self.log_entry = ''             # raw text of the current entry
        self.log_dict = {}              # parsed form of the current entry
        self.log_seconds = 0            # current entry's date in epoch seconds
        self.line_number = 0
        if self.normalize_config() != 0:
            raise Exception('Bad configuration')

    def normalize_config(self):
        """
        Some data may be missing. Verify a working set
        of parameters.
        If OK, return 0, else 1
        """
        # All values must be string!
        for key in self.log_filters.keys():
            if type(self.log_filters[key]) != str:
                sys.stderr.write(
                    'Configuration: Key %s must be a string! Not %s\n' %
                    (key, str(self.log_filters[key])))
                return 1
        # Verify input file.
        in_file = self.log_filters.get('in_file', None)
        if in_file is None:
            self.log_filters['in_file_handle'] = sys.stdin
            self.log_filters['in_file'] = 'sys.stdin'
        else:
            try:
                file_handle = open(in_file, 'r')
            except IOError as err:
                sys.stderr.write('--in-file="%s": %s\n' %
                    (in_file, str(err)))
                return 1
            self.log_filters['in_file_handle'] = file_handle
        # Verify output file.
        out_file = self.log_filters.get('out_file', None)
        if out_file is None:
            self.log_filters['out_file_handle'] = sys.stdout
            self.log_filters['out_file'] = '<sys.stdout>'
        else:
            try:
                file_handle = open(out_file, 'w')
            except IOError as err:
                sys.stderr.write('--out-file="%s": %s\n' %
                    (out_file, str(err)))
                return 1
            self.log_filters['out_file_handle'] = file_handle
        if 'start' not in self.log_filters:
            # No start date. Use start of epoch.
            self.log_filters['start'] = '1970-01-01T00:00:00.000'
        start_date = utils.ISO8601_to_seconds(self.log_filters['start'])
        if start_date is None:
            # NOTE(review): this message reports log_filters['end'] --
            # looks like a copy/paste slip; should presumably be ['start'].
            sys.stderr.write('--start="%s" is not a valid ISO8601 date\n' %
                self.log_filters['end'])
            return 1
        self.log_filters['start_secs'] = start_date
        if 'end' not in self.log_filters:
            # No end time specified. Assume now.
            now_secs = utils.time_now()
            self.log_filters['end_secs'] = now_secs
            self.log_filters['end'] = utils.seconds_to_ISO8601(now_secs)
        else:
            end_secs = utils.ISO8601_to_seconds(self.log_filters['end'])
            if end_secs is None:
                sys.stderr.write('--end="%s" is not a valid ISO8601 date\n' %
                    self.log_filters['end'])
                return 1
            self.log_filters['end_secs'] = end_secs
        if self.log_filters['end_secs'] <= self.log_filters['start_secs']:
            # NOTE(review): missing parentheses around the two format
            # arguments -- as written this passes a second positional
            # argument to write() and raises TypeError when reached.
            sys.stderr.write('end time <= start time. start=%s, end=%s\n' %
                self.log_filters['start'], self.log_filters['end'])
            return 1
        # Fall back to the utils-module defaults for payload separators.
        if 'sep_char' not in self.log_filters.keys():
            self.log_filters['sep_char'] = utils.SEPARATION_CHAR
        if 'key_val_sep' not in self.log_filters.keys():
            self.log_filters['key_val_sep'] = \
                utils.KEY_VALUE_SEPARATOR
        if 'payload_connector' not in self.log_filters.keys():
            self.log_filters['payload_connector'] = \
                utils.PAYLOAD_CONNECTOR
        if 'level' not in self.log_filters.keys():
            self.log_filters['level'] = 'DEBUG'  # Pass all logs
        self.filter_dict = \
            utils.filter_priority(self.log_filters['level'])
        if 'line_number' not in self.log_filters.keys():
            self.log_filters['line_number'] = 0
        self.line_number = self.log_filters['line_number']
        return 0

    def parse_log_entry(self, log_entry):
        """
        Break the log entry into small pieces and place results into log_dict
        Returns the log dictonary of the pieces of the log entry,
        or None when the entry is empty, malformed, filtered out by
        level/date, or rejected by the user filter_fcn.
        """
        self.log_entry = log_entry
        if log_entry == '':
            # Returning None provides a convenient loop
            # termination. This assumes that the data files
            # contain no blank lines!
            return None  # Common at end of file
        try:
            date, level, payload = self.log_entry.split('\t')
        except ValueError as err:
            sys.stderr.write('ERROR: "%s", line#%d, Log line: "%s"\n' %
                (str(err), self.line_number, self.log_entry))
            self.log_dict = {'ERROR': '"""' + str(err) + '"""',
                'LOG': str(self.log_entry)}
            return None
        # Does this log meet the log_filter level desired?
        if level in self.filter_dict.keys():  # Ignore if level gets ignored.
            self.log_dict['level'] = level
        else:
            return None  # This level to be ignored.
        self.log_dict['date'] = date
        self.log_seconds = utils.ISO8601_to_seconds(date)
        date_within = self.within_dates(self.log_seconds)
        if date_within is not True:  # date_with could be False or None
            return None
        self.parse_payload(payload)
        if self.filter_fcn:
            # User has provided a filter fcn.
            return self.filter_fcn(self.log_dict, self.line_number)
        else:
            # No user provided filter fcn.
            return self.log_dict

    def parse_payload(self, payload):
        """
        Parse the payload: split into items on the payload connector,
        then each item into key=value; results land in self.log_dict.
        """
        items = payload.split(self.log_filters['payload_connector'])
        for item in items:
            if len(item) == 0:
                # Ignore empty item.
                # An example: name=value&&name1=value1
                # The double '&&' results in an empty item.
                continue
            try:
                key, value = item.split(self.log_filters['key_val_sep'])
            except ValueError as err:
                sys.stderr.write(('ERROR: "%s", line#:%d, ' +
                    'key=value: "%s" Line:%s\n') %
                    (str(err), self.line_number,
                    item, self.log_entry))
                continue  # Ignore this entry
            # Duplicate keys get ignored.
            # NOTE(review): actually the LAST occurrence wins (plain dict
            # assignment); earlier duplicates are overwritten.
            self.log_dict[key] = value

    def within_dates(self, log_seconds):
        """
        Determines if the log date falls within requested
        boundaries.
        log_seconds = Date of log in seconds since epoch.
        Return: True if log date is within start and end dates.
                False if not within date boundaries.
                None iF invalid log_date_str.
        """
        if log_seconds is None:
            return None
        if self.log_filters['start_secs'] <= log_seconds <= self.log_filters['end_secs']:
            return True
        return False
class LogFilterCSV(LogFilter):
    """
    CSV output flavour of LogFilter.

    CSV assumes every log line carries exactly the same payload keys:
    log_keys() emits the sorted key names as the header row and
    log_data() emits the matching values for the current entry.  Lines
    whose keys differ would report unrelated data under the same column,
    so keep payload formats uniform per file.
    """

    def __init__(self, log_filters, filter_fcn=None):
        super(LogFilterCSV, self).__init__(log_filters, filter_fcn)

    def log_keys(self):
        """Return the current entry's key names, sorted, as one CSV line."""
        return ','.join(sorted(self.log_dict))

    def log_data(self):
        """Return the current entry's values, in key order, as one CSV line."""
        pieces = []
        for field in sorted(self.log_dict):
            try:
                pieces.append(self.log_dict[field] + ',')
            except Exception as err:
                # A non-string value cannot be concatenated; report it and
                # keep going, exactly as the historical behaviour did.
                sys.stderr.write('line#%d: %s\n' %
                    (self.line_number, str(err)))
        return ''.join(pieces)[:-1]
class LogFilterJSON(LogFilter):
    """
    JSON output flavour of LogFilter: both keys and values end up as
    double-quoted JSON strings.
    """
    def __init__(self, log_filters, filter_fcn=None):
        super(LogFilterJSON, self).__init__(log_filters, filter_fcn)

    def log_2_JSON(self):
        # Return the data dictionary as JSON.
        return json.dumps(self.log_dict)

    def log_file_2_JSON_handler(self, file_handle):
        """
        Same as logFileToJSON except a file
        handle gets passed instead of a filename.
        Returns a string holding a JSON array of entry objects.
        """
        outline = '['
        for line in file_handle:
            self.log_dict = {}  # Wipe out previous values
            self.line_number += 1
            try:
                line = line.strip('\n')
                self.log_entry = line
                self.log_dict = self.parse_log_entry(line)
            except Exception:
                # Bad input. Lower levels have already reported.
                continue  # Ignore.
            # This log level is to low to consider if == None
            if self.log_dict is None:
                continue
            outline += json.dumps(self.log_dict) + ','
        # Strip the trailing comma before closing the array.
        # NOTE(review): with zero surviving entries this also strips the
        # opening '[', yielding the string ']' -- invalid JSON; confirm
        # whether callers ever hit the empty case.
        outline = outline[:-1]
        outline += ']'
        return outline

    def log_file_2_JSON(self, filename):
        """
        Given a log filename of logging data, return
        an array of log dictionaries. Each log entry
        becomes a JSON dictionary.
        Return: JSON array string, or None for an unreadable filename.
        """
        try:
            file_handle = open(filename, 'r')
        except IOError as err:
            sys.stderr.write('%s: %s\n' % (filename, str(err)))
            return None
        return self.log_file_2_JSON_handler(file_handle)
|
shabab12/edx-platform | refs/heads/master | lms/envs/cms/microsite_test.py | 73 | """
This is a localdev test for the Microsite processing pipeline
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
from .dev import *
from ..dev import ENV_ROOT, FEATURES
# One microsite keyed by its name; values override the matching global
# settings when the request's domain prefix matches.
MICROSITE_CONFIGURATION = {
    "openedx": {
        "domain_prefix": "openedx",
        "university": "openedx",
        "platform_name": "Open edX",
        "logo_image_url": "openedx/images/header-logo.png",
        "email_from_address": "openedx@edx.org",
        "payment_support_email": "openedx@edx.org",
        "ENABLE_MKTG_SITE": False,
        "SITE_NAME": "openedx.localhost",
        "course_org_filter": "CDX",
        "course_about_show_social_links": False,
        "css_overrides_file": "openedx/css/openedx.css",
        "show_partners": False,
        "show_homepage_promo_video": False,
        "course_index_overlay_text": "Explore free courses from leading universities.",
        "course_index_overlay_logo_file": "openedx/images/header-logo.png",
        "homepage_overlay_html": "<h1>Take an Open edX Course</h1>"
    }
}

MICROSITE_ROOT_DIR = ENV_ROOT / 'edx-microsite'

# pretend we are behind some marketing site, we want to be able to assert that the Microsite config values override
# this global setting
FEATURES['ENABLE_MKTG_SITE'] = True

FEATURES['USE_MICROSITES'] = True
|
starius/qBittorrent | refs/heads/master | src/searchengine/nova3/helpers.py | 35 | #VERSION: 1.40
# Author:
# Christophe DUMEZ (chris@qbittorrent.org)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re, html.entities
import tempfile
import os
import io, gzip, urllib.request, urllib.error, urllib.parse
import socket
import socks
import re
# Some sites blocks default python User-agent
user_agent = 'Mozilla/5.0 (X11; Linux i686; rv:38.0) Gecko/20100101 Firefox/38.0'
headers = {'User-Agent': user_agent}

# SOCKS5 Proxy support
# sock_proxy format: [username:password@]host:port -- anything that does
# not match the pattern is silently ignored.
if "sock_proxy" in os.environ and len(os.environ["sock_proxy"].strip()) > 0:
    proxy_str = os.environ["sock_proxy"].strip()
    m = re.match(r"^(?:(?P<username>[^:]+):(?P<password>[^@]+)@)?(?P<host>[^:]+):(?P<port>\w+)$", proxy_str)
    if m is not None:
        socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, m.group('host'), int(m.group('port')), True, m.group('username'), m.group('password'))
        # Route every new socket (hence all urllib traffic) via the proxy.
        socket.socket = socks.socksocket
def htmlentitydecode(s):
    """Decode HTML entities in *s*: named (&eacute;), decimal (&#233;) and
    hexadecimal (&#xE9;) forms are all converted to their characters.
    """
    # First convert alpha entities (such as &eacute;)
    # (Inspired from http://mail.python.org/pipermail/python-list/2007-June/443813.html)
    def entity2char(m):
        entity = m.group(1)
        if entity in html.entities.name2codepoint:
            return chr(html.entities.name2codepoint[entity])
        return " "  # Unknown entity: We replace with a space.
    t = re.sub('&(%s);' % '|'.join(html.entities.name2codepoint), entity2char, s)

    # Then convert numerical entities (such as &#233;)
    # Raw strings keep '\d' / '\w' from triggering invalid-escape-sequence
    # warnings (an error in future Python 3 releases).
    t = re.sub(r'&#(\d+);', lambda x: chr(int(x.group(1))), t)

    # Then convert hexa entities (such as &#xE9;)
    return re.sub(r'&#x(\w+);', lambda x: chr(int(x.group(1), 16)), t)
def retrieve_url(url):
    """ Return the content of the url page as a string.

    Sends the spoofed User-Agent, transparently gunzips the body, decodes
    it using the charset from Content-Type (utf-8 fallback) and decodes
    HTML entities.  Returns "" on any connection error.
    """
    req = urllib.request.Request(url, headers = headers)
    try:
        response = urllib.request.urlopen(req)
    except urllib.error.URLError as errno:
        print(" ".join(("Connection error:", str(errno.reason))))
        return ""
    dat = response.read()
    # Check if it is gzipped
    if dat[:2] == b'\x1f\x8b':
        # Data is gzip encoded, decode it
        compressedstream = io.BytesIO(dat)
        gzipper = gzip.GzipFile(fileobj=compressedstream)
        extracted_data = gzipper.read()
        dat = extracted_data
    info = response.info()
    charset = 'utf-8'
    try:
        ignore, charset = info['Content-Type'].split('charset=')
    except:
        # No (or malformed) charset declaration: keep the utf-8 default.
        pass
    dat = dat.decode(charset, 'replace')
    dat = htmlentitydecode(dat)
    #return dat.encode('utf-8', 'replace')
    return dat
def download_file(url, referer=None):
    """ Download file at url and write it to a file, return the path to the file and the url.

    The payload is gunzipped if needed and stored in a mkstemp() temp file;
    the return value is "<path> <url>" (space separated), the format the
    qBittorrent search plugins expect.
    """
    file, path = tempfile.mkstemp()
    file = os.fdopen(file, "wb")
    # Download url
    req = urllib.request.Request(url, headers = headers)
    if referer is not None:
        # Some trackers refuse downloads without the original page referer.
        req.add_header('referer', referer)
    response = urllib.request.urlopen(req)
    dat = response.read()
    # Check if it is gzipped
    if dat[:2] == b'\x1f\x8b':
        # Data is gzip encoded, decode it
        compressedstream = io.BytesIO(dat)
        gzipper = gzip.GzipFile(fileobj=compressedstream)
        extracted_data = gzipper.read()
        dat = extracted_data
    # Write it to a file
    file.write(dat)
    file.close()
    # return file path
    return path+" "+url
|
satvikdhandhania/vit-11 | refs/heads/master | build/lib.linux-x86_64-2.7/moca/manage.py | 2072 | #!/usr/bin/env python
from django.core.management import execute_manager
# Legacy Django manage.py boilerplate.
# NOTE(review): execute_manager was removed in modern Django; this file only
# works with the old Django release this project pins.
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
|
ecell/libmoleculizer | refs/heads/master | python-src/bngparser/src/moleculizer/__init__.py | 1 | ###############################################################################
# BNGMZRConverter - A utility program for converting bngl input files to mzr
# input files.
# Copyright (C) 2007, 2008, 2009 The Molecular Sciences Institute
#
# Moleculizer is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Moleculizer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Moleculizer; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Original Author:
# Nathan Addy, Scientific Programmer Email: addy@molsci.org
# The Molecular Sciences Institute Email: addy@molsci.org
#
#
###############################################################################
from moleculizerobject import MoleculizerObject
|
petterreinholdtsen/creepy | refs/heads/master | creepy/ui/AboutDialog.py | 2 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gui/aboutDialog.ui'
#
# Created: Mon Oct 19 00:13:43 2015
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shim: under PyQt4 API v1, QString.fromUtf8 converts byte
# strings to QString; under API v2 QString is absent, so fall back to the
# identity function (strings are already unicode).
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # API v2: nothing to convert, return the string unchanged.
        return s
# Compatibility shim for translation: older PyQt4 exposes UnicodeUTF8 as an
# encoding argument to QApplication.translate; newer releases dropped it, so
# the fallback omits the encoding parameter.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_aboutDialog(object):
    """Auto-generated (pyuic4) UI definition for the About dialog.

    NOTE(review): this file is generated from gui/aboutDialog.ui; per the
    header warning, manual edits will be lost on regeneration.
    """
    def setupUi(self, aboutDialog):
        """Build the widget tree on *aboutDialog* and wire its signals."""
        aboutDialog.setObjectName(_fromUtf8("aboutDialog"))
        aboutDialog.resize(519, 729)
        aboutDialog.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
        # Window icon from the compiled Qt resource file (creepy_resources_rc).
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/creepy/creepy")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        aboutDialog.setWindowIcon(icon)
        aboutDialog.setModal(False)
        # Single vertical layout holding the rich-text label and the OK button.
        self.verticalLayoutWidget = QtGui.QWidget(aboutDialog)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(20, 10, 478, 706))
        self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
        self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setMargin(0)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.label = QtGui.QLabel(self.verticalLayoutWidget)
        self.label.setTextFormat(QtCore.Qt.RichText)
        self.label.setObjectName(_fromUtf8("label"))
        self.verticalLayout.addWidget(self.label)
        self.buttonBox = QtGui.QDialogButtonBox(self.verticalLayoutWidget)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.verticalLayout.addWidget(self.buttonBox)
        # Apply translatable text, then connect OK to accept()/reject().
        self.retranslateUi(aboutDialog)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), aboutDialog.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), aboutDialog.reject)
        QtCore.QMetaObject.connectSlotsByName(aboutDialog)
    def retranslateUi(self, aboutDialog):
        """Set all user-visible (translatable) strings on the dialog."""
        aboutDialog.setWindowTitle(_translate("aboutDialog", "About", None))
        self.label.setText(_translate("aboutDialog", "<html><head/><body><p align=\"center\"><img src=\":/creepy/creepy\"/></p><p><br/></p><p align=\"center\"><span style=\" font-size:9pt;\">Creepy is </span><span style=\" font-size:9pt; font-style:italic;\">the</span><span style=\" font-size:9pt;\"> geolocation OSINT tool. </span></p><p><br/></p><p><span style=\" font-weight:600;\">Version : </span>1.4.1 - Codename &quot;GIJC&quot;</p><p><span style=\" font-weight:600;\">Author</span> : Ioannis Kakavas &lt; jkakavas@gmail.com &gt;</p><p><span style=\" font-weight:600;\">Website</span>: www.geocreepy.com</p></body></html>", None))
import creepy_resources_rc
if __name__ == "__main__":
    # Stand-alone preview harness: show the About dialog on its own so the
    # generated UI can be inspected without launching the full application.
    import sys
    app = QtGui.QApplication(sys.argv)
    aboutDialog = QtGui.QDialog()
    ui = Ui_aboutDialog()
    ui.setupUi(aboutDialog)
    aboutDialog.show()
    sys.exit(app.exec_())
|
pwoodworth/intellij-community | refs/heads/master | python/helpers/profiler/profiler/constants.py | 162 | #
# Autogenerated by Thrift Compiler (0.9.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
|
etherkit/OpenBeacon2 | refs/heads/master | client/linux-x86/venv/lib/python3.8/site-packages/pip/_internal/utils/__init__.py | 12133432 | |
trishnaguha/ansible | refs/heads/devel | lib/ansible/modules/storage/glusterfs/__init__.py | 12133432 | |
edoburu/django-oscar-docdata | refs/heads/master | tests/__init__.py | 12133432 | |
jyotsna1820/django | refs/heads/master | tests/modeladmin/__init__.py | 12133432 | |
azul-cloud/cityinfo | refs/heads/master | settings/test.py | 12133432 | |
BiznetGIO/horizon | refs/heads/stable/pike-gio | openstack_dashboard/dashboards/project/network_topology/routers/__init__.py | 12133432 | |
nzavagli/UnrealPy | refs/heads/master | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/SQLAlchemy-1.0.6/test/orm/test_dynamic.py | 25 | from sqlalchemy import testing, desc, select, func, exc, cast, Integer
from sqlalchemy.orm import (
mapper, relationship, create_session, Query, attributes, exc as orm_exc,
Session, backref, configure_mappers)
from sqlalchemy.orm.dynamic import AppenderMixin
from sqlalchemy.testing import (
AssertsCompiledSQL, assert_raises_message, assert_raises, eq_, is_)
from test.orm import _fixtures
from sqlalchemy.testing.assertsql import CompiledSQL
class _DynamicFixture(object):
    """Mapper fixtures shared by the dynamic-loader test suites.

    Each helper configures a mapping whose collection uses ``lazy="dynamic"``
    and returns the mapped classes for use by a test.
    """

    def _user_address_fixture(self, addresses_args=None):
        """Map User/Address with a dynamic one-to-many User.addresses.

        :param addresses_args: optional dict of extra keyword arguments
            forwarded to the ``addresses`` relationship().
        """
        # None-sentinel instead of a mutable default argument, which would be
        # shared across calls.
        if addresses_args is None:
            addresses_args = {}
        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)
        mapper(
            User, users, properties={
                'addresses': relationship(
                    Address, lazy="dynamic", **addresses_args)})
        mapper(Address, addresses)
        return User, Address

    def _order_item_fixture(self, items_args=None):
        """Map Order/Item with a dynamic many-to-many Order.items.

        :param items_args: optional dict of extra keyword arguments
            forwarded to the ``items`` relationship().
        """
        # None-sentinel instead of a mutable default argument.
        if items_args is None:
            items_args = {}
        items, Order, orders, order_items, Item = (self.tables.items,
                                                   self.classes.Order,
                                                   self.tables.orders,
                                                   self.tables.order_items,
                                                   self.classes.Item)
        mapper(
            Order, orders, properties={
                'items': relationship(
                    Item, secondary=order_items, lazy="dynamic",
                    **items_args)})
        mapper(Item, items)
        return Order, Item
class DynamicTest(_DynamicFixture, _fixtures.FixtureTest, AssertsCompiledSQL):
    def test_basic(self):
        """A dynamic 'addresses' collection participates in entity comparison."""
        User, Address = self._user_address_fixture()
        sess = create_session()
        q = sess.query(User)
        # The expected User carries its addresses; comparison triggers a load
        # of the dynamic collection.
        eq_([User(id=7,
                  addresses=[Address(id=1, email_address='jack@bean.com')])],
            q.filter(User.id == 7).all())
        eq_(self.static.user_address_result, q.all())
def test_statement(self):
"""test that the .statement accessor returns the actual statement that
would render, without any _clones called."""
User, Address = self._user_address_fixture()
sess = create_session()
q = sess.query(User)
u = q.filter(User.id == 7).first()
self.assert_compile(
u.addresses.statement,
"SELECT addresses.id, addresses.user_id, addresses.email_address "
"FROM "
"addresses WHERE :param_1 = addresses.user_id",
use_default_dialect=True
)
def test_detached_raise(self):
User, Address = self._user_address_fixture()
sess = create_session()
u = sess.query(User).get(8)
sess.expunge(u)
assert_raises(
orm_exc.DetachedInstanceError,
u.addresses.filter_by,
email_address='e'
)
def test_no_uselist_false(self):
User, Address = self._user_address_fixture(
addresses_args={"uselist": False})
assert_raises_message(
exc.InvalidRequestError,
"On relationship User.addresses, 'dynamic' loaders cannot be "
"used with many-to-one/one-to-one relationships and/or "
"uselist=False.",
configure_mappers
)
def test_no_m2o(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(
Address, addresses, properties={
'user': relationship(User, lazy='dynamic')})
mapper(User, users)
assert_raises_message(
exc.InvalidRequestError,
"On relationship Address.user, 'dynamic' loaders cannot be "
"used with many-to-one/one-to-one relationships and/or "
"uselist=False.",
configure_mappers
)
def test_order_by(self):
User, Address = self._user_address_fixture()
sess = create_session()
u = sess.query(User).get(8)
eq_(
list(u.addresses.order_by(desc(Address.email_address))),
[
Address(email_address='ed@wood.com'),
Address(email_address='ed@lala.com'),
Address(email_address='ed@bettyboop.com')
]
)
def test_configured_order_by(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(
addresses_args={"order_by": addresses.c.email_address.desc()})
sess = create_session()
u = sess.query(User).get(8)
eq_(
list(u.addresses),
[
Address(email_address='ed@wood.com'),
Address(email_address='ed@lala.com'),
Address(email_address='ed@bettyboop.com')
]
)
# test cancellation of None, replacement with something else
eq_(
list(u.addresses.order_by(None).order_by(Address.email_address)),
[
Address(email_address='ed@bettyboop.com'),
Address(email_address='ed@lala.com'),
Address(email_address='ed@wood.com')
]
)
# test cancellation of None, replacement with nothing
eq_(
set(u.addresses.order_by(None)),
set([
Address(email_address='ed@bettyboop.com'),
Address(email_address='ed@lala.com'),
Address(email_address='ed@wood.com')
])
)
def test_count(self):
User, Address = self._user_address_fixture()
sess = create_session()
u = sess.query(User).first()
eq_(u.addresses.count(), 1)
def test_dynamic_on_backref(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(Address, addresses, properties={
'user': relationship(User,
backref=backref('addresses', lazy='dynamic'))
})
mapper(User, users)
sess = create_session()
ad = sess.query(Address).get(1)
def go():
ad.user = None
self.assert_sql_count(testing.db, go, 0)
sess.flush()
u = sess.query(User).get(7)
assert ad not in u.addresses
def test_no_count(self):
User, Address = self._user_address_fixture()
sess = create_session()
q = sess.query(User)
# dynamic collection cannot implement __len__() (at least one that
# returns a live database result), else additional count() queries are
# issued when evaluating in a list context
def go():
eq_(
q.filter(User.id == 7).all(),
[
User(
id=7, addresses=[
Address(id=1, email_address='jack@bean.com')])])
self.assert_sql_count(testing.db, go, 2)
def test_no_populate(self):
User, Address = self._user_address_fixture()
u1 = User()
assert_raises_message(
NotImplementedError,
"Dynamic attributes don't support collection population.",
attributes.set_committed_value, u1, 'addresses', []
)
def test_m2m(self):
Order, Item = self._order_item_fixture(
items_args={"backref": backref("orders", lazy="dynamic")})
sess = create_session()
o1 = Order(id=15, description="order 10")
i1 = Item(id=10, description="item 8")
o1.items.append(i1)
sess.add(o1)
sess.flush()
assert o1 in i1.orders.all()
assert i1 in o1.items.all()
@testing.exclude(
'mysql', 'between', ((5, 1, 49), (5, 1, 52)),
'https://bugs.launchpad.net/ubuntu/+source/mysql-5.1/+bug/706988')
def test_association_nonaliased(self):
items, Order, orders, order_items, Item = (self.tables.items,
self.classes.Order,
self.tables.orders,
self.tables.order_items,
self.classes.Item)
mapper(Order, orders, properties={
'items': relationship(Item,
secondary=order_items,
lazy="dynamic",
order_by=order_items.c.item_id)
})
mapper(Item, items)
sess = create_session()
o = sess.query(Order).first()
self.assert_compile(
o.items,
"SELECT items.id AS items_id, items.description AS "
"items_description FROM items,"
" order_items WHERE :param_1 = order_items.order_id AND "
"items.id = order_items.item_id"
" ORDER BY order_items.item_id",
use_default_dialect=True
)
# filter criterion against the secondary table
# works
eq_(
o.items.filter(order_items.c.item_id == 2).all(),
[Item(id=2)]
)
def test_transient_count(self):
User, Address = self._user_address_fixture()
u1 = User()
u1.addresses.append(Address())
eq_(u1.addresses.count(), 1)
def test_transient_access(self):
User, Address = self._user_address_fixture()
u1 = User()
u1.addresses.append(Address())
eq_(u1.addresses[0], Address())
def test_custom_query(self):
class MyQuery(Query):
pass
User, Address = self._user_address_fixture(
addresses_args={"query_class": MyQuery})
sess = create_session()
u = User()
sess.add(u)
col = u.addresses
assert isinstance(col, Query)
assert isinstance(col, MyQuery)
assert hasattr(col, 'append')
eq_(type(col).__name__, 'AppenderMyQuery')
q = col.limit(1)
assert isinstance(q, Query)
assert isinstance(q, MyQuery)
assert not hasattr(q, 'append')
eq_(type(q).__name__, 'MyQuery')
def test_custom_query_with_custom_mixin(self):
class MyAppenderMixin(AppenderMixin):
def add(self, items):
if isinstance(items, list):
for item in items:
self.append(item)
else:
self.append(items)
class MyQuery(Query):
pass
class MyAppenderQuery(MyAppenderMixin, MyQuery):
query_class = MyQuery
User, Address = self._user_address_fixture(
addresses_args={"query_class": MyAppenderQuery})
sess = create_session()
u = User()
sess.add(u)
col = u.addresses
assert isinstance(col, Query)
assert isinstance(col, MyQuery)
assert hasattr(col, 'append')
assert hasattr(col, 'add')
eq_(type(col).__name__, 'MyAppenderQuery')
q = col.limit(1)
assert isinstance(q, Query)
assert isinstance(q, MyQuery)
assert not hasattr(q, 'append')
assert not hasattr(q, 'add')
eq_(type(q).__name__, 'MyQuery')
class UOWTest(
_DynamicFixture, _fixtures.FixtureTest,
testing.AssertsExecutionResults):
run_inserts = None
def test_persistence(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture()
sess = create_session()
u1 = User(name='jack')
a1 = Address(email_address='foo')
sess.add_all([u1, a1])
sess.flush()
eq_(
testing.db.scalar(
select(
[func.count(cast(1, Integer))]).
where(addresses.c.user_id != None)),
0)
u1 = sess.query(User).get(u1.id)
u1.addresses.append(a1)
sess.flush()
eq_(
testing.db.execute(
select([addresses]).where(addresses.c.user_id != None)
).fetchall(),
[(a1.id, u1.id, 'foo')]
)
u1.addresses.remove(a1)
sess.flush()
eq_(
testing.db.scalar(
select(
[func.count(cast(1, Integer))]).
where(addresses.c.user_id != None)),
0
)
u1.addresses.append(a1)
sess.flush()
eq_(
testing.db.execute(
select([addresses]).where(addresses.c.user_id != None)
).fetchall(),
[(a1.id, u1.id, 'foo')]
)
a2 = Address(email_address='bar')
u1.addresses.remove(a1)
u1.addresses.append(a2)
sess.flush()
eq_(
testing.db.execute(
select([addresses]).where(addresses.c.user_id != None)
).fetchall(),
[(a2.id, u1.id, 'bar')]
)
def test_merge(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(
addresses_args={"order_by": addresses.c.email_address})
sess = create_session()
u1 = User(name='jack')
a1 = Address(email_address='a1')
a2 = Address(email_address='a2')
a3 = Address(email_address='a3')
u1.addresses.append(a2)
u1.addresses.append(a3)
sess.add_all([u1, a1])
sess.flush()
u1 = User(id=u1.id, name='jack')
u1.addresses.append(a1)
u1.addresses.append(a3)
u1 = sess.merge(u1)
eq_(attributes.get_history(u1, 'addresses'), (
[a1],
[a3],
[a2]
))
sess.flush()
eq_(
list(u1.addresses),
[a1, a3]
)
def test_hasattr(self):
User, Address = self._user_address_fixture()
u1 = User(name='jack')
assert 'addresses' not in u1.__dict__
u1.addresses = [Address(email_address='test')]
assert 'addresses' in u1.__dict__
def test_collection_set(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(
addresses_args={"order_by": addresses.c.email_address})
sess = create_session(autoflush=True, autocommit=False)
u1 = User(name='jack')
a1 = Address(email_address='a1')
a2 = Address(email_address='a2')
a3 = Address(email_address='a3')
a4 = Address(email_address='a4')
sess.add(u1)
u1.addresses = [a1, a3]
eq_(list(u1.addresses), [a1, a3])
u1.addresses = [a1, a2, a4]
eq_(list(u1.addresses), [a1, a2, a4])
u1.addresses = [a2, a3]
eq_(list(u1.addresses), [a2, a3])
u1.addresses = []
eq_(list(u1.addresses), [])
def test_noload_append(self):
# test that a load of User.addresses is not emitted
# when flushing an append
User, Address = self._user_address_fixture()
sess = Session()
u1 = User(name="jack", addresses=[Address(email_address="a1")])
sess.add(u1)
sess.commit()
u1_id = u1.id
sess.expire_all()
u1.addresses.append(Address(email_address='a2'))
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :param_1",
lambda ctx: [{"param_1": u1_id}]),
CompiledSQL(
"INSERT INTO addresses (user_id, email_address) "
"VALUES (:user_id, :email_address)",
lambda ctx: [{'email_address': 'a2', 'user_id': u1_id}]
)
)
def test_noload_remove(self):
# test that a load of User.addresses is not emitted
# when flushing a remove
User, Address = self._user_address_fixture()
sess = Session()
u1 = User(name="jack", addresses=[Address(email_address="a1")])
a2 = Address(email_address='a2')
u1.addresses.append(a2)
sess.add(u1)
sess.commit()
u1_id = u1.id
a2_id = a2.id
sess.expire_all()
u1.addresses.remove(a2)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"SELECT addresses.id AS addresses_id, addresses.email_address "
"AS addresses_email_address FROM addresses "
"WHERE addresses.id = :param_1",
lambda ctx: [{'param_1': a2_id}]
),
CompiledSQL(
"UPDATE addresses SET user_id=:user_id WHERE addresses.id = "
":addresses_id",
lambda ctx: [{'addresses_id': a2_id, 'user_id': None}]
),
CompiledSQL(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :param_1",
lambda ctx: [{"param_1": u1_id}]),
)
def test_rollback(self):
User, Address = self._user_address_fixture()
sess = create_session(
expire_on_commit=False, autocommit=False, autoflush=True)
u1 = User(name='jack')
u1.addresses.append(Address(email_address='lala@hoho.com'))
sess.add(u1)
sess.flush()
sess.commit()
u1.addresses.append(Address(email_address='foo@bar.com'))
eq_(
u1.addresses.order_by(Address.id).all(),
[
Address(email_address='lala@hoho.com'),
Address(email_address='foo@bar.com')
]
)
sess.rollback()
eq_(
u1.addresses.all(),
[Address(email_address='lala@hoho.com')]
)
def _test_delete_cascade(self, expected):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(
addresses_args={
"order_by": addresses.c.id,
"backref": "user",
"cascade": "save-update" if expected else "all, delete"})
sess = create_session(autoflush=True, autocommit=False)
u = User(name='ed')
u.addresses.extend(
[Address(email_address=letter) for letter in 'abcdef']
)
sess.add(u)
sess.commit()
eq_(testing.db.scalar(addresses.count(addresses.c.user_id == None)), 0)
eq_(testing.db.scalar(addresses.count(addresses.c.user_id != None)), 6)
sess.delete(u)
sess.commit()
if expected:
eq_(
testing.db.scalar(
addresses.count(addresses.c.user_id == None)), 6)
eq_(
testing.db.scalar(
addresses.count(addresses.c.user_id != None)), 0)
else:
eq_(testing.db.scalar(addresses.count()), 0)
def test_delete_nocascade(self):
self._test_delete_cascade(True)
def test_delete_cascade(self):
self._test_delete_cascade(False)
def test_self_referential(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(
Node, nodes, properties={
'children': relationship(
Node, lazy="dynamic", order_by=nodes.c.id)})
sess = Session()
n2, n3 = Node(), Node()
n1 = Node(children=[n2, n3])
sess.add(n1)
sess.commit()
eq_(n1.children.all(), [n2, n3])
def test_remove_orphans(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(
addresses_args={
"order_by": addresses.c.id,
"backref": "user",
"cascade": "all, delete-orphan"})
sess = create_session(autoflush=True, autocommit=False)
u = User(name='ed')
u.addresses.extend(
[Address(email_address=letter) for letter in 'abcdef']
)
sess.add(u)
for a in u.addresses.filter(
Address.email_address.in_(['c', 'e', 'f'])):
u.addresses.remove(a)
eq_(
set(ad for ad, in sess.query(Address.email_address)),
set(['a', 'b', 'd'])
)
def _backref_test(self, autoflush, saveuser):
User, Address = self._user_address_fixture(
addresses_args={"backref": "user"})
sess = create_session(autoflush=autoflush, autocommit=False)
u = User(name='buffy')
a = Address(email_address='foo@bar.com')
a.user = u
if saveuser:
sess.add(u)
else:
sess.add(a)
if not autoflush:
sess.flush()
assert u in sess
assert a in sess
eq_(list(u.addresses), [a])
a.user = None
if not autoflush:
eq_(list(u.addresses), [a])
if not autoflush:
sess.flush()
eq_(list(u.addresses), [])
def test_backref_autoflush_saveuser(self):
self._backref_test(True, True)
def test_backref_autoflush_savead(self):
self._backref_test(True, False)
def test_backref_saveuser(self):
self._backref_test(False, True)
def test_backref_savead(self):
self._backref_test(False, False)
def test_backref_events(self):
User, Address = self._user_address_fixture(
addresses_args={"backref": "user"})
u1 = User()
a1 = Address()
u1.addresses.append(a1)
is_(a1.user, u1)
def test_no_deref(self):
User, Address = self._user_address_fixture(
addresses_args={"backref": "user", })
session = create_session()
user = User()
user.name = 'joe'
user.fullname = 'Joe User'
user.password = 'Joe\'s secret'
address = Address()
address.email_address = 'joe@joesdomain.example'
address.user = user
session.add(user)
session.flush()
session.expunge_all()
def query1():
session = create_session(testing.db)
user = session.query(User).first()
return user.addresses.all()
def query2():
session = create_session(testing.db)
return session.query(User).first().addresses.all()
def query3():
session = create_session(testing.db)
return session.query(User).first().addresses.all()
eq_(query1(), [Address(email_address='joe@joesdomain.example')])
eq_(query2(), [Address(email_address='joe@joesdomain.example')])
eq_(query3(), [Address(email_address='joe@joesdomain.example')])
class HistoryTest(_DynamicFixture, _fixtures.FixtureTest):
run_inserts = None
def _transient_fixture(self, addresses_args={}):
User, Address = self._user_address_fixture(
addresses_args=addresses_args)
u1 = User()
a1 = Address()
return u1, a1
def _persistent_fixture(self, autoflush=True, addresses_args={}):
User, Address = self._user_address_fixture(
addresses_args=addresses_args)
u1 = User(name='u1')
a1 = Address(email_address='a1')
s = Session(autoflush=autoflush)
s.add(u1)
s.flush()
return u1, a1, s
def _persistent_m2m_fixture(self, autoflush=True, items_args={}):
Order, Item = self._order_item_fixture(items_args=items_args)
o1 = Order()
i1 = Item(description="i1")
s = Session(autoflush=autoflush)
s.add(o1)
s.flush()
return o1, i1, s
def _assert_history(self, obj, compare, compare_passive=None):
if isinstance(obj, self.classes.User):
attrname = "addresses"
elif isinstance(obj, self.classes.Order):
attrname = "items"
eq_(
attributes.get_history(obj, attrname),
compare
)
if compare_passive is None:
compare_passive = compare
eq_(
attributes.get_history(obj, attrname,
attributes.LOAD_AGAINST_COMMITTED),
compare_passive
)
def test_append_transient(self):
u1, a1 = self._transient_fixture()
u1.addresses.append(a1)
self._assert_history(u1,
([a1], [], [])
)
def test_append_persistent(self):
u1, a1, s = self._persistent_fixture()
u1.addresses.append(a1)
self._assert_history(u1,
([a1], [], [])
)
def test_remove_transient(self):
u1, a1 = self._transient_fixture()
u1.addresses.append(a1)
u1.addresses.remove(a1)
self._assert_history(u1,
([], [], [])
)
def test_backref_pop_transient(self):
u1, a1 = self._transient_fixture(addresses_args={"backref": "user"})
u1.addresses.append(a1)
self._assert_history(u1,
([a1], [], []),
)
a1.user = None
# removed from added
self._assert_history(u1,
([], [], []),
)
def test_remove_persistent(self):
u1, a1, s = self._persistent_fixture()
u1.addresses.append(a1)
s.flush()
s.expire_all()
u1.addresses.remove(a1)
self._assert_history(u1,
([], [], [a1])
)
def test_backref_pop_persistent_autoflush_o2m_active_hist(self):
u1, a1, s = self._persistent_fixture(
addresses_args={"backref": backref("user", active_history=True)})
u1.addresses.append(a1)
s.flush()
s.expire_all()
a1.user = None
self._assert_history(u1,
([], [], [a1]),
)
def test_backref_pop_persistent_autoflush_m2m(self):
o1, i1, s = self._persistent_m2m_fixture(
items_args={"backref": "orders"})
o1.items.append(i1)
s.flush()
s.expire_all()
i1.orders.remove(o1)
self._assert_history(o1,
([], [], [i1]),
)
def test_backref_pop_persistent_noflush_m2m(self):
o1, i1, s = self._persistent_m2m_fixture(
items_args={"backref": "orders"}, autoflush=False)
o1.items.append(i1)
s.flush()
s.expire_all()
i1.orders.remove(o1)
self._assert_history(o1,
([], [], [i1]),
)
def test_unchanged_persistent(self):
Address = self.classes.Address
u1, a1, s = self._persistent_fixture()
a2, a3 = Address(email_address='a2'), Address(email_address='a3')
u1.addresses.append(a1)
u1.addresses.append(a2)
s.flush()
u1.addresses.append(a3)
u1.addresses.remove(a2)
self._assert_history(u1,
([a3], [a1], [a2]),
compare_passive=([a3], [], [a2])
)
def test_replace_transient(self):
Address = self.classes.Address
u1, a1 = self._transient_fixture()
a2, a3, a4, a5 = Address(email_address='a2'), \
Address(email_address='a3'), Address(email_address='a4'), \
Address(email_address='a5')
u1.addresses = [a1, a2]
u1.addresses = [a2, a3, a4, a5]
self._assert_history(u1,
([a2, a3, a4, a5], [], [])
)
def test_replace_persistent_noflush(self):
Address = self.classes.Address
u1, a1, s = self._persistent_fixture(autoflush=False)
a2, a3, a4, a5 = Address(email_address='a2'), \
Address(email_address='a3'), Address(email_address='a4'), \
Address(email_address='a5')
u1.addresses = [a1, a2]
u1.addresses = [a2, a3, a4, a5]
self._assert_history(u1,
([a2, a3, a4, a5], [], [])
)
def test_replace_persistent_autoflush(self):
Address = self.classes.Address
u1, a1, s = self._persistent_fixture(autoflush=True)
a2, a3, a4, a5 = Address(email_address='a2'), \
Address(email_address='a3'), Address(email_address='a4'), \
Address(email_address='a5')
u1.addresses = [a1, a2]
u1.addresses = [a2, a3, a4, a5]
self._assert_history(u1,
([a3, a4, a5], [a2], [a1]),
compare_passive=([a3, a4, a5], [], [a1])
)
def test_persistent_but_readded_noflush(self):
u1, a1, s = self._persistent_fixture(autoflush=False)
u1.addresses.append(a1)
s.flush()
u1.addresses.append(a1)
self._assert_history(u1,
([], [a1], []),
compare_passive=([a1], [], [])
)
def test_persistent_but_readded_autoflush(self):
u1, a1, s = self._persistent_fixture(autoflush=True)
u1.addresses.append(a1)
s.flush()
u1.addresses.append(a1)
self._assert_history(u1,
([], [a1], []),
compare_passive=([a1], [], [])
)
def test_missing_but_removed_noflush(self):
u1, a1, s = self._persistent_fixture(autoflush=False)
u1.addresses.remove(a1)
self._assert_history(u1, ([], [], []), compare_passive=([], [], [a1]))
|
nolanliou/tensorflow | refs/heads/master | tensorflow/contrib/signal/python/kernel_tests/shape_ops_test.py | 27 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for shape_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.signal.python.kernel_tests import test_util
from tensorflow.contrib.signal.python.ops import shape_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class FrameTest(test.TestCase):
def test_mapping_of_indices_without_padding(self):
with self.test_session(use_gpu=True):
tensor = constant_op.constant(np.arange(9152), dtypes.int32)
tensor = array_ops.expand_dims(tensor, 0)
result = shape_ops.frame(tensor, 512, 180, pad_end=False).eval()
expected = np.tile(np.arange(512), (49, 1))
expected += np.tile(np.arange(49) * 180, (512, 1)).T
expected = np.expand_dims(expected, axis=0)
expected = np.array(expected, dtype=np.int32)
self.assertAllEqual(expected, result)
def test_mapping_of_indices_with_padding(self):
with self.test_session(use_gpu=True):
tensor = constant_op.constant(np.arange(10000), dtypes.int32)
tensor = array_ops.expand_dims(tensor, 0)
result = shape_ops.frame(tensor, 512, 192, pad_end=True).eval()
expected = np.tile(np.arange(512), (53, 1))
expected += np.tile(np.arange(53) * 192, (512, 1)).T
expected[expected >= 10000] = 0
expected = np.expand_dims(expected, axis=0)
expected = np.array(expected, dtype=np.int32)
self.assertAllEqual(expected, result)
def test_invalid_inputs(self):
# Rank 0 input signal.
with self.assertRaises(ValueError):
shape_ops.frame(1, 1, 1)
# If the rank is unknown, do not raise an exception.
shape_ops.frame(array_ops.placeholder(dtypes.float32), 1, 1)
# Non-scalar frame_length.
with self.assertRaises(ValueError):
shape_ops.frame([1], [1], 1)
# Non-scalar frame_step.
with self.assertRaises(ValueError):
shape_ops.frame([1], 1, [1])
# Non-scalar pad_value.
with self.assertRaises(ValueError):
shape_ops.frame([1], 1, 1, pad_end=True, pad_value=[1])
def test_length_zero(self):
signal = constant_op.constant([], dtype=dtypes.float32)
frame_length = 2
frame_step = 1
with self.test_session(use_gpu=True):
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=True, pad_value=99).eval()
self.assertEqual((0, 2), result.shape)
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=False).eval()
self.assertEqual((0, 2), result.shape)
def test_shape_inference(self):
signal = array_ops.placeholder(dtypes.int32, shape=[1, 1])
frame_length = 2
frame_step = 1
# Shape inference is able to detect the rank and inner-most dimension
# if frame_length is known at graph definition time.
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=True, pad_value=99)
self.assertEqual([1, 1, 2], result.shape.as_list())
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=False)
self.assertEqual([1, 0, 2], result.shape.as_list())
# If frame_length is not known, rank and (known) outer and inner dimensions
# are inferred.
signal = array_ops.placeholder(dtypes.int32, shape=[1, 2, 3, 4])
frame_length = array_ops.placeholder(dtypes.int32, shape=[])
frame_step = 1
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=True, pad_value=99, axis=1)
self.assertEqual([1, None, None, 3, 4], result.shape.as_list())
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=False, axis=1)
self.assertEqual([1, None, None, 3, 4], result.shape.as_list())
# If frame_length and inner-most dimension is known, rank, inner dimensions,
# and known outer dimensions are inferred.
signal = array_ops.placeholder(dtypes.int32,
shape=[None, 5, None, 20, 5, 3])
frame_length = 4
frame_step = 3
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=True, pad_value=99, axis=3)
self.assertEqual([None, 5, None, 7, 4, 5, 3], result.shape.as_list())
result = shape_ops.frame(signal, frame_length, frame_step,
pad_end=False, axis=3)
self.assertEqual([None, 5, None, 6, 4, 5, 3], result.shape.as_list())
# Test that shape inference is consistent with actual returned shapes for
# small values of signal_length, frame_length, frame_step, and pad_end in
# [True, False].
frame_step = 1
for signal_length in range(2):
signal = [0] * signal_length
for frame_length in range(2):
for pad_end in [False, True]:
op = shape_ops.frame(signal, frame_length, frame_step,
pad_end=pad_end, pad_value=99)
with self.test_session(use_gpu=True):
result = op.eval()
self.assertEqual(op.shape.as_list(), list(result.shape))
def test_basic_mono(self):
signal = np.arange(6)
frame_length = 3
frame_step = 2
with self.test_session(use_gpu=True):
for rank in range(5):
nd_signal = np.reshape(signal, (1,) * rank + signal.shape)
# With padding, we pad the last frame with pad_value.
result = shape_ops.frame(nd_signal, frame_length, frame_step,
pad_end=True, pad_value=99).eval()
expected_inner_frames = np.array([[0, 1, 2], [2, 3, 4], [4, 5, 99]])
expected = np.reshape(
expected_inner_frames, (1,) * rank + expected_inner_frames.shape)
self.assertAllEqual(expected, result)
# Without padding, we drop the last frame.
expected_inner_frames = np.array([[0, 1, 2], [2, 3, 4]])
expected = np.reshape(
expected_inner_frames, (1,) * rank + expected_inner_frames.shape)
result = shape_ops.frame(nd_signal, frame_length, frame_step,
pad_end=False).eval()
self.assertAllEqual(expected, result)
def test_basic_stereo(self):
  """Frames a 2-channel signal; framing applies along the last axis."""
  signal = np.vstack([np.arange(6),
                      np.arange(6) + 10])
  frame_length = 3
  frame_step = 2
  with self.test_session(use_gpu=True):
    # Prepend 0..4 singleton dimensions to check rank independence.
    for rank in range(5):
      nd_signal = np.reshape(signal, (1,) * rank + signal.shape)
      # With padding, we pad the last frame with pad_value.
      result = shape_ops.frame(nd_signal, frame_length, frame_step,
                               pad_end=True, pad_value=99).eval()
      expected_inner_frames = np.array([
          [[0, 1, 2], [2, 3, 4], [4, 5, 99]],
          [[10, 11, 12], [12, 13, 14], [14, 15, 99]]])
      expected = np.reshape(
          expected_inner_frames, (1,) * rank + expected_inner_frames.shape)
      self.assertAllEqual(expected, result)
      # Without padding, we drop the last frame.
      expected_inner_frames = np.array([[[0, 1, 2], [2, 3, 4]],
                                        [[10, 11, 12], [12, 13, 14]]])
      expected = np.reshape(
          expected_inner_frames, (1,) * rank + expected_inner_frames.shape)
      result = shape_ops.frame(nd_signal, frame_length, frame_step,
                               pad_end=False).eval()
      self.assertAllEqual(expected, result)
def test_complex_shape(self):
  """Frames a rank-5 signal; only the innermost axis is framed."""
  signal = np.vstack([np.arange(6),
                      np.arange(6) + 10,
                      np.arange(6) + 20,
                      np.arange(6) + 30,
                      np.arange(6) + 40,
                      np.arange(6) + 50])
  signal = np.reshape(signal, (2, 1, 3, 1, 6))
  frame_length = 3
  frame_step = 2
  with self.test_session(use_gpu=True):
    # With padding, we pad the last frame with pad_value.
    result = shape_ops.frame(signal, frame_length, frame_step,
                             pad_end=True, pad_value=99).eval()
    # Resulting shape is (2, 1, 3, 1, 3, 3).
    expected = [[[[[[0, 1, 2], [2, 3, 4], [4, 5, 99]]],
                  [[[10, 11, 12], [12, 13, 14], [14, 15, 99]]],
                  [[[20, 21, 22], [22, 23, 24], [24, 25, 99]]]]],
                [[[[[30, 31, 32], [32, 33, 34], [34, 35, 99]]],
                  [[[40, 41, 42], [42, 43, 44], [44, 45, 99]]],
                  [[[50, 51, 52], [52, 53, 54], [54, 55, 99]]]]]]
    self.assertAllEqual(expected, result)
    # Without padding, the partial last frame is dropped.
    result = shape_ops.frame(signal, frame_length, frame_step,
                             pad_end=False).eval()
    # Resulting shape is (2, 1, 3, 1, 3, 2).
    expected = [[[[[[0, 1, 2], [2, 3, 4]]],
                  [[[10, 11, 12], [12, 13, 14]]],
                  [[[20, 21, 22], [22, 23, 24]]]]],
                [[[[[30, 31, 32], [32, 33, 34]]],
                  [[[40, 41, 42], [42, 43, 44]]],
                  [[[50, 51, 52], [52, 53, 54]]]]]]
    self.assertAllEqual(expected, result)
def test_axis(self):
  """Frames along a non-innermost axis (axis=1) with several step sizes.

  The end padding in these cases uses the default pad_value of 0.
  """
  signal = np.reshape(np.arange(16), (2, 4, 2))
  with self.test_session(use_gpu=True):
    # Non-overlapping frames (length == step): a pure reshape.
    result = shape_ops.frame(signal, frame_length=2, frame_step=2,
                             pad_end=True, axis=1)
    expected = np.reshape(np.arange(16), (2, 2, 2, 2))
    self.assertAllEqual(expected, result.eval())
    # Overlapping frames; last frame padded with zeros.
    result = shape_ops.frame(signal, frame_length=2, frame_step=1,
                             pad_end=True, axis=1)
    expected = [[[[0, 1], [2, 3]],
                 [[2, 3], [4, 5]],
                 [[4, 5], [6, 7]],
                 [[6, 7], [0, 0]]],
                [[[8, 9], [10, 11]],
                 [[10, 11], [12, 13]],
                 [[12, 13], [14, 15]],
                 [[14, 15], [0, 0]]]]
    self.assertAllEqual(expected, result.eval())
    # Longer frames: multiple trailing frames need zero padding.
    result = shape_ops.frame(signal, frame_length=3, frame_step=1,
                             pad_end=True, axis=1)
    expected = [[[[0, 1], [2, 3], [4, 5]],
                 [[2, 3], [4, 5], [6, 7]],
                 [[4, 5], [6, 7], [0, 0]],
                 [[6, 7], [0, 0], [0, 0]]],
                [[[8, 9], [10, 11], [12, 13]],
                 [[10, 11], [12, 13], [14, 15]],
                 [[12, 13], [14, 15], [0, 0]],
                 [[14, 15], [0, 0], [0, 0]]]]
    self.assertAllEqual(expected, result.eval())
def test_window_larger_than_signal(self):
  """Frame length exceeding the signal length.

  With pad_end=True every frame is mostly pad_value; with pad_end=False
  no complete frame fits, so the frames dimension is empty.
  """
  signal = constant_op.constant([[1, 2], [11, 12]], dtype=dtypes.float32)
  frame_length = 4
  frame_step = 1
  with self.test_session(use_gpu=True):
    result = shape_ops.frame(signal, frame_length, frame_step,
                             pad_end=True, pad_value=99).eval()
    self.assertAllClose([[[1, 2, 99, 99], [2, 99, 99, 99]],
                         [[11, 12, 99, 99], [12, 99, 99, 99]]], result)
    result = shape_ops.frame(signal, frame_length, frame_step,
                             pad_end=False).eval()
    # No frame fits: zero frames of length 4.
    self.assertEqual((2, 0, 4), result.shape)
    frame_step = 2
    result = shape_ops.frame(signal, frame_length, frame_step,
                             pad_end=True, pad_value=99).eval()
    self.assertAllClose([[[1, 2, 99, 99]], [[11, 12, 99, 99]]], result)
    result = shape_ops.frame(signal, frame_length, frame_step,
                             pad_end=False).eval()
    self.assertEqual((2, 0, 4), result.shape)
def test_preserves_type(self):
  """The output dtype must match the input signal's dtype."""
  signal = math_ops.range(10, dtype=dtypes.float64)
  frame_length = 2
  frame_step = 3
  with self.test_session(use_gpu=True):
    result = shape_ops.frame(signal, frame_length, frame_step)
    self.assertEqual(result.dtype, signal.dtype)
def test_dynamic_tensor(self):
  # Show that frame works even when the dimensions of its input are
  # not known at graph creation time.
  input_signal = np.vstack([np.arange(4), np.arange(4) + 10,
                            np.arange(4) + 20])
  frame_length = 2
  frame_step = 2
  with self.test_session(use_gpu=True) as sess:
    # Fully dynamic shape: both dimensions are None.
    signal_placeholder = array_ops.placeholder(shape=(None, None),
                                               dtype=dtypes.float32)
    result = sess.run(shape_ops.frame(
        signal_placeholder, frame_length, frame_step),
                      feed_dict={signal_placeholder: input_signal})
    self.assertAllEqual([[[0, 1], [2, 3]],
                         [[10, 11], [12, 13]],
                         [[20, 21], [22, 23]]], result)
def test_gradient_numerical(self):
  """Numerically checks the gradient of frame against finite differences."""
  with self.test_session(use_gpu=True):
    signal_shape = (2, 128)
    signal = array_ops.ones(signal_shape)
    frame_length = 33
    frame_step = 9
    frames = shape_ops.frame(signal, frame_length, frame_step)
    # compute_gradient_error compares the symbolic gradient with a
    # numeric estimate; it should be close to zero.
    error = test.compute_gradient_error(
        signal, signal_shape, frames, frames.shape.as_list())
    self.assertLess(error, 2e-5)
def test_constant_folding(self):
  """frame should be constant foldable for constant inputs."""
  for pad_end in [False, True]:
    g = ops.Graph()
    with g.as_default():
      frame_length, frame_step = 32, 16
      signal_shape = (2, 128)
      signal = array_ops.ones(signal_shape)
      frames = shape_ops.frame(signal, frame_length, frame_step,
                               pad_end=pad_end)
      # After grappler constant folding the whole graph should collapse
      # to a single (constant) node.
      rewritten_graph = test_util.grappler_optimize(g, [frames])
      self.assertEqual(1, len(rewritten_graph.node))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
GRArmstrong/invenio-inspire-ops | refs/heads/prod | modules/elmsubmit/lib/elmsubmit_unit_tests.py | 16 | # -*- coding: utf-8 -*-
## Invenio elmsubmit unit tests.
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the elmsubmit."""
__revision__ = "$Id$"
import unittest
import os
from string import expandtabs
import xml.dom.minidom
from invenio.config import CFG_TMPDIR
import invenio.elmsubmit_config as elmsubmit_config
from invenio import elmsubmit
from invenio.testutils import make_test_suite, run_test_suite
# True when the corresponding sample email fixture is present in CFG_TMPDIR;
# the parsing tests below are only defined when their fixture exists.
# (The original if/else blocks assigning True/False literals are replaced by
# assigning the boolean result of os.path.exists directly.)
test_case_1_file_exists = os.path.exists(os.path.join(
    CFG_TMPDIR, elmsubmit_config.CFG_ELMSUBMIT_FILES['test_case_1']))

test_case_2_file_exists = os.path.exists(os.path.join(
    CFG_TMPDIR, elmsubmit_config.CFG_ELMSUBMIT_FILES['test_case_2']))
class MarcTest(unittest.TestCase):
    """ elmsubmit - test for sanity

    Parses sample emails with elmsubmit.process_email and compares the
    produced XML MARC against the expected records. Each test method is
    only defined when its on-disk fixture exists (see the module-level
    test_case_*_file_exists flags).
    """
    if test_case_1_file_exists:
        def test_simple_marc(self):
            """elmsubmit - parsing simple email"""
            f=open(os.path.join(CFG_TMPDIR, elmsubmit_config.CFG_ELMSUBMIT_FILES['test_case_1']),'r')
            email = f.read()
            f.close()
            # let's try to parse an example email and compare it with the appropriate marc xml
            x = elmsubmit.process_email(email)
            y = """<record>
<datafield tag ="245" ind1="" ind2="">
<subfield code="a">something</subfield>
</datafield>
<datafield tag ="100" ind1="" ind2="">
<subfield code="a">Simko, T</subfield>
<subfield code="u">CERN</subfield>
</datafield>
</record>"""
            # in order to properly compare the marc files we have to remove the FFT node, it includes a random generated file path
            dom_x = xml.dom.minidom.parseString(x)
            datafields = dom_x.getElementsByTagName("datafield")
            #remove all the FFT datafields
            for node in datafields:
                if (node.hasAttribute("tag") and node.getAttribute("tag") == "FFT"):
                    node.parentNode.removeChild(node)
                    node.unlink()
            new_x = dom_x.toprettyxml("","\n")
            dom_y = xml.dom.minidom.parseString(y)
            new_y = dom_y.toprettyxml("","\n")
            # 'normalize' the two XML MARC files for the purpose of comparing
            # (strip all whitespace so formatting differences don't matter)
            new_x = expandtabs(new_x)
            new_y = expandtabs(new_y)
            new_x = new_x.replace(' ','')
            new_y = new_y.replace(' ','')
            new_x = new_x.replace('\n','')
            new_y = new_y.replace('\n','')
            # compare the two xml marcs
            self.assertEqual(new_x,new_y)
    if test_case_2_file_exists:
        def test_complex_marc(self):
            """elmsubmit - parsing complex email with multiple fields"""
            f=open(os.path.join(CFG_TMPDIR, elmsubmit_config.CFG_ELMSUBMIT_FILES['test_case_2']),'r')
            email = f.read()
            f.close()
            # let's try to reproduce the demo XML MARC file by parsing it and printing it back:
            x = elmsubmit.process_email(email)
            y = """<record>
<datafield tag ="245" ind1="" ind2="">
<subfield code="a">something</subfield>
</datafield>
<datafield tag ="700" ind1="" ind2="">
<subfield code="a">Le Meur, J Y</subfield>
<subfield code="u">MIT</subfield>
</datafield>
<datafield tag ="700" ind1="" ind2="">
<subfield code="a">Jedrzejek, K J</subfield>
<subfield code="u">CERN2</subfield>
</datafield>
<datafield tag ="700" ind1="" ind2="">
<subfield code="a">Favre, G</subfield>
<subfield code="u">CERN3</subfield>
</datafield>
<datafield tag ="111" ind1="" ind2="">
<subfield code="a">test11</subfield>
<subfield code="c">test31</subfield>
</datafield>
<datafield tag ="111" ind1="" ind2="">
<subfield code="a">test12</subfield>
<subfield code="c">test32</subfield>
</datafield>
<datafield tag ="111" ind1="" ind2="">
<subfield code="a">test13</subfield>
<subfield code="c">test33</subfield>
</datafield>
<datafield tag ="111" ind1="" ind2="">
<subfield code="b">test21</subfield>
<subfield code="d">test41</subfield>
</datafield>
<datafield tag ="111" ind1="" ind2="">
<subfield code="b">test22</subfield>
<subfield code="d">test42</subfield>
</datafield>
<datafield tag ="111" ind1="" ind2="">
<subfield code="a">test14</subfield>
</datafield>
<datafield tag ="111" ind1="" ind2="">
<subfield code="e">test51</subfield>
</datafield>
<datafield tag ="111" ind1="" ind2="">
<subfield code="e">test52</subfield>
</datafield>
<datafield tag ="100" ind1="" ind2="">
<subfield code="a">Simko, T</subfield>
<subfield code="u">CERN</subfield>
</datafield>
</record>"""
            # in order to properly compare the marc files we have to remove the FFT node, it includes a random generated file path
            dom_x = xml.dom.minidom.parseString(x)
            datafields = dom_x.getElementsByTagName("datafield")
            #remove all the FFT datafields
            for node in datafields:
                if (node.hasAttribute("tag") and node.getAttribute("tag") == "FFT"):
                    node.parentNode.removeChild(node)
                    node.unlink()
            new_x = dom_x.toprettyxml("","\n")
            dom_y = xml.dom.minidom.parseString(y)
            new_y = dom_y.toprettyxml("","\n")
            # 'normalize' the two XML MARC files for the purpose of comparing
            # (strip all whitespace so formatting differences don't matter)
            new_x = expandtabs(new_x)
            new_y = expandtabs(new_y)
            new_x = new_x.replace(' ','')
            new_y = new_y.replace(' ','')
            new_x = new_x.replace('\n','')
            new_y = new_y.replace('\n','')
            # compare the two xml marcs
            self.assertEqual(new_x,new_y)
class FileStorageTest(unittest.TestCase):
    """ testing proper storage of files

    Verifies that attachments referenced by the FFT datafields of the
    generated XML MARC were written to disk with the expected contents.
    """
    if test_case_2_file_exists:
        def test_read_text_files(self):
            """elmsubmit - reading text files"""
            f=open(os.path.join(CFG_TMPDIR, elmsubmit_config.CFG_ELMSUBMIT_FILES['test_case_2']),'r')
            email = f.read()
            f.close()
            # let's try to see if the files were properly stored:
            xml_marc = elmsubmit.process_email(email)
            dom = xml.dom.minidom.parseString(xml_marc)
            datafields = dom.getElementsByTagName("datafield")
            # get the file addresses
            file_list = []
            for node in datafields:
                if (node.hasAttribute("tag") and node.getAttribute("tag") == "FFT"):
                    children = node.childNodes
                    for child in children:
                        if (child.hasChildNodes()):
                            file_list.append(child.firstChild.nodeValue)
            f=open(file_list[0], 'r')
            x = f.read()
            f.close()
            # NOTE(review): str.lstrip()/str.rstrip() return new strings and
            # the results are discarded here, so both calls are no-ops. The
            # assertion still passes because the expected value below keeps
            # its trailing newline.
            x.lstrip()
            x.rstrip()
            y = """second attachment\n"""
            self.assertEqual(x,y)
            f=open(file_list[1], 'r')
            x = f.read()
            f.close()
            # NOTE(review): same discarded-result no-ops as above.
            x.lstrip()
            x.rstrip()
            y = """some attachment\n"""
            self.assertEqual(x,y)
# Aggregate the test classes into a single suite for the Invenio test runner.
TEST_SUITE = make_test_suite(MarcTest,
                             FileStorageTest,)

if __name__ == '__main__':
    run_test_suite(TEST_SUITE)
|
mtth/igloo | refs/heads/master | igloo.py | 1 | #!/usr/bin/env python
"""Igloo: a command line SCP client.
Usage:
igloo [-adfklmrq] [-p PROFILE | -u URL] ([-inw] -e EXPR | FILEPATH ...)
igloo (-s | --stream) [-bdr] [-p PROFILE | -u URL] FILEPATH
igloo (-c | --config) [add URL [PROFILE] | delete PROFILE | list]
igloo -h | --help | -v | --version
For igloo to work, you must have set up key authentication for each host.
You can then either input each url manually (`-u user@host:remote/path`) or
save urls you use often to profiles (`-c add user@host:remote/path prof`) and
then access them directly (`-p prof`). Profiles are saved in $MYIGLOORC or
$HOME/.igloorc if the former isn't set.
Arguments:
FILEPATH Local or remote path of file to transfer. With
the `--stream` option, this is only used as
remote path. Note that any filepaths
corresponding to directories are skipped.
Options:
-a --ask Interactive mode. Ask before transferring each
file.
-b --binary Don't decode stdout (by default, stdout is
decoded using the local preferred encoding).
-c --config Configuration mode. Use subcommand add to
create a new url/profile entry, subcommand
delete to delete an entry and subcommand list
to display all existing entries. If no
subcommand is specified, prints configuration
filepath.
-d --debug Enable full exception traceback.
-e EXPR --expr=EXPR Regular expression to filter filepaths with
(e.g. `-e .` will match all files in the
directory).
-f --force Allow transferred files to overwrite existing
ones (by default, igloo will error out when
this happens).
-h --help Show this screen and exit.
-i --case-insensitive Case insensitive regular expression matching.
-k --keep-hierarchy Preserve folder hierarchy when transferring
files. The default is to transfer files to the
current directory.
-l --list Show matching filepaths and exit without
transferring files.
-m --move Delete origin copy after successful transfer.
-n --no-match Inverse match.
-p PROFILE --profile=PROFILE Profile [default: default].
-q --quiet No output (by default, the filepath of each
transferred file is printed to stdout).
-r --remote Remote mode. filepaths will correspond to
files on the remote host and all transfers
become downloads.
-s --stream Streaming mode. In non-remote mode, the file
uploaded will be read from stdin. In remote
mode, the downloaded file will be written to
stdout.
-u URL --url=URL Url to SCP to (will override any profile).
-v --version Show version and exit.
-w --walk Recursive directory exploration.
Examples:
igloo -rle . List all files in remote directory.
igloo -fmq * Move all files to remote directory silently.
igloo -sbr a.zip > b.zip Download and rename binary file.
igloo -ine 'jpe?g$' Upload all non jpeg files.
igloo -arwe 'py$' Download all python files in remote directory
hierarchy, asking for confirmation first.
"""
__version__ = '0.1.6'
from codecs import getwriter
from errno import ENOENT, EEXIST
from getpass import getuser
from locale import getpreferredencoding
from os import environ, listdir, mkdir, remove, strerror, walk as os_walk
from os.path import exists, expanduser, isdir, join, sep, split
from re import compile as re_compile, IGNORECASE
from socket import error
from sys import stderr, stdin, stdout
from stat import S_ISDIR
from traceback import format_exc
try:
from docopt import docopt
from paramiko import SSHClient, SSHException
from yaml import dump, load
except ImportError:
pass # probably in setup.py
# Numbered error message templates used by ClientError below; the
# %-placeholders are filled in with the `details` tuple passed to it.
ERRORS = {
    0: 'something bad happened',
    1: 'unable to connect to %r@%r',
    2: 'remote file %r not found',
    3: 'local file %r not found',
    4: 'transfer interrupted',
    5: 'refusing to transfer directory. try with the --zip option',
    6: 'invalid remote folder %r',
    7: 'unable to decode received data. try with the --binary option',
    8: 'unable to load host keys from file %r',
    9: 'no configuration file found',
    10: 'profile %r not found in configuration file',
    11: 'local file %r would be overwritten by transfer (use --force)',
    12: 'remote file %r would be overwritten by transfer (use --force)',
    13: 'local file %r already exists',
    14: 'remote file %r already exists',
}
def get_stream_writer(binary=False, writer=stdout):
    """Return the output stream used by the client.

    In binary mode the raw stream is handed back untouched; otherwise the
    stream is wrapped in a codec writer that encodes text using the
    locale's preferred encoding.
    """
    if binary:
        return writer
    encoding = getpreferredencoding()
    return getwriter(encoding)(writer)
def write(iterable, writer, lazy_flush=False, format='%s\n'):
    """Write each element of *iterable* to *writer* using *format*.

    With ``lazy_flush`` the stream is flushed once after the loop;
    otherwise it is flushed after every element.
    """
    flush_each = not lazy_flush
    for item in iterable:
        writer.write(format % item)
        if flush_each:
            writer.flush()
    if lazy_flush:
        writer.flush()
def remote_file_exists(path, sftp):
    """Return True when *path* exists on the remote host.

    A missing file is detected via the ENOENT errno on ``sftp.stat``;
    any other IOError is propagated unchanged.
    """
    try:
        sftp.stat(path)
        return True
    except IOError as err:
        if err.errno != ENOENT:
            raise
        return False
def remote_file_is_directory(path, sftp):
    """Return True when the remote *path* is a directory."""
    mode = sftp.stat(path).st_mode
    return S_ISDIR(mode)
def safe_makedirs(path, sftp=None):
    """Create every directory along *path*, locally or over SFTP.

    Existing directories are left untouched. Raises OSError(EEXIST) when
    a path component exists but is not a directory.
    """
    components = path.split(sep)
    for index in range(len(components)):
        prefix = sep.join(components[:index + 1])
        if sftp:
            if not remote_file_exists(prefix, sftp):
                sftp.mkdir(prefix)
            elif not remote_file_is_directory(prefix, sftp):
                raise OSError(EEXIST, strerror(EEXIST), path)
        else:
            if not exists(prefix):
                mkdir(prefix)
            elif not isdir(prefix):
                raise OSError(EEXIST, strerror(EEXIST), path)
def parse_url(url):
    """Split ``[user@]host[:path]`` into a (user, host, path) tuple.

    A missing user defaults to the local username and a missing path to
    '.'. Raises ValueError when no host can be extracted.
    """
    before, at_sign, after = url.partition('@')
    if at_sign:
        user, remainder = before, after
    else:
        user, remainder = getuser(), url
    host, colon, path = remainder.partition(':')
    if not colon:
        path = '.'
    if not host:
        raise ValueError('Empty url')
    return user, host, path
def get_callback():
    """Callback factory function for ``sftp.put`` and ``sftp.get``.

    Returns a closure that prints an in-place percentage on the console
    and blanks the line once the transfer is complete.
    """
    writer = get_stream_writer()
    def callback(transferred, total):
        """Actual callback function."""
        progress = int(100 * float(transferred) / total)
        if progress < 100:
            # Carriage return (no newline) keeps rewriting the same line.
            writer.write(' %2i%%\r' % (progress, ))
        else:
            # Overwrite the progress display with spaces to clear it.
            writer.write('              \r')
        writer.flush()
    return callback
def ask(prompt, default='n'):
    """Prompt for a yes/no answer; empty input selects *default*.

    The capitalised letter in the displayed choices marks the default.
    Returns True for a 'y'/'Y' answer.
    """
    defaults = 'Yn' if default.lower() == 'y' else 'yN'
    answer = raw_input('%s [%s] ' % (prompt, defaults)) or default
    return answer.lower() == 'y'
class ClientError(Exception):
    """Base client error class.

    Stores the original traceback to be displayed in debug mode.
    """
    def __init__(self, number, details=()):
        # Render the numbered template from ERRORS with the details tuple.
        super(ClientError, self).__init__('error: ' + ERRORS[number] % details)
        # Capture whatever traceback is active at construction time so the
        # --debug flag can print the underlying cause.
        self.traceback = format_exc()
class BaseClient(object):
    """API client.

    Context manager that opens an SSH connection plus an SFTP session
    rooted at the url's remote path, and exposes a low-level transfer
    primitive over it.
    """

    # Paramiko handles; populated between __enter__ and __exit__.
    ssh = None
    sftp = None

    def __init__(self, url, host_keys=None):
        # Split the url into its user/host/path components up front.
        self.user, self.host, self.path = parse_url(url)
        # Default to the standard OpenSSH known_hosts file.
        self.host_keys = host_keys or join(expanduser('~'), '.ssh', 'known_hosts')

    def __enter__(self):
        # Connect and chdir into the configured remote folder; every
        # failure mode is mapped to a numbered ClientError.
        self.ssh = SSHClient()
        try:
            self.ssh.load_host_keys(self.host_keys)
        except IOError:
            raise ClientError(8, (self.host_keys, ))
        try:
            self.ssh.connect(self.host, username=self.user)
        except (SSHException, error):
            raise ClientError(1, (self.user, self.host))
        else:
            self.sftp = self.ssh.open_sftp()
            try:
                self.sftp.chdir(self.path)
            except IOError:
                raise ClientError(6, (self.path, ))

    def __exit__(self, type, value, traceback):
        # Close the SFTP session first, then the SSH transport.
        if self.sftp:
            self.sftp.close()
            self.sftp = None
        self.ssh.close()
        self.ssh = None

    def transfer(self, remote_filepath, reader=None, writer=None, callback=None):
        """Transfer file. Doesn't check if overwrites.

        Exactly one of *reader* (upload) or *writer* (download) must be
        given; *callback* is forwarded to paramiko for progress updates.
        """
        try:
            if reader and not writer:
                # upload
                self.sftp.putfo(reader, remote_filepath, callback=callback)
            elif writer and not reader:
                # download
                self.sftp.getfo(remote_filepath, writer, callback=callback)
            else:
                raise ValueError('Exactly one of reader or writer can be specified.')
        except IOError: # missing remote file
            raise ClientError(2, (remote_filepath, ))
        except UnicodeDecodeError:
            raise ClientError(7)
class Client(BaseClient):
    """Implements additional convenience methods.

    Adds profile management, filepath matching, streaming, and
    upload/download helpers on top of BaseClient.
    """

    # Profile file location; overridable through the MYIGLOORC variable.
    config_path = environ.get('MYIGLOORC', expanduser(join('~', '.igloorc')))

    def __init__(self, url=None, profile=None, host_keys=None):
        # Fall back to the saved profile url when none is given explicitly.
        if not url:
            try:
                url = self.profile[profile]
            except KeyError:
                raise ClientError(10, (profile, ))
        super(Client, self).__init__(url=url, host_keys=host_keys)

    @property
    def profile(self):
        """Dictionary of profiles.

        NOTE(review): the configuration file is re-read on every access;
        `configure` below relies on the `self._profile` side effect of
        this property to persist its mutation.
        """
        try:
            with open(self.config_path) as handle:
                self._profile = load(handle)
        except IOError as err:
            if err.errno == ENOENT:
                # A missing configuration file simply means no profiles yet.
                self._profile = {}
            else:
                raise
        return self._profile

    def configure(self, profile, url=''):
        """Add and remove profiles.

        A non-empty *url* adds/overwrites the entry; an empty one deletes
        it. The profile file is rewritten afterwards.
        """
        if url:
            self.profile[profile] = url
        else:
            try:
                del self.profile[profile]
            except KeyError:
                raise ClientError(10, (profile, ))
        with open(self.config_path, 'w') as handle:
            dump(self._profile, handle, default_flow_style=False)

    def get_filepaths(self, expr, no_match=False, case_insensitive=False,
                      walk=False, remote=False):
        """Return filepaths that match a regular expression.

        Directories are always skipped. `walk` recurses into the tree and
        `remote` lists files on the host instead of the local directory;
        `no_match` inverts the filter.
        """
        regex = re_compile(
            pattern=expr,
            flags=IGNORECASE if case_insensitive else 0,
        )
        if walk:
            if remote:
                def walk_directory(path):
                    """Walk remote directory."""
                    filepaths = []
                    for filename in self.sftp.listdir(path):
                        filepath = join(path, filename)
                        if remote_file_is_directory(filepath, self.sftp):
                            filepaths.extend(walk_directory(filepath))
                        else:
                            filepaths.append(filepath)
                    return filepaths
            else:
                def walk_directory(path):
                    """Walk local directory."""
                    filepaths = []
                    for (path, dirnames, names) in os_walk(path):
                        filepaths.extend([join(path, filename) for filename in names])
                    return filepaths
            # all path start with './', we remove it for consistency
            filepaths = [
                filepath.split(sep, 1)[1]
                for filepath in walk_directory('.')
            ]
        else:
            if remote:
                filepaths = [
                    filepath for filepath in self.sftp.listdir()
                    if not remote_file_is_directory(filepath, self.sftp)
                ]
            else:
                filepaths = [
                    filepath for filepath in listdir('.')
                    if not isdir(filepath)
                ]
        # Keep matches, or keep non-matches when `no_match` is set.
        return [
            filepath for filepath in filepaths
            if (regex.search(filepath) and not no_match)
            or (not regex.search(filepath) and no_match)
        ]

    def stream(self, remote_filepath, remote=False, binary=False):
        """Stream from stdin / to stdout.

        Remote mode downloads to stdout; otherwise stdin is uploaded.
        """
        if remote:
            self.transfer(
                remote_filepath=remote_filepath,
                writer=get_stream_writer(binary=binary),
            )
        else:
            self.transfer(
                remote_filepath=remote_filepath,
                reader=stdin,
            )

    def download(self, remote_filepath, keep_hierarchy=False, force=False,
                 track=False, move=False):
        """Attempt to download a file from the remote host.

        Refuses to overwrite an existing local file unless *force*;
        *move* deletes the remote copy after a successful transfer.
        Returns the local filepath written.
        """
        local_filepath = self._prepare_filepath(
            remote_filepath,
            keep_hierarchy=keep_hierarchy,
            remote=False
        )
        if not force and exists(local_filepath):
            raise ClientError(11, (local_filepath, ))
        if track:
            callback = get_callback()
        else:
            callback = None
        with open(local_filepath, 'wb') as writer:
            self.transfer(
                remote_filepath=remote_filepath,
                writer=writer,
                callback=callback,
            )
        if move:
            self.sftp.remove(remote_filepath)
        return local_filepath

    def upload(self, local_filepath, keep_hierarchy=False, force=False,
               track=False, move=False):
        """Attempt to upload a file the remote host.

        Mirror image of `download`: refuses to overwrite remotely unless
        *force*, and *move* removes the local copy afterwards. Returns
        the remote filepath written.
        """
        remote_filepath = self._prepare_filepath(
            local_filepath,
            keep_hierarchy=keep_hierarchy,
            remote=True
        )
        if not force and remote_file_exists(remote_filepath, self.sftp):
            raise ClientError(12, (remote_filepath, ))
        if track:
            callback = get_callback()
        else:
            callback = None
        with open(local_filepath, 'rb') as reader:
            self.transfer(
                remote_filepath=remote_filepath,
                reader=reader,
                callback=callback,
            )
        if move:
            remove(local_filepath)
        return remote_filepath

    def _prepare_filepath(self, filepath, keep_hierarchy=False, remote=False):
        """Returns transferred filepath and creates directories if necessary.

        With *keep_hierarchy* the full relative path is preserved (and
        intermediate directories created); otherwise only the basename is
        used on the destination side.
        """
        try:
            if remote:
                dirname, filename = split(filepath)
                if keep_hierarchy:
                    safe_makedirs(dirname, self.sftp)
                    new_filepath = filepath
                else:
                    new_filepath = filename
            else:
                dirname, filename = split(filepath)
                if keep_hierarchy:
                    safe_makedirs(dirname)
                    new_filepath = filepath
                else:
                    new_filepath = filename
        except OSError as err:
            # EEXIST from safe_makedirs: a path component exists but is a file.
            client_errno = 14 if remote else 13
            raise ClientError(client_errno, (err.filename, ))
        else:
            return new_filepath
def configure_client(client, arguments):
    """Configure client according to command line arguments.

    Dispatches on the --config subcommand: ``add``/``delete`` modify the
    profile file (delete passes no URL, which makes Client.configure
    remove the entry), ``list`` prints all saved profiles, and no
    subcommand prints the configuration file path.
    """
    writer = get_stream_writer()
    if arguments['add'] or arguments['delete']:
        client.configure(
            profile=arguments['PROFILE'] or 'default',
            url=arguments['URL'],
        )
    elif arguments['list']:
        # Fix: the previous `sorted(reversed(...))` was a no-op (sorting
        # discards any prior ordering) and `reversed` fails outright on
        # Python 3 dict views; sort the profile entries directly.
        write(
            sorted(client.profile.items()),
            writer,
            format='%s [%s]\n'
        )
    else:
        write([client.config_path], writer)
def run_client(client, arguments):
    """Main handler.

    Resolves the list of filepaths (from a regular expression or explicit
    arguments), then either lists them, streams one, or transfers each
    according to the parsed command line flags.
    """
    writer = get_stream_writer(binary=arguments['--binary'])
    with client:
        if arguments['--expr']:
            # Regular-expression selection (-e) of local or remote files.
            filepaths = client.get_filepaths(
                expr=arguments['--expr'],
                no_match=arguments['--no-match'],
                case_insensitive=arguments['--case-insensitive'],
                remote=arguments['--remote'],
                walk=arguments['--walk'],
            )
        else:
            # Explicit filepaths; local directories are skipped.
            filepaths = [
                filepath for filepath in arguments['FILEPATH']
                if arguments['--remote'] or not isdir(filepath)
            ]
        if arguments['--list']:
            # --list only prints the matches, no transfer happens.
            write(filepaths, writer)
        else:
            remote = arguments['--remote']
            # ask upfront to confirm all filepaths to avoid waiting afterwards
            filepaths = [
                filepath for filepath in filepaths
                if not arguments['--ask'] or ask('Transfer %s?' % (filepath, ))
            ]
            for filepath in filepaths:
                if arguments['--stream']:
                    client.stream(
                        remote_filepath=filepath,
                        remote=remote,
                        binary=arguments['--binary'],
                    )
                else:
                    if remote:
                        local_filepath = client.download(
                            remote_filepath=filepath,
                            move=arguments['--move'],
                            keep_hierarchy=arguments['--keep-hierarchy'],
                            force=arguments['--force'],
                        )
                        if not arguments['--quiet']:
                            write([local_filepath], writer)
                    else:
                        remote_filepath = client.upload(
                            local_filepath=filepath,
                            move=arguments['--move'],
                            keep_hierarchy=arguments['--keep-hierarchy'],
                            force=arguments['--force'],
                        )
                        if not arguments['--quiet']:
                            write([remote_filepath], writer)
def main():
    """Command line parser. Docopt is amazing.

    Builds the Client from the parsed arguments and dispatches to the
    configuration or transfer handler; ClientErrors become a short
    message (or a full traceback with --debug) and exit code 1.
    """
    arguments = docopt(__doc__, version=__version__)
    try:
        client = Client(
            url=arguments['--url'],
            profile=arguments['--profile'],
        )
        if arguments['--config']:
            configure_client(client, arguments)
        else:
            run_client(client, arguments)
    except ClientError as err:
        if arguments['--debug']:
            # Show the traceback captured when the error was constructed.
            stderr.write(err.traceback)
        else:
            # NOTE(review): `err.message` exists on Python 2 exceptions
            # only; a Python 3 port would need str(err) here.
            stderr.write('%s\n' % (err.message, ))
        exit(1)

if __name__ == '__main__':
    main()
|
markflyhigh/incubator-beam | refs/heads/master | sdks/python/apache_beam/runners/worker/sdk_worker_main.py | 1 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SDK Fn Harness entry point."""
from __future__ import absolute_import
import http.server
import json
import logging
import os
import re
import sys
import threading
import traceback
from builtins import object
from google.protobuf import text_format
from apache_beam.internal import pickler
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import ProfilingOptions
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.internal import names
from apache_beam.runners.worker.log_handler import FnApiLogRecordHandler
from apache_beam.runners.worker.sdk_worker import SdkHarness
from apache_beam.utils import profiler
# This module is experimental. No backwards-compatibility guarantees.
class StatusServer(object):
  """Debug HTTP server that serves a stack dump of every live thread."""

  @classmethod
  def get_thread_dump(cls):
    """Return a list of text lines describing each live thread's stack."""
    lines = []
    frames = sys._current_frames()  # pylint: disable=protected-access

    for t in threading.enumerate():
      frame = frames.get(t.ident)
      if frame is None:
        # Fix: a thread started after the frame snapshot was taken has no
        # entry in `frames`; the previous direct indexing raised KeyError.
        continue
      lines.append('--- Thread #%s name: %s ---\n' % (t.ident, t.name))
      lines.append(''.join(traceback.format_stack(frame)))

    return lines

  def start(self, status_http_port=0):
    """Executes the serving loop for the status server.

    Args:
      status_http_port(int): Binding port for the debug server.
        Default is 0 which means any free unsecured port
    """

    class StatusHttpHandler(http.server.BaseHTTPRequestHandler):
      """HTTP handler for serving stacktraces of all threads."""

      def do_GET(self):  # pylint: disable=invalid-name
        """Return all thread stacktraces information for GET request."""
        self.send_response(200)
        self.send_header('Content-Type', 'text/plain')
        self.end_headers()
        for line in StatusServer.get_thread_dump():
          # Fix: wfile is a binary stream; writing a str raised TypeError
          # on Python 3, so the text is encoded first.
          self.wfile.write(line.encode('utf-8'))

      def log_message(self, f, *args):
        """Do not log any messages."""
        pass

    self.httpd = httpd = http.server.HTTPServer(
        ('localhost', status_http_port), StatusHttpHandler)
    logging.info('Status HTTP server running at %s:%s', httpd.server_name,
                 httpd.server_port)
    httpd.serve_forever()
def main(unused_argv):
  """Main entry point for SDK Fn Harness.

  Configuration is taken from environment variables set by the runner:
  LOGGING_API_SERVICE_DESCRIPTOR, PIPELINE_OPTIONS,
  SEMI_PERSISTENT_DIRECTORY, WORKER_ID and CONTROL_API_SERVICE_DESCRIPTOR.
  """
  if 'LOGGING_API_SERVICE_DESCRIPTOR' in os.environ:
    try:
      # Route Python logging to the runner over the Fn logging API.
      logging_service_descriptor = endpoints_pb2.ApiServiceDescriptor()
      text_format.Merge(os.environ['LOGGING_API_SERVICE_DESCRIPTOR'],
                        logging_service_descriptor)

      # Send all logs to the runner.
      fn_log_handler = FnApiLogRecordHandler(logging_service_descriptor)
      # TODO(BEAM-5468): This should be picked up from pipeline options.
      logging.getLogger().setLevel(logging.INFO)
      logging.getLogger().addHandler(fn_log_handler)
      logging.info('Logging handler created.')
    except Exception:
      # Logging is best-effort: the harness still runs without it.
      logging.error("Failed to set up logging handler, continuing without.",
                    exc_info=True)
      fn_log_handler = None
  else:
    fn_log_handler = None

  # Start status HTTP server thread.
  thread = threading.Thread(name='status_http_server',
                            target=StatusServer().start)
  thread.daemon = True
  thread.setName('status-server-demon')
  thread.start()

  if 'PIPELINE_OPTIONS' in os.environ:
    sdk_pipeline_options = _parse_pipeline_options(
        os.environ['PIPELINE_OPTIONS'])
  else:
    sdk_pipeline_options = PipelineOptions.from_dictionary({})

  if 'SEMI_PERSISTENT_DIRECTORY' in os.environ:
    semi_persistent_directory = os.environ['SEMI_PERSISTENT_DIRECTORY']
  else:
    semi_persistent_directory = None

  logging.info('semi_persistent_directory: %s', semi_persistent_directory)

  _worker_id = os.environ.get('WORKER_ID', None)

  try:
    # Restore the pickled __main__ session if one was staged.
    _load_main_session(semi_persistent_directory)
  except Exception:  # pylint: disable=broad-except
    exception_details = traceback.format_exc()
    logging.error(
        'Could not load main session: %s', exception_details, exc_info=True)

  try:
    logging.info('Python sdk harness started with pipeline_options: %s',
                 sdk_pipeline_options.get_all_options(drop_default=True))
    service_descriptor = endpoints_pb2.ApiServiceDescriptor()
    text_format.Merge(os.environ['CONTROL_API_SERVICE_DESCRIPTOR'],
                      service_descriptor)
    # TODO(robertwb): Support credentials.
    assert not service_descriptor.oauth2_client_credentials_grant.url
    # Blocks until the runner closes the control connection.
    SdkHarness(
        control_address=service_descriptor.url,
        worker_count=_get_worker_count(sdk_pipeline_options),
        worker_id=_worker_id,
        profiler_factory=profiler.Profile.factory_from_options(
            sdk_pipeline_options.view_as(ProfilingOptions))
    ).run()
    logging.info('Python sdk harness exiting.')
  except:  # pylint: disable=broad-except
    logging.exception('Python sdk harness failed: ')
    raise
  finally:
    if fn_log_handler:
      fn_log_handler.close()
def _parse_pipeline_options(options_json):
  """Parse a JSON-encoded options blob into a PipelineOptions instance.

  Accepts either the legacy wrapper format ``{'options': {...}}`` or a
  flat mapping whose keys may carry the 'beam:option:<key>:v1' urn prefix,
  which is stripped.
  """
  options = json.loads(options_json)
  # Check the options field first for backward compatibility.
  if 'options' in options:
    return PipelineOptions.from_dictionary(options.get('options'))

  # Remove extra urn part from the key.
  portable_option_regex = r'^beam:option:(?P<key>.*):v1$'

  def _strip_option_urn(key):
    # Fix: match each key once instead of evaluating the regex twice.
    match = re.match(portable_option_regex, key)
    return match.group('key') if match else key

  return PipelineOptions.from_dictionary({
      _strip_option_urn(k): v for k, v in options.items()
  })
def _get_worker_count(pipeline_options):
  """Extract worker count from the pipeline_options.

  This defines how many SdkWorkers will be started in this Python process,
  each with its own thread to process data. The experimental parameter name
  is 'worker_threads', e.g. on the command line:
    --experimental worker_threads=1

  Note: worker_threads is an experimental flag and might not be available in
  future releases.

  Returns:
    an int containing the worker_threads to use. Default is 12.
  """
  debug_options = pipeline_options.view_as(DebugOptions)
  for experiment in (debug_options.experiments or []):
    match = re.match(r'worker_threads=(?P<worker_threads>.*)', experiment)
    # There should only be one match, so return from inside the loop.
    if match:
      return int(match.group('worker_threads'))
  return 12
def _load_main_session(semi_persistent_directory):
  """Loads a pickled main session from the path specified."""
  if not semi_persistent_directory:
    logging.warning(
        'No semi_persistent_directory found: Functions defined in __main__ '
        '(interactive session) may fail.')
    return
  session_file = os.path.join(
      semi_persistent_directory, 'staged', names.PICKLED_MAIN_SESSION_FILE)
  if os.path.isfile(session_file):
    pickler.load_session(session_file)
  else:
    logging.warning(
        'No session file found: %s. Functions defined in __main__ '
        '(interactive session) may fail.', session_file)
# Entry point when executed as the SDK harness boot script.
if __name__ == '__main__':
  main(sys.argv)
|
burakbayramli/classnotes | refs/heads/master | algs/algs_170_nmt/translate.py | 2 | # translate.py
import pickle
import tensorflow as tf
import numpy as np, os
checkpoint_path = "/home/burak/Downloads/model.ckpt"
from data_utils import (
process_data,split_data,generate_epoch,generate_batch,
)
def rnn_cell(FLAGS, dropout, scope):
    """Build a multi-layer LSTM cell with output dropout inside `scope`."""
    with tf.variable_scope(scope):
        base_cell = tf.nn.rnn_cell.BasicLSTMCell(FLAGS.num_hidden_units)
        # DropoutWrapper keeps outputs with probability 1 - dropout.
        wrapped_cell = tf.nn.rnn_cell.DropoutWrapper(
            base_cell, output_keep_prob=1 - dropout)
        stacked = tf.nn.rnn_cell.MultiRNNCell(
            [wrapped_cell] * FLAGS.num_layers)
    return stacked
def rnn_inputs(FLAGS, input_data, vocab_size, scope):
    """Look up embeddings for `input_data` via the W_input created in `scope`."""
    with tf.variable_scope(scope, reuse=True):
        # Reuse the embedding matrix the caller created in this scope.
        embedding_matrix = tf.get_variable(
            "W_input", [vocab_size, FLAGS.num_hidden_units])
        # Result shape: input_data dimensions + [num_hidden_units].
        return tf.nn.embedding_lookup(embedding_matrix, input_data)
def rnn_softmax(FLAGS, outputs, scope):
    """Project RNN outputs to vocabulary logits using `scope`'s softmax vars."""
    with tf.variable_scope(scope, reuse=True):
        weights = tf.get_variable(
            "W_softmax", [FLAGS.num_hidden_units, FLAGS.sp_vocab_size])
        bias = tf.get_variable("b_softmax", [FLAGS.sp_vocab_size])
        return tf.matmul(outputs, weights) + bias
class model(object):
    """Encoder-decoder (seq2seq) translation model.

    Builds the TensorFlow graph: embedding + stacked LSTM encoder, a decoder
    initialized from the encoder's final state, a softmax projection, masked
    cross-entropy loss, and Adam with gradient clipping.
    """
    def __init__(self, FLAGS):
        # Token-id matrices; assumed shape [batch, time] -- TODO confirm
        # against data_utils batching.
        self.encoder_inputs = tf.placeholder(tf.int32, shape=[None, None],
            name='encoder_inputs')
        self.decoder_inputs = tf.placeholder(tf.int32, shape=[None, None],
            name='decoder_inputs')
        self.targets = tf.placeholder(tf.int32, shape=[None, None],
            name='targets')
        self.en_seq_lens = tf.placeholder(tf.int32, shape=[None, ],
            name="en_seq_lens")
        self.sp_seq_lens = tf.placeholder(tf.int32, shape=[None, ],
            name="sp_seq_lens")
        # Dropout probability (not keep-prob): rnn_cell uses 1 - dropout.
        self.dropout = tf.placeholder(tf.float32)
        with tf.variable_scope('encoder') as scope:
            # Encoder RNN cell
            self.encoder_stacked_cell = rnn_cell(FLAGS, self.dropout,
                scope=scope)
            # Embed encoder inputs. W_input must be created here first so
            # rnn_inputs can re-fetch it with reuse=True.
            W_input = tf.get_variable("W_input",
                [FLAGS.en_vocab_size, FLAGS.num_hidden_units])
            self.embedded_encoder_inputs = rnn_inputs(FLAGS,
                self.encoder_inputs, FLAGS.en_vocab_size, scope=scope)
            # Outputs from encoder RNN
            self.all_encoder_outputs, self.encoder_state = tf.nn.dynamic_rnn(
                cell=self.encoder_stacked_cell,
                inputs=self.embedded_encoder_inputs,
                sequence_length=self.en_seq_lens, time_major=False,
                dtype=tf.float32)
        with tf.variable_scope('decoder') as scope:
            # Initial state is last relevant state from encoder
            self.decoder_initial_state = self.encoder_state
            # Decoder RNN cell
            self.decoder_stacked_cell = rnn_cell(FLAGS, self.dropout,
                scope=scope)
            # Embed decoder RNN inputs (separate W_input in decoder scope).
            W_input = tf.get_variable("W_input",
                [FLAGS.sp_vocab_size, FLAGS.num_hidden_units])
            self.embedded_decoder_inputs = rnn_inputs(FLAGS, self.decoder_inputs,
                FLAGS.sp_vocab_size, scope=scope)
            # Outputs from decoder RNN
            self.all_decoder_outputs, self.decoder_state = tf.nn.dynamic_rnn(
                cell=self.decoder_stacked_cell,
                inputs=self.embedded_decoder_inputs,
                sequence_length=self.sp_seq_lens, time_major=False,
                initial_state=self.decoder_initial_state)
            # Softmax vars created here so rnn_softmax can reuse them.
            W_softmax = tf.get_variable("W_softmax",
                [FLAGS.num_hidden_units, FLAGS.sp_vocab_size])
            b_softmax = tf.get_variable("b_softmax", [FLAGS.sp_vocab_size])
            # Logits over the target vocabulary, flattened to 2-D.
            self.decoder_outputs_flat = tf.reshape(self.all_decoder_outputs,
                [-1, FLAGS.num_hidden_units])
            self.logits_flat = rnn_softmax(FLAGS, self.decoder_outputs_flat,
                scope=scope)
            # Loss with masking: positions whose target id is 0 (padding) get
            # mask 0 via tf.sign and do not contribute to the loss.
            targets_flat = tf.reshape(self.targets, [-1])
            losses_flat = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=self.logits_flat, labels=targets_flat
            )
            mask = tf.sign(tf.to_float(targets_flat))
            masked_losses = mask * losses_flat
            masked_losses = tf.reshape(masked_losses, tf.shape(self.targets))
            self.loss = tf.reduce_mean(
                tf.reduce_sum(masked_losses, reduction_indices=1))
        # Optimization: lr is a non-trainable variable so train() can decay it
        # externally via tf.assign.
        self.lr = tf.Variable(0.0, trainable=False)
        trainable_vars = tf.trainable_variables()
        # clip the gradient to avoid vanishing or blowing up gradients
        grads, _ = tf.clip_by_global_norm(
            tf.gradients(self.loss, trainable_vars), FLAGS.max_gradient_norm)
        optimizer = tf.train.AdamOptimizer(self.lr)
        self.train_optimizer = optimizer.apply_gradients(
            zip(grads, trainable_vars))
        # NOTE(review): Saver is commented out, yet restore_model/train still
        # access model.saver -- confirm before relying on checkpointing.
        #self.saver = tf.train.Saver(tf.all_variables())
    def step(self, sess, FLAGS, batch_encoder_inputs, batch_decoder_inputs,
        batch_targets, batch_en_seq_lens, batch_sp_seq_lens, dropout):
        """Run one training step; returns (loss value, train op result)."""
        input_feed = {self.encoder_inputs: batch_encoder_inputs,
            self.decoder_inputs: batch_decoder_inputs,
            self.targets: batch_targets,
            self.en_seq_lens: batch_en_seq_lens,
            self.sp_seq_lens: batch_sp_seq_lens,
            self.dropout: dropout}
        output_feed = [self.loss, self.train_optimizer]
        outputs = sess.run(output_feed, input_feed)
        return outputs[0], outputs[1]
class parameters(object):
    """Hyper-parameter container for the seq2seq translation model."""
    def __init__(self):
        # Single table of defaults; attributes are set in one pass below.
        defaults = {
            'max_en_vocab_size': 30000,
            'max_sp_vocab_size': 30000,
            'num_epochs': 100,
            'batch_size': 20,
            'num_hidden_units': 300,
            'num_layers': 3,
            'dropout': 0.2,
            'learning_rate': 1e-3,
            'learning_rate_decay_factor': 0.99,
            'max_gradient_norm': 5.0,
        }
        for attr_name, attr_value in defaults.items():
            setattr(self, attr_name, attr_value)
def create_model(sess, FLAGS):
    """Build a fresh model graph and initialize all of its variables."""
    tf_model = model(FLAGS)
    print "Created a new model"
    sess.run(tf.initialize_all_variables())
    return tf_model
def restore_model(sess, FLAGS):
    """Rebuild the model graph and restore weights from checkpoint_path.

    NOTE(review): model.__init__ has its tf.train.Saver creation commented
    out, so tf_model.saver will not exist here -- confirm before use.
    """
    tf_model = model(FLAGS)
    tf_model.saver.restore(sess, checkpoint_path)
    return tf_model
def train(FLAGS):
# Load the data
en_token_ids, en_seq_lens, en_vocab_dict, en_rev_vocab_dict = \
process_data('data/tst2013.en', max_vocab_size=30000, target_lang=False)
sp_token_ids, sp_seq_lens, sp_vocab_dict, sp_rev_vocab_dict = \
process_data('data/tst2013.tr', max_vocab_size=30000, target_lang=True)
# Split into train and validation sets
train_encoder_inputs, train_decoder_inputs, train_targets, \
train_en_seq_lens, train_sp_seq_len, \
valid_encoder_inputs, valid_decoder_inputs, valid_targets, \
valid_en_seq_lens, valid_sp_seq_len = \
split_data(en_token_ids, sp_token_ids, en_seq_lens, sp_seq_lens,
train_ratio=0.8)
output = open('data/vocab_en.pkl', 'wb')
pickle.dump(en_vocab_dict, output)
output.close()
output = open('data/vocab_sp.pkl', 'wb')
pickle.dump(sp_vocab_dict, output)
output.close()
# Update parameters
FLAGS.en_vocab_size = len(en_vocab_dict)
FLAGS.sp_vocab_size = len(sp_vocab_dict)
print 'len(en_vocab_dict)', len(en_vocab_dict)
print 'len(sp_vocab_dict)', len(sp_vocab_dict)
# Start session
with tf.Session() as sess:
model = None
# Create new model or load old one
f = checkpoint_path + ".index"
print f
exit()
if os.path.isfile(f):
model = restore_model(sess)
else:
model = create_model(sess, FLAGS)
# Training begins
losses = []
for epoch_num, epoch in enumerate(generate_epoch(train_encoder_inputs,
train_decoder_inputs, train_targets,
train_en_seq_lens, train_sp_seq_len,
FLAGS.num_epochs, FLAGS.batch_size)):
print "EPOCH: %i" % (epoch_num)
# Decay learning rate
sess.run(tf.assign(model.lr, FLAGS.learning_rate * \
(FLAGS.learning_rate_decay_factor ** epoch_num)))
batch_loss = []
for batch_num, (batch_encoder_inputs, batch_decoder_inputs,
batch_targets, batch_en_seq_lens,
batch_sp_seq_lens) in enumerate(epoch):
loss, _ = model.step(sess, FLAGS,
batch_encoder_inputs, batch_decoder_inputs, batch_targets,
batch_en_seq_lens, batch_sp_seq_lens,
FLAGS.dropout)
print loss
batch_loss.append(loss)
print 'mean: ', np.mean(batch_loss)
print "Saving the model."
model.saver.save(sess, checkpoint_path)
# Script entry point: build hyper-parameters and kick off training.
if __name__ == '__main__':
    FLAGS = parameters()
    train(FLAGS)
|
pombreda/swarming | refs/heads/master | appengine/swarming/server/task_request.py | 2 | # coding: utf-8
# Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
"""Tasks definition.
Each user request creates a new TaskRequest. The TaskRequest instance saves the
metadata of the request, e.g. who requested it, when why, etc. It links to the
actual data of the request in a TaskProperties. The TaskProperties represents
everything needed to run the task.
This means if two users request an identical task, it can be deduped
accordingly and efficiently by the scheduler.
Note that the mere existence of a TaskRequest in the db doesn't mean it will be
scheduled, see task_scheduler.py for the actual scheduling. Registering tasks
and scheduling are kept separated to keep the immutable and mutable models in
separate files.
Overview of transactions:
- TaskRequest() are created inside a transaction.
Graph of the schema:
+--------Root---------+
|TaskRequest |
| +--------------+ |
| |TaskProperties| |
| +--------------+ |
|id=<based on epoch> |
+---------------------+
^
|
<See task_to_run.py and task_result.py>
TaskProperties is embedded in TaskRequest. TaskProperties is still declared as a
separate entity to clearly declare the boundary for task request deduplication.
"""
import datetime
import hashlib
import logging
import random
import re
from google.appengine.api import datastore_errors
from google.appengine.ext import ndb
from components import auth
from components import datastore_utils
from components import utils
from server import task_pack
# Maximum acceptable priority value, which is effectively the lowest priority.
MAXIMUM_PRIORITY = 255
# One day in seconds. Add 10s to account for small jitter.
_ONE_DAY_SECS = 24*60*60 + 10
# Minimum value for timeouts.
_MIN_TIMEOUT_SECS = 1 if utils.is_local_dev_server() else 30
# The world started on 2010-01-01 at 00:00:00 UTC. The rationale is that using
# EPOCH (1970) means that 40 years worth of keys are wasted.
#
# Note: This creates a 'naive' object instead of a formal UTC object. Note that
# datetime.datetime.utcnow() also return naive objects. That's python.
_BEGINING_OF_THE_WORLD = datetime.datetime(2010, 1, 1, 0, 0, 0, 0)
# Parameters for make_request().
# The content of the 'data' parameter. This relates to the context of the
# request, e.g. who wants to run a task.
# REQUIRED keys must always be present; EXPECTED is the superset of allowed
# keys (_assert_keys rejects anything outside the EXPECTED set).
_REQUIRED_DATA_KEYS = frozenset(
    ['name', 'priority', 'properties', 'scheduling_expiration_secs', 'tags',
     'user'])
_EXPECTED_DATA_KEYS = frozenset(
    ['name', 'parent_task_id', 'priority', 'properties',
     'scheduling_expiration_secs', 'tags', 'user'])
# The content of 'properties' inside the 'data' parameter. This relates to the
# task itself, e.g. what to run.
_REQUIRED_PROPERTIES_KEYS = frozenset(
    ['commands', 'data', 'dimensions', 'env', 'execution_timeout_secs',
     'io_timeout_secs'])
_EXPECTED_PROPERTIES_KEYS = frozenset(
    ['commands', 'data', 'dimensions', 'env', 'execution_timeout_secs',
     'grace_period_secs', 'idempotent', 'io_timeout_secs'])
### Properties validators must come before the models.
def _validate_command(prop, value):
  """Validates TaskProperties.command."""
  # pylint: disable=W0212
  if not value:
    # required=True would still accept [].
    raise datastore_errors.BadValueError('%s is required' % prop._name)
  def _is_command(candidate):
    # Each command is itself a list of unicode arguments.
    if not isinstance(candidate, list):
      return False
    return all(isinstance(arg, unicode) for arg in candidate)
  if not all(_is_command(cmd) for cmd in value):
    raise TypeError(
        '%s must be a list of commands, each a list of arguments' % prop._name)
def _validate_data(prop, value):
  """Validates TaskProperties.data and sort the URLs."""
  for item in value:
    # Each entry must be a two element [url, file] pair of unicode strings.
    well_formed = (
        isinstance(item, list) and len(item) == 2 and
        isinstance(item[0], unicode) and isinstance(item[1], unicode))
    if not well_formed:
      # pylint: disable=W0212
      raise TypeError('%s must be a list of (url, file)' % prop._name)
  return sorted(value)
def _validate_dict_of_strings(prop, value):
  """Validates TaskProperties.dimension and TaskProperties.env."""
  for key, val in value.iteritems():
    if not (isinstance(key, unicode) and isinstance(val, unicode)):
      # pylint: disable=W0212
      raise TypeError('%s must be a dict of strings' % prop._name)
def _validate_expiration(prop, value):
  """Validates TaskRequest.expiration_ts."""
  now = utils.utcnow()
  # Round to whole seconds; the stored value is an absolute datetime.
  offset = int(round((value - now).total_seconds()))
  if _MIN_TIMEOUT_SECS <= offset <= _ONE_DAY_SECS:
    return
  # pylint: disable=W0212
  raise datastore_errors.BadValueError(
      '%s (%s, %ds from now) must effectively be between %ds and one day '
      'from now (%s)' %
      (prop._name, value, offset, _MIN_TIMEOUT_SECS, now))
def _validate_grace(prop, value):
  """Validates grace_period_secs in TaskProperties."""
  if value < 0 or value > _ONE_DAY_SECS:
    # pylint: disable=W0212
    raise datastore_errors.BadValueError(
        '%s (%ds) must be between %ds and one day' % (prop._name, value, 0))
def _validate_priority(_prop, value):
  """Validates TaskRequest.priority."""
  # Delegates to the public validate_priority(), which raises BadValueError
  # when the value is outside [0, MAXIMUM_PRIORITY].
  validate_priority(value)
  return value
def _validate_task_run_id(_prop, value):
  """Validates a task_id looks valid without fetching the entity."""
  if not value:
    return None
  # Raises if the packed id cannot be decoded into a TaskRunResult key.
  task_pack.unpack_run_result_key(value)
  return value
def _validate_timeout(prop, value):
  """Validates timeouts in seconds in TaskProperties."""
  if value < _MIN_TIMEOUT_SECS or value > _ONE_DAY_SECS:
    # pylint: disable=W0212
    raise datastore_errors.BadValueError(
        '%s (%ds) must be between %ds and one day' %
        (prop._name, value, _MIN_TIMEOUT_SECS))
def _validate_tags(prop, value):
  """Validates and sorts TaskRequest.tags."""
  if ':' not in value:
    # pylint: disable=W0212
    raise ValueError('%s must be key:value form, not %s' % (prop._name, value))
### Models.
class TaskProperties(ndb.Model):
  """Defines all the properties of a task to be run on the Swarming
  infrastructure.
  This entity is not saved in the DB as a standalone entity, instead it is
  embedded in a TaskRequest.
  This model is immutable.
  """
  # Hashing algorithm used to hash TaskProperties to create its key.
  HASHING_ALGO = hashlib.sha1
  # Commands to run. It is a list of lists. Each command is run one after the
  # other. Encoded json.
  commands = datastore_utils.DeterministicJsonProperty(
      validator=_validate_command, json_type=list, required=True)
  # List of (URLs, local file) for the bot to download. Encoded as json. Must be
  # sorted by URLs. Optional. (_validate_data returns the sorted value.)
  data = datastore_utils.DeterministicJsonProperty(
      validator=_validate_data, json_type=list)
  # Filter to use to determine the required properties on the bot to run on. For
  # example, Windows or hostname. Encoded as json. Optional but highly
  # recommended.
  dimensions = datastore_utils.DeterministicJsonProperty(
      validator=_validate_dict_of_strings, json_type=dict)
  # Environment variables. Encoded as json. Optional.
  env = datastore_utils.DeterministicJsonProperty(
      validator=_validate_dict_of_strings, json_type=dict)
  # Maximum duration the bot can take to run this task. It's named hard_timeout
  # in the bot.
  execution_timeout_secs = ndb.IntegerProperty(
      validator=_validate_timeout, required=True)
  # Grace period is the time between signaling the task it timed out and killing
  # the process. During this time the process should clean up itself as quickly
  # as possible, potentially uploading partial results back.
  grace_period_secs = ndb.IntegerProperty(validator=_validate_grace, default=30)
  # Bot controlled timeout for new bytes from the subprocess. If a subprocess
  # doesn't output new data to stdout for .io_timeout_secs, consider the command
  # timed out. Optional.
  io_timeout_secs = ndb.IntegerProperty(validator=_validate_timeout)
  # If True, the task can safely be served results from a previously succeeded
  # task.
  idempotent = ndb.BooleanProperty(default=False)
  @property
  def properties_hash(self):
    """Calculates the hash for this entity IFF the task is idempotent.
    It uniquely identifies the TaskProperties instance to permit deduplication
    by the task scheduler. It is None if the task is not idempotent.
    Returns:
      Hash as a compact byte str.
    """
    # Non-idempotent tasks must never be deduplicated, hence no hash.
    if not self.idempotent:
      return None
    return self.HASHING_ALGO(utils.encode_to_json(self)).digest()
class TaskRequest(ndb.Model):
  """Contains a user request.
  Key id is a decreasing integer based on time since utils.EPOCH plus some
  randomness on lower order bits. See _new_request_key() for the complete gory
  details.
  There is also "old style keys" which inherit from a fake root entity
  TaskRequestShard.
  TODO(maruel): Remove support 2015-02-01.
  This model is immutable.
  """
  # Time this request was registered. It is set manually instead of using
  # auto_now_add=True so that expiration_ts can be set very precisely relative
  # to this property.
  created_ts = ndb.DateTimeProperty(required=True)
  # The name for this task request. It's only for description.
  name = ndb.StringProperty(required=True)
  # Authenticated client that triggered this task.
  authenticated = auth.IdentityProperty()
  # Which user to blame for this task.
  user = ndb.StringProperty(default='')
  # The actual properties are embedded in this model.
  properties = ndb.LocalStructuredProperty(
      TaskProperties, compressed=True, required=True)
  # Priority of the task to be run. A lower number is higher priority, thus will
  # preempt requests with lower priority (higher numbers).
  priority = ndb.IntegerProperty(
      indexed=False, validator=_validate_priority, required=True)
  # If the task request is not scheduled by this moment, it will be aborted by a
  # cron job. It is saved instead of scheduling_expiration_secs so finding
  # expired jobs is a simple query.
  expiration_ts = ndb.DateTimeProperty(
      indexed=True, validator=_validate_expiration, required=True)
  # Tags that specify the category of the task.
  tags = ndb.StringProperty(repeated=True, validator=_validate_tags)
  # Set when a task (the parent) reentrantly create swarming tasks. Must be set
  # to a valid task_id pointing to a TaskRunResult or be None.
  parent_task_id = ndb.StringProperty(validator=_validate_task_run_id)
  @property
  def scheduling_expiration_secs(self):
    """Reconstructs this value from expiration_ts and created_ts."""
    # Derived rather than stored; see the expiration_ts comment above.
    return (self.expiration_ts - self.created_ts).total_seconds()
  def to_dict(self):
    """Converts properties_hash to hex so it is json serializable."""
    out = super(TaskRequest, self).to_dict()
    properties_hash = self.properties.properties_hash
    out['properties_hash'] = (
        properties_hash.encode('hex') if properties_hash else None)
    return out
  def _pre_put_hook(self):
    """Adds automatic tags."""
    super(TaskRequest, self)._pre_put_hook()
    # Mirror priority, user and each dimension as tags so tasks can be
    # filtered on them; dedupe and sort for determinism.
    self.tags.append('priority:%s' % self.priority)
    self.tags.append('user:%s' % self.user)
    for key, value in self.properties.dimensions.iteritems():
      self.tags.append('%s:%s' % (key, value))
    self.tags = sorted(set(self.tags))
def _new_request_key():
  """Returns a new, valid ndb.Key for a TaskRequest entity.

  The 64 bit key id is packed as:
  - 1 highest order bit kept at 0 so the value stays positive.
  - 43 bits: milliseconds since _BEGINING_OF_THE_WORLD; good for
    2**43 / 365.3 / 24 / 60 / 60 / 1000 = 278 years, i.e. until ~2288.
  - 16 bits: random (or server-specific) suffix, reducing the probability of
    collision at the same millisecond to 2**-15; effectively ~64k
    requests/sec without much transaction conflicts.
  - 4 bits: entity schema version, currently 0x1 (previous version was 0).
    Bump it whenever the TaskRequest entity tree changes in a way that
    breaks task id packing/unpacking. Stored XORed, it appears as 0xE.

  The id is finally XORed with task_pack.TASK_REQUEST_KEY_ID_MASK so that
  increasing key ids are in decreasing timestamp order.
  """
  current = utils.utcnow()
  if current < _BEGINING_OF_THE_WORLD:
    raise ValueError(
        'Time %s is set to before %s' % (current, _BEGINING_OF_THE_WORLD))
  delta = current - _BEGINING_OF_THE_WORLD
  millis = int(round(delta.total_seconds() * 1000.))
  # TODO(maruel): Use real randomness.
  rand_suffix = random.getrandbits(16)
  raw_id = (millis << 20) | (rand_suffix << 4) | 0x1
  return ndb.Key(TaskRequest, raw_id ^ task_pack.TASK_REQUEST_KEY_ID_MASK)
def _put_request(request):
  """Puts the new TaskRequest in the DB.
  Returns:
    ndb.Key of the new entity. Returns None if failed, which should be surfaced
    to the user.
  """
  assert not request.key
  request.key = _new_request_key()
  # _new_request_key is also passed as the retry factory in case of key
  # collision on insert.
  return datastore_utils.insert(request, _new_request_key)
def _assert_keys(expected_keys, minimum_keys, actual_keys, name):
"""Raise an exception if expected keys are not present."""
actual_keys = frozenset(actual_keys)
superfluous = actual_keys - expected_keys
missing = minimum_keys - actual_keys
if superfluous or missing:
msg_missing = (
('Missing: %s\n' % ', '.join(sorted(missing))) if missing else '')
msg_superfluous = (
('Superfluous: %s\n' % ', '.join(sorted(superfluous)))
if superfluous else '')
message = 'Unexpected %s; did you make a typo?\n%s%s' % (
name, msg_missing, msg_superfluous)
raise ValueError(message)
### Public API.
def validate_request_key(request_key):
  """Validates that request_key is a well formed TaskRequest ndb.Key.

  Accepts both new style keys (version nibble 0xE in the id, no parent) and
  old style keys parented to a TaskRequestShard entity.

  Raises:
    ValueError: if the key or its parent shard key is malformed.
  """
  if request_key.kind() != 'TaskRequest':
    raise ValueError('Expected key to TaskRequest, got %s' % request_key.kind())
  task_id = request_key.integer_id()
  if not task_id:
    raise ValueError('Invalid null TaskRequest key')
  if (task_id & 0xF) == 0xE:
    # New style key.
    # 0xE is schema version 0x1 after the XOR mask; see _new_request_key().
    return
  # Check the shard.
  # TODO(maruel): Remove support 2015-02-01.
  request_shard_key = request_key.parent()
  if not request_shard_key:
    raise ValueError('Expected parent key for TaskRequest, got nothing')
  if request_shard_key.kind() != 'TaskRequestShard':
    raise ValueError(
        'Expected key to TaskRequestShard, got %s' % request_shard_key.kind())
  root_entity_shard_id = request_shard_key.string_id()
  if (not root_entity_shard_id or
      len(root_entity_shard_id) != task_pack.DEPRECATED_SHARDING_LEVEL):
    raise ValueError(
        'Expected root entity key (used for sharding) to be of length %d but '
        'length was only %d (key value %r)' % (
            task_pack.DEPRECATED_SHARDING_LEVEL,
            len(root_entity_shard_id or ''),
            root_entity_shard_id))
def make_request(data):
  """Constructs a TaskRequest out of a yet-to-be-specified API.
  Argument:
  - data: dict with:
    - name
    - parent_task_id*
    - properties
      - commands
      - data
      - dimensions
      - env
      - execution_timeout_secs
      - grace_period_secs*
      - idempotent*
      - io_timeout_secs
    - priority
    - scheduling_expiration_secs
    - tags
    - user
  * are optional.
  If parent_task_id is set, properties for the parent are used:
  - priority: defaults to parent.priority - 1
  - user: overriden by parent.user
  Returns:
    The newly created TaskRequest.
  """
  # Save ourself headaches with typos and refuses unexpected values.
  _assert_keys(_EXPECTED_DATA_KEYS, _REQUIRED_DATA_KEYS, data, 'request keys')
  data_properties = data['properties']
  _assert_keys(
      _EXPECTED_PROPERTIES_KEYS, _REQUIRED_PROPERTIES_KEYS, data_properties,
      'request properties keys')
  parent_task_id = data.get('parent_task_id') or None
  if parent_task_id:
    # Copy before mutating so the caller's dict is left untouched.
    data = data.copy()
    run_result_key = task_pack.unpack_run_result_key(parent_task_id)
    result_summary_key = task_pack.run_result_key_to_result_summary_key(
        run_result_key)
    request_key = task_pack.result_summary_key_to_request_key(
        result_summary_key)
    parent = request_key.get()
    if not parent:
      raise ValueError('parent_task_id is not a valid task')
    # Clamp to [0, parent.priority - 1] so a child never outranks its parent.
    data['priority'] = max(min(data['priority'], parent.priority - 1), 0)
    # Drop the previous user.
    data['user'] = parent.user
  # Can't be a validator yet as we wouldn't be able to load previous task
  # requests.
  if len(data_properties.get('commands') or []) > 1:
    raise datastore_errors.BadValueError('Only one command is supported')
  # Class TaskProperties takes care of making everything deterministic.
  properties = TaskProperties(
      commands=data_properties['commands'],
      data=data_properties['data'],
      dimensions=data_properties['dimensions'],
      env=data_properties['env'],
      execution_timeout_secs=data_properties['execution_timeout_secs'],
      grace_period_secs=data_properties.get('grace_period_secs', 30),
      idempotent=data_properties.get('idempotent', False),
      io_timeout_secs=data_properties['io_timeout_secs'])
  now = utils.utcnow()
  expiration_ts = now + datetime.timedelta(
      seconds=data['scheduling_expiration_secs'])
  request = TaskRequest(
      authenticated=auth.get_current_identity(),
      created_ts=now,
      expiration_ts=expiration_ts,
      name=data['name'],
      parent_task_id=parent_task_id,
      priority=data['priority'],
      properties=properties,
      tags=data['tags'],
      user=data['user'] or '')
  _put_request(request)
  return request
def make_request_clone(original_request):
  """Makes a new TaskRequest from a previous one.
  Modifications:
  - Enforces idempotent=False.
  - Removes the parent_task_id if any.
  - Append suffix '(Retry #1)' to the task name, incrementing the number of
    followup retries.
  - Strip any tag starting with 'user:'.
  - Override request's user with the credentials of the currently logged in
    user.
  Returns:
    The newly created TaskRequest.
  """
  now = utils.utcnow()
  properties = TaskProperties(**original_request.properties.to_dict())
  # An explicit retry must never be deduplicated against earlier results.
  properties.idempotent = False
  # Keep the same relative expiration window as the original request.
  expiration_ts = (
      now + (original_request.expiration_ts - original_request.created_ts))
  name = original_request.name
  match = re.match(r'^(.*) \(Retry #(\d+)\)$', name)
  if match:
    name = '%s (Retry #%d)' % (match.group(1), int(match.group(2)) + 1)
  else:
    name += ' (Retry #1)'
  user = auth.get_current_identity()
  username = user.to_bytes()
  prefix = 'user:'
  if not username.startswith(prefix):
    raise ValueError('a request can only be cloned by a user, not a bot')
  username = username[len(prefix):]
  # Drop stale user: tags; _pre_put_hook re-adds one for the new user.
  tags = set(t for t in original_request.tags if not t.startswith('user:'))
  request = TaskRequest(
      authenticated=user,
      created_ts=now,
      expiration_ts=expiration_ts,
      name=name,
      parent_task_id=None,
      priority=original_request.priority,
      properties=properties,
      tags=tags,
      user=username)
  _put_request(request)
  return request
def validate_priority(priority):
  """Throws ValueError if priority is not a valid value."""
  if not 0 <= priority <= MAXIMUM_PRIORITY:
    raise datastore_errors.BadValueError(
        'priority (%d) must be between 0 and %d (inclusive)' %
        (priority, MAXIMUM_PRIORITY))
|
gptech/ansible | refs/heads/devel | lib/ansible/modules/cloud/digital_ocean/digital_ocean.py | 26 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Standard Ansible module metadata block consumed by ansible-doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: digital_ocean
short_description: Create/delete a droplet/SSH_key in DigitalOcean
description:
- Create/delete a droplet in DigitalOcean and optionally wait for it to be 'running', or deploy an SSH key.
version_added: "1.3"
author: "Vincent Viallet (@zbal)"
options:
command:
description:
- Which target you want to operate on.
default: droplet
choices: ['droplet', 'ssh']
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'active', 'absent', 'deleted']
api_token:
description:
- DigitalOcean api token.
version_added: "1.9.5"
id:
description:
- Numeric, the droplet id you want to operate on.
name:
description:
- String, this is the name of the droplet - must be formatted by hostname rules, or the name of a SSH key.
unique_name:
description:
- Bool, require unique hostnames. By default, DigitalOcean allows multiple hosts with the same name. Setting this to "yes" allows only one host
per name. Useful for idempotence.
version_added: "1.4"
default: "no"
choices: [ "yes", "no" ]
size_id:
description:
- This is the slug of the size you would like the droplet created with.
image_id:
description:
- This is the slug of the image you would like the droplet created with.
region_id:
description:
- This is the slug of the region you would like your server to be created in.
ssh_key_ids:
description:
- Optional, array of SSH key (numeric) ID that you would like to be added to the server.
virtio:
description:
- "Bool, turn on virtio driver in droplet for improved network and storage I/O."
version_added: "1.4"
default: "yes"
choices: [ "yes", "no" ]
private_networking:
description:
- "Bool, add an additional, private network interface to droplet for inter-droplet communication."
version_added: "1.4"
default: "no"
choices: [ "yes", "no" ]
backups_enabled:
description:
- Optional, Boolean, enables backups for your droplet.
version_added: "1.6"
default: "no"
choices: [ "yes", "no" ]
user_data:
description:
- opaque blob of data which is made available to the droplet
version_added: "2.0"
required: false
default: None
ipv6:
description:
- Optional, Boolean, enable IPv6 for your droplet.
version_added: "2.2"
required: false
default: "no"
choices: [ "yes", "no" ]
wait:
description:
- Wait for the droplet to be in state 'running' before returning. If wait is "no" an ip_address may not be returned.
default: "yes"
choices: [ "yes", "no" ]
wait_timeout:
description:
- How long before wait gives up, in seconds.
default: 300
ssh_pub_key:
description:
- The public SSH key you want to add to your account.
notes:
- Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. They both refer to the v2 token.
- As of Ansible 1.9.5 and 2.0, Version 2 of the DigitalOcean API is used, this removes C(client_id) and C(api_key) options in favor of C(api_token).
- If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired.
Upgrade Ansible or, if unable to, try downloading the latest version of this module from github and putting it into a 'library' directory.
requirements:
- "python >= 2.6"
- dopy
'''
EXAMPLES = '''
# Ensure a SSH key is present
# If a key matches this name, will return the ssh key id and changed = False
# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = False
- digital_ocean:
state: present
command: ssh
name: my_ssh_key
ssh_pub_key: 'ssh-rsa AAAA...'
api_token: XXX
# Create a new Droplet
# Will return the droplet details including the droplet id (used for idempotence)
- digital_ocean:
state: present
command: droplet
name: mydroplet
api_token: XXX
size_id: 2gb
region_id: ams2
image_id: fedora-19-x64
wait_timeout: 500
register: my_droplet
- debug:
msg: "ID is {{ my_droplet.droplet.id }}"
- debug:
msg: "IP is {{ my_droplet.droplet.ip_address }}"
# Ensure a droplet is present
# If droplet id already exist, will return the droplet details and changed = False
# If no droplet matches the id, a new droplet will be created and the droplet details (including the new id) are returned, changed = True.
- digital_ocean:
state: present
command: droplet
id: 123
name: mydroplet
api_token: XXX
size_id: 2gb
region_id: ams2
image_id: fedora-19-x64
wait_timeout: 500
# Create a droplet with ssh key
# The ssh key id can be passed as argument at the creation of a droplet (see ssh_key_ids).
# Several keys can be added to ssh_key_ids as id1,id2,id3
# The keys are used to connect as root to the droplet.
- digital_ocean:
state: present
ssh_key_ids: 123,456
name: mydroplet
api_token: XXX
size_id: 2gb
region_id: ams2
image_id: fedora-19-x64
'''
import os
import time
import traceback
from distutils.version import LooseVersion
try:
import six
HAS_SIX = True
except ImportError:
HAS_SIX = False
HAS_DOPY = False
try:
import dopy
from dopy.manager import DoError, DoManager
if LooseVersion(dopy.__version__) >= LooseVersion('0.3.2'):
HAS_DOPY = True
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
class TimeoutError(Exception):
    """Raised when a droplet fails to reach the expected state in time.

    Carries the droplet id so the caller can report which resource
    timed out.
    """

    def __init__(self, msg, id_):
        Exception.__init__(self, msg)
        self.id = id_
class JsonfyMixIn(object):
    """Mix-in exposing an instance's attribute dict for JSON output."""

    def to_json(self):
        # vars() yields the instance __dict__ itself (not a copy), exactly
        # as the original direct attribute access did.
        return vars(self)
class Droplet(JsonfyMixIn):
    """Wrapper around a DigitalOcean droplet JSON payload."""

    manager = None  # class-wide DoManager handle, installed by setup()

    def __init__(self, droplet_json):
        # Default to 'new' until the API payload provides a real status.
        self.status = 'new'
        self.__dict__.update(droplet_json)

    def is_powered_on(self):
        return self.status == 'active'

    def update_attr(self, attrs=None):
        """Refresh attributes from *attrs*, or from the API when omitted."""
        if attrs:
            for k, v in attrs.items():
                setattr(self, k, v)
        else:
            json = self.manager.show_droplet(self.id)
            # Only absorb the payload once the droplet has an IP address.
            if json['ip_address']:
                self.update_attr(json)

    def power_on(self):
        assert self.status == 'off', 'Can only power on a closed one.'
        json = self.manager.power_on_droplet(self.id)
        self.update_attr(json)

    def ensure_powered_on(self, wait=True, wait_timeout=300):
        """Power the droplet on and, when *wait*, poll until it is active.

        :raises TimeoutError: when the droplet does not become active, or
            reports no IP address, within *wait_timeout* seconds.
        """
        if self.is_powered_on():
            return
        if self.status == 'off':  # powered off
            self.power_on()
        if wait:
            end_time = time.time() + wait_timeout
            while time.time() < end_time:
                time.sleep(min(20, end_time - time.time()))
                self.update_attr()
                if self.is_powered_on():
                    if not self.ip_address:
                        raise TimeoutError('No ip is found.', self.id)
                    return
            raise TimeoutError('Wait for droplet running timeout', self.id)

    def destroy(self):
        return self.manager.destroy_droplet(self.id, scrub_data=True)

    @classmethod
    def setup(cls, api_token):
        cls.manager = DoManager(None, api_token, api_version=2)

    @classmethod
    def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False, user_data=None,
            ipv6=False):
        """Create a droplet via the API and return it wrapped in a Droplet."""
        # The v2 API expects lowercase string booleans ('true'/'false').
        private_networking_lower = str(private_networking).lower()
        backups_enabled_lower = str(backups_enabled).lower()
        ipv6_lower = str(ipv6).lower()
        json = cls.manager.new_droplet(name, size_id, image_id, region_id,
                                       ssh_key_ids=ssh_key_ids, virtio=virtio, private_networking=private_networking_lower,
                                       backups_enabled=backups_enabled_lower, user_data=user_data, ipv6=ipv6_lower)
        droplet = cls(json)
        return droplet

    @classmethod
    def find(cls, id=None, name=None):
        """Locate a droplet by id (preferred) or by name; False when absent."""
        if not id and not name:
            return False
        droplets = cls.list_all()
        # Check first by id. digital ocean requires that it be unique
        for droplet in droplets:
            if droplet.id == id:
                return droplet
        # Failing that, check by hostname.
        for droplet in droplets:
            if droplet.name == name:
                return droplet
        return False

    @classmethod
    def list_all(cls):
        """Return all active droplets as a *list* of Droplet instances.

        Materializing the list (instead of returning the lazy map object)
        keeps find() correct on Python 3, where a map iterator would be
        exhausted by the first of find()'s two scans.
        """
        json = cls.manager.all_active_droplets()
        return list(map(cls, json))
class SSH(JsonfyMixIn):
    """Wrapper around a DigitalOcean SSH-key JSON payload."""

    manager = None  # class-wide DoManager handle, installed by setup()

    def __init__(self, ssh_key_json):
        self.__dict__.update(ssh_key_json)

    # Re-running __init__ with a fresh payload is how attributes refresh.
    update_attr = __init__

    def destroy(self):
        self.manager.destroy_ssh_key(self.id)
        return True

    @classmethod
    def setup(cls, api_token):
        cls.manager = DoManager(None, api_token, api_version=2)

    @classmethod
    def find(cls, name):
        """Return the key whose name matches *name*, or False."""
        if not name:
            return False
        keys = cls.list_all()
        for key in keys:
            if key.name == name:
                return key
        return False

    @classmethod
    def list_all(cls):
        """Return all SSH keys as a *list* of SSH instances.

        A concrete list (rather than a lazy map object) is safe to iterate
        more than once on Python 3.
        """
        json = cls.manager.all_ssh_keys()
        return list(map(cls, json))

    @classmethod
    def add(cls, name, key_pub):
        """Register *key_pub* under *name* and return the wrapped key."""
        json = cls.manager.new_ssh_key(name, key_pub)
        return cls(json)
def core(module):
    """Dispatch the requested command/state against the DigitalOcean API.

    Exits the module run via module.exit_json()/fail_json(); never returns
    a value of its own.
    """
    def getkeyordie(k):
        # Return a required module parameter, failing the run if unset.
        v = module.params[k]
        if v is None:
            module.fail_json(msg='Unable to load %s' % k)
        return v

    # Resolve the API token from the module argument first, then from the
    # environment.  os.environ.get() is used so a missing DO_API_TOKEN does
    # not raise KeyError before DO_API_KEY is even consulted (the original
    # chained os.environ[...] lookups aborted on the first missing
    # variable and never fell back to DO_API_KEY).
    api_token = (module.params['api_token'] or
                 os.environ.get('DO_API_TOKEN') or
                 os.environ.get('DO_API_KEY'))
    if not api_token:
        module.fail_json(msg='Unable to load api_token: set the api_token '
                             'argument or the DO_API_TOKEN/DO_API_KEY '
                             'environment variable')

    changed = True
    command = module.params['command']
    state = module.params['state']

    if command == 'droplet':
        Droplet.setup(api_token)
        if state in ('active', 'present'):
            # First, try to find a droplet by id.
            droplet = Droplet.find(id=module.params['id'])
            # If we couldn't find the droplet and the user is allowing unique
            # hostnames, then check to see if a droplet with the specified
            # hostname already exists.
            if not droplet and module.params['unique_name']:
                droplet = Droplet.find(name=getkeyordie('name'))
            # If both of those attempts failed, then create a new droplet.
            if not droplet:
                droplet = Droplet.add(
                    name=getkeyordie('name'),
                    size_id=getkeyordie('size_id'),
                    image_id=getkeyordie('image_id'),
                    region_id=getkeyordie('region_id'),
                    ssh_key_ids=module.params['ssh_key_ids'],
                    virtio=module.params['virtio'],
                    private_networking=module.params['private_networking'],
                    backups_enabled=module.params['backups_enabled'],
                    user_data=module.params.get('user_data'),
                    ipv6=module.params['ipv6'],
                )
            # A droplet that was already running means nothing changed.
            if droplet.is_powered_on():
                changed = False
            droplet.ensure_powered_on(
                wait=getkeyordie('wait'),
                wait_timeout=getkeyordie('wait_timeout')
            )
            module.exit_json(changed=changed, droplet=droplet.to_json())
        elif state in ('absent', 'deleted'):
            # First, try to find a droplet by id.
            droplet = Droplet.find(module.params['id'])
            # If we couldn't find the droplet and the user is allowing unique
            # hostnames, then check to see if a droplet with the specified
            # hostname already exists.
            if not droplet and module.params['unique_name']:
                droplet = Droplet.find(name=getkeyordie('name'))
            if not droplet:
                module.exit_json(changed=False, msg='The droplet is not found.')
            droplet.destroy()
            module.exit_json(changed=True)
    elif command == 'ssh':
        SSH.setup(api_token)
        name = getkeyordie('name')
        if state in ('active', 'present'):
            key = SSH.find(name)
            if key:
                module.exit_json(changed=False, ssh_key=key.to_json())
            key = SSH.add(name, getkeyordie('ssh_pub_key'))
            module.exit_json(changed=True, ssh_key=key.to_json())
        elif state in ('absent', 'deleted'):
            key = SSH.find(name)
            if not key:
                module.exit_json(changed=False, msg='SSH key with the name of %s is not found.' % name)
            key.destroy()
            module.exit_json(changed=True)
def main():
    """Module entry point: build the argument spec and run core()."""
    module = AnsibleModule(
        argument_spec=dict(
            command=dict(choices=['droplet', 'ssh'], default='droplet'),
            state=dict(choices=['active', 'present', 'absent', 'deleted'], default='present'),
            api_token=dict(aliases=['API_TOKEN'], no_log=True),
            name=dict(type='str'),
            size_id=dict(),
            image_id=dict(),
            region_id=dict(),
            ssh_key_ids=dict(type='list'),
            virtio=dict(type='bool', default='yes'),
            private_networking=dict(type='bool', default='no'),
            backups_enabled=dict(type='bool', default='no'),
            id=dict(aliases=['droplet_id'], type='int'),
            unique_name=dict(type='bool', default='no'),
            user_data=dict(default=None),
            ipv6=dict(type='bool', default='no'),
            wait=dict(type='bool', default=True),
            wait_timeout=dict(default=300, type='int'),
            ssh_pub_key=dict(type='str'),
        ),
        required_together=(
            ['size_id', 'image_id', 'region_id'],
        ),
        mutually_exclusive=(
            ['size_id', 'ssh_pub_key'],
            ['image_id', 'ssh_pub_key'],
            ['region_id', 'ssh_pub_key'],
        ),
        required_one_of=(
            ['id', 'name'],
        ),
    )
    if not HAS_DOPY and not HAS_SIX:
        module.fail_json(msg='dopy >= 0.3.2 is required for this module. dopy requires six but six is not installed. '
                             'Make sure both dopy and six are installed.')
    if not HAS_DOPY:
        module.fail_json(msg='dopy >= 0.3.2 required for this module')
    try:
        core(module)
    except TimeoutError as e:
        module.fail_json(msg=str(e), id=e.id)
    except Exception as e:
        # DoError subclasses Exception, so the original (DoError, Exception)
        # tuple was redundant; a single Exception handler is equivalent.
        module.fail_json(msg=str(e), exception=traceback.format_exc())


if __name__ == '__main__':
    main()
|
sandeepdsouza93/TensorFlow-15712 | refs/heads/master | tensorflow/contrib/factorization/python/ops/gmm_test.py | 22 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops.gmm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
class GMMTest(tf.test.TestCase):
  """Functional tests for tf.contrib.factorization.GMM on synthetic data."""

  def setUp(self):
    # Seed numpy and TF up front; the exact statement order below matters
    # for reproducibility of the synthetic data and must not change.
    np.random.seed(3)
    tf.set_random_seed(2)
    self.num_centers = 2
    self.num_dims = 2
    self.num_points = 4000
    self.batch_size = 100
    self.true_centers = self.make_random_centers(self.num_centers,
                                                 self.num_dims)
    self.points, self.assignments, self.scores = self.make_random_points(
        self.true_centers,
        self.num_points)
    # Aggregate the per-point scores into the reference total score.
    self.true_score = np.add.reduce(self.scores)
    # Use initial means from kmeans (just like scikit-learn does).
    clusterer = tf.contrib.factorization.KMeansClustering(
        num_clusters=self.num_centers)
    clusterer.fit(self.points, steps=30)
    self.initial_means = clusterer.clusters()

  @staticmethod
  def make_random_centers(num_centers, num_dims):
    # Integral centers in [0, 500) keep the clusters well separated.
    return np.round(np.random.rand(num_centers,
                                   num_dims).astype(np.float32) * 500)

  @staticmethod
  def make_random_points(centers, num_points):
    """Sample points around randomly-assigned centers.

    Returns (points, assignments, scores) where each score is the point's
    Mahalanobis distance to its empirical cluster.
    """
    num_centers, num_dims = centers.shape
    assignments = np.random.choice(num_centers, num_points)
    offsets = np.round(np.random.randn(num_points,
                                       num_dims).astype(np.float32) * 20)
    points = centers[assignments] + offsets
    means = [np.mean(points[assignments == center], axis=0)
             for center in xrange(num_centers)]
    covs = [np.cov(points[assignments == center].T)
            for center in xrange(num_centers)]
    scores = []
    for r in xrange(num_points):
      scores.append(np.sqrt(np.dot(
          np.dot(points[r, :] - means[assignments[r]],
                 np.linalg.inv(covs[assignments[r]])),
          points[r, :] - means[assignments[r]])))
    return (points, assignments, scores)

  def test_clusters(self):
    """Tests the shape of the clusters."""
    gmm = tf.contrib.factorization.GMM(
        self.num_centers,
        initial_clusters=self.initial_means,
        batch_size=self.batch_size,
        steps=40,
        continue_training=True,
        random_seed=4,
        config=tf.contrib.learn.RunConfig(tf_random_seed=2))
    gmm.fit(x=self.points, steps=0)
    clusters = gmm.clusters()
    self.assertAllEqual(list(clusters.shape),
                        [self.num_centers, self.num_dims])

  def test_fit(self):
    # More training steps should improve (lower) the model score.
    gmm = tf.contrib.factorization.GMM(
        self.num_centers,
        initial_clusters='random',
        batch_size=self.batch_size,
        random_seed=4,
        config=tf.contrib.learn.RunConfig(tf_random_seed=2))
    gmm.fit(x=self.points, steps=1)
    score1 = gmm.score(x=self.points)
    gmm = tf.contrib.factorization.GMM(
        self.num_centers,
        initial_clusters='random',
        batch_size=self.batch_size,
        random_seed=4,
        config=tf.contrib.learn.RunConfig(tf_random_seed=2))
    gmm.fit(x=self.points, steps=10)
    score2 = gmm.score(x=self.points)
    self.assertGreater(score1, score2)
    # The converged score should land within 15% of the reference total.
    self.assertNear(self.true_score, score2, self.true_score * 0.15)

  def test_infer(self):
    # Train, then check predicted assignments on a fresh small test set.
    gmm = tf.contrib.factorization.GMM(
        self.num_centers,
        initial_clusters=self.initial_means,
        batch_size=self.batch_size,
        steps=40,
        continue_training=True,
        random_seed=4,
        config=tf.contrib.learn.RunConfig(tf_random_seed=2))
    gmm.fit(x=self.points, steps=60)
    clusters = gmm.clusters()
    # Make a small test set
    points, true_assignments, true_offsets = (
        self.make_random_points(clusters, 40))
    assignments = np.ravel(gmm.predict(points))
    self.assertAllEqual(true_assignments, assignments)
    # Test score
    score = gmm.score(points)
    self.assertNear(score, np.sum(true_offsets), 4.05)

  def _compare_with_sklearn(self, cov_type):
    # The expected values below are hard-coded results from scikit-learn's
    # GMM on the same synthetic data, avoiding a sklearn dependency.
    # sklearn version.
    iterations = 40
    np.random.seed(5)
    sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
    sklearn_means = np.asarray([[144.83417719, 254.20130341],
                                [274.38754816, 353.16074346]])
    sklearn_covs = np.asarray([[[395.0081194, -4.50389512],
                                [-4.50389512, 408.27543989]],
                               [[385.17484203, -31.27834935],
                                [-31.27834935, 391.74249925]]])
    # skflow version.
    gmm = tf.contrib.factorization.GMM(
        self.num_centers,
        initial_clusters=self.initial_means,
        covariance_type=cov_type,
        batch_size=self.num_points,
        steps=iterations,
        continue_training=True,
        config=tf.contrib.learn.RunConfig(tf_random_seed=2))
    gmm.fit(self.points)
    skflow_assignments = gmm.predict(self.points[:10, :]).astype(int)
    self.assertAllClose(sklearn_assignments,
                        np.ravel(skflow_assignments))
    self.assertAllClose(sklearn_means, gmm.clusters())
    if cov_type == 'full':
      self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01)
    else:
      # Diagonal covariance: compare only the diagonal entries.
      for d in [0, 1]:
        self.assertAllClose(np.diag(sklearn_covs[d]),
                            gmm.covariances()[d, :], rtol=0.01)

  def test_compare_full(self):
    self._compare_with_sklearn('full')

  def test_compare_diag(self):
    self._compare_with_sklearn('diag')
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
|
cloudbase/nova-virtualbox | refs/heads/virtualbox_driver | nova/scheduler/client/report.py | 2 | # Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from nova import conductor
from nova import exception
from nova.i18n import _LI
LOG = logging.getLogger(__name__)
class SchedulerReportClient(object):
    """Client class for updating the scheduler."""

    def __init__(self):
        self.conductor_api = conductor.API()

    def update_resource_stats(self, context, name, stats):
        """Creates or updates stats for the desired service.

        :param context: local context
        :param name: name of resource to update
        :type name: immutable (str or tuple)
        :param stats: updated stats to send to scheduler; must contain an
            'id' key identifying the compute node record
        :type stats: dict
        :raises exception.ComputeHostNotCreated: when *stats* has no 'id'
        """
        # Guard clause: without a compute node id there is nothing to update.
        if 'id' not in stats:
            raise exception.ComputeHostNotCreated(name=str(name))
        updates = stats.copy()
        compute_node_id = updates.pop('id')
        self.conductor_api.compute_node_update(context,
                                               {'id': compute_node_id},
                                               updates)
        # Pass the name lazily so the message is only interpolated when
        # INFO logging is actually emitted (avoids eager %-formatting).
        LOG.info(_LI('Compute_service record updated for %s'), str(name))
|
rmcgibbo/psi4public | refs/heads/master | tests/psithon2/psiaux1/myplugin1/pymodule.py | 36 | import psi4
from psi4 import *
from psi4.core import *
def run_myplugin1(name, **kwargs):
    r"""Function encoding sequence of PSI module and plugin calls so that
    myplugin1 can be called via :py:func:`~driver.energy`.

    >>> energy('myplugin1')
    """
    # Normalize keyword-argument casing.  (The unused lowercased method
    # name computed by the original implementation has been dropped.)
    kwargs = p4util.kwargs_lower(kwargs)

    # Your plugin's psi4 run sequence goes here
    psi4.set_global_option('BASIS', 'sto-3g')
    psi4.set_local_option('MYPLUGIN1', 'PRINT', 1)

    # Run a reference SCF first, then hand control to the compiled plugin.
    energy('scf', **kwargs)
    returnvalue = psi4.plugin('myplugin1.so')
    return returnvalue
def exampleFN(name, **kwargs):
    """Stub procedure: records a fixed energy as the current result."""
    # Publish the canned value so driver routines can report it.
    psi4.set_variable('CURRENT ENERGY', -74.94550962)
    # Your Python code goes here
# Integration with driver routines: registering under the 'energy' table
# lets users invoke this plugin as energy('myplugin1').
procedures['energy']['myplugin1'] = exampleFN
|
ActiveState/code | refs/heads/master | recipes/Python/304223_Mapping_arbitrary_objects_PostgreSQL_database/recipe-304223.py | 1 | from datetime import datetime
import psycopg
from psycopg.extensions import adapters, adapt
# Compatibility shim for Python < 2.4, which lacks the sorted() builtin.
# Probe for the *name* instead of calling it: calling sorted() with no
# arguments raises TypeError (not NameError) wherever the builtin exists,
# which made the original probe crash on every modern interpreter.
try:
    sorted
except NameError:
    def sorted(seq):
        # Fallback: sorts *seq* in place and returns it.
        seq.sort()
        return seq
# Here is the adapter for every object that we may ever need to
# insert in the database. It receives the original object and does
# its job on that instance
class ObjectMapper(object):
    """Adapt an arbitrary object for insertion into a PostgreSQL table.

    The table name is the adaptee's class name; the columns to persist are
    looked up in the module-level ``persistent_fields`` registry, keyed by
    class name.
    """

    def __init__(self, orig):
        self.orig = orig
        self.tmp = {}  # NOTE(review): unused, retained for backward compatibility
        self.items, self.fields = self._gatherState()

    def _gatherState(self):
        """Collect (field, value) pairs for the adaptee, sorted by field name."""
        adaptee_name = self.orig.__class__.__name__
        fields = sorted([(field, getattr(self.orig, field))
                         for field in persistent_fields[adaptee_name]])
        # Column names only, in the same (sorted) order as the pairs.
        items = [item for item, value in fields]
        return items, fields

    def getTableName(self):
        """Return the target table name (the adaptee's class name)."""
        return self.orig.__class__.__name__

    def getMappedValues(self):
        """Return the pyformat placeholder list, e.g. '%(a)s, %(b)s'."""
        return ", ".join("%%(%s)s" % i for i in self.items)

    def getValuesDict(self):
        """Return {field: value} for parameter binding."""
        return dict(self.fields)

    def getFields(self):
        """Return the ordered list of column names."""
        return self.items

    def generateInsert(self):
        """Return (query, params) for a parameterized INSERT statement."""
        qry = "INSERT INTO"
        qry += " " + self.getTableName() + " ("
        qry += ", ".join(self.getFields()) + ") VALUES ("
        qry += self.getMappedValues() + ")"
        return qry, self.getValuesDict()
# Here are the objects
class Album(object):
    """Demo persistent object: each instance receives a sequential id."""

    id = 0  # class-wide counter supplying the next album_id

    def __init__(self):
        self.creation_time = datetime.now()
        self.album_id = Album.id
        Album.id += 1
        # Python 2 buffer object standing in for binary column data.
        self.binary_data = buffer('12312312312121')
class Order(object):
    """Demo persistent object: each instance receives a sequential id."""

    id = 0  # class-wide counter supplying the next order_id

    def __init__(self):
        self.items = ['rice', 'chocolate']
        self.price = 34
        self.order_id = Order.id
        Order.id += 1
# Register ObjectMapper as the psycopg adapter for both demo classes.
adapters.update({Album: ObjectMapper, Order: ObjectMapper})
# Describe what is needed to save on each object
# This is actually just configuration, you can use xml with a parser if you
# like to have plenty of wasted CPU cycles ;P.
persistent_fields = {'Album': ['album_id', 'creation_time', 'binary_data'],
                     'Order': ['order_id', 'items', 'price']
                     }
# Demo output (Python 2 print statements): show the generated INSERT for
# a few freshly-created instances of each class.
print adapt(Album()).generateInsert()
print adapt(Album()).generateInsert()
print adapt(Album()).generateInsert()
print adapt(Order()).generateInsert()
print adapt(Order()).generateInsert()
print adapt(Order()).generateInsert()
|
adviti/melange | refs/heads/master | thirdparty/google_appengine/lib/django_1_2/tests/modeltests/choices/__init__.py | 12133432 | |
savex/spectra | refs/heads/master | spectra/__init__.py | 12133432 | |
varunagrawal/azure-services | refs/heads/master | varunagrawal/site-packages/django/contrib/localflavor/sk/__init__.py | 12133432 | |
andim27/magiccamp | refs/heads/master | tests/regressiontests/admin_scripts/complex_app/__init__.py | 12133432 | |
RevelSystems/django | refs/heads/master | tests/utils_tests/__init__.py | 12133432 | |
utecuy/edx-platform | refs/heads/master | common/lib/chem/chem/tests.py | 132 | import codecs
from fractions import Fraction
import unittest
from .chemcalc import (
compare_chemical_expression,
divide_chemical_expression,
render_to_html,
chemical_equations_equal,
)
import miller
# Debug switch: when truthy, log() echoes strings to stdout and, for
# 'html' output, appends to a global file handle `f`.  NOTE(review): `f`
# is not defined in the visible module — presumably the developer opens it
# manually before enabling local_debug; confirm before turning this on.
local_debug = None
def log(s, output_type=None):
    # Python 2 print statement; this module predates print().
    if local_debug:
        print s
        if output_type == 'html':
            f.write(s + '\n<br>\n')
class Test_Compare_Equations(unittest.TestCase):
    """chemical_equations_equal(): whole-equation comparison."""

    def test_simple_equation(self):
        self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2',
                                                 'O2 + H2 -> H2O2'))
        # left sides don't match
        self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2',
                                                  'O2 + 2H2 -> H2O2'))
        # right sides don't match
        self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2',
                                                  'O2 + H2 -> H2O'))
        # factors don't match
        self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2',
                                                  'O2 + H2 -> 2H2O2'))

    def test_different_factor(self):
        # Scaling *both* sides by the same factor keeps equations equal;
        # scaling only part of one side does not.
        self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2',
                                                 '2O2 + 2H2 -> 2H2O2'))
        self.assertFalse(
            chemical_equations_equal(
                '2H2 + O2 -> H2O2',
                '2O2 + 2H2 -> 2H2O2',
            )
        )

    def test_different_arrows(self):
        # '->' and '<->' must not be treated as interchangeable.
        self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2',
                                                 '2O2 + 2H2 -> 2H2O2'))
        self.assertFalse(chemical_equations_equal('H2 + O2 -> H2O2',
                                                  'O2 + H2 <-> 2H2O2'))

    def test_exact_match(self):
        # exact=True forbids overall rescaling but still allows reordering.
        self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2',
                                                 '2O2 + 2H2 -> 2H2O2'))
        self.assertFalse(
            chemical_equations_equal(
                'H2 + O2 -> H2O2',
                '2O2 + 2H2 -> 2H2O2',
                exact=True,
            )
        )
        # order still doesn't matter
        self.assertTrue(chemical_equations_equal('H2 + O2 -> H2O2',
                                                 'O2 + H2 -> H2O2', exact=True))

    def test_syntax_errors(self):
        # Malformed input must compare unequal rather than raise.
        self.assertFalse(chemical_equations_equal('H2 + O2 a-> H2O2',
                                                  '2O2 + 2H2 -> 2H2O2'))
        self.assertFalse(chemical_equations_equal('H2O( -> H2O2',
                                                  'H2O -> H2O2'))
        self.assertFalse(chemical_equations_equal('H2 + O2 ==> H2O2',  # strange arrow
                                                  '2O2 + 2H2 -> 2H2O2'))
class Test_Compare_Expressions(unittest.TestCase):
    """compare_chemical_expression(): single-side expression comparison."""

    def test_compare_incorrect_order_of_atoms_in_molecule(self):
        self.assertFalse(compare_chemical_expression("H2O + CO2", "O2C + OH2"))

    def test_compare_same_order_no_phases_no_factors_no_ions(self):
        self.assertTrue(compare_chemical_expression("H2O + CO2", "CO2+H2O"))

    def test_compare_different_order_no_phases_no_factors_no_ions(self):
        self.assertTrue(compare_chemical_expression("H2O + CO2", "CO2 + H2O"))

    def test_compare_different_order_three_multimolecule(self):
        self.assertTrue(compare_chemical_expression("H2O + Fe(OH)3 + CO2", "CO2 + H2O + Fe(OH)3"))

    def test_compare_same_factors(self):
        self.assertTrue(compare_chemical_expression("3H2O + 2CO2", "2CO2 + 3H2O "))

    def test_compare_different_factors(self):
        self.assertFalse(compare_chemical_expression("2H2O + 3CO2", "2CO2 + 3H2O "))

    def test_compare_correct_ions(self):
        self.assertTrue(compare_chemical_expression("H^+ + OH^-", " OH^- + H^+ "))

    def test_compare_wrong_ions(self):
        self.assertFalse(compare_chemical_expression("H^+ + OH^-", " OH^- + H^- "))

    def test_compare_parent_groups_ions(self):
        self.assertTrue(compare_chemical_expression("Fe(OH)^2- + (OH)^-", " (OH)^- + Fe(OH)^2- "))

    def test_compare_correct_factors_ions_and_one(self):
        self.assertTrue(compare_chemical_expression("3H^+ + 2OH^-", " 2OH^- + 3H^+ "))

    def test_compare_wrong_factors_ions(self):
        self.assertFalse(compare_chemical_expression("2H^+ + 3OH^-", " 2OH^- + 3H^+ "))

    def test_compare_float_factors(self):
        self.assertTrue(compare_chemical_expression("7/2H^+ + 3/5OH^-", " 3/5OH^- + 7/2H^+ "))

    # Phases tests
    def test_compare_phases_ignored(self):
        self.assertTrue(compare_chemical_expression(
            "H2O(s) + CO2", "H2O+CO2", ignore_state=True))

    def test_compare_phases_not_ignored_explicitly(self):
        # BUGFIX: this method used to be silently shadowed by a second
        # definition of the same name further down; both now run.
        self.assertFalse(compare_chemical_expression(
            "H2O(s) + CO2", "H2O+CO2", ignore_state=False))

    def test_compare_phases_not_ignored(self):  # same as previous
        self.assertFalse(compare_chemical_expression(
            "H2O(s) + CO2", "H2O+CO2"))

    def test_compare_phases_matching_states_not_ignored(self):
        # Renamed from a duplicate 'test_compare_phases_not_ignored_explicitly'
        # definition, which previously overrode the method above.
        self.assertTrue(compare_chemical_expression(
            "H2O(s) + CO2", "H2O(s)+CO2", ignore_state=False))

    # all in one cases
    def test_complex_additivity(self):
        self.assertTrue(compare_chemical_expression(
            "5(H1H212)^70010- + 2H20 + 7/2HCl + H2O",
            "7/2HCl + 2H20 + H2O + 5(H1H212)^70010-"))

    def test_complex_additivity_wrong(self):
        self.assertFalse(compare_chemical_expression(
            "5(H1H212)^70010- + 2H20 + 7/2HCl + H2O",
            "2H20 + 7/2HCl + H2O + 5(H1H212)^70011-"))

    def test_complex_all_grammar(self):
        self.assertTrue(compare_chemical_expression(
            "5[Ni(NH3)4]^2+ + 5/2SO4^2-",
            "5/2SO4^2- + 5[Ni(NH3)4]^2+"))

    # special cases
    def test_compare_one_superscript_explicitly_set(self):
        self.assertTrue(compare_chemical_expression("H^+ + OH^1-", " OH^- + H^+ "))

    def test_compare_equal_factors_differently_set(self):
        self.assertTrue(compare_chemical_expression("6/2H^+ + OH^-", " OH^- + 3H^+ "))

    def test_compare_one_subscript_explicitly_set(self):
        self.assertFalse(compare_chemical_expression("H2 + CO2", "H2 + C102"))
class Test_Divide_Expressions(unittest.TestCase):
    ''' as compare_ use divide_,
    tests here must consider different
    division (not equality) cases '''

    def test_divide_by_zero(self):
        self.assertFalse(divide_chemical_expression(
            "0H2O", "H2O"))

    def test_divide_wrong_factors(self):
        self.assertFalse(divide_chemical_expression(
            "5(H1H212)^70010- + 10H2O", "5H2O + 10(H1H212)^70010-"))

    def test_divide_right(self):
        self.assertEqual(divide_chemical_expression(
            "5(H1H212)^70010- + 10H2O", "10H2O + 5(H1H212)^70010-"), 1)

    def test_divide_wrong_reagents(self):
        self.assertFalse(divide_chemical_expression(
            "H2O + CO2", "CO2"))

    def test_divide_right_simple(self):
        self.assertEqual(divide_chemical_expression(
            "H2O + CO2", "H2O+CO2"), 1)

    def test_divide_right_phases(self):
        self.assertEqual(divide_chemical_expression(
            "H2O(s) + CO2", "2H2O(s)+2CO2"), Fraction(1, 2))

    def test_divide_right_phases_other_order(self):
        self.assertEqual(divide_chemical_expression(
            "2H2O(s) + 2CO2", "H2O(s)+CO2"), 2)

    def test_divide_wrong_phases(self):
        self.assertFalse(divide_chemical_expression(
            "H2O(s) + CO2", "2H2O+2CO2(s)"))

    def test_divide_wrong_phases_but_phases_ignored(self):
        self.assertEqual(divide_chemical_expression(
            "H2O(s) + CO2", "2H2O+2CO2(s)", ignore_state=True), Fraction(1, 2))

    def test_divide_order(self):
        self.assertEqual(divide_chemical_expression(
            "2CO2 + H2O", "2H2O+4CO2"), Fraction(1, 2))

    def test_divide_fract_to_int(self):
        self.assertEqual(divide_chemical_expression(
            "3/2CO2 + H2O", "2H2O+3CO2"), Fraction(1, 2))

    def test_divide_fract_to_frac(self):
        self.assertEqual(divide_chemical_expression(
            "3/4CO2 + H2O", "2H2O+9/6CO2"), Fraction(1, 2))

    def test_divide_fract_to_frac_wrong(self):
        # Renamed from 'test_divide_fract_to_frac_wrog' (typo).  The stray
        # second argument previously passed to assertFalse was only used as
        # a failure-message placeholder and has been dropped.
        self.assertFalse(divide_chemical_expression(
            "6/2CO2 + H2O", "2H2O+9/6CO2"))
class Test_Render_Equations(unittest.TestCase):
    """render_to_html(): HTML rendering of expressions and equations.

    Each case logs the rendered/expected pair via log() before asserting,
    so failures can be inspected visually when local_debug is enabled.
    """

    def test_render1(self):
        s = "H2O + CO2"
        out = render_to_html(s)
        correct = u'<span class="math">H<sub>2</sub>O+CO<sub>2</sub></span>'
        log(out + ' ------- ' + correct, 'html')
        self.assertEqual(out, correct)

    def test_render_uncorrect_reaction(self):
        # Chemically nonsensical input still renders literally.
        s = "O2C + OH2"
        out = render_to_html(s)
        correct = u'<span class="math">O<sub>2</sub>C+OH<sub>2</sub></span>'
        log(out + ' ------- ' + correct, 'html')
        self.assertEqual(out, correct)

    def test_render2(self):
        s = "CO2 + H2O + Fe(OH)3"
        out = render_to_html(s)
        correct = u'<span class="math">CO<sub>2</sub>+H<sub>2</sub>O+Fe(OH)<sub>3</sub></span>'
        log(out + ' ------- ' + correct, 'html')
        self.assertEqual(out, correct)

    def test_render3(self):
        # Leading stoichiometric factors stay in normal text position.
        s = "3H2O + 2CO2"
        out = render_to_html(s)
        correct = u'<span class="math">3H<sub>2</sub>O+2CO<sub>2</sub></span>'
        log(out + ' ------- ' + correct, 'html')
        self.assertEqual(out, correct)

    def test_render4(self):
        # Ion charges render as superscripts.
        s = "H^+ + OH^-"
        out = render_to_html(s)
        correct = u'<span class="math">H<sup>+</sup>+OH<sup>-</sup></span>'
        log(out + ' ------- ' + correct, 'html')
        self.assertEqual(out, correct)

    def test_render5(self):
        s = "Fe(OH)^2- + (OH)^-"
        out = render_to_html(s)
        correct = u'<span class="math">Fe(OH)<sup>2-</sup>+(OH)<sup>-</sup></span>'
        log(out + ' ------- ' + correct, 'html')
        self.assertEqual(out, correct)

    def test_render6(self):
        # Fractional factors use the U+2044 fraction slash with sup/sub.
        s = "7/2H^+ + 3/5OH^-"
        out = render_to_html(s)
        correct = u'<span class="math"><sup>7</sup>⁄<sub>2</sub>H<sup>+</sup>+<sup>3</sup>⁄<sub>5</sub>OH<sup>-</sup></span>'
        log(out + ' ------- ' + correct, 'html')
        self.assertEqual(out, correct)

    def test_render7(self):
        s = "5(H1H212)^70010- + 2H2O + 7/2HCl + H2O"
        out = render_to_html(s)
        correct = u'<span class="math">5(H<sub>1</sub>H<sub>212</sub>)<sup>70010-</sup>+2H<sub>2</sub>O+<sup>7</sup>⁄<sub>2</sub>HCl+H<sub>2</sub>O</span>'
        log(out + ' ------- ' + correct, 'html')
        self.assertEqual(out, correct)

    def test_render8(self):
        # Phase annotations like (s) pass through unchanged.
        s = "H2O(s) + CO2"
        out = render_to_html(s)
        correct = u'<span class="math">H<sub>2</sub>O(s)+CO<sub>2</sub></span>'
        log(out + ' ------- ' + correct, 'html')
        self.assertEqual(out, correct)

    def test_render9(self):
        s = "5[Ni(NH3)4]^2+ + 5/2SO4^2-"
        out = render_to_html(s)
        correct = u'<span class="math">5[Ni(NH<sub>3</sub>)<sub>4</sub>]<sup>2+</sup>+<sup>5</sup>⁄<sub>2</sub>SO<sub>4</sub><sup>2-</sup></span>'
        log(out + ' ------- ' + correct, 'html')
        self.assertEqual(out, correct)

    def test_render_error(self):
        # Unparseable input is wrapped in an inline-error span verbatim.
        s = "5.2H20"
        out = render_to_html(s)
        correct = u'<span class="math"><span class="inline-error inline">5.2H20</span></span>'
        log(out + ' ------- ' + correct, 'html')
        self.assertEqual(out, correct)

    def test_render_simple_brackets(self):
        s = "(Ar)"
        out = render_to_html(s)
        correct = u'<span class="math">(Ar)</span>'
        log(out + ' ------- ' + correct, 'html')
        self.assertEqual(out, correct)

    def test_render_eq1(self):
        # '->' renders as a rightwards arrow (U+2192).
        s = "H^+ + OH^- -> H2O"
        out = render_to_html(s)
        correct = u'<span class="math">H<sup>+</sup>+OH<sup>-</sup>\u2192H<sub>2</sub>O</span>'
        log(out + ' ------- ' + correct, 'html')
        self.assertEqual(out, correct)

    def test_render_eq2(self):
        # '<->' renders as a left-right arrow (U+2194).
        s = "H^+ + OH^- <-> H2O"
        out = render_to_html(s)
        correct = u'<span class="math">H<sup>+</sup>+OH<sup>-</sup>\u2194H<sub>2</sub>O</span>'
        log(out + ' ------- ' + correct, 'html')
        self.assertEqual(out, correct)

    def test_render_eq3(self):
        s = "H^+ + OH^- <= H2O"  # unsupported arrow
        out = render_to_html(s)
        correct = u'<span class="math"><span class="inline-error inline">H^+ + OH^- <= H2O</span></span>'
        log(out + ' ------- ' + correct, 'html')
        self.assertEqual(out, correct)
class Test_Crystallography_Miller(unittest.TestCase):
''' Tests for crystallography grade function.'''
def test_empty_points(self):
user_input = '{"lattice": "bcc", "points": []}'
self.assertFalse(miller.grade(user_input, {'miller': '(2,2,2)', 'lattice': 'bcc'}))
def test_only_one_point(self):
user_input = '{"lattice": "bcc", "points": [["0.50", "0.00", "0.00"]]}'
self.assertFalse(miller.grade(user_input, {'miller': '(2,2,2)', 'lattice': 'bcc'}))
def test_only_two_points(self):
user_input = '{"lattice": "bcc", "points": [["0.50", "0.00", "0.00"], ["0.00", "0.50", "0.00"]]}'
self.assertFalse(miller.grade(user_input, {'miller': '(2,2,2)', 'lattice': 'bcc'}))
def test_1(self):
user_input = '{"lattice": "bcc", "points": [["0.50", "0.00", "0.00"], ["0.00", "0.50", "0.00"], ["0.00", "0.00", "0.50"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(2,2,2)', 'lattice': 'bcc'}))
def test_2(self):
user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "0.00"], ["0.00", "1.00", "0.00"], ["0.00", "0.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(1,1,1)', 'lattice': 'bcc'}))
def test_3(self):
user_input = '{"lattice": "bcc", "points": [["1.00", "0.50", "1.00"], ["1.00", "1.00", "0.50"], ["0.50", "1.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(2,2,2)', 'lattice': 'bcc'}))
def test_4(self):
user_input = '{"lattice": "bcc", "points": [["0.33", "1.00", "0.00"], ["0.00", "0.664", "0.00"], ["0.00", "1.00", "0.33"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(-3, 3, -3)', 'lattice': 'bcc'}))
def test_5(self):
""" return true only in case points coordinates are exact.
But if they transform to closest 0.05 value it is not true"""
user_input = '{"lattice": "bcc", "points": [["0.33", "1.00", "0.00"], ["0.00", "0.33", "0.00"], ["0.00", "1.00", "0.33"]]}'
self.assertFalse(miller.grade(user_input, {'miller': '(-6,3,-6)', 'lattice': 'bcc'}))
def test_6(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "0.25", "0.00"], ["0.25", "0.00", "0.00"], ["0.00", "0.00", "0.25"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(4,4,4)', 'lattice': 'bcc'}))
def test_7(self): # goes throug origin
user_input = '{"lattice": "bcc", "points": [["0.00", "1.00", "0.00"], ["1.00", "0.00", "0.00"], ["0.50", "1.00", "0.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(0,0,-1)', 'lattice': 'bcc'}))
def test_8(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "1.00", "0.50"], ["1.00", "0.00", "0.50"], ["0.50", "1.00", "0.50"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(0,0,2)', 'lattice': 'bcc'}))
def test_9(self):
user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "1.00"], ["0.00", "1.00", "1.00"], ["1.00", "0.00", "0.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(1,1,0)', 'lattice': 'bcc'}))
def test_10(self):
user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "1.00"], ["0.00", "0.00", "0.00"], ["0.00", "1.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(1,1,-1)', 'lattice': 'bcc'}))
def test_11(self):
user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "0.50"], ["1.00", "1.00", "0.00"], ["0.00", "1.00", "0.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(0,1,2)', 'lattice': 'bcc'}))
def test_12(self):
user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "0.50"], ["0.00", "0.00", "0.50"], ["1.00", "1.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(0,1,-2)', 'lattice': 'bcc'}))
def test_13(self):
user_input = '{"lattice": "bcc", "points": [["0.50", "0.00", "0.00"], ["0.50", "1.00", "0.00"], ["0.00", "0.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(2,0,1)', 'lattice': 'bcc'}))
def test_14(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["0.00", "0.00", "1.00"], ["0.50", "1.00", "0.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(2,-1,0)', 'lattice': 'bcc'}))
def test_15(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "1.00", "0.00"], ["0.00", "1.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(1,-1,1)', 'lattice': 'bcc'}))
def test_16(self):
user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "0.00"], ["0.00", "1.00", "0.00"], ["1.00", "1.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(1,1,-1)', 'lattice': 'bcc'}))
def test_17(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "0.00", "1.00"], ["1.00", "1.00", "0.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(-1,1,1)', 'lattice': 'bcc'}))
def test_18(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "1.00", "0.00"], ["0.00", "1.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(1,-1,1)', 'lattice': 'bcc'}))
def test_19(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "1.00", "0.00"], ["0.00", "0.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(-1,1,0)', 'lattice': 'bcc'}))
def test_20(self):
user_input = '{"lattice": "bcc", "points": [["1.00", "0.00", "0.00"], ["1.00", "1.00", "0.00"], ["0.00", "0.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(1,0,1)', 'lattice': 'bcc'}))
def test_21(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["0.00", "1.00", "0.00"], ["1.00", "0.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(-1,0,1)', 'lattice': 'bcc'}))
def test_22(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "1.00", "0.00"], ["1.00", "1.00", "0.00"], ["0.00", "0.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(0,1,1)', 'lattice': 'bcc'}))
def test_23(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "0.00", "0.00"], ["1.00", "1.00", "1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(0,-1,1)', 'lattice': 'bcc'}))
def test_24(self):
user_input = '{"lattice": "bcc", "points": [["0.66", "0.00", "0.00"], ["0.00", "0.66", "0.00"], ["0.00", "0.00", "0.66"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(3,3,3)', 'lattice': 'bcc'}))
def test_25(self):
user_input = u'{"lattice":"","points":[["0.00","0.00","0.01"],["1.00","1.00","0.01"],["0.00","1.00","1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(1,-1,1)', 'lattice': ''}))
def test_26(self):
user_input = u'{"lattice":"","points":[["0.00","0.01","0.00"],["1.00","0.00","0.00"],["0.00","0.00","1.00"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(0,-1,0)', 'lattice': ''}))
def test_27(self):
""" rounding to 0.35"""
user_input = u'{"lattice":"","points":[["0.33","0.00","0.00"],["0.00","0.33","0.00"],["0.00","0.00","0.33"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(3,3,3)', 'lattice': ''}))
def test_28(self):
""" rounding to 0.30"""
user_input = u'{"lattice":"","points":[["0.30","0.00","0.00"],["0.00","0.30","0.00"],["0.00","0.00","0.30"]]}'
self.assertTrue(miller.grade(user_input, {'miller': '(10,10,10)', 'lattice': ''}))
def test_wrong_lattice(self):
user_input = '{"lattice": "bcc", "points": [["0.00", "0.00", "0.00"], ["1.00", "0.00", "0.00"], ["1.00", "1.00", "1.00"]]}'
self.assertFalse(miller.grade(user_input, {'miller': '(3,3,3)', 'lattice': 'fcc'}))
def suite():
    """Build a unittest.TestSuite covering every test case class in this module."""
    case_classes = (Test_Compare_Expressions,
                    Test_Divide_Expressions,
                    Test_Render_Equations,
                    Test_Crystallography_Miller)
    loader = unittest.TestLoader()
    return unittest.TestSuite(loader.loadTestsFromTestCase(cls)
                              for cls in case_classes)
if __name__ == "__main__":
    local_debug = True
    # NOTE(review): the handle 'f' is opened but never written to in this
    # block -- presumably the render tests write to render.html through some
    # module-level hook while the suite runs; confirm before removing.
    with codecs.open('render.html', 'w', encoding='utf-8') as f:
        unittest.TextTestRunner(verbosity=2).run(suite())
    # open render.html to look at rendered equations
|
RobinQuetin/CAIRIS-web | refs/heads/develop | cairis/cairis/UsabilityContentHandler.py | 1 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from xml.sax.handler import ContentHandler,EntityResolver
from PersonaParameters import PersonaParameters
from PersonaEnvironmentProperties import PersonaEnvironmentProperties
from ExternalDocumentParameters import ExternalDocumentParameters
from DocumentReferenceParameters import DocumentReferenceParameters
from ConceptReferenceParameters import ConceptReferenceParameters
from PersonaCharacteristicParameters import PersonaCharacteristicParameters
from TaskCharacteristicParameters import TaskCharacteristicParameters
from TaskParameters import TaskParameters
from TaskEnvironmentProperties import TaskEnvironmentProperties
from UseCaseParameters import UseCaseParameters
from UseCaseEnvironmentProperties import UseCaseEnvironmentProperties
from Steps import Steps
from Step import Step
from Borg import Borg
def a2s(aStr):
    """Translate an XML multiplicity token to its model form.

    'a' becomes '*' and '1..a' becomes '1..*'; anything else is
    returned unchanged.
    """
    translation = {'a': '*', '1..a': '1..*'}
    return translation.get(aStr, aStr)
def u2s(aStr):
    """Return aStr with every underscore replaced by a space.

    The original implementation rebuilt the string character by
    character; str.replace does the same thing in one C-level pass.
    """
    return aStr.replace('_', ' ')
def durationValue(dLabel):
    """Map a task duration label to a Low/Medium/High rating.

    'Seconds' -> 'Low', 'Minutes' -> 'Medium', anything else -> 'High'.
    """
    rating = {'Seconds': 'Low', 'Minutes': 'Medium'}
    return rating.get(dLabel, 'High')
def frequencyValue(fLabel):
    """Map a task frequency label to a Low/Medium/High rating.

    'Hourly_or_more' -> 'Low', 'Daily_-_Weekly' -> 'Medium',
    anything else -> 'High'.
    """
    rating = {'Hourly_or_more': 'Low', 'Daily_-_Weekly': 'Medium'}
    return rating.get(fLabel, 'High')
class UsabilityContentHandler(ContentHandler,EntityResolver):
  """SAX handler for the CAIRIS usability XML model.

  Parses personas, external documents, document and concept references,
  persona and task characteristics, tasks, and use cases.  The parsed
  parameter objects are exposed via the accessor methods below once
  parsing is complete.

  Fixes relative to the previous revision:
  - resetPersonaAttributes now initialises theAttitudes, so a persona
    without an <attitudes> element no longer raises AttributeError (or
    silently inherits the previous persona's attitudes).
  - __init__ now also calls resetExternalDocumentAttributes so all
    external-document fields exist before the first element is seen.
  """

  def __init__(self):
    # Accumulators for each top-level model object type.
    self.thePersonas = []
    self.theExternalDocuments = []
    self.theDocumentReferences = []
    self.theConceptReferences = []
    self.thePersonaCharacteristics = []
    self.theTaskCharacteristics = []
    self.theTasks = []
    self.theUseCases = []
    b = Borg()
    self.configDir = b.configDir
    # Initialise all per-object scratch state before parsing starts.
    self.resetPersonaAttributes()
    self.resetExternalDocumentAttributes()
    self.resetDocumentReferenceAttributes()
    self.resetConceptReferenceAttributes()
    self.resetPersonaCharacteristicAttributes()
    self.resetTaskCharacteristicAttributes()
    self.resetTaskAttributes()
    self.resetUseCaseAttributes()

  def resolveEntity(self,publicId,systemId):
    # Resolve the external DTD reference to the locally installed copy.
    return self.configDir + '/usability.dtd'

  def personas(self):
    return self.thePersonas

  def externalDocuments(self):
    return self.theExternalDocuments

  def documentReferences(self):
    return self.theDocumentReferences

  def conceptReferences(self):
    return self.theConceptReferences

  def personaCharacteristics(self):
    return self.thePersonaCharacteristics

  def taskCharacteristics(self):
    return self.theTaskCharacteristics

  def tasks(self):
    return self.theTasks

  def usecases(self):
    return self.theUseCases

  def resetPersonaAttributes(self):
    # in* flags track which text element characters() should append to.
    self.inActivities = 0
    self.inAttitudes = 0
    self.inAptitudes = 0
    self.inMotivations = 0
    self.inSkills = 0
    self.inIntrinsic = 0
    self.inContextual = 0
    self.theName = ''
    self.theTags = []
    self.theType = ''
    self.theImage = ''
    self.isAssumptionPersona = False
    self.theActivities = ''
    self.theAttitudes = ''  # was missing: endElement('persona') reads it
    self.theAptitudes = ''
    self.theMotivations = ''
    self.theSkills = ''
    self.theIntrinsic = ''
    self.theContextual = ''
    self.theEnvironmentProperties = []
    self.resetPersonaEnvironmentAttributes()

  def resetPersonaEnvironmentAttributes(self):
    self.theEnvironmentName = ''
    self.theRoles = []
    self.isDirect = True
    self.inNarrative = 0
    self.theNarrative = ''

  def resetExternalDocumentAttributes(self):
    self.theName = ''
    self.theVersion = ''
    self.theDate = ''
    self.theAuthors = ''
    self.inDescription = 0
    self.theDescription = ''

  def resetDocumentReferenceAttributes(self):
    self.inExcerpt = 0
    self.theName = ''
    self.theContributor = ''
    self.theDocument = ''
    self.theExcerpt = ''

  def resetConceptReferenceAttributes(self):
    self.inDescription = 0
    self.theName = ''
    self.theConcept = ''
    self.theObject = ''
    self.theDescription = ''

  def resetPersonaCharacteristicAttributes(self):
    self.thePersona = ''
    self.theBvName = ''
    self.theModalQualifier = ''
    self.inDefinition = 0
    self.theDefinition = ''
    self.theGrounds = []
    self.theWarrants = []
    self.theRebuttals = []

  def resetTaskCharacteristicAttributes(self):
    self.theTask = ''
    self.theModalQualifier = ''
    self.inDefinition = 0
    self.theDefinition = ''
    self.theGrounds = []
    self.theWarrants = []
    self.theRebuttals = []

  def resetTaskAttributes(self):
    self.theName = ''
    self.theTags = []
    self.theCode = ''
    self.theAuthor = ''
    self.isAssumptionTask = False
    self.inObjective = 0
    self.theObjective = ''
    self.theEnvironmentProperties = []
    self.resetTaskEnvironmentAttributes()

  def resetTaskEnvironmentAttributes(self):
    self.theEnvironmentName = ''
    self.inDependencies = 0
    self.inNarrative = 0
    self.inConsequences = 0
    self.inBenefits = 0
    self.theDependencies = ''
    self.theNarrative = ''
    self.theConsequences = ''
    self.theBenefits = ''
    self.theTaskPersonas = []
    self.theConcerns = []
    self.theConcernAssociations = []

  def resetUseCaseAttributes(self):
    self.theName = ''
    self.theTags = []
    self.theAuthor = ''
    self.theCode = ''
    self.inDescription = 0
    self.theDescription = ''
    self.theActors = []
    self.theEnvironmentProperties = []
    self.resetUseCaseEnvironmentAttributes()

  def resetUseCaseEnvironmentAttributes(self):
    self.theEnvironmentName = ''
    self.inPreconditions = 0
    self.thePreconditions = ''
    self.inPostconditions = 0
    self.thePostconditions = ''
    self.theSteps = Steps()
    self.theCurrentStep = None
    self.theCurrentStepNo = 0
    self.theExcName = ''
    self.theExcType = ''
    self.theExcValue = ''
    self.theExcCat = ''
    self.inDefinition = 0
    self.theDefinition = ''

  def startElement(self,name,attrs):
    """Capture element attributes and raise the relevant text-capture flag."""
    self.currentElementName = name
    if name == 'persona':
      self.theName = attrs['name']
      self.theType = attrs['type']
      self.theImage = attrs['image']
      if (attrs['assumption_persona'] == 'TRUE'):
        self.isAssumptionPersona = True
    elif name == 'persona_environment':
      self.theEnvironmentName = attrs['name']
      if (attrs['is_direct'] == 'FALSE'):
        self.isDirect = False
    elif name == 'persona_role':
      self.theRoles.append(attrs['name'])
    elif name == 'external_document':
      self.theName = attrs['name']
      self.theVersion = attrs['version']
      self.theDate = attrs['date']
      self.theAuthors = attrs['authors']
    elif name == 'document_reference':
      self.theName = attrs['name']
      self.theContributor = attrs['contributor']
      self.theDocument = attrs['document']
    elif name == 'concept_reference':
      self.theName = attrs['name']
      self.theConcept = attrs['concept']
      self.theObject = attrs['object']
    elif name == 'persona_characteristic':
      self.thePersona = attrs['persona']
      self.theBvName = u2s(attrs['behavioural_variable'])
      self.theModalQualifier = attrs['modal_qualifier']
    elif name == 'task_characteristic':
      self.theTask = attrs['task']
      self.theModalQualifier = attrs['modal_qualifier']
    elif name == 'grounds':
      # Middle tuple element (reference description) is filled in later.
      self.theGrounds.append((attrs['reference'],'',attrs['type']))
    elif name == 'warrant':
      self.theWarrants.append((attrs['reference'],'',attrs['type']))
    elif name == 'rebuttal':
      self.theRebuttals.append((attrs['reference'],'',attrs['type']))
    elif name == 'task':
      self.theName = attrs['name']
      try:
        self.theCode = attrs['code']
      except KeyError:
        # 'code' is optional for tasks.
        self.theCode = ''
      self.theAuthor = attrs['author']
      if (attrs['assumption_task'] == 'TRUE'):
        self.isAssumptionTask = True
    elif name == 'task_environment':
      self.theEnvironmentName = attrs['name']
    elif name == 'task_persona':
      self.theTaskPersonas.append((attrs['persona'],durationValue(attrs['duration']),frequencyValue(attrs['frequency']),attrs['demands'],attrs['goal_conflict']))
    elif name == 'task_concern':
      self.theConcerns.append(attrs['asset'])
    elif name == 'task_concern_association':
      self.theConcernAssociations.append((attrs['source_name'],a2s(attrs['source_nry']),attrs['link_name'],attrs['target_name'],a2s(attrs['target_nry'])))
    elif name == 'usecase':
      self.theName = attrs['name']
      self.theAuthor = attrs['author']
      self.theCode = attrs['code']
    elif name == 'actor':
      self.theActors.append(attrs['name'])
    elif name == 'usecase_environment':
      self.theEnvironmentName = attrs['name']
    elif name == 'step':
      self.theCurrentStepNo = attrs['number']
      self.theCurrentStep = Step(attrs['description'])
    elif name == 'exception':
      self.theExcName = attrs['name']
      self.theExcType = attrs['type']
      self.theExcValue = attrs['value']
      self.theExcCat = u2s(attrs['category'])
    elif name == 'activities':
      self.inActivities = 1
      self.theActivities = ''
    elif name == 'attitudes':
      self.inAttitudes = 1
      self.theAttitudes = ''
    elif name == 'aptitudes':
      self.inAptitudes = 1
      self.theAptitudes = ''
    elif name == 'motivations':
      self.inMotivations = 1
      self.theMotivations = ''
    elif name == 'skills':
      self.inSkills = 1
      self.theSkills = ''
    elif name == 'intrinsic':
      self.inIntrinsic = 1
      self.theIntrinsic = ''
    elif name == 'contextual':
      self.inContextual = 1
      self.theContextual = ''
    elif name == 'narrative':
      self.inNarrative = 1
      self.theNarrative = ''
    elif name == 'consequences':
      self.inConsequences = 1
      self.theConsequences = ''
    elif name == 'benefits':
      self.inBenefits = 1
      self.theBenefits = ''
    elif name == 'excerpt':
      self.inExcerpt = 1
      self.theExcerpt = ''
    elif name == 'description':
      self.inDescription = 1
      self.theDescription = ''
    elif name == 'definition':
      self.inDefinition = 1
      self.theDefinition = ''
    elif name == 'dependencies':
      self.inDependencies = 1
      self.theDependencies = ''
    elif name == 'objective':
      self.inObjective = 1
      self.theObjective = ''
    elif name == 'preconditions':
      self.inPreconditions = 1
      self.thePreconditions = ''
    elif name == 'postconditions':
      self.inPostconditions = 1
      self.thePostconditions = ''
    elif name == 'tag':
      self.theTags.append(attrs['name'])

  def characters(self,data):
    """Append character data to whichever text buffer is currently open."""
    if self.inActivities:
      self.theActivities += data
    elif self.inAttitudes:
      self.theAttitudes += data
    elif self.inAptitudes:
      self.theAptitudes += data
    elif self.inMotivations:
      self.theMotivations += data
    elif self.inSkills:
      self.theSkills += data
    elif self.inIntrinsic:
      self.theIntrinsic += data
    elif self.inContextual:
      self.theContextual += data
    elif self.inConsequences:
      self.theConsequences += data
    elif self.inBenefits:
      self.theBenefits += data
    elif self.inExcerpt:
      self.theExcerpt += data
    elif self.inDescription:
      self.theDescription += data
    elif self.inDefinition:
      self.theDefinition += data
    elif self.inDependencies:
      self.theDependencies += data
    elif self.inObjective:
      self.theObjective += data
    elif self.inPreconditions:
      self.thePreconditions += data
    elif self.inPostconditions:
      self.thePostconditions += data
    elif self.inNarrative:
      self.theNarrative += data

  def endElement(self,name):
    """Build the parameter object for a completed element and reset state."""
    if name == 'persona':
      p = PersonaParameters(self.theName,self.theActivities,self.theAttitudes,self.theAptitudes,self.theMotivations,self.theSkills,self.theIntrinsic,self.theContextual,self.theImage,self.isAssumptionPersona,self.theType,self.theTags,self.theEnvironmentProperties,{})
      self.thePersonas.append(p)
      self.resetPersonaAttributes()
    elif name == 'persona_environment':
      p = PersonaEnvironmentProperties(self.theEnvironmentName,self.isDirect,self.theNarrative,self.theRoles,{'narrative':{}})
      self.theEnvironmentProperties.append(p)
      self.resetPersonaEnvironmentAttributes()
    elif name == 'external_document':
      p = ExternalDocumentParameters(self.theName,self.theVersion,self.theDate,self.theAuthors,self.theDescription)
      self.theExternalDocuments.append(p)
      self.resetExternalDocumentAttributes()
    elif name == 'document_reference':
      p = DocumentReferenceParameters(self.theName,self.theDocument,self.theContributor,self.theExcerpt)
      self.theDocumentReferences.append(p)
      self.resetDocumentReferenceAttributes()
    elif name == 'concept_reference':
      p = ConceptReferenceParameters(self.theName,self.theConcept,self.theObject,self.theDescription)
      self.theConceptReferences.append(p)
      self.resetConceptReferenceAttributes()
    elif name == 'persona_characteristic':
      p = PersonaCharacteristicParameters(self.thePersona,self.theModalQualifier,self.theBvName,self.theDefinition,self.theGrounds,self.theWarrants,[],self.theRebuttals)
      self.thePersonaCharacteristics.append(p)
      self.resetPersonaCharacteristicAttributes()
    elif name == 'task_characteristic':
      p = TaskCharacteristicParameters(self.theTask,self.theModalQualifier,self.theDefinition,self.theGrounds,self.theWarrants,[],self.theRebuttals)
      self.theTaskCharacteristics.append(p)
      self.resetTaskCharacteristicAttributes()
    elif name == 'task':
      p = TaskParameters(self.theName,self.theCode,self.theObjective,self.isAssumptionTask,self.theAuthor,self.theTags,self.theEnvironmentProperties)
      self.theTasks.append(p)
      self.resetTaskAttributes()
    elif name == 'task_environment':
      p = TaskEnvironmentProperties(self.theEnvironmentName,self.theDependencies,self.theTaskPersonas,self.theConcerns,self.theConcernAssociations,self.theNarrative,self.theConsequences,self.theBenefits,{'narrative':{},'consequences':{},'benefits':{}})
      self.theEnvironmentProperties.append(p)
      self.resetTaskEnvironmentAttributes()
    elif name == 'exception':
      self.theCurrentStep.addException((self.theExcName,self.theExcType,self.theExcValue,self.theExcCat,self.theDefinition))
    elif name == 'step':
      # NOTE(review): theTags is not cleared after a step, so tags appear to
      # accumulate across steps (and into the enclosing use case). Confirm
      # against the DTD before changing this behaviour.
      self.theCurrentStep.setTags(self.theTags)
      self.theSteps.append(self.theCurrentStep)
      self.theCurrentStep = None
    elif name == 'usecase_environment':
      p = UseCaseEnvironmentProperties(self.theEnvironmentName,self.thePreconditions,self.theSteps,self.thePostconditions)
      self.theEnvironmentProperties.append(p)
      self.resetUseCaseEnvironmentAttributes()
    elif name == 'usecase':
      p = UseCaseParameters(self.theName,self.theAuthor,self.theCode,self.theActors,self.theDescription,self.theTags,self.theEnvironmentProperties)
      self.theUseCases.append(p)
      self.resetUseCaseAttributes()
    elif name == 'activities':
      self.inActivities = 0
    elif name == 'attitudes':
      self.inAttitudes = 0
    elif name == 'aptitudes':
      self.inAptitudes = 0
    elif name == 'motivations':
      self.inMotivations = 0
    elif name == 'skills':
      self.inSkills = 0
    elif name == 'intrinsic':
      self.inIntrinsic = 0
    elif name == 'contextual':
      self.inContextual = 0
    elif name == 'narrative':
      self.inNarrative = 0
    elif name == 'excerpt':
      self.inExcerpt = 0
    elif name == 'description':
      self.inDescription = 0
    elif name == 'definition':
      self.inDefinition = 0
    elif name == 'dependencies':
      self.inDependencies = 0
    elif name == 'objective':
      self.inObjective = 0
    elif name == 'preconditions':
      self.inPreconditions = 0
    elif name == 'postconditions':
      self.inPostconditions = 0
    elif name == 'benefits':
      self.inBenefits = 0
    elif name == 'consequences':
      self.inConsequences = 0
|
mac01021/repoze.who.plugins.fake | refs/heads/master | setup.py | 1 | from setuptools import setup, find_packages
import sys, os
version = '0.1'
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
setup(name='repoze.who.plugins.fake',
version=version,
description="A collection of repoze.who plugins meant to help with testing",
long_description=README,
classifiers=[],
keywords='',
author='',
author_email='coolbeth@gmail.com',
url='',
license='',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points="""
""",
)
|
rrampage/rethinkdb | refs/heads/next | test/rql_test/connections/http_support/werkzeug/contrib/atom.py | 147 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.atom
~~~~~~~~~~~~~~~~~~~~~
This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).
Example::
def atom_feed(request):
feed = AtomFeed("My Blog", feed_url=request.url,
url=request.host_url,
subtitle="My example blog for a feed test.")
for post in Post.query.limit(10).all():
feed.add(post.title, post.body, content_type='html',
author=post.author, url=post.url, id=post.uid,
updated=post.last_update, published=post.pub_date)
return feed.get_response()
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from werkzeug.utils import escape
from werkzeug.wrappers import BaseResponse
from werkzeug._compat import implements_to_string, string_types
XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
def _make_text_block(name, content, content_type=None):
    """Helper function for the builder that creates an XML text block."""
    if content_type == 'xhtml':
        # XHTML content is embedded verbatim inside a namespaced <div>.
        return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % (
            name, XHTML_NAMESPACE, content, name)
    if content_type:
        return u'<%s type="%s">%s</%s>\n' % (name, content_type,
                                             escape(content), name)
    return u'<%s>%s</%s>\n' % (name, escape(content), name)
def format_iso8601(obj):
    """Format a datetime object for iso8601"""
    iso_template = '{:%Y-%m-%dT%H:%M:%SZ}'
    return iso_template.format(obj)
@implements_to_string
class AtomFeed(object):
    """A helper class that creates Atom feeds.

    :param title: the title of the feed. Required.
    :param title_type: the type attribute for the title element.  One of
                       ``'html'``, ``'text'`` or ``'xhtml'``.
    :param url: the url for the feed (not the url *of* the feed)
    :param id: a globally unique id for the feed.  Must be an URI.  If
               not present the `feed_url` is used, but one of both is
               required.
    :param updated: the time the feed was modified the last time.  Must
                    be a :class:`datetime.datetime` object.  If not
                    present the latest entry's `updated` is used.
    :param feed_url: the URL to the feed.  Should be the URL that was
                     requested.
    :param author: the author of the feed.  Must be either a string (the
                   name) or a dict with name (required) and uri or
                   email (both optional).  Can be a list of (may be
                   mixed, too) strings and dicts, too, if there are
                   multiple authors. Required if not every entry has an
                   author element.
    :param icon: an icon for the feed.
    :param logo: a logo for the feed.
    :param rights: copyright information for the feed.
    :param rights_type: the type attribute for the rights element.  One of
                        ``'html'``, ``'text'`` or ``'xhtml'``.  Default is
                        ``'text'``.
    :param subtitle: a short description of the feed.
    :param subtitle_type: the type attribute for the subtitle element.
                          One of ``'text'``, ``'html'``, ``'text'``
                          or ``'xhtml'``.  Default is ``'text'``.
    :param links: additional links.  Must be a list of dictionaries with
                  href (required) and rel, type, hreflang, title, length
                  (all optional)
    :param generator: the software that generated this feed.  This must be
                      a tuple in the form ``(name, url, version)``.  If
                      you don't want to specify one of them, set the item
                      to `None`.
    :param entries: a list with the entries for the feed. Entries can also
                    be added later with :meth:`add`.

    For more information on the elements see
    http://www.atomenabled.org/developers/syndication/

    Everywhere where a list is demanded, any iterable can be used.
    """

    # Fallback generator tuple used when the caller supplies none.
    default_generator = ('Werkzeug', None, None)

    def __init__(self, title=None, entries=None, **kwargs):
        self.title = title
        self.title_type = kwargs.get('title_type', 'text')
        self.url = kwargs.get('url')
        self.feed_url = kwargs.get('feed_url', self.url)
        self.id = kwargs.get('id', self.feed_url)
        self.updated = kwargs.get('updated')
        self.author = kwargs.get('author', ())
        self.icon = kwargs.get('icon')
        self.logo = kwargs.get('logo')
        self.rights = kwargs.get('rights')
        self.rights_type = kwargs.get('rights_type')
        self.subtitle = kwargs.get('subtitle')
        self.subtitle_type = kwargs.get('subtitle_type', 'text')
        self.generator = kwargs.get('generator')
        if self.generator is None:
            self.generator = self.default_generator
        self.links = kwargs.get('links', [])
        self.entries = entries and list(entries) or []

        # Normalise `author` into a list of {'name': ...} dicts whether the
        # caller passed a string, a dict, or an iterable of either.
        if not hasattr(self.author, '__iter__') \
           or isinstance(self.author, string_types + (dict,)):
            self.author = [self.author]
        for i, author in enumerate(self.author):
            if not isinstance(author, dict):
                self.author[i] = {'name': author}

        if not self.title:
            raise ValueError('title is required')
        if not self.id:
            raise ValueError('id is required')
        for author in self.author:
            if 'name' not in author:
                raise TypeError('author must contain at least a name')

    def add(self, *args, **kwargs):
        """Add a new entry to the feed.  This function can either be called
        with a :class:`FeedEntry` or some keyword and positional arguments
        that are forwarded to the :class:`FeedEntry` constructor.
        """
        if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
            self.entries.append(args[0])
        else:
            # Forward the feed's URL so entries get a sensible xml:base.
            kwargs['feed_url'] = self.feed_url
            self.entries.append(FeedEntry(*args, **kwargs))

    def __repr__(self):
        return '<%s %r (%d entries)>' % (
            self.__class__.__name__,
            self.title,
            len(self.entries)
        )

    def generate(self):
        """Return a generator that yields pieces of XML."""
        # atom demands either an author element in every entry or a global one
        if not self.author:
            if False in map(lambda e: bool(e.author), self.entries):
                self.author = ({'name': 'Unknown author'},)

        # Without an explicit `updated`, fall back to the newest entry's
        # timestamp (or "now" for an empty feed).
        if not self.updated:
            dates = sorted([entry.updated for entry in self.entries])
            self.updated = dates and dates[-1] or datetime.utcnow()

        yield u'<?xml version="1.0" encoding="utf-8"?>\n'
        yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
        yield '  ' + _make_text_block('title', self.title, self.title_type)
        yield u'  <id>%s</id>\n' % escape(self.id)
        yield u'  <updated>%s</updated>\n' % format_iso8601(self.updated)
        if self.url:
            yield u'  <link href="%s" />\n' % escape(self.url)
        if self.feed_url:
            yield u'  <link href="%s" rel="self" />\n' % \
                escape(self.feed_url)
        for link in self.links:
            yield u'  <link %s/>\n' % ''.join('%s="%s" ' % \
                (k, escape(link[k])) for k in link)
        for author in self.author:
            yield u'  <author>\n'
            yield u'    <name>%s</name>\n' % escape(author['name'])
            if 'uri' in author:
                yield u'    <uri>%s</uri>\n' % escape(author['uri'])
            if 'email' in author:
                yield '    <email>%s</email>\n' % escape(author['email'])
            yield '  </author>\n'
        if self.subtitle:
            yield '  ' + _make_text_block('subtitle', self.subtitle,
                                          self.subtitle_type)
        if self.icon:
            yield u'  <icon>%s</icon>\n' % escape(self.icon)
        if self.logo:
            yield u'  <logo>%s</logo>\n' % escape(self.logo)
        if self.rights:
            yield '  ' + _make_text_block('rights', self.rights,
                                          self.rights_type)
        generator_name, generator_url, generator_version = self.generator
        # Only emit <generator> if at least one component was provided.
        if generator_name or generator_url or generator_version:
            tmp = [u'  <generator']
            if generator_url:
                tmp.append(u' uri="%s"' % escape(generator_url))
            if generator_version:
                tmp.append(u' version="%s"' % escape(generator_version))
            tmp.append(u'>%s</generator>\n' % escape(generator_name))
            yield u''.join(tmp)
        for entry in self.entries:
            for line in entry.generate():
                yield u'  ' + line
        yield u'</feed>\n'

    def to_string(self):
        """Convert the feed into a string."""
        return u''.join(self.generate())

    def get_response(self):
        """Return a response object for the feed."""
        return BaseResponse(self.to_string(), mimetype='application/atom+xml')

    def __call__(self, environ, start_response):
        """Use the class as WSGI response object."""
        return self.get_response()(environ, start_response)

    def __str__(self):
        return self.to_string()
@implements_to_string
class FeedEntry(object):
"""Represents a single entry in a feed.
:param title: the title of the entry. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param content: the content of the entry.
:param content_type: the type attribute for the content element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param summary: a summary of the entry's content.
:param summary_type: the type attribute for the summary element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the entry.
:param id: a globally unique id for the entry. Must be an URI. If
not present the URL is used, but one of both is required.
:param updated: the time the entry was modified the last time. Must
be a :class:`datetime.datetime` object. Required.
:param author: the author of the entry. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if the feed does not have an
author element.
:param published: the time the entry was initially published. Must
be a :class:`datetime.datetime` object.
:param rights: copyright information for the entry.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param categories: categories for the entry. Must be a list of dictionaries
with term (required), scheme and label (all optional)
:param xml_base: The xml base (url) for this feed item. If not provided
it will default to the item url.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
def __init__(self, title=None, content=None, feed_url=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.content = content
self.content_type = kwargs.get('content_type', 'html')
self.url = kwargs.get('url')
self.id = kwargs.get('id', self.url)
self.updated = kwargs.get('updated')
self.summary = kwargs.get('summary')
self.summary_type = kwargs.get('summary_type', 'html')
self.author = kwargs.get('author', ())
self.published = kwargs.get('published')
self.rights = kwargs.get('rights')
self.links = kwargs.get('links', [])
self.categories = kwargs.get('categories', [])
self.xml_base = kwargs.get('xml_base', feed_url)
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, string_types + (dict,)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
if not self.updated:
raise ValueError('updated is required')
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.title
)
def generate(self):
"""Yields pieces of ATOM XML."""
base = ''
if self.xml_base:
base = ' xml:base="%s"' % escape(self.xml_base)
yield u'<entry%s>\n' % base
yield u' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.published:
yield u' <published>%s</published>\n' % \
format_iso8601(self.published)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield u' <email>%s</email>\n' % escape(author['email'])
yield u' </author>\n'
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(link[k])) for k in link)
for category in self.categories:
yield u' <category %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(category[k])) for k in category)
if self.summary:
yield u' ' + _make_text_block('summary', self.summary,
self.summary_type)
if self.content:
yield u' ' + _make_text_block('content', self.content,
self.content_type)
yield u'</entry>\n'
def to_string(self):
"""Convert the feed item into a unicode object."""
return u''.join(self.generate())
def __str__(self):
    # The string form of an entry is its complete serialized XML.
    return self.to_string()
|
kotfic/girder | refs/heads/master | tests/cases/py_client/cli_test.py | 4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import contextlib
import girder_client.cli
import logging
import mock
import os
import requests
import shutil
import sys
import six
import httmock
from girder import config
from girder.models.api_key import ApiKey
from girder.models.folder import Folder
from girder.models.item import Item
from girder.models.user import User
from girder_client.cli import GirderCli
from tests import base
from six.moves.http_client import HTTPConnection
from six import StringIO
# Point the client tests at the Girder test server's port before the
# config module is (re)loaded below.
os.environ['GIRDER_PORT'] = os.environ.get('GIRDER_TEST_PORT', '20200')
config.loadConfig()  # Must reload config to pickup correct port
@contextlib.contextmanager
def captureOutput():
    """Temporarily redirect stdout/stderr into in-memory buffers.

    Yields a two-element list.  Once the ``with`` block exits, the list's
    elements are replaced by the captured stdout and stderr text.
    """
    savedStdout = sys.stdout
    savedStderr = sys.stderr
    try:
        captured = [StringIO(), StringIO()]
        sys.stdout, sys.stderr = captured
        yield captured
    finally:
        sys.stdout = savedStdout
        sys.stderr = savedStderr
        # Swap the buffers for their final string values in place.
        for idx, buf in enumerate(captured):
            captured[idx] = buf.getvalue()
class SysExitException(Exception):
    """Raised by the mocked ``sys.exit`` so tests can intercept CLI exits."""
def invokeCli(argv, username='', password='', useApiUrl=False):
    """
    Invoke the Girder Python client CLI with a set of arguments.

    :param argv: iterable of CLI arguments (everything after the program name).
    :param username: if non-empty, passed through ``--username``.
    :param password: if non-empty, passed through ``--password``.
    :param useApiUrl: if True, address the server via ``--api-url`` instead
        of ``--port``.
    :returns: dict with keys ``exitVal``, ``stdout`` and ``stderr``.
    """
    if useApiUrl:
        apiUrl = 'http://localhost:%s/api/v1' % os.environ['GIRDER_PORT']
        argsList = ['girder-client', '--api-url', apiUrl]
    else:
        argsList = ['girder-client', '--port', os.environ['GIRDER_PORT']]
    if username:
        argsList += ['--username', username]
    if password:
        argsList += ['--password', password]
    argsList += list(argv)
    exitVal = 0
    # Run the CLI in-process: patch argv, make sys.exit raise instead of
    # terminating the test process, and capture stdout/stderr.
    with mock.patch.object(sys, 'argv', argsList),\
            mock.patch('sys.exit', side_effect=SysExitException) as exit,\
            captureOutput() as output:
        try:
            girder_client.cli.main()
        except SysExitException:
            # Recover the status code that was passed to sys.exit(), if any.
            args = exit.mock_calls[0][1]
            exitVal = args[0] if len(args) else 0
    return {
        'exitVal': exitVal,
        'stdout': output[0],
        'stderr': output[1]
    }
def setUpModule():
    # Enable any extra plugins requested through the environment, then start
    # the Girder test server once for the whole module.
    plugins = os.environ.get('ENABLED_PLUGINS', '')
    if plugins:
        base.enabledPlugins.extend(plugins.split())
    base.startServer(False)
def tearDownModule():
    # Stop the server started in setUpModule().
    base.stopServer()
class PythonCliTestCase(base.TestCase):
    """End-to-end tests for the girder-client command line interface."""

    def setUp(self):
        """Create a test user, grab their public folder and an API key."""
        base.TestCase.setUp(self)
        self.user = User().createUser(
            firstName='First', lastName='Last', login='mylogin',
            password='password', email='email@email.com')
        self.publicFolder = six.next(Folder().childFolders(
            parentType='user', parent=self.user, user=None, limit=1))
        self.apiKey = ApiKey().createApiKey(self.user, name='')
        self.downloadDir = os.path.join(
            os.path.dirname(__file__), '_testDownload')
        shutil.rmtree(self.downloadDir, ignore_errors=True)

    def tearDown(self):
        """Reset girder_client logging state and remove downloaded files."""
        logger = logging.getLogger('girder_client')
        logger.setLevel(logging.ERROR)
        logger.handlers = []
        shutil.rmtree(self.downloadDir, ignore_errors=True)
        base.TestCase.tearDown(self)

    def testUrlByPart(self):
        """Check URL construction from apiUrl vs. host/scheme/port parts."""
        # This test does NOT connect to the test server. It only checks that the
        # client object has the expected attributes.
        username = None
        password = None
        for case in [
            # Check that apiUrl is preferred
            {
                'input': {'apiUrl': 'https://girder.example.com:74/api/v74',
                          'host': 'foo', 'scheme': 'bar', 'port': 42,
                          'apiRoot': 'bar'},
                'expected': {
                    'urlBase': 'https://girder.example.com:74/api/v74/',
                    'host': None, 'scheme': None, 'port': None}
            },
            # Check different configuration of URL by part
            {
                'input': {},
                'expected': {
                    'urlBase': 'http://localhost:8080/api/v1/',
                    'host': 'localhost', 'scheme': 'http', 'port': 8080}
            },
            {
                'input': {'host': 'localhost'},
                'expected': {
                    'urlBase': 'http://localhost:8080/api/v1/',
                    'host': 'localhost', 'scheme': 'http', 'port': 8080}
            },
            {
                'input': {'port': 42},
                'expected': {
                    'urlBase': 'http://localhost:42/api/v1/',
                    'host': 'localhost', 'scheme': 'http', 'port': 42}
            },
            {
                'input': {'scheme': 'https'},
                'expected': {
                    'urlBase': 'https://localhost:443/api/v1/',
                    'host': 'localhost', 'scheme': 'https', 'port': 443}
            },
            {
                'input': {'host': 'girder.example.com'},
                'expected': {
                    'urlBase': 'https://girder.example.com:443/api/v1/',
                    'host': 'girder.example.com', 'scheme': 'https', 'port': 443}
            },
            {
                'input': {'host': 'girder.example.com', 'scheme': 'http'},
                'expected': {
                    'urlBase': 'http://girder.example.com:80/api/v1/',
                    'host': 'girder.example.com', 'scheme': 'http', 'port': 80}
            },
            {
                'input': {'host': 'localhost', 'port': 42},
                'expected': {
                    'urlBase': 'http://localhost:42/api/v1/',
                    'host': 'localhost', 'scheme': 'http', 'port': 42}
            },
            {
                'input': {'host': 'girder.example.com', 'port': 42},
                'expected': {
                    'urlBase': 'https://girder.example.com:42/api/v1/',
                    'host': 'girder.example.com', 'scheme': 'https', 'port': 42}
            },
            {
                'input': {'host': 'localhost', 'scheme': 'https'},
                'expected': {
                    'urlBase': 'https://localhost:443/api/v1/',
                    'host': 'localhost', 'scheme': 'https', 'port': 443}
            },
            {
                'input': {'host': 'girder.example.com', 'scheme': 'https'},
                'expected': {
                    'urlBase': 'https://girder.example.com:443/api/v1/',
                    'host': 'girder.example.com', 'scheme': 'https', 'port': 443}
            },
        ]:
            client = girder_client.cli.GirderCli(username, password, **case['input'])
            for attribute, value in case['expected'].items():
                self.assertEqual(getattr(client, attribute), value)

    def testCliHelp(self):
        """With no args the CLI fails; with -h it prints usage and exits 0."""
        ret = invokeCli(())
        self.assertNotEqual(ret['exitVal'], 0)
        ret = invokeCli(('-h',))
        self.assertIn('Usage: girder-client', ret['stdout'])
        self.assertEqual(ret['exitVal'], 0)

    def testUploadDownload(self):
        """Exercise upload/download/localsync and API-key authentication."""
        localDir = os.path.join(os.path.dirname(__file__), 'testdata')
        args = ['upload', str(self.publicFolder['_id']), localDir, '--parent-type=folder']
        # Without credentials (or with a bad API key) the upload must fail.
        with self.assertRaises(requests.HTTPError):
            invokeCli(args)
        with self.assertRaises(requests.HTTPError):
            invokeCli(['--api-key', '1234'] + args)
        # Test dry-run and blacklist options
        ret = invokeCli(
            args + ['--dry-run', '--blacklist=hello.txt'], username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)
        self.assertIn('Ignoring file hello.txt as it is blacklisted', ret['stdout'])
        # Test with multiple files in a dry-run
        ret = invokeCli([
            'upload', str(self.publicFolder['_id']), '--parent-type=folder',
            os.path.join(localDir, 'hello.txt'),
            os.path.join(localDir, 'world.txt'), '--dry-run'],
            username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)
        self.assertIn('Uploading Item from hello.txt', ret['stdout'])
        self.assertIn('Uploading Item from world.txt', ret['stdout'])
        # Actually upload the test data
        ret = invokeCli(args, username='mylogin', password='password', useApiUrl=True)
        self.assertEqual(ret['exitVal'], 0)
        six.assertRegex(
            self, ret['stdout'], 'Creating Folder from .*tests/cases/py_client/testdata')
        self.assertIn('Uploading Item from hello.txt', ret['stdout'])
        subfolder = six.next(Folder().childFolders(
            parent=self.publicFolder, parentType='folder', limit=1))
        self.assertEqual(subfolder['name'], 'testdata')
        items = list(Folder().childItems(folder=subfolder))
        toUpload = list(os.listdir(localDir))
        self.assertEqual(len(toUpload), len(items))
        downloadDir = os.path.join(os.path.dirname(localDir), '_testDownload')
        ret = invokeCli(('download', str(subfolder['_id']), downloadDir),
                        username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)
        for downloaded in os.listdir(downloadDir):
            if downloaded == '.girder_metadata':
                continue
            self.assertIn(downloaded, toUpload)
        # Download again to same location, we should not get errors
        ret = invokeCli(('download', str(subfolder['_id']), downloadDir),
                        username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)
        # Download again to same location, using path, we should not get errors
        ret = invokeCli(('download', '/user/mylogin/Public/testdata',
                         downloadDir), username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)
        # Test uploading with reference
        queryList = []

        @httmock.urlmatch(netloc='localhost', path='/api/v1/file$', method='POST')
        def checkParams(url, request):
            # Add query for every file upload request
            queryList.append(six.moves.urllib.parse.parse_qs(url[3]))

        with httmock.HTTMock(checkParams):
            ret = invokeCli(
                args + ['--reference', 'reference_string'], username='mylogin', password='password')
        # Test if reference is sent with each file upload
        fileList = os.listdir(localDir)
        self.assertTrue(queryList)
        self.assertTrue(fileList)
        self.assertEqual(len(queryList), len(fileList))
        for query in queryList:
            self.assertIn('reference', query)
            self.assertIn('reference_string', query['reference'])
        # Create a collection and subfolder
        resp = self.request('/collection', 'POST', user=self.user, params={
            'name': 'my_collection'
        })
        self.assertStatusOk(resp)
        resp = self.request('/folder', 'POST', user=self.user, params={
            'parentType': 'collection',
            'parentId': resp.json['_id'],
            'name': 'my_folder'
        })
        self.assertStatusOk(resp)
        # Test download of the collection
        ret = invokeCli(('download', '--parent-type=collection', '/collection/my_collection',
                         downloadDir), username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)
        self.assertTrue(os.path.isdir(os.path.join(downloadDir, 'my_folder')))
        shutil.rmtree(downloadDir, ignore_errors=True)
        # Test download of the collection auto-detecting parent-type
        ret = invokeCli(('download', '/collection/my_collection',
                         downloadDir), username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)
        self.assertTrue(os.path.isdir(os.path.join(downloadDir, 'my_folder')))
        shutil.rmtree(downloadDir, ignore_errors=True)
        # Test download of a user
        ret = invokeCli(('download', '--parent-type=user', '/user/mylogin',
                         downloadDir), username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)
        self.assertTrue(
            os.path.isfile(os.path.join(downloadDir, 'Public', 'testdata', 'hello.txt')))
        shutil.rmtree(downloadDir, ignore_errors=True)
        # Test download of a user auto-detecting parent-type
        ret = invokeCli(('download', '/user/mylogin',
                         downloadDir), username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)
        self.assertTrue(
            os.path.isfile(os.path.join(downloadDir, 'Public', 'testdata', 'hello.txt')))
        shutil.rmtree(downloadDir, ignore_errors=True)
        # Test download of an item
        items = list(Folder().childItems(folder=subfolder))
        item_id = items[0]['_id']
        item_name = items[0]['name']
        ret = invokeCli(('download', '--parent-type=item', '%s' % item_id,
                         downloadDir), username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)
        self.assertTrue(
            os.path.isfile(os.path.join(downloadDir, item_name)))
        shutil.rmtree(downloadDir, ignore_errors=True)
        # Test download of a file
        os.makedirs(downloadDir)
        items = list(Folder().childItems(folder=subfolder))
        file_name, file_doc = next(Item().fileList(items[0], data=False))
        ret = invokeCli(
            ('download', '--parent-type=file', '%s' % file_doc['_id'],
             os.path.join(downloadDir, file_name)),
            username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)
        self.assertTrue(
            os.path.isfile(os.path.join(downloadDir, file_name)))
        shutil.rmtree(downloadDir, ignore_errors=True)
        # Test download of an item auto-detecting parent-type
        ret = invokeCli(('download', '%s' % item_id,
                         downloadDir), username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)
        self.assertTrue(
            os.path.isfile(os.path.join(downloadDir, item_name)))
        shutil.rmtree(downloadDir, ignore_errors=True)

        def _check_upload(ret):
            # Shared assertions for a successful upload invocation.
            self.assertEqual(ret['exitVal'], 0)
            six.assertRegex(
                self, ret['stdout'],
                'Creating Folder from .*tests/cases/py_client/testdata')
            self.assertIn('Uploading Item from hello.txt', ret['stdout'])

        # Try uploading using API key
        _check_upload(invokeCli(['--api-key', self.apiKey['key']] + args))
        # Try uploading using API key set with GIRDER_API_KEY env. variable
        os.environ["GIRDER_API_KEY"] = self.apiKey['key']
        _check_upload(invokeCli(args))
        del os.environ["GIRDER_API_KEY"]
        # Test localsync, it shouldn't touch files on 2nd pass
        ret = invokeCli(('localsync', str(subfolder['_id']),
                         downloadDir), username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)
        old_mtimes = {}
        for fname in os.listdir(downloadDir):
            filename = os.path.join(downloadDir, fname)
            old_mtimes[fname] = os.path.getmtime(filename)
        ret = invokeCli(('localsync', str(subfolder['_id']),
                         downloadDir), username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)
        for fname in os.listdir(downloadDir):
            if fname == '.girder_metadata':
                continue
            filename = os.path.join(downloadDir, fname)
            self.assertEqual(os.path.getmtime(filename), old_mtimes[fname])
        # Check that localsync command do not show '--parent-type' option help
        ret = invokeCli(('localsync', '--help'))
        self.assertNotIn('--parent-type', ret['stdout'])
        self.assertEqual(ret['exitVal'], 0)
        # Check that localsync command still accepts '--parent-type' argument
        ret = invokeCli(('localsync', '--parent-type', 'folder', str(subfolder['_id']),
                         downloadDir), username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)

    def testLeafFoldersAsItems(self):
        """Leaf folders become items under --leaf-folders-as-items."""
        localDir = os.path.join(os.path.dirname(__file__), 'testdata')
        args = ['upload', str(self.publicFolder['_id']), localDir, '--leaf-folders-as-items']
        ret = invokeCli(args, username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)
        six.assertRegex(
            self, ret['stdout'], 'Creating Item from folder .*tests/cases/py_client/testdata')
        self.assertIn('Adding file world.txt', ret['stdout'])
        # Test re-use existing case
        args.append('--reuse')
        ret = invokeCli(args, username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)
        self.assertIn('File hello.txt already exists in parent Item', ret['stdout'])

    def testVerboseLoggingLevel0(self):
        """With no -v flags the girder_client logger stays at ERROR."""
        args = ['localsync', '--help']
        ret = invokeCli(args, username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)
        self.assertEqual(logging.getLogger('girder_client').level, logging.ERROR)

    def testVerboseLoggingLevel1(self):
        """A single -v raises the logger level to WARNING."""
        args = ['-v', 'localsync', '--help']
        ret = invokeCli(args, username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)
        self.assertEqual(logging.getLogger('girder_client').level, logging.WARNING)

    def testVerboseLoggingLevel2(self):
        """-vv raises the logger level to INFO."""
        args = ['-vv', 'localsync', '--help']
        ret = invokeCli(args, username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)
        self.assertEqual(logging.getLogger('girder_client').level, logging.INFO)

    def testVerboseLoggingLevel3(self):
        """-vvv enables DEBUG logging and HTTP wire-level debugging."""
        args = ['-vvv', 'localsync', '--help']
        ret = invokeCli(args, username='mylogin', password='password')
        self.assertEqual(ret['exitVal'], 0)
        self.assertEqual(logging.getLogger('girder_client').level, logging.DEBUG)
        self.assertEqual(HTTPConnection.debuglevel, 1)

    def testRetryUpload(self):
        """The retries option should configure the session's retry adapter."""
        gc = GirderCli('mylogin', 'password',
                       host='localhost', port=os.environ['GIRDER_PORT'],
                       retries=5)

        def checkRetryHandler(*args, **kwargs):
            # Inspect the requests session mounted for the API URL.
            session = gc._session
            self.assertIsNotNone(session)
            self.assertIn(gc.urlBase, session.adapters)
            adapter = session.adapters[gc.urlBase]
            self.assertEqual(adapter.max_retries.total, 5)

        with mock.patch('girder_client.cli.GirderClient.sendRestRequest',
                        side_effect=checkRetryHandler) as m:
            gc.sendRestRequest('')
            self.assertTrue(m.called)
|
Dhivyap/ansible | refs/heads/devel | lib/ansible/modules/cloud/vmware/vmware_guest_info.py | 19 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This module is also sponsored by E.T.A.I. (www.etai.fr)
# Copyright (C) 2018 James E. King III (@jeking3) <jking@apache.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_guest_info
short_description: Gather info about a single VM
description:
- Gather information about a single VM on a VMware ESX cluster.
- This module was called C(vmware_guest_facts) before Ansible 2.9. The usage did not change.
version_added: 2.3
author:
- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
notes:
- Tested on vSphere 5.5, 6.7
requirements:
- "python >= 2.6"
- PyVmomi
options:
name:
description:
- Name of the VM to work with
- This is required if C(uuid) or C(moid) is not supplied.
type: str
name_match:
description:
- If multiple VMs match the name, use the first or last one found
default: 'first'
choices: ['first', 'last']
type: str
uuid:
description:
- UUID of the instance to manage if known, this is VMware's unique identifier.
- This is required if C(name) or C(moid) is not supplied.
type: str
use_instance_uuid:
description:
- Whether to use the VMware instance UUID rather than the BIOS UUID.
default: no
type: bool
version_added: '2.8'
moid:
description:
- Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
- This is required if C(name) or C(uuid) is not supplied.
version_added: '2.9'
type: str
folder:
description:
- Destination folder, absolute or relative path to find an existing guest.
- This is required if name is supplied.
- The folder should include the datacenter. ESX's datacenter is ha-datacenter
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
type: str
datacenter:
description:
- The datacenter name to which the virtual machine belongs.
required: True
type: str
tags:
description:
- Whether to show tags or not.
- If set C(True), shows tag information.
- If set C(False), hides tags information.
- vSphere Automation SDK and vCloud Suite SDK is required.
default: 'no'
type: bool
version_added: '2.8'
schema:
description:
- Specify the output schema desired.
- The 'summary' output schema is the legacy output from the module
- The 'vsphere' output schema is the vSphere API class definition
which requires pyvmomi>6.7.1
choices: ['summary', 'vsphere']
default: 'summary'
type: str
version_added: '2.8'
properties:
description:
- Specify the properties to retrieve.
- If not specified, all properties are retrieved (deeply).
- Results are returned in a structure identical to the vsphere API.
- 'Example:'
- ' properties: ['
- ' "config.hardware.memoryMB",'
- ' "config.hardware.numCPU",'
- ' "guest.disk",'
- ' "overallStatus"'
- ' ]'
- Only valid when C(schema) is C(vsphere).
type: list
required: False
version_added: '2.8'
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Gather info from standalone ESXi server having datacenter as 'ha-datacenter'
vmware_guest_info:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
datacenter: ha-datacenter
validate_certs: no
uuid: 421e4592-c069-924d-ce20-7e7533fab926
delegate_to: localhost
register: info
- name: Gather some info from a guest using the vSphere API output schema
vmware_guest_info:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
datacenter: "{{ datacenter_name }}"
name: "{{ vm_name }}"
schema: "vsphere"
properties: ["config.hardware.memoryMB", "guest.disk", "overallStatus"]
delegate_to: localhost
register: info
- name: Gather some information about a guest using MoID
vmware_guest_info:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
datacenter: "{{ datacenter_name }}"
moid: vm-42
schema: "vsphere"
properties: ["config.hardware.memoryMB", "guest.disk", "overallStatus"]
delegate_to: localhost
register: vm_moid_info
- name: Gather Managed object ID (moid) from a guest using the vSphere API output schema for REST Calls
vmware_guest_info:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
datacenter: "{{ datacenter_name }}"
name: "{{ vm_name }}"
schema: "vsphere"
properties:
- _moId
delegate_to: localhost
register: moid_info
'''
RETURN = """
instance:
description: metadata about the virtual machine
returned: always
type: dict
sample: {
"annotation": "",
"current_snapshot": null,
"customvalues": {},
"guest_consolidation_needed": false,
"guest_question": null,
"guest_tools_status": "guestToolsNotRunning",
"guest_tools_version": "10247",
"hw_cores_per_socket": 1,
"hw_datastores": [
"ds_226_3"
],
"hw_esxi_host": "10.76.33.226",
"hw_eth0": {
"addresstype": "assigned",
"ipaddresses": null,
"label": "Network adapter 1",
"macaddress": "00:50:56:87:a5:9a",
"macaddress_dash": "00-50-56-87-a5-9a",
"portgroup_key": null,
"portgroup_portkey": null,
"summary": "VM Network"
},
"hw_files": [
"[ds_226_3] ubuntu_t/ubuntu_t.vmx",
"[ds_226_3] ubuntu_t/ubuntu_t.nvram",
"[ds_226_3] ubuntu_t/ubuntu_t.vmsd",
"[ds_226_3] ubuntu_t/vmware.log",
"[ds_226_3] u0001/u0001.vmdk"
],
"hw_folder": "/DC0/vm/Discovered virtual machine",
"hw_guest_full_name": null,
"hw_guest_ha_state": null,
"hw_guest_id": null,
"hw_interfaces": [
"eth0"
],
"hw_is_template": false,
"hw_memtotal_mb": 1024,
"hw_name": "ubuntu_t",
"hw_power_status": "poweredOff",
"hw_processor_count": 1,
"hw_product_uuid": "4207072c-edd8-3bd5-64dc-903fd3a0db04",
"hw_version": "vmx-13",
"instance_uuid": "5007769d-add3-1e12-f1fe-225ae2a07caf",
"ipv4": null,
"ipv6": null,
"module_hw": true,
"snapshots": [],
"tags": [
"backup"
],
"vnc": {},
"moid": "vm-42",
"vimref": "vim.VirtualMachine:vm-42"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
from ansible.module_utils.vmware_rest_client import VmwareRestClient
try:
    # Optional dependency: the vSphere Automation SDK, needed only when tag
    # information is requested (tags: true).
    # NOTE(review): DynamicID itself is unused in this file; the import
    # doubles as an availability probe for the SDK.
    from com.vmware.vapi.std_client import DynamicID
    HAS_VSPHERE = True
except ImportError:
    HAS_VSPHERE = False
class VmwareTag(VmwareRestClient):
    """Thin wrapper over VmwareRestClient exposing the tagging services."""

    def __init__(self, module):
        """Connect to vCenter and cache the tagging service endpoints."""
        super(VmwareTag, self).__init__(module)
        tagging = self.api_client.tagging
        self.tag_service = tagging.Tag
        self.tag_association_svc = tagging.TagAssociation
def main():
    """Module entry point: parse arguments, locate the VM, return its info."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        name_match=dict(type='str', choices=['first', 'last'], default='first'),
        uuid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        moid=dict(type='str'),
        folder=dict(type='str'),
        datacenter=dict(type='str', required=True),
        tags=dict(type='bool', default=False),
        schema=dict(type='str', choices=['summary', 'vsphere'], default='summary'),
        properties=dict(type='list')
    )
    # Exactly one of name/uuid/moid is needed to identify the VM.
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=[['name', 'uuid', 'moid']],
                           supports_check_mode=True)
    # Emit a deprecation warning when invoked under the legacy module name.
    if module._name == 'vmware_guest_facts':
        module.deprecate("The 'vmware_guest_facts' module has been renamed to 'vmware_guest_info'", version='2.13')
    if module.params.get('folder'):
        # FindByInventoryPath() does not require an absolute path
        # so we should leave the input folder path unmodified
        module.params['folder'] = module.params['folder'].rstrip('/')
    # 'properties' only makes sense with the raw vSphere output schema.
    if module.params['schema'] != 'vsphere' and module.params.get('properties'):
        module.fail_json(msg="The option 'properties' is only valid when the schema is 'vsphere'")
    pyv = PyVmomi(module)
    # Check if the VM exists before continuing
    vm = pyv.get_vm()
    # VM already exists
    if vm:
        try:
            if module.params['schema'] == 'summary':
                instance = pyv.gather_facts(vm)
            else:
                instance = pyv.to_json(vm, module.params['properties'])
            if module.params.get('tags'):
                # Tag retrieval needs the optional vSphere Automation SDK.
                if not HAS_VSPHERE:
                    module.fail_json(msg="Unable to find 'vCloud Suite SDK' Python library which is required."
                                         " Please refer this URL for installation steps"
                                         " - https://code.vmware.com/web/sdk/60/vcloudsuite-python")
                vm_rest_client = VmwareTag(module)
                instance.update(
                    tags=vm_rest_client.get_vm_tags(vm_rest_client.tag_service,
                                                    vm_rest_client.tag_association_svc,
                                                    vm_mid=vm._moId)
                )
            module.exit_json(instance=instance)
        except Exception as exc:
            module.fail_json(msg="Information gathering failed with exception %s" % to_text(exc))
    else:
        # Report whichever identifier the caller supplied.
        vm_id = (module.params.get('uuid') or module.params.get('name') or module.params.get('moid'))
        module.fail_json(msg="Unable to gather information for non-existing VM %s" % vm_id)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
westernx/vee | refs/heads/master | tests/test_build_types.py | 1 | from . import *
class TestBuildTypes(TestCase):
    """Install each mock package build type and verify the built artifacts."""

    def test_static_file(self):
        """A static_file package should install its file verbatim."""
        pkg = MockPackage('scheme_static', 'static_file', {'PATH': 'etc/scheme_static'})
        pkg.render_commit()
        vee(['install', sandbox('packages/scheme_static'), '--install-name', 'scheme_static/1.0.0'])
        self.assertExists(sandbox('vee/installs/scheme_static/1.0.0/etc/scheme_static'))

    def assert_echo(self, type, do_call=True):
        """Install a package of the given build type; optionally run its
        executable and assert it prints '<name>:1'."""
        name = 'scheme_' + type
        pkg = MockPackage(name, type)
        pkg.render_commit()
        vee(['install', pkg.path, '--install-name', '%s/1.0.0' % name])
        exe = sandbox('vee/installs/%s/1.0.0/bin/%s' % (name, name))
        self.assertExists(exe)
        if do_call:
            # Jumping through a bit of a hoop here to see the output.
            out = []
            try:
                call(['vee', 'exec', '-R', pkg.path, name], stdout=out.append)
            except:
                # NOTE(review): bare except deliberately dumps the captured
                # output before re-raising the original error.
                print ''.join(out)
                raise
            self.assertEqual(''.join(out).strip(), '%s:1' % name)

    def test_make(self):
        self.assert_echo('c_make')

    def test_configure_make_install(self):
        self.assert_echo('c_configure_make_install')

    def test_self(self):
        self.assert_echo('self')

    def test_python_source(self):
        self.assert_echo('python_source')
        # TODO: arbitrary data.
        # TODO: both scripts and console_scripts entrypoints.

    def test_python_sdist(self):
        pkg = MockPackage('scheme_python_sdist', 'python_sdist')
        pkg.render_commit()
        vee(['install', sandbox('packages/scheme_python_sdist'), '--install-name', 'scheme_python_sdist/1.0.0'])
        self.assertExists(sandbox('vee/installs/scheme_python_sdist/1.0.0/lib/python2.7/site-packages/scheme_python_sdist/__init__.py'))
        # TODO: arbitrary data.
        # TODO: scripts and console_scripts entrypoints.
        # self.assertExists(sandbox('vee/installs/scheme_py_egg/1.0.0/bin/scheme_py_egg'))
        # self.assertExists(sandbox('vee/installs/scheme_py_egg/1.0.0/bin/scheme_py_egg-ep')

    def test_python_bdist(self):
        pkg = MockPackage('scheme_python_bdist', 'python_bdist')
        pkg.render_commit()
        vee(['install', sandbox('packages/scheme_python_bdist'), '--install-name', 'scheme_python_bdist/1.0.0'])
        self.assertExists(sandbox('vee/installs/scheme_python_bdist/1.0.0/lib/python2.7/site-packages/scheme_python_bdist/__init__.py'))
        # TODO: arbitrary data.
        # TODO: scripts and console_scripts entrypoints.
        # self.assertExists(sandbox('vee/installs/scheme_py_egg/1.0.0/bin/scheme_py_egg'))
        # self.assertExists(sandbox('vee/installs/scheme_py_egg/1.0.0/bin/scheme_py_egg-ep')

    def test_python_bdist_egg(self):
        return  # This one doesn't work.
        # NOTE(review): everything below is currently unreachable.
        pkg = MockPackage('scheme_python_bdist_egg', 'python_bdist_egg')
        pkg.render_commit()
        vee(['install', sandbox('packages/scheme_python_bdist_egg'), '--install-name', 'scheme_python_bdist_egg/1.0.0'])
        self.assertExists(sandbox('vee/installs/scheme_python_bdist_egg/1.0.0/lib/python2.7/site-packages/scheme_python_bdist_egg/__init__.py'))
        # TODO: arbitrary data.
        # TODO: scripts and console_scripts entrypoints:
        # self.assertExists(sandbox('vee/installs/scheme_python_bdist_wheel/1.0.0/bin/scheme_python_bdist_wheel'))
        # self.assertExists(sandbox('vee/installs/scheme_python_bdist_wheel/1.0.0/bin/scheme_python_bdist_wheel-ep'))

    def test_python_bdist_wheel(self):
        pkg = MockPackage('scheme_python_bdist_wheel', 'python_bdist_wheel')
        pkg.render_commit()
        vee(['install', sandbox('packages/scheme_python_bdist_wheel'), '--install-name', 'scheme_python_bdist_wheel/1.0.0'])
        self.assertExists(sandbox('vee/installs/scheme_python_bdist_wheel/1.0.0/lib/python2.7/site-packages/scheme_python_bdist_wheel/__init__.py'))
        # TODO: arbitrary data.
        # TODO: scripts and console_scripts entrypoints:
        # self.assertExists(sandbox('vee/installs/scheme_python_bdist_wheel/1.0.0/bin/scheme_python_bdist_wheel'))
        # self.assertExists(sandbox('vee/installs/scheme_python_bdist_wheel/1.0.0/bin/scheme_python_bdist_wheel-ep'))
|
mtlchun/edx | refs/heads/master | cms/djangoapps/contentstore/management/commands/__init__.py | 12133432 | |
googleapis/python-datacatalog | refs/heads/master | samples/v1beta1/__init__.py | 12133432 | |
asutoshpalai/distiller | refs/heads/master | server/__init__.py | 12133432 | |
richgieg/flask-now | refs/heads/master | migrations/versions/14bd9687d5b3_init_db.py | 1 | """Init db
Revision ID: 14bd9687d5b3
Revises: None
Create Date: 2015-11-01 21:44:01.777934
"""
# revision identifiers, used by Alembic.
revision = '14bd9687d5b3'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the initial schema: log_event_types, roles, users, log_events."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('log_event_types',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=64), nullable=True),
        sa.Column('context', sa.String(length=64), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_table('roles',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=64), nullable=True),
        sa.Column('default', sa.Boolean(), nullable=True),
        sa.Column('permissions', sa.Integer(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_index(op.f('ix_roles_default'), 'roles', ['default'], unique=False)
    # users references roles, so it must be created after roles.
    op.create_table('users',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('email', sa.String(length=64), nullable=True),
        sa.Column('username', sa.String(length=64), nullable=True),
        sa.Column('role_id', sa.Integer(), nullable=True),
        sa.Column('password_hash', sa.String(length=128), nullable=True),
        sa.Column('_confirmed', sa.Boolean(), nullable=True),
        sa.Column('name', sa.String(length=64), nullable=True),
        sa.Column('location', sa.String(length=64), nullable=True),
        sa.Column('about_me', sa.Text(), nullable=True),
        sa.Column('member_since', sa.DateTime(), nullable=True),
        sa.Column('last_seen', sa.DateTime(), nullable=True),
        sa.Column('avatar_hash', sa.String(length=32), nullable=True),
        sa.Column('auth_token', sa.String(length=128), nullable=True),
        sa.Column('last_failed_login_attempt', sa.DateTime(), nullable=True),
        sa.Column('failed_login_attempts', sa.Integer(), nullable=True),
        sa.Column('_locked', sa.Boolean(), nullable=True),
        sa.Column('_enabled', sa.Boolean(), nullable=True),
        sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_users_auth_token'), 'users', ['auth_token'], unique=True)
    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
    op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
    # log_events references both users and log_event_types, so it comes last.
    op.create_table('log_events',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('type_id', sa.Integer(), nullable=True),
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.Column('ip_address', sa.String(length=48), nullable=True),
        sa.Column('logged_at', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['type_id'], ['log_event_types.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop everything created by upgrade(), child tables before parents."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('log_events')
    op.drop_index(op.f('ix_users_username'), table_name='users')
    op.drop_index(op.f('ix_users_email'), table_name='users')
    op.drop_index(op.f('ix_users_auth_token'), table_name='users')
    op.drop_table('users')
    op.drop_index(op.f('ix_roles_default'), table_name='roles')
    op.drop_table('roles')
    op.drop_table('log_event_types')
    ### end Alembic commands ###
|
krother/maze_run | refs/heads/master | leftovers/chapter08_load_tile_positions.py | 1 |
# Chapter 08 - Organizing code
# Cleaned code for loading tiles
import csv
import os
from pygame.rect import Rect
CONFIG_PATH = os.path.split(__file__)[0]
TILE_POSITION_FILE = os.path.join(CONFIG_PATH, 'tiles.txt')
TILE_IMAGE_FILE = os.path.join(CONFIG_PATH, '../images/tiles.xpm')
SIZE = 32
def load_tile_positions(filename):
    """Return a dictionary {name: Rect, ...} parsed from a tab-separated file.

    Each data row is expected to contain a tile name plus integer grid
    coordinates (x, y); the coordinates are scaled by SIZE into a pygame
    Rect of SIZE x SIZE pixels. Rows starting with 'REMARK' are comments.
    Blank lines are skipped (previously they raised IndexError on row[0]).
    """
    tile_positions = {}
    with open(filename) as f:
        for row in csv.reader(f, delimiter='\t'):
            # Skip empty rows and REMARK comment rows.
            if not row or row[0].startswith('REMARK'):
                continue
            name = row[0]
            x = int(row[1])
            y = int(row[2])
            tile_positions[name] = Rect(x * SIZE, y * SIZE, SIZE, SIZE)
    return tile_positions
if __name__ == '__main__':
    # Smoke test: parse the tile position table and print the resulting mapping.
    tile_positions = load_tile_positions(TILE_POSITION_FILE)
    print(tile_positions)
|
attilahorvath/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/filter_unittest.py | 124 | # Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for filter.py."""
import unittest2 as unittest
from filter import _CategoryFilter as CategoryFilter
from filter import validate_filter_rules
from filter import FilterConfiguration
# On Testing __eq__() and __ne__():
#
# In the tests below, we deliberately do not use assertEqual() or
# assertNotEquals() to test __eq__() or __ne__(). We do this to be
# very explicit about what we are testing, especially in the case
# of assertNotEquals().
#
# Part of the reason is that it is not immediately clear what
# expression the unittest module uses to assert "not equals" -- the
# negation of __eq__() or __ne__(), which are not necessarily
# equivalent expresions in Python. For example, from Python's "Data
# Model" documentation--
#
# "There are no implied relationships among the comparison
# operators. The truth of x==y does not imply that x!=y is
# false. Accordingly, when defining __eq__(), one should
# also define __ne__() so that the operators will behave as
# expected."
#
# (from http://docs.python.org/reference/datamodel.html#object.__ne__ )
class ValidateFilterRulesTest(unittest.TestCase):
    """Exercises the validate_filter_rules() function."""

    def test_validate_filter_rules(self):
        """Malformed rules raise ValueError; well-formed rules pass silently."""
        categories = ["tabs", "whitespace", "build/include"]
        rejected = (
            "tabs",
            "*tabs",
            " tabs",
            " +tabs",
            "+whitespace/newline",
            "+xxx",
        )
        accepted = ("+tabs", "-tabs", "+build")

        for bad_rule in rejected:
            self.assertRaises(ValueError, validate_filter_rules,
                              [bad_rule], categories)

        for good_rule in accepted:
            # Must not raise.
            validate_filter_rules([good_rule], categories)
class CategoryFilterTest(unittest.TestCase):
    """Tests CategoryFilter class."""
    # NOTE: locals were renamed from 'filter' to 'cat_filter' -- 'filter'
    # shadows the Python builtin (and collides with the module under test).

    def test_init(self):
        """Test __init__ method."""
        # Test that the attributes are getting set correctly.
        cat_filter = CategoryFilter(["+"])
        self.assertEqual(["+"], cat_filter._filter_rules)

    def test_init_default_arguments(self):
        """Test __init__ method default arguments."""
        cat_filter = CategoryFilter()
        self.assertEqual([], cat_filter._filter_rules)

    def test_str(self):
        """Test __str__ "to string" operator."""
        cat_filter = CategoryFilter(["+a", "-b"])
        self.assertEqual(str(cat_filter), "+a,-b")

    def test_eq(self):
        """Test __eq__ equality function."""
        filter1 = CategoryFilter(["+a", "+b"])
        filter2 = CategoryFilter(["+a", "+b"])
        filter3 = CategoryFilter(["+b", "+a"])

        # See the notes at the top of this module about testing
        # __eq__() and __ne__().
        self.assertTrue(filter1.__eq__(filter2))
        # Rule order is significant, so a reordered list is not equal.
        self.assertFalse(filter1.__eq__(filter3))

    def test_ne(self):
        """Test __ne__ inequality function."""
        # By default, __ne__ always returns true on different objects.
        # Thus, just check the distinguishing case to verify that the
        # code defines __ne__.
        #
        # Also, see the notes at the top of this module about testing
        # __eq__() and __ne__().
        self.assertFalse(CategoryFilter().__ne__(CategoryFilter()))

    def test_should_check(self):
        """Test should_check() method."""
        cat_filter = CategoryFilter()
        self.assertTrue(cat_filter.should_check("everything"))
        # Check a second time to exercise cache.
        self.assertTrue(cat_filter.should_check("everything"))

        cat_filter = CategoryFilter(["-"])
        self.assertFalse(cat_filter.should_check("anything"))
        # Check a second time to exercise cache.
        self.assertFalse(cat_filter.should_check("anything"))

        # Later rules take precedence; rules match by category prefix.
        cat_filter = CategoryFilter(["-", "+ab"])
        self.assertTrue(cat_filter.should_check("abc"))
        self.assertFalse(cat_filter.should_check("a"))

        cat_filter = CategoryFilter(["+", "-ab"])
        self.assertFalse(cat_filter.should_check("abc"))
        self.assertTrue(cat_filter.should_check("a"))
class FilterConfigurationTest(unittest.TestCase):
    """Unit tests for the FilterConfiguration class."""

    def _config(self, base_rules, path_specific, user_rules):
        """Build a FilterConfiguration from the three rule sources."""
        return FilterConfiguration(base_rules=base_rules,
                                   path_specific=path_specific,
                                   user_rules=user_rules)

    def test_init(self):
        """Test __init__ method."""
        # Use values different from the defaults so we can tell the
        # attributes were actually stored.
        base = ["-"]
        per_path = [(["path"], ["+a"])]
        user = ["+"]
        config = self._config(base, per_path, user)
        self.assertEqual(base, config._base_rules)
        self.assertEqual(per_path, config._path_specific)
        self.assertEqual(user, config._user_rules)

    def test_default_arguments(self):
        """A bare FilterConfiguration starts with empty rule lists."""
        config = FilterConfiguration()
        self.assertEqual([], config._base_rules)
        self.assertEqual([], config._path_specific)
        self.assertEqual([], config._user_rules)

    def test_eq(self):
        """Test __eq__ method."""
        # See the notes at the top of this module about testing
        # __eq__() and __ne__().
        self.assertTrue(FilterConfiguration().__eq__(FilterConfiguration()))

        # Changing any single constructor argument must break equality.
        default_config = FilterConfiguration()
        self.assertFalse(default_config.__eq__(FilterConfiguration(
            base_rules=["-"])))
        self.assertFalse(default_config.__eq__(FilterConfiguration(
            path_specific=[(["path"], ["+a"])])))
        self.assertFalse(default_config.__eq__(FilterConfiguration(
            user_rules=["+"])))

    def test_ne(self):
        """Test __ne__ method."""
        # By default, __ne__ always returns true on different objects,
        # so the interesting case is two distinct-but-equal instances.
        #
        # Also, see the notes at the top of this module about testing
        # __eq__() and __ne__().
        self.assertFalse(FilterConfiguration().__ne__(FilterConfiguration()))

    def test_base_rules(self):
        """base_rules alone decide should_check() when nothing else matches."""
        config = self._config(["-b"], [], [])
        self.assertTrue(config.should_check("a", "path"))
        self.assertFalse(config.should_check("b", "path"))

    def test_path_specific(self):
        """Path-specific rules override base rules for matching paths."""
        per_path = [(["path1"], ["+b"]),
                    (["path2"], ["+c"])]
        config = self._config(["-"], per_path, [])
        self.assertFalse(config.should_check("c", "path1"))
        self.assertTrue(config.should_check("c", "path2"))
        # The first matching path specifier wins.
        self.assertFalse(config.should_check("c", "path2/path1"))

    def test_path_with_different_case(self):
        """Path matching ignores case differences."""
        config = self._config(["-"], [(["Foo/"], ["+whitespace"])], [])
        self.assertFalse(config.should_check("whitespace", "Fooo/bar.txt"))
        self.assertTrue(config.should_check("whitespace", "Foo/bar.txt"))
        # A differently-cased path still matches.
        self.assertTrue(config.should_check("whitespace", "FOO/bar.txt"))

    def test_user_rules(self):
        """user_rules override the base rules."""
        config = self._config(["-"], [], ["+b"])
        self.assertFalse(config.should_check("a", "path"))
        self.assertTrue(config.should_check("b", "path"))
|
StrellaGroup/erpnext | refs/heads/develop | erpnext/restaurant/doctype/restaurant_reservation/test_restaurant_reservation.py | 26 | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestRestaurantReservation(unittest.TestCase):
    # Placeholder test case for the Restaurant Reservation doctype;
    # no unit tests have been implemented yet.
    pass
|
RafaelTorrealba/odoo | refs/heads/8.0 | addons/account_voucher/invoice.py | 382 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class invoice(osv.osv):
    _inherit = 'account.invoice'

    def invoice_pay_customer(self, cr, uid, ids, context=None):
        """Return an act_window opening the 'Pay Invoice' voucher dialog
        for the first invoice in ids (empty list if ids is empty)."""
        if not ids:
            return []
        dummy, view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account_voucher', 'view_vendor_receipt_dialog_form')

        inv = self.browse(cr, uid, ids[0], context=context)
        # Refunds are paid out with a negated residual amount.
        default_amount = inv.type in ('out_refund', 'in_refund') and -inv.residual or inv.residual
        # Customer documents use a receipt voucher, supplier ones a payment voucher.
        voucher_type = inv.type in ('out_invoice', 'out_refund') and 'receipt' or 'payment'
        # Payments are booked against the commercial (accounting) partner.
        partner = self.pool.get('res.partner')._find_accounting_partner(inv.partner_id)
        return {
            'name':_("Pay Invoice"),
            'view_mode': 'form',
            'view_id': view_id,
            'view_type': 'form',
            'res_model': 'account.voucher',
            'type': 'ir.actions.act_window',
            'nodestroy': True,
            'target': 'new',
            'domain': '[]',
            'context': {
                'payment_expected_currency': inv.currency_id.id,
                'default_partner_id': partner.id,
                'default_amount': default_amount,
                'default_reference': inv.name,
                'close_after_process': True,
                'invoice_type': inv.type,
                'invoice_id': inv.id,
                'default_type': voucher_type,
                'type': voucher_type
            }
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
tcmitchell/geni-tools | refs/heads/develop | src/gcf/omnilib/stitchhandler.py | 3 | #!/usr/bin/env python
from __future__ import absolute_import
#----------------------------------------------------------------------
# Copyright (c) 2013-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
'''Main stitching workhorse. Handles calling the stitching service, orchestrating
parsing RSpecs and creating objects. See doStitching().'''
import copy
import datetime
import json
import logging
import os
import string
import sys
import time
from .. import oscript as omni
from .util import OmniError, naiveUTC
from .util import credparsing as credutils
from .util.files import readFile
from .util import handler_utils
from .util.json_encoding import DateTimeAwareJSONEncoder
from . import stitch
from .stitch import defs
from .stitch.ManifestRSpecCombiner import combineManifestRSpecs
from .stitch.objects import Aggregate, Link, Node, LinkProperty
from .stitch.RSpecParser import RSpecParser
from .stitch import scs
from .stitch.workflow import WorkflowParser
from .stitch.utils import StitchingError, StitchingCircuitFailedError, stripBlankLines, isRSpecStitchingSchemaV2, prependFilePrefix, StitchingStoppedError
from .stitch.VLANRange import *
from ..geni.util import rspec_schema
from ..geni.util.rspec_util import is_rspec_string, is_rspec_of_type, rspeclint_exists, validate_rspec
from ..geni.util.urn_util import URN, urn_to_string_format
from ..sfa.trust import gid
from ..sfa.util.xrn import urn_to_hrn, get_leaf
DCN_AM_TYPE = 'dcn' # geni_am_type value from AMs that use the DCN codebase
ORCA_AM_TYPE = 'orca' # geni_am_type value from AMs that use the Orca codebase
PG_AM_TYPE = 'protogeni' # geni_am_type / am_type from ProtoGENI based AMs
GRAM_AM_TYPE = 'gram' # geni_am_type value from AMs that use the GRAM codebase
FOAM_AM_TYPE = 'foam' # geni_am_type value from some AMs that use the FOAM codebase
OESS_AM_TYPE = 'oess' # geni_am_type value from AMs that use the OESS codebase
# Max # of times to call the stitching service
MAX_SCS_CALLS = 5
# File in which we save the slice cred so omni calls don't have to keep re-fetching it
# Valid substitutions: %username, %slicename, %slicehrn
SLICECRED_FILENAME = 'slice-%slicehrn-for-%username-cred.xml'
def urn_to_clean_hrn( urn ):
    """Convert a URN into a (sanitized HRN, type) pair.

    The HRN is passed through handler_utils.remove_bad_characters() so it
    is safe to use in filenames."""
    raw_hrn, urn_type = urn_to_hrn( urn )
    return handler_utils.remove_bad_characters( raw_hrn ), urn_type
# The main stitching class. Holds all the state about our attempt at doing stitching.
class StitchingHandler(object):
'''Workhorse class to do stitching. See doStitching().'''
def __init__(self, opts, config, logger):
    """Initialize handler state, load the Omni framework, and compute the
    overall run timeout.

    opts: parsed command line options
    config: omni configuration dict; gains 'logger' and 'timeoutTime' entries
    logger: logger used for all stitcher output
    """
    self.logger = logger
    config['logger'] = logger
    self.omni_config = config['omni']
    self.config = config
    self.parsedSCSRSpec = None
    self.lastException = None
    self.ams_to_process = []
    self.opts = opts # command line options as parsed
    self.slicecred = None # Cached slice credential to avoid re-fetching
    self.savedSliceCred = None # path to file with slice cred if any
    self.parsedURNNewAggs = [] # Aggs added from parsed URNs

    # Get the framework
    if not self.opts.debug:
        # First, suppress all but WARN+ messages on console
        # (framework loading is chatty; the original level is restored below).
        lvl = logging.INFO
        handlers = logger.handlers
        if len(handlers) == 0:
            handlers = logging.getLogger().handlers
        for handler in handlers:
            if isinstance(handler, logging.StreamHandler):
                lvl = handler.level
                handler.setLevel(logging.WARN)
                break

    self.framework = omni.load_framework(self.config, self.opts)

    if not self.opts.debug:
        # Restore the console handler to its pre-load level.
        handlers = logger.handlers
        if len(handlers) == 0:
            handlers = logging.getLogger().handlers
        for handler in handlers:
            if isinstance(handler, logging.StreamHandler):
                handler.setLevel(lvl)
                break

    # FIXME: How many times is right to go back to the SCS
    self.maxSCSCalls = MAX_SCS_CALLS

    # Remember we got the extra info for this AM
    self.amURNsAddedInfo = []

    # --timeout 0 means run forever; otherwise timeout is in minutes from now (UTC).
    if self.opts.timeout == 0:
        self.config['timeoutTime'] = datetime.datetime.max
        self.logger.debug("Requested no timeout for stitcher.")
    else:
        self.config['timeoutTime'] = datetime.datetime.utcnow() + datetime.timedelta(minutes=self.opts.timeout)
        self.logger.debug("Stitcher run will timeout at %s UTC.", self.config['timeoutTime'])
def doStitching(self, args):
    '''Main stitching function.

    args is the Omni-style argument list: [command, slicename, rspec, ...].
    Returns (message string, combined manifest RSpec) on success; hands
    non-stitching commands off to Omni. May call sys.exit().
    '''
    # Parse the commandline args
    # Hand off to omni if this is not a command stitcher handles
    # Parse the request rspec
    # Check if this request is bound, multiAM, uses GRE links, includes stitched links
    # If the request is not a bound multi-AM RSpec, hand off to Omni
    #  - ensure the -a args are set to match the RSpec
    # Check this stitching request is safe, and we have a valid slice
    # Create the SCS instance if needed
    # Then call mainStitchingLoop() to do the real work of calling the SCS and then
    # getting each aggregate to make a reservation.
    # On keyboard interrupt and delete any partial reservation
    # On success, create and save the combined manifest RSpec, and
    # pull out summary resource expiration information and a summary of the run,
    # and return (pretty string, combined manifest rspec)
    # On error, log something appropriate and exit
    # Always be sure to clean up temporary files

    # Parse the commandline args
    # Hand off to omni if this is not a command stitcher handles

    # Get request RSpec
    request = None
    command = None
    self.slicename = None
    if len(args) > 0:
        command = args[0]
    if len(args) > 1:
        self.slicename = args[1]

    if command and command.strip().lower() in ('describe', 'listresources', 'delete', 'deletesliver') and self.slicename:
        if (not self.opts.aggregate or len(self.opts.aggregate) == 0) and not self.opts.useSliceAggregates:
            self.addAggregateOptions(args)
            if not self.opts.aggregate or len(self.opts.aggregate) == 0:
                # Call the CH to get AMs in this slice
                oldUSA = self.opts.useSliceAggregates
                self.opts.useSliceAggregates = True
                self.opts.sliceName = self.slicename
                (aggs, message) = handler_utils._listaggregates(self)
                self.opts.useSliceAggregates = oldUSA
                if len(aggs) > 0:
                    self.opts.aggregate = []
                    for agg in aggs.values():
                        self.logger.debug("Adding AM %s retrieved from CH", agg)
                        self.opts.aggregate.append(agg)
                else:
                    self.logger.debug("No AMs from CH: %s", message)
        if not self.opts.aggregate or len(self.opts.aggregate) == 0:
            # No resources known to be in any AMs. Try again specifying explicit -a arguments.
            msg = "No known reservations at any aggregates. Try again with explicit -a arguments."
            self.logger.info(msg)
            return (msg, None)
        if self.opts.aggregate and len(self.opts.aggregate) == 1:
            # Omni can handle this
            self.logger.debug("Passing call to Omni...")
            return self.passToOmni(args)
        self.opts.useSliceAggregates = False
        if command.strip().lower() in ('describe', 'listresources'):
            # This is a case of multiple AMs whose manifests should be combined
            return self.rebuildManifest()
#        elif command.strip().lower() in ('delete', 'deletesliver'):
        else:
            # Lets someone use stitcher to delete at multiple AMs when the API version is mixed
            return self.doDelete()

    if not command or command.strip().lower() not in ('createsliver', 'allocate'):
        # Stitcher only handles createsliver or allocate. Hand off to Omni.
        if self.opts.fakeModeDir:
            msg = "In fake mode. Otherwise would call Omni with args %r" % args
            self.logger.info(msg)
            return (msg, None)
        else:
            self.logger.debug("Passing call to Omni...")
            # Add -a options from the saved file, if none already supplied
            self.addAggregateOptions(args)
            return self.passToOmni(args)
    # End of block to check the command

    if len(args) > 2:
        request = args[2]

    if len(args) > 3:
        self.logger.warn("Arguments %s ignored", args[3:])
    #self.logger.debug("Command=%s, slice=%s, rspec=%s", command, self.slicename, request)

    # Parse the RSpec
    requestString = ""
    if request:
        self.rspecParser = RSpecParser(self.logger)
        self.parsedUserRequest = None
        try:
            # read the rspec into a string, and add it to the rspecs dict
            requestString = handler_utils._derefRSpecNick(self, request)
        except Exception, exc:
            msg = "Unable to read rspec file '%s': %s" % (request, str(exc))
            if self.opts.devmode:
                self.logger.warn(msg)
            else:
                raise OmniError(msg)

        #    # Test if the rspec is really json containing an RSpec, and pull out the right thing
        #    requestString = amhandler.self._maybeGetRSpecFromStruct(requestString)

        # confirmGoodRequest
        self.confirmGoodRSpec(requestString)
        self.logger.debug("Valid GENI v3 request RSpec")

        # parseRequest
        self.parsedUserRequest = self.rspecParser.parse(requestString)
    else:
        raise OmniError("No request RSpec found, or slice name missing!")

    # Examine the RSpec to see what kind of request it is
    self.isStitching = self.mustCallSCS(self.parsedUserRequest)
    self.isGRE = self.hasGRELink(self.parsedUserRequest)
    self.isMultiAM = False
    # If any node is unbound, then all AMs will try to allocate it. So bail
    unboundNode = self.getUnboundNode()
    self.isBound = (unboundNode is None)
    if self.isBound:
        self.logger.debug("Request appears to be fully bound")
    if (self.isGRE or self.isStitching) and not self.isMultiAM:
        self.logger.debug("Nodes seemed to list <2 AMs, but rspec appears GRE or stitching, so it is multi AM")
        self.isMultiAM = True

    # FIXME:
    # If it is bound, make sure all the implied AMs are known (have a URL)

    # FIXME:
    # If any node is unbound: Check that there is exactly 1 -a AM that is not one of the AMs a node is bound to, and then
    # edit the request to bind the nodes to that AM.

    if self.isBound and not self.isMultiAM and self.opts.fixedEndpoint:
        self.logger.debug("Got --fixedEndpoint, so pretend this is multi AM")
        self.isMultiAM = True

    # If this is not a bound multi AM RSpec, just let Omni handle this.
    if not self.isBound or not self.isMultiAM:
        self.logger.info("Not a bound multi-aggregate request - let Omni handle this.")

        # Check the -a arguments and compare with the AMs inferred from the request RSpec
        # Log on problems and try to set the -a arguments appropriately
        self.cleanDashAArgs(unboundNode)

        if self.opts.noReservation:
            self.logger.info("Not reserving resources")
            sys.exit()

        # Try to force a call that falls through to omni to log at info level,
        # or whatever level the main stitcher is using on the console
        ologger = logging.getLogger("omni")
        myLevel = logging.INFO
        handlers = self.logger.handlers
        if len(handlers) == 0:
            handlers = logging.getLogger().handlers
        for handler in handlers:
            if isinstance(handler, logging.StreamHandler):
                myLevel = handler.level
                break
        for handler in ologger.handlers:
            if isinstance(handler, logging.StreamHandler):
                handler.setLevel(myLevel)
                break

        # Warning: If this is createsliver and you specified multiple aggregates,
        # then omni only contacts 1 aggregate. That is likely not what you wanted.
        return omni.call(args, self.opts)
    # End of block to let Omni handle unbound or single AM requests

#    self.logger.debug("Edited request RSpec: %s", self.parsedUserRequest.getLinkEditedDom().toprettyxml())

    if self.opts.explicitRSpecVersion:
        self.logger.info("All manifest RSpecs will be in GENI v3 format")
        self.opts.explicitRSpecVersion = False
        self.opts.rspectype = ["GENI", '3']

    # FIXME: Confirm request is not asking for any loops
    self.confirmSafeRequest()

    # Remove any -a arguments from the opts so that when we later call omni
    # the right thing happens
    self.opts.aggregate = []

    # FIXME: Maybe use threading to parallelize confirmSliceOK and the 1st SCS call?

    # Get username for slicecred filename
    self.username = get_leaf(handler_utils._get_user_urn(self.logger, self.framework.config))
    if not self.username:
        raise OmniError("Failed to find your username to name your slice credential")

    # Ensure the slice is valid before all those Omni calls use it
    (sliceurn, sliceexp) = self.confirmSliceOK()

    # Here is where we used to add the expires attribute. No
    # longer necessary (nor a good idea).

    # Create the SCS instance if it will be needed
    if self.isStitching and not self.opts.noSCS:
        if not "geni-scs.net.internet2.edu:8443" in self.opts.scsURL:
            self.logger.info("Using SCS at %s", self.opts.scsURL)
        self.scsService = scs.Service(self.opts.scsURL, key=self.framework.key, cert=self.framework.cert, timeout=self.opts.ssltimeout, verbose=self.opts.verbosessl)
        self.scsCalls = 0
    if self.isStitching and self.opts.noSCS:
        self.logger.info("Not calling SCS on stitched topology per commandline option.")

    # Create singleton that knows about default sliver expirations by AM type
    defs.DefaultSliverExpirations.getInstance(self.config, self.logger)

    # Compare the list of AMs in the request with AMs known
    # to the SCS. Any that the SCS does not know means the request
    # cannot succeed if those are AMs in a stitched link
#        self.checkSCSAMs()

    # Call SCS and then do reservations at AMs, deleting or retrying SCS as needed
    # Note that it does this with mainStitchingLoop which recurses if needed.
    # Catch Ctrl-C, deleting partial reservations.
    lvl = None
    try:
        # Passing in the request as a DOM - after allowing edits as necessary. OK?
        lastAM = self.mainStitchingLoop(sliceurn, self.parsedUserRequest.getLinkEditedDom())

        # Construct and save out a combined manifest
        combinedManifest, filename, retVal = self.getAndSaveCombinedManifest(lastAM)

        # If some AMs used APIv3+, then we only did an allocation. Print something
        msg = self.getProvisionMessage()
        if msg:
            self.logger.info(msg)
            retVal += msg + "\n"

        # Print something about sliver expiration times
        msg = self.getExpirationMessage()
        if msg:
            self.logger.info(msg)
            retVal += msg + "\n"

        if filename:
            msg = "Saved combined reservation RSpec at %d AM(s) to file '%s'" % (len(self.ams_to_process), os.path.abspath(filename))
            self.logger.info(msg)
            retVal += msg

    except KeyboardInterrupt, kbi:
        if lvl:
            self.logger.setLevel(lvl)
        msg = 'Stitching interrupted!'
        if self.lastException:
            msg += ' ' + str(self.lastException)
        self.logger.error(msg)
        import traceback
        self.logger.debug("%s", traceback.format_exc())

        if self.opts.noDeleteAtEnd:
            # User requested to not delete on interrupt
            self.logger.warn("Per command-line option, not deleting existing reservations.")
            msg = self.endPartiallyReserved(kbi, aggs=self.ams_to_process)
            # Here this method need not exit or raise. But should log something.
            # sys.exit is called later.
            self.logger.warn(msg)
        elif self.ams_to_process is not None:
            class DumbLauncher():
                def __init__(self, agglist):
                    self.aggs = agglist

            (delretText, delretStruct) = self.deleteAllReservations(DumbLauncher(self.ams_to_process))
            for am in self.ams_to_process:
                if am.manifestDom:
                    self.logger.warn("You have a reservation at %s", am)
        sys.exit(-1)
    except StitchingError, se:
        if lvl:
            self.logger.setLevel(lvl)
        # FIXME: Return anything different for stitching error?
        # Do we want to return a geni triple struct?
        if self.lastException:
            msg = "Stitching Failed. %s" % str(se)
            if str(self.lastException) not in str(se):
                msg += ". Root cause error: %s" % str(self.lastException)
            self.logger.error(msg)
            newError = StitchingError(msg)
            se = newError
        if "Requested no reservation" in str(se) or isinstance(se, StitchingStoppedError):
            print str(se)
            self.logger.debug(se)
            sys.exit(0)
        else:
            raise se
    finally:
        # Save a file with the aggregates used in this slice
        self.saveAggregateList(sliceurn)

        # Clean up temporary files
        self.cleanup()

        self.dump_objects(self.parsedSCSRSpec, self.ams_to_process)

    # Construct return message
    retMsg = self.buildRetMsg()

    # FIXME: What do we want to return?
    # Make it something like createsliver / allocate, with the code/value/output triple plus a string
    # On success
    #  Request from SCS that worked? Merged request as I modified?
    #  Merged manifest
    #  List of AMs, the URLs, and their API versions?
    #  Some indication of the slivers and their status and expiration at each AM?
    #  In particular, which AMs need a provision and poa geni_start
    #  ?? Stuff parsed from manifest?? EG some representation of each path with node list/count at each AM and VLAN tag for each link?, maybe list of the AMs added by the SCS?
    # On error
    #  Error code / message (standard GENI triple)
    #  If the error was after SCS, include the expanded request from the SCS
    #  If particular AMs had errors, ID those AMs and the errors
    self.logger.debug(retMsg)
    return (retMsg, combinedManifest)
# End of doStitching()
def prepObjectsForNonCreate(self):
    '''Initialize variables and datastructures when they wont be created by doing createsliver.
    EG to do a describe/listresources/delete/deletesliver. See rebuildManifest() and doDelete().'''

    # Get username for slicecred filename
    self.username = get_leaf(handler_utils._get_user_urn(self.logger, self.framework.config))
    if not self.username:
        raise OmniError("Failed to find your username to name your slice credential")

    # Ensure the slice is valid before all those Omni calls use it
    (sliceurn, sliceexp) = self.confirmSliceOK()

    # We don't have any RSpecs
    self.parsedUserRequest = None
    self.parsedSCSRSpec = None

    # Ensure all AM URNs in the commandline are Aggregate objects in ams_to_process
    self.createObjectsFromOptArgs()

    # Remove any -a arguments from the opts so that when we later call omni
    # the right thing happens
    self.opts.aggregate = []

    # Add extra info about the aggregates to the AM objects
    self.add_am_info(self.ams_to_process)

    # If requesting from >1 ExoGENI AM, then use ExoSM. And use ExoSM only once.
    # FIXME!!
    # Will this correctly query the ExoSM vs the individual rack?
    # Or should I always query both the individual rack and the ExoSM (once)?
    self.ensureOneExoSM()

    # Save slice cred and timeoutTime on each AM
    for am in self.ams_to_process:
        if self.slicecred:
            # Hand each AM the slice credential, so we only read it once
            am.slicecred = self.slicecred
        # Also hand the timeout time
        am.timeoutTime = self.config['timeoutTime']
        am.userRequested = True

    self.rspecParser = RSpecParser(self.logger)
def doDelete(self):
    '''Do delete at APIv3 AMs and deletesliver at v2-only AMs, combining the results.

    Returns (result text, result struct). Results are keyed by am.url.
    For v2 AMs the per-AM value is True/False when the AM was found in the
    success/failure list of the (successURLs, failedURLs) style return,
    otherwise whatever the AM originally returned (note a URL variant may
    prevent the match). v3 AMs contribute their raw APIv3 return.'''
    self.prepObjectsForNonCreate()
    #self.logger.debug("Done with prep for delete. AMs: %s", self.ams_to_process)

    # Pretend each AM holds a reservation so deleteAllReservations
    # attempts the delete everywhere.
    for agg in self.ams_to_process:
        agg.manifestDom = True

    # deleteAllReservations expects an object with an .aggs attribute;
    # it then calls delete on each aggregate instance individually and
    # combines the results. (We could have produced 2 omni calls instead.)
    class _AggHolder():
        def __init__(self, agglist):
            self.aggs = agglist

    (text, struct) = self.deleteAllReservations(_AggHolder(self.ams_to_process))
    self.logger.debug("Result from deleteAll: %s", text)

    # deletesliver is (successList of AM URLs, failList);
    # delete is a dictionary by AM URL of the raw APIv3 return.
    return (text, struct)
# End of doDelete()
# End of doDelete()
def rebuildManifest(self):
    """Rebuild a combined manifest for the slice from current reservations.

    Processes a listresources or describe call on a slice by fetching the
    manifest from every aggregate, combining them into a new combined
    manifest, and saving the various RSpecs to files.

    Return is consistent with Omni: (string, object).
    - Describe (APIv3) return is keyed by AM URL with the full
      code/value/output triple; combined manifest under key 'combined'.
    - ListResources (APIv2) return is keyed by (URN, URL) of RSpecs;
      combined manifest under ('combined','combined').

    Raises StitchingError if no manifest could be retrieved from any AM.
    """
    self.prepObjectsForNonCreate()

    # Init some data structures
    lastAM = None
    workflow_parser = WorkflowParser(self.logger)
    retStruct = dict()

    # Now actually get the manifest for each AM
    for am in self.ams_to_process:
        # Query only this one AM: clone the options and point -a at it
        opts_copy = copy.deepcopy(self.opts)
        opts_copy.aggregate = [(am.nick if am.nick else am.url)]
        self.logger.info("Gathering current reservations at %s...", am)
        rspec = None
        try:
            rspec = am.listResources(opts_copy, self.slicename)
        except StitchingError, se:
            self.logger.debug("Failed to list current reservation: %s", se)

        # Record the (possibly None) RSpec in the API-version-appropriate shape
        if am.api_version == 2:
            retStruct[(am.urn,am.url)] = rspec
        else:
            # Fake up an APIv3 return triple, tagging the AM type where known
            retStruct[am.url] = {'code':dict(),'value':rspec,'output':None}
            if am.isPG:
                retStruct[am.url]['code'] = {'geni_code':0, 'am_type':'protogeni', 'am_code':0}
            elif am.dcn:
                retStruct[am.url]['code'] = {'geni_code':0, 'am_type':'dcn', 'am_code':0}
            elif am.isEG:
                retStruct[am.url]['code'] = {'geni_code':0, 'am_type':'orca', 'am_code':0}
            elif am.isGRAM:
                retStruct[am.url]['code'] = {'geni_code':0, 'am_type':'gram', 'am_code':0}
            else:
                retStruct[am.url]['code'] = {'geni_code':0, 'am_code':0}
        if rspec is None:
            # Nothing more we can learn from an AM we could not query
            continue

        # Look for and save any sliver expiration
        am.setSliverExpirations(handler_utils.expires_from_rspec(rspec, self.logger))

        # Fill in more data structures using this RSpec to the extent it helps
        parsedMan = self.rspecParser.parse(rspec)
        if self.parsedUserRequest is None:
            self.parsedUserRequest = parsedMan
        if self.parsedSCSRSpec is None:
            self.parsedSCSRSpec = parsedMan

        # This next, if I had a workflow, would create the hops
        # on the aggregates. As is, it does very little
        # Without the hops on the aggregates, we don't merge hops in the stitching extension
        workflow_parser.parse({}, parsedMan)

        # Make sure the ExoSM lists URN synonyms for all the EG component managers
        # that don't have their own Agg instance
        # FIXME: Anything similar I need to do for other AMs like gram?
        if am.isExoSM:
            for urn in parsedMan.amURNs:
                # self.logger.debug("Man from %s had AM URN %s", am, urn)
                if urn in Aggregate.aggs:
                    # self.logger.debug("Already is an AM")
                    continue
                # Check known URN synonyms before treating this URN as new
                syns = Aggregate.urn_syns(urn)
                found = False
                for urn2 in syns:
                    if urn2 in Aggregate.aggs:
                        found = True
                        urn = urn2
                        # self.logger.debug(".. which is an AM under syn %s", urn)
                        break
                if not found:
                    if not (urn.strip().lower().endswith("+cm") or urn.strip().lower().endswith("+am")):
                        # Doesn't look like an AM URN. Skip it.
                        self.logger.debug("URN parsed from man doesn't look like an AM URN: %s", urn)
                        continue
                    # self.logger.debug("... is not any existing AM")
                    urnO = URN(urn=urn)
                    urnAuth = urnO.getAuthority()
                    if urnAuth.startswith("exogeni.net"):
                        # self.logger.debug("Is an ExoGENI URN. Since this is the exoSM, add it as a urn syn")
                        am.urn_syns.append(urn)
            # end of loop over AM URNs
        # End of block to handle ExoSM

        # Try to use the info I do have to construct hops on aggregates
        # Note this has to be redone on the combined manifest later.
        # May need to tell it to not swap hops?
        self.fixHopRefs(parsedMan, am)

        self.logger.debug("%s has %d hops", am, len(am.hops))

        # Parse the manifest and fill in the manifest suggested/range values
        try:
            from xml.dom.minidom import parseString
            am.manifestDom = parseString(rspec)
            # No separate request RSpec available here; reuse the manifest
            am.requestDom = am.manifestDom
            # Fill in the manifest values on hops
            for hop in am.hops:
                self.logger.debug("Updating hop %s", hop)
                # 7/12/13: FIXME: EG Manifests reset the Hop ID. So you have to look for the link URN
                if am.isEG:
                    self.logger.debug("Parsing EG manifest with special method")
                    range_suggested = am.getEGVLANRangeSuggested(am.manifestDom, hop._hop_link.urn, hop.path.id)
                else:
                    range_suggested = am.getVLANRangeSuggested(am.manifestDom, hop._id, hop.path.id)

                # range_suggested appears to be (pathGlobalId, vlanRange,
                # suggestedVLAN) — slots may be missing or None
                pathGlobalId = None
                if range_suggested and len(range_suggested) > 0:
                    if range_suggested[0] is not None:
                        pathGlobalId = str(range_suggested[0]).strip()
                        if pathGlobalId and pathGlobalId is not None and pathGlobalId != "None" and pathGlobalId != '':
                            if hop.globalId and hop.globalId is not None and hop.globalId != "None" and hop.globalId != pathGlobalId:
                                self.logger.warn("Changing Hop %s global ID from %s to %s", hop, hop.globalId, pathGlobalId)
                            hop.globalId = pathGlobalId
                        else:
                            self.logger.debug("Got no global id")
                    else:
                        #self.logger.debug("Got nothing in range_suggested first slot")
                        pass
                    if len(range_suggested) > 1 and range_suggested[1] is not None:
                        rangeValue = str(range_suggested[1]).strip()
                        if not rangeValue or rangeValue in ('null', 'any', 'None'):
                            self.logger.debug("Got no valid vlan range on %s: %s", hop, rangeValue)
                        else:
                            rangeObject = VLANRange.fromString(rangeValue)
                            hop._hop_link.vlan_range_manifest = rangeObject
                            self.logger.debug("Set range manifest: %s", rangeObject)
                    else:
                        self.logger.debug("Got no spot for a range value")
                    if len(range_suggested) > 2 and range_suggested[2] is not None:
                        suggestedValue = str(range_suggested[2]).strip()
                        if not suggestedValue or suggestedValue in ('null', 'any', 'None'):
                            self.logger.debug("Got no valid vlan suggestion on %s: %s", hop, suggestedValue)
                        else:
                            suggestedObject = VLANRange.fromString(suggestedValue)
                            hop._hop_link.vlan_suggested_manifest = suggestedObject
                            self.logger.debug("Set suggested manifest: %s", hop._hop_link.vlan_suggested_manifest)
                    else:
                        self.logger.debug("Got no spot for a suggested value")
                else:
                    self.logger.debug("Got no range_suggested at all")
                # End block for found the range and suggested from the RSpec for this hop
            # end of loop over hops
        except Exception, e:
            self.logger.debug("Failed to parse rspec: %s", e)
            continue

        if am.manifestDom is not None:
            # Remember the last AM whose manifest parsed; it becomes the
            # template when combining manifests below
            lastAM = am
            self.logger.debug("Setting lastAM to %s", lastAM)
    # Done looping over AMs

    if lastAM is None:
        # Failed to get any manifests, so bail
        raise StitchingError("Failed to retrieve resource listing - see logs")

    # Construct and save out a combined manifest
    combinedManifest, filename, retVal = self.getAndSaveCombinedManifest(lastAM)
    if self.opts.api_version == 2:
        retStruct[('combined','combined')] = combinedManifest
    else:
        retStruct['combined'] = {'code':{'geni_code':0},'value':combinedManifest,'output':None}

    parsedCombined = self.rspecParser.parse(combinedManifest)
    # Fix up the parsed combined RSpec to ensure we use the proper
    # hop instances and all the objects point to each other
    self.fixHopRefs(parsedCombined)

    self.dump_objects(parsedCombined, self.ams_to_process)

    # Print something about sliver expiration times
    msg = self.getExpirationMessage()
    if msg:
        self.logger.info(msg)
        retVal += msg + "\n"

    if filename:
        msg = "Saved combined reservation RSpec at %d AM(s) to file '%s'" % (len(self.ams_to_process), os.path.abspath(filename))
        self.logger.info(msg)
        retVal += msg

    # Construct return message
    retMsg = self.buildRetMsg()
    self.logger.debug(retMsg)

    # # Simplest return: just the combined rspec
    # return (retMsg, combinedManifest)

    # API method compliant returns
    # Describe return should be by URL with the full return triple
    # Put the combined manifest under 'combined'
    # ListResources return should be dict by URN,URL of RSpecs
    # Put the combined manifest under ('combined','combined')
    return (retMsg, retStruct)
# End of rebuildManifest()
def fixHopRefs(self, parsedManifest, thisAM=None):
    """Fix up Hop and Aggregate objects from a parsed manifest RSpec.

    Used by rebuildManifest() in place of the workflow-based fixup: ensure
    each hop points at its path and its Aggregate instance, that the
    aggregate is listed on the hop's path, and that the aggregate lists
    both the hop and its path — swapping in the AM's own hop instance on
    the path where the two instances disagree.

    If thisAM is given, only hops belonging to that aggregate are touched.
    """
    if not parsedManifest or not parsedManifest.stitching:
        return
    for path in parsedManifest.stitching.paths:
        for hop in path.hops:
            if hop.path != path:
                hop.path = path
            # Fill in the Aggregate instance on the hop
            if not hop.aggregate:
                self.logger.debug("%s missing aggregate", hop)
                urn = hop.urn
                if not urn or not '+' in urn:
                    self.logger.debug("%s had invalid urn", hop)
                    continue
                spl = urn.split('+')
                if len(spl) < 4:
                    self.logger.debug("%s URN malformed", hop)
                    continue
                # Derive the AM URN from the hop URN's authority portion
                urnAuth = urn_to_string_format(spl[1])
                urnC = URN(authority=urnAuth, type='authority', name='am')
                hopAgg = Aggregate.find(urnC.urn)
                hop.aggregate = hopAgg
                self.logger.debug("Found %s", hopAgg)
            if thisAM and hop.aggregate != thisAM:
                # self.logger.debug("%s not for this am (%s) - continue", hop, thisAM)
                continue
            if not hop.aggregate in hop.path.aggregates:
                self.logger.debug("%s's AM not on its path - adding", hop)
                hop.path.aggregates.add(hop.aggregate)
            # Find the AM for this hop
            if not thisAM:
                anAM = None
                for am in self.ams_to_process:
                    if hop.aggregate == am:
                        anAM = am
                        break
                if not anAM:
                    # Hop's AM is not one we are working with; stop entirely
                    return
                am = anAM
            else:
                am = thisAM
            # Now ensure we have the right objects
            found=False
            for hop2 in am.hops:
                # Ensure use right version of the Hop object
                if hop2.urn == hop.urn and hop2.path.id == hop.path.id:
                    self.logger.debug("%s already listed by its AM", hop)
                    if hop != hop2:
                        self.logger.debug("... but the 2 hop instances are different!")
                        # Do I need to swap instances?
                        if hop2._hop_link.vlan_suggested_manifest != hop._hop_link.vlan_suggested_manifest:
                            self.logger.debug("Swapping out the path version of the hop to use the AM version instead, which has sug man: %s", hop2._hop_link.vlan_suggested_manifest)
                            # use hop2 not hop
                            # edit path.hops
                            newHops = []
                            for hop3 in path.hops:
                                if hop3 == hop:
                                    newHops.append(hop2)
                                else:
                                    newHops.append(hop3)
                            path.hops = newHops
                        else:
                            # both hops have same manifest value, shouldn't matter
                            self.logger.debug(" ... but have same suggested manifest, so leave it alone")
                    found = True
                    break
            # AM didn't know the hop, so add it
            if not found:
                self.logger.debug("%s not listed on it's AM's hops - adding", hop)
                am.add_hop(hop)
            found = False
            # And make sure the AM has the Path too
            for path2 in am.paths:
                if hop.path.id == path2.id:
                    found = True
                    self.logger.debug("%s 's path already listed by its aggregate %s", hop, hop.aggregate)
                    if hop.path != path2:
                        self.logger.debug("... but FIXME the 2 path instances are different!!")
                        # FIXME: Do I need to swap instances?
                    break
            if not found:
                self.logger.debug("%s 's path not listed on the AM's paths, adding", hop)
                am.add_path(hop.path)
            # End of block to ensure the AM has the hop
        # End of loop over hops
    # End of loop over paths
# End of method fixHopRefs
def passToOmni(self, args):
    """Pass the call on to Omni using the given args.

    Before delegating, align the omni logger's console handler level with
    whatever level the main stitcher logger is using on the console, so a
    fall-through call logs at the expected verbosity.

    Returns whatever omni.call returns.
    """
    ologger = logging.getLogger("omni")

    # Discover the stitcher's console level (falling back to the root
    # logger's handlers when our logger has none of its own)
    consoleLevel = logging.INFO
    ourHandlers = self.logger.handlers or logging.getLogger().handlers
    for h in ourHandlers:
        if isinstance(h, logging.StreamHandler):
            consoleLevel = h.level
            break

    # Apply that level to omni's console handler
    for h in ologger.handlers:
        if isinstance(h, logging.StreamHandler):
            h.setLevel(consoleLevel)
            break

    return omni.call(args, self.opts)
# End of passToOmni
def buildRetMsg(self):
    """Build the success return message for this handler.

    Counts total aggregates, SCS-added intermediate aggregates, GRE links,
    and stitched paths, and folds them into a one-line summary string.
    """
    amcnt = len(self.ams_to_process)
    # AMs the SCS added that the user did not explicitly request
    scs_added_amcnt = sum(1 for agg in self.ams_to_process if not agg.userRequested)

    pathcnt = 0
    if self.parsedSCSRSpec and self.parsedSCSRSpec.stitching:
        pathcnt = len(self.parsedSCSRSpec.stitching.paths)

    grecnt = 0
    if self.parsedSCSRSpec and self.parsedSCSRSpec.links:
        for link in self.parsedSCSRSpec.links:
            if link.typeName in (link.GRE_LINK_TYPE, link.EGRE_LINK_TYPE):
                grecnt += 1

    greStr = (", creating %d GRE link(s)" % grecnt) if grecnt > 0 else ""
    stitchStr = (", creating %d stitched link(s)" % pathcnt) if pathcnt > 0 else ""

    if scs_added_amcnt > 0:
        retMsg = "Success: Reserved resources in slice %s at %d Aggregates (including %d intermediate aggregate(s) not in the original request)%s%s." % (self.slicename, amcnt, scs_added_amcnt, greStr, stitchStr)
    else:
        retMsg = "Success: Reserved resources in slice %s at %d Aggregates%s%s." % (self.slicename, amcnt, greStr, stitchStr)
    return retMsg
# End of buildRetMsg
def cleanDashAArgs(self, unboundNode):
    """Sanity-check / clean the -a aggregate args against the request RSpec.

    Used in doStitching. For a bound request:
    - no -a given: fill in self.opts.aggregate from the AM in the RSpec;
    - one -a given: warn if it differs from the AM the RSpec is bound to;
    - multiple -a given: warn, and check the bound AM is among them.
    Only logs and adjusts options; never raises.

    unboundNode -- name of a node with no component manager, or None.
    """
    if unboundNode is not None:
        self.logger.info("Node '%s' is unbound in request - all nodes must be bound for stitcher, as all aggregates get the same request RSpec" % unboundNode)

    if not self.isBound:
        return

    if self.opts.aggregate is None or len(self.opts.aggregate) == 0:
        # A bound non multi AM RSpec but no AM specified. Fill in the -a appropriately
        if self.parsedUserRequest.amURNs and len(self.parsedUserRequest.amURNs) > 0:
            amURN = self.parsedUserRequest.amURNs.pop()
            (nick, url) = handler_utils._lookupAggNickURLFromURNInNicknames(self.logger, self.config, amURN)
            if url and url.strip() != '':
                self.logger.debug("Setting -a argument for Omni: Found RSpec AM %s in omni_config AM nicknames: %s", amURN, nick)
                self.opts.aggregate = [nick]
            else:
                self.logger.debug("Could not find AM from RSpec for URN %s - Omni will have no -a argument", amURN)
        # else: no AM URNs parsed from the RSpec - weird and really shouldn't happen
    elif len(self.opts.aggregate) == 1:
        # One -a given: check it matches the AM the RSpec is bound to.
        # If it is not what the RSpec is bound to, then what? complain? fix it? do it anyhow?
        if self.parsedUserRequest.amURNs and len(self.parsedUserRequest.amURNs) > 0:
            amURN = self.parsedUserRequest.amURNs.pop()
            (nick, url) = handler_utils._lookupAggNickURLFromURNInNicknames(self.logger, self.config, amURN)
            amNick = None
            amURL = None
            if url and url.strip() != '':
                self.logger.debug("Found RSpec AM %s in omni_config AM nicknames: %s", amURN, nick)
                amNick = nick
                amURL = url
            url1, urn1 = self._derefAggNickQuietly(self.opts.aggregate[0])
            if (amNick and amNick == self.opts.aggregate[0]) or (amURL and amURL == url1) or (amURN == urn1):
                self.logger.debug("Supplied -a matches the AM found in the RSpec: %s=%s", amURN, self.opts.aggregate[0])
            elif amNick and url1:
                # A valid comparison that didn't find anything
                self.logger.warn("RSpec appears bound to a different AM than you are submitting it to. RSpec specifies AM %s (%s) but -a argument specifies %s (%s)! Continuing anyway....", amURN, amNick, self.opts.aggregate[0], url1)
                # FIXME: Correct it? Bail?
            # else: didn't get all the values for a proper comparison
        # else: no AMs parsed out of the RSpec. I don't think this should happen
    else:
        # The RSpec appeared to be single AM but multiple AMs specified.
        # Check whether the bound AM is at least one of them.
        self.logger.debug("RSpec appeared bound to a single AM but multiple -a arguments specified?")
        if self.parsedUserRequest.amURNs and len(self.parsedUserRequest.amURNs) > 0:
            amURN = self.parsedUserRequest.amURNs.pop()
            (nick, url) = handler_utils._lookupAggNickURLFromURNInNicknames(self.logger, self.config, amURN)
            amNick = None
            amURL = None
            if url and url.strip() != '':
                self.logger.debug("Found RSpec AM %s URL from omni_config AM nicknames: %s", amURN, nick)
                amNick = nick
                amURL = url
            # Get the url,urn for each -a and see if it is in the RSpec
            found = False
            for dasha in self.opts.aggregate:
                url1, urn1 = self._derefAggNickQuietly(dasha)
                if (amNick and amNick == dasha) or (amURL and amURL == url1) or (amURN == urn1):
                    self.logger.debug("1 of the supplied -a args matches the AM found in the RSpec: %s", amURN)
                    found = True
                    break
            # End of loop over -a args
            if not found:
                self.logger.warn("RSpec appears bound to a different AM than the multiple AMs you are submitting it to. RSpec specifies AM %s (%s) but -a argument specifies %s! Continuing anyway....", amURN, amNick, self.opts.aggregate)
            else:
                self.logger.warn("RSpec appeared bound to a single AM (%s) but multiple -a arguments specified? %s", amURN, self.opts.aggregate)
                self.logger.info("... continuing anyway")
                # FIXME: Correct it? Bail?
        # end of multiple AMs found in parsed RSpec
    # end of multi AMs specified with -a
# End of cleanDashAArgs

def _derefAggNickQuietly(self, dasha):
    """Resolve a -a argument (nickname or URL) to (url, urn), quietly.

    Suppresses console log output below WARN during the lookup unless
    running with --debug; the prior console level is restored even if the
    lookup raises. Factors out the suppress/deref/restore sequence that
    cleanDashAArgs needs in two places.
    """
    if self.opts.debug:
        return handler_utils._derefAggNick(self, dasha)
    handlers = self.logger.handlers
    if len(handlers) == 0:
        handlers = logging.getLogger().handlers
    streamHandler = None
    lvl = logging.INFO
    for handler in handlers:
        if isinstance(handler, logging.StreamHandler):
            streamHandler = handler
            lvl = handler.level
            handler.setLevel(logging.WARN)
            break
    try:
        return handler_utils._derefAggNick(self, dasha)
    finally:
        # Restore the console level we suppressed above
        if streamHandler is not None:
            streamHandler.setLevel(lvl)
def getAndSaveCombinedManifest(self, lastAM):
    """Construct a unified manifest RSpec and save it to a file.

    Used in doStitching. lastAM is the preferred template aggregate for
    combining manifests; EG, DCN and GRAM AMs are avoided as templates
    (EG manifests are incomplete, DCN manifests do funny things with
    namespaces - ticket #549 - and GRAM AMs seem to miss nodes), and a PG
    AM is slightly preferred when one is available.

    Returns (combinedManifest, filename-or-None, retVal) where retVal is a
    partially constructed return string (includes AMs, URLs, API versions).
    """
    if lastAM is None and len(self.ams_to_process) > 0:
        lastAM = self.ams_to_process[-1]
    if lastAM is not None and (lastAM.isEG or lastAM.dcn or lastAM.isGRAM or lastAM.manifestDom is None):
        self.logger.debug("Last AM was an EG or DCN or GRAM AM. Find another for the template.")
        # Walk backwards from the end of the AM list for a usable template
        i = 1
        while (lastAM.isEG or lastAM.dcn or lastAM.isGRAM or lastAM.manifestDom is None) and i <= len(self.ams_to_process):
            # This has lost some hops and messed up hop IDs. Don't use it as the template
            # I'd like to find another AM we did recently
            lastAM = self.ams_to_process[-i]
            i = i + 1
        if lastAM.isEG or lastAM.dcn or lastAM.isGRAM or lastAM.manifestDom is None:
            self.logger.debug("Still had an EG or DCN or GRAM template AM - use the raw SCS request")
            lastAM = None
    # I have a slight preference for a PG AM. See if we have one
    if lastAM is not None and not lastAM.isPG and len(self.ams_to_process) > 1:
        for am in self.ams_to_process:
            if am != lastAM and am.isPG and am.manifestDom is not None:
                lastAM = am
                break
    combinedManifest = self.combineManifests(self.ams_to_process, lastAM)
    # FIXME: Handle errors. Maybe make return use code/value/output struct
    # If error and have an expanded request from SCS, include that in output.
    # Or if particular AM had errors, ID the AMs and errors
    # FIXME: This prepends a header on an RSpec that might already have a header
    # -- maybe replace any existing header
    # FIXME: We force -o here and keep it from logging the
    # RSpec. Do we need an option to not write the RSpec to a file?
    ot = self.opts.output
    if not self.opts.tostdout:
        self.opts.output = True
    if not self.opts.debug:
        # Suppress all but WARN on console here
        lvl = self.logger.getEffectiveLevel()
        handlers = self.logger.handlers
        if len(handlers) == 0:
            handlers = logging.getLogger().handlers
        for handler in handlers:
            if isinstance(handler, logging.StreamHandler):
                lvl = handler.level
                handler.setLevel(logging.WARN)
                break
    retVal, filename = handler_utils._writeRSpec(self.opts, self.logger, combinedManifest, self.slicename, 'multiam-combined', '', None)
    if not self.opts.debug:
        # Restore the console level we suppressed above
        handlers = self.logger.handlers
        if len(handlers) == 0:
            handlers = logging.getLogger().handlers
        for handler in handlers:
            if isinstance(handler, logging.StreamHandler):
                handler.setLevel(lvl)
                break
    # Restore the caller's -o setting
    self.opts.output = ot
    return combinedManifest, filename, retVal
# End of getAndSaveCombinedManifest
def getExpirationMessage(self):
    """Return a message about when reservations at the aggregates expire.

    Used in doStitching. Aggregates are bucketed by expiration time (times
    within 15 minutes of each other count as the same); the message reports
    the first and, when distinct, second expiration time and which AMs they
    cover. Always returns a string.
    """
    # FIXME: 15min? 30min?
    # FIXME: Old code printed per agg exp at debug level
    sortedAggs = Aggregate.sortAggsByExpirations(15) # 15min apart counts as same
    firstTime = None
    firstCount = 0
    firstLabel = ""
    secondTime = None
    secondCount = 0
    secondLabel = ""
    noPrint = False   # True once msg is final and needs no composing below
    msgAdd = ''
    msg = None
    if len(sortedAggs) == 0:
        msg = "No aggregates"
        self.logger.debug("Got no aggregates?")
        noPrint = True
    else:
        self.logger.debug("AMs expire at %d time(s).", len(sortedAggs))
        # The first bucket may hold aggs that reported no expiration at all
        firstSlotTimes = sortedAggs[0][0].sliverExpirations
        skipFirst = False
        if firstSlotTimes is None or len(firstSlotTimes) == 0:
            skipFirst = True
            if len(sortedAggs) == 1:
                msg = "Aggregates did not report sliver expiration"
                self.logger.debug("Only expiration timeslot has an agg with no expirations")
                noPrint = True
            else:
                msgAdd = "Resource expiration unknown at %d aggregate(s)" % len(sortedAggs[0])
                self.logger.debug("First slot had no times, but there are other slots")
        ind = -1
        for slot in sortedAggs:
            ind += 1
            if skipFirst and ind == 0:
                continue
            if firstTime is None:
                # First bucket with a real expiration time
                firstTime = slot[0].sliverExpirations[0]
                firstCount = len(slot)
                firstLabel = str(slot[0])
                if len(sortedAggs) > 1:
                    self.logger.debug("First expiration is at %s UTC at %s, at %d total AM(s).", firstTime.isoformat(), firstLabel, firstCount)
                else:
                    self.logger.debug("Resource expiration is at %s UTC, at %d total AM(s).", firstTime.isoformat(), firstCount)
                if firstCount == 1:
                    continue
                elif firstCount == 2:
                    firstLabel += " and " + str(slot[1])
                else:
                    firstLabel += " and %d other AM(s)" % (firstCount - 1)
                continue
            elif secondTime is None:
                # Second distinct expiration time; record it and stop looking
                secondTime = slot[0].sliverExpirations[0]
                secondCount = len(slot)
                secondLabel = str(slot[0])
                self.logger.debug("Second expiration at %s UTC at %s, at %d total AM(s)", secondTime.isoformat(), secondLabel, secondCount)
                if secondCount == 1:
                    break
                elif secondCount == 2:
                    secondLabel += " and " + str(slot[1])
                else:
                    secondLabel += " and %d other AM(s)" % (secondCount - 1)
                break
        # Done looping over agg exp times in sortedAggs
    # Done handling sortedAggs
    if not noPrint:
        if len(sortedAggs) == 1 or secondTime is None:
            msg = "Your resources expire at %s (UTC). %s" % (firstTime.isoformat(), msgAdd)
        else:
            msg = "Your resources expire at %d different times. The first resources expire at %s (UTC) at %s. The second expiration time is %s (UTC) at %s. %s" % (len(sortedAggs), firstTime.isoformat(), firstLabel, secondTime.isoformat(), secondLabel, msgAdd)
    return msg
# end getExpirationMessage
def getProvisionMessage(self):
    """Build a warning for AMs where the reservation is only allocated.

    APIv3 AMs with a manifest hold a temporary (allocated) reservation;
    the experimenter must still run provision and poa geni_start there.
    Returns the warning string, or None when no such AM exists.
    """
    msg = None
    for agg in self.ams_to_process:
        # Only APIv3+ AMs where we actually hold a reservation
        if not (agg.manifestDom and agg.api_version > 2):
            continue
        if msg is None:
            msg = ""
        label = agg.nick
        if label is None:
            label = agg.url
        msg += " Reservation at %s is temporary! \nYou must manually call `omni -a %s -V3 provision %s` and then `omni -a %s -V3 poa %s geni_start`.\n" % (label, label, self.slicename, label, self.slicename)
    return msg
# Compare the list of AMs in the request with AMs known
# to the SCS. Any that the SCS does not know means the request
# cannot succeed if those are AMs in a stitched link
# This would be in the doStitching() method but is currently commented out.
def checkSCSAMs(self):
    """Warn about requested AMs that the SCS does not know.

    Compares the component managers named in the request RSpec with the
    aggregates the SCS advertises; any AM the SCS does not know cannot be
    part of a stitched link, so log a warning for each such AM. Purely
    advisory: SCS lookup failures are swallowed and nothing is raised.
    Currently unused (the call in doStitching is commented out).
    """
    # FIXME: This takes time. If this can't block a more expensive later operation, why bother?
    scsAggs = {}
    try:
        scsAggs = self.scsService.ListAggregates(False, self.opts.ssltimeout)
    except Exception as e:
        self.logger.debug("SCS ListAggregates failed: %s", e)
    if scsAggs and isinstance(scsAggs, dict) and len(scsAggs.keys()) > 0:
        # Use 'in' rather than dict.has_key() (deprecated in 2.x, gone in 3)
        if 'value' in scsAggs and 'geni_aggregate_list' in scsAggs['value']:
            scsAggs = scsAggs['value']['geni_aggregate_list']
            # self.logger.debug("Got geni_agg_list from scs: %s", scsAggs)
            # Now sanity check AMs requested
            # Note that this includes AMs that the user does not
            # want to stitch - so we cannot error out early
            # FIXME: Can we ID from the request which are AMs that need a stitch?
            for reqAMURN in self.parsedUserRequest.amURNs:
                found = False
                for sa in scsAggs.keys():
                    if scsAggs[sa]['urn'] == reqAMURN:
                        self.logger.debug("Requested AM URN %s is listed by SCS with URL %s", reqAMURN, scsAggs[sa]['url'])
                        found = True
                        break
                if not found:
                    self.logger.warn("Your request RSpec specifies the aggregate (component manager) '%s' for which there are no stitching paths configured. If you requested a stitched link to this aggregate, it will fail.", reqAMURN)
def cleanup(self):
    '''Remove temporary files if not in debug mode'''
    if self.opts.debug:
        return

    # Fake-mode SCS response file
    fakeSCS = prependFilePrefix(self.opts.fileDir, Aggregate.FAKEMODESCSFILENAME)
    if os.path.exists(fakeSCS):
        os.unlink(fakeSCS)

    # Slice credential file we saved ourselves
    if self.savedSliceCred and os.path.exists(self.opts.slicecredfile):
        os.unlink(self.opts.slicecredfile)

    if not self.ams_to_process:
        return

    for agg in self.ams_to_process:
        # Per-AM getversion cache. Note the AM URN here may not be right,
        # so we might miss a file.
        gvFile = handler_utils._construct_output_filename(self.opts, None, agg.url, agg.urn, "getversion", ".json", 1)
        # self.logger.debug("Deleting AM getversion: %s", gvFile)
        if os.path.exists(gvFile):
            os.unlink(gvFile)

        # Per-AM request RSpec, unless the user asked to keep output files
        if agg.rspecfileName and not self.opts.output:
            # self.logger.debug("Deleting AM request: %s", agg.rspecfileName)
            if os.path.exists(agg.rspecfileName):
                os.unlink(agg.rspecfileName)

        # v2.5 left these manifest & status files there. Leave them still? Remove them?
        if not self.opts.output:
            # Per-AM saved manifest RSpec
            manFile = handler_utils._construct_output_filename(self.opts, self.slicename, agg.url, agg.urn, "manifest-rspec", ".xml", 1)
            # self.logger.debug("Deleting AM manifest: %s", manFile)
            if os.path.exists(manFile):
                os.unlink(manFile)

            # Per-AM saved sliver status
            statusFile = handler_utils._construct_output_filename(self.opts, self.slicename, agg.url, agg.urn, "sliverstatus", ".json", 1)
            # self.logger.debug("Deleting AM status: %s", statusFile)
            if os.path.exists(statusFile):
                os.unlink(statusFile)
# The main loop that does the work of getting all aggregates objects to make reservations.
# This method recurses on itself when an attempt fails.
# - Handle timeout
# - Call the SCS as needed
# - pause to let AMs free resources from earlier attempts
# - parse the SCS response, constructing aggregate objects and dependencies
# - save aggregate state from any previous time through this loop
# - gather extra info on aggregates
# - ensure we use only 1 ExoSM instance, handle various request oddities
# - request 'any' at AMs where we can
# - handle requests to exit early
# - update the available range in the request based on current availability where appropriate
# - spawn the Launcher to loop over aggregates until all aggregates have a reservation, or raise an error
# - On error, delete partial reservations, and recurse for recoverable errors
def mainStitchingLoop(self, sliceurn, requestDOM, existingAggs=None):
# existingAggs are Aggregate objects
# Time out stitcher call if needed
if datetime.datetime.utcnow() >= self.config['timeoutTime']:
msg = "Reservation attempt timed out after %d minutes." % self.opts.timeout
if self.opts.noDeleteAtEnd:
# User requested to not delete on interrupt
# Update the message to indicate not deleting....
self.logger.warn("%s Per command-line option, not deleting existing reservations.", msg)
msg2 = self.endPartiallyReserved(aggs=existingAggs, timeout=True)
msg = "%s %s" % (msg, msg2)
# Allow later code to raise this as an error
else:
self.logger.warn("%s Deleting any reservations...", msg)
class DumbLauncher():
def __init__(self, agglist):
self.aggs = agglist
try:
(delretText, delretStruct) = self.deleteAllReservations(DumbLauncher(existingAggs))
for am in existingAggs:
if am.manifestDom:
self.logger.warn("You have a reservation at %s", am)
except KeyboardInterrupt:
self.logger.error('... deleting interrupted!')
for am in existingAggs:
if am.manifestDom:
self.logger.warn("You have a reservation at %s", am)
raise StitchingError(msg)
# Call SCS if needed
self.scsCalls = self.scsCalls + 1
if self.isStitching and not self.opts.noSCS:
if self.scsCalls == 1:
self.logger.info("Calling SCS...")
else:
thStr = 'th'
if self.scsCalls == 2:
thStr = 'nd'
elif self.scsCalls == 3:
thStr = 'rd'
if self.scsCalls == self.maxSCSCalls:
self.logger.info("Calling SCS for the %d%s and last time...", self.scsCalls, thStr)
else:
self.logger.info("Calling SCS for the %d%s time...", self.scsCalls, thStr)
scsResponse = self.callSCS(sliceurn, requestDOM, existingAggs)
self.lastException = None # Clear any last exception from the last run through
# If needed, pause to let AMs free up resources; recheck the timeout if needed
if self.scsCalls > 1 and existingAggs:
# We are doing another call.
# Let AMs recover. Is this long enough?
# If one of the AMs is a DCN AM, use that sleep time instead - longer
sTime = Aggregate.PAUSE_FOR_V3_AM_TO_FREE_RESOURCES_SECS
for agg in existingAggs:
if agg.dcn and agg.triedRes:
# Only need to sleep this much longer time
# if this is a DCN AM that we tried a reservation on (whether it worked or failed)
if sTime < Aggregate.PAUSE_FOR_DCN_AM_TO_FREE_RESOURCES_SECS:
self.logger.debug("Must sleep longer cause had a previous reservation attempt at a DCN AM: %s", agg)
sTime = Aggregate.PAUSE_FOR_DCN_AM_TO_FREE_RESOURCES_SECS
elif agg.api_version == 2 and agg.triedRes and sTime < Aggregate.PAUSE_FOR_AM_TO_FREE_RESOURCES_SECS:
self.logger.debug("Must sleep longer cause had a previous v2 reservation attempt at %s", agg)
sTime = Aggregate.PAUSE_FOR_AM_TO_FREE_RESOURCES_SECS
# Reset whether we've tried this AM this time through
agg.triedRes = False
if datetime.datetime.utcnow() + datetime.timedelta(seconds=sTime) >= self.config['timeoutTime']:
# We'll time out. So quit now.
self.logger.debug("After planned sleep for %d seconds we will time out", sTime)
msg = "Reservation attempt timing out after %d minutes." % self.opts.timeout
if self.opts.noDeleteAtEnd:
# User requested to not delete on interrupt
# Update the message to indicate not deleting....
self.logger.warn("%s Per command-line option, not deleting existing reservations.", msg)
msg2 = self.endPartiallyReserved(aggs=existingAggs, timeout=True)
msg = "%s %s" % (msg, msg2)
# Allow later code to raise this as an error
else:
self.logger.warn("%s Deleting any reservations...", msg)
class DumbLauncher():
def __init__(self, agglist):
self.aggs = agglist
try:
(delretText, delretStruct) = self.deleteAllReservations(DumbLauncher(existingAggs))
for am in existingAggs:
if am.manifestDom:
self.logger.warn("You have a reservation at %s", am)
except KeyboardInterrupt:
self.logger.error('... deleting interrupted!')
for am in existingAggs:
if am.manifestDom:
self.logger.warn("You have a reservation at %s", am)
raise StitchingError(msg)
self.logger.info("Pausing for %d seconds for Aggregates to free up resources...\n\n", sTime)
time.sleep(sTime)
# Done pausing to let AMs free resources
# Parse SCS Response, constructing objects and dependencies, validating return
if self.isStitching and not self.opts.noSCS:
self.parsedSCSRSpec, workflow_parser = self.parseSCSResponse(scsResponse)
scsResponse = None # Just to note we are done with this here (keep no state)
else:
# Fake out the data structures using the original user request RSpec
try:
xmlreq = requestDOM.toxml()
except Exception, xe:
self.logger.debug("Failed to XMLify requestDOM for parsing: %s", xe)
self._raise_omni_error("Malformed request RSpec: %s" % xe)
self.parsedSCSRSpec = self.rspecParser.parse(xmlreq)
workflow_parser = WorkflowParser(self.logger)
# Parse the workflow, creating Path/Hop/etc objects
# In the process, fill in a tree of which hops depend on which,
# and which AMs depend on which
# Also mark each hop with what hop it imports VLANs from,
# And check for AM dependency loops
workflow_parser.parse({}, self.parsedSCSRSpec)
# self.logger.debug("Did fake workflow parsing")
# Save off existing Aggregate object state
parsedURNExistingAggs = [] # Existing aggs that came from a parsed URN, not in workflow
self.parsedURNNewAggs = [] # New aggs created not from workflow
if existingAggs:
# Copy existingAggs.hops.vlans_unavailable to workflow_parser.aggs.hops.vlans_unavailable? Other state?
self.saveAggregateState(existingAggs, workflow_parser.aggs)
# An AM added only from parsed AM URNs will have state lost. Ticket #781
if self.parsedSCSRSpec:
# Look for existing aggs that came from parsed URN and aren't in workflow
for agg in existingAggs:
self.logger.debug("Looking at existing AM %s", agg)
isWorkflow = False
for agg2 in workflow_parser.aggs:
if agg.urn == agg2.urn or agg.urn in agg2.urn_syns:
self.logger.debug("Is a workflow AM; found AM's URN %s in workflow's AMs", agg.urn)
isWorkflow = True
break
else:
for urn2 in agg.urn_syns:
if urn2 == agg2.urn or urn2 in agg2.urn_syns:
self.logger.debug("Is a workflow AM based on urn_syn; found AM's urn_syn %s in workflow AM", urn2)
isWorkflow = True
break
if isWorkflow:
break
if isWorkflow:
continue
isParsed = False
if agg.urn in self.parsedSCSRSpec.amURNs:
self.logger.debug("isParsed from main URN %s", agg.urn)
isParsed = True
else:
for urn2 in agg.urn_syns:
if urn2 in self.parsedSCSRSpec.amURNs:
self.logger.debug("isParsed from urn syn %s", urn2)
isParsed = True
break
if not isParsed:
continue
# Have an AM that came from parsed URN and is not in the workflow.
# So this agg needs its data copied over.
# this agg wont be in ams_to_process
# need to do self.saveAggregateState(otherExistingAggs, newAggsFromURNs)
self.logger.debug("%s was not in workflow and came from parsed URN", agg)
parsedURNExistingAggs.append(agg)
# end loop over existing aggs
# End block to handle parsed URNs not in workflow
existingAggs = None # Now done
# FIXME: if notScript, print AM dependency tree?
# Ensure we are processing all the workflow aggs plus any aggs in the RSpec not in
# the workflow
self.ams_to_process = copy.copy(workflow_parser.aggs)
if self.isStitching and not self.opts.noSCS:
self.logger.debug("SCS workflow said to include resources from these aggregates:")
for am in self.ams_to_process:
self.logger.debug("\t%s", am)
# Ensure all AM URNs we found in the RSpec are Aggregate objects in ams_to_process
self.createObjectsFromParsedAMURNs()
# If we saved off some existing aggs that were from parsed URNs and not in the workflow earlier,
# and we also just created some new aggs, then see if those need to have existing data copied over
# Ticket #781
if len(parsedURNExistingAggs) > 0 and len(self.parsedURNNewAggs) > 0:
self.saveAggregateState(parsedURNExistingAggs, self.parsedURNNewAggs)
parsedURNExistingAggs = []
self.parsedURNNewAggs = []
# Add extra info about the aggregates to the AM objects
self.add_am_info(self.ams_to_process)
# FIXME: check each AM reachable, and we know the URL/API version to use
# If requesting from >1 ExoGENI AM, then use ExoSM. And use ExoSM only once.
self.ensureOneExoSM()
self.dump_objects(self.parsedSCSRSpec, self.ams_to_process)
self.logger.info("Multi-AM reservation will include resources from these aggregates:")
for am in self.ams_to_process:
self.logger.info("\t%s", am)
# If we said this rspec needs a fixed / fake endpoint, add it here - so the SCS and other stuff
# doesn't try to do anything with it
if self.opts.fixedEndpoint:
self.addFakeNode()
# DCN AMs seem to require there be at least one sliver_type specified
self.ensureSliverType()
# Change the requested VLAN tag to 'any' where we can, allowing
# The AM to pick from the currently available tags
self.changeRequestsToAny()
# Save slice cred and timeoutTime on each AM
for am in self.ams_to_process:
if self.slicecred:
# Hand each AM the slice credential, so we only read it once
am.slicecred = self.slicecred
# Also hand the timeout time
am.timeoutTime = self.config['timeoutTime']
# Exit if user specified --noReservation, saving expanded request RSpec
self.handleNoReservation()
# Check current VLAN tag availability before doing allocations
ret = self.updateAvailRanges(sliceurn, requestDOM)
if ret is not None:
return ret
# Exit if user specified --genRequest, saving more fully expanded request RSpec
self.handleGenRequest()
# The launcher handles calling the aggregates to do their allocation
# Create a launcher and run it. That in turn calls the Aggregates to do the allocations,
# where all the work happens.
# A StitchingCircuitFailedError is a transient or recoverable error. On such errors,
# recurse and call this main method again, re-calling the SCS and retrying reservations at AMs.
# A StitchingError is a permanent failure.
# On any error, delete any partial reservations.
launcher = stitch.Launcher(self.opts, self.slicename, self.ams_to_process, self.config['timeoutTime'])
try:
# Spin up the main loop
lastAM = launcher.launch(self.parsedSCSRSpec, self.scsCalls)
# for testing calling the SCS only many times
# raise StitchingCircuitFailedError("testing")
except StitchingCircuitFailedError, se:
# A StitchingCircuitFailedError is a transient or recoverable error. On such errors,
# recurse and call this main method again, re-calling the SCS and retrying reservations at AMs.
# On any error, delete any partial reservations.
# Do not recurse if we've hit the maxSCSCalls or if there's an error deleting
# previous reservations.
self.lastException = se
if self.opts.noDeleteAtEnd:
# User requested to not delete on interrupt
# Update the message to indicate not deleting....
self.logger.warn("Stitching failed. Would retry but commandline option specified not to. Last error: %s", se)
msg = self.endPartiallyReserved(se, aggs=self.ams_to_process)
# Exit by raising an error
raise StitchingError("Stitching failed due to: %s. %s" % (se, msg))
else:
if self.scsCalls == self.maxSCSCalls:
self.logger.error("Stitching max circuit failures reached - will delete and exit.")
try:
(delretText, delretStruct) = self.deleteAllReservations(launcher)
for am in launcher.aggs:
if am.manifestDom:
self.logger.warn("You have a reservation at %s", am)
except KeyboardInterrupt:
self.logger.error('... deleting interrupted!')
for am in launcher.aggs:
if am.manifestDom:
self.logger.warn("You have a reservation at %s", am)
raise StitchingError("Stitching reservation failed %d times. Last error: %s" % (self.scsCalls, se))
self.logger.warn("Stitching failed but will retry: %s", se)
success = False
try:
(delRetText, delRetStruct) = self.deleteAllReservations(launcher)
hadFail = False
for url in delRetStruct.keys():
if not delRetStruct[url]:
hadFail = True
break
if isinstance(delRetStruct[url], dict) and delRetStruct[url].has_key('code') and isinstance(delRetStruct[url]['code'], dict) and delRetStruct[url]['code'].has_key('geni_code') and delRetStruct[url]['code']['geni_code'] not in (0, 12, 15):
hadFail = True
break
if isinstance(delRetStruct[url], dict) and delRetStruct[url].has_key('code') and isinstance(delRetStruct[url]['code'], dict) and delRetStruct[url]['code'].has_key('geni_code') and delRetStruct[url]['code']['geni_code'] == 0 and delRetStruct[url].has_key('value') and isinstance(delRetStruct[url]['value'], list) and len(delRetStruct[url]['value']) > 0:
try:
for sliver in delRetStruct[url]["value"]:
status = sliver["geni_allocation_status"]
if status != 'geni_unallocated':
hadFail = True
break
if hadFail:
break
except:
# Malformed return I think
hadFail = True
# FIXME: Handle other cases...
if not hadFail:
success = True
except KeyboardInterrupt:
self.logger.error('... deleting interrupted!')
for am in launcher.aggs:
if am.manifestDom:
self.logger.warn("You have a reservation at %s", am)
if not success:
raise StitchingError("Stitching failed. Would retry but delete had errors. Last Stitching error: %s" % se)
# Flush the cache of aggregates. Loses all state. Avoids
# double adding hops to aggregates, etc. But we lose the vlans_unavailable. And ?
aggs = copy.copy(self.ams_to_process)
self.ams_to_process = None # Clear local memory of AMs to avoid issues
Aggregate.clearCache()
# construct new SCS args
# redo SCS call et al
# FIXME: aggs.hops have loose tag: mark the hops in the request as explicitly loose
# FIXME: Here we pass in the request to give to the SCS. I'd like this
# to be modified (different VLAN range? Some hops marked loose?) in future
lastAM = self.mainStitchingLoop(sliceurn, requestDOM, aggs)
except StitchingError, se:
# A StitchingError is a permanent failure.
# On any error, delete any partial reservations.
if not isinstance(se, StitchingStoppedError):
self.logger.error("Stitching failed with an error: %s", se)
if self.lastException:
self.logger.error("Root cause error: %s", self.lastException)
newError = StitchingError("%s which caused %s" % (str(self.lastException), str(se)))
se = newError
if self.opts.noDeleteAtEnd:
# User requested to not delete on interrupt
# Update the message to indicate not deleting....
self.logger.warn("Per commandline option, not deleting existing reservations.")
msg = self.endPartiallyReserved(se, aggs=self.ams_to_process)
# Create a new error with a new return msg and raise that
raise StitchingStoppedError("Stitching stopped. %s. %s" % (se, msg))
else:
try:
(delRetText, delRetStruct) = self.deleteAllReservations(launcher)
for am in launcher.aggs:
if am.manifestDom:
self.logger.warn("You have a reservation at %s", am)
except KeyboardInterrupt:
self.logger.error('... deleting interrupted!')
for am in launcher.aggs:
if am.manifestDom:
self.logger.warn("You have a reservation at %s", am)
#raise
raise se
return lastAM
def writeExpandedRequest(self, ams, requestDom):
    """Combine the per-AM requests into one expanded request RSpec and save it.

    Args:
        ams: list of Aggregate objects whose requests are merged together
        requestDom: base request RSpec DOM into which the per-AM requests are combined

    Returns:
        A human readable message saying where the expanded request was saved
        (or that it was only generated, if no filename resulted).
    """
    # Write the fully expanded/updated request RSpec to a file
    self.logger.debug("Generating updated combined request RSpec")
    combinedRequestDom = combineManifestRSpecs(ams, requestDom, useReqs=True)
    try:
        reqString = combinedRequestDom.toprettyxml(encoding="utf-8")
    except Exception, xe:
        self.logger.debug("Failed to XMLify combined Request RSpec: %s", xe)
        self._raise_omni_error("Malformed combined request RSpec: %s" % xe)
    reqString = stripBlankLines(reqString)
    # set rspec to be UTF-8
    if isinstance(reqString, unicode):
        reqString = reqString.encode('utf-8')
        self.logger.debug("Combined request RSpec was unicode")
    # FIXME: Handle errors. Maybe make return use code/value/output struct
    # If error and have an expanded request from SCS, include that in output.
    # Or if particular AM had errors, ID the AMs and errors
    # FIXME: This prepends a header on an RSpec that might already have a header
    # -- maybe replace any existing header
    # FIXME: We force -o here and keep it from logging the
    # RSpec. Do we need an option to not write the RSpec to a file?
    # Save the -o (write to file) option and force it on unless writing to stdout
    ot = self.opts.output
    if not self.opts.tostdout:
        self.opts.output = True
    if not self.opts.debug:
        # Suppress all but WARN on console here,
        # so the RSpec content itself is not echoed to the console
        lvl = self.logger.getEffectiveLevel()
        handlers = self.logger.handlers
        if len(handlers) == 0:
            handlers = logging.getLogger().handlers
        for handler in handlers:
            if isinstance(handler, logging.StreamHandler):
                lvl = handler.level
                handler.setLevel(logging.WARN)
                break
    retVal, filename = handler_utils._writeRSpec(self.opts, self.logger, reqString, None, '%s-expanded-request'%self.slicename, '', None)
    if not self.opts.debug:
        # Restore the console log level suppressed above
        handlers = self.logger.handlers
        if len(handlers) == 0:
            handlers = logging.getLogger().handlers
        for handler in handlers:
            if isinstance(handler, logging.StreamHandler):
                handler.setLevel(lvl)
                break
    # Restore the caller's -o option value
    self.opts.output = ot
    if filename:
        msg = "Saved expanded request RSpec at %d AM(s) to file '%s'" % (len(ams), os.path.abspath(filename))
    else:
        msg = "Generated expanded request RSpec"
    return msg
def handleGenRequest(self):
    """If the user passed --genRequest, save the expanded request RSpec and stop.

    Called from mainStitchingLoop. Writes the fully expanded request covering
    all AMs being processed, then raises StitchingError to halt processing.
    A no-op when the option was not given.
    """
    if not self.opts.genRequest:
        return
    # Write out the combined/expanded request covering all AMs we are processing
    savedMsg = self.writeExpandedRequest(self.ams_to_process, self.parsedSCSRSpec.dom)
    self.logger.info(savedMsg)
    # Halt here: the user asked only for the generated request, not a reservation
    raise StitchingError("Requested to only generate and save the expanded request")
def handleNoReservation(self):
    """If the user passed --noReservation, save the expanded request RSpec and stop.

    Used in mainStitchingLoop. When the option is set, writes the parsed and
    expanded request RSpec to a file (or stdout), then raises StitchingError
    to halt processing. Does nothing otherwise.

    Raises:
        StitchingError: always, when --noReservation was given
    """
    # Exit if user specified --noReservation, saving expanded request RSpec
    # Used in mainStitchingLoop
    if self.opts.noReservation:
        self.logger.info("Not reserving resources")
        # Write the request rspec to a string that we save to a file
        try:
            requestString = self.parsedSCSRSpec.dom.toxml(encoding="utf-8")
        except Exception, xe:
            self.logger.debug("Failed to XMLify parsed SCS request RSpec for saving: %s", xe)
            self._raise_omni_error("Malformed SCS expanded request RSpec: %s" % xe)
        header = "<!-- Expanded Resource request for:\n\tSlice: %s -->" % (self.slicename)
        if requestString is not None:
            # Un-escape literal "\n" sequences and drop blank lines for readability
            content = stripBlankLines(string.replace(requestString, "\\n", '\n'))
        else:
            self.logger.debug("None expanded request RSpec?")
            content = ""
        filename = None
        # Save the -o (write to file) option and force it on unless writing to stdout
        ot = self.opts.output
        if not self.opts.tostdout:
            self.opts.output = True
        if self.opts.output:
            filename = handler_utils._construct_output_filename(self.opts, self.slicename, '', None, "expanded-request-rspec", ".xml", 1)
        if filename:
            self.logger.info("Saving expanded request RSpec to file: %s", os.path.abspath(filename))
        else:
            self.logger.info("Expanded request RSpec:")
        if not self.opts.debug:
            # Suppress all but WARN on console here,
            # so the RSpec content itself is not echoed to the console
            lvl = self.logger.getEffectiveLevel()
            handlers = self.logger.handlers
            if len(handlers) == 0:
                handlers = logging.getLogger().handlers
            for handler in handlers:
                if isinstance(handler, logging.StreamHandler):
                    lvl = handler.level
                    handler.setLevel(logging.WARN)
                    break
        # Create FILE
        # This prints or logs results, depending on whether filename is None
        handler_utils._printResults(self.opts, self.logger, header, content, filename)
        if not self.opts.debug:
            # Restore the console log level suppressed above
            handlers = self.logger.handlers
            if len(handlers) == 0:
                handlers = logging.getLogger().handlers
            for handler in handlers:
                if isinstance(handler, logging.StreamHandler):
                    handler.setLevel(lvl)
                    break
        # Restore the caller's -o option value
        self.opts.output = ot
        raise StitchingError("Requested no reservation")
    # Done handling --noReservation
def createObjectFromOneURN(self, amURN):
    """Ensure there is an Aggregate object for the given AM URN in self.ams_to_process.

    If the URN (or any of its synonyms) already matches an aggregate in
    ams_to_process, this is a no-op. Otherwise construct the Aggregate,
    resolve its URL (first from the omni_config aggregate nicknames, then by
    asking the clearinghouse), and append it to both self.ams_to_process and
    self.parsedURNNewAggs.

    Args:
        amURN: the URN of the aggregate manager, as parsed from the RSpec

    Raises:
        StitchingError: if no URL can be determined for the URN
    """
    # If the AM URN we parsed from the RSpec is already in the list of aggregates to process,
    # skip to the next parsed URN
    found = False
    for agg in self.ams_to_process:
        if agg.urn == amURN:
            found = True
            break
        # For EG there are multiple URNs that are really the same.
        # If we find one, we found them all.
        for urn2 in agg.urn_syns:
            if urn2 == amURN:
                found = True
                break
    if found:
        return
    # AM URN was not in the workflow from the SCS
    am = Aggregate.find(amURN)
    # Fill in a URL for this AM.
    # First, look it up in the agg_nick_cache (omni_config AM nicknames)
    if not am.url:
        # FIXME: Avoid apparent v1 URLs
        for urn in am.urn_syns:
            (nick, url) = handler_utils._lookupAggNickURLFromURNInNicknames(self.logger, self.config, urn)
            if url and url.strip() != '':
                self.logger.debug("Found AM %s URL using URN %s from omni_config AM nicknames: %s", amURN, urn, nick)
                am.url = url
                am.nick = nick
                break
    # If that failed, try asking our CH for AMs to get the URL for the given URN
    if not am.url:
        fw_ams = dict()
        try:
            fw_ams = self.framework.list_aggregates()
            for fw_am_urn in fw_ams.keys():
                if fw_am_urn and fw_am_urn.strip() in am.urn_syns and fw_ams[fw_am_urn].strip() != '':
                    am.url = fw_ams[fw_am_urn]
                    self.logger.debug("Found AM %s URL from CH ListAggs: %s", amURN, am.url)
                    break
        except Exception as e:
            # Best effort: a CH failure just means we could not resolve the URL this way.
            # Log it rather than silently swallowing the error.
            self.logger.debug("Error asking clearinghouse for aggregates to find URL of %s: %s", amURN, e)
    if not am.url:
        raise StitchingError("RSpec requires AM '%s' which is not in workflow and URL is unknown!" % amURN)
    else:
        self.logger.debug("Adding am to ams_to_process from URN %s, with url %s", amURN, am.url)
        self.ams_to_process.append(am)
        self.parsedURNNewAggs.append(am) # Save off the new agg as something we just added
    return
def createObjectsFromOptArgs(self):
    """Create Aggregate objects for each -a/--aggregate nickname from the commandline.

    For use when merging manifests: each nickname is dereferenced to its URN
    and an Aggregate object is ensured for that URN.
    """
    for nickname in self.opts.aggregate:
        # _derefAggNick resolves an AM nickname/URL into a (url, urn) pair
        aggUrl, aggUrn = handler_utils._derefAggNick(self, nickname)
        self.createObjectFromOneURN(aggUrn)
def createObjectsFromParsedAMURNs(self):
    """Ensure every AM URN parsed from the RSpec has an Aggregate in ams_to_process."""
    rspec = self.parsedSCSRSpec
    if rspec is None:
        # Nothing parsed yet, so nothing to add
        return
    for parsedURN in rspec.amURNs:
        self.createObjectFromOneURN(parsedURN)
def updateAvailRanges(self, sliceurn, requestDOM):
    """Query AMs for current VLAN tag availability and propagate changes up import chains.

    For each AM where an availability query is worthwhile, refresh its VLAN
    ranges; when a range changed, intersect the new range up the chain of hops
    it imports tags from. If a chain becomes unsatisfiable, retry the whole
    stitching attempt (via mainStitchingLoop) unless the max SCS call count
    was reached.

    Returns:
        None normally; if a retry was triggered, the return value of the
        recursive mainStitchingLoop call (so the caller can return it as-is).

    Raises:
        StitchingError: if the maximum number of circuit failures is reached
    """
    # Check current VLAN tag availability before doing allocations
    # Loop over AMs. If I update an AM, then go to AMs that depend on it and intersect there (but don't redo avail query), and recurse.
    for am in self.ams_to_process:
        # If doing the avail query at this AM doesn't work or wouldn't help or we did it recently, move on
        if not am.doAvail(self.opts):
            self.logger.debug("Not checking VLAN availability at %s", am)
            continue
        self.logger.debug("Checking current availabilty at %s", am)
        madeChange = False
        try:
            madeChange = am.updateWithAvail(self.opts)
            if madeChange:
                # Must intersect the new ranges with others in the chain
                # We have already updated avail and checked request at this AM
                for hop in am.hops:
                    self.logger.debug("Applying updated availability up the chain for %s", hop)
                    while hop.import_vlans:
                        # Walk up the chain of hops this hop imports tags from,
                        # narrowing each one's available range by intersection
                        newHop = hop.import_vlans_from
                        oldRange = newHop._hop_link.vlan_range_request
                        newHop._hop_link.vlan_range_request = newHop._hop_link.vlan_range_request.intersection(hop._hop_link.vlan_range_request)
                        if oldRange != newHop._hop_link.vlan_range_request:
                            self.logger.debug("Reset range of %s to '%s' from %s", newHop, newHop._hop_link.vlan_range_request, oldRange)
                        else:
                            self.logger.debug("Availability unchanged at %s", newHop)
                        if len(newHop._hop_link.vlan_range_request) <= 0:
                            self.logger.debug("New available range is empty!")
                            raise StitchingCircuitFailedError("No VLANs possible at %s based on latest availability; Try again from the SCS" % newHop.aggregate)
                        if newHop._hop_link.vlan_suggested_request != VLANRange.fromString("any") and not newHop._hop_link.vlan_suggested_request <= newHop._hop_link.vlan_range_request:
                            self.logger.debug("Suggested (%s) is not in reset available range - mark it unavailable and raise an error!", newHop._hop_link.vlan_suggested_request)
                            newHop.vlans_unavailable = newHop.vlans_unavailable.union(newHop._hop_link.vlan_suggested_request)
                            raise StitchingCircuitFailedError("Requested VLAN unavailable at %s based on latest availability; Try again from the SCS" % newHop)
                        else:
                            self.logger.debug("Suggested (%s) still in reset available range", newHop._hop_link.vlan_suggested_request)
                        hop = newHop
                    # End of loop up the imports chain for this hop
                # End of loop over all hops on this AM where we just updated availability
                self.logger.debug("Done applying updated availabilities from %s", am)
            else:
                self.logger.debug("%s VLAN availabilities did not change. Done with this AM", am)
            # End of block to only update avails up the chain if we updated availability on this AM
        except StitchingCircuitFailedError, se:
            self.lastException = se
            if self.scsCalls == self.maxSCSCalls:
                self.logger.error("Stitching max circuit failures reached")
                raise StitchingError("Stitching reservation failed %d times. Last error: %s" % (self.scsCalls, se))
            # FIXME: If we aren't doing stitching so won't be calling the SCS, then does it ever make sense
            # to try this again here? For example, EG Embedding workflow ERROR?
            # if not self.isStitching:
            #     self.logger.error("Reservation failed and not reasonable to retry - not a stitching request.")
            #     raise StitchingError("Multi AM reservation failed. Not stitching so cannot retry with new path. %s" % se)
            self.logger.warn("Stitching failed but will retry: %s", se)
            # Flush the cache of aggregates. Loses all state. Avoids
            # double adding hops to aggregates, etc. But we lose the vlans_unavailable. And ?
            aggs = copy.copy(self.ams_to_process)
            self.ams_to_process = None # Clear local memory of AMs to avoid issues
            Aggregate.clearCache()
            # construct new SCS args
            # redo SCS call et al
            return self.mainStitchingLoop(sliceurn, requestDOM, aggs)
        # End of exception handling block
    # End of loop over AMs getting current availability
    return None # Not an AM return so don't return it in the main block
def changeRequestsToAny(self):
    """Where safe, change a hop's requested VLAN tag to 'any' so the AM picks the tag.

    A hop can request 'any' when its AM supports that, the user did not ask
    to stick to SCS suggestions, and the hop either produces its own VLAN tag
    or only imports tags from another hop at the same AM that itself can
    request 'any'. Hops importing tags from a different AM must keep the
    suggested tag. Mutates hop._hop_link.vlan_suggested_request in place.
    """
    # Change requested VLAN tags to 'any' where appropriate
    # Check the AMs: For each hop that says it is a VLAN producer / imports no VLANs, lets change the suggested request to "any".
    # That should ensure that that hop succeeds the first time through. Hopefully the SCS has set up the avail ranges to work throughout
    # the path, so everything else will just work as well.
    # In APIv3, a failure later is just a negotiation case (we'll get a new tag to try). In APIv2, a later failure is a pseudo negotiation case.
    # That is, we can go back to the 'any' hop and exclude the failed tag, deleting that reservation, and try again.
    # FIXME: In schema v2, the logic for where to figure out if it is a consumer or producer is more complex. But for now, the hoplink says,
    # and the hop indicates if it imports vlans.
    # While doing this, make sure the tells for whether we can tell the hop to pick the tag are consistent.
    if self.opts.useSCSSugg:
        self.logger.info("Per option, requesting SCS suggested VLAN tags")
        return
    for am in self.ams_to_process:
        if self.opts.useSCSSugg:
            #self.logger.info("Per option, requesting SCS suggested VLAN tags")
            continue
        if not am.supportsAny():
            self.logger.debug("%s doesn't support requesting 'any' VLAN tag - move on", am)
            continue
        # Could a complex topology have some hops producing VLANs and some accepting VLANs at the same AM?
        # if len(am.dependsOn) == 0:
        #     self.logger.debug("%s says it depends on no other AMs", am)
        for hop in am.hops:
            # Init requestAny so we never request 'any' when option says not or it is one of the non-supported AMs
            requestAny = not self.opts.useSCSSugg and am.supportsAny()
            if not requestAny:
                continue
            # Track what we learn about this hop so the later sanity checks are consistent
            isConsumer = False
            isProducer = False
            imports = False
            if hop._hop_link.vlan_consumer:
                # self.logger.debug("%s says it is a vlan consumer. In itself, that is OK", hop)
                isConsumer = True
            if hop._import_vlans:
                if hop.import_vlans_from._aggregate != hop._aggregate:
                    # Imports from a different AM: must keep the suggested tag
                    imports = True
                    self.logger.debug("%s imports VLANs from another AM, %s. Don't request 'any'.", hop, hop.import_vlans_from)
                    if len(am.dependsOn) == 0:
                        self.logger.warn("%s imports VLANs from %s but the AM says it depends on no AMs?!", hop, hop.import_vlans_from)
                    requestAny = False
                else:
                    # This hop imports tags from another hop on the same AM.
                    # So we want this hop to do what that other hop does. So if that other hop is changing to any, this this
                    # hop should change to any.
                    hop2 = hop.import_vlans_from
                    if hop2._import_vlans and hop2.import_vlans_from._aggregate != hop2._aggregate:
                        imports = True
                        requestAny = False
                        self.logger.debug("%s imports VLANs from %s which imports VLANs from a different AM (%s) so don't request 'any'.", hop, hop2, hop2._import_vlans_from)
                    elif not hop2._hop_link.vlan_producer:
                        self.logger.debug("%s imports VLANs from %s which does not say it is a vlan producer. Don't request 'any'.", hop, hop2)
                        requestAny = False
                    else:
                        self.logger.debug("%s imports VLANs from %s which is OK to request 'any', so this hop should request 'any'.", hop, hop2)
            if not hop._hop_link.vlan_producer:
                if not imports and not isConsumer:
                    # See http://groups.geni.net/geni/ticket/1263 and http://groups.geni.net/geni/ticket/1262
                    if not am.supportsAny():
                        self.logger.debug("%s doesn't import VLANs and not marked as either a VLAN producer or consumer. But it is an EG or GRAM or OESS or DCN AM, where we cannot assume 'any' works.", hop)
                        requestAny = False
                    else:
                        # If this hop doesn't import and isn't explicitly marked as either a consumer or a producer, then
                        # assume it is willing to produce a VLAN tag
                        self.logger.debug("%s doesn't import and not marked as either a VLAN producer or consumer. Assuming 'any' is OK.", hop)
                        requestAny = True
                else:
                    if requestAny:
                        self.logger.debug("%s does not say it is a vlan producer. Don't request 'any'.", hop)
                        requestAny = False
                    else:
                        self.logger.debug("%s does not say it is a vlan producer. Still not requesting 'any'.", hop)
            else:
                isProducer = True
                self.logger.debug("%s marked as a VLAN producer", hop)
            if not requestAny and not imports and not isConsumer and not isProducer:
                if not am.supportsAny():
                    self.logger.debug("%s doesn't import VLANs and not marked as either a VLAN producer or consumer. But it is an EG or GRAM or OESS or DCN AM, where we cannot assume 'any' works.", hop)
                else:
                    # If this hop doesn't import and isn't explicitly marked as either a consumer or a producer, then
                    # assume it is willing to produce a VLAN tag
                    self.logger.debug("%s doesn't import VLANs and not marked as either a VLAN producer or consumer. Assuming 'any' is OK.", hop)
                    requestAny = True
            if self.opts.useSCSSugg and requestAny:
                self.logger.info("Would request 'any', but user requested to stick to SCS suggestions.")
            elif requestAny:
                if len(am.dependsOn) != 0:
                    self.logger.debug("%s appears OK to request tag 'any', but the AM says it depends on other AMs?", hop)
                if hop._hop_link.vlan_suggested_request != VLANRange.fromString("any"):
                    self.logger.debug("Changing suggested request tag from %s to 'any' on %s", hop._hop_link.vlan_suggested_request, hop)
                    hop._hop_link.vlan_suggested_request = VLANRange.fromString("any")
                # else:
                #     self.logger.debug("%s suggested request was already 'any'.", hop)
        # End of loop over hops in AM
    # End of loop over AMs to process
def deleteAllReservations(self, launcher):
    '''On error exit, ensure all outstanding reservations are deleted.

    Returns a (retText, retStruct) tuple: retText is accumulated human
    readable result text; retStruct maps am.url to True/False or the raw
    AM return when it could not be interpreted.
    '''
    # Try to combine v2 and v3 results together
    # Text is just appended
    # all results in struct are keyed by am.url
    # For v3, this is therefore same as before
    # v2 return used to be (successURLs, failedURLs)
    # But that's hard to preserve
    # So instead, the v2 return is True if the AM was found in the success list, False if found in Failed list,
    # and otherwise the return under the am.url is whatever the AM originally returned.
    # Note that failing to find the AM url may mean it's a variant of the URL
    loggedDeleting = False  # Only log the "Deleting..." banner once
    retText = ""
    retStruct = {}
    if len(launcher.aggs) == 0:
        self.logger.debug("0 aggregates from which to delete")
    for am in launcher.aggs:
        # Only AMs with a manifest have an actual reservation to delete
        if am.manifestDom:
            if not loggedDeleting:
                loggedDeleting = True
                self.logger.info("Deleting existing reservations...")
            self.logger.debug("Had reservation at %s", am)
            try:
                (text, result) = am.deleteReservation(self.opts, self.slicename)
                self.logger.info("Deleted reservation at %s.", am)
                if text is not None and text.strip() != "":
                    if retText != "":
                        retText += "\n %s" % text
                    else:
                        retText = text
                if am.api_version < 3 or not isinstance(result, dict):
                    # AM API v2 style return
                    if not (isinstance(result, tuple) and isinstance(result[0], list)):
                        if result is None and text.startswith("Success"):
                            retStruct[am.url] = True
                        else:
                            # Some kind of error
                            self.logger.debug("Struct result from delete or deletesliver unknown from %s: %s", am, result)
                            retStruct[am.url] = result
                    else:
                        # v2 returned a (successURLs, failedURLs) tuple
                        (succ, fail) = result
                        # FIXME: Do the handler_utils tricks for comparing URLs?
                        if am.url in succ or am.alt_url in succ:
                            retStruct[am.url] = True
                        elif am.url in fail or am.alt_url in fail:
                            retStruct[am.url] = False
                        else:
                            self.logger.debug("Failed to find AM URL in v2 deletesliver return struct. AM %s, return %s", am, result)
                            retStruct[am.url] = result
                else:
                    # v3 style dict return: merge into the accumulated struct
                    retCopy = retStruct.copy()
                    retCopy.update(result)
                    retStruct = retCopy
            except StitchingError, se2:
                msg = "Failed to delete reservation at %s: %s" % (am, se2)
                self.logger.warn(msg)
                retStruct[am.url] = False
                if retText != "":
                    retText += "\n %s" % msg
                else:
                    retText = msg
    if retText == "":
        retText = "No aggregates with reservations from which to delete"
    return (retText, retStruct)
def confirmGoodRSpec(self, requestString, rspecType=rspec_schema.REQUEST, doRSpecLint=True):
    '''Ensure an rspec is valid'''
    # Label used in all the error messages below
    if rspecType == rspec_schema.MANIFEST:
        typeStr = 'Manifest'
    else:
        typeStr = 'Request'
    # Must be a non-empty string that actually contains an RSpec
    if requestString is None or not str(requestString).strip():
        raise OmniError("Empty %s rspec" % typeStr)
    if not is_rspec_string(requestString, None, None, logger=self.logger):
        raise OmniError("%s RSpec file did not contain an RSpec" % typeStr)
    # FIXME: ION does not support PGv2 schema RSpecs. Stitcher doesn't mind, and PG AMs don't mind, but
    # this if the request is PGv2 and crosses ION this may cause trouble.
    # Accept either the GENI 3 or ProtoGENI 2 schema (checked in that order)
    looksValid = is_rspec_of_type(requestString, rspecType, "GENI 3", False) or \
                 is_rspec_of_type(requestString, rspecType, "ProtoGENI 2", False)
    if not looksValid:
        if self.opts.devmode:
            self.logger.info("RSpec of wrong type or schema, but continuing...")
        else:
            raise OmniError("%s RSpec file did not contain a %s RSpec (wrong type or schema)" % (typeStr, typeStr))
    # Optionally validate against the schemas with rspeclint
    if not doRSpecLint:
        return
    try:
        rspeclint_exists()
    except:
        # Best effort: without the rspeclint tool we simply skip validation
        self.logger.debug("No rspeclint found")
        return
    # FIXME: Make this support GENIv4+? PGv2?
    if rspecType == rspec_schema.MANIFEST:
        schemaToUse = rspec_schema.GENI_3_MAN_SCHEMA
    else:
        schemaToUse = rspec_schema.GENI_3_REQ_SCHEMA
    if not validate_rspec(requestString, rspec_schema.GENI_3_NAMESPACE, schemaToUse):
        raise OmniError("%s RSpec does not validate against its schemas" % typeStr)
def confirmSliceOK(self):
    '''Ensure the given slice name corresponds to a current valid slice,
    and return the Slice URN and expiration datetime.

    Side effects: sets self.slicehrn, self.slicecred, self.savedSliceCred,
    and may rewrite self.opts.slicecredfile to point at a saved credential
    file. Raises StitchingError if the slice URN cannot be determined, no
    credential can be retrieved, or the slice has expired.'''
    self.logger.info("Reading slice %s credential...", self.slicename)

    # Get slice URN from name
    try:
        sliceurn = self.framework.slice_name_to_urn(self.slicename)
    except Exception, e:
        self.logger.error("Could not determine slice URN from name %s: %s", self.slicename, e)
        raise StitchingError(e)

    self.slicehrn = urn_to_clean_hrn(sliceurn)[0]

    # In fake/no-reservation/generate-only modes there is no need for a real
    # credential; report "never expires" so downstream checks pass.
    if self.opts.fakeModeDir:
        self.logger.info("Fake mode: not checking slice credential")
        return (sliceurn, naiveUTC(datetime.datetime.max))

    if self.opts.noReservation:
        self.logger.info("Requested noReservation: not checking slice credential")
        return (sliceurn, naiveUTC(datetime.datetime.max))

    if self.opts.genRequest:
        self.logger.info("Requested to only generate the request: not checking slice credential")
        return (sliceurn, naiveUTC(datetime.datetime.max))

    # Get slice cred
    (slicecred, message) = handler_utils._get_slice_cred(self, sliceurn)

    if not slicecred:
        # FIXME: Maybe if the slice doesn't exist, create it?
        # omniargs = ["createslice", self.slicename]
        # try:
        #     (slicename, message) = omni.call(omniargs, self.opts)
        # except:
        #     pass
        raise StitchingError("Could not get a slice credential for slice %s: %s" % (sliceurn, message))

    self.slicecred = slicecred

    self.savedSliceCred = False

    # Force the slice cred to be from a saved file if not already set,
    # expanding %username/%slicename/%slicehrn placeholders in the path.
    if not self.opts.slicecredfile:
        self.opts.slicecredfile = os.path.join(os.getenv("TMPDIR", os.getenv("TMP", "/tmp")), SLICECRED_FILENAME)
        if "%username" in self.opts.slicecredfile:
            self.opts.slicecredfile = string.replace(self.opts.slicecredfile, "%username", self.username)
        if "%slicename" in self.opts.slicecredfile:
            self.opts.slicecredfile = string.replace(self.opts.slicecredfile, "%slicename", self.slicename)
        if "%slicehrn" in self.opts.slicecredfile:
            self.opts.slicecredfile = string.replace(self.opts.slicecredfile, "%slicehrn", self.slicehrn)
        self.opts.slicecredfile = os.path.normpath(self.opts.slicecredfile)
        if self.opts.fileDir:
            self.opts.slicecredfile = prependFilePrefix(self.opts.fileDir, self.opts.slicecredfile)
        # Trim the extension before saving; _save_cred appends its own.
        trim = -4
        if self.opts.slicecredfile.endswith("json"):
            trim = -5
        # -4 is to cut off .xml. It would be -5 if the cred is json
        #self.logger.debug("Saving slice cred %s... to %s", str(slicecred)[:15], self.opts.slicecredfile[:trim])
        self.opts.slicecredfile = handler_utils._save_cred(self, self.opts.slicecredfile[:trim], slicecred)
        self.savedSliceCred = True

    # Ensure slice not expired; warn when expiration is near.
    sliceexp = credutils.get_cred_exp(self.logger, slicecred)
    sliceexp = naiveUTC(sliceexp)
    now = datetime.datetime.utcnow()
    shorthours = 3  # warn threshold (hours)
    middays = 1     # info threshold (days)

    if sliceexp <= now:
        # FIXME: Maybe if the slice doesn't exist, create it?
        # omniargs = ["createslice", self.slicename]
        # try:
        #     (slicename, message) = omni.call(omniargs, self.opts)
        # except:
        #     pass
        raise StitchingError("Slice %s expired at %s" % (sliceurn, sliceexp))
    elif sliceexp - datetime.timedelta(hours=shorthours) <= now:
        self.logger.warn('Slice %s expires in <= %d hours on %s UTC' % (sliceurn, shorthours, sliceexp))
        self.logger.debug('It is now %s UTC' % (datetime.datetime.utcnow()))
    elif sliceexp - datetime.timedelta(days=middays) <= now:
        self.logger.info('Slice %s expires within %d day on %s UTC' % (sliceurn, middays, sliceexp))
    else:
        self.logger.info('Slice %s expires on %s UTC' % (sliceurn, sliceexp))

    # return the slice urn, slice expiration (datetime)
    return (sliceurn, sliceexp)
    # End of confirmSliceOK
# Ensure the link has well formed property elements for cross-AM links each with a capacity
# Really there could be multiple AMs on the link, and each cross-AM link could have different properties,
# and properties are unidirectional so capacities could differ in different directions
# For now, the first 2 different AMs get properties
def addCapacityOneLink(self, link):
    '''Ensure a cross-AM link has a pair of property elements, each with an
    explicit capacity (defaulting to self.opts.defaultCapacity).

    Mutates link.properties in place. Raises StitchingError for malformed
    property elements. Returns None in all cases.'''
    # look for property elements
    if len(link.properties) > 2:
        # raise StitchingError("Your request RSpec is malformed: include either 2 or 0 property elements on link '%s'" % link.id)
        self.logger.debug("Request RSpec has %d property elements on link '%s'", len(link.properties), link.id)

    # Get the 2 node IDs
    ifcs = link.interfaces
    if len(ifcs) < 2:
        self.logger.debug("Link '%s' doesn't have at least 2 interfaces? Has %d", link.id, len(ifcs))
        # If there is a stitching extension path for this, then this is a stitched link.
        # Theoretically that means we want a property so SCS can put this in the stitching extension,
        # but the stitching extension already exists
        return
    if len(ifcs) > 2:
        self.logger.debug("Link '%s' has more than 2 interfaces (%d). Picking source and dest from the first 2 on different AMs.", link.id, len(ifcs))

    # FIXME: Create a list of AM pairs, so I can look for 1 or 2 properties for each pair, and ensure
    # each has a capacity. AM pairs means 2 interface_refs whose nodes are at different AMs
    # Create a mapping of AM -> interface_id. Then can find the pairs of AMs and ensure there's a property for each,
    # and use that interface_id for the property.
    amToIfc = {}
    for ifc in ifcs:
        cid = ifc.client_id
        idam = None
        # Find which node (and hence AM) this interface lives on
        for node in self.parsedUserRequest.nodes:
            if cid in node.interface_ids:
                idam = node.amURN
                break
        # Keep only the first interface seen per AM
        if idam and idam not in amToIfc:
            amToIfc[idam] = cid

    self.logger.debug("Link '%s' has interfaces on %d AMs", link.id, len(amToIfc.keys()))
    if len(amToIfc.keys()) > 0:
        node1AM = amToIfc.keys()[0]
        node1ID = amToIfc[node1AM]

    # Now find a 2nd interface on a different AM
    node2ID = None
    node2AM = None
    if len(amToIfc.keys()) > 1:
        keys = amToIfc.keys()
        node2AM = keys[1]
        if node2AM == node1AM:
            node2AM = keys[0]
        node2ID = amToIfc[node2AM]
    if node2AM is None:
        # No 2nd interface on different AM found
        self.logger.debug("Link '%s' doesn't have interfaces on more than 1 AM ('%s')?" % (link.id, node1AM))
        # Even if this is a stitched link, the stitching extensino would already have capacity
        return
    else:
        # FIXME: Eventually want all the pairs to have properties
        self.logger.debug("Link '%s' properties will be from '%s' to '%s'", link.id, node1ID, node2ID)

    # If we get here, the link crosses 2+ AMs
    # FIXME: Really I want properties between every pair of AMs (not nodes), and not
    # just the first 2 different AMs

    # If there are no property elements
    if len(link.properties) == 0:
        self.logger.debug("Link '%s' had no properties - must add them", link.id)
        # Then add them: one property in each direction with the default capacity
        s_id = node1ID
        d_id = node2ID
        s_p = LinkProperty(s_id, d_id, None, None, self.opts.defaultCapacity)
        s_p.link = link
        d_p = LinkProperty(d_id, s_id, None, None, self.opts.defaultCapacity)
        d_p.link = link
        link.properties = [s_p, d_p]
        return

    # Error check properties:
    for prop in link.properties:
        if prop.source_id is None or prop.source_id == "":
            raise StitchingError("Malformed property on link '%s' missing source_id attribute" % link.id)
        if prop.dest_id is None or prop.dest_id == "":
            raise StitchingError("Malformed property on link '%s' missing dest_id attribute" % link.id)
        if prop.dest_id == prop.source_id:
            raise StitchingError("Malformed property on link '%s' has matching source and dest_id: '%s'" % (link.id, prop.dest_id))

    # If the elements are there, error check them, adding property if necessary
    # FIXME: Generalize this to find any pair of properties that is reciprocal to ensure that if 1 has a capacity, the other has same
    if len(link.properties) == 2:
        props = link.properties
        prop1S = props[0].source_id
        prop1D = props[0].dest_id
        prop2S = props[1].source_id
        prop2D = props[1].dest_id
        # FIXME: Compare to the interface_refs
        if prop1S != prop2D or prop1D != prop2S:
            # raise StitchingError("Malformed properties on link '%s': source and dest tags are not reversed" % link.id)
            # This could happen if >2 ifcs and 2 asymetric props
            # But it could also mean a single property is duplicated
            self.logger.debug("On link '%s': source and dest tags are not reversed" % link.id)
        else:
            # Reciprocal pair: copy a capacity from one direction to the other if missing
            if props[0].capacity and not props[1].capacity:
                props[1].capacity = props[0].capacity
            if props[1].capacity and not props[0].capacity:
                props[0].capacity = props[1].capacity
            # FIXME: Warn about really small or big capacities?
        return
    # End of handling have 2 current properties

    for prop in link.properties:
        # If this is a cross AM property, then it should have an explicit capacity
        sourceAM = None
        destAM = None
        for node in self.parsedUserRequest.nodes:
            if prop.source_id in node.interface_ids:
                sourceAM = node.amURN
            if prop.dest_id in node.interface_ids:
                destAM = node.amURN
            if sourceAM and destAM:
                break
        if sourceAM and destAM and sourceAM != destAM:
            if prop.capacity is None or prop.capacity == "":
                prop.capacity = self.opts.defaultCapacity
            # FIXME: Warn about really small or big capacities?

    # FIXME: Do we need the reciprocal property?
    # # Create the 2nd property with the source and dest reversed
    # prop2 = LinkProperty(prop.dest_id, prop.source_id, prop.latency, prop.packet_loss, prop.capacity)
    # link.properties = [prop, prop2]
    # self.logger.debug("Link '%s' added missing reverse property", link.id)
    # End of addCapacityOneLink
# Make every AM that is implicit on a link (via interface_ref -> node ->
# component_manager_id) an explicit member of link.aggregates.
def ensureLinkListsAMs(self, link, requestRSpecObject):
    """Add to link.aggregates any AM whose node hosts one of the link's interfaces.

    No-op when link is falsy. Mutates link.aggregates in place."""
    if not link:
        return

    amURNs = []
    for iface in link.interfaces:
        # Locate the node that owns this interface; for/else fires when none does.
        for node in requestRSpecObject.nodes:
            if iface.client_id not in node.interface_ids:
                continue
            if node.amURN is not None and node.amURN not in amURNs:
                amURNs.append(node.amURN)
            self.logger.debug("Link '%s' interface '%s' found on node '%s'", link.id, iface.client_id, node.id)
            break
        else:
            self.logger.debug("Link '%s' interface '%s' not found on any node", link.id, iface.client_id)
            # FIXME: What would this mean?

    for urn in amURNs:
        agg = Aggregate.find(urn)
        if agg not in link.aggregates:
            self.logger.debug("Adding missing AM %s to link '%s'", urn, link.id)
            link.aggregates.append(agg)
# End of ensureLinkListsAMs
def hasGRELink(self, requestRSpecObject):
    """Return True if the request RSpec contains a (E)GRE tunnel link.

    Side effect: ensures every link explicitly lists all known
    component managers (via ensureLinkListsAMs)."""
    if not requestRSpecObject:
        return False

    sawGRE = False
    for link in requestRSpecObject.links:
        # Make the link's aggregate list explicit so the AM-count test below is valid.
        self.ensureLinkListsAMs(link, requestRSpecObject)

        # Only (e)gre_tunnel typed links qualify.
        if link.typeName not in (link.GRE_LINK_TYPE, link.EGRE_LINK_TYPE):
            # self.logger.debug("Link %s not GRE but %s", link.id, link.typeName)
            continue

        # A usable GRE link spans exactly 2 AMs via exactly 2 interface_refs.
        if len(link.aggregates) != 2:
            self.logger.warn("Link '%s' is a GRE link with %d AMs?", link.id, len(link.aggregates))
            continue
        if len(link.interfaces) != 2:
            self.logger.warn("Link '%s' is a GRE link with %d interfaces?", link.id, len(link.interfaces))
            continue

        sawGRE = True
        for iface in link.interfaces:
            # Warn (but proceed) when an interface_ref is on no known node.
            # NOTE(review): we would like to require the endpoints be PG nodes,
            # but getversion has not run yet so the AM type is unknown here.
            onKnownNode = any(iface.client_id in node.interface_ids
                              for node in requestRSpecObject.nodes)
            if not onKnownNode:
                self.logger.warn("GRE link '%s' has unknown interface_ref '%s' - assuming it is OK", link.id, iface.client_id)
        if sawGRE:
            self.logger.debug("Link '%s' is GRE", link.id)
        # Extra: ensure endpoints are xen for link type egre, openvz or rawpc for gre
    # End of loop over links
    return sawGRE
# End of hasGRELink
def mustCallSCS(self, requestRSpecObject):
    '''Does this request actually require stitching?
    Check: >=1 link in main body with >= 2 diff component_manager
    names and no shared_vlan extension and no non-VLAN link_type
    '''
    # side effects
    # - links list known component_managers
    # - links have 2 well formed property elements with explicit capacities
    if not requestRSpecObject:
        return False

    needSCS = False
    for link in requestRSpecObject.links:
        # Make sure links explicitly lists all its aggregates, so this test is valid
        self.ensureLinkListsAMs(link, requestRSpecObject)

        # A stitching candidate: multi-AM, VLAN-typed, not shared-vlan
        if len(link.aggregates) > 1 and not link.hasSharedVlan and link.typeName == link.VLAN_LINK_TYPE:
            # Ensure this link has 2 well formed property elements with explicit capacities
            self.addCapacityOneLink(link)
            self.logger.debug("Requested link '%s' is stitching", link.id)

            # Links that are ExoGENI only use ExoGENI stitching, not the SCS
            # So only if the link includes anything non-ExoGENI, we use the SCS
            egOnly = True
            for am in link.aggregates:
                # I wish I could do am.isEG but we don't get that info until later.
                # Hack! (substring match on the URN)
                if 'exogeni' not in am.urn:
                    needSCS = True
                    egOnly = False
                    break

            if egOnly:
                self.logger.debug("Link '%s' is only ExoGENI, so can use ExoGENI stitching.", link.id)
                if needSCS:
                    self.logger.debug("But we already decided we need the SCS.")
                elif self.opts.noEGStitching and not needSCS:
                    # User forced GENI stitching globally
                    self.logger.info("Requested to use GENI stitching instead of ExoGENI stitching")
                    needSCS = True
                elif self.opts.noEGStitchingOnLink and link.id in self.opts.noEGStitchingOnLink and not needSCS:
                    # User forced GENI stitching for this particular link
                    self.logger.info("Requested to use GENI stitching on link %s instead of ExoGENI stitching", link.id)
                    needSCS = True

            # FIXME: If the link includes the openflow rspec extension marking a desire to make the link
            # be OF controlled, then use the SCS and GENI stitching?
        # End of block to handle likely stitching link

        # FIXME: Can we be robust to malformed requests, and stop and warn the user?
        # EG the link has 2+ interface_ref elements that are on 2+ nodes belonging to 2+ AMs?
        # Currently the parser only saves the IRefs on Links - no attempt to link to Nodes
        # And for Nodes, we don't even look at the Interface sub-elements
    # End of loop over links

    return needSCS
def callSCS(self, sliceurn, requestDOM, existingAggs):
    '''Construct SCS args, call the SCS service'''
    # - Construct the args
    # - Call ComputePath
    # - raise an informative error if necessary
    # - if --debug, save scs-result.json
    # - return scsResponse
    requestString, scsOptions = self.constructSCSArgs(requestDOM, existingAggs)
    existingAggs = None # Clear to note we are done
    self.scsService.result = None # Avoid any unexpected issues

    self.logger.debug("Calling SCS with options %s", scsOptions)
    if self.opts.savedSCSResults:
        self.logger.debug("** Not actually calling SCS, using results from '%s'", self.opts.savedSCSResults)
    try:
        scsResponse = self.scsService.ComputePath(sliceurn, requestString, scsOptions, self.opts.savedSCSResults)
    except StitchingError as e:
        self.logger.debug("Error from slice computation service: %s", e)
        raise
    except Exception as e:
        # FIXME: If SCS used dossl then that might handle many of these errors.
        # Alternatively, the SCS could handle these itself.
        # Translate common low-level failures into actionable messages.
        excName = e.__class__.__name__
        strE = str(e)
        if strE == '':
            strE = excName
        elif strE == "''":
            strE = "%s: %s" % (excName, strE)
        if strE.startswith('BadStatusLine'):
            # Did you call scs with http when https was expected?
            url = self.opts.scsURL.lower()
            if '8443' in url and not url.startswith('https'):
                strE = "Bad SCS URL: Use https for a SCS requiring SSL (running on port 8443). (%s)" % strE
        elif 'unknown protocol' in strE:
            url = self.opts.scsURL.lower()
            if url.startswith('https'):
                strE = "Bad SCS URL: Try using http not https. (%s)" % strE
        elif '404 Not Found' in strE:
            strE = 'Bad SCS URL (%s): %s' % (self.opts.scsURL, strE)
        elif 'Name or service not known' in strE:
            strE = 'Bad SCS host (%s): %s' % (self.opts.scsURL, strE)
        elif 'alert unknown ca' in strE:
            # SSL handshake failed: the SCS does not trust our user cert's CA
            try:
                certObj = gid.GID(filename=self.framework.cert)
                certiss = certObj.get_issuer()
                certsubj = certObj.get_urn()
                self.logger.debug("SCS gave exception: %s", strE)
                strE = "SCS does not trust the CA (%s) that signed your (%s) user certificate! Use an account at another clearinghouse or find another SCS server." % (certiss, certsubj)
            except:
                strE = 'SCS does not trust your certificate. (%s)' % strE
        self.logger.error("Exception from slice computation service: %s", strE)
        import traceback
        self.logger.debug("%s", traceback.format_exc())
        raise StitchingError("SCS gave error: %s" % strE)
    # Done SCS call error handling

    self.logger.debug("SCS successfully returned.");

    if self.opts.debug:
        # Save the raw SCS result JSON for post-mortem debugging
        scsresfile = prependFilePrefix(self.opts.fileDir, "scs-result.json")
        self.logger.debug("Writing SCS result JSON to %s" % scsresfile)
        with open (scsresfile, 'w') as file:
            file.write(stripBlankLines(str(json.dumps(self.scsService.result, encoding='ascii', cls=DateTimeAwareJSONEncoder))))

    self.scsService.result = None # Clear memory/state
    return scsResponse
    # Done callSCS
def constructSCSArgs(self, requestDOM, existingAggs=None):
    '''Build and return the string rspec request and options arguments for calling the SCS.'''
    # return requestString and options
    # Handles --noEGStitching, --includeHop, --excludeHop, --noEGSttichingOnLink, --includeHopOnPath
    # Also handles requesting to avoid any VLAN tags found to be unavailable on the hops

    options = {}
    # options is a struct

    # Supply the SCS option that requests the
    # '##all_paths_merged##' path in the workflow.
    # Doing so forces SCS to detect cross path workflow loops for
    # us.
    # Note that in omnilib/stitch/workflow we ignore that "path"
    # currently, and construct our own workflow
    options[scs.GENI_PATHS_MERGED_TAG] = True

    if self.opts.noEGStitching:
        # User requested no EG stitching. So ask SCS to find a GENI path
        # for all EG links
        options[scs.ATTEMPT_PATH_FINDING_TAG] = True

    # To exclude a hop, add a geni_routing_profile struct
    # This in turn should have a struct per path whose name is the path name
    # Each shuld have a hop_exclusion_list array, containing the names of hops
    # If you append '=<VLANRange>' to the hop URN, that means to exclude
    # that set of VLANs from consideration on that hop, but don't entirely exclude
    # the hop.

    # exclude = "urn:publicid:IDN+instageni.gpolab.bbn.com+interface+procurve2:5.24=3747-3748"
    # path = "link-pg-utah1-ig-gpo1"
    # exclude = "urn:publicid:IDN+ion.internet2.edu+interface+rtr.atla:ge-7/1/6:protogeni"
    # excludes = []
    # excludes.append(exclude)
    # exclude = "urn:publicid:IDN+ion.internet2.edu+interface+rtr.hous:ge-9/1/4:protogeni"
    # excludes.append(exclude)
    # exclude = "urn:publicid:IDN+ion.internet2.edu+interface+rtr.losa:ge-7/1/3:protogeni"
    # excludes.append(exclude)
    # exclude = "urn:publicid:IDN+ion.internet2.edu+interface+rtr.salt:ge-7/1/2:*"
    ## excludes.append(exclude)
    # exclude = "urn:publicid:IDN+ion.internet2.edu+interface+rtr.wash:ge-7/1/3:protogeni"
    # excludes.append(exclude)
    # profile = {}
    # pathStruct = {}
    # pathStruct["hop_exclusion_list"]=excludes
    # profile[path] = pathStruct
    # options["geni_routing_profile"]=profile

    profile = {}
    # If we have existing AMs,
    # Add the options to tell the SCS to exclude any hops marked for exclusion, or any VLANs
    # marked unavailable
    if existingAggs and len(existingAggs) > 0:
        for agg in existingAggs:
            for hop in agg.hops:
                if hop.excludeFromSCS or (hop.vlans_unavailable and len(hop.vlans_unavailable) > 0):
                    # get path and ensure a pathStruct object
                    path = hop._path.id
                    if profile.has_key(path):
                        pathStruct = profile[path]
                    else:
                        pathStruct = {}

                    # Get hop_exclusion_list
                    if pathStruct.has_key(scs.HOP_EXCLUSION_TAG):
                        excludes = pathStruct[scs.HOP_EXCLUSION_TAG]
                    else:
                        excludes = []

                    # get hop URN
                    urn = hop.urn

                    # Add to the excludes list
                    if hop.excludeFromSCS:
                        excludes.append(urn)
                    elif hop.vlans_unavailable and len(hop.vlans_unavailable) > 0:
                        # Exclude only the unavailable VLANs, not the whole hop
                        excludes.append(urn + "=" + str(hop.vlans_unavailable))

                    # Put the new objects in the struct
                    pathStruct[scs.HOP_EXCLUSION_TAG] = excludes
                    profile[path] = pathStruct
            # Done loop over hops
        # Done loop over AMs
    # Done block to handle existing AMs

    # Handle the commandline options to modify how links are processed.
    # IE, Exclude any hops given as an option from _all_ hops
    # And add the right include hops and force GENI Stitching options
    links = None
    if (self.opts.excludehop and len(self.opts.excludehop) > 0) or (self.opts.includehop and len(self.opts.includehop) > 0) or \
            (self.opts.includehoponpath and len(self.opts.includehoponpath) > 0) or \
            (self.opts.noEGStitchingOnLink and len(self.opts.noEGStitchingOnLink) > 0):
        links = requestDOM.getElementsByTagName(defs.LINK_TAG)
    if links and len(links) > 0:
        # Normalize unset options to empty lists so the loops below are safe
        if not self.opts.excludehop:
            self.opts.excludehop = []
        if not self.opts.includehop:
            self.opts.includehop = []
        if not self.opts.includehoponpath:
            self.opts.includehoponpath = []
        if not self.opts.noEGStitchingOnLink:
            self.opts.noEGStitchingOnLink = []
        self.logger.debug("Got links and option to exclude hops: %s, include hops: %s, include hops on paths: %s, force GENI stitching on paths: %s", self.opts.excludehop, self.opts.includehop, self.opts.includehoponpath, self.opts.noEGStitchingOnLink)

        # Handle any --excludeHop (applies to every path)
        for exclude in self.opts.excludehop:
            # For each path
            for link in links:
                path = link.getAttribute(Link.CLIENT_ID_TAG)
                path = str(path).strip()
                if profile.has_key(path):
                    pathStruct = profile[path]
                else:
                    pathStruct = {}

                # Get hop_exclusion_list
                if pathStruct.has_key(scs.HOP_EXCLUSION_TAG):
                    excludes = pathStruct[scs.HOP_EXCLUSION_TAG]
                else:
                    excludes = []

                excludes.append(exclude)
                self.logger.debug("Excluding %s from path %s", exclude, path)

                # Put the new objects in the struct
                pathStruct[scs.HOP_EXCLUSION_TAG] = excludes
                profile[path] = pathStruct

        # Handle any --includeHop (applies to every path)
        for include in self.opts.includehop:
            # For each path
            for link in links:
                path = link.getAttribute(Link.CLIENT_ID_TAG)
                path = str(path).strip()
                if profile.has_key(path):
                    pathStruct = profile[path]
                else:
                    pathStruct = {}

                # Get hop_inclusion_list
                if pathStruct.has_key(scs.HOP_INCLUSION_TAG):
                    includes = pathStruct[scs.HOP_INCLUSION_TAG]
                else:
                    includes = []

                includes.append(include)
                self.logger.debug("Including %s on path %s", include, path)

                # Put the new objects in the struct
                pathStruct[scs.HOP_INCLUSION_TAG] = includes
                profile[path] = pathStruct

        # Handle any --includeHopOnPath (applies only to the named path)
        for (includehop, includepath) in self.opts.includehoponpath:
            # For each path
            for link in links:
                path = link.getAttribute(Link.CLIENT_ID_TAG)
                path = str(path).strip()
                if not path.lower() == includepath.lower():
                    continue
                if profile.has_key(path):
                    pathStruct = profile[path]
                else:
                    pathStruct = {}

                # Get hop_inclusion_list
                if pathStruct.has_key(scs.HOP_INCLUSION_TAG):
                    includes = pathStruct[scs.HOP_INCLUSION_TAG]
                else:
                    includes = []

                includes.append(includehop)
                self.logger.debug("Including %s on path %s", includehop, path)

                # Put the new objects in the struct
                pathStruct[scs.HOP_INCLUSION_TAG] = includes
                profile[path] = pathStruct

        # Handle any --noEGStitchingOnLink (force GENI path finding for that link)
        for noeglink in self.opts.noEGStitchingOnLink:
            for link in links:
                path = link.getAttribute(Link.CLIENT_ID_TAG)
                path = str(path).strip()
                if not path.lower() == noeglink.lower():
                    continue
                if profile.has_key(path):
                    pathStruct = profile[path]
                else:
                    pathStruct = {}
                pathStruct[scs.ATTEMPT_PATH_FINDING_TAG] = True
                self.logger.debug("Force SCS to find a GENI stitching path for link %s", noeglink)
                profile[path] = pathStruct
    # Done block to handle commandline per link arguments

    if profile != {}:
        options[scs.GENI_PROFILE_TAG] = profile
    self.logger.debug("Sending SCS options %s", options)

    try:
        xmlreq = requestDOM.toprettyxml(encoding="utf-8")
    except Exception, xe:
        self.logger.debug("Failed to XMLify requestDOM for sending to SCS: %s", xe)
        self._raise_omni_error("Malformed request RSpec: %s" % xe)

    return xmlreq, options
    # Done constructSCSArgs
def parseSCSResponse(self, scsResponse):
    '''Parse the response from the SCS.

    - print / save SCS expanded RSpec in debug mode
    - print SCS picked VLAN tags in debug mode
    - parse the RSpec, creating objects
    - parse the workflow, creating dependencies
    Returns (parsed RSpec object, workflow parser).'''

    expandedRSpec = scsResponse.rspec()

    if self.opts.debug or self.opts.fakeModeDir or self.logger.isEnabledFor(logging.DEBUG):
        if isRSpecStitchingSchemaV2(expandedRSpec):
            self.logger.debug("SCS RSpec uses v2 stitching schema")

        # Write the RSpec the SCS gave us to a file
        header = "<!-- SCS expanded stitching request for:\n\tSlice: %s\n -->" % (self.slicename)
        if expandedRSpec and is_rspec_string( expandedRSpec, None, None, logger=self.logger ):
            # Unescape literal "\n" sequences and drop blank lines for readability
            content = stripBlankLines(string.replace(expandedRSpec, "\\n", '\n'))
        else:
            content = "<!-- No valid RSpec returned. -->"
            if expandedRSpec is not None:
                # Keep whatever was returned, commented out, for debugging
                content += "\n<!-- \n" + expandedRSpec + "\n -->"

        if self.opts.debug or self.opts.fakeModeDir:
            # Set -o to ensure this goes to a file, not logger or stdout
            opts_copy = copy.deepcopy(self.opts)
            opts_copy.output = True
            scsreplfile = prependFilePrefix(self.opts.fileDir, Aggregate.FAKEMODESCSFILENAME)
            handler_utils._printResults(opts_copy, self.logger, header, \
                                            content, \
                                            scsreplfile)

            # In debug mode, keep copies of old SCS expanded requests
            if self.opts.debug:
                handler_utils._printResults(opts_copy, self.logger, header, content, scsreplfile + str(self.scsCalls))
            self.logger.debug("Wrote SCS expanded RSpec to %s", \
                                  scsreplfile)

        # A debugging block: print out the VLAN tag the SCS picked for each hop, independent of objects
        # NOTE(review): this scans the raw XML text with string.find rather than
        # using the parsed DOM; it is debug-only output.
        if self.logger.isEnabledFor(logging.DEBUG):
            start = 0
            path = None
            while True:
                if not content.find("<link id=", start) >= start:
                    break
                hopIdStart = content.find('<link id=', start) + len('<link id=') + 1
                hopIdEnd = content.find(">", hopIdStart)-1
                # Get the link ID
                hop = content[hopIdStart:hopIdEnd]
                # Look for the name of the path for this hop before the name of the hop
                if content.find('<path id=', start, hopIdStart) > 0:
                    pathIdStart = content.find('<path id=', start) + len('<path id=') + 1
                    pathIdEnd = content.find(">", pathIdStart)-1
                    self.logger.debug("Found path from %d to %d", pathIdStart, pathIdEnd)
                    path = content[pathIdStart:pathIdEnd]
                # find suggestedVLANRange
                suggestedStart = content.find("suggestedVLANRange>", hopIdEnd) + len("suggestedVLANRange>")
                suggestedEnd = content.find("</suggested", suggestedStart)
                suggested = content[suggestedStart:suggestedEnd]
                # find vlanRangeAvailability
                availStart = content.find("vlanRangeAvailability>", hopIdEnd) + len("vlanRangeAvailability>")
                availEnd = content.find("</vlanRange", availStart)
                avail = content[availStart:availEnd]
                # print that all
                self.logger.debug("SCS gave hop %s on path %s suggested VLAN %s, avail: '%s'", hop, path, suggested, avail)
                start = suggestedEnd

    # parseRequest
    parsed_rspec = self.rspecParser.parse(expandedRSpec)
    # self.logger.debug("Parsed SCS expanded RSpec of type %r",
    #                   type(parsed_rspec))

    # parseWorkflow
    workflow = scsResponse.workflow_data()
    scsResponse = None # once workflow extracted, done with that object

    # Dump the formatted workflow at debug level
    import pprint
    pp = pprint.PrettyPrinter(indent=2)
    self.logger.debug("SCS workflow:\n" + pp.pformat(workflow))

    workflow_parser = WorkflowParser(self.logger)

    # Parse the workflow, creating Path/Hop/etc objects
    # In the process, fill in a tree of which hops depend on which,
    # and which AMs depend on which
    # Also mark each hop with what hop it imports VLANs from,
    # And check for AM dependency loops
    workflow_parser.parse(workflow, parsed_rspec)

    # FIXME: Check SCS output consistency in a subroutine:
    # In each path: An AM with 1 hop must either _have_ dependencies or _be_ a dependency
    # All AMs must be listed in workflow data at least once per path they are in

    return parsed_rspec, workflow_parser
    # End of parseSCSResponse
def ensureOneExoSM(self):
    '''If 2 AMs in ams_to_process are ExoGENI and share a path and no noEGStitching specified,
    then ensure we use the ExoSM. If 2 AMs use the ExoSM URL, combine them into a single AM.

    Mutates self.ams_to_process and Aggregate.aggs in place; merges URN
    synonyms, dependency sets, paths, and hops of duplicate ExoSM
    aggregates into a single ExoSM aggregate.

    Bug fix: the isDependencyFor merge below referenced a misspelled
    variable ("exosM"), which raised NameError whenever a duplicate ExoSM
    appeared in another AM's isDependencyFor set.'''
    if len(self.ams_to_process) < 2:
        return

    # Partition the AMs: ExoSMs, other EG AMs, and everything else
    exoSMCount = 0
    exoSMs = []
    nonExoSMs = []
    egAMCount = 0
    egAMs = []
    for am in self.ams_to_process:
        if am.isExoSM:
            egAMCount += 1
            exoSMCount += 1
            exoSMs.append(am)
            self.logger.debug("%s is ExoSM", am)
        else:
            nonExoSMs.append(am)
            if am.isEG:
                egAMs.append(am)
                egAMCount += 1

    if egAMCount == 0:
        return

    if egAMCount > 1:
        self.logger.debug("Request includes more than one ExoGENI AM.")
        # If there is a stitched link between 2 EG AMs and no noEGStitching, then we
        # must change each to be the ExoSM so we use EG stitching for those AMs / links.
        # If there is no stitched link between the 2 EG AMs or the user specified noEGStitching,
        # then we do not change them to be the ExoSM.
        # Note that earlier useExoSM changed EG AMs into the ExoSM
        if self.opts.noEGStitching:
            # SCS will have tried to provide a GENI path and errored if not possible
            self.logger.debug("Requested no EG stitching. Will edit requests to let this work later")
            # And do not force the AMs to be the ExoSM
        elif exoSMCount == egAMCount:
            self.logger.debug("All EG AMs are already the ExoSM")
        else:
            # Now see if each EG AM should be made into the ExoSM or not.
            for anEGAM in egAMs:
                if self.opts.useExoSM:
                    # Should not happen I believe.
                    self.logger.debug("Asked to use the ExoSM for all EG AMs. So change this one.")
                elif self.parsedSCSRSpec:
                    self.logger.debug("Will use EG stitching where applicable. Must go through the ExoSM for EG only links.")
                    # Does this AM participate in an EG only link? If so, convert it.
                    # If not, continue
                    # EG only links will not be in the stitching extension, so use the main body elements
                    hasEGLink = False
                    for link in self.parsedSCSRSpec.links:
                        # If this link was explicitly marked for no EG stitching
                        # via a commandline option, then log at debug and continue to next link
                        if self.opts.noEGStitchingOnLink and link.id in self.opts.noEGStitchingOnLink:
                            self.logger.debug("Requested no EG stitching on link %s, so this link cannot force this AM to be the ExoSM", link.id)
                            continue
                        hasThisAgg = False
                        hasOtherEGAgg = False
                        hasNonEGAgg = False
                        for agg in link.aggregates:
                            if anEGAM == agg:
                                hasThisAgg = True
                            elif agg.isEG:
                                hasOtherEGAgg = True
                            else:
                                hasNonEGAgg = True
                        if hasThisAgg and hasOtherEGAgg:
                            # then this AM has an EG link
                            # Or FIXME, must it also not hasNonEGAgg?
                            self.logger.debug("Looking at links, %s uses this %s and also another EG AM", link.id, anEGAM)
                            if hasNonEGAgg:
                                self.logger.debug("FIXME: Also has a non EG AM. Should this case avoid setting hasEGLink to true and use GENI stitching? Assuming so...")
                            else:
                                hasEGLink = True
                                break # out of loop over links
                    # End of loop over links in the RSpec

                    if not hasEGLink:
                        self.logger.debug("%s is EG but has no links to other EG AMs, so no need to make it the ExoSM", anEGAM)
                        continue # to next EG AM

                    self.logger.debug("%s has a link that to another EG AM. To use EG stitching between them, make this the ExoSM.", anEGAM)

                # At this point, we're going to make a non ExoSM EG AM into the ExoSM so the ExoSM
                # can handle the stitching.
                # Make anEGAM the ExoSM
                self.logger.debug("Making %s the ExoSM", anEGAM)
                anEGAM.alt_url = anEGAM.url
                anEGAM.url = defs.EXOSM_URL
                anEGAM.isExoSM = True
                anEGAM.nick = handler_utils._lookupAggNick(self, anEGAM.url)
                exoSMCount += 1
                exoSMs.append(anEGAM)
                nonExoSMs.remove(anEGAM)
            # End of block where didn't specify useExoSM
        # End of loop over EG AMs
    # End of block handling EG AM count > 1

    if exoSMCount == 0:
        self.logger.debug("Not using ExoSM")
        return

    exoSM = None
    # First ExoSM will be _the_ ExoSM
    if exoSMCount > 0:
        exoSM = exoSMs[0]
        exoSMURN = handler_utils._lookupAggURNFromURLInNicknames(self.logger, self.config, defs.EXOSM_URL)
        # Ensure standard ExoSM URN is the URN and old URN is in urn_syns
        if exoSM.urn not in exoSM.urn_syns:
            exoSM.urn_syns.append(exoSM.urn)
        if exoSMURN != exoSM.urn:
            exoSM.urn = exoSMURN
        if exoSMURN not in exoSM.urn_syns:
            exoSM.urn_syns += Aggregate.urn_syns(exoSMURN)

    if exoSMCount < 2:
        self.logger.debug("Only %d ExoSMs", exoSMCount)
        return

    # Now merge other ExoSMs into _the_ ExoSM
    for am in exoSMs:
        if am == exoSM:
            continue
        self.logger.debug("Merge AM %s (%s, %s) into %s (%s, %s)", am.urn, am.url, am.alt_url, exoSM, exoSM.url, exoSM.alt_url)

        # Merge urn_syns
        if exoSM.urn != am.urn and am.urn not in exoSM.urn_syns:
            exoSM.urn_syns.append(am.urn)
        for urn in am.urn_syns:
            if urn not in exoSM.urn_syns:
                exoSM.urn_syns.append(urn)

        # Merge _dependsOn: drop mutual references, union the rest
        if am in exoSM.dependsOn:
            exoSM._dependsOn.discard(am)
        if exoSM in am.dependsOn:
            am._dependsOn.discard(exoSM)
        exoSM._dependsOn.update(am._dependsOn)

        # If both am and exoSM are in dependsOn or isDependencyFor for some other AM, then remove am
        for am2 in self.ams_to_process:
            if am2 in exoSMs:
                continue
            if am2 == am:
                continue
            if am2 == exoSM:
                continue
            if am in am2.dependsOn:
                self.logger.debug("Removing dup ExoSM %s from %s.dependsOn", am, am2)
                am2._dependsOn.discard(am)
                if not exoSM in am2.dependsOn:
                    self.logger.debug("Adding real ExoSM %s to %s.dependsOn", exoSM, am2)
                    am2._dependsOn.add(exoSM)
            if am in am2.isDependencyFor:
                self.logger.debug("Removing dup ExoSM %s from %s.isDependencyFor", am, am2)
                am2.isDependencyFor.discard(am)
                # BUGFIX: was "exosM" (NameError); also fixed "ExosM" typo in the log message
                if not exoSM in am2.isDependencyFor:
                    self.logger.debug("Adding real ExoSM %s to %s.isDependencyFor", exoSM, am2)
                    am2.isDependencyFor.add(exoSM)
        # End of loop over AMs to merge dependsOn and isDependencyFor

        # merge isDependencyFor
        if am in exoSM.isDependencyFor:
            exoSM.isDependencyFor.discard(am)
        if exoSM in am.isDependencyFor:
            am.isDependencyFor.discard(exoSM)
        exoSM.isDependencyFor.update(am.isDependencyFor)

        # merge _paths
        # Path has hops and aggregates
        # Fix the list of aggregates to drop the aggregate being merged away
        # What happens when a path has same aggregate at 2 discontiguous hops?
        for path in am.paths:
            path._aggregates.remove(am)
            if not exoSM in path.aggregates:
                path._aggregates.add(exoSM)
            if not path in exoSM.paths:
                self.logger.debug("Merging in path %s", path)
                exoSM._paths.add(path)
            # FIXME: What does it mean for the same path to be on both aggregates? What has to be merged?

        # merge _hops
        # Hop points back to aggregate. Presumably these pointers must be reset
        for hop in am.hops:
            hop._aggregate = exoSM
            if not hop in exoSM.hops:
                self.logger.debug("Merging in hop %s", hop)
                exoSM._hops.add(hop)

        # merge userRequested
        # - If 1 was user requested and 1 was not, whole thing is user requested
        if am.userRequested:
            exoSM.userRequested = True

        # merge alt_url: if exoSM's alt_url duplicates its main URL, prefer am's
        if exoSM.alt_url and handler_utils._extractURL(self.logger, exoSM.alt_url) == handler_utils._extractURL(self.logger, exoSM.url):
            if handler_utils._extractURL(self.logger, exoSM.alt_url) != handler_utils._extractURL(self.logger, am.url):
                exoSM.alt_url = am.alt_url
    # End of loop over exoSMs, doing merge

    # ensure only one ExoSM remains in cls.aggs
    newaggs = dict()
    for (key, agg) in Aggregate.aggs.items():
        if not (agg.isExoSM and agg != exoSM):
            newaggs[key] = agg
    Aggregate.aggs = newaggs

    nonExoSMs.append(exoSM)
    self.ams_to_process = nonExoSMs
def add_am_info(self, aggs):
'''Add extra information about the AMs to the Aggregate objects, like the API version'''
options_copy = copy.deepcopy(self.opts)
options_copy.debug = False
options_copy.info = False
options_copy.aggregate = []
aggsc = copy.copy(aggs)
for agg in aggsc:
# Don't do an aggregate twice
if agg.urn in self.amURNsAddedInfo:
continue
# self.logger.debug("add_am_info looking at %s", agg)
# Note which AMs were user requested
if self.parsedUserRequest and agg.urn in self.parsedUserRequest.amURNs:
agg.userRequested = True
elif self.parsedUserRequest:
for urn2 in agg.urn_syns:
if urn2 in self.parsedUserRequest.amURNs:
agg.userRequested = True
# FIXME: Better way to detect this?
if handler_utils._extractURL(self.logger, agg.url) in defs.EXOSM_URL:
agg.isExoSM = True
# self.logger.debug("%s is the ExoSM cause URL is %s", agg, agg.url)
# EG AMs in particular have 2 URLs in some sense - ExoSM and local
# So note the other one, since VMs are split between the 2
for (amURN, amURL) in self.config['aggregate_nicknames'].values():
if amURN.strip() in agg.urn_syns:
hadURL = handler_utils._extractURL(self.logger, agg.url)
newURL = handler_utils._extractURL(self.logger, amURL)
if hadURL != newURL and not hadURL in newURL and not newURL in hadURL and not newURL.strip == '':
agg.alt_url = amURL.strip()
break
# else:
# self.logger.debug("Not setting alt_url for %s. URL is %s, alt candidate was %s with URN %s", agg, hadURL, newURL, amURN)
# elif "exogeni" in amURN and "exogeni" in agg.urn:
# self.logger.debug("Config had URN %s URL %s, but that URN didn't match our URN synonyms for %s", amURN, newURL, agg)
if "exogeni" in agg.urn and not agg.alt_url:
# self.logger.debug("No alt url for Orca AM %s (URL %s) with URN synonyms:", agg, agg.url)
# for urn in agg.urn_syns:
# self.logger.debug("\t%s", urn)
if not agg.isExoSM:
agg.alt_url = defs.EXOSM_URL
# Try to get a URL from the CH? Do we want/need this
# expense? This is a call to the CH....
# Comment this out - takes too long, not clear
# it is needed.
# if not agg.alt_url:
# fw_ams = dict()
# try:
# fw_ams = self.framework.list_aggregates()
# for fw_am_urn in fw_ams.keys():
# if fw_am_urn and fw_am_urn.strip() in am.urn_syns and fw_ams[fw_am_urn].strip() != '':
# cand_url = fw_ams[fw_am_urn]
# if cand_url != am.url and not am.url in cand_url and not cand_url in am.url:
# am.alt_url = cand_url
# self.logger.debug("Found AM %s alternate URL from CH ListAggs: %s", am.urn, am.alt_url)
# break
# except:
# pass
# If --noExoSM then ensure this is not the ExoSM
if agg.isExoSM and agg.alt_url and self.opts.noExoSM:
self.logger.warn("%s used ExoSM URL. Changing to %s", agg, agg.alt_url)
amURL = agg.url
agg.url = agg.alt_url
agg.alt_url = amURL
agg.isExoSM = False
# For using the test ION AM
# if 'alpha.dragon' in agg.url:
# agg.url = 'http://alpha.dragon.maxgigapop.net:12346/'
# Use GetVersion to determine AM type, AM API versions spoken, etc
# Hack: Here we hard-code using APIv2 always to call getversion, assuming that v2 is the AM default
# and so the URLs are v2 URLs.
if options_copy.warn:
omniargs = ['--ForceUseGetVersionCache', '-V2', '-a', agg.url, 'getversion']
else:
omniargs = ['--ForceUseGetVersionCache', '-o', '--warn', '-V2', '-a', agg.url, 'getversion']
try:
self.logger.debug("Getting extra AM info from Omni for AM %s", agg)
(text, version) = omni.call(omniargs, options_copy)
aggurl = agg.url
if isinstance (version, dict) and version.has_key(aggurl) and isinstance(version[aggurl], dict) \
and version[aggurl].has_key('value') and isinstance(version[aggurl]['value'], dict):
# First parse geni_am_type
if version[aggurl]['value'].has_key('geni_am_type') and isinstance(version[aggurl]['value']['geni_am_type'], list):
if DCN_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is DCN", agg)
agg.dcn = True
elif ORCA_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is Orca", agg)
agg.isEG = True
elif PG_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is ProtoGENI", agg)
agg.isPG = True
elif GRAM_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is GRAM", agg)
agg.isGRAM = True
elif FOAM_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is FOAM", agg)
agg.isFOAM = True
elif OESS_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is OESS", agg)
agg.isOESS = True
elif version[aggurl]['value'].has_key('geni_am_type') and ORCA_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is Orca", agg)
agg.isEG = True
elif version[aggurl]['value'].has_key('geni_am_type') and DCN_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is DCN", agg)
agg.dcn = True
elif version[aggurl]['value'].has_key('geni_am_type') and PG_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is ProtoGENI", agg)
agg.isPG = True
elif version[aggurl]['value'].has_key('geni_am_type') and GRAM_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is GRAM", agg)
agg.isGRAM = True
elif version[aggurl]['value'].has_key('geni_am_type') and FOAM_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is FOAM", agg)
agg.isFOAM = True
elif version[aggurl]['value'].has_key('geni_am_type') and OESS_AM_TYPE in version[aggurl]['value']['geni_am_type']:
self.logger.debug("AM %s is OESS", agg)
agg.isOESS = True
# This code block looks nice but doesn't work - the version object is not the full triple
# elif version[aggurl].has_key['code'] and isinstance(version[aggurl]['code'], dict) and \
# version[aggurl]['code'].has_key('am_type') and str(version[aggurl]['code']['am_type']).strip() != "":
# if version[aggurl]['code']['am_type'] == PG_AM_TYPE:
# self.logger.debug("AM %s is ProtoGENI", agg)
# agg.isPG = True
# elif version[aggurl]['code']['am_type'] == ORCA_AM_TYPE:
# self.logger.debug("AM %s is Orca", agg)
# agg.isEG = True
# elif version[aggurl]['code']['am_type'] == DCN_AM_TYPE:
# self.logger.debug("AM %s is DCN", agg)
# agg.dcn = True
# Now parse geni_api_versions
if version[aggurl]['value'].has_key('geni_api_versions') and isinstance(version[aggurl]['value']['geni_api_versions'], dict):
maxVer = 1
hasV2 = False
v2url = None
maxVerUrl = None
reqVerUrl = None
for key in version[aggurl]['value']['geni_api_versions'].keys():
if int(key) == 2:
hasV2 = True
v2url = version[aggurl]['value']['geni_api_versions'][key]
# Ugh. Why was I changing the URL based on the Ad? Not needed, Omni does this.
# And if the AM says the current URL is the current opts.api_version OR the AM only lists
# one URL, then changing the URL makes no sense. So if I later decide I need this
# for some reason, only do it if len(keys) > 1 and [value][geni_api] != opts.api_version
# Or was I trying to change to the 'canonical' URL for some reason?
# # Change the stored URL for this Agg to the URL the AM advertises if necessary
# if agg.url != version[aggurl]['value']['geni_api_versions'][key]:
# agg.url = version[aggurl]['value']['geni_api_versions'][key]
# The reason to do this would be to
# avoid errors like:
#16:46:34 WARNING : Requested API version 2, but AM https://clemson-clemson-control-1.clemson.edu:5001 uses version 3. Same aggregate talks API v2 at a different URL: https://clemson-clemson-control-1.clemson.edu:5002
# if len(version[aggurl]['value']['geni_api_versions'].keys()) > 1 and \
# agg.url != version[aggurl]['value']['geni_api_versions'][key]:
# agg.url = version[aggurl]['value']['geni_api_versions'][key]
if int(key) > maxVer:
maxVer = int(key)
maxVerUrl = version[aggurl]['value']['geni_api_versions'][key]
if int(key) == self.opts.api_version:
reqVerUrl = version[aggurl]['value']['geni_api_versions'][key]
# Done loop over api versions
# This code is just to avoid ugly WARNs from Omni about changing URL to get the right API version.
# Added it for GRAM. But GRAM is manually fixed at the SCS now, so no need.
# if self.opts.api_version == 2 and hasV2 and agg.url != v2url:
# if agg.isEG and "orca/xmlrpc" in agg.url and "orca/geni" in v2url:
# # EGs ad lists the wrong v2 URL
# #self.logger.debug("Don't swap at EG with the wrong URL")
# pass
# else:
# self.logger.debug("%s: Swapping URL to v2 URL. Change from %s to %s", agg, agg.url, v2url)
# if agg.alt_url is None:
# agg.alt_url = agg.url
# agg.url = v2url
# Stitcher doesn't really know how to parse
# APIv1 return structs
if maxVer == 1:
msg = "%s speaks only AM API v1 - not supported!" % agg
#self.logger.error(msg)
raise StitchingError(msg)
# Hack alert: v3 AM implementations don't work even if they exist
if not hasV2:
msg = "%s does not speak AM API v2 (max is V%d). APIv2 required!" % (agg, maxVer)
#self.logger.error(msg)
raise StitchingError(msg)
agg.api_version = self.opts.api_version
if self.opts.api_version > maxVer:
self.logger.debug("Asked for APIv%d but %s only supports v%d", self.opts.api_version, agg, maxVer)
agg.api_version = maxVer
# if maxVer != 2:
# self.logger.debug("%s speaks AM API v%d, but sticking with v2", agg, maxVer)
# if self.opts.fakeModeDir:
# self.logger.warn("Testing v3 support")
# agg.api_version = 3
# agg.api_version = maxVer
# Change the URL for the AM so that later calls to this AM don't get complaints from Omni
# Here we hard-code knowledge that APIv2 is the default in Omni, the agg_nick_cache, and at AMs
if agg.api_version != 2:
if agg.api_version == maxVer and maxVerUrl is not None and maxVerUrl != agg.url:
self.logger.debug("%s: Swapping URL to v%d URL. Change from %s to %s", agg, agg.api_version, agg.url, maxVerUrl)
if agg.alt_url is None:
agg.alt_url = agg.url
agg.url = maxVerUrl
elif agg.api_version == self.opts.api_version and reqVerUrl is not None and reqVerUrl != agg.url:
self.logger.debug("%s: Swapping URL to v%d URL. Change from %s to %s", agg, agg.api_version, agg.url, reqVerUrl)
if agg.alt_url is None:
agg.alt_url = agg.url
agg.url = reqVerUrl
# Done handling geni_api_versions
if version[aggurl]['value'].has_key('GRAM_version'):
agg.isGRAM = True
self.logger.debug("AM %s is GRAM", agg)
if version[aggurl]['value'].has_key('foam_version') and ('oess' in agg.url or 'al2s' in agg.url):
agg.isOESS = True
self.logger.debug("AM %s is OESS", agg)
if version[aggurl]['value'].has_key('geni_request_rspec_versions') and \
isinstance(version[aggurl]['value']['geni_request_rspec_versions'], list):
for rVer in version[aggurl]['value']['geni_request_rspec_versions']:
if isinstance(rVer, dict) and rVer.has_key('type') and rVer.has_key('version') and \
rVer.has_key('extensions') and rVer['type'].lower() == 'geni' and str(rVer['version']) == '3' and \
isinstance(rVer['extensions'], list):
v2 = False
v1 = False
for ext in rVer['extensions']:
if defs.STITCH_V1_BASE in ext:
v1 = True
if defs.STITCH_V2_BASE in ext:
v2 = True
if v2:
self.logger.debug("%s supports stitch schema v2", agg)
agg.doesSchemaV2 = True
if not v1:
self.logger.debug("%s does NOT say it supports stitch schema v1", agg)
agg.doesSchemaV1 = False
# End of if block
# Done with loop over versions
if not agg.doesSchemaV2 and not agg.doesSchemaV1:
self.logger.debug("%s doesn't say whether it supports either stitching schema, so assume v1", agg)
agg.doesSchemaV1 = True
except StitchingError, se:
# FIXME: Return anything different for stitching error?
# Do we want to return a geni triple struct?
raise
except Exception, e:
self.logger.debug("Got error extracting extra AM info: %s", e)
import traceback
self.logger.debug(traceback.format_exc())
pass
# finally:
# logging.disable(logging.NOTSET)
# Done with call to GetVersion
# If this is an EG AM and we said useExoSM, make this the ExoSM
# Later we'll use ensureOneExoSM to dedupe
if agg.isEG and self.opts.useExoSM and not agg.isExoSM:
agg.alt_url = defs.EXOSM_URL
self.logger.info("%s is an EG AM and user asked for ExoSM. Changing to %s", agg, agg.alt_url)
amURL = agg.url
agg.url = agg.alt_url
agg.alt_url = amURL
agg.isExoSM = True
aggsc.append(agg)
continue
# else:
# self.logger.debug("%s is EG: %s, alt_url: %s, isExo: %s", agg, agg.isEG, agg.alt_url, agg.isExoSM)
# Save off the aggregate nickname if possible
agg.nick = handler_utils._lookupAggNick(self, agg.url)
if not agg.isEG and not agg.isGRAM and not agg.dcn and not agg.isOESS and "protogeni/xmlrpc" in agg.url:
agg.isPG = True
# self.logger.debug("Remembering done getting extra info for %s", agg)
# Remember we got the extra info for this AM
self.amURNsAddedInfo.append(agg.urn)
# Done loop over aggs
# End add_am_info
def dump_objects(self, rspec, aggs):
    '''Log (at debug level) every hop, every aggregate, and their dependencies.'''
    # --- Hops, grouped by stitching path ---
    if rspec and rspec.stitching:
        stitching = rspec.stitching
        self.logger.debug( "\n===== Hops =====")
        for path in stitching.paths:
            self.logger.debug( "Path %s" % (path.id))
            for hop in path.hops:
                self.logger.debug( " Hop %s" % (hop))
                if hop.globalId:
                    self.logger.debug( " GlobalId: %s" % hop.globalId)
                # FIXME: don't use the private variable
                hoplink = hop._hop_link
                if hoplink.isOF:
                    self.logger.debug( " An Openflow controlled hop")
                    if hoplink.controllerUrl:
                        self.logger.debug( " Controller: %s", hoplink.controllerUrl)
                    if hoplink.ofAMUrl:
                        self.logger.debug( " Openflow AM URL: %s", hoplink.ofAMUrl)
                if len(hoplink.capabilities) > 0:
                    self.logger.debug( " Capabilities: %s", hoplink.capabilities)
                self.logger.debug( " VLAN Suggested (requested): %s" % (hoplink.vlan_suggested_request))
                self.logger.debug( " VLAN Available Range (requested): %s" % (hoplink.vlan_range_request))
                if hoplink.vlan_suggested_manifest:
                    self.logger.debug( " VLAN Suggested (manifest): %s" % (hoplink.vlan_suggested_manifest))
                if hoplink.vlan_range_manifest:
                    self.logger.debug( " VLAN Available Range (manifest): %s" % (hoplink.vlan_range_manifest))
                if hop.vlans_unavailable and len(hop.vlans_unavailable) > 0:
                    self.logger.debug( " VLANs found UN Available: %s" % hop.vlans_unavailable)
                self.logger.debug( " Import VLANs From: %s" % (hop.import_vlans_from))
                if hop.dependsOn:
                    self.logger.debug( " Dependencies:")
                    for dep in hop.dependsOn:
                        self.logger.debug( " Hop %s" % (dep))

    # --- Aggregates ---
    if aggs and len(aggs) > 0:
        # (attribute, message) pairs for the simple boolean AM-type flags
        typeFlags = (("dcn", " A DCN Aggregate"),
                     ("isPG", " A ProtoGENI Aggregate"),
                     ("isGRAM", " A GRAM Aggregate"),
                     ("isOESS", " An OESS Aggregate"),
                     ("isFOAM", " A FOAM Aggregate"),
                     ("isEG", " An Orca Aggregate"),
                     ("isExoSM", " The ExoSM Aggregate"))
        self.logger.debug( "\n===== Aggregates =====")
        for agg in aggs:
            self.logger.debug( "\nAggregate %s" % (agg))
            if agg.userRequested:
                self.logger.debug(" (User requested)")
            else:
                self.logger.debug(" (SCS added)")
            for attrname, message in typeFlags:
                if getattr(agg, attrname):
                    self.logger.debug(message)
            self.logger.debug(" URN synonyms: %s", agg.urn_syns)
            if agg.alt_url:
                self.logger.debug(" Alternate URL: %s", agg.alt_url)
            self.logger.debug(" Using AM API version %d", agg.api_version)
            if agg.manifestDom:
                if agg.api_version > 2:
                    self.logger.debug(" Have a temporary reservation here (%s)! \n*** You must manually call `omni -a %s -V3 provision %s` and then `omni -a %s -V3 poa %s geni_start`", agg.url, agg.url, self.slicename, agg.url, self.slicename)
                else:
                    self.logger.debug(" Have a reservation here (%s)!", agg.url)
            if not agg.doesSchemaV1:
                self.logger.debug(" Does NOT support Stitch Schema V1")
            if agg.doesSchemaV2:
                self.logger.debug(" Supports Stitch Schema V2")
            if agg.lastError:
                self.logger.debug(" Last Error: %s", agg.lastError)
            if agg.pgLogUrl:
                self.logger.debug(" PG Log URL %s", agg.pgLogUrl)
            if agg.sliverExpirations is not None:
                expirationCount = len(agg.sliverExpirations)
                if expirationCount > 1:
                    # More than 1 distinct sliver expiration found; report the first
                    outputstr = agg.sliverExpirations[0].isoformat()
                    self.logger.debug(" Resources here expire at %d different times. First expiration is %s UTC" % (expirationCount, outputstr))
                elif expirationCount == 1:
                    outputstr = agg.sliverExpirations[0].isoformat()
                    self.logger.debug(" Resources here expire at %s UTC" % (outputstr))
            for h in agg.hops:
                self.logger.debug( " Hop %s" % (h))
            for ad in agg.dependsOn:
                self.logger.debug( " Depends on %s" % (ad))
# End of dump_objects
def _raise_omni_error( self, msg, err=OmniError, triple=None ):
    '''Log msg (with the triple appended, if given) as an error,
    then raise err carrying the same information.'''
    logged = msg if triple is None else "%s %s" % (msg, str(triple))
    self.logger.error( logged )
    if triple is None:
        raise err(msg)
    else:
        raise err(msg, triple)
def combineManifests(self, ams, lastAM):
'''Produce a single combined manifest string from the reservation results at each aggregate.
lastAM is the last reservation that completed, for use as a template.'''
# Nodes and hops come from the AM that owns those
# interface_ref elements on link elements also come from the responsible AM
# Top level link element is effectively arbitrary, but with comments on what other AMs said
lastDom = None
if lastAM is None or lastAM.manifestDom is None:
self.logger.debug("Combined manifest will start from expanded request RSpec")
lastDom = self.parsedSCSRSpec.dom
# Change that dom to be a manifest RSpec
# for each attribute on the dom root node, change "request" to "manifest"
doc_root = lastDom.documentElement
for i in range(doc_root.attributes.length):
attr = doc_root.attributes.item(i)
doingChange = False
ind = attr.value.find('request')
if ind > -1:
doingChange = True
while ind > -1:
attr.value = attr.value[:ind] + 'manifest' + attr.value[ind+len('request'):]
ind = attr.value.find('request', ind+len('request'))
if doingChange:
self.logger.debug("Reset original request rspec attr %s='%s'", attr.name, attr.value)
# self.logger.debug(stripBlankLines(lastDom.toprettyxml(encoding="utf-8")))
else:
lastDom = lastAM.manifestDom
if lastAM:
self.logger.debug("Template for combining will be from %s", lastAM)
combinedManifestDom = combineManifestRSpecs(ams, lastDom)
try:
manString = combinedManifestDom.toprettyxml(encoding="utf-8")
except Exception, xe:
self.logger.debug("Failed to XMLify combined Manifest RSpec: %s", xe)
self._raise_omni_error("Malformed combined manifest RSpec: %s" % xe)
# set rspec to be UTF-8
if isinstance(manString, unicode):
manString = manString.encode('utf-8')
self.logger.debug("Combined manifest RSpec was unicode")
# FIXME
# For fake mode this is really a request, but should be treating it as a manifest
# For now, SCS gives us stitchSchemaV2 stuff, so rspeclint fails
try:
if self.opts.fakeModeDir:
self.confirmGoodRSpec(manString, rspec_schema.REQUEST, False)
else:
self.confirmGoodRSpec(manString, rspec_schema.MANIFEST, False)
except OmniError, oe:
# If there is an EG AM in the mix, then we expect an error
# like:
#Manifest RSpec file did not contain a Manifest RSpec (wrong type or schema)
hasEG = False
for am in ams:
if am.isEG:
hasEG = True
break
if hasEG and "Manifest RSpec file did not contain a Manifest RSpec (wrong type or schema)" in str(oe):
self.logger.debug("EG AM meant manifest does not validate: %s", oe)
except Exception, e:
self.logger.error(e)
return stripBlankLines(manString)
# End of combineManifest
def saveAggregateList(self, sliceurn):
'''Save a file with the list of aggregates used. Used as input
to later stitcher calls, e.g. to delete from all AMs.'''
# URN to hrn
(slicehrn, stype) = urn_to_clean_hrn(sliceurn)
if not slicehrn or slicehrn.strip() == '' or not stype=='slice':
self.logger.warn("Couldn't parse slice HRN from URN %s",
sliceurn)
return
# ./$slicehrn-amlist.txt
fname = prependFilePrefix(self.opts.fileDir, "~/.gcf/%s-amlist.txt" % slicehrn)
if not self.ams_to_process or len(self.ams_to_process) == 0:
self.logger.debug("No AMs in AM list to process, so not creating amlist file")
return
listdir = os.path.abspath(os.path.expanduser(os.path.dirname(fname)))
if not os.path.exists(listdir):
try:
os.makedirs(listdir)
except Exception, e:
self.logger.warn("Failed to create dir '%s' to save list of used AMs: %s", listdir, e)
# URL,URN
with open (fname, 'w') as file:
file.write("# AM List for multi-AM slice %s\n" % sliceurn)
file.write("# Slice allocated at %s\n" % datetime.datetime.utcnow().isoformat())
for am in self.ams_to_process:
file.write("%s,%s\n" % (am.url, am.urn) )
# Include am.userRequested? am.api_version? len(am._hops)?
# file.write("%s,%s,%s,%d,%d\n" % (am.url, am.urn, am.userRequested,
# am.api_version, len(am._hops)))
# Done writing to file
# End of saveAggregateList
def addAggregateOptions(self, args):
'''Read a file with a list of aggregates, adding those as -a
options. Allows stitcher to delete from all AMs. Note that
extra aggregate options are added only if no -a options are
already supplied.'''
# Find slice name from args[1]
if not args or len(args) < 2:
self.logger.debug("Cannot find slice name")
return
slicename = args[1]
# get slice URN
# Get slice URN from name
try:
sliceurn = self.framework.slice_name_to_urn(slicename)
except Exception, e:
self.logger.warn("Could not determine slice URN from name %s: %s", slicename, e)
return
if not sliceurn or sliceurn.strip() == '':
self.logger.warn("Could not determine slice URN from name %s", slicename)
return
# get slice HRN
(slicehrn, stype) = urn_to_clean_hrn(sliceurn)
if not slicehrn or slicehrn.strip() == '' or not stype=='slice':
self.logger.warn("Couldn't parse slice HRN from URN %s",
sliceurn)
return
# ./$slicehrn-amlist.txt
fname = prependFilePrefix(self.opts.fileDir, "~/.gcf/%s-amlist.txt" % slicehrn)
# look to see if $slicehrn-amlist.txt exists
if not os.path.exists(fname) or not os.path.getsize(fname) > 0:
self.logger.debug("File of AMs for slice %s not found or empty: %s", slicename, fname)
return
self.logger.info("Reading slice %s aggregates from file %s", slicename, fname)
self.opts.ensure_value('aggregate', [])
addOptions = True
if len(self.opts.aggregate) > 0:
addOptions = False
with open(fname, 'r') as file:
# For each line:
for line in file:
line = line.strip()
# Skip if starts with # or is empty
if line == '' or line.startswith('#'):
continue
# split on ,
(url,urn) = line.split(',')
# (url,urn,userRequested,api_version,numHops) = line.split(',')
url = url.strip()
# If first looks like a URL, log
if not url == '':
# add -a option
# Note this next doesn't avoid the dup of a nickname
if not url in self.opts.aggregate:
if addOptions:
self.logger.debug("Adding aggregate option %s (%s)", url, urn)
self.opts.aggregate.append(url)
else:
self.logger.debug("NOTE not adding aggregate %s", url)
# Non-empty URL
# End of loop over lines
# End of block to read the file
# End of addAggregateOptions
def addExpiresAttribute(self, rspecDOM, sliceexp):
    '''Set the expires attribute on the rspec to the slice
    expiration. DCN AMs used to not support renew, but this is no
    longer true, so this should not be necessary. Additionally,
    some AMs treat this as a strict requirement and if this
    exceeds local policy for maximum sliver, the request will fail.'''
    # Need both a DOM and a non-blank expiration to do anything
    if not rspecDOM or not sliceexp or str(sliceexp).strip() == "":
        return
    rspecs = rspecDOM.getElementsByTagName(defs.RSPEC_TAG)
    if not rspecs:
        return
    rootElt = rspecs[0]
    if rootElt.hasAttribute(defs.EXPIRES_ATTRIBUTE):
        self.logger.debug("Not over-riding expires %s", rootElt.getAttribute(defs.EXPIRES_ATTRIBUTE))
        return
    # Some PG based AMs cannot handle fractional seconds, and
    # erroneously treat expires as in local time. So (a) avoid
    # microseconds, and (b) explicitly note this is in UTC.
    # This is sliceexp.isoformat() minus the microseconds, plus 'Z';
    # PG requires exactly this format.
    rootElt.setAttribute(defs.EXPIRES_ATTRIBUTE, sliceexp.strftime('%Y-%m-%dT%H:%M:%SZ'))
    self.logger.debug("Added expires %s", rootElt.getAttribute(defs.EXPIRES_ATTRIBUTE))
def getUnboundNode(self):
    '''Set self.isMultiAM by looking at Node component_manager_id fields. Also return at most 1 node without such a field.'''
    # If any node is unbound, then all AMs will try to allocate it.
    boundAMs = set()
    unboundNodeId = None
    for node in self.parsedUserRequest.nodes:
        if node.amURN is not None:
            boundAMs.add(node.amURN)
            continue
        if self.opts.devmode:
            # Note that SCS likely will fail with something like:
            # code 65535: std::exception
            self.logger.warn("Node %s is unbound in request", node.id)
        else:
            self.logger.debug("Node %s is unbound in request", node.id)
        unboundNodeId = node.id
    self.logger.debug("Request RSpec binds nodes to %d AMs", len(boundAMs))
    if len(boundAMs) > 1:
        self.isMultiAM = True
    return unboundNodeId
def confirmSafeRequest(self):
    '''Confirm this request is not asking for a loop. Bad things should
    not be allowed, dangerous things should get a warning.

    Currently a no-op placeholder: no checks are implemented yet. The
    commented sketch below is the intended future check (ticket #570).'''
    # FIXME FIXME - what other checks go here?
    # Ticket #570: to stitch multiple VMs at same PG AM on same VLAN, ensure component IDs are eth0-3 on interfaces
    # to force it to go through hardware
    # for link in self.parsedUserRequest.links:
    #     # Only care about stitching links with more than 2 interfaces
    #     if len(link.aggregates) > 1 and not link.hasSharedVlan and link.typeName == link.VLAN_LINK_TYPE and len(link.interfaces) > 2:
    #         ifcsByNode = {}
    #         for ifc in link.interfaces:
    #             theNode = None
    #             for node in self.parseUserRequest.nodes:
    #                 if ifc in node.interface_ids
    #                     theNode = node
    #                     break
    #             if theNode is None:
    #                 error
    #             ifcsByNode[theNode] = [ifc]
    #         for node in ifcsByNode.keys():
    #             if len(ifcsByNode[node] < 2:
    #                 continue
    #             agg = Aggregate.find(theNode.amURN)
    #             if not agg.isPG:
    #                 self.logger.warn("%s is not a PG AM and may not support stitching multiple Nodes on same link", agg)
    #                 continue
    #             # Now we have a PG node with >2 interfaces on the same stitched link
    #             # Find the node in the rspec XML
    #             # find the interface
    #             # Add the component_id if it is not already there
    #             # FIXME: If some ifc on the node has the component_id, then I need to avoid using the same ones!
    #             # Maybe for now, if any ifc has a component_id in the original rspec, skip this node?
    # FIXME: we call rspec.getLinkEditedDom() to build what we send to the SCS. So the object / dom there needs to put the
    # component_id in in the right way. So probably I need to do this via objects.
    # So: objects.py class Node: store the interface_ref as an object that has both client_id (the id) and component_id.
    # Make that class have a toDOM method that writes in the correct interface_ref sub-elements as needed, and call that method
    # from clas RSpec.getLinkEditedDom
    #             ethcnt = 0
    #             For each ifc
    #                 If ifc in the current link, then add component_id attribute using ethcnt, and then increment
    pass
def saveAggregateState(self, oldAggs, newAggs):
    '''Save state from old aggregates for use with new aggregates from later SCS call'''
    # Simple scalar attributes copied wholesale from a matching old aggregate.
    # lastError is deliberately NOT in this list: we're restarting, so any
    # old error is cleared.
    copiedAttrs = ('dcn', 'isOESS', 'isFOAM', 'isGRAM', 'isPG', 'isEG',
                   'isExoSM', 'userRequested', 'alt_url', 'api_version',
                   'nick', 'doesSchemaV1', 'doesSchemaV2', 'slicecred',
                   'url')  # FIXME: is copying url correct?
    for newAgg in newAggs:
        for oldAgg in oldAggs:
            # Match new to old by URN, also accepting URN synonyms.
            # FIXME: Correct to compare urn_syns too?
            sameAgg = (newAgg.urn == oldAgg.urn or
                       newAgg.urn in oldAgg.urn_syns or
                       oldAgg.urn in newAgg.urn_syns)
            if not sameAgg:
                continue
            # Carry over per-hop VLAN unavailability by hop URN
            for hop in newAgg.hops:
                for oldHop in oldAgg.hops:
                    if hop.urn != oldHop.urn:
                        continue
                    if oldHop.excludeFromSCS:
                        self.logger.warn("%s had been marked to exclude from SCS, but we got it again", oldHop)
                    hop.vlans_unavailable = hop.vlans_unavailable.union(oldHop.vlans_unavailable)
                    break
            # FIXME: agg.allocateTries?
            for attrname in copiedAttrs:
                setattr(newAgg, attrname, getattr(oldAgg, attrname))
            newAgg.urn_syns = copy.deepcopy(oldAgg.urn_syns)
            break  # found the old match for this new agg; next newAgg
# End of saveAggregateState
def ensureSliverType(self):
    '''DCN AMs seem to insist that at least one sliver_type is specified
    on one node. If a DCN AM is involved and no sliver_type exists
    anywhere in the SCS RSpec, attach a default-vm sliver_type to the
    first node found.'''
    # No DCN AM in the mix -> nothing to do
    if not any(am.dcn for am in self.ams_to_process):
        return
    # Already have at least one sliver type element -> nothing to do
    if self.parsedSCSRSpec.dom.getElementsByTagName(defs.SLIVER_TYPE_TAG):
        return

    slivTypeNode = self.parsedSCSRSpec.dom.createElement(defs.SLIVER_TYPE_TAG)
    slivTypeNode.setAttribute("name", "default-vm")

    # Find the rspec element from parsedSCSRSpec.dom
    rspecs = self.parsedSCSRSpec.dom.getElementsByTagName(defs.RSPEC_TAG)
    if not rspecs:
        return
    # Attach the sliver type to the first node and stop
    for child in rspecs[0].childNodes:
        if child.localName == defs.NODE_TAG:
            nodeId = child.getAttribute(Node.CLIENT_ID_TAG)
            child.appendChild(slivTypeNode)
            self.logger.debug("To keep DCN AMs happy, adding a default-vm sliver type to node %s", nodeId)
            return
# End of ensureSliverType
# If we said this rspec needs a fake endpoint, add it here - so the SCS and other stuff
# doesn't try to do anything with it. Useful with Links from IG AMs to fixed interfaces
# on ION or AL2S.
def addFakeNode(self):
    '''Append a fake <node> endpoint to self.parsedSCSRSpec.dom, and add a
    matching interface_ref (plus component_manager and, if missing, two
    property elements) to each stitched link whose interfaces all sit at a
    single AM.'''
    # Build the fake node: <node client_id="fake"> containing <interface client_id="fake:if0">
    fakeNode = self.parsedSCSRSpec.dom.createElement(defs.NODE_TAG)
    fakeInterface = self.parsedSCSRSpec.dom.createElement("interface")
    fakeInterface.setAttribute(Node.CLIENT_ID_TAG, "fake:if0")
    fakeNode.setAttribute(Node.CLIENT_ID_TAG, "fake")
    fakeNode.setAttribute(Node.COMPONENT_MANAGER_ID_TAG, "urn:publicid:IDN+fake+authority+am")
    fakeCM = self.parsedSCSRSpec.dom.createElement(Link.COMPONENT_MANAGER_TAG)
    fakeCM.setAttribute(Link.NAME_TAG, "urn:publicid:IDN+fake+authority+am")
    fakeNode.appendChild(fakeInterface)
    fakeiRef = self.parsedSCSRSpec.dom.createElement(Link.INTERFACE_REF_TAG)
    fakeiRef.setAttribute(Node.CLIENT_ID_TAG, "fake:if0")
    # Find the rspec element from parsedSCSRSpec.dom
    rspecs = self.parsedSCSRSpec.dom.getElementsByTagName(defs.RSPEC_TAG)
    if not rspecs or len(rspecs) < 1:
        self.logger.debug("Failed to find <rspec> element")
        return
    rspec = rspecs[0]
    # Add a node to the dom
    # FIXME: Check that there is no node with the fake component_manager_id already?
    self.logger.info("Adding fake Node endpoint")
    rspec.appendChild(fakeNode)

    # Also find all links for which there is a stitching path and add an interface_ref to any with only 1 interface_ref
    # NOTE(review): fakeiRef and fakeCM are single DOM nodes; minidom's
    # appendChild relocates a node that already has a parent, so if more
    # than one link qualified below, only the last link would keep them.
    # Presumably only one link ever qualifies - confirm.
    for child in rspec.childNodes:
        if child.localName == defs.LINK_TAG:
            linkName = child.getAttribute(Node.CLIENT_ID_TAG)
            stitchPath = self.parsedSCSRSpec.find_path(linkName)
            if not stitchPath:
                # The link has no matching stitching path
                # This could be a link all within 1 AM, or a link on a shared VLAN, or an ExoGENI stitched link
                self.logger.debug("For fakeEndpoint, skipping main body link %s with no stitching path", linkName)
                continue
            ifcCount = 0        # number of interface_ref children on this link
            ifcAMCount = 0      # Num AMs the interfaces are at
            propCount = 0       # number of property children on this link
            ifc1Name = None     # client_id of the last interface_ref seen
            ifcAuths = []       # distinct AM URNs owning the interfaces
            for c2 in child.childNodes:
                if c2.localName == Link.INTERFACE_REF_TAG:
                    ifcCount += 1
                    ifc1Name = c2.getAttribute(Node.CLIENT_ID_TAG)
                    # Resolve which AM owns this interface via the node that declares it
                    for node in self.parsedSCSRSpec.nodes:
                        if ifc1Name in node.interface_ids:
                            ifcAuth = node.amURN
                            if not ifcAuth in ifcAuths:
                                ifcAuths.append(ifcAuth)
                                ifcAMCount += 1
                            break
                if c2.localName == Link.PROPERTY_TAG:
                    propCount += 1
            # End of loop over link sub-elements counting interface_refs
            if ifcAMCount == 1:
                # All interfaces at one AM: attach the fake endpoint to this link
                self.logger.info("Adding fake interface_ref endpoint on link %s", linkName)
                child.appendChild(fakeiRef)
                child.appendChild(fakeCM)
                if propCount == 0:
                    # Add the 2 property elements (one per direction, default capacity)
                    self.logger.debug("Adding property tags to link %s to fake node", linkName)
                    sP = self.parsedSCSRSpec.dom.createElement(Link.PROPERTY_TAG)
                    sP.setAttribute(LinkProperty.SOURCE_TAG, ifc1Name)
                    sP.setAttribute(LinkProperty.DEST_TAG, "fake:if0")
                    sP.setAttribute(LinkProperty.CAPACITY_TAG, str(self.opts.defaultCapacity))
                    dP = self.parsedSCSRSpec.dom.createElement(Link.PROPERTY_TAG)
                    dP.setAttribute(LinkProperty.DEST_TAG, ifc1Name)
                    dP.setAttribute(LinkProperty.SOURCE_TAG, "fake:if0")
                    dP.setAttribute(LinkProperty.CAPACITY_TAG, str(self.opts.defaultCapacity))
                    child.appendChild(sP)
                    child.appendChild(dP)
                else:
                    self.logger.debug("Link %s had only interfaces at 1 am (%d interfaces total), so added the fake interface - but it has %d properties already?", linkName, ifcCount, propCount)
            else:
                self.logger.debug("Not adding fake endpoint to link %s with %d interfaces at %d AMs", linkName, ifcCount, ifcAMCount)
        # Got a link
    # End of loop over top level elements in the RSpec XML to find links and add the fake interface_ref
# End of addFakeNode
def endPartiallyReserved(self, exception=None, aggs=None, timeout=False):
    """Wrap up a run in which only some aggregates got reservations.

    This can happen due to --noDeleteAtEnd plus a fatal failure or Ctrl-C,
    or due to --noTransitAMs when only transit AMs remain. It:
      1) Logs where reservations exist, are missing, and (where detectable) failed.
      2) Saves a combined manifest RSpec for the reservations we do have.
      3) Saves an expanded request RSpec covering what remains to be reserved.

    This method does not exit; it builds and returns a summary message
    suitable for logging at the end of the run.

    :param exception: optional exception explaining why we are quitting
        (currently unused here; callers may log it separately).
    :param aggs: list of Aggregate objects to categorize. Default is an
        empty list. (FIX: the original used a mutable default ``aggs=[]``,
        which is shared across calls; use a ``None`` sentinel instead.)
    :param timeout: whether we stopped due to a timeout (currently unused).
    :return: summary message string describing the partial state.
    """
    if aggs is None:
        aggs = []
    retMsg = ""
    # Note that caller has already noted we are not deleting existing reservations, and caller will log the stuff in 'msg'
    aggsRes = []      # AMs where we appear to hold a reservation
    aggsNoRes = []    # AMs with no reservation
    aggsFailed = []   # subset of aggsNoRes where an attempt evidently failed
    for agg in aggs:
        if agg.manifestDom:
            # FIXME: If the Ctrl-C happened during allocate, then we fake set the manifestDom so it looks like we have a reservation there,
            # because the AM may think we do. In fact, we may not. Perhaps detect this case and log something here? Perhaps with agg.completed?
            aggsRes.append(agg)
            if agg.api_version > 2:
                # APIv3+: allocation is temporary until provision + poa geni_start
                self.logger.debug(" Have a temporary reservation here (%s)! \n*** You must manually call `omni -a %s -V3 provision %s` and then `omni -a %s -V3 poa %s geni_start`", agg.url, agg.url, self.slicename, agg.url, self.slicename)
            else:
                self.logger.debug(" Have a reservation here (%s)!", agg.url)
        else:
            aggsNoRes.append(agg)
            self.logger.debug("%s has no reservation", agg)
            # Can we tell where we tried & failed?
            if agg.inProcess or agg.allocateTries > 0 or agg.triedRes or agg.lastError:
                aggsFailed.append(agg)
                self.logger.debug("%s was a failed attempt. inProcess=%s, allocateTries=%d, triedRes=%s, lastError=%s", agg, agg.inProcess, agg.allocateTries, agg.triedRes, agg.lastError)
    # Sanity check: every aggregate should land in exactly one bucket
    if len(aggsRes) + len(aggsNoRes) != len(aggs):
        self.logger.debug("Ack! aggsRes=%d, aggsNoRes=%d, but total aggs is %d", len(aggsRes), len(aggsNoRes), len(aggs))
    retMsg = "Stitcher interrupted"
    if len(aggsRes) > 0:
        retMsg += " with reservations at %d aggregate(s)" % len(aggsRes)
    retMsg += ". "
    if len(aggsNoRes) > 0:
        retMsg += "Reservation must be completed at %d aggregate(s). " % len(aggsNoRes)
    if len(aggsFailed) > 0:
        retMsg += "Reservation failed at: %s." % aggsFailed
    retMsg += "\n"
    if len(aggsRes) > 0:
        lastSuccAM = aggsRes[0]
        # Note this will include the AMs where we have no reservation
        combinedManifest, filename, retVal = self.getAndSaveCombinedManifest(lastSuccAM)
        # Print something about sliver expiration times
        msg = self.getExpirationMessage()
        if msg:
            retMsg += msg + '\n'
        if filename:
            msg = "Saved combined reservation RSpec at %d AM(s) to file '%s'\n" % (len(aggsRes), os.path.abspath(filename))
            retMsg += msg
    if len(aggsNoRes) > 0:
        # For the DOM to start from, start with one I've edited if it exists
        dom = self.parsedSCSRSpec.dom
        for am in aggsNoRes:
            if am.requestDom:
                dom = am.requestDom
                break
        # Generate / save the expanded request using the full list of AMs. Note this means
        # we'll include things that are technically for manifests only.
        # To avoid that, call with aggsNoRes instead.
        msg = self.writeExpandedRequest(aggs, dom)
        retMsg += msg
    self.logger.debug(retMsg)
    return retMsg
# End of endPartiallyReserved
|
alash3al/rethinkdb | refs/heads/next | test/rql_test/connections/http_support/werkzeug/testsuite/datastructures.py | 145 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.datastructures
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the functionality of the provided Werkzeug
datastructures.
TODO:
- FileMultiDict
- Immutable types undertested
- Split up dict tests
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import unittest
import pickle
from contextlib import contextmanager
from copy import copy, deepcopy
from werkzeug import datastructures
from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \
iterlistvalues, text_type, PY2
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.exceptions import BadRequestKeyError
class NativeItermethodsTestCase(WerkzeugTestCase):
    """Tests the ``native_itermethods`` class decorator, which exposes
    ``keys``/``values``/``items`` in the iteration style native to the
    running Python version."""

    def test_basic(self):
        """Extra arguments (``multi``) must be forwarded to the wrapped methods."""
        @datastructures.native_itermethods(['keys', 'values', 'items'])
        class StupidDict(object):
            def keys(self, multi=1):
                return iter(['a', 'b', 'c'] * multi)

            def values(self, multi=1):
                return iter([1, 2, 3] * multi)

            def items(self, multi=1):
                return iter(zip(iterkeys(self, multi=multi),
                                itervalues(self, multi=multi)))

        d = StupidDict()
        expected_keys = ['a', 'b', 'c']
        expected_values = [1, 2, 3]
        expected_items = list(zip(expected_keys, expected_values))

        # Plain iteration through the compat helpers.
        self.assert_equal(list(iterkeys(d)), expected_keys)
        self.assert_equal(list(itervalues(d)), expected_values)
        self.assert_equal(list(iteritems(d)), expected_items)

        # Positional extra argument is passed through unchanged.
        self.assert_equal(list(iterkeys(d, 2)), expected_keys * 2)
        self.assert_equal(list(itervalues(d, 2)), expected_values * 2)
        self.assert_equal(list(iteritems(d, 2)), expected_items * 2)
class MutableMultiDictBaseTestCase(WerkzeugTestCase):
    """Shared tests for all mutable multi-dict implementations.

    Subclasses set ``storage_class`` to the concrete class under test
    (e.g. ``MultiDict`` or ``OrderedMultiDict``).
    """

    # Concrete subclasses must override this with the class under test.
    storage_class = None

    def test_pickle(self):
        """The dict must round-trip through every pickle protocol, including
        pickles written under the old ``werkzeug`` flat module path."""
        cls = self.storage_class
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            d = cls()
            d.setlist(b'foo', [1, 2, 3, 4])
            d.setlist(b'bar', b'foo bar baz'.split())
            s = pickle.dumps(d, protocol)
            ud = pickle.loads(s)
            self.assert_equal(type(ud), type(d))
            self.assert_equal(ud, d)
            # Backwards compat: pickles referencing the old module path still load.
            self.assert_equal(pickle.loads(
                s.replace(b'werkzeug.datastructures', b'werkzeug')), d)
            ud[b'newkey'] = b'bla'
            self.assert_not_equal(ud, d)

    def test_basic_interface(self):
        """Exercises the full mutable multi-dict API end to end."""
        md = self.storage_class()
        assert isinstance(md, dict)

        mapping = [('a', 1), ('b', 2), ('a', 2), ('d', 3),
                   ('a', 1), ('a', 3), ('d', 4), ('c', 3)]
        md = self.storage_class(mapping)

        # simple getitem gives the first value
        self.assert_equal(md['a'], 1)
        self.assert_equal(md['c'], 3)
        with self.assert_raises(KeyError):
            md['e']
        self.assert_equal(md.get('a'), 1)

        # list getitem
        self.assert_equal(md.getlist('a'), [1, 2, 1, 3])
        self.assert_equal(md.getlist('d'), [3, 4])
        # do not raise if key not found
        self.assert_equal(md.getlist('x'), [])

        # simple setitem overwrites all values
        md['a'] = 42
        self.assert_equal(md.getlist('a'), [42])

        # list setitem
        md.setlist('a', [1, 2, 3])
        self.assert_equal(md['a'], 1)
        self.assert_equal(md.getlist('a'), [1, 2, 3])

        # verify that it does not change original lists
        l1 = [1, 2, 3]
        md.setlist('a', l1)
        del l1[:]
        self.assert_equal(md['a'], 1)

        # setdefault, setlistdefault
        self.assert_equal(md.setdefault('u', 23), 23)
        self.assert_equal(md.getlist('u'), [23])
        del md['u']
        md.setlist('u', [-1, -2])

        # delitem
        del md['u']
        with self.assert_raises(KeyError):
            md['u']
        del md['d']
        self.assert_equal(md.getlist('d'), [])

        # keys, values, items, lists
        self.assert_equal(list(sorted(md.keys())), ['a', 'b', 'c'])
        self.assert_equal(list(sorted(iterkeys(md))), ['a', 'b', 'c'])
        self.assert_equal(list(sorted(itervalues(md))), [1, 2, 3])
        self.assert_equal(list(sorted(itervalues(md))), [1, 2, 3])
        self.assert_equal(list(sorted(md.items())),
                          [('a', 1), ('b', 2), ('c', 3)])
        self.assert_equal(list(sorted(md.items(multi=True))),
                          [('a', 1), ('a', 2), ('a', 3), ('b', 2), ('c', 3)])
        self.assert_equal(list(sorted(iteritems(md))),
                          [('a', 1), ('b', 2), ('c', 3)])
        self.assert_equal(list(sorted(iteritems(md, multi=True))),
                          [('a', 1), ('a', 2), ('a', 3), ('b', 2), ('c', 3)])
        self.assert_equal(list(sorted(md.lists())),
                          [('a', [1, 2, 3]), ('b', [2]), ('c', [3])])
        self.assert_equal(list(sorted(iterlists(md))),
                          [('a', [1, 2, 3]), ('b', [2]), ('c', [3])])

        # copy method
        c = md.copy()
        self.assert_equal(c['a'], 1)
        self.assert_equal(c.getlist('a'), [1, 2, 3])

        # copy method 2
        c = copy(md)
        self.assert_equal(c['a'], 1)
        self.assert_equal(c.getlist('a'), [1, 2, 3])

        # deepcopy method
        c = md.deepcopy()
        self.assert_equal(c['a'], 1)
        self.assert_equal(c.getlist('a'), [1, 2, 3])

        # deepcopy method 2
        c = deepcopy(md)
        self.assert_equal(c['a'], 1)
        self.assert_equal(c.getlist('a'), [1, 2, 3])

        # update with a multidict
        od = self.storage_class([('a', 4), ('a', 5), ('y', 0)])
        md.update(od)
        self.assert_equal(md.getlist('a'), [1, 2, 3, 4, 5])
        self.assert_equal(md.getlist('y'), [0])

        # update with a regular dict
        md = c
        od = {'a': 4, 'y': 0}
        md.update(od)
        self.assert_equal(md.getlist('a'), [1, 2, 3, 4])
        self.assert_equal(md.getlist('y'), [0])

        # pop, poplist, popitem, popitemlist
        self.assert_equal(md.pop('y'), 0)
        assert 'y' not in md
        self.assert_equal(md.poplist('a'), [1, 2, 3, 4])
        assert 'a' not in md
        self.assert_equal(md.poplist('missing'), [])

        # remaining: b=2, c=3
        popped = md.popitem()
        assert popped in [('b', 2), ('c', 3)]
        popped = md.popitemlist()
        assert popped in [('b', [2]), ('c', [3])]

        # type conversion
        md = self.storage_class({'a': '4', 'b': ['2', '3']})
        self.assert_equal(md.get('a', type=int), 4)
        self.assert_equal(md.getlist('b', type=int), [2, 3])

        # repr
        md = self.storage_class([('a', 1), ('a', 2), ('b', 3)])
        assert "('a', 1)" in repr(md)
        assert "('a', 2)" in repr(md)
        assert "('b', 3)" in repr(md)

        # add and getlist
        md.add('c', '42')
        md.add('c', '23')
        self.assert_equal(md.getlist('c'), ['42', '23'])
        # values that fail the type conversion are silently skipped
        md.add('c', 'blah')
        self.assert_equal(md.getlist('c', type=int), [42, 23])

        # setdefault
        md = self.storage_class()
        md.setdefault('x', []).append(42)
        md.setdefault('x', []).append(23)
        self.assert_equal(md['x'], [42, 23])

        # to dict
        md = self.storage_class()
        md['foo'] = 42
        md.add('bar', 1)
        md.add('bar', 2)
        self.assert_equal(md.to_dict(), {'foo': 42, 'bar': 1})
        self.assert_equal(md.to_dict(flat=False), {'foo': [42], 'bar': [1, 2]})

        # popitem from empty dict
        with self.assert_raises(KeyError):
            self.storage_class().popitem()

        with self.assert_raises(KeyError):
            self.storage_class().popitemlist()

        # key errors are of a special type
        with self.assert_raises(BadRequestKeyError):
            self.storage_class()[42]

        # setlist works
        md = self.storage_class()
        md['foo'] = 42
        md.setlist('foo', [1, 2])
        self.assert_equal(md.getlist('foo'), [1, 2])
class ImmutableDictBaseTestCase(WerkzeugTestCase):
    """Shared tests for the immutable dict variants.

    Subclasses set ``storage_class`` to the concrete immutable class
    under test.
    """

    # Concrete subclasses must override this with the class under test.
    storage_class = None

    def test_follows_dict_interface(self):
        """Read-only dict operations behave like a plain dict."""
        cls = self.storage_class

        data = {'foo': 1, 'bar': 2, 'baz': 3}
        d = cls(data)

        self.assert_equal(d['foo'], 1)
        self.assert_equal(d['bar'], 2)
        self.assert_equal(d['baz'], 3)
        self.assert_equal(sorted(d.keys()), ['bar', 'baz', 'foo'])
        self.assert_true('foo' in d)
        self.assert_true('foox' not in d)
        self.assert_equal(len(d), 3)

    def test_copies_are_mutable(self):
        """``copy()`` yields a mutable dict; ``copy.copy`` returns self."""
        cls = self.storage_class
        immutable = cls({'a': 1})
        # Mutation of the immutable object itself must raise.
        with self.assert_raises(TypeError):
            immutable.pop('a')

        mutable = immutable.copy()
        mutable.pop('a')
        self.assert_true('a' in immutable)
        self.assert_true(mutable is not immutable)
        # Shallow-copying an immutable dict is a no-op.
        self.assert_true(copy(immutable) is immutable)

    def test_dict_is_hashable(self):
        """Immutable dicts are hashable and usable as set members."""
        cls = self.storage_class
        immutable = cls({'a': 1, 'b': 2})
        immutable2 = cls({'a': 2, 'b': 2})
        x = set([immutable])
        self.assert_true(immutable in x)
        self.assert_true(immutable2 not in x)
        x.discard(immutable)
        self.assert_true(immutable not in x)
        self.assert_true(immutable2 not in x)
        x.add(immutable2)
        self.assert_true(immutable not in x)
        self.assert_true(immutable2 in x)
        x.add(immutable)
        self.assert_true(immutable in x)
        self.assert_true(immutable2 in x)
class ImmutableTypeConversionDictTestCase(ImmutableDictBaseTestCase):
    """Runs the shared immutable-dict tests against ImmutableTypeConversionDict."""
    storage_class = datastructures.ImmutableTypeConversionDict
class ImmutableMultiDictTestCase(ImmutableDictBaseTestCase):
    """Runs the shared immutable-dict tests against ImmutableMultiDict,
    plus a hashability check that covers multi-value keys."""
    storage_class = datastructures.ImmutableMultiDict

    def test_multidict_is_hashable(self):
        """Hashing must account for all values of a key, not just the first."""
        cls = self.storage_class
        immutable = cls({'a': [1, 2], 'b': 2})
        immutable2 = cls({'a': [1], 'b': 2})
        x = set([immutable])
        self.assert_true(immutable in x)
        self.assert_true(immutable2 not in x)
        x.discard(immutable)
        self.assert_true(immutable not in x)
        self.assert_true(immutable2 not in x)
        x.add(immutable2)
        self.assert_true(immutable not in x)
        self.assert_true(immutable2 in x)
        x.add(immutable)
        self.assert_true(immutable in x)
        self.assert_true(immutable2 in x)
class ImmutableDictTestCase(ImmutableDictBaseTestCase):
    """Runs the shared immutable-dict tests against ImmutableDict."""
    storage_class = datastructures.ImmutableDict
class ImmutableOrderedMultiDictTestCase(ImmutableDictBaseTestCase):
    """Runs the shared immutable-dict tests against ImmutableOrderedMultiDict,
    plus an order-sensitivity check for hashing."""
    storage_class = datastructures.ImmutableOrderedMultiDict

    def test_ordered_multidict_is_hashable(self):
        """Same items in a different insertion order must hash differently."""
        a = self.storage_class([('a', 1), ('b', 1), ('a', 2)])
        b = self.storage_class([('a', 1), ('a', 2), ('b', 1)])
        self.assert_not_equal(hash(a), hash(b))
class MultiDictTestCase(MutableMultiDictBaseTestCase):
    """Runs the shared mutable multi-dict tests against MultiDict,
    plus MultiDict-specific behaviors."""
    storage_class = datastructures.MultiDict

    def test_multidict_pop(self):
        """``pop`` returns the first value of a key; defaults apply only
        when the key is missing."""
        make_d = lambda: self.storage_class({'foo': [1, 2, 3, 4]})
        d = make_d()
        self.assert_equal(d.pop('foo'), 1)
        assert not d
        d = make_d()
        self.assert_equal(d.pop('foo', 32), 1)
        assert not d
        d = make_d()
        self.assert_equal(d.pop('foos', 32), 32)
        assert d

        with self.assert_raises(KeyError):
            d.pop('foos')

    def test_setlistdefault(self):
        """``setlistdefault`` installs a default list for a missing key."""
        md = self.storage_class()
        self.assert_equal(md.setlistdefault('u', [-1, -2]), [-1, -2])
        self.assert_equal(md.getlist('u'), [-1, -2])
        self.assert_equal(md['u'], -1)

    def test_iter_interfaces(self):
        """keys/listvalues/lists iteration variants stay mutually consistent."""
        mapping = [('a', 1), ('b', 2), ('a', 2), ('d', 3),
                   ('a', 1), ('a', 3), ('d', 4), ('c', 3)]
        md = self.storage_class(mapping)
        self.assert_equal(list(zip(md.keys(), md.listvalues())),
                          list(md.lists()))
        self.assert_equal(list(zip(md, iterlistvalues(md))),
                          list(iterlists(md)))
        self.assert_equal(list(zip(iterkeys(md), iterlistvalues(md))),
                          list(iterlists(md)))
class OrderedMultiDictTestCase(MutableMultiDictBaseTestCase):
    """Runs the shared mutable multi-dict tests against OrderedMultiDict,
    plus order-preservation specific behaviors."""
    storage_class = datastructures.OrderedMultiDict

    def test_ordered_interface(self):
        """Insertion order is preserved across add/update/pop operations."""
        cls = self.storage_class

        d = cls()
        assert not d
        d.add('foo', 'bar')
        self.assert_equal(len(d), 1)
        d.add('foo', 'baz')
        self.assert_equal(len(d), 1)
        self.assert_equal(list(iteritems(d)), [('foo', 'bar')])
        self.assert_equal(list(d), ['foo'])
        self.assert_equal(list(iteritems(d, multi=True)),
                          [('foo', 'bar'), ('foo', 'baz')])
        del d['foo']
        assert not d
        self.assert_equal(len(d), 0)
        self.assert_equal(list(d), [])

        d.update([('foo', 1), ('foo', 2), ('bar', 42)])
        d.add('foo', 3)
        self.assert_equal(d.getlist('foo'), [1, 2, 3])
        self.assert_equal(d.getlist('bar'), [42])
        self.assert_equal(list(iteritems(d)), [('foo', 1), ('bar', 42)])

        expected = ['foo', 'bar']
        self.assert_sequence_equal(list(d.keys()), expected)
        self.assert_sequence_equal(list(d), expected)
        self.assert_sequence_equal(list(iterkeys(d)), expected)

        self.assert_equal(list(iteritems(d, multi=True)),
                          [('foo', 1), ('foo', 2), ('bar', 42), ('foo', 3)])
        self.assert_equal(len(d), 2)

        self.assert_equal(d.pop('foo'), 1)
        assert d.pop('blafasel', None) is None
        self.assert_equal(d.pop('blafasel', 42), 42)
        self.assert_equal(len(d), 1)
        self.assert_equal(d.poplist('bar'), [42])
        assert not d

        # FIX: this line was a bare expression (`d.get('missingkey') is None`)
        # that asserted nothing; make it an actual assertion.
        self.assert_true(d.get('missingkey') is None)

        d.add('foo', 42)
        d.add('foo', 23)
        d.add('bar', 2)
        d.add('foo', 42)
        self.assert_equal(d, datastructures.MultiDict(d))
        # FIX: the local was named `id`, shadowing the builtin; renamed.
        d2 = self.storage_class(d)
        self.assert_equal(d, d2)
        d.add('foo', 2)
        assert d != d2

        d.update({'blah': [1, 2, 3]})
        self.assert_equal(d['blah'], 1)
        self.assert_equal(d.getlist('blah'), [1, 2, 3])

        # setlist works
        d = self.storage_class()
        d['foo'] = 42
        d.setlist('foo', [1, 2])
        self.assert_equal(d.getlist('foo'), [1, 2])

        with self.assert_raises(BadRequestKeyError):
            d.pop('missing')
        with self.assert_raises(BadRequestKeyError):
            d['missing']

        # popping
        d = self.storage_class()
        d.add('foo', 23)
        d.add('foo', 42)
        d.add('foo', 1)
        self.assert_equal(d.popitem(), ('foo', 23))
        with self.assert_raises(BadRequestKeyError):
            d.popitem()
        assert not d

        d.add('foo', 23)
        d.add('foo', 42)
        d.add('foo', 1)
        self.assert_equal(d.popitemlist(), ('foo', [23, 42, 1]))

        with self.assert_raises(BadRequestKeyError):
            d.popitemlist()

    def test_iterables(self):
        """CombinedMultiDict exposes merged list/keys iteration."""
        a = datastructures.MultiDict((("key_a", "value_a"),))
        b = datastructures.MultiDict((("key_b", "value_b"),))
        ab = datastructures.CombinedMultiDict((a, b))

        self.assert_equal(sorted(ab.lists()), [('key_a', ['value_a']), ('key_b', ['value_b'])])
        self.assert_equal(sorted(ab.listvalues()), [['value_a'], ['value_b']])
        self.assert_equal(sorted(ab.keys()), ["key_a", "key_b"])

        self.assert_equal(sorted(iterlists(ab)), [('key_a', ['value_a']), ('key_b', ['value_b'])])
        self.assert_equal(sorted(iterlistvalues(ab)), [['value_a'], ['value_b']])
        self.assert_equal(sorted(iterkeys(ab)), ["key_a", "key_b"])
class CombinedMultiDictTestCase(WerkzeugTestCase):
    """Tests CombinedMultiDict: a read-only view over several MultiDicts."""
    storage_class = datastructures.CombinedMultiDict

    def test_basic_interface(self):
        """Lookups merge the wrapped dicts in order; mutation is rejected."""
        d1 = datastructures.MultiDict([('foo', '1')])
        d2 = datastructures.MultiDict([('bar', '2'), ('bar', '3')])
        d = self.storage_class([d1, d2])

        # lookup
        self.assert_equal(d['foo'], '1')
        self.assert_equal(d['bar'], '2')
        self.assert_equal(d.getlist('bar'), ['2', '3'])

        self.assert_equal(sorted(d.items()),
                          [('bar', '2'), ('foo', '1')])
        self.assert_equal(sorted(d.items(multi=True)),
                          [('bar', '2'), ('bar', '3'), ('foo', '1')])
        assert 'missingkey' not in d
        assert 'foo' in d

        # type lookup
        self.assert_equal(d.get('foo', type=int), 1)
        self.assert_equal(d.getlist('bar', type=int), [2, 3])

        # get key errors for missing stuff
        with self.assert_raises(KeyError):
            d['missing']

        # make sure that they are immutable
        with self.assert_raises(TypeError):
            d['foo'] = 'blub'

        # copies are immutable
        d = d.copy()
        with self.assert_raises(TypeError):
            d['foo'] = 'blub'

        # make sure lists merges
        md1 = datastructures.MultiDict((("foo", "bar"),))
        md2 = datastructures.MultiDict((("foo", "blafasel"),))
        x = self.storage_class((md1, md2))
        self.assert_equal(list(iterlists(x)), [('foo', ['bar', 'blafasel'])])
class HeadersTestCase(WerkzeugTestCase):
    """Tests the Headers datastructure: a case-insensitive, ordered,
    list-like mapping of HTTP headers."""
    storage_class = datastructures.Headers

    def test_basic_interface(self):
        """Case-insensitive lookup, assignment, stringification, extended add."""
        headers = self.storage_class()
        headers.add('Content-Type', 'text/plain')
        headers.add('X-Foo', 'bar')
        assert 'x-Foo' in headers
        assert 'Content-type' in headers

        headers['Content-Type'] = 'foo/bar'
        self.assert_equal(headers['Content-Type'], 'foo/bar')
        self.assert_equal(len(headers.getlist('Content-Type')), 1)

        # list conversion
        self.assert_equal(headers.to_wsgi_list(), [
            ('Content-Type', 'foo/bar'),
            ('X-Foo', 'bar')
        ])
        self.assert_equal(str(headers), (
            "Content-Type: foo/bar\r\n"
            "X-Foo: bar\r\n"
            "\r\n"))
        self.assert_equal(str(self.storage_class()), "\r\n")

        # extended add: keyword arguments become header options
        headers.add('Content-Disposition', 'attachment', filename='foo')
        self.assert_equal(headers['Content-Disposition'],
                          'attachment; filename=foo')

        headers.add('x', 'y', z='"')
        self.assert_equal(headers['x'], r'y; z="\""')

    def test_defaults_and_conversion(self):
        """Defaults, type conversion, and list-style indexing/slicing."""
        # defaults
        headers = self.storage_class([
            ('Content-Type', 'text/plain'),
            ('X-Foo', 'bar'),
            ('X-Bar', '1'),
            ('X-Bar', '2')
        ])
        self.assert_equal(headers.getlist('x-bar'), ['1', '2'])
        self.assert_equal(headers.get('x-Bar'), '1')
        self.assert_equal(headers.get('Content-Type'), 'text/plain')

        self.assert_equal(headers.setdefault('X-Foo', 'nope'), 'bar')
        self.assert_equal(headers.setdefault('X-Bar', 'nope'), '1')
        self.assert_equal(headers.setdefault('X-Baz', 'quux'), 'quux')
        self.assert_equal(headers.setdefault('X-Baz', 'nope'), 'quux')
        headers.pop('X-Baz')

        # type conversion
        self.assert_equal(headers.get('x-bar', type=int), 1)
        self.assert_equal(headers.getlist('x-bar', type=int), [1, 2])

        # list like operations
        self.assert_equal(headers[0], ('Content-Type', 'text/plain'))
        self.assert_equal(headers[:1], self.storage_class([('Content-Type', 'text/plain')]))
        del headers[:2]
        del headers[-1]
        self.assert_equal(headers, self.storage_class([('X-Bar', '1')]))

    def test_copying(self):
        """Copies are independent of the original."""
        a = self.storage_class([('foo', 'bar')])
        b = a.copy()
        a.add('foo', 'baz')
        self.assert_equal(a.getlist('foo'), ['bar', 'baz'])
        self.assert_equal(b.getlist('foo'), ['bar'])

    def test_popping(self):
        """``pop`` honors defaults and raises on missing keys."""
        headers = self.storage_class([('a', 1)])
        self.assert_equal(headers.pop('a'), 1)
        self.assert_equal(headers.pop('b', 2), 2)

        with self.assert_raises(KeyError):
            headers.pop('c')

    def test_set_arguments(self):
        """``set`` accepts the same option keywords as ``add``."""
        a = self.storage_class()
        a.set('Content-Disposition', 'useless')
        a.set('Content-Disposition', 'attachment', filename='foo')
        self.assert_equal(a['Content-Disposition'], 'attachment; filename=foo')

    def test_reject_newlines(self):
        """Header values containing newlines must be rejected
        (header-injection protection)."""
        h = self.storage_class()

        for variation in 'foo\nbar', 'foo\r\nbar', 'foo\rbar':
            with self.assert_raises(ValueError):
                h['foo'] = variation
            with self.assert_raises(ValueError):
                h.add('foo', variation)
            with self.assert_raises(ValueError):
                h.add('foo', 'test', option=variation)
            with self.assert_raises(ValueError):
                h.set('foo', variation)
            with self.assert_raises(ValueError):
                h.set('foo', 'test', option=variation)

    def test_slicing(self):
        """Slice assignment replaces the full header list."""
        # there's nothing wrong with these being native strings
        # Headers doesn't care about the data types
        h = self.storage_class()
        h.set('X-Foo-Poo', 'bleh')
        h.set('Content-Type', 'application/whocares')
        h.set('X-Forwarded-For', '192.168.0.123')
        h[:] = [(k, v) for k, v in h if k.startswith(u'X-')]
        self.assert_equal(list(h), [
            ('X-Foo-Poo', 'bleh'),
            ('X-Forwarded-For', '192.168.0.123')
        ])

    def test_bytes_operations(self):
        """``as_bytes`` returns raw byte values."""
        h = self.storage_class()
        h.set('X-Foo-Poo', 'bleh')
        h.set('X-Whoops', b'\xff')

        self.assert_equal(h.get('x-foo-poo', as_bytes=True), b'bleh')
        self.assert_equal(h.get('x-whoops', as_bytes=True), b'\xff')

    def test_to_wsgi_list(self):
        """WSGI list items are native strings: bytes on PY2, unicode on PY3."""
        h = self.storage_class()
        h.set(u'Key', u'Value')
        for key, value in h.to_wsgi_list():
            if PY2:
                self.assert_strict_equal(key, b'Key')
                self.assert_strict_equal(value, b'Value')
            else:
                self.assert_strict_equal(key, u'Key')
                self.assert_strict_equal(value, u'Value')
class EnvironHeadersTestCase(WerkzeugTestCase):
    """Tests EnvironHeaders: a read-only Headers view over a WSGI environ."""
    storage_class = datastructures.EnvironHeaders

    def test_basic_interface(self):
        """Duplicate HTTP_*/CONTENT_* environ keys must collapse to one header."""
        # this happens in multiple WSGI servers because they
        # use a very naive way to convert the headers;
        broken_env = {
            'HTTP_CONTENT_TYPE': 'text/html',
            'CONTENT_TYPE': 'text/html',
            'HTTP_CONTENT_LENGTH': '0',
            'CONTENT_LENGTH': '0',
            'HTTP_ACCEPT': '*',
            'wsgi.version': (1, 0)
        }
        headers = self.storage_class(broken_env)
        assert headers
        self.assert_equal(len(headers), 3)
        self.assert_equal(sorted(headers), [
            ('Accept', '*'),
            ('Content-Length', '0'),
            ('Content-Type', 'text/html')
        ])
        # Non-header environ keys contribute nothing.
        assert not self.storage_class({'wsgi.version': (1, 0)})
        self.assert_equal(len(self.storage_class({'wsgi.version': (1, 0)})), 0)

    def test_return_type_is_unicode(self):
        """Values from the native-string environ come back as unicode."""
        # environ contains native strings; we return unicode
        headers = self.storage_class({
            'HTTP_FOO': '\xe2\x9c\x93',
            'CONTENT_TYPE': 'text/plain',
        })
        self.assert_equal(headers['Foo'], u"\xe2\x9c\x93")
        assert isinstance(headers['Foo'], text_type)
        assert isinstance(headers['Content-Type'], text_type)
        iter_output = dict(iter(headers))
        self.assert_equal(iter_output['Foo'], u"\xe2\x9c\x93")
        assert isinstance(iter_output['Foo'], text_type)
        assert isinstance(iter_output['Content-Type'], text_type)

    def test_bytes_operations(self):
        """``as_bytes`` yields latin1-encoded bytes of the environ value."""
        foo_val = '\xff'
        h = self.storage_class({
            'HTTP_X_FOO': foo_val
        })

        self.assert_equal(h.get('x-foo', as_bytes=True), b'\xff')
        self.assert_equal(h.get('x-foo'), u'\xff')
class HeaderSetTestCase(WerkzeugTestCase):
    """Tests HeaderSet: an ordered, case-insensitive set of header tokens."""
    storage_class = datastructures.HeaderSet

    def test_basic_interface(self):
        """Membership, find/index (case-insensitive), discard, and clear."""
        hs = self.storage_class()
        hs.add('foo')
        hs.add('bar')
        assert 'Bar' in hs
        self.assert_equal(hs.find('foo'), 0)
        self.assert_equal(hs.find('BAR'), 1)
        # ``find`` returns a negative value for missing items...
        assert hs.find('baz') < 0
        hs.discard('missing')
        hs.discard('foo')
        assert hs.find('foo') < 0
        self.assert_equal(hs.find('bar'), 0)

        # ...while ``index`` raises instead.
        with self.assert_raises(IndexError):
            hs.index('missing')

        self.assert_equal(hs.index('bar'), 0)
        assert hs
        hs.clear()
        assert not hs
class ImmutableListTestCase(WerkzeugTestCase):
    """Tests ImmutableList, the hashable read-only list variant."""
    storage_class = datastructures.ImmutableList

    def test_list_hashable(self):
        """Hashes like the equivalent tuple, but does not compare equal to it."""
        t = (1, 2, 3, 4)
        l = self.storage_class(t)
        self.assert_equal(hash(t), hash(l))
        self.assert_not_equal(t, l)
def make_call_asserter(assert_equal_func, func=None):
    """Utility to assert a certain number of function calls.

    Returns a ``(asserter, wrapped)`` pair: ``wrapped`` counts every
    invocation (delegating to *func* when given), and ``asserter`` is a
    context manager that resets the counter on entry and, on exit, passes
    the observed count, the expected count and an optional message to
    *assert_equal_func*.

    >>> assert_calls, func = make_call_asserter(self.assert_equal)
    >>> with assert_calls(2):
    ...     func()
    ...     func()
    """
    # Single-element list acts as a mutable cell so the nested functions
    # can update it (py2-compatible alternative to ``nonlocal``).
    counter = [0]

    @contextmanager
    def asserter(count, msg=None):
        counter[0] = 0
        yield
        assert_equal_func(counter[0], count, msg)

    def wrapped(*args, **kwargs):
        counter[0] += 1
        if func is None:
            return None
        return func(*args, **kwargs)

    return asserter, wrapped
class CallbackDictTestCase(WerkzeugTestCase):
    """Tests CallbackDict: a dict that invokes ``on_update`` after each mutation."""
    storage_class = datastructures.CallbackDict

    def test_callback_dict_reads(self):
        """Read-only access must never fire the update callback."""
        assert_calls, func = make_call_asserter(self.assert_equal)
        initial = {'a': 'foo', 'b': 'bar'}
        dct = self.storage_class(initial=initial, on_update=func)
        with assert_calls(0, 'callback triggered by read-only method'):
            # read-only methods
            dct['a']
            dct.get('a')
            self.assert_raises(KeyError, lambda: dct['x'])
            'a' in dct
            list(iter(dct))
            dct.copy()
        with assert_calls(0, 'callback triggered without modification'):
            # methods that may write but don't
            dct.pop('z', None)
            dct.setdefault('a')

    def test_callback_dict_writes(self):
        """Every successful mutation fires exactly one callback; failed
        mutations fire none."""
        assert_calls, func = make_call_asserter(self.assert_equal)
        initial = {'a': 'foo', 'b': 'bar'}
        dct = self.storage_class(initial=initial, on_update=func)
        with assert_calls(8, 'callback not triggered by write method'):
            # always-write methods
            dct['z'] = 123
            dct['z'] = 123  # must trigger again
            del dct['z']
            dct.pop('b', None)
            dct.setdefault('x')
            dct.popitem()
            dct.update([])
            dct.clear()
        with assert_calls(0, 'callback triggered by failed del'):
            self.assert_raises(KeyError, lambda: dct.__delitem__('x'))
        with assert_calls(0, 'callback triggered by failed pop'):
            self.assert_raises(KeyError, lambda: dct.pop('x'))
def suite():
    """Build the unittest suite for the datastructures module.

    The registration order below mirrors the historical ordering and is
    preserved deliberately.
    """
    cases = (
        MultiDictTestCase,
        OrderedMultiDictTestCase,
        CombinedMultiDictTestCase,
        ImmutableTypeConversionDictTestCase,
        ImmutableMultiDictTestCase,
        ImmutableDictTestCase,
        ImmutableOrderedMultiDictTestCase,
        HeadersTestCase,
        EnvironHeadersTestCase,
        HeaderSetTestCase,
        NativeItermethodsTestCase,
        CallbackDictTestCase,
    )
    result = unittest.TestSuite()
    for case in cases:
        result.addTest(unittest.makeSuite(case))
    return result
|
rsteca/python-social-auth | refs/heads/master | social/backends/dropbox.py | 83 | """
Dropbox OAuth1 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/dropbox.html
"""
from social.backends.oauth import BaseOAuth1, BaseOAuth2
class DropboxOAuth(BaseOAuth1):
    """Dropbox OAuth authentication backend"""
    name = 'dropbox'
    # Key in the user-data response that uniquely identifies the account.
    ID_KEY = 'uid'
    AUTHORIZATION_URL = 'https://www.dropbox.com/1/oauth/authorize'
    REQUEST_TOKEN_URL = 'https://api.dropbox.com/1/oauth/request_token'
    REQUEST_TOKEN_METHOD = 'POST'
    ACCESS_TOKEN_URL = 'https://api.dropbox.com/1/oauth/access_token'
    ACCESS_TOKEN_METHOD = 'POST'
    # Dropbox expects the redirect URI under this OAuth1 parameter name.
    REDIRECT_URI_PARAMETER_NAME = 'oauth_callback'
    # (response key, storage name) pairs persisted into extra_data.
    EXTRA_DATA = [
        ('id', 'id'),
        ('expires', 'expires')
    ]

    def get_user_details(self, response):
        """Return user details from Dropbox account"""
        # Split the single display name into the name fields expected
        # by the social-auth pipeline.
        fullname, first_name, last_name = self.get_user_names(
            response.get('display_name')
        )
        return {'username': str(response.get('uid')),
                'email': response.get('email'),
                'fullname': fullname,
                'first_name': first_name,
                'last_name': last_name}

    def user_data(self, access_token, *args, **kwargs):
        """Loads user data from service"""
        return self.get_json('https://api.dropbox.com/1/account/info',
                             auth=self.oauth_auth(access_token))
class DropboxOAuth2(BaseOAuth2):
    """Dropbox OAuth2 authentication backend."""
    name = 'dropbox-oauth2'
    # Key in the user-data response that uniquely identifies the account.
    ID_KEY = 'uid'
    AUTHORIZATION_URL = 'https://www.dropbox.com/1/oauth2/authorize'
    ACCESS_TOKEN_URL = 'https://api.dropbox.com/1/oauth2/token'
    ACCESS_TOKEN_METHOD = 'POST'
    # Dropbox does not support the redirect-state round trip.
    REDIRECT_STATE = False
    # (response key, storage name) pairs persisted into extra_data.
    EXTRA_DATA = [
        ('uid', 'username'),
    ]

    def get_user_details(self, response):
        """Return user details from Dropbox account"""
        # Split the single display name into the name fields expected
        # by the social-auth pipeline.
        fullname, first_name, last_name = self.get_user_names(
            response.get('display_name')
        )
        return {'username': str(response.get('uid')),
                'email': response.get('email'),
                'fullname': fullname,
                'first_name': first_name,
                'last_name': last_name}

    def user_data(self, access_token, *args, **kwargs):
        """Loads user data from service"""
        # OAuth2 uses a bearer token in the Authorization header.
        return self.get_json(
            'https://api.dropbox.com/1/account/info',
            headers={'Authorization': 'Bearer {0}'.format(access_token)}
        )
|
394954369/horizon | refs/heads/master | horizon/management/commands/__init__.py | 12133432 | |
TeamBasedLearning/Service | refs/heads/master | pgtbl/discipline/__init__.py | 12133432 | |
tysonclugg/django | refs/heads/master | tests/admin_scripts/app_with_import/__init__.py | 12133432 | |
confeitaria/inelegant | refs/heads/master | inelegant/finder.py | 1 | #
# Copyright 2015, 2016 Adam Victor Brandizzi
#
# This file is part of Inelegant.
#
# Inelegant is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inelegant is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Inelegant. If not, see <http://www.gnu.org/licenses/>.
import unittest
import doctest
import importlib
import inspect
import itertools
import sys
import os
from inelegant.module import get_caller_module
class TestFinder(unittest.TestSuite):
    """
    ``TestFinder`` is a subclass of ``unittest.TestSuite``. It receives
    modules, modules names and file paths as its arguments. It will look for
    subclasses of ``unittest.TestCase`` and ``unittest.TestSuite`` from each
    module it receives. It will also look for doctests in the docstrings of the
    functions and classes from the module. If a file path is given to it, then
    it will look for doctests inside it.

    Loading test cases
    ------------------

    If we have the test case below...

    ::

    >>> class SomeTestCase(unittest.TestCase):
    ...     def test1(self):
    ...         self.assertEquals(1, 1)
    ...     def testFail(self):
    ...         self.assertEquals(1, 0)
    ...     def testError(self):
    ...         self.assertEquals(1, 1/0)

    ...in a module, and the module is given to the finder, then all of these
    tests will be available in the finder::

    >>> from inelegant.module import installed_module
    >>> with installed_module('t', scope={'SomeTestCase': SomeTestCase}) as t:
    ...     finder = TestFinder(t)
    ...     finder.countTestCases()
    3

    It also works if one gives the module name instead of the module itself::

    >>> with installed_module('t', scope={'SomeTestCase': SomeTestCase}):
    ...     finder = TestFinder('t')
    ...     finder.countTestCases()
    3

    All other methods from ``unittest.TestSuite`` are available as well.

    Ignoring test cases
    -------------------

    Sometimes we may not want to load a specific test case. In these cases, we
    can pass the test case's classes to be ignored to the named-only ``skip``
    argument::

    >>> class TestCase1(unittest.TestCase):
    ...     def testFail(self):
    ...         self.assertEquals(1, 0)
    >>> class TestCase2(unittest.TestCase):
    ...     def testError(self):
    ...         self.assertEquals(1, 1/0)
    >>> with installed_module('t1', to_adopt=[TestCase1, TestCase2]) as t1:
    ...     finder = TestFinder(t1)
    ...     finder.countTestCases()
    2
    >>> with installed_module('t1', to_adopt=[TestCase1, TestCase2]) as t1:
    ...     finder = TestFinder(t1, skip=[TestCase2])
    ...     finder.countTestCases()
    1

    If only one class is to be ignored, it can be passed directly

    >>> with installed_module('t1', to_adopt=[TestCase1, TestCase2]) as t1:
    ...     finder = TestFinder(t1, skip=TestCase2)
    ...     finder.countTestCases()
    1

    It is very useful when a base test case is to be extended with necessary
    methods::

    >>> class TestMultiplier(unittest.TestCase):
    ...     def test_add(self):
    ...         m = self.get_multiplier()
    ...         self.assertEquals(4, m(2, 2))

    This way, it can test different implementations::

    >>> def mul1(a, b):
    ...     return a*b
    >>> def mul2(a, b):
    ...     return sum([a]*b)
    >>> class TestMul1(TestMultiplier):
    ...     def get_multiplier(self):
    ...         return mul1
    >>> class TestMul2(TestMultiplier):
    ...     def get_multiplier(self):
    ...         return mul2

    Naturally, we do not want to run tests from the base class. However, it is
    usually imported into the modules that are going to extend it, causing
    errors::

    >>> runner = unittest.TextTestRunner(stream=sys.stdout)
    >>> with installed_module('tm', to_adopt=[
    ...         TestMultiplier, TestMul1, TestMul2]):
    ...     finder = TestFinder('tm')
    ...     _ = runner.run(finder) # doctest: +ELLIPSIS
    ..E
    ======================================================================
    ERROR: test_add (tm.TestMultiplier)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
    ...
    AttributeError: 'TestMultiplier' object has no attribute 'get_multiplier'
    ...
    Ran 3 tests in ...
    ...
    FAILED (errors=1)

    Here the ``skip`` argument helps::

    >>> with installed_module('tm', to_adopt=[
    ...         TestMultiplier, TestMul1, TestMul2]):
    ...     finder = TestFinder('tm', skip=[TestMultiplier])
    ...     _ = runner.run(finder) # doctest: +ELLIPSIS
    ..
    ----------------------------------------------------------------------
    Ran 2 tests in ...
    <BLANKLINE>
    OK

    Loading docstrings
    ------------------

    If a module containing docstrings with doctests is given to the finder then
    a the doctest cases will also be available. So, if we had such a class::

    >>> class Point(object):
    ...     '''
    ...     Is a point:
    ...
    ...     >>> p = Point(2, 3)
    ...     >>> p.x
    ...     2
    ...     >>> p.y
    ...     3
    ...     '''
    ...     def __init__(self, x, y):
    ...         self.x = x
    ...         self.y = y
    ...     def distance(self, point):
    ...         '''
    ...         Distance to other point:
    ...
    ...         >>> Point(0, 0).distance(Point(2, 3)
    ...         5.0
    ...         '''
    ...         return ((self.x-point.x)**2 + (self.y-point.y)**2)**(1/2)

    ...and its module is given to the finder, the finder will have two test
    cases - one for the class docstring and other for the method docstring::

    >>> with installed_module('point', to_adopt=[Point]) as p:
    ...     finder = TestFinder(p)
    ...     finder.countTestCases()
    2

    Loading files
    -------------

    Doctests can be added to arbitrary text files as well, and ``TestFinder``
    can also load them. Given the example below one just needs to give its path
    to the finder to have the doctests loaded as test cases::

    >>> from inelegant.fs import temp_file as tempfile
    >>> content = '''
    ... >>> 3+3
    ... 6
    ... '''
    >>> with tempfile(content=content) as path:
    ...     finder = TestFinder(path)
    ...     finder.countTestCases()
    1

    The nicest thing of it all, however, is that one can give all these
    options, to the finder at once::

    >>> with tempfile(content=content) as path:
    ...     with installed_module('t', to_adopt=[SomeTestCase]),\\
    ...             installed_module('point', to_adopt=[Point]) as p:
    ...         finder = TestFinder('t', p, path)
    ...         finder.countTestCases()
    6
    """

    def __init__(self, *testables, **kwargs):
        unittest.TestSuite.__init__(self)
        # ``skip`` is an (emulated) keyword-only option: test case classes
        # that should be left out of the suite.
        skip = kwargs.get('skip', None)
        try:
            # The caller's module is the reference point used to resolve
            # relative doctest file paths.
            caller_module = get_caller_module()
        except:
            # NOTE(review): bare except — any failure here silently falls
            # back to __main__; consider narrowing to Exception.
            caller_module = importlib.import_module('__main__')
        for testable in testables:
            # A testable can yield unit tests (module/module name) and/or
            # doctests (module docstrings or a doctest file path); both
            # interpretations are attempted independently.
            module = get_module(testable)
            doctestable = get_doctestable(testable)
            if module is not None:
                add_module(self, module, skip=skip)
            if doctestable is not None:
                # Resolve relative paths against the directory of the
                # module that instantiated the finder.
                module_path = getattr(caller_module, '__file__', '.')
                module_dir = os.path.dirname(module_path)
                add_doctest(self, doctestable, working_dir=module_dir)

    def load_tests(self, loader, tests, pattern):
        """
        This is, basically, an implementation of the ```load_tests()
        protocol`__. You can assign it (when bound) to ``load_tests`` inside a
        module and then the ``TestFinder`` will be the suite to be called.

        For example, suppose we have the following classes

        >>> class TestCase1(unittest.TestCase):
        ...     def test_fail1(self):
        ...         self.fail('TestCase1')
        >>> class TestCase2(unittest.TestCase):
        ...     def test_fail2(self):
        ...         self.fail('TestCase1')

        If we add them to two different modules, but then create a finder with
        the first one and set its bound ``load_tests()`` into the second one,
        then the second module will only "publish" the cases of the first one::

        >>> from inelegant.module import installed_module
        >>> with installed_module('t1', to_adopt=[TestCase1]) as t1, \\
        ...         installed_module('t2', to_adopt=[TestCase2]) as t2:
        ...     t2.load_tests = TestFinder(t1).load_tests
        ...     loader = unittest.TestLoader()
        ...     suite = loader.loadTestsFromModule(t2)
        ...     # doctest: +ELLIPSIS
        ...     _ = unittest.TextTestRunner(stream=sys.stdout).run(suite)
        F
        ======================================================================
        FAIL: test_fail1 (t1.TestCase1)
        ----------------------------------------------------------------------
        Traceback (most recent call last):
        ...
        AssertionError: TestCase1
        <BLANKLINE>
        ----------------------------------------------------------------------
        Ran 1 test in ...s
        <BLANKLINE>
        FAILED (failures=1)

        __ https://docs.python.org/2/library/unittest.html#load-tests-protocol
        """
        # The finder itself already is the complete suite; ``loader``,
        # ``tests`` and ``pattern`` belong to the protocol signature but
        # are intentionally unused.
        return self
def get_module(testable):
    """
    Resolve ``testable`` to a module.

    A module argument is returned unchanged::

    >>> import inelegant.test.finder
    >>> get_module(inelegant.test.finder) # doctest: +ELLIPSIS
    <module 'inelegant.test.finder' ...>

    A string argument is treated as a module name and imported::

    >>> get_module('inelegant.test.net') # doctest: +ELLIPSIS
    <module 'inelegant.test.net' ...>

    Anything else - or a string that cannot be imported - yields ``None``.
    """
    if inspect.ismodule(testable):
        return testable
    if not isinstance(testable, basestring):
        return None
    try:
        return importlib.import_module(testable)
    except ImportError:
        # A traceback deeper than two frames suggests the ImportError was
        # raised by the imported module's own code rather than by the
        # import machinery, so it is propagated as a real error.
        if len(get_exc_frames()) > 2:
            raise
        return None
    except TypeError:
        return None
def get_exc_frames():
    """
    Return the execution frames traversed by the exception currently being
    handled, from the point where it was caught down to where it was
    raised::

    >>> try:
    ...     raise Exception()
    ... except:
    ...     get_exc_frames() # doctest: +ELLIPSIS
    [<frame object at ...>]

    Each function the exception propagated through contributes one frame::

    >>> def raise_exception():
    ...     raise Exception()
    >>> try:
    ...     raise_exception()
    ... except:
    ...     get_exc_frames() # doctest: +ELLIPSIS
    [<frame object at ...>, <frame object at ...>]
    """
    _, _, tb = sys.exc_info()
    frames = []
    while tb is not None:
        frames.append(tb.tb_frame)
        tb = tb.tb_next
    return frames
def get_doctestable(testable):
    """
    Turn ``testable`` into something ``doctest`` can consume - a
    "doctestable": either a module (whose docstrings hold the doctests) or
    a path to a text file.

    A module argument is returned as is::

    >>> from inelegant.module import installed_module
    >>> with installed_module('m') as m:
    ...     get_doctestable(m) # doctest: +ELLIPSIS
    <module 'm' ...>

    A file object yields its path; a string is first tried as a module name
    and, failing that, assumed to be a file path::

    >>> get_doctestable('/tmp/doctest.txt')
    '/tmp/doctest.txt'

    Any other value yields ``None``.
    """
    if inspect.ismodule(testable):
        return testable
    if isinstance(testable, file):
        return testable.name
    if isinstance(testable, basestring):
        module = get_module(testable)
        return module if module is not None else testable
    return None
def add_doctest(suite, doctestable, working_dir=None, exclude_empty=False):
    """
    Append a test suite that runs the doctests of ``doctestable`` to
    ``suite``.

    ``doctestable`` is either a module - in which case the doctests found
    in its docstrings are collected - or a path to a doctest text file.
    Relative file paths are resolved against ``working_dir``, which
    defaults to the current working directory; this makes it possible to
    ship documentation files alongside the code that loads them.

    ``exclude_empty`` is forwarded to ``doctest.DocTestFinder`` and only
    matters for the module case.
    """
    base_dir = os.getcwd() if working_dir is None else working_dir
    if inspect.ismodule(doctestable):
        test_finder = doctest.DocTestFinder(exclude_empty=exclude_empty)
        tests = doctest.DocTestSuite(doctestable, test_finder=test_finder)
    else:
        path = doctestable
        if not os.path.isabs(path):
            path = os.path.join(base_dir, path)
        tests = doctest.DocFileSuite(path, module_relative=False)
    suite.addTest(tests)
def add_module(suite, module, skip=None):
    """
    Load every test case and test suite found in ``module`` into ``suite``.

    ``skip`` may be a single test case class, or an iterable of such
    classes, whose instances should be left out of the suite. This is
    handy when a base test case class is imported into the module only to
    be subclassed and should not run by itself.
    """
    skipped_classes = to_set(skip)
    discovered = unittest.defaultTestLoader.loadTestsFromModule(module)
    # The loader returns nested suites; flatten them so individual cases
    # can be filtered by class.
    suite.addTests(
        case for case in flatten(discovered)
        if case.__class__ not in skipped_classes
    )
def to_set(value):
    """
    Convert ``value`` to a set in the following ways:

    * If the value is ``None``, then returns the empty set::

    >>> to_set(None) == set()
    True

    * If the value is an iterable, creates a set with all values from it::

    >>> to_set(xrange(3)) == set([0, 1, 2])
    True

    (Pay attention to never pass a huge or infinite iterator to ``to_set()``.)

    * Otherwise, returns a set containing the given value::

    >>> to_set(3) == set([3])
    True
    """
    if value is None:
        result = set()
    else:
        try:
            result = set(value)
        except TypeError:
            # Not iterable: wrap the single value in a one-element set.
            result = set([value])
    return result
def flatten(value, ids=None, depth=None):
    """
    Yield the non-iterable leaves of ``value``, descending depth-first
    into any nested iterables::

    >>> a = [1, [[2, 3, (4, 5, xrange(6, 10)), 10], (11, 12)], [13], 14]
    >>> list(flatten(a))
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]

    The ``ids`` set records the ``id()`` of every element already visited
    so that self-referencing (recursive) iterables terminate instead of
    looping forever::

    >>> b = [1, 2, 3]
    >>> c = [4, 5, 6, b, 7]
    >>> b.append(c)
    >>> list(flatten(b))
    [1, 2, 3, 4, 5, 6, 7]
    """
    visited = set() if ids is None else ids
    try:
        for element in value:
            if id(element) in visited:
                continue
            visited.add(id(element))
            for leaf in flatten(element, ids=visited):
                yield leaf
    except TypeError:
        # ``value`` is not iterable at all: it is itself a leaf.
        yield value
|
hurricup/intellij-community | refs/heads/master | python/helpers/py3only/docutils/transforms/frontmatter.py | 44 | # $Id: frontmatter.py 7595 2013-01-21 17:33:56Z milde $
# Author: David Goodger, Ueli Schlaepfer <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Transforms related to the front matter of a document or a section
(information found before the main text):
- `DocTitle`: Used to transform a lone top level section's title to
the document title, promote a remaining lone top-level section's
title to the document subtitle, and determine the document's title
metadata (document['title']) based on the document title and/or the
"title" setting.
- `SectionSubTitle`: Used to transform a lone subsection into a
subtitle.
- `DocInfo`: Used to transform a bibliographic field list into docinfo
elements.
"""
__docformat__ = 'reStructuredText'
import re
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform
class TitlePromoter(Transform):

    """
    Abstract base class for DocTitle and SectionSubTitle transforms.
    """

    def promote_title(self, node):
        """
        Transform the following tree::

            <node>
                <section>
                    <title>
                ...

        into ::

            <node>
                <title>
            ...

        `node` is normally a document.

        Return 1 if a title was promoted, ``None`` if there was no
        suitable candidate section.
        """
        # Type check
        if not isinstance(node, nodes.Element):
            raise TypeError('node must be of Element-derived type.')

        # `node` must not have a title yet.
        assert not (len(node) and isinstance(node[0], nodes.title))
        section, index = self.candidate_index(node)
        if index is None:
            return None

        # Transfer the section's attributes to the node:
        # NOTE: Change second parameter to False to NOT replace
        #       attributes that already exist in node with those in
        #       section
        # NOTE: Remove third parameter to NOT copy the 'source'
        #       attribute from section
        node.update_all_atts_concatenating(section, True, True)

        # setup_child is called automatically for all nodes.
        node[:] = (section[:1]        # section title
                   + node[:index]     # everything that was in the
                                      # node before the section
                   + section[1:])     # everything that was in the section
        assert isinstance(node[0], nodes.title)
        return 1

    def promote_subtitle(self, node):
        """
        Transform the following node tree::

            <node>
                <title>
                <section>
                    <title>
                ...

        into ::

            <node>
                <title>
                <subtitle>
            ...

        Return 1 if a subtitle was promoted, ``None`` if there was no
        suitable candidate subsection.
        """
        # Type check
        if not isinstance(node, nodes.Element):
            raise TypeError('node must be of Element-derived type.')

        subsection, index = self.candidate_index(node)
        if index is None:
            return None
        subtitle = nodes.subtitle()

        # Transfer the subsection's attributes to the new subtitle
        # NOTE: Change second parameter to False to NOT replace
        #       attributes that already exist in node with those in
        #       section
        # NOTE: Remove third parameter to NOT copy the 'source'
        #       attribute from section
        subtitle.update_all_atts_concatenating(subsection, True, True)

        # Transfer the contents of the subsection's title to the
        # subtitle:
        subtitle[:] = subsection[0][:]
        node[:] = (node[:1]       # title
                   + [subtitle]
                   # everything that was before the section:
                   + node[1:index]
                   # everything that was in the subsection:
                   + subsection[1:])
        return 1

    def candidate_index(self, node):
        """
        Find and return the promotion candidate and its index.

        Return (None, None) if no valid candidate was found.
        """
        index = node.first_child_not_matching_class(
            nodes.PreBibliographic)
        # The candidate must be a section and must be the only element
        # following the initial run of PreBibliographic (comment /
        # decorative) children.
        if index is None or len(node) > (index + 1) or \
                not isinstance(node[index], nodes.section):
            return None, None
        else:
            return node[index], index
class DocTitle(TitlePromoter):

    """
    In reStructuredText_, there is no way to specify a document title
    and subtitle explicitly. Instead, we can supply the document title
    (and possibly the subtitle as well) implicitly, and use this
    two-step transform to "raise" or "promote" the title(s) (and their
    corresponding section contents) to the document level.

    1. If the document contains a single top-level section as its
       first non-comment element, the top-level section's title
       becomes the document's title, and the top-level section's
       contents become the document's immediate contents. The lone
       top-level section header must be the first non-comment element
       in the document.

       For example, take this input text::

           =================
            Top-Level Title
           =================

           A paragraph.

       Once parsed, it looks like this::

           <document>
               <section names="top-level title">
                   <title>
                       Top-Level Title
                   <paragraph>
                       A paragraph.

       After running the DocTitle transform, we have::

           <document names="top-level title">
               <title>
                   Top-Level Title
               <paragraph>
                   A paragraph.

    2. If step 1 successfully determines the document title, we
       continue by checking for a subtitle.

       If the lone top-level section itself contains a single
       second-level section as its first non-comment element, that
       section's title is promoted to the document's subtitle, and
       that section's contents become the document's immediate
       contents. Given this input text::

           =================
            Top-Level Title
           =================

           Second-Level Title
           ~~~~~~~~~~~~~~~~~~

           A paragraph.

       After parsing and running the Section Promotion transform, the
       result is::

           <document names="top-level title">
               <title>
                   Top-Level Title
               <subtitle names="second-level title">
                   Second-Level Title
               <paragraph>
                   A paragraph.

       (Note that the implicit hyperlink target generated by the
       "Second-Level Title" is preserved on the "subtitle" element
       itself.)

    Any comment elements occurring before the document title or
    subtitle are accumulated and inserted as the first body elements
    after the title(s).

    This transform also sets the document's metadata title
    (document['title']).

    .. _reStructuredText: http://docutils.sf.net/rst.html
    """

    default_priority = 320

    def set_metadata(self):
        """
        Set document['title'] metadata title from the following
        sources, listed in order of priority:

        * Existing document['title'] attribute.
        * "title" setting.
        * Document title node (as promoted by promote_title).
        """
        if not self.document.hasattr('title'):
            if self.document.settings.title is not None:
                self.document['title'] = self.document.settings.title
            elif len(self.document) and isinstance(self.document[0], nodes.title):
                self.document['title'] = self.document[0].astext()

    def apply(self):
        # Promotion is skipped entirely when the "doctitle_xform" setting
        # is disabled, but the metadata title is determined either way.
        if getattr(self.document.settings, 'doctitle_xform', 1):
            # promote_(sub)title defined in TitlePromoter base class.
            if self.promote_title(self.document):
                # If a title has been promoted, also try to promote a
                # subtitle.
                self.promote_subtitle(self.document)
        # Set document['title'].
        self.set_metadata()
class SectionSubTitle(TitlePromoter):

    """
    Promote a lone sub-section's title to a subtitle of its parent
    section - the section-level analogue of document subtitles::

        <section>
            <title>
                Title
            <section>
                <title>
                    Subtitle
                ...

    is transformed into ::

        <section>
            <title>
                Title
            <subtitle>
                Subtitle
            ...

    For details refer to the docstring of DocTitle.
    """

    default_priority = 350

    def apply(self):
        if getattr(self.document.settings, 'sectsubtitle_xform', 1):
            for section in self.document.traverse(nodes.section):
                # On our way through the node tree we are deleting
                # sections, yet promote_subtitle is still attempted for
                # each of them. To do: write a test case which shows the
                # problem and discuss on Docutils-develop.
                self.promote_subtitle(section)
class DocInfo(Transform):

    """
    This transform is specific to the reStructuredText_ markup syntax;
    see "Bibliographic Fields" in the `reStructuredText Markup
    Specification`_ for a high-level description. This transform
    should be run *after* the `DocTitle` transform.

    Given a field list as the first non-comment element after the
    document title and subtitle (if present), registered bibliographic
    field names are transformed to the corresponding DTD elements,
    becoming child elements of the "docinfo" element (except for a
    dedication and/or an abstract, which become "topic" elements after
    "docinfo").

    For example, given this document fragment after parsing::

        <document>
            <title>
                Document Title
            <field_list>
                <field>
                    <field_name>
                        Author
                    <field_body>
                        <paragraph>
                            A. Name
                <field>
                    <field_name>
                        Status
                    <field_body>
                        <paragraph>
                            $RCSfile$
        ...

    After running the bibliographic field list transform, the
    resulting document tree would look like this::

        <document>
            <title>
                Document Title
            <docinfo>
                <author>
                    A. Name
                <status>
                    frontmatter.py
        ...

    The "Status" field contained an expanded RCS keyword, which is
    normally (but optionally) cleaned up by the transform. The sole
    contents of the field body must be a paragraph containing an
    expanded RCS keyword of the form "$keyword: expansion text $". Any
    RCS keyword can be processed in any bibliographic field. The
    dollar signs and leading RCS keyword name are removed. Extra
    processing is done for the following RCS keywords:

    - "RCSfile" expands to the name of the file in the RCS or CVS
      repository, which is the name of the source file with a ",v"
      suffix appended. The transform will remove the ",v" suffix.

    - "Date" expands to the format "YYYY/MM/DD hh:mm:ss" (in the UTC
      time zone). The RCS Keywords transform will extract just the
      date itself and transform it to an ISO 8601 format date, as in
      "2000-12-31".

    (Since the source file for this text is itself stored under CVS,
    we can't show an example of the "Date" RCS keyword because we
    can't prevent any RCS keywords used in this explanation from
    being expanded. Only the "RCSfile" keyword is stable; its
    expansion text changes only if the file name changes.)

    .. _reStructuredText: http://docutils.sf.net/rst.html
    .. _reStructuredText Markup Specification:
       http://docutils.sf.net/docs/ref/rst/restructuredtext.html
    """

    default_priority = 340

    biblio_nodes = {
          'author': nodes.author,
          'authors': nodes.authors,
          'organization': nodes.organization,
          'address': nodes.address,
          'contact': nodes.contact,
          'version': nodes.version,
          'revision': nodes.revision,
          'status': nodes.status,
          'date': nodes.date,
          'copyright': nodes.copyright,
          'dedication': nodes.topic,
          'abstract': nodes.topic}
    """Canonical field name (lowcased) to node class name mapping for
    bibliographic fields (field_list)."""

    def apply(self):
        # The transform is a no-op when the "docinfo_xform" setting is
        # disabled.
        if not getattr(self.document.settings, 'docinfo_xform', 1):
            return
        document = self.document
        index = document.first_child_not_matching_class(
            nodes.PreBibliographic)
        if index is None:
            return
        candidate = document[index]
        if isinstance(candidate, nodes.field_list):
            # Insert the extracted docinfo after the title/subtitle and
            # decorative elements.
            biblioindex = document.first_child_not_matching_class(
                (nodes.Titular, nodes.Decorative))
            nodelist = self.extract_bibliographic(candidate)
            del document[index]         # untransformed field list (candidate)
            document[biblioindex:biblioindex] = nodelist

    def extract_bibliographic(self, field_list):
        """
        Build and return the list of nodes replacing *field_list*: a
        "docinfo" element (if any field was recognized or kept verbatim)
        followed by the optional dedication and abstract topics.
        """
        docinfo = nodes.docinfo()
        # NOTE(review): self.language is assumed to be set on the
        # transform before apply() runs - confirm against the caller.
        bibliofields = self.language.bibliographic_fields
        labels = self.language.labels
        topics = {'dedication': None, 'abstract': None}
        for field in field_list:
            try:
                name = field[0][0].astext()
                normedname = nodes.fully_normalize_name(name)
                if not (len(field) == 2 and normedname in bibliofields
                        and self.check_empty_biblio_field(field, name)):
                    raise TransformError
                canonical = bibliofields[normedname]
                biblioclass = self.biblio_nodes[canonical]
                if issubclass(biblioclass, nodes.TextElement):
                    if not self.check_compound_biblio_field(field, name):
                        raise TransformError
                    utils.clean_rcs_keywords(
                        field[1][0], self.rcs_keyword_substitutions)
                    docinfo.append(biblioclass('', '', *field[1][0]))
                elif issubclass(biblioclass, nodes.authors):
                    self.extract_authors(field, name, docinfo)
                elif issubclass(biblioclass, nodes.topic):
                    # "dedication" and "abstract" become standalone topics
                    # and may appear at most once each.
                    if topics[canonical]:
                        field[-1] += self.document.reporter.warning(
                            'There can only be one "%s" field.' % name,
                            base_node=field)
                        raise TransformError
                    title = nodes.title(name, labels[canonical])
                    topics[canonical] = biblioclass(
                        '', title, classes=[canonical], *field[1].children)
                else:
                    docinfo.append(biblioclass('', *field[1].children))
            except TransformError:
                # Unrecognized or malformed field: keep it verbatim in the
                # docinfo (after optional RCS keyword cleanup).
                if len(field[-1]) == 1 \
                       and isinstance(field[-1][0], nodes.paragraph):
                    utils.clean_rcs_keywords(
                        field[-1][0], self.rcs_keyword_substitutions)
                docinfo.append(field)
        nodelist = []
        if len(docinfo) != 0:
            nodelist.append(docinfo)
        for name in ('dedication', 'abstract'):
            if topics[name]:
                nodelist.append(topics[name])
        return nodelist

    def check_empty_biblio_field(self, field, name):
        """Return 1 if *field* has a non-empty body; warn and return None
        otherwise."""
        if len(field[-1]) < 1:
            field[-1] += self.document.reporter.warning(
                'Cannot extract empty bibliographic field "%s".' % name,
                base_node=field)
            return None
        return 1

    def check_compound_biblio_field(self, field, name):
        """Return 1 if *field*'s body is exactly one paragraph; warn and
        return None otherwise."""
        if len(field[-1]) > 1:
            field[-1] += self.document.reporter.warning(
                'Cannot extract compound bibliographic field "%s".' % name,
                base_node=field)
            return None
        if not isinstance(field[-1][0], nodes.paragraph):
            field[-1] += self.document.reporter.warning(
                'Cannot extract bibliographic field "%s" containing '
                'anything other than a single paragraph.' % name,
                base_node=field)
            return None
        return 1

    # (pattern, replacement) pairs applied by utils.clean_rcs_keywords to
    # strip expanded RCS/CVS keywords from field bodies.
    rcs_keyword_substitutions = [
          (re.compile(r'\$' r'Date: (\d\d\d\d)[-/](\d\d)[-/](\d\d)[ T][\d:]+'
                      r'[^$]* \$', re.IGNORECASE), r'\1-\2-\3'),
          (re.compile(r'\$' r'RCSfile: (.+),v \$', re.IGNORECASE), r'\1'),
          (re.compile(r'\$[a-zA-Z]+: (.+) \$'), r'\1'),]

    def extract_authors(self, field, name, docinfo):
        """
        Append an "authors" element built from *field* to *docinfo*.

        The field body may be a single paragraph (authors separated by the
        language's author separators), multiple paragraphs (one author
        each), or a bullet list (one author per item).
        """
        try:
            if len(field[1]) == 1:
                if isinstance(field[1][0], nodes.paragraph):
                    authors = self.authors_from_one_paragraph(field)
                elif isinstance(field[1][0], nodes.bullet_list):
                    authors = self.authors_from_bullet_list(field)
                else:
                    raise TransformError
            else:
                authors = self.authors_from_paragraphs(field)
            authornodes = [nodes.author('', '', *author)
                           for author in authors if author]
            if len(authornodes) >= 1:
                docinfo.append(nodes.authors('', *authornodes))
            else:
                raise TransformError
        except TransformError:
            field[-1] += self.document.reporter.warning(
                'Bibliographic field "%s" incompatible with extraction: '
                'it must contain either a single paragraph (with authors '
                'separated by one of "%s"), multiple paragraphs (one per '
                'author), or a bullet list with one paragraph (one author) '
                'per item.'
                % (name, ''.join(self.language.author_separators)),
                base_node=field)
            raise

    def authors_from_one_paragraph(self, field):
        """Split a single paragraph on the first author separator that
        actually divides it."""
        text = field[1][0].astext().strip()
        if not text:
            raise TransformError
        for authorsep in self.language.author_separators:
            authornames = text.split(authorsep)
            if len(authornames) > 1:
                break
        authornames = [author.strip() for author in authornames]
        authors = [[nodes.Text(author)] for author in authornames if author]
        return authors

    def authors_from_bullet_list(self, field):
        """One author per bullet item; each item must be a single
        paragraph."""
        authors = []
        for item in field[1][0]:
            if len(item) != 1 or not isinstance(item[0], nodes.paragraph):
                raise TransformError
            authors.append(item[0].children)
        if not authors:
            raise TransformError
        return authors

    def authors_from_paragraphs(self, field):
        """One author per paragraph; any non-paragraph child is an
        error."""
        for item in field[1]:
            if not isinstance(item, nodes.paragraph):
                raise TransformError
        authors = [item.children for item in field[1]]
        return authors
|
tlby/mxnet | refs/heads/master | tests/python/unittest/test_base.py | 9 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
from mxnet.base import data_dir
from nose.tools import *
import os
import unittest
import logging
import os.path as op
import platform
class MXNetDataDirTest(unittest.TestCase):
    """Checks that data_dir() honours the MXNET_HOME environment variable."""

    def setUp(self):
        # Remember the caller's MXNET_HOME (None if unset) and clear it so
        # the test starts from the default location.
        self.mxnet_data_dir = os.environ.pop('MXNET_HOME', None)

    def tearDown(self):
        # Restore the environment exactly as it was before setUp().
        os.environ.pop('MXNET_HOME', None)
        if self.mxnet_data_dir:
            os.environ['MXNET_HOME'] = self.mxnet_data_dir

    def test_data_dir(self):
        default_dir = data_dir()
        if platform.system() == 'Windows':
            # The POSIX default path below does not apply on Windows.
            return
        self.assertEqual(default_dir, op.join(op.expanduser('~'), '.mxnet'))
        os.environ['MXNET_HOME'] = '/tmp/mxnet_data'
        self.assertEqual(data_dir(), '/tmp/mxnet_data')
        del os.environ['MXNET_HOME']
        self.assertEqual(data_dir(), default_dir)
|
sunqm/mpi4pyscf | refs/heads/master | mpi4pyscf/mp/__init__.py | 1 | from . import mp2
from .mp2 import RMP2
|
Zearin/python-xtraceback | refs/heads/master | test_support/python/3.3.0/test/support.py | 9 | """Supporting definitions for the Python regression tests."""
if __name__ != 'test.support':
raise ImportError('support must be imported from the test package')
import contextlib
import errno
import functools
import gc
import socket
import sys
import os
import platform
import shutil
import warnings
import unittest
import importlib
import collections.abc
import re
import subprocess
import imp
import time
import sysconfig
import fnmatch
import logging.handlers
import struct
import tempfile
import _testcapi
try:
import _thread, threading
except ImportError:
_thread = None
threading = None
try:
import multiprocessing.process
except ImportError:
multiprocessing = None
try:
import zlib
except ImportError:
zlib = None
try:
import bz2
except ImportError:
bz2 = None
try:
import lzma
except ImportError:
lzma = None
__all__ = [
"Error", "TestFailed", "ResourceDenied", "import_module", "verbose",
"use_resources", "max_memuse", "record_original_stdout",
"get_original_stdout", "unload", "unlink", "rmtree", "forget",
"is_resource_enabled", "requires", "requires_freebsd_version",
"requires_linux_version", "requires_mac_ver", "find_unused_port",
"bind_port", "IPV6_ENABLED", "is_jython", "TESTFN", "HOST", "SAVEDCWD",
"temp_cwd", "findfile", "create_empty_file", "sortdict",
"check_syntax_error", "open_urlresource", "check_warnings", "CleanImport",
"EnvironmentVarGuard", "TransientResource", "captured_stdout",
"captured_stdin", "captured_stderr", "time_out", "socket_peer_reset",
"ioerror_peer_reset", "run_with_locale", 'temp_umask',
"transient_internet", "set_memlimit", "bigmemtest", "bigaddrspacetest",
"BasicTestRunner", "run_unittest", "run_doctest", "threading_setup",
"threading_cleanup", "reap_children", "cpython_only", "check_impl_detail",
"get_attribute", "swap_item", "swap_attr", "requires_IEEE_754",
"TestHandler", "Matcher", "can_symlink", "skip_unless_symlink",
"skip_unless_xattr", "import_fresh_module", "requires_zlib",
"PIPE_MAX_SIZE", "failfast", "anticipate_failure", "run_with_tz",
"requires_bz2", "requires_lzma"
]
class Error(Exception):
"""Base class for regression test exceptions."""
class TestFailed(Error):
"""Test failed."""
class ResourceDenied(unittest.SkipTest):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
has not be enabled. It is used to distinguish between expected
and unexpected skips.
"""
@contextlib.contextmanager
def _ignore_deprecated_imports(ignore=True):
    """Context manager suppressing module/package DeprecationWarnings
    raised while importing.

    When *ignore* is false this is a transparent no-op context manager."""
    if not ignore:
        yield
        return
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", ".+ (module|package)",
                                DeprecationWarning)
        yield
def import_module(name, deprecated=False):
    """Import and return module *name*, raising SkipTest when unavailable.

    If *deprecated* is true, module/package deprecation messages emitted
    during the import are suppressed."""
    try:
        with _ignore_deprecated_imports(deprecated):
            return importlib.import_module(name)
    except ImportError as exc:
        raise unittest.SkipTest(str(exc))
def _save_and_remove_module(name, orig_modules):
    """Helper function to save and remove a module from sys.modules

    Raise ImportError if the module can't be imported.

    The module and every already-imported submodule of it are stashed in
    *orig_modules* (for later restoration) and dropped from sys.modules.
    """
    # try to import the module and raise an error if it can't be imported
    if name not in sys.modules:
        __import__(name)
        del sys.modules[name]
    for modname in list(sys.modules):
        # Evict submodules ("pkg.sub") too, so a fresh import of the package
        # cannot pick up stale children from the cache.
        if modname == name or modname.startswith(name + '.'):
            orig_modules[modname] = sys.modules[modname]
            del sys.modules[modname]
def _save_and_block_module(name, orig_modules):
    """Save sys.modules[name] into *orig_modules* and replace it with None,
    which blocks any further import of *name*.

    Return True when the module was present in sys.modules, else False."""
    present = name in sys.modules
    if present:
        orig_modules[name] = sys.modules[name]
    sys.modules[name] = None
    return present
def anticipate_failure(condition):
    """Decorator factory marking a test as a known failure when *condition*
    is true; otherwise the test is returned unchanged.

    Any use of this decorator should carry a comment identifying the
    associated tracker issue."""
    return unittest.expectedFailure if condition else (lambda f: f)
def import_fresh_module(name, fresh=(), blocked=(), deprecated=False):
    """Imports and returns a module, deliberately bypassing the sys.modules cache
    and importing a fresh copy of the module. Once the import is complete,
    the sys.modules cache is restored to its original state.

    Modules named in fresh are also imported anew if needed by the import.
    If one of these modules can't be imported, None is returned.

    Importing of modules named in blocked is prevented while the fresh import
    takes place.

    If deprecated is True, any module or package deprecation messages
    will be suppressed."""
    # NOTE: test_heapq, test_json and test_warnings include extra sanity checks
    # to make sure that this utility function is working as expected
    with _ignore_deprecated_imports(deprecated):
        # Keep track of modules saved for later restoration as well
        # as those which just need a blocking entry removed
        orig_modules = {}
        names_to_remove = []
        _save_and_remove_module(name, orig_modules)
        try:
            for fresh_name in fresh:
                _save_and_remove_module(fresh_name, orig_modules)
            for blocked_name in blocked:
                if not _save_and_block_module(blocked_name, orig_modules):
                    names_to_remove.append(blocked_name)
            fresh_module = importlib.import_module(name)
        except ImportError:
            # A blocked or missing dependency made the fresh import fail.
            fresh_module = None
        finally:
            # Restore the pristine cache, then drop the None placeholders
            # that _save_and_block_module() installed for absent modules.
            for orig_name, module in orig_modules.items():
                sys.modules[orig_name] = module
            for name_to_remove in names_to_remove:
                del sys.modules[name_to_remove]
        return fresh_module
def get_attribute(obj, name):
    """Return ``getattr(obj, name)``, raising SkipTest when it is missing."""
    try:
        return getattr(obj, name)
    except AttributeError:
        raise unittest.SkipTest("object %r has no attribute %r" % (obj, name))
verbose = 1 # Flag set to 0 by regrtest.py
use_resources = None # Flag set to [] by regrtest.py
max_memuse = 0 # Disable bigmem tests (they will still be run with
# small sizes, to make sure they work.)
real_max_memuse = 0
failfast = False
match_tests = None
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
global _original_stdout
_original_stdout = stdout
def get_original_stdout():
return _original_stdout or sys.stdout
def unload(name):
    """Drop *name* from sys.modules (ignored when absent) so the next
    import re-executes the module."""
    sys.modules.pop(name, None)
# On Windows, open handles and pending deletions can make unlink/rmdir appear
# to fail or lag; _waitfor() retries with exponential backoff until the entry
# really disappears.  On other platforms the os/shutil primitives are used
# directly.
if sys.platform.startswith("win"):
    def _waitfor(func, pathname, waitall=False):
        # Perform the operation
        func(pathname)
        # Now setup the wait loop
        if waitall:
            dirname = pathname
        else:
            dirname, name = os.path.split(pathname)
            dirname = dirname or '.'
        # Check for `pathname` to be removed from the filesystem.
        # The exponential backoff of the timeout amounts to a total
        # of ~1 second after which the deletion is probably an error
        # anyway.
        # Testing on an i7@4.3GHz shows that usually only 1 iteration is
        # required when contention occurs.
        timeout = 0.001
        while timeout < 1.0:
            # Note we are only testing for the existence of the file(s) in
            # the contents of the directory regardless of any security or
            # access rights. If we have made it this far, we have sufficient
            # permissions to do that much using Python's equivalent of the
            # Windows API FindFirstFile.
            # Other Windows APIs can fail or give incorrect results when
            # dealing with files that are pending deletion.
            L = os.listdir(dirname)
            if not (L if waitall else name in L):
                return
            # Increase the timeout and try again
            time.sleep(timeout)
            timeout *= 2
        warnings.warn('tests may fail, delete still pending for ' + pathname,
                      RuntimeWarning, stacklevel=4)

    def _unlink(filename):
        _waitfor(os.unlink, filename)

    def _rmdir(dirname):
        _waitfor(os.rmdir, dirname)

    def _rmtree(path):
        # Depth-first removal, waiting at every level for pending deletions.
        def _rmtree_inner(path):
            for name in os.listdir(path):
                fullname = os.path.join(path, name)
                if os.path.isdir(fullname):
                    _waitfor(_rmtree_inner, fullname, waitall=True)
                    os.rmdir(fullname)
                else:
                    os.unlink(fullname)
        _waitfor(_rmtree_inner, path, waitall=True)
        _waitfor(os.rmdir, path)
else:
    _unlink = os.unlink
    _rmdir = os.rmdir
    _rmtree = shutil.rmtree
def unlink(filename):
    """Remove *filename*; a missing file (or missing parent directory
    component) is silently ignored."""
    try:
        _unlink(filename)
    except OSError as exc:
        if exc.errno != errno.ENOENT and exc.errno != errno.ENOTDIR:
            raise
def rmdir(dirname):
    """Remove directory *dirname*; a missing directory is silently ignored."""
    try:
        _rmdir(dirname)
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise
def rmtree(path):
    """Recursively remove *path*; a missing tree is silently ignored."""
    try:
        _rmtree(path)
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise
def make_legacy_pyc(source):
    """Move a PEP 3147 pyc/pyo file to its legacy pyc/pyo location.

    The choice of .pyc or .pyo extension is done based on the __debug__ flag
    value.

    :param source: The file system path to the source file.  The source file
        does not need to exist, however the PEP 3147 pyc file must exist.
    :return: The file system path to the legacy pyc file.
    """
    pyc_file = imp.cache_from_source(source)
    up_one = os.path.dirname(os.path.abspath(source))
    # Legacy location: next to the source file, not in __pycache__.
    legacy_pyc = os.path.join(up_one, source + ('c' if __debug__ else 'o'))
    os.rename(pyc_file, legacy_pyc)
    return legacy_pyc
def forget(modname):
    """'Forget' a module was ever imported.

    This removes the module from sys.modules and deletes any PEP 3147 or
    legacy .pyc and .pyo files, so the next import recompiles from source.
    """
    unload(modname)
    for dirname in sys.path:
        source = os.path.join(dirname, modname + '.py')
        # It doesn't matter if they exist or not, unlink all possible
        # combinations of PEP 3147 and legacy pyc and pyo files.
        unlink(source + 'c')
        unlink(source + 'o')
        unlink(imp.cache_from_source(source, debug_override=True))
        unlink(imp.cache_from_source(source, debug_override=False))
# On some platforms, should not run gui test even if it is allowed
# in `use_resources'.
if sys.platform.startswith('win'):
    import ctypes
    import ctypes.wintypes
    def _is_gui_available():
        # Ask Win32 whether the process window station is visible; a service
        # or headless session cannot display GUI windows.
        UOI_FLAGS = 1
        WSF_VISIBLE = 0x0001
        class USEROBJECTFLAGS(ctypes.Structure):
            _fields_ = [("fInherit", ctypes.wintypes.BOOL),
                        ("fReserved", ctypes.wintypes.BOOL),
                        ("dwFlags", ctypes.wintypes.DWORD)]
        dll = ctypes.windll.user32
        h = dll.GetProcessWindowStation()
        if not h:
            raise ctypes.WinError()
        uof = USEROBJECTFLAGS()
        needed = ctypes.wintypes.DWORD()
        res = dll.GetUserObjectInformationW(h,
                                            UOI_FLAGS,
                                            ctypes.byref(uof),
                                            ctypes.sizeof(uof),
                                            ctypes.byref(needed))
        if not res:
            raise ctypes.WinError()
        return bool(uof.dwFlags & WSF_VISIBLE)
else:
    # Non-Windows platforms: assume a GUI is available.
    def _is_gui_available():
        return True
def is_resource_enabled(resource):
    """Return True when *resource* was enabled (use_resources is populated
    by regrtest.py; None means nothing is enabled)."""
    if use_resources is None:
        return False
    return resource in use_resources
def requires(resource, msg=None):
    """Raise ResourceDenied if the specified resource is not available.

    If the caller's module is __main__ then automatically return True.  The
    possibility of False being returned occurs when regrtest.py is
    executing.
    """
    if resource == 'gui' and not _is_gui_available():
        raise unittest.SkipTest("Cannot use the 'gui' resource")
    # see if the caller's module is __main__ - if so, treat as if
    # the resource was set (frame 1 is the direct caller of requires()).
    if sys._getframe(1).f_globals.get("__name__") == "__main__":
        return
    if not is_resource_enabled(resource):
        if msg is None:
            msg = "Use of the %r resource not enabled" % resource
        raise ResourceDenied(msg)
def _requires_unix_version(sysname, min_version):
    """Decorator raising SkipTest if the OS is `sysname` and the version is
    less than `min_version`.

    For example, @_requires_unix_version('FreeBSD', (7, 2)) raises SkipTest
    if the FreeBSD version is less than 7.2.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kw):
            if platform.system() == sysname:
                version_txt = platform.release().split('-', 1)[0]
                try:
                    version = tuple(map(int, version_txt.split('.')))
                except ValueError:
                    # Unparseable release string: assume recent enough.
                    pass
                else:
                    if version < min_version:
                        min_version_txt = '.'.join(map(str, min_version))
                        raise unittest.SkipTest(
                            "%s version %s or higher required, not %s"
                            % (sysname, min_version_txt, version_txt))
            # BUG FIX: the original wrapper never invoked the wrapped
            # function, silently turning every decorated test into a no-op
            # that returned None.  Delegate like requires_mac_ver() does.
            return func(*args, **kw)
        return wrapper
    return decorator
def requires_freebsd_version(*min_version):
    """Decorator skipping the test on FreeBSD older than *min_version*,
    e.g. ``@requires_freebsd_version(7, 2)`` skips below FreeBSD 7.2."""
    return _requires_unix_version('FreeBSD', min_version)
def requires_linux_version(*min_version):
    """Decorator skipping the test on Linux older than *min_version*,
    e.g. ``@requires_linux_version(2, 6, 32)`` skips below 2.6.32."""
    return _requires_unix_version('Linux', min_version)
def requires_mac_ver(*min_version):
    """Decorator raising SkipTest when running on Mac OS X older than
    *min_version*, e.g. ``@requires_mac_ver(10, 5)`` skips below 10.5."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kw):
            if sys.platform == 'darwin':
                release = platform.mac_ver()[0]
                try:
                    parsed = tuple(int(part) for part in release.split('.'))
                except ValueError:
                    pass  # unparseable version string: do not skip
                else:
                    if parsed < min_version:
                        wanted = '.'.join(map(str, min_version))
                        raise unittest.SkipTest(
                            "Mac OS X %s or higher required, not %s"
                            % (wanted, release))
            return func(*args, **kw)
        wrapper.min_version = min_version
        return wrapper
    return decorator
HOST = 'localhost'
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
    """Return an ephemeral port number that was free at the time of the call.

    A temporary socket of the given *family*/*socktype* is bound to port 0 so
    the OS hands out an unused ephemeral port; the socket is then closed and
    the port returned.

    Prefer bind_port() whenever the test itself creates the server socket:
    the port returned here can in principle be reissued to another process
    before the caller rebinds it, so use this only when the port must be
    passed to a constructor or an external program.  Hard-coded ports must
    NEVER be used -- they break parallel test runs, and due to the differing
    SO_REUSEADDR/SO_EXCLUSIVEADDRUSE semantics on Windows can wedge buildbot
    processes entirely (see http://bugs.python.org/issue2550 for details).
    """
    with socket.socket(family, socktype) as tempsock:
        return bind_port(tempsock)
def bind_port(sock, host=HOST):
    """Bind *sock* to *host* on an ephemeral port and return the port number.

    Relying on ephemeral ports avoids clashes between concurrently running
    tests (important on buildbots).  For TCP/IP sockets this raises
    TestFailed when SO_REUSEADDR or SO_REUSEPORT is set -- tests must never
    set those on TCP/IP sockets -- and sets SO_EXCLUSIVEADDRUSE where
    available (Windows) so no other process can bind our host/port for the
    duration of the test.
    """
    is_tcp = (sock.family == socket.AF_INET and
              sock.type == socket.SOCK_STREAM)
    if is_tcp:
        if hasattr(socket, 'SO_REUSEADDR'):
            if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
                raise TestFailed("tests should never set the SO_REUSEADDR " \
                                 "socket option on TCP/IP sockets!")
        if hasattr(socket, 'SO_REUSEPORT'):
            if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
                raise TestFailed("tests should never set the SO_REUSEPORT " \
                                 "socket option on TCP/IP sockets!")
        if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
    sock.bind((host, 0))
    return sock.getsockname()[1]
def _is_ipv6_enabled():
    """Return True when an IPv6 socket can actually be bound on this host."""
    if not socket.has_ipv6:
        return False
    probe = None
    try:
        probe = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        probe.bind(('::1', 0))
        return True
    except (socket.error, socket.gaierror):
        return False
    finally:
        if probe:
            probe.close()
IPV6_ENABLED = _is_ipv6_enabled()
# A constant likely larger than the underlying OS pipe buffer size.
# Windows limit seems to be around 512B, and most Unix kernels have a 64K pipe
# buffer size: take 1M to be sure.
PIPE_MAX_SIZE = 1024 * 1024
# decorator for skipping tests on non-IEEE 754 platforms
requires_IEEE_754 = unittest.skipUnless(
float.__getformat__("double").startswith("IEEE"),
"test requires IEEE 754 doubles")
requires_zlib = unittest.skipUnless(zlib, 'requires zlib')
requires_bz2 = unittest.skipUnless(bz2, 'requires bz2')
requires_lzma = unittest.skipUnless(lzma, 'requires lzma')
is_jython = sys.platform.startswith('java')
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN = '$test'
else:
TESTFN = '@test'
# Disambiguate TESTFN for parallel testing, while letting it remain a valid
# module name.
TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid())
# TESTFN_UNICODE is a non-ascii filename
TESTFN_UNICODE = TESTFN + "-\xe0\xf2\u0258\u0141\u011f"
if sys.platform == 'darwin':
# In Mac OS X's VFS API file names are, by definition, canonically
# decomposed Unicode, encoded using UTF-8. See QA1173:
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
import unicodedata
TESTFN_UNICODE = unicodedata.normalize('NFD', TESTFN_UNICODE)
TESTFN_ENCODING = sys.getfilesystemencoding()
# TESTFN_UNENCODABLE is a filename (str type) that should *not* be able to be
# encoded by the filesystem encoding (in strict mode). It can be None if we
# cannot generate such filename.
TESTFN_UNENCODABLE = None
if os.name in ('nt', 'ce'):
# skip win32s (0) or Windows 9x/ME (1)
if sys.getwindowsversion().platform >= 2:
# Different kinds of characters from various languages to minimize the
# probability that the whole name is encodable to MBCS (issue #9819)
TESTFN_UNENCODABLE = TESTFN + "-\u5171\u0141\u2661\u0363\uDC80"
try:
TESTFN_UNENCODABLE.encode(TESTFN_ENCODING)
except UnicodeEncodeError:
pass
else:
print('WARNING: The filename %r CAN be encoded by the filesystem encoding (%s). '
'Unicode filename tests may not be effective'
% (TESTFN_UNENCODABLE, TESTFN_ENCODING))
TESTFN_UNENCODABLE = None
# Mac OS X denies unencodable filenames (invalid utf-8)
elif sys.platform != 'darwin':
try:
# ascii and utf-8 cannot encode the byte 0xff
b'\xff'.decode(TESTFN_ENCODING)
except UnicodeDecodeError:
# 0xff will be encoded using the surrogate character u+DCFF
TESTFN_UNENCODABLE = TESTFN \
+ b'-\xff'.decode(TESTFN_ENCODING, 'surrogateescape')
else:
# File system encoding (eg. ISO-8859-* encodings) can encode
# the byte 0xff. Skip some unicode filename tests.
pass
# Save the initial cwd
SAVEDCWD = os.getcwd()
@contextlib.contextmanager
def temp_cwd(name='tempcwd', quiet=False, path=None):
    """
    Context manager that temporarily changes the CWD.

    An existing path may be provided as *path*, in which case this
    function makes no changes to the file system.

    Otherwise, the new CWD is created in the current directory and it's
    named *name*. If *quiet* is False (default) and it's not possible to
    create or change the CWD, an error is raised.  If it's True, only a
    warning is raised and the original CWD is used.

    Yields the (possibly unchanged) current working directory.
    """
    saved_dir = os.getcwd()
    is_temporary = False
    if path is None:
        path = name
        try:
            os.mkdir(name)
            is_temporary = True
        except OSError:
            if not quiet:
                raise
            warnings.warn('tests may fail, unable to create temp CWD ' + name,
                          RuntimeWarning, stacklevel=3)
    try:
        os.chdir(path)
    except OSError:
        if not quiet:
            raise
        warnings.warn('tests may fail, unable to change the CWD to ' + name,
                      RuntimeWarning, stacklevel=3)
    try:
        yield os.getcwd()
    finally:
        # Restore the original CWD before deleting the temporary one.
        os.chdir(saved_dir)
        if is_temporary:
            rmtree(name)
if hasattr(os, "umask"):
    @contextlib.contextmanager
    def temp_umask(umask):
        """Context manager temporarily setting the process umask to *umask*."""
        saved = os.umask(umask)
        try:
            yield
        finally:
            os.umask(saved)
def findfile(file, here=__file__, subdir=None):
    """Look for *file* next to *here* and on sys.path; return *file* itself
    when not found (which does not necessarily signal failure -- it could
    still be a legitimate path)."""
    if os.path.isabs(file):
        return file
    if subdir is not None:
        file = os.path.join(subdir, file)
    for directory in [os.path.dirname(here)] + sys.path:
        candidate = os.path.join(directory, file)
        if os.path.exists(candidate):
            return candidate
    return file
def create_empty_file(filename):
    """Create *filename* empty, truncating it when it already exists."""
    os.close(os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC))
def sortdict(dict):
    "Like repr(dict), but in sorted order."
    pairs = ("%r: %r" % item for item in sorted(dict.items()))
    return "{%s}" % ", ".join(pairs)
def make_bad_fd():
    """
    Create an invalid file descriptor by opening and closing a file and return
    its fd.

    The returned number is stale by construction: the file is closed (and the
    TESTFN file removed) before returning, so operations on the fd fail.
    """
    file = open(TESTFN, "wb")
    try:
        return file.fileno()
    finally:
        file.close()
        unlink(TESTFN)
def check_syntax_error(testcase, statement):
    """Assert via *testcase* that compiling *statement* raises SyntaxError."""
    with testcase.assertRaises(SyntaxError):
        compile(statement, '<test string>', 'exec')
def open_urlresource(url, *args, **kw):
    """Open the resource at *url*, using a cached copy under ./data when one
    exists and passes the optional ``check`` callable; otherwise download it
    (requires the 'urlfetch' resource).  Extra args are passed to open()."""
    import urllib.request, urllib.parse

    # Optional validator: called with the open file; truthy means the cached
    # copy is usable (the file is rewound before being returned).
    check = kw.pop('check', None)

    filename = urllib.parse.urlparse(url)[2].split('/')[-1] # '/': it's URL!

    fn = os.path.join(os.path.dirname(__file__), "data", filename)

    def check_valid_file(fn):
        # Returns an open file object, or None (implicitly) when *check*
        # rejects the file.
        f = open(fn, *args, **kw)
        if check is None:
            return f
        elif check(f):
            f.seek(0)
            return f
        f.close()

    if os.path.exists(fn):
        f = check_valid_file(fn)
        if f is not None:
            return f
        unlink(fn)

    # Verify the requirement before downloading the file
    requires('urlfetch')
    print('\tfetching %s ...' % url, file=get_original_stdout())
    f = urllib.request.urlopen(url, timeout=15)
    try:
        with open(fn, "wb") as out:
            s = f.read()
            while s:
                out.write(s)
                s = f.read()
    finally:
        f.close()
    f = check_valid_file(fn)
    if f is not None:
        return f
    raise TestFailed('invalid resource %r' % fn)
class WarningsRecorder(object):
    """Convenience wrapper around the list produced by
    warnings.catch_warnings(record=True).

    Attribute access is proxied to the most recent warning; reset() marks
    all warnings recorded so far as seen."""

    def __init__(self, warnings_list):
        self._warnings = warnings_list
        self._last = 0

    def __getattr__(self, attr):
        if len(self._warnings) > self._last:
            # Delegate to the newest recorded warning.
            return getattr(self._warnings[-1], attr)
        if attr in warnings.WarningMessage._WARNING_DETAILS:
            # No unseen warning: detail fields read as None.
            return None
        raise AttributeError("%r has no attribute %r" % (self, attr))

    @property
    def warnings(self):
        # Only the warnings recorded since the last reset().
        return self._warnings[self._last:]

    def reset(self):
        self._last = len(self._warnings)
def _filterwarnings(filters, quiet=False):
    """Catch the warnings, then check if all the expected
    warnings have been raised and re-raise unexpected warnings.
    If 'quiet' is True, only re-raise the unexpected warnings.

    NOTE: this is a generator function; check_warnings() drives it through
    contextlib's machinery, so the code after the yield runs at context exit.
    """
    # Clear the warning registry of the calling module
    # in order to re-raise the warnings.
    # (frame depth 2 assumes we are invoked via check_warnings -- do not
    # call this helper directly from other depths.)
    frame = sys._getframe(2)
    registry = frame.f_globals.get('__warningregistry__')
    if registry:
        registry.clear()
    with warnings.catch_warnings(record=True) as w:
        # Set filter "always" to record all warnings.  Because
        # test_warnings swaps the warnings module out, look it up through
        # sys.modules instead of using the module-level binding.
        sys.modules['warnings'].simplefilter("always")
        yield WarningsRecorder(w)
    # Filter the recorded warnings
    reraise = list(w)
    missing = []
    for msg, cat in filters:
        seen = False
        # NB: the loop variable deliberately reuses (and clobbers) `w`.
        for w in reraise[:]:
            warning = w.message
            # Filter out the matching messages
            if (re.match(msg, str(warning), re.I) and
                issubclass(warning.__class__, cat)):
                seen = True
                reraise.remove(w)
        if not seen and not quiet:
            # This filter caught nothing
            missing.append((msg, cat.__name__))
    if reraise:
        raise AssertionError("unhandled warning %s" % reraise[0])
    if missing:
        raise AssertionError("filter (%r, %s) did not catch any warning" %
                             missing[0])
@contextlib.contextmanager
def check_warnings(*filters, **kwargs):
    """Context manager to silence warnings.

    Accept 2-tuples as positional arguments:
        ("message regexp", WarningCategory)

    Optional argument:
     - if 'quiet' is True, it does not fail if a filter catches nothing
        (default True without argument,
         default False if some filters are defined)

    Without argument, it defaults to:
        check_warnings(("", Warning), quiet=True)

    NOTE(review): this function contains no yield; the generator that
    @contextmanager drives is the one returned by _filterwarnings(), so the
    decorator placement works, if unconventionally.
    """
    quiet = kwargs.get('quiet')
    if not filters:
        filters = (("", Warning),)
        # Preserve backward compatibility
        if quiet is None:
            quiet = True
    return _filterwarnings(filters, quiet)
class CleanImport(object):
    """Context manager forcing import to return a fresh module reference.

    This is useful for testing module-level behaviours, such as a
    DeprecationWarning emitted on import.  Use like this:

        with CleanImport("foo"):
            importlib.import_module("foo")  # new reference
    """

    def __init__(self, *module_names):
        self.original_modules = sys.modules.copy()
        for name in module_names:
            if name not in sys.modules:
                continue
            module = sys.modules[name]
            # The entry may be an alias for a module registered under its
            # real name (e.g. stubs for modules renamed in 3.x); evict the
            # real one too so the import cache is truly clear.
            if module.__name__ != name:
                del sys.modules[module.__name__]
            del sys.modules[name]

    def __enter__(self):
        return self

    def __exit__(self, *ignore_exc):
        sys.modules.update(self.original_modules)
class EnvironmentVarGuard(collections.abc.MutableMapping):
    """Mapping-like helper protecting os.environ: every variable touched
    through it is restored to its original value when the guard is used as
    a context manager."""

    def __init__(self):
        self._environ = os.environ
        self._changed = {}

    def _remember(self, envvar):
        # Record the pre-modification value the first time a key is touched.
        if envvar not in self._changed:
            self._changed[envvar] = self._environ.get(envvar)

    def __getitem__(self, envvar):
        return self._environ[envvar]

    def __setitem__(self, envvar, value):
        self._remember(envvar)
        self._environ[envvar] = value

    def __delitem__(self, envvar):
        self._remember(envvar)
        if envvar in self._environ:
            del self._environ[envvar]

    def keys(self):
        return self._environ.keys()

    def __iter__(self):
        return iter(self._environ)

    def __len__(self):
        return len(self._environ)

    def set(self, envvar, value):
        self[envvar] = value

    def unset(self, envvar):
        del self[envvar]

    def __enter__(self):
        return self

    def __exit__(self, *ignore_exc):
        for envvar, value in self._changed.items():
            if value is None:
                self._environ.pop(envvar, None)
            else:
                self._environ[envvar] = value
        os.environ = self._environ
class DirsOnSysPath(object):
    """Context manager that temporarily extends sys.path.

    Both the list object and its contents are restored on exit, so even a
    wholesale replacement of sys.path inside the block is undone."""

    def __init__(self, *paths):
        self.original_value = list(sys.path)
        self.original_object = sys.path
        sys.path.extend(paths)

    def __enter__(self):
        return self

    def __exit__(self, *ignore_exc):
        sys.path = self.original_object
        sys.path[:] = self.original_value
class TransientResource(object):
    """Context manager translating a matching exception into ResourceDenied.

    An exception raised in the block whose type matches *exc* and whose
    attributes match every keyword given to the constructor is replaced by
    ResourceDenied; any other exception propagates unchanged."""

    def __init__(self, exc, **kwargs):
        self.exc = exc
        self.attrs = kwargs

    def __enter__(self):
        return self

    def __exit__(self, type_=None, value=None, traceback=None):
        # NB: keeps the original (slightly surprising) direction of the
        # subclass test -- the *expected* type against the *raised* type.
        if type_ is None or not issubclass(self.exc, type_):
            return
        for attr, expected in self.attrs.items():
            if not hasattr(value, attr):
                return
            if getattr(value, attr) != expected:
                return
        raise ResourceDenied("an optional resource is not available")
# Context managers that raise ResourceDenied when various issues
# with the Internet connection manifest themselves as exceptions.
# XXX deprecate these and use transient_internet() instead
time_out = TransientResource(IOError, errno=errno.ETIMEDOUT)
socket_peer_reset = TransientResource(socket.error, errno=errno.ECONNRESET)
ioerror_peer_reset = TransientResource(IOError, errno=errno.ECONNRESET)
@contextlib.contextmanager
def transient_internet(resource_name, *, timeout=30.0, errnos=()):
    """Return a context manager that raises ResourceDenied when various issues
    with the Internet connection manifest themselves as exceptions.

    *errnos* overrides the default set of captured errno values; *timeout*
    temporarily becomes the socket default timeout inside the block.
    """
    # Numeric fallbacks are used when the symbolic name is missing from the
    # errno/socket modules on this platform (values appear to be the common
    # Linux/Winsock ones).
    default_errnos = [
        ('ECONNREFUSED', 111),
        ('ECONNRESET', 104),
        ('EHOSTUNREACH', 113),
        ('ENETUNREACH', 101),
        ('ETIMEDOUT', 110),
    ]
    default_gai_errnos = [
        ('EAI_AGAIN', -3),
        ('EAI_FAIL', -4),
        ('EAI_NONAME', -2),
        ('EAI_NODATA', -5),
        # Encountered when trying to resolve IPv6-only hostnames
        ('WSANO_DATA', 11004),
    ]

    denied = ResourceDenied("Resource %r is not available" % resource_name)
    captured_errnos = errnos
    gai_errnos = []
    if not captured_errnos:
        captured_errnos = [getattr(errno, name, num)
                           for (name, num) in default_errnos]
        gai_errnos = [getattr(socket, name, num)
                      for (name, num) in default_gai_errnos]

    def filter_error(err):
        # Convert a connectivity-flavoured error into ResourceDenied;
        # anything else is left to the caller's `raise` below.
        n = getattr(err, 'errno', None)
        if (isinstance(err, socket.timeout) or
            (isinstance(err, socket.gaierror) and n in gai_errnos) or
            n in captured_errnos):
            if not verbose:
                sys.stderr.write(denied.args[0] + "\n")
            raise denied from err

    old_timeout = socket.getdefaulttimeout()
    try:
        if timeout is not None:
            socket.setdefaulttimeout(timeout)
        yield
    except IOError as err:
        # urllib can wrap original socket errors multiple times (!), we must
        # unwrap to get at the original error.
        while True:
            a = err.args
            if len(a) >= 1 and isinstance(a[0], IOError):
                err = a[0]
            # The error can also be wrapped as args[1]:
            #    except socket.error as msg:
            #        raise IOError('socket error', msg).with_traceback(sys.exc_info()[2])
            elif len(a) >= 2 and isinstance(a[1], IOError):
                err = a[1]
            else:
                break
        filter_error(err)
        raise
    # XXX should we catch generic exceptions and look for their
    # __cause__ or __context__?
    finally:
        socket.setdefaulttimeout(old_timeout)
@contextlib.contextmanager
def captured_output(stream_name):
    """Context manager that swaps ``sys.<stream_name>`` for a StringIO.

    Yields the replacement stream; the original stream object is restored
    on exit, even if the body raises.  Used by captured_stdout/stdin/stderr.
    """
    import io
    replacement = io.StringIO()
    saved_stream = getattr(sys, stream_name)
    setattr(sys, stream_name, replacement)
    try:
        yield replacement
    finally:
        setattr(sys, stream_name, saved_stream)

def captured_stdout():
    """Capture the output of sys.stdout:

       with captured_stdout() as s:
           print("hello")
       self.assertEqual(s.getvalue(), "hello")
    """
    return captured_output("stdout")

def captured_stderr():
    """Capture the output of sys.stderr (see captured_stdout)."""
    return captured_output("stderr")

def captured_stdin():
    """Temporarily replace sys.stdin with a StringIO (see captured_stdout)."""
    return captured_output("stdin")
def gc_collect():
    """Force as many objects as possible to be collected.

    In non-CPython implementations of Python, this is needed because timely
    deallocation is not guaranteed by the garbage collector.  (Even in CPython
    this can be the case in case of reference cycles.)  This means that __del__
    methods may be called later than expected and weakrefs may remain alive for
    longer than expected.  This function tries its best to force all garbage
    objects to disappear.
    """
    gc.collect()
    if is_jython:
        # Jython's collector may run asynchronously; give it a moment.
        time.sleep(0.1)
    # Repeated passes help break reference cycles discovered by earlier passes.
    gc.collect()
    gc.collect()
@contextlib.contextmanager
def disable_gc():
    """Context manager that switches off the cyclic garbage collector for the
    duration of the block, re-enabling it on exit only if it was on before."""
    was_enabled = gc.isenabled()
    gc.disable()
    try:
        yield
    finally:
        if was_enabled:
            gc.enable()
def python_is_optimized():
    """Return True if Python was compiled with a real optimization level.

    Scans PY_CFLAGS for ``-O*`` flags; the last one wins (mirroring how
    compilers treat repeated flags).  ``-O0`` and no flag count as
    unoptimized.
    """
    cflags = sysconfig.get_config_var('PY_CFLAGS') or ''
    last_opt = ''
    for flag in cflags.split():
        if flag.startswith('-O'):
            last_opt = flag
    return last_opt not in ('', '-O0')
# Struct format fragments describing the common PyObject header.  A debug
# build (detected via sys.gettotalrefcount) prepends two pointers used by
# the refcount-tracking doubly linked list.
_header = 'nP'
_align = '0n'
if hasattr(sys, "gettotalrefcount"):
    _header = '2P' + _header
    _align = '0P'
_vheader = _header + 'n'

def calcobjsize(fmt):
    # Expected size of a fixed-size object whose payload is described by fmt.
    return struct.calcsize(_header + fmt + _align)

def calcvobjsize(fmt):
    # Expected size of a variable-size object (header gains the ob_size field).
    return struct.calcsize(_vheader + fmt + _align)

# Type-flag bits mirrored from Include/object.h.
_TPFLAGS_HAVE_GC = 1<<14
_TPFLAGS_HEAPTYPE = 1<<9
def check_sizeof(test, o, size):
    """Assert (via the *test* case) that sys.getsizeof(o) equals *size*,
    adding the GC header size for GC-tracked objects."""
    result = sys.getsizeof(o)
    # add GC header size
    if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or\
        ((type(o) != type) and (type(o).__flags__ & _TPFLAGS_HAVE_GC))):
        size += _testcapi.SIZEOF_PYGC_HEAD
    msg = 'wrong size for %s: got %d, expected %d' \
            % (type(o), result, size)
    test.assertEqual(result, size, msg)
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.
def run_with_locale(catstr, *locales):
    """Decorator factory: run the decorated function under a specific locale.

    *catstr* names a ``locale`` category attribute (e.g. ``'LC_NUMERIC'``);
    *locales* are candidate locale names tried in order until one can be
    set.  The original locale is restored afterwards.  If the original
    locale cannot be determined, the function simply runs unmodified.

    Fix: the original code used bare ``except:`` clauses, which also
    swallowed KeyboardInterrupt/SystemExit; they are narrowed to
    ``except Exception:`` here.
    """
    def decorator(func):
        def inner(*args, **kwds):
            try:
                import locale
                category = getattr(locale, catstr)
                orig_locale = locale.setlocale(category)
            except AttributeError:
                # if the test author gives us an invalid category string
                raise
            except Exception:
                # cannot retrieve original locale, so do nothing
                locale = orig_locale = None
            else:
                # Try each candidate locale until one is accepted.
                for loc in locales:
                    try:
                        locale.setlocale(category, loc)
                        break
                    except Exception:
                        pass

            # now run the function, resetting the locale on exceptions
            try:
                return func(*args, **kwds)
            finally:
                if locale and orig_locale:
                    locale.setlocale(category, orig_locale)
        inner.__name__ = func.__name__
        inner.__doc__ = func.__doc__
        return inner
    return decorator
#=======================================================================
# Decorator for running a function in a specific timezone, correctly
# resetting it afterwards.
def run_with_tz(tz):
    """Decorator factory: run the decorated function with TZ set to *tz*.

    Requires ``time.tzset`` (POSIX only); otherwise the test is skipped.
    The previous TZ environment value -- or its absence -- is restored
    afterwards and ``tzset()`` is re-applied.

    Fix: replaced the ``orig_tz == None`` comparison with the idiomatic
    (and identity-correct) ``orig_tz is None``.
    """
    def decorator(func):
        def inner(*args, **kwds):
            try:
                tzset = time.tzset
            except AttributeError:
                raise unittest.SkipTest("tzset required")
            # Remember the previous TZ value so we can restore it exactly.
            if 'TZ' in os.environ:
                orig_tz = os.environ['TZ']
            else:
                orig_tz = None
            os.environ['TZ'] = tz
            tzset()

            # now run the function, resetting the tz on exceptions
            try:
                return func(*args, **kwds)
            finally:
                if orig_tz is None:
                    del os.environ['TZ']
                else:
                    os.environ['TZ'] = orig_tz
                time.tzset()

        inner.__name__ = func.__name__
        inner.__doc__ = func.__doc__
        return inner
    return decorator
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use
# should be configurable.
# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
# Size shorthands, used both as byte-limits and size-limits in the
# various bigmem tests.
_1M = 1024 * 1024
_1G = 1024 * _1M
_2G = 2 * _1G
_4G = 4 * _1G

MAX_Py_ssize_t = sys.maxsize

def set_memlimit(limit):
    """Parse a human-readable memory *limit* (e.g. '2.5G', '512m') and set
    the module-wide bigmem ceilings.

    ``real_max_memuse`` records the requested amount; ``max_memuse`` is the
    same value clamped to what a Py_ssize_t can express.  Raises ValueError
    for unparseable strings and for limits below ~2G, which are too small
    for the bigmem tests to be meaningful.
    """
    global max_memuse
    global real_max_memuse
    units = {
        'k': 1024,
        'm': _1M,
        'g': _1G,
        't': 1024 * _1G,
    }
    match = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
                     re.IGNORECASE | re.VERBOSE)
    if match is None:
        raise ValueError('Invalid memory limit %r' % (limit,))
    requested = int(float(match.group(1)) * units[match.group(3).lower()])
    real_max_memuse = requested
    if requested > MAX_Py_ssize_t:
        requested = MAX_Py_ssize_t
    if requested < _2G - 1:
        raise ValueError('Memory limit %r too low to be useful' % (limit,))
    max_memuse = requested
class _MemoryWatchdog:
    """An object which periodically watches the process' memory consumption
    and prints it out.
    """

    def __init__(self):
        # Linux-specific: /proc/<pid>/statm exposes this process's
        # memory statistics for the watchdog child to read.
        self.procfile = '/proc/{pid}/statm'.format(pid=os.getpid())
        self.started = False

    def start(self):
        """Launch the watchdog subprocess; warns and bails out if /proc is
        unavailable (e.g. on non-Linux platforms)."""
        try:
            f = open(self.procfile, 'r')
        except OSError as e:
            warnings.warn('/proc not available for stats: {}'.format(e),
                          RuntimeWarning)
            sys.stderr.flush()
            return

        watchdog_script = findfile("memory_watchdog.py")
        # The child reads the statm file via its stdin; its stderr is
        # discarded so it cannot pollute test output.
        self.mem_watchdog = subprocess.Popen([sys.executable, watchdog_script],
                                             stdin=f, stderr=subprocess.DEVNULL)
        f.close()
        self.started = True

    def stop(self):
        """Terminate the watchdog subprocess and reap it, if it was started."""
        if self.started:
            self.mem_watchdog.terminate()
            self.mem_watchdog.wait()
def bigmemtest(size, memuse, dry_run=True):
    """Decorator for bigmem tests.

    'size' is a requested size for the test (in arbitrary, test-interpreted
    units.)  'memuse' is the number of bytes per unit for the test, or a good
    estimate of it.

    if 'dry_run' is False, it means the test doesn't support dummy runs
    when -M is not specified.
    """
    def decorator(f):
        def wrapper(self):
            size = wrapper.size
            memuse = wrapper.memuse
            if not real_max_memuse:
                # No -M limit configured: use a token size for a dry run.
                maxsize = 5147
            else:
                maxsize = size

            # Skip when the configured limit cannot accommodate the test.
            if ((real_max_memuse or not dry_run)
                and real_max_memuse < maxsize * memuse):
                raise unittest.SkipTest(
                    "not enough memory: %.1fG minimum needed"
                    % (size * memuse / (1024 ** 3)))

            if real_max_memuse and verbose:
                print()
                print(" ... expected peak memory use: {peak:.1f}G"
                      .format(peak=size * memuse / (1024 ** 3)))
                watchdog = _MemoryWatchdog()
                watchdog.start()
            else:
                watchdog = None

            try:
                return f(self, maxsize)
            finally:
                if watchdog:
                    watchdog.stop()

        # Expose the parameters so test runners can introspect/adjust them.
        wrapper.size = size
        wrapper.memuse = memuse
        return wrapper
    return decorator
def bigaddrspacetest(f):
    """Decorator for tests that fill the address space.

    Skips unless max_memuse allows allocations approaching the Py_ssize_t
    maximum (i.e. the whole address space on 32-bit builds).
    """
    def wrapper(self):
        if max_memuse < MAX_Py_ssize_t:
            if MAX_Py_ssize_t >= 2**63 - 1 and max_memuse >= 2**31:
                # 64-bit build: exhausting the address space is impractical;
                # these tests only make sense on a 32-bit build.
                raise unittest.SkipTest(
                    "not enough memory: try a 32-bit build instead")
            else:
                raise unittest.SkipTest(
                    "not enough memory: %.1fG minimum needed"
                    % (MAX_Py_ssize_t / (1024 ** 3)))
        else:
            return f(self)
    return wrapper
#=======================================================================
# unittest integration.
class BasicTestRunner:
    """Minimal unittest runner: execute the suite silently and hand back
    the raw TestResult (no console reporting)."""

    def run(self, test):
        outcome = unittest.TestResult()
        test(outcome)
        return outcome
def _id(obj):
    # Identity decorator: returned when a resource/guard check passes.
    return obj

def requires_resource(resource):
    """Skip-decorator factory: skip the test unless *resource* is enabled.

    The 'gui' resource additionally requires a working display.
    """
    if resource == 'gui' and not _is_gui_available():
        return unittest.skip("resource 'gui' is not available")
    if is_resource_enabled(resource):
        return _id
    else:
        return unittest.skip("resource {0!r} is not enabled".format(resource))
def cpython_only(test):
    """
    Decorator for tests only applicable on CPython.
    """
    # Delegates to impl_detail with its default cpython=True guard.
    return impl_detail(cpython=True)(test)
def impl_detail(msg=None, **guards):
    """Skip-decorator factory for implementation-detail tests.

    With no guards the test runs only on CPython.  *msg* overrides the
    generated skip message; ``{0}`` in it is replaced by the guarded
    implementation names.
    """
    if check_impl_detail(**guards):
        # This implementation matches the guards: run the test unchanged.
        return _id
    if msg is None:
        guardnames, default = _parse_guards(guards)
        if default:
            msg = "implementation detail not available on {0}"
        else:
            msg = "implementation detail specific to {0}"
        guardnames = sorted(guardnames.keys())
        msg = msg.format(' or '.join(guardnames))
    return unittest.skip(msg)
def _parse_guards(guards):
    """Normalize guard keywords into ({implementation_name: run_me}, default).

    An empty guard set means "CPython only".  All supplied guard values must
    agree (all True or all False); the default for unnamed implementations
    is the opposite of that shared value.
    """
    if not guards:
        return ({'cpython': True}, False)
    values = list(guards.values())
    first = values[0]
    assert values == [first] * len(guards)    # all True or all False
    return (guards, not first)

# Use the following check to guard CPython's implementation-specific tests --
# or to run them only on the implementation(s) guarded by the arguments.
def check_impl_detail(**guards):
    """This function returns True or False depending on the host platform.

    Examples:
       if check_impl_detail():               # only on CPython (default)
       if check_impl_detail(jython=True):    # only on Jython
       if check_impl_detail(cpython=False):  # everywhere except on CPython
    """
    guards, default = _parse_guards(guards)
    return guards.get(platform.python_implementation().lower(), default)
def no_tracing(func):
    """Decorator that suspends any sys.settrace() trace function while the
    wrapped callable runs, restoring it afterwards."""
    if not hasattr(sys, 'gettrace'):
        # This interpreter has no tracing support; nothing to suspend.
        return func

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        saved_trace = sys.gettrace()
        try:
            sys.settrace(None)
            return func(*args, **kwargs)
        finally:
            sys.settrace(saved_trace)
    return wrapper
def refcount_test(test):
    """Decorator for tests which involve reference counting.

    To start, the decorator does not run the test if is not run by CPython.
    After that, any trace function is unset during the test to prevent
    unexpected refcounts caused by the trace function.
    """
    # Compose the two existing guards rather than re-implementing them.
    return no_tracing(cpython_only(test))
def _filter_suite(suite, pred):
    """Recursively filter test cases in a suite based on a predicate."""
    newtests = []
    for test in suite._tests:
        if isinstance(test, unittest.TestSuite):
            # Filter nested suites in place, but always keep the suite node.
            _filter_suite(test, pred)
            newtests.append(test)
        else:
            if pred(test):
                newtests.append(test)
    # Replace the suite's private test list with the filtered one.
    suite._tests = newtests
def _run_suite(suite):
    """Run tests from a unittest.TestSuite-derived class.

    Raises TestFailed on any failure/error; output verbosity follows the
    module-level ``verbose`` flag.
    """
    if verbose:
        runner = unittest.TextTestRunner(sys.stdout, verbosity=2,
                                         failfast=failfast)
    else:
        runner = BasicTestRunner()

    result = runner.run(suite)
    if not result.wasSuccessful():
        # Report a single traceback directly; summarize multiple problems.
        if len(result.errors) == 1 and not result.failures:
            err = result.errors[0][1]
        elif len(result.failures) == 1 and not result.errors:
            err = result.failures[0][1]
        else:
            err = "multiple errors occurred"
            if not verbose: err += "; run in verbose mode for details"
        raise TestFailed(err)
def run_unittest(*classes):
    """Run tests from unittest.TestCase-derived classes.

    Each argument may be a TestSuite, a TestCase (sub)class, or the name of
    an already-imported module whose test cases should be collected.  Tests
    are filtered through the module-level ``match_tests`` pattern, if set.
    """
    valid_types = (unittest.TestSuite, unittest.TestCase)
    suite = unittest.TestSuite()
    for cls in classes:
        if isinstance(cls, str):
            if cls in sys.modules:
                suite.addTest(unittest.findTestCases(sys.modules[cls]))
            else:
                raise ValueError("str arguments must be keys in sys.modules")
        elif isinstance(cls, valid_types):
            suite.addTest(cls)
        else:
            suite.addTest(unittest.makeSuite(cls))
    def case_pred(test):
        # Keep a test if any dotted-name component matches match_tests.
        if match_tests is None:
            return True
        for name in test.id().split("."):
            if fnmatch.fnmatchcase(name, match_tests):
                return True
        return False
    _filter_suite(suite, case_pred)
    _run_suite(suite)
#=======================================================================
# doctest driver.
def run_doctest(module, verbosity=None, optionflags=0):
    """Run doctest on the given module.  Return (#failures, #tests).

    If optional argument verbosity is not specified (or is None), pass
    support's belief about verbosity on to doctest.  Else doctest's
    usual behavior is used (it searches sys.argv for -v).
    """
    import doctest

    if verbosity is None:
        verbosity = verbose
    else:
        # Explicit verbosity given: let doctest decide from sys.argv.
        verbosity = None

    f, t = doctest.testmod(module, verbose=verbosity, optionflags=optionflags)
    if f:
        raise TestFailed("%d of %d doctests failed" % (f, t))
    if verbose:
        print('doctest (%s) ... %d tests with zero failures' %
              (module.__name__, t))
    return f, t
#=======================================================================
# Support for saving and restoring the imported modules.
def modules_setup():
    """Snapshot sys.modules; returns a 1-tuple suitable for unpacking as the
    arguments of modules_cleanup()."""
    return (sys.modules.copy(),)
def modules_cleanup(oldmodules):
    """Restore sys.modules from the snapshot taken by modules_setup()."""
    # Encoders/decoders are registered permanently within the internal
    # codec cache. If we destroy the corresponding modules their
    # globals will be set to None which will trip up the cached functions.
    encodings = [(k, v) for k, v in sys.modules.items()
                 if k.startswith('encodings.')]
    sys.modules.clear()
    sys.modules.update(encodings)
    # XXX: This kind of problem can affect more than just encodings. In particular
    # extension modules (such as _ssl) don't cope with reloading properly.
    # Really, test modules should be cleaning out the test specific modules they
    # know they added (ala test_runpy) rather than relying on this function (as
    # test_importhooks and test_pkg do currently).
    # Implicitly imported *real* modules should be left alone (see issue 10556).
    sys.modules.update(oldmodules)
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
# NOTE: we use thread._count() rather than threading.enumerate() (or the
# moral equivalent thereof) because a threading.Thread object is still alive
# until its __bootstrap() method has returned, even after it has been
# unregistered from the threading module.
# thread._count(), on the other hand, only gets decremented *after* the
# __bootstrap() method has returned, which gives us reliable reference counts
# at the end of a test run.
def threading_setup():
    """Snapshot (thread count, dangling threads) for refleak accounting.

    Returns a harmless constant pair when the _thread module is unavailable.
    """
    if _thread:
        return _thread._count(), threading._dangling.copy()
    else:
        return 1, ()
def threading_cleanup(*original_values):
    """Poll (up to ~1 second) until thread counts drop back to the snapshot
    taken by threading_setup(); collects garbage between polls."""
    if not _thread:
        return
    _MAX_COUNT = 10
    for count in range(_MAX_COUNT):
        values = _thread._count(), threading._dangling
        if values == original_values:
            break
        time.sleep(0.1)
        gc_collect()
    # XXX print a warning in case of failure?
def reap_threads(func):
    """Use this function when threads are being used.  This will
    ensure that the threads are cleaned up even when the test fails.
    If threading is unavailable this function does nothing.
    """
    if not _thread:
        return func

    @functools.wraps(func)
    def decorator(*args):
        # Snapshot before, always clean up after -- even on test failure.
        key = threading_setup()
        try:
            return func(*args)
        finally:
            threading_cleanup(*key)
    return decorator
def reap_children():
    """Use this function at the end of test_main() whenever sub-processes
    are started.  This will help ensure that no extra children (zombies)
    stick around to hog resources and create problems when looking
    for refleaks.

    Fix: the original loop used a bare ``except:`` which also swallowed
    KeyboardInterrupt/SystemExit; it is narrowed to OSError, which covers
    ECHILD ("no more children") and the Windows waitpid failure alike.
    """
    # Reap all our dead child processes so we don't leave zombies around.
    # These hog resources and might be causing some of the buildbots to die.
    if hasattr(os, 'waitpid'):
        any_process = -1
        while True:
            try:
                # This will raise an exception on Windows.  That's ok.
                pid, status = os.waitpid(any_process, os.WNOHANG)
                if pid == 0:
                    break
            except OSError:
                # No (more) children to reap, or waitpid is unsupported
                # with these arguments on this platform -- we are done.
                break
@contextlib.contextmanager
def swap_attr(obj, attr, new_val):
    """Temporarily replace ``obj.<attr>`` with *new_val*.

    Usage:

        with swap_attr(obj, "attr", 5):
            ...

    Within the block ``obj.attr`` is 5; on exit the previous value is
    restored.  If the attribute did not exist before, it is created for
    the duration of the block and deleted afterwards.
    """
    had_attr = hasattr(obj, attr)
    if had_attr:
        saved_val = getattr(obj, attr)
    setattr(obj, attr, new_val)
    try:
        yield
    finally:
        if had_attr:
            setattr(obj, attr, saved_val)
        else:
            delattr(obj, attr)
@contextlib.contextmanager
def swap_item(obj, item, new_val):
    """Temporarily replace ``obj[item]`` with *new_val*.

    Usage:

        with swap_item(obj, "item", 5):
            ...

    Within the block ``obj["item"]`` is 5; on exit the previous value is
    restored.  If the key did not exist before, it is created for the
    duration of the block and deleted afterwards.
    """
    had_item = item in obj
    if had_item:
        saved_val = obj[item]
    obj[item] = new_val
    try:
        yield
    finally:
        if had_item:
            obj[item] = saved_val
        else:
            del obj[item]
def strip_python_stderr(stderr):
    """Strip debug-build noise (e.g. ``b"[1234 refs]"``) from *stderr* bytes.

    This will typically be run on the stderr half of a subprocess.Popen
    object's communicate() result.
    """
    cleaned = re.sub(br"\[\d+ refs\]\r?\n?", b"", stderr)
    return cleaned.strip()
def args_from_interpreter_flags():
    """Return a list of command-line arguments reproducing the current
    settings in sys.flags and sys.warnoptions."""
    # Delegates to subprocess's private helper so all of the stdlib agrees
    # on how interpreter flags are re-spelled for child processes.
    return subprocess._args_from_interpreter_flags()
#============================================================
# Support for assertions about logging.
#============================================================
class TestHandler(logging.handlers.BufferingHandler):
    """Logging handler that buffers record dicts for later matching.

    Records are never flushed automatically; call flush() to empty the
    buffer.  A *matcher* object (see Matcher) decides whether a buffered
    record satisfies the keyword criteria passed to matches().
    """

    def __init__(self, matcher):
        # BufferingHandler requires a capacity so it knows when to flush;
        # zero is fine because shouldFlush() below always says no.
        logging.handlers.BufferingHandler.__init__(self, 0)
        self.matcher = matcher

    def shouldFlush(self):
        # Never flush automatically -- records accumulate until flush().
        return False

    def emit(self, record):
        # Formatting fills in derived attributes (record.message etc.)
        # before the record's attribute dict is snapshotted.
        self.format(record)
        self.buffer.append(record.__dict__)

    def matches(self, **kwargs):
        """
        Look for a saved dict whose keys/values match the supplied arguments.
        """
        return any(self.matcher.matches(stored, **kwargs)
                   for stored in self.buffer)
class Matcher(object):
    """Match stored log-record dicts against keyword criteria.

    Keys listed in _partial_matches compare string values by substring;
    everything else compares by equality.  Subclass and extend
    match_value() for e.g. regular-expression matching.
    """

    # Keys whose string values are checked for partial (substring) matches.
    _partial_matches = ('msg', 'message')

    def matches(self, d, **kwargs):
        """
        Try to match a single dict with the supplied arguments.
        """
        return all(self.match_value(key, d.get(key), wanted)
                   for key, wanted in kwargs.items())

    def match_value(self, k, dv, v):
        """
        Try to match a single stored value (dv) with a supplied value (v).
        """
        if type(v) != type(dv):
            return False
        if type(dv) is not str or k not in self._partial_matches:
            return v == dv
        return dv.find(v) >= 0
# Cached result of can_symlink(); None means "not probed yet".
_can_symlink = None
def can_symlink():
    """Return True if this process can create symlinks (result is cached)."""
    global _can_symlink
    if _can_symlink is not None:
        return _can_symlink
    symlink_path = TESTFN + "can_symlink"
    try:
        os.symlink(TESTFN, symlink_path)
        can = True
    except (OSError, NotImplementedError, AttributeError):
        # No OS support, insufficient privilege (e.g. Windows), or the
        # platform lacks os.symlink entirely.
        can = False
    else:
        os.remove(symlink_path)
    _can_symlink = can
    return can
def skip_unless_symlink(test):
    """Skip decorator for tests that require functional symlink"""
    ok = can_symlink()
    msg = "Requires functional symlink implementation"
    # Return the test untouched when symlinks work; otherwise wrap in skip.
    return test if ok else unittest.skip(msg)(test)
# Cached result of can_xattr(); None means "not probed yet".
_can_xattr = None
def can_xattr():
    """Return True if the platform and the filesystems used by the tests
    support extended attributes (os.setxattr); the result is cached.

    Fixes: the mkstemp() file descriptor was previously never closed
    (fd leak on every first call), and the kernel-version regex used
    unescaped dots / a non-raw string ("2.6.(\\d{1,2})").
    """
    global _can_xattr
    if _can_xattr is not None:
        return _can_xattr
    if not hasattr(os, "setxattr"):
        can = False
    else:
        tmp_fp, tmp_name = tempfile.mkstemp()
        try:
            with open(TESTFN, "wb") as fp:
                try:
                    # TESTFN & tempfile may use different file systems with
                    # different capabilities
                    os.setxattr(tmp_fp, b"user.test", b"")
                    os.setxattr(fp.fileno(), b"user.test", b"")
                    # Kernels < 2.6.39 don't respect setxattr flags.
                    kernel_version = platform.release()
                    m = re.match(r"2\.6\.(\d{1,2})", kernel_version)
                    can = m is None or int(m.group(1)) >= 39
                except OSError:
                    can = False
        finally:
            # Close the mkstemp() descriptor before removing both files.
            os.close(tmp_fp)
            unlink(TESTFN)
            unlink(tmp_name)
    _can_xattr = can
    return can
def skip_unless_xattr(test):
    """Skip decorator for tests that require functional extended attributes"""
    ok = can_xattr()
    msg = "no non-broken extended attribute support"
    # Return the test untouched when xattrs work; otherwise wrap in skip.
    return test if ok else unittest.skip(msg)(test)
def patch(test_instance, object_to_patch, attr_name, new_value):
    """Override 'object_to_patch'.'attr_name' with 'new_value'.

    Also registers a cleanup on *test_instance* (via addCleanup) that
    restores the previous state: the old value if the attribute was
    defined directly on the object, otherwise the attribute is deleted
    again.  'attr_name' must be a valid attribute of 'object_to_patch'.
    """
    # Raises AttributeError up front if the attribute does not exist.
    getattr(object_to_patch, attr_name)

    # Record whether the attribute lives in the object's own __dict__
    # (as opposed to being inherited/computed), and snapshot its value.
    try:
        old_value = object_to_patch.__dict__[attr_name]
    except (AttributeError, KeyError):
        attr_is_local = False
        old_value = getattr(object_to_patch, attr_name, None)
    else:
        attr_is_local = True

    def cleanup():
        # Restore a locally-defined value, or remove what we added.
        if attr_is_local:
            setattr(object_to_patch, attr_name, old_value)
        else:
            delattr(object_to_patch, attr_name)

    test_instance.addCleanup(cleanup)

    # actually override the attribute
    setattr(object_to_patch, attr_name, new_value)
|
ston380/account-financial-reporting | refs/heads/8.0 | mis_builder/report/__init__.py | 9 | # -*- encoding: utf-8 -*-
##############################################################################
#
# mis_builder module for Odoo, Management Information System Builder
# Copyright (C) 2014-2015 ACSONE SA/NV (<http://acsone.eu>)
#
# This file is a part of mis_builder
#
# mis_builder is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License v3 or later
# as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# mis_builder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License v3 or later for more details.
#
# You should have received a copy of the GNU Affero General Public License
# v3 or later along with this program.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
try:
from . import mis_builder_xls
except ImportError:
pass # this module is not installed
from . import report_mis_report_instance
|
zhaozengguang/opencog | refs/heads/master | opencog/python/spatiotemporal/temporal_events/composition/non_linear_least_squares.py | 1 | from math import fabs
import numpy
from lmfit import minimize, Parameters
__author__ = 'keyvan'
_keys = ['beginning', 'ending']
class DecompositionFitter(object):
    """Fit pairwise endpoint-comparison parameters to an observed
    13-component temporal-relation vector.

    For each (beginning/ending, beginning/ending) endpoint pair of two
    temporal distributions, 'before' and 'similarity' are free parameters
    in [0, 1] and 'after' is constrained to 1 - before.  lmfit's
    non-linear least squares minimizes the residual between the model
    vector built from these parameters and the observed relations.
    """
    # All four (dist_1 endpoint, dist_2 endpoint) pairings.
    combinations = [(dist_1_key, dist_2_key) for dist_1_key in _keys for dist_2_key in _keys]

    def __init__(self, relations):
        # Observed 13-component relation vector to be decomposed.
        self.data = relations.to_vector()
        self.params = Parameters()
        # Tuples are (name, value, vary, min, max, expr); each 'after' is
        # tied to its 'before' via the expression "1 - before...".
        self.params.add_many(
            ('before_dist_1_beginning_dist_2_beginning', 0.5, True, 0.0, 1.0, None),
            ('similarity_dist_1_beginning_dist_2_beginning', 0.5, True, 0.0, 1.0, None),
            ('after_dist_1_beginning_dist_2_beginning', None, False, None, None,
             '1 - before_dist_1_beginning_dist_2_beginning'),
            ('before_dist_1_beginning_dist_2_ending', 0.5, True, 0.0, 1.0, None),
            ('similarity_dist_1_beginning_dist_2_ending', 0.5, True, 0.0, 1.0, None),
            ('after_dist_1_beginning_dist_2_ending', None, False, None, None,
             '1 - before_dist_1_beginning_dist_2_ending'),
            ('before_dist_1_ending_dist_2_beginning', 0.5, True, 0.0, 1.0, None),
            ('similarity_dist_1_ending_dist_2_beginning', 0.5, True, 0.0, 1.0, None),
            ('after_dist_1_ending_dist_2_beginning', None, False, None, None,
             '1 - before_dist_1_ending_dist_2_beginning'),
            ('before_dist_1_ending_dist_2_ending', 0.5, True, 0.0, 1.0, None),
            ('similarity_dist_1_ending_dist_2_ending', 0.5, True, 0.0, 1.0, None),
            ('after_dist_1_ending_dist_2_ending', None, False, None, None,
             '1 - before_dist_1_ending_dist_2_ending')
        )

        minimize(self.fitness, self.params)

        # Round fitted values for stable downstream comparisons.
        for param_key in self.params:
            self.params[param_key].value = round(self.params[param_key].value, 6)

    def fitness(self, params):
        """Residual function for lmfit: model vector minus observed data."""
        model = numpy.zeros(13)

        before_dist_1_beginning_dist_2_beginning = params['before_dist_1_beginning_dist_2_beginning'].value
        similarity_dist_1_beginning_dist_2_beginning = params['similarity_dist_1_beginning_dist_2_beginning'].value
        after_dist_1_beginning_dist_2_beginning = params['after_dist_1_beginning_dist_2_beginning'].value
        before_dist_1_beginning_dist_2_ending = params['before_dist_1_beginning_dist_2_ending'].value
        similarity_dist_1_beginning_dist_2_ending = params['similarity_dist_1_beginning_dist_2_ending'].value
        after_dist_1_beginning_dist_2_ending = params['after_dist_1_beginning_dist_2_ending'].value
        before_dist_1_ending_dist_2_beginning = params['before_dist_1_ending_dist_2_beginning'].value
        similarity_dist_1_ending_dist_2_beginning = params['similarity_dist_1_ending_dist_2_beginning'].value
        after_dist_1_ending_dist_2_beginning = params['after_dist_1_ending_dist_2_beginning'].value
        before_dist_1_ending_dist_2_ending = params['before_dist_1_ending_dist_2_ending'].value
        similarity_dist_1_ending_dist_2_ending = params['similarity_dist_1_ending_dist_2_ending'].value
        after_dist_1_ending_dist_2_ending = params['after_dist_1_ending_dist_2_ending'].value

        # 'same' strength shrinks as before/after become more asymmetric.
        same_dist_1_beginning_dist_2_beginning = similarity_dist_1_beginning_dist_2_beginning * (
            1 - fabs(before_dist_1_beginning_dist_2_beginning - after_dist_1_beginning_dist_2_beginning)
        )
        same_dist_1_beginning_dist_2_ending = similarity_dist_1_beginning_dist_2_ending * (
            1 - fabs(before_dist_1_beginning_dist_2_ending - after_dist_1_beginning_dist_2_ending)
        )
        same_dist_1_ending_dist_2_beginning = similarity_dist_1_ending_dist_2_beginning * (
            1 - fabs(before_dist_1_ending_dist_2_beginning - after_dist_1_ending_dist_2_beginning)
        )
        same_dist_1_ending_dist_2_ending = similarity_dist_1_ending_dist_2_ending * (
            1 - fabs(before_dist_1_ending_dist_2_ending - after_dist_1_ending_dist_2_ending)
        )

        # Each of the 13 relation components is a product of the four
        # pairwise endpoint comparisons.
        model[0] = before_dist_1_beginning_dist_2_beginning * before_dist_1_beginning_dist_2_ending * \
            before_dist_1_ending_dist_2_beginning * before_dist_1_ending_dist_2_ending
        model[1] = before_dist_1_beginning_dist_2_beginning * before_dist_1_beginning_dist_2_ending * \
            same_dist_1_ending_dist_2_beginning * before_dist_1_ending_dist_2_ending
        model[2] = before_dist_1_beginning_dist_2_beginning * before_dist_1_beginning_dist_2_ending * \
            after_dist_1_ending_dist_2_beginning * before_dist_1_ending_dist_2_ending
        model[3] = before_dist_1_beginning_dist_2_beginning * before_dist_1_beginning_dist_2_ending * \
            after_dist_1_ending_dist_2_beginning * same_dist_1_ending_dist_2_ending
        model[4] = before_dist_1_beginning_dist_2_beginning * before_dist_1_beginning_dist_2_ending * \
            after_dist_1_ending_dist_2_beginning * after_dist_1_ending_dist_2_ending
        model[5] = same_dist_1_beginning_dist_2_beginning * before_dist_1_beginning_dist_2_ending * \
            after_dist_1_ending_dist_2_beginning * before_dist_1_ending_dist_2_ending
        model[6] = same_dist_1_beginning_dist_2_beginning * before_dist_1_beginning_dist_2_ending * \
            after_dist_1_ending_dist_2_beginning * same_dist_1_ending_dist_2_ending
        model[7] = same_dist_1_beginning_dist_2_beginning * before_dist_1_beginning_dist_2_ending * \
            after_dist_1_ending_dist_2_beginning * after_dist_1_ending_dist_2_ending
        model[8] = after_dist_1_beginning_dist_2_beginning * before_dist_1_beginning_dist_2_ending * \
            after_dist_1_ending_dist_2_beginning * before_dist_1_ending_dist_2_ending
        model[9] = after_dist_1_beginning_dist_2_beginning * before_dist_1_beginning_dist_2_ending * \
            after_dist_1_ending_dist_2_beginning * same_dist_1_ending_dist_2_ending
        model[10] = after_dist_1_beginning_dist_2_beginning * before_dist_1_beginning_dist_2_ending * \
            after_dist_1_ending_dist_2_beginning * after_dist_1_ending_dist_2_ending
        model[11] = after_dist_1_beginning_dist_2_beginning * same_dist_1_beginning_dist_2_ending * \
            after_dist_1_ending_dist_2_beginning * after_dist_1_ending_dist_2_ending
        model[12] = after_dist_1_beginning_dist_2_beginning * after_dist_1_beginning_dist_2_ending * \
            after_dist_1_ending_dist_2_beginning * after_dist_1_ending_dist_2_ending

        return model - self.data

    def compare(self, dist_1_key='beginning', dist_2_key='beginning'):
        """Return the fitted (before, same, after) triple for one endpoint pair."""
        before = self.params['before_dist_1_' + dist_1_key + '_dist_2_' + dist_2_key].value
        after = self.params['after_dist_1_' + dist_1_key + '_dist_2_' + dist_2_key].value
        similarity = self.params['similarity_dist_1_' + dist_1_key + '_dist_2_' + dist_2_key].value
        # before, similarity, after = round(before, 6), round(similarity, 6), round(after, 6)
        same = similarity * (1 - fabs(before - after))
        return before, same, after

    def get_composition_data(self):
        """Flatten (before, same) for all endpoint pairs into one list."""
        data = []
        for key in self.combinations:
            before, same, after = self.compare(*key)
            data.append(before)
            data.append(same)
        return data

    def check(self):
        # Debug helper (Python 2 print statements): show the observed vector
        # against the one reconstructed from the fitted parameters.
        from spatiotemporal.temporal_events import FormulaCreator
        print self.data
        print FormulaCreator(self).calculate_relations().to_vector()
        print
if __name__ == '__main__':
    # Smoke test (Python 2 syntax): fit 50 random event pairs and print the
    # reconstruction error of the estimated relation vectors.
    from spatiotemporal.temporal_events import FormulaCreator
    from spatiotemporal.temporal_events.trapezium import generate_random_events

    for i in xrange(50):
        A, B = generate_random_events(2)
        relations = A * B
        formula = FormulaCreator(DecompositionFitter(relations))
        print relations.to_list()
        relations_estimate = formula.calculate_relations()
        print relations_estimate.to_list()
        # Element-wise residual between observed and reconstructed vectors.
        print relations.to_vector() - relations_estimate.to_vector()
        print
|
ShassAro/ShassAro | refs/heads/master | DockerAdmin/dockerVirtualEnv/lib/python2.7/site-packages/django/conf/locale/sl/__init__.py | 12133432 | |
tsg-/pyeclib | refs/heads/master | test/__init__.py | 12133432 | |
savoirfairelinux/django | refs/heads/master | django/conf/locale/ca/__init__.py | 12133432 | |
Chemcy/vnpy | refs/heads/master | vn.trader/ctaStrategy/language/chinese/__init__.py | 12133432 | |
mbauskar/omnitech-demo-erpnext | refs/heads/develop | erpnext/stock/doctype/item_reorder/__init__.py | 12133432 | |
UFAL-DSG/pjsip | refs/heads/master | tests/pjsua/config_site.py | 59 | # $Id: config_site.py 2237 2008-08-26 12:13:25Z bennylp $
# Specify if host has sound device, or test should be performed using sound device
HAS_SND_DEV = 0
|
tommymcglynn/group-tinder | refs/heads/master | create_fake_people.py | 1 | from web import views
# Seed the database with demo users; None stands in for the usual request
# argument, which this helper does not need here.
views.create_fake_people(None)
|
blitzagency/django-chatterbox | refs/heads/master | chatterbox/tests/collectors/demo.py | 1 | from chatterbox.collectors import Collector
class DemoCollector(Collector):
    """Demonstration Collector: prints the job's API and skips persistence."""

    def action(self, job):
        # Demo behavior: just show which API this job's key belongs to.
        print(job.key.api)

    def post_save(self, job):
        # No post-save processing needed for the demo.
        pass

    def post_delete(self, job):
        # No post-delete processing needed for the demo.
        pass
|
erikng/sal | refs/heads/master | server/migrations/0055_auto_20170822_1155.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-08-22 18:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relabel Machine.os_family choices (the 'Darwin' value now displays
    as 'macOS'); stored values and field shape are unchanged."""

    dependencies = [
        ('server', '0054_auto_20170705_1603'),
    ]

    operations = [
        migrations.AlterField(
            model_name='machine',
            name='os_family',
            field=models.CharField(choices=[(b'Darwin', b'macOS'), (b'Windows', b'Windows'), (b'Linux', b'Linux')], db_index=True, default=b'Darwin', max_length=256, verbose_name=b'OS Family'),
        ),
    ]
|
roadmapper/ansible | refs/heads/devel | lib/ansible/module_utils/gcp_utils.py | 21 | # Copyright (c), Google Inc, 2017
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
try:
import google.auth
import google.auth.compute_engine
from google.oauth2 import service_account
from google.auth.transport.requests import AuthorizedSession
HAS_GOOGLE_LIBRARIES = True
except ImportError:
HAS_GOOGLE_LIBRARIES = False
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text, to_native
import ast
import os
import json
def navigate_hash(source, path, default=None):
    """Walk nested dicts in *source* following the keys listed in *path*.

    Returns *default* when a key along the way is missing.  An empty or
    None *source* yields None (not *default*), matching the original
    behavior callers rely on.
    """
    if not source:
        return None

    key, remainder = path[0], path[1:]
    if key not in source:
        return default

    value = source[key]
    return navigate_hash(value, remainder, default) if remainder else value
class GcpRequestException(Exception):
    """Raised when an HTTP request to a GCP API fails."""
    pass
def remove_nones_from_dict(obj):
    """Return a copy of *obj* without None, empty-dict or empty-list values.

    Returns None instead of an empty dict when nothing survives, because
    the GCP API may complain about blank dictionaries.
    """
    filtered = {key: value for key, value in obj.items()
                if value is not None and value != {} and value != []}
    return filtered or None
# Handles the replacement of dicts with values -> the needed value for GCP API
def replace_resource_dict(item, value):
    """Reduce a resource dict to the field named *value* for the GCP API.

    Lists are mapped element-wise; falsy items (None, {}, '') are
    returned unchanged.
    """
    if isinstance(item, list):
        return [replace_resource_dict(entry, value) for entry in item]
    if not item:
        return item
    return item.get(value)
# Handles all authentication and HTTP sessions for GCP API calls.
class GcpSession(object):
    """Authenticated HTTP session handling for GCP API calls.

    Validates the module's auth-related parameters, builds google-auth
    credentials for the selected ``auth_kind`` and exposes requests-style
    verb helpers whose failures are reported via ``module.fail_json()``.
    """
    def __init__(self, module, product):
        # module:  AnsibleModule whose params drive authentication
        # product: short product name embedded in the User-Agent header
        self.module = module
        self.product = product
        self._validate()
    def get(self, url, body=None, **kwargs):
        """
        This method should be avoided in favor of full_get
        """
        kwargs.update({'json': body})
        return self.full_get(url, **kwargs)
    def post(self, url, body=None, headers=None, **kwargs):
        """
        This method should be avoided in favor of full_post
        """
        kwargs.update({'json': body, 'headers': headers})
        return self.full_post(url, **kwargs)
    def post_contents(self, url, file_contents=None, headers=None, **kwargs):
        """
        This method should be avoided in favor of full_post
        """
        kwargs.update({'data': file_contents, 'headers': headers})
        return self.full_post(url, **kwargs)
    def delete(self, url, body=None, **kwargs):
        """
        This method should be avoided in favor of full_delete
        """
        # Accepts **kwargs for consistency with get/post/patch; existing
        # two-argument calls are unaffected (backward compatible).
        kwargs.update({'json': body})
        return self.full_delete(url, **kwargs)
    def put(self, url, body=None, **kwargs):
        """
        This method should be avoided in favor of full_put
        """
        kwargs.update({'json': body})
        return self.full_put(url, **kwargs)
    def patch(self, url, body=None, **kwargs):
        """
        This method should be avoided in favor of full_patch
        """
        kwargs.update({'json': body})
        return self.full_patch(url, **kwargs)
    def list(self, url, callback, params=None, array_name='items',
             pageToken='nextPageToken', **kwargs):
        """
        Call a GCP list API, following pagination; return the combined items.
        `callback` is a `return_if_object(module, response)`-style function
        that decodes one response into a dictionary. Modules handle the
        decode + error processing differently, so it is deferred to them.
        """
        resp = callback(self.module, self.full_get(url, params, **kwargs))
        items = resp.get(array_name) if resp.get(array_name) else []
        while resp.get(pageToken):
            if params:
                params['pageToken'] = resp.get(pageToken)
            else:
                params = {'pageToken': resp[pageToken]}
            resp = callback(self.module, self.full_get(url, params, **kwargs))
            if resp.get(array_name):
                items = items + resp.get(array_name)
        return items
    # The following methods fully mimic the requests API and should be used.
    def full_get(self, url, params=None, **kwargs):
        kwargs['headers'] = self._set_headers(kwargs.get('headers'))
        try:
            return self.session().get(url, params=params, **kwargs)
        except getattr(requests.exceptions, 'RequestException') as inst:
            # BUGFIX: RequestException has no `.message` attribute on
            # Python 3; stringify via to_native() so the real error text
            # (not an AttributeError) reaches the user.
            self.module.fail_json(msg=to_native(inst))
    def full_post(self, url, data=None, json=None, **kwargs):
        kwargs['headers'] = self._set_headers(kwargs.get('headers'))
        try:
            return self.session().post(url, data=data, json=json, **kwargs)
        except getattr(requests.exceptions, 'RequestException') as inst:
            self.module.fail_json(msg=to_native(inst))
    def full_put(self, url, data=None, **kwargs):
        kwargs['headers'] = self._set_headers(kwargs.get('headers'))
        try:
            return self.session().put(url, data=data, **kwargs)
        except getattr(requests.exceptions, 'RequestException') as inst:
            self.module.fail_json(msg=to_native(inst))
    def full_patch(self, url, data=None, **kwargs):
        kwargs['headers'] = self._set_headers(kwargs.get('headers'))
        try:
            return self.session().patch(url, data=data, **kwargs)
        except getattr(requests.exceptions, 'RequestException') as inst:
            self.module.fail_json(msg=to_native(inst))
    def full_delete(self, url, **kwargs):
        kwargs['headers'] = self._set_headers(kwargs.get('headers'))
        try:
            return self.session().delete(url, **kwargs)
        except getattr(requests.exceptions, 'RequestException') as inst:
            self.module.fail_json(msg=to_native(inst))
    def _set_headers(self, headers):
        """Merge caller-supplied headers over the standard User-Agent header."""
        if headers:
            return self._merge_dictionaries(headers, self._headers())
        return self._headers()
    def session(self):
        """Return a new AuthorizedSession wrapping this module's credentials."""
        return AuthorizedSession(
            self._credentials())
    def _validate(self):
        """fail_json early when libraries or auth parameters are unusable."""
        if not HAS_REQUESTS:
            self.module.fail_json(msg="Please install the requests library")
        if not HAS_GOOGLE_LIBRARIES:
            self.module.fail_json(msg="Please install the google-auth library")
        if self.module.params.get('service_account_email') is not None and self.module.params['auth_kind'] != 'machineaccount':
            self.module.fail_json(
                msg="Service Account Email only works with Machine Account-based authentication"
            )
        if (self.module.params.get('service_account_file') is not None or
                self.module.params.get('service_account_contents') is not None) and self.module.params['auth_kind'] != 'serviceaccount':
            self.module.fail_json(
                msg="Service Account File only works with Service Account-based authentication"
            )
    def _credentials(self):
        """Build google-auth credentials for the configured auth_kind."""
        cred_type = self.module.params['auth_kind']
        if cred_type == 'application':
            # Application Default Credentials (environment/metadata discovery).
            credentials, project_id = google.auth.default(scopes=self.module.params['scopes'])
            return credentials
        elif cred_type == 'serviceaccount' and self.module.params.get('service_account_file'):
            path = os.path.realpath(os.path.expanduser(self.module.params['service_account_file']))
            return service_account.Credentials.from_service_account_file(path).with_scopes(self.module.params['scopes'])
        elif cred_type == 'serviceaccount' and self.module.params.get('service_account_contents'):
            try:
                cred = json.loads(self.module.params.get('service_account_contents'))
            except ValueError:
                # ValueError covers json.JSONDecodeError (its subclass) and,
                # unlike the latter, also exists on Python 2.
                self.module.fail_json(
                    msg="Unable to decode service_account_contents as JSON"
                )
            return service_account.Credentials.from_service_account_info(cred).with_scopes(self.module.params['scopes'])
        elif cred_type == 'machineaccount':
            return google.auth.compute_engine.Credentials(
                self.module.params['service_account_email'])
        else:
            self.module.fail_json(msg="Credential type '%s' not implemented" % cred_type)
    def _headers(self):
        """User-Agent header, optionally tagged with the configured env_type."""
        if self.module.params.get('env_type'):
            return {
                'User-Agent': "Google-Ansible-MM-{0}-{1}".format(self.product, self.module.params.get('env_type'))
            }
        return {
            'User-Agent': "Google-Ansible-MM-{0}".format(self.product)
        }
    def _merge_dictionaries(self, a, b):
        """Return a copy of *a* updated with *b* (b wins on key conflicts)."""
        new = a.copy()
        new.update(b)
        return new
class GcpModule(AnsibleModule):
    """AnsibleModule subclass that injects the shared GCP argument spec.

    Adds the standard project/auth/scopes parameters (each with a GCP_*
    environment-variable fallback) to the caller's argument_spec, and marks
    the three service-account options as mutually exclusive.
    """
    def __init__(self, *args, **kwargs):
        arg_spec = {}
        if 'argument_spec' in kwargs:
            arg_spec = kwargs['argument_spec']
        kwargs['argument_spec'] = self._merge_dictionaries(
            arg_spec,
            dict(
                project=dict(
                    required=False,
                    type='str',
                    fallback=(env_fallback, ['GCP_PROJECT'])),
                auth_kind=dict(
                    required=True,
                    fallback=(env_fallback, ['GCP_AUTH_KIND']),
                    choices=['machineaccount', 'serviceaccount', 'application'],
                    type='str'),
                service_account_email=dict(
                    required=False,
                    fallback=(env_fallback, ['GCP_SERVICE_ACCOUNT_EMAIL']),
                    type='str'),
                service_account_file=dict(
                    required=False,
                    fallback=(env_fallback, ['GCP_SERVICE_ACCOUNT_FILE']),
                    type='path'),
                service_account_contents=dict(
                    required=False,
                    fallback=(env_fallback, ['GCP_SERVICE_ACCOUNT_CONTENTS']),
                    no_log=True,
                    type='jsonarg'),
                scopes=dict(
                    required=False,
                    fallback=(env_fallback, ['GCP_SCOPES']),
                    type='list'),
                env_type=dict(
                    required=False,
                    fallback=(env_fallback, ['GCP_ENV_TYPE']),
                    type='str')
            )
        )
        mutual = []
        if 'mutually_exclusive' in kwargs:
            mutual = kwargs['mutually_exclusive']
        # BUGFIX: the old code assigned the result of list.append() (which is
        # None) to kwargs['mutually_exclusive'], silently discarding every
        # mutual-exclusion constraint. Append first, then assign the list.
        mutual.append(
            ['service_account_email', 'service_account_file', 'service_account_contents']
        )
        kwargs['mutually_exclusive'] = mutual
        AnsibleModule.__init__(self, *args, **kwargs)
    def raise_for_status(self, response):
        """fail_json with the decoded body when *response* is an HTTP error."""
        try:
            response.raise_for_status()
        except getattr(requests.exceptions, 'RequestException'):
            self.fail_json(msg="GCP returned error: %s" % response.json())
    def _merge_dictionaries(self, a, b):
        """Return a copy of *a* updated with *b* (b wins on key conflicts)."""
        new = a.copy()
        new.update(b)
        return new
# This class does difference checking according to a set of GCP-specific rules.
# This will be primarily used for checking dictionaries.
# In an equivalence check, the left-hand dictionary will be the request and
# the right-hand side will be the response.
# Rules:
# Extra keys in response will be ignored.
# Ordering of lists does not matter.
# - exception: lists of dictionaries are
# assumed to be in sorted order.
class GcpRequest(object):
    """Equality/diff helper implementing GCP's request-vs-response rules.

    The left-hand side is treated as the request and the right-hand side as
    the response: extra keys in the response are ignored, list ordering is
    irrelevant (except lists of dicts, which are assumed pre-sorted), and a
    falsy response value never counts as a difference.
    """
    def __init__(self, request):
        self.request = request

    def __eq__(self, other):
        return not self.difference(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def difference(self, response):
        """Return the diff of this request against *response*.

        Used under the hood by __eq__/__ne__, but handy for debugging.
        """
        return self._compare_value(self.request, response.request)

    def _compare_dicts(self, req_dict, resp_dict):
        """Diff two dicts; keys absent or falsy in the response are skipped."""
        raw = {
            key: self._compare_value(req_dict.get(key), resp_dict.get(key))
            for key in req_dict
            if resp_dict.get(key)
        }
        # Keep only the keys that actually differed.
        return {key: diff for key, diff in raw.items() if diff}

    def _compare_lists(self, req_list, resp_list):
        """Diff two lists ignoring order; return request items with no match."""
        # Normalize everything to text first -- Python 2 compares unicode
        # and byte strings unreliably.
        req_items = self._convert_value(req_list)
        resp_items = self._convert_value(resp_list)
        # Each request item must match *some* response item (the request is
        # a subset of the response). Lists are assumed small enough that the
        # quadratic scan is not a performance burden.
        missing = [
            req_item for req_item in req_items
            if not any(not self._compare_value(req_item, resp_item)
                       for resp_item in resp_items)
        ]
        return [item for item in missing if item]

    def _compare_value(self, req_value, resp_value):
        """Diff two values of arbitrary type; None means 'no difference'."""
        # A falsy response value is never treated as a difference.
        if not resp_value:
            return None
        diff = None
        try:
            if isinstance(req_value, list):
                diff = self._compare_lists(req_value, resp_value)
            elif isinstance(req_value, dict):
                diff = self._compare_dicts(req_value, resp_value)
            elif isinstance(req_value, bool):
                diff = self._compare_boolean(req_value, resp_value)
            # Compare as text to sidestep unicode/bytes mismatches.
            elif to_text(req_value) != to_text(resp_value):
                diff = req_value
        except UnicodeError:
            # Unicode conversion failures are hidden rather than crashing
            # Ansible.
            pass
        return diff

    def _compare_boolean(self, req_value, resp_value):
        """Diff a boolean request value against a bool-or-string response."""
        try:
            # Accept either a real bool or its lowercase string form.
            if isinstance(resp_value, bool) and req_value == resp_value:
                return None
            if to_text(resp_value) == ('true' if req_value else 'false'):
                return None
            return resp_value
        except UnicodeError:
            return None

    def _convert_value(self, value):
        """Recursively coerce leaf values to text for stable comparisons."""
        if isinstance(value, list):
            return [self._convert_value(item) for item in value]
        if isinstance(value, dict):
            return {key: self._convert_value(item) for key, item in value.items()}
        return to_text(value)
|
sagangwee/sagangwee.github.io | refs/heads/master | build/pygments/build/lib.linux-i686-2.7/pygments/lexers/c_cpp.py | 72 | # -*- coding: utf-8 -*-
"""
pygments.lexers.c_cpp
~~~~~~~~~~~~~~~~~~~~~
Lexers for C/C++ languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, \
this, inherit, default, words
from pygments.util import get_bool_opt
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['CLexer', 'CppLexer']
class CFamilyLexer(RegexLexer):
    """
    For C family source code. This is used as a base class to avoid repetitious
    definitions.
    """
    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
    #: only one /* */ style comment
    _ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
    # State machine shared by CLexer and CppLexer; subclasses extend the
    # states below via pygments' `inherit` marker. Rule order matters:
    # earlier patterns win.
    tokens = {
        'whitespace': [
            # preprocessor directives: without whitespace
            ('^#if\s+0', Comment.Preproc, 'if0'),
            ('^#', Comment.Preproc, 'macro'),
            # or with whitespace
            ('^(' + _ws1 + r')(#if\s+0)',
             bygroups(using(this), Comment.Preproc), 'if0'),
            ('^(' + _ws1 + ')(#)',
             bygroups(using(this), Comment.Preproc), 'macro'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
        ],
        'statements': [
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
            (r'0[0-7]+[LlUu]*', Number.Oct),
            (r'\d+[LlUu]*', Number.Integer),
            # A '*/' outside a comment is a syntax error.
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.]', Punctuation),
            (words(('auto', 'break', 'case', 'const', 'continue', 'default', 'do',
                    'else', 'enum', 'extern', 'for', 'goto', 'if', 'register',
                    'restricted', 'return', 'sizeof', 'static', 'struct',
                    'switch', 'typedef', 'union', 'volatile', 'while'),
                   suffix=r'\b'), Keyword),
            (r'(bool|int|long|float|short|double|char|unsigned|signed|void|'
             r'[a-z_][a-z0-9_]*_t)\b',
             Keyword.Type),
            (words(('inline', '_inline', '__inline', 'naked', 'restrict',
                    'thread', 'typename'), suffix=r'\b'), Keyword.Reserved),
            # Vector intrinsics
            (r'(__m(128i|128d|128|64))\b', Keyword.Reserved),
            # Microsoft-isms
            (words((
                'asm', 'int8', 'based', 'except', 'int16', 'stdcall', 'cdecl',
                'fastcall', 'int32', 'declspec', 'finally', 'int64', 'try',
                'leave', 'wchar_t', 'w64', 'unaligned', 'raise', 'noop',
                'identifier', 'forceinline', 'assume'),
                prefix=r'__', suffix=r'\b'), Keyword.Reserved),
            (r'(true|false|NULL)\b', Name.Builtin),
            (r'([a-zA-Z_]\w*)(\s*)(:)(?!:)', bygroups(Name.Label, Text, Punctuation)),
            ('[a-zA-Z_]\w*', Name),
        ],
        'root': [
            include('whitespace'),
            # functions
            (r'((?:[\w*\s])+?(?:\s|[*]))' # return arguments
             r'([a-zA-Z_]\w*)' # method name
             r'(\s*\([^;]*?\))' # signature
             r'(' + _ws + r')?(\{)',
             bygroups(using(this), Name.Function, using(this), using(this),
                      Punctuation),
             'function'),
            # function declarations
            (r'((?:[\w*\s])+?(?:\s|[*]))' # return arguments
             r'([a-zA-Z_]\w*)' # method name
             r'(\s*\([^;]*?\))' # signature
             r'(' + _ws + r')?(;)',
             bygroups(using(this), Name.Function, using(this), using(this),
                      Punctuation)),
            default('statement'),
        ],
        'statement': [
            include('whitespace'),
            include('statements'),
            ('[{}]', Punctuation),
            (';', Punctuation, '#pop'),
        ],
        'function': [
            include('whitespace'),
            include('statements'),
            (';', Punctuation),
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
             r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\', String), # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'if0': [
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ]
    }
    # Typedef names promoted to Keyword.Type by get_tokens_unprocessed()
    # when the matching option is enabled (see __init__).
    stdlib_types = ['size_t', 'ssize_t', 'off_t', 'wchar_t', 'ptrdiff_t',
                    'sig_atomic_t', 'fpos_t', 'clock_t', 'time_t', 'va_list',
                    'jmp_buf', 'FILE', 'DIR', 'div_t', 'ldiv_t', 'mbstate_t',
                    'wctrans_t', 'wint_t', 'wctype_t']
    c99_types = ['_Bool', '_Complex', 'int8_t', 'int16_t', 'int32_t', 'int64_t',
                 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', 'int_least8_t',
                 'int_least16_t', 'int_least32_t', 'int_least64_t',
                 'uint_least8_t', 'uint_least16_t', 'uint_least32_t',
                 'uint_least64_t', 'int_fast8_t', 'int_fast16_t', 'int_fast32_t',
                 'int_fast64_t', 'uint_fast8_t', 'uint_fast16_t', 'uint_fast32_t',
                 'uint_fast64_t', 'intptr_t', 'uintptr_t', 'intmax_t',
                 'uintmax_t']
    def __init__(self, **options):
        # Options 'stdlibhighlighting' and 'c99highlighting' (both default
        # True) control the typedef re-tagging below.
        self.stdlibhighlighting = get_bool_opt(options, 'stdlibhighlighting', True)
        self.c99highlighting = get_bool_opt(options, 'c99highlighting', True)
        RegexLexer.__init__(self, **options)
    def get_tokens_unprocessed(self, text):
        # Post-process the regex lexer's stream: promote known stdlib/C99
        # typedef names from plain Name tokens to Keyword.Type.
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                if self.stdlibhighlighting and value in self.stdlib_types:
                    token = Keyword.Type
                elif self.c99highlighting and value in self.c99_types:
                    token = Keyword.Type
            yield index, token, value
class CLexer(CFamilyLexer):
    """
    For C source code with preprocessor directives.
    """
    name = 'C'
    aliases = ['c']
    filenames = ['*.c', '*.h', '*.idc']
    mimetypes = ['text/x-chdr', 'text/x-csrc']
    priority = 0.1
    def analyse_text(text):
        # NOTE(review): defined without 'self'; pygments appears to treat
        # analyse_text as a static content probe -- confirm against the
        # Lexer metaclass. Weak (0.1) scores let CppLexer win shared *.h.
        if re.search('^\s*#include [<"]', text, re.MULTILINE):
            return 0.1
        if re.search('^\s*#ifdef ', text, re.MULTILINE):
            return 0.1
class CppLexer(CFamilyLexer):
    """
    For C++ source code with preprocessor directives.
    """
    name = 'C++'
    aliases = ['cpp', 'c++']
    filenames = ['*.cpp', '*.hpp', '*.c++', '*.h++',
                 '*.cc', '*.hh', '*.cxx', '*.hxx',
                 '*.C', '*.H', '*.cp', '*.CPP']
    mimetypes = ['text/x-c++hdr', 'text/x-c++src']
    priority = 0.1
    # Extends the base lexer's states; `inherit` splices in the
    # corresponding CFamilyLexer rules at that position.
    tokens = {
        'statements': [
            (words((
                'asm', 'catch', 'const_cast', 'delete', 'dynamic_cast', 'explicit',
                'export', 'friend', 'mutable', 'namespace', 'new', 'operator',
                'private', 'protected', 'public', 'reinterpret_cast',
                'restrict', 'static_cast', 'template', 'this', 'throw', 'throws',
                'typeid', 'typename', 'using', 'virtual',
                'constexpr', 'nullptr', 'decltype', 'thread_local',
                'alignas', 'alignof', 'static_assert', 'noexcept', 'override',
                'final'), suffix=r'\b'), Keyword),
            (r'char(16_t|32_t)\b', Keyword.Type),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            inherit,
        ],
        'root': [
            inherit,
            # C++ Microsoft-isms
            (words(('virtual_inheritance', 'uuidof', 'super', 'single_inheritance',
                    'multiple_inheritance', 'interface', 'event'),
                   prefix=r'__', suffix=r'\b'), Keyword.Reserved),
            # Offload C++ extensions, http://offload.codeplay.com/
            (r'__(offload|blockingoffload|outer)\b', Keyword.Pseudo),
        ],
        'classname': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            # template specification
            (r'\s*(?=>)', Text, '#pop'),
        ],
    }
    def analyse_text(text):
        # Stronger signals than CLexer's so C++ wins on ambiguous input.
        if re.search('#include <[a-z]+>', text):
            return 0.2
        if re.search('using namespace ', text):
            return 0.4
|
bvisness/the-blue-alliance | refs/heads/master | datafeeds/usfirst_team_details_parser.py | 8 | import logging
import re
# for db.link
from google.appengine.ext import db
from BeautifulSoup import BeautifulSoup
from datafeeds.parser_base import ParserBase
class UsfirstTeamDetailsParser(ParserBase):
    """
    Parses a team detail page from USFIRST's website into a team dict.
    (Python 2 source: uses ``except X, e`` syntax and ``unicode``.)
    """
    @classmethod
    def parse(self, html):  # NOTE: classmethod, so 'self' is actually the class
        """
        Parse the information table on USFIRSTs site to extract relevant team
        information. Return a dictionary.
        """
        # page_titles look like this:
        # Team Number <NUM> - "<NICK>"
        team_num_re = r'Team Number ([0-9]+) \-'
        team_nick_re = r'"(.*)\"'
        # team addresses look like this:
        # <locality>, <region> <random string can have spaces> <country>
        team_address_re = r'(.*?), ([^ ]*) *.* (.*)'
        team = dict()
        soup = BeautifulSoup(html,
                             convertEntities=BeautifulSoup.HTML_ENTITIES)
        page_title = soup.find('h1', {'id': 'thepagetitle'}).text
        try:
            team['team_number'] = int(re.search(team_num_re, page_title).group(1).strip())
        except AttributeError, details:
            # re.search returned None -- page layout changed or no team number.
            logging.warning("Team number could not be parsed: {}".format(details))
            return None, False
        # Collapse runs of spaces in the nickname.
        team['nickname'] = re.sub(' +', ' ', unicode(re.search(team_nick_re, page_title).group(1).strip()))
        full_address = unicode(soup.find('div', {'class': 'team-address'}).find('div', {'class': 'content'}).text)
        match = re.match(team_address_re, full_address)
        if match:
            locality, region, country = match.group(1), match.group(2), match.group(3)
            team['address'] = '%s, %s, %s' % (locality, region, country)
        team['name'] = unicode(soup.find('div', {'class': 'team-name'}).text)
        try:
            website_str = re.sub(r'^/|/$', '', unicode(soup.find('div', {'class': 'team-website'}).find('a')['href'])) # strip starting and trailing slashes
            if not (website_str.startswith('http://') or website_str.startswith('https://')):
                website_str = 'http://%s' % website_str
            team['website'] = db.Link(website_str)
        except Exception, details:
            # Best-effort: a missing/invalid website must not abort the parse.
            logging.info("Team website is invalid for team %s." % team['team_number'])
            logging.info(details)
        self._html_unescape_items(team)
        # NOTE(review): second tuple element looks like a 'more results'
        # flag (always False here) -- confirm against the caller.
        return team, False
|
bskinn/opan | refs/heads/master | opan/utils/base.py | 1 | #-------------------------------------------------------------------------------
# Name: utils
# Purpose: Module containing utility functions for Open Anharmonic
#
# Author: Brian Skinn
# bskinn@alum.mit.edu
#
# Created: 15 Aug 2014
# Copyright: (c) Brian Skinn 2016
# License: The MIT License; see "license.txt" for full license terms
# and contributor agreement.
#
# This file is part of opan (Open Anharmonic), a system for automated
# computation of anharmonic properties of molecular systems via wrapper
# calls to computational/quantum chemical software packages.
#
# http://www.github.com/bskinn/opan
#
#-------------------------------------------------------------------------------
"""General purpose utility functions for Open Anharmonic.
*This module docstring is not used in the Sphinx docs.*
check_geom -- Confirm two OpenBabel geometries (atom types and
coordinates) match to within a specified tolerance
delta_fxn -- Generalized Kronecker delta function
iterable -- Test whether an object is iterable
make_timestamp -- Construct a string time-elapsed timestamp in h/m/s format
pack_tups -- Pack an arbitrary combination of iterables and non-
iterables into a list of tuples
safe_cast -- Robustified casting with a post-check to confirm the cast
actually resulted in the proper type
template_subst -- Perform a field-based substitution into a string template
"""
# Imports
from ..const import DEF as _DEF
from .decorate import arraysqueeze as _arraysqueeze
def pack_tups(*args):
    """ Pack an arbitrary mix of iterables and non-iterables into tuples.

    Non-iterable arguments -- and bare strings, which are deliberately
    treated as non-iterable -- are repeated across every output tuple;
    iterable arguments are spread element-wise. All iterables must share
    one common length or a ValueError is raised. Argument order is
    preserved within each tuple, and no structural conversion of the
    arguments is attempted. If nothing is iterable, a list holding one
    tuple of the arguments is returned.
    """
    # Imports
    import numpy as np
    # Debug flag
    _DEBUG = False
    # Marker value for non-iterable items (must stay negative; the
    # max() logic below relies on it)
    NOT_ITER = -1
    if _DEBUG:  # pragma: no cover
        print("args = {0}".format(args))
    class StrNoIter(str):
        """ Non-iterable subclass of |str|. """
        def __iter__(self):
            raise NotImplementedError("Non-iterable string")
    # Shield bare strings so they count as non-iterable below
    screened = [StrNoIter(a) if isinstance(a, str) else a for a in args]
    # Length of each iterable, or the NOT_ITER marker
    lengths = [len(a) if iterable(a) else NOT_ITER for a in screened]
    tup_count = max(lengths)
    # Every iterable must match the common length
    if any(l not in (NOT_ITER, tup_count) for l in lengths):
        raise ValueError("All iterable items must be of equal length")
    # All non-iterable: single tuple of the original arguments
    if tup_count == NOT_ITER:
        return [args]
    # Expand the non-iterables out to full length and zip everything up
    tups = list(zip(*[np.repeat(a, tup_count) if l == NOT_ITER else a
                      for (a, l) in zip(screened, lengths)]))
    if _DEBUG:  # pragma: no cover
        print("tups = {0}".format(tups))
    return tups
## end def pack_tups
def delta_fxn(a, b):
    """Kronecker delta for objects `a` and `b`.

    Parameters
    ----------
    a :
        First object
    b :
        Second object

    Returns
    -------
    delta
        |int| --
        1 when the operands compare equal under Python ``==``, else 0
    """
    matched = (a == b)
    return 1 if matched else 0
## end def delta_fxn
def safe_cast(invar, totype):
    """Cast `invar` to `totype`, verifying the result's type afterward.

    Exceptions raised by the cast itself propagate unhandled; a
    :exc:`~exceptions.TypeError` is raised if the cast succeeds but
    yields something that is not an instance of `totype`.

    Parameters
    ----------
    invar
        (arbitrary) -- Value to be typecast.
    totype
        |type| -- Type to which `invar` is to be cast.

    Returns
    -------
    outvar
        `type 'totype'` -- Typecast version of `invar`

    Raises
    ------
    ~exceptions.TypeError
        If result of typecast is not of type `totype`
    """
    result = totype(invar)
    # Post-cast sanity check: guards against constructors that return
    # something other than an instance of their own type.
    if not isinstance(result, totype):
        raise TypeError("Result of cast to '{0}' is '{1}'"
                        .format(totype, type(result)))
    return result
## end def safe_cast
def make_timestamp(el_time):
    """ Convert a time interval in seconds to an "#h #m #s" string.

    Intervals greater than 24 h are unproblematic -- the hour count
    simply keeps growing.

    Parameters
    ----------
    el_time
        |int| or |float| --
        Time interval in seconds to be converted to h/m/s format

    Returns
    -------
    stamp
        |str| -- String timestamp in #h #m #s format
    """
    # divmod peels off whole hours, then whole minutes from the remainder
    hrs, rem = divmod(el_time, 3600.0)
    mins, secs = divmod(rem, 60.0)
    return "{0}h {1}m {2}s".format(int(hrs), int(mins), int(secs))
## end def make_timestamp
# NOTE(review): _arraysqueeze presumably coerces positional args 0-3 to
# squeezed 1-D numpy arrays -- confirm in opan.utils.decorate.
@_arraysqueeze(0,1,2,3)
def check_geom(c1, a1, c2, a2, tol=_DEF.XYZ_COORD_MATCH_TOL):
    """ Check for consistency of two geometries and atom symbol lists
    Cartesian coordinates are considered consistent with the input
    coords if each component matches to within `tol`. If coords or
    atoms vectors are passed that are of mismatched lengths, a
    |False| value is returned.
    Both coords vectors must be three times the length of the atoms vectors
    or a :exc:`~exceptions.ValueError` is raised.
    Parameters
    ----------
    c1
        length-3N |npfloat_| --
        Vector of first set of stacked 'lab-frame' Cartesian coordinates
    a1
        length-N |str| or |int| --
        Vector of first set of atom symbols or atomic numbers
    c2
        length-3N |npfloat_| --
        Vector of second set of stacked 'lab-frame' Cartesian coordinates
    a2
        length-N |str| or |int| --
        Vector of second set of atom symbols or atomic numbers
    tol
        |float|, optional --
        Tolerance for acceptable deviation of each geometry coordinate
        from that in the reference instance to still be considered
        matching. Default value is specified by
        :attr:`opan.const.DEF.XYZ_COORD_MATCH_TOL`)
    Returns
    -------
    match
        |bool| --
        Whether input coords and atoms match (|True|) or
        not (|False|)
    fail_type
        :class:`~opan.const.EnumCheckGeomMismatch` or |None|
        -- Type of check failure
        If `match` == |True|:
            Returns as |None|
        If `match` == |False|:
            An :class:`~opan.const.EnumCheckGeomMismatch` value
            indicating the reason for the failed match:
            :attr:`~opan.const.EnumCheckGeomMismatch.DIMENSION`
             -- Mismatch in geometry size (number of atoms)
            :attr:`~opan.const.EnumCheckGeomMismatch.COORDS`
             -- Mismatch in one or more coordinates
            :attr:`~opan.const.EnumCheckGeomMismatch.ATOMS`
             -- Mismatch in one or more atoms
    fail_loc
        length-3N |bool| or length-N |bool| or |None| --
        Mismatched elements
        If `match` == |True|:
            Returns as |None|
        If `match` == |False|:
            For "array-level" problems such as a dimension mismatch, a
            |None| value is returned.
            For "element-level" problems, a vector is returned
            indicating positions of mismatch in either `coords` or `atoms`,
            depending on the value of `fail_type`.
            |True| elements indicate **MATCHING** values
            |False| elements mark **MISMATCHES**
    Raises
    ------
    ~exceptions.ValueError
        If a pair of coords & atoms array lengths is inconsistent:
        .. code-block:: python
            if len(c1) != 3 * len(a1) or len(c2) != 3 * len(a2):
                raise ValueError(...)
    """
    # Import(s)
    from ..const import atom_num
    import numpy as np
    from ..const import EnumCheckGeomMismatch as ECGM
    # Initialize return value to success condition
    match = True
    #** Check coords for suitable shape. Assume 1-D np.arrays.
    if not len(c1.shape) == 1:
        # Cannot coerce to vector; complain.
        raise ValueError(("'c1' is not a vector."))
    ## end if
    if not len(c2.shape) == 1:
        # Cannot coerce to vector; complain.
        raise ValueError(("'c2' is not a vector."))
    ## end if
    #** Check atoms for suitable shape. Assume lists of strings, so
    #  convert to np.array to check.
    if not len(a1.shape) == 1:
        # Not a vector; complain
        raise ValueError(("'a1' is not a simple list."))
    ## end if
    if not len(a2.shape) == 1:
        # Not a vector; complain.
        raise ValueError(("'a2' is not a simple list."))
    ## end if
    #** Confirm proper lengths of coords vs atoms
    if not c1.shape[0] == 3 * a1.shape[0]:
        raise ValueError("len(c1) != 3*len(a1)")
    ## end if
    if not c2.shape[0] == 3 * a2.shape[0]:
        raise ValueError("len(c2) != 3*len(a2)")
    ## end if
    #** Confirm matching lengths of coords and atoms w/corresponding
    #  objects among the two geometries
    if not c1.shape[0] == c2.shape[0]:
        match = False
        fail_type = ECGM.DIMENSION
        return match, fail_type, None
    ## end if
    #** Element-wise check for geometry match to within 'tol'
    # fail_loc is True where components MATCH (within tol), False at mismatches.
    fail_loc = np.less_equal(np.abs(np.subtract(c1,c2)), tol)
    if sum(fail_loc) != c2.shape[0]:
        # Count of matching coordinates should equal the number of
        #  coordinates. If not, complain with 'coord_mismatch' fail type.
        match = False
        fail_type = ECGM.COORDS
        return match, fail_type, fail_loc
    ## end if
    #** Element-wise check for atoms match. Quietly convert both input and
    #  instance atom arrays to atom_nums to allow np.equals comparison.
    if np.issubdtype(a1.dtype, np.dtype('str')):
        # Presume atomic symbol data and attempt conversion
        a1 = np.array([atom_num[e] for e in a1])
    ## end if
    if np.issubdtype(a2.dtype, np.dtype('str')):
        # Presume atomic symbol data and attempt conversion
        a2 = np.array([atom_num[e] for e in a2])
    ## end if
    fail_loc = np.equal(a1, a2)
    #** Perform the test to ensure all atoms match.
    if sum(fail_loc) != a2.shape[0]:
        # Count of matching atoms should equal number of atoms. If not,
        #  complain with the 'atom_mismatch' fail type.
        match = False
        fail_type = ECGM.ATOMS
        return match, fail_type, fail_loc
    #** If reached here, all tests passed; return success.
    return match, None, None
## end def check_geom
def template_subst(template, subs, delims=('<', '>')):
    """ Perform substitution of content into tagged string.

    Replaces each delimited tag in `template` with the corresponding
    value from `subs`. A key ``"ABC"`` matches the tag ``<ABC>`` under
    the default delimiters; e.g., the dict ``{"ABC": "text"}`` converts
    ``The <ABC> is working`` into ``The text is working``.

    Substitutions occur in the iteration order of `subs`, so recursive
    substitution as parsing proceeds is feasible when an
    :class:`~collections.OrderedDict` is supplied with its key/value
    pairs added in the proper order.

    The open and close tag markers are drawn from elements zero and one
    of `delims` (e.g., pass ``("{|", "|}")`` to substitute tags of the
    form **{\|TAG\|}**); any further elements are ignored. No checking
    is performed for whether the delimiters are "sensible" or not, and
    no syntax validation of the result is attempted.

    Parameters
    ----------
    template
        |str| --
        Text containing delimited tags to be replaced.
    subs
        |dict| of |str| --
        Each item's key and value are a tag name and the content to be
        substituted for that tag.
    delims
        iterable of |str| --
        Open (element zero) and close (element one) tag markers.

    Returns
    -------
    subst_text
        |str| --
        Text generated from the template with all tag substitutions
        performed.
    """

    # Hoist the delimiter lookups; indexing (rather than unpacking)
    # preserves the "extra elements are ignored" contract.
    open_mark = delims[0]
    close_mark = delims[1]

    # Replace each tag with its content, in subs iteration order.
    result = template
    for tag, content in subs.items():
        result = result.replace(open_mark + tag + close_mark, content)
    ## next tag

    return result

## end def template_subst
def iterable(y):
    """Report whether an object supports iteration.

    Adapted directly from NumPy ~= 1.10 at commit `46d2e83
    <https://github.com/numpy/numpy/tree/
    46d2e8356760e7549d0c80da9fe232177924183c/numpy/lib/
    function_base.py#L48-L76>`__.

    The test simply attempts :func:`iter` on the object and reports
    whether it succeeds.

    Parameters
    ----------
    y
        (arbitrary) -- Object to be tested.

    Returns
    -------
    test
        |bool| --
        |True| if :func:`iter` accepts `y`; |False| if it raises.

    Examples
    --------
    >>> opan.utils.iterable([1, 2, 3])
    True
    >>> opan.utils.iterable(2)
    False
    """
    try:
        iter(y)
    except Exception:
        return False
    else:
        return True

## end def iterable
def assert_npfloatarray(obj, varname, desc, exc, tc, errsrc):
""" Assert a value is an |nparray| of NumPy floats.
Pass |None| to `varname` if `obj` itself is to be checked.
Otherwise, `varname` is the string name of the attribute of `obj` to
check. In either case, `desc` is a string description of the
object to be checked, for use in raising of exceptions.
Raises the exception `exc` with typecode `tc` if the indicated
object is determined not to be an |nparray|, with a NumPy float dtype.
Intended primarily to serve as an early check for
proper implementation of subclasses of
:class:`~opan.grad.SuperOpanGrad` and
:class:`~opan.hess.SuperOpanHess`. Early type-checking of key
attributes will hopefully avoid confusing bugs downstream.
Parameters
----------
obj
(arbitrary) --
Object to be checked, or object with attribute to be checked.
varname
|str| or |None| --
Name of the attribute of `obj` to be type-checked. |None|
indicates to check `obj` itself.
desc
|str| --
Description of the object being checked to be used in any
raised exceptions.
exc
Subclass of :class:`~opan.error.OpanError` to be raised on
a failed typecheck.
tc
Typecode of `exc` to be raised on a failed typecheck.
errsrc
|str| --
String description of the source of the data leading to a
failed typecheck.
"""
# Imports
import numpy as np
# Check for whether member or object is to be checked
if varname is None:
var = obj
else:
# Try to get the variable to be typechecked
try:
var = getattr(obj, varname)
except AttributeError:
raise exc(tc, "Attribute '{0}' not defined in '{1}'"
.format(varname, obj), errsrc)
## end try
## end if
# Try to pull the np dtype off of it
try:
dt = var.dtype
except AttributeError:
raise exc(tc, "'{0}' is not an np.array (lacks a 'dtype' member)"
.format(desc), errsrc)
else:
if not var.shape:
raise exc(tc, "'{0}' is not an np.array ('len(shape)' < 1)"
.format(desc), errsrc)
## end try
# Confirm dtype inherits from np.float
if not np.issubdtype(dt, np.float):
raise exc(tc, "'{0}' is not an np.array of np.float".format(desc),
errsrc)
## end if
## end def assert_npfloatarray
# Entry-point guard: this module is a pure utility library and performs
# no useful work when executed directly.
if __name__ == '__main__': # pragma: no cover
    print("Module not executable.")
|
mcepl/youtube-dl | refs/heads/master | youtube_dl/extractor/oktoberfesttv.py | 168 | # encoding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class OktoberfestTVIE(InfoExtractor):
    """Extractor for live webcam streams hosted on oktoberfest-tv.de."""

    _VALID_URL = r'https?://www\.oktoberfest-tv\.de/[^/]+/[^/]+/video/(?P<id>[^/?#]+)'

    _TEST = {
        'url': 'http://www.oktoberfest-tv.de/de/kameras/video/hb-zelt',
        'info_dict': {
            'id': 'hb-zelt',
            'ext': 'mp4',
            'title': 're:^Live-Kamera: Hofbräuzelt [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'thumbnail': 're:^https?://.*\.jpg$',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The player config embeds an RTMP connection base and a clip
        # path separately; the playable URL is their concatenation.
        rtmp_base = self._search_regex(
            r"netConnectionUrl:\s*'([^']+)'", webpage, 'rtmp base')
        clip_path = self._search_regex(
            r"clip:\s*\{\s*url:\s*'([^']+)'", webpage, 'clip')
        stream_url = rtmp_base + clip_path

        thumbnail = self._search_regex(
            r"canvas:\s*\{\s*backgroundImage:\s*'url\(([^)]+)\)'", webpage,
            'thumbnail', fatal=False)
        title = self._live_title(self._html_search_regex(
            r'<h1><strong>.*?</strong>(.*?)</h1>', webpage, 'title'))

        return {
            'id': video_id,
            'title': title,
            'url': stream_url,
            'ext': 'mp4',
            'is_live': True,
            'thumbnail': thumbnail,
        }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.