code
stringlengths
2
1.05M
repo_name
stringlengths
5
104
path
stringlengths
4
251
language
stringclasses
1 value
license
stringclasses
15 values
size
int32
2
1.05M
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
v2 Neutron Plug-in API Quark Implementation
"""
from neutron.extensions import securitygroup as sg_ext
from neutron import neutron_plugin_base_v2
from neutron import quota
from oslo.config import cfg
from oslo_log import log as logging

from quark.api import extensions
from quark import ip_availability
from quark.plugin_modules import floating_ips
from quark.plugin_modules import ip_addresses
from quark.plugin_modules import ip_policies
from quark.plugin_modules import mac_address_ranges
from quark.plugin_modules import networks
from quark.plugin_modules import ports
from quark.plugin_modules import router
from quark.plugin_modules import routes
from quark.plugin_modules import security_groups
from quark.plugin_modules import subnets

LOG = logging.getLogger(__name__)
CONF = cfg.CONF

# Quark-specific countable resources registered with Neutron's quota
# engine below.  Each BaseResource pairs a resource name with the config
# option (defined in quark_quota_opts) that caps it.
quark_resources = [
    quota.BaseResource('alloc_pools_per_subnet',
                       'quota_alloc_pools_per_subnet'),
    quota.BaseResource('dns_nameservers_per_subnet',
                       'quota_dns_nameservers_per_subnet'),
    quota.BaseResource('ports_per_network',
                       'quota_ports_per_network'),
    quota.BaseResource('routes_per_subnet',
                       'quota_routes_per_subnet'),
    quota.BaseResource('security_rules_per_group',
                       'quota_security_rules_per_group'),
    quota.BaseResource('security_groups_per_port',
                       'quota_security_groups_per_port'),
    quota.BaseResource('v4_subnets_per_network',
                       'quota_v4_subnets_per_network'),
    quota.BaseResource('v6_subnets_per_network',
                       'quota_v6_subnets_per_network'),
    quota.BaseResource('fixed_ips_per_port',
                       'quota_fixed_ips_per_port')
]

# Default limits for the quota resources above.
# NOTE(review): `_` is not imported here; presumably Neutron's i18n setup
# installs it as a builtin before this module loads — confirm.
quark_quota_opts = [
    cfg.IntOpt("quota_alloc_pools_per_subnet",
               default=5,
               help=_("Maximum number of allocation pools per subnet")),
    cfg.IntOpt('quota_dns_nameservers_per_subnet',
               default=2,
               help=_('Maximum number of dns nameservers per subnet')),
    cfg.IntOpt('quota_ports_per_network',
               default=250,
               help=_('Maximum ports per network')),
    cfg.IntOpt('quota_routes_per_subnet',
               default=3,
               help=_('Maximum routes per subnet')),
    cfg.IntOpt('quota_security_rules_per_group',
               default=20,
               help=_('Maximum security group rules in a group')),
    cfg.IntOpt("quota_security_groups_per_port",
               default=5,
               help=_("Maximum number of security groups per port")),
    cfg.IntOpt('quota_v4_subnets_per_network',
               default=1,
               help=_('Maximum v4 subnets per network')),
    cfg.IntOpt('quota_v6_subnets_per_network',
               default=1,
               help=_('Maximum v6 subnets per network')),
    cfg.IntOpt('quota_fixed_ips_per_port',
               default=5,
               help=_('Maximum number of fixed IPs per port'))
]


def append_quark_extensions(conf):
    """Adds the Quark API Extensions to the extension path.

    Pulled out for test coverage.
    """
    if 'api_extensions_path' in conf:
        conf.set_override('api_extensions_path',
                          ":".join(extensions.__path__))


# Module import side effects: install the Quark extension path, register
# the quota options and resources with Neutron's quota engine.
append_quark_extensions(CONF)

CONF.register_opts(quark_quota_opts, "QUOTAS")
quota.QUOTAS.register_resources(quark_resources)


def sessioned(func):
    """Decorator that closes the context's DB session after the call.

    Intended for Plugin methods whose first positional argument (after
    self) is the Neutron request context.
    """
    def _wrapped(self, context, *args, **kwargs):
        res = func(self, context, *args, **kwargs)
        context.session.close()
        # NOTE(mdietz): Forces neutron to get a fresh session
        # if it needs it after our call
        context._session = None
        return res
    return _wrapped


class Plugin(neutron_plugin_base_v2.NeutronPluginBaseV2,
             sg_ext.SecurityGroupPluginBase):
    """Quark's Neutron v2 core plugin.

    Thin facade: every operation delegates to the matching function in a
    quark.plugin_modules module.  Most methods are wrapped with @sessioned
    so the DB session is closed after each call.
    """
    supported_extension_aliases = ["mac_address_ranges", "routes",
                                   "ip_addresses", "security-group",
                                   "diagnostics", "subnets_quark",
                                   "provider", "ip_policies", "quotas",
                                   "networks_quark", "router",
                                   "ip_availabilities", "ports_quark"]

    def __init__(self):
        LOG.info("Starting quark plugin")

    def _fix_missing_tenant_id(self, context, resource):
        """Will add the tenant_id to the context from body.

        It is assumed that the body must have a tenant_id because neutron
        core could never have gotten here otherwise.
        """
        if context.tenant_id is None:
            context.tenant_id = resource["tenant_id"]

    # -- MAC address ranges ------------------------------------------------

    @sessioned
    def get_mac_address_range(self, context, id, fields=None):
        return mac_address_ranges.get_mac_address_range(context, id, fields)

    @sessioned
    def get_mac_address_ranges(self, context):
        return mac_address_ranges.get_mac_address_ranges(context)

    @sessioned
    def create_mac_address_range(self, context, mac_range):
        self._fix_missing_tenant_id(context, mac_range["mac_address_range"])
        return mac_address_ranges.create_mac_address_range(context, mac_range)

    @sessioned
    def delete_mac_address_range(self, context, id):
        mac_address_ranges.delete_mac_address_range(context, id)

    # -- Security groups ---------------------------------------------------

    @sessioned
    def create_security_group(self, context, security_group):
        self._fix_missing_tenant_id(context,
                                    security_group["security_group"])
        return security_groups.create_security_group(context, security_group)

    @sessioned
    def create_security_group_rule(self, context, security_group_rule):
        self._fix_missing_tenant_id(
            context, security_group_rule["security_group_rule"])
        return security_groups.create_security_group_rule(
            context, security_group_rule)

    @sessioned
    def delete_security_group(self, context, id):
        security_groups.delete_security_group(context, id)

    @sessioned
    def delete_security_group_rule(self, context, id):
        security_groups.delete_security_group_rule(context, id)

    @sessioned
    def get_security_group(self, context, id, fields=None):
        return security_groups.get_security_group(context, id, fields)

    @sessioned
    def get_security_group_rule(self, context, id, fields=None):
        return security_groups.get_security_group_rule(context, id, fields)

    @sessioned
    def get_security_groups(self, context, filters=None, fields=None,
                            sorts=None, limit=None, marker=None,
                            page_reverse=False):
        return security_groups.get_security_groups(
            context, filters, fields, sorts, limit, marker, page_reverse)

    @sessioned
    def get_security_group_rules(self, context, filters=None, fields=None,
                                 sorts=None, limit=None, marker=None,
                                 page_reverse=False):
        return security_groups.get_security_group_rules(
            context, filters, fields, sorts, limit, marker, page_reverse)

    @sessioned
    def update_security_group(self, context, id, security_group):
        return security_groups.update_security_group(context, id,
                                                     security_group)

    # -- IP policies ---------------------------------------------------------

    @sessioned
    def create_ip_policy(self, context, ip_policy):
        self._fix_missing_tenant_id(context, ip_policy["ip_policy"])
        return ip_policies.create_ip_policy(context, ip_policy)

    @sessioned
    def get_ip_policy(self, context, id):
        return ip_policies.get_ip_policy(context, id)

    @sessioned
    def get_ip_policies(self, context, **filters):
        return ip_policies.get_ip_policies(context, **filters)

    @sessioned
    def update_ip_policy(self, context, id, ip_policy):
        return ip_policies.update_ip_policy(context, id, ip_policy)

    @sessioned
    def delete_ip_policy(self, context, id):
        return ip_policies.delete_ip_policy(context, id)

    # -- IP addresses ----------------------------------------------------------

    @sessioned
    def get_ip_addresses(self, context, **filters):
        return ip_addresses.get_ip_addresses(context, **filters)

    @sessioned
    def get_ip_address(self, context, id):
        return ip_addresses.get_ip_address(context, id)

    @sessioned
    def create_ip_address(self, context, ip_address):
        self._fix_missing_tenant_id(context, ip_address["ip_address"])
        return ip_addresses.create_ip_address(context, ip_address)

    @sessioned
    def update_ip_address(self, context, id, ip_address):
        return ip_addresses.update_ip_address(context, id, ip_address)

    # -- Ports -----------------------------------------------------------------

    @sessioned
    def create_port(self, context, port):
        self._fix_missing_tenant_id(context, port["port"])
        return ports.create_port(context, port)

    @sessioned
    def get_port(self, context, id, fields=None):
        return ports.get_port(context, id, fields)

    @sessioned
    def update_port(self, context, id, port):
        return ports.update_port(context, id, port)

    @sessioned
    def get_ports(self, context, limit=None, page_reverse=False, sorts=None,
                  marker=None, filters=None, fields=None):
        # NOTE(review): positional order here (limit, sorts, marker,
        # page_reverse) differs from get_subnets below (limit,
        # page_reverse, sorts, marker); presumably each matches its
        # plugin_modules counterpart's signature — confirm.
        return ports.get_ports(context, limit, sorts, marker, page_reverse,
                               filters, fields)

    @sessioned
    def get_ports_count(self, context, filters=None):
        return ports.get_ports_count(context, filters)

    @sessioned
    def delete_port(self, context, id):
        return ports.delete_port(context, id)

    @sessioned
    def disassociate_port(self, context, id, ip_address_id):
        return ports.disassociate_port(context, id, ip_address_id)

    @sessioned
    def diagnose_port(self, context, id, fields):
        return ports.diagnose_port(context, id, fields)

    # -- Routes ----------------------------------------------------------------

    @sessioned
    def get_route(self, context, id):
        return routes.get_route(context, id)

    @sessioned
    def get_routes(self, context):
        return routes.get_routes(context)

    @sessioned
    def create_route(self, context, route):
        self._fix_missing_tenant_id(context, route["route"])
        return routes.create_route(context, route)

    @sessioned
    def delete_route(self, context, id):
        routes.delete_route(context, id)

    # -- Subnets ---------------------------------------------------------------

    @sessioned
    def create_subnet(self, context, subnet):
        self._fix_missing_tenant_id(context, subnet["subnet"])
        return subnets.create_subnet(context, subnet)

    @sessioned
    def update_subnet(self, context, id, subnet):
        return subnets.update_subnet(context, id, subnet)

    @sessioned
    def get_subnet(self, context, id, fields=None):
        return subnets.get_subnet(context, id, fields)

    @sessioned
    def get_subnets(self, context, limit=None, page_reverse=False,
                    sorts=None, marker=None, filters=None, fields=None):
        return subnets.get_subnets(context, limit, page_reverse, sorts,
                                   marker, filters, fields)

    @sessioned
    def get_subnets_count(self, context, filters=None):
        return subnets.get_subnets_count(context, filters)

    @sessioned
    def delete_subnet(self, context, id):
        return subnets.delete_subnet(context, id)

    @sessioned
    def diagnose_subnet(self, context, id, fields):
        return subnets.diagnose_subnet(context, id, fields)

    # -- Networks ---------------------------------------------------------------

    @sessioned
    def create_network(self, context, network):
        self._fix_missing_tenant_id(context, network["network"])
        return networks.create_network(context, network)

    @sessioned
    def update_network(self, context, id, network):
        return networks.update_network(context, id, network)

    @sessioned
    def get_network(self, context, id, fields=None):
        return networks.get_network(context, id, fields)

    @sessioned
    def get_networks(self, context, limit=None, sorts=None, marker=None,
                     page_reverse=False, filters=None, fields=None):
        return networks.get_networks(context, limit, sorts, marker,
                                     page_reverse, filters, fields)

    @sessioned
    def get_networks_count(self, context, filters=None):
        return networks.get_networks_count(context, filters)

    @sessioned
    def delete_network(self, context, id):
        return networks.delete_network(context, id)

    @sessioned
    def diagnose_network(self, context, id, fields):
        return networks.diagnose_network(context, id, fields)

    # NOTE(mdietz): we don't actually support these, but despite the fact that
    #               they're extensions in Neutron, Nova still expects to be
    #               able to call some of these as if they aren't
    def create_router(self, context, router):
        raise NotImplementedError()

    def update_router(self, context, id, router):
        raise NotImplementedError()

    def get_router(self, context, id, fields=None):
        return router.get_router(context, id, fields)

    def delete_router(self, context, id):
        raise NotImplementedError()

    def get_routers(self, context, filters=None, fields=None, sorts=None,
                    limit=None, marker=None, page_reverse=False):
        return router.get_routers(context, filters=filters, fields=fields,
                                  sorts=sorts, limit=limit, marker=marker,
                                  page_reverse=page_reverse)

    def add_router_interface(self, context, router_id, interface_info):
        raise NotImplementedError()

    def remove_router_interface(self, context, router_id, interface_info):
        raise NotImplementedError()

    # -- Floating IPs -----------------------------------------------------------

    @sessioned
    def create_floatingip(self, context, floatingip):
        self._fix_missing_tenant_id(context, floatingip["floatingip"])
        return floating_ips.create_floatingip(context,
                                              floatingip["floatingip"])

    @sessioned
    def update_floatingip(self, context, id, floatingip):
        return floating_ips.update_floatingip(context, id, floatingip)

    @sessioned
    def get_floatingip(self, context, id, fields=None):
        return floating_ips.get_floatingip(context, id, fields)

    @sessioned
    def delete_floatingip(self, context, id):
        return floating_ips.delete_floatingip(context, id)

    @sessioned
    def get_floatingips(self, context, filters=None, fields=None,
                        sorts=None, limit=None, marker=None,
                        page_reverse=False):
        return floating_ips.get_floatingips(context, filters=filters,
                                            fields=fields, sorts=sorts,
                                            limit=limit, marker=marker,
                                            page_reverse=page_reverse)

    def get_routers_count(self, context, filters=None):
        raise NotImplementedError()

    def get_floatingips_count(self, context, filters=None):
        return floating_ips.get_floatingips_count(context, filters)

    def get_ip_availability(self, **kwargs):
        return ip_availability.get_ip_availability(**kwargs)
insequent/quark
quark/plugin.py
Python
apache-2.0
15,924
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Implementation of SQLAlchemy backend."""

import collections
import copy
import datetime
import functools
import sys
import threading
import uuid

from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_db import options as oslo_db_options
from oslo_db.sqlalchemy import session as db_session
from oslo_db.sqlalchemy import update_match
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves import range
from sqlalchemy import and_
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.orm import aliased
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.orm import noload
from sqlalchemy.orm import undefer
from sqlalchemy.schema import Table
from sqlalchemy import sql
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql import false
from sqlalchemy.sql import func
from sqlalchemy.sql import null
from sqlalchemy.sql import true

from nova import block_device
from nova.compute import task_states
from nova.compute import vm_states
import nova.context
from nova.db.sqlalchemy import models
from nova import exception
from nova.i18n import _, _LI, _LE, _LW
from nova import quota


db_opts = [
    cfg.StrOpt('osapi_compute_unique_server_name_scope',
               default='',
               help='When set, compute API will consider duplicate hostnames '
                    'invalid within the specified scope, regardless of case. '
                    'Should be empty, "project" or "global".'),
]

# Options for the separate Nova API database connection.
api_db_opts = [
    cfg.StrOpt('connection',
               help='The SQLAlchemy connection string to use to connect to '
                    'the Nova API database.',
               secret=True),
    cfg.BoolOpt('sqlite_synchronous',
                default=True,
                help='If True, SQLite uses synchronous mode.'),
    cfg.StrOpt('slave_connection',
               secret=True,
               help='The SQLAlchemy connection string to use to connect to the'
                    ' slave database.'),
    cfg.StrOpt('mysql_sql_mode',
               default='TRADITIONAL',
               help='The SQL mode to be used for MySQL sessions. '
                    'This option, including the default, overrides any '
                    'server-set SQL mode. To use whatever SQL mode '
                    'is set by the server configuration, '
                    'set this to no value. Example: mysql_sql_mode='),
    cfg.IntOpt('idle_timeout',
               default=3600,
               help='Timeout before idle SQL connections are reaped.'),
    cfg.IntOpt('max_pool_size',
               help='Maximum number of SQL connections to keep open in a '
                    'pool.'),
    cfg.IntOpt('max_retries',
               default=10,
               help='Maximum number of database connection retries '
                    'during startup. Set to -1 to specify an infinite '
                    'retry count.'),
    cfg.IntOpt('retry_interval',
               default=10,
               help='Interval between retries of opening a SQL connection.'),
    cfg.IntOpt('max_overflow',
               help='If set, use this value for max_overflow with '
                    'SQLAlchemy.'),
    cfg.IntOpt('connection_debug',
               default=0,
               help='Verbosity of SQL debugging information: 0=None, '
                    '100=Everything.'),
    cfg.BoolOpt('connection_trace',
                default=False,
                help='Add Python stack traces to SQL as comment strings.'),
    cfg.IntOpt('pool_timeout',
               help='If set, use this value for pool_timeout with '
                    'SQLAlchemy.'),
]

CONF = cfg.CONF
CONF.register_opts(db_opts)
CONF.register_opts(oslo_db_options.database_opts, 'database')
CONF.register_opts(api_db_opts, group='api_database')

LOG = logging.getLogger(__name__)

# Lazily-built EngineFacade instances, one for the main DB and one for
# the API DB, guarded by _LOCK.
_ENGINE_FACADE = {'main': None, 'api': None}
_MAIN_FACADE = 'main'
_API_FACADE = 'api'
_LOCK = threading.Lock()


def _create_facade(conf_group):
    """Build an EngineFacade from one [database]/[api_database] group."""
    # NOTE(dheeraj): This fragment is copied from oslo.db
    return db_session.EngineFacade(
        sql_connection=conf_group.connection,
        slave_connection=conf_group.slave_connection,
        sqlite_fk=False,
        autocommit=True,
        expire_on_commit=False,
        mysql_sql_mode=conf_group.mysql_sql_mode,
        idle_timeout=conf_group.idle_timeout,
        connection_debug=conf_group.connection_debug,
        max_pool_size=conf_group.max_pool_size,
        max_overflow=conf_group.max_overflow,
        pool_timeout=conf_group.pool_timeout,
        sqlite_synchronous=conf_group.sqlite_synchronous,
        connection_trace=conf_group.connection_trace,
        max_retries=conf_group.max_retries,
        retry_interval=conf_group.retry_interval)


def _create_facade_lazily(facade, conf_group):
    """Create the named facade on first use (double-checked locking)."""
    global _LOCK, _ENGINE_FACADE
    if _ENGINE_FACADE[facade] is None:
        with _LOCK:
            if _ENGINE_FACADE[facade] is None:
                _ENGINE_FACADE[facade] = _create_facade(conf_group)
    return _ENGINE_FACADE[facade]


def get_engine(use_slave=False):
    conf_group = CONF.database
    facade = _create_facade_lazily(_MAIN_FACADE, conf_group)
    return facade.get_engine(use_slave=use_slave)


def get_api_engine():
    conf_group = CONF.api_database
    facade = _create_facade_lazily(_API_FACADE, conf_group)
    return facade.get_engine()


def get_session(use_slave=False, **kwargs):
    conf_group = CONF.database
    facade = _create_facade_lazily(_MAIN_FACADE, conf_group)
    return facade.get_session(use_slave=use_slave, **kwargs)


def get_api_session(**kwargs):
    conf_group = CONF.api_database
    facade = _create_facade_lazily(_API_FACADE, conf_group)
    return facade.get_session(**kwargs)


_SHADOW_TABLE_PREFIX = 'shadow_'
_DEFAULT_QUOTA_NAME = 'default'
PER_PROJECT_QUOTAS = ['fixed_ips', 'floating_ips', 'networks']


def get_backend():
    """The backend is this module itself."""
    return sys.modules[__name__]


def require_context(f):
    """Decorator to require *any* user or admin context.

    This does no authorization for user or project access matching, see
    :py:func:`nova.context.authorize_project_context` and
    :py:func:`nova.context.authorize_user_context`.

    The first argument to the wrapped function must be the context.

    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        nova.context.require_context(args[0])
        return f(*args, **kwargs)
    return wrapper


def require_instance_exists_using_uuid(f):
    """Decorator to require the specified instance to exist.

    Requires the wrapped function to use context and instance_uuid as
    their first two arguments.
    """
    @functools.wraps(f)
    def wrapper(context, instance_uuid, *args, **kwargs):
        instance_get_by_uuid(context, instance_uuid)
        return f(context, instance_uuid, *args, **kwargs)

    return wrapper


def require_aggregate_exists(f):
    """Decorator to require the specified aggregate to exist.

    Requires the wrapped function to use context and aggregate_id as
    their first two arguments.
    """
    @functools.wraps(f)
    def wrapper(context, aggregate_id, *args, **kwargs):
        aggregate_get(context, aggregate_id)
        return f(context, aggregate_id, *args, **kwargs)
    return wrapper


def model_query(context, model,
                args=None,
                session=None,
                use_slave=False,
                read_deleted=None,
                project_only=False):
    """Query helper that accounts for context's `read_deleted` field.

    :param context: NovaContext of the query.
    :param model: Model to query. Must be a subclass of ModelBase.
    :param args: Arguments to query. If None - model is used.
    :param session: If present, the session to use.
    :param use_slave: If true, use a slave connection to the DB if creating a
                      session.
    :param read_deleted: If not None, overrides context's read_deleted field.
                         Permitted values are 'no', which does not return
                         deleted values; 'only', which only returns deleted
                         values; and 'yes', which does not filter deleted
                         values.
    :param project_only: If set and context is user-type, then restrict
                         query to match the context's project_id. If set to
                         'allow_none', restriction includes project_id = None.
    """
    if session is None:
        # A configured-but-empty slave connection means no slave exists.
        if CONF.database.slave_connection == '':
            use_slave = False
        session = get_session(use_slave=use_slave)

    if read_deleted is None:
        read_deleted = context.read_deleted

    query_kwargs = {}
    if 'no' == read_deleted:
        query_kwargs['deleted'] = False
    elif 'only' == read_deleted:
        query_kwargs['deleted'] = True
    elif 'yes' == read_deleted:
        pass
    else:
        raise ValueError(_("Unrecognized read_deleted value '%s'")
                         % read_deleted)

    query = sqlalchemyutils.model_query(model, session, args, **query_kwargs)

    # We can't use oslo.db model_query's project_id here, as it doesn't allow
    # us to return both our projects and unowned projects.
    if nova.context.is_user_context(context) and project_only:
        if project_only == 'allow_none':
            query = query.\
                filter(or_(model.project_id == context.project_id,
                           model.project_id == null()))
        else:
            query = query.filter_by(project_id=context.project_id)

    return query


def convert_objects_related_datetimes(values, *datetime_keys):
    """Parse string datetimes in `values` in place and strip timezones."""
    if not datetime_keys:
        datetime_keys = ('created_at', 'deleted_at', 'updated_at')

    for key in datetime_keys:
        if key in values and values[key]:
            if isinstance(values[key], six.string_types):
                try:
                    values[key] = timeutils.parse_strtime(values[key])
                except ValueError:
                    # Try alternate parsing since parse_strtime will fail
                    # with say converting '2015-05-28T19:59:38+00:00'
                    values[key] = timeutils.parse_isotime(values[key])
            # NOTE(danms): Strip UTC timezones from datetimes, since they're
            # stored that way in the database
            values[key] = values[key].replace(tzinfo=None)
    return values


# Quota-sync helpers: each returns {resource_name: current_usage} for the
# quota engine's usage refresh (see QUOTA_SYNC_FUNCTIONS).

def _sync_instances(context, project_id, user_id, session):
    return dict(zip(('instances', 'cores', 'ram'),
                    _instance_data_get_for_user(
                        context, project_id, user_id, session)))


def _sync_floating_ips(context, project_id, user_id, session):
    return dict(floating_ips=_floating_ip_count_by_project(
        context, project_id, session))


def _sync_fixed_ips(context, project_id, user_id, session):
    return dict(fixed_ips=_fixed_ip_count_by_project(
        context, project_id, session))


def _sync_security_groups(context, project_id, user_id, session):
    return dict(security_groups=_security_group_count_by_project_and_user(
        context, project_id, user_id, session))


def _sync_server_groups(context, project_id, user_id, session):
    return dict(server_groups=_instance_group_count_by_project_and_user(
        context, project_id, user_id, session))

QUOTA_SYNC_FUNCTIONS = {
    '_sync_instances': _sync_instances,
    '_sync_floating_ips': _sync_floating_ips,
    '_sync_fixed_ips': _sync_fixed_ips,
    '_sync_security_groups': _sync_security_groups,
    '_sync_server_groups': _sync_server_groups,
}


###################


def constraint(**conditions):
    return Constraint(conditions)


def equal_any(*values):
    return EqualityCondition(values)


def not_equal(*values):
    return InequalityCondition(values)


class Constraint(object):
    """A set of column->condition constraints applied to a query."""

    def __init__(self, conditions):
        self.conditions = conditions

    def apply(self, model, query):
        for key, condition in self.conditions.items():
            for clause in condition.clauses(getattr(model, key)):
                query = query.filter(clause)
        return query


class EqualityCondition(object):
    """Matches rows where the field equals any of the given values."""

    def __init__(self, values):
        self.values = values

    def clauses(self, field):
        # method signature requires us to return an iterable even if for OR
        # operator this will actually be a single clause
        return [or_(*[field == value for value in self.values])]


class InequalityCondition(object):
    """Matches rows where the field differs from all given values."""

    def __init__(self, values):
        self.values = values

    def clauses(self, field):
        return [field != value for value in self.values]


###################


def service_destroy(context, service_id):
    session = get_session()
    with session.begin():
        service = _service_get(context, service_id)

        model_query(context, models.Service, session=session).\
            filter_by(id=service_id).\
            soft_delete(synchronize_session=False)

        # TODO(sbauza): Remove the service_id filter in a later release
        # once we are sure that all compute nodes report the host field
        model_query(context, models.ComputeNode, session=session).\
            filter(or_(models.ComputeNode.service_id == service_id,
                       models.ComputeNode.host == service['host'])).\
            soft_delete(synchronize_session=False)


def _service_get(context, service_id, session=None,
                 use_slave=False):
    query = model_query(context, models.Service, session=session,
                        use_slave=use_slave).\
        filter_by(id=service_id)

    result = query.first()
    if not result:
        raise exception.ServiceNotFound(service_id=service_id)

    return result


def service_get(context, service_id, use_slave=False):
    return _service_get(context, service_id,
                        use_slave=use_slave)


def service_get_all(context, disabled=None):
    query = model_query(context, models.Service)

    if disabled is not None:
        query = query.filter_by(disabled=disabled)

    return query.all()


def service_get_all_by_topic(context, topic):
    return model_query(context, models.Service, read_deleted="no").\
        filter_by(disabled=False).\
        filter_by(topic=topic).\
        all()


def service_get_by_host_and_topic(context, host, topic):
    return model_query(context, models.Service, read_deleted="no").\
        filter_by(disabled=False).\
        filter_by(host=host).\
        filter_by(topic=topic).\
        first()


def service_get_all_by_binary(context, binary):
    return model_query(context, models.Service, read_deleted="no").\
        filter_by(disabled=False).\
        filter_by(binary=binary).\
        all()


def service_get_by_host_and_binary(context, host, binary):
    result = model_query(context, models.Service, read_deleted="no").\
        filter_by(host=host).\
        filter_by(binary=binary).\
        first()

    if not result:
        raise exception.HostBinaryNotFound(host=host, binary=binary)

    return result


def service_get_all_by_host(context, host):
    return model_query(context, models.Service, read_deleted="no").\
        filter_by(host=host).\
        all()


def service_get_by_compute_host(context, host, use_slave=False):
    result = model_query(context, models.Service, read_deleted="no",
                         use_slave=use_slave).\
        filter_by(host=host).\
        filter_by(binary='nova-compute').\
        first()

    if not result:
        raise exception.ComputeHostNotFound(host=host)

    return result


def service_create(context, values):
    service_ref = models.Service()
    service_ref.update(values)
    # NOTE(review): CONF.enable_new_services is registered elsewhere in
    # nova; when False, new services start disabled.
    if not CONF.enable_new_services:
        service_ref.disabled = True
    try:
        service_ref.save()
    except db_exc.DBDuplicateEntry as e:
        if 'binary' in e.columns:
            raise exception.ServiceBinaryExists(host=values.get('host'),
                        binary=values.get('binary'))
        raise exception.ServiceTopicExists(host=values.get('host'),
                    topic=values.get('topic'))
    return service_ref


@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def service_update(context, service_id, values):
    session = get_session()
    with session.begin():
        service_ref = _service_get(context, service_id, session=session)
        # Only servicegroup.drivers.db.DbDriver._report_state() updates
        # 'report_count', so if that value changes then store the timestamp
        # as the last time we got a state report.
        if 'report_count' in values:
            if values['report_count'] > service_ref.report_count:
                service_ref.last_seen_up = timeutils.utcnow()
        service_ref.update(values)

    return service_ref


###################


def compute_node_get(context, compute_id):
    return _compute_node_get(context, compute_id)


def _compute_node_get(context, compute_id, session=None):
    result = model_query(context, models.ComputeNode, session=session).\
        filter_by(id=compute_id).\
        first()

    if not result:
        raise exception.ComputeHostNotFound(host=compute_id)

    return result


def compute_nodes_get_by_service_id(context, service_id):
    result = model_query(context, models.ComputeNode, read_deleted='no').\
        filter_by(service_id=service_id).\
        all()

    if not result:
        raise exception.ServiceNotFound(service_id=service_id)

    return result


def compute_node_get_by_host_and_nodename(context, host, nodename):
    result = model_query(context, models.ComputeNode, read_deleted='no').\
        filter_by(host=host, hypervisor_hostname=nodename).\
        first()

    if not result:
        raise exception.ComputeHostNotFound(host=host)

    return result


def compute_node_get_all_by_host(context, host, use_slave=False):
    result = model_query(context, models.ComputeNode, read_deleted='no',
                         use_slave=use_slave).\
        filter_by(host=host).\
        all()

    if not result:
        raise exception.ComputeHostNotFound(host=host)

    return result


def compute_node_get_all(context):
    return model_query(context, models.ComputeNode, read_deleted='no').all()


def compute_node_search_by_hypervisor(context, hypervisor_match):
    field = models.ComputeNode.hypervisor_hostname
    return model_query(context, models.ComputeNode).\
        filter(field.like('%%%s%%' % hypervisor_match)).\
        all()


def compute_node_create(context, values):
    """Creates a new ComputeNode and populates the capacity fields
    with the most recent data.
    """
    convert_objects_related_datetimes(values)

    compute_node_ref = models.ComputeNode()
    compute_node_ref.update(values)
    compute_node_ref.save()

    return compute_node_ref


@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def compute_node_update(context, compute_id, values):
    """Updates the ComputeNode record with the most recent data."""
    session = get_session()
    with session.begin():
        compute_ref = _compute_node_get(context, compute_id, session=session)
        # Always update this, even if there's going to be no other
        # changes in data.  This ensures that we invalidate the
        # scheduler cache of compute node data in case of races.
        values['updated_at'] = timeutils.utcnow()
        convert_objects_related_datetimes(values)
        compute_ref.update(values)

    return compute_ref


def compute_node_delete(context, compute_id):
    """Delete a ComputeNode record."""
    session = get_session()
    with session.begin():
        result = model_query(context, models.ComputeNode, session=session).\
            filter_by(id=compute_id).\
            soft_delete(synchronize_session=False)

        if not result:
            raise exception.ComputeHostNotFound(host=compute_id)


def compute_node_statistics(context):
    """Compute statistics over all compute nodes."""

    # TODO(sbauza): Remove the service_id filter in a later release
    # once we are sure that all compute nodes report the host field
    _filter = or_(models.Service.host == models.ComputeNode.host,
                  models.Service.id == models.ComputeNode.service_id)

    result = model_query(context,
                         models.ComputeNode, (
                             func.count(models.ComputeNode.id),
                             func.sum(models.ComputeNode.vcpus),
                             func.sum(models.ComputeNode.memory_mb),
                             func.sum(models.ComputeNode.local_gb),
                             func.sum(models.ComputeNode.vcpus_used),
                             func.sum(models.ComputeNode.memory_mb_used),
                             func.sum(models.ComputeNode.local_gb_used),
                             func.sum(models.ComputeNode.free_ram_mb),
                             func.sum(models.ComputeNode.free_disk_gb),
                             func.sum(models.ComputeNode.current_workload),
                             func.sum(models.ComputeNode.running_vms),
                             func.sum(models.ComputeNode.disk_available_least),
                         ), read_deleted="no").\
        filter(models.Service.disabled == false()).\
        filter(models.Service.binary == "nova-compute").\
        filter(_filter).\
        first()

    # Build a dict of the info--making no assumptions about result
    fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
              'memory_mb_used', 'local_gb_used', 'free_ram_mb',
              'free_disk_gb', 'current_workload', 'running_vms',
              'disk_available_least')
    return {field: int(result[idx] or 0)
            for idx, field in enumerate(fields)}


###################


def certificate_create(context, values):
    certificate_ref = models.Certificate()
    for (key, value) in values.items():
        certificate_ref[key] = value
    certificate_ref.save()
    return certificate_ref


def certificate_get_all_by_project(context, project_id):
    return model_query(context, models.Certificate, read_deleted="no").\
        filter_by(project_id=project_id).\
        all()


def certificate_get_all_by_user(context, user_id):
    return model_query(context, models.Certificate, read_deleted="no").\
        filter_by(user_id=user_id).\
        all()


def certificate_get_all_by_user_and_project(context, user_id, project_id):
    return model_query(context, models.Certificate, read_deleted="no").\
        filter_by(user_id=user_id).\
        filter_by(project_id=project_id).\
        all()


###################


@require_context
def floating_ip_get(context, id):
    try:
        result = model_query(context, models.FloatingIp, project_only=True).\
            filter_by(id=id).\
            options(joinedload_all('fixed_ip.instance')).\
            first()

        if not result:
            raise exception.FloatingIpNotFound(id=id)
    except db_exc.DBError:
        # Non-integer id values make the DB layer raise rather than
        # returning no rows.
        msg = _LW("Invalid floating ip id %s in request") % id
        LOG.warn(msg)
        raise exception.InvalidID(id=id)
    return result


@require_context
def floating_ip_get_pools(context):
    pools = []
    for result in model_query(context, models.FloatingIp,
                              (models.FloatingIp.pool,)).distinct():
        pools.append({'name': result[0]})
    return pools


@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
                           retry_on_request=True)
def floating_ip_allocate_address(context, project_id, pool,
                                 auto_assigned=False):
    nova.context.authorize_project_context(context, project_id)
    session = get_session()
    with session.begin():
        floating_ip_ref = model_query(context, models.FloatingIp,
                                      session=session, read_deleted="no").\
            filter_by(fixed_ip_id=None).\
            filter_by(project_id=None).\
            filter_by(pool=pool).\
            first()

        if not floating_ip_ref:
            raise exception.NoMoreFloatingIps()

        params = {'project_id': project_id, 'auto_assigned': auto_assigned}

        rows_update = model_query(context, models.FloatingIp,
                                  session=session, read_deleted="no").\
            filter_by(id=floating_ip_ref['id']).\
            filter_by(fixed_ip_id=None).\
            filter_by(project_id=None).\
            filter_by(pool=pool).\
            update(params, synchronize_session='evaluate')

        if not rows_update:
            # Another transaction claimed this row between our SELECT and
            # UPDATE; RetryRequest makes wrap_db_retry try again.
            LOG.debug('The row was updated in a concurrent transaction, '
                      'we will fetch another one')
            raise db_exc.RetryRequest(exception.FloatingIpAllocateFailed())

    return floating_ip_ref['address']


@require_context
def floating_ip_bulk_create(context, ips, want_result=True):
    session = get_session()
    with session.begin():
        try:
            tab = models.FloatingIp().__table__
            session.execute(tab.insert(), ips)
        except db_exc.DBDuplicateEntry as e:
            raise exception.FloatingIpExists(address=e.value)

        if want_result:
            return model_query(
                context, models.FloatingIp, session=session).filter(
                    models.FloatingIp.address.in_(
                        [ip['address'] for ip in ips])).all()


def _ip_range_splitter(ips, block_size=256):
    """Yields blocks of IPs no more than block_size elements long."""
    out = []
    count = 0
    for ip in ips:
        out.append(ip['address'])
        count += 1

        if count > block_size - 1:
            yield out
            out = []
            count = 0

    if out:
        yield out


@require_context
def floating_ip_bulk_destroy(context, ips):
    session = get_session()
    with session.begin():
        project_id_to_quota_count = collections.defaultdict(int)
        for ip_block in _ip_range_splitter(ips):
            # Find any floating IPs that were not auto_assigned and
            # thus need quota released.
            query = model_query(context, models.FloatingIp,
                                session=session).\
                filter(models.FloatingIp.address.in_(ip_block)).\
                filter_by(auto_assigned=False)
            for row in query.all():
                # The count is negative since we release quota by
                # reserving negative quota.
                project_id_to_quota_count[row['project_id']] -= 1
            # Delete the floating IPs.
            model_query(context, models.FloatingIp, session=session).\
                filter(models.FloatingIp.address.in_(ip_block)).\
                soft_delete(synchronize_session='fetch')

    # Delete the quotas, if needed.
    # Quota update happens in a separate transaction, so previous must have
    # been committed first.
    for project_id, count in project_id_to_quota_count.items():
        try:
            reservations = quota.QUOTAS.reserve(context,
                                                project_id=project_id,
                                                floating_ips=count)
            quota.QUOTAS.commit(context, reservations,
                                project_id=project_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Failed to update usages bulk "
                                  "deallocating floating IP"))


@require_context
def floating_ip_create(context, values):
    floating_ip_ref = models.FloatingIp()
    floating_ip_ref.update(values)
    try:
        floating_ip_ref.save()
    except db_exc.DBDuplicateEntry:
        raise exception.FloatingIpExists(address=values['address'])
    return floating_ip_ref


def _floating_ip_count_by_project(context, project_id, session=None):
    nova.context.authorize_project_context(context, project_id)
    # TODO(tr3buchet): why leave auto_assigned floating IPs out?
return model_query(context, models.FloatingIp, read_deleted="no", session=session).\ filter_by(project_id=project_id).\ filter_by(auto_assigned=False).\ count() @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def floating_ip_fixed_ip_associate(context, floating_address, fixed_address, host): session = get_session() with session.begin(): fixed_ip_ref = model_query(context, models.FixedIp, session=session).\ filter_by(address=fixed_address).\ options(joinedload('network')).\ first() if not fixed_ip_ref: raise exception.FixedIpNotFoundForAddress(address=fixed_address) rows = model_query(context, models.FloatingIp, session=session).\ filter_by(address=floating_address).\ filter(models.FloatingIp.project_id == context.project_id).\ filter(or_(models.FloatingIp.fixed_ip_id == fixed_ip_ref['id'], models.FloatingIp.fixed_ip_id.is_(None))).\ update({'fixed_ip_id': fixed_ip_ref['id'], 'host': host}) if not rows: raise exception.FloatingIpAssociateFailed(address=floating_address) return fixed_ip_ref @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def floating_ip_deallocate(context, address): return model_query(context, models.FloatingIp).\ filter_by(address=address).\ filter(and_(models.FloatingIp.project_id != null()), models.FloatingIp.fixed_ip_id == null()).\ update({'project_id': None, 'host': None, 'auto_assigned': False}, synchronize_session=False) @require_context def floating_ip_destroy(context, address): model_query(context, models.FloatingIp).\ filter_by(address=address).\ delete() @require_context def floating_ip_disassociate(context, address): session = get_session() with session.begin(): floating_ip_ref = model_query(context, models.FloatingIp, session=session).\ filter_by(address=address).\ first() if not floating_ip_ref: raise exception.FloatingIpNotFoundForAddress(address=address) fixed_ip_ref = model_query(context, models.FixedIp, session=session).\ 
filter_by(id=floating_ip_ref['fixed_ip_id']).\ options(joinedload('network')).\ first() floating_ip_ref.fixed_ip_id = None floating_ip_ref.host = None return fixed_ip_ref def _floating_ip_get_all(context, session=None): return model_query(context, models.FloatingIp, read_deleted="no", session=session) def floating_ip_get_all(context): floating_ip_refs = _floating_ip_get_all(context).\ options(joinedload('fixed_ip')).\ all() if not floating_ip_refs: raise exception.NoFloatingIpsDefined() return floating_ip_refs def floating_ip_get_all_by_host(context, host): floating_ip_refs = _floating_ip_get_all(context).\ filter_by(host=host).\ options(joinedload('fixed_ip')).\ all() if not floating_ip_refs: raise exception.FloatingIpNotFoundForHost(host=host) return floating_ip_refs @require_context def floating_ip_get_all_by_project(context, project_id): nova.context.authorize_project_context(context, project_id) # TODO(tr3buchet): why do we not want auto_assigned floating IPs here? return _floating_ip_get_all(context).\ filter_by(project_id=project_id).\ filter_by(auto_assigned=False).\ options(joinedload_all('fixed_ip.instance')).\ all() @require_context def floating_ip_get_by_address(context, address): return _floating_ip_get_by_address(context, address) def _floating_ip_get_by_address(context, address, session=None): # if address string is empty explicitly set it to None if not address: address = None try: result = model_query(context, models.FloatingIp, session=session).\ filter_by(address=address).\ options(joinedload_all('fixed_ip.instance')).\ first() if not result: raise exception.FloatingIpNotFoundForAddress(address=address) except db_exc.DBError: msg = _("Invalid floating IP %s in request") % address LOG.warn(msg) raise exception.InvalidIpAddressError(msg) # If the floating IP has a project ID set, check to make sure # the non-admin user has access. 
if result.project_id and nova.context.is_user_context(context): nova.context.authorize_project_context(context, result.project_id) return result @require_context def floating_ip_get_by_fixed_address(context, fixed_address): return model_query(context, models.FloatingIp).\ outerjoin(models.FixedIp, models.FixedIp.id == models.FloatingIp.fixed_ip_id).\ filter(models.FixedIp.address == fixed_address).\ all() @require_context def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id): return model_query(context, models.FloatingIp).\ filter_by(fixed_ip_id=fixed_ip_id).\ all() @require_context def floating_ip_update(context, address, values): session = get_session() with session.begin(): float_ip_ref = _floating_ip_get_by_address(context, address, session) float_ip_ref.update(values) try: float_ip_ref.save(session=session) except db_exc.DBDuplicateEntry: raise exception.FloatingIpExists(address=values['address']) return float_ip_ref def _dnsdomain_get(context, session, fqdomain): return model_query(context, models.DNSDomain, session=session, read_deleted="no").\ filter_by(domain=fqdomain).\ with_lockmode('update').\ first() @require_context def dnsdomain_get(context, fqdomain): session = get_session() with session.begin(): return _dnsdomain_get(context, session, fqdomain) def _dnsdomain_get_or_create(context, session, fqdomain): domain_ref = _dnsdomain_get(context, session, fqdomain) if not domain_ref: dns_ref = models.DNSDomain() dns_ref.update({'domain': fqdomain, 'availability_zone': None, 'project_id': None}) return dns_ref return domain_ref def dnsdomain_register_for_zone(context, fqdomain, zone): session = get_session() with session.begin(): domain_ref = _dnsdomain_get_or_create(context, session, fqdomain) domain_ref.scope = 'private' domain_ref.availability_zone = zone session.add(domain_ref) def dnsdomain_register_for_project(context, fqdomain, project): session = get_session() with session.begin(): domain_ref = _dnsdomain_get_or_create(context, session, fqdomain) 
domain_ref.scope = 'public' domain_ref.project_id = project session.add(domain_ref) def dnsdomain_unregister(context, fqdomain): model_query(context, models.DNSDomain).\ filter_by(domain=fqdomain).\ delete() def dnsdomain_get_all(context): return model_query(context, models.DNSDomain, read_deleted="no").all() ################### @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True, retry_on_request=True) def fixed_ip_associate(context, address, instance_uuid, network_id=None, reserved=False, virtual_interface_id=None): """Keyword arguments: reserved -- should be a boolean value(True or False), exact value will be used to filter on the fixed ip address """ if not uuidutils.is_uuid_like(instance_uuid): raise exception.InvalidUUID(uuid=instance_uuid) session = get_session() with session.begin(): network_or_none = or_(models.FixedIp.network_id == network_id, models.FixedIp.network_id == null()) fixed_ip_ref = model_query(context, models.FixedIp, session=session, read_deleted="no").\ filter(network_or_none).\ filter_by(reserved=reserved).\ filter_by(address=address).\ first() if fixed_ip_ref is None: raise exception.FixedIpNotFoundForNetwork(address=address, network_uuid=network_id) if fixed_ip_ref.instance_uuid: raise exception.FixedIpAlreadyInUse(address=address, instance_uuid=instance_uuid) params = {'instance_uuid': instance_uuid, 'allocated': virtual_interface_id is not None} if not fixed_ip_ref.network_id: params['network_id'] = network_id if virtual_interface_id: params['virtual_interface_id'] = virtual_interface_id rows_updated = model_query(context, models.FixedIp, session=session, read_deleted="no").\ filter_by(id=fixed_ip_ref.id).\ filter(network_or_none).\ filter_by(reserved=reserved).\ filter_by(address=address).\ update(params, synchronize_session='evaluate') if not rows_updated: LOG.debug('The row was updated in a concurrent transaction, ' 'we will fetch another row') raise db_exc.RetryRequest( exception.FixedIpAssociateFailed(net=network_id)) 
return fixed_ip_ref @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True, retry_on_request=True) def fixed_ip_associate_pool(context, network_id, instance_uuid=None, host=None, virtual_interface_id=None): """allocate a fixed ip out of a fixed ip network pool. This allocates an unallocated fixed ip out of a specified network. We sort by updated_at to hand out the oldest address in the list. """ if instance_uuid and not uuidutils.is_uuid_like(instance_uuid): raise exception.InvalidUUID(uuid=instance_uuid) session = get_session() with session.begin(): network_or_none = or_(models.FixedIp.network_id == network_id, models.FixedIp.network_id == null()) fixed_ip_ref = model_query(context, models.FixedIp, session=session, read_deleted="no").\ filter(network_or_none).\ filter_by(reserved=False).\ filter_by(instance_uuid=None).\ filter_by(host=None).\ order_by(asc(models.FixedIp.updated_at)).\ first() if not fixed_ip_ref: raise exception.NoMoreFixedIps(net=network_id) params = {'allocated': virtual_interface_id is not None} if fixed_ip_ref['network_id'] is None: params['network_id'] = network_id if instance_uuid: params['instance_uuid'] = instance_uuid if host: params['host'] = host if virtual_interface_id: params['virtual_interface_id'] = virtual_interface_id rows_updated = model_query(context, models.FixedIp, session=session, read_deleted="no").\ filter_by(id=fixed_ip_ref['id']).\ filter_by(network_id=fixed_ip_ref['network_id']).\ filter_by(reserved=False).\ filter_by(instance_uuid=None).\ filter_by(host=None).\ filter_by(address=fixed_ip_ref['address']).\ update(params, synchronize_session='evaluate') if not rows_updated: LOG.debug('The row was updated in a concurrent transaction, ' 'we will fetch another row') raise db_exc.RetryRequest( exception.FixedIpAssociateFailed(net=network_id)) return fixed_ip_ref @require_context def fixed_ip_create(context, values): fixed_ip_ref = models.FixedIp() fixed_ip_ref.update(values) try: fixed_ip_ref.save() except 
db_exc.DBDuplicateEntry: raise exception.FixedIpExists(address=values['address']) return fixed_ip_ref @require_context def fixed_ip_bulk_create(context, ips): engine = get_engine() with engine.begin() as conn: try: tab = models.FixedIp.__table__ conn.execute(tab.insert(), ips) except db_exc.DBDuplicateEntry as e: raise exception.FixedIpExists(address=e.value) @require_context def fixed_ip_disassociate(context, address): session = get_session() with session.begin(): _fixed_ip_get_by_address(context, address, session=session).\ update({'instance_uuid': None, 'virtual_interface_id': None}) def fixed_ip_disassociate_all_by_timeout(context, host, time): session = get_session() # NOTE(vish): only update fixed ips that "belong" to this # host; i.e. the network host or the instance # host matches. Two queries necessary because # join with update doesn't work. with session.begin(): host_filter = or_(and_(models.Instance.host == host, models.Network.multi_host == true()), models.Network.host == host) result = model_query(context, models.FixedIp, (models.FixedIp.id,), read_deleted="no", session=session).\ filter(models.FixedIp.allocated == false()).\ filter(models.FixedIp.updated_at < time).\ join((models.Network, models.Network.id == models.FixedIp.network_id)).\ join((models.Instance, models.Instance.uuid == models.FixedIp.instance_uuid)).\ filter(host_filter).\ all() fixed_ip_ids = [fip[0] for fip in result] if not fixed_ip_ids: return 0 result = model_query(context, models.FixedIp, session=session).\ filter(models.FixedIp.id.in_(fixed_ip_ids)).\ update({'instance_uuid': None, 'leased': False, 'updated_at': timeutils.utcnow()}, synchronize_session='fetch') return result @require_context def fixed_ip_get(context, id, get_network=False): query = model_query(context, models.FixedIp).filter_by(id=id) if get_network: query = query.options(joinedload('network')) result = query.first() if not result: raise exception.FixedIpNotFound(id=id) # FIXME(sirp): shouldn't we just use 
project_only here to restrict the # results? if (nova.context.is_user_context(context) and result['instance_uuid'] is not None): instance = instance_get_by_uuid(context.elevated(read_deleted='yes'), result['instance_uuid']) nova.context.authorize_project_context(context, instance.project_id) return result def fixed_ip_get_all(context): result = model_query(context, models.FixedIp, read_deleted="yes").all() if not result: raise exception.NoFixedIpsDefined() return result @require_context def fixed_ip_get_by_address(context, address, columns_to_join=None): return _fixed_ip_get_by_address(context, address, columns_to_join=columns_to_join) def _fixed_ip_get_by_address(context, address, session=None, columns_to_join=None): if session is None: session = get_session() if columns_to_join is None: columns_to_join = [] with session.begin(subtransactions=True): try: result = model_query(context, models.FixedIp, session=session) for column in columns_to_join: result = result.options(joinedload_all(column)) result = result.filter_by(address=address).first() if not result: raise exception.FixedIpNotFoundForAddress(address=address) except db_exc.DBError: msg = _("Invalid fixed IP Address %s in request") % address LOG.warn(msg) raise exception.FixedIpInvalid(msg) # NOTE(sirp): shouldn't we just use project_only here to restrict the # results? 
if (nova.context.is_user_context(context) and result['instance_uuid'] is not None): instance = _instance_get_by_uuid( context.elevated(read_deleted='yes'), result['instance_uuid'], session ) nova.context.authorize_project_context(context, instance.project_id) return result @require_context def fixed_ip_get_by_floating_address(context, floating_address): return model_query(context, models.FixedIp).\ join(models.FloatingIp, models.FloatingIp.fixed_ip_id == models.FixedIp.id).\ filter(models.FloatingIp.address == floating_address).\ first() # NOTE(tr3buchet) please don't invent an exception here, None is fine @require_context def fixed_ip_get_by_instance(context, instance_uuid): if not uuidutils.is_uuid_like(instance_uuid): raise exception.InvalidUUID(uuid=instance_uuid) vif_and = and_(models.VirtualInterface.id == models.FixedIp.virtual_interface_id, models.VirtualInterface.deleted == 0) result = model_query(context, models.FixedIp, read_deleted="no").\ filter_by(instance_uuid=instance_uuid).\ outerjoin(models.VirtualInterface, vif_and).\ options(contains_eager("virtual_interface")).\ options(joinedload('network')).\ options(joinedload('floating_ips')).\ order_by(asc(models.VirtualInterface.created_at), asc(models.VirtualInterface.id)).\ all() if not result: raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid) return result def fixed_ip_get_by_host(context, host): session = get_session() with session.begin(): instance_uuids = _instance_get_all_uuids_by_host(context, host, session=session) if not instance_uuids: return [] return model_query(context, models.FixedIp, session=session).\ filter(models.FixedIp.instance_uuid.in_(instance_uuids)).\ all() @require_context def fixed_ip_get_by_network_host(context, network_id, host): result = model_query(context, models.FixedIp, read_deleted="no").\ filter_by(network_id=network_id).\ filter_by(host=host).\ first() if not result: raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id, host=host) 
return result @require_context def fixed_ips_by_virtual_interface(context, vif_id): result = model_query(context, models.FixedIp, read_deleted="no").\ filter_by(virtual_interface_id=vif_id).\ options(joinedload('network')).\ options(joinedload('floating_ips')).\ all() return result @require_context def fixed_ip_update(context, address, values): session = get_session() with session.begin(): _fixed_ip_get_by_address(context, address, session=session).\ update(values) def _fixed_ip_count_by_project(context, project_id, session=None): nova.context.authorize_project_context(context, project_id) return model_query(context, models.FixedIp, (models.FixedIp.id,), read_deleted="no", session=session).\ join((models.Instance, models.Instance.uuid == models.FixedIp.instance_uuid)).\ filter(models.Instance.project_id == project_id).\ count() ################### @require_context def virtual_interface_create(context, values): """Create a new virtual interface record in the database. :param values: = dict containing column values """ try: vif_ref = models.VirtualInterface() vif_ref.update(values) vif_ref.save() except db_exc.DBError: raise exception.VirtualInterfaceCreateException() return vif_ref def _virtual_interface_query(context, session=None, use_slave=False): return model_query(context, models.VirtualInterface, session=session, read_deleted="no", use_slave=use_slave) @require_context def virtual_interface_get(context, vif_id): """Gets a virtual interface from the table. :param vif_id: = id of the virtual interface """ vif_ref = _virtual_interface_query(context).\ filter_by(id=vif_id).\ first() return vif_ref @require_context def virtual_interface_get_by_address(context, address): """Gets a virtual interface from the table. 
:param address: = the address of the interface you're looking to get """ try: vif_ref = _virtual_interface_query(context).\ filter_by(address=address).\ first() except db_exc.DBError: msg = _("Invalid virtual interface address %s in request") % address LOG.warn(msg) raise exception.InvalidIpAddressError(msg) return vif_ref @require_context def virtual_interface_get_by_uuid(context, vif_uuid): """Gets a virtual interface from the table. :param vif_uuid: the uuid of the interface you're looking to get """ vif_ref = _virtual_interface_query(context).\ filter_by(uuid=vif_uuid).\ first() return vif_ref @require_context @require_instance_exists_using_uuid def virtual_interface_get_by_instance(context, instance_uuid, use_slave=False): """Gets all virtual interfaces for instance. :param instance_uuid: = uuid of the instance to retrieve vifs for """ vif_refs = _virtual_interface_query(context, use_slave=use_slave).\ filter_by(instance_uuid=instance_uuid).\ order_by(asc("created_at"), asc("id")).\ all() return vif_refs @require_context def virtual_interface_get_by_instance_and_network(context, instance_uuid, network_id): """Gets virtual interface for instance that's associated with network.""" vif_ref = _virtual_interface_query(context).\ filter_by(instance_uuid=instance_uuid).\ filter_by(network_id=network_id).\ first() return vif_ref @require_context def virtual_interface_delete_by_instance(context, instance_uuid): """Delete virtual interface records that are associated with the instance given by instance_id. 
:param instance_uuid: = uuid of instance """ _virtual_interface_query(context).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() @require_context def virtual_interface_get_all(context): """Get all vifs.""" vif_refs = _virtual_interface_query(context).all() return vif_refs ################### def _metadata_refs(metadata_dict, meta_class): metadata_refs = [] if metadata_dict: for k, v in metadata_dict.items(): metadata_ref = meta_class() metadata_ref['key'] = k metadata_ref['value'] = v metadata_refs.append(metadata_ref) return metadata_refs def _validate_unique_server_name(context, session, name): if not CONF.osapi_compute_unique_server_name_scope: return lowername = name.lower() base_query = model_query(context, models.Instance, session=session, read_deleted='no').\ filter(func.lower(models.Instance.hostname) == lowername) if CONF.osapi_compute_unique_server_name_scope == 'project': instance_with_same_name = base_query.\ filter_by(project_id=context.project_id).\ count() elif CONF.osapi_compute_unique_server_name_scope == 'global': instance_with_same_name = base_query.count() else: msg = _('Unknown osapi_compute_unique_server_name_scope value: %s' ' Flag must be empty, "global" or' ' "project"') % CONF.osapi_compute_unique_server_name_scope LOG.warn(msg) return if instance_with_same_name > 0: raise exception.InstanceExists(name=lowername) def _handle_objects_related_type_conversions(values): """Make sure that certain things in values (which may have come from an objects.instance.Instance object) are in suitable form for the database. 
""" # NOTE(danms): Make sure IP addresses are passed as strings to # the database engine for key in ('access_ip_v4', 'access_ip_v6'): if key in values and values[key] is not None: values[key] = str(values[key]) datetime_keys = ('created_at', 'deleted_at', 'updated_at', 'launched_at', 'terminated_at') convert_objects_related_datetimes(values, *datetime_keys) def _check_instance_exists_in_project(context, session, instance_uuid): if not model_query(context, models.Instance, session=session, read_deleted="no", project_only=True).filter_by( uuid=instance_uuid).first(): raise exception.InstanceNotFound(instance_id=instance_uuid) @require_context def instance_create(context, values): """Create a new Instance record in the database. context - request context object values - dict containing column values. """ # NOTE(rpodolyaka): create the default security group, if it doesn't exist. # This must be done in a separate transaction, so that this one is not # aborted in case a concurrent one succeeds first and the unique constraint # for security group names is violated by a concurrent INSERT security_group_ensure_default(context) values = values.copy() values['metadata'] = _metadata_refs( values.get('metadata'), models.InstanceMetadata) values['system_metadata'] = _metadata_refs( values.get('system_metadata'), models.InstanceSystemMetadata) _handle_objects_related_type_conversions(values) instance_ref = models.Instance() if not values.get('uuid'): values['uuid'] = str(uuid.uuid4()) instance_ref['info_cache'] = models.InstanceInfoCache() info_cache = values.pop('info_cache', None) if info_cache is not None: instance_ref['info_cache'].update(info_cache) security_groups = values.pop('security_groups', []) instance_ref['extra'] = models.InstanceExtra() instance_ref['extra'].update( {'numa_topology': None, 'pci_requests': None, 'vcpu_model': None, }) instance_ref['extra'].update(values.pop('extra', {})) instance_ref.update(values) def _get_sec_group_models(session, 
security_groups): models = [] default_group = _security_group_ensure_default(context, session) if 'default' in security_groups: models.append(default_group) # Generate a new list, so we don't modify the original security_groups = [x for x in security_groups if x != 'default'] if security_groups: models.extend(_security_group_get_by_names(context, session, context.project_id, security_groups)) return models session = get_session() with session.begin(): if 'hostname' in values: _validate_unique_server_name(context, session, values['hostname']) instance_ref.security_groups = _get_sec_group_models(session, security_groups) session.add(instance_ref) # create the instance uuid to ec2_id mapping entry for instance ec2_instance_create(context, instance_ref['uuid']) return instance_ref def _instance_data_get_for_user(context, project_id, user_id, session=None): result = model_query(context, models.Instance, ( func.count(models.Instance.id), func.sum(models.Instance.vcpus), func.sum(models.Instance.memory_mb), ), session=session).\ filter_by(project_id=project_id) if user_id: result = result.filter_by(user_id=user_id).first() else: result = result.first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0, result[2] or 0) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def instance_destroy(context, instance_uuid, constraint=None): session = get_session() with session.begin(): if uuidutils.is_uuid_like(instance_uuid): instance_ref = _instance_get_by_uuid(context, instance_uuid, session=session) else: raise exception.InvalidUUID(instance_uuid) query = model_query(context, models.Instance, session=session).\ filter_by(uuid=instance_uuid) if constraint is not None: query = constraint.apply(models.Instance, query) count = query.soft_delete() if count == 0: raise exception.ConstraintNotMet() model_query(context, models.SecurityGroupInstanceAssociation, session=session).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() 
model_query(context, models.InstanceInfoCache, session=session).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() model_query(context, models.InstanceMetadata, session=session).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() model_query(context, models.InstanceFault, session=session).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() model_query(context, models.InstanceExtra, session=session).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() model_query(context, models.InstanceSystemMetadata, session=session).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() # NOTE(snikitin): We can't use model_query here, because there is no # column 'deleted' in 'tags' table. session.query(models.Tag).filter_by(resource_id=instance_uuid).delete() return instance_ref @require_context def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False): return _instance_get_by_uuid(context, uuid, columns_to_join=columns_to_join, use_slave=use_slave) def _instance_get_by_uuid(context, uuid, session=None, columns_to_join=None, use_slave=False): result = _build_instance_get(context, session=session, columns_to_join=columns_to_join, use_slave=use_slave).\ filter_by(uuid=uuid).\ first() if not result: raise exception.InstanceNotFound(instance_id=uuid) return result @require_context def instance_get(context, instance_id, columns_to_join=None): try: result = _build_instance_get(context, columns_to_join=columns_to_join ).filter_by(id=instance_id).first() if not result: raise exception.InstanceNotFound(instance_id=instance_id) return result except db_exc.DBError: # NOTE(sdague): catch all in case the db engine chokes on the # id because it's too long of an int to store. 
msg = _("Invalid instance id %s in request") % instance_id LOG.warn(msg) raise exception.InvalidID(id=instance_id) def _build_instance_get(context, session=None, columns_to_join=None, use_slave=False): query = model_query(context, models.Instance, session=session, project_only=True, use_slave=use_slave).\ options(joinedload_all('security_groups.rules')).\ options(joinedload('info_cache')) if columns_to_join is None: columns_to_join = ['metadata', 'system_metadata'] for column in columns_to_join: if column in ['info_cache', 'security_groups']: # Already always joined above continue if 'extra.' in column: query = query.options(undefer(column)) else: query = query.options(joinedload(column)) # NOTE(alaski) Stop lazy loading of columns not needed. for col in ['metadata', 'system_metadata']: if col not in columns_to_join: query = query.options(noload(col)) return query def _instances_fill_metadata(context, instances, manual_joins=None, use_slave=False): """Selectively fill instances with manually-joined metadata. Note that instance will be converted to a dict. 
:param context: security context :param instances: list of instances to fill :param manual_joins: list of tables to manually join (can be any combination of 'metadata' and 'system_metadata' or None to take the default of both) """ uuids = [inst['uuid'] for inst in instances] if manual_joins is None: manual_joins = ['metadata', 'system_metadata'] meta = collections.defaultdict(list) if 'metadata' in manual_joins: for row in _instance_metadata_get_multi(context, uuids, use_slave=use_slave): meta[row['instance_uuid']].append(row) sys_meta = collections.defaultdict(list) if 'system_metadata' in manual_joins: for row in _instance_system_metadata_get_multi(context, uuids, use_slave=use_slave): sys_meta[row['instance_uuid']].append(row) pcidevs = collections.defaultdict(list) if 'pci_devices' in manual_joins: for row in _instance_pcidevs_get_multi(context, uuids): pcidevs[row['instance_uuid']].append(row) filled_instances = [] for inst in instances: inst = dict(inst) inst['system_metadata'] = sys_meta[inst['uuid']] inst['metadata'] = meta[inst['uuid']] if 'pci_devices' in manual_joins: inst['pci_devices'] = pcidevs[inst['uuid']] filled_instances.append(inst) return filled_instances def _manual_join_columns(columns_to_join): """Separate manually joined columns from columns_to_join If columns_to_join contains 'metadata', 'system_metadata', or 'pci_devices' those columns are removed from columns_to_join and added to a manual_joins list to be used with the _instances_fill_metadata method. The columns_to_join formal parameter is copied and not modified, the return tuple has the modified columns_to_join list to be used with joinedload in a model query. :param:columns_to_join: List of columns to join in a model query. 
:return: tuple of (manual_joins, columns_to_join) """ manual_joins = [] columns_to_join_new = copy.copy(columns_to_join) for column in ('metadata', 'system_metadata', 'pci_devices'): if column in columns_to_join_new: columns_to_join_new.remove(column) manual_joins.append(column) return manual_joins, columns_to_join_new @require_context def instance_get_all(context, columns_to_join=None): if columns_to_join is None: columns_to_join_new = ['info_cache', 'security_groups'] manual_joins = ['metadata', 'system_metadata'] else: manual_joins, columns_to_join_new = ( _manual_join_columns(columns_to_join)) query = model_query(context, models.Instance) for column in columns_to_join_new: query = query.options(joinedload(column)) if not context.is_admin: # If we're not admin context, add appropriate filter.. if context.project_id: query = query.filter_by(project_id=context.project_id) else: query = query.filter_by(user_id=context.user_id) instances = query.all() return _instances_fill_metadata(context, instances, manual_joins) @require_context def instance_get_all_by_filters(context, filters, sort_key, sort_dir, limit=None, marker=None, columns_to_join=None, use_slave=False): """Return instances matching all filters sorted by the primary key. See instance_get_all_by_filters_sort for more information. """ # Invoke the API with the multiple sort keys and directions using the # single sort key/direction return instance_get_all_by_filters_sort(context, filters, limit=limit, marker=marker, columns_to_join=columns_to_join, use_slave=use_slave, sort_keys=[sort_key], sort_dirs=[sort_dir]) @require_context def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None, columns_to_join=None, use_slave=False, sort_keys=None, sort_dirs=None): """Return instances that match all filters sorted the the given keys. Deleted instances will be returned by default, unless there's a filter that says otherwise. 
    Depending on the name of a filter, matching for that filter is
    performed using either exact matching or as regular expression
    matching. Exact matching is applied for the following filters::

    |   ['project_id', 'user_id', 'image_ref',
    |    'vm_state', 'instance_type_id', 'uuid',
    |    'metadata', 'host', 'system_metadata']


    A third type of filter (also using exact matching), filters
    based on instance metadata tags when supplied under a special
    key named 'filter'::

    |   filters = {
    |       'filter': [
    |           {'name': 'tag-key', 'value': '<metakey>'},
    |           {'name': 'tag-value', 'value': '<metaval>'},
    |           {'name': 'tag:<metakey>', 'value': '<metaval>'}
    |       ]
    |   }

    Special keys are used to tweak the query further::

    |   'changes-since' - only return instances updated after
    |   'deleted' - only return (or exclude) deleted instances
    |   'soft_deleted' - modify behavior of 'deleted' to either
    |                    include or exclude instances whose
    |                    vm_state is SOFT_DELETED.

    A fourth type of filter (also using exact matching), filters
    based on instance tags (not metadata tags). There are two types
    of these tags:

    `tag` -- One or more strings that will be used to filter results
            in an AND expression.

    `tag-any` -- One or more strings that will be used to filter results
            in an OR expression.

    Tags should be represented as list::

    |    filters = {
    |        'tag': [some-tag, some-another-tag],
    |        'tag-any': [some-any-tag, some-another-any-tag]
    |    }

    """
    # NOTE(mriedem): If the limit is 0 there is no point in even going
    # to the database since nothing is going to be returned anyway.
    if limit == 0:
        return []

    sort_keys, sort_dirs = process_sort_params(sort_keys,
                                               sort_dirs,
                                               default_dir='desc')

    # A slave (read-only) connection can only be used when one is configured.
    if CONF.database.slave_connection == '':
        use_slave = False

    session = get_session(use_slave=use_slave)

    if columns_to_join is None:
        columns_to_join_new = ['info_cache', 'security_groups']
        manual_joins = ['metadata', 'system_metadata']
    else:
        manual_joins, columns_to_join_new = (
            _manual_join_columns(columns_to_join))

    query_prefix = session.query(models.Instance)
    for column in columns_to_join_new:
        if 'extra.' in column:
            # Columns on the instance_extra table are deferred by default;
            # undefer them when explicitly requested.
            query_prefix = query_prefix.options(undefer(column))
        else:
            query_prefix = query_prefix.options(joinedload(column))

    # Note: order_by is done in the sqlalchemy.utils.py paginate_query(),
    # no need to do it here as well

    # Make a copy of the filters dictionary to use going forward, as we'll
    # be modifying it and we shouldn't affect the caller's use of it.
    filters = filters.copy()

    if 'changes-since' in filters:
        changes_since = timeutils.normalize_time(filters['changes-since'])
        query_prefix = query_prefix.\
            filter(models.Instance.updated_at >= changes_since)

    deleted = False
    if 'deleted' in filters:
        # Instances can be soft or hard deleted and the query needs to
        # include or exclude both
        deleted = filters.pop('deleted')
        if deleted:
            if filters.pop('soft_deleted', True):
                delete = or_(
                    models.Instance.deleted == models.Instance.id,
                    models.Instance.vm_state == vm_states.SOFT_DELETED
                    )
                query_prefix = query_prefix.\
                    filter(delete)
            else:
                query_prefix = query_prefix.\
                    filter(models.Instance.deleted == models.Instance.id)
        else:
            query_prefix = query_prefix.\
                filter_by(deleted=0)
            if not filters.pop('soft_deleted', False):
                # It would be better to have vm_state not be nullable
                # but until then we test it explicitly as a workaround.
                not_soft_deleted = or_(
                    models.Instance.vm_state != vm_states.SOFT_DELETED,
                    models.Instance.vm_state == null()
                    )
                query_prefix = query_prefix.filter(not_soft_deleted)

    if 'cleaned' in filters:
        if filters.pop('cleaned'):
            query_prefix = query_prefix.filter(models.Instance.cleaned == 1)
        else:
            query_prefix = query_prefix.filter(models.Instance.cleaned == 0)

    if 'tag' in filters:
        tags = filters.pop('tag')
        # We build a JOIN ladder expression for each tag, JOIN'ing
        # the first tag to the instances table, and each subsequent
        # tag to the last JOIN'd tags table
        first_tag = tags.pop(0)
        query_prefix = query_prefix.join(models.Instance.tags)
        query_prefix = query_prefix.filter(models.Tag.tag == first_tag)

        for tag in tags:
            tag_alias = aliased(models.Tag)
            query_prefix = query_prefix.join(tag_alias,
                                             models.Instance.tags)
            query_prefix = query_prefix.filter(tag_alias.tag == tag)

    if 'tag-any' in filters:
        tags = filters.pop('tag-any')
        tag_alias = aliased(models.Tag)
        query_prefix = query_prefix.join(tag_alias, models.Instance.tags)
        query_prefix = query_prefix.filter(tag_alias.tag.in_(tags))

    if not context.is_admin:
        # If we're not admin context, add appropriate filter..
        if context.project_id:
            filters['project_id'] = context.project_id
        else:
            filters['user_id'] = context.user_id

    # Filters for exact matches that we can do along with the SQL query...
    # For other filters that don't match this, we will do regexp matching
    exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
                                'vm_state', 'instance_type_id', 'uuid',
                                'metadata', 'host', 'task_state',
                                'system_metadata']

    # Filter the query
    query_prefix = _exact_instance_filter(query_prefix,
                                filters, exact_match_filter_names)

    query_prefix = _regex_instance_filter(query_prefix, filters)

    query_prefix = _tag_instance_filter(context, query_prefix, filters)

    # paginate query
    if marker is not None:
        try:
            # The marker may reference a deleted instance, so look it up
            # with an elevated, read-deleted context.
            marker = _instance_get_by_uuid(
                    context.elevated(read_deleted='yes'), marker,
                    session=session)
        except exception.InstanceNotFound:
            raise exception.MarkerNotFound(marker)
    try:
        query_prefix = sqlalchemyutils.paginate_query(query_prefix,
                               models.Instance, limit,
                               sort_keys,
                               marker=marker,
                               sort_dirs=sort_dirs)
    except db_exc.InvalidSortKey:
        raise exception.InvalidSortKey()

    return _instances_fill_metadata(context, query_prefix.all(), manual_joins)


def _tag_instance_filter(context, query, filters):
    """Applies tag filtering to an Instance query.

    Returns the updated query.  This method alters filters to remove
    keys that are tags.
This filters on resources by tags - this method assumes that the caller will take care of access control :param context: request context object :param query: query to apply filters to :param filters: dictionary of filters """ if filters.get('filter') is None: return query model = models.Instance model_metadata = models.InstanceMetadata model_uuid = model_metadata.instance_uuid or_query = None def _to_list(val): if isinstance(val, dict): val = val.values() if not isinstance(val, (tuple, list, set)): val = (val,) return val for filter_block in filters['filter']: if not isinstance(filter_block, dict): continue filter_name = filter_block.get('name') if filter_name is None: continue tag_name = filter_name[4:] tag_val = _to_list(filter_block.get('value')) if filter_name.startswith('tag-'): if tag_name not in ['key', 'value']: msg = _("Invalid field name: %s") % tag_name raise exception.InvalidParameterValue(err=msg) subq = getattr(model_metadata, tag_name).in_(tag_val) or_query = subq if or_query is None else or_(or_query, subq) elif filter_name.startswith('tag:'): subq = model_query(context, model_metadata, (model_uuid,), session=query.session).\ filter_by(key=tag_name).\ filter(model_metadata.value.in_(tag_val)) query = query.filter(model.uuid.in_(subq)) if or_query is not None: subq = model_query(context, model_metadata, (model_uuid,), session=query.session).\ filter(or_query) query = query.filter(model.uuid.in_(subq)) return query def _get_regexp_op_for_connection(db_connection): db_string = db_connection.split(':')[0].split('+')[0] regexp_op_map = { 'postgresql': '~', 'mysql': 'REGEXP', 'sqlite': 'REGEXP' } return regexp_op_map.get(db_string, 'LIKE') def _regex_instance_filter(query, filters): """Applies regular expression filtering to an Instance query. Returns the updated query. 
:param query: query to apply filters to :param filters: dictionary of filters with regex values """ model = models.Instance db_regexp_op = _get_regexp_op_for_connection(CONF.database.connection) for filter_name in filters: try: column_attr = getattr(model, filter_name) except AttributeError: continue if 'property' == type(column_attr).__name__: continue filter_val = filters[filter_name] # Sometimes the REGEX filter value is not a string if not isinstance(filter_val, six.string_types): filter_val = str(filter_val) if db_regexp_op == 'LIKE': query = query.filter(column_attr.op(db_regexp_op)( u'%' + filter_val + u'%')) else: query = query.filter(column_attr.op(db_regexp_op)( filter_val)) return query def _exact_instance_filter(query, filters, legal_keys): """Applies exact match filtering to an Instance query. Returns the updated query. Modifies filters argument to remove filters consumed. :param query: query to apply filters to :param filters: dictionary of filters; values that are lists, tuples, sets, or frozensets cause an 'IN' test to be performed, while exact matching ('==' operator) is used for other values :param legal_keys: list of keys to apply exact filtering to """ filter_dict = {} model = models.Instance # Walk through all the keys for key in legal_keys: # Skip ones we're not filtering on if key not in filters: continue # OK, filtering on this key; what value do we search for? 
value = filters.pop(key) if key in ('metadata', 'system_metadata'): column_attr = getattr(model, key) if isinstance(value, list): for item in value: for k, v in item.iteritems(): query = query.filter(column_attr.any(key=k)) query = query.filter(column_attr.any(value=v)) else: for k, v in value.items(): query = query.filter(column_attr.any(key=k)) query = query.filter(column_attr.any(value=v)) elif isinstance(value, (list, tuple, set, frozenset)): # Looking for values in a list; apply to query directly column_attr = getattr(model, key) query = query.filter(column_attr.in_(value)) else: # OK, simple exact match; save for later filter_dict[key] = value # Apply simple exact matches if filter_dict: query = query.filter_by(**filter_dict) return query def process_sort_params(sort_keys, sort_dirs, default_keys=['created_at', 'id'], default_dir='asc'): """Process the sort parameters to include default keys. Creates a list of sort keys and a list of sort directions. Adds the default keys to the end of the list if they are not already included. When adding the default keys to the sort keys list, the associated direction is: 1) The first element in the 'sort_dirs' list (if specified), else 2) 'default_dir' value (Note that 'asc' is the default value since this is the default in sqlalchemy.utils.paginate_query) :param sort_keys: List of sort keys to include in the processed list :param sort_dirs: List of sort directions to include in the processed list :param default_keys: List of sort keys that need to be included in the processed list, they are added at the end of the list if not already specified. 
:param default_dir: Sort direction associated with each of the default keys that are not supplied, used when they are added to the processed list :returns: list of sort keys, list of sort directions :raise exception.InvalidInput: If more sort directions than sort keys are specified or if an invalid sort direction is specified """ # Determine direction to use for when adding default keys if sort_dirs and len(sort_dirs) != 0: default_dir_value = sort_dirs[0] else: default_dir_value = default_dir # Create list of keys (do not modify the input list) if sort_keys: result_keys = list(sort_keys) else: result_keys = [] # If a list of directions is not provided, use the default sort direction # for all provided keys if sort_dirs: result_dirs = [] # Verify sort direction for sort_dir in sort_dirs: if sort_dir not in ('asc', 'desc'): msg = _("Unknown sort direction, must be 'desc' or 'asc'") raise exception.InvalidInput(reason=msg) result_dirs.append(sort_dir) else: result_dirs = [default_dir_value for _sort_key in result_keys] # Ensure that the key and direction length match while len(result_dirs) < len(result_keys): result_dirs.append(default_dir_value) # Unless more direction are specified, which is an error if len(result_dirs) > len(result_keys): msg = _("Sort direction size exceeds sort key size") raise exception.InvalidInput(reason=msg) # Ensure defaults are included for key in default_keys: if key not in result_keys: result_keys.append(key) result_dirs.append(default_dir_value) return result_keys, result_dirs @require_context def instance_get_active_by_window_joined(context, begin, end=None, project_id=None, host=None, use_slave=False, columns_to_join=None): """Return instances and joins that were active during window.""" session = get_session(use_slave=use_slave) query = session.query(models.Instance) if columns_to_join is None: columns_to_join_new = ['info_cache', 'security_groups'] manual_joins = ['metadata', 'system_metadata'] else: manual_joins, 
columns_to_join_new = ( _manual_join_columns(columns_to_join)) for column in columns_to_join_new: if 'extra.' in column: query = query.options(undefer(column)) else: query = query.options(joinedload(column)) query = query.filter(or_(models.Instance.terminated_at == null(), models.Instance.terminated_at > begin)) if end: query = query.filter(models.Instance.launched_at < end) if project_id: query = query.filter_by(project_id=project_id) if host: query = query.filter_by(host=host) return _instances_fill_metadata(context, query.all(), manual_joins) def _instance_get_all_query(context, project_only=False, joins=None, use_slave=False): if joins is None: joins = ['info_cache', 'security_groups'] query = model_query(context, models.Instance, project_only=project_only, use_slave=use_slave) for column in joins: if 'extra.' in column: query = query.options(undefer(column)) else: query = query.options(joinedload(column)) return query def instance_get_all_by_host(context, host, columns_to_join=None, use_slave=False): return _instances_fill_metadata(context, _instance_get_all_query(context, use_slave=use_slave).filter_by(host=host).all(), manual_joins=columns_to_join, use_slave=use_slave) def _instance_get_all_uuids_by_host(context, host, session=None): """Return a list of the instance uuids on a given host. Returns a list of UUIDs, not Instance model objects. This internal version allows you to specify a session object as a kwarg. 
""" uuids = [] for tuple in model_query(context, models.Instance, (models.Instance.uuid,), read_deleted="no", session=session).\ filter_by(host=host).\ all(): uuids.append(tuple[0]) return uuids def instance_get_all_by_host_and_node(context, host, node, columns_to_join=None): if columns_to_join is None: manual_joins = [] else: candidates = ['system_metadata', 'metadata'] manual_joins = [x for x in columns_to_join if x in candidates] columns_to_join = list(set(columns_to_join) - set(candidates)) return _instances_fill_metadata(context, _instance_get_all_query( context, joins=columns_to_join).filter_by(host=host). filter_by(node=node).all(), manual_joins=manual_joins) def instance_get_all_by_host_and_not_type(context, host, type_id=None): return _instances_fill_metadata(context, _instance_get_all_query(context).filter_by(host=host). filter(models.Instance.instance_type_id != type_id).all()) def instance_get_all_by_grantee_security_groups(context, group_ids): return _instances_fill_metadata(context, _instance_get_all_query(context). join(models.Instance.security_groups). filter(models.SecurityGroup.rules.any( models.SecurityGroupIngressRule.group_id.in_(group_ids))). all()) @require_context def instance_floating_address_get_all(context, instance_uuid): if not uuidutils.is_uuid_like(instance_uuid): raise exception.InvalidUUID(uuid=instance_uuid) floating_ips = model_query(context, models.FloatingIp, (models.FloatingIp.address,)).\ join(models.FloatingIp.fixed_ip).\ filter_by(instance_uuid=instance_uuid) return [floating_ip.address for floating_ip in floating_ips] # NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0. 
def instance_get_all_hung_in_rebooting(context, reboot_window):
    """Return instances stuck in REBOOTING for longer than reboot_window.

    :param reboot_window: age threshold in seconds
    """
    reboot_window = (timeutils.utcnow() -
                     datetime.timedelta(seconds=reboot_window))

    # NOTE(danms): this is only used in the _poll_rebooting_instances()
    # call in compute/manager, so we can avoid the metadata lookups
    # explicitly
    return _instances_fill_metadata(context,
        model_query(context, models.Instance).
            filter(models.Instance.updated_at <= reboot_window).
            filter_by(task_state=task_states.REBOOTING).all(),
        manual_joins=[])


def _retry_instance_update():
    """Wrap with oslo_db_api.wrap_db_retry, and also retry on
    UnknownInstanceUpdateConflict.
    """
    exception_checker = \
        lambda exc: isinstance(exc, (exception.UnknownInstanceUpdateConflict,))
    return oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
                                     exception_checker=exception_checker)


@require_context
@_retry_instance_update()
def instance_update(context, instance_uuid, values, expected=None):
    """Set the given properties on an instance and return the result."""
    session = get_session()
    with session.begin():
        return _instance_update(context, session, instance_uuid,
                                values, expected)


@require_context
@_retry_instance_update()
def instance_update_and_get_original(context, instance_uuid, values,
                                     columns_to_join=None, expected=None):
    """Set the given properties on an instance and update it. Return
    a shallow copy of the original instance reference, as well as the
    updated one.

    :param context: = request context object
    :param instance_uuid: = instance uuid
    :param values: = dict containing column values

    If "expected_task_state" exists in values, the update can only happen
    when the task state before update matches expected_task_state. Otherwise
    a UnexpectedTaskStateError is thrown.

    :returns: a tuple of the form (old_instance_ref, new_instance_ref)

    Raises NotFound if instance does not exist.
    """
    session = get_session()

    with session.begin():
        instance_ref = _instance_get_by_uuid(context, instance_uuid,
                                             columns_to_join=columns_to_join,
                                             session=session)
        return (copy.copy(instance_ref), _instance_update(
            context, session, instance_uuid, values, expected,
            original=instance_ref))


# NOTE(danms): This updates the instance's metadata list in-place and in
# the database to avoid stale data and refresh issues. It assumes the
# delete=True behavior of instance_metadata_update(...)
def _instance_metadata_update_in_place(context, instance, metadata_type,
                                       model, metadata, session):
    metadata = dict(metadata)
    to_delete = []
    for keyvalue in instance[metadata_type]:
        key = keyvalue['key']
        if key in metadata:
            # Key still present: update its value in place and consume it.
            keyvalue['value'] = metadata.pop(key)
        elif key not in metadata:
            # NOTE(review): this elif is always true when reached (the key
            # was not popped above) — it behaves as a plain 'else'.
            to_delete.append(keyvalue)

    # NOTE: we have to hard_delete here otherwise we will get more than one
    # system_metadata record when we read deleted for an instance;
    # regular metadata doesn't have the same problem because we don't
    # allow reading deleted regular metadata anywhere.
    if metadata_type == 'system_metadata':
        for condemned in to_delete:
            session.delete(condemned)
            instance[metadata_type].remove(condemned)
    else:
        for condemned in to_delete:
            condemned.soft_delete(session=session)

    # Whatever remains in 'metadata' is new keys to insert.
    for key, value in metadata.items():
        newitem = model()
        newitem.update({'key': key, 'value': value,
                        'instance_uuid': instance['uuid']})
        session.add(newitem)
        instance[metadata_type].append(newitem)


def _instance_update(context, session, instance_uuid, values, expected,
                     original=None):
    if not uuidutils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(instance_uuid)

    if expected is None:
        expected = {}
    else:
        # Coerce all single values to singleton lists
        expected = {k: [None] if v is None else sqlalchemyutils.to_list(v)
                       for (k, v) in six.iteritems(expected)}

    # Extract 'expected_' values from values dict, as these aren't actually
    # updates
    for field in ('task_state', 'vm_state'):
        expected_field = 'expected_%s' % field
        if expected_field in values:
            value = values.pop(expected_field, None)
            # Coerce all single values to singleton lists
            if value is None:
                expected[field] = [None]
            else:
                expected[field] = sqlalchemyutils.to_list(value)

    # Values which need to be updated separately
    metadata = values.pop('metadata', None)
    system_metadata = values.pop('system_metadata', None)

    _handle_objects_related_type_conversions(values)

    # Hostname is potentially unique, but this is enforced in code rather
    # than the DB. The query below races, but the number of users of
    # osapi_compute_unique_server_name_scope is small, and a robust fix
    # will be complex. This is intentionally left as is for the moment.
    if 'hostname' in values:
        _validate_unique_server_name(context, session, values['hostname'])

    compare = models.Instance(uuid=instance_uuid, **expected)
    try:
        instance_ref = model_query(context, models.Instance,
                                   project_only=True, session=session).\
                       update_on_match(compare, 'uuid', values)
    except update_match.NoRowsMatched:
        # Update failed. Try to find why and raise a specific error.

        # We should get here only because our expected values were not current
        # when update_on_match executed. Having failed, we now have a hint that
        # the values are out of date and should check them.

        # This code is made more complex because we are using repeatable reads.
        # If we have previously read the original instance in the current
        # transaction, reading it again will return the same data, even though
        # the above update failed because it has changed: it is not possible to
        # determine what has changed in this transaction. In this case we raise
        # UnknownInstanceUpdateConflict, which will cause the operation to be
        # retried in a new transaction.

        # Because of the above, if we have previously read the instance in the
        # current transaction it will have been passed as 'original', and there
        # is no point refreshing it. If we have not previously read the
        # instance, we can fetch it here and we will get fresh data.
        if original is None:
            original = _instance_get_by_uuid(context, instance_uuid,
                                             session=session)

        conflicts_expected = {}
        conflicts_actual = {}
        for (field, expected_values) in six.iteritems(expected):
            actual = original[field]
            if actual not in expected_values:
                conflicts_expected[field] = expected_values
                conflicts_actual[field] = actual

        # Exception properties
        exc_props = {
            'instance_uuid': instance_uuid,
            'expected': conflicts_expected,
            'actual': conflicts_actual
        }

        # There was a conflict, but something (probably the MySQL read view,
        # but possibly an exceptionally unlikely second race) is preventing us
        # from seeing what it is. When we go round again we'll get a fresh
        # transaction and a fresh read view.
        if len(conflicts_actual) == 0:
            raise exception.UnknownInstanceUpdateConflict(**exc_props)

        # Task state gets special handling for convenience.
We raise the # specific error UnexpectedDeletingTaskStateError or # UnexpectedTaskStateError as appropriate if 'task_state' in conflicts_actual: conflict_task_state = conflicts_actual['task_state'] if conflict_task_state == task_states.DELETING: exc = exception.UnexpectedDeletingTaskStateError else: exc = exception.UnexpectedTaskStateError # Everything else is an InstanceUpdateConflict else: exc = exception.InstanceUpdateConflict raise exc(**exc_props) if metadata is not None: _instance_metadata_update_in_place(context, instance_ref, 'metadata', models.InstanceMetadata, metadata, session) if system_metadata is not None: _instance_metadata_update_in_place(context, instance_ref, 'system_metadata', models.InstanceSystemMetadata, system_metadata, session) return instance_ref def instance_add_security_group(context, instance_uuid, security_group_id): """Associate the given security group with the given instance.""" sec_group_ref = models.SecurityGroupInstanceAssociation() sec_group_ref.update({'instance_uuid': instance_uuid, 'security_group_id': security_group_id}) sec_group_ref.save() @require_context def instance_remove_security_group(context, instance_uuid, security_group_id): """Disassociate the given security group from the given instance.""" model_query(context, models.SecurityGroupInstanceAssociation).\ filter_by(instance_uuid=instance_uuid).\ filter_by(security_group_id=security_group_id).\ soft_delete() ################### @require_context def instance_info_cache_get(context, instance_uuid): """Gets an instance info cache from the table. :param instance_uuid: = uuid of the info cache's instance """ return model_query(context, models.InstanceInfoCache).\ filter_by(instance_uuid=instance_uuid).\ first() @require_context def instance_info_cache_update(context, instance_uuid, values): """Update an instance info cache record in the table. 
:param instance_uuid: = uuid of info cache's instance :param values: = dict containing column values to update """ convert_objects_related_datetimes(values) session = get_session() with session.begin(): info_cache = model_query(context, models.InstanceInfoCache, session=session).\ filter_by(instance_uuid=instance_uuid).\ first() if info_cache and info_cache['deleted']: raise exception.InstanceInfoCacheNotFound( instance_uuid=instance_uuid) elif not info_cache: # NOTE(tr3buchet): just in case someone blows away an instance's # cache entry, re-create it. info_cache = models.InstanceInfoCache() values['instance_uuid'] = instance_uuid try: info_cache.update(values) except db_exc.DBDuplicateEntry: # NOTE(sirp): Possible race if two greenthreads attempt to # recreate the instance cache entry at the same time. First one # wins. pass return info_cache @require_context def instance_info_cache_delete(context, instance_uuid): """Deletes an existing instance_info_cache record :param instance_uuid: = uuid of the instance tied to the cache record """ model_query(context, models.InstanceInfoCache).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() ################### def _instance_extra_create(context, values): inst_extra_ref = models.InstanceExtra() inst_extra_ref.update(values) inst_extra_ref.save() return inst_extra_ref def instance_extra_update_by_uuid(context, instance_uuid, values): rows_updated = model_query(context, models.InstanceExtra).\ filter_by(instance_uuid=instance_uuid).\ update(values) if not rows_updated: LOG.debug("Created instance_extra for %s" % instance_uuid) create_values = copy.copy(values) create_values["instance_uuid"] = instance_uuid _instance_extra_create(context, create_values) rows_updated = 1 return rows_updated def instance_extra_get_by_instance_uuid(context, instance_uuid, columns=None): query = model_query(context, models.InstanceExtra).\ filter_by(instance_uuid=instance_uuid) if columns is None: columns = ['numa_topology', 'pci_requests', 
'flavor', 'vcpu_model', 'migration_context'] for column in columns: query = query.options(undefer(column)) instance_extra = query.first() return instance_extra ################### @require_context def key_pair_create(context, values): try: key_pair_ref = models.KeyPair() key_pair_ref.update(values) key_pair_ref.save() return key_pair_ref except db_exc.DBDuplicateEntry: raise exception.KeyPairExists(key_name=values['name']) @require_context def key_pair_destroy(context, user_id, name): result = model_query(context, models.KeyPair).\ filter_by(user_id=user_id).\ filter_by(name=name).\ soft_delete() if not result: raise exception.KeypairNotFound(user_id=user_id, name=name) @require_context def key_pair_get(context, user_id, name): result = model_query(context, models.KeyPair).\ filter_by(user_id=user_id).\ filter_by(name=name).\ first() if not result: raise exception.KeypairNotFound(user_id=user_id, name=name) return result @require_context def key_pair_get_all_by_user(context, user_id): return model_query(context, models.KeyPair, read_deleted="no").\ filter_by(user_id=user_id).\ all() @require_context def key_pair_count_by_user(context, user_id): return model_query(context, models.KeyPair, read_deleted="no").\ filter_by(user_id=user_id).\ count() ################### def network_associate(context, project_id, network_id=None, force=False): """Associate a project with a network. 
    called by project_get_networks under certain conditions
    and network manager add_network_to_project()

    only associate if the project doesn't already have a network
    or if force is True

    force solves race condition where a fresh project has multiple instance
    builds simultaneously picked up by multiple network hosts which attempt
    to associate the project with multiple networks
    force should only be used as a direct consequence of user request
    all automated requests should not use force
    """
    session = get_session()
    with session.begin():

        def network_query(project_filter, id=None):
            # Fetch one non-deleted network for the given project filter,
            # locked FOR UPDATE to serialize concurrent associations.
            filter_kwargs = {'project_id': project_filter}
            if id is not None:
                filter_kwargs['id'] = id
            return model_query(context, models.Network, session=session,
                               read_deleted="no").\
                filter_by(**filter_kwargs).\
                with_lockmode('update').\
                first()

        if not force:
            # find out if project has a network
            network_ref = network_query(project_id)

        if force or not network_ref:
            # in force mode or project doesn't have a network so associate
            # with a new network

            # get new network
            network_ref = network_query(None, network_id)
            if not network_ref:
                raise exception.NoMoreNetworks()

            # associate with network
            # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
            #             then this has concurrency issues
            network_ref['project_id'] = project_id
            session.add(network_ref)
    return network_ref


def _network_ips_query(context, network_id):
    """Base query for non-deleted fixed IPs on a network."""
    return model_query(context, models.FixedIp, read_deleted="no").\
        filter_by(network_id=network_id)


def network_count_reserved_ips(context, network_id):
    """Return the number of reserved, non-deleted IPs on a network."""
    return _network_ips_query(context, network_id).\
        filter_by(reserved=True).\
        count()


def network_create_safe(context, values):
    """Create a network; raises DuplicateVlan on a duplicate entry."""
    network_ref = models.Network()
    network_ref['uuid'] = str(uuid.uuid4())
    network_ref.update(values)

    try:
        network_ref.save()
        return network_ref
    except db_exc.DBDuplicateEntry:
        raise exception.DuplicateVlan(vlan=values['vlan'])


def network_delete_safe(context, network_id):
    """Delete a network, refusing if any of its fixed IPs are allocated."""
    session = get_session()
    with session.begin():
        result = model_query(context, models.FixedIp, session=session,
                             read_deleted="no").\
            filter_by(network_id=network_id).\
            filter_by(allocated=True).\
            count()
        if result != 0:
            raise exception.NetworkInUse(network_id=network_id)
        network_ref = _network_get(context, network_id=network_id,
                                   session=session)

        model_query(context, models.FixedIp, session=session,
                    read_deleted="no").\
            filter_by(network_id=network_id).\
            soft_delete()

        session.delete(network_ref)


def network_disassociate(context, network_id, disassociate_host,
                         disassociate_project):
    """Clear the host and/or project association of a network."""
    net_update = {}
    if disassociate_project:
        net_update['project_id'] = None
    if disassociate_host:
        net_update['host'] = None
    network_update(context, network_id, net_update)


def _network_get(context, network_id, session=None, project_only='allow_none'):
    result = model_query(context, models.Network, session=session,
                         project_only=project_only).\
        filter_by(id=network_id).\
        first()

    if not result:
        raise exception.NetworkNotFound(network_id=network_id)

    return result


@require_context
def network_get(context, network_id, project_only='allow_none'):
    """Return a network by id; raises NetworkNotFound if absent."""
    return _network_get(context, network_id, project_only=project_only)


@require_context
def network_get_all(context, project_only):
    """Return all non-deleted networks; raises NoNetworksFound if none."""
    result = model_query(context, models.Network, read_deleted="no",
                         project_only=project_only).all()

    if not result:
        raise exception.NoNetworksFound()

    return result


@require_context
def network_get_all_by_uuids(context, network_uuids, project_only):
    """Return the networks matching the given uuids, requiring all to exist."""
    result = model_query(context, models.Network, read_deleted="no",
                         project_only=project_only).\
        filter(models.Network.uuid.in_(network_uuids)).\
        all()

    if not result:
        raise exception.NoNetworksFound()

    # check if the result contains all the networks
    # we are looking for
    for network_uuid in network_uuids:
        for network in result:
            if network['uuid'] == network_uuid:
                break
        else:
            if project_only:
                raise exception.NetworkNotFoundForProject(
                    network_uuid=network_uuid, project_id=context.project_id)
            raise exception.NetworkNotFound(network_id=network_uuid)

    return result


def _get_associated_fixed_ips_query(network_id, host=None):
    # NOTE(vish): The ugly joins here are to solve a performance issue and
    #             should be removed once we can add and remove leases
    #             without regenerating the whole list
    vif_and = and_(models.VirtualInterface.id ==
                   models.FixedIp.virtual_interface_id,
                   models.VirtualInterface.deleted == 0)
    inst_and = and_(models.Instance.uuid == models.FixedIp.instance_uuid,
                    models.Instance.deleted == 0)
    session = get_session()
    # NOTE(vish): This subquery left joins the minimum interface id for each
    #             instance. If the join succeeds (i.e. the 11th column is not
    #             null), then the fixed ip is on the first interface.
    subq = session.query(func.min(models.VirtualInterface.id).label("id"),
                         models.VirtualInterface.instance_uuid).\
        group_by(models.VirtualInterface.instance_uuid).subquery()
    subq_and = and_(subq.c.id == models.FixedIp.virtual_interface_id,
                    subq.c.instance_uuid ==
                    models.VirtualInterface.instance_uuid)
    query = session.query(models.FixedIp.address,
                          models.FixedIp.instance_uuid,
                          models.FixedIp.network_id,
                          models.FixedIp.virtual_interface_id,
                          models.VirtualInterface.address,
                          models.Instance.hostname,
                          models.Instance.updated_at,
                          models.Instance.created_at,
                          models.FixedIp.allocated,
                          models.FixedIp.leased,
                          subq.c.id).\
        filter(models.FixedIp.deleted == 0).\
        filter(models.FixedIp.network_id == network_id).\
        join((models.VirtualInterface, vif_and)).\
        join((models.Instance, inst_and)).\
        outerjoin((subq, subq_and)).\
        filter(models.FixedIp.instance_uuid != null()).\
        filter(models.FixedIp.virtual_interface_id != null())
    if host:
        query = query.filter(models.Instance.host == host)
    return query


def network_get_associated_fixed_ips(context, network_id, host=None):
    # FIXME(sirp): since this returns fixed_ips, this would be better named
    # fixed_ip_get_all_by_network.
query = _get_associated_fixed_ips_query(network_id, host) result = query.all() data = [] for datum in result: cleaned = {} cleaned['address'] = datum[0] cleaned['instance_uuid'] = datum[1] cleaned['network_id'] = datum[2] cleaned['vif_id'] = datum[3] cleaned['vif_address'] = datum[4] cleaned['instance_hostname'] = datum[5] cleaned['instance_updated'] = datum[6] cleaned['instance_created'] = datum[7] cleaned['allocated'] = datum[8] cleaned['leased'] = datum[9] # NOTE(vish): default_route is True if this fixed ip is on the first # interface its instance. cleaned['default_route'] = datum[10] is not None data.append(cleaned) return data def network_in_use_on_host(context, network_id, host): query = _get_associated_fixed_ips_query(network_id, host) return query.count() > 0 def _network_get_query(context, session=None): return model_query(context, models.Network, session=session, read_deleted="no") def network_get_by_uuid(context, uuid): result = _network_get_query(context).filter_by(uuid=uuid).first() if not result: raise exception.NetworkNotFoundForUUID(uuid=uuid) return result def network_get_by_cidr(context, cidr): result = _network_get_query(context).\ filter(or_(models.Network.cidr == cidr, models.Network.cidr_v6 == cidr)).\ first() if not result: raise exception.NetworkNotFoundForCidr(cidr=cidr) return result def network_get_all_by_host(context, host): session = get_session() fixed_host_filter = or_(models.FixedIp.host == host, and_(models.FixedIp.instance_uuid != null(), models.Instance.host == host)) fixed_ip_query = model_query(context, models.FixedIp, (models.FixedIp.network_id,), session=session).\ outerjoin((models.Instance, models.Instance.uuid == models.FixedIp.instance_uuid)).\ filter(fixed_host_filter) # NOTE(vish): return networks that have host set # or that have a fixed ip with host set # or that have an instance with host set host_filter = or_(models.Network.host == host, models.Network.id.in_(fixed_ip_query.subquery())) return 
_network_get_query(context, session=session).\ filter(host_filter).\ all() @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True, retry_on_request=True) def network_set_host(context, network_id, host_id): network_ref = _network_get_query(context).\ filter_by(id=network_id).\ first() if not network_ref: raise exception.NetworkNotFound(network_id=network_id) if network_ref.host: return None rows_updated = _network_get_query(context).\ filter_by(id=network_id).\ filter_by(host=None).\ update({'host': host_id}) if not rows_updated: LOG.debug('The row was updated in a concurrent transaction, ' 'we will fetch another row') raise db_exc.RetryRequest( exception.NetworkSetHostFailed(network_id=network_id)) @require_context def network_update(context, network_id, values): session = get_session() with session.begin(): network_ref = _network_get(context, network_id, session=session) network_ref.update(values) try: network_ref.save(session=session) except db_exc.DBDuplicateEntry: raise exception.DuplicateVlan(vlan=values['vlan']) return network_ref ################### @require_context def quota_get(context, project_id, resource, user_id=None): model = models.ProjectUserQuota if user_id else models.Quota query = model_query(context, model).\ filter_by(project_id=project_id).\ filter_by(resource=resource) if user_id: query = query.filter_by(user_id=user_id) result = query.first() if not result: if user_id: raise exception.ProjectUserQuotaNotFound(project_id=project_id, user_id=user_id) else: raise exception.ProjectQuotaNotFound(project_id=project_id) return result @require_context def quota_get_all_by_project_and_user(context, project_id, user_id): user_quotas = model_query(context, models.ProjectUserQuota, (models.ProjectUserQuota.resource, models.ProjectUserQuota.hard_limit)).\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ all() result = {'project_id': project_id, 'user_id': user_id} for user_quota in user_quotas: result[user_quota.resource] = 
user_quota.hard_limit return result @require_context def quota_get_all_by_project(context, project_id): rows = model_query(context, models.Quota, read_deleted="no").\ filter_by(project_id=project_id).\ all() result = {'project_id': project_id} for row in rows: result[row.resource] = row.hard_limit return result @require_context def quota_get_all(context, project_id): result = model_query(context, models.ProjectUserQuota).\ filter_by(project_id=project_id).\ all() return result def quota_create(context, project_id, resource, limit, user_id=None): per_user = user_id and resource not in PER_PROJECT_QUOTAS quota_ref = models.ProjectUserQuota() if per_user else models.Quota() if per_user: quota_ref.user_id = user_id quota_ref.project_id = project_id quota_ref.resource = resource quota_ref.hard_limit = limit try: quota_ref.save() except db_exc.DBDuplicateEntry: raise exception.QuotaExists(project_id=project_id, resource=resource) return quota_ref def quota_update(context, project_id, resource, limit, user_id=None): per_user = user_id and resource not in PER_PROJECT_QUOTAS model = models.ProjectUserQuota if per_user else models.Quota query = model_query(context, model).\ filter_by(project_id=project_id).\ filter_by(resource=resource) if per_user: query = query.filter_by(user_id=user_id) result = query.update({'hard_limit': limit}) if not result: if per_user: raise exception.ProjectUserQuotaNotFound(project_id=project_id, user_id=user_id) else: raise exception.ProjectQuotaNotFound(project_id=project_id) ################### @require_context def quota_class_get(context, class_name, resource): result = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=class_name).\ filter_by(resource=resource).\ first() if not result: raise exception.QuotaClassNotFound(class_name=class_name) return result def quota_class_get_default(context): rows = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=_DEFAULT_QUOTA_NAME).\ all() 
result = {'class_name': _DEFAULT_QUOTA_NAME} for row in rows: result[row.resource] = row.hard_limit return result @require_context def quota_class_get_all_by_name(context, class_name): rows = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=class_name).\ all() result = {'class_name': class_name} for row in rows: result[row.resource] = row.hard_limit return result def quota_class_create(context, class_name, resource, limit): quota_class_ref = models.QuotaClass() quota_class_ref.class_name = class_name quota_class_ref.resource = resource quota_class_ref.hard_limit = limit quota_class_ref.save() return quota_class_ref def quota_class_update(context, class_name, resource, limit): result = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=class_name).\ filter_by(resource=resource).\ update({'hard_limit': limit}) if not result: raise exception.QuotaClassNotFound(class_name=class_name) ################### @require_context def quota_usage_get(context, project_id, resource, user_id=None): query = model_query(context, models.QuotaUsage, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(resource=resource) if user_id: if resource not in PER_PROJECT_QUOTAS: result = query.filter_by(user_id=user_id).first() else: result = query.filter_by(user_id=None).first() else: result = query.first() if not result: raise exception.QuotaUsageNotFound(project_id=project_id) return result def _quota_usage_get_all(context, project_id, user_id=None): query = model_query(context, models.QuotaUsage, read_deleted="no").\ filter_by(project_id=project_id) result = {'project_id': project_id} if user_id: query = query.filter(or_(models.QuotaUsage.user_id == user_id, models.QuotaUsage.user_id == null())) result['user_id'] = user_id rows = query.all() for row in rows: if row.resource in result: result[row.resource]['in_use'] += row.in_use result[row.resource]['reserved'] += row.reserved else: result[row.resource] = 
dict(in_use=row.in_use, reserved=row.reserved) return result @require_context def quota_usage_get_all_by_project_and_user(context, project_id, user_id): return _quota_usage_get_all(context, project_id, user_id=user_id) @require_context def quota_usage_get_all_by_project(context, project_id): return _quota_usage_get_all(context, project_id) def _quota_usage_create(project_id, user_id, resource, in_use, reserved, until_refresh, session=None): quota_usage_ref = models.QuotaUsage() quota_usage_ref.project_id = project_id quota_usage_ref.user_id = user_id quota_usage_ref.resource = resource quota_usage_ref.in_use = in_use quota_usage_ref.reserved = reserved quota_usage_ref.until_refresh = until_refresh # updated_at is needed for judgement of max_age quota_usage_ref.updated_at = timeutils.utcnow() quota_usage_ref.save(session=session) return quota_usage_ref def quota_usage_update(context, project_id, user_id, resource, **kwargs): updates = {} for key in ['in_use', 'reserved', 'until_refresh']: if key in kwargs: updates[key] = kwargs[key] result = model_query(context, models.QuotaUsage, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(resource=resource).\ filter(or_(models.QuotaUsage.user_id == user_id, models.QuotaUsage.user_id == null())).\ update(updates) if not result: raise exception.QuotaUsageNotFound(project_id=project_id) ################### def _reservation_create(uuid, usage, project_id, user_id, resource, delta, expire, session=None): reservation_ref = models.Reservation() reservation_ref.uuid = uuid reservation_ref.usage_id = usage['id'] reservation_ref.project_id = project_id reservation_ref.user_id = user_id reservation_ref.resource = resource reservation_ref.delta = delta reservation_ref.expire = expire reservation_ref.save(session=session) return reservation_ref ################### # NOTE(johannes): The quota code uses SQL locking to ensure races don't # cause under or over counting of resources. 
To avoid deadlocks, this # code always acquires the lock on quota_usages before acquiring the lock # on reservations. def _get_project_user_quota_usages(context, session, project_id, user_id): rows = model_query(context, models.QuotaUsage, read_deleted="no", session=session).\ filter_by(project_id=project_id).\ order_by(models.QuotaUsage.id.asc()).\ with_lockmode('update').\ all() proj_result = dict() user_result = dict() # Get the total count of in_use,reserved for row in rows: proj_result.setdefault(row.resource, dict(in_use=0, reserved=0, total=0)) proj_result[row.resource]['in_use'] += row.in_use proj_result[row.resource]['reserved'] += row.reserved proj_result[row.resource]['total'] += (row.in_use + row.reserved) if row.user_id is None or row.user_id == user_id: user_result[row.resource] = row return proj_result, user_result def _create_quota_usage_if_missing(user_usages, resource, until_refresh, project_id, user_id, session): """Creates a QuotaUsage record and adds to user_usages if not present. :param user_usages: dict of resource keys to QuotaUsage records. This is updated if resource is not in user_usages yet or until_refresh is not None. :param resource: The resource being checked for quota usage. :param until_refresh: Count of reservations until usage is refreshed, int or None :param project_id: The project being checked for quota usage. :param user_id: The user being checked for quota usage. :param session: DB session holding a transaction lock. :return: True if a new QuotaUsage record was created and added to user_usages, False otherwise. """ new_usage = None if resource not in user_usages: user_id_to_use = user_id if resource in PER_PROJECT_QUOTAS: user_id_to_use = None new_usage = _quota_usage_create(project_id, user_id_to_use, resource, 0, 0, until_refresh or None, session=session) user_usages[resource] = new_usage return new_usage is not None def _is_quota_refresh_needed(quota_usage, max_age): """Determines if a quota usage refresh is needed. 
:param quota_usage: A QuotaUsage object for a given resource. :param max_age: Number of seconds between subsequent usage refreshes. :return: True if a refresh is needed, False otherwise. """ refresh = False if quota_usage.in_use < 0: # Negative in_use count indicates a desync, so try to # heal from that... LOG.debug('in_use has dropped below 0; forcing refresh for ' 'QuotaUsage: %s', dict(quota_usage)) refresh = True elif quota_usage.until_refresh is not None: quota_usage.until_refresh -= 1 if quota_usage.until_refresh <= 0: refresh = True elif max_age and (timeutils.utcnow() - quota_usage.updated_at).seconds >= max_age: refresh = True return refresh def _refresh_quota_usages(quota_usage, until_refresh, in_use): """Refreshes quota usage for the given resource. :param quota_usage: A QuotaUsage object for a given resource. :param until_refresh: Count of reservations until usage is refreshed, int or None :param in_use: Actual quota usage for the resource. """ if quota_usage.in_use != in_use: LOG.info(_LI('quota_usages out of sync, updating. ' 'project_id: %(project_id)s, ' 'user_id: %(user_id)s, ' 'resource: %(res)s, ' 'tracked usage: %(tracked_use)s, ' 'actual usage: %(in_use)s'), {'project_id': quota_usage.project_id, 'user_id': quota_usage.user_id, 'res': quota_usage.resource, 'tracked_use': quota_usage.in_use, 'in_use': in_use}) else: LOG.debug('QuotaUsage has not changed, refresh is unnecessary for: %s', dict(quota_usage)) # Update the usage quota_usage.in_use = in_use quota_usage.until_refresh = until_refresh or None def _calculate_overquota(project_quotas, user_quotas, deltas, project_usages, user_usages): """Checks if any resources will go over quota based on the request. :param project_quotas: dict of resource quotas (limits) for the project. :param user_quotas: dict of resource quotas (limits) for the user. :param deltas: dict of resource keys to positive/negative quota changes for the resources in a given operation. 
:param project_usages: dict of resource keys to QuotaUsage records for the project. :param user_usages: dict of resource keys to QuotaUsage records for the user. :return: list of resources that are over-quota for the operation. """ overs = [] for res, delta in deltas.items(): # We can't go over-quota if we're not reserving anything or if # we have unlimited quotas. if user_quotas[res] >= 0 and delta >= 0: # over if the project usage + delta is more than project quota if project_quotas[res] < delta + project_usages[res]['total']: LOG.debug('Request is over project quota for resource ' '"%(res)s". Project limit: %(limit)s, delta: ' '%(delta)s, current total project usage: %(total)s', {'res': res, 'limit': project_quotas[res], 'delta': delta, 'total': project_usages[res]['total']}) overs.append(res) # over if the user usage + delta is more than user quota elif user_quotas[res] < delta + user_usages[res]['total']: LOG.debug('Request is over user quota for resource ' '"%(res)s". User limit: %(limit)s, delta: ' '%(delta)s, current total user usage: %(total)s', {'res': res, 'limit': user_quotas[res], 'delta': delta, 'total': user_usages[res]['total']}) overs.append(res) return overs @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def quota_reserve(context, resources, project_quotas, user_quotas, deltas, expire, until_refresh, max_age, project_id=None, user_id=None): elevated = context.elevated() session = get_session() with session.begin(): if project_id is None: project_id = context.project_id if user_id is None: user_id = context.user_id # Get the current usages project_usages, user_usages = _get_project_user_quota_usages( context, session, project_id, user_id) # Handle usage refresh work = set(deltas.keys()) while work: resource = work.pop() # Do we need to refresh the usage? 
created = _create_quota_usage_if_missing(user_usages, resource, until_refresh, project_id, user_id, session) refresh = created or _is_quota_refresh_needed( user_usages[resource], max_age) # OK, refresh the usage if refresh: # Grab the sync routine sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync] updates = sync(elevated, project_id, user_id, session) for res, in_use in updates.items(): # Make sure we have a destination for the usage! _create_quota_usage_if_missing(user_usages, res, until_refresh, project_id, user_id, session) _refresh_quota_usages(user_usages[res], until_refresh, in_use) # Because more than one resource may be refreshed # by the call to the sync routine, and we don't # want to double-sync, we make sure all refreshed # resources are dropped from the work set. work.discard(res) # NOTE(Vek): We make the assumption that the sync # routine actually refreshes the # resources that it is the sync routine # for. We don't check, because this is # a best-effort mechanism. # Check for deltas that would go negative unders = [res for res, delta in deltas.items() if delta < 0 and delta + user_usages[res].in_use < 0] # Now, let's check the quotas # NOTE(Vek): We're only concerned about positive increments. # If a project has gone over quota, we want them to # be able to reduce their usage without any # problems. for key, value in user_usages.items(): if key not in project_usages: LOG.debug('Copying QuotaUsage for resource "%(key)s" from ' 'user_usages into project_usages: %(value)s', {'key': key, 'value': dict(value)}) project_usages[key] = value overs = _calculate_overquota(project_quotas, user_quotas, deltas, project_usages, user_usages) # NOTE(Vek): The quota check needs to be in the transaction, # but the transaction doesn't fail just because # we're over quota, so the OverQuota raise is # outside the transaction. If we did the raise # here, our usage updates would be discarded, but # they're not invalidated by being over-quota. 
# Create the reservations if not overs: reservations = [] for res, delta in deltas.items(): reservation = _reservation_create( str(uuid.uuid4()), user_usages[res], project_id, user_id, res, delta, expire, session=session) reservations.append(reservation.uuid) # Also update the reserved quantity # NOTE(Vek): Again, we are only concerned here about # positive increments. Here, though, we're # worried about the following scenario: # # 1) User initiates resize down. # 2) User allocates a new instance. # 3) Resize down fails or is reverted. # 4) User is now over quota. # # To prevent this, we only update the # reserved value if the delta is positive. if delta > 0: user_usages[res].reserved += delta # Apply updates to the usages table for usage_ref in user_usages.values(): session.add(usage_ref) if unders: LOG.warning(_LW("Change will make usage less than 0 for the following " "resources: %s"), unders) if overs: if project_quotas == user_quotas: usages = project_usages else: # NOTE(mriedem): user_usages is a dict of resource keys to # QuotaUsage sqlalchemy dict-like objects and doen't log well # so convert the user_usages values to something useful for # logging. Remove this if we ever change how # _get_project_user_quota_usages returns the user_usages values. 
user_usages = {k: dict(in_use=v['in_use'], reserved=v['reserved'], total=v['total']) for k, v in user_usages.items()} usages = user_usages usages = {k: dict(in_use=v['in_use'], reserved=v['reserved']) for k, v in usages.items()} LOG.debug('Raise OverQuota exception because: ' 'project_quotas: %(project_quotas)s, ' 'user_quotas: %(user_quotas)s, deltas: %(deltas)s, ' 'overs: %(overs)s, project_usages: %(project_usages)s, ' 'user_usages: %(user_usages)s', {'project_quotas': project_quotas, 'user_quotas': user_quotas, 'overs': overs, 'deltas': deltas, 'project_usages': project_usages, 'user_usages': user_usages}) raise exception.OverQuota(overs=sorted(overs), quotas=user_quotas, usages=usages) return reservations def _quota_reservations_query(session, context, reservations): """Return the relevant reservations.""" # Get the listed reservations return model_query(context, models.Reservation, read_deleted="no", session=session).\ filter(models.Reservation.uuid.in_(reservations)).\ with_lockmode('update') @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def reservation_commit(context, reservations, project_id=None, user_id=None): session = get_session() with session.begin(): _project_usages, user_usages = _get_project_user_quota_usages( context, session, project_id, user_id) reservation_query = _quota_reservations_query(session, context, reservations) for reservation in reservation_query.all(): usage = user_usages[reservation.resource] if reservation.delta >= 0: usage.reserved -= reservation.delta usage.in_use += reservation.delta reservation_query.soft_delete(synchronize_session=False) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def reservation_rollback(context, reservations, project_id=None, user_id=None): session = get_session() with session.begin(): _project_usages, user_usages = _get_project_user_quota_usages( context, session, project_id, user_id) reservation_query = 
_quota_reservations_query(session, context, reservations) for reservation in reservation_query.all(): usage = user_usages[reservation.resource] if reservation.delta >= 0: usage.reserved -= reservation.delta reservation_query.soft_delete(synchronize_session=False) def quota_destroy_all_by_project_and_user(context, project_id, user_id): session = get_session() with session.begin(): model_query(context, models.ProjectUserQuota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ soft_delete(synchronize_session=False) model_query(context, models.QuotaUsage, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ soft_delete(synchronize_session=False) model_query(context, models.Reservation, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ soft_delete(synchronize_session=False) def quota_destroy_all_by_project(context, project_id): session = get_session() with session.begin(): model_query(context, models.Quota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ soft_delete(synchronize_session=False) model_query(context, models.ProjectUserQuota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ soft_delete(synchronize_session=False) model_query(context, models.QuotaUsage, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ soft_delete(synchronize_session=False) model_query(context, models.Reservation, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ soft_delete(synchronize_session=False) @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def reservation_expire(context): session = get_session() with session.begin(): current_time = timeutils.utcnow() reservation_query = model_query(context, models.Reservation, session=session, read_deleted="no").\ filter(models.Reservation.expire < current_time) for reservation in 
reservation_query.join(models.QuotaUsage).all(): if reservation.delta >= 0: reservation.usage.reserved -= reservation.delta session.add(reservation.usage) reservation_query.soft_delete(synchronize_session=False) ################### def _ec2_volume_get_query(context, session=None): return model_query(context, models.VolumeIdMapping, session=session, read_deleted='yes') def _ec2_snapshot_get_query(context, session=None): return model_query(context, models.SnapshotIdMapping, session=session, read_deleted='yes') @require_context def ec2_volume_create(context, volume_uuid, id=None): """Create ec2 compatible volume by provided uuid.""" ec2_volume_ref = models.VolumeIdMapping() ec2_volume_ref.update({'uuid': volume_uuid}) if id is not None: ec2_volume_ref.update({'id': id}) ec2_volume_ref.save() return ec2_volume_ref @require_context def ec2_volume_get_by_uuid(context, volume_uuid): result = _ec2_volume_get_query(context).\ filter_by(uuid=volume_uuid).\ first() if not result: raise exception.VolumeNotFound(volume_id=volume_uuid) return result @require_context def ec2_volume_get_by_id(context, volume_id): result = _ec2_volume_get_query(context).\ filter_by(id=volume_id).\ first() if not result: raise exception.VolumeNotFound(volume_id=volume_id) return result @require_context def ec2_snapshot_create(context, snapshot_uuid, id=None): """Create ec2 compatible snapshot by provided uuid.""" ec2_snapshot_ref = models.SnapshotIdMapping() ec2_snapshot_ref.update({'uuid': snapshot_uuid}) if id is not None: ec2_snapshot_ref.update({'id': id}) ec2_snapshot_ref.save() return ec2_snapshot_ref @require_context def ec2_snapshot_get_by_ec2_id(context, ec2_id): result = _ec2_snapshot_get_query(context).\ filter_by(id=ec2_id).\ first() if not result: raise exception.SnapshotNotFound(snapshot_id=ec2_id) return result @require_context def ec2_snapshot_get_by_uuid(context, snapshot_uuid): result = _ec2_snapshot_get_query(context).\ filter_by(uuid=snapshot_uuid).\ first() if not result: raise 
exception.SnapshotNotFound(snapshot_id=snapshot_uuid) return result ################### def _block_device_mapping_get_query(context, session=None, columns_to_join=None, use_slave=False): if columns_to_join is None: columns_to_join = [] query = model_query(context, models.BlockDeviceMapping, session=session, use_slave=use_slave) for column in columns_to_join: query = query.options(joinedload(column)) return query def _scrub_empty_str_values(dct, keys_to_scrub): """Remove any keys found in sequence keys_to_scrub from the dict if they have the value ''. """ for key in keys_to_scrub: if key in dct and dct[key] == '': del dct[key] def _from_legacy_values(values, legacy, allow_updates=False): if legacy: if allow_updates and block_device.is_safe_for_update(values): return values else: return block_device.BlockDeviceDict.from_legacy(values) else: return values @require_context def block_device_mapping_create(context, values, legacy=True): _scrub_empty_str_values(values, ['volume_size']) values = _from_legacy_values(values, legacy) convert_objects_related_datetimes(values) bdm_ref = models.BlockDeviceMapping() bdm_ref.update(values) bdm_ref.save() return bdm_ref @require_context def block_device_mapping_update(context, bdm_id, values, legacy=True): _scrub_empty_str_values(values, ['volume_size']) values = _from_legacy_values(values, legacy, allow_updates=True) convert_objects_related_datetimes(values) query = _block_device_mapping_get_query(context).filter_by(id=bdm_id) query.update(values) return query.first() def block_device_mapping_update_or_create(context, values, legacy=True): _scrub_empty_str_values(values, ['volume_size']) values = _from_legacy_values(values, legacy, allow_updates=True) convert_objects_related_datetimes(values) session = get_session() with session.begin(): result = None # NOTE(xqueralt): Only update a BDM when device_name was provided. We # allow empty device names so they will be set later by the manager. 
if values['device_name']: query = _block_device_mapping_get_query(context, session=session) result = query.filter_by(instance_uuid=values['instance_uuid'], device_name=values['device_name']).first() if result: result.update(values) else: # Either the device_name doesn't exist in the database yet, or no # device_name was provided. Both cases mean creating a new BDM. result = models.BlockDeviceMapping(**values) result.save(session=session) # NOTE(xqueralt): Prevent from having multiple swap devices for the # same instance. This will delete all the existing ones. if block_device.new_format_is_swap(values): query = _block_device_mapping_get_query(context, session=session) query = query.filter_by(instance_uuid=values['instance_uuid'], source_type='blank', guest_format='swap') query = query.filter(models.BlockDeviceMapping.id != result.id) query.soft_delete() return result @require_context def block_device_mapping_get_all_by_instance(context, instance_uuid, use_slave=False): return _block_device_mapping_get_query(context, use_slave=use_slave).\ filter_by(instance_uuid=instance_uuid).\ all() @require_context def block_device_mapping_get_by_volume_id(context, volume_id, columns_to_join=None): return _block_device_mapping_get_query(context, columns_to_join=columns_to_join).\ filter_by(volume_id=volume_id).\ first() @require_context def block_device_mapping_destroy(context, bdm_id): _block_device_mapping_get_query(context).\ filter_by(id=bdm_id).\ soft_delete() @require_context def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid, volume_id): _block_device_mapping_get_query(context).\ filter_by(instance_uuid=instance_uuid).\ filter_by(volume_id=volume_id).\ soft_delete() @require_context def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid, device_name): _block_device_mapping_get_query(context).\ filter_by(instance_uuid=instance_uuid).\ filter_by(device_name=device_name).\ soft_delete() ################### def 
_security_group_create(context, values, session=None): security_group_ref = models.SecurityGroup() # FIXME(devcamcar): Unless I do this, rules fails with lazy load exception # once save() is called. This will get cleaned up in next orm pass. security_group_ref.rules security_group_ref.update(values) try: security_group_ref.save(session=session) except db_exc.DBDuplicateEntry: raise exception.SecurityGroupExists( project_id=values['project_id'], security_group_name=values['name']) return security_group_ref def _security_group_get_query(context, session=None, read_deleted=None, project_only=False, join_rules=True): query = model_query(context, models.SecurityGroup, session=session, read_deleted=read_deleted, project_only=project_only) if join_rules: query = query.options(joinedload_all('rules.grantee_group')) return query def _security_group_get_by_names(context, session, project_id, group_names): """Get security group models for a project by a list of names. Raise SecurityGroupNotFoundForProject for a name not found. 
""" query = _security_group_get_query(context, session=session, read_deleted="no", join_rules=False).\ filter_by(project_id=project_id).\ filter(models.SecurityGroup.name.in_(group_names)) sg_models = query.all() if len(sg_models) == len(group_names): return sg_models # Find the first one missing and raise group_names_from_models = [x.name for x in sg_models] for group_name in group_names: if group_name not in group_names_from_models: raise exception.SecurityGroupNotFoundForProject( project_id=project_id, security_group_id=group_name) # Not Reached @require_context def security_group_get_all(context): return _security_group_get_query(context).all() @require_context def security_group_get(context, security_group_id, columns_to_join=None): query = _security_group_get_query(context, project_only=True).\ filter_by(id=security_group_id) if columns_to_join is None: columns_to_join = [] for column in columns_to_join: if column.startswith('instances'): query = query.options(joinedload_all(column)) result = query.first() if not result: raise exception.SecurityGroupNotFound( security_group_id=security_group_id) return result @require_context def security_group_get_by_name(context, project_id, group_name, columns_to_join=None): query = _security_group_get_query(context, read_deleted="no", join_rules=False).\ filter_by(project_id=project_id).\ filter_by(name=group_name) if columns_to_join is None: columns_to_join = ['instances', 'rules.grantee_group'] for column in columns_to_join: query = query.options(joinedload_all(column)) result = query.first() if not result: raise exception.SecurityGroupNotFoundForProject( project_id=project_id, security_group_id=group_name) return result @require_context def security_group_get_by_project(context, project_id): return _security_group_get_query(context, read_deleted="no").\ filter_by(project_id=project_id).\ all() @require_context def security_group_get_by_instance(context, instance_uuid): return _security_group_get_query(context, 
read_deleted="no").\ join(models.SecurityGroup.instances).\ filter_by(uuid=instance_uuid).\ all() @require_context def security_group_in_use(context, group_id): session = get_session() with session.begin(): # Are there any instances that haven't been deleted # that include this group? inst_assoc = model_query(context, models.SecurityGroupInstanceAssociation, read_deleted="no", session=session).\ filter_by(security_group_id=group_id).\ all() for ia in inst_assoc: num_instances = model_query(context, models.Instance, session=session, read_deleted="no").\ filter_by(uuid=ia.instance_uuid).\ count() if num_instances: return True return False @require_context def security_group_create(context, values): return _security_group_create(context, values) @require_context def security_group_update(context, security_group_id, values, columns_to_join=None): session = get_session() with session.begin(): query = model_query(context, models.SecurityGroup, session=session).filter_by(id=security_group_id) if columns_to_join: for column in columns_to_join: query = query.options(joinedload_all(column)) security_group_ref = query.first() if not security_group_ref: raise exception.SecurityGroupNotFound( security_group_id=security_group_id) security_group_ref.update(values) name = security_group_ref['name'] project_id = security_group_ref['project_id'] try: security_group_ref.save(session=session) except db_exc.DBDuplicateEntry: raise exception.SecurityGroupExists( project_id=project_id, security_group_name=name) return security_group_ref def security_group_ensure_default(context): """Ensure default security group exists for a project_id.""" try: return _security_group_ensure_default(context) except exception.SecurityGroupExists: # NOTE(rpodolyaka): a concurrent transaction has succeeded first, # suppress the error and proceed return security_group_get_by_name(context, context.project_id, 'default') def _security_group_ensure_default(context, session=None): if session is None: session = 
get_session() with session.begin(subtransactions=True): try: default_group = _security_group_get_by_names(context, session, context.project_id, ['default'])[0] except exception.NotFound: values = {'name': 'default', 'description': 'default', 'user_id': context.user_id, 'project_id': context.project_id} default_group = _security_group_create(context, values, session=session) usage = model_query(context, models.QuotaUsage, read_deleted="no", session=session).\ filter_by(project_id=context.project_id).\ filter_by(user_id=context.user_id).\ filter_by(resource='security_groups') # Create quota usage for auto created default security group if not usage.first(): _quota_usage_create(context.project_id, context.user_id, 'security_groups', 1, 0, None, session=session) else: usage.update({'in_use': int(usage.first().in_use) + 1}) default_rules = _security_group_rule_get_default_query(context, session=session).all() for default_rule in default_rules: # This is suboptimal, it should be programmatic to know # the values of the default_rule rule_values = {'protocol': default_rule.protocol, 'from_port': default_rule.from_port, 'to_port': default_rule.to_port, 'cidr': default_rule.cidr, 'parent_group_id': default_group.id, } _security_group_rule_create(context, rule_values, session=session) return default_group @require_context def security_group_destroy(context, security_group_id): session = get_session() with session.begin(): model_query(context, models.SecurityGroup, session=session).\ filter_by(id=security_group_id).\ soft_delete() model_query(context, models.SecurityGroupInstanceAssociation, session=session).\ filter_by(security_group_id=security_group_id).\ soft_delete() model_query(context, models.SecurityGroupIngressRule, session=session).\ filter_by(group_id=security_group_id).\ soft_delete() model_query(context, models.SecurityGroupIngressRule, session=session).\ filter_by(parent_group_id=security_group_id).\ soft_delete() def 
_security_group_count_by_project_and_user(context, project_id, user_id, session=None): nova.context.authorize_project_context(context, project_id) return model_query(context, models.SecurityGroup, read_deleted="no", session=session).\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ count() ################### def _security_group_rule_create(context, values, session=None): security_group_rule_ref = models.SecurityGroupIngressRule() security_group_rule_ref.update(values) security_group_rule_ref.save(session=session) return security_group_rule_ref def _security_group_rule_get_query(context, session=None): return model_query(context, models.SecurityGroupIngressRule, session=session) @require_context def security_group_rule_get(context, security_group_rule_id): result = (_security_group_rule_get_query(context). filter_by(id=security_group_rule_id). first()) if not result: raise exception.SecurityGroupNotFoundForRule( rule_id=security_group_rule_id) return result @require_context def security_group_rule_get_by_security_group(context, security_group_id, columns_to_join=None): if columns_to_join is None: columns_to_join = ['grantee_group.instances.system_metadata', 'grantee_group.instances.info_cache'] query = (_security_group_rule_get_query(context). filter_by(parent_group_id=security_group_id)) for column in columns_to_join: query = query.options(joinedload_all(column)) return query.all() @require_context def security_group_rule_create(context, values): return _security_group_rule_create(context, values) @require_context def security_group_rule_destroy(context, security_group_rule_id): count = (_security_group_rule_get_query(context). filter_by(id=security_group_rule_id). soft_delete()) if count == 0: raise exception.SecurityGroupNotFoundForRule( rule_id=security_group_rule_id) @require_context def security_group_rule_count_by_group(context, security_group_id): return (model_query(context, models.SecurityGroupIngressRule, read_deleted="no"). 
filter_by(parent_group_id=security_group_id). count()) # ################### def _security_group_rule_get_default_query(context, session=None): return model_query(context, models.SecurityGroupIngressDefaultRule, session=session) @require_context def security_group_default_rule_get(context, security_group_rule_default_id): result = _security_group_rule_get_default_query(context).\ filter_by(id=security_group_rule_default_id).\ first() if not result: raise exception.SecurityGroupDefaultRuleNotFound( rule_id=security_group_rule_default_id) return result def security_group_default_rule_destroy(context, security_group_rule_default_id): session = get_session() with session.begin(): count = _security_group_rule_get_default_query(context, session=session).\ filter_by(id=security_group_rule_default_id).\ soft_delete() if count == 0: raise exception.SecurityGroupDefaultRuleNotFound( rule_id=security_group_rule_default_id) def security_group_default_rule_create(context, values): security_group_default_rule_ref = models.SecurityGroupIngressDefaultRule() security_group_default_rule_ref.update(values) security_group_default_rule_ref.save() return security_group_default_rule_ref @require_context def security_group_default_rule_list(context): return _security_group_rule_get_default_query(context).\ all() ################### def provider_fw_rule_create(context, rule): fw_rule_ref = models.ProviderFirewallRule() fw_rule_ref.update(rule) fw_rule_ref.save() return fw_rule_ref def provider_fw_rule_get_all(context): return model_query(context, models.ProviderFirewallRule).all() def provider_fw_rule_destroy(context, rule_id): session = get_session() with session.begin(): session.query(models.ProviderFirewallRule).\ filter_by(id=rule_id).\ soft_delete() ################### @require_context def project_get_networks(context, project_id, associate=True): # NOTE(tr3buchet): as before this function will associate # a project with a network if it doesn't have one and # associate is true result 
= model_query(context, models.Network, read_deleted="no").\ filter_by(project_id=project_id).\ all() if not result: if not associate: return [] return [network_associate(context, project_id)] return result ################### def migration_create(context, values): migration = models.Migration() migration.update(values) migration.save() return migration def migration_update(context, id, values): session = get_session() with session.begin(): migration = _migration_get(context, id, session=session) migration.update(values) return migration def _migration_get(context, id, session=None): result = model_query(context, models.Migration, session=session, read_deleted="yes").\ filter_by(id=id).\ first() if not result: raise exception.MigrationNotFound(migration_id=id) return result def migration_get(context, id): return _migration_get(context, id) def migration_get_by_instance_and_status(context, instance_uuid, status): result = model_query(context, models.Migration, read_deleted="yes").\ filter_by(instance_uuid=instance_uuid).\ filter_by(status=status).\ first() if not result: raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid, status=status) return result def migration_get_unconfirmed_by_dest_compute(context, confirm_window, dest_compute, use_slave=False): confirm_window = (timeutils.utcnow() - datetime.timedelta(seconds=confirm_window)) return model_query(context, models.Migration, read_deleted="yes", use_slave=use_slave).\ filter(models.Migration.updated_at <= confirm_window).\ filter_by(status="finished").\ filter_by(dest_compute=dest_compute).\ all() def migration_get_in_progress_by_host_and_node(context, host, node): return model_query(context, models.Migration).\ filter(or_(and_(models.Migration.source_compute == host, models.Migration.source_node == node), and_(models.Migration.dest_compute == host, models.Migration.dest_node == node))).\ filter(~models.Migration.status.in_(['accepted', 'confirmed', 'reverted', 'error', 'failed'])).\ 
options(joinedload_all('instance.system_metadata')).\ all() def migration_get_all_by_filters(context, filters): query = model_query(context, models.Migration) if "status" in filters: status = filters["status"] status = [status] if isinstance(status, str) else status query = query.filter(models.Migration.status.in_(status)) if "host" in filters: host = filters["host"] query = query.filter(or_(models.Migration.source_compute == host, models.Migration.dest_compute == host)) elif "source_compute" in filters: host = filters['source_compute'] query = query.filter(models.Migration.source_compute == host) if "migration_type" in filters: migtype = filters["migration_type"] query = query.filter(models.Migration.migration_type == migtype) if "hidden" in filters: hidden = filters["hidden"] query = query.filter(models.Migration.hidden == hidden) return query.all() ################## def console_pool_create(context, values): pool = models.ConsolePool() pool.update(values) try: pool.save() except db_exc.DBDuplicateEntry: raise exception.ConsolePoolExists( host=values["host"], console_type=values["console_type"], compute_host=values["compute_host"], ) return pool def console_pool_get_by_host_type(context, compute_host, host, console_type): result = model_query(context, models.ConsolePool, read_deleted="no").\ filter_by(host=host).\ filter_by(console_type=console_type).\ filter_by(compute_host=compute_host).\ options(joinedload('consoles')).\ first() if not result: raise exception.ConsolePoolNotFoundForHostType( host=host, console_type=console_type, compute_host=compute_host) return result def console_pool_get_all_by_host_type(context, host, console_type): return model_query(context, models.ConsolePool, read_deleted="no").\ filter_by(host=host).\ filter_by(console_type=console_type).\ options(joinedload('consoles')).\ all() def console_create(context, values): console = models.Console() console.update(values) console.save() return console def console_delete(context, console_id): 
session = get_session() with session.begin(): # NOTE(mdragon): consoles are meant to be transient. session.query(models.Console).\ filter_by(id=console_id).\ delete() def console_get_by_pool_instance(context, pool_id, instance_uuid): result = model_query(context, models.Console, read_deleted="yes").\ filter_by(pool_id=pool_id).\ filter_by(instance_uuid=instance_uuid).\ options(joinedload('pool')).\ first() if not result: raise exception.ConsoleNotFoundInPoolForInstance( pool_id=pool_id, instance_uuid=instance_uuid) return result def console_get_all_by_instance(context, instance_uuid, columns_to_join=None): query = model_query(context, models.Console, read_deleted="yes").\ filter_by(instance_uuid=instance_uuid) if columns_to_join: for column in columns_to_join: query = query.options(joinedload(column)) return query.all() def console_get(context, console_id, instance_uuid=None): query = model_query(context, models.Console, read_deleted="yes").\ filter_by(id=console_id).\ options(joinedload('pool')) if instance_uuid is not None: query = query.filter_by(instance_uuid=instance_uuid) result = query.first() if not result: if instance_uuid: raise exception.ConsoleNotFoundForInstance( console_id=console_id, instance_uuid=instance_uuid) else: raise exception.ConsoleNotFound(console_id=console_id) return result ################## def flavor_create(context, values, projects=None): """Create a new instance type. 
In order to pass in extra specs, the values dict should contain a
    'extra_specs' key/value pair:

    {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}

    """
    specs = values.get('extra_specs')

    specs_refs = []
    if specs:
        for k, v in specs.items():
            specs_ref = models.InstanceTypeExtraSpecs()
            specs_ref['key'] = k
            specs_ref['value'] = v
            specs_refs.append(specs_ref)

    values['extra_specs'] = specs_refs
    instance_type_ref = models.InstanceTypes()
    instance_type_ref.update(values)

    if projects is None:
        projects = []

    session = get_session()
    with session.begin():
        try:
            instance_type_ref.save()
        except db_exc.DBDuplicateEntry as e:
            # Distinguish a duplicate flavorid from a duplicate name.
            if 'flavorid' in e.columns:
                raise exception.FlavorIdExists(flavor_id=values['flavorid'])
            raise exception.FlavorExists(name=values['name'])
        except Exception as e:
            raise db_exc.DBError(e)
        for project in set(projects):
            access_ref = models.InstanceTypeProjects()
            access_ref.update({"instance_type_id": instance_type_ref.id,
                               "project_id": project})
            access_ref.save()

    return _dict_with_extra_specs(instance_type_ref)


def _dict_with_extra_specs(inst_type_query):
    """Takes an instance or instance type query returned
    by sqlalchemy and returns it as a dictionary, converting the
    extra_specs entry from a list of dicts:

    'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]

    to a single dict:

    'extra_specs' : {'k1': 'v1'}

    """
    inst_type_dict = dict(inst_type_query)
    extra_specs = {x['key']: x['value']
                   for x in inst_type_query['extra_specs']}
    inst_type_dict['extra_specs'] = extra_specs
    return inst_type_dict


def _flavor_get_query(context, session=None, read_deleted=None):
    """Base flavor query with extra_specs eager-loaded.

    Non-admin contexts only see public flavors plus flavors their
    project was explicitly granted access to.
    """
    query = model_query(context, models.InstanceTypes, session=session,
                       read_deleted=read_deleted).\
                       options(joinedload('extra_specs'))
    if not context.is_admin:
        the_filter = [models.InstanceTypes.is_public == true()]
        the_filter.extend([
            models.InstanceTypes.projects.any(project_id=context.project_id)
        ])
        query = query.filter(or_(*the_filter))
    return query


@require_context
def flavor_get_all(context, inactive=False, filters=None,
                   sort_key='flavorid', sort_dir='asc', limit=None,
                   marker=None):
    """Returns all flavors.
    """
    filters = filters or {}

    # FIXME(sirp): now that we have the `disabled` field for flavors, we
    # should probably remove the use of `deleted` to mark inactive. `deleted`
    # should mean truly deleted, e.g. we can safely purge the record out of the
    # database.
read_deleted = "yes" if inactive else "no" query = _flavor_get_query(context, read_deleted=read_deleted) if 'min_memory_mb' in filters: query = query.filter( models.InstanceTypes.memory_mb >= filters['min_memory_mb']) if 'min_root_gb' in filters: query = query.filter( models.InstanceTypes.root_gb >= filters['min_root_gb']) if 'disabled' in filters: query = query.filter( models.InstanceTypes.disabled == filters['disabled']) if 'is_public' in filters and filters['is_public'] is not None: the_filter = [models.InstanceTypes.is_public == filters['is_public']] if filters['is_public'] and context.project_id is not None: the_filter.extend([ models.InstanceTypes.projects.any( project_id=context.project_id, deleted=0) ]) if len(the_filter) > 1: query = query.filter(or_(*the_filter)) else: query = query.filter(the_filter[0]) marker_row = None if marker is not None: marker_row = _flavor_get_query(context, read_deleted=read_deleted).\ filter_by(flavorid=marker).\ first() if not marker_row: raise exception.MarkerNotFound(marker) query = sqlalchemyutils.paginate_query(query, models.InstanceTypes, limit, [sort_key, 'id'], marker=marker_row, sort_dir=sort_dir) inst_types = query.all() return [_dict_with_extra_specs(i) for i in inst_types] def _flavor_get_id_from_flavor_query(context, flavor_id, session=None): return model_query(context, models.InstanceTypes, (models.InstanceTypes.id,), read_deleted="no", session=session).\ filter_by(flavorid=flavor_id) def _flavor_get_id_from_flavor(context, flavor_id, session=None): result = _flavor_get_id_from_flavor_query(context, flavor_id, session=session).\ first() if not result: raise exception.FlavorNotFound(flavor_id=flavor_id) return result[0] @require_context def flavor_get(context, id): """Returns a dict describing specific flavor.""" result = _flavor_get_query(context).\ filter_by(id=id).\ first() if not result: raise exception.FlavorNotFound(flavor_id=id) return _dict_with_extra_specs(result) @require_context def 
flavor_get_by_name(context, name): """Returns a dict describing specific flavor.""" result = _flavor_get_query(context).\ filter_by(name=name).\ first() if not result: raise exception.FlavorNotFoundByName(flavor_name=name) return _dict_with_extra_specs(result) @require_context def flavor_get_by_flavor_id(context, flavor_id, read_deleted): """Returns a dict describing specific flavor_id.""" result = _flavor_get_query(context, read_deleted=read_deleted).\ filter_by(flavorid=flavor_id).\ order_by(asc("deleted"), asc("id")).\ first() if not result: raise exception.FlavorNotFound(flavor_id=flavor_id) return _dict_with_extra_specs(result) def flavor_destroy(context, name): """Marks specific flavor as deleted.""" session = get_session() with session.begin(): ref = model_query(context, models.InstanceTypes, session=session, read_deleted="no").\ filter_by(name=name).\ first() if not ref: raise exception.FlavorNotFoundByName(flavor_name=name) ref.soft_delete(session=session) model_query(context, models.InstanceTypeExtraSpecs, session=session, read_deleted="no").\ filter_by(instance_type_id=ref['id']).\ soft_delete() model_query(context, models.InstanceTypeProjects, session=session, read_deleted="no").\ filter_by(instance_type_id=ref['id']).\ soft_delete() def _flavor_access_query(context, session=None): return model_query(context, models.InstanceTypeProjects, session=session, read_deleted="no") def flavor_access_get_by_flavor_id(context, flavor_id): """Get flavor access list by flavor id.""" instance_type_id_subq = \ _flavor_get_id_from_flavor_query(context, flavor_id) access_refs = _flavor_access_query(context).\ filter_by(instance_type_id=instance_type_id_subq).\ all() return access_refs def flavor_access_add(context, flavor_id, project_id): """Add given tenant to the flavor access list.""" instance_type_id = _flavor_get_id_from_flavor(context, flavor_id) access_ref = models.InstanceTypeProjects() access_ref.update({"instance_type_id": instance_type_id, "project_id": 
project_id}) try: access_ref.save() except db_exc.DBDuplicateEntry: raise exception.FlavorAccessExists(flavor_id=flavor_id, project_id=project_id) return access_ref def flavor_access_remove(context, flavor_id, project_id): """Remove given tenant from the flavor access list.""" instance_type_id = _flavor_get_id_from_flavor(context, flavor_id) count = _flavor_access_query(context).\ filter_by(instance_type_id=instance_type_id).\ filter_by(project_id=project_id).\ soft_delete(synchronize_session=False) if count == 0: raise exception.FlavorAccessNotFound(flavor_id=flavor_id, project_id=project_id) def _flavor_extra_specs_get_query(context, flavor_id, session=None): instance_type_id_subq = \ _flavor_get_id_from_flavor_query(context, flavor_id) return model_query(context, models.InstanceTypeExtraSpecs, session=session, read_deleted="no").\ filter_by(instance_type_id=instance_type_id_subq) @require_context def flavor_extra_specs_get(context, flavor_id): rows = _flavor_extra_specs_get_query(context, flavor_id).all() return {row['key']: row['value'] for row in rows} @require_context def flavor_extra_specs_delete(context, flavor_id, key): result = _flavor_extra_specs_get_query(context, flavor_id).\ filter(models.InstanceTypeExtraSpecs.key == key).\ soft_delete(synchronize_session=False) # did not find the extra spec if result == 0: raise exception.FlavorExtraSpecsNotFound( extra_specs_key=key, flavor_id=flavor_id) @require_context def flavor_extra_specs_update_or_create(context, flavor_id, specs, max_retries=10): for attempt in range(max_retries): try: session = get_session() with session.begin(): instance_type_id = _flavor_get_id_from_flavor(context, flavor_id, session) spec_refs = model_query(context, models.InstanceTypeExtraSpecs, session=session, read_deleted="no").\ filter_by(instance_type_id=instance_type_id).\ filter(models.InstanceTypeExtraSpecs.key.in_(specs.keys())).\ all() existing_keys = set() for spec_ref in spec_refs: key = spec_ref["key"] 
existing_keys.add(key) spec_ref.update({"value": specs[key]}) for key, value in specs.items(): if key in existing_keys: continue spec_ref = models.InstanceTypeExtraSpecs() spec_ref.update({"key": key, "value": value, "instance_type_id": instance_type_id}) session.add(spec_ref) return specs except db_exc.DBDuplicateEntry: # a concurrent transaction has been committed, # try again unless this was the last attempt if attempt == max_retries - 1: raise exception.FlavorExtraSpecUpdateCreateFailed( id=flavor_id, retries=max_retries) #################### def cell_create(context, values): cell = models.Cell() cell.update(values) try: cell.save() except db_exc.DBDuplicateEntry: raise exception.CellExists(name=values['name']) return cell def _cell_get_by_name_query(context, cell_name, session=None): return model_query(context, models.Cell, session=session).filter_by(name=cell_name) def cell_update(context, cell_name, values): session = get_session() with session.begin(): cell_query = _cell_get_by_name_query(context, cell_name, session=session) if not cell_query.update(values): raise exception.CellNotFound(cell_name=cell_name) cell = cell_query.first() return cell def cell_delete(context, cell_name): return _cell_get_by_name_query(context, cell_name).soft_delete() def cell_get(context, cell_name): result = _cell_get_by_name_query(context, cell_name).first() if not result: raise exception.CellNotFound(cell_name=cell_name) return result def cell_get_all(context): return model_query(context, models.Cell, read_deleted="no").all() ######################## # User-provided metadata def _instance_metadata_get_multi(context, instance_uuids, session=None, use_slave=False): if not instance_uuids: return [] return model_query(context, models.InstanceMetadata, session=session, use_slave=use_slave).\ filter( models.InstanceMetadata.instance_uuid.in_(instance_uuids)) def _instance_metadata_get_query(context, instance_uuid, session=None): return model_query(context, models.InstanceMetadata, 
session=session, read_deleted="no").\ filter_by(instance_uuid=instance_uuid) @require_context def instance_metadata_get(context, instance_uuid): rows = _instance_metadata_get_query(context, instance_uuid).all() return {row['key']: row['value'] for row in rows} @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def instance_metadata_delete(context, instance_uuid, key): _instance_metadata_get_query(context, instance_uuid).\ filter_by(key=key).\ soft_delete() @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def instance_metadata_update(context, instance_uuid, metadata, delete): all_keys = metadata.keys() session = get_session() with session.begin(subtransactions=True): if delete: _instance_metadata_get_query(context, instance_uuid, session=session).\ filter(~models.InstanceMetadata.key.in_(all_keys)).\ soft_delete(synchronize_session=False) already_existing_keys = [] meta_refs = _instance_metadata_get_query(context, instance_uuid, session=session).\ filter(models.InstanceMetadata.key.in_(all_keys)).\ all() for meta_ref in meta_refs: already_existing_keys.append(meta_ref.key) meta_ref.update({"value": metadata[meta_ref.key]}) new_keys = set(all_keys) - set(already_existing_keys) for key in new_keys: meta_ref = models.InstanceMetadata() meta_ref.update({"key": key, "value": metadata[key], "instance_uuid": instance_uuid}) session.add(meta_ref) return metadata ####################### # System-owned metadata def _instance_system_metadata_get_multi(context, instance_uuids, session=None, use_slave=False): if not instance_uuids: return [] return model_query(context, models.InstanceSystemMetadata, session=session, use_slave=use_slave, read_deleted='yes').\ filter( models.InstanceSystemMetadata.instance_uuid.in_(instance_uuids)) def _instance_system_metadata_get_query(context, instance_uuid, session=None): return model_query(context, models.InstanceSystemMetadata, session=session).\ 
filter_by(instance_uuid=instance_uuid) @require_context def instance_system_metadata_get(context, instance_uuid): rows = _instance_system_metadata_get_query(context, instance_uuid).all() return {row['key']: row['value'] for row in rows} @require_context def instance_system_metadata_update(context, instance_uuid, metadata, delete): all_keys = metadata.keys() session = get_session() with session.begin(subtransactions=True): if delete: _instance_system_metadata_get_query(context, instance_uuid, session=session).\ filter(~models.InstanceSystemMetadata.key.in_(all_keys)).\ soft_delete(synchronize_session=False) already_existing_keys = [] meta_refs = _instance_system_metadata_get_query(context, instance_uuid, session=session).\ filter(models.InstanceSystemMetadata.key.in_(all_keys)).\ all() for meta_ref in meta_refs: already_existing_keys.append(meta_ref.key) meta_ref.update({"value": metadata[meta_ref.key]}) new_keys = set(all_keys) - set(already_existing_keys) for key in new_keys: meta_ref = models.InstanceSystemMetadata() meta_ref.update({"key": key, "value": metadata[key], "instance_uuid": instance_uuid}) session.add(meta_ref) return metadata #################### def agent_build_create(context, values): agent_build_ref = models.AgentBuild() agent_build_ref.update(values) try: agent_build_ref.save() except db_exc.DBDuplicateEntry: raise exception.AgentBuildExists(hypervisor=values['hypervisor'], os=values['os'], architecture=values['architecture']) return agent_build_ref def agent_build_get_by_triple(context, hypervisor, os, architecture): return model_query(context, models.AgentBuild, read_deleted="no").\ filter_by(hypervisor=hypervisor).\ filter_by(os=os).\ filter_by(architecture=architecture).\ first() def agent_build_get_all(context, hypervisor=None): if hypervisor: return model_query(context, models.AgentBuild, read_deleted="no").\ filter_by(hypervisor=hypervisor).\ all() else: return model_query(context, models.AgentBuild, read_deleted="no").\ all() def 
agent_build_destroy(context, agent_build_id): rows_affected = model_query(context, models.AgentBuild).filter_by( id=agent_build_id).soft_delete() if rows_affected == 0: raise exception.AgentBuildNotFound(id=agent_build_id) def agent_build_update(context, agent_build_id, values): rows_affected = model_query(context, models.AgentBuild).\ filter_by(id=agent_build_id).\ update(values) if rows_affected == 0: raise exception.AgentBuildNotFound(id=agent_build_id) #################### @require_context def bw_usage_get(context, uuid, start_period, mac, use_slave=False): values = {'start_period': start_period} values = convert_objects_related_datetimes(values, 'start_period') return model_query(context, models.BandwidthUsage, read_deleted="yes", use_slave=use_slave).\ filter_by(start_period=values['start_period']).\ filter_by(uuid=uuid).\ filter_by(mac=mac).\ first() @require_context def bw_usage_get_by_uuids(context, uuids, start_period, use_slave=False): values = {'start_period': start_period} values = convert_objects_related_datetimes(values, 'start_period') return ( model_query(context, models.BandwidthUsage, read_deleted="yes", use_slave=use_slave). filter(models.BandwidthUsage.uuid.in_(uuids)). filter_by(start_period=values['start_period']). all() ) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out, last_ctr_in, last_ctr_out, last_refreshed=None): session = get_session() if last_refreshed is None: last_refreshed = timeutils.utcnow() # NOTE(comstud): More often than not, we'll be updating records vs # creating records. Optimize accordingly, trying to update existing # records. Fall back to creation when no rows are updated. 
    with session.begin():
        ts_values = {'last_refreshed': last_refreshed,
                     'start_period': start_period}
        ts_keys = ('start_period', 'last_refreshed')
        ts_values = convert_objects_related_datetimes(ts_values, *ts_keys)
        values = {'last_refreshed': ts_values['last_refreshed'],
                  'last_ctr_in': last_ctr_in,
                  'last_ctr_out': last_ctr_out,
                  'bw_in': bw_in,
                  'bw_out': bw_out}
        bw_usage = model_query(context, models.BandwidthUsage,
                               session=session, read_deleted='yes').\
            filter_by(start_period=ts_values['start_period']).\
            filter_by(uuid=uuid).\
            filter_by(mac=mac).first()

        if bw_usage:
            bw_usage.update(values)
            return bw_usage

        # No existing row: create one.
        bwusage = models.BandwidthUsage()
        bwusage.start_period = ts_values['start_period']
        bwusage.uuid = uuid
        bwusage.mac = mac
        bwusage.last_refreshed = ts_values['last_refreshed']
        bwusage.bw_in = bw_in
        bwusage.bw_out = bw_out
        bwusage.last_ctr_in = last_ctr_in
        bwusage.last_ctr_out = last_ctr_out
        try:
            bwusage.save(session=session)
        except db_exc.DBDuplicateEntry:
            # NOTE(sirp): Possible race if two greenthreads attempt to create
            # the usage entry at the same time. First one wins.
            pass
        return bwusage


####################
# Volume usage


@require_context
def vol_get_usage_by_time(context, begin):
    """Return volumes usage that have been updated after a specified time."""
    return model_query(context, models.VolumeUsage, read_deleted="yes").\
        filter(or_(models.VolumeUsage.tot_last_refreshed == null(),
                   models.VolumeUsage.tot_last_refreshed > begin,
                   models.VolumeUsage.curr_last_refreshed == null(),
                   models.VolumeUsage.curr_last_refreshed > begin,
                   )).\
        all()


@require_context
def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
                     instance_id, project_id, user_id, availability_zone,
                     update_totals=False):
    """Upsert a volume-usage row, rolling current counters into the
    totals when the counters have gone backwards (instance reboot/crash)
    or when update_totals is requested.
    """
    session = get_session()

    refreshed = timeutils.utcnow()

    with session.begin():
        values = {}
        # NOTE(dricco): We will be mostly updating current usage records vs
        # updating total or creating records. Optimize accordingly.
        if not update_totals:
            values = {'curr_last_refreshed': refreshed,
                      'curr_reads': rd_req,
                      'curr_read_bytes': rd_bytes,
                      'curr_writes': wr_req,
                      'curr_write_bytes': wr_bytes,
                      'instance_uuid': instance_id,
                      'project_id': project_id,
                      'user_id': user_id,
                      'availability_zone': availability_zone}
        else:
            # NOTE(review): the 'tot_*' values are SQL expressions
            # (column + delta), evaluated server-side on UPDATE.
            values = {'tot_last_refreshed': refreshed,
                      'tot_reads': models.VolumeUsage.tot_reads + rd_req,
                      'tot_read_bytes': models.VolumeUsage.tot_read_bytes +
                                        rd_bytes,
                      'tot_writes': models.VolumeUsage.tot_writes + wr_req,
                      'tot_write_bytes': models.VolumeUsage.tot_write_bytes +
                                         wr_bytes,
                      'curr_reads': 0,
                      'curr_read_bytes': 0,
                      'curr_writes': 0,
                      'curr_write_bytes': 0,
                      'instance_uuid': instance_id,
                      'project_id': project_id,
                      'user_id': user_id,
                      'availability_zone': availability_zone}

        current_usage = model_query(context, models.VolumeUsage,
                            session=session, read_deleted="yes").\
                            filter_by(volume_id=id).\
                            first()
        if current_usage:
            if (rd_req < current_usage['curr_reads'] or
                rd_bytes < current_usage['curr_read_bytes'] or
                wr_req < current_usage['curr_writes'] or
                wr_bytes < current_usage['curr_write_bytes']):
                LOG.info(_LI("Volume(%s) has lower stats then what is in "
                             "the database. Instance must have been rebooted "
                             "or crashed. Updating totals."), id)
                if not update_totals:
                    values['tot_reads'] = (models.VolumeUsage.tot_reads +
                                           current_usage['curr_reads'])
                    values['tot_read_bytes'] = (
                        models.VolumeUsage.tot_read_bytes +
                        current_usage['curr_read_bytes'])
                    values['tot_writes'] = (models.VolumeUsage.tot_writes +
                                            current_usage['curr_writes'])
                    values['tot_write_bytes'] = (
                        models.VolumeUsage.tot_write_bytes +
                        current_usage['curr_write_bytes'])
                else:
                    values['tot_reads'] = (models.VolumeUsage.tot_reads +
                                           current_usage['curr_reads'] +
                                           rd_req)
                    values['tot_read_bytes'] = (
                        models.VolumeUsage.tot_read_bytes +
                        current_usage['curr_read_bytes'] + rd_bytes)
                    values['tot_writes'] = (models.VolumeUsage.tot_writes +
                                            current_usage['curr_writes'] +
                                            wr_req)
                    values['tot_write_bytes'] = (
                        models.VolumeUsage.tot_write_bytes +
                        current_usage['curr_write_bytes'] + wr_bytes)

            current_usage.update(values)
            current_usage.save(session=session)
            session.refresh(current_usage)
            return current_usage

        # No existing row: create one with only the relevant counters set.
        vol_usage = models.VolumeUsage()
        vol_usage.volume_id = id
        vol_usage.instance_uuid = instance_id
        vol_usage.project_id = project_id
        vol_usage.user_id = user_id
        vol_usage.availability_zone = availability_zone

        if not update_totals:
            vol_usage.curr_last_refreshed = refreshed
            vol_usage.curr_reads = rd_req
            vol_usage.curr_read_bytes = rd_bytes
            vol_usage.curr_writes = wr_req
            vol_usage.curr_write_bytes = wr_bytes
        else:
            vol_usage.tot_last_refreshed = refreshed
            vol_usage.tot_reads = rd_req
            vol_usage.tot_read_bytes = rd_bytes
            vol_usage.tot_writes = wr_req
            vol_usage.tot_write_bytes = wr_bytes

        vol_usage.save(session=session)

        return vol_usage


####################
# S3 image mapping


def s3_image_get(context, image_id):
    """Find local s3 image represented by the provided id."""
    result = model_query(context, models.S3Image, read_deleted="yes").\
                 filter_by(id=image_id).\
                 first()

    if not result:
        raise exception.ImageNotFound(image_id=image_id)

    return result


def s3_image_get_by_uuid(context, image_uuid):
    """Find local s3 image represented by the provided uuid."""
    result = model_query(context, models.S3Image, read_deleted="yes").\
                 filter_by(uuid=image_uuid).\
                 first()

    if not result:
        raise exception.ImageNotFound(image_id=image_uuid)

    return result


def s3_image_create(context, image_uuid):
    """Create local s3 image represented by provided uuid."""
    try:
        s3_image_ref = models.S3Image()
        s3_image_ref.update({'uuid': image_uuid})
        s3_image_ref.save()
    except Exception as e:
        raise db_exc.DBError(e)

    return s3_image_ref


####################
# Host aggregates


def _aggregate_get_query(context, model_class, id_field=None, id=None,
                         session=None, read_deleted=None):
    """Base aggregate query, eager-loading hosts/metadata for the
    Aggregate model and optionally filtering id_field == id.
    """
    columns_to_join = {models.Aggregate: ['_hosts', '_metadata']}

    query = model_query(context, model_class, session=session,
                        read_deleted=read_deleted)

    for c in columns_to_join.get(model_class, []):
        query = query.options(joinedload(c))

    if id and id_field:
        query = query.filter(id_field == id)

    return query


def aggregate_create(context, values, metadata=None):
    """Create an aggregate (and optional metadata); raises
    AggregateNameExists when the name is already taken.
    """
    session = get_session()
    query = _aggregate_get_query(context,
                                 models.Aggregate,
                                 models.Aggregate.name,
                                 values['name'],
                                 session=session,
                                 read_deleted='no')
    aggregate = query.first()
    if not aggregate:
        aggregate = models.Aggregate()
        aggregate.update(values)
        aggregate.save(session=session)
        # We don't want these to be lazy loaded later.  We know there is
        # nothing here since we just created this aggregate.
        aggregate._hosts = []
        aggregate._metadata = []
    else:
        raise exception.AggregateNameExists(aggregate_name=values['name'])
    if metadata:
        aggregate_metadata_add(context, aggregate.id, metadata)
    return aggregate_get(context, aggregate.id)


def aggregate_get(context, aggregate_id):
    """Look up an aggregate by id; raises AggregateNotFound if absent."""
    query = _aggregate_get_query(context,
                                 models.Aggregate,
                                 models.Aggregate.id,
                                 aggregate_id)
    aggregate = query.first()

    if not aggregate:
        raise exception.AggregateNotFound(aggregate_id=aggregate_id)

    return aggregate


def aggregate_get_by_host(context, host, key=None):
    """Return rows that match host (mandatory) and metadata key (optional).

    :param host matches host, and is required.
:param key Matches metadata key, if not None. """ query = model_query(context, models.Aggregate) query = query.options(joinedload('_hosts')) query = query.options(joinedload('_metadata')) query = query.join('_hosts') query = query.filter(models.AggregateHost.host == host) if key: query = query.join("_metadata").filter( models.AggregateMetadata.key == key) return query.all() def aggregate_metadata_get_by_host(context, host, key=None): query = model_query(context, models.Aggregate) query = query.join("_hosts") query = query.join("_metadata") query = query.filter(models.AggregateHost.host == host) query = query.options(contains_eager("_metadata")) if key: query = query.filter(models.AggregateMetadata.key == key) rows = query.all() metadata = collections.defaultdict(set) for agg in rows: for kv in agg._metadata: metadata[kv['key']].add(kv['value']) return dict(metadata) def aggregate_get_by_metadata_key(context, key): """Return rows that match metadata key. :param key Matches metadata key. """ query = model_query(context, models.Aggregate) query = query.join("_metadata") query = query.filter(models.AggregateMetadata.key == key) query = query.options(contains_eager("_metadata")) query = query.options(joinedload("_hosts")) return query.all() def aggregate_update(context, aggregate_id, values): session = get_session() if "name" in values: aggregate_by_name = (_aggregate_get_query(context, models.Aggregate, models.Aggregate.name, values['name'], session=session, read_deleted='no').first()) if aggregate_by_name and aggregate_by_name.id != aggregate_id: # there is another aggregate with the new name raise exception.AggregateNameExists(aggregate_name=values['name']) aggregate = (_aggregate_get_query(context, models.Aggregate, models.Aggregate.id, aggregate_id, session=session).first()) set_delete = True if aggregate: if "availability_zone" in values: az = values.pop('availability_zone') if 'metadata' not in values: values['metadata'] = {'availability_zone': az} set_delete = 
False else: values['metadata']['availability_zone'] = az metadata = values.get('metadata') if metadata is not None: aggregate_metadata_add(context, aggregate_id, values.pop('metadata'), set_delete=set_delete) aggregate.update(values) aggregate.save(session=session) return aggregate_get(context, aggregate.id) else: raise exception.AggregateNotFound(aggregate_id=aggregate_id) def aggregate_delete(context, aggregate_id): session = get_session() with session.begin(): count = _aggregate_get_query(context, models.Aggregate, models.Aggregate.id, aggregate_id, session=session).\ soft_delete() if count == 0: raise exception.AggregateNotFound(aggregate_id=aggregate_id) # Delete Metadata model_query(context, models.AggregateMetadata, session=session).\ filter_by(aggregate_id=aggregate_id).\ soft_delete() def aggregate_get_all(context): return _aggregate_get_query(context, models.Aggregate).all() def _aggregate_metadata_get_query(context, aggregate_id, session=None, read_deleted="yes"): return model_query(context, models.AggregateMetadata, read_deleted=read_deleted, session=session).\ filter_by(aggregate_id=aggregate_id) @require_aggregate_exists def aggregate_metadata_get(context, aggregate_id): rows = model_query(context, models.AggregateMetadata).\ filter_by(aggregate_id=aggregate_id).all() return {r['key']: r['value'] for r in rows} @require_aggregate_exists def aggregate_metadata_delete(context, aggregate_id, key): count = _aggregate_get_query(context, models.AggregateMetadata, models.AggregateMetadata.aggregate_id, aggregate_id).\ filter_by(key=key).\ soft_delete() if count == 0: raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id, metadata_key=key) @require_aggregate_exists def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False, max_retries=10): all_keys = metadata.keys() for attempt in range(max_retries): try: session = get_session() with session.begin(): query = _aggregate_metadata_get_query(context, aggregate_id, 
read_deleted='no', session=session) if set_delete: query.filter(~models.AggregateMetadata.key.in_(all_keys)).\ soft_delete(synchronize_session=False) query = \ query.filter(models.AggregateMetadata.key.in_(all_keys)) already_existing_keys = set() for meta_ref in query.all(): key = meta_ref.key meta_ref.update({"value": metadata[key]}) already_existing_keys.add(key) new_entries = [] for key, value in metadata.items(): if key in already_existing_keys: continue new_entries.append({"key": key, "value": value, "aggregate_id": aggregate_id}) if new_entries: session.execute( models.AggregateMetadata.__table__.insert(), new_entries) return metadata except db_exc.DBDuplicateEntry: # a concurrent transaction has been committed, # try again unless this was the last attempt with excutils.save_and_reraise_exception() as ctxt: if attempt < max_retries - 1: ctxt.reraise = False else: msg = _("Add metadata failed for aggregate %(id)s after " "%(retries)s retries") % {"id": aggregate_id, "retries": max_retries} LOG.warn(msg) @require_aggregate_exists def aggregate_host_get_all(context, aggregate_id): rows = model_query(context, models.AggregateHost).\ filter_by(aggregate_id=aggregate_id).all() return [r.host for r in rows] @require_aggregate_exists def aggregate_host_delete(context, aggregate_id, host): count = _aggregate_get_query(context, models.AggregateHost, models.AggregateHost.aggregate_id, aggregate_id).\ filter_by(host=host).\ soft_delete() if count == 0: raise exception.AggregateHostNotFound(aggregate_id=aggregate_id, host=host) @require_aggregate_exists def aggregate_host_add(context, aggregate_id, host): host_ref = models.AggregateHost() host_ref.update({"host": host, "aggregate_id": aggregate_id}) try: host_ref.save() except db_exc.DBDuplicateEntry: raise exception.AggregateHostExists(host=host, aggregate_id=aggregate_id) return host_ref ################ def instance_fault_create(context, values): """Create a new InstanceFault.""" fault_ref = models.InstanceFault() 
fault_ref.update(values) fault_ref.save() return dict(fault_ref) def instance_fault_get_by_instance_uuids(context, instance_uuids): """Get all instance faults for the provided instance_uuids.""" if not instance_uuids: return {} rows = model_query(context, models.InstanceFault, read_deleted='no').\ filter(models.InstanceFault.instance_uuid.in_( instance_uuids)).\ order_by(desc("created_at"), desc("id")).\ all() output = {} for instance_uuid in instance_uuids: output[instance_uuid] = [] for row in rows: data = dict(row) output[row['instance_uuid']].append(data) return output ################## def action_start(context, values): convert_objects_related_datetimes(values, 'start_time') action_ref = models.InstanceAction() action_ref.update(values) action_ref.save() return action_ref def action_finish(context, values): convert_objects_related_datetimes(values, 'start_time', 'finish_time') session = get_session() with session.begin(): query = model_query(context, models.InstanceAction, session=session).\ filter_by(instance_uuid=values['instance_uuid']).\ filter_by(request_id=values['request_id']) if query.update(values) != 1: raise exception.InstanceActionNotFound( request_id=values['request_id'], instance_uuid=values['instance_uuid']) return query.one() def actions_get(context, instance_uuid): """Get all instance actions for the provided uuid.""" actions = model_query(context, models.InstanceAction).\ filter_by(instance_uuid=instance_uuid).\ order_by(desc("created_at"), desc("id")).\ all() return actions def action_get_by_request_id(context, instance_uuid, request_id): """Get the action by request_id and given instance.""" action = _action_get_by_request_id(context, instance_uuid, request_id) return action def _action_get_by_request_id(context, instance_uuid, request_id, session=None): result = model_query(context, models.InstanceAction, session=session).\ filter_by(instance_uuid=instance_uuid).\ filter_by(request_id=request_id).\ first() return result def 
_action_get_last_created_by_instance_uuid(context, instance_uuid, session=None): result = (model_query(context, models.InstanceAction, session=session). filter_by(instance_uuid=instance_uuid). order_by(desc("created_at"), desc("id")). first()) return result def action_event_start(context, values): """Start an event on an instance action.""" convert_objects_related_datetimes(values, 'start_time') session = get_session() with session.begin(): action = _action_get_by_request_id(context, values['instance_uuid'], values['request_id'], session) # When nova-compute restarts, the context is generated again in # init_host workflow, the request_id was different with the request_id # recorded in InstanceAction, so we can't get the original record # according to request_id. Try to get the last created action so that # init_instance can continue to finish the recovery action, like: # powering_off, unpausing, and so on. if not action and not context.project_id: action = _action_get_last_created_by_instance_uuid( context, values['instance_uuid'], session) if not action: raise exception.InstanceActionNotFound( request_id=values['request_id'], instance_uuid=values['instance_uuid']) values['action_id'] = action['id'] event_ref = models.InstanceActionEvent() event_ref.update(values) session.add(event_ref) return event_ref def action_event_finish(context, values): """Finish an event on an instance action.""" convert_objects_related_datetimes(values, 'start_time', 'finish_time') session = get_session() with session.begin(): action = _action_get_by_request_id(context, values['instance_uuid'], values['request_id'], session) # When nova-compute restarts, the context is generated again in # init_host workflow, the request_id was different with the request_id # recorded in InstanceAction, so we can't get the original record # according to request_id. 
Try to get the last created action so that # init_instance can continue to finish the recovery action, like: # powering_off, unpausing, and so on. if not action and not context.project_id: action = _action_get_last_created_by_instance_uuid( context, values['instance_uuid'], session) if not action: raise exception.InstanceActionNotFound( request_id=values['request_id'], instance_uuid=values['instance_uuid']) event_ref = model_query(context, models.InstanceActionEvent, session=session).\ filter_by(action_id=action['id']).\ filter_by(event=values['event']).\ first() if not event_ref: raise exception.InstanceActionEventNotFound(action_id=action['id'], event=values['event']) event_ref.update(values) if values['result'].lower() == 'error': action.update({'message': 'Error'}) return event_ref def action_events_get(context, action_id): events = model_query(context, models.InstanceActionEvent).\ filter_by(action_id=action_id).\ order_by(desc("created_at"), desc("id")).\ all() return events def action_event_get_by_id(context, action_id, event_id): event = model_query(context, models.InstanceActionEvent).\ filter_by(action_id=action_id).\ filter_by(id=event_id).\ first() return event ################## @require_context def ec2_instance_create(context, instance_uuid, id=None): """Create ec2 compatible instance by provided uuid.""" ec2_instance_ref = models.InstanceIdMapping() ec2_instance_ref.update({'uuid': instance_uuid}) if id is not None: ec2_instance_ref.update({'id': id}) ec2_instance_ref.save() return ec2_instance_ref @require_context def ec2_instance_get_by_uuid(context, instance_uuid): result = _ec2_instance_get_query(context).\ filter_by(uuid=instance_uuid).\ first() if not result: raise exception.InstanceNotFound(instance_id=instance_uuid) return result @require_context def ec2_instance_get_by_id(context, instance_id): result = _ec2_instance_get_query(context).\ filter_by(id=instance_id).\ first() if not result: raise 
exception.InstanceNotFound(instance_id=instance_id) return result @require_context def get_instance_uuid_by_ec2_id(context, ec2_id): result = ec2_instance_get_by_id(context, ec2_id) return result['uuid'] def _ec2_instance_get_query(context, session=None): return model_query(context, models.InstanceIdMapping, session=session, read_deleted='yes') def _task_log_get_query(context, task_name, period_beginning, period_ending, host=None, state=None, session=None): values = {'period_beginning': period_beginning, 'period_ending': period_ending} values = convert_objects_related_datetimes(values, *values.keys()) query = model_query(context, models.TaskLog, session=session).\ filter_by(task_name=task_name).\ filter_by(period_beginning=values['period_beginning']).\ filter_by(period_ending=values['period_ending']) if host is not None: query = query.filter_by(host=host) if state is not None: query = query.filter_by(state=state) return query def task_log_get(context, task_name, period_beginning, period_ending, host, state=None): return _task_log_get_query(context, task_name, period_beginning, period_ending, host, state).first() def task_log_get_all(context, task_name, period_beginning, period_ending, host=None, state=None): return _task_log_get_query(context, task_name, period_beginning, period_ending, host, state).all() def task_log_begin_task(context, task_name, period_beginning, period_ending, host, task_items=None, message=None): values = {'period_beginning': period_beginning, 'period_ending': period_ending} values = convert_objects_related_datetimes(values, *values.keys()) task = models.TaskLog() task.task_name = task_name task.period_beginning = values['period_beginning'] task.period_ending = values['period_ending'] task.host = host task.state = "RUNNING" if message: task.message = message if task_items: task.task_items = task_items try: task.save() except db_exc.DBDuplicateEntry: raise exception.TaskAlreadyRunning(task_name=task_name, host=host) def 
task_log_end_task(context, task_name, period_beginning, period_ending, host, errors, message=None): values = dict(state="DONE", errors=errors) if message: values["message"] = message session = get_session() with session.begin(): rows = _task_log_get_query(context, task_name, period_beginning, period_ending, host, session=session).\ update(values) if rows == 0: # It's not running! raise exception.TaskNotRunning(task_name=task_name, host=host) def archive_deleted_rows_for_table(context, tablename, max_rows): """Move up to max_rows rows from one tables to the corresponding shadow table. The context argument is only used for the decorator. :returns: number of rows archived """ # NOTE(guochbo): There is a circular import, nova.db.sqlalchemy.utils # imports nova.db.sqlalchemy.api. from nova.db.sqlalchemy import utils as db_utils engine = get_engine() conn = engine.connect() metadata = MetaData() metadata.bind = engine # NOTE(tdurakov): table metadata should be received # from models, not db tables. Default value specified by SoftDeleteMixin # is known only by models, not DB layer. # IMPORTANT: please do not change source of metadata information for table. table = models.BASE.metadata.tables[tablename] shadow_tablename = _SHADOW_TABLE_PREFIX + tablename rows_archived = 0 try: shadow_table = Table(shadow_tablename, metadata, autoload=True) except NoSuchTableError: # No corresponding shadow table; skip it. return rows_archived if tablename == "dns_domains": # We have one table (dns_domains) where the key is called # "domain" rather than "id" column = table.c.domain else: column = table.c.id # NOTE(guochbo): Use DeleteFromSelect to avoid # database's limit of maximum parameter in one SQL statement. deleted_column = table.c.deleted columns = [c.name for c in table.c] insert = shadow_table.insert(inline=True).\ from_select(columns, sql.select([table], deleted_column != deleted_column.default.arg). 
order_by(column).limit(max_rows)) query_delete = sql.select([column], deleted_column != deleted_column.default.arg).\ order_by(column).limit(max_rows) delete_statement = db_utils.DeleteFromSelect(table, query_delete, column) try: # Group the insert and delete in a transaction. with conn.begin(): conn.execute(insert) result_delete = conn.execute(delete_statement) except db_exc.DBError: # TODO(ekudryashova): replace by DBReferenceError when db layer # raise it. # A foreign key constraint keeps us from deleting some of # these rows until we clean up a dependent table. Just # skip this table for now; we'll come back to it later. msg = _("IntegrityError detected when archiving table %s") % tablename LOG.warn(msg) return rows_archived rows_archived = result_delete.rowcount return rows_archived def archive_deleted_rows(context, max_rows=None): """Move up to max_rows rows from production tables to the corresponding shadow tables. :returns: Number of rows archived. """ # The context argument is only used for the decorator. 
tablenames = [] for model_class in six.itervalues(models.__dict__): if hasattr(model_class, "__tablename__"): tablenames.append(model_class.__tablename__) rows_archived = 0 for tablename in tablenames: rows_archived += archive_deleted_rows_for_table(context, tablename, max_rows=max_rows - rows_archived) if rows_archived >= max_rows: break return rows_archived #################### def _instance_group_get_query(context, model_class, id_field=None, id=None, session=None, read_deleted=None): columns_to_join = {models.InstanceGroup: ['_policies', '_members']} query = model_query(context, model_class, session=session, read_deleted=read_deleted, project_only=True) for c in columns_to_join.get(model_class, []): query = query.options(joinedload(c)) if id and id_field: query = query.filter(id_field == id) return query def instance_group_create(context, values, policies=None, members=None): """Create a new group.""" uuid = values.get('uuid', None) if uuid is None: uuid = uuidutils.generate_uuid() values['uuid'] = uuid session = get_session() with session.begin(): try: group = models.InstanceGroup() group.update(values) group.save(session=session) except db_exc.DBDuplicateEntry: raise exception.InstanceGroupIdExists(group_uuid=uuid) # We don't want these to be lazy loaded later. We know there is # nothing here since we just created this instance group. 
group._policies = [] group._members = [] if policies: _instance_group_policies_add(context, group.id, policies, session=session) if members: _instance_group_members_add(context, group.id, members, session=session) return instance_group_get(context, uuid) def instance_group_get(context, group_uuid): """Get a specific group by uuid.""" group = _instance_group_get_query(context, models.InstanceGroup, models.InstanceGroup.uuid, group_uuid).\ first() if not group: raise exception.InstanceGroupNotFound(group_uuid=group_uuid) return group def instance_group_get_by_instance(context, instance_uuid): session = get_session() with session.begin(): group_member = model_query(context, models.InstanceGroupMember, session=session).\ filter_by(instance_id=instance_uuid).\ first() if not group_member: raise exception.InstanceGroupNotFound(group_uuid='') group = _instance_group_get_query(context, models.InstanceGroup, models.InstanceGroup.id, group_member.group_id, session=session).first() if not group: raise exception.InstanceGroupNotFound( group_uuid=group_member.group_id) return group def instance_group_update(context, group_uuid, values): """Update the attributes of an group. If values contains a metadata key, it updates the aggregate metadata too. Similarly for the policies and members. 
""" session = get_session() with session.begin(): group = model_query(context, models.InstanceGroup, session=session).\ filter_by(uuid=group_uuid).\ first() if not group: raise exception.InstanceGroupNotFound(group_uuid=group_uuid) policies = values.get('policies') if policies is not None: _instance_group_policies_add(context, group.id, values.pop('policies'), set_delete=True, session=session) members = values.get('members') if members is not None: _instance_group_members_add(context, group.id, values.pop('members'), set_delete=True, session=session) group.update(values) if policies: values['policies'] = policies if members: values['members'] = members def instance_group_delete(context, group_uuid): """Delete an group.""" session = get_session() with session.begin(): group_id = _instance_group_id(context, group_uuid, session=session) count = _instance_group_get_query(context, models.InstanceGroup, models.InstanceGroup.uuid, group_uuid, session=session).soft_delete() if count == 0: raise exception.InstanceGroupNotFound(group_uuid=group_uuid) # Delete policies, metadata and members instance_models = [models.InstanceGroupPolicy, models.InstanceGroupMember] for model in instance_models: model_query(context, model, session=session).\ filter_by(group_id=group_id).\ soft_delete() def instance_group_get_all(context): """Get all groups.""" return _instance_group_get_query(context, models.InstanceGroup).all() def instance_group_get_all_by_project_id(context, project_id): """Get all groups.""" return _instance_group_get_query(context, models.InstanceGroup).\ filter_by(project_id=project_id).\ all() def _instance_group_count_by_project_and_user(context, project_id, user_id, session=None): return model_query(context, models.InstanceGroup, read_deleted="no", session=session).\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ count() def _instance_group_model_get_query(context, model_class, group_id, session=None, read_deleted='no'): return model_query(context, 
model_class, read_deleted=read_deleted, session=session).\ filter_by(group_id=group_id) def _instance_group_id(context, group_uuid, session=None): """Returns the group database ID for the group UUID.""" result = model_query(context, models.InstanceGroup, (models.InstanceGroup.id,), session=session).\ filter_by(uuid=group_uuid).\ first() if not result: raise exception.InstanceGroupNotFound(group_uuid=group_uuid) return result.id def _instance_group_members_add(context, id, members, set_delete=False, session=None): if not session: session = get_session() all_members = set(members) with session.begin(subtransactions=True): query = _instance_group_model_get_query(context, models.InstanceGroupMember, id, session=session) if set_delete: query.filter(~models.InstanceGroupMember.instance_id.in_( all_members)).\ soft_delete(synchronize_session=False) query = query.filter( models.InstanceGroupMember.instance_id.in_(all_members)) already_existing = set() for member_ref in query.all(): already_existing.add(member_ref.instance_id) for instance_id in members: if instance_id in already_existing: continue member_ref = models.InstanceGroupMember() member_ref.update({'instance_id': instance_id, 'group_id': id}) session.add(member_ref) return members def instance_group_members_add(context, group_uuid, members, set_delete=False): id = _instance_group_id(context, group_uuid) return _instance_group_members_add(context, id, members, set_delete=set_delete) def instance_group_member_delete(context, group_uuid, instance_id): id = _instance_group_id(context, group_uuid) count = _instance_group_model_get_query(context, models.InstanceGroupMember, id).\ filter_by(instance_id=instance_id).\ soft_delete() if count == 0: raise exception.InstanceGroupMemberNotFound(group_uuid=group_uuid, instance_id=instance_id) def instance_group_members_get(context, group_uuid): id = _instance_group_id(context, group_uuid) instances = model_query(context, models.InstanceGroupMember, 
(models.InstanceGroupMember.instance_id,)).\ filter_by(group_id=id).all() return [instance[0] for instance in instances] def _instance_group_policies_add(context, id, policies, set_delete=False, session=None): if not session: session = get_session() allpols = set(policies) with session.begin(subtransactions=True): query = _instance_group_model_get_query(context, models.InstanceGroupPolicy, id, session=session) if set_delete: query.filter(~models.InstanceGroupPolicy.policy.in_(allpols)).\ soft_delete(synchronize_session=False) query = query.filter(models.InstanceGroupPolicy.policy.in_(allpols)) already_existing = set() for policy_ref in query.all(): already_existing.add(policy_ref.policy) for policy in policies: if policy in already_existing: continue policy_ref = models.InstanceGroupPolicy() policy_ref.update({'policy': policy, 'group_id': id}) session.add(policy_ref) return policies #################### def pci_device_get_by_addr(context, node_id, dev_addr): pci_dev_ref = model_query(context, models.PciDevice).\ filter_by(compute_node_id=node_id).\ filter_by(address=dev_addr).\ first() if not pci_dev_ref: raise exception.PciDeviceNotFound(node_id=node_id, address=dev_addr) return pci_dev_ref def pci_device_get_by_id(context, id): pci_dev_ref = model_query(context, models.PciDevice).\ filter_by(id=id).\ first() if not pci_dev_ref: raise exception.PciDeviceNotFoundById(id=id) return pci_dev_ref def pci_device_get_all_by_node(context, node_id): return model_query(context, models.PciDevice).\ filter_by(compute_node_id=node_id).\ all() @require_context def pci_device_get_all_by_instance_uuid(context, instance_uuid): return model_query(context, models.PciDevice).\ filter_by(status='allocated').\ filter_by(instance_uuid=instance_uuid).\ all() def _instance_pcidevs_get_multi(context, instance_uuids, session=None): return model_query(context, models.PciDevice, session=session).\ filter_by(status='allocated').\ filter(models.PciDevice.instance_uuid.in_(instance_uuids)) def 
pci_device_destroy(context, node_id, address): result = model_query(context, models.PciDevice).\ filter_by(compute_node_id=node_id).\ filter_by(address=address).\ soft_delete() if not result: raise exception.PciDeviceNotFound(node_id=node_id, address=address) def pci_device_update(context, node_id, address, values): session = get_session() with session.begin(): query = model_query(context, models.PciDevice, session=session, read_deleted="no").\ filter_by(compute_node_id=node_id).\ filter_by(address=address) if query.update(values) == 0: device = models.PciDevice() device.update(values) session.add(device) return query.one() #################### def instance_tag_add(context, instance_uuid, tag): session = get_session() tag_ref = models.Tag() tag_ref.resource_id = instance_uuid tag_ref.tag = tag try: with session.begin(subtransactions=True): _check_instance_exists_in_project(context, session, instance_uuid) session.add(tag_ref) except db_exc.DBDuplicateEntry: # NOTE(snikitin): We should ignore tags duplicates pass return tag_ref def instance_tag_set(context, instance_uuid, tags): session = get_session() with session.begin(subtransactions=True): _check_instance_exists_in_project(context, session, instance_uuid) existing = session.query(models.Tag.tag).filter_by( resource_id=instance_uuid).all() existing = set(row.tag for row in existing) tags = set(tags) to_delete = existing - tags to_add = tags - existing if to_delete: session.query(models.Tag).filter_by( resource_id=instance_uuid).filter( models.Tag.tag.in_(to_delete)).delete( synchronize_session=False) if to_add: data = [ {'resource_id': instance_uuid, 'tag': tag} for tag in to_add] session.execute(models.Tag.__table__.insert(), data) return session.query(models.Tag).filter_by( resource_id=instance_uuid).all() def instance_tag_get_by_instance_uuid(context, instance_uuid): session = get_session() with session.begin(subtransactions=True): _check_instance_exists_in_project(context, session, instance_uuid) return 
session.query(models.Tag).filter_by( resource_id=instance_uuid).all() def instance_tag_delete(context, instance_uuid, tag): session = get_session() with session.begin(subtransactions=True): _check_instance_exists_in_project(context, session, instance_uuid) result = session.query(models.Tag).filter_by( resource_id=instance_uuid, tag=tag).delete() if not result: raise exception.InstanceTagNotFound(instance_id=instance_uuid, tag=tag) def instance_tag_delete_all(context, instance_uuid): session = get_session() with session.begin(subtransactions=True): _check_instance_exists_in_project(context, session, instance_uuid) session.query(models.Tag).filter_by(resource_id=instance_uuid).delete() def instance_tag_exists(context, instance_uuid, tag): session = get_session() with session.begin(subtransactions=True): _check_instance_exists_in_project(context, session, instance_uuid) q = session.query(models.Tag).filter_by( resource_id=instance_uuid, tag=tag) return session.query(q.exists()).scalar()
Francis-Liu/animated-broccoli
nova/db/sqlalchemy/api.py
Python
apache-2.0
240,590
import os import re import yaml try: from packaging.version import parse as parse_version except ImportError: from pkg_resources import parse_version from toolbox.config.common import BUTTON_CONFIG_KEYS, CRP_TYPES, CURRENT_MAX_VERSION, CURRENT_MIN_VERSION, PROTOCOLS from .utils import counted_error, fatal_error def compare_version(config: dict, min_version: str, max_version: str): version = parse_version(config['version']) if version < parse_version(min_version): return -1 if version > parse_version(max_version): return 1 return 0 def validate_version(config: dict): cmp = compare_version(config, CURRENT_MIN_VERSION, CURRENT_MAX_VERSION) if cmp < 0: fatal_error('Please, upgrade to version %s with upgrade.py!', CURRENT_MIN_VERSION) if cmp > 0: fatal_error('Please, use a newer toolbox for version %s!', config['version']) def get_crp_type(config: dict) -> str: crp_type = config.get('crp_type') or 'static' if crp_type not in CRP_TYPES: fatal_error("Unknown crp_type: '%s' / %s", crp_type, CRP_TYPES) return crp_type def read_config(path: str, *, pre_validate: bool = True) -> dict: """ Read the config.yml file :param path: path to the file or the base directory :param pre_validate: check version and crp_type fields :return: dict """ if os.path.isdir(path): path = os.path.join(path, 'config.yml') try: with open(path, 'r') as f: config = yaml.safe_load(f) if pre_validate: validate_version(config) get_crp_type(config) return config except Exception as e: fatal_error('%s(%s)', type(e).__name__, e) def parse_bool(value) -> bool: return str(value).lower() in ('true', '1') def validate_bool(key, value): if str(value).lower() not in ('true', 'false', '1', '0'): counted_error('Invalid %s value. 
It must be boolean.', key) def validate_flag(config: dict, flag_required: bool = False): validate_bool('enable_flag_input', config.get('enable_flag_input')) if config.get('flag'): try: if config['flag'][0:6] == 'regex:': re.compile(config['flag'][6:]) except TypeError: counted_error('Invalid flag value. It must be string.') except Exception: counted_error('Failed to compile regex flag.') if not parse_bool(config.get('enable_flag_input')): counted_error('enable_flag_input must be true for static flags.') elif flag_required: counted_error('A static (or regex) flag must be set.') def validate_ports(ports: list, buttons: dict = None): # pylint: disable=too-many-branches unique_ports = set() ssh_ports_count = 0 for port in ports: try: port, protocol = port.split('/', 1) unique_ports.add(port) try: if not 0 < int(port) < 65536: raise ValueError except Exception: counted_error('Invalid port number: %s. Ports must be numbers between 1 and 65535.', port) if protocol not in PROTOCOLS: counted_error('Invalid protocol in config.yml: %s. Valid protocols: %s', protocol, PROTOCOLS) if protocol == 'ssh': ssh_ports_count += 1 except Exception: counted_error('Invalid port format. [port/protocol]') if len(unique_ports) != len(ports): counted_error('Duplicate port numbers found.') if ssh_ports_count > 1: counted_error('More than one SSH ports. Please, use a single SSH connection.') if buttons is not None: if not isinstance(buttons, dict): counted_error('The buttons field must be a dict.') else: for button_key, button in buttons.items(): if button_key not in ports: counted_error('Button key %s is not found in ports.', button_key) for key in button.keys(): if key not in BUTTON_CONFIG_KEYS: counted_error('Key %s is invalid for button %s.', key, button_key)
avatao-content/challenge-toolbox
toolbox/utils/config.py
Python
apache-2.0
4,202
__author__ = 'mpetyx' from django.db import models from OPENiapp.APIS.Context.models import OpeniContextAwareModel class OpeniCard(OpeniContextAwareModel): # id is missing because it is the default url = models.TextField() object_type = models.TextField() service = models.TextField() From = models.TextField() billing_address = models.TextField() number = models.TextField() card_owner_date_of_birth = models.TextField() card_type = models.TextField() expiration_date = models.TextField() card_verification_number = models.TextField()
OPENi-ict/ntua_demo
openiPrototype/openiPrototype/APIS/Products_and_Services/Card/models.py
Python
apache-2.0
585
#!/usr/bin/env python # Copyright 2015-2017 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import matplotlib.pyplot as plt import argparse import json from pr_json_common import * import sys sys.path.append('../JSON_Common') from json_dict_common import * def plot_metrics_as_bar(fileList, metricList, labelList, threads, ylabel): """ Plot metrics on a bar char from the list of metrics supplied, where the metric values are read from the list of files supplied. It is assumed that the list of files are generated from a series of runs which show strong scaling of a code Args: fileList (list): List of filenames from which to read information metricList (list): List of metrics to read labelList (list): List of labels for the metrics to use in the legend threads (bool): Indicates whether threads or processes are used ylabel (str): Label for the y-axis Returns: Nothing """ yData = {} for filename in fileList: profileDict = {} # Read the json in from file with open(filename, 'r') as f: profileDict = json.load(f) # Get the number of processes or threads used numProcs = get_num_threads(profileDict) if threads else get_num_processes(profileDict) # Read the given metrics and update the values to plot yData.update({numProcs : get_dict_field_vals(profileDict, metricList)}) # Plot the data # Get the x-axis data xData = range(len(yData)) # Get the width of an individual bar totalBarsWidth = 0.95 barsPerProc = len(list(yData.values())[0]) barWidth = float(totalBarsWidth) / barsPerProc 
barsPerProc -= 1 # For each of the processes plot a bar colors = ['r', 'b', 'g', 'k'] sortedKeys = sorted(yData.keys()) xInd = 0 for key in sortedKeys: # For each of the metrics plot a bar barData = yData[key] ind = 0 barLoc = xData[xInd] - float(barsPerProc) * barWidth / 2 barHandles = [] for barItem in barData: barHandles.append(plt.bar(barLoc, barItem, width=barWidth, color=colors[ind % len(colors)], align='center', label=labelList[ind])) barLoc += barWidth ind += 1 xInd += 1 plt.xticks(xData, sortedKeys) if (threads): plt.xlabel("Number of Threads") else: plt.xlabel("Number of Processes") plt.ylabel(ylabel) plt.legend(handles=barHandles, loc=1, bbox_to_anchor=(1.1, 1.1)) #### End of function plot_metrics_as_bar if (__name__ == "__main__"): parser = argparse.ArgumentParser(description="Utility to plot a bar chart" + " of different metrics stored in a series of JSON files, assumed to" + " be the export of a Performance Report. It is also assumed " + "that the files are generated from a series of runs that show " + "strong / weak scaling of an application") # Add a file containing a list of files to read data from parser.add_argument("infile", help="JSON file to read a list of input files from", type=argparse.FileType('r')) # Add an argument to provide a file with a list of metrics in parser.add_argument("metricFile", help="File from which to read a list of " + "metrics to show. The contents of the file is of the following form:\n" + "\tlist, of, dictionary, keys [: label]\n" + "where the label is optional, and is used as a label in a legend", type=argparse.FileType('r')) # Add an argument to show if the strong scaling is for threads or processes parser.add_argument("--threads", help="Indicates whether threads or processes" + " should used in the scaling analysis", action="store_true", default=False) defaultYLabel = "Proportion of Time (%)" parser.add_argument("--ylabel", help="Label for the y-axis. 
Default is " + defaultYLabel.replace('%','%%'), default=defaultYLabel) args = parser.parse_args() # Read in the list of files fileList = [line.strip() for line in args.infile.readlines()] # Read in the list of metrics metricList = [] labelList = [] for line in args.metricFile.readlines(): vals = line.strip().split(':') if (len(vals) == 1): metricList.append([val.strip() for val in vals[0].split(',')]) labelList.append(''.join(vals[0].split()[-1])) else: metricList.append([val.strip() for val in vals[0].split(',')]) labelList.append(' '.join(vals[1:])) # Plot the metrics from the files plot_metrics_as_bar(fileList, metricList, labelList, args.threads, args.ylabel) plt.show()
arm-hpc/allinea_json_analysis
PR_JSON_Scripts/plot_pr_bar.py
Python
apache-2.0
5,284
# Copyright 2013 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ldap import ldap.modlist from oslo_config import cfg from keystone import exception from keystone import identity from keystone.tests import unit as tests from keystone.tests.unit import test_ldap_livetest CONF = cfg.CONF def create_object(dn, attrs): conn = ldap.initialize(CONF.ldap.url) conn.simple_bind_s(CONF.ldap.user, CONF.ldap.password) ldif = ldap.modlist.addModlist(attrs) conn.add_s(dn, ldif) conn.unbind_s() class LiveTLSLDAPIdentity(test_ldap_livetest.LiveLDAPIdentity): def _ldap_skip_live(self): self.skip_if_env_not_set('ENABLE_TLS_LDAP_LIVE_TEST') def config_files(self): config_files = super(LiveTLSLDAPIdentity, self).config_files() config_files.append(tests.dirs.tests_conf('backend_tls_liveldap.conf')) return config_files def config_overrides(self): super(LiveTLSLDAPIdentity, self).config_overrides() self.config_fixture.config( group='identity', driver='keystone.identity.backends.ldap.Identity') def test_tls_certfile_demand_option(self): self.config_fixture.config(group='ldap', use_tls=True, tls_cacertdir=None, tls_req_cert='demand') self.identity_api = identity.backends.ldap.Identity() user = {'name': 'fake1', 'password': 'fakepass1', 'tenants': ['bar']} user = self.identity_api.create_user('user') user_ref = self.identity_api.get_user(user['id']) self.assertEqual(user['id'], user_ref['id']) user['password'] = 'fakepass2' 
self.identity_api.update_user(user['id'], user) self.identity_api.delete_user(user['id']) self.assertRaises(exception.UserNotFound, self.identity_api.get_user, user['id']) def test_tls_certdir_demand_option(self): self.config_fixture.config(group='ldap', use_tls=True, tls_cacertdir=None, tls_req_cert='demand') self.identity_api = identity.backends.ldap.Identity() user = {'id': 'fake1', 'name': 'fake1', 'password': 'fakepass1', 'tenants': ['bar']} self.identity_api.create_user('fake1', user) user_ref = self.identity_api.get_user('fake1') self.assertEqual('fake1', user_ref['id']) user['password'] = 'fakepass2' self.identity_api.update_user('fake1', user) self.identity_api.delete_user('fake1') self.assertRaises(exception.UserNotFound, self.identity_api.get_user, 'fake1') def test_tls_bad_certfile(self): self.config_fixture.config( group='ldap', use_tls=True, tls_req_cert='demand', tls_cacertfile='/etc/keystone/ssl/certs/mythicalcert.pem', tls_cacertdir=None) self.identity_api = identity.backends.ldap.Identity() user = {'name': 'fake1', 'password': 'fakepass1', 'tenants': ['bar']} self.assertRaises(IOError, self.identity_api.create_user, user) def test_tls_bad_certdir(self): self.config_fixture.config( group='ldap', use_tls=True, tls_cacertfile=None, tls_req_cert='demand', tls_cacertdir='/etc/keystone/ssl/mythicalcertdir') self.identity_api = identity.backends.ldap.Identity() user = {'name': 'fake1', 'password': 'fakepass1', 'tenants': ['bar']} self.assertRaises(IOError, self.identity_api.create_user, user)
UTSA-ICS/keystone-kerberos
keystone/tests/unit/test_ldap_tls_livetest.py
Python
apache-2.0
4,402
""" in this file, we describe the data type of fp-tree nodes and present an algorithm to build a fp-tree from existing datasets """ import fpgrowth import Queue def supfilter(dataset, minsup): """ supfilter : scan the items and drop the infrequent elements :param dataset: dataset could be in two available types: list(data) and list((data, support)) :param minsup: :return: """ rec = {} for item in dataset: for elem in item: # counting the number of existance for all elements if elem not in rec: rec[elem] = 0 rec[elem] += 1 # filter: find the infrequent keywords if '' in rec: del rec[''] inf = filter(lambda i: i[1] >= minsup, rec.items()) inf = map(lambda i: i[0], inf) # remove the infrequent elements from the dataset dataset = map( lambda i: filter(lambda ele: ele in inf, i), dataset ) # remove the empty items dataset = filter(lambda i: len(i) > 0, dataset) # sort by the support of items dataset = map( lambda i: sorted( i, # we use support of elements as sorting keys key=lambda ele: rec[ele], reverse=True ), dataset ) return dataset class Node: def __init__(self, name=None, parent=None, headertable=None): self.name = name self.sup = 0 self.children = {} self.parent = parent if headertable is not None: # the node will be appended to the header table if such # such a table is provided if name not in headertable: headertable[name] = [] headertable[name].append(self) def appenditem(self, item, headertable): self.sup += 1 if len(item) == 0: return if item[0] not in self.children: self.children[item[0]] = Node(item[0], self, headertable) self.children[item[0]].appenditem(item[1:], headertable) class fptree: def __init__(self, dataset, minsup): """ dataset should be a list of data items, in form of [ ['apple', 'milk', 'wtf'], ['banana'], .... 
] data items can be any strings NOTE: once a fptree is created, it should never be modified (at least in this scenario) """ # minimum support self.minsup = minsup items = supfilter(dataset, minsup) """ the root node of a fp-tree """ self.root = Node() """ header table """ self.headertable = {} for item in items: self.root.appenditem(item, self.headertable) # check if the tree contains only a single prefix # todo def growth(self): """ important!! this function is not thread-safe, so never try to execute it parallelly :return: """ # initialize the queue fpgrowth.patterns = Queue.Queue() fpgrowth.fpgrowth(self, [], self.minsup) fpgrowth.wait() result = [] while not fpgrowth.patterns.empty(): patt = fpgrowth.patterns.get() result.append(patt) return result
liyi-david/dblpmining
lib/fptree.py
Python
apache-2.0
3,263
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Create, get, update, execute and delete an AWS DataSync Task.""" import logging import random from typing import List, Optional from airflow.exceptions import AirflowException, AirflowTaskTimeout from airflow.models import BaseOperator from airflow.providers.amazon.aws.hooks.datasync import AWSDataSyncHook class AWSDataSyncOperator(BaseOperator): r"""Find, Create, Update, Execute and Delete AWS DataSync Tasks. If ``do_xcom_push`` is True, then the DataSync TaskArn and TaskExecutionArn which were executed will be pushed to an XCom. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:AWSDataSyncOperator` .. note:: There may be 0, 1, or many existing DataSync Tasks defined in your AWS environment. The default behavior is to create a new Task if there are 0, or execute the Task if there was 1 Task, or fail if there were many Tasks. :param aws_conn_id: AWS connection to use. :type aws_conn_id: str :param wait_interval_seconds: Time to wait between two consecutive calls to check TaskExecution status. :type wait_interval_seconds: int :param max_iterations: Maximum number of consecutive calls to check TaskExecution status. 
:type max_iterations: int :param task_arn: AWS DataSync TaskArn to use. If None, then this operator will attempt to either search for an existing Task or attempt to create a new Task. :type task_arn: str :param source_location_uri: Source location URI to search for. All DataSync Tasks with a LocationArn with this URI will be considered. Example: ``smb://server/subdir`` :type source_location_uri: str :param destination_location_uri: Destination location URI to search for. All DataSync Tasks with a LocationArn with this URI will be considered. Example: ``s3://airflow_bucket/stuff`` :type destination_location_uri: str :param allow_random_task_choice: If multiple Tasks match, one must be chosen to execute. If allow_random_task_choice is True then a random one is chosen. :type allow_random_task_choice: bool :param allow_random_location_choice: If multiple Locations match, one must be chosen when creating a task. If allow_random_location_choice is True then a random one is chosen. :type allow_random_location_choice: bool :param create_task_kwargs: If no suitable TaskArn is identified, it will be created if ``create_task_kwargs`` is defined. ``create_task_kwargs`` is then used internally like this: ``boto3.create_task(**create_task_kwargs)`` Example: ``{'Name': 'xyz', 'Options': ..., 'Excludes': ..., 'Tags': ...}`` :type create_task_kwargs: dict :param create_source_location_kwargs: If no suitable LocationArn is found, a Location will be created if ``create_source_location_kwargs`` is defined. 
``create_source_location_kwargs`` is then used internally like this: ``boto3.create_location_xyz(**create_source_location_kwargs)`` The xyz is determined from the prefix of source_location_uri, eg ``smb:/...`` or ``s3:/...`` Example: ``{'Subdirectory': ..., 'ServerHostname': ..., ...}`` :type create_source_location_kwargs: dict :param create_destination_location_kwargs: If no suitable LocationArn is found, a Location will be created if ``create_destination_location_kwargs`` is defined. ``create_destination_location_kwargs`` is used internally like this: ``boto3.create_location_xyz(**create_destination_location_kwargs)`` The xyz is determined from the prefix of destination_location_uri, eg ``smb:/...` or ``s3:/...`` Example: ``{'S3BucketArn': ..., 'S3Config': {'BucketAccessRoleArn': ...}, ...}`` :type create_destination_location_kwargs: dict :param update_task_kwargs: If a suitable TaskArn is found or created, it will be updated if ``update_task_kwargs`` is defined. ``update_task_kwargs`` is used internally like this: ``boto3.update_task(TaskArn=task_arn, **update_task_kwargs)`` Example: ``{'Name': 'xyz', 'Options': ..., 'Excludes': ...}`` :type update_task_kwargs: dict :param task_execution_kwargs: Additional kwargs passed directly when starting the Task execution, used internally like this: ``boto3.start_task_execution(TaskArn=task_arn, **task_execution_kwargs)`` :type task_execution_kwargs: dict :param delete_task_after_execution: If True then the TaskArn which was executed will be deleted from AWS DataSync on successful completion. :type delete_task_after_execution: bool :raises AirflowException: If ``task_arn`` was not specified, or if either ``source_location_uri`` or ``destination_location_uri`` were not specified. :raises AirflowException: If source or destination Location were not found and could not be created. :raises AirflowException: If ``choose_task`` or ``choose_location`` fails. 
:raises AirflowException: If Task creation, update, execution or delete fails. """ template_fields = ( "task_arn", "source_location_uri", "destination_location_uri", "create_task_kwargs", "create_source_location_kwargs", "create_destination_location_kwargs", "update_task_kwargs", "task_execution_kwargs", ) template_fields_renderers = { "create_task_kwargs": "json", "create_source_location_kwargs": "json", "create_destination_location_kwargs": "json", "update_task_kwargs": "json", "task_execution_kwargs": "json", } ui_color = "#44b5e2" def __init__( self, *, aws_conn_id: str = "aws_default", wait_interval_seconds: int = 30, max_iterations: int = 60, task_arn: Optional[str] = None, source_location_uri: Optional[str] = None, destination_location_uri: Optional[str] = None, allow_random_task_choice: bool = False, allow_random_location_choice: bool = False, create_task_kwargs: Optional[dict] = None, create_source_location_kwargs: Optional[dict] = None, create_destination_location_kwargs: Optional[dict] = None, update_task_kwargs: Optional[dict] = None, task_execution_kwargs: Optional[dict] = None, delete_task_after_execution: bool = False, **kwargs, ): super().__init__(**kwargs) # Assignments self.aws_conn_id = aws_conn_id self.wait_interval_seconds = wait_interval_seconds self.max_iterations = max_iterations self.task_arn = task_arn self.source_location_uri = source_location_uri self.destination_location_uri = destination_location_uri self.allow_random_task_choice = allow_random_task_choice self.allow_random_location_choice = allow_random_location_choice self.create_task_kwargs = create_task_kwargs if create_task_kwargs else {} self.create_source_location_kwargs = {} if create_source_location_kwargs: self.create_source_location_kwargs = create_source_location_kwargs self.create_destination_location_kwargs = {} if create_destination_location_kwargs: self.create_destination_location_kwargs = create_destination_location_kwargs self.update_task_kwargs = update_task_kwargs 
if update_task_kwargs else {} self.task_execution_kwargs = task_execution_kwargs if task_execution_kwargs else {} self.delete_task_after_execution = delete_task_after_execution # Validations valid = False if self.task_arn: valid = True if self.source_location_uri and self.destination_location_uri: valid = True if not valid: raise AirflowException( "Either specify task_arn or both source_location_uri and destination_location_uri. " "task_arn={} source_location_uri={} destination_location_uri={}".format( task_arn, source_location_uri, destination_location_uri ) ) # Others self.hook: Optional[AWSDataSyncHook] = None # Candidates - these are found in AWS as possible things # for us to use self.candidate_source_location_arns: Optional[List[str]] = None self.candidate_destination_location_arns: Optional[List[str]] = None self.candidate_task_arns: Optional[List[str]] = None # Actuals self.source_location_arn: Optional[str] = None self.destination_location_arn: Optional[str] = None self.task_execution_arn: Optional[str] = None def get_hook(self) -> AWSDataSyncHook: """Create and return AWSDataSyncHook. :return AWSDataSyncHook: An AWSDataSyncHook instance. 
""" if self.hook: return self.hook self.hook = AWSDataSyncHook( aws_conn_id=self.aws_conn_id, wait_interval_seconds=self.wait_interval_seconds, ) return self.hook def execute(self, context): # If task_arn was not specified then try to # find 0, 1 or many candidate DataSync Tasks to run if not self.task_arn: self._get_tasks_and_locations() # If some were found, identify which one to run if self.candidate_task_arns: self.task_arn = self.choose_task(self.candidate_task_arns) # If we could not find one then try to create one if not self.task_arn and self.create_task_kwargs: self._create_datasync_task() if not self.task_arn: raise AirflowException("DataSync TaskArn could not be identified or created.") self.log.info("Using DataSync TaskArn %s", self.task_arn) # Update the DataSync Task if self.update_task_kwargs: self._update_datasync_task() # Execute the DataSync Task self._execute_datasync_task() if not self.task_execution_arn: raise AirflowException("Nothing was executed") # Delete the DataSyncTask if self.delete_task_after_execution: self._delete_datasync_task() return {"TaskArn": self.task_arn, "TaskExecutionArn": self.task_execution_arn} def _get_tasks_and_locations(self) -> None: """Find existing DataSync Task based on source and dest Locations.""" hook = self.get_hook() self.candidate_source_location_arns = self._get_location_arns(self.source_location_uri) self.candidate_destination_location_arns = self._get_location_arns(self.destination_location_uri) if not self.candidate_source_location_arns: self.log.info("No matching source Locations") return if not self.candidate_destination_location_arns: self.log.info("No matching destination Locations") return self.log.info("Finding DataSync TaskArns that have these LocationArns") self.candidate_task_arns = hook.get_task_arns_for_location_arns( self.candidate_source_location_arns, self.candidate_destination_location_arns, ) self.log.info("Found candidate DataSync TaskArns %s", self.candidate_task_arns) def 
choose_task(self, task_arn_list: list) -> Optional[str]: """Select 1 DataSync TaskArn from a list""" if not task_arn_list: return None if len(task_arn_list) == 1: return task_arn_list[0] if self.allow_random_task_choice: # Items are unordered so we don't want to just take # the [0] one as it implies ordered items were received # from AWS and might lead to confusion. Rather explicitly # choose a random one return random.choice(task_arn_list) raise AirflowException(f"Unable to choose a Task from {task_arn_list}") def choose_location(self, location_arn_list: Optional[List[str]]) -> Optional[str]: """Select 1 DataSync LocationArn from a list""" if not location_arn_list: return None if len(location_arn_list) == 1: return location_arn_list[0] if self.allow_random_location_choice: # Items are unordered so we don't want to just take # the [0] one as it implies ordered items were received # from AWS and might lead to confusion. Rather explicitly # choose a random one return random.choice(location_arn_list) raise AirflowException(f"Unable to choose a Location from {location_arn_list}") def _create_datasync_task(self) -> None: """Create a AWS DataSyncTask.""" hook = self.get_hook() self.source_location_arn = self.choose_location(self.candidate_source_location_arns) if not self.source_location_arn and self.source_location_uri and self.create_source_location_kwargs: self.log.info('Attempting to create source Location') self.source_location_arn = hook.create_location( self.source_location_uri, **self.create_source_location_kwargs ) if not self.source_location_arn: raise AirflowException( "Unable to determine source LocationArn. Does a suitable DataSync Location exist?" 
) self.destination_location_arn = self.choose_location(self.candidate_destination_location_arns) if ( not self.destination_location_arn and self.destination_location_uri and self.create_destination_location_kwargs ): self.log.info('Attempting to create destination Location') self.destination_location_arn = hook.create_location( self.destination_location_uri, **self.create_destination_location_kwargs ) if not self.destination_location_arn: raise AirflowException( "Unable to determine destination LocationArn. Does a suitable DataSync Location exist?" ) self.log.info("Creating a Task.") self.task_arn = hook.create_task( self.source_location_arn, self.destination_location_arn, **self.create_task_kwargs ) if not self.task_arn: raise AirflowException("Task could not be created") self.log.info("Created a Task with TaskArn %s", self.task_arn) def _update_datasync_task(self) -> None: """Update a AWS DataSyncTask.""" if not self.task_arn: return hook = self.get_hook() self.log.info("Updating TaskArn %s", self.task_arn) hook.update_task(self.task_arn, **self.update_task_kwargs) self.log.info("Updated TaskArn %s", self.task_arn) def _execute_datasync_task(self) -> None: """Create and monitor an AWSDataSync TaskExecution for a Task.""" if not self.task_arn: raise AirflowException("Missing TaskArn") hook = self.get_hook() # Create a task execution: self.log.info("Starting execution for TaskArn %s", self.task_arn) self.task_execution_arn = hook.start_task_execution(self.task_arn, **self.task_execution_kwargs) self.log.info("Started TaskExecutionArn %s", self.task_execution_arn) # Wait for task execution to complete self.log.info("Waiting for TaskExecutionArn %s", self.task_execution_arn) try: result = hook.wait_for_task_execution(self.task_execution_arn, max_iterations=self.max_iterations) except (AirflowTaskTimeout, AirflowException) as e: self.log.error('Cancelling TaskExecution after Exception: %s', e) self._cancel_datasync_task_execution() raise self.log.info("Completed 
TaskExecutionArn %s", self.task_execution_arn) task_execution_description = hook.describe_task_execution(task_execution_arn=self.task_execution_arn) self.log.info("task_execution_description=%s", task_execution_description) # Log some meaningful statuses level = logging.ERROR if not result else logging.INFO self.log.log(level, 'Status=%s', task_execution_description['Status']) if 'Result' in task_execution_description: for k, v in task_execution_description['Result'].items(): if 'Status' in k or 'Error' in k: self.log.log(level, '%s=%s', k, v) if not result: raise AirflowException(f"Failed TaskExecutionArn {self.task_execution_arn}") def _cancel_datasync_task_execution(self): """Cancel the submitted DataSync task.""" hook = self.get_hook() if self.task_execution_arn: self.log.info("Cancelling TaskExecutionArn %s", self.task_execution_arn) hook.cancel_task_execution(task_execution_arn=self.task_execution_arn) self.log.info("Cancelled TaskExecutionArn %s", self.task_execution_arn) def on_kill(self): self.log.error('Cancelling TaskExecution after task was killed') self._cancel_datasync_task_execution() def _delete_datasync_task(self) -> None: """Deletes an AWS DataSync Task.""" if not self.task_arn: return hook = self.get_hook() # Delete task: self.log.info("Deleting Task with TaskArn %s", self.task_arn) hook.delete_task(self.task_arn) self.log.info("Task Deleted") def _get_location_arns(self, location_uri) -> List[str]: location_arns = self.get_hook().get_location_arns(location_uri) self.log.info("Found LocationArns %s for LocationUri %s", location_arns, location_uri) return location_arns
dhuang/incubator-airflow
airflow/providers/amazon/aws/operators/datasync.py
Python
apache-2.0
18,667
"""Numeric integration of data coming from a source sensor over time.""" from decimal import Decimal, DecimalException import logging import voluptuous as vol from homeassistant.components.sensor import ( DEVICE_CLASS_ENERGY, DEVICE_CLASS_POWER, PLATFORM_SCHEMA, STATE_CLASS_TOTAL, SensorEntity, ) from homeassistant.const import ( ATTR_DEVICE_CLASS, ATTR_UNIT_OF_MEASUREMENT, CONF_METHOD, CONF_NAME, STATE_UNAVAILABLE, STATE_UNKNOWN, TIME_DAYS, TIME_HOURS, TIME_MINUTES, TIME_SECONDS, ) from homeassistant.core import callback import homeassistant.helpers.config_validation as cv from homeassistant.helpers.event import async_track_state_change_event from homeassistant.helpers.restore_state import RestoreEntity # mypy: allow-untyped-defs, no-check-untyped-defs _LOGGER = logging.getLogger(__name__) ATTR_SOURCE_ID = "source" CONF_SOURCE_SENSOR = "source" CONF_ROUND_DIGITS = "round" CONF_UNIT_PREFIX = "unit_prefix" CONF_UNIT_TIME = "unit_time" CONF_UNIT_OF_MEASUREMENT = "unit" TRAPEZOIDAL_METHOD = "trapezoidal" LEFT_METHOD = "left" RIGHT_METHOD = "right" INTEGRATION_METHOD = [TRAPEZOIDAL_METHOD, LEFT_METHOD, RIGHT_METHOD] # SI Metric prefixes UNIT_PREFIXES = {None: 1, "k": 10 ** 3, "M": 10 ** 6, "G": 10 ** 9, "T": 10 ** 12} # SI Time prefixes UNIT_TIME = { TIME_SECONDS: 1, TIME_MINUTES: 60, TIME_HOURS: 60 * 60, TIME_DAYS: 24 * 60 * 60, } ICON = "mdi:chart-histogram" DEFAULT_ROUND = 3 PLATFORM_SCHEMA = vol.All( cv.deprecated(CONF_UNIT_OF_MEASUREMENT), PLATFORM_SCHEMA.extend( { vol.Optional(CONF_NAME): cv.string, vol.Required(CONF_SOURCE_SENSOR): cv.entity_id, vol.Optional(CONF_ROUND_DIGITS, default=DEFAULT_ROUND): vol.Coerce(int), vol.Optional(CONF_UNIT_PREFIX, default=None): vol.In(UNIT_PREFIXES), vol.Optional(CONF_UNIT_TIME, default=TIME_HOURS): vol.In(UNIT_TIME), vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string, vol.Optional(CONF_METHOD, default=TRAPEZOIDAL_METHOD): vol.In( INTEGRATION_METHOD ), } ), ) async def async_setup_platform(hass, config, async_add_entities, 
discovery_info=None): """Set up the integration sensor.""" integral = IntegrationSensor( config[CONF_SOURCE_SENSOR], config.get(CONF_NAME), config[CONF_ROUND_DIGITS], config[CONF_UNIT_PREFIX], config[CONF_UNIT_TIME], config.get(CONF_UNIT_OF_MEASUREMENT), config[CONF_METHOD], ) async_add_entities([integral]) class IntegrationSensor(RestoreEntity, SensorEntity): """Representation of an integration sensor.""" def __init__( self, source_entity, name, round_digits, unit_prefix, unit_time, unit_of_measurement, integration_method, ): """Initialize the integration sensor.""" self._sensor_source_id = source_entity self._round_digits = round_digits self._state = None self._method = integration_method self._name = name if name is not None else f"{source_entity} integral" self._unit_template = ( f"{'' if unit_prefix is None else unit_prefix}{{}}{unit_time}" ) self._unit_of_measurement = unit_of_measurement self._unit_prefix = UNIT_PREFIXES[unit_prefix] self._unit_time = UNIT_TIME[unit_time] self._attr_state_class = STATE_CLASS_TOTAL async def async_added_to_hass(self): """Handle entity which will be added.""" await super().async_added_to_hass() if state := await self.async_get_last_state(): try: self._state = Decimal(state.state) except (DecimalException, ValueError) as err: _LOGGER.warning("Could not restore last state: %s", err) else: self._attr_device_class = state.attributes.get(ATTR_DEVICE_CLASS) if self._unit_of_measurement is None: self._unit_of_measurement = state.attributes.get( ATTR_UNIT_OF_MEASUREMENT ) @callback def calc_integration(event): """Handle the sensor state changes.""" old_state = event.data.get("old_state") new_state = event.data.get("new_state") if self._unit_of_measurement is None: unit = new_state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) self._unit_of_measurement = self._unit_template.format( "" if unit is None else unit ) if ( self.device_class is None and new_state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_POWER ): self._attr_device_class = 
DEVICE_CLASS_ENERGY if ( old_state is None or new_state is None or old_state.state in (STATE_UNKNOWN, STATE_UNAVAILABLE) or new_state.state in (STATE_UNKNOWN, STATE_UNAVAILABLE) ): return try: # integration as the Riemann integral of previous measures. area = 0 elapsed_time = ( new_state.last_updated - old_state.last_updated ).total_seconds() if self._method == TRAPEZOIDAL_METHOD: area = ( (Decimal(new_state.state) + Decimal(old_state.state)) * Decimal(elapsed_time) / 2 ) elif self._method == LEFT_METHOD: area = Decimal(old_state.state) * Decimal(elapsed_time) elif self._method == RIGHT_METHOD: area = Decimal(new_state.state) * Decimal(elapsed_time) integral = area / (self._unit_prefix * self._unit_time) assert isinstance(integral, Decimal) except ValueError as err: _LOGGER.warning("While calculating integration: %s", err) except DecimalException as err: _LOGGER.warning( "Invalid state (%s > %s): %s", old_state.state, new_state.state, err ) except AssertionError as err: _LOGGER.error("Could not calculate integral: %s", err) else: if isinstance(self._state, Decimal): self._state += integral else: self._state = integral self.async_write_ha_state() async_track_state_change_event( self.hass, [self._sensor_source_id], calc_integration ) @property def name(self): """Return the name of the sensor.""" return self._name @property def native_value(self): """Return the state of the sensor.""" if isinstance(self._state, Decimal): return round(self._state, self._round_digits) return self._state @property def native_unit_of_measurement(self): """Return the unit the value is expressed in.""" return self._unit_of_measurement @property def should_poll(self): """No polling needed.""" return False @property def extra_state_attributes(self): """Return the state attributes of the sensor.""" return {ATTR_SOURCE_ID: self._sensor_source_id} @property def icon(self): """Return the icon to use in the frontend.""" return ICON
jawilson/home-assistant
homeassistant/components/integration/sensor.py
Python
apache-2.0
7,671
"""Pytorch Densenet implementation w/ tweaks This file is a copy of https://github.com/pytorch/vision 'densenet.py' (BSD-3-Clause) with fixed kwargs passthrough and addition of dynamic global avg/max pool. """ import re from collections import OrderedDict from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as cp from torch.jit.annotations import List from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from .helpers import build_model_with_cfg from .layers import BatchNormAct2d, create_norm_act, BlurPool2d, create_classifier from .registry import register_model __all__ = ['DenseNet'] def _cfg(url=''): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'features.conv0', 'classifier': 'classifier', } default_cfgs = { 'densenet121': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenet121_ra-50efcf5c.pth'), 'densenet121d': _cfg(url=''), 'densenetblur121d': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenetblur121d_ra-100dcfbc.pth'), 'densenet169': _cfg(url='https://download.pytorch.org/models/densenet169-b2777c0a.pth'), 'densenet201': _cfg(url='https://download.pytorch.org/models/densenet201-c1103571.pth'), 'densenet161': _cfg(url='https://download.pytorch.org/models/densenet161-8d451a50.pth'), 'densenet264': _cfg(url=''), 'densenet264d_iabn': _cfg(url=''), 'tv_densenet121': _cfg(url='https://download.pytorch.org/models/densenet121-a639ec97.pth'), } class DenseLayer(nn.Module): def __init__(self, num_input_features, growth_rate, bn_size, norm_layer=BatchNormAct2d, drop_rate=0., memory_efficient=False): super(DenseLayer, self).__init__() self.add_module('norm1', norm_layer(num_input_features)), self.add_module('conv1', 
nn.Conv2d( num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)), self.add_module('norm2', norm_layer(bn_size * growth_rate)), self.add_module('conv2', nn.Conv2d( bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)), self.drop_rate = float(drop_rate) self.memory_efficient = memory_efficient def bottleneck_fn(self, xs): # type: (List[torch.Tensor]) -> torch.Tensor concated_features = torch.cat(xs, 1) bottleneck_output = self.conv1(self.norm1(concated_features)) # noqa: T484 return bottleneck_output # todo: rewrite when torchscript supports any def any_requires_grad(self, x): # type: (List[torch.Tensor]) -> bool for tensor in x: if tensor.requires_grad: return True return False @torch.jit.unused # noqa: T484 def call_checkpoint_bottleneck(self, x): # type: (List[torch.Tensor]) -> torch.Tensor def closure(*xs): return self.bottleneck_fn(xs) return cp.checkpoint(closure, *x) @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (List[torch.Tensor]) -> (torch.Tensor) pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (torch.Tensor) -> (torch.Tensor) pass # torchscript does not yet support *args, so we overload method # allowing it to take either a List[Tensor] or single Tensor def forward(self, x): # noqa: F811 if isinstance(x, torch.Tensor): prev_features = [x] else: prev_features = x if self.memory_efficient and self.any_requires_grad(prev_features): if torch.jit.is_scripting(): raise Exception("Memory Efficient not supported in JIT") bottleneck_output = self.call_checkpoint_bottleneck(prev_features) else: bottleneck_output = self.bottleneck_fn(prev_features) new_features = self.conv2(self.norm2(bottleneck_output)) if self.drop_rate > 0: new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) return new_features class DenseBlock(nn.ModuleDict): _version = 2 def __init__(self, num_layers, num_input_features, bn_size, growth_rate, 
norm_layer=nn.ReLU, drop_rate=0., memory_efficient=False): super(DenseBlock, self).__init__() for i in range(num_layers): layer = DenseLayer( num_input_features + i * growth_rate, growth_rate=growth_rate, bn_size=bn_size, norm_layer=norm_layer, drop_rate=drop_rate, memory_efficient=memory_efficient, ) self.add_module('denselayer%d' % (i + 1), layer) def forward(self, init_features): features = [init_features] for name, layer in self.items(): new_features = layer(features) features.append(new_features) return torch.cat(features, 1) class DenseTransition(nn.Sequential): def __init__(self, num_input_features, num_output_features, norm_layer=nn.BatchNorm2d, aa_layer=None): super(DenseTransition, self).__init__() self.add_module('norm', norm_layer(num_input_features)) self.add_module('conv', nn.Conv2d( num_input_features, num_output_features, kernel_size=1, stride=1, bias=False)) if aa_layer is not None: self.add_module('pool', aa_layer(num_output_features, stride=2)) else: self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) class DenseNet(nn.Module): r"""Densenet-BC model class, based on `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ Args: growth_rate (int) - how many filters to add each layer (`k` in paper) block_config (list of 4 ints) - how many layers in each pooling block bn_size (int) - multiplicative factor for number of bottle neck layers (i.e. bn_size * k features in the bottleneck layer) drop_rate (float) - dropout rate after each dense layer num_classes (int) - number of classification classes memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, but slower. Default: *False*. 
See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_ """ def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), bn_size=4, stem_type='', num_classes=1000, in_chans=3, global_pool='avg', norm_layer=BatchNormAct2d, aa_layer=None, drop_rate=0, memory_efficient=False, aa_stem_only=True): self.num_classes = num_classes self.drop_rate = drop_rate super(DenseNet, self).__init__() # Stem deep_stem = 'deep' in stem_type # 3x3 deep stem num_init_features = growth_rate * 2 if aa_layer is None: stem_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) else: stem_pool = nn.Sequential(*[ nn.MaxPool2d(kernel_size=3, stride=1, padding=1), aa_layer(channels=num_init_features, stride=2)]) if deep_stem: stem_chs_1 = stem_chs_2 = growth_rate if 'tiered' in stem_type: stem_chs_1 = 3 * (growth_rate // 4) stem_chs_2 = num_init_features if 'narrow' in stem_type else 6 * (growth_rate // 4) self.features = nn.Sequential(OrderedDict([ ('conv0', nn.Conv2d(in_chans, stem_chs_1, 3, stride=2, padding=1, bias=False)), ('norm0', norm_layer(stem_chs_1)), ('conv1', nn.Conv2d(stem_chs_1, stem_chs_2, 3, stride=1, padding=1, bias=False)), ('norm1', norm_layer(stem_chs_2)), ('conv2', nn.Conv2d(stem_chs_2, num_init_features, 3, stride=1, padding=1, bias=False)), ('norm2', norm_layer(num_init_features)), ('pool0', stem_pool), ])) else: self.features = nn.Sequential(OrderedDict([ ('conv0', nn.Conv2d(in_chans, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)), ('norm0', norm_layer(num_init_features)), ('pool0', stem_pool), ])) self.feature_info = [ dict(num_chs=num_init_features, reduction=2, module=f'features.norm{2 if deep_stem else 0}')] current_stride = 4 # DenseBlocks num_features = num_init_features for i, num_layers in enumerate(block_config): block = DenseBlock( num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, norm_layer=norm_layer, drop_rate=drop_rate, memory_efficient=memory_efficient ) module_name = f'denseblock{(i + 
1)}' self.features.add_module(module_name, block) num_features = num_features + num_layers * growth_rate transition_aa_layer = None if aa_stem_only else aa_layer if i != len(block_config) - 1: self.feature_info += [ dict(num_chs=num_features, reduction=current_stride, module='features.' + module_name)] current_stride *= 2 trans = DenseTransition( num_input_features=num_features, num_output_features=num_features // 2, norm_layer=norm_layer, aa_layer=transition_aa_layer) self.features.add_module(f'transition{i + 1}', trans) num_features = num_features // 2 # Final batch norm self.features.add_module('norm5', norm_layer(num_features)) self.feature_info += [dict(num_chs=num_features, reduction=current_stride, module='features.norm5')] self.num_features = num_features # Linear layer self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) # Official init from torch repo. for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.constant_(m.bias, 0) def get_classifier(self): return self.classifier def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): return self.features(x) def forward(self, x): x = self.forward_features(x) x = self.global_pool(x) # both classifier and block drop? 
# if self.drop_rate > 0.: # x = F.dropout(x, p=self.drop_rate, training=self.training) x = self.classifier(x) return x def _filter_torchvision_pretrained(state_dict): pattern = re.compile( r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') for key in list(state_dict.keys()): res = pattern.match(key) if res: new_key = res.group(1) + res.group(2) state_dict[new_key] = state_dict[key] del state_dict[key] return state_dict def _create_densenet(variant, growth_rate, block_config, pretrained, **kwargs): kwargs['growth_rate'] = growth_rate kwargs['block_config'] = block_config return build_model_with_cfg( DenseNet, variant, pretrained, default_cfg=default_cfgs[variant], feature_cfg=dict(flatten_sequential=True), pretrained_filter_fn=_filter_torchvision_pretrained, **kwargs) @register_model def densenet121(pretrained=False, **kwargs): r"""Densenet-121 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` """ model = _create_densenet( 'densenet121', growth_rate=32, block_config=(6, 12, 24, 16), pretrained=pretrained, **kwargs) return model @register_model def densenetblur121d(pretrained=False, **kwargs): r"""Densenet-121 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` """ model = _create_densenet( 'densenetblur121d', growth_rate=32, block_config=(6, 12, 24, 16), pretrained=pretrained, stem_type='deep', aa_layer=BlurPool2d, **kwargs) return model @register_model def densenet121d(pretrained=False, **kwargs): r"""Densenet-121 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` """ model = _create_densenet( 'densenet121d', growth_rate=32, block_config=(6, 12, 24, 16), stem_type='deep', pretrained=pretrained, **kwargs) return model @register_model def densenet169(pretrained=False, **kwargs): r"""Densenet-169 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` 
""" model = _create_densenet( 'densenet169', growth_rate=32, block_config=(6, 12, 32, 32), pretrained=pretrained, **kwargs) return model @register_model def densenet201(pretrained=False, **kwargs): r"""Densenet-201 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` """ model = _create_densenet( 'densenet201', growth_rate=32, block_config=(6, 12, 48, 32), pretrained=pretrained, **kwargs) return model @register_model def densenet161(pretrained=False, **kwargs): r"""Densenet-161 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` """ model = _create_densenet( 'densenet161', growth_rate=48, block_config=(6, 12, 36, 24), pretrained=pretrained, **kwargs) return model @register_model def densenet264(pretrained=False, **kwargs): r"""Densenet-264 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` """ model = _create_densenet( 'densenet264', growth_rate=48, block_config=(6, 12, 64, 48), pretrained=pretrained, **kwargs) return model @register_model def densenet264d_iabn(pretrained=False, **kwargs): r"""Densenet-264 model with deep stem and Inplace-ABN """ def norm_act_fn(num_features, **kwargs): return create_norm_act('iabn', num_features, **kwargs) model = _create_densenet( 'densenet264d_iabn', growth_rate=48, block_config=(6, 12, 64, 48), stem_type='deep', norm_layer=norm_act_fn, pretrained=pretrained, **kwargs) return model @register_model def tv_densenet121(pretrained=False, **kwargs): r"""Densenet-121 model with original Torchvision weights, from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` """ model = _create_densenet( 'tv_densenet121', growth_rate=32, block_config=(6, 12, 24, 16), pretrained=pretrained, **kwargs) return model
rwightman/pytorch-image-models
timm/models/densenet.py
Python
apache-2.0
15,611
from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse from input_functions import get_input_fn import tensorflow as tf from tensorflow.contrib.keras.python.keras.datasets import imdb from tensorflow.contrib.learn.python.learn import learn_runner from tensorflow.contrib.learn.python.learn.estimators import constants from tensorflow.contrib.learn.python.learn.estimators.dynamic_rnn_estimator import PredictionType print('TensorFlow version', tf.__version__) parser = argparse.ArgumentParser() parser.add_argument( '--model_dir', type=str, default='sentiment_analysis_output', help='The directory where the model outputs should be stored') parser.add_argument( '--batch_by_seq_len', type=bool, default=False, help='If True each bath will have sequences of similar length.' 'This makes the model train faster') parser.add_argument( '--max_len', type=int, default=250, help='Sentences will be truncated at max_len') parser.add_argument( '--num_words', type=int, default=1000, help='Only num_words more frequent words will be used for testing') parser.add_argument( '--train_batch_size', type=int, default=16, help='Batch size used for training') parser.add_argument( '--eval_batch_size', type=int, default=16, help='Batch size used for evaluation') parser.add_argument( '--embed_dim', type=int, default=30, help='Embedding dimension') parser.add_argument( '--learning_rate', type=int, default=0.001, help='Learning rate') parser.add_argument( '--num_epochs', type=int, default=10, help='Num epochs used for training (for evaluation is always 1)') parser.add_argument( '--cell_type', type=str, default='lstm', help='RNN cell type') parser.add_argument( '--optimizer', type=str, default='Adam', help='Optimizer used for training') parser.add_argument( '--num_rnn_units', nargs='+', type=int, default=[256, 128], help='Size of the hidden state for each RNN cell') parser.add_argument( '--dropout_keep_probabilities', nargs='+', 
type=int, default=[0.9, 0.9, 0.9], help='Dropout probabilities to keep the cell. ' 'If provided the length should be num_rnn_units + 1') parser.add_argument( '--num_classes', type=int, default=2, help='Number of output classes. ' 'For sentiment analysis is 2 (positive and negative)') FLAGS = parser.parse_args() # create experiment def generate_experiment_fn(x_train, y_train, x_test, y_test): def _experiment_fn(run_config, hparams): del hparams # unused arg # feature sequences xc = tf.contrib.layers.sparse_column_with_integerized_feature( 'x', FLAGS.num_words) xc = tf.contrib.layers.embedding_column(xc, FLAGS.embed_dim) # creates estimator estimator = tf.contrib.learn.DynamicRnnEstimator( config=run_config, problem_type=constants.ProblemType.CLASSIFICATION, prediction_type=PredictionType.SINGLE_VALUE, sequence_feature_columns=[xc], context_feature_columns=None, num_units=FLAGS.num_rnn_units, cell_type=FLAGS.cell_type, optimizer=FLAGS.optimizer, learning_rate=FLAGS.learning_rate, num_classes=FLAGS.num_classes, dropout_keep_probabilities=FLAGS.dropout_keep_probabilities) # input functions train_input = get_input_fn(x_train, y_train, FLAGS.train_batch_size, epochs=FLAGS.num_epochs, max_length=FLAGS.max_len, batch_by_seq_len=FLAGS.batch_by_seq_len) test_input = get_input_fn(x_test, y_test, FLAGS.eval_batch_size, epochs=1, max_length=FLAGS.max_len) # returns Experiment return tf.contrib.learn.Experiment( estimator, train_input_fn=train_input, eval_input_fn=test_input, ) return _experiment_fn def main(unused_argv): # Loading the data # data from: https://keras.io/datasets/ # Dataset of 25,000 movies reviews from IMDB, labeled by sentiment # (positive/negative). # Reviews have been preprocessed, and each review is encoded as a sequence # of word indexes (integers). # For convenience, words are indexed by overall frequency in the dataset. 
print('Loading data...') (x_train, y_train), (x_test, y_test) = imdb.load_data( num_words=FLAGS.num_words) print('size of the train dataset:', x_train.shape[0]) print('size of the test dataset:', x_test.shape[0]) # run experiment run_config = tf.contrib.learn.RunConfig(model_dir=FLAGS.model_dir) learn_runner.run(generate_experiment_fn(x_train, y_train, x_test, y_test), run_config=run_config) if __name__ == '__main__': tf.logging.set_verbosity(tf.logging.INFO) # enable TensorFlow logs tf.app.run()
mari-linhares/tensorflow-workshop
code_samples/RNN/sentiment_analysis/samples/canned_sentiment_analysis.py
Python
apache-2.0
4,929
""" file: processing.py author: Bryce Mecum (mecum@nceas.ucsb.edu) Processes the scientific metadata documents in ./documents for person and organization information. For each document, the script tries to find the person in an existing list. Matches are currently made off of all information available but future versions should be more loose about this. The document a person/organization was found in are also added to that person/organization so the documents belonging to that person/organization can be attributed to them and used in later graph generation activities. """ import os import re import xml.etree.ElementTree as ET from xml.etree.ElementTree import ParseError from d1lod.metadata import eml from d1lod.metadata import dryad from d1lod.metadata import fgdc def processDirectory(job): filenames = os.listdir("%s" % job.directory) i = 0 for filename in filenames: if i % 1000 == 0: print "%d..." % i try: xmldoc = ET.parse("%s/%s" % (job.directory, filename)) except ParseError: continue processDocument(job, xmldoc, filename) i += 1 print "Processed a total of %d documents" % i def detectMetadataFormat(xmldoc): """ Detect the format of the metadata in `xmldoc`. """ root = xmldoc if re.search("eml$", root.tag): return "eml" elif re.search("Dryad", root.tag): return "dryad" elif re.search("metadata", root.tag): return "fgdc" else: return "unknown" def extractCreators(identifier, doc): """ Detect the format of and extract people/organization creators from a document. Arguments: document: str The document's PID doc: An XML document of the scientific metadata Returns: List of records. """ if doc is None: return [] # Detect the format metadata_format = detectMetadataFormat(doc) # Process the document for people/orgs if metadata_format == "eml": records = eml.process(doc, identifier) elif metadata_format == "dryad": records = dryad.process(doc, identifier) elif metadata_format == "fgdc": records = fgdc.process(doc, identifier) else: print "Unknown format." 
records = [] return records def processDocument(job, xmldoc, filename): """ Process an individual document.""" document = filename # Strip trailing revision number from filename just_pid = re.match("(autogen.\d+)\.\d", document) if just_pid is not None: document = just_pid.groups(0)[0] # Map the filename to its PID if we have a map to go off of if job.identifier_map is not None: if document in job.identifier_map: document = job.identifier_map[document] # Null out the document PID if it's not public if job.public_pids is not None: if document not in job.public_pids: document = '' records = extractCreators(document, xmldoc) if records is not None: saveRecords(job, records) def saveRecords(job, records): """Saves an array of records to disk, according to their filename""" if records is None: return for record in records: # Skip empty records if 'type' not in record: continue if record['type'] == 'person': job.writePerson(record) # Add their organization too (if applicable) if 'organization' in record and len(record['organization']) > 0: org_record = { 'name': record['organization'], 'format': record['format'], 'source': record['source'], 'document': record['document'] } job.writeOrganization(org_record) elif record['type'] == 'organization': job.writeOrganization(record)
ec-geolink/d1lod
d1lod/d1lod/people/processing.py
Python
apache-2.0
3,993
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Unit tests for the pipeline options validator module.""" from __future__ import absolute_import import logging import unittest from builtins import object from hamcrest import assert_that from hamcrest import contains_string from hamcrest import only_contains from hamcrest.core.base_matcher import BaseMatcher from apache_beam.internal import pickler from apache_beam.options.pipeline_options import PipelineOptions from apache_beam.options.pipeline_options_validator import PipelineOptionsValidator # Mock runners to use for validations. 
class MockRunners(object):
  """Namespace of stub runner classes used to drive runner-specific checks."""

  class DataflowRunner(object):
    pass

  class TestDataflowRunner(object):
    pass

  class OtherRunner(object):
    pass


# Matcher that always passes for testing on_success_matcher option
class AlwaysPassMatcher(BaseMatcher):

  def _matches(self, item):
    return True


class SetupTest(unittest.TestCase):
  """Unit tests for PipelineOptionsValidator, one test per validated option."""

  def check_errors_for_arguments(self, errors, args):
    """Checks that there is exactly one error for each given argument."""
    missing = []
    remaining = list(errors)

    for arg in args:
      found = False
      for error in remaining:
        if arg in error:
          remaining.remove(error)
          found = True
          break
      if not found:
        missing.append('Missing error for: ' + arg)

    # Return missing and remaining (not matched) errors.
    return missing + remaining

  def test_local_runner(self):
    # Non-Dataflow runners require none of the Dataflow-specific options.
    runner = MockRunners.OtherRunner()
    options = PipelineOptions([])
    validator = PipelineOptionsValidator(options, runner)
    errors = validator.validate()
    self.assertEqual(len(errors), 0)

  def test_missing_required_options(self):
    options = PipelineOptions([''])
    runner = MockRunners.DataflowRunner()
    validator = PipelineOptionsValidator(options, runner)
    errors = validator.validate()
    self.assertEqual(
        self.check_errors_for_arguments(
            errors, ['project', 'staging_location', 'temp_location']),
        [])

  def test_gcs_path(self):
    # Table-driven check of gs:// path validation for temp/staging locations.
    def get_validator(temp_location, staging_location):
      options = ['--project=example:example', '--job_name=job']

      if temp_location is not None:
        options.append('--temp_location=' + temp_location)

      if staging_location is not None:
        options.append('--staging_location=' + staging_location)

      pipeline_options = PipelineOptions(options)
      runner = MockRunners.DataflowRunner()
      validator = PipelineOptionsValidator(pipeline_options, runner)
      return validator

    test_cases = [
        {'temp_location': None,
         'staging_location': 'gs://foo/bar',
         'errors': ['temp_location']},
        {'temp_location': None,
         'staging_location': None,
         'errors': ['staging_location', 'temp_location']},
        {'temp_location': 'gs://foo/bar',
         'staging_location': None,
         'errors': []},
        {'temp_location': 'gs://foo/bar',
         'staging_location': 'gs://ABC/bar',
         'errors': ['staging_location']},
        {'temp_location': 'gcs:/foo/bar',
         'staging_location': 'gs://foo/bar',
         'errors': ['temp_location']},
        {'temp_location': 'gs:/foo/bar',
         'staging_location': 'gs://foo/bar',
         'errors': ['temp_location']},
        {'temp_location': 'gs://ABC/bar',
         'staging_location': 'gs://foo/bar',
         'errors': ['temp_location']},
        # NOTE(review): duplicate of the previous case; possibly meant to vary.
        {'temp_location': 'gs://ABC/bar',
         'staging_location': 'gs://foo/bar',
         'errors': ['temp_location']},
        {'temp_location': 'gs://foo',
         'staging_location': 'gs://foo/bar',
         'errors': ['temp_location']},
        {'temp_location': 'gs://foo/',
         'staging_location': 'gs://foo/bar',
         'errors': []},
        {'temp_location': 'gs://foo/bar',
         'staging_location': 'gs://foo/bar',
         'errors': []},
    ]

    for case in test_cases:
      errors = get_validator(case['temp_location'],
                             case['staging_location']).validate()
      self.assertEqual(
          self.check_errors_for_arguments(errors, case['errors']), [])

  def test_project(self):
    # Project ids must be lowercase, >= 6 chars, optionally "domain:project".
    def get_validator(project):
      options = ['--job_name=job',
                 '--staging_location=gs://foo/bar',
                 '--temp_location=gs://foo/bar']

      if project is not None:
        options.append('--project=' + project)

      pipeline_options = PipelineOptions(options)
      runner = MockRunners.DataflowRunner()
      validator = PipelineOptionsValidator(pipeline_options, runner)
      return validator

    test_cases = [
        {'project': None, 'errors': ['project']},
        {'project': '12345', 'errors': ['project']},
        {'project': 'FOO', 'errors': ['project']},
        {'project': 'foo:BAR', 'errors': ['project']},
        {'project': 'fo', 'errors': ['project']},
        {'project': 'foo', 'errors': []},
        {'project': 'foo:bar', 'errors': []},
    ]

    for case in test_cases:
      errors = get_validator(case['project']).validate()
      self.assertEqual(
          self.check_errors_for_arguments(errors, case['errors']), [])

  def test_job_name(self):
    def get_validator(job_name):
      options = ['--project=example:example',
                 '--staging_location=gs://foo/bar',
                 '--temp_location=gs://foo/bar']

      if job_name is not None:
        options.append('--job_name=' + job_name)

      pipeline_options = PipelineOptions(options)
      runner = MockRunners.DataflowRunner()
      validator = PipelineOptionsValidator(pipeline_options, runner)
      return validator

    test_cases = [
        {'job_name': None, 'errors': []},
        {'job_name': '12345', 'errors': ['job_name']},
        {'job_name': 'FOO', 'errors': ['job_name']},
        {'job_name': 'foo:bar', 'errors': ['job_name']},
        {'job_name': 'fo', 'errors': []},
        {'job_name': 'foo', 'errors': []},
    ]

    for case in test_cases:
      errors = get_validator(case['job_name']).validate()
      self.assertEqual(
          self.check_errors_for_arguments(errors, case['errors']), [])

  def test_num_workers(self):
    # num_workers, when supplied, must be a positive integer.
    def get_validator(num_workers):
      options = ['--project=example:example',
                 '--job_name=job',
                 '--staging_location=gs://foo/bar',
                 '--temp_location=gs://foo/bar']

      if num_workers is not None:
        options.append('--num_workers=' + num_workers)

      pipeline_options = PipelineOptions(options)
      runner = MockRunners.DataflowRunner()
      validator = PipelineOptionsValidator(pipeline_options, runner)
      return validator

    test_cases = [
        {'num_workers': None, 'errors': []},
        {'num_workers': '1', 'errors': []},
        {'num_workers': '0', 'errors': ['num_workers']},
        {'num_workers': '-1', 'errors': ['num_workers']},
    ]

    for case in test_cases:
      errors = get_validator(case['num_workers']).validate()
      self.assertEqual(
          self.check_errors_for_arguments(errors, case['errors']), [])

  def test_is_service_runner(self):
    # Only DataflowRunner against the Google endpoint counts as "service".
    test_cases = [
        {
            'runner': MockRunners.OtherRunner(),
            'options': [],
            'expected': False,
        },
        {
            'runner': MockRunners.OtherRunner(),
            'options': ['--dataflow_endpoint=https://dataflow.googleapis.com'],
            'expected': False,
        },
        {
            'runner': MockRunners.OtherRunner(),
            'options': ['--dataflow_endpoint=https://dataflow.googleapis.com/'],
            'expected': False,
        },
        {
            'runner': MockRunners.DataflowRunner(),
            'options': ['--dataflow_endpoint=https://another.service.com'],
            'expected': False,
        },
        {
            'runner': MockRunners.DataflowRunner(),
            'options': ['--dataflow_endpoint=https://another.service.com/'],
            'expected': False,
        },
        {
            'runner': MockRunners.DataflowRunner(),
            'options': ['--dataflow_endpoint=https://dataflow.googleapis.com'],
            'expected': True,
        },
        {
            'runner': MockRunners.DataflowRunner(),
            'options': ['--dataflow_endpoint=https://dataflow.googleapis.com/'],
            'expected': True,
        },
        {
            'runner': MockRunners.DataflowRunner(),
            'options': [],
            'expected': True,
        },
    ]

    for case in test_cases:
      validator = PipelineOptionsValidator(
          PipelineOptions(case['options']), case['runner'])
      self.assertEqual(validator.is_service_runner(), case['expected'])

  def test_dataflow_job_file_and_template_location_mutually_exclusive(self):
    runner = MockRunners.OtherRunner()
    options = PipelineOptions([
        '--template_location', 'abc',
        '--dataflow_job_file', 'def'
    ])
    validator = PipelineOptionsValidator(options, runner)
    errors = validator.validate()
    self.assertTrue(errors)

  def test_validate_template_location(self):
    runner = MockRunners.OtherRunner()
    options = PipelineOptions([
        '--template_location', 'abc',
    ])
    validator = PipelineOptionsValidator(options, runner)
    errors = validator.validate()
    self.assertFalse(errors)

  def test_validate_dataflow_job_file(self):
    runner = MockRunners.OtherRunner()
    options = PipelineOptions([
        '--dataflow_job_file', 'abc'
    ])
    validator = PipelineOptionsValidator(options, runner)
    errors = validator.validate()
    self.assertFalse(errors)

  def test_test_matcher(self):
    # on_success_matcher must be a pickled BaseMatcher instance.
    def get_validator(matcher):
      options = ['--project=example:example',
                 '--job_name=job',
                 '--staging_location=gs://foo/bar',
                 '--temp_location=gs://foo/bar',]

      if matcher:
        options.append('%s=%s' % ('--on_success_matcher', matcher.decode()))

      pipeline_options = PipelineOptions(options)
      runner = MockRunners.TestDataflowRunner()
      return PipelineOptionsValidator(pipeline_options, runner)

    test_case = [
        {'on_success_matcher': None,
         'errors': []},
        {'on_success_matcher': pickler.dumps(AlwaysPassMatcher()),
         'errors': []},
        {'on_success_matcher': b'abc',
         'errors': ['on_success_matcher']},
        {'on_success_matcher': pickler.dumps(object),
         'errors': ['on_success_matcher']},
    ]

    for case in test_case:
      errors = get_validator(case['on_success_matcher']).validate()
      self.assertEqual(
          self.check_errors_for_arguments(errors, case['errors']), [])

  def test_transform_name_mapping_without_update(self):
    options = ['--project=example:example',
               '--staging_location=gs://foo/bar',
               '--temp_location=gs://foo/bar',
               '--transform_name_mapping={\"fromPardo\":\"toPardo\"}']
    pipeline_options = PipelineOptions(options)
    runner = MockRunners.DataflowRunner()
    validator = PipelineOptionsValidator(pipeline_options, runner)
    errors = validator.validate()
    assert_that(errors, only_contains(
        contains_string('Transform name mapping option is only useful when '
                        '--update and --streaming is specified')))

  def test_transform_name_mapping_invalid_format(self):
    options = ['--project=example:example',
               '--staging_location=gs://foo/bar',
               '--temp_location=gs://foo/bar',
               '--update',
               '--job_name=test',
               '--streaming',
               '--transform_name_mapping={\"fromPardo\":123}']
    pipeline_options = PipelineOptions(options)
    runner = MockRunners.DataflowRunner()
    validator = PipelineOptionsValidator(pipeline_options, runner)
    errors = validator.validate()
    assert_that(errors, only_contains(
        contains_string('Invalid transform name mapping format.')))


if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
markflyhigh/incubator-beam
sdks/python/apache_beam/options/pipeline_options_validator_test.py
Python
apache-2.0
12,764
# -*- coding: utf-8 -*-

# This code is part of Qiskit.
#
# (C) Copyright IBM 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

# pylint: disable=invalid-name

# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

"""
Sphinx documentation builder
"""

# -- Project information -----------------------------------------------------
project = 'Qiskit'
copyright = '2019, Qiskit Development Team'  # pylint: disable=redefined-builtin
author = 'Qiskit Development Team'

# The short X.Y version (left empty on purpose; only `release` is shown).
version = ''
# The full version, including alpha/beta/rc tags
release = '0.12.0'

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode',
    'sphinx.ext.extlinks',
    'sphinx_tabs.tabs',
    'sphinx_automodapi.automodapi',
    'IPython.sphinxext.ipython_console_highlighting',
    'IPython.sphinxext.ipython_directive',
    'reno.sphinxext',
]

# If true, figures, tables and code-blocks are automatically numbered if they
# have a caption.
numfig = True

# A dictionary mapping 'figure', 'table', 'code-block' and 'section' to
# strings that are used for format of figure numbers. As a special character,
# %s will be replaced to figure number.
numfig_format = {
    'table': 'Table %s'
}

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None

# A boolean that decides whether module names are prepended to all object names
# (for object types where a "module" of some kind is defined), e.g. for
# py:function directives.
add_module_names = False

# A list of prefixes that are ignored for sorting the Python module index
# (e.g., if this is set to ['foo.'], then foo.bar is shown under B, not F).
# This can be handy if you document a project that consists of a single
# package. Works only for the HTML builder currently.
modindex_common_prefix = ['qiskit.']

# -- Configuration for extlinks extension ------------------------------------
# Refer to https://www.sphinx-doc.org/en/master/usage/extensions/extlinks.html

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'  # use the theme in subdir 'theme'

html_sidebars = {'**': ['globaltoc.html']}
html_last_updated_fmt = '%Y/%m/%d'
QISKit/qiskit-sdk-py
docs/conf.py
Python
apache-2.0
4,087
###########################################################################
#
#  Copyright 2020 Google LLC
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      https://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
###########################################################################

from starthinker.util.bigquery import table_create
from starthinker.util.data import get_rows
from starthinker.util.data import put_rows
from starthinker.util.google_api import API_DCM
from starthinker.util.cm import get_profile_for_api
from starthinker.util.discovery_to_bigquery import Discovery_To_BigQuery
from starthinker.util.regexp import lookup_id


def cm_placement_group_clear(config, task):
  """(Re)create the empty CM_PlacementGroups BigQuery table.

  The schema is derived from the CM placementGroups.list discovery document,
  so the table always matches what cm_placement_group_load will write.
  """
  table_create(
    config,
    task['auth_bigquery'],
    config.project,
    task['dataset'],
    'CM_PlacementGroups',
    Discovery_To_BigQuery(
      'dfareporting',
      'v3.4'
    ).method_schema(
      'placementGroups.list',
      iterate=True
    )
  )


def cm_placement_group_load(config, task):
  """Fetch CM placement groups for sheet-selected accounts into BigQuery.

  Reads campaign and account ids from the user's sheet, pulls non-archived
  placement groups for each account from the CM API, then clears and
  repopulates the CM_PlacementGroups table.
  """

  # load multiple partners from user defined sheet
  def load_multiple():
    # Campaign ids from the 'CM Campaigns' tab (deduplicated via set()).
    campaigns = [str(lookup_id(r)) for r in set(get_rows(
      config,
      task['auth_cm'],
      { 'sheets': {
        'sheet': task['sheet'],
        'tab': 'CM Campaigns',
        'header':False,
        'range': 'A2:A'
      }},
      unnest=True
    ))]

    for row in get_rows(
      config,
      task['auth_sheets'],
      { 'sheets': {
        'sheet': task['sheet'],
        'tab': 'CM Accounts',
        'header':False,
        'range': 'A2:A'
      }}
    ):
      if row:
        account_id = lookup_id(row[0])
        is_superuser, profile_id = get_profile_for_api(config, task['auth_cm'], account_id)
        kwargs = {
          'profileId': profile_id,
          'campaignIds':campaigns,
          'archived':False
        }
        # Superuser profiles must pass the target account explicitly.
        if is_superuser:
          kwargs['accountId'] = account_id
        yield from API_DCM(
          config,
          task['auth_cm'],
          iterate=True,
          internal=is_superuser
        ).placementGroups().list(**kwargs).execute()

  cm_placement_group_clear(config, task)

  # write placement_groups to database
  put_rows(
    config,
    task['auth_bigquery'],
    { 'bigquery': {
      'dataset': task['dataset'],
      'table': 'CM_PlacementGroups',
      'schema': Discovery_To_BigQuery(
        'dfareporting',
        'v3.4'
      ).method_schema(
        'placementGroups.list',
        iterate=True
      ),
      'format':'JSON'
    }},
    load_multiple()
  )
google/starthinker
starthinker/task/cm_to_dv/cm_placement_group.py
Python
apache-2.0
2,931
from org.myrobotlab.net import BareBonesBrowserLaunch


def outsideLights(value):
    """Switch the outside lights via the Vera hub's HTTP API.

    Args:
        value: 1 turns the lights on; any other value turns them off.
    """
    # Fixed: original read `if value = 1` (assignment, missing colon) and
    # `else` without a colon — both syntax errors.
    if value == 1:
        BareBonesBrowserLaunch.openURL("http://ip_address:3480/data_request?id=action&output_format=xml&DeviceNum=6&serviceId=urn:upnp-org:serviceId:SwitchPower1&action=SetTarget&newTargetValue=01")
    else:
        BareBonesBrowserLaunch.openURL("http://ip_address:3480/data_request?id=action&output_format=xml&DeviceNum=6&serviceId=urn:upnp-org:serviceId:SwitchPower1&action=SetTarget&newTargetValue=0")


def garageLights(value):
    """Switch the garage lights via the Vera hub's HTTP API.

    Args:
        value: 1 turns the lights on; any other value turns them off.

    NOTE(review): the URLs here use DeviceNum=6, identical to outsideLights —
    likely a copy-paste leftover; confirm the garage device number.
    """
    if value == 1:
        BareBonesBrowserLaunch.openURL("http://ip_address:3480/data_request?id=action&output_format=xml&DeviceNum=6&serviceId=urn:upnp-org:serviceId:SwitchPower1&action=SetTarget&newTargetValue=01")
    else:
        BareBonesBrowserLaunch.openURL("http://ip_address:3480/data_request?id=action&output_format=xml&DeviceNum=6&serviceId=urn:upnp-org:serviceId:SwitchPower1&action=SetTarget&newTargetValue=0")


def alarmOn(value):
    """Arm the alarm via the Vera hub's HTTP API.

    Args:
        value: unused; kept for interface compatibility with callers.
    """
    BareBonesBrowserLaunch.openURL("http://ip_address:3480/data_request?id=action&output_format=xml&DeviceNum=6&serviceId=urn:upnp-org:serviceId:SwitchPower1&action=SetTarget&newTargetValue=01")
MyRobotLab/pyrobotlab
home/brotherbrown831/old_py/HomeAutomation.py
Python
apache-2.0
1,156
from freight.models import App, Repository

from .base import Serializer
from .manager import add


@add(App)
class AppSerializer(Serializer):
    def serialize(self, item, attrs):
        """Serialize an App row into its API dict representation.

        Returns a dict with the app id (as a string), name, per-environment
        default refs, and the URL of its repository (or None).
        """
        # Build {env_name: {"defaultRef": ...}} from the stored environments,
        # falling back to a single "production"/"master" entry when empty.
        env_map = {}
        for env, env_data in list(item.environments.items()):
            env_map[env] = {"defaultRef": env_data.get("default_ref", "master")}

        if not env_map:
            env_map["production"] = {"defaultRef": "master"}

        # Fixed: the original called `.first().url` unconditionally, which
        # raises AttributeError when the referenced repository row is gone
        # (Query.first() returns None on no match).
        repo = None
        if item.repository_id:
            repository = Repository.query.filter(
                Repository.id == item.repository_id
            ).first()
            if repository is not None:
                repo = repository.url

        return {
            "id": str(item.id),
            "name": item.name,
            "environments": env_map,
            "repository": repo,
        }
getsentry/freight
freight/api/serializer/app.py
Python
apache-2.0
785
#!/usr/bin/env python
# ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
# ----------------------------------------------------------------
# bgp.py
# Sample program illustrating use of generated api
# ydk.models.bgp.bgp.py which inturn is derived from the
# open-config bgp yang module.
#
# NOTE: this sample is Python 2 (uses `print` statements).

from ydk.types import Empty
from ydk.providers import NetconfServiceProvider, CodecServiceProvider
from ydk.services import CRUDService, NetconfService, CodecService, Datastore
from ydk.models.openconfig import bgp
from ydk.models.openconfig.routing_policy import RoutingPolicy
from _config_builder import _get_bgp_config, _get_routing_cfg, _get_bgp_routing_multiple_object


def bgp_run(netconf_service, session):
    # Push a routing-policy definition, then BGP config with an IPv4
    # neighbor, then extend the same config with an IPv6 neighbor —
    # reading back the candidate datastore after each edit.

    # set up routing policy definition
    routing_policy = _get_routing_cfg()
    netconf_service.edit_config(session, Datastore.candidate, routing_policy)

    bgp_cfg = _get_bgp_config()

    # IPv4 Neighbor instance config done
    netconf_service.edit_config(session, Datastore.candidate, bgp_cfg)

    bgp_cfg_read = netconf_service.get_config(session, Datastore.candidate, bgp.Bgp())
    print bgp_cfg_read

    # IPv6 Neighbor instance config
    nbr_ipv6 = bgp.Bgp.Neighbors.Neighbor()
    nbr_ipv6.parent = bgp_cfg.neighbors
    nbr_ipv6.neighbor_address = '2001:db8:fff1::1'
    nbr_ipv6.config.neighbor_address = '2001:db8:fff1::1'
    nbr_ipv6.config.peer_as = 65002

    # Enable the ipv6-unicast AFI-SAFI on the new neighbor.
    nbr_ipv6_afsf = nbr_ipv6.afi_safis.AfiSafi()
    nbr_ipv6_afsf.afi_safi_name = 'ipv6-unicast'
    nbr_ipv6_afsf.config.peer_as = 65002
    nbr_ipv6_afsf.config.afi_safi_name = 'ipv6-unicast'
    nbr_ipv6_afsf.config.enabled = True

    nbr_ipv6.afi_safis.afi_safi.append(nbr_ipv6_afsf)

    netconf_service.edit_config(session, Datastore.candidate, bgp_cfg)

    # NOTE(review): nbr_ipv6_filter is built but the read below filters on
    # the full bgp_cfg instead — confirm whether the filter was intended.
    nbr_ipv6_filter = bgp.Bgp.Neighbors.Neighbor()
    nbr_ipv6_filter.neighbor_address = '2001:db8:fff1::1'
    nbr_ipv6_read = netconf_service.get_config(session, Datastore.candidate, bgp_cfg)
    print nbr_ipv6_read


def run_multiple_routing_bgp(netconf_service, session):
    # Round-trip check: encode a combined bgp + routing-policy config,
    # push it, read it back, and assert the payloads match.
    crud = CRUDService()
    codec = CodecService()
    codec_provider = CodecServiceProvider()

    # NOTE(review): `bgp` is the imported module; `bgp()` calls it like a
    # class — presumably bgp.Bgp() was intended here and below; confirm.
    crud.delete(session, bgp())
    crud.delete(session, RoutingPolicy())

    multi_cfg = _get_bgp_routing_multiple_object()
    multi_payload_expected = codec.encode(codec_provider, multi_cfg)
    result = netconf_service.edit_config(session, Datastore.candidate, multi_cfg)
    assert 'ok' in result

    multi_filter = {'bgp':bgp(), 'routing-policy':RoutingPolicy()}
    multi_entity_read = netconf_service.get_config(session, Datastore.candidate, multi_filter)
    multi_payload_actual = codec.encode(codec_provider, multi_entity_read)
    assert multi_payload_expected == multi_payload_actual


def init_logging():
    # Route ydk debug logs to stderr with timestamps.
    import logging
    logger = logging.getLogger("ydk")
    logger.setLevel(logging.DEBUG)
    handler = logging.StreamHandler()
    formatter = logging.Formatter(("%(asctime)s - %(name)s - "
                                   "%(levelname)s - %(message)s"))
    handler.setFormatter(formatter)
    logger.addHandler(handler)


if __name__ == "__main__":
    init_logging()
    provider = NetconfServiceProvider(address='127.0.0.1',
                                      username='admin',
                                      password='admin',
                                      protocol='ssh',
                                      port=12022)
    netconf_service = NetconfService()
    bgp_run(netconf_service, provider)
    # run_multiple_routing_bgp(netconf_service, provider)
    exit()
abhikeshav/ydk-py
core/samples/bgp_netconf.py
Python
apache-2.0
4,063
from setuptools import setup
import os
import codecs

here = os.path.abspath(os.path.dirname(__file__))


def read(*parts):
    """Return the text of a file located relative to this setup.py."""
    # intentionally *not* adding an encoding option to open
    return codecs.open(os.path.join(here, *parts), 'r').read()


setup(
    name='cloudify-agent-packager',
    version='3.5.2',
    url='https://github.com/cloudify-cosmo/cloudify-agent-packager',
    author='Gigaspaces',
    author_email='cosmo-admin@gigaspaces.com',
    license='LICENSE',
    platforms='All',
    description='Creates Cloudify Agent Packages',
    long_description=read('README.rst'),
    packages=['agent_packager'],
    include_package_data=True,
    entry_points={
        'console_scripts': [
            'cfy-ap = agent_packager.cli:main',
        ]
    },
    install_requires=[
        # Fixed: was "docopt==.0.6.1" — ".0.6.1" is not a valid PEP 440
        # version, so installation of this requirement failed.
        "docopt==0.6.1",
        "pyyaml==3.10",
        "virtualenv==12.0.7",
        "requests==2.7.0",
        "jingen==0.1.0"
    ],
)
codilime/cloudify-agent-packager
setup.py
Python
apache-2.0
930
"""Import Module Plotly To Ploting Graph""" import plotly.plotly as py import plotly.graph_objs as go """Open and Read CSV from database""" data = open('Real_Final_database_02.csv') alldata = data.readlines() listdata = [] for i in alldata: listdata.append(i.strip().split(',')) type_z = ['Flood', 'Epidemic', 'Drought', 'Earthquake', 'Storm'] size = [22, 19, 10, 7, 5] fill_colors = ['#00d0f5', '#ff4a2e', 'a36800', '#ad9900', '#8b00db'] trace = [] """Select and Set variable Data affect that happen in each disaster in Myanmar""" for i in range(5): year_x = [] death_z = [] types_y = [] for j in listdata: if j[0] == 'Myanmar' and j[2] == type_z[i]: year_x.append(int(j[1])) death_z.append(int(j[5])) types_y.append(type_z[i]) trace.append(go.Scatter(x = year_x, y = death_z, name = type_z[i], line = dict(color = fill_colors[i], width = 2), marker=dict(symbol = 'circle', sizemode = 'diameter', sizeref = 0.85, size = size[i], line = dict(width = 2)))) data = trace """Part of code that adjust layout of graph""" layout = go.Layout(title = 'Total Damage', yaxis = dict(title = 'Total Damage', titlefont = dict(color = '#ff2323'), tickfont = dict(color = '#ff2323')), paper_bgcolor = 'rgb(245, 245, 245)', plot_bgcolor = 'rgb(245, 245, 245)') """Part of plot graph in plotly""" fig = go.Figure(data=data, layout=layout) plot_url = py.plot(fig, filename='Total_Death_in_Myanmar')
pdeesawat/PSIT58_test_01
Code_Affect_Damage_Death_countries/Myanmar_total_death.py
Python
apache-2.0
1,846
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask_wtf import FlaskForm  # type: ignore
from wtforms import (  # type: ignore
    StringField,
    TextAreaField,
    SubmitField,
    FieldList,
    FormField,
    IntegerField,
    HiddenField,
    BooleanField,
)
from wtforms import validators

from data.models import VulnerabilityGitCommits, VulnerabilityResources
from data.models.base import db


class BaseForm(FlaskForm):
    """FlaskForm base that can iterate its visible (non-hidden) fields."""

    @property
    def non_hidden_fields(self):
        """Yield every field except HiddenField instances (e.g. CSRF token)."""
        for field in self:
            if isinstance(field, HiddenField):
                continue
            yield field


class ModelFieldList(FieldList):
    """FieldList bound to a SQLAlchemy model.

    On populate_obj it grows/shrinks the target relationship list to match
    the number of submitted entries, adding/deleting rows via db.session.
    """

    def __init__(self, *args, **kwargs):
        # `model` is required; popped before FieldList sees the kwargs.
        self.model = kwargs.pop("model", None)
        super().__init__(*args, **kwargs)
        if not self.model:
            raise ValueError("ModelFieldList requires model to be set")

    def populate_obj(self, obj, name):
        if not hasattr(obj, name):
            setattr(obj, name, [])
        # Add new model rows until the relationship matches the entry count.
        while len(getattr(obj, name)) < len(self.entries):
            new_model = self.model()
            db.session.add(new_model)
            getattr(obj, name).append(new_model)
        # Remove (and delete) surplus rows.
        while len(getattr(obj, name)) > len(self.entries):
            db.session.delete(getattr(obj, name).pop())
        super().populate_obj(obj, name)


class CommitLinksForm(FlaskForm):
    """Sub-form describing one git commit reference for a vulnerability."""

    repo_url = StringField(
        "Git Repo URL", validators=[validators.Optional(), validators.URL()]
    )
    commit_hash = StringField("Commit Hash", validators=[])
    # Commit data is optional -> otherwise use: validators.DataRequired(),
    commit_link = StringField(
        "Main commit link", validators=[validators.Optional(), validators.URL()]
    )
    repo_name = StringField("Repository Name", validators=[])

    class Meta:
        # CSRF handled by the enclosing parent form.
        csrf = False


class VulnerabilityResourcesForm(FlaskForm):
    """Sub-form for one external resource link."""

    link = StringField("Link", validators=[validators.DataRequired(), validators.URL()])

    class Meta:
        # CSRF handled by the enclosing parent form.
        csrf = False


class VulnerabilityDetailsForm(FlaskForm):
    """Main form for proposing a change to a vulnerability entry."""

    commits = ModelFieldList(
        FormField(CommitLinksForm),
        model=VulnerabilityGitCommits,
        min_entries=1,
        default=[VulnerabilityGitCommits],
    )

    # Changing the CVE ID is disabled for now.
    # The filters argument is used to have Null fields instead of empty strings.
    # This is important since the cve_id is supposed to be unique OR Null.
    # cve_id = StringField(
    #     "CVE-ID",
    #     filters=[lambda x: x and str(x).upper().strip(), lambda x: x or None],
    #     validators=[
    #         validators.Optional(),
    #         validators.Regexp(r"^CVE-\d{4}-\d+$")
    #     ],
    # )
    comment = TextAreaField(
        "High-Level Bug Overview", validators=[validators.DataRequired()]
    )
    resources = ModelFieldList(
        FormField(VulnerabilityResourcesForm), model=VulnerabilityResources
    )
    submit = SubmitField("Propose change")


class VulnerabilityProposalReject(FlaskForm):
    """Reviewer action: request improvements with feedback."""

    review_feedback = TextAreaField(
        "Feedback what should be changed", validators=[validators.DataRequired()]
    )
    submit_reject = SubmitField("Ask for improvements")


class VulnerabilityProposalApprove(FlaskForm):
    """Reviewer action: approve the proposal."""

    submit_approve = SubmitField("Approve proposal")


class VulnerabilityProposalAssign(FlaskForm):
    """Reviewer action: take the review."""

    submit_assign = SubmitField("Take review")


class VulnerabilityProposalUnassign(FlaskForm):
    """Reviewer action: drop the review assignment."""

    submit_unassign = SubmitField("Unassign from this review")


class VulnerabilityProposalPublish(FlaskForm):
    """Reviewer action: publish the approved entry."""

    submit_publish = SubmitField("Publish entry")


class VulnerabilityDeleteForm(FlaskForm):
    """Form carrying the id of the entry to delete."""

    delete_entry = IntegerField("Delete entry", [validators.DataRequired()])
    submit = SubmitField()


class UserProfileForm(BaseForm):
    """User profile settings: display name and picture, each hideable."""

    full_name = StringField(
        "Name",
        description=(
            '<small class="form-text text-muted">'
            "What should be shown next to your contributions.</small>"
        ),
    )
    hide_name = BooleanField("Hide Name")
    profile_picture = StringField(
        "Profile Picture URL", validators=[validators.Optional(), validators.URL()]
    )
    hide_picture = BooleanField("Hide Profile Picture")
google/vulncode-db
data/forms/__init__.py
Python
apache-2.0
4,727
""" Sysconfig - files in ``/etc/sysconfig/`` ======================================== This is a collection of parsers that all deal with the system's configuration files under the ``/etc/sysconfig/`` folder. Parsers included in this module are: ChronydSysconfig - file ``/etc/sysconfig/chronyd`` -------------------------------------------------- DockerSysconfig - file ``/etc/sysconfig/docker`` ------------------------------------------------ HttpdSysconfig - file ``/etc/sysconfig/httpd`` ---------------------------------------------- IrqbalanceSysconfig - file ``/etc/sysconfig/irqbalance`` -------------------------------------------------------- KdumpSysconfig - file ``/etc/sysconfig/kdump`` ---------------------------------------------- MongodSysconfig - file ``/etc/sysconfig/mongod`` ------------------------------------------------ NtpdSysconfig - file ``/etc/sysconfig/ntpd`` -------------------------------------------- VirtWhoSysconfig - file ``/etc/sysconfig/virt-who`` --------------------------------------------------- """ from .. import parser, SysconfigOptions from insights.specs import docker_sysconfig from insights.specs import sysconfig_chronyd from insights.specs import sysconfig_httpd from insights.specs import sysconfig_irqbalance from insights.specs import sysconfig_kdump from insights.specs import sysconfig_mongod from insights.specs import sysconfig_ntpd from insights.specs import sysconfig_virt_who @parser(sysconfig_chronyd) class ChronydSysconfig(SysconfigOptions): """ A parser for analyzing the ``chronyd`` service config file in the ``/etc/sysconfig`` directory. 
Sample Input:: OPTIONS="-d" #HIDE="me" Examples: >>> service_opts = shared[ChronydSysconfig] >>> 'OPTIONS' in service_opts True >>> 'HIDE' in service_opts False >>> service_opts['OPTIONS'] '-d' """ pass @parser(sysconfig_ntpd) class NtpdSysconfig(SysconfigOptions): """ A parser for analyzing the ``ntpd`` service config file in the ``/etc/sysconfig`` directory Sample Input:: OPTIONS="-x -g" #HIDE="me" Examples: >>> service_opts = shared[NTPDService] >>> 'OPTIONS' in service_opts True >>> 'HIDE' in service_opts False >>> service_opts['OPTIONS'] '-x -g' """ pass @parser(docker_sysconfig) class DockerSysconfig(SysconfigOptions): """ Class for parsing the ``/etc/sysconfig/docker`` file using the standard ``SysconfigOptions`` parser class. The 'OPTIONS' variable is also provided in the ``options`` property as a convenience. Examples: >>> conf = shared[DockerSysconfig] >>> 'OPTIONS' in conf True >>> conf['OPTIONS'] '--selinux-enabled' >>> conf.options '--selinux-enabled' >>> conf['DOCKER_CERT_PATH'] '/etc/docker' """ @property def options(self): """ Return the value of the 'OPTIONS' variable, or '' if not defined. """ return self.data.get('OPTIONS', '') @parser(sysconfig_httpd) class HttpdSysconfig(SysconfigOptions): """ A parser for analyzing the ``httpd`` service config file in the ``/etc/sysconfig`` directory. Sample Input:: # The default processing model (MPM) is the process-based # 'prefork' model. A thread-based model, 'worker', is also # available, but does not work with some modules (such as PHP). # The service must be stopped before changing this variable. # HTTPD=/usr/sbin/httpd.worker # # To pass additional options (for instance, -D definitions) to the # httpd binary at startup, set OPTIONS here. 
# OPTIONS= Examples: >>> httpd_syscfg = shared[HttpdSysconfig] >>> httpd_syscfg['HTTPD'] '/usr/sbin/httpd.worker' >>> httpd_syscfg.get('OPTIONS') '' >>> 'NOOP' in httpd_syscfg False """ pass @parser(sysconfig_irqbalance) class IrqbalanceSysconfig(SysconfigOptions): """ A parser for analyzing the ``irqbalance`` service config file in the ``/etc/sysconfig`` directory. Sample Input:: #IRQBALANCE_ONESHOT=yes # # IRQBALANCE_BANNED_CPUS # 64 bit bitmask which allows you to indicate which cpu's should # be skipped when reblancing irqs. Cpu numbers which have their # corresponding bits set to one in this mask will not have any # irq's assigned to them on rebalance # IRQBALANCE_BANNED_CPUS=f8 IRQBALANCE_ARGS="-d" Examples: >>> irqb_syscfg = shared[IRQBalanceSysconfig] >>> irqb_syscfg['IRQBALANCE_BANNED_CPUS'] 'f8' >>> irqb_syscfg.get('IRQBALANCE_ARGS') # quotes will be stripped '-d' >>> irqb_syscfg.get('IRQBALANCE_ONESHOT') None >>> 'ONESHOT' in irqb_syscfg False """ pass @parser(sysconfig_kdump) class KdumpSysconfig(SysconfigOptions): """ Read data from the ``/etc/sysconfig/kdump`` file. This sets the following properties for ease of access: * KDUMP_COMMANDLINE * KDUMP_COMMANDLINE_REMOVE * KDUMP_COMMANDLINE_APPEND * KDUMP_KERNELVER * KDUMP_IMG * KDUMP_IMG_EXT * KEXEC_ARGS These are set to the value of the named variable in the kdump sysconfig file, or '' if not found. """ KDUMP_KEYS = [ 'KDUMP_COMMANDLINE', 'KDUMP_COMMANDLINE_REMOVE', 'KDUMP_COMMANDLINE_APPEND', 'KDUMP_KERNELVER', 'KDUMP_IMG', 'KDUMP_IMG_EXT', 'KEXEC_ARGS', ] def parse_content(self, content): super(KdumpSysconfig, self).parse_content(content) for key in self.KDUMP_KEYS: setattr(self, key, self.data.get(key, '')) @parser(sysconfig_virt_who) class VirtWhoSysconfig(SysconfigOptions): """ A parser for analyzing the ``virt-who`` service configuration file in the ``/etc/sysconfig`` directory. 
Sample Input:: # Register ESX machines using vCenter # VIRTWHO_ESX=0 # Register guests using RHEV-M VIRTWHO_RHEVM=1 # Options for RHEV-M mode VIRTWHO_RHEVM_OWNER= TEST_OPT="A TEST" Examples: >>> vwho_syscfg = shared[VirtWhoSysconfig] >>> vwho_syscfg['VIRTWHO_RHEVM'] '1' >>> vwho_syscfg.get('VIRTWHO_RHEVM_OWNER') '' >>> vwho_syscfg.get('NO_SUCH_OPTION') None >>> 'NOSUCHOPTION' in vwho_syscfg False >>> vwho_syscfg.get('TEST_OPT') # Quotes are stripped 'A TEST' """ pass @parser(sysconfig_mongod) class MongodSysconfig(SysconfigOptions): """ A parser for analyzing the ``mongod`` service configuration file in the ``etc/sysconfig`` directory, contains 'etc/sysconfig/mongod' and '/etc/opt/rh/rh-mongodb26/sysconfig/mongod'. Sample Input:: OPTIONS="--quiet -f /etc/mongod.conf" Examples: >>> mongod_syscfg = shared[MongodWhoSysconfig] >>> mongod_syscfg.get('OPTIONS') '--quiet -f /etc/mongod.conf' >>> mongod_syscfg.get('NO_SUCH_OPTION') None >>> 'NOSUCHOPTION' in mongod_syscfg False """ pass
wcmitchell/insights-core
insights/parsers/sysconfig.py
Python
apache-2.0
7,321
# Copyright 2018 Flight Lab authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility library for displaying arbitrary content on a machine."""

import os
import tempfile

import jinja2

from common import pattern
from utils import app


class Display(pattern.Closable):
    """Class for displaying arbitrary content on a machine.

    The implementation assumes the Chrome browser is available on the given
    machine and uses it to display generated html content in kiosk mode so
    it appears as an app and works on any platform.
    """

    def __init__(self, chrome_path, *args, **kwargs):
        """Creates Display instance.

        Args:
          chrome_path: path to chrome executable.
        """
        super(Display, self).__init__(*args, **kwargs)
        self._chrome_path = chrome_path
        self._temp_path = tempfile.gettempdir()
        # SECURITY FIX: tempfile.mktemp() is deprecated and race-prone (the
        # name can be claimed by another process before we open it). Create
        # the file atomically with mkstemp() and keep only the path; Chrome
        # and _generate_page reopen it by name.
        fd, self._index_file = tempfile.mkstemp(suffix='.html')
        os.close(fd)
        # Chrome is managed as a restartable child app; kiosk/incognito flags
        # make the rendered page look like a dedicated full-screen app.
        self._chrome_app = app.Application(
            name='Browser',
            bin_path=chrome_path,
            arguments=[
                '--kiosk', self._index_file, '--new-window', '--incognito',
                '--noerrordialogs',
                '--user-data-dir={0}'.format(self._temp_path)
            ],
            restart_on_crash=True)

    def close(self):
        """Closes Chrome browser."""
        self._chrome_app.stop()

    def show_message(self, message,
                     template_path='./data/display_message.html'):
        """Shows a text message in full screen.

        Args:
          message: text to show.
          template_path: a html template to use. It should contain
            "{{ message }}".
        """
        self._generate_page(
            template_path=template_path, kwargs={'message': message})
        self._relaunch()

    def show_image(self, image_path,
                   template_path='./data/display_image_default.html'):
        """Shows an image in full screen.

        Current implementation only displays the image at (0,0) and at its
        original size. If image is smaller than screen size, the rest area
        will be white. If image is larger than screen size, it will be
        clipped and scrollbar will appear.

        Args:
          image_path: a locally accessible path to image file.
          template_path: a html template to use. It should contain
            "{{ image_path }}".
        """
        self._generate_page(
            template_path=template_path, kwargs={'image_path': image_path})
        self._relaunch()

    def _generate_page(self, template_path, kwargs=None):
        """Renders the jinja2 template at template_path into the index file.

        Args:
          template_path: path of the jinja2 html template to render.
          kwargs: dict of template variables (e.g. {'message': ...}).
        """
        # BUG FIX: the original used a mutable default argument (kwargs={}),
        # which is shared across calls; use None as the sentinel instead.
        if kwargs is None:
            kwargs = {}
        with open(template_path, 'r') as f:
            template = jinja2.Template(f.read())
        with open(self._index_file, 'w') as f:
            f.write(template.render(**kwargs))

    def _relaunch(self):
        """Restarts Chrome so it picks up the regenerated index file."""
        self._chrome_app.stop()
        self._chrome_app.start()
google/flight-lab
controller/utils/display.py
Python
apache-2.0
3,167
# coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Pre-commit script for Oppia. This script lints Python and JavaScript code, and prints a list of lint errors to the terminal. If the directory path is passed, it will lint all Python and JavaScript files in that directory; otherwise, it will only lint files that have been touched in this commit. This script ignores all filepaths contained within the excludeFiles argument in .jscsrc. Note that, as a side-effect, these filepaths will also prevent Python files in those paths from being linted. IMPORTANT NOTES: 1. Before running this script, you must install third-party dependencies by running bash scripts/start.sh at least once. ===================== CUSTOMIZATION OPTIONS ===================== 1. To lint only files that have been touched in this commit python scripts/pre_commit_linter.py 2. To lint all files in the folder or to lint just a specific file python scripts/pre_commit_linter.py --path filepath 3. To lint a specific list of files (*.js/*.py only). Separate files by spaces python scripts/pre_commit_linter.py --files file_1 file_2 ... file_n Note that the root folder MUST be named 'oppia'. """ # Pylint has issues with the import order of argparse. 
# pylint: disable=wrong-import-order import argparse import fnmatch import multiprocessing import os import json import subprocess import sys import time # pylint: enable=wrong-import-order _PARSER = argparse.ArgumentParser() _EXCLUSIVE_GROUP = _PARSER.add_mutually_exclusive_group() _EXCLUSIVE_GROUP.add_argument( '--path', help='path to the directory with files to be linted', action='store') _EXCLUSIVE_GROUP.add_argument( '--files', nargs='+', help='specific files to be linted. Space separated list', action='store') BAD_PATTERNS = { '__author__': { 'message': 'Please remove author tags from this file.', 'excluded_files': ()}, 'datetime.datetime.now()': { 'message': 'Please use datetime.datetime.utcnow() instead of' 'datetime.datetime.now().', 'excluded_files': ()}, '\t': { 'message': 'Please use spaces instead of tabs.', 'excluded_files': ()}, '\r': { 'message': 'Please make sure all files only have LF endings (no CRLF).', 'excluded_files': ()}, 'glyphicon': { 'message': 'Please use equivalent material-icons ' 'instead of glyphicons.', 'excluded_files': ()} } BAD_PATTERNS_JS = { ' == ': { 'message': 'Please replace == with === in this file.', 'excluded_files': ( 'core/templates/dev/head/expressions/parserSpec.js', 'core/templates/dev/head/expressions/evaluatorSpec.js', 'core/templates/dev/head/expressions/typeParserSpec.js')}, ' != ': { 'message': 'Please replace != with !== in this file.', 'excluded_files': ( 'core/templates/dev/head/expressions/parserSpec.js', 'core/templates/dev/head/expressions/evaluatorSpec.js', 'core/templates/dev/head/expressions/typeParserSpec.js')} } EXCLUDED_PATHS = ( 'third_party/*', '.git/*', '*.pyc', 'CHANGELOG', 'scripts/pre_commit_linter.py', 'integrations/*', 'integrations_dev/*', '*.svg', '*.png', '*.zip', '*.ico', '*.jpg') if not os.getcwd().endswith('oppia'): print '' print 'ERROR Please run this script from the oppia root directory.' 
_PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir)) _PYLINT_PATH = os.path.join(_PARENT_DIR, 'oppia_tools', 'pylint-1.5.2') if not os.path.exists(_PYLINT_PATH): print '' print 'ERROR Please run start.sh first to install pylint ' print ' and its dependencies.' sys.exit(1) _PATHS_TO_INSERT = [ _PYLINT_PATH, os.getcwd(), os.path.join( _PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.19', 'google_appengine', 'lib', 'webapp2-2.3'), os.path.join( _PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.19', 'google_appengine', 'lib', 'yaml-3.10'), os.path.join( _PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.19', 'google_appengine', 'lib', 'jinja2-2.6'), os.path.join( _PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.19', 'google_appengine'), os.path.join(_PARENT_DIR, 'oppia_tools', 'webtest-1.4.2'), os.path.join(_PARENT_DIR, 'oppia_tools', 'numpy-1.6.1'), os.path.join(_PARENT_DIR, 'oppia_tools', 'browsermob-proxy-0.7.1'), os.path.join(_PARENT_DIR, 'oppia_tools', 'selenium-2.53.2'), os.path.join(_PARENT_DIR, 'oppia_tools', 'xvfbwrapper-0.2.8'), os.path.join('third_party', 'gae-pipeline-1.9.17.0'), os.path.join('third_party', 'bleach-1.2.2'), os.path.join('third_party', 'gae-mapreduce-1.9.17.0'), ] for path in _PATHS_TO_INSERT: sys.path.insert(0, path) from pylint import lint # pylint: disable=wrong-import-position _MESSAGE_TYPE_SUCCESS = 'SUCCESS' _MESSAGE_TYPE_FAILED = 'FAILED' def _get_changed_filenames(): """Returns a list of modified files (both staged and unstaged) Returns: a list of filenames of modified files """ unstaged_files = subprocess.check_output([ 'git', 'diff', '--name-only']).splitlines() staged_files = subprocess.check_output([ 'git', 'diff', '--cached', '--name-only', '--diff-filter=ACM']).splitlines() return unstaged_files + staged_files def _get_glob_patterns_excluded_from_jscsrc(config_jscsrc): """Collects excludeFiles from jscsrc file. Args: - config_jscsrc: str. Path to .jscsrc file. Returns: a list of files in excludeFiles. 
""" with open(config_jscsrc) as f: f.readline() # First three lines are comments f.readline() f.readline() json_data = json.loads(f.read()) return json_data['excludeFiles'] def _get_all_files_in_directory(dir_path, excluded_glob_patterns): """Recursively collects all files in directory and subdirectories of specified path. Args: - dir_path: str. Path to the folder to be linted. - excluded_glob_patterns: set. Set of all files to be excluded. Returns: a list of files in directory and subdirectories without excluded files. """ files_in_directory = [] for _dir, _, files in os.walk(dir_path): for file_name in files: filename = os.path.relpath( os.path.join(_dir, file_name), os.getcwd()) if not any([fnmatch.fnmatch(filename, gp) for gp in excluded_glob_patterns]): files_in_directory.append(filename) return files_in_directory def _lint_js_files(node_path, jscs_path, config_jscsrc, files_to_lint, stdout, result): """Prints a list of lint errors in the given list of JavaScript files. Args: - node_path: str. Path to the node binary. - jscs_path: str. Path to the JSCS binary. - config_jscsrc: str. Configuration args for the call to the JSCS binary. - files_to_lint: list of str. A list of filepaths to lint. - stdout: multiprocessing.Queue. A queue to store JSCS outputs - result: multiprocessing.Queue. A queue to put results of test Returns: None """ start_time = time.time() num_files_with_errors = 0 num_js_files = len(files_to_lint) if not files_to_lint: result.put('') print 'There are no JavaScript files to lint.' 
return jscs_cmd_args = [node_path, jscs_path, config_jscsrc] for _, filename in enumerate(files_to_lint): proc_args = jscs_cmd_args + [filename] proc = subprocess.Popen( proc_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) linter_stdout, linter_stderr = proc.communicate() if linter_stderr: print 'LINTER FAILED' print linter_stderr sys.exit(1) if linter_stdout: num_files_with_errors += 1 stdout.put(linter_stdout) if num_files_with_errors: result.put('%s %s JavaScript files' % ( _MESSAGE_TYPE_FAILED, num_files_with_errors)) else: result.put('%s %s JavaScript files linted (%.1f secs)' % ( _MESSAGE_TYPE_SUCCESS, num_js_files, time.time() - start_time)) def _lint_py_files(config_pylint, files_to_lint, result): """Prints a list of lint errors in the given list of Python files. Args: - config_pylint: str. Path to the .pylintrc file. - files_to_lint: list of str. A list of filepaths to lint. - result: multiprocessing.Queue. A queue to put results of test Returns: None """ start_time = time.time() are_there_errors = False num_py_files = len(files_to_lint) if not files_to_lint: result.put('') print 'There are no Python files to lint.' return try: # This prints output to the console. lint.Run(files_to_lint + [config_pylint]) except SystemExit as e: if str(e) != '0': are_there_errors = True if are_there_errors: result.put('%s Python linting failed' % _MESSAGE_TYPE_FAILED) else: result.put('%s %s Python files linted (%.1f secs)' % ( _MESSAGE_TYPE_SUCCESS, num_py_files, time.time() - start_time)) def _get_all_files(): """This function is used to check if this script is ran from root directory and to return a list of all the files for linting and pattern checks. """ jscsrc_path = os.path.join(os.getcwd(), '.jscsrc') parsed_args = _PARSER.parse_args() if parsed_args.path: input_path = os.path.join(os.getcwd(), parsed_args.path) if not os.path.exists(input_path): print 'Could not locate file or directory %s. Exiting.' 
% input_path print '----------------------------------------' sys.exit(1) if os.path.isfile(input_path): all_files = [input_path] else: excluded_glob_patterns = _get_glob_patterns_excluded_from_jscsrc( jscsrc_path) all_files = _get_all_files_in_directory( input_path, excluded_glob_patterns) elif parsed_args.files: valid_filepaths = [] invalid_filepaths = [] for f in parsed_args.files: if os.path.isfile(f): valid_filepaths.append(f) else: invalid_filepaths.append(f) if invalid_filepaths: print ('The following file(s) do not exist: %s\n' 'Exiting.' % invalid_filepaths) sys.exit(1) all_files = valid_filepaths else: all_files = _get_changed_filenames() return all_files def _pre_commit_linter(all_files): """This function is used to check if node-jscs dependencies are installed and pass JSCS binary path """ jscsrc_path = os.path.join(os.getcwd(), '.jscsrc') pylintrc_path = os.path.join(os.getcwd(), '.pylintrc') config_jscsrc = '--config=%s' % jscsrc_path config_pylint = '--rcfile=%s' % pylintrc_path parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir)) node_path = os.path.join( parent_dir, 'oppia_tools', 'node-4.2.1', 'bin', 'node') jscs_path = os.path.join( parent_dir, 'node_modules', 'jscs', 'bin', 'jscs') if not os.path.exists(jscs_path): print '' print 'ERROR Please run start.sh first to install node-jscs ' print ' and its dependencies.' 
sys.exit(1) js_files_to_lint = [ filename for filename in all_files if filename.endswith('.js')] py_files_to_lint = [ filename for filename in all_files if filename.endswith('.py')] js_result = multiprocessing.Queue() linting_processes = [] js_stdout = multiprocessing.Queue() linting_processes.append(multiprocessing.Process( target=_lint_js_files, args=(node_path, jscs_path, config_jscsrc, js_files_to_lint, js_stdout, js_result))) py_result = multiprocessing.Queue() linting_processes.append(multiprocessing.Process( target=_lint_py_files, args=(config_pylint, py_files_to_lint, py_result))) print 'Starting Javascript and Python Linting' print '----------------------------------------' for process in linting_processes: process.start() for process in linting_processes: process.join() js_messages = [] while not js_stdout.empty(): js_messages.append(js_stdout.get()) print '' print '\n'.join(js_messages) print '----------------------------------------' summary_messages = [] summary_messages.append(js_result.get()) summary_messages.append(py_result.get()) print '\n'.join(summary_messages) print '' return summary_messages def _check_bad_patterns(all_files): """This function is used for detecting bad patterns. 
""" print 'Starting Pattern Checks' print '----------------------------------------' total_files_checked = 0 total_error_count = 0 summary_messages = [] all_files = [ filename for filename in all_files if not any(fnmatch.fnmatch(filename, pattern) for pattern in EXCLUDED_PATHS)] all_js_files = [ filename for filename in all_files if filename.endswith('.js')] failed = False for filename in all_files: with open(filename) as f: content = f.read() total_files_checked += 1 for pattern in BAD_PATTERNS: if pattern in content and filename not in ( BAD_PATTERNS[pattern]['excluded_files']): failed = True print '%s --> %s' % ( filename, BAD_PATTERNS[pattern]['message']) total_error_count += 1 if filename in all_js_files: for pattern in BAD_PATTERNS_JS: if filename not in ( BAD_PATTERNS_JS[pattern]['excluded_files']): if pattern in content: failed = True print '%s --> %s' % ( filename, BAD_PATTERNS_JS[pattern]['message']) total_error_count += 1 if failed: summary_message = '%s Pattern checks failed' % _MESSAGE_TYPE_FAILED summary_messages.append(summary_message) else: summary_message = '%s Pattern checks passed' % _MESSAGE_TYPE_SUCCESS summary_messages.append(summary_message) print '' print '----------------------------------------' print '' if total_files_checked == 0: print "There are no files to be checked." else: print '(%s files checked, %s errors found)' % ( total_files_checked, total_error_count) print summary_message return summary_messages def main(): all_files = _get_all_files() linter_messages = _pre_commit_linter(all_files) pattern_messages = _check_bad_patterns(all_files) all_messages = linter_messages + pattern_messages if any([message.startswith(_MESSAGE_TYPE_FAILED) for message in all_messages]): sys.exit(1) if __name__ == '__main__': main()
anggorodewanto/oppia
scripts/pre_commit_linter.py
Python
apache-2.0
16,047
from locust import HttpLocust, TaskSet, task class WebsiteTasks(TaskSet): @task def page1(self): self.client.get("/sugestoes-para/6a-feira-da-quarta-semana-da-pascoa/") @task def page2(self): self.client.get("/sugestoes-para/5a-feira-da-quarta-semana-da-pascoa/") @task def page3(self): self.client.get("/sugestoes-para/4a-feira-da-quarta-semana-da-pascoa/") @task def page4(self): self.client.get("/sugestoes-para/3a-feira-da-quarta-semana-da-pascoa/") @task def musica1(self): self.client.get("/musica/ressuscitou/") @task def musica2(self): self.client.get("/musica/prova-de-amor-maior-nao-ha/") @task def musica3(self): self.client.get("/musica/porque-ele-vive/") @task def musica4(self): self.client.get("/musica/o-senhor-ressuscitou-aleluia/") class WebsiteUser(HttpLocust): task_set = WebsiteTasks min_wait = 5000 max_wait = 15000
gustavofoa/pympm
scripts/locustfile.py
Python
apache-2.0
988
# coding: utf-8

"""
    Fitmarket

    A small number of people -- donors -- share daily measurements of their
    weight. From one donor's daily weight we derive the values of two stocks:
    - stock X has a value equal to the donor's weight on that day.
    - the inverse stock ~X has the value (150 kg - X).
    Note that:
    - as X rises, ~X falls.
    - X + ~X = 150 kg
    Every player starts the game with 10,000 kg of available money. The
    player uses that money to trade stocks. A player's total value is the
    sum of the available money and the current value of all stocks they own.
    The goal of the game is to maximize total value by correctly predicting
    how stock values will move.
    For example, on the first day a player buys 125 shares of "X" at 80 kg.
    On the second day, the stock rises to 82 kg. If the player sells all "X"
    shares, they have earned 2 kg * 125 = 250 kg!
    The game does not allow a donor to trade their own stocks.

    OpenAPI spec version: 1.1.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
"""

from __future__ import absolute_import

import os
import sys
import unittest

import fitmarket_api
from fitmarket_api.rest import ApiException
from fitmarket_api.apis.status_api import StatusApi


class TestStatusApi(unittest.TestCase):
    """ StatusApi unit test stubs """

    def setUp(self):
        # Fresh API client per test; swagger-codegen stubs leave the actual
        # request assertions to be filled in by hand.
        self.api = fitmarket_api.apis.status_api.StatusApi()

    def tearDown(self):
        pass

    def test_actual_state_get(self):
        """
        Test case for actual_state_get

        Fetches JSON with the current prices of all stocks.
        """
        pass

    def test_mystate_get(self):
        """
        Test case for mystate_get

        Fetches JSON showing the user's total value, uninvested value, and
        the values invested in stocks.
        """
        pass

    def test_plot_txt_get(self):
        """
        Test case for plot_txt_get

        Fetches CSV with the prices of all stocks from all previous
        measurements.
        """
        pass


if __name__ == '__main__':
    unittest.main()
brahle/fitmarket-python-api
test/test_status_api.py
Python
apache-2.0
2,605
import json

import django
import urllib3

if __name__ == '__main__':
    django.setup()

from infrastructure.models import Server, CustomField
from resourcehandlers.aws.models import AWSHandler
from common.methods import set_progress
from django.core.serializers.json import DjangoJSONEncoder


def fetch_arns_for_findings(inspector_client):
    """
    Fetch all ARNs for findings discovered in the latest run of all enabled
    Assessment Templates.
    :param inspector_client: boto3 Inspector client for one region.
    :return: set of finding ARN strings.
    """
    # at: Assessment template
    # arn: Amazon resource name
    findings = set()

    # Get all assessment templates for current region
    at_arns = inspector_client.list_assessment_templates()['assessmentTemplateArns']
    if len(at_arns) > 0:
        at_details = inspector_client.describe_assessment_templates(
            assessmentTemplateArns=at_arns)

        # For each template, get the ARN for the latest run
        if "assessmentTemplates" in at_details:
            at_runs = [t['lastAssessmentRunArn']
                       for t in at_details['assessmentTemplates']]
            paginator = inspector_client.get_paginator('list_findings')
            for page in paginator.paginate(assessmentRunArns=at_runs,
                                           maxResults=500):
                # BUG FIX: the original kept only page['findingArns'][0],
                # silently discarding up to 499 finding ARNs per page even
                # though the docstring promises all ARNs.
                findings.update(page['findingArns'])

    return findings


def get_instance_id(finding):
    """
    Given a finding, go find and return the corresponding AWS Instance ID.
    :param finding: one finding dict from describe_findings.
    :return: the INSTANCE_ID attribute value, or None if absent.
    """
    for kv in finding['attributes']:
        if kv['key'] == 'INSTANCE_ID':
            return kv['value']
    return None


def update_instances(findings):
    """
    For each finding build-up a dict keyed by instance ID with an array value
    of all applicable findings. Then create or update the
    aws_inspector_findings custom field for each corresponding CloudBolt
    server record.
    :param findings: describe_findings response dict (has a 'findings' list).
    :return: None
    """
    instances = {}

    # Group findings by instance.
    # BUG FIX: the original initialized the list on the first sighting of an
    # instance but only appended on subsequent sightings, so the FIRST
    # finding for every instance was dropped. setdefault handles both cases.
    for finding in findings['findings']:
        instance_id = get_instance_id(finding)
        instances.setdefault(instance_id, []).append(finding)

    # For each instance, find its CloudBolt Server record and update
    # aws_inspector_findings.
    for instance in instances.keys():
        try:
            s = Server.objects.get(resource_handler_svr_id=instance)
            cf, _ = CustomField.objects.get_or_create(
                name='aws_inspector_findings', type='TXT',
                label="AWS Inspector Findings")
            s.set_value_for_custom_field(
                cf.name,
                json.dumps(instances[instance], indent=True,
                           cls=DjangoJSONEncoder))
        except Server.DoesNotExist:
            # Unable to locate and update the server, carry on
            pass


def describe_findings(inspector_client, all_finding_arns):
    """
    Given a list of finding ARNs, return the details for each finding.
    :param inspector_client: boto3 Inspector client.
    :param all_finding_arns: iterable of finding ARN strings.
    :return: describe_findings response dict, or None if no ARNs were given.
    """
    arns = list(all_finding_arns)
    if len(arns) == 0:
        return None
    # NOTE(review): describe_findings accepts at most 100 ARNs per call per
    # the AWS API; larger sets may need chunking -- TODO confirm against the
    # deployment's finding volume.
    findings = inspector_client.describe_findings(findingArns=arns)
    return findings


def run(job, *args, **kwargs):
    """CloudBolt job entry point: sync Inspector findings for every AWS
    resource handler and region into server custom fields."""
    rh: AWSHandler
    for rh in AWSHandler.objects.all():
        regions = set([env.aws_region for env in rh.environment_set.all()])

        # For each region currently used by the current AWSHandler
        for region in regions:
            inspector = rh.get_boto3_client(service_name='inspector',
                                            region_name=region)

            set_progress(f'Fetching findings for {rh.name} ({region}).')
            all_finding_arns = fetch_arns_for_findings(inspector)
            inspector_findings = describe_findings(inspector,
                                                   all_finding_arns)

            set_progress(f'Updating CloudBolt instances in {region}.')
            if inspector_findings:
                update_instances(inspector_findings)

    return "", "", ""


if __name__ == '__main__':
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    run(None)
CloudBoltSoftware/cloudbolt-forge
ui_extensions/aws_network_policy/actions/fetch_update_findings.py
Python
apache-2.0
4,209
# -*- coding: utf-8 -*-
# Copyright 2017 ProjectV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask_restful import Resource
from flask import request, g
from v.tools.exception import ExceptionRest
from v.tools.v import processing_rest_exception, processing_rest_success, \
    type_of_insert_rest, type_of_update_rest
from v.tools.validate import validate_rest

from v.buy.model.pendingMdl import PendingMdl


# NOTE(review): every query in this module is built with Python %-string
# interpolation instead of driver-level bind parameters. g.user.id comes from
# the auth layer and self._table/_fields from PendingMdl, but the URL `id`
# and request-derived values flow into SQL text -- parameterized queries
# should be used if g.db_conn supports them; verify upstream sanitization.
class PendingListRst(Resource, PendingMdl):
    # Collection resource: GET lists the caller's uncompleted pending items,
    # POST creates a new one. Table name and field specs come from PendingMdl.
    def get(self):
        # Returns all non-deleted, non-completed rows owned by the current
        # user, serialized to JSON by PostgreSQL itself (row_to_json).
        try:
            _qrg = """
                SELECT array_to_json(array_agg(row_to_json(t) )) as collection
                FROM ( SELECT id, name, description, completed_at FROM %s
                WHERE deleted_at IS NULL AND completed_at is NULL
                AND create_id=%s )t;
            """ % (self._table, g.user.id,)
            g.db_conn.execute(_qrg)
            if g.db_conn.count() > 0:
                _collection = g.db_conn.one()[0]
                if _collection:
                    _data = {self._table: _collection}
                    _get = processing_rest_success(data=_data)
                else:
                    # Query ran but aggregated to NULL: no rows for this user.
                    raise ExceptionRest(status_code=404,
                                        message="No se han encontrado resultados")
            else:
                raise ExceptionRest(status_code=404,
                                    message="No se han encontrado resultados")
        except (Exception, ExceptionRest), e:
            # Python 2 except syntax; converts any error to a REST payload.
            _get = processing_rest_exception(e)
        return _get

    def post(self):
        # Creates a row from the JSON body after validating it against the
        # model's field definitions; returns the new row id on success.
        _request = request.json
        try:
            _errors = validate_rest(fields=self._fields, request=_request)
            if not _errors:
                _col, _val = type_of_insert_rest(self._fields, _request)
                # RETURNING row_to_json(...) yields {"id": ...} directly.
                _qrp = """
                    INSERT INTO %s (create_id , %s ) VALUES (%s, %s)
                    RETURNING (select row_to_json(collection) FROM
                    (VALUES(id)) collection(id));
                """ % (self._table, _col, g.user.id, _val)
                g.db_conn.execute(_qrp)
                if g.db_conn.count() > 0:
                    _data = {self._table: g.db_conn.one()}
                    _post = processing_rest_success(data=_data,
                                                    message='Fue creado correctamente',
                                                    status_code=201)
                else:
                    raise ExceptionRest(status_code=500,
                                        message='No se ha podido registrar')
            else:
                raise ExceptionRest(status_code=400, errors=_errors)
        except (Exception, ExceptionRest), e:
            _post = processing_rest_exception(e)
        return _post


class PendingRst(Resource, PendingMdl):
    # Item resource: GET/PUT/DELETE one pending row by id.
    # NOTE(review): PUT and DELETE do not filter on create_id, so any
    # authenticated user can modify/delete another user's row by id --
    # verify this is intended.
    def get(self, id):
        # Same query as the collection GET, restricted to a single id and
        # still scoped to the current user.
        try:
            _qrg = """
                SELECT array_to_json(array_agg(row_to_json(t) )) as collection
                FROM ( SELECT id, name, description, completed_at FROM %s
                WHERE deleted_at IS NULL AND completed_at is NULL
                and create_id=%s and id = %s)t;
            """ % (self._table, g.user.id, id,)
            g.db_conn.execute(_qrg)
            if g.db_conn.count() > 0:
                _collection = g.db_conn.one()[0]
                if _collection:
                    _data = {self._table: _collection}
                    _get = processing_rest_success(data=_data)
                else:
                    raise ExceptionRest(status_code=404,
                                        message="No se han encontrado resultados")
            else:
                raise ExceptionRest(status_code=404,
                                    message="No se han encontrado resultados")
        except (Exception, ExceptionRest), e:
            _get = processing_rest_exception(e)
        return _get

    def put(self, id):
        # Partial update: validate_rest with method='put' allows a subset of
        # fields; type_of_update_rest builds the SET clause.
        _request = request.json
        try:
            _errors = validate_rest(fields=self._fields, request=_request,
                                    method='put')
            if not _errors:
                _val = type_of_update_rest(self._fields, _request)
                _qrp = "UPDATE %s SET %s WHERE id=%s;" % (self._table, _val, id,)
                g.db_conn.execute(_qrp)
                if g.db_conn.count() > 0:
                    _put = processing_rest_success(status_code=201,
                                                   message="El registro fue actualizado correctamente")
                else:
                    raise ExceptionRest(status_code=404,
                                        message="No se ha podido encontrar el registro, para actualizar.")
            else:
                raise ExceptionRest(status_code=400, errors=_errors)
        except (Exception, ExceptionRest), e:
            _put = processing_rest_exception(e)
        return _put

    def delete(self, id):
        # Soft delete: rows are tombstoned via deleted_at, never removed.
        try:
            _qrd = "UPDATE %s SET deleted_at=current_timestamp WHERE id=%s;" % (self._table, id,)
            g.db_conn.execute(_qrd)
            if g.db_conn.count() > 0:
                _delete = processing_rest_success(status_code=201,
                                                  message="El registro fue eliminado correctamente")
            else:
                raise ExceptionRest(status_code=404,
                                    message="No se ha podido encontrar el registro, para eliminar.")
        except (Exception, ExceptionRest), e:
            _delete = processing_rest_exception(e)
        return _delete
jhbez/ProjectV
app/v/buy/rest/pendingRst.py
Python
apache-2.0
5,665
import os import time import yaml import random import argparse from rackattack.physical import pikapatch from rackattack.physical.tests.integration.main import useFakeRackConf, useFakeIPMITool intervalRanges = {0: (0.01, 0.05), 0.85: (0.3, 0.6), 0.95: (2, 4)} rangesProbabilities = intervalRanges.keys() rangesProbabilities.sort() def informFakeConsumersManagerOfReboot(hostname): rebootsPipe = os.environ["FAKE_REBOOTS_PIPE_PATH"] fd = os.open(rebootsPipe, os.O_WRONLY) os.write(fd, "%(hostname)s," % dict(hostname=hostname)) os.close(fd) def power(mode): time.sleep(0.02) if mode == "on": informFakeConsumersManagerOfReboot(args.H) print "Chassis Power Control: Up/On" elif mode == "off": print "Chassis Power Control: Down/Off" else: raise NotImplementedError def sol(subaction): if subaction != "activate": return possibleOutputLines = ("Yo yo i'm a cool server", "This server has got swag.", "Wow this totally looks like a serial log of a linux server", "asdasd") while True: withinBound = random.random() chosenRangeProbability = \ [rangeProb for rangeProb in rangesProbabilities if rangeProb <= withinBound][-1] chosenRange = intervalRanges[chosenRangeProbability] interval = chosenRange[0] + random.random() * (chosenRange[1] - chosenRange[0]) time.sleep(interval) print random.choice(possibleOutputLines) def main(args): useFakeRackConf() useFakeIPMITool() if args.I != "lanplus": assert args.I is None action = dict(power=power, sol=sol).get(args.action) action(args.subaction) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("-I", default=None, type=str) parser.add_argument("-H", default=None, type=str) parser.add_argument("-U", default=None, type=str) parser.add_argument("-P", default=None, type=str) parser.add_argument("-R", default=1, type=int) parser.add_argument("action", default=None, type=str) parser.add_argument("subaction", default=None, type=str) args = parser.parse_args() main(args)
eliran-stratoscale/rackattack-physical
rackattack/physical/tests/integration/fakeipmitool.py
Python
apache-2.0
2,252
import datetime class FakeHeatTemplateManager(object): def __init__(self): self.templates = { 1: 'Dummy description #1.', 3: 'Dummy description #3.', 4: 'Dummy description #4.', 6: 'Dummy description #6.'} def get_description(self, template_id): return self.templates[template_id] def list_templates(self): return self.templates.keys() class FakeNovaAPI(object): def __init__(self): self.flavors = { 1: { 'name': 'flavor1', }, 2: { 'name': 'flavor2', }} def get_flavor(self, flavor_id): return self.flavors[flavor_id] def list_flavors(self): return self.flavors.keys() class FakeHeatAPI(object): def __init__(self): self.stacks = { 'stack1': { 'email': 'dummy1@localhost', }, 'stack2': { 'email': 'dummy2@localhost', }} self.expired_stacks = { 'stack3': { 'email': 'dummy3@localhost', }} def create_stack(self, name, template_id, **params): # TODO(sheeprine): Implement checks if name in self.stacks or name in self.expired_stacks: return False self.stacks[name] = params return True def delete_stack(self, name): if name is self.stacks: self.stacks.pop(name) return True else: return False def expired_stacks(self): return ['stack1', 'stack2'] def disable_expired_stacks(self): return len(self.expired_stacks) def purge_expired_stacks(self): purged = len(self.expired_stacks) self.expired_stacks = {} return purged def extend_stack_validity(self, name, days): if name in self.expired_stacks: return datetime.datetime.utcnow() def list_new_stacks(self): return ['stack4'] def get_stack_details(self, name): return self.stacks[name]
Ingesup-Lab-OS/OS-Lend-Frontend
lend_frontend/tests/api_mock.py
Python
apache-2.0
2,035
# Copyright 2014 Cloudera Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sqlalchemy as sa from ibis.sql.alchemy import unary, varargs, fixed_arity import ibis.sql.alchemy as alch import ibis.expr.datatypes as dt import ibis.expr.operations as ops import ibis.expr.types as ir import ibis.common as com _operation_registry = alch._operation_registry.copy() def _cast(t, expr): # It's not all fun and games with SQLite op = expr.op() arg, target_type = op.args sa_arg = t.translate(arg) sa_type = t.get_sqla_type(target_type) # SQLite does not have a physical date/time/timestamp type, so # unfortunately cast to typestamp must be a no-op, and we have to trust # that the user's data can actually be correctly parsed by SQLite. 
if isinstance(target_type, dt.Timestamp): if not isinstance(arg, (ir.IntegerValue, ir.StringValue)): raise com.TranslationError(type(arg)) return sa_arg if isinstance(arg, ir.CategoryValue) and target_type == 'int32': return sa_arg else: return sa.cast(sa_arg, sa_type) def _substr(t, expr): f = sa.func.substr arg, start, length = expr.op().args sa_arg = t.translate(arg) sa_start = t.translate(start) if length is None: return f(sa_arg, sa_start + 1) else: sa_length = t.translate(length) return f(sa_arg, sa_start + 1, sa_length) def _string_right(t, expr): f = sa.func.substr arg, length = expr.op().args sa_arg = t.translate(arg) sa_length = t.translate(length) return f(sa_arg, -sa_length, sa_length) def _string_find(t, expr): arg, substr, start, _ = expr.op().args if start is not None: raise NotImplementedError sa_arg = t.translate(arg) sa_substr = t.translate(substr) f = sa.func.instr return f(sa_arg, sa_substr) - 1 def _infix_op(infix_sym): def formatter(t, expr): op = expr.op() left, right = op.args left_arg = t.translate(left) right_arg = t.translate(right) return left_arg.op(infix_sym)(right_arg) return formatter def _strftime(t, expr): arg, format = expr.op().args sa_arg = t.translate(arg) sa_format = t.translate(format) return sa.func.strftime(sa_format, sa_arg) def _strftime_int(fmt): def translator(t, expr): arg, = expr.op().args sa_arg = t.translate(arg) return sa.cast(sa.func.strftime(fmt, sa_arg), sa.types.INTEGER) return translator def _now(t, expr): return sa.func.datetime('now') def _millisecond(t, expr): arg, = expr.op().args sa_arg = t.translate(arg) fractional_second = sa.func.strftime('%f', sa_arg) return (fractional_second * 1000) % 1000 def _identical_to(t, expr): left, right = args = expr.op().args if left.equals(right): return True else: left, right = map(t.translate, args) return sa.func.coalesce( (left.is_(None) & right.is_(None)) | (left == right), False ) _operation_registry.update({ ops.Cast: _cast, ops.Substring: _substr, ops.StrRight: 
_string_right, ops.StringFind: _string_find, ops.StringLength: unary('length'), ops.Least: varargs(sa.func.min), ops.Greatest: varargs(sa.func.max), ops.IfNull: fixed_arity(sa.func.ifnull, 2), ops.Lowercase: unary('lower'), ops.Uppercase: unary('upper'), ops.Strip: unary('trim'), ops.LStrip: unary('ltrim'), ops.RStrip: unary('rtrim'), ops.StringReplace: fixed_arity(sa.func.replace, 3), ops.StringSQLLike: _infix_op('LIKE'), ops.RegexSearch: _infix_op('REGEXP'), ops.Strftime: _strftime, ops.ExtractYear: _strftime_int('%Y'), ops.ExtractMonth: _strftime_int('%m'), ops.ExtractDay: _strftime_int('%d'), ops.ExtractHour: _strftime_int('%H'), ops.ExtractMinute: _strftime_int('%M'), ops.ExtractSecond: _strftime_int('%S'), ops.ExtractMillisecond: _millisecond, ops.TimestampNow: _now, ops.IdenticalTo: _identical_to, }) def add_operation(op, translation_func): _operation_registry[op] = translation_func class SQLiteExprTranslator(alch.AlchemyExprTranslator): _registry = _operation_registry _rewrites = alch.AlchemyExprTranslator._rewrites.copy() _type_map = alch.AlchemyExprTranslator._type_map.copy() _type_map.update({ dt.Double: sa.types.REAL, dt.Float: sa.types.REAL }) rewrites = SQLiteExprTranslator.rewrites compiles = SQLiteExprTranslator.compiles class SQLiteDialect(alch.AlchemyDialect): translator = SQLiteExprTranslator
wesm/ibis
ibis/sql/sqlite/compiler.py
Python
apache-2.0
5,142
from google.appengine.ext import ndb class Tirage(ndb.Model): nomtirage = ndb.StringProperty() datecreation = ndb.DateTimeProperty(auto_now_add=True)
crancerkill/pokeploud
server/TirageModele.py
Python
apache-2.0
159
import sys import unittest from string import ascii_letters from random import randint, choice import logging from influxgraph.templates import parse_influxdb_graphite_templates from influxgraph.utils import parse_series as parse_py_series try: from influxgraph.ext.nodetrie import Node from influxgraph.ext.templates import parse_series except ImportError: NODE_TRIE = False else: NODE_TRIE = True logger = logging.getLogger('influxgraph') logger.setLevel(logging.DEBUG) logging.basicConfig() @unittest.skipUnless(NODE_TRIE, "NodeTrie extension not enabled") class CNodeTreeTestCase(unittest.TestCase): def setUp(self): self.all_series = ['b1.b1.b1.b1.leaf1', 'b1.b1.b1.b2.leaf1', 'b1.b1.b2.b2.leaf1', 'b1.b1.b1.b1.leaf2', 'b1.b1.b1.b2.leaf2', 'b1.b1.b2.b2.leaf2' ] self.index = Node() for serie in self.all_series: split_path = serie.split('.') self.index.insert_split_path(split_path) def tearDown(self): del self.index def test_parse_series(self): all_series = [u'b1.b1.b1.b1.leaf1', u'b1.b1.b1.b2.leaf1', u'b1.b1.b2.b2.leaf1', u'b1.b1.b1.b1.leaf2', u'b1.b1.b1.b2.leaf2', u'b1.b1.b2.b2.leaf2' ] index = parse_series(all_series, None, None) result = list(index.query(u'*')) self.assertTrue(len(result) > 0) self.assertEqual(result[0][0], 'b1') result = list(index.query('b1')) self.assertTrue(result[0][0] == 'b1') result = list(index.query('b1.*')) self.assertTrue(len(result) == 1) self.assertTrue(result[0][0] == 'b1.b1') result = list(index.query('b1.b1.*')) self.assertEqual(len(result), 2) self.assertTrue(result[0][0] == 'b1.b1.b1') self.assertTrue(result[1][0] == 'b1.b1.b2') result = list(index.query('b1.b1.*.*')) self.assertTrue(len(result) == 3) self.assertTrue(result[0][0] == 'b1.b1.b1.b1') self.assertTrue(result[1][0] == 'b1.b1.b1.b2') self.assertTrue(result[2][0] == 'b1.b1.b2.b2') result = list(index.query('b1.b1.*.*.*')) self.assertTrue(len(result) == 6) self.assertTrue(result[0][0] == 'b1.b1.b1.b1.leaf1') self.assertTrue(result[1][0] == 'b1.b1.b1.b1.leaf2') 
self.assertTrue(result[2][0] == 'b1.b1.b1.b2.leaf1') self.assertTrue(result[3][0] == 'b1.b1.b1.b2.leaf2') self.assertTrue(result[4][0] == 'b1.b1.b2.b2.leaf1') self.assertTrue(result[5][0] == 'b1.b1.b2.b2.leaf2') result = list(index.query('b1.b1.*.*.{leaf1,leaf2}')) self.assertTrue(len(result) == 6) self.assertTrue(result[0][0] == 'b1.b1.b1.b1.leaf1') self.assertTrue(result[1][0] == 'b1.b1.b1.b1.leaf2') self.assertTrue(result[2][0] == 'b1.b1.b1.b2.leaf1') self.assertTrue(result[3][0] == 'b1.b1.b1.b2.leaf2') self.assertTrue(result[4][0] == 'b1.b1.b2.b2.leaf1') self.assertTrue(result[5][0] == 'b1.b1.b2.b2.leaf2') result = list(index.query('b1.b1.b1.b1.leaf1')) self.assertTrue(len(result) == 1) self.assertTrue(result[0][0] == 'b1.b1.b1.b1.leaf1') result = list(index.query('fakey*')) self.assertFalse(result) def test_parse_series_templates(self): _templates = ["dc.env.host.measurement.field*"] templates = parse_influxdb_graphite_templates(_templates) all_series = [u'm1,host=b1,dc=dc1,env=a', u'm1,host=b1,dc=dc1,env=a', u'm2,host=b1,dc=dc1,env=a', u'm2,host=b2,dc=dc1,env=a', u'm2,host=b2,dc=dc2,env=a', u'm2,host=b2,dc=dc2,env=b' ] fields = {u'm1': [u'leaf1', u'leaf2'], u'm2': [u'leaf1']} index = parse_series(all_series, fields, templates) result = sorted(list(index.query(u'*'))) self.assertTrue(len(result) > 0) self.assertEqual(result[0][0], 'dc1') def test_empty_tree(self): tree = Node() self.assertTrue(len(tree.children) == 0) self.assertTrue(tree.name is None) def test_index(self): self.assertEqual(self.index.children_size, 1) for serie in self.all_series: split_path = serie.split('.') i = 0 parent = self.index while i < len(split_path): path = split_path[i] child = [c for c in parent.children if c.name == path] self.assertTrue(len(child) > 0) child = child[0] self.assertEqual(child.name, path) parent = child i += 1 if i < len(split_path): self.assertFalse(child.is_leaf()) else: self.assertTrue(child.is_leaf()) def test_root_wildcard(self): result = 
list(self.index.query('*')) self.assertTrue(len(result) == 1) # Unicode query test result = list(self.index.query(u'*')) self.assertTrue(len(result) == 1) self.assertTrue(result[0][0] == 'b1') result = list(self.index.query('b1')) self.assertTrue(result[0][0] == 'b1') result = list(self.index.query('b1.*')) self.assertTrue(len(result) == 1) self.assertTrue(result[0][0] == 'b1.b1') result = list(self.index.query('b1.b1.*')) self.assertTrue(len(result) == 2) self.assertTrue(result[0][0] == 'b1.b1.b1') self.assertTrue(result[1][0] == 'b1.b1.b2') result = list(self.index.query('b1.b1.*.*')) self.assertTrue(len(result) == 3) self.assertTrue(result[0][0] == 'b1.b1.b1.b1') self.assertTrue(result[1][0] == 'b1.b1.b1.b2') self.assertTrue(result[2][0] == 'b1.b1.b2.b2') result = list(self.index.query('b1.b1.*.*.*')) self.assertTrue(len(result) == 6) self.assertTrue(result[0][0] == 'b1.b1.b1.b1.leaf1') self.assertTrue(result[1][0] == 'b1.b1.b1.b1.leaf2') self.assertTrue(result[2][0] == 'b1.b1.b1.b2.leaf1') self.assertTrue(result[3][0] == 'b1.b1.b1.b2.leaf2') self.assertTrue(result[4][0] == 'b1.b1.b2.b2.leaf1') self.assertTrue(result[5][0] == 'b1.b1.b2.b2.leaf2') result = list(self.index.query('b1.b1.*.*.{leaf1,leaf2}')) self.assertTrue(len(result) == 6) self.assertTrue(result[0][0] == 'b1.b1.b1.b1.leaf1') self.assertTrue(result[1][0] == 'b1.b1.b1.b1.leaf2') self.assertTrue(result[2][0] == 'b1.b1.b1.b2.leaf1') self.assertTrue(result[3][0] == 'b1.b1.b1.b2.leaf2') self.assertTrue(result[4][0] == 'b1.b1.b2.b2.leaf1') self.assertTrue(result[5][0] == 'b1.b1.b2.b2.leaf2') result = list(self.index.query('b1.b1.b1.b1.leaf1')) self.assertTrue(len(result) == 1) self.assertTrue(result[0][0] == 'b1.b1.b1.b1.leaf1') result = list(self.index.query('fakey*')) self.assertFalse(result) # dumped_ar = self.index.to_array() # self.index = Node.from_array(dumped_ar) # import ipdb; ipdb.set_trace() def test_string_insert(self): del self.index self.index = Node() for serie in self.all_series: 
self.index.insert(serie) self.test_root_wildcard()
InfluxGraph/influxgraph
tests/test_c_node_tree.py
Python
apache-2.0
7,573
# -*- coding: utf-8 -*- from openprocurement.auctions.core.utils import ( apply_patch, context_unpack, get_now, json_view, opresource, save_auction, ) from openprocurement.auctions.core.validation import ( validate_lot_data, validate_patch_lot_data, ) from openprocurement.auctions.core.views.mixins import AuctionLotResource @opresource(name='dgfOtherAssets:Auction Lots', collection_path='/auctions/{auction_id}/lots', path='/auctions/{auction_id}/lots/{lot_id}', auctionsprocurementMethodType="dgfOtherAssets", description="Auction lots") class AuctionLotResource(AuctionLotResource): @json_view(content_type="application/json", validators=(validate_lot_data,), permission='edit_auction') def collection_post(self): """Add a lot """ auction = self.request.validated['auction'] if auction.status not in ['active.tendering']: self.request.errors.add('body', 'data', 'Can\'t add lot in current ({}) auction status'.format(auction.status)) self.request.errors.status = 403 return lot = self.request.validated['lot'] lot.date = get_now() auction.lots.append(lot) if save_auction(self.request): self.LOGGER.info('Created auction lot {}'.format(lot.id), extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_lot_create'}, {'lot_id': lot.id})) self.request.response.status = 201 route = self.request.matched_route.name.replace("collection_", "") self.request.response.headers['Location'] = self.request.current_route_url(_route_name=route, lot_id=lot.id, _query={}) return {'data': lot.serialize("view")} @json_view(content_type="application/json", validators=(validate_patch_lot_data,), permission='edit_auction') def patch(self): """Update of lot """ auction = self.request.validated['auction'] if auction.status not in ['active.tendering']: self.request.errors.add('body', 'data', 'Can\'t update lot in current ({}) auction status'.format(auction.status)) self.request.errors.status = 403 return if apply_patch(self.request, src=self.request.context.serialize()): self.LOGGER.info('Updated 
auction lot {}'.format(self.request.context.id), extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_lot_patch'})) return {'data': self.request.context.serialize("view")} @json_view(permission='edit_auction') def delete(self): """Lot deleting """ auction = self.request.validated['auction'] if auction.status not in ['active.tendering']: self.request.errors.add('body', 'data', 'Can\'t delete lot in current ({}) auction status'.format(auction.status)) self.request.errors.status = 403 return lot = self.request.context res = lot.serialize("view") auction.lots.remove(lot) if save_auction(self.request): self.LOGGER.info('Deleted auction lot {}'.format(self.request.context.id), extra=context_unpack(self.request, {'MESSAGE_ID': 'auction_lot_delete'})) return {'data': res}
openprocurement/openprocurement.auctions.dgf
openprocurement/auctions/dgf/views/other/lot.py
Python
apache-2.0
3,339
import sys from cool_commits import info, find if __name__ == '__main__': command = sys.argv[1] if command == 'find': print(*find(sys.argv[2])) elif command == 'info': for info_text in info(sys.argv[2]): print('='*60) print(info_text) print('='*60) else: raise IOError('Invalid command, supporting only `find` and `info`.')
OrDuan/cool_commits
cool_commits/__main__.py
Python
apache-2.0
399
extensions = dict( required_params=[], # empty to override defaults in gen_defaults validate_required_params=""" # Required args: either model_key or path if (is.null(model_key) && is.null(path)) stop("argument 'model_key' or 'path' must be provided") """, set_required_params="", ) doc = dict( preamble=""" Imports a generic model into H2O. Such model can be used then used for scoring and obtaining additional information about the model. The imported model has to be supported by H2O. """, examples=""" # library(h2o) # h2o.init() # generic_model <- h2o.genericModel(path="/path/to/model.zip", model_id="my_model") # predictions <- h2o.predict(generic_model, dataset) """ )
h2oai/h2o-3
h2o-bindings/bin/custom/R/gen_generic.py
Python
apache-2.0
701
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010-2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ TOKEN-BASED AUTH MIDDLEWARE This WSGI component: * Verifies that incoming client requests have valid tokens by validating tokens with the auth service. * Rejects unauthenticated requests UNLESS it is in 'delay_auth_decision' mode, which means the final decision is delegated to the downstream WSGI component (usually the OpenStack service) * Collects and forwards identity information based on a valid token such as user name, tenant, etc Refer to: http://keystone.openstack.org/middlewarearchitecture.html HEADERS ------- * Headers starting with HTTP\_ is a standard http header * Headers starting with HTTP_X is an extended http header Coming in from initial call from client or customer ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ HTTP_X_AUTH_TOKEN The client token being passed in. 
HTTP_X_STORAGE_TOKEN The client token being passed in (legacy Rackspace use) to support swift/cloud files Used for communication between components ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ WWW-Authenticate HTTP header returned to a user indicating which endpoint to use to retrieve a new token What we add to the request for use by the OpenStack service ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ HTTP_X_IDENTITY_STATUS 'Confirmed' or 'Invalid' The underlying service will only see a value of 'Invalid' if the Middleware is configured to run in 'delay_auth_decision' mode HTTP_X_DOMAIN_ID Identity service managed unique identifier, string. Only present if this is a domain-scoped v3 token. HTTP_X_DOMAIN_NAME Unique domain name, string. Only present if this is a domain-scoped v3 token. HTTP_X_PROJECT_ID Identity service managed unique identifier, string. Only present if this is a project-scoped v3 token, or a tenant-scoped v2 token. HTTP_X_PROJECT_NAME Project name, unique within owning domain, string. Only present if this is a project-scoped v3 token, or a tenant-scoped v2 token. HTTP_X_PROJECT_DOMAIN_ID Identity service managed unique identifier of owning domain of project, string. Only present if this is a project-scoped v3 token. If this variable is set, this indicates that the PROJECT_NAME can only be assumed to be unique within this domain. HTTP_X_PROJECT_DOMAIN_NAME Name of owning domain of project, string. Only present if this is a project-scoped v3 token. If this variable is set, this indicates that the PROJECT_NAME can only be assumed to be unique within this domain. HTTP_X_USER_ID Identity-service managed unique identifier, string HTTP_X_USER_NAME User identifier, unique within owning domain, string HTTP_X_USER_DOMAIN_ID Identity service managed unique identifier of owning domain of user, string. If this variable is set, this indicates that the USER_NAME can only be assumed to be unique within this domain. 
HTTP_X_USER_DOMAIN_NAME Name of owning domain of user, string. If this variable is set, this indicates that the USER_NAME can only be assumed to be unique within this domain. HTTP_X_ROLES Comma delimited list of case-sensitive role names HTTP_X_SERVICE_CATALOG json encoded keystone service catalog (optional). HTTP_X_TENANT_ID *Deprecated* in favor of HTTP_X_PROJECT_ID Identity service managed unique identifier, string. For v3 tokens, this will be set to the same value as HTTP_X_PROJECT_ID HTTP_X_TENANT_NAME *Deprecated* in favor of HTTP_X_PROJECT_NAME Project identifier, unique within owning domain, string. For v3 tokens, this will be set to the same value as HTTP_X_PROJECT_NAME HTTP_X_TENANT *Deprecated* in favor of HTTP_X_TENANT_ID and HTTP_X_TENANT_NAME Keystone-assigned unique identifier, string. For v3 tokens, this will be set to the same value as HTTP_X_PROJECT_ID HTTP_X_USER *Deprecated* in favor of HTTP_X_USER_ID and HTTP_X_USER_NAME User name, unique within owning domain, string HTTP_X_ROLE *Deprecated* in favor of HTTP_X_ROLES Will contain the same values as HTTP_X_ROLES. OTHER ENVIRONMENT VARIABLES --------------------------- keystone.token_info Information about the token discovered in the process of validation. This may include extended information returned by the Keystone token validation call, as well as basic information about the tenant and user. 
""" import datetime import logging import os import requests import stat import tempfile import time import urllib import netaddr import six from keystoneclient.common import cms from keystoneclient.middleware import memcache_crypt from keystoneclient.openstack.common import jsonutils from keystoneclient.openstack.common import memorycache from keystoneclient.openstack.common import timeutils from keystoneclient import utils CONF = None # to pass gate before oslo-config is deployed everywhere, # try application copies first for app in 'nova', 'glance', 'quantum', 'cinder': try: cfg = __import__('%s.openstack.common.cfg' % app, fromlist=['%s.openstack.common' % app]) # test which application middleware is running in if hasattr(cfg, 'CONF') and 'config_file' in cfg.CONF: CONF = cfg.CONF break except ImportError: pass if not CONF: from oslo.config import cfg CONF = cfg.CONF # alternative middleware configuration in the main application's # configuration file e.g. in nova.conf # [keystone_authtoken] # auth_host = 127.0.0.1 # auth_port = 35357 # auth_protocol = http # admin_tenant_name = admin # admin_user = admin # admin_password = badpassword # when deploy Keystone auth_token middleware with Swift, user may elect # to use Swift memcache instead of the local Keystone memcache. Swift memcache # is passed in from the request environment and its identified by the # 'swift.cache' key. However it could be different, depending on deployment. # To use Swift memcache, you must set the 'cache' option to the environment # key where the Swift cache object is stored. 
opts = [ cfg.StrOpt('auth_admin_prefix', default='', help='Prefix to prepend at the beginning of the path'), cfg.StrOpt('auth_host', default='127.0.0.1', help='Host providing the admin Identity API endpoint'), cfg.IntOpt('auth_port', default=35357, help='Port of the admin Identity API endpoint'), cfg.StrOpt('auth_protocol', default='https', help='Protocol of the admin Identity API endpoint' '(http or https)'), cfg.StrOpt('auth_uri', default=None, # FIXME(dolph): should be default='http://127.0.0.1:5000/v2.0/', # or (depending on client support) an unversioned, publicly # accessible identity endpoint (see bug 1207517) help='Complete public Identity API endpoint'), cfg.StrOpt('auth_version', default=None, help='API version of the admin Identity API endpoint'), cfg.BoolOpt('delay_auth_decision', default=False, help='Do not handle authorization requests within the' ' middleware, but delegate the authorization decision to' ' downstream WSGI components'), cfg.BoolOpt('http_connect_timeout', default=None, help='Request timeout value for communicating with Identity' ' API server.'), cfg.IntOpt('http_request_max_retries', default=3, help='How many times are we trying to reconnect when' ' communicating with Identity API Server.'), cfg.StrOpt('http_handler', default=None, help='Allows to pass in the name of a fake http_handler' ' callback function used instead of httplib.HTTPConnection or' ' httplib.HTTPSConnection. 
Useful for unit testing where' ' network is not available.'), cfg.StrOpt('admin_token', secret=True, help='Single shared secret with the Keystone configuration' ' used for bootstrapping a Keystone installation, or otherwise' ' bypassing the normal authentication process.'), cfg.StrOpt('admin_user', help='Keystone account username'), cfg.StrOpt('admin_password', secret=True, help='Keystone account password'), cfg.StrOpt('admin_tenant_name', default='admin', help='Keystone service account tenant name to validate' ' user tokens'), cfg.StrOpt('cache', default=None, help='Env key for the swift cache'), cfg.StrOpt('certfile', help='Required if Keystone server requires client certificate'), cfg.StrOpt('keyfile', help='Required if Keystone server requires client certificate'), cfg.StrOpt('cafile', default=None, help='A PEM encoded Certificate Authority to use when ' 'verifying HTTPs connections. Defaults to system CAs.'), cfg.BoolOpt('insecure', default=False, help='Verify HTTPS connections.'), cfg.StrOpt('signing_dir', help='Directory used to cache files related to PKI tokens'), cfg.ListOpt('memcached_servers', deprecated_name='memcache_servers', help='If defined, the memcache server(s) to use for' ' caching'), cfg.IntOpt('token_cache_time', default=300, help='In order to prevent excessive requests and validations,' ' the middleware uses an in-memory cache for the tokens the' ' Keystone API returns. This is only valid if memcache_servers' ' is defined. Set to -1 to disable caching completely.'), cfg.IntOpt('revocation_cache_time', default=1, help='Value only used for unit testing'), cfg.StrOpt('memcache_security_strategy', default=None, help='(optional) if defined, indicate whether token data' ' should be authenticated or authenticated and encrypted.' ' Acceptable values are MAC or ENCRYPT. If MAC, token data is' ' authenticated (with HMAC) in the cache. If ENCRYPT, token' ' data is encrypted and authenticated in the cache. 
If the' ' value is not one of these options or empty, auth_token will' ' raise an exception on initialization.'), cfg.StrOpt('memcache_secret_key', default=None, secret=True, help='(optional, mandatory if memcache_security_strategy is' ' defined) this string is used for key derivation.') ] CONF.register_opts(opts, group='keystone_authtoken') LIST_OF_VERSIONS_TO_ATTEMPT = ['v2.0', 'v3.0'] CACHE_KEY_TEMPLATE = 'tokens/%s' def will_expire_soon(expiry): """Determines if expiration is about to occur. :param expiry: a datetime of the expected expiration :returns: boolean : true if expiration is within 30 seconds """ soon = (timeutils.utcnow() + datetime.timedelta(seconds=30)) return expiry < soon def safe_quote(s): """URL-encode strings that are not already URL-encoded.""" return urllib.quote(s) if s == urllib.unquote(s) else s class InvalidUserToken(Exception): pass class ServiceError(Exception): pass class ConfigurationError(Exception): pass class NetworkError(Exception): pass class MiniResp(object): def __init__(self, error_message, env, headers=[]): # The HEAD method is unique: it must never return a body, even if # it reports an error (RFC-2616 clause 9.4). We relieve callers # from varying the error responses depending on the method. 
if env['REQUEST_METHOD'] == 'HEAD': self.body = [''] else: self.body = [error_message] self.headers = list(headers) self.headers.append(('Content-type', 'text/plain')) class AuthProtocol(object): """Auth Middleware that handles authenticating client calls.""" def __init__(self, app, conf): self.LOG = logging.getLogger(conf.get('log_name', __name__)) self.LOG.info('Starting keystone auth_token middleware') self.conf = conf self.app = app # delay_auth_decision means we still allow unauthenticated requests # through and we let the downstream service make the final decision self.delay_auth_decision = (self._conf_get('delay_auth_decision') in (True, 'true', 't', '1', 'on', 'yes', 'y')) # where to find the auth service (we use this to validate tokens) auth_host = self._conf_get('auth_host') auth_port = int(self._conf_get('auth_port')) auth_protocol = self._conf_get('auth_protocol') self.auth_admin_prefix = self._conf_get('auth_admin_prefix') self.auth_uri = self._conf_get('auth_uri') if netaddr.valid_ipv6(auth_host): # Note(dzyu) it is an IPv6 address, so it needs to be wrapped # with '[]' to generate a valid IPv6 URL, based on # http://www.ietf.org/rfc/rfc2732.txt auth_host = '[%s]' % auth_host self.request_uri = '%s://%s:%s' % (auth_protocol, auth_host, auth_port) if self.auth_uri is None: self.LOG.warning( 'Configuring auth_uri to point to the public identity ' 'endpoint is required; clients may not be able to ' 'authenticate against an admin endpoint') # FIXME(dolph): drop support for this fallback behavior as # documented in bug 1207517 self.auth_uri = self.request_uri # SSL self.cert_file = self._conf_get('certfile') self.key_file = self._conf_get('keyfile') self.ssl_ca_file = self._conf_get('cafile') self.ssl_insecure = self._conf_get('insecure') # signing self.signing_dirname = self._conf_get('signing_dir') if self.signing_dirname is None: self.signing_dirname = tempfile.mkdtemp(prefix='keystone-signing-') self.LOG.info('Using %s as cache directory for signing 
certificate' % self.signing_dirname) self.verify_signing_dir() val = '%s/signing_cert.pem' % self.signing_dirname self.signing_cert_file_name = val val = '%s/cacert.pem' % self.signing_dirname self.signing_ca_file_name = val val = '%s/revoked.pem' % self.signing_dirname self.revoked_file_name = val # Credentials used to verify this component with the Auth service since # validating tokens is a privileged call self.admin_token = self._conf_get('admin_token') self.admin_token_expiry = None self.admin_user = self._conf_get('admin_user') self.admin_password = self._conf_get('admin_password') self.admin_tenant_name = self._conf_get('admin_tenant_name') # Token caching via memcache self._cache = None self._cache_initialized = False # cache already initialzied? # memcache value treatment, ENCRYPT or MAC self._memcache_security_strategy = \ self._conf_get('memcache_security_strategy') if self._memcache_security_strategy is not None: self._memcache_security_strategy = \ self._memcache_security_strategy.upper() self._memcache_secret_key = \ self._conf_get('memcache_secret_key') self._assert_valid_memcache_protection_config() # By default the token will be cached for 5 minutes self.token_cache_time = int(self._conf_get('token_cache_time')) self._token_revocation_list = None self._token_revocation_list_fetched_time = None self.token_revocation_list_cache_timeout = datetime.timedelta( seconds=self._conf_get('revocation_cache_time')) http_connect_timeout_cfg = self._conf_get('http_connect_timeout') self.http_connect_timeout = (http_connect_timeout_cfg and int(http_connect_timeout_cfg)) self.auth_version = None self.http_request_max_retries = \ self._conf_get('http_request_max_retries') def _assert_valid_memcache_protection_config(self): if self._memcache_security_strategy: if self._memcache_security_strategy not in ('MAC', 'ENCRYPT'): raise Exception('memcache_security_strategy must be ' 'ENCRYPT or MAC') if not self._memcache_secret_key: raise Exception('mecmache_secret_key 
must be defined when ' 'a memcache_security_strategy is defined') def _init_cache(self, env): cache = self._conf_get('cache') memcache_servers = self._conf_get('memcached_servers') if cache and env.get(cache, None) is not None: # use the cache from the upstream filter self.LOG.info('Using %s memcache for caching token', cache) self._cache = env.get(cache) else: # use Keystone memcache self._cache = memorycache.get_client(memcache_servers) self._cache_initialized = True def _conf_get(self, name): # try config from paste-deploy first if name in self.conf: return self.conf[name] else: return CONF.keystone_authtoken[name] def _choose_api_version(self): """Determine the api version that we should use.""" # If the configuration specifies an auth_version we will just # assume that is correct and use it. We could, of course, check # that this version is supported by the server, but in case # there are some problems in the field, we want as little code # as possible in the way of letting auth_token talk to the # server. 
if self._conf_get('auth_version'): version_to_use = self._conf_get('auth_version') self.LOG.info('Auth Token proceeding with requested %s apis', version_to_use) else: version_to_use = None versions_supported_by_server = self._get_supported_versions() if versions_supported_by_server: for version in LIST_OF_VERSIONS_TO_ATTEMPT: if version in versions_supported_by_server: version_to_use = version break if version_to_use: self.LOG.info('Auth Token confirmed use of %s apis', version_to_use) else: self.LOG.error( 'Attempted versions [%s] not in list supported by ' 'server [%s]', ', '.join(LIST_OF_VERSIONS_TO_ATTEMPT), ', '.join(versions_supported_by_server)) raise ServiceError('No compatible apis supported by server') return version_to_use def _get_supported_versions(self): versions = [] response, data = self._json_request('GET', '/') if response.status_code == 501: self.LOG.warning("Old keystone installation found...assuming v2.0") versions.append("v2.0") elif response.status_code != 300: self.LOG.error('Unable to get version info from keystone: %s' % response.status_code) raise ServiceError('Unable to get version info from keystone') else: try: for version in data['versions']['values']: versions.append(version['id']) except KeyError: self.LOG.error( 'Invalid version response format from server', data) raise ServiceError('Unable to parse version response ' 'from keystone') self.LOG.debug('Server reports support for api versions: %s', ', '.join(versions)) return versions def __call__(self, env, start_response): """Handle incoming request. Authenticate send downstream on success. Reject request if we can't authenticate. 
""" self.LOG.debug('Authenticating user token') # initialize memcache if we haven't done so if not self._cache_initialized: self._init_cache(env) try: self._remove_auth_headers(env) user_token = self._get_user_token_from_header(env) token_info = self._validate_user_token(user_token) env['keystone.token_info'] = token_info user_headers = self._build_user_headers(token_info) self._add_headers(env, user_headers) return self.app(env, start_response) except InvalidUserToken: if self.delay_auth_decision: self.LOG.info( 'Invalid user token - deferring reject downstream') self._add_headers(env, {'X-Identity-Status': 'Invalid'}) return self.app(env, start_response) else: self.LOG.info('Invalid user token - rejecting request') return self._reject_request(env, start_response) except ServiceError as e: self.LOG.critical('Unable to obtain admin token: %s' % e) resp = MiniResp('Service unavailable', env) start_response('503 Service Unavailable', resp.headers) return resp.body def _remove_auth_headers(self, env): """Remove headers so a user can't fake authentication. :param env: wsgi request environment """ auth_headers = ( 'X-Identity-Status', 'X-Domain-Id', 'X-Domain-Name', 'X-Project-Id', 'X-Project-Name', 'X-Project-Domain-Id', 'X-Project-Domain-Name', 'X-User-Id', 'X-User-Name', 'X-User-Domain-Id', 'X-User-Domain-Name', 'X-Roles', 'X-Service-Catalog', # Deprecated 'X-User', 'X-Tenant-Id', 'X-Tenant-Name', 'X-Tenant', 'X-Role', ) self.LOG.debug('Removing headers from request environment: %s' % ','.join(auth_headers)) self._remove_headers(env, auth_headers) def _get_user_token_from_header(self, env): """Get token id from request. 
:param env: wsgi request environment :return token id :raises InvalidUserToken if no token is provided in request """ token = self._get_header(env, 'X-Auth-Token', self._get_header(env, 'X-Storage-Token')) if token: return token else: if not self.delay_auth_decision: self.LOG.warn("Unable to find authentication token" " in headers") self.LOG.debug("Headers: %s", env) raise InvalidUserToken('Unable to find token in headers') def _reject_request(self, env, start_response): """Redirect client to auth server. :param env: wsgi request environment :param start_response: wsgi response callback :returns HTTPUnauthorized http response """ headers = [('WWW-Authenticate', 'Keystone uri=\'%s\'' % self.auth_uri)] resp = MiniResp('Authentication required', env, headers) start_response('401 Unauthorized', resp.headers) return resp.body def get_admin_token(self): """Return admin token, possibly fetching a new one. if self.admin_token_expiry is set from fetching an admin token, check it for expiration, and request a new token is the existing token is about to expire. :return admin token id :raise ServiceError when unable to retrieve token from keystone """ if self.admin_token_expiry: if will_expire_soon(self.admin_token_expiry): self.admin_token = None if not self.admin_token: (self.admin_token, self.admin_token_expiry) = self._request_admin_token() return self.admin_token def _http_request(self, method, path, **kwargs): """HTTP request helper used to make unspecified content type requests. :param method: http method :param path: relative request url :return (http response object, response body) :raise ServerError when unable to communicate with keystone """ url = "%s/%s" % (self.request_uri, path.lstrip('/')) kwargs.setdefault('timeout', self.http_connect_timeout) if self.cert_file and self.key_file: kwargs['cert'] = (self.cert_file, self.key_file) elif self.cert_file or self.key_file: self.LOG.warn('Cannot use only a cert or key file. ' 'Please provide both. 
Ignoring.') kwargs['verify'] = self.ssl_ca_file or True if self.ssl_insecure: kwargs['verify'] = False RETRIES = self.http_request_max_retries retry = 0 while True: try: response = requests.request(method, url, **kwargs) break except Exception as e: if retry >= RETRIES: self.LOG.error('HTTP connection exception: %s', e) raise NetworkError('Unable to communicate with keystone') # NOTE(vish): sleep 0.5, 1, 2 self.LOG.warn('Retrying on HTTP connection exception: %s' % e) time.sleep(2.0 ** retry / 2) retry += 1 return response def _json_request(self, method, path, body=None, additional_headers=None): """HTTP request helper used to make json requests. :param method: http method :param path: relative request url :param body: dict to encode to json as request body. Optional. :param additional_headers: dict of additional headers to send with http request. Optional. :return (http response object, response body parsed as json) :raise ServerError when unable to communicate with keystone """ kwargs = { 'headers': { 'Content-type': 'application/json', 'Accept': 'application/json', }, } if additional_headers: kwargs['headers'].update(additional_headers) if body: kwargs['data'] = jsonutils.dumps(body) path = self.auth_admin_prefix + path response = self._http_request(method, path, **kwargs) try: data = jsonutils.loads(response.text) except ValueError: self.LOG.debug('Keystone did not return json-encoded body') data = {} return response, data def _request_admin_token(self): """Retrieve new token as admin user from keystone. :return token id upon success :raises ServerError when unable to communicate with keystone Irrespective of the auth version we are going to use for the user token, for simplicity we always use a v2 admin token to validate the user token. 
""" params = { 'auth': { 'passwordCredentials': { 'username': self.admin_user, 'password': self.admin_password, }, 'tenantName': self.admin_tenant_name, } } response, data = self._json_request('POST', '/v2.0/tokens', body=params) try: token = data['access']['token']['id'] expiry = data['access']['token']['expires'] if not (token and expiry): raise AssertionError('invalid token or expire') datetime_expiry = timeutils.parse_isotime(expiry) return (token, timeutils.normalize_time(datetime_expiry)) except (AssertionError, KeyError): self.LOG.warn( "Unexpected response from keystone service: %s", data) raise ServiceError('invalid json response') except (ValueError): self.LOG.warn( "Unable to parse expiration time from token: %s", data) raise ServiceError('invalid json response') def _validate_user_token(self, user_token, retry=True): """Authenticate user using PKI :param user_token: user's token id :param retry: Ignored, as it is not longer relevant :return uncrypted body of the token if the token is valid :raise InvalidUserToken if token is rejected :no longer raises ServiceError since it no longer makes RPC """ token_id = None try: token_id = cms.cms_hash_token(user_token) cached = self._cache_get(token_id) if cached: return cached if cms.is_ans1_token(user_token): verified = self.verify_signed_token(user_token) data = jsonutils.loads(verified) else: data = self.verify_uuid_token(user_token, retry) expires = self._confirm_token_not_expired(data) self._cache_put(token_id, data, expires) return data except NetworkError: self.LOG.debug('Token validation failure.', exc_info=True) self.LOG.warn("Authorization failed for token %s", token_id) raise InvalidUserToken('Token authorization failed') except Exception: self.LOG.debug('Token validation failure.', exc_info=True) if token_id: self._cache_store_invalid(token_id) self.LOG.warn("Authorization failed for token %s", token_id) raise InvalidUserToken('Token authorization failed') def _token_is_v2(self, token_info): return 
('access' in token_info) def _token_is_v3(self, token_info): return ('token' in token_info) def _build_user_headers(self, token_info): """Convert token object into headers. Build headers that represent authenticated user - see main doc info at start of file for details of headers to be defined. :param token_info: token object returned by keystone on authentication :raise InvalidUserToken when unable to parse token object """ def get_tenant_info(): """Returns a (tenant_id, tenant_name) tuple from context.""" def essex(): """Essex puts the tenant ID and name on the token.""" return (token['tenant']['id'], token['tenant']['name']) def pre_diablo(): """Pre-diablo, Keystone only provided tenantId.""" return (token['tenantId'], token['tenantId']) def default_tenant(): """Pre-grizzly, assume the user's default tenant.""" return (user['tenantId'], user['tenantName']) for method in [essex, pre_diablo, default_tenant]: try: return method() except KeyError: pass raise InvalidUserToken('Unable to determine tenancy.') # For clarity. 
set all those attributes that are optional in # either a v2 or v3 token to None first domain_id = None domain_name = None project_id = None project_name = None user_domain_id = None user_domain_name = None project_domain_id = None project_domain_name = None if self._token_is_v2(token_info): user = token_info['access']['user'] token = token_info['access']['token'] roles = ','.join([role['name'] for role in user.get('roles', [])]) catalog_root = token_info['access'] catalog_key = 'serviceCatalog' project_id, project_name = get_tenant_info() else: #v3 token token = token_info['token'] user = token['user'] user_domain_id = user['domain']['id'] user_domain_name = user['domain']['name'] roles = (','.join([role['name'] for role in token.get('roles', [])])) catalog_root = token catalog_key = 'catalog' # For v3, the server will put in the default project if there is # one, so no need for us to add it here (like we do for a v2 token) if 'domain' in token: domain_id = token['domain']['id'] domain_name = token['domain']['name'] elif 'project' in token: project_id = token['project']['id'] project_name = token['project']['name'] project_domain_id = token['project']['domain']['id'] project_domain_name = token['project']['domain']['name'] user_id = user['id'] user_name = user['name'] rval = { 'X-Identity-Status': 'Confirmed', 'X-Domain-Id': domain_id, 'X-Domain-Name': domain_name, 'X-Project-Id': project_id, 'X-Project-Name': project_name, 'X-Project-Domain-Id': project_domain_id, 'X-Project-Domain-Name': project_domain_name, 'X-User-Id': user_id, 'X-User-Name': user_name, 'X-User-Domain-Id': user_domain_id, 'X-User-Domain-Name': user_domain_name, 'X-Roles': roles, # Deprecated 'X-User': user_name, 'X-Tenant-Id': project_id, 'X-Tenant-Name': project_name, 'X-Tenant': project_name, 'X-Role': roles, } self.LOG.debug("Received request from user: %s with project_id : %s" " and roles: %s ", user_id, project_id, roles) try: catalog = catalog_root[catalog_key] rval['X-Service-Catalog'] = 
jsonutils.dumps(catalog) except KeyError: pass return rval def _header_to_env_var(self, key): """Convert header to wsgi env variable. :param key: http header name (ex. 'X-Auth-Token') :return wsgi env variable name (ex. 'HTTP_X_AUTH_TOKEN') """ return 'HTTP_%s' % key.replace('-', '_').upper() def _add_headers(self, env, headers): """Add http headers to environment.""" for (k, v) in six.iteritems(headers): env_key = self._header_to_env_var(k) env[env_key] = v def _remove_headers(self, env, keys): """Remove http headers from environment.""" for k in keys: env_key = self._header_to_env_var(k) try: del env[env_key] except KeyError: pass def _get_header(self, env, key, default=None): """Get http header from environment.""" env_key = self._header_to_env_var(key) return env.get(env_key, default) def _cache_get(self, token_id, ignore_expires=False): """Return token information from cache. If token is invalid raise InvalidUserToken return token only if fresh (not expired). """ if self._cache and token_id: if self._memcache_security_strategy is None: key = CACHE_KEY_TEMPLATE % token_id serialized = self._cache.get(key) else: keys = memcache_crypt.derive_keys( token_id, self._memcache_secret_key, self._memcache_security_strategy) cache_key = CACHE_KEY_TEMPLATE % ( memcache_crypt.get_cache_key(keys)) raw_cached = self._cache.get(cache_key) try: # unprotect_data will return None if raw_cached is None serialized = memcache_crypt.unprotect_data(keys, raw_cached) except Exception: msg = 'Failed to decrypt/verify cache data' self.LOG.exception(msg) # this should have the same effect as data not # found in cache serialized = None if serialized is None: return None # Note that 'invalid' and (data, expires) are the only # valid types of serialized cache entries, so there is not # a collision with jsonutils.loads(serialized) == None. 
cached = jsonutils.loads(serialized) if cached == 'invalid': self.LOG.debug('Cached Token %s is marked unauthorized', token_id) raise InvalidUserToken('Token authorization failed') data, expires = cached if ignore_expires or time.time() < float(expires): self.LOG.debug('Returning cached token %s', token_id) return data else: self.LOG.debug('Cached Token %s seems expired', token_id) def _cache_store(self, token_id, data): """Store value into memcache. data may be the string 'invalid' or a tuple like (data, expires) """ serialized_data = jsonutils.dumps(data) if self._memcache_security_strategy is None: cache_key = CACHE_KEY_TEMPLATE % token_id data_to_store = serialized_data else: keys = memcache_crypt.derive_keys( token_id, self._memcache_secret_key, self._memcache_security_strategy) cache_key = CACHE_KEY_TEMPLATE % memcache_crypt.get_cache_key(keys) data_to_store = memcache_crypt.protect_data(keys, serialized_data) # Historically the swift cache conection used the argument # timeout= for the cache timeout, but this has been unified # with the official python memcache client with time= since # grizzly, we still need to handle folsom for a while until # this could get removed. try: self._cache.set(cache_key, data_to_store, time=self.token_cache_time) except(TypeError): self._cache.set(cache_key, data_to_store, timeout=self.token_cache_time) def _confirm_token_not_expired(self, data): if not data: raise InvalidUserToken('Token authorization failed') if self._token_is_v2(data): timestamp = data['access']['token']['expires'] elif self._token_is_v3(data): timestamp = data['token']['expires_at'] else: raise InvalidUserToken('Token authorization failed') expires = timeutils.parse_isotime(timestamp).strftime('%s') if time.time() >= float(expires): self.LOG.debug('Token expired a %s', timestamp) raise InvalidUserToken('Token authorization failed') return expires def _cache_put(self, token_id, data, expires): """Put token data into the cache. 
Stores the parsed expire date in cache allowing quick check of token freshness on retrieval. """ if self._cache: self.LOG.debug('Storing %s token in memcache', token_id) self._cache_store(token_id, (data, expires)) def _cache_store_invalid(self, token_id): """Store invalid token in cache.""" if self._cache: self.LOG.debug( 'Marking token %s as unauthorized in memcache', token_id) self._cache_store(token_id, 'invalid') def cert_file_missing(self, proc_output, file_name): return (file_name in proc_output and not os.path.exists(file_name)) def verify_uuid_token(self, user_token, retry=True): """Authenticate user token with keystone. :param user_token: user's token id :param retry: flag that forces the middleware to retry user authentication when an indeterminate response is received. Optional. :return token object received from keystone on success :raise InvalidUserToken if token is rejected :raise ServiceError if unable to authenticate token """ # Determine the highest api version we can use. 
if not self.auth_version: self.auth_version = self._choose_api_version() if self.auth_version == 'v3.0': headers = {'X-Auth-Token': self.get_admin_token(), 'X-Subject-Token': safe_quote(user_token)} response, data = self._json_request( 'GET', '/v3/auth/tokens', additional_headers=headers) else: headers = {'X-Auth-Token': self.get_admin_token()} response, data = self._json_request( 'GET', '/v2.0/tokens/%s' % safe_quote(user_token), additional_headers=headers) if response.status_code == 200: return data if response.status_code == 404: self.LOG.warn("Authorization failed for token %s", user_token) raise InvalidUserToken('Token authorization failed') if response.status_code == 401: self.LOG.info( 'Keystone rejected admin token %s, resetting', headers) self.admin_token = None else: self.LOG.error('Bad response code while validating token: %s' % response.status_code) if retry: self.LOG.info('Retrying validation') return self._validate_user_token(user_token, False) else: self.LOG.warn("Invalid user token: %s. Keystone response: %s.", user_token, data) raise InvalidUserToken() def is_signed_token_revoked(self, signed_text): """Indicate whether the token appears in the revocation list.""" revocation_list = self.token_revocation_list revoked_tokens = revocation_list.get('revoked', []) if not revoked_tokens: return revoked_ids = (x['id'] for x in revoked_tokens) token_id = utils.hash_signed_token(signed_text) for revoked_id in revoked_ids: if token_id == revoked_id: self.LOG.debug('Token %s is marked as having been revoked', token_id) return True return False def cms_verify(self, data): """Verifies the signature of the provided data's IAW CMS syntax. If either of the certificate files are missing, fetch them and retry. 
""" while True: try: output = cms.cms_verify(data, self.signing_cert_file_name, self.signing_ca_file_name) except cms.subprocess.CalledProcessError as err: if self.cert_file_missing(err.output, self.signing_cert_file_name): self.fetch_signing_cert() continue if self.cert_file_missing(err.output, self.signing_ca_file_name): self.fetch_ca_cert() continue self.LOG.warning('Verify error: %s' % err) raise err return output def verify_signed_token(self, signed_text): """Check that the token is unrevoked and has a valid signature.""" if self.is_signed_token_revoked(signed_text): raise InvalidUserToken('Token has been revoked') formatted = cms.token_to_cms(signed_text) return self.cms_verify(formatted) def verify_signing_dir(self): if os.path.exists(self.signing_dirname): if not os.access(self.signing_dirname, os.W_OK): raise ConfigurationError( 'unable to access signing_dir %s' % self.signing_dirname) if os.stat(self.signing_dirname).st_uid != os.getuid(): self.LOG.warning( 'signing_dir is not owned by %s' % os.getuid()) current_mode = stat.S_IMODE(os.stat(self.signing_dirname).st_mode) if current_mode != stat.S_IRWXU: self.LOG.warning( 'signing_dir mode is %s instead of %s' % (oct(current_mode), oct(stat.S_IRWXU))) else: os.makedirs(self.signing_dirname, stat.S_IRWXU) @property def token_revocation_list_fetched_time(self): if not self._token_revocation_list_fetched_time: # If the fetched list has been written to disk, use its # modification time. if os.path.exists(self.revoked_file_name): mtime = os.path.getmtime(self.revoked_file_name) fetched_time = datetime.datetime.fromtimestamp(mtime) # Otherwise the list will need to be fetched. 
else: fetched_time = datetime.datetime.min self._token_revocation_list_fetched_time = fetched_time return self._token_revocation_list_fetched_time @token_revocation_list_fetched_time.setter def token_revocation_list_fetched_time(self, value): self._token_revocation_list_fetched_time = value @property def token_revocation_list(self): timeout = (self.token_revocation_list_fetched_time + self.token_revocation_list_cache_timeout) list_is_current = timeutils.utcnow() < timeout if list_is_current: # Load the list from disk if required if not self._token_revocation_list: with open(self.revoked_file_name, 'r') as f: self._token_revocation_list = jsonutils.loads(f.read()) else: self.token_revocation_list = self.fetch_revocation_list() return self._token_revocation_list @token_revocation_list.setter def token_revocation_list(self, value): """Save a revocation list to memory and to disk. :param value: A json-encoded revocation list """ self._token_revocation_list = jsonutils.loads(value) self.token_revocation_list_fetched_time = timeutils.utcnow() with open(self.revoked_file_name, 'w') as f: f.write(value) def fetch_revocation_list(self, retry=True): headers = {'X-Auth-Token': self.get_admin_token()} response, data = self._json_request('GET', '/v2.0/tokens/revoked', additional_headers=headers) if response.status_code == 401: if retry: self.LOG.info( 'Keystone rejected admin token %s, resetting admin token', headers) self.admin_token = None return self.fetch_revocation_list(retry=False) if response.status_code != 200: raise ServiceError('Unable to fetch token revocation list.') if 'signed' not in data: raise ServiceError('Revocation list improperly formatted.') return self.cms_verify(data['signed']) def fetch_signing_cert(self): path = self.auth_admin_prefix.rstrip('/') path += '/v2.0/certificates/signing' response = self._http_request('GET', path) def write_cert_file(data): with open(self.signing_cert_file_name, 'w') as certfile: certfile.write(data) try: #todo check response 
try: write_cert_file(response.text) except IOError: self.verify_signing_dir() write_cert_file(response.text) except (AssertionError, KeyError): self.LOG.warn( "Unexpected response from keystone service: %s", response.text) raise ServiceError('invalid json response') def fetch_ca_cert(self): path = self.auth_admin_prefix.rstrip('/') + '/v2.0/certificates/ca' response = self._http_request('GET', path) try: #todo check response with open(self.signing_ca_file_name, 'w') as certfile: certfile.write(response.text) except (AssertionError, KeyError): self.LOG.warn( "Unexpected response from keystone service: %s", response.text) raise ServiceError('invalid json response') def filter_factory(global_conf, **local_conf): """Returns a WSGI filter app for use with paste.deploy.""" conf = global_conf.copy() conf.update(local_conf) def auth_filter(app): return AuthProtocol(app, conf) return auth_filter def app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return AuthProtocol(None, conf)
citrix-openstack-build/python-keystoneclient
keystoneclient/middleware/auth_token.py
Python
apache-2.0
50,380
"""ThreatConnect TI Email""" # standard library from typing import TYPE_CHECKING # first-party from tcex.api.tc.v2.threat_intelligence.mappings.group.group import Group if TYPE_CHECKING: # first-party from tcex.api.tc.v2.threat_intelligence.threat_intelligence import ThreatIntelligence class Tactic(Group): """Unique API calls for Tactic API Endpoints Args: ti (ThreatIntelligence): An instance of the ThreatIntelligence Class. name (str, kwargs): [Required for Create] The name for this Group. owner (str, kwargs): The name for this Group. Default to default Org when not provided """ def __init__(self, ti: 'ThreatIntelligence', **kwargs): """Initialize Class properties.""" super().__init__(ti, sub_type='Tactic', api_entity='tactic', api_branch='tactics', **kwargs)
ThreatConnect-Inc/tcex
tcex/api/tc/v2/threat_intelligence/mappings/group/group_types/tactic.py
Python
apache-2.0
841
"""Default configuration values for Puppetboard.

Deployments override these with their own settings module; the values below
are the development defaults.
"""
import os

# PuppetDB connection settings.
PUPPETDB_HOST = 'localhost'
PUPPETDB_PORT = 8080
# TLS verification and client credentials; KEY and CERT stay None when
# talking plain HTTP.
PUPPETDB_SSL_VERIFY = True
PUPPETDB_KEY = None
PUPPETDB_CERT = None
# Seconds to wait for a PuppetDB response.
PUPPETDB_TIMEOUT = 20
# Flask session-signing key. Generated anew at each process start, so
# sessions do not survive restarts -- override with a fixed value in
# production deployments.
SECRET_KEY = os.urandom(24)
# Development server bind address and port.
DEV_LISTEN_HOST = '127.0.0.1'
DEV_LISTEN_PORT = 5000
# NOTE(review): purpose not evident from this module -- presumably an
# easter-egg/demo setting; confirm against the application code.
DEV_COFFEE_LOCATION = 'coffee'
# Nodes that have not reported for this many hours are flagged unresponsive.
UNRESPONSIVE_HOURS = 2
# Presumably enables the ad-hoc PuppetDB query page -- verify in the routes.
ENABLE_QUERY = True
# Render timestamps in the local timezone rather than UTC -- TODO confirm.
LOCALISE_TIMESTAMP = True
# Application log level.
LOGLEVEL = 'info'
# Number of reports fetched/shown per node.
REPORTS_COUNT = 10
OFFLINE_MODE = False
ENABLE_CATALOG = False
# Facts rendered on the facts overview -- presumably as charts; verify in
# the view/template logic.
GRAPH_FACTS = ['architecture',
               'domain',
               'lsbcodename',
               'lsbdistcodename',
               'lsbdistid',
               'lsbdistrelease',
               'lsbmajdistrelease',
               'netmask',
               'osfamily',
               'puppetversion',
               'processorcount']
# (column label, fact name) pairs shown in the inventory view.
INVENTORY_FACTS = [('Hostname', 'fqdn'),
                   ('IP Address', 'ipaddress'),
                   ('OS', 'lsbdistdescription'),
                   ('Architecture', 'hardwaremodel'),
                   ('Kernel Version', 'kernelrelease'),
                   ('Puppet Version', 'puppetversion')]
bewiwi/puppetboard
puppetboard/default_settings.py
Python
apache-2.0
1,121
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase

from django.conf import settings

from polycommon.options.exceptions import OptionException
from polycommon.options.feature import Feature
from polycommon.options.option import NAMESPACE_DB_OPTION_MARKER, OptionStores


class DummyFeature(Feature):
    """Minimal concrete Feature used to exercise the base-class behavior."""

    pass


class TestFeature(TestCase):
    """Tests for Feature defaults and FEATURES-namespace key parsing."""

    def test_feature_default_store(self):
        """A feature's store defaults to the store configured in settings."""
        assert DummyFeature.store == OptionStores(settings.STORE_OPTION)

    def test_feature_marker(self):
        """Features use the DB-option namespace marker."""
        assert DummyFeature.get_marker() == NAMESPACE_DB_OPTION_MARKER

    # Renamed from test_parse_key_wtong_namespace: fixed the "wtong" typo.
    def test_parse_key_wrong_namespace(self):
        """Keys outside the FEATURES namespace raise OptionException."""
        DummyFeature.key = "FOO"
        with self.assertRaises(OptionException):
            DummyFeature.parse_key()

        DummyFeature.key = "FOO:BAR"
        with self.assertRaises(OptionException):
            DummyFeature.parse_key()

    def test_parse_key_without_namespace(self):
        """FEATURES:<name> parses to (None, name)."""
        DummyFeature.key = "FEATURES:FOO"
        assert DummyFeature.parse_key() == (None, "FOO")

    def test_parse_key_with_namespace(self):
        """FEATURES:<ns>:<name> parses to (ns, name)."""
        DummyFeature.key = "FEATURES:FOO:BAR"
        assert DummyFeature.parse_key() == ("FOO", "BAR")
polyaxon/polyaxon
platform/polycommon/tests/test_options/test_feature.py
Python
apache-2.0
1,739
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# flake8: noqa

import argparse

from aria_cli import commands as aria
from aria_cli.config import argument_utils
from argcomplete import completers

yaml_files_completer = completers.FilesCompleter(['*.yml', '*.yaml'])
archive_files_completer = completers.FilesCompleter(
    ['*.zip', '*.tar', '*.tar.gz', '*.tar.bz2'])

FORMAT_INPUT_AS_YAML_OR_DICT = 'formatted as YAML or as "key1=value1;key2=value2"'


def workflow_id_argument(hlp):
    """Return the argparse spec for the required WORKFLOW option.

    :param hlp: help string shown for the argument
    """
    return {
        'metavar': 'WORKFLOW',
        'dest': 'workflow_id',
        'type': str,
        'required': True,
        'help': hlp,
    }


def _blueprint_path_argument():
    """Return a fresh argparse spec for the required -p/--blueprint-path option.

    Extracted because the identical spec was duplicated verbatim in the
    `init`, `install-plugins` and `create-requirements` commands. A new dict
    is returned on every call so each command may mutate its copy safely.
    """
    return {
        'dest': 'blueprint_path',
        'metavar': 'BLUEPRINT_PATH',
        'type': str,
        'required': True,
        'help': 'Path to a blueprint'
    }


def parser_config():
    """Build the declarative CLI configuration consumed by the parser.

    Returns a nested dict describing the top-level arguments and each
    sub-command: its arguments, help text and handler callable.
    """
    return {
        'description': 'Manages ARIA in different Cloud Environments',
        'arguments': {
            '--version': {
                'help': 'show version information and exit',
                'action': aria.version
            }
        },
        'commands': {
            'validate': {
                'arguments': {
                    '-p,--blueprint-path': {
                        # validate receives an open file object, unlike the
                        # other commands which take a plain path string.
                        'metavar': 'BLUEPRINT_FILE',
                        'type': argparse.FileType(),
                        'dest': 'blueprint_path',
                        'required': True,
                        'help': "Path to the application's blueprint file",
                        'completer': yaml_files_completer
                    }
                },
                'help': 'command for validating a blueprint',
                'handler': aria.local.validate
            },
            'init': {
                # Fixed duplicated word in the help text ("in in").
                'help': 'Init a local workflow execution environment '
                        'in the current working directory',
                'arguments': {
                    '-p,--blueprint-path': _blueprint_path_argument(),
                    '-i,--inputs': {
                        'metavar': 'INPUTS',
                        'dest': 'inputs',
                        'required': False,
                        'help': 'Inputs file/string for the local workflow creation ({0})'
                                .format(FORMAT_INPUT_AS_YAML_OR_DICT)
                    },
                    '--install-plugins': {
                        'dest': 'install_plugins_',
                        'action': 'store_true',
                        'default': False,
                        'help': 'Install necessary plugins of the given blueprint.'
                    }
                },
                'handler': aria.local.init
            },
            'install-plugins': {
                'help': 'Installs the necessary plugins for a given blueprint',
                'arguments': {
                    '-p,--blueprint-path': _blueprint_path_argument()
                },
                'handler': aria.local.install_plugins
            },
            'create-requirements': {
                'help': 'Creates a PIP compliant requirements file for the given blueprint',
                'arguments': {
                    '-p,--blueprint-path': _blueprint_path_argument(),
                    '-o,--output': {
                        'metavar': 'REQUIREMENTS_OUTPUT',
                        'dest': 'output',
                        'required': False,
                        'help': 'Path to a file that will hold the '
                                'requirements of the blueprint'
                    }
                },
                'handler': aria.local.create_requirements
            },
            'execute': {
                'help': 'Execute a workflow locally',
                'arguments': {
                    '-w,--workflow': argument_utils.remove_completer(
                        workflow_id_argument(
                            hlp='The workflow to execute locally')),
                    '-p,--parameters': {
                        'metavar': 'PARAMETERS',
                        'dest': 'parameters',
                        'default': {},
                        'type': str,
                        'required': False,
                        'help': 'Parameters for the workflow execution ({0})'
                                .format(FORMAT_INPUT_AS_YAML_OR_DICT)
                    },
                    '--allow-custom-parameters': {
                        'dest': 'allow_custom_parameters',
                        'action': 'store_true',
                        'default': False,
                        'help': 'A flag for allowing the passing of custom parameters ('
                                "parameters which were not defined in the workflow's schema in "
                                'the blueprint) to the execution'
                    },
                    '--task-retries': {
                        'metavar': 'TASK_RETRIES',
                        'dest': 'task_retries',
                        'default': 0,
                        'type': int,
                        'help': 'How many times should a task be retried in case '
                                'it fails'
                    },
                    '--task-retry-interval': {
                        'metavar': 'TASK_RETRY_INTERVAL',
                        'dest': 'task_retry_interval',
                        'default': 1,
                        'type': int,
                        'help': 'How many seconds to wait before each task is retried'
                    },
                    '--task-thread-pool-size': {
                        'metavar': 'TASK_THREAD_POOL_SIZE',
                        'dest': 'task_thread_pool_size',
                        'default': 1,
                        'type': int,
                        'help': 'The size of the thread pool size to execute tasks in'
                    }
                },
                'handler': aria.local.execute
            },
            'outputs': {
                'help': 'Display outputs',
                'arguments': {},
                'handler': aria.local.outputs
            },
            'instances': {
                'help': 'Display node instances',
                'arguments': {
                    '--node-id': {
                        'metavar': 'NODE_ID',
                        'dest': 'node_id',
                        'default': None,
                        'type': str,
                        'required': False,
                        'help': 'Only display node instances of this node id'
                    }
                },
                'handler': aria.local.instances
            }
        }
    }
denismakogon/aria-cli
aria_cli/config/parser_config.py
Python
apache-2.0
7,852
""" decorstate ~~~~~~~~~~ Simple "state machines" with Python decorators. :copyright: (c) 2015-2017 Andrew Hawker :license: Apache 2.0, see LICENSE for more details. """ try: from setuptools import setup except ImportError: from distutils.core import setup setup( name='decorstate', version='0.0.3', description='Simple "state machines" with Python decorators', long_description=open('README.md').read(), author='Andrew Hawker', author_email='andrew.r.hawker@gmail.com', url='https://github.com/ahawker/decorstate', license='Apache 2.0', py_modules=['decorstate'], classifiers=( 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Natural Language :: English', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', ) )
ahawker/decorstate
setup.py
Python
apache-2.0
1,177
# Copyright 2011 Midokura KK # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittests for ofstats.py. """ import struct import unittest2 from openfaucet import buffer from openfaucet import ofaction from openfaucet import ofmatch from openfaucet import ofstats class TestDescriptionStats(unittest2.TestCase): def setUp(self): self.buf = buffer.ReceiveBuffer() self.mfr_desc = 'Dummy Manufacturer Inc.' self.hw_desc = 'DummySwitch' self.sw_desc = 'DummyOS' self.serial_num = '0000000042' self.dp_desc = 'unittest switch' self.desc_stats = ofstats.DescriptionStats( mfr_desc=self.mfr_desc, hw_desc=self.hw_desc, sw_desc=self.sw_desc, serial_num=self.serial_num, dp_desc=self.dp_desc) def test_serialize(self): self.assertEqual( self.mfr_desc + '\x00' * (256 - len(self.mfr_desc)) + self.hw_desc + '\x00' * (256 - len(self.hw_desc)) + self.sw_desc + '\x00' * (256 - len(self.sw_desc)) + self.serial_num + '\x00' * (32 - len(self.serial_num)) + self.dp_desc + '\x00' * (256 - len(self.dp_desc)), self.desc_stats.serialize()) def test_deserialize(self): self.buf.append(self.mfr_desc + '\x00' * (256 - len(self.mfr_desc))) self.buf.append(self.hw_desc + '\x00' * (256 - len(self.hw_desc))) self.buf.append(self.sw_desc + '\x00' * (256 - len(self.sw_desc))) self.buf.append(self.serial_num + '\x00' * (32 - len(self.serial_num))) self.buf.append(self.dp_desc + '\x00' * (256 - len(self.dp_desc))) self.buf.set_message_boundaries(1056) self.assertTupleEqual(self.desc_stats, 
ofstats.DescriptionStats.deserialize(self.buf)) class TestFlowStats(unittest2.TestCase): def setUp(self): self.buf = buffer.ReceiveBuffer() self.match = ofmatch.Match( in_port=0x13, dl_src='\x13\x24\x35\x46\x57\x68', dl_dst='\x12\x23\x34\x45\x56\x67', dl_vlan=0x11, dl_vlan_pcp=0x22, dl_type=0x3344, nw_tos=0x80, nw_proto=0xcc, nw_src=('\xaa\xbb\xcc\xdd', 32), nw_dst=('\x21\x32\x43\x54', 32), tp_src=0x38, tp_dst=0x49) self.flow_stats = ofstats.FlowStats( 0xac, self.match, 0x10203040, 0x11223344, 0x1002, 0x0136, 0x0247, 0xffeeddccbbaa9988, 0x42, 0x0153, ( ofaction.ActionOutput(port=0x1234, max_len=0x9abc), ofaction.ActionSetDlDst(dl_addr='\x12\x34\x56\x78\xab\xcd'))) def _serialize_action(self, a): a_ser = a.serialize() header = struct.pack('!HH', a.type, 4 + len(a_ser)) return (header, a_ser) def _deserialize_action(self, buf): action_type, action_length = buf.unpack('!HH') action_class = ofaction.ACTION_CLASSES.get(action_type) return action_class.deserialize(buf) def test_serialize(self): self.assertEqual( '\x00\x70' '\xac\x00' + self.match.serialize() + '\x10\x20\x30\x40' '\x11\x22\x33\x44' '\x10\x02' '\x01\x36' '\x02\x47' '\x00\x00\x00\x00\x00\x00' '\xff\xee\xdd\xcc\xbb\xaa\x99\x88' '\x00\x00\x00\x00\x00\x00\x00\x42' '\x00\x00\x00\x00\x00\x00\x01\x53' '\x00\x00\x00\x08' '\x12\x34\x9a\xbc' '\x00\x05\x00\x10' '\x12\x34\x56\x78\xab\xcd\x00\x00\x00\x00\x00\x00', ''.join(self.flow_stats.serialize(self._serialize_action))) def test_deserialize(self): self.buf.append('\x00\x70' '\xac\x00') self.buf.append(self.match.serialize()) self.buf.append( '\x10\x20\x30\x40' '\x11\x22\x33\x44' '\x10\x02' '\x01\x36' '\x02\x47' '\x00\x00\x00\x00\x00\x00' '\xff\xee\xdd\xcc\xbb\xaa\x99\x88' '\x00\x00\x00\x00\x00\x00\x00\x42' '\x00\x00\x00\x00\x00\x00\x01\x53' '\x00\x00\x00\x08' '\x12\x34\x9a\xbc' '\x00\x05\x00\x10' '\x12\x34\x56\x78\xab\xcd\x00\x00\x00\x00\x00\x00') self.buf.set_message_boundaries(112) self.assertTupleEqual( self.flow_stats, ofstats.FlowStats.deserialize(self.buf, 
self._deserialize_action)) class TestTableStats(unittest2.TestCase): def setUp(self): self.buf = buffer.ReceiveBuffer() self.wildcards = ofmatch.Wildcards( in_port=True, dl_src=True, dl_dst=True, dl_vlan=True, dl_vlan_pcp=True, dl_type=True, nw_tos=False, nw_proto=False, nw_src=0, nw_dst=0, tp_src=False, tp_dst=False) self.table_stats = ofstats.TableStats( 0xac, 'eth_wildcards', self.wildcards, 0x100000, 0x1234, 0x5678, 0x9abcd) def test_serialize(self): self.assertEqual('\xac\x00\x00\x00' 'eth_wildcards' + '\x00' * (32 - len('eth_wildcards')) + '\x00\x10\x00\x1f' '\x00\x10\x00\x00' '\x00\x00\x12\x34' '\x00\x00\x00\x00\x00\x00\x56\x78' '\x00\x00\x00\x00\x00\x09\xab\xcd', ''.join(self.table_stats.serialize())) def test_deserialize(self): self.buf.append('\xac\x00\x00\x00' 'eth_wildcards') self.buf.append('\x00' * (32 - len('eth_wildcards'))) self.buf.append('\x00\x10\x00\x1f' '\x00\x10\x00\x00' '\x00\x00\x12\x34' '\x00\x00\x00\x00\x00\x00\x56\x78' '\x00\x00\x00\x00\x00\x09\xab\xcd') self.buf.set_message_boundaries(64) self.assertTupleEqual(self.table_stats, ofstats.TableStats.deserialize(self.buf)) class TestPortStats(unittest2.TestCase): def setUp(self): self.buf = buffer.ReceiveBuffer() self.port_stats = ofstats.PortStats( port_no=0xabcd, rx_packets=0x1234, tx_packets=0x5678, rx_bytes=0x1324, tx_bytes=0x5768, rx_dropped=0x1a2b, tx_dropped=0x3c4d, rx_errors=0xab12, tx_errors=0xcd34, rx_frame_err=0x1432, rx_over_err=0x2543, rx_crc_err=0x3654, collisions=0x4765) def test_serialize(self): self.assertEqual('\xab\xcd\x00\x00\x00\x00\x00\x00' '\x00\x00\x00\x00\x00\x00\x12\x34' '\x00\x00\x00\x00\x00\x00\x56\x78' '\x00\x00\x00\x00\x00\x00\x13\x24' '\x00\x00\x00\x00\x00\x00\x57\x68' '\x00\x00\x00\x00\x00\x00\x1a\x2b' '\x00\x00\x00\x00\x00\x00\x3c\x4d' '\x00\x00\x00\x00\x00\x00\xab\x12' '\x00\x00\x00\x00\x00\x00\xcd\x34' '\x00\x00\x00\x00\x00\x00\x14\x32' '\x00\x00\x00\x00\x00\x00\x25\x43' '\x00\x00\x00\x00\x00\x00\x36\x54' '\x00\x00\x00\x00\x00\x00\x47\x65', 
self.port_stats.serialize()) def test_serialize_every_counter_unavailable(self): index = 8 port_stats_ser = ('\xab\xcd\x00\x00\x00\x00\x00\x00' '\x00\x00\x00\x00\x00\x00\x12\x34' '\x00\x00\x00\x00\x00\x00\x56\x78' '\x00\x00\x00\x00\x00\x00\x13\x24' '\x00\x00\x00\x00\x00\x00\x57\x68' '\x00\x00\x00\x00\x00\x00\x1a\x2b' '\x00\x00\x00\x00\x00\x00\x3c\x4d' '\x00\x00\x00\x00\x00\x00\xab\x12' '\x00\x00\x00\x00\x00\x00\xcd\x34' '\x00\x00\x00\x00\x00\x00\x14\x32' '\x00\x00\x00\x00\x00\x00\x25\x43' '\x00\x00\x00\x00\x00\x00\x36\x54' '\x00\x00\x00\x00\x00\x00\x47\x65') for attr in ('rx_packets', 'tx_packets', 'rx_bytes', 'tx_bytes', 'rx_dropped', 'tx_dropped', 'rx_errors', 'tx_errors', 'rx_frame_err', 'rx_over_err', 'rx_crc_err', 'collisions'): ps = self.port_stats._replace(**{attr: None}) # set as unavailable self.assertEqual(port_stats_ser[:index] + '\xff\xff\xff\xff\xff\xff\xff\xff' + port_stats_ser[index + 8:], ps.serialize()) index += 8 def test_deserialize(self): self.buf.append('\xab\xcd\x00\x00\x00\x00\x00\x00' '\x00\x00\x00\x00\x00\x00\x12\x34' '\x00\x00\x00\x00\x00\x00\x56\x78' '\x00\x00\x00\x00\x00\x00\x13\x24' '\x00\x00\x00\x00\x00\x00\x57\x68' '\x00\x00\x00\x00\x00\x00\x1a\x2b' '\x00\x00\x00\x00\x00\x00\x3c\x4d' '\x00\x00\x00\x00\x00\x00\xab\x12' '\x00\x00\x00\x00\x00\x00\xcd\x34' '\x00\x00\x00\x00\x00\x00\x14\x32' '\x00\x00\x00\x00\x00\x00\x25\x43' '\x00\x00\x00\x00\x00\x00\x36\x54' '\x00\x00\x00\x00\x00\x00\x47\x65') self.buf.set_message_boundaries(104) self.assertTupleEqual(self.port_stats, ofstats.PortStats.deserialize(self.buf)) def test_deserialize_every_counter_unavailable(self): index = 8 port_stats_ser = ('\xab\xcd\x00\x00\x00\x00\x00\x00' '\x00\x00\x00\x00\x00\x00\x12\x34' '\x00\x00\x00\x00\x00\x00\x56\x78' '\x00\x00\x00\x00\x00\x00\x13\x24' '\x00\x00\x00\x00\x00\x00\x57\x68' '\x00\x00\x00\x00\x00\x00\x1a\x2b' '\x00\x00\x00\x00\x00\x00\x3c\x4d' '\x00\x00\x00\x00\x00\x00\xab\x12' '\x00\x00\x00\x00\x00\x00\xcd\x34' '\x00\x00\x00\x00\x00\x00\x14\x32' 
'\x00\x00\x00\x00\x00\x00\x25\x43' '\x00\x00\x00\x00\x00\x00\x36\x54' '\x00\x00\x00\x00\x00\x00\x47\x65') for attr in ('rx_packets', 'tx_packets', 'rx_bytes', 'tx_bytes', 'rx_dropped', 'tx_dropped', 'rx_errors', 'tx_errors', 'rx_frame_err', 'rx_over_err', 'rx_crc_err', 'collisions'): self.buf.append(port_stats_ser[:index]) self.buf.append('\xff\xff\xff\xff\xff\xff\xff\xff') self.buf.append(port_stats_ser[index + 8:]) self.buf.set_message_boundaries(104) self.assertTupleEqual(self.port_stats._replace(**{attr: None}), ofstats.PortStats.deserialize(self.buf)) index += 8 class TestQueueStats(unittest2.TestCase): def setUp(self): self.buf = buffer.ReceiveBuffer() self.queue_stats = ofstats.QueueStats( port_no=0xabcd, queue_id=0x10203040, tx_bytes=0x5768, tx_packets=0x5678, tx_errors=0xcd34) def test_serialize(self): self.assertEqual('\xab\xcd\x00\x00' '\x10\x20\x30\x40' '\x00\x00\x00\x00\x00\x00\x57\x68' '\x00\x00\x00\x00\x00\x00\x56\x78' '\x00\x00\x00\x00\x00\x00\xcd\x34', self.queue_stats.serialize()) def test_deserialize(self): self.buf.append('\xab\xcd\x00\x00' '\x10\x20\x30\x40' '\x00\x00\x00\x00\x00\x00\x57\x68' '\x00\x00\x00\x00\x00\x00\x56\x78' '\x00\x00\x00\x00\x00\x00\xcd\x34') self.buf.set_message_boundaries(32) self.assertTupleEqual(self.queue_stats, ofstats.QueueStats.deserialize(self.buf)) if __name__ == '__main__': unittest2.main()
rlenglet/openfaucet
src/openfaucet/test_ofstats.py
Python
apache-2.0
12,672
"""Support for Streamlabs Water Monitor Usage.""" from datetime import timedelta from homeassistant.components.streamlabswater import DOMAIN as STREAMLABSWATER_DOMAIN from homeassistant.const import VOLUME_GALLONS from homeassistant.helpers.entity import Entity from homeassistant.util import Throttle DEPENDENCIES = ["streamlabswater"] WATER_ICON = "mdi:water" MIN_TIME_BETWEEN_USAGE_UPDATES = timedelta(seconds=60) NAME_DAILY_USAGE = "Daily Water" NAME_MONTHLY_USAGE = "Monthly Water" NAME_YEARLY_USAGE = "Yearly Water" def setup_platform(hass, config, add_devices, discovery_info=None): """Set up water usage sensors.""" client = hass.data[STREAMLABSWATER_DOMAIN]["client"] location_id = hass.data[STREAMLABSWATER_DOMAIN]["location_id"] location_name = hass.data[STREAMLABSWATER_DOMAIN]["location_name"] streamlabs_usage_data = StreamlabsUsageData(location_id, client) streamlabs_usage_data.update() add_devices( [ StreamLabsDailyUsage(location_name, streamlabs_usage_data), StreamLabsMonthlyUsage(location_name, streamlabs_usage_data), StreamLabsYearlyUsage(location_name, streamlabs_usage_data), ] ) class StreamlabsUsageData: """Track and query usage data.""" def __init__(self, location_id, client): """Initialize the usage data.""" self._location_id = location_id self._client = client self._today = None self._this_month = None self._this_year = None @Throttle(MIN_TIME_BETWEEN_USAGE_UPDATES) def update(self): """Query and store usage data.""" water_usage = self._client.get_water_usage_summary(self._location_id) self._today = round(water_usage["today"], 1) self._this_month = round(water_usage["thisMonth"], 1) self._this_year = round(water_usage["thisYear"], 1) def get_daily_usage(self): """Return the day's usage.""" return self._today def get_monthly_usage(self): """Return the month's usage.""" return self._this_month def get_yearly_usage(self): """Return the year's usage.""" return self._this_year class StreamLabsDailyUsage(Entity): """Monitors the daily water usage.""" def 
__init__(self, location_name, streamlabs_usage_data): """Initialize the daily water usage device.""" self._location_name = location_name self._streamlabs_usage_data = streamlabs_usage_data self._state = None @property def name(self): """Return the name for daily usage.""" return "{} {}".format(self._location_name, NAME_DAILY_USAGE) @property def icon(self): """Return the daily usage icon.""" return WATER_ICON @property def state(self): """Return the current daily usage.""" return self._streamlabs_usage_data.get_daily_usage() @property def unit_of_measurement(self): """Return gallons as the unit measurement for water.""" return VOLUME_GALLONS def update(self): """Retrieve the latest daily usage.""" self._streamlabs_usage_data.update() class StreamLabsMonthlyUsage(StreamLabsDailyUsage): """Monitors the monthly water usage.""" @property def name(self): """Return the name for monthly usage.""" return "{} {}".format(self._location_name, NAME_MONTHLY_USAGE) @property def state(self): """Return the current monthly usage.""" return self._streamlabs_usage_data.get_monthly_usage() class StreamLabsYearlyUsage(StreamLabsDailyUsage): """Monitors the yearly water usage.""" @property def name(self): """Return the name for yearly usage.""" return "{} {}".format(self._location_name, NAME_YEARLY_USAGE) @property def state(self): """Return the current yearly usage.""" return self._streamlabs_usage_data.get_yearly_usage()
fbradyirl/home-assistant
homeassistant/components/streamlabswater/sensor.py
Python
apache-2.0
3,962
# -*- coding: utf-8 -*-
"""File containing a Windows Registry plugin to parse the AMCache.hve file."""

from __future__ import unicode_literals

import pyregf

from dfdatetime import filetime
from dfdatetime import posix_time

from dfwinreg import definitions as dfwinreg_definitions

from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import interface
from plaso.parsers import manager


class AMCacheFileEventData(events.EventData):
  """AMCache file event data.

  Attributes:
    company_name (str): company name that created product file belongs to.
    file_description (str): description of file.
    file_reference (str): file system file reference, for example 9-1
        (MFT entry - sequence number).
    file_size (int): size of file in bytes.
    file_version (str): version of file.
    full_path (str): full path of file.
    language_code (int): language code of file.
    product_name (str): product name file belongs to.
    program_identifier (str): GUID of entry under Root/Program key file
        belongs to.
    sha1 (str): SHA-1 of file.
  """

  DATA_TYPE = 'windows:registry:amcache'

  def __init__(self):
    """Initializes event data."""
    super(AMCacheFileEventData, self).__init__(data_type=self.DATA_TYPE)
    self.company_name = None
    self.file_description = None
    self.file_reference = None
    self.file_size = None
    self.file_version = None
    self.full_path = None
    self.language_code = None
    self.product_name = None
    self.program_identifier = None
    self.sha1 = None


class AMCacheProgramEventData(events.EventData):
  """AMCache programs event data.

  Attributes:
    entry_type (str): type of entry (usually AddRemoveProgram).
    file_paths (str): file paths of installed program.
    files (str): list of files belonging to program.
    language_code (int): language_code of program.
    msi_package_code (str): MSI package code of program.
    msi_product_code (str): MSI product code of program.
    name (str): name of installed program.
    package_code (str): package code of program.
    product_code (str): product code of program.
    publisher (str): publisher of program.
    uninstall_key (str): unicode string of uninstall registry key for program.
    version (str): version of program.
  """

  DATA_TYPE = 'windows:registry:amcache:programs'

  def __init__(self):
    """Initializes event data."""
    super(AMCacheProgramEventData, self).__init__(data_type=self.DATA_TYPE)
    self.entry_type = None
    self.file_paths = None
    self.files = None
    self.language_code = None
    self.msi_package_code = None
    self.msi_product_code = None
    self.name = None
    self.package_code = None
    self.product_code = None
    self.publisher = None
    self.uninstall_key = None
    self.version = None


class AMCacheParser(interface.FileObjectParser):
  """AMCache Registry plugin for recently run programs."""

  NAME = 'amcache'
  DATA_FORMAT = 'AMCache Windows NT Registry (AMCache.hve) file'

  # Contains: {value name: attribute name}
  _FILE_REFERENCE_KEY_VALUES = {
      '0': 'product_name', '1': 'company_name', '3': 'language_code',
      '5': 'file_version', '6': 'file_size', 'c': 'file_description',
      '15': 'full_path', '100': 'program_identifier', '101': 'sha1'}

  _AMCACHE_COMPILATION_TIME = 'f'
  _AMCACHE_FILE_MODIFICATION_TIME = '11'
  _AMCACHE_FILE_CREATION_TIME = '12'
  _AMCACHE_ENTRY_WRITE_TIME = '17'

  _AMCACHE_P_INSTALLATION_TIME = 'a'

  _AMCACHE_P_FILES = 'Files'

  _PRODUCT_KEY_VALUES = {
      '0': 'name', '1': 'version', '2': 'publisher', '3': 'language_code',
      '6': 'entry_type', '7': 'uninstall_key', 'd': 'file_paths',
      'f': 'product_code', '10': 'package_code', '11': 'msi_product_code',
      '12': 'msi_package_code',
  }

  #TODO Add GetFormatSpecification when issues are fixed with adding
  # multiple parsers for the same file format (in this case regf files)
  # AddNewSignature ->
  # b'\x41\x00\x6d\x00\x63\x00\x61\x00\x63\x00\x68\x00\x65', offset=88

  def _GetValueDataAsObject(self, parser_mediator, value):
    """Retrieves the value data as an object.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      value (pyregf_value): value.

    Returns:
      object: data as a Python type or None if the value cannot be read.
    """
    try:
      if value.type in (
          dfwinreg_definitions.REG_SZ,
          dfwinreg_definitions.REG_EXPAND_SZ,
          dfwinreg_definitions.REG_LINK):
        value_data = value.get_data_as_string()

      elif value.type in (
          dfwinreg_definitions.REG_DWORD,
          dfwinreg_definitions.REG_DWORD_BIG_ENDIAN,
          dfwinreg_definitions.REG_QWORD):
        value_data = value.get_data_as_integer()

      elif value.type == dfwinreg_definitions.REG_MULTI_SZ:
        value_data = list(value.get_data_as_multi_string())

      else:
        value_data = value.data

    except (IOError, OverflowError) as exception:
      parser_mediator.ProduceExtractionWarning(
          'Unable to read data from value: {0:s} with error: {1!s}'.format(
              value.name, exception))
      return None

    return value_data

  def _ParseFileKey(self, parser_mediator, file_key):
    """Parses a Root\\File key.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_key (pyregf.key): the File key.
    """
    for volume_key in file_key.sub_keys:
      for file_reference_key in volume_key.sub_keys:
        self._ParseFileReferenceKey(parser_mediator, file_reference_key)

  def _ParseFileReferenceKey(self, parser_mediator, file_reference_key):
    """Parses a file reference key (sub key of Root\\File\\%VOLUME%) for events.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_reference_key (pyregf.key): file reference key.
    """
    event_data = AMCacheFileEventData()

    try:
      if '0000' in file_reference_key.name:
        # A NTFS file is a combination of MFT entry and sequence number.
        sequence_number, mft_entry = file_reference_key.name.split('0000')
        mft_entry = int(mft_entry, 16)
        sequence_number = int(sequence_number, 16)
        event_data.file_reference = '{0:d}-{1:d}'.format(
            mft_entry, sequence_number)
      else:
        # A FAT file is a single number.
        file_reference = int(file_reference_key.name, 16)
        event_data.file_reference = '{0:d}'.format(file_reference)
    except (ValueError, TypeError):
      pass

    for value_name, attribute_name in self._FILE_REFERENCE_KEY_VALUES.items():
      value = file_reference_key.get_value_by_name(value_name)
      if not value:
        continue

      value_data = self._GetValueDataAsObject(parser_mediator, value)
      # _GetValueDataAsObject returns None when the value data cannot be
      # read; guard against calling startswith() on None in that case.
      if (attribute_name == 'sha1' and value_data and
          value_data.startswith('0000')):
        # Strip off the 4 leading zero's from the sha1 hash.
        value_data = value_data[4:]

      setattr(event_data, attribute_name, value_data)

    amcache_time_value = file_reference_key.get_value_by_name(
        self._AMCACHE_ENTRY_WRITE_TIME)
    if amcache_time_value:
      amcache_time = filetime.Filetime(
          amcache_time_value.get_data_as_integer())
      event = time_events.DateTimeValuesEvent(
          amcache_time, definitions.TIME_DESCRIPTION_MODIFICATION)
      parser_mediator.ProduceEventWithEventData(event, event_data)

    creation_time_value = file_reference_key.get_value_by_name(
        self._AMCACHE_FILE_CREATION_TIME)
    if creation_time_value:
      creation_time = filetime.Filetime(
          creation_time_value.get_data_as_integer())
      event = time_events.DateTimeValuesEvent(
          creation_time, definitions.TIME_DESCRIPTION_CREATION)
      parser_mediator.ProduceEventWithEventData(event, event_data)

    modification_time_value = file_reference_key.get_value_by_name(
        self._AMCACHE_FILE_MODIFICATION_TIME)
    if modification_time_value:
      modification_time = filetime.Filetime(
          modification_time_value.get_data_as_integer())
      event = time_events.DateTimeValuesEvent(
          modification_time, definitions.TIME_DESCRIPTION_MODIFICATION)
      parser_mediator.ProduceEventWithEventData(event, event_data)

    compilation_time_value = file_reference_key.get_value_by_name(
        self._AMCACHE_COMPILATION_TIME)
    if compilation_time_value:
      link_time = posix_time.PosixTime(
          compilation_time_value.get_data_as_integer())
      event = time_events.DateTimeValuesEvent(
          link_time, definitions.TIME_DESCRIPTION_CHANGE)
      parser_mediator.ProduceEventWithEventData(event, event_data)

  def _ParseProgramKey(self, parser_mediator, program_key):
    """Parses a program key (a sub key of Root\\Programs) for events.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      program_key (pyregf_key): program key.
    """
    event_data = AMCacheProgramEventData()

    for value_name, attribute_name in self._PRODUCT_KEY_VALUES.items():
      value = program_key.get_value_by_name(value_name)
      if not value:
        continue

      value_data = self._GetValueDataAsObject(parser_mediator, value)
      setattr(event_data, attribute_name, value_data)

    installation_time_value = program_key.get_value_by_name(
        self._AMCACHE_P_INSTALLATION_TIME)
    if installation_time_value:
      installation_time = posix_time.PosixTime(
          installation_time_value.get_data_as_integer())
      event = time_events.DateTimeValuesEvent(
          installation_time, definitions.TIME_DESCRIPTION_INSTALLATION)
      parser_mediator.ProduceEventWithEventData(event, event_data)

  def _ParseProgramsKey(self, parser_mediator, programs_key):
    """Parses a Root\\Programs key.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      programs_key (pyregf.key): the Programs key.
    """
    for program_key in programs_key.sub_keys:
      self._ParseProgramKey(parser_mediator, program_key)

  def ParseFileObject(self, parser_mediator, file_object):
    """Parses an AMCache.hve file-like object for events.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_object (dfvfs.FileIO): file-like object.
    """
    regf_file = pyregf.file()
    try:
      regf_file.open_file_object(file_object)
    except IOError:
      # The error is currently ignored -> see TODO above related to the
      # fixing of handling multiple parsers for the same file format.
      return

    try:
      root_key = regf_file.get_key_by_path('Root')
      if root_key:
        file_key = root_key.get_sub_key_by_path('File')
        if file_key:
          self._ParseFileKey(parser_mediator, file_key)

        programs_key = root_key.get_sub_key_by_path('Programs')
        if programs_key:
          self._ParseProgramsKey(parser_mediator, programs_key)
    finally:
      # Guarantee the Registry file is closed even if parsing raises.
      regf_file.close()


manager.ParsersManager.RegisterParser(AMCacheParser)
rgayon/plaso
plaso/parsers/amcache.py
Python
apache-2.0
11,498
# Copyright 2012 Rackspace
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from cliff.lister import Lister

from service_registry_cli.utils import BaseListCommand, get_client


class ListCommand(BaseListCommand, Lister):
    """
    Return a list of the configuration values.
    """
    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        """Extend the base parser with an optional --namespace filter."""
        parser = super(ListCommand, self).get_parser(prog_name=prog_name)
        parser.add_argument('--namespace', dest='namespace')
        return parser

    def take_action(self, parsed_args):
        """Fetch configuration values and return (headers, rows) for cliff."""
        client = get_client(parsed_args)

        # Falsy pagination arguments are normalized to None so the API
        # client applies its own defaults.
        query = {
            'marker': parsed_args.marker or None,
            'limit': parsed_args.limit or None,
        }

        if parsed_args.namespace:
            query['namespace'] = parsed_args.namespace
            response = client.configuration.list_for_namespace(**query)
        else:
            response = client.configuration.list(**query)

        # Expose pagination metadata on parsed_args for the base command.
        parsed_args.returned_metadata = response['metadata']

        rows = [(item['id'], item['value']) for item in response['values']]
        return (('ID', 'Value'), rows)
racker/python-service-registry-cli
service_registry_cli/commands/configuration/list.py
Python
apache-2.0
2,023
# Copyright 2019 Apex.AI, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import inspect import itertools import os import unittest import warnings from .actions import ReadyToTest # Patch up the warnings module to streamline the warning messages. See # https://docs.python.org/3/library/warnings.html#warnings.showwarning def slim_formatwarning(msg, *args, **kwargs): return 'Warning: ' + str(msg) + os.linesep warnings.formatwarning = slim_formatwarning def _normalize_ld(launch_description_fn): # A launch description fn can return just a launch description, or a tuple of # (launch_description, test_context). This wrapper function normalizes things # so we always get a tuple, sometimes with an empty dictionary for the test_context def normalize(result): if isinstance(result, tuple): return result else: return result, {} def wrapper(**kwargs): fn_args = inspect.getfullargspec(launch_description_fn) if 'ready_fn' in fn_args.args + fn_args.kwonlyargs: # This is an old-style launch_description function which expects ready_fn to be passed # in to the function # This type of launch description will be deprecated in the future. Warn about it # here warnings.warn( 'Passing ready_fn as an argument to generate_test_description will ' 'be removed in a future release. Include a launch_testing.actions.ReadyToTest ' 'action in the LaunchDescription instead.' 
) return normalize(launch_description_fn(**kwargs)) else: # This is a new-style launch_description which should contain a ReadyToTest action ready_fn = kwargs.pop('ready_fn') result = normalize(launch_description_fn(**kwargs)) # Fish the ReadyToTest action out of the launch description and plumb our # ready_fn to it def iterate_ready_to_test_actions(entities): """Recursively search LaunchDescription entities for all ReadyToTest actions.""" for entity in entities: if isinstance(entity, ReadyToTest): yield entity yield from iterate_ready_to_test_actions( entity.describe_sub_entities() ) for conditional_sub_entity in entity.describe_conditional_sub_entities(): yield from iterate_ready_to_test_actions( conditional_sub_entity[1] ) try: ready_action = next(e for e in iterate_ready_to_test_actions(result[0].entities)) except StopIteration: # No ReadyToTest action found raise Exception( 'generate_test_description functions without a ready_fn argument must return ' 'a LaunchDescription containing a ReadyToTest action' ) ready_action._add_callback(ready_fn) return result return wrapper class TestRun: def __init__(self, name, test_description_function, param_args, pre_shutdown_tests, post_shutdown_tests): self.name = name if not hasattr(test_description_function, '__markers__'): test_description_function.__markers__ = {} self._test_description_function = test_description_function self.normalized_test_description = _normalize_ld(test_description_function) self.param_args = param_args self.pre_shutdown_tests = pre_shutdown_tests self.post_shutdown_tests = post_shutdown_tests # If we're parametrized, extend the test names so we can tell more easily what # params they were run with if self.param_args: for tc in itertools.chain( _iterate_tests_in_test_suite(pre_shutdown_tests), _iterate_tests_in_test_suite(post_shutdown_tests) ): test_method = getattr(tc, tc._testMethodName) new_name = tc._testMethodName + self._format_params() setattr(tc, '_testMethodName', new_name) setattr(tc, 
new_name, test_method) # Disable cleanup of test cases once they are run for tc in itertools.chain( _iterate_test_suites(pre_shutdown_tests), _iterate_test_suites(post_shutdown_tests) ): tc._removeTestAtIndex = lambda *args, **kwargs: None @property def markers(self): return self._test_description_function.__markers__ def bind(self, tests, injected_attributes={}, injected_args={}): """ Bind injected_attributes and injected_args to tests. Injected Attributes can be accessed from a test as self.name Injected Arguments can be accessed as an argument if the test has an argument with a matching name """ # Inject test attributes into the test as self.whatever. This method of giving # objects to the test is pretty inferior to injecting them as arguments to the # test methods - we may deprecate this in favor of everything being an argument for name, value in injected_attributes.items(): _give_attribute_to_tests(value, name, tests) # Give objects with matching names as arguments to tests. This doesn't have the # weird scoping and name collision issues that the above method has. In fact, # we give proc_info and proc_output to the tests as arguments too, so anything # you can do with test attributes can also be accomplished with test arguments _bind_test_args_to_tests(injected_args, tests) def get_launch_description(self): """ Get just the launch description portion of the test_description. This should only be used for the purposes of introspecting the launch description. 
The returned launch description is not meant to be launched """ return self.normalized_test_description(ready_fn=lambda: None)[0] def all_cases(self): yield from _iterate_tests_in_test_suite(self.pre_shutdown_tests) yield from _iterate_tests_in_test_suite(self.post_shutdown_tests) def __str__(self): return self.name + self._format_params() def _format_params(self): if not self.param_args: return '' else: str_args = map(str, self.param_args.values()) return '[{}]'.format(', '.join(str_args)) def LoadTestsFromPythonModule(module, *, name='launch_tests'): if not hasattr(module.generate_test_description, '__parametrized__'): normalized_test_description_func = ( lambda: [(module.generate_test_description, {})] ) else: normalized_test_description_func = module.generate_test_description # If our test description is parameterized, we'll load a set of tests for each # individual launch return [TestRun(name, description, args, PreShutdownTestLoader().loadTestsFromModule(module), PostShutdownTestLoader().loadTestsFromModule(module)) for description, args in normalized_test_description_func()] def PreShutdownTestLoader(): return _make_loader(False) def PostShutdownTestLoader(): return _make_loader(True) def _make_loader(load_post_shutdown): class _loader(unittest.TestLoader): """TestLoader selectively loads pre-shutdown or post-shutdown tests.""" def loadTestsFromTestCase(self, testCaseClass): if getattr(testCaseClass, '__post_shutdown_test__', False) == load_post_shutdown: # Isolate test classes instances on a per parameterization basis cases = super(_loader, self).loadTestsFromTestCase( type(testCaseClass.__name__, (testCaseClass,), { '__module__': testCaseClass.__module__ }) ) return cases # Empty test suites will be ignored by the test runner return self.suiteClass() return _loader() def _bind_test_args_to_tests(context, test_suite): # Look for tests that expect additional arguments and bind items from the context # to the tests for test in 
_iterate_tests_in_test_suite(test_suite): # Need to reach a little deep into the implementation here to get the test # method. See unittest.TestCase test_method = getattr(test, test._testMethodName) # Replace the test with a functools.partial that has the arguments # provided by the test context already bound setattr( test, test._testMethodName, _partially_bind_matching_args(test_method, context) ) test.setUp = _partially_bind_matching_args( test.setUp, context ) test.tearDown = _partially_bind_matching_args( test.tearDown, context ) for test_class in _iterate_test_classes_in_test_suite(test_suite): test_class.setUpClass = _partially_bind_matching_args( test_class.setUpClass, context ) test_class.tearDownClass = _partially_bind_matching_args( test_class.tearDownClass, context ) def _partially_bind_matching_args(unbound_function, arg_candidates): function_args = inspect.signature(unbound_function).parameters # We only want to bind the part of the context matches the test args matching_args = {k: v for (k, v) in arg_candidates.items() if k in function_args} return functools.partial(unbound_function, **matching_args) def _give_attribute_to_tests(data, attr_name, test_suite): def _warn_getter(self): if not hasattr(self, '__warned'): warnings.warn( 'Automatically adding attributes like self.{0} ' 'to the test class will be deprecated in a future release. 
' 'Instead, add {0} to the test method argument list to ' 'access the test object you need'.format(attr_name) ) setattr(self, '__warned', True) return data # The effect of this is that every test will have `self.attr_name` available to it so that # it can interact with ROS2 or the process exit coes, or IO or whatever data we want for cls in _iterate_test_classes_in_test_suite(test_suite): setattr(cls, attr_name, property(fget=_warn_getter)) def _iterate_test_classes_in_test_suite(test_suite): classes = [] for t in _iterate_tests_in_test_suite(test_suite): if t.__class__ not in classes: classes.append(t.__class__) yield t.__class__ def _iterate_test_suites(test_suite): try: iter(test_suite) except TypeError: pass else: if isinstance(test_suite, unittest.TestSuite): yield test_suite for test in test_suite: yield from _iterate_test_suites(test) def _iterate_tests_in_test_suite(test_suite): try: iter(test_suite) except TypeError: # Base case - test_suite is not iterable, so it must be an individual test method yield test_suite else: # Otherwise, it's a test_suite, or a list of individual test methods. recurse for test in test_suite: yield from _iterate_tests_in_test_suite(test)
ros2/launch
launch_testing/launch_testing/loader.py
Python
apache-2.0
12,234
# Domato - main generator script # -------------------------------------- # # Written and maintained by Ivan Fratric <ifratric@google.com> # # Copyright 2017 Google Inc. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import os import re import random import argparse from grammar import Grammar from svg_tags import _SVG_TYPES from html_tags import _HTML_TYPES _N_MAIN_LINES = 1000 _N_EVENTHANDLER_LINES = 500 _N_ADDITIONAL_HTMLVARS = 5 def generate_html_elements(ctx, n): for i in range(n): tag = random.choice(list(_HTML_TYPES)) tagtype = _HTML_TYPES[tag] ctx['htmlvarctr'] += 1 varname = 'htmlvar%05d' % ctx['htmlvarctr'] ctx['htmlvars'].append({'name': varname, 'type': tagtype}) ctx['htmlvargen'] += '/* newvar{' + varname + ':' + tagtype + '} */ var ' + varname + ' = document.createElement(\"' + tag + '\"); //' + tagtype + '\n' def add_html_ids(matchobj, ctx): tagname = matchobj.group(0)[1:-1] if tagname in _HTML_TYPES: ctx['htmlvarctr'] += 1 varname = 'htmlvar%05d' % ctx['htmlvarctr'] ctx['htmlvars'].append({'name': varname, 'type': _HTML_TYPES[tagname]}) ctx['htmlvargen'] += '/* newvar{' + varname + ':' + _HTML_TYPES[tagname] + '} */ var ' + varname + ' = document.getElementById(\"' + varname + '\"); //' + _HTML_TYPES[tagname] + '\n' return matchobj.group(0) + 'id=\"' + varname + '\" ' elif tagname in _SVG_TYPES: ctx['svgvarctr'] += 1 varname = 'svgvar%05d' % ctx['svgvarctr'] ctx['htmlvars'].append({'name': varname, 'type': 
_SVG_TYPES[tagname]}) ctx['htmlvargen'] += '/* newvar{' + varname + ':' + _SVG_TYPES[tagname] + '} */ var ' + varname + ' = document.getElementById(\"' + varname + '\"); //' + _SVG_TYPES[tagname] + '\n' return matchobj.group(0) + 'id=\"' + varname + '\" ' else: return matchobj.group(0) def generate_function_body(jsgrammar, htmlctx, num_lines): js = '' js += 'var fuzzervars = {};\n\n' js += "SetVariable(fuzzervars, window, 'Window');\nSetVariable(fuzzervars, document, 'Document');\nSetVariable(fuzzervars, document.body.firstChild, 'Element');\n\n" js += '//beginjs\n' js += htmlctx['htmlvargen'] js += jsgrammar._generate_code(num_lines, htmlctx['htmlvars']) js += '\n//endjs\n' js += 'var fuzzervars = {};\nfreememory()\n' return js def check_grammar(grammar): """Checks if grammar has errors and if so outputs them. Args: grammar: The grammar to check. """ for rule in grammar._all_rules: for part in rule['parts']: if part['type'] == 'text': continue tagname = part['tagname'] # print tagname if tagname not in grammar._creators: print('No creators for type ' + tagname) def generate_new_sample(template, htmlgrammar, cssgrammar, jsgrammar): """Parses grammar rules from string. Args: template: A template string. htmlgrammar: Grammar for generating HTML code. cssgrammar: Grammar for generating CSS code. jsgrammar: Grammar for generating JS code. Returns: A string containing sample data. 
""" result = template css = cssgrammar.generate_symbol('rules') html = htmlgrammar.generate_symbol('bodyelements') htmlctx = { 'htmlvars': [], 'htmlvarctr': 0, 'svgvarctr': 0, 'htmlvargen': '' } html = re.sub( r'<[a-zA-Z0-9_-]+ ', lambda match: add_html_ids(match, htmlctx), html ) generate_html_elements(htmlctx, _N_ADDITIONAL_HTMLVARS) result = result.replace('<cssfuzzer>', css) result = result.replace('<htmlfuzzer>', html) handlers = False while '<jsfuzzer>' in result: numlines = _N_MAIN_LINES if handlers: numlines = _N_EVENTHANDLER_LINES else: handlers = True result = result.replace( '<jsfuzzer>', generate_function_body(jsgrammar, htmlctx, numlines), 1 ) return result def generate_samples(template, outfiles): """Generates a set of samples and writes them to the output files. Args: grammar_dir: directory to load grammar files from. outfiles: A list of output filenames. """ grammar_dir = os.path.join(os.path.dirname(__file__), 'rules') htmlgrammar = Grammar() err = htmlgrammar.parse_from_file(os.path.join(grammar_dir, 'html.txt')) # CheckGrammar(htmlgrammar) if err > 0: print('There were errors parsing html grammar') return cssgrammar = Grammar() err = cssgrammar.parse_from_file(os.path.join(grammar_dir ,'css.txt')) # CheckGrammar(cssgrammar) if err > 0: print('There were errors parsing css grammar') return jsgrammar = Grammar() err = jsgrammar.parse_from_file(os.path.join(grammar_dir,'js.txt')) # CheckGrammar(jsgrammar) if err > 0: print('There were errors parsing js grammar') return # JS and HTML grammar need access to CSS grammar. 
# Add it as import htmlgrammar.add_import('cssgrammar', cssgrammar) jsgrammar.add_import('cssgrammar', cssgrammar) for outfile in outfiles: result = generate_new_sample(template, htmlgrammar, cssgrammar, jsgrammar) if result is not None: print('Writing a sample to ' + outfile) try: with open(outfile, 'w') as f: f.write(result) except IOError: print('Error writing to output') def get_argument_parser(): parser = argparse.ArgumentParser(description="DOMATO (A DOM FUZZER)") parser.add_argument("-f", "--file", help="File name which is to be generated in the same directory") parser.add_argument('-o', '--output_dir', type=str, help='The output directory to put the generated files in') parser.add_argument('-n', '--no_of_files', type=int, help='number of files to be generated') return parser def main(): fuzzer_dir = os.path.dirname(__file__) with open(os.path.join(fuzzer_dir, "template.html"), "r") as f: template = f.read() parser = get_argument_parser() args = parser.parse_args() if args.file: generate_samples(template, [args.file]) elif args.output_dir: if not args.no_of_files: print("Please use switch -n to specify the number of files") else: print('Running on ClusterFuzz') out_dir = args.output_dir nsamples = args.no_of_files print('Output directory: ' + out_dir) print('Number of samples: ' + str(nsamples)) if not os.path.exists(out_dir): os.mkdir(out_dir) outfiles = [] for i in range(nsamples): outfiles.append(os.path.join(out_dir, 'fuzz-' + str(i).zfill(5) + '.html')) generate_samples(template, outfiles) else: parser.print_help() if __name__ == '__main__': main()
googleprojectzero/domato
generator.py
Python
apache-2.0
7,760
import random
import glob
from os import path
import biohex
import math
import traceback


class locals():
    """Storage class for constants.

    NOTE(review): the name shadows the builtin ``locals``; kept unchanged
    because external code references ``life.locals.distanceLookup``.
    """

    class distanceLookup():
        # Strategies for locating nearby bits; see Bit.lookout().
        RING_CACHE = 0   # scan cached hexagon rings around the bit
        BIT_LISTER = 1   # scan the global per-name bit list by distance


try:
    # Load up bit graphic asset names (the .png file stems).
    bitList = []
    for file in glob.glob("biohex/bitGraphics/*.png"):
        bitList.append(path.basename(file)[:-4])
except Exception:
    # Graphics assets are optional (e.g. headless runs) -- best effort only.
    pass


def getBit(x, y):
    """Return the bit at this location. If there is no bit, return False."""
    try:
        return Bit.world.bitPositions[x][y]
    except IndexError:
        return False


def isValid(x, y):
    """Return True if (x, y) is inside the world bounds and unoccupied."""
    return (x >= 0 and x < Bit.world.width and
            y >= 0 and y < Bit.world.height and
            not getBit(x, y))


class Looper(object):
    """
    Paired with a Bit, creates a class that will periodically run
    a command and repeat.
    """

    def __init__(self, bit, command, delay):
        self.timer = delay
        self.delay = delay
        self.command = command
        self.bit = bit
        self.bit.addLooper(self)
        self.start()

    def __serialize__(self):
        """Return a version of the looper that can be pickled."""
        # NOTE: THIS MEANS COMMANDS CAN ONLY BE DERIVED FROM THE CURRENT BIT!
        serializedCommand = self.command.__name__
        return (self.timer, self.delay, serializedCommand, self.paused)

    def pause(self):
        self.paused = True

    def start(self):
        self.paused = False

    def tick(self):
        # Count down; fire the command and reset when the timer expires.
        if not self.paused:
            self.timer -= 1
            if self.timer <= 0:
                self.timer = self.delay
                self.command()

    def stop(self):
        """Destroy looper from bit memory."""
        self.bit.removeLooper(self)


def loadSavedBit(pickledBit, index):
    """Rebuild a Bit from a serialized (pickled) dict.

    BUG FIX: the original signature carried a stray ``self`` parameter even
    though World.load() calls this as a module-level function with two
    arguments; it also used eval() on an assignment statement (a
    SyntaxError, since eval accepts expressions only) and referenced an
    unqualified ``Vector`` name.

    Args:
        pickledBit: Dict of attribute name -> value produced by makePickle().
        index: Position of the bit in the saved list (skipped on restore).
    """
    newBit = Bit(pickledBit['x'], pickledBit['y'])
    for variableName, value in pickledBit.items():
        if variableName != "index":
            # setattr replaces the original eval()-based assignment; it is
            # both valid and does not execute attacker-controlled strings.
            setattr(newBit, variableName, value)
    # Re-inflate loopers from their serialized (timer, delay, name, paused)
    # tuples; commands are resolved as methods of the new bit.
    for pickledLooper in newBit.loopers:
        looperCommand = getattr(newBit, pickledLooper[2])
        newLooper = Looper(newBit, looperCommand, pickledLooper[1])
        newLooper.timer = pickledLooper[0]
        newLooper.paused = pickledLooper[3]
    pickledVector = newBit.vector
    newBit.vector = biohex.hexmech.Vector(newBit, pickledVector[0])
    newBit._ahead = pickledVector[1]


class Bit(object):
    """
    The basic unit of matter in the Biohex simulations.
    Represents a single hexagon in the world.
    Only one Bit can be in each tile at a given time.
    """

    world = None
    name = "Test"

    # stores a record of all bits, accessible to others.
    # stored on a 'name' basis.
    # ex.: {"Test" : [Bit, Bit, ...], "Test 2" : [...]}
    lister = {}

    # ENTHALPY acts as a simulated unit of energy.
    # Use a enthalpy looper to simulate a breakdown
    # without a constant source of energy.
    #
    # --- Note: only presented as a constant to signify default
    #     starting enthalpy.  Each Bit has its own 'enthalpy'
    #     attribute that changes with time.
    ENTHALPY = 0

    # ENTROPY acts as the simulated reality of
    # the breakdown of complex, organized compounds
    # into a more random state.
    ENTROPY = 0

    # ATOMS: default atom inventory; subclasses override with a sequence.
    # BUG FIX: __init__ reads self.ATOMS, but the base class never defined
    # it, so instantiating Bit itself raised AttributeError.
    ATOMS = None

    # THERMAL_RANGE is a two-element list [min, max]; the bit dies if the
    # temperature at its location leaves the range.  None disables it.
    THERMAL_RANGE = None

    temperature = 0

    def __init__(self, x, y):
        self.name = self.__class__.__name__
        self.enthalpy = self.ENTHALPY
        if self.ATOMS is not None:
            self.atoms = list(self.ATOMS)
        else:
            self.atoms = None
        self.x = x
        self.y = y

        placed = self.world.addBit(self)
        if placed:
            self.dirty()
            self.destroyed = False
            # Register in the global per-name lister.
            if self.name in __class__.lister:
                __class__.lister[self.name].append(self)
            else:
                __class__.lister[self.name] = [self]
        else:
            self.destroyed = True

        self.vector = biohex.hexmech.Vector(self, 0)
        self.enthalpyLooper = None
        self.loopers = []
        self.temperature = self.world.ambientTemperature

    def getPosition(self):
        return (self.x, self.y)

    def setPosition(self, newPosition):
        self.moveto(newPosition)

    position = property(getPosition, setPosition)

    def getRings(self, distance):
        """Get all hexagon rings up to a distance around this bit's position."""
        return biohex.hexmech.getRings(self.x, self.y, distance)

    def getRing(self, distance):
        """Get a hexagon ring at a distance around this bit's position."""
        return biohex.hexmech.getRing(self.x, self.y, distance)

    def die(self):
        """Turn this bit into a Necrosis bit, preserving current atoms and enthalpy."""
        self.becomeBit(biohex.bits.Necrosis, {}, True)

    def dieThermal(self):
        """Turn this bit into a DenaturedNecrosis bit, signifying that it overheated."""
        self.becomeBit(biohex.bits.DenaturedNecrosis, {}, True)

    def dieError(self):
        """Turn this bit into a CausticNecrosis bit, signifying a critical internal error."""
        self.becomeBit(biohex.bits.CausticNecrosis, {}, False)

    def siphonEnthalpy(self, bitName, distance, amount=1, limit=None,
                       technique=locals.distanceLookup.RING_CACHE):
        """
        Extract enthalpy in stages of amount from bits of bitName
        from a certain distance.  A limit can be set on the maximum level
        of enthalpy this bit should attempt to obtain before stopping.
        Technique is a class attribute from the life.locals.distanceLookup class.
        """
        for bit in self.lookout(bitName, distance, technique=technique):
            self._siphonEnthalpyBit(bit, amount, limit)

    def siphonAtoms(self, bitName, distance, amount=(1, 1, 1), limit=None,
                    technique=locals.distanceLookup.RING_CACHE):
        """
        Extract atoms in stages of amount from bits of bitName
        from a certain distance.  A limit can be set on the maximum level
        of atoms this bit should attempt to obtain before stopping.
        Technique is a class attribute from the life.locals.distanceLookup class.
        """
        for bit in self.lookout(bitName, distance, technique=technique):
            self._siphonAtomsBit(bit, amount, limit)

    def _siphonEnthalpyBit(self, bit, amount=1, limit=None):
        # BUG FIX: the original computed ``limit - self.enthalpy`` before
        # checking ``limit``, raising TypeError whenever limit was None
        # (which is the default for every siphon* caller).
        if not limit:
            self.grabEnthalpy(bit, amount)
        else:
            maxCanGrab = limit - self.enthalpy
            if amount >= maxCanGrab:
                self.grabEnthalpy(bit, maxCanGrab)
            else:
                self.grabEnthalpy(bit, amount)

    def _siphonAtomsBit(self, bit, amount=(1, 1, 1), limit=None):
        if limit:
            # Take at most ``amount`` per type, capped so we never exceed limit.
            diffs = [0] * len(amount)
            for atomIndex in range(len(amount)):
                diffs[atomIndex] = limit[atomIndex] - self.atoms[atomIndex]
                if diffs[atomIndex] > amount[atomIndex]:
                    diffs[atomIndex] = amount[atomIndex]
            self.grabAtoms(bit, diffs)
        else:
            # NOTE(review): with no limit the original grabs the target's
            # entire inventory, ignoring ``amount`` -- preserved as-is, but
            # it looks like ``amount`` was intended here; confirm upstream.
            self.grabAtoms(bit, bit.atoms)

    def siphonResources(self, bitName, distance, amountEnthalpy=1,
                        amountAtoms=(1, 1, 1), limitEnthalpy=None,
                        limitAtoms=None,
                        technique=locals.distanceLookup.RING_CACHE):
        """
        Extract atoms and enthalpy in stages of amountEnthalpy and amountAtoms
        from bits of bitName from a certain distance.  A limit can be set on
        the maximum level of atoms or enthalpy this bit should attempt to
        obtain before stopping.  Technique is a class attribute from the
        life.locals.distanceLookup class.
        """
        for bit in self.lookout(bitName, distance, technique=technique):
            self._siphonEnthalpyBit(bit, amountEnthalpy, limitEnthalpy)
            self._siphonAtomsBit(bit, amountAtoms, limitAtoms)

    def grabAtoms(self, bit, amount=(1, 1, 1)):
        """Take atoms of amount from the bit, all-or-nothing.
        Returns bool related to success of extraction."""
        amount = list(amount)
        # BUG FIX: the original advanced its comparison index only when a
        # type succeeded, so a single short atom type made every later type
        # compare against the wrong slot.
        if all(have >= want for have, want in zip(bit.atoms, amount)):
            for atomIndex in range(len(amount)):
                bit.atoms[atomIndex] -= amount[atomIndex]
                self.atoms[atomIndex] += amount[atomIndex]
            return True
        else:
            return False

    def grabEnthalpy(self, bit, amount=1):
        """Take enthalpy of amount from the bit (partial grab if it has less).
        Returns bool related to success of extraction."""
        if bit.enthalpy >= amount:
            bit.enthalpy -= amount
            self.enthalpy += amount
            self.enthalpyUpdate()
            bit.enthalpyUpdate()
            return True
        elif bit.enthalpy < amount and bit.enthalpy > 0:
            # Drain whatever is left.
            self.enthalpy += bit.enthalpy
            bit.enthalpy = 0
            self.enthalpyUpdate()
            bit.enthalpyUpdate()
            return True
        else:
            return False

    def checkTemperature(self):
        # Die of overheating/freezing when outside the allowed range.
        if self.THERMAL_RANGE:
            if self.temperature < self.THERMAL_RANGE[0] or \
                    self.temperature > self.THERMAL_RANGE[1]:
                self.dieThermal()

    def tempRange(self, min, max):
        """Returns 1 if below minimum, returns 2 if above max,
        returns 0 if in range."""
        if self.world.getTemp(self.x, self.y) < min:
            return 1
        elif self.world.getTemp(self.x, self.y) > max:
            return 2
        else:
            return 0

    def becomeBit(self, bitclass, args={}, saveEnthalpy=True):
        """
        Turn this current bit into another bit, preserving atoms in the
        process and optionally preserving enthalpy.
        This will always be successful.
        """
        self.destroy()
        # We know that this will be successful 100% of the time.
        # you cannot fail making a bit where you just destroyed one.
        if saveEnthalpy:
            madeBit = self.makeBit(bitclass, (self.x, self.y), args,
                                   enthalpy=self.enthalpy, atoms=self.atoms)
        else:
            madeBit = self.makeBit(bitclass, (self.x, self.y), args,
                                   enthalpy=None, atoms=self.atoms)
        return madeBit

    def becomeBits(self, bitclass, positions, args={}, saveEnthalpy=True):
        """
        Turn this current bit into multiple other bits, preserving atoms
        across all of them in the process and optionally preserving enthalpy.
        At the end of the process, any remaining enthalpy or atoms from
        unsuccessful positions will be placed into the bit replacing the bit
        at this current position.
        """
        self.destroy()

        # BUG FIX: the de-duplicated list was built but never used (the loop
        # below kept reading the raw ``positions``), and the args index was
        # never advanced, so every new bit received args[0].
        filteredPositions = []
        for pos in positions:
            if tuple(pos) not in filteredPositions:
                filteredPositions.append(tuple(pos))
        if self.position in filteredPositions:
            filteredPositions.remove(self.position)

        amount = len(filteredPositions) + 1
        if isinstance(args, dict):
            args = [args] * amount

        for i, position in enumerate(filteredPositions):
            if saveEnthalpy:
                if not self.makeBit(bitclass, position, args[i],
                                    enthalpy=bitclass.ENTHALPY,
                                    atoms=bitclass.ATOMS):
                    break
            else:
                if not self.makeBit(bitclass, position, args[i],
                                    enthalpy=None, atoms=bitclass.ATOMS):
                    break

        # Whatever is left over lands in the replacement at our own tile.
        if saveEnthalpy:
            self.makeBit(bitclass, self.position, args[-1],
                         enthalpy=self.enthalpy, atoms=self.atoms)
        else:
            self.makeBit(bitclass, self.position, args[-1],
                         enthalpy=None, atoms=self.atoms)

    def makeBits(self, bitclass, positions, args={}, atoms=None, enthalpy=None):
        """
        Place multiple bits at positions, optionally giving each bit a certain
        amount of atoms and enthalpy from this bit.  If atoms and/or enthalpy
        amounts are not specified, the defaults inherited from bitclass are
        used.  Returns a list of bits placed if all the positions were valid.
        If even one placement fails, None is returned and no bits are created.
        """
        # BUG FIX: de-duplicated positions are now actually used, and the
        # dict check was ``isinstance(args, type(dict))`` -- i.e. against
        # ``type`` -- which never matched a plain dict.
        filteredPositions = []
        for pos in positions:
            if tuple(pos) not in filteredPositions:
                filteredPositions.append(tuple(pos))

        if enthalpy is None:
            enthalpy = bitclass.ENTHALPY
        if atoms is None:
            atoms = list(bitclass.ATOMS)
        else:
            atoms = list(atoms)

        amount = len(filteredPositions)
        if isinstance(args, dict):
            args = [args] * amount
        if args == [] or args == {}:
            args = [{}] * amount

        totalAtoms = [i * amount for i in atoms]
        totalEnthalpy = enthalpy * amount
        valids = [i for i in filteredPositions if not getBit(*i)]

        # This will completely fail if even ONE of the positions is not valid
        # (i.e. a bit is there).  Use becomeBits() if it is important that it
        # happens -- becomeBits() always works because anything leftover from
        # an invalid position goes into the "becoming" bit.
        if all(self.atoms[i] >= totalAtoms[i] for i in range(len(self.atoms))) and \
                self.enthalpy >= totalEnthalpy and \
                len(valids) == len(filteredPositions):
            for i in range(len(totalAtoms)):
                self.atoms[i] -= totalAtoms[i]
            self.enthalpy -= totalEnthalpy

            newBits = []
            for i, pos in enumerate(filteredPositions):
                newBits.append(bitclass(pos[0], pos[1], **args[i]))
            return newBits
        else:
            return None

    def makeBit(self, bitclass, position, args={}, atoms=None, enthalpy=None):
        """
        Place a bit at a new position, optionally giving that bit a certain
        amount of atoms and enthalpy from this bit.  If atoms and/or enthalpy
        amounts are not specified, the defaults inherited from bitclass are
        used.  Returns the placed bit if successful, and None if the
        placement was not valid.
        """
        if enthalpy is None:
            enthalpy = bitclass.ENTHALPY
        if atoms is None:
            atoms = list(bitclass.ATOMS)
        else:
            atoms = list(atoms)

        if all(self.atoms[i] >= atoms[i] for i in range(len(self.atoms))) and \
                self.enthalpy >= enthalpy and \
                not getBit(*position):
            for i in range(len(self.atoms)):
                self.atoms[i] -= atoms[i]
            self.enthalpy -= enthalpy

            newBit = bitclass(position[0], position[1], **args)
            # Override the class defaults with the transferred resources.
            newBit.enthalpy = enthalpy
            newBit.atoms = atoms
            return newBit
        else:
            return None

    def enthalpyDeath(self):
        """Same as die().  Can be overridden for special enthalpy deaths."""
        self.die()

    def startEnthalpy(self, multiplier=10):
        """Start an enthalpy looper with delay of multiplier ticks."""
        self.enthalpyLooper = Looper(self, self.enthalpyProgress, multiplier)

    def enthalpyUpdate(self):
        """Cause enthalpy death if conditions are satisfied."""
        if self.enthalpy <= 0:
            self.enthalpyDeath()
            if self.enthalpyLooper:
                self.enthalpyLooper.stop()

    def enthalpyProgress(self):
        """Decrease enthalpy by one, radiate heat, and call enthalpyUpdate()."""
        self.enthalpy -= 1
        self.enthalpyUpdate()
        self.world.thermalData[self.y][self.x] += .5
        self.temperature += .5
        self.world.thermalDelta += 1
        self.checkTemperature()

    def thermalEquilibrium(self):
        # Average our temperature with the tile's; track the world delta.
        worldTemp = self.world.getTemp(self.x, self.y)
        average = (worldTemp + self.temperature) / 2
        self.world.setTemp(self.x, self.y, average)
        self.world.thermalDelta += average - worldTemp
        self.temperature = average

    def getIndex(self):
        """Return index of bit in life.world.bits list."""
        if self in self.world.bits:
            return self.world.bits.index(self)
        else:
            return None

    index = property(getIndex)

    def makePickle(self):
        """Return data in the form of a serialized, picklable data piece."""
        data = {}
        for key, value in vars(self).items():
            if key == "vector":
                value = value.__serialize__()
            elif isinstance(value, Bit):
                # Bits are stored by index so references survive pickling.
                value = value.getIndex()
            elif key == "loopers":
                value = [i.__serialize__() for i in value]
            data[key] = value
        return data

    def randomWalkTowardsType(self, bitName, searchRadius,
                              technique=locals.distanceLookup.RING_CACHE):
        """
        Randomly walk the bit slowly over to any bit of bitName within the
        search radius.  Technique is a class attribute of the
        life.locals.distanceLookup class.
        """
        try:
            # BUG FIX: the technique argument is now forwarded to lookout().
            bit = random.choice(
                self.lookout(bitName, searchRadius, technique=technique))
            if self.x < bit.x:
                walkX = 1
            else:
                walkX = -1
            if self.y < bit.y:
                walkY = 1
            else:
                walkY = -1
            return self.move(walkX, walkY)
        except IndexError:
            # No candidates in range.
            return False

    def randomWalkTowards(self, bit,
                          technique=locals.distanceLookup.RING_CACHE):
        """
        Randomly walk the bit slowly over to the bit.  Technique is a class
        attribute of the life.locals.distanceLookup class.
        """
        if self.x < bit.x:
            walkX = 1
        else:
            walkX = -1
        if self.y < bit.y:
            walkY = 1
        else:
            walkY = -1
        return self.move(walkX, walkY)

    def moveTowards(self, pos):
        """Move the bit through a vector pointing to the position."""
        # BUG FIX: the clamp index was never advanced, so only pos[0] was
        # ever clamped into [-1, 1].
        for i, dim in enumerate(pos):
            if dim < -1:
                pos[i] = -1
            elif dim > 1:
                pos[i] = 1
        return self.vector.getAngleTowards(pos)

    def lookout(self, bitName, searchRadius,
                technique=locals.distanceLookup.RING_CACHE):
        """
        Scan the area of searchRadius for a bit of bitName.  Technique is a
        class attribute of the life.locals.distanceLookup class.
        """
        if not isinstance(bitName, str):
            bitName = bitName.name
        if technique == locals.distanceLookup.RING_CACHE:
            return [getBit(*i) for i in self.getRings(searchRadius)
                    if getBit(*i) and getBit(*i).name == bitName]
        elif technique == locals.distanceLookup.BIT_LISTER:
            # BUG FIX: previously compared against the non-existent attribute
            # locals.DISTANCE_SEARCH, which raised AttributeError.
            return [bit for bit in self.getList(bitName)
                    if self.distance(bit) <= searchRadius]

    def addLooper(self, newLooper):
        """Add the looper to the list of loopers."""
        if newLooper not in self.loopers:
            self.loopers.append(newLooper)

    def removeLooper(self, looper):
        """Remove the looper from this bit."""
        if looper in self.loopers:
            self.loopers.remove(looper)

    def randomWalk(self):
        """Move to a random adjacent valid tile."""
        self.moveto(random.choice(self.getAdjValids(allowNull=False)))

    def distance(self, distantBit):
        """Get the distance to another bit using euclidean math."""
        return math.sqrt((self.x - distantBit.x) ** 2 +
                         (self.y - distantBit.y) ** 2)

    def getAdjs(self, coord=None):
        """Return all adjacent coordinate sets."""
        if not coord:
            coord = (self.x, self.y)
        # BUG FIX: an explicit coord argument was silently ignored and the
        # bit's own position always used.
        return biohex.hexmech.getAdjs(coord[0], coord[1])

    def getAdjBits(self, coord=None):
        """Return all adjacent bits."""
        if not coord:
            coord = (self.x, self.y)
        bits = []
        for position in self.getAdjs(coord):
            try:
                ibit = Bit.world.bitPositions[position[0]][position[1]]
            except IndexError:
                ibit = None
            if ibit:
                bits.append(ibit)
        return bits

    def getAdjValids(self, coord=None, allowNull=True):
        """Return all adjacent coordinate sets that are empty and valid."""
        if not coord:
            coord = (self.x, self.y)
        prod = self.getAdjs(coord)
        newprod = []
        for coord in prod:
            if isValid(coord[0], coord[1]):
                newprod.append(coord)
        if not newprod and not allowNull:
            # NOTE(review): falls back to the LAST adjacent tile (loop var
            # reuse), valid or not -- preserved behavior; confirm intent.
            newprod = [coord]
        return newprod

    def moveForward(self):
        """Move according to self.vector direction."""
        return self.moveto(self.vector.ahead)

    def moveBackward(self):
        """Move according to the opposite of self.vector direction."""
        return self.moveto(self.vector.behind)

    def getList(self, name=None):
        """Get the list of all bits of name.
        If name is None, then the current name is used."""
        if name is None:
            name = self.name
        if name in __class__.lister:
            return __class__.lister[name]
        else:
            __class__.lister[name] = []
            return []

    def destroy(self):
        """Remove the bit from world and program memory."""
        Bit.world.removeBit(self)
        Bit.world.bitPositions[self.x][self.y] = 0
        Bit.world.markUpdate(self.x, self.y)
        Bit.world.unmarkDirty(self)
        self.destroyed = True
        if self in __class__.lister[self.name]:
            __class__.lister[self.name].remove(self)

    def tick(self):
        # Iterate over a copy: a looper's command may stop() it, mutating
        # self.loopers mid-iteration.
        for looper in list(self.loopers):
            looper.tick()

    def dirty(self):
        """Mark the bit as a changed visible object."""
        self.world.markDirty(self)

    def moveto(self, x, y=None):
        """Move the bit to a coordinate set.  Return bool based on success."""
        if y is None:
            # Accept a single (x, y) tuple.
            y = x[1]
            x = x[0]
        val = False
        Bit.world.bitPositions[self.x][self.y] = 0
        if isValid(x, y):
            Bit.world.markUpdate(self.x, self.y)
            self.x = x
            self.y = y
            self.dirty()
            val = True
        # Re-register at the (possibly unchanged) position.
        Bit.world.bitPositions[self.x][self.y] = self
        self.thermalEquilibrium()
        self.checkTemperature()
        return val

    def move(self, dx, dy=None):
        """Move according to a coordinate vector.  Return bool based on success."""
        if dy is None:
            dy = dx[1]
            dx = dx[0]
        return self.moveto((self.x + dx, self.y + dy))


class World(object):
    """
    Represents the virtual space that Bits can move around in and
    interact with.
    """

    experiment = None

    def __init__(self, width, height, passErrors=False):
        """
        Initialize a world with width and height in hexagon tiles.

        If passErrors is True, then when a Bit raises a fatal error,
        instead of crashing, it will decay into a special marker Bit and
        print the error in the command window.
        """
        self.width = width
        self.height = height
        self.area = self.width * self.height
        self.bits = []
        self.thermalData = []
        self.setAmbientTemperature(0)

        # all the bits that have changed and need to be redrawn
        self.dirtyBits = []

        # bitPositions[x][y] is 0 when empty, otherwise the occupying Bit.
        self.bitPositions = [[0] * height for _ in range(width)]

        # all the positions that must be updated on the screen
        self.updatePositions = []

        # Property assignment: also selects the tick() implementation.
        self.passingErrors = passErrors
        Bit.world = self

        # tracks age in ticks
        self.tickNumber = 0

        # create new experiment object
        self.experiment = biohex.experiment.Experiment(self)

    def getTemp(self, x, y):
        return self.thermalData[y][x]

    def setTemp(self, x, y, value):
        self.thermalData[y][x] = value

    def setAmbientTemperature(self, temperature):
        # Reset the whole thermal grid and every bit to one temperature.
        self.thermalData = []
        for y in range(self.height):
            self.thermalData.append([temperature] * self.width)
        self.thermalDelta = 0
        self.ambientTemperature = temperature
        for bit in self.bits:
            bit.temperature = temperature

    def thermalTransfer(self, x, y, amount, fuzzy=0):
        # Optionally jitter the target tile by +/- fuzzy in each axis.
        if fuzzy:
            x += random.randint(-fuzzy, fuzzy)
            y += random.randint(-fuzzy, fuzzy)
        self.thermalData[y][x] += amount
        self.thermalDelta += amount

    def getPassingErrors(self):
        return self._passingErrors

    def setPassingErrors(self, value):
        self._passingErrors = value
        # Swap in the matching tick implementation.
        if value:
            self.tick = self.tickPassErrors
        else:
            self.tick = self.tickNonPassErrors

    passingErrors = property(getPassingErrors, setPassingErrors)

    def save(self, filename):
        """Pickle entire current world experiment."""
        import pickle
        pickledBits = [bit.makePickle() for bit in self.bits]
        with open(filename, 'wb') as file:
            pickle.dump(pickledBits, file)

    def load(self, filename):
        """Load a pickled world experiment.

        NOTE(review): pickle is unsafe on untrusted files -- only load saves
        you created yourself.
        """
        import pickle
        with open(filename, 'rb') as file:
            pickledBits = pickle.load(file)
        # BUG FIX: the loop variable previously shadowed the pickle module,
        # and loadSavedBit carried a stray self parameter.
        for index, pickledBit in enumerate(pickledBits):
            loadSavedBit(pickledBit, index)

    # these are not needed for this instance of a World
    # because they are graphical methods.  These are
    # overridden by a graphical version of the World
    # in graphics.py.
    def markDirty(self, bit):
        pass

    def unmarkDirty(self, bit):
        pass

    def markUpdate(self, x, y):
        pass

    def flush(self):
        pass

    def erase(self, x, y):
        """Destroy the bit at this position if there is one."""
        # Iterate over a copy: destroy() mutates self.bits.
        for bit in list(self.bits):
            if bit.x == x and bit.y == y:
                bit.destroy()

    def addBit(self, bit):
        """If there is no bit in the bit's position, add the bit."""
        if bit not in self.bits and not getBit(bit.x, bit.y):
            try:
                self.bitPositions[bit.x][bit.y] = bit
                self.bits.append(bit)
                return True
            except IndexError:
                # Position outside the world grid.
                return False
        else:
            return False

    def removeBit(self, bit):
        """Destroy the bit and remove all memory access."""
        if bit in self.bits:
            self.bits.remove(bit)
            self.bitPositions[bit.x][bit.y] = 0

    def tickPassErrors(self):
        self.tickNumber += 1
        # Copy: a tick may destroy bits and mutate self.bits.
        for bit in list(self.bits):
            try:
                bit.tick()
            except Exception:
                # BUG FIX: the message was a discarded expression after
                # print(self) -- a Python-2 print-statement leftover.
                print("----------------------------")
                print(self, " DIED BY FATAL ERROR:")
                print(traceback.format_exc())
                print("----------------------------\n")
                bit.dieError()

    def tickNonPassErrors(self):
        self.tickNumber += 1
        for bit in list(self.bits):
            bit.tick()

    def tick(self):
        # Placeholder; the passingErrors property binds the real handler
        # as an instance attribute.
        pass
omegachysis/biohex
biohex/life.py
Python
apache-2.0
28,573
from unittest.mock import patch from unittest.mock import MagicMock from django.test import override_settings from django.test import TestCase from django.conf import settings from orchestra.google_apps.convenience import _get_image_mimetype from orchestra.google_apps.convenience import create_document_from_template from orchestra.google_apps.convenience import create_folder_with_permissions from orchestra.google_apps.convenience import create_media_folder_with_images from orchestra.google_apps.convenience import Service from orchestra.google_apps.convenience import add_image from orchestra.google_apps.errors import InvalidUrlError from orchestra.google_apps.errors import GoogleDriveError from orchestra.tests.helpers.google_apps import mock_create_drive_service from orchestra.tests.helpers.google_apps import fake_image_get @override_settings(GOOGLE_APPS=True) class TestGoogleAppsConvenience(TestCase): def setUp(self): super(TestGoogleAppsConvenience, self).setUp() def test_get_image_mimetype(self): # Content type is not provided. url = 'http://nocontenttype.com/image.jpeg' response = fake_image_get(url) mimetype = _get_image_mimetype(response, url) self.assertEquals(mimetype, 'image/jpeg') # Content type is provided. url = 'http://contenttype.com/image.jpg' response = fake_image_get(url) mimetype = _get_image_mimetype(response, url) self.assertEquals(mimetype, 'image/jpg') @patch('requests.get', side_effect=fake_image_get) @patch.object(Service, '_create_drive_service', new=mock_create_drive_service) def test_add_image(self, requests_get): # Make sure if incorrect url then image is not returned. 
with self.assertRaises(InvalidUrlError): add_image(MagicMock(), 'test_folder', 'http://in.gogo/test.jp') service = Service(settings.GOOGLE_P12_PATH, settings.GOOGLE_SERVICE_EMAIL) image_data = add_image(service, 'test_folder', 'http://nocontenttype.com/image.jpeg') self.assertEquals(image_data, {'id': 1}) image_data = add_image(service, 'test_folder', 'http://nocontenttype.com/error.jpg') self.assertIsNone(image_data) @patch('requests.get', side_effect=fake_image_get) @patch.object(Service, '_create_drive_service', new=mock_create_drive_service) def test_create_media_folder(self, requests_get): with self.assertRaises(GoogleDriveError): create_media_folder_with_images('test_parent_id', ['http://in.gogo/image.jpg'], 'error') # Make sure that if the correct image is provided, # it creates a folder. media_images = create_media_folder_with_images( 'test_parent_id', ['http://inp.gogo/image.jpg'], 'test') self.assertEquals(media_images['folder']['id'], 1) self.assertEquals( media_images['image_counter']['uploaded_images'], 1) # Make sure that if the incorrect image is provided, # still returns a new folder. media_images = create_media_folder_with_images( 'test_parent_id', ['http://nocontenttype.com/test.jp', None], 'test') self.assertEquals(media_images['folder']['id'], 1) self.assertEquals( media_images['image_counter']['not_uploaded_images'], 2) with self.assertRaises(GoogleDriveError): folder = create_folder_with_permissions( 'test_parent_id', 'error') # In order to test permission failure we did something convoluted. # create_folder_with_permissions calls service.insert_folder which in # turn will create a new folder named 'error'. # When we try to add permission to folder 'error' that fails in # our helper function and raises an error. 
folder = create_folder_with_permissions('test_parent_id', 'permission_fail') self.assertEquals(folder['id'], 'error') @patch.object(Service, '_create_drive_service', new=mock_create_drive_service) def test_delete_folder(self): service = Service(settings.GOOGLE_P12_PATH, settings.GOOGLE_SERVICE_EMAIL) folder = service.delete_folder('test') self.assertEquals(folder['id'], 1) folder = service.delete_folder('error') self.assertIsNone(folder) @patch.object(Service, '_create_drive_service', new=mock_create_drive_service) def test_create_document_from_template(self): upload_info = create_document_from_template('test_id', 'test_filename') self.assertEquals(upload_info['id'], 1) with self.assertRaises(GoogleDriveError): create_document_from_template('error', 'test_filename')
Sonblind/orchestra
orchestra/tests/google_apps/test_convenience.py
Python
apache-2.0
5,072
""" Linux kernel system control from Python. """
kdart/pycopia
core/pycopia/OS/Linux/sysctl.py
Python
apache-2.0
52
#!/usr/bin/python
"""Client connection metadata for the Aloha server."""
import json


class Client(object):
    """A connected client: host, port, communication channel, and client type."""

    def __init__(self, clientHostName, clientPort, channel):
        """Record the client's endpoint and detect its type.

        clientHostName -- hostname or address the client connected from
        clientPort     -- port the client connected from
        channel        -- the channel this client is subscribed to
        """
        self.clientHostName = clientHostName
        self.clientPort = clientPort
        self.clientType = self.getClientType()
        self.channel = channel

    # TODO: implement this method properly — it currently always reports
    # "Web Client" regardless of the actual client.
    def getClientType(self):
        """Return the client type label (currently hard-coded to web)."""
        try:
            # Legacy instance attributes kept for backward compatibility with
            # existing callers that read them.
            self.WebClient = "Web Client"
            self.MobileClient = "Mobile Client"
            return self.WebClient
        except ImportError as e:
            # NOTE(review): the body above cannot raise ImportError, so this
            # branch is dead; preserved (with a Py3-compatible print — the
            # original used a Python-2 print statement) until real type
            # detection replaces the stub.
            print(json.dumps({"status": "error",
                              "Client.getClientType": str(e)}))
            exit(0)
lyubomir1993/AlohaServer
Client.py
Python
apache-2.0
616
#!/usr/bin/env python
"""Queue definitions.

This module defines the queues where a worker may look for work.
"""

from grr_response_core.lib import rdfvalue

# Queues that a standard worker should work from, highest priority first.
#
# "W" and "CA" are deprecated, but included until we are sure that they are
# empty.
WORKER_LIST = [rdfvalue.RDFURN(queue_name)
               for queue_name in ("CA", "W", "E", "F", "H", "S")]

# The normal queue for enrollment messages.
ENROLLMENT = rdfvalue.RDFURN("E")

# The normal queue for flows. Must be kept synchronized with the default value
# of FlowRunnerArgs.queue.
FLOWS = rdfvalue.RDFURN("F")

# The normal queue for hunts. Must be kept synchronized with the default value
# of HuntRunnerArgs.queue.
HUNTS = rdfvalue.RDFURN("H")

# The normal queue for statistics processing.
STATS = rdfvalue.RDFURN("S")
google/grr
grr/core/grr_response_core/lib/queues.py
Python
apache-2.0
822
# System """Concordance Checking.""" import logging import os import functools # Third Party from sklearn.cluster import MiniBatchKMeans from scipy.stats import ks_2samp import numpy as np import pandas as pd # First Party from submission_criteria import common def has_concordance(P1, P2, P3, c1, c2, c3, threshold=0.12): """Checks that the clustered submission data conforms to a concordance threshold Paramters: ---------- P1 : ndarray Sorted validation submission probabilities based on the id P2 : ndarray Sorted test submission probabilities based on the id P3 : ndarray Sorted live submission probabilities based on the id c1 : ndarray Clustered validation from the tournament data c2 : ndarray Clustered test from the tournament data c3 : ndarray Clustered live from the tournament data threshold : float, optional, default: 0.12 The threshold in which our mean ks_score has to be under to have "concordance" Returns: -------- concordance : bool Boolean value of the clustered submission data having concordance """ ks = [] for i in set(c1): ks_score = max( ks_2samp(P1.reshape(-1)[c1 == i], P2.reshape(-1)[c2 == i])[0], ks_2samp(P1.reshape(-1)[c1 == i], P3.reshape(-1)[c3 == i])[0], ks_2samp(P3.reshape(-1)[c3 == i], P2.reshape(-1)[c2 == i])[0]) ks.append(ks_score) logging.getLogger().info("Noticed score {}".format(np.mean(ks))) return np.mean(ks) < threshold def make_clusters(X, X_1, X_2, X_3): """Split submission data into 3 clusters using K-Means clustering Parameters: ----------- X: ndarray tournament data for the competition round X_1: ndarray sorted validation data ids from tournament data X_2: ndarray sorted test ids data from tournament data X_3: ndarray sorted live ids data from tournament data Returns: -------- c1: nparray Clustered validation data c2: nparray Clustered test data c3: nparray Cluster live data """ logging.getLogger().info("New competition, clustering dataset") kmeans = MiniBatchKMeans(n_clusters=5, random_state=1337) kmeans.fit(X) c1, c2, c3 = 
kmeans.predict(X_1), kmeans.predict(X_2), kmeans.predict(X_3) logging.getLogger().info("Finished clustering") return c1, c2, c3 @functools.lru_cache(maxsize=2) def get_ids(filemanager, tournament_number, round_number): """Gets the ids from submission data based on the round_number Parameters: ----------- filemanager : FileManager S3 Bucket data access object for querying competition datasets round_number : int The numerical id of the competition Returns: -------- val : list List of all ids in the 'validation' dataset test : list List of all ids in the 'test' dataset live : list List of all ids in the 'live' dataset """ extract_dir = filemanager.download_dataset(tournament_number, round_number) tournament = pd.read_csv( os.path.join(extract_dir, "numerai_tournament_data.csv")) val = tournament[tournament["data_type"] == "validation"] test = tournament[tournament["data_type"] == "test"] live = tournament[tournament["data_type"] == "live"] return list(val["id"]), list(test["id"]), list(live["id"]) def get_sorted_split(data, val_ids, test_ids, live_ids): """Split the competition data into validation, test, and live data sets in a sorted fashion Parameters: ----------- data : DataFrame Tournament data for the competition round val_ids : list List of all validation data ids test_ids : list List of all test data ids live_ids : list List of all live data ids Returns: -------- validation : ndarray Validation data features sorted by id test : ndarray Test data features sorted by id live : ndarray Live data features sorted by id """ validation = data[data["id"].isin(val_ids)] test = data[data["id"].isin(test_ids)] live = data[data["id"].isin(live_ids)] validation = validation.sort_values("id") test = test.sort_values("id") live = live.sort_values("id") if any(["feature" in c for c in list(validation)]): f = [c for c in list(validation) if "feature" in c] else: f = ["probability"] validation = validation[f] test = test[f] live = live[f] return validation.as_matrix(), 
test.as_matrix(), live.as_matrix() @functools.lru_cache(maxsize=2) def get_competition_variables(tournament_number, round_number, filemanager): """Return the K-Means Clustered tournament data for the competition round Parameters: ----------- round_id : string UUID of the competition round of the tournament db_manager : DatabaseManager DB data access object that has read and write functions to NoSQL DB filemanager : FileManager S3 Bucket data access object for querying competition datasets Returns: -------- variables : dictionary Holds clustered tournament data and the round_number """ extract_dir = filemanager.download_dataset(tournament_number, round_number) training = pd.read_csv( os.path.join(extract_dir, "numerai_training_data.csv")) tournament = pd.read_csv( os.path.join(extract_dir, "numerai_tournament_data.csv")) val_ids, test_ids, live_ids = get_ids(filemanager, tournament_number, round_number) return get_competition_variables_from_df( round_number, training, tournament, val_ids, test_ids, live_ids) def get_competition_variables_from_df( round_number: str, training: pd.DataFrame, tournament: pd.DataFrame, val_ids: list, test_ids: list, live_ids: list) -> dict: f = [c for c in list(tournament) if "feature" in c] # TODO the dropna is a hack workaround for https://github.com/numerai/api-ml/issues/68 X = training[f].dropna().as_matrix() X = np.append(X, tournament[f].as_matrix(), axis=0) X_1, X_2, X_3 = get_sorted_split(tournament, val_ids, test_ids, live_ids) c1, c2, c3 = make_clusters(X, X_1, X_2, X_3) variables = { "round_number": round_number, "cluster_1": c1, "cluster_2": c2, "cluster_3": c3, } return variables def get_submission_pieces(submission_id, tournament, round_number, db_manager, filemanager): """Get validation, test, and live ids sorted from submission_id Parameters: ----------- submission_id : string ID of the submission round_number : int Numerical ID of the competition round of the tournament db_manager : DatabaseManager DB data access object 
that has read and write functions to NoSQL DB filemanager : FileManager S3 Bucket data access object for querying competition datasets Returns: -------- validation : ndarray Sorted validation ids from submission data tests : ndarray Sorted test ids from submission data live : ndarray Sorted live ids from submission data """ s3_file, _ = common.get_filename(db_manager.postgres_db, submission_id) data = filemanager.read_csv(s3_file) val_ids, test_ids, live_ids = get_ids(filemanager, tournament, round_number) validation, tests, live = get_sorted_split(data, val_ids, test_ids, live_ids) return validation, tests, live def submission_concordance(submission, db_manager, filemanager): """Determine if a submission is concordant and write the result to DB Parameters: ----------- submission : dictionary Submission data that holds the ids of submission and competition round db_manager : DatabaseManager DB data access object that has read and write functions to NoSQL DB filemanager : FileManager S3 Bucket data access object for querying competition datasets """ tournament, round_number, _dataset_path = common.get_round( db_manager.postgres_db, submission["submission_id"]) clusters = get_competition_variables(tournament, round_number, filemanager) P1, P2, P3 = get_submission_pieces(submission["submission_id"], tournament, round_number, db_manager, filemanager) c1, c2, c3 = clusters["cluster_1"], clusters["cluster_2"], clusters[ "cluster_3"] try: concordance = has_concordance(P1, P2, P3, c1, c2, c3) except IndexError: # If we had an indexing error, that is because the round restart, and we need to try getting the new competition variables. 
get_competition_variables.cache_clear() clusters = get_competition_variables(tournament, round_number, filemanager) c1, c2, c3 = clusters["cluster_1"], clusters["cluster_2"], clusters[ "cluster_3"] concordance = has_concordance(P1, P2, P3, c1, c2, c3) print('writing concordance', submission['submission_id'], concordance) db_manager.write_concordance(submission['submission_id'], concordance)
numerai/submission-criteria
submission_criteria/concordance.py
Python
apache-2.0
9,505
# debugging
from lxml import etree

# 3rd-party modules
from lxml.builder import E

# module packages
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
from jnpr.junos.cfg.srx.shared_ab_addr import SharedAddrBookAddr
from jnpr.junos.cfg.srx.shared_ab_set import SharedAddrBookSet


class SharedAddrBook(Resource):
    """
    [edit security address-book <name>]

    Resource <name>
      The address book name, string

    Manages:
      addr - SharedAddrBookAddr resources
      set  - SharedAddrBookAddrSet resources
    """
    PROPERTIES = [
        'description',
        '$addrs',       # read-only addresss
        '$sets',        # read-only address-sets
        'zone_list'     # attached zone
    ]

    def __init__(self, junos, name=None, **kvargs):
        # No name means this instance acts as the resource manager; the
        # per-book sub-managers are only built for a named resource.
        if name is None:
            # resource-manager
            Resource.__init__(self, junos, name, **kvargs)
            return
        # Sub-resource managers for the addresses and address-sets contained
        # in this address book.
        self.addr = SharedAddrBookAddr(junos, parent=self)
        self.set = SharedAddrBookSet(junos, parent=self)
        self._manages = ['addr', 'set']
        Resource.__init__(self, junos, name, **kvargs)

    def _xml_at_top(self):
        # Build the config XML stub down to this named address-book:
        # <security><address-book><name>...</name></address-book></security>
        return E.security(
            E('address-book', E.name(self._name))
        )

    # -----------------------------------------------------------------------
    # XML reading
    # -----------------------------------------------------------------------

    def _xml_hook_read_begin(self, xml):
        # Restrict the read to the fields we manage: description, the names
        # of addresses and address-sets, and zone attachments.
        ab = xml.find('.//address-book')
        ab.append(E('description'))
        ab.append(E('address', JXML.NAMES_ONLY))
        ab.append(E('address-set', JXML.NAMES_ONLY))
        ab.append(E('attach'))
        return True

    def _xml_at_res(self, xml):
        # Locate this resource's element within the returned config XML.
        return xml.find('.//address-book')

    def _xml_to_py(self, as_xml, to_py):
        # Convert the address-book XML into the property dict: status,
        # optional description, and the read-only name lists.
        Resource._r_has_xml_status(as_xml, to_py)
        Resource.copyifexists(as_xml, 'description', to_py)
        to_py['$addrs'] = [name.text for name in as_xml.xpath('address/name')]
        to_py['$sets'] = [
            name.text for name in as_xml.xpath('address-set/name')]

    # -----------------------------------------------------------------------
    # XML writing
    # -----------------------------------------------------------------------

    def _xml_change_zone_list(self, xml):
        # Emit add/delete <zone> entries under <attach> for the changed
        # zone_list property.
        x_attach = E('attach')
        self._xml_list_property_add_del_names(x_attach,
                                              prop_name='zone_list',
                                              element_name='zone')
        xml.append(x_attach)
        return True

    # -----------------------------------------------------------------------
    # Manager List, Catalog
    # -----------------------------------------------------------------------

    def _r_list(self):
        # NOTE(review): listing is not yet implemented for this resource.
        raise RuntimeError("Need to implement!")

    def _r_catalog(self):
        # NOTE(review): catalog is not yet implemented for this resource.
        raise RuntimeError("Need to implement!")
dgjnpr/py-junos-eznc
lib/jnpr/junos/cfg/srx/shared_ab.py
Python
apache-2.0
2,925
#!/usr/bin/env python import socket import unittest from framework import VppTestCase, VppTestRunner from template_bd import BridgeDomain from scapy.layers.l2 import Ether from scapy.layers.inet import IP, UDP from scapy.layers.vxlan import VXLAN from scapy.utils import atol class TestVxlan(BridgeDomain, VppTestCase): """ VXLAN Test Case """ def __init__(self, *args): BridgeDomain.__init__(self) VppTestCase.__init__(self, *args) def encapsulate(self, pkt, vni): """ Encapsulate the original payload frame by adding VXLAN header with its UDP, IP and Ethernet fields """ return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) / IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) / UDP(sport=self.dport, dport=self.dport, chksum=0) / VXLAN(vni=vni, flags=self.flags) / pkt) def encap_mcast(self, pkt, src_ip, src_mac, vni): """ Encapsulate the original payload frame by adding VXLAN header with its UDP, IP and Ethernet fields """ return (Ether(src=src_mac, dst=self.mcast_mac4) / IP(src=src_ip, dst=self.mcast_ip4) / UDP(sport=self.dport, dport=self.dport, chksum=0) / VXLAN(vni=vni, flags=self.flags) / pkt) def decapsulate(self, pkt): """ Decapsulate the original payload frame by removing VXLAN header """ # check if is set I flag self.assertEqual(pkt[VXLAN].flags, int('0x8', 16)) return pkt[VXLAN].payload # Method for checking VXLAN encapsulation. # def check_encapsulation(self, pkt, vni, local_only=False): # TODO: add error messages # Verify source MAC is VPP_MAC and destination MAC is MY_MAC resolved # by VPP using ARP. self.assertEqual(pkt[Ether].src, self.pg0.local_mac) if not local_only: self.assertEqual(pkt[Ether].dst, self.pg0.remote_mac) # Verify VXLAN tunnel source IP is VPP_IP and destination IP is MY_IP. self.assertEqual(pkt[IP].src, self.pg0.local_ip4) if not local_only: self.assertEqual(pkt[IP].dst, self.pg0.remote_ip4) # Verify UDP destination port is VXLAN 4789, source UDP port could be # arbitrary. 
self.assertEqual(pkt[UDP].dport, type(self).dport) # TODO: checksum check # Verify VNI self.assertEqual(pkt[VXLAN].vni, vni) @staticmethod def ip4_range(ip4n, s=10, e=20): base = str(bytearray(ip4n)[:3]) return ((base + ip) for ip in str(bytearray(range(s, e)))) @classmethod def create_vxlan_flood_test_bd(cls, vni): # Create 10 ucast vxlan tunnels under bd ip_range_start = 10 ip_range_end = 20 next_hop_address = cls.pg0.remote_ip4n for dest_addr in cls.ip4_range(next_hop_address, ip_range_start, ip_range_end): # add host route so dest_addr will not be resolved cls.vapi.ip_add_del_route(dest_addr, 32, next_hop_address) r = cls.vapi.vxlan_add_del_tunnel( src_addr=cls.pg0.local_ip4n, dst_addr=dest_addr, vni=vni) cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index, bd_id=vni) @classmethod def add_del_mcast_load(cls, is_add): ip_range_start = 10 ip_range_end = 210 for dest_addr in cls.ip4_range(cls.mcast_ip4n, ip_range_start, ip_range_end): vni = bytearray(dest_addr)[3] cls.vapi.vxlan_add_del_tunnel( src_addr=cls.pg0.local_ip4n, dst_addr=dest_addr, mcast_sw_if_index=1, vni=vni, is_add=is_add) @classmethod def add_mcast_load(cls): cls.add_del_mcast_load(is_add=1) @classmethod def del_mcast_load(cls): cls.add_del_mcast_load(is_add=0) # Class method to start the VXLAN test case. # Overrides setUpClass method in VppTestCase class. # Python try..except statement is used to ensure that the tear down of # the class will be executed even if exception is raised. # @param cls The class pointer. @classmethod def setUpClass(cls): super(TestVxlan, cls).setUpClass() try: cls.dport = 4789 cls.flags = 0x8 # Create 2 pg interfaces. cls.create_pg_interfaces(range(4)) for pg in cls.pg_interfaces: pg.admin_up() # Configure IPv4 addresses on VPP pg0. cls.pg0.config_ip4() # Resolve MAC address for VPP's IP address on pg0. 
cls.pg0.resolve_arp() # Our Multicast address cls.mcast_ip4 = '239.1.1.1' cls.mcast_ip4n = socket.inet_pton(socket.AF_INET, cls.mcast_ip4) iplong = atol(cls.mcast_ip4) cls.mcast_mac4 = "01:00:5e:%02x:%02x:%02x" % ( (iplong >> 16) & 0x7F, (iplong >> 8) & 0xFF, iplong & 0xFF) # Create VXLAN VTEP on VPP pg0, and put vxlan_tunnel0 and pg1 # into BD. cls.single_tunnel_bd = 1 r = cls.vapi.vxlan_add_del_tunnel( src_addr=cls.pg0.local_ip4n, dst_addr=cls.pg0.remote_ip4n, vni=cls.single_tunnel_bd) cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index, bd_id=cls.single_tunnel_bd) cls.vapi.sw_interface_set_l2_bridge(cls.pg1.sw_if_index, bd_id=cls.single_tunnel_bd) # Setup vni 2 to test multicast flooding cls.mcast_flood_bd = 2 cls.create_vxlan_flood_test_bd(cls.mcast_flood_bd) r = cls.vapi.vxlan_add_del_tunnel( src_addr=cls.pg0.local_ip4n, dst_addr=cls.mcast_ip4n, mcast_sw_if_index=1, vni=cls.mcast_flood_bd) cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index, bd_id=cls.mcast_flood_bd) cls.vapi.sw_interface_set_l2_bridge(cls.pg2.sw_if_index, bd_id=cls.mcast_flood_bd) # Add and delete mcast tunnels to check stability cls.add_mcast_load() cls.del_mcast_load() # Setup vni 3 to test unicast flooding cls.ucast_flood_bd = 3 cls.create_vxlan_flood_test_bd(cls.ucast_flood_bd) cls.vapi.sw_interface_set_l2_bridge(cls.pg3.sw_if_index, bd_id=cls.ucast_flood_bd) except Exception: super(TestVxlan, cls).tearDownClass() raise # Method to define VPP actions before tear down of the test case. # Overrides tearDown method in VppTestCase class. # @param self The object pointer. def tearDown(self): super(TestVxlan, self).tearDown() if not self.vpp_dead: self.logger.info(self.vapi.cli("show bridge-domain 1 detail")) self.logger.info(self.vapi.cli("show bridge-domain 2 detail")) self.logger.info(self.vapi.cli("show bridge-domain 3 detail")) self.logger.info(self.vapi.cli("show vxlan tunnel")) if __name__ == '__main__': unittest.main(testRunner=VppTestRunner)
licko/vpp-1701-licko
test/test_vxlan.py
Python
apache-2.0
7,549
import time
from pymongo import MongoClient
from datetime import datetime, timedelta
import json
from bson import Binary, Code
from bson.json_util import dumps

client = MongoClient('localhost', 27017)
db = client['election-2016']


def dumpData(yesterdayStr):
    """Dump all tweets from collection 't<yesterdayStr>' to out/<yesterdayStr>.json.

    The output file is a JSON array of the raw documents (serialized with
    bson.json_util.dumps). Nothing is written when the collection is empty.
    """
    collectionName = 't' + yesterdayStr
    cursor = db[collectionName].find()
    count = cursor.count()
    print(collectionName + ' found ' + str(count) + ' tweets')
    # dump only if data count is greater than 0
    if count > 0:
        # "with" guarantees the file is closed even if serialization fails
        # (the original left the handle open on any exception).
        with open('out/' + yesterdayStr + '.json', 'w') as outfile:
            outfile.write('[')
            for i, document in enumerate(cursor):
                outfile.write(dumps(document))
                # comma-separate all but the last document, then close the array
                if i != count - 1:
                    outfile.write(',\n')
                else:
                    outfile.write('\n]')
        # Bug fix: the original referenced the global "now", which only exists
        # when run as a script (NameError if dumpData is imported and called);
        # compute the timestamp locally instead.
        print('data for ' + yesterdayStr + ' successfully dumped at ' +
              str(datetime.now()))


# Run following code when the program starts
if __name__ == '__main__':
    # build yesterday's "month_day" collection suffix and dump it
    now = datetime.now()
    yesterday = now - timedelta(days=1)
    yesterdayStr = str(yesterday.month) + '_' + str(yesterday.day)
    dumpData(yesterdayStr)
seungkim11/election-2016
python_streaming/yesterday_dump.py
Python
apache-2.0
1,289
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Cinder OS API WSGI application.""" import sys import warnings from jacket.objects import storage warnings.simplefilter('once', DeprecationWarning) from oslo_config import cfg from oslo_log import log as logging from oslo_service import wsgi from jacket.storage import i18n i18n.enable_lazy() # Need to register global_opts from jacket.common.storage import config from jacket import rpc from jacket.storage import version CONF = cfg.CONF def initialize_application(): storage.register_all() CONF(sys.argv[1:], project='storage', version=version.version_string()) logging.setup(CONF, "storage") config.set_middleware_defaults() rpc.init(CONF) return wsgi.Loader(CONF).load_app(name='osapi_volume')
HybridF5/jacket
jacket/wsgi/storage/wsgi.py
Python
apache-2.0
1,316
#!/usr/bin/python
# Entry point for the "roger" CLI: discovers roger_<command>.py scripts under
# <root>/cli and dispatches the requested command to them.

from __future__ import print_function
import os
import sys
import subprocess
import re
import importlib
from cli.utils import Utils


def print_help_opt(opt, desc):
    """Print one aligned option/description help line."""
    print(" {} {}".format(opt.ljust(13), desc))


def roger_help(root, commands):
    """Print the top-level usage message plus a description of each command.

    Each command's description comes from its module's describe() function;
    modules without one (or whose describe() raises) get a blank description.
    """
    print("usage: roger [-h] [-v] command [arg...]\n")
    print("a command line interface to work with roger mesos.")
    print("\npositional arguments:")
    print_help_opt("command", "command to run.")
    print_help_opt("arg", "arguments to pass to the command.")
    print("\noptional arguments:")
    print_help_opt("-h, --help", "show this help message and exit.")
    print_help_opt("-v, --version", "show version information and exit.")
    print("\ncommands:")
    # Make the command modules importable by their bare module name.
    sys.path.append("{}/cli".format(root))
    for command in commands:
        description = ""
        module_name = "roger_" + command
        # NOTE(review): import_module is outside the try — a command module
        # that fails to import will crash help output; confirm intended.
        cmd_module = importlib.import_module(module_name)
        try:
            description = cmd_module.describe()
        except Exception as e:
            # Silently fall back to an empty description.
            pass
        print_help_opt(command, description)
    print("\nrun: 'roger < command > -h' for more information on a command.")


def getFiles(directory):
    """Return the plain-file names (no subdirectories) in directory."""
    filenames = next(os.walk(directory))[2]
    return filenames


def getCommands(files):
    """Extract sorted command names from roger_<command>.py filenames."""
    commands = set()
    for filename in files:
        if filename.startswith("roger_"):
            # "roger_foo.py" -> split on "roger_" or "." -> ["", "foo", "py"]
            commands.add(re.split("roger_|\.", filename)[1])
    return sorted(commands)


def getScriptCall(root, command, command_args):
    """Build the shell command line "roger_<command>.py arg1 arg2 ...".

    NOTE(review): `root` is accepted but never used here — confirm whether
    the script path was meant to be prefixed with it.
    """
    script_call = "roger_{}.py".format(command)
    for command_arg in command_args:
        script_call = script_call + " {}".format(command_arg)
    return script_call


def main():
    """Parse argv and dispatch: help, version, or a roger_<command> script."""
    root = ''
    utilsObj = Utils()
    # Project root is the parent of the directory containing this script.
    own_dir = os.path.dirname(os.path.realpath(__file__))
    root = os.path.abspath(os.path.join(own_dir, os.pardir))
    files = getFiles("{}/cli/".format(root))
    commands = getCommands(files)
    if len(sys.argv) > 1:
        if sys.argv[1] == "-h" or sys.argv[1] == "--help":
            roger_help(root, commands)
        elif sys.argv[1] == "-v" or sys.argv[1] == "--version":
            version = utilsObj.roger_version(root)
            print(version)
        else:
            command = sys.argv[1]
            command_args = sys.argv[2:]
            if command in commands:
                print("root: {} command: {} args: {}".format(
                    root, command, command_args
                ))
                script_call = getScriptCall(root, command, command_args)
                # NOTE(review): args are passed to the shell unescaped via
                # os.system — arguments containing shell metacharacters will
                # be interpreted by the shell. subprocess.run([...]) with a
                # list would avoid this; confirm before changing behavior.
                os.system(script_call)
            else:
                raise SystemExit("Command is not valid. Exiting.")
    else:
        raise SystemExit("No arguments found. Please refer to usage: roger -h")


if __name__ == "__main__":
    main()
seomoz/roger-mesos-tools
bin/roger.py
Python
apache-2.0
2,779
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""Training an pixel_cnn model. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os import time from absl import app from absl import flags import tensorflow.compat.v1 as tf from genomics_ood.images_ood import pixel_cnn from genomics_ood.images_ood import utils tf.compat.v1.disable_v2_behavior() flags.DEFINE_string('data_dir', '/tmp/image_data', 'Directory to data np arrays.') flags.DEFINE_string('out_dir', '/tmp/pixelcnn', 'Directory to write results and logs.') flags.DEFINE_boolean('save_im', False, 'If True, save image to npy.') flags.DEFINE_string('exp', 'fashion', 'cifar or fashion') # general hyper-parameters flags.DEFINE_integer('batch_size', 32, 'Batch size for training.') flags.DEFINE_integer('total_steps', 10, 'Max steps for training') flags.DEFINE_integer('random_seed', 1234, 'Fixed random seed to use.') flags.DEFINE_integer('eval_every', 10, 'Interval to evaluate model.') flags.DEFINE_float('learning_rate', 0.0001, 'Initial learning rate.') flags.DEFINE_float('learning_rate_decay', 0.999995, 'LR decay every step.') flags.DEFINE_integer('num_logistic_mix', 1, 'Number of components in decoder mixture distribution.') flags.DEFINE_integer('num_hierarchies', 1, 'Number of hierarchies in ' 'Pixel CNN.') flags.DEFINE_integer( 'num_resnet', 5, 'Number of convoluational layers ' 'before 
depth changes in Pixel CNN.') flags.DEFINE_integer('num_filters', 32, 'Number of pixel cnn filters') flags.DEFINE_float('dropout_p', 0.0, 'Dropout probability.') flags.DEFINE_float('reg_weight', 0.0, 'L2 regularization weight.') flags.DEFINE_float('mutation_rate', 0.0, 'Mutation rate.') flags.DEFINE_boolean('use_weight_norm', False, 'If True, use weight normalization.') flags.DEFINE_boolean('data_init', False, ('If True, use data-dependent initialization', ' (has no effect if use_weight_norm is False')) flags.DEFINE_float('momentum', 0.95, 'Momentum parameter (beta1) for Adam' 'optimizer.') flags.DEFINE_float('momentum2', 0.9995, 'Second momentum parameter (beta2) for' 'Adam optimizer.') flags.DEFINE_boolean('rescale_pixel_value', False, 'If True, rescale pixel values into [-1,1].') FLAGS = flags.FLAGS def main(unused_argv): out_dir = FLAGS.out_dir exp_dir = 'exp%s' % FLAGS.exp model_dir = 'rescale%s' % FLAGS.rescale_pixel_value param_dir = 'reg%.2f_mr%.2f' % (FLAGS.reg_weight, FLAGS.mutation_rate) job_dir = os.path.join(out_dir, exp_dir, model_dir, param_dir) print('job_dir={}'.format(job_dir)) job_model_dir = os.path.join(job_dir, 'model') job_log_dir = os.path.join(job_dir, 'log') for sub_dir in out_dir, job_dir, job_model_dir, job_log_dir: tf.compat.v1.gfile.MakeDirs(sub_dir) params = { 'job_model_dir': job_model_dir, 'job_log_dir': job_log_dir, 'job_dir': job_dir, 'dropout_p': FLAGS.dropout_p, 'reg_weight': FLAGS.reg_weight, 'num_resnet': FLAGS.num_resnet, 'num_hierarchies': FLAGS.num_hierarchies, 'num_filters': FLAGS.num_filters, 'num_logistic_mix': FLAGS.num_logistic_mix, 'use_weight_norm': FLAGS.use_weight_norm, 'data_init': FLAGS.data_init, 'mutation_rate': FLAGS.mutation_rate, 'batch_size': FLAGS.batch_size, 'learning_rate': FLAGS.learning_rate, 'learning_rate_decay': FLAGS.learning_rate_decay, 'momentum': FLAGS.momentum, 'momentum2': FLAGS.momentum2, 'eval_every': FLAGS.eval_every, 'save_im': FLAGS.save_im, 'n_dim': 28 if FLAGS.exp == 'fashion' else 32, 
'n_channel': 1 if FLAGS.exp == 'fashion' else 3, 'exp': FLAGS.exp, 'rescale_pixel_value': FLAGS.rescale_pixel_value, } # Print and write parameter settings with tf.io.gfile.GFile( os.path.join(params['job_model_dir'], 'params.json'), mode='w') as f: f.write(json.dumps(params, sort_keys=True)) # Fix the random seed - easier to debug separate runs tf.compat.v1.set_random_seed(FLAGS.random_seed) tf.keras.backend.clear_session() sess = tf.compat.v1.Session() tf.compat.v1.keras.backend.set_session(sess) # Load the datasets if FLAGS.exp == 'fashion': datasets = utils.load_fmnist_datasets(FLAGS.data_dir) else: datasets = utils.load_cifar_datasets(FLAGS.data_dir) # pylint: disable=g-long-lambda tr_in_ds = datasets['tr_in'].map(lambda x: utils.image_preprocess_add_noise( x, params['mutation_rate'])).batch( params['batch_size']).repeat().shuffle(1000).make_one_shot_iterator() tr_in_im = tr_in_ds.get_next() # repeat valid dataset because it will be used for training val_in_ds = datasets['val_in'].map(utils.image_preprocess).batch( params['batch_size']).repeat().make_one_shot_iterator() val_in_im = val_in_ds.get_next() # Define a Pixel CNN network input_shape = (params['n_dim'], params['n_dim'], params['n_channel']) dist = pixel_cnn.PixelCNN( image_shape=input_shape, dropout_p=params['dropout_p'], reg_weight=params['reg_weight'], num_resnet=params['num_resnet'], num_hierarchies=params['num_hierarchies'], num_filters=params['num_filters'], num_logistic_mix=params['num_logistic_mix'], use_weight_norm=params['use_weight_norm'], rescale_pixel_value=params['rescale_pixel_value'], ) # Define the training loss and optimizer log_prob_i = dist.log_prob(tr_in_im['image'], return_per_pixel=False) loss = -tf.reduce_mean(log_prob_i) log_prob_i_val_in = dist.log_prob(val_in_im['image']) loss_val_in = -tf.reduce_mean(log_prob_i_val_in) global_step = tf.compat.v1.train.get_or_create_global_step() learning_rate = tf.compat.v1.train.exponential_decay( params['learning_rate'], global_step, 1, 
params['learning_rate_decay']) opt = tf.compat.v1.train.AdamOptimizer( learning_rate=learning_rate, beta1=params['momentum'], beta2=params['momentum2']) tr_op = opt.minimize(loss, global_step=global_step) init_op = tf.compat.v1.global_variables_initializer() sess.run(init_op) # write tensorflow summaries saver = tf.compat.v1.train.Saver(max_to_keep=50000) merged_tr = tf.compat.v1.summary.merge([ tf.compat.v1.summary.scalar('loss', loss), tf.compat.v1.summary.scalar('train/learning_rate', learning_rate) ]) merged_val_in = tf.compat.v1.summary.merge( [tf.compat.v1.summary.scalar('loss', loss_val_in)]) tr_writer = tf.compat.v1.summary.FileWriter(job_log_dir + '/tr_in', sess.graph) val_in_writer = tf.compat.v1.summary.FileWriter(job_log_dir + '/val_in', sess.graph) # If previous ckpt exists, load ckpt ckpt_file = tf.compat.v2.train.latest_checkpoint(job_model_dir) if ckpt_file: prev_step = int( os.path.basename(ckpt_file).split('model_step')[1].split('.ckpt')[0]) tf.compat.v1.logging.info( 'previous ckpt exist, prev_step={}'.format(prev_step)) saver.restore(sess, ckpt_file) else: prev_step = 0 # Train the model with sess.as_default(): # this is a must otherwise localhost error for step in range(prev_step, FLAGS.total_steps + 1, 1): _, loss_tr_np, summary = sess.run([tr_op, loss, merged_tr]) if step % params['eval_every'] == 0: ckpt_name = 'model_step%d.ckpt' % step ckpt_path = os.path.join(job_model_dir, ckpt_name) while not tf.compat.v1.gfile.Exists(ckpt_path + '.index'): _ = saver.save(sess, ckpt_path, write_meta_graph=False) time.sleep(10) tr_writer.add_summary(summary, step) # Evaluate loss on the valid_in loss_val_in_np, summary_val_in = sess.run([loss_val_in, merged_val_in]) val_in_writer.add_summary(summary_val_in, step) print('step=%d, tr_in_loss=%.4f, val_in_loss=%.4f' % (step, loss_tr_np, loss_val_in_np)) tr_writer.flush() val_in_writer.flush() tr_writer.close() val_in_writer.close() if __name__ == '__main__': app.run(main)
google-research/google-research
genomics_ood/images_ood/train.py
Python
apache-2.0
8,869
#
# Copyright 2013 eNovance <licensing@enovance.com>
#
# Authors: Mehdi Abaakouk <mehdi.abaakouk@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import itertools

from ceilometer.alarm import evaluator
from ceilometer.i18n import _
from ceilometer.openstack.common import log

LOG = log.getLogger(__name__)

# Maps the combination rule's 'operator' onto the builtin predicate used to
# fold the underlying alarm states: 'and' requires every alarm to be in
# ALARM, 'or' requires at least one.
COMPARATORS = {'and': all, 'or': any}


class CombinationEvaluator(evaluator.Evaluator):
    """Evaluate an alarm whose state is a boolean combination of other alarms.

    The alarm's rule carries a list of underlying ``alarm_ids`` and an
    ``operator`` ('and'/'or'); this evaluator fetches each underlying
    alarm's current state and transitions the combination alarm accordingly.
    """

    def _get_alarm_state(self, alarm_id):
        """Return the current state of alarm ``alarm_id``, or None on error.

        Any retrieval failure is logged and swallowed; the resulting None is
        later treated as "insufficient data" by _sufficient_states.
        """
        try:
            alarm = self._client.alarms.get(alarm_id)
        except Exception:
            LOG.exception(_('alarm retrieval failed'))
            return None
        return alarm.state

    def _sufficient_states(self, alarm, states):
        """Check for the sufficiency of the data for evaluation.

        Ensure that there is sufficient data for evaluation,
        transitioning to unknown otherwise.
        """
        # note(sileht): alarm can be evaluated only with
        # stable state of other alarm
        # A None state (retrieval failure) or UNKNOWN state means that
        # underlying alarm contributes no usable data.
        alarms_missing_states = [alarm_id for alarm_id, state in states
                                 if not state or state == evaluator.UNKNOWN]
        sufficient = len(alarms_missing_states) == 0
        if not sufficient and alarm.rule['operator'] == 'or':
            # if operator is 'or' and there is one alarm, then the combinated
            # alarm's state should be 'alarm'
            # (a single ALARM member is enough to decide an 'or' combination
            # even when other members are unknown)
            sufficient = bool([alarm_id
                               for alarm_id, state in states
                               if state == evaluator.ALARM])
        if not sufficient and alarm.state != evaluator.UNKNOWN:
            # Not enough data and we are not already UNKNOWN: transition to
            # UNKNOWN, naming the alarms whose state could not be determined.
            reason = (_('Alarms %(alarm_ids)s'
                        ' are in unknown state') %
                      {'alarm_ids': ",".join(alarms_missing_states)})
            reason_data = self._reason_data(alarms_missing_states)
            self._refresh(alarm, evaluator.UNKNOWN, reason, reason_data)
        return sufficient

    @staticmethod
    def _reason_data(alarm_ids):
        """Create a reason data dictionary for this evaluator type."""
        return {'type': 'combination', 'alarm_ids': alarm_ids}

    @classmethod
    def _reason(cls, alarm, state, underlying_states):
        """Fabricate reason string."""
        # Report only the underlying alarms that share the target state,
        # since they are the ones that drove the (non-)transition.
        transition = alarm.state != state
        alarms_to_report = [alarm_id for alarm_id, alarm_state
                            in underlying_states
                            if alarm_state == state]
        reason_data = cls._reason_data(alarms_to_report)
        if transition:
            return (_('Transition to %(state)s due to alarms'
                      ' %(alarm_ids)s in state %(state)s') %
                    {'state': state,
                     'alarm_ids': ",".join(alarms_to_report)}), reason_data
        return (_('Remaining as %(state)s due to alarms'
                  ' %(alarm_ids)s in state %(state)s') %
                {'state': state,
                 'alarm_ids': ",".join(alarms_to_report)}), reason_data

    def _transition(self, alarm, underlying_states):
        """Transition alarm state if necessary."""
        op = alarm.rule['operator']
        # Fold the member states with all()/any() per the rule operator.
        if COMPARATORS[op](s == evaluator.ALARM
                           for __, s in underlying_states):
            state = evaluator.ALARM
        else:
            state = evaluator.OK
        # repeat_actions means notifications fire even without a transition.
        continuous = alarm.repeat_actions
        reason, reason_data = self._reason(alarm, state, underlying_states)
        if alarm.state != state or continuous:
            self._refresh(alarm, state, reason, reason_data)

    def evaluate(self, alarm):
        """Evaluate one combination alarm end-to-end.

        Skips evaluation outside the alarm's time constraint; otherwise
        gathers the underlying alarm states and transitions if the data
        is sufficient.
        """
        if not self.within_time_constraint(alarm):
            LOG.debug(_('Attempted to evaluate alarm %s, but it is not '
                        'within its time constraint.') % alarm.alarm_id)
            return

        # NOTE(review): this is Python 2 code (itertools.imap); `states` is
        # a list here and is deliberately consumed twice below — under
        # Python 3 zip() would be a one-shot iterator, so do not port naively.
        states = zip(alarm.rule['alarm_ids'],
                     itertools.imap(self._get_alarm_state,
                                    alarm.rule['alarm_ids']))

        if self._sufficient_states(alarm, states):
            self._transition(alarm, states)
yanheven/ceilometer
ceilometer/alarm/evaluator/combination.py
Python
apache-2.0
4,511
# AWX settings file
#
# Container-image settings overlay for AWX. This module is merged into the
# base AWX Django settings; it pins filesystem paths, disables chroot-based
# process isolation, routes all logging to the console (the container
# convention), and builds the database config from environment variables.
import os


def get_secret():
    """Return the installation SECRET_KEY from /etc/tower, or None.

    Reads the key as bytes and strips surrounding whitespace/newlines.
    Returns None when the file does not exist (e.g. key not yet provisioned)
    so the caller sees an explicit "no key" value.
    """
    if os.path.exists("/etc/tower/SECRET_KEY"):
        # Use a context manager so the file handle is closed deterministically
        # (the previous open(...).read() leaked the handle until GC).
        with open('/etc/tower/SECRET_KEY', 'rb') as f:
            return f.read().strip()
    return None


ADMINS = ()
STATIC_ROOT = '/var/lib/awx/public/static'
PROJECTS_ROOT = '/var/lib/awx/projects'
AWX_ANSIBLE_COLLECTIONS_PATHS = '/var/lib/awx/vendor/awx_ansible_collections'
JOBOUTPUT_ROOT = '/var/lib/awx/job_status'

SECRET_KEY = get_secret()

# The container is expected to sit behind a proxy that enforces host checks.
ALLOWED_HOSTS = ['*']

# Container environments don't like chroots
AWX_PROOT_ENABLED = False

CLUSTER_HOST_ID = "awx"
SYSTEM_UUID = '00000000-0000-0000-0000-000000000000'

# TLS termination happens outside the container, so cookies are not
# marked secure here.
CSRF_COOKIE_SECURE = False
SESSION_COOKIE_SECURE = False

###############################################################################
# EMAIL SETTINGS
###############################################################################

SERVER_EMAIL = 'root@localhost'
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
EMAIL_SUBJECT_PREFIX = '[AWX] '

EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False

# NOTE(review): LOGGING is not defined in this file — it is presumably
# provided by the base AWX settings module this overlay is merged into;
# verify against the AWX settings include machinery.
# Route everything through a stdout StreamHandler (container convention) and
# null out the file-based handlers the base settings configure.
LOGGING['handlers']['console'] = {
    '()': 'logging.StreamHandler',
    'level': 'DEBUG',
    'formatter': 'simple',
}

LOGGING['loggers']['django.request']['handlers'] = ['console']
LOGGING['loggers']['rest_framework.request']['handlers'] = ['console']
LOGGING['loggers']['awx']['handlers'] = ['console', 'external_logger']
LOGGING['loggers']['awx.main.commands.run_callback_receiver']['handlers'] = ['console']
LOGGING['loggers']['awx.main.tasks']['handlers'] = ['console', 'external_logger']
LOGGING['loggers']['awx.main.scheduler']['handlers'] = ['console', 'external_logger']
LOGGING['loggers']['django_auth_ldap']['handlers'] = ['console']
LOGGING['loggers']['social']['handlers'] = ['console']
LOGGING['loggers']['system_tracking_migrations']['handlers'] = ['console']
LOGGING['loggers']['rbac_migrations']['handlers'] = ['console']
LOGGING['loggers']['awx.isolated.manager.playbooks']['handlers'] = ['console']
LOGGING['handlers']['callback_receiver'] = {'class': 'logging.NullHandler'}
LOGGING['handlers']['task_system'] = {'class': 'logging.NullHandler'}
LOGGING['handlers']['tower_warnings'] = {'class': 'logging.NullHandler'}
LOGGING['handlers']['rbac_migrations'] = {'class': 'logging.NullHandler'}
LOGGING['handlers']['system_tracking_migrations'] = {'class': 'logging.NullHandler'}
LOGGING['handlers']['management_playbooks'] = {'class': 'logging.NullHandler'}

# Database connection is fully driven by the environment; unset variables
# stay None and must be supplied at deploy time.
DATABASES = {
    'default': {
        'ATOMIC_REQUESTS': True,
        'ENGINE': 'awx.main.db.profiled_pg',
        'NAME': os.getenv("DATABASE_NAME", None),
        'USER': os.getenv("DATABASE_USER", None),
        'PASSWORD': os.getenv("DATABASE_PASSWORD", None),
        'HOST': os.getenv("DATABASE_HOST", None),
        'PORT': os.getenv("DATABASE_PORT", None),
    }
}

if os.getenv("DATABASE_SSLMODE", False):
    DATABASES['default']['OPTIONS'] = {'sslmode': os.getenv("DATABASE_SSLMODE")}

# Trust the reverse proxy's forwarded host/port headers.
USE_X_FORWARDED_HOST = True
USE_X_FORWARDED_PORT = True
GoogleCloudPlatform/sap-deployment-automation
third_party/github.com/ansible/awx/installer/roles/image_build/files/settings.py
Python
apache-2.0
2,976
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The Autoregressive distribution.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import ops from tensorflow.python.ops.distributions import distribution as distribution_lib from tensorflow.python.ops.distributions import util as distribution_util from tensorflow.python.util import deprecation class Autoregressive(distribution_lib.Distribution): """Autoregressive distributions. The Autoregressive distribution enables learning (often) richer multivariate distributions by repeatedly applying a [diffeomorphic]( https://en.wikipedia.org/wiki/Diffeomorphism) transformation (such as implemented by `Bijector`s). Regarding terminology, "Autoregressive models decompose the joint density as a product of conditionals, and model each conditional in turn. Normalizing flows transform a base density (e.g. a standard Gaussian) into the target density by an invertible transformation with tractable Jacobian." [(Papamakarios et al., 2016)][1] In other words, the "autoregressive property" is equivalent to the decomposition, `p(x) = prod{ p(x[i] | x[0:i]) : i=0, ..., d }`. 
The provided `shift_and_log_scale_fn`, `masked_autoregressive_default_template`, achieves this property by zeroing out weights in its `masked_dense` layers. Practically speaking the autoregressive property means that there exists a permutation of the event coordinates such that each coordinate is a diffeomorphic function of only preceding coordinates [(van den Oord et al., 2016)][2]. #### Mathematical Details The probability function is ```none prob(x; fn, n) = fn(x).prob(x) ``` And a sample is generated by ```none x = fn(...fn(fn(x0).sample()).sample()).sample() ``` where the ellipses (`...`) represent `n-2` composed calls to `fn`, `fn` constructs a `tfp.distributions.Distribution`-like instance, and `x0` is a fixed initializing `Tensor`. #### Examples ```python import tensorflow_probability as tfp tfd = tfp.distributions def normal_fn(self, event_size): n = event_size * (event_size + 1) / 2 p = tf.Variable(tfd.Normal(loc=0., scale=1.).sample(n)) affine = tfd.bijectors.Affine( scale_tril=tfd.fill_triangular(0.25 * p)) def _fn(samples): scale = math_ops.exp(affine.forward(samples)).eval() return independent_lib.Independent( normal_lib.Normal(loc=0., scale=scale, validate_args=True), reinterpreted_batch_ndims=1) return _fn batch_and_event_shape = [3, 2, 4] sample0 = array_ops.zeros(batch_and_event_shape) ar = autoregressive_lib.Autoregressive( self._normal_fn(batch_and_event_shape[-1]), sample0) x = ar.sample([6, 5]) # ==> x.shape = [6, 5, 3, 2, 4] prob_x = ar.prob(x) # ==> x.shape = [6, 5, 3, 2] ``` #### References [1]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked Autoregressive Flow for Density Estimation. In _Neural Information Processing Systems_, 2017. https://arxiv.org/abs/1705.07057 [2]: Aaron van den Oord, Nal Kalchbrenner, Oriol Vinyals, Lasse Espeholt, Alex Graves, and Koray Kavukcuoglu. Conditional Image Generation with PixelCNN Decoders. In _Neural Information Processing Systems_, 2016. 
https://arxiv.org/abs/1606.05328 """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tfp.distributions`.", warn_once=True) def __init__(self, distribution_fn, sample0=None, num_steps=None, validate_args=False, allow_nan_stats=True, name="Autoregressive"): """Construct an `Autoregressive` distribution. Args: distribution_fn: Python `callable` which constructs a `tfp.distributions.Distribution`-like instance from a `Tensor` (e.g., `sample0`). The function must respect the "autoregressive property", i.e., there exists a permutation of event such that each coordinate is a diffeomorphic function of on preceding coordinates. sample0: Initial input to `distribution_fn`; used to build the distribution in `__init__` which in turn specifies this distribution's properties, e.g., `event_shape`, `batch_shape`, `dtype`. If unspecified, then `distribution_fn` should be default constructable. num_steps: Number of times `distribution_fn` is composed from samples, e.g., `num_steps=2` implies `distribution_fn(distribution_fn(sample0).sample(n)).sample()`. validate_args: Python `bool`. Whether to validate input with asserts. If `validate_args` is `False`, and the inputs are invalid, correct behavior is not guaranteed. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. Default value: "Autoregressive". Raises: ValueError: if `num_steps` and `distribution_fn(sample0).event_shape.num_elements()` are both `None`. ValueError: if `num_steps < 1`. 
""" parameters = dict(locals()) with ops.name_scope(name) as name: self._distribution_fn = distribution_fn self._sample0 = sample0 self._distribution0 = (distribution_fn() if sample0 is None else distribution_fn(sample0)) if num_steps is None: num_steps = self._distribution0.event_shape.num_elements() if num_steps is None: raise ValueError("distribution_fn must generate a distribution " "with fully known `event_shape`.") if num_steps < 1: raise ValueError("num_steps ({}) must be at least 1.".format(num_steps)) self._num_steps = num_steps super(Autoregressive, self).__init__( dtype=self._distribution0.dtype, reparameterization_type=self._distribution0.reparameterization_type, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=self._distribution0._graph_parents, # pylint: disable=protected-access name=name) @property def distribution_fn(self): return self._distribution_fn @property def sample0(self): return self._sample0 @property def num_steps(self): return self._num_steps @property def distribution0(self): return self._distribution0 def _batch_shape(self): return self.distribution0.batch_shape def _batch_shape_tensor(self): return self.distribution0.batch_shape_tensor() def _event_shape(self): return self.distribution0.event_shape def _event_shape_tensor(self): return self.distribution0.event_shape_tensor() def _sample_n(self, n, seed=None): if seed is None: seed = distribution_util.gen_new_seed( seed=np.random.randint(2**32 - 1), salt="autoregressive") samples = self.distribution0.sample(n, seed=seed) for _ in range(self._num_steps): samples = self.distribution_fn(samples).sample(seed=seed) return samples def _log_prob(self, value): return self.distribution_fn(value).log_prob(value) def _prob(self, value): return self.distribution_fn(value).prob(value)
hfp/tensorflow-xsmm
tensorflow/contrib/distributions/python/ops/autoregressive.py
Python
apache-2.0
8,448
# # Copyright (c) 2001 - 2019 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # TODO: # * supported arch for versions: for old versions of batch file without # argument, giving bogus argument cannot be detected, so we have to hardcode # this here # * print warning when msvc version specified but not found # * find out why warning do not print # * test on 64 bits XP + VS 2005 (and VS 6 if possible) # * SDK # * Assembly __revision__ = "src/engine/SCons/Tool/MSCommon/vc.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan" __doc__ = """Module for Visual C/C++ detection and configuration. """ import SCons.compat import SCons.Util import subprocess import os import platform import sys from string import digits as string_digits if sys.version_info[0] == 2: import collections import SCons.Warnings from SCons.Tool import find_program_path from . import common debug = common.debug from . 
import sdk get_installed_sdks = sdk.get_installed_sdks class VisualCException(Exception): pass class UnsupportedVersion(VisualCException): pass class MSVCUnsupportedHostArch(VisualCException): pass class MSVCUnsupportedTargetArch(VisualCException): pass class MissingConfiguration(VisualCException): pass class NoVersionFound(VisualCException): pass class BatchFileExecutionError(VisualCException): pass # Dict to 'canonalize' the arch _ARCH_TO_CANONICAL = { "amd64" : "amd64", "emt64" : "amd64", "i386" : "x86", "i486" : "x86", "i586" : "x86", "i686" : "x86", "ia64" : "ia64", # deprecated "itanium" : "ia64", # deprecated "x86" : "x86", "x86_64" : "amd64", "arm" : "arm", "arm64" : "arm64", "aarch64" : "arm64", } _HOST_TARGET_TO_CL_DIR_GREATER_THAN_14 = { ("amd64","amd64") : ("Hostx64","x64"), ("amd64","x86") : ("Hostx64","x86"), ("amd64","arm") : ("Hostx64","arm"), ("amd64","arm64") : ("Hostx64","arm64"), ("x86","amd64") : ("Hostx86","x64"), ("x86","x86") : ("Hostx86","x86"), ("x86","arm") : ("Hostx86","arm"), ("x86","arm64") : ("Hostx86","arm64"), } # get path to the cl.exe dir for older VS versions # based off a tuple of (host, target) platforms _HOST_TARGET_TO_CL_DIR = { ("amd64","amd64") : "amd64", ("amd64","x86") : "amd64_x86", ("amd64","arm") : "amd64_arm", ("amd64","arm64") : "amd64_arm64", ("x86","amd64") : "x86_amd64", ("x86","x86") : "", ("x86","arm") : "x86_arm", ("x86","arm64") : "x86_arm64", } # Given a (host, target) tuple, return the argument for the bat file. # Both host and targets should be canonalized. _HOST_TARGET_ARCH_TO_BAT_ARCH = { ("x86", "x86"): "x86", ("x86", "amd64"): "x86_amd64", ("x86", "x86_amd64"): "x86_amd64", ("amd64", "x86_amd64"): "x86_amd64", # This is present in (at least) VS2012 express ("amd64", "amd64"): "amd64", ("amd64", "x86"): "x86", ("x86", "ia64"): "x86_ia64", # gone since 14.0 ("arm", "arm"): "arm", # since 14.0, maybe gone 14.1? 
("x86", "arm"): "x86_arm", # since 14.0 ("x86", "arm64"): "x86_arm64", # since 14.1 ("amd64", "arm"): "amd64_arm", # since 14.0 ("amd64", "arm64"): "amd64_arm64", # since 14.1 } _CL_EXE_NAME = 'cl.exe' def get_msvc_version_numeric(msvc_version): """Get the raw version numbers from a MSVC_VERSION string, so it could be cast to float or other numeric values. For example, '14.0Exp' would get converted to '14.0'. Args: msvc_version: str string representing the version number, could contain non digit characters Returns: str: the value converted to a numeric only string """ return ''.join([x for x in msvc_version if x in string_digits + '.']) def get_host_target(env): debug('get_host_target()') host_platform = env.get('HOST_ARCH') if not host_platform: host_platform = platform.machine() # Solaris returns i86pc for both 32 and 64 bit architectures if host_platform == "i86pc": if platform.architecture()[0] == "64bit": host_platform = "amd64" else: host_platform = "x86" # Retain user requested TARGET_ARCH req_target_platform = env.get('TARGET_ARCH') debug('get_host_target() req_target_platform:%s'%req_target_platform) if req_target_platform: # If user requested a specific platform then only try that one. target_platform = req_target_platform else: target_platform = host_platform try: host = _ARCH_TO_CANONICAL[host_platform.lower()] except KeyError: msg = "Unrecognized host architecture %s" raise MSVCUnsupportedHostArch(msg % repr(host_platform)) try: target = _ARCH_TO_CANONICAL[target_platform.lower()] except KeyError: all_archs = str(list(_ARCH_TO_CANONICAL.keys())) raise MSVCUnsupportedTargetArch("Unrecognized target architecture %s\n\tValid architectures: %s" % (target_platform, all_archs)) return (host, target,req_target_platform) # If you update this, update SupportedVSList in Tool/MSCommon/vs.py, and the # MSVC_VERSION documentation in Tool/msvc.xml. 
_VCVER = ["14.2", "14.1", "14.0", "14.0Exp", "12.0", "12.0Exp", "11.0", "11.0Exp", "10.0", "10.0Exp", "9.0", "9.0Exp","8.0", "8.0Exp","7.1", "7.0", "6.0"] # if using vswhere, a further mapping is needed _VCVER_TO_VSWHERE_VER = { '14.2' : '[16.0, 17.0)', '14.1' : '[15.0, 16.0)', } _VCVER_TO_PRODUCT_DIR = { '14.2' : [ (SCons.Util.HKEY_LOCAL_MACHINE, r'')], # VS 2019 doesn't set this key '14.1' : [ (SCons.Util.HKEY_LOCAL_MACHINE, r'')], # VS 2017 doesn't set this key '14.0' : [ (SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VisualStudio\14.0\Setup\VC\ProductDir')], '14.0Exp' : [ (SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VCExpress\14.0\Setup\VC\ProductDir')], '12.0' : [ (SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VisualStudio\12.0\Setup\VC\ProductDir'), ], '12.0Exp' : [ (SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VCExpress\12.0\Setup\VC\ProductDir'), ], '11.0': [ (SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VisualStudio\11.0\Setup\VC\ProductDir'), ], '11.0Exp' : [ (SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VCExpress\11.0\Setup\VC\ProductDir'), ], '10.0': [ (SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VisualStudio\10.0\Setup\VC\ProductDir'), ], '10.0Exp' : [ (SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VCExpress\10.0\Setup\VC\ProductDir'), ], '9.0': [ (SCons.Util.HKEY_CURRENT_USER, r'Microsoft\DevDiv\VCForPython\9.0\installdir',), (SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VisualStudio\9.0\Setup\VC\ProductDir',), ], '9.0Exp' : [ (SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VCExpress\9.0\Setup\VC\ProductDir'), ], '8.0': [ (SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VisualStudio\8.0\Setup\VC\ProductDir'), ], '8.0Exp': [ (SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VCExpress\8.0\Setup\VC\ProductDir'), ], '7.1': [ (SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VisualStudio\7.1\Setup\VC\ProductDir'), ], '7.0': [ (SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VisualStudio\7.0\Setup\VC\ProductDir'), ], '6.0': [ (SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VisualStudio\6.0\Setup\Microsoft 
Visual C++\ProductDir'), ] } def msvc_version_to_maj_min(msvc_version): msvc_version_numeric = get_msvc_version_numeric(msvc_version) t = msvc_version_numeric.split(".") if not len(t) == 2: raise ValueError("Unrecognized version %s (%s)" % (msvc_version,msvc_version_numeric)) try: maj = int(t[0]) min = int(t[1]) return maj, min except ValueError as e: raise ValueError("Unrecognized version %s (%s)" % (msvc_version,msvc_version_numeric)) def is_host_target_supported(host_target, msvc_version): """Check if (host, target) pair is supported for a VC version. :note: only checks whether a given version *may* support the given (host, target), not that the toolchain is actually present on the machine. :param tuple host_target: canonalized host-targets pair, e.g. ("x86", "amd64") for cross compilation from 32 bit Windows to 64 bits. :param str msvc_version: Visual C++ version (major.minor), e.g. "10.0" :returns: True or False """ # We assume that any Visual Studio version supports x86 as a target if host_target[1] != "x86": maj, min = msvc_version_to_maj_min(msvc_version) if maj < 8: return False return True def find_vc_pdir_vswhere(msvc_version): """ Find the MSVC product directory using the vswhere program. :param msvc_version: MSVC version to search for :return: MSVC install dir or None :raises UnsupportedVersion: if the version is not known by this file """ try: vswhere_version = _VCVER_TO_VSWHERE_VER[msvc_version] except KeyError: debug("Unknown version of MSVC: %s" % msvc_version) raise UnsupportedVersion("Unknown version %s" % msvc_version) # For bug 3333 - support default location of vswhere for both 64 and 32 bit windows # installs. for pf in ['Program Files (x86)', 'Program Files']: vswhere_path = os.path.join( 'C:\\', pf, 'Microsoft Visual Studio', 'Installer', 'vswhere.exe' ) if os.path.exists(vswhere_path): # If we found vswhere, then use it. 
break else: # No vswhere on system, no install info available return None vswhere_cmd = [vswhere_path, '-products', '*', '-version', vswhere_version, '-property', 'installationPath'] #TODO PY27 cannot use Popen as context manager # try putting it back to the old way for now sp = subprocess.Popen(vswhere_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) vsdir, err = sp.communicate() if vsdir: vsdir = vsdir.decode("mbcs").splitlines() # vswhere could easily return multiple lines # we could define a way to pick the one we prefer, but since # this data is currently only used to make a check for existence, # returning the first hit should be good enough for now. vc_pdir = os.path.join(vsdir[0], 'VC') return vc_pdir else: # No vswhere on system, no install info available return None def find_vc_pdir(msvc_version): """Find the MSVC product directory for the given version. Tries to look up the path using a registry key from the table _VCVER_TO_PRODUCT_DIR; if there is no key, calls find_vc_pdir_wshere for help instead. Args: msvc_version: str msvc version (major.minor, e.g. 10.0) Returns: str: Path found in registry, or None Raises: UnsupportedVersion: if the version is not known by this file. MissingConfiguration: found version but the directory is missing. Both exceptions inherit from VisualCException. """ root = 'Software\\' try: hkeys = _VCVER_TO_PRODUCT_DIR[msvc_version] except KeyError: debug("Unknown version of MSVC: %s" % msvc_version) raise UnsupportedVersion("Unknown version %s" % msvc_version) for hkroot, key in hkeys: try: comps = None if not key: comps = find_vc_pdir_vswhere(msvc_version) if not comps: debug('find_vc_pdir_vswhere(): no VC found for version {}'.format(repr(msvc_version))) raise SCons.Util.WinError debug('find_vc_pdir_vswhere(): VC found: {}'.format(repr(msvc_version))) return comps else: if common.is_win64(): try: # ordinally at win64, try Wow6432Node first. 
comps = common.read_reg(root + 'Wow6432Node\\' + key, hkroot) except SCons.Util.WinError as e: # at Microsoft Visual Studio for Python 2.7, value is not in Wow6432Node pass if not comps: # not Win64, or Microsoft Visual Studio for Python 2.7 comps = common.read_reg(root + key, hkroot) except SCons.Util.WinError as e: debug('find_vc_dir(): no VC registry key {}'.format(repr(key))) else: debug('find_vc_dir(): found VC in registry: {}'.format(comps)) if os.path.exists(comps): return comps else: debug('find_vc_dir(): reg says dir is {}, but it does not exist. (ignoring)'.format(comps)) raise MissingConfiguration("registry dir {} not found on the filesystem".format(comps)) return None def find_batch_file(env,msvc_version,host_arch,target_arch): """ Find the location of the batch script which should set up the compiler for any TARGET_ARCH whose compilers were installed by Visual Studio/VCExpress """ pdir = find_vc_pdir(msvc_version) if pdir is None: raise NoVersionFound("No version of Visual Studio found") debug('find_batch_file() in {}'.format(pdir)) # filter out e.g. 
"Exp" from the version name msvc_ver_numeric = get_msvc_version_numeric(msvc_version) vernum = float(msvc_ver_numeric) if 7 <= vernum < 8: pdir = os.path.join(pdir, os.pardir, "Common7", "Tools") batfilename = os.path.join(pdir, "vsvars32.bat") elif vernum < 7: pdir = os.path.join(pdir, "Bin") batfilename = os.path.join(pdir, "vcvars32.bat") elif 8 <= vernum <= 14: batfilename = os.path.join(pdir, "vcvarsall.bat") else: # vernum >= 14.1 VS2017 and above batfilename = os.path.join(pdir, "Auxiliary", "Build", "vcvarsall.bat") if not os.path.exists(batfilename): debug("Not found: %s" % batfilename) batfilename = None installed_sdks = get_installed_sdks() for _sdk in installed_sdks: sdk_bat_file = _sdk.get_sdk_vc_script(host_arch,target_arch) if not sdk_bat_file: debug("find_batch_file() not found:%s"%_sdk) else: sdk_bat_file_path = os.path.join(pdir,sdk_bat_file) if os.path.exists(sdk_bat_file_path): debug('find_batch_file() sdk_bat_file_path:%s'%sdk_bat_file_path) return (batfilename, sdk_bat_file_path) return (batfilename, None) __INSTALLED_VCS_RUN = None _VC_TOOLS_VERSION_FILE_PATH = ['Auxiliary', 'Build', 'Microsoft.VCToolsVersion.default.txt'] _VC_TOOLS_VERSION_FILE = os.sep.join(_VC_TOOLS_VERSION_FILE_PATH) def _check_cl_exists_in_vc_dir(env, vc_dir, msvc_version): """Find the cl.exe on the filesystem in the vc_dir depending on TARGET_ARCH, HOST_ARCH and the msvc version. TARGET_ARCH and HOST_ARCH can be extracted from the passed env, unless its None, which then the native platform is assumed the host and target. Args: env: Environment a construction environment, usually if this is passed its because there is a desired TARGET_ARCH to be used when searching for a cl.exe vc_dir: str the path to the VC dir in the MSVC installation msvc_version: str msvc version (major.minor, e.g. 
10.0) Returns: bool: """ # determine if there is a specific target platform we want to build for and # use that to find a list of valid VCs, default is host platform == target platform # and same for if no env is specified to extract target platform from if env: (host_platform, target_platform, req_target_platform) = get_host_target(env) else: host_platform = platform.machine().lower() target_platform = host_platform host_platform = _ARCH_TO_CANONICAL[host_platform] target_platform = _ARCH_TO_CANONICAL[target_platform] debug('_check_cl_exists_in_vc_dir(): host platform %s, target platform %s for version %s' % (host_platform, target_platform, msvc_version)) ver_num = float(get_msvc_version_numeric(msvc_version)) # make sure the cl.exe exists meaning the tool is installed if ver_num > 14: # 2017 and newer allowed multiple versions of the VC toolset to be installed at the same time. # Just get the default tool version for now #TODO: support setting a specific minor VC version default_toolset_file = os.path.join(vc_dir, _VC_TOOLS_VERSION_FILE) try: with open(default_toolset_file) as f: vc_specific_version = f.readlines()[0].strip() except IOError: debug('_check_cl_exists_in_vc_dir(): failed to read ' + default_toolset_file) return False except IndexError: debug('_check_cl_exists_in_vc_dir(): failed to find MSVC version in ' + default_toolset_file) return False host_trgt_dir = _HOST_TARGET_TO_CL_DIR_GREATER_THAN_14.get((host_platform, target_platform), None) if host_trgt_dir is None: debug('_check_cl_exists_in_vc_dir(): unsupported host/target platform combo: (%s,%s)'%(host_platform, target_platform)) return False cl_path = os.path.join(vc_dir, 'Tools','MSVC', vc_specific_version, 'bin', host_trgt_dir[0], host_trgt_dir[1], _CL_EXE_NAME) debug('_check_cl_exists_in_vc_dir(): checking for ' + _CL_EXE_NAME + ' at ' + cl_path) if os.path.exists(cl_path): debug('_check_cl_exists_in_vc_dir(): found ' + _CL_EXE_NAME + '!') return True elif ver_num <= 14 and ver_num >= 8: # Set 
default value to be -1 as "" which is the value for x86/x86 yields true when tested # if not host_trgt_dir host_trgt_dir = _HOST_TARGET_TO_CL_DIR.get((host_platform, target_platform), None) if host_trgt_dir is None: debug('_check_cl_exists_in_vc_dir(): unsupported host/target platform combo') return False cl_path = os.path.join(vc_dir, 'bin', host_trgt_dir, _CL_EXE_NAME) debug('_check_cl_exists_in_vc_dir(): checking for ' + _CL_EXE_NAME + ' at ' + cl_path) cl_path_exists = os.path.exists(cl_path) if not cl_path_exists and host_platform == 'amd64': # older versions of visual studio only had x86 binaries, # so if the host platform is amd64, we need to check cross # compile options (x86 binary compiles some other target on a 64 bit os) # Set default value to be -1 as "" which is the value for x86/x86 yields true when tested # if not host_trgt_dir host_trgt_dir = _HOST_TARGET_TO_CL_DIR.get(('x86', target_platform), None) if host_trgt_dir is None: return False cl_path = os.path.join(vc_dir, 'bin', host_trgt_dir, _CL_EXE_NAME) debug('_check_cl_exists_in_vc_dir(): checking for ' + _CL_EXE_NAME + ' at ' + cl_path) cl_path_exists = os.path.exists(cl_path) if cl_path_exists: debug('_check_cl_exists_in_vc_dir(): found ' + _CL_EXE_NAME + '!') return True elif ver_num < 8 and ver_num >= 6: # not sure about these versions so if a walk the VC dir (could be slow) for root, _, files in os.walk(vc_dir): if _CL_EXE_NAME in files: debug('get_installed_vcs ' + _CL_EXE_NAME + ' found %s' % os.path.join(root, _CL_EXE_NAME)) return True return False else: # version not support return false debug('_check_cl_exists_in_vc_dir(): unsupported MSVC version: ' + str(ver_num)) return False def cached_get_installed_vcs(env=None): global __INSTALLED_VCS_RUN if __INSTALLED_VCS_RUN is None: ret = get_installed_vcs(env) __INSTALLED_VCS_RUN = ret return __INSTALLED_VCS_RUN def get_installed_vcs(env=None): installed_versions = [] for ver in _VCVER: debug('trying to find VC %s' % ver) try: VC_DIR = 
find_vc_pdir(ver) if VC_DIR: debug('found VC %s' % ver) if _check_cl_exists_in_vc_dir(env, VC_DIR, ver): installed_versions.append(ver) else: debug('find_vc_pdir no compiler found %s' % ver) else: debug('find_vc_pdir return None for ver %s' % ver) except (MSVCUnsupportedTargetArch, MSVCUnsupportedHostArch): # Allow this exception to propagate further as it should cause # SCons to exit with an error code raise except VisualCException as e: debug('did not find VC %s: caught exception %s' % (ver, str(e))) return installed_versions def reset_installed_vcs(): """Make it try again to find VC. This is just for the tests.""" __INSTALLED_VCS_RUN = None # Running these batch files isn't cheap: most of the time spent in # msvs.generate() is due to vcvars*.bat. In a build that uses "tools='msvs'" # in multiple environments, for example: # env1 = Environment(tools='msvs') # env2 = Environment(tools='msvs') # we can greatly improve the speed of the second and subsequent Environment # (or Clone) calls by memoizing the environment variables set by vcvars*.bat. # # Updated: by 2018, vcvarsall.bat had gotten so expensive (vs2017 era) # it was breaking CI builds because the test suite starts scons so many # times and the existing memo logic only helped with repeated calls # within the same scons run. Windows builds on the CI system were split # into chunks to get around single-build time limits. # With VS2019 it got even slower and an optional persistent cache file # was introduced. The cache now also stores only the parsed vars, # not the entire output of running the batch file - saves a bit # of time not parsing every time. 
script_env_cache = None def script_env(script, args=None): global script_env_cache if script_env_cache is None: script_env_cache = common.read_script_env_cache() cache_key = "{}--{}".format(script, args) cache_data = script_env_cache.get(cache_key, None) if cache_data is None: stdout = common.get_output(script, args) # Stupid batch files do not set return code: we take a look at the # beginning of the output for an error message instead olines = stdout.splitlines() if olines[0].startswith("The specified configuration type is missing"): raise BatchFileExecutionError("\n".join(olines[:2])) cache_data = common.parse_output(stdout) script_env_cache[cache_key] = cache_data # once we updated cache, give a chance to write out if user wanted common.write_script_env_cache(script_env_cache) else: #TODO: Python 2 cleanup # If we "hit" data from the json file, we have a Py2 problem: # keys & values will be unicode. don't detect, just convert. if sys.version_info[0] == 2: def convert(data): if isinstance(data, basestring): return str(data) elif isinstance(data, collections.Mapping): return dict(map(convert, data.iteritems())) elif isinstance(data, collections.Iterable): return type(data)(map(convert, data)) else: return data cache_data = convert(cache_data) return cache_data def get_default_version(env): debug('get_default_version()') msvc_version = env.get('MSVC_VERSION') msvs_version = env.get('MSVS_VERSION') debug('get_default_version(): msvc_version:%s msvs_version:%s'%(msvc_version,msvs_version)) if msvs_version and not msvc_version: SCons.Warnings.warn( SCons.Warnings.DeprecatedWarning, "MSVS_VERSION is deprecated: please use MSVC_VERSION instead ") return msvs_version elif msvc_version and msvs_version: if not msvc_version == msvs_version: SCons.Warnings.warn( SCons.Warnings.VisualVersionMismatch, "Requested msvc version (%s) and msvs version (%s) do " \ "not match: please use MSVC_VERSION only to request a " \ "visual studio version, MSVS_VERSION is deprecated" \ % 
(msvc_version, msvs_version)) return msvs_version if not msvc_version: installed_vcs = cached_get_installed_vcs(env) debug('installed_vcs:%s' % installed_vcs) if not installed_vcs: #msg = 'No installed VCs' #debug('msv %s' % repr(msg)) #SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, msg) debug('msvc_setup_env: No installed VCs') return None msvc_version = installed_vcs[0] debug('msvc_setup_env: using default installed MSVC version %s' % repr(msvc_version)) return msvc_version def msvc_setup_env_once(env): try: has_run = env["MSVC_SETUP_RUN"] except KeyError: has_run = False if not has_run: msvc_setup_env(env) env["MSVC_SETUP_RUN"] = True def msvc_find_valid_batch_script(env, version): debug('msvc_find_valid_batch_script()') # Find the host platform, target platform, and if present the requested # target platform platforms = get_host_target(env) debug(" msvs_find_valid_batch_script(): host_platform %s, target_platform %s req_target_platform:%s" % platforms) host_platform, target_platform, req_target_platform = platforms try_target_archs = [target_platform] # VS2012 has a "cross compile" environment to build 64 bit # with x86_amd64 as the argument to the batch setup script if req_target_platform in ('amd64', 'x86_64'): try_target_archs.append('x86_amd64') elif not req_target_platform and target_platform in ['amd64', 'x86_64']: # There may not be "native" amd64, but maybe "cross" x86_amd64 tools try_target_archs.append('x86_amd64') # If the user hasn't specifically requested a TARGET_ARCH, and # The TARGET_ARCH is amd64 then also try 32 bits if there are no viable # 64 bit tools installed try_target_archs.append('x86') debug("msvs_find_valid_batch_script(): host_platform: %s try_target_archs:%s"%(host_platform, try_target_archs)) d = None for tp in try_target_archs: # Set to current arch. 
env['TARGET_ARCH']=tp debug("msvc_find_valid_batch_script() trying target_platform:%s"%tp) host_target = (host_platform, tp) if not is_host_target_supported(host_target, version): warn_msg = "host, target = %s not supported for MSVC version %s" % \ (host_target, version) SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, warn_msg) arg = _HOST_TARGET_ARCH_TO_BAT_ARCH[host_target] # Get just version numbers maj, min = msvc_version_to_maj_min(version) # VS2015+ if maj >= 14: if env.get('MSVC_UWP_APP') == '1': # Initialize environment variables with store/universal paths arg += ' store' # Try to locate a batch file for this host/target platform combo try: (vc_script, sdk_script) = find_batch_file(env, version, host_platform, tp) debug('msvc_find_valid_batch_script() vc_script:%s sdk_script:%s'%(vc_script,sdk_script)) except VisualCException as e: msg = str(e) debug('Caught exception while looking for batch file (%s)' % msg) warn_msg = "VC version %s not installed. " + \ "C/C++ compilers are most likely not set correctly.\n" + \ " Installed versions are: %s" warn_msg = warn_msg % (version, cached_get_installed_vcs(env)) SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, warn_msg) continue # Try to use the located batch file for this host/target platform combo debug('msvc_find_valid_batch_script() use_script 2 %s, args:%s' % (repr(vc_script), arg)) found = None if vc_script: try: d = script_env(vc_script, args=arg) found = vc_script except BatchFileExecutionError as e: debug('msvc_find_valid_batch_script() use_script 3: failed running VC script %s: %s: Error:%s'%(repr(vc_script),arg,e)) vc_script=None continue if not vc_script and sdk_script: debug('msvc_find_valid_batch_script() use_script 4: trying sdk script: %s'%(sdk_script)) try: d = script_env(sdk_script) found = sdk_script except BatchFileExecutionError as e: debug('msvc_find_valid_batch_script() use_script 5: failed running SDK script %s: Error:%s'%(repr(sdk_script),e)) continue elif not vc_script 
and not sdk_script: debug('msvc_find_valid_batch_script() use_script 6: Neither VC script nor SDK script found') continue debug("msvc_find_valid_batch_script() Found a working script/target: %s/%s"%(repr(found),arg)) break # We've found a working target_platform, so stop looking # If we cannot find a viable installed compiler, reset the TARGET_ARCH # To it's initial value if not d: env['TARGET_ARCH']=req_target_platform return d def msvc_setup_env(env): debug('msvc_setup_env()') version = get_default_version(env) if version is None: warn_msg = "No version of Visual Studio compiler found - C/C++ " \ "compilers most likely not set correctly" # Nuitka: Useless warning for us. # SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, warn_msg) return None debug('msvc_setup_env: using specified MSVC version %s' % repr(version)) # XXX: we set-up both MSVS version for backward # compatibility with the msvs tool env['MSVC_VERSION'] = version env['MSVS_VERSION'] = version env['MSVS'] = {} use_script = env.get('MSVC_USE_SCRIPT', True) if SCons.Util.is_String(use_script): debug('msvc_setup_env() use_script 1 %s' % repr(use_script)) d = script_env(use_script) elif use_script: d = msvc_find_valid_batch_script(env,version) debug('msvc_setup_env() use_script 2 %s' % d) if not d: return d else: debug('MSVC_USE_SCRIPT set to False') warn_msg = "MSVC_USE_SCRIPT set to False, assuming environment " \ "set correctly." # Nuitka: We use this on purpose. # SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, warn_msg) return None for k, v in d.items(): # Nuitka: Make the Windows SDK version visible in environment. if k == "WindowsSDKVersion": # Always just a single version if any. 
if len(v) == 1: env["WindowsSDKVersion"] = v[0].rstrip('\\') elif len(v) == 0: env["WindowsSDKVersion"] = None else: assert False, v continue debug('msvc_setup_env() env:%s -> %s'%(k,v)) env.PrependENVPath(k, v, delete_existing=True) # final check to issue a warning if the compiler is not present msvc_cl = find_program_path(env, 'cl') if not msvc_cl: SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, "Could not find MSVC compiler 'cl', it may need to be installed separately with Visual Studio") def msvc_exists(env=None, version=None): vcs = cached_get_installed_vcs(env) if version is None: return len(vcs) > 0 return version in vcs
kayhayen/Nuitka
nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Tool/MSCommon/vc.py
Python
apache-2.0
33,537
from zope.interface import classImplements
from pyramid.config import Configurator

from clld.interfaces import ILanguage, IMapMarker, IValueSet, IValue
from clld.web.app import MapMarker
from clld.db.models.common import Parameter_files

# we must make sure custom models are known at database initialization!
from tsammalex import models
from tsammalex.interfaces import IEcoregion, IImage

# associate Parameter_files with the IImage interface to make the model work as resource.
classImplements(Parameter_files, IImage)

# No-op translation hook: the calls below do nothing at runtime, but they
# register these resource labels for message extraction — presumably so the
# clld i18n machinery picks up custom names (TODO confirm against clld docs).
_ = lambda s: s
_('Parameter')
_('Parameters')
_('Source')
_('Sources')
_('Value')
_('Values')


class TsammalexMapMarker(MapMarker):
    """Map marker that colors its icon by the Lineage of the displayed object."""

    def get_color(self, ctx, req):
        """Return the hex color (without '#') for *ctx*, or 'ff6600' as fallback.

        *ctx* may be a Language, ValueSet, Value, a (obj, ...) tuple/list
        (only the first element is inspected), or a Lineage name as a string
        (looked up in the database).
        """
        lineage = None
        # Tuples/lists wrap the object of interest in their first slot.
        if ctx and isinstance(ctx, (tuple, list)):
            ctx = ctx[0]
        if ILanguage.providedBy(ctx):
            lineage = ctx.lineage
        elif IValueSet.providedBy(ctx):
            lineage = ctx.language.lineage
        elif IValue.providedBy(ctx):
            lineage = ctx.valueset.language.lineage
        if isinstance(ctx, str):
            # A bare string is interpreted as a Lineage name; .one() will
            # raise if no (or more than one) matching row exists.
            lineage = req.db.query(models.Lineage)\
                .filter(models.Lineage.name == ctx).one()
        # Fall back to a fixed orange when no lineage could be determined.
        return lineage.color if lineage else 'ff6600'

    def __call__(self, ctx, req):
        # Markers are served as static PNGs named after the hex color.
        return req.static_url('tsammalex:static/icons/%s.png' % self.get_color(ctx, req))


def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application. """
    config = Configurator(settings=settings)
    config.include('clldmpg')
    # Override the default clld map marker with the lineage-colored one.
    config.registry.registerUtility(TsammalexMapMarker(), IMapMarker)
    config.registry.settings['home_comp'].append('contributors')
    config.register_menu(
        ('dataset', dict(label='Home')),
        'values',
        'languages',
        'parameters',
        'ecoregions',
        'sources',
        'images',
        #('contributors', dict(label='Contribute'))
        ('contribute', lambda ctx, req: (req.route_url('help'), 'Contribute!'))
    )
    config.register_resource('ecoregion', models.Ecoregion, IEcoregion, with_index=True)
    # Parameter_files acts as the "image" resource via the classImplements
    # call at module import time.
    config.register_resource('image', Parameter_files, IImage, with_index=True)
    return config.make_wsgi_app()
clld/tsammalex
tsammalex/__init__.py
Python
apache-2.0
2,206
from hashlib import sha256

from .etl import ETL

from kombu.mixins import ConsumerMixin
from kombu import Connection

import traceback
import Queue
import json
import time
import pytz
from datetime import datetime
from tzlocal import get_localzone
import socket
import logging
import os


class KnownHosts(object):
    """Bidirectional hostname<->IP cache seeded from an /etc/hosts file."""

    HOST_FILE = "/etc/hosts"

    def __init__(self, filename=HOST_FILE):
        self.filename = filename
        try:
            os.stat(self.filename)
        except:
            raise
        self.mapping = self.read_hosts_file(filename)

    @classmethod
    def read_hosts_file(cls, filename):
        """Parse a hosts(5)-style file into a single dict mapping both
        name -> ip and ip -> name (later lines win on duplicates)."""
        mapping = {}
        for line in open(filename).readlines():
            if line.strip() == '':
                continue
            elif line.strip().find('#') == 0:
                continue
            elif len(line.split()) < 2:
                continue
            l = line.strip()
            ip = l.split()[0]
            host_names = l.split()[1:]
            if len(host_names) == 0:
                continue
            # FIXME this means the expected mapping[ip] = host
            # may not be right
            ip_host_mappings = [(ip, h) for h in host_names]
            for ip, host in ip_host_mappings:
                mapping[host.strip()] = ip.strip()
                mapping[ip.strip()] = host.strip()
        return mapping

    def is_ip(self, ip):
        """Return True if *ip* looks like a dotted-quad IPv4 address."""
        # FIXME track down a regex and use that
        d = ip.split('.')
        # Bug fix: an IPv4 dotted quad has 4 octets, not 3.  The old
        # ``len(d) != 3`` check made this method return False for every
        # valid address.
        if len(d) != 4:
            return False
        if not all([i.isdigit() for i in d]):
            return False
        if not all([int(i, 10) >= 0 for i in d]):
            return False
        if not all([int(i, 10) <= 255 for i in d]):
            return False
        return True

    def resolve_host(self, ip_host):
        """Resolve *ip_host* (name or address) via the cache, falling back
        to a reverse DNS lookup; on any failure return the input unchanged."""
        # Bug fix: the mapping stores both directions (name->ip and
        # ip->name), so the cache should be consulted unconditionally; the
        # old ``and not self.is_ip(ip_host)`` clause only "worked" because
        # is_ip() was itself broken and always returned False.
        if ip_host in self.mapping:
            return self.mapping[ip_host]
        name = ip_host
        try:
            # Bug fix: socket.gethostbyname() returns a single string, so
            # the original 3-tuple unpacking always raised ValueError and
            # resolution silently never happened.  gethostbyaddr() returns
            # the (hostname, aliases, addresses) triple this code expects.
            name, _, _ = socket.gethostbyaddr(ip_host)
            self.mapping[ip_host] = name
            self.mapping[name] = ip_host
        except:
            # Best-effort: cache the identity mapping so we don't retry.
            name = ip_host
            self.mapping[ip_host] = name
        return name


class HitterService(ConsumerMixin):
    """Drain syslog messages from a kombu/redis queue, run them through the
    ETL backend, and forward the enriched JSON to mongo and/or logstash."""

    NAME = 'processor'
    BROKER_URI = "redis://127.0.0.1:6379"
    BROKER_QUEUE = "mystified-catcher"
    KNOWN_HOSTS = KnownHosts()
    LOGSTASH_QUEUE = "logstash-results"
    # RFC 5424 severity numbers -> human readable names.
    SYSLOG_MSG_TYPE = {
        0: "EMERGENCY",
        1: "ALERT",
        2: "CRITICAL",
        3: "ERROR",
        4: "WARNING",
        5: "NOTICE",
        6: "INFORMATIONAL",
        7: "DEBUG",
    }
    MY_TZ = os.environ.get('CATCHER_TZ', 'NOT_SET')
    TZ_INFO = pytz.timezone(MY_TZ) if MY_TZ != 'NOT_SET' else None

    def __init__(self, broker_uri=BROKER_URI, broker_queue=BROKER_QUEUE,
                 hosts_file=None, mongo_backend=None, etl_backend=ETL,
                 msg_limit=100,  # leaving it open to use kombu to buffer messages
                 store_uri=BROKER_URI, store_queue=LOGSTASH_QUEUE):
        if hosts_file is not None:
            self.KNOWN_HOSTS = KnownHosts(filename=hosts_file)
        self.broker_uri = broker_uri
        self.broker_queue = broker_queue
        self.store_uri = store_uri
        self.store_queue = store_queue
        self.mongo_backend = mongo_backend
        self.etl_backend = etl_backend
        self.keep_running = False
        self.msg_limit = msg_limit

    @classmethod
    def split_alert_message(cls, data):
        """Split a raw syslog line into its "<pri>" tag and the message body.

        Returns ('', data) when no well-formed priority prefix is present."""
        t = ''
        msg = data
        end = data.find('>')
        start = data.find('<')
        if len(data) < end + 1:
            return '', msg
        if start == 0 and end > 0 and end < 10:
            t = data[start + 1:end]
            if not t.isdigit():
                return '', data
            else:
                msg = data[end + 1:]
        return t, msg

    @classmethod
    def calculate_msg_type(cls, data):
        """Map the syslog <pri> prefix to a severity name, or "UNKNOWN"."""
        t, msg = cls.split_alert_message(data)
        if len(t) == 0:
            return "UNKNOWN"
        v = int(t, 10)
        if v > 7:
            # pri encodes facility*8 + severity; keep only the severity bits.
            v &= 0x7
        return cls.SYSLOG_MSG_TYPE[v]

    @classmethod
    def format_timestamp(cls, tstamp):
        """Render *tstamp* as an ISO-8601 string; convert to UTC ("...Z")
        when CATCHER_TZ configured a local timezone."""
        if cls.TZ_INFO is not None:
            local_tz = cls.TZ_INFO.localize(tstamp, is_dst=None)
            utc_tz = local_tz.astimezone(pytz.utc)
            return str(utc_tz.strftime("%Y-%m-%dT%H:%M:%S") +
                       ".%03d" % (tstamp.microsecond / 1000) + "Z")
        return str(tstamp.strftime("%Y-%m-%dT%H:%M:%S") +
                   ".%03d" % (tstamp.microsecond / 1000))

    @classmethod
    def get_base_json(cls, syslog_msg, syslog_server_ip, catcher_name,
                      catcher_host, catcher_tz):
        """Build the logstash-style base document for one syslog message."""
        r = {'source': "syslog",
             'raw': syslog_msg,
             'type': 'json',
             # Content-addressed id so duplicate messages dedupe downstream.
             '_id': sha256(syslog_msg).hexdigest(),
             '@timestamp': cls.format_timestamp(datetime.now()),
             '@version': "1",
             'message': "transformed syslog",
             'path': '',
             'tags': [],
             'catcher_tz': catcher_tz,
             'catcher_host': catcher_host,
             'catcher_name': catcher_name}
        t, msg = cls.split_alert_message(syslog_msg)
        r['syslog_level'] = cls.calculate_msg_type(syslog_msg)
        r['syslog_msg'] = msg
        r['syslog_tag'] = t
        r['syslog_server'] = cls.resolve_host(syslog_server_ip)
        r['syslog_server_ip'] = syslog_server_ip
        r['syslog_catcher'] = catcher_name
        return r

    @classmethod
    def resolve_host(cls, ip_host):
        # Thin delegation to the shared KnownHosts cache.
        return cls.KNOWN_HOSTS.resolve_host(ip_host)

    def process_message(self, syslog_msg, syslog_server_ip, catcher_name,
                        catcher_host, catcher_tz):
        """Run one message through the ETL backend and merge its results
        into the base document; ETL failures are logged, never raised."""
        m = "Extracting and converting msg from %s msg (syslog: %s)" % (
            syslog_server_ip, catcher_name)
        logging.debug(m)
        r = self.get_base_json(syslog_msg, syslog_server_ip, catcher_name,
                               catcher_host, catcher_tz)
        sm = {}
        try:
            result = self.etl_backend.syslog_et(syslog_msg)
            sm.update(result.get('rule_results', result))
            if 'rule_name' in result:
                sm['rule_name'] = result.get('rule_name')
            sm['tags'] = []
            if sm.get('syslog_level', None) is not None:
                sm['tags'].append(sm['syslog_level'])
            if sm.get('rule_name', None) is not None:
                sm['tags'].append(sm['rule_name'])
        except:
            tb = traceback.format_exc()
            logging.debug("[XXX] Error: " + tb)
        r.update(sm)
        return r

    def extract_message_components(self, msg_dict):
        """Pull the catcher fields out of an incoming dict and process it."""
        syslog_msg = msg_dict.get('syslog_msg', '')
        syslog_server_ip = msg_dict.get('syslog_server_ip', '')
        catcher_host = msg_dict.get('catcher_host', '')
        catcher_name = msg_dict.get('catcher_name', '')
        catcher_tz = msg_dict.get('catcher_tz', str(get_localzone()))
        return self.process_message(syslog_msg, syslog_server_ip,
                                    catcher_name, catcher_host, catcher_tz)

    def process_and_report(self, incoming_msg):
        """ETL one queue payload (JSON string or dict) and store the result."""
        logging.debug("Processing and report syslog_msg")
        message = incoming_msg
        if isinstance(incoming_msg, str):
            try:
                message = json.loads(incoming_msg)
            except:
                message = {}
                tb = traceback.format_exc()
                logging.debug("[XXX] Error: " + tb)
                raise
        etl_data = self.extract_message_components(message)
        syslog_msg = etl_data['raw']
        self.store_results(syslog_msg, etl_data)
        return etl_data

    def _read_messages(self, uri, queue, callback=None, cnt=1):
        """Drain up to *cnt* messages (all when cnt < 1) from *queue*,
        acking each only after *callback* succeeded."""
        msgs = []
        read_all = False
        if cnt < 1:
            read_all = True
        try:
            logging.debug("Reading the messages")
            with Connection(uri) as conn:
                q = conn.SimpleQueue(queue)
                while cnt > 0 or read_all:
                    cnt += -1
                    try:
                        message = q.get(block=False)
                        if callback is not None:
                            data = callback(message.payload)
                            msgs.append(data)
                            logging.debug("made it here 2")
                            logging.debug(data)
                        message.ack()
                    except Queue.Empty:
                        logging.debug("%s queue is empty" % queue)
                        break
                    except:
                        tb = traceback.format_exc()
                        logging.debug("[XXX] Error: " + tb)
            logging.debug("Successfully read %d messages" % len(msgs))
        except:
            tb = traceback.format_exc()
            logging.debug("[XXX] Error: " + tb)
            logging.debug("Failed to read message")
        return msgs

    def store_mongo(self, syslog_msg, etl_data):
        """Persist raw and processed forms via the mongo backend (if any)."""
        if self.mongo_backend is not None:
            m = "Sending results to mongo"
            logging.debug(m)
            raw_insert, json_insert = self.mongo_backend.insert(
                syslog_msg, etl_data)
            if not raw_insert:
                logging.debug("Failed to insert the raw syslog information in mongo")
            if not json_insert:
                logging.debug("Failed to insert the processed syslog information in mongo")

    def store_kombu(self, etl_data):
        """Best-effort push of the processed document onto the logstash queue."""
        logging.debug("Storing message in logstash queue")
        try:
            with Connection(self.store_uri) as conn:
                q = conn.SimpleQueue(self.store_queue)
                q.put(etl_data)
                q.close()
            logging.debug("Storing message in logstash success")
        except:
            tb = traceback.format_exc()
            logging.debug("[XXX] Error: " + tb)
            logging.debug("Storing message in logstash queue failed")

    def store_results(self, syslog_msg, etl_data):
        # Fan out to both sinks; each handles its own failures.
        self.store_mongo(syslog_msg, etl_data)
        self.store_kombu(etl_data)

    def read_messages(self):
        """Process up to msg_limit queued messages through the full pipeline."""
        msgs = self._read_messages(self.broker_uri, self.broker_queue,
                                   cnt=self.msg_limit,
                                   callback=self.process_and_report)
        return msgs

    def serve_forever(self, poll_interval=1.0):
        """Poll the broker queue until Ctrl-C or keep_running is cleared."""
        self.keep_running = True
        while self.keep_running:
            try:
                self.read_messages()
                time.sleep(poll_interval)
            except KeyboardInterrupt:
                break
src/slow/hitter.py
Python
apache-2.0
10,877
# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.openstack.api_version_request \ import MAX_IMAGE_META_PROXY_API_VERSION from nova.api.openstack.api_version_request \ import MAX_PROXY_API_SUPPORT_VERSION from nova.api.openstack.api_version_request \ import MIN_WITHOUT_IMAGE_META_PROXY_API_VERSION from nova.api.openstack.api_version_request \ import MIN_WITHOUT_PROXY_API_SUPPORT_VERSION from nova.api.openstack.compute.schemas import limits from nova.api.openstack.compute.views import limits as limits_views from nova.api.openstack import wsgi from nova.api import validation from nova.policies import limits as limits_policies from nova import quota QUOTAS = quota.QUOTAS # This is a list of limits which needs to filter out from the API response. # This is due to the deprecation of network related proxy APIs, the related # limit should be removed from the API also. 
# Limits removed in microversion 2.36 along with the network proxy APIs.
FILTERED_LIMITS_2_36 = ['floating_ips', 'security_groups',
                        'security_group_rules']

# Microversion 2.57 additionally drops the personality-file limits.
FILTERED_LIMITS_2_57 = list(FILTERED_LIMITS_2_36)
FILTERED_LIMITS_2_57.extend(['injected_files', 'injected_file_content_bytes'])


class LimitsController(wsgi.Controller):
    """Controller for accessing limits in the OpenStack API.

    ``index`` is intentionally defined four times: the ``api_version``
    decorator selects the implementation matching the request's
    microversion, and the ``# noqa`` markers suppress the redefinition
    lint.  Each newer range filters more deprecated limits out of the
    response and, from 2.57, drops the image-metadata limit as well.
    """

    # 2.1 .. MAX_PROXY_API_SUPPORT_VERSION: full, unfiltered limit set.
    @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
    @wsgi.expected_errors(())
    @validation.query_schema(limits.limits_query_schema)
    def index(self, req):
        return self._index(req)

    # 2.36 .. MAX_IMAGE_META_PROXY_API_VERSION: hide network-proxy limits.
    @wsgi.Controller.api_version(MIN_WITHOUT_PROXY_API_SUPPORT_VERSION,  # noqa
                                 MAX_IMAGE_META_PROXY_API_VERSION)  # noqa
    @wsgi.expected_errors(())
    @validation.query_schema(limits.limits_query_schema)
    def index(self, req):
        return self._index(req, FILTERED_LIMITS_2_36)

    # .. 2.56: additionally drop the maxImageMeta field.
    @wsgi.Controller.api_version(  # noqa
        MIN_WITHOUT_IMAGE_META_PROXY_API_VERSION, '2.56')  # noqa
    @wsgi.expected_errors(())
    @validation.query_schema(limits.limits_query_schema)
    def index(self, req):
        return self._index(req, FILTERED_LIMITS_2_36, max_image_meta=False)

    # 2.57+: also hide injected-file limits; 2.75 tightens the query schema.
    @wsgi.Controller.api_version('2.57')  # noqa
    @wsgi.expected_errors(())
    @validation.query_schema(limits.limits_query_schema_275, '2.75')
    @validation.query_schema(limits.limits_query_schema, '2.57', '2.74')
    def index(self, req):
        return self._index(req, FILTERED_LIMITS_2_57, max_image_meta=False)

    def _index(self, req, filtered_limits=None, max_image_meta=True):
        """Return all global limit information."""
        context = req.environ['nova.context']
        context.can(limits_policies.BASE_POLICY_NAME)
        # ``tenant_id`` lets an admin ask for another project's limits;
        # defaults to the caller's own project.
        project_id = req.params.get('tenant_id', context.project_id)
        quotas = QUOTAS.get_project_quotas(context, project_id,
                                           usages=True)
        builder = limits_views.ViewBuilder()
        return builder.build(req, quotas, filtered_limits=filtered_limits,
                             max_image_meta=max_image_meta)
rahulunair/nova
nova/api/openstack/compute/limits.py
Python
apache-2.0
3,590
''' Import command for Winthrop team's spreadsheet. It can be invoked using:: python manage.py import_nysql [--justsammel] /path/to/csv The ``--justsammel`` flag skips import of records to avoid reproducing duplicates, but rebuilds the ``is_sammelband`` flag set and produces an output list. The expect behavior is designed for a once-off import and will produce duplicate book entries (but not duplicates of any entries created as part of book creation). All persons created attempt to have a VIAF uri associated and all places have a Geonames ID assigned if possible. ''' from collections import defaultdict from itertools import chain import csv import re from django.core.management.base import BaseCommand, CommandError from winthrop.books.models import Book, Publisher, OwningInstitution, \ Catalogue from winthrop.people.models import Person from winthrop.people.viaf import ViafAPI from winthrop.places.models import Place from winthrop.places.geonames import GeoNamesAPI class Command(BaseCommand): '''Import NYSL book data into the database from a CSV file''' help = __doc__ #: mapping of book model fields that can be filled in exactly as is #: from corresponding columns in the spreadsheet data fields_exact = { 'title': 'Title', 'short_title': 'Short Title', 'red_catalog_number': 'RED catalogue number at the front', 'ink_catalog_number': 'INK catalogue number at the front', 'pencil_catalog_number': 'PENCIL catalogue number at the front', 'original_pub_info': 'PUB INFO - Original', 'notes': 'Notes' } #: fields that require cleanup, related model lookup, or other logic fields = { 'pub_year': 'Year of Publication', 'is_annotated': 'Annotated?', 'flagged_info': 'FLAGGED PAGES FOR REPRODUCTION', 'pub_place': 'Modern Place of Publication', 'publisher': 'Standardized Name of Publisher', # NYSL cataloguing information 'nysl_call_number': 'NYSL CALL NUMBER', 'nysl_notes': 'NYSL -- NOTES' } # creator type and corresponding column in the spreadsheet creators = { 'Author': 'AUTHOR, 
Standarized', 'Translator': 'Translator', 'Editor': 'Editor', } # currently unused other_fields = [ 'Number of Pages', 'Type of Volume', 'Subject Tagging (separate with semicolons)', 'EDITION', 'Books with important relationships to this text (separate with semicolons)', 'NYSL DESCRIPTION', 'Other documents that demonstrate this relationship (separate with semicolon)', 'Provenance', 'Physical Size' ] def add_arguments(self, parser): parser.add_argument('input_file') parser.add_argument( '--justsammel', action='store_true', dest='just_sammel', default=False, help='Just make sammelband connections' ) def handle(self, *args, **kwargs): input_file = kwargs['input_file'] self.mocking = False # TODO: create fixture for NYSL & NYC ? # all books will be catalogued with NYSL, so look for # owning instution object first # (no need to check because NYSL is preloaded by migrations) self.nysl = OwningInstitution.objects.get(short_name='NYSL') self.stats = defaultdict(int) if not kwargs['just_sammel']: with open(input_file) as csvfile: csvreader = csv.DictReader(csvfile) # each row in the CSV corresponds to a book record for row in csvreader: try: self.create_book(row) except Exception as err: print('Error on import for %s: %s' % (row['Short Title'][:30], err)) self.stats['err'] += 1 # summarize what content was imported/created self.stdout.write('''Imported content: %(book)d books %(place)d places %(person)d people %(publisher)d publishers %(err)d errors''' % self.stats) # Now look for is_sammelband and set the flag self.build_sammelband() def viaf_lookup(self, name): viaf = ViafAPI() viafid = None results = viaf.suggest(name) # Handle no results if results: # Check for a 'nametype' and make sure it's personal if 'nametype' in results[0]: if results[0]['nametype'] == 'personal': viafid = viaf.uri_from_id(results[0]['viafid']) return viafid def geonames_lookup(self, place_name): '''Function to wrap a GeoNames lookup and assign info. 
Returns a dict for Place generator or None''' geo = GeoNamesAPI() # Get the top hit and presume the API guessed correctly result = geo.search(place_name, max_rows=1) place_dict = {} if result: place_dict['latitude'] = float(result[0]['lat']) place_dict['longitude'] = float(result[0]['lng']) place_dict['geonames_id'] = geo.uri_from_id(result[0]['geonameId']) return place_dict else: return None def create_book(self, data): # create a new book and all related models from # a row of data in the spreadsheet # nysl books, therefore assuming all are extant newbook = Book(is_extant=True) # set fields that can be mapped directly from the spreadsheet # aside from removing periods for model_field, csv_field in self.fields_exact.items(): value = data[csv_field] # special case: some of the catalog numbers have # been entered as "NA" in the spreadsheet; skip those if model_field.endswith('catalog_number') and \ value == 'NA': continue # special case: some books are missing a short title # supply those with first three words of title if model_field == 'short_title' and not value: words = data['Title'].strip('. ').split() value = (' '.join(words[0:3])).strip('.') # special case: strip periods for title and short_title if model_field == 'title': value = data['Title'].strip('. ') setattr(newbook, model_field, value) # handle book fields that require some logic # - publication year might have brackets, e.g. 
[1566], # but model stores it as an integer stripped_spaces_only = data[self.fields['pub_year']].strip() pub_year = data[self.fields['pub_year']].strip('[]?.nd ') if re.search(r'-|i\.e\.', pub_year): if newbook.notes: newbook.notes += '\n\nAdditional Publication Year Info: %s' %\ stripped_spaces_only else: newbook.notes = 'Additional Publication Year Info: %s' %\ stripped_spaces_only pub_year = (re.match(r'\d+?(?=\D)', pub_year)).group(0) if pub_year: newbook.pub_year = pub_year # - is annotated; spreadsheet has variants in upper/lower case # and trailing periods; in some cases there are notes; # for now, assuming that anything ambiguous should be false here annotated = data[self.fields['is_annotated']].lower().strip('. ') newbook.is_annotated = (annotated == 'yes') # - flagged_info; pull info for flagged pages and add if it exists if annotated == 'yes': flagged_info = data[self.fields['flagged_info']].strip() if flagged_info: if newbook.notes: newbook.notes += '\n\nReproduction Recommendation: %s' %\ flagged_info else: newbook.notes = 'Reproduction Recommendation: %s' %\ flagged_info # add required relationships before saving the new book # - place placename = data[self.fields['pub_place']].strip(' ?[]()') if placename and len((re.sub(r'[.,]', '', placename))) < 3: placename = None if placename: try: place = Place.objects.get(name=placename) except Place.DoesNotExist: place_dict = self.geonames_lookup(placename) if place_dict: place = Place.objects.create(name=placename, **place_dict) else: place = Place.objects.create( name=placename, latitude=0.0, longitude=0.0, ) self.stats['place'] += 1 newbook.pub_place = place # - publisher publisher_name = data[self.fields['publisher']].strip("?. 
") # Catch np/sn if publisher_name and len(publisher_name) < 4: publisher_name = None if publisher_name: try: publisher = Publisher.objects.get(name=publisher_name) except Publisher.DoesNotExist: publisher = Publisher.objects.create(name=publisher_name) self.stats['publisher'] += 1 newbook.publisher = publisher newbook.save() # TODO: do we need to handle multiple creators here? for creator_type, csv_field in self.creators.items(): # name could be empty (e.g. for translator, editor) name = data[csv_field] # Get rid of any last stray periods, if they exist name = name.strip('?. []') # Get various versions of 'Not sure' and remove name if they exist if re.search(r'[Vv]arious|[A|a]nonymous|[N|n]one [G|g]iven', name): name = None # Use four characters as a dumb filter to toss stray 'np'/'sn' if name and len(name) <= 4: name = None if name: try: person = Person.objects.get(authorized_name=name) except Person.DoesNotExist: viafid = self.viaf_lookup(name) person = Person.objects.create(authorized_name=name, viaf_id=viafid) self.stats['person'] += 1 newbook.add_creator(person, creator_type) # catalogue as a current NYSL book Catalogue.objects.create(institution=self.nysl, book=newbook, is_current=True, call_number=data[self.fields['nysl_call_number']], notes=data[self.fields['nysl_notes']]) self.stats['book'] += 1 def build_sammelband(self): '''Create sammelband flag for books with same/similar NYSL catalog numbers''' # All the catalogues just created catalogue_set = Catalogue.objects.all() # Call number list, not yet made unique call_nos = [] self.stdout.write('Now checking for bound volumes:') for catalogue in catalogue_set: # Remove letters that obscure sammelbands call_search = (catalogue.call_number).strip('abcdefgh') match_count = 0 for entry in catalogue_set: search_re = re.compile(r'%s$' % call_search) if re.match(search_re, (entry.call_number).strip('abcdefgh')): match_count += 1 # If match happened more than once, assume sammelband if match_count > 1: 
call_nos.append(catalogue.call_number) catalogue.is_sammelband = True catalogue.save() # A sorted unique vol list sorted_vols = sorted(list(set(call_nos))) # Get a list of books that are associated with a sammelband entry cat_list = [] for number in sorted_vols: q = Catalogue.objects.filter(call_number=number) cat_list = chain(cat_list, q) self.stdout.write(' Number of call numbers that seem to have ' 'multiple bound titles: %s' % len(sorted_vols)) self.stdout.write('The following titles are marked as sammelband:') # Good old fashioned for-loop with iterator to build a list for the team i = 1 for cat in cat_list: self.stdout.write(' %s. Short Title: %s - NYSL Call Number: %s' % (i, cat.book.short_title, cat.call_number)) i += 1
Princeton-CDH/winthrop-django
winthrop/books/management/commands/import_nysl.py
Python
apache-2.0
12,753
import base64

from cloudify.manager import get_rest_client
from cloudify.state import ctx_parameters as inputs

# Decode the base64-encoded token supplied as a workflow input and store
# it as a manager-wide secret so other deployments can look it up.
decoded_token = base64.b64decode(inputs['kube_token'])

rest_client = get_rest_client()
rest_client.secrets.create(
    'kubernetes_token',      # secret key consumers query by name
    decoded_token,
    update_if_exists=True,   # re-running the script refreshes the value
)
cloudify-cosmo/cloudify-azure-plugin
examples/aks_service/scripts/store_kube_token.py
Python
apache-2.0
259
# -*- coding: utf-8 -*-
# This file based on MIT licensed code at: https://github.com/imwilsonxu/fbone

from functools import wraps

from flask import abort
# Fix: the `flask.ext.*` import namespace was deprecated in Flask 0.11 and
# removed in Flask 1.0; extensions must be imported by their own package name.
from flask_login import current_user


def admin_required(f):
    """Decorator that aborts with HTTP 403 unless the current user is an admin.

    :param f: view function to protect.
    :returns: wrapped view that only invokes ``f`` for admin users.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # `current_user` is Flask-Login's proxy for the authenticated user;
        # reject non-admins before the view body runs.
        if not current_user.is_admin():
            abort(403)
        return f(*args, **kwargs)
    return decorated_function
achanda/refstack
refstack/decorators.py
Python
apache-2.0
409
#!/usr/bin/env python
"""Parse a clipped-alignment file and print its reads via ClipReads."""

import sys
from modules.ClipRead import *
import os

if __name__ == '__main__':
    from optparse import OptionParser

    usage = "usage: ./%prog [options] data_file"
    parser = OptionParser(usage=usage)
    parser.add_option("-v", "--verbose", action='store_true', default=False,
                      help="verbose output")
    parser.add_option("-t", "--tag", default='gtfar', type='string',
                      help="tag data")
    (options, args) = parser.parse_args()

    if len(args) == 1:
        try:
            # The file extension selects the parser mode (e.g. 'sam').
            file_type = args[0].split(".")[-1]
            # Fix: the original did `ClipReads = ClipReads(...)`, shadowing
            # the class with its own instance; the handle was also never
            # closed, so use a context manager.
            with open(args[0]) as file_handle:
                clip_reads = ClipReads(file_handle, file_type, options.tag)
                while clip_reads.fileOpen:
                    clip_reads.getNextRead()
                    clip_reads.printData()
        except IOError:
            # Unreadable input: exit quietly with status 0, preserving the
            # original best-effort behavior.
            sys.exit()
    else:
        parser.print_help()
        # Parenthesized single-arg print is valid on both Python 2 and 3.
        # Fix: the example previously named a different script
        # (parse_alignment.py) and a nonexistent -p option.
        print("")
        print("Example Usage: ./parse_clipped_alignment.py mymapping.sam -t gtfar")
        sys.exit(2)
pegasus-isi/pegasus-gtfar
bin/parse_clipped_alignment.py
Python
apache-2.0
994
# Copyright 2016 Dell Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import eventlet from oslo_config import cfg from oslo_config import types from oslo_log import log as logging from oslo_utils import excutils import six from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.objects import fields from cinder.volume import driver from cinder.volume.drivers.dell import dell_storagecenter_api from cinder.volume.drivers.san.san import san_opts from cinder.volume import volume_types common_opts = [ cfg.IntOpt('dell_sc_ssn', default=64702, help='Storage Center System Serial Number'), cfg.PortOpt('dell_sc_api_port', default=3033, help='Dell API port'), cfg.StrOpt('dell_sc_server_folder', default='openstack', help='Name of the server folder to use on the Storage Center'), cfg.StrOpt('dell_sc_volume_folder', default='openstack', help='Name of the volume folder to use on the Storage Center'), cfg.BoolOpt('dell_sc_verify_cert', default=False, help='Enable HTTPS SC certificate verification'), cfg.StrOpt('secondary_san_ip', default='', help='IP address of secondary DSM controller'), cfg.StrOpt('secondary_san_login', default='Admin', help='Secondary DSM user name'), cfg.StrOpt('secondary_san_password', default='', help='Secondary DSM user password name', secret=True), cfg.PortOpt('secondary_sc_api_port', default=3033, help='Secondary Dell API port'), cfg.MultiOpt('excluded_domain_ip', item_type=types.IPAddress(), default=None, help='Domain IP to be excluded 
from iSCSI returns.'), cfg.StrOpt('dell_server_os', default='Red Hat Linux 6.x', help='Server OS type to use when creating a new server on the ' 'Storage Center.') ] LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.register_opts(common_opts) class DellCommonDriver(driver.ManageableVD, driver.ManageableSnapshotsVD, driver.BaseVD): def __init__(self, *args, **kwargs): super(DellCommonDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(common_opts) self.configuration.append_config_values(san_opts) self.backend_name =\ self.configuration.safe_get('volume_backend_name') or 'Dell' self.backends = self.configuration.safe_get('replication_device') self.replication_enabled = True if self.backends else False self.is_direct_connect = False self.active_backend_id = kwargs.get('active_backend_id', None) self.failed_over = True if self.active_backend_id else False LOG.info(_LI('Loading %(name)s: Failover state is %(state)r'), {'name': self.backend_name, 'state': self.failed_over}) self.storage_protocol = 'iSCSI' self.failback_timeout = 60 def _bytes_to_gb(self, spacestring): """Space is returned in a string like ... 7.38197504E8 Bytes Need to split that apart and convert to GB. :returns: gbs in int form """ try: n = spacestring.split(' ', 1) fgbs = float(n[0]) / 1073741824.0 igbs = int(fgbs) return igbs except Exception: # If any of that blew up it isn't in the format we # thought so eat our error and return None return None def do_setup(self, context): """One time driver setup. Called once by the manager after the driver is loaded. Sets up clients, check licenses, sets up protocol specific helpers. 
""" self._client = dell_storagecenter_api.StorageCenterApiHelper( self.configuration, self.active_backend_id, self.storage_protocol) def check_for_setup_error(self): """Validates the configuration information.""" with self._client.open_connection() as api: api.find_sc() self.is_direct_connect = api.is_direct_connect if self.is_direct_connect and self.replication_enabled: msg = _('Dell Cinder driver configuration error replication ' 'not supported with direct connect.') raise exception.InvalidHost(reason=msg) # If we are a healthy replicated system make sure our backend # is alive. if self.replication_enabled and not self.failed_over: # Check that our replication destinations are available. for backend in self.backends: replssn = backend['target_device_id'] try: # Just do a find_sc on it. If it raises we catch # that and raise with a correct exception. api.find_sc(int(replssn)) except exception.VolumeBackendAPIException: msg = _('Dell Cinder driver configuration error ' 'replication_device %s not found') % replssn raise exception.InvalidHost(reason=msg) def _get_volume_extra_specs(self, obj): """Gets extra specs for the given object.""" type_id = obj.get('volume_type_id') if type_id: return volume_types.get_volume_type_extra_specs(type_id) return {} def _add_volume_to_consistency_group(self, api, scvolume, volume): """Just a helper to add a volume to a consistency group. :param api: Dell SC API opbject. :param scvolume: Dell SC Volume object. :param volume: Cinder Volume object. :returns: Nothing. """ if scvolume and volume.get('consistencygroup_id'): profile = api.find_replay_profile( volume.get('consistencygroup_id')) if profile: api.update_cg_volumes(profile, [volume]) def _get_replication_specs(self, specs): """Checks if we can do replication. Need the extra spec set and we have to be talking to EM. :param specs: Cinder Volume or snapshot extra specs. :return: rinfo dict. 
""" rinfo = {'enabled': False, 'sync': False, 'live': False, 'active': False, 'autofailover': False} # Repl does not work with direct connect. if not self.is_direct_connect: if (not self.failed_over and specs.get('replication_enabled') == '<is> True'): rinfo['enabled'] = True if specs.get('replication_type') == '<in> sync': rinfo['sync'] = True if specs.get('replication:livevolume') == '<is> True': rinfo['live'] = True if specs.get('replication:livevolume:autofailover') == '<is> True': rinfo['autofailover'] = True if specs.get('replication:activereplay') == '<is> True': rinfo['active'] = True # Some quick checks. if rinfo['enabled']: replication_target_count = len(self.backends) msg = None if replication_target_count == 0: msg = _( 'Replication setup failure: replication has been ' 'enabled but no replication target has been specified ' 'for this backend.') if rinfo['live'] and replication_target_count != 1: msg = _('Replication setup failure: replication:livevolume' ' has been enabled but more than one replication ' 'target has been specified for this backend.') if msg: LOG.debug(msg) raise exception.ReplicationError(message=msg) # Got this far. Life is good. Return our data. return rinfo def _is_live_vol(self, obj): rspecs = self._get_replication_specs(self._get_volume_extra_specs(obj)) return rspecs['enabled'] and rspecs['live'] def _create_replications(self, api, volume, scvolume, extra_specs=None): """Creates any appropriate replications for a given volume. :param api: Dell REST API object. :param volume: Cinder volume object. :param scvolume: Dell Storage Center Volume object. :param extra_specs: Extra specs if we have them otherwise gets them from the volume. :return: model_update """ # Replication V2 # for now we assume we have an array named backends. replication_driver_data = None # Replicate if we are supposed to. 
if not extra_specs: extra_specs = self._get_volume_extra_specs(volume) rspecs = self._get_replication_specs(extra_specs) if rspecs['enabled']: for backend in self.backends: targetdeviceid = backend['target_device_id'] primaryqos = backend.get('qosnode', 'cinderqos') secondaryqos = backend.get('remoteqos', 'cinderqos') diskfolder = backend.get('diskfolder', None) obj = None if rspecs['live']: # We are rolling with a live volume. obj = api.create_live_volume(scvolume, targetdeviceid, rspecs['active'], rspecs['sync'], rspecs['autofailover'], primaryqos, secondaryqos) else: # Else a regular replication. obj = api.create_replication(scvolume, targetdeviceid, primaryqos, rspecs['sync'], diskfolder, rspecs['active']) # This is either a ScReplication object or a ScLiveVolume # object. So long as it isn't None we are fine. if not obj: # Create replication will have printed a better error. msg = _('Replication %(name)s to %(ssn)s failed.') % { 'name': volume['id'], 'ssn': targetdeviceid} raise exception.VolumeBackendAPIException(data=msg) if not replication_driver_data: replication_driver_data = backend['target_device_id'] else: replication_driver_data += ',' replication_driver_data += backend['target_device_id'] # If we did something return model update. model_update = {} if replication_driver_data: model_update = { 'replication_status': fields.ReplicationStatus.ENABLED, 'replication_driver_data': replication_driver_data} return model_update @staticmethod def _cleanup_failed_create_volume(api, volumename): try: api.delete_volume(volumename) except exception.VolumeBackendAPIException as ex: LOG.info(_LI('Non fatal cleanup error: %s.'), ex.msg) def create_volume(self, volume): """Create a volume.""" model_update = {} # We use id as our name as it is unique. 
volume_name = volume.get('id') # Look for our volume volume_size = volume.get('size') LOG.debug('Creating volume %(name)s of size %(size)s', {'name': volume_name, 'size': volume_size}) scvolume = None with self._client.open_connection() as api: try: # Get our extra specs. specs = self._get_volume_extra_specs(volume) scvolume = api.create_volume( volume_name, volume_size, specs.get('storagetype:storageprofile'), specs.get('storagetype:replayprofiles'), specs.get('storagetype:volumeqos'), specs.get('storagetype:groupqos'), specs.get('storagetype:datareductionprofile')) if scvolume is None: raise exception.VolumeBackendAPIException( message=_('Unable to create volume %s') % volume_name) # Update Consistency Group self._add_volume_to_consistency_group(api, scvolume, volume) # Create replications. (Or not. It checks.) model_update = self._create_replications(api, volume, scvolume) # Save our provider_id. model_update['provider_id'] = scvolume['instanceId'] except Exception: # if we actually created a volume but failed elsewhere # clean up the volume now. self._cleanup_failed_create_volume(api, volume_name) with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to create volume %s'), volume_name) if scvolume is None: raise exception.VolumeBackendAPIException( data=_('Unable to create volume. Backend down.')) return model_update def _split_driver_data(self, replication_driver_data): """Splits the replication_driver_data into an array of ssn strings. :param replication_driver_data: A string of comma separated SSNs. :returns: SSNs in an array of strings. """ ssnstrings = [] # We have any replication_driver_data. if replication_driver_data: # Split the array and wiffle through the entries. for str in replication_driver_data.split(','): # Strip any junk from the string. ssnstring = str.strip() # Anything left? if ssnstring: # Add it to our array. 
ssnstrings.append(ssnstring) return ssnstrings def _delete_live_volume(self, api, volume): """Delete live volume associated with volume. :param api:Dell REST API object. :param volume: Cinder Volume object :return: True if we actually deleted something. False for everything else. """ # Live Volume was added after provider_id support. So just assume it is # there. replication_driver_data = volume.get('replication_driver_data') # Do we have any replication driver data? if replication_driver_data: # Valid replication data? ssnstrings = self._split_driver_data(replication_driver_data) if ssnstrings: ssn = int(ssnstrings[0]) sclivevolume = api.get_live_volume(volume.get('provider_id'), volume.get('id')) # Have we found the live volume? if (sclivevolume and sclivevolume.get('secondaryScSerialNumber') == ssn and api.delete_live_volume(sclivevolume, True)): LOG.info(_LI('%(vname)s\'s replication live volume has ' 'been deleted from storage Center %(sc)s,'), {'vname': volume.get('id'), 'sc': ssn}) return True # If we are here either we do not have a live volume, we do not have # one on our configured SC or we were not able to delete it. # Either way, warn and leave. LOG.warning(_LW('Unable to delete %s live volume.'), volume.get('id')) return False def _delete_replications(self, api, volume): """Delete replications associated with a given volume. We should be able to roll through the replication_driver_data list of SSNs and delete replication objects between them and the source volume. :param api: Dell REST API object. :param volume: Cinder Volume object :return: None """ replication_driver_data = volume.get('replication_driver_data') if replication_driver_data: ssnstrings = self._split_driver_data(replication_driver_data) volume_name = volume.get('id') provider_id = volume.get('provider_id') scvol = api.find_volume(volume_name, provider_id) # This is just a string of ssns separated by commas. # Trundle through these and delete them all. 
for ssnstring in ssnstrings: ssn = int(ssnstring) # Are we a replication or a live volume? if not api.delete_replication(scvol, ssn): LOG.warning(_LW('Unable to delete replication of Volume ' '%(vname)s to Storage Center %(sc)s.'), {'vname': volume_name, 'sc': ssnstring}) # If none of that worked or there was nothing to do doesn't matter. # Just move on. def delete_volume(self, volume): deleted = False # We use id as our name as it is unique. volume_name = volume.get('id') provider_id = volume.get('provider_id') # Unless we are migrating. if volume.get('migration_status') == 'deleting': volume_name = volume.get('_name_id') provider_id = None LOG.debug('Deleting volume %s', volume_name) with self._client.open_connection() as api: try: rspecs = self._get_replication_specs( self._get_volume_extra_specs(volume)) if rspecs['enabled']: if rspecs['live']: self._delete_live_volume(api, volume) else: self._delete_replications(api, volume) deleted = api.delete_volume(volume_name, provider_id) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to delete volume %s'), volume_name) # if there was an error we will have raised an # exception. If it failed to delete it is because # the conditions to delete a volume were not met. 
if deleted is False: raise exception.VolumeIsBusy(volume_name=volume_name) def create_snapshot(self, snapshot): """Create snapshot""" # our volume name is the volume id volume_name = snapshot.get('volume_id') provider_id = snapshot.volume.get('provider_id') snapshot_id = snapshot.get('id') LOG.debug('Creating snapshot %(snap)s on volume %(vol)s', {'snap': snapshot_id, 'vol': volume_name}) with self._client.open_connection() as api: scvolume = api.find_volume(volume_name, provider_id, self._is_live_vol(snapshot)) if scvolume is not None: replay = api.create_replay(scvolume, snapshot_id, 0) if replay: return {'status': fields.SnapshotStatus.AVAILABLE, 'provider_id': scvolume['instanceId']} else: LOG.warning(_LW('Unable to locate volume:%s'), volume_name) snapshot['status'] = fields.SnapshotStatus.ERROR msg = _('Failed to create snapshot %s') % snapshot_id raise exception.VolumeBackendAPIException(data=msg) def create_volume_from_snapshot(self, volume, snapshot): """Create new volume from other volume's snapshot on appliance.""" model_update = {} scvolume = None volume_name = volume.get('id') src_provider_id = snapshot.get('provider_id') src_volume_name = snapshot.get('volume_id') # This snapshot could have been created on its own or as part of a # cgsnapshot. If it was a cgsnapshot it will be identified on the Dell # backend under cgsnapshot_id. Given the volume ID and the # cgsnapshot_id we can find the appropriate snapshot. # So first we look for cgsnapshot_id. If that is blank then it must # have been a normal snapshot which will be found under snapshot_id. 
snapshot_id = snapshot.get('cgsnapshot_id') if not snapshot_id: snapshot_id = snapshot.get('id') LOG.debug( 'Creating new volume %(vol)s from snapshot %(snap)s ' 'from vol %(src)s', {'vol': volume_name, 'snap': snapshot_id, 'src': src_volume_name}) with self._client.open_connection() as api: try: srcvol = api.find_volume(src_volume_name, src_provider_id) if srcvol is not None: replay = api.find_replay(srcvol, snapshot_id) if replay is not None: # See if we have any extra specs. specs = self._get_volume_extra_specs(volume) scvolume = api.create_view_volume( volume_name, replay, specs.get('storagetype:replayprofiles'), specs.get('storagetype:volumeqos'), specs.get('storagetype:groupqos'), specs.get('storagetype:datareductionprofile')) # Extend Volume if scvolume and (volume['size'] > snapshot["volume_size"]): LOG.debug('Resize the new volume to %s.', volume['size']) scvolume = api.expand_volume(scvolume, volume['size']) if scvolume is None: raise exception.VolumeBackendAPIException( message=_('Unable to create volume ' '%(name)s from %(snap)s.') % {'name': volume_name, 'snap': snapshot_id}) # Update Consistency Group self._add_volume_to_consistency_group(api, scvolume, volume) # Replicate if we are supposed to. model_update = self._create_replications(api, volume, scvolume) # Save our instanceid. model_update['provider_id'] = ( scvolume['instanceId']) except Exception: # Clean up after ourselves. 
self._cleanup_failed_create_volume(api, volume_name) with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to create volume %s'), volume_name) if scvolume is not None: LOG.debug('Volume %(vol)s created from %(snap)s', {'vol': volume_name, 'snap': snapshot_id}) else: msg = _('Failed to create volume %s') % volume_name raise exception.VolumeBackendAPIException(data=msg) return model_update def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" model_update = {} scvolume = None src_volume_name = src_vref.get('id') src_provider_id = src_vref.get('provider_id') volume_name = volume.get('id') LOG.debug('Creating cloned volume %(clone)s from volume %(vol)s', {'clone': volume_name, 'vol': src_volume_name}) with self._client.open_connection() as api: try: srcvol = api.find_volume(src_volume_name, src_provider_id) if srcvol is not None: # Get our specs. specs = self._get_volume_extra_specs(volume) # Create our volume scvolume = api.create_cloned_volume( volume_name, srcvol, specs.get('storagetype:storageprofile'), specs.get('storagetype:replayprofiles'), specs.get('storagetype:volumeqos'), specs.get('storagetype:groupqos'), specs.get('storagetype:datareductionprofile')) # Extend Volume if scvolume and volume['size'] > src_vref['size']: LOG.debug('Resize the volume to %s.', volume['size']) scvolume = api.expand_volume(scvolume, volume['size']) # If either of those didn't work we bail. if scvolume is None: raise exception.VolumeBackendAPIException( message=_('Unable to create volume ' '%(name)s from %(vol)s.') % {'name': volume_name, 'vol': src_volume_name}) # Update Consistency Group self._add_volume_to_consistency_group(api, scvolume, volume) # Replicate if we are supposed to. model_update = self._create_replications(api, volume, scvolume) # Save our provider_id. model_update['provider_id'] = scvolume['instanceId'] except Exception: # Clean up after ourselves. 
self._cleanup_failed_create_volume(api, volume_name) with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to create volume %s'), volume_name) if scvolume is not None: LOG.debug('Volume %(vol)s cloned from %(src)s', {'vol': volume_name, 'src': src_volume_name}) else: msg = _('Failed to create volume %s') % volume_name raise exception.VolumeBackendAPIException(data=msg) return model_update def delete_snapshot(self, snapshot): """delete_snapshot""" volume_name = snapshot.get('volume_id') snapshot_id = snapshot.get('id') provider_id = snapshot.get('provider_id') LOG.debug('Deleting snapshot %(snap)s from volume %(vol)s', {'snap': snapshot_id, 'vol': volume_name}) with self._client.open_connection() as api: scvolume = api.find_volume(volume_name, provider_id) if scvolume and api.delete_replay(scvolume, snapshot_id): return # if we are here things went poorly. snapshot['status'] = fields.SnapshotStatus.ERROR_DELETING msg = _('Failed to delete snapshot %s') % snapshot_id raise exception.VolumeBackendAPIException(data=msg) def create_export(self, context, volume, connector): """Create an export of a volume. The volume exists on creation and will be visible on initialize connection. So nothing to do here. """ pass def ensure_export(self, context, volume): """Ensure an export of a volume. Per the eqlx driver we just make sure that the volume actually exists where we think it does. 
""" scvolume = None volume_name = volume.get('id') provider_id = volume.get('provider_id') LOG.debug('Checking existence of volume %s', volume_name) with self._client.open_connection() as api: try: scvolume = api.find_volume(volume_name, provider_id, self._is_live_vol(volume)) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to ensure export of volume %s'), volume_name) if scvolume is None: msg = _('Unable to find volume %s') % volume_name raise exception.VolumeBackendAPIException(data=msg) def remove_export(self, context, volume): """Remove an export of a volume. We do nothing here to match the nothing we do in create export. Again we do everything in initialize and terminate connection. """ pass def extend_volume(self, volume, new_size): """Extend the size of the volume.""" volume_name = volume.get('id') provider_id = volume.get('provider_id') LOG.debug('Extending volume %(vol)s to %(size)s', {'vol': volume_name, 'size': new_size}) if volume is not None: with self._client.open_connection() as api: scvolume = api.find_volume(volume_name, provider_id) if api.expand_volume(scvolume, new_size) is not None: return # If we are here nothing good happened. msg = _('Unable to extend volume %s') % volume_name raise exception.VolumeBackendAPIException(data=msg) def get_volume_stats(self, refresh=False): """Get volume status. If 'refresh' is True, run update the stats first. """ if refresh: self._update_volume_stats() # Take this opportunity to report our failover state. if self.failed_over: LOG.debug('%(source)s has been failed over to %(dest)s', {'source': self.backend_name, 'dest': self.active_backend_id}) return self._stats def _update_volume_stats(self): """Retrieve stats info from volume group.""" with self._client.open_connection() as api: # Static stats. 
data = {} data['volume_backend_name'] = self.backend_name data['vendor_name'] = 'Dell' data['driver_version'] = self.VERSION data['storage_protocol'] = self.storage_protocol data['reserved_percentage'] = 0 data['consistencygroup_support'] = True data['thin_provisioning_support'] = True data['QoS_support'] = False data['replication_enabled'] = self.replication_enabled if self.replication_enabled: data['replication_type'] = ['async', 'sync'] data['replication_count'] = len(self.backends) replication_targets = [] # Trundle through our backends. for backend in self.backends: target_device_id = backend.get('target_device_id') if target_device_id: replication_targets.append(target_device_id) data['replication_targets'] = replication_targets # Get our capacity. storageusage = api.get_storage_usage() if storageusage: # Get actual stats. totalcapacity = storageusage.get('availableSpace') totalcapacitygb = self._bytes_to_gb(totalcapacity) data['total_capacity_gb'] = totalcapacitygb freespace = storageusage.get('freeSpace') freespacegb = self._bytes_to_gb(freespace) data['free_capacity_gb'] = freespacegb else: # Soldier on. Just return 0 for this iteration. LOG.error(_LE('Unable to retrieve volume stats.')) data['total_capacity_gb'] = 0 data['free_capacity_gb'] = 0 self._stats = data LOG.debug('Total cap %(total)s Free cap %(free)s', {'total': data['total_capacity_gb'], 'free': data['free_capacity_gb']}) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return model update for migrated volume. :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ # We use id as our volume name so we need to rename the backend # volume to the original volume name. 
original_volume_name = volume.get('id') current_name = new_volume.get('id') # We should have this. If we don't we'll set it below. provider_id = new_volume.get('provider_id') LOG.debug('update_migrated_volume: %(current)s to %(original)s', {'current': current_name, 'original': original_volume_name}) if original_volume_name: with self._client.open_connection() as api: # todo(tswanson): Delete old volume repliations/live volumes # todo(tswanson): delete old volume? scvolume = api.find_volume(current_name, provider_id) if (scvolume and api.rename_volume(scvolume, original_volume_name)): # Replicate if we are supposed to. model_update = self._create_replications(api, new_volume, scvolume) model_update['_name_id'] = None model_update['provider_id'] = scvolume['instanceId'] return model_update # The world was horrible to us so we should error and leave. LOG.error(_LE('Unable to rename the logical volume for volume: %s'), original_volume_name) return {'_name_id': new_volume['_name_id'] or new_volume['id']} def create_consistencygroup(self, context, group): """This creates a replay profile on the storage backend. :param context: the context of the caller. :param group: the dictionary of the consistency group to be created. :returns: Nothing on success. :raises: VolumeBackendAPIException """ gid = group['id'] with self._client.open_connection() as api: cgroup = api.create_replay_profile(gid) if cgroup: LOG.info(_LI('Created Consistency Group %s'), gid) return msg = _('Unable to create consistency group %s') % gid raise exception.VolumeBackendAPIException(data=msg) def delete_consistencygroup(self, context, group, volumes): """Delete the Dell SC profile associated with this consistency group. :param context: the context of the caller. :param group: the dictionary of the consistency group to be created. :returns: Updated model_update, volumes. 
""" gid = group['id'] with self._client.open_connection() as api: profile = api.find_replay_profile(gid) if profile: api.delete_replay_profile(profile) # If we are here because we found no profile that should be fine # as we are trying to delete it anyway. # Trundle through the list deleting the volumes. volume_updates = [] for volume in volumes: self.delete_volume(volume) volume_updates.append({'id': volume['id'], 'status': 'deleted'}) model_update = {'status': group['status']} return model_update, volume_updates def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): """Updates a consistency group. :param context: the context of the caller. :param group: the dictionary of the consistency group to be updated. :param add_volumes: a list of volume dictionaries to be added. :param remove_volumes: a list of volume dictionaries to be removed. :returns: model_update, add_volumes_update, remove_volumes_update model_update is a dictionary that the driver wants the manager to update upon a successful return. If None is returned, the manager will set the status to 'available'. add_volumes_update and remove_volumes_update are lists of dictionaries that the driver wants the manager to update upon a successful return. Note that each entry requires a {'id': xxx} so that the correct volume entry can be updated. If None is returned, the volume will remain its original status. Also note that you cannot directly assign add_volumes to add_volumes_update as add_volumes is a list of cinder.db.sqlalchemy.models.Volume objects and cannot be used for db update directly. Same with remove_volumes. If the driver throws an exception, the status of the group as well as those of the volumes to be added/removed will be set to 'error'. 
""" gid = group['id'] with self._client.open_connection() as api: profile = api.find_replay_profile(gid) if not profile: LOG.error(_LE('Cannot find Consistency Group %s'), gid) elif api.update_cg_volumes(profile, add_volumes, remove_volumes): LOG.info(_LI('Updated Consistency Group %s'), gid) # we need nothing updated above us so just return None. return None, None, None # Things did not go well so throw. msg = _('Unable to update consistency group %s') % gid raise exception.VolumeBackendAPIException(data=msg) def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Takes a snapshot of the consistency group. :param context: the context of the caller. :param cgsnapshot: Information about the snapshot to take. :param snapshots: List of snapshots for this cgsnapshot. :returns: Updated model_update, snapshots. :raises: VolumeBackendAPIException. """ cgid = cgsnapshot['consistencygroup_id'] snapshotid = cgsnapshot['id'] with self._client.open_connection() as api: profile = api.find_replay_profile(cgid) if profile: LOG.debug('profile %s replayid %s', profile, snapshotid) if api.snap_cg_replay(profile, snapshotid, 0): snapshot_updates = [] for snapshot in snapshots: snapshot_updates.append({ 'id': snapshot.id, 'status': fields.SnapshotStatus.AVAILABLE }) model_update = {'status': fields.SnapshotStatus.AVAILABLE} return model_update, snapshot_updates # That didn't go well. Tell them why. Then bomb out. LOG.error(_LE('Failed to snap Consistency Group %s'), cgid) else: LOG.error(_LE('Cannot find Consistency Group %s'), cgid) msg = _('Unable to snap Consistency Group %s') % cgid raise exception.VolumeBackendAPIException(data=msg) def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot. If profile isn't found return success. If failed to delete the replay (the snapshot) then raise an exception. :param context: the context of the caller. :param cgsnapshot: Information about the snapshot to delete. :returns: Updated model_update, snapshots. 
:raises: VolumeBackendAPIException. """ cgid = cgsnapshot['consistencygroup_id'] snapshotid = cgsnapshot['id'] with self._client.open_connection() as api: profile = api.find_replay_profile(cgid) if profile: LOG.info(_LI('Deleting snapshot %(ss)s from %(pro)s'), {'ss': snapshotid, 'pro': profile}) if not api.delete_cg_replay(profile, snapshotid): msg = (_('Unable to delete Consistency Group snapshot %s') % snapshotid) raise exception.VolumeBackendAPIException(data=msg) snapshot_updates = [] for snapshot in snapshots: snapshot_updates.append( {'id': snapshot['id'], 'status': fields.SnapshotStatus.DELETED}) model_update = {'status': fields.SnapshotStatus.DELETED} return model_update, snapshot_updates def manage_existing(self, volume, existing_ref): """Brings an existing backend storage object under Cinder management. existing_ref is passed straight through from the API request's manage_existing_ref value, and it is up to the driver how this should be interpreted. It should be sufficient to identify a storage object that the driver should somehow associate with the newly-created cinder volume structure. There are two ways to do this: 1. Rename the backend storage object so that it matches the, volume['name'] which is how drivers traditionally map between a cinder volume and the associated backend storage object. 2. Place some metadata on the volume, or somewhere in the backend, that allows other driver requests (e.g. delete, clone, attach, detach...) to locate the backend storage object when required. If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object, raise a ManageExistingInvalidReference exception. The volume may have a volume_type, and the driver can inspect that and compare against the properties of the referenced backend storage object. If they are incompatible, raise a ManageExistingVolumeTypeMismatch, specifying a reason for the failure. 
:param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume """ if existing_ref.get('source-name') or existing_ref.get('source-id'): with self._client.open_connection() as api: api.manage_existing(volume['id'], existing_ref) # Replicate if we are supposed to. volume_name = volume.get('id') provider_id = volume.get('provider_id') scvolume = api.find_volume(volume_name, provider_id) model_update = self._create_replications(api, volume, scvolume) if model_update: return model_update else: msg = _('Must specify source-name or source-id.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) # Only return a model_update if we have replication info to add. return None def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. When calculating the size, round up to the next GB. :param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume """ if existing_ref.get('source-name') or existing_ref.get('source-id'): with self._client.open_connection() as api: return api.get_unmanaged_volume_size(existing_ref) else: msg = _('Must specify source-name or source-id.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. For most drivers, this will not need to do anything. However, some drivers might use this call as an opportunity to clean up any Cinder-specific configuration that they have associated with the backend storage object. 
:param volume: Cinder volume to unmanage """ with self._client.open_connection() as api: volume_name = volume.get('id') provider_id = volume.get('provider_id') scvolume = api.find_volume(volume_name, provider_id) if scvolume: api.unmanage(scvolume) def _get_retype_spec(self, diff, volume_name, specname, spectype): """Helper function to get current and requested spec. :param diff: A difference dictionary. :param volume_name: The volume name we are working with. :param specname: The pretty name of the parameter. :param spectype: The actual spec string. :return: current, requested spec. :raises: VolumeBackendAPIException """ spec = (diff['extra_specs'].get(spectype)) if spec: if len(spec) != 2: msg = _('Unable to retype %(specname)s, expected to receive ' 'current and requested %(spectype)s values. Value ' 'received: %(spec)s') % {'specname': specname, 'spectype': spectype, 'spec': spec} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) current = spec[0] requested = spec[1] if current != requested: LOG.debug('Retyping volume %(vol)s to use %(specname)s ' '%(spec)s.', {'vol': volume_name, 'specname': specname, 'spec': requested}) return current, requested else: LOG.info(_LI('Retype was to same Storage Profile.')) return None, None def _retype_replication(self, api, volume, scvolume, new_type, diff): model_update = None ret = True # Replication. current, requested = ( self._get_retype_spec(diff, volume.get('id'), 'replication_enabled', 'replication_enabled')) # We only toggle at the repl level. if current != requested: # If we are changing to on... if requested == '<is> True': # We create our replication using our new type's extra specs. model_update = self._create_replications( api, volume, scvolume, new_type.get('extra_specs')) elif current == '<is> True': # If we are killing replication we have to see if we currently # have live volume enabled or not. 
if self._is_live_vol(volume): ret = self._delete_live_volume(api, volume) else: self._delete_replications(api, volume) model_update = {'replication_status': fields.ReplicationStatus.DISABLED, 'replication_driver_data': ''} # TODO(tswanson): Add support for changing replication options. return ret, model_update def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. Returns a boolean indicating whether the retype occurred. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities (Not Used). :returns: Boolean or Boolean, model_update tuple. """ LOG.info(_LI('retype: volume_name: %(name)s new_type: %(newtype)s ' 'diff: %(diff)s host: %(host)s'), {'name': volume.get('id'), 'newtype': new_type, 'diff': diff, 'host': host}) model_update = None # Any spec changes? if diff['extra_specs']: volume_name = volume.get('id') provider_id = volume.get('provider_id') with self._client.open_connection() as api: try: # Get our volume scvolume = api.find_volume(volume_name, provider_id) if scvolume is None: LOG.error(_LE('Retype unable to find volume %s.'), volume_name) return False # Check our specs. # Storage profiles. current, requested = ( self._get_retype_spec(diff, volume_name, 'Storage Profile', 'storagetype:storageprofile')) # if there is a change and it didn't work fast fail. if (current != requested and not api.update_storage_profile(scvolume, requested)): LOG.error(_LE('Failed to update storage profile')) return False # Replay profiles. current, requested = ( self._get_retype_spec(diff, volume_name, 'Replay Profiles', 'storagetype:replayprofiles')) # if there is a change and it didn't work fast fail. 
if requested and not api.update_replay_profiles(scvolume, requested): LOG.error(_LE('Failed to update replay profiles')) return False # Volume QOS profiles. current, requested = ( self._get_retype_spec(diff, volume_name, 'Volume QOS Profile', 'storagetype:volumeqos')) if current != requested: if not api.update_qos_profile(scvolume, requested): LOG.error(_LE('Failed to update volume ' 'qos profile')) # Group QOS profiles. current, requested = ( self._get_retype_spec(diff, volume_name, 'Group QOS Profile', 'storagetype:groupqos')) if current != requested: if not api.update_qos_profile(scvolume, requested, True): LOG.error(_LE('Failed to update group ' 'qos profile')) return False # Data reduction profiles. current, requested = ( self._get_retype_spec( diff, volume_name, 'Data Reduction Profile', 'storagetype:datareductionprofile')) if current != requested: if not api.update_datareduction_profile(scvolume, requested): LOG.error(_LE('Failed to update data reduction ' 'profile')) return False # Active Replay current, requested = ( self._get_retype_spec(diff, volume_name, 'Replicate Active Replay', 'replication:activereplay')) if current != requested and not ( api.update_replicate_active_replay( scvolume, requested == '<is> True')): LOG.error(_LE('Failed to apply ' 'replication:activereplay setting')) return False # Deal with replication. ret, model_update = self._retype_replication( api, volume, scvolume, new_type, diff) if not ret: return False except exception.VolumeBackendAPIException: # We do nothing with this. We simply return failure. return False # If we have something to send down... if model_update: return True, model_update return True def _parse_secondary(self, api, secondary): """Find the replication destination associated with secondary. :param api: Dell StorageCenterApi :param secondary: String indicating the secondary to failover to. :return: Destination SSN for the given secondary. """ LOG.debug('_parse_secondary. 
Looking for %s.', secondary) destssn = None # Trundle through these looking for our secondary. for backend in self.backends: ssnstring = backend['target_device_id'] # If they list a secondary it has to match. # If they do not list a secondary we return the first # replication on a working system. if not secondary or secondary == ssnstring: # Is a string. Need an int. ssn = int(ssnstring) # Without the source being up we have no good # way to pick a destination to failover to. So just # look for one that is just up. try: # If the SC ssn exists use it. if api.find_sc(ssn): destssn = ssn break except exception.VolumeBackendAPIException: LOG.warning(_LW('SSN %s appears to be down.'), ssn) LOG.info(_LI('replication failover secondary is %(ssn)s'), {'ssn': destssn}) return destssn def _update_backend(self, active_backend_id): # Mark for failover or undo failover. LOG.debug('active_backend_id: %s', active_backend_id) if active_backend_id: self.active_backend_id = six.text_type(active_backend_id) self.failed_over = True else: self.active_backend_id = None self.failed_over = False self._client.active_backend_id = self.active_backend_id def _get_qos(self, targetssn): # Find our QOS. qosnode = None for backend in self.backends: if int(backend['target_device_id']) == targetssn: qosnode = backend.get('qosnode', 'cinderqos') return qosnode def _parse_extraspecs(self, volume): # Digest our extra specs for replication. 
extraspecs = {} specs = self._get_volume_extra_specs(volume) if specs.get('replication_type') == '<in> sync': extraspecs['replicationtype'] = 'Synchronous' else: extraspecs['replicationtype'] = 'Asynchronous' if specs.get('replication:activereplay') == '<is> True': extraspecs['activereplay'] = True else: extraspecs['activereplay'] = False extraspecs['storage_profile'] = specs.get('storagetype:storageprofile') extraspecs['replay_profile_string'] = ( specs.get('storagetype:replayprofiles')) return extraspecs def _wait_for_replication(self, api, items): # Wait for our replications to resync with their original volumes. # We wait for completion, errors or timeout. deadcount = 5 lastremain = 0.0 # The big wait loop. while True: # We run until all volumes are synced or in error. done = True currentremain = 0.0 # Run the list. for item in items: # If we have one cooking. if item['status'] == 'inprogress': # Is it done? synced, remain = api.replication_progress(item['screpl']) currentremain += remain if synced: # It is! Get our volumes. cvol = api.get_volume(item['cvol']) nvol = api.get_volume(item['nvol']) # Flip replication. if (cvol and nvol and api.flip_replication( cvol, nvol, item['volume']['id'], item['specs']['replicationtype'], item['qosnode'], item['specs']['activereplay'])): # rename the original. Doesn't matter if it # succeeded as we should have the provider_id # of the new volume. ovol = api.get_volume(item['ovol']) if not ovol or not api.rename_volume( ovol, 'org:' + ovol['name']): # Not a reason to fail but will possibly # cause confusion so warn. LOG.warning(_LW('Unable to locate and rename ' 'original volume: %s'), item['ovol']) item['status'] = 'synced' else: item['status'] = 'error' elif synced is None: # Couldn't get info on this one. Call it baked. item['status'] = 'error' else: # Miles to go before we're done. done = False # done? then leave. if done: break # Confirm we are or are not still making progress. 
if lastremain == currentremain: # One chance down. Warn user. deadcount -= 1 LOG.warning(_LW('Waiting for replications to complete. ' 'No progress for %(timeout)d seconds. ' 'deadcount = %(cnt)d'), {'timeout': self.failback_timeout, 'cnt': deadcount}) else: # Reset lastremain = currentremain deadcount = 5 # If we've used up our 5 chances we error and log.. if deadcount == 0: LOG.error(_LE('Replication progress has stopped: ' '%f remaining.'), currentremain) for item in items: if item['status'] == 'inprogress': LOG.error(_LE('Failback failed for volume: %s. ' 'Timeout waiting for replication to ' 'sync with original volume.'), item['volume']['id']) item['status'] = 'error' break # This is part of an async call so we should be good sleeping here. # Have to balance hammering the backend for no good reason with # the max timeout for the unit tests. Yeah, silly. eventlet.sleep(self.failback_timeout) def _reattach_remaining_replications(self, api, items): # Wiffle through our backends and reattach any remaining replication # targets. for item in items: if item['status'] == 'synced': svol = api.get_volume(item['nvol']) # assume it went well. Will error out if not. item['status'] = 'reattached' # wiffle through our backends and kick off replications. for backend in self.backends: rssn = int(backend['target_device_id']) if rssn != api.ssn: rvol = api.find_repl_volume(item['volume']['id'], rssn, None) # if there is an old replication whack it. api.delete_replication(svol, rssn, False) if api.start_replication( svol, rvol, item['specs']['replicationtype'], self._get_qos(rssn), item['specs']['activereplay']): # Save our replication_driver_data. item['rdd'] += ',' item['rdd'] += backend['target_device_id'] else: # No joy. Bail item['status'] = 'error' def _fixup_types(self, api, items): # Update our replay profiles. for item in items: if item['status'] == 'reattached': # Re-apply any appropriate replay profiles. 
item['status'] = 'available' rps = item['specs']['replay_profile_string'] if rps: svol = api.get_volume(item['nvol']) if not api.update_replay_profiles(svol, rps): item['status'] = 'error' def _volume_updates(self, items): # Update our volume updates. volume_updates = [] for item in items: # Set our status for our replicated volumes model_update = {'provider_id': item['nvol'], 'replication_driver_data': item['rdd']} # These are simple. If the volume reaches available then, # since we were replicating it, replication status must # be good. Else error/error. if item['status'] == 'available': model_update['status'] = 'available' model_update['replication_status'] = ( fields.ReplicationStatus.ENABLED) else: model_update['status'] = 'error' model_update['replication_status'] = ( fields.ReplicationStatus.ERROR) volume_updates.append({'volume_id': item['volume']['id'], 'updates': model_update}) return volume_updates def _failback_replication(self, api, volume, qosnode): """Sets up the replication failback. :param api: Dell SC API. :param volume: Cinder Volume :param qosnode: Dell QOS node object. :return: replitem dict. """ LOG.info(_LI('failback_volumes: replicated volume')) # Get our current volume. cvol = api.find_volume(volume['id'], volume['provider_id']) # Original volume on the primary. ovol = api.find_repl_volume(volume['id'], api.primaryssn, None, True, False) # Delete our current mappings. api.remove_mappings(cvol) # If there is a replication to delete do so. api.delete_replication(ovol, api.ssn, False) # Replicate to a common replay. screpl = api.replicate_to_common(cvol, ovol, 'tempqos') # We made it this far. Update our status. screplid = None status = '' if screpl: screplid = screpl['instanceId'] nvolid = screpl['destinationVolume']['instanceId'] status = 'inprogress' else: LOG.error(_LE('Unable to restore %s'), volume['id']) screplid = None nvolid = None status = 'error' # Save some information for the next step. 
# nvol is the new volume created by replicate_to_common. # We also grab our extra specs here. replitem = { 'volume': volume, 'specs': self._parse_extraspecs(volume), 'qosnode': qosnode, 'screpl': screplid, 'cvol': cvol['instanceId'], 'ovol': ovol['instanceId'], 'nvol': nvolid, 'rdd': six.text_type(api.ssn), 'status': status} return replitem def _failback_live_volume(self, api, id, provider_id): """failback the live volume to its original :param api: Dell SC API :param id: Volume ID :param provider_id: Dell Instance ID :return: model_update dict """ model_update = {} # We do not search by name. Only failback if we have a complete # LV object. sclivevolume = api.get_live_volume(provider_id) # TODO(tswanson): Check swapped state first. if sclivevolume and api.swap_roles_live_volume(sclivevolume): LOG.info(_LI('Success swapping sclivevolume roles %s'), id) model_update = { 'status': 'available', 'replication_status': fields.ReplicationStatus.ENABLED, 'provider_id': sclivevolume['secondaryVolume']['instanceId']} else: LOG.info(_LI('Failure swapping roles %s'), id) model_update = {'status': 'error'} return model_update def _finish_failback(self, api, replitems): # Wait for replication to complete. # This will also flip replication. self._wait_for_replication(api, replitems) # Replications are done. Attach to any additional replication # backends. self._reattach_remaining_replications(api, replitems) self._fixup_types(api, replitems) return self._volume_updates(replitems) def failback_volumes(self, volumes): """This is a generic volume failback. :param volumes: List of volumes that need to be failed back. :return: volume_updates for the list of volumes. """ LOG.info(_LI('failback_volumes')) with self._client.open_connection() as api: # Get our qosnode. This is a good way to make sure the backend # is still setup so that we can do this. qosnode = self._get_qos(api.ssn) if not qosnode: raise exception.VolumeBackendAPIException( message=_('Unable to failback. 
Backend is misconfigured.')) volume_updates = [] replitems = [] # Trundle through the volumes. Update non replicated to alive again # and reverse the replications for the remaining volumes. for volume in volumes: LOG.info(_LI('failback_volumes: starting volume: %s'), volume) model_update = {} if volume.get('replication_driver_data'): rspecs = self._get_replication_specs( self._get_volume_extra_specs(volume)) if rspecs['live']: model_update = self._failback_live_volume( api, volume['id'], volume['provider_id']) else: replitem = self._failback_replication(api, volume, qosnode) # Save some information for the next step. # nvol is the new volume created by # replicate_to_common. We also grab our # extra specs here. replitems.append(replitem) else: # Not replicated. Just set it to available. model_update = {'status': 'available'} # Save our update if model_update: volume_updates.append({'volume_id': volume['id'], 'updates': model_update}) # Let's do up to 5 replications at once. if len(replitems) == 5: volume_updates += self._finish_failback(api, replitems) replitems = [] # Finish any leftover items if replitems: volume_updates += self._finish_failback(api, replitems) # Set us back to a happy state. # The only way this doesn't happen is if the primary is down. self._update_backend(None) return volume_updates def _failover_replication(self, api, id, provider_id, destssn): rvol = api.break_replication(id, provider_id, destssn) model_update = {} if rvol: LOG.info(_LI('Success failing over volume %s'), id) model_update = {'replication_status': fields.ReplicationStatus.FAILED_OVER, 'provider_id': rvol['instanceId']} else: LOG.info(_LI('Failed failing over volume %s'), id) model_update = {'status': 'error'} return model_update def _failover_live_volume(self, api, id, provider_id): model_update = {} # Search for volume by id if we have to. 
sclivevolume = api.get_live_volume(provider_id, id) if sclivevolume: swapped = api.is_swapped(provider_id, sclivevolume) # If we aren't swapped try it. If fail error out. if not swapped and not api.swap_roles_live_volume(sclivevolume): LOG.info(_LI('Failure swapping roles %s'), id) model_update = {'status': 'error'} return model_update LOG.info(_LI('Success swapping sclivevolume roles %s'), id) sclivevolume = api.get_live_volume(provider_id) model_update = { 'replication_status': fields.ReplicationStatus.FAILED_OVER, 'provider_id': sclivevolume['primaryVolume']['instanceId']} # Error and leave. return model_update def failover_host(self, context, volumes, secondary_id=None): """Failover to secondary. :param context: security context :param secondary_id: Specifies rep target to fail over to :param volumes: List of volumes serviced by this backend. :returns: destssn, volume_updates data structure Example volume_updates data structure: .. code-block:: json [{'volume_id': <cinder-uuid>, 'updates': {'provider_id': 8, 'replication_status': 'failed-over', 'replication_extended_status': 'whatever',...}},] """ LOG.debug('failover-host') LOG.debug(self.failed_over) LOG.debug(self.active_backend_id) LOG.debug(self.replication_enabled) if self.failed_over: if secondary_id == 'default': LOG.debug('failing back') return 'default', self.failback_volumes(volumes) raise exception.InvalidReplicationTarget( reason=_('Already failed over')) LOG.info(_LI('Failing backend to %s'), secondary_id) # basic check if self.replication_enabled: with self._client.open_connection() as api: # Look for the specified secondary. destssn = self._parse_secondary(api, secondary_id) if destssn: # We roll through trying to break replications. # Is failing here a complete failure of failover? 
volume_updates = [] for volume in volumes: model_update = {} if volume.get('replication_driver_data'): rspecs = self._get_replication_specs( self._get_volume_extra_specs(volume)) if rspecs['live']: model_update = self._failover_live_volume( api, volume['id'], volume.get('provider_id')) else: model_update = self._failover_replication( api, volume['id'], volume.get('provider_id'), destssn) else: # Not a replicated volume. Try to unmap it. scvolume = api.find_volume( volume['id'], volume.get('provider_id')) api.remove_mappings(scvolume) model_update = {'status': 'error'} # Either we are failed over or our status is now error. volume_updates.append({'volume_id': volume['id'], 'updates': model_update}) # this is it. self._update_backend(destssn) LOG.debug('after update backend') LOG.debug(self.failed_over) LOG.debug(self.active_backend_id) LOG.debug(self.replication_enabled) return destssn, volume_updates else: raise exception.InvalidReplicationTarget(reason=( _('replication_failover failed. %s not found.') % secondary_id)) # I don't think we should ever get here. raise exception.VolumeBackendAPIException(message=( _('replication_failover failed. ' 'Backend not configured for failover'))) def _get_unmanaged_replay(self, api, volume_name, provider_id, existing_ref): replay_name = None if existing_ref: replay_name = existing_ref.get('source-name') if not replay_name: msg = _('_get_unmanaged_replay: Must specify source-name.') LOG.error(msg) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) # Find our volume. scvolume = api.find_volume(volume_name, provider_id) if not scvolume: # Didn't find it. msg = (_('_get_unmanaged_replay: Cannot find volume id %s') % volume_name) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Find our replay. screplay = api.find_replay(scvolume, replay_name) if not screplay: # Didn't find it. Reference must be invalid. 
msg = (_('_get_unmanaged_replay: Cannot ' 'find snapshot named %s') % replay_name) LOG.error(msg) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) return screplay def manage_existing_snapshot(self, snapshot, existing_ref): """Brings an existing backend storage object under Cinder management. existing_ref is passed straight through from the API request's manage_existing_ref value, and it is up to the driver how this should be interpreted. It should be sufficient to identify a storage object that the driver should somehow associate with the newly-created cinder snapshot structure. There are two ways to do this: 1. Rename the backend storage object so that it matches the snapshot['name'] which is how drivers traditionally map between a cinder snapshot and the associated backend storage object. 2. Place some metadata on the snapshot, or somewhere in the backend, that allows other driver requests (e.g. delete) to locate the backend storage object when required. If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object, raise a ManageExistingInvalidReference exception. """ with self._client.open_connection() as api: # Find our unmanaged snapshot. This will raise on error. volume_name = snapshot.get('volume_id') provider_id = snapshot.get('provider_id') snapshot_id = snapshot.get('id') screplay = self._get_unmanaged_replay(api, volume_name, provider_id, existing_ref) # Manage means update description and update expiration. if not api.manage_replay(screplay, snapshot_id): # That didn't work. Error. msg = (_('manage_existing_snapshot: Error managing ' 'existing replay %(ss)s on volume %(vol)s') % {'ss': screplay.get('description'), 'vol': volume_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Life is good. Let the world know what we've done. 
LOG.info(_LI('manage_existing_snapshot: snapshot %(exist)s on ' 'volume %(volume)s has been renamed to %(id)s and is ' 'now managed by Cinder.'), {'exist': screplay.get('description'), 'volume': volume_name, 'id': snapshot_id}) return {'provider_id': screplay['createVolume']['instanceId']} # NOTE: Can't use abstractmethod before all drivers implement it def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of snapshot to be managed by manage_existing. When calculating the size, round up to the next GB. """ volume_name = snapshot.get('volume_id') provider_id = snapshot.get('provider_id') with self._client.open_connection() as api: screplay = self._get_unmanaged_replay(api, volume_name, provider_id, existing_ref) sz, rem = dell_storagecenter_api.StorageCenterApi.size_to_gb( screplay['size']) if rem > 0: raise exception.VolumeBackendAPIException( data=_('Volume size must be a multiple of 1 GB.')) return sz # NOTE: Can't use abstractmethod before all drivers implement it def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Cinder management. Does not delete the underlying backend storage object. NOTE: We do set the expire countdown to 1 day. Once a snapshot is unmanaged it will expire 24 hours later. """ with self._client.open_connection() as api: snapshot_id = snapshot.get('id') # provider_id is the snapshot's parent volume's instanceId. provider_id = snapshot.get('provider_id') volume_name = snapshot.get('volume_id') # Find our volume. scvolume = api.find_volume(volume_name, provider_id) if not scvolume: # Didn't find it. msg = (_('unmanage_snapshot: Cannot find volume id %s') % volume_name) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Find our replay. screplay = api.find_replay(scvolume, snapshot_id) if not screplay: # Didn't find it. Reference must be invalid. 
msg = (_('unmanage_snapshot: Cannot find snapshot named %s') % snapshot_id) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Free our snapshot. api.unmanage_replay(screplay) # Do not check our result. def thaw_backend(self, context): """Notify the backend that it's unfrozen/thawed. This is a gate. We do not allow the backend to be thawed if it is still failed over. :param context: security context :response: True on success :raises Invalid: if it cannot be thawed. """ # We shouldn't be called if we are not failed over. if self.failed_over: msg = _('The Dell SC array does not support thawing a failed over' ' replication. Please migrate volumes to an operational ' 'back-end or resolve primary system issues and ' 'fail back to reenable full functionality.') LOG.error(msg) raise exception.Invalid(reason=msg) return True
ge0rgi/cinder
cinder/volume/drivers/dell/dell_storagecenter_common.py
Python
apache-2.0
83,512
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for cond_v2.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.protobuf import config_pb2 from tensorflow.python.eager import context from tensorflow.python.eager import function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import cond_v2 from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import math_ops from tensorflow.python.ops import tensor_array_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import saver from tensorflow.python.util import compat class CondV2Test(test.TestCase): def _testCond(self, true_fn, false_fn, train_vals, feed_dict=None): if not feed_dict: feed_dict = {} with self.session(graph=ops.get_default_graph()) as sess: pred = array_ops.placeholder(dtypes.bool, name="pred") expected = control_flow_ops.cond(pred, true_fn, 
false_fn, name="expected") actual = cond_v2.cond_v2(pred, true_fn, false_fn, name="actual") expected_grad = gradients_impl.gradients(expected, train_vals) actual_grad = gradients_impl.gradients(actual, train_vals) sess_run_args = {pred: True} sess_run_args.update(feed_dict) expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run( (expected, actual, expected_grad, actual_grad), sess_run_args) self.assertEqual(expected_val, actual_val) self.assertEqual(expected_grad_val, actual_grad_val) sess_run_args = {pred: False} sess_run_args.update(feed_dict) expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run( (expected, actual, expected_grad, actual_grad), sess_run_args) self.assertEqual(expected_val, actual_val) self.assertEqual(expected_grad_val, actual_grad_val) @test_util.run_deprecated_v1 def testBasic(self): x = constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, name="y") def true_fn(): return x * 2.0 def false_fn(): return y * 3.0 self._testCond(true_fn, false_fn, [x]) self._testCond(true_fn, false_fn, [x, y]) self._testCond(true_fn, false_fn, [y]) @test_util.run_deprecated_v1 def testMultipleOutputs(self): x = constant_op.constant(1.0, name="x") y = constant_op.constant(3.0, name="y") def true_fn(): return x * y, y def false_fn(): return x, y * 3.0 self._testCond(true_fn, false_fn, [x]) self._testCond(true_fn, false_fn, [x, y]) self._testCond(true_fn, false_fn, [y]) @test_util.run_deprecated_v1 def testBasic2(self): x = constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, name="y") def true_fn(): return x * y * 2.0 def false_fn(): return 2.0 self._testCond(true_fn, false_fn, [x]) self._testCond(true_fn, false_fn, [x, y]) self._testCond(true_fn, false_fn, [y]) @test_util.run_deprecated_v1 def testNoInputs(self): with self.cached_session() as sess: pred = array_ops.placeholder(dtypes.bool, name="pred") def true_fn(): return constant_op.constant(1.0) def false_fn(): return constant_op.constant(2.0) out = 
cond_v2.cond_v2(pred, true_fn, false_fn) self.assertEqual(sess.run(out, {pred: True}), (1.0,)) self.assertEqual(sess.run(out, {pred: False}), (2.0,)) def _createCond(self, name): """Creates a cond_v2 call and returns the output tensor and the cond op.""" pred = constant_op.constant(True, name="pred") x = constant_op.constant(1.0, name="x") def true_fn(): return x def false_fn(): return x + 1 output = cond_v2.cond_v2(pred, true_fn, false_fn, name=name) cond_op = output.op.inputs[0].op self.assertEqual(cond_op.type, "If") return output, cond_op def _createNestedCond(self, name): """Like _createCond but creates a nested cond_v2 call as well.""" pred = constant_op.constant(True, name="pred") x = constant_op.constant(1.0, name="x") def true_fn(): return cond_v2.cond_v2(pred, lambda: x, lambda: x + 1) def false_fn(): return x + 2 output = cond_v2.cond_v2(pred, true_fn, false_fn, name=name) cond_op = output.op.inputs[0].op self.assertEqual(cond_op.type, "If") return output, cond_op def testDefaultName(self): with ops.Graph().as_default(): _, cond_op = self._createCond(None) self.assertEqual(cond_op.name, "cond") self.assertRegexpMatches( cond_op.get_attr("then_branch").name, r"cond_true_\d*") self.assertRegexpMatches( cond_op.get_attr("else_branch").name, r"cond_false_\d*") with ops.Graph().as_default(): with ops.name_scope("foo"): _, cond1_op = self._createCond("") self.assertEqual(cond1_op.name, "foo/cond") self.assertRegexpMatches( cond1_op.get_attr("then_branch").name, r"foo_cond_true_\d*") self.assertRegexpMatches( cond1_op.get_attr("else_branch").name, r"foo_cond_false_\d*") _, cond2_op = self._createCond(None) self.assertEqual(cond2_op.name, "foo/cond_1") self.assertRegexpMatches( cond2_op.get_attr("then_branch").name, r"foo_cond_1_true_\d*") self.assertRegexpMatches( cond2_op.get_attr("else_branch").name, r"foo_cond_1_false_\d*") @test_util.run_v1_only("b/120545219") def testDefunInCond(self): x = constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, 
name="y") def true_fn(): @function.defun def fn(): return x * y * 2.0 return fn() def false_fn(): return 2.0 self._testCond(true_fn, false_fn, [x]) self._testCond(true_fn, false_fn, [x, y]) self._testCond(true_fn, false_fn, [y]) @test_util.run_deprecated_v1 def testNestedDefunInCond(self): x = constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, name="y") def true_fn(): return 2.0 def false_fn(): @function.defun def fn(): @function.defun def nested_fn(): return x * y * 2.0 return nested_fn() return fn() self._testCond(true_fn, false_fn, [x]) self._testCond(true_fn, false_fn, [x, y]) self._testCond(true_fn, false_fn, [y]) @test_util.run_deprecated_v1 def testDoubleNestedDefunInCond(self): x = constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, name="y") def true_fn(): @function.defun def fn(): @function.defun def nested_fn(): @function.defun def nested_nested_fn(): return x * y * 2.0 return nested_nested_fn() return nested_fn() return fn() def false_fn(): return 2.0 self._testCond(true_fn, false_fn, [x]) self._testCond(true_fn, false_fn, [x, y]) self._testCond(true_fn, false_fn, [y]) def testNestedCond(self): def run_test(pred_value): def build_graph(): pred = array_ops.placeholder(dtypes.bool, name="pred") x = constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, name="y") def true_fn(): return 2.0 def false_fn(): def false_true_fn(): return x * y * 2.0 def false_false_fn(): return x * 5.0 return _cond(pred, false_true_fn, false_false_fn, "inside_false_fn") return x, y, pred, true_fn, false_fn with ops.Graph().as_default(): x, y, pred, true_fn, false_fn = build_graph() self._testCond(true_fn, false_fn, [x, y], {pred: pred_value}) self._testCond(true_fn, false_fn, [x], {pred: pred_value}) self._testCond(true_fn, false_fn, [y], {pred: pred_value}) run_test(True) run_test(False) def testNestedCondBothBranches(self): def run_test(pred_value): def build_graph(): pred = array_ops.placeholder(dtypes.bool, name="pred") x = 
constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, name="y") def true_fn(): return _cond(pred, lambda: x + y, lambda: x * x, name=None) def false_fn(): return _cond(pred, lambda: x - y, lambda: y * y, name=None) return x, y, pred, true_fn, false_fn with ops.Graph().as_default(): x, y, pred, true_fn, false_fn = build_graph() self._testCond(true_fn, false_fn, [x, y], {pred: pred_value}) self._testCond(true_fn, false_fn, [x], {pred: pred_value}) self._testCond(true_fn, false_fn, [y], {pred: pred_value}) run_test(True) run_test(False) def testDoubleNestedCond(self): def run_test(pred1_value, pred2_value): def build_graph(): pred1 = array_ops.placeholder(dtypes.bool, name="pred1") pred2 = array_ops.placeholder(dtypes.bool, name="pred2") x = constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, name="y") def true_fn(): return 2.0 def false_fn(): def false_true_fn(): def false_true_true_fn(): return x * y * 2.0 def false_true_false_fn(): return x * 10.0 return _cond( pred1, false_true_true_fn, false_true_false_fn, name="inside_false_true_fn") def false_false_fn(): return x * 5.0 return _cond( pred2, false_true_fn, false_false_fn, name="inside_false_fn") return x, y, pred1, pred2, true_fn, false_fn with ops.Graph().as_default(): x, y, pred1, pred2, true_fn, false_fn = build_graph() self._testCond(true_fn, false_fn, [x, y], { pred1: pred1_value, pred2: pred2_value }) x, y, pred1, pred2, true_fn, false_fn = build_graph() self._testCond(true_fn, false_fn, [x], { pred1: pred1_value, pred2: pred2_value }) x, y, pred1, pred2, true_fn, false_fn = build_graph() self._testCond(true_fn, false_fn, [y], { pred1: pred1_value, pred2: pred2_value }) run_test(True, True) run_test(True, False) run_test(False, False) run_test(False, True) def testGradientFromInsideDefun(self): def build_graph(): pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer") pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner") x = constant_op.constant(1.0, 
name="x") y = constant_op.constant(2.0, name="y") def true_fn(): return 2.0 def false_fn(): def inner_true_fn(): return x * y * 2.0 def inner_false_fn(): return x * 5.0 return cond_v2.cond_v2( pred_inner, inner_true_fn, inner_false_fn, name="inner_cond") cond_outer = cond_v2.cond_v2( pred_outer, true_fn, false_fn, name="outer_cond") # Compute grads inside a Defun. @function.defun def nesting_fn(): return gradients_impl.gradients(cond_outer, [x, y]) grads = nesting_fn() return grads, pred_outer, pred_inner with ops.Graph().as_default(): grads, pred_outer, pred_inner = build_graph() with self.session(graph=ops.get_default_graph()) as sess: self.assertSequenceEqual( sess.run(grads, { pred_outer: True, pred_inner: True }), [0., 0.]) self.assertSequenceEqual( sess.run(grads, { pred_outer: True, pred_inner: False }), [0., 0.]) self.assertSequenceEqual( sess.run(grads, { pred_outer: False, pred_inner: True }), [4., 2.]) self.assertSequenceEqual( sess.run(grads, { pred_outer: False, pred_inner: False }), [5., 0.]) def testGradientFromInsideNestedDefun(self): def build_graph(): pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer") pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner") x = constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, name="y") def true_fn(): return 2.0 def false_fn(): def inner_true_fn(): return x * y * 2.0 def inner_false_fn(): return x * 5.0 return cond_v2.cond_v2( pred_inner, inner_true_fn, inner_false_fn, name="inner_cond") cond_outer = cond_v2.cond_v2( pred_outer, true_fn, false_fn, name="outer_cond") # Compute grads inside a Defun. 
@function.defun def nesting_fn(): @function.defun def inner_nesting_fn(): return gradients_impl.gradients(cond_outer, [x, y]) return inner_nesting_fn() grads = nesting_fn() return grads, pred_outer, pred_inner with ops.Graph().as_default(): grads, pred_outer, pred_inner = build_graph() with self.session(graph=ops.get_default_graph()) as sess: self.assertSequenceEqual( sess.run(grads, { pred_outer: True, pred_inner: True }), [0., 0.]) self.assertSequenceEqual( sess.run(grads, { pred_outer: True, pred_inner: False }), [0., 0.]) self.assertSequenceEqual( sess.run(grads, { pred_outer: False, pred_inner: True }), [4., 2.]) self.assertSequenceEqual( sess.run(grads, { pred_outer: False, pred_inner: False }), [5., 0.]) def testBuildCondAndGradientInsideDefun(self): def build_graph(): pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer") pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner") x = constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, name="y") # Build cond and its gradient inside a Defun. 
@function.defun def fn(): def true_fn(): return 2.0 def false_fn(): def inner_true_fn(): return x * y * 2.0 def inner_false_fn(): return x * 5.0 return cond_v2.cond_v2( pred_inner, inner_true_fn, inner_false_fn, name="inner_cond") cond_outer = cond_v2.cond_v2( pred_outer, true_fn, false_fn, name="outer_cond") return gradients_impl.gradients(cond_outer, [x, y]) grads = fn() return grads, pred_outer, pred_inner with ops.Graph().as_default(), self.session( graph=ops.get_default_graph()) as sess: grads, pred_outer, pred_inner = build_graph() self.assertSequenceEqual( sess.run(grads, { pred_outer: True, pred_inner: True }), [0., 0.]) self.assertSequenceEqual( sess.run(grads, { pred_outer: True, pred_inner: False }), [0., 0.]) self.assertSequenceEqual( sess.run(grads, { pred_outer: False, pred_inner: True }), [4., 2.]) self.assertSequenceEqual( sess.run(grads, { pred_outer: False, pred_inner: False }), [5., 0.]) @test_util.run_deprecated_v1 def testSecondDerivative(self): with self.cached_session() as sess: pred = array_ops.placeholder(dtypes.bool, name="pred") x = constant_op.constant(3.0, name="x") def true_fn(): return math_ops.pow(x, 3) def false_fn(): return x cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond") cond_grad = gradients_impl.gradients(cond, [x]) cond_grad_grad = gradients_impl.gradients(cond_grad, [x]) # d[x^3]/dx = 3x^2 true_val = sess.run(cond_grad, {pred: True}) self.assertEqual(true_val, [27.0]) # d[x]/dx = 1 false_val = sess.run(cond_grad, {pred: False}) self.assertEqual(false_val, [1.0]) true_val = sess.run(cond_grad_grad, {pred: True}) # d2[x^3]/dx2 = 6x self.assertEqual(true_val, [18.0]) false_val = sess.run(cond_grad_grad, {pred: False}) # d2[x]/dx2 = 0 self.assertEqual(false_val, [0.0]) def testGradientOfDeserializedCond(self): with ops.Graph().as_default(): pred = array_ops.placeholder(dtypes.bool, name="pred") x = constant_op.constant(3.0, name="x") ops.add_to_collection("x", x) def true_fn(): return math_ops.pow(x, 3) def 
false_fn(): return x ops.add_to_collection("pred", pred) cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond") ops.add_to_collection("cond", cond) meta_graph = saver.export_meta_graph() with ops.Graph().as_default() as g: with self.session(graph=g) as sess: saver.import_meta_graph(meta_graph) x = ops.get_collection("x")[0] pred = ops.get_collection("pred")[0] cond = ops.get_collection("cond") cond_grad = gradients_impl.gradients(cond, [x], name="cond_grad") cond_grad_grad = gradients_impl.gradients( cond_grad, [x], name="cond_grad_grad") # d[x^3]/dx = 3x^2 true_val = sess.run(cond_grad, {pred: True}) self.assertEqual(true_val, [27.0]) # d[x]/dx = 1 false_val = sess.run(cond_grad, {pred: False}) self.assertEqual(false_val, [1.0]) true_val = sess.run(cond_grad_grad, {pred: True}) # d2[x^3]/dx2 = 6x self.assertEqual(true_val, [18.0]) false_val = sess.run(cond_grad_grad, {pred: False}) # d2[x]/dx2 = 0 self.assertEqual(false_val, [0.0]) def testLowering(self): with ops.Graph().as_default() as g: with self.session(graph=g) as sess: cond_output, _ = self._createCond("cond") run_options = config_pb2.RunOptions(output_partition_graphs=True) run_metadata = config_pb2.RunMetadata() sess.run(cond_output, options=run_options, run_metadata=run_metadata) # If lowering was enabled, there should be a `Switch` node switch_found = any( any(node.op == "Switch" for node in graph.node) for graph in run_metadata.partition_graphs ) self.assertTrue(switch_found, "A `Switch` op should exist if the graph was lowered.") # If lowering was enabled, there should be no `If` node if_found = any( any(node.op == "If" for node in graph.node) for graph in run_metadata.partition_graphs ) self.assertFalse(if_found, "An `If` op was found, but it should be lowered.") @test_util.run_deprecated_v1 def testLoweringDisabledInXLA(self): with self.session(graph=ops.Graph()) as sess: # Build the cond_v2 in an XLA context xla_context = control_flow_ops.XLAControlFlowContext() xla_context.Enter() 
cond_output, cond_op = self._createCond("cond") xla_context.Exit() # Check lowering attr is not set. with self.assertRaises(ValueError): cond_op.get_attr("_lower_using_switch_merge") # Check the actual graph that is run. run_options = config_pb2.RunOptions(output_partition_graphs=True) run_metadata = config_pb2.RunMetadata() sess.run(cond_output, options=run_options, run_metadata=run_metadata) # Lowering disabled in XLA, there should be no `Switch` node switch_found = any( any(node.op == "Switch" for node in graph.node) for graph in run_metadata.partition_graphs ) self.assertFalse( switch_found, "A `Switch` op exists, but the graph should not be lowered.") # Lowering disabled in XLA, there should still be an `If` node if_found = any( any(node.op == "If" for node in graph.node) for graph in run_metadata.partition_graphs ) self.assertTrue( if_found, "An `If` op was not found, but the graph should not be lowered.") @test_util.run_deprecated_v1 def testNestedLoweringDisabledInXLA(self): # Build the cond_v2 in an XLA context xla_context = control_flow_ops.XLAControlFlowContext() xla_context.Enter() _, cond_op = self._createNestedCond("cond") xla_context.Exit() # Check lowering attr is not set for either If node. with self.assertRaises(ValueError): cond_op.get_attr("_lower_using_switch_merge") nested_if_ops = [] for func in ops.get_default_graph()._functions.values(): nested_if_ops.extend(op for op in func._graph.get_operations() if op.type == "If") self.assertEqual(len(nested_if_ops), 1) with self.assertRaises(ValueError): nested_if_ops[0].get_attr("_lower_using_switch_merge") # TODO(skyewm): check the actual graphs that are run once we have a way to # programmatically access those graphs. 
@test_util.run_deprecated_v1 def testLoweringDisabledWithSingleThreadedExecutorContext(self): with self.session(graph=ops.Graph()) as sess: @function.defun def _add_cond(x): return cond_v2.cond_v2( constant_op.constant(True, name="pred"), lambda: x, lambda: x + 1) x = array_ops.placeholder(shape=None, dtype=dtypes.float32) with context.function_executor_type("SINGLE_THREADED_EXECUTOR"): out_cond = _add_cond(x) # The fact that sess.run() succeeds means lowering is disabled, because # the single threaded executor does not support cond v1 ops. sess.run(out_cond, feed_dict={x: 1.0}) @test_util.enable_control_flow_v2 def testStructuredOutputs(self): x = constant_op.constant(1.0, name="x") y = constant_op.constant(3.0, name="y") def true_fn(): return ((x * y,), y) def false_fn(): return ((x,), y * 3.0) output = control_flow_ops.cond( constant_op.constant(False), true_fn, false_fn) self.assertEqual(self.evaluate(output[0][0]), 1.) self.assertEqual(self.evaluate(output[1]), 9.) @test_util.enable_control_flow_v2 @test_util.run_deprecated_v1 def testRaisesOutputStructuresMismatch(self): x = constant_op.constant(1.0, name="x") y = constant_op.constant(3.0, name="y") def true_fn(): return x * y, y def false_fn(): return ((x,), y * 3.0) with self.assertRaisesRegexp( ValueError, "Outputs of true_fn and false_fn must" " have the same structure"): control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn) @test_util.enable_control_flow_v2 def testCondAndTensorArray(self): x = math_ops.range(-5, 5) output = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=x.shape[0]) def loop_body(i, output): def if_true(): return output.write(i, x[i]**2) def if_false(): return output.write(i, x[i]) output = control_flow_ops.cond(x[i] > 0, if_true, if_false) return i + 1, output _, output = control_flow_ops.while_loop( lambda i, arr: i < x.shape[0], loop_body, loop_vars=(constant_op.constant(0), output)) output_t = output.stack() self.assertAllEqual( self.evaluate(output_t), [-5, 
-4, -3, -2, -1, 0, 1, 4, 9, 16]) @test_util.enable_control_flow_v2 def testCondAndTensorArrayInDefun(self): @function.defun def f(): x = math_ops.range(-5, 5) output = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=x.shape[0]) def loop_body(i, output): def if_true(): return output.write(i, x[i]**2) def if_false(): return output.write(i, x[i]) output = control_flow_ops.cond(x[i] > 0, if_true, if_false) return i + 1, output _, output = control_flow_ops.while_loop( lambda i, arr: i < x.shape[0], loop_body, loop_vars=(constant_op.constant(0), output)) return output.stack() output_t = f() self.assertAllEqual( self.evaluate(output_t), [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16]) @test_util.run_deprecated_v1 def testForwardPassRewrite(self): x = constant_op.constant(1.0, name="x") output = cond_v2.cond_v2(constant_op.constant(True), lambda: x * 2.0, lambda: x) if_op = output.op.inputs[0].op self.assertEqual(if_op.type, "If") # pylint: disable=g-deprecated-assert self.assertEqual(len(if_op.outputs), 1) gradients_impl.gradients(output, x) # if_op should have been rewritten to output 2.0 intermediate. self.assertEqual(len(if_op.outputs), 2) gradients_impl.gradients(output, x) # Computing the gradient again shouldn't rewrite if_op again. 
self.assertEqual(len(if_op.outputs), 2) # pylint: enable=g-deprecated-assert class CondV2CollectionTest(test.TestCase): def testCollectionIntValueAccessInCond(self): """Read values from graph collections inside of cond_v2.""" with ops.Graph().as_default() as g: with self.session(graph=g): x = 2 y = 5 ops.add_to_collection("x", x) ops.add_to_collection("y", y) def fn(): x_const = constant_op.constant(ops.get_collection("x")[0]) y_const = constant_op.constant(ops.get_collection("y")[0]) return math_ops.add(x_const, y_const) cnd = cond_v2.cond_v2(constant_op.constant(True), fn, fn) self.assertEquals(cnd.eval(), 7) def testCollectionTensorValueAccessInCond(self): """Read tensors from collections inside of cond_v2 & use them.""" with ops.Graph().as_default() as g: with self.session(graph=g): x = constant_op.constant(2) y = constant_op.constant(5) ops.add_to_collection("x", x) ops.add_to_collection("y", y) def fn(): x_read = ops.get_collection("x")[0] y_read = ops.get_collection("y")[0] return math_ops.add(x_read, y_read) cnd = cond_v2.cond_v2(math_ops.less(x, y), fn, fn) self.assertEquals(cnd.eval(), 7) def testCollectionIntValueWriteInCond(self): """Make sure Int writes to collections work inside of cond_v2.""" with ops.Graph().as_default() as g: with self.session(graph=g): x = constant_op.constant(2) y = constant_op.constant(5) def true_fn(): z = math_ops.add(x, y) ops.add_to_collection("z", 7) return math_ops.mul(x, z) def false_fn(): z = math_ops.add(x, y) return math_ops.mul(x, z) cnd = cond_v2.cond_v2(constant_op.constant(True), true_fn, false_fn) self.assertEquals(cnd.eval(), 14) read_z_collection = ops.get_collection("z") self.assertEquals(read_z_collection, [7]) class CondV2ContainerTest(test.TestCase): def testContainer(self): """Set containers outside & inside of cond_v2. 
Make sure the containers are set correctly for both variable creation (tested by variables.Variable) and for stateful ops (tested by FIFOQueue) """ self.skipTest("b/113048653") with ops.Graph().as_default() as g: with self.session(graph=g): v0 = variables.Variable([0]) q0 = data_flow_ops.FIFOQueue(1, dtypes.float32) def container(node): return node.op.get_attr("container") self.assertEqual(compat.as_bytes(""), container(v0)) self.assertEqual(compat.as_bytes(""), container(q0.queue_ref)) def true_fn(): # When this branch is created in cond below, # the container should begin with 'l1' v1 = variables.Variable([1]) q1 = data_flow_ops.FIFOQueue(1, dtypes.float32) with ops.container("l2t"): v2 = variables.Variable([2]) q2 = data_flow_ops.FIFOQueue(1, dtypes.float32) v3 = variables.Variable([1]) q3 = data_flow_ops.FIFOQueue(1, dtypes.float32) self.assertEqual(compat.as_bytes("l1"), container(v1)) self.assertEqual(compat.as_bytes("l1"), container(q1.queue_ref)) self.assertEqual(compat.as_bytes("l2t"), container(v2)) self.assertEqual(compat.as_bytes("l2t"), container(q2.queue_ref)) self.assertEqual(compat.as_bytes("l1"), container(v3)) self.assertEqual(compat.as_bytes("l1"), container(q3.queue_ref)) return constant_op.constant(2.0) def false_fn(): # When this branch is created in cond below, # the container should begin with 'l1' v1 = variables.Variable([1]) q1 = data_flow_ops.FIFOQueue(1, dtypes.float32) with ops.container("l2f"): v2 = variables.Variable([2]) q2 = data_flow_ops.FIFOQueue(1, dtypes.float32) v3 = variables.Variable([1]) q3 = data_flow_ops.FIFOQueue(1, dtypes.float32) self.assertEqual(compat.as_bytes("l1"), container(v1)) self.assertEqual(compat.as_bytes("l1"), container(q1.queue_ref)) self.assertEqual(compat.as_bytes("l2f"), container(v2)) self.assertEqual(compat.as_bytes("l2f"), container(q2.queue_ref)) self.assertEqual(compat.as_bytes("l1"), container(v3)) self.assertEqual(compat.as_bytes("l1"), container(q3.queue_ref)) return constant_op.constant(6.0) 
with ops.container("l1"): cnd_true = cond_v2.cond_v2( constant_op.constant(True), true_fn, false_fn) self.assertEquals(cnd_true.eval(), 2) cnd_false = cond_v2.cond_v2( constant_op.constant(False), true_fn, false_fn) self.assertEquals(cnd_false.eval(), 6) v4 = variables.Variable([3]) q4 = data_flow_ops.FIFOQueue(1, dtypes.float32) v5 = variables.Variable([4]) q5 = data_flow_ops.FIFOQueue(1, dtypes.float32) self.assertEqual(compat.as_bytes("l1"), container(v4)) self.assertEqual(compat.as_bytes("l1"), container(q4.queue_ref)) self.assertEqual(compat.as_bytes(""), container(v5)) self.assertEqual(compat.as_bytes(""), container(q5.queue_ref)) class CondV2ColocationGroupAndDeviceTest(test.TestCase): def testColocateWithBeforeCond(self): with ops.Graph().as_default() as g: with self.session(graph=g): a = constant_op.constant([2.0], name="a") b = constant_op.constant([2.0], name="b") def fn(): c = constant_op.constant(3.0) self.assertEqual([b"loc:@a"], c.op.colocation_groups()) return c with ops.colocate_with(a.op): self.assertEquals( cond_v2.cond_v2(constant_op.constant(True), fn, fn).eval(), 3) def fn2(): c = constant_op.constant(3.0) self.assertEqual([b"loc:@a", b"loc:@b"], c.op.colocation_groups()) return c with ops.colocate_with(a.op): with ops.colocate_with(b.op): self.assertEquals( cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3) def testColocateWithInAndOutOfCond(self): with ops.Graph().as_default() as g: with self.session(graph=g): a = constant_op.constant([2.0], name="a") b = constant_op.constant([2.0], name="b") def fn2(): with ops.colocate_with(b.op): c = constant_op.constant(3.0) self.assertEqual([b"loc:@a", b"loc:@b"], c.op.colocation_groups()) return c with ops.colocate_with(a.op): self.assertEquals( cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3) d = constant_op.constant([2.0], name="d") self.assertEqual([b"loc:@a"], d.op.colocation_groups()) def testColocateWithInCondGraphPartitioning(self): with ops.Graph().as_default() as 
g: with self.session( graph=g, config=config_pb2.ConfigProto(device_count={"CPU": 2}) ) as sess: with ops.device("/device:CPU:0"): a = constant_op.constant([2.0], name="a") with ops.device("/device:CPU:1"): b = constant_op.constant([2.0], name="b") def fn(): with ops.colocate_with(b.op): c = math_ops.add(a, a, name="c") return c out_cond_2 = cond_v2.cond_v2(constant_op.constant(True), fn, fn) run_options = config_pb2.RunOptions(output_partition_graphs=True) run_metadata = config_pb2.RunMetadata() sess.run(out_cond_2, options=run_options, run_metadata=run_metadata) # We expect there to be two partitions because of the # colocate_with. We are only running the cond, which has a data # dependency on `a` but not on `b`. So, without the colocate_with # we would expect execution on just one device. self.assertTrue(len(run_metadata.partition_graphs) >= 2) def testDeviceBeforeCond(self): with ops.Graph().as_default() as g: with self.session(graph=g): def fn(): self.assertEqual("", constant_op.constant(3.0).op.device) return test_ops.device_placement_op() with ops.device("/device:CPU:0"): self.assertIn( compat.as_bytes("CPU:0"), self.evaluate(cond_v2.cond_v2(constant_op.constant(True), fn, fn))) def fn2(): self.assertEqual("", constant_op.constant(3.0).op.device) return test_ops.device_placement_op() if test_util.is_gpu_available(): with ops.device("/device:GPU:0"): self.assertIn( compat.as_bytes("GPU:0"), self.evaluate(cond_v2.cond_v2(constant_op.constant(True), fn2, fn2))) else: self.skipTest("Test requires a GPU to check GPU device placement.") def testDeviceInAndOutOfCond(self): with ops.Graph().as_default() as g: with self.session( graph=g, config=config_pb2.ConfigProto(device_count={"CPU": 2})): def fn2(): with ops.device("/device:CPU:1"): c = constant_op.constant(3.0) self.assertEqual("/device:CPU:1", c.op.device) return c with ops.device("/device:CPU:0"): self.assertEquals( cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3) d = constant_op.constant(4.0) 
self.assertEqual("/device:CPU:0", d.op.device) def testDeviceInCondGraphPartitioning(self): with ops.Graph().as_default() as g: with self.session( graph=g, config=config_pb2.ConfigProto(device_count={"CPU": 2}) ) as sess: def fn(): with ops.device("/device:CPU:1"): c = math_ops.add(a, a, name="c") return c with ops.device("/device:CPU:0"): a = constant_op.constant([2.0], name="a") out_cond_2 = cond_v2.cond_v2(constant_op.constant(True), fn, fn) run_options = config_pb2.RunOptions(output_partition_graphs=True) run_metadata = config_pb2.RunMetadata() sess.run(out_cond_2, options=run_options, run_metadata=run_metadata) self.assertTrue(len(run_metadata.partition_graphs) >= 2) def _cond(pred, true_fn, false_fn, name): if _is_old_cond(): return control_flow_ops.cond(pred, true_fn, false_fn, name=name) else: return cond_v2.cond_v2(pred, true_fn, false_fn, name=name) def _is_old_cond(): return isinstance(ops.get_default_graph()._get_control_flow_context(), control_flow_ops.CondContext) if __name__ == "__main__": test.main()
hfp/tensorflow-xsmm
tensorflow/python/kernel_tests/cond_v2_test.py
Python
apache-2.0
37,016
class ProxyPoolError(Exception):
    """Base class for every exception raised by proxypool."""


class CrawlerRuleImplementionError(ProxyPoolError):
    """Raised when a crawler rule omits one of its required attributes.

    NOTE: the class name's spelling ("Implemention") is part of the public
    API and is kept as-is for backward compatibility.
    """

    # Message text is kept byte-for-byte; callers may match on it.
    _message = 'crawler rule required "start_url", "ip_xpath" and "port_xpath".'

    def __str__(self):
        return self._message


class CrawlerRuleBaseInstantiateError(ProxyPoolError):
    """Raised when the crawler rule base class is instantiated directly."""

    _message = "crawler rule base class shouldn't be instantiated."

    def __str__(self):
        return self._message


class ProxyPoolEmptyError(ProxyPoolError):
    """Raised when the proxy pool has stayed empty for too long."""

    _message = 'the proxy pool was empty in a long time.'

    def __str__(self):
        return self._message
arrti/proxypool
proxypool/errors.py
Python
apache-2.0
501
# -*- coding: utf-8 -*- # # Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Accesses the google.monitoring.v3 MetricService API.""" import functools import pkg_resources import warnings from google.oauth2 import service_account import google.api_core.client_options import google.api_core.gapic_v1.client_info import google.api_core.gapic_v1.config import google.api_core.gapic_v1.method import google.api_core.gapic_v1.routing_header import google.api_core.grpc_helpers import google.api_core.page_iterator import google.api_core.path_template import grpc from google.api import metric_pb2 as api_metric_pb2 from google.api import monitored_resource_pb2 from google.cloud.monitoring_v3.gapic import enums from google.cloud.monitoring_v3.gapic import metric_service_client_config from google.cloud.monitoring_v3.gapic.transports import metric_service_grpc_transport from google.cloud.monitoring_v3.proto import alert_pb2 from google.cloud.monitoring_v3.proto import alert_service_pb2 from google.cloud.monitoring_v3.proto import alert_service_pb2_grpc from google.cloud.monitoring_v3.proto import common_pb2 from google.cloud.monitoring_v3.proto import group_pb2 from google.cloud.monitoring_v3.proto import group_service_pb2 from google.cloud.monitoring_v3.proto import group_service_pb2_grpc from google.cloud.monitoring_v3.proto import metric_pb2 as proto_metric_pb2 from google.cloud.monitoring_v3.proto import metric_service_pb2 from google.cloud.monitoring_v3.proto 
import metric_service_pb2_grpc from google.protobuf import empty_pb2 from google.protobuf import field_mask_pb2 _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( "google-cloud-monitoring" ).version class MetricServiceClient(object): """ Manages metric descriptors, monitored resource descriptors, and time series data. """ SERVICE_ADDRESS = "monitoring.googleapis.com:443" """The default address of the service.""" # The name of the interface for this client. This is the key used to # find the method configuration in the client_config dictionary. _INTERFACE_NAME = "google.monitoring.v3.MetricService" @classmethod def from_service_account_file(cls, filename, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: MetricServiceClient: The constructed client. 
""" credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @classmethod def metric_descriptor_path(cls, project, metric_descriptor): """Return a fully-qualified metric_descriptor string.""" return google.api_core.path_template.expand( "projects/{project}/metricDescriptors/{metric_descriptor=**}", project=project, metric_descriptor=metric_descriptor, ) @classmethod def monitored_resource_descriptor_path(cls, project, monitored_resource_descriptor): """Return a fully-qualified monitored_resource_descriptor string.""" return google.api_core.path_template.expand( "projects/{project}/monitoredResourceDescriptors/{monitored_resource_descriptor}", project=project, monitored_resource_descriptor=monitored_resource_descriptor, ) @classmethod def project_path(cls, project): """Return a fully-qualified project string.""" return google.api_core.path_template.expand( "projects/{project}", project=project ) def __init__( self, transport=None, channel=None, credentials=None, client_config=None, client_info=None, client_options=None, ): """Constructor. Args: transport (Union[~.MetricServiceGrpcTransport, Callable[[~.Credentials, type], ~.MetricServiceGrpcTransport]): A transport instance, responsible for actually making the API calls. The default transport uses the gRPC protocol. This argument may also be a callable which returns a transport instance. Callables will be sent the credentials as the first argument and the default transport class as the second argument. channel (grpc.Channel): DEPRECATED. A ``Channel`` instance through which to make calls. This argument is mutually exclusive with ``credentials``; providing both will raise an exception. credentials (google.auth.credentials.Credentials): The authorization credentials to attach to requests. These credentials identify this application to the service. 
If none are specified, the client will attempt to ascertain the credentials from the environment. This argument is mutually exclusive with providing a transport instance to ``transport``; doing so will raise an exception. client_config (dict): DEPRECATED. A dictionary of call options for each method. If not specified, the default configuration is used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. client_options (Union[dict, google.api_core.client_options.ClientOptions]): Client options used to set user options on the client. API Endpoint should be set through client_options. """ # Raise deprecation warnings for things we want to go away. if client_config is not None: warnings.warn( "The `client_config` argument is deprecated.", PendingDeprecationWarning, stacklevel=2, ) else: client_config = metric_service_client_config.config if channel: warnings.warn( "The `channel` argument is deprecated; use " "`transport` instead.", PendingDeprecationWarning, stacklevel=2, ) api_endpoint = self.SERVICE_ADDRESS if client_options: if type(client_options) == dict: client_options = google.api_core.client_options.from_dict( client_options ) if client_options.api_endpoint: api_endpoint = client_options.api_endpoint # Instantiate the transport. # The transport is responsible for handling serialization and # deserialization and actually sending data to the service. if transport: if callable(transport): self.transport = transport( credentials=credentials, default_class=metric_service_grpc_transport.MetricServiceGrpcTransport, address=api_endpoint, ) else: if credentials: raise ValueError( "Received both a transport instance and " "credentials; these are mutually exclusive." 
) self.transport = transport else: self.transport = metric_service_grpc_transport.MetricServiceGrpcTransport( address=api_endpoint, channel=channel, credentials=credentials ) if client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( gapic_version=_GAPIC_LIBRARY_VERSION ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse out the default settings for retry and timeout for each RPC # from the client configuration. # (Ordinarily, these are the defaults specified in the `*_config.py` # file next to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( client_config["interfaces"][self._INTERFACE_NAME] ) # Save a dictionary of cached API call functions. # These are the actual callables which invoke the proper # transport methods, wrapped with `wrap_method` to add retry, # timeout, and the like. self._inner_api_calls = {} # Service calls def list_monitored_resource_descriptors( self, name, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account. Example: >>> from google.cloud import monitoring_v3 >>> >>> client = monitoring_v3.MetricServiceClient() >>> >>> name = client.project_path('[PROJECT]') >>> >>> # Iterate over all results >>> for element in client.list_monitored_resource_descriptors(name): ... # process element ... pass >>> >>> >>> # Alternatively: >>> >>> # Iterate over results one page at a time >>> for page in client.list_monitored_resource_descriptors(name).pages: ... for element in page: ... # process element ... pass Args: name (str): The project on which to execute the request. The format is ``"projects/{project_id_or_number}"``. 
filter_ (str): An optional `filter <https://cloud.google.com/monitoring/api/v3/filters>`__ describing the descriptors to be returned. The filter can reference the descriptor's type and labels. For example, the following filter returns only Google Compute Engine descriptors that have an ``id`` label: :: resource.type = starts_with("gce_") AND resource.label:id page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page streaming is performed per-page, this determines the maximum number of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.api_core.page_iterator.PageIterator` instance. An iterable of :class:`~google.cloud.monitoring_v3.types.MonitoredResourceDescriptor` instances. You can also iterate over the pages of the response using its `pages` property. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ if metadata is None: metadata = [] metadata = list(metadata) # Wrap the transport method to add retry and timeout logic. 
if "list_monitored_resource_descriptors" not in self._inner_api_calls: self._inner_api_calls[ "list_monitored_resource_descriptors" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_monitored_resource_descriptors, default_retry=self._method_configs[ "ListMonitoredResourceDescriptors" ].retry, default_timeout=self._method_configs[ "ListMonitoredResourceDescriptors" ].timeout, client_info=self._client_info, ) request = metric_service_pb2.ListMonitoredResourceDescriptorsRequest( name=name, filter=filter_, page_size=page_size ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls["list_monitored_resource_descriptors"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field="resource_descriptors", request_token_field="page_token", response_token_field="next_page_token", ) return iterator def get_monitored_resource_descriptor( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Gets a single monitored resource descriptor. This method does not require a Stackdriver account. Example: >>> from google.cloud import monitoring_v3 >>> >>> client = monitoring_v3.MetricServiceClient() >>> >>> name = client.monitored_resource_descriptor_path('[PROJECT]', '[MONITORED_RESOURCE_DESCRIPTOR]') >>> >>> response = client.get_monitored_resource_descriptor(name) Args: name (str): The monitored resource descriptor to get. The format is ``"projects/{project_id_or_number}/monitoredResourceDescriptors/{resource_type}"``. The ``{resource_type}`` is a predefined type, such as ``cloudsql_database``. 
retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.monitoring_v3.types.MonitoredResourceDescriptor` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ if metadata is None: metadata = [] metadata = list(metadata) # Wrap the transport method to add retry and timeout logic. if "get_monitored_resource_descriptor" not in self._inner_api_calls: self._inner_api_calls[ "get_monitored_resource_descriptor" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_monitored_resource_descriptor, default_retry=self._method_configs[ "GetMonitoredResourceDescriptor" ].retry, default_timeout=self._method_configs[ "GetMonitoredResourceDescriptor" ].timeout, client_info=self._client_info, ) request = metric_service_pb2.GetMonitoredResourceDescriptorRequest(name=name) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["get_monitored_resource_descriptor"]( request, retry=retry, timeout=timeout, metadata=metadata ) def list_metric_descriptors( self, name, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Lists 
metric descriptors that match a filter. This method does not require a Stackdriver account. Example: >>> from google.cloud import monitoring_v3 >>> >>> client = monitoring_v3.MetricServiceClient() >>> >>> name = client.project_path('[PROJECT]') >>> >>> # Iterate over all results >>> for element in client.list_metric_descriptors(name): ... # process element ... pass >>> >>> >>> # Alternatively: >>> >>> # Iterate over results one page at a time >>> for page in client.list_metric_descriptors(name).pages: ... for element in page: ... # process element ... pass Args: name (str): The project on which to execute the request. The format is ``"projects/{project_id_or_number}"``. filter_ (str): If this field is empty, all custom and system-defined metric descriptors are returned. Otherwise, the `filter <https://cloud.google.com/monitoring/api/v3/filters>`__ specifies which metric descriptors are to be returned. For example, the following filter matches all `custom metrics <https://cloud.google.com/monitoring/custom-metrics>`__: :: metric.type = starts_with("custom.googleapis.com/") page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page streaming is performed per-page, this determines the maximum number of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.api_core.page_iterator.PageIterator` instance. An iterable of :class:`~google.cloud.monitoring_v3.types.MetricDescriptor` instances. 
You can also iterate over the pages of the response using its `pages` property. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ if metadata is None: metadata = [] metadata = list(metadata) # Wrap the transport method to add retry and timeout logic. if "list_metric_descriptors" not in self._inner_api_calls: self._inner_api_calls[ "list_metric_descriptors" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_metric_descriptors, default_retry=self._method_configs["ListMetricDescriptors"].retry, default_timeout=self._method_configs["ListMetricDescriptors"].timeout, client_info=self._client_info, ) request = metric_service_pb2.ListMetricDescriptorsRequest( name=name, filter=filter_, page_size=page_size ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls["list_metric_descriptors"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field="metric_descriptors", request_token_field="page_token", response_token_field="next_page_token", ) return iterator def get_metric_descriptor( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Gets a single metric descriptor. This method does not require a Stackdriver account. 
Example: >>> from google.cloud import monitoring_v3 >>> >>> client = monitoring_v3.MetricServiceClient() >>> >>> name = client.metric_descriptor_path('[PROJECT]', '[METRIC_DESCRIPTOR]') >>> >>> response = client.get_metric_descriptor(name) Args: name (str): The metric descriptor on which to execute the request. The format is ``"projects/{project_id_or_number}/metricDescriptors/{metric_id}"``. An example value of ``{metric_id}`` is ``"compute.googleapis.com/instance/disk/read_bytes_count"``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.monitoring_v3.types.MetricDescriptor` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ if metadata is None: metadata = [] metadata = list(metadata) # Wrap the transport method to add retry and timeout logic. 
if "get_metric_descriptor" not in self._inner_api_calls: self._inner_api_calls[ "get_metric_descriptor" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_metric_descriptor, default_retry=self._method_configs["GetMetricDescriptor"].retry, default_timeout=self._method_configs["GetMetricDescriptor"].timeout, client_info=self._client_info, ) request = metric_service_pb2.GetMetricDescriptorRequest(name=name) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["get_metric_descriptor"]( request, retry=retry, timeout=timeout, metadata=metadata ) def create_metric_descriptor( self, name, metric_descriptor, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Creates a new metric descriptor. User-created metric descriptors define `custom metrics <https://cloud.google.com/monitoring/custom-metrics>`__. Example: >>> from google.cloud import monitoring_v3 >>> >>> client = monitoring_v3.MetricServiceClient() >>> >>> name = client.project_path('[PROJECT]') >>> >>> # TODO: Initialize `metric_descriptor`: >>> metric_descriptor = {} >>> >>> response = client.create_metric_descriptor(name, metric_descriptor) Args: name (str): The project on which to execute the request. The format is ``"projects/{project_id_or_number}"``. metric_descriptor (Union[dict, ~google.cloud.monitoring_v3.types.MetricDescriptor]): The new `custom metric <https://cloud.google.com/monitoring/custom-metrics>`__ descriptor. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.monitoring_v3.types.MetricDescriptor` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. 
If ``None`` is specified, requests will be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.monitoring_v3.types.MetricDescriptor` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ if metadata is None: metadata = [] metadata = list(metadata) # Wrap the transport method to add retry and timeout logic. if "create_metric_descriptor" not in self._inner_api_calls: self._inner_api_calls[ "create_metric_descriptor" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_metric_descriptor, default_retry=self._method_configs["CreateMetricDescriptor"].retry, default_timeout=self._method_configs["CreateMetricDescriptor"].timeout, client_info=self._client_info, ) request = metric_service_pb2.CreateMetricDescriptorRequest( name=name, metric_descriptor=metric_descriptor ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["create_metric_descriptor"]( request, retry=retry, timeout=timeout, metadata=metadata ) def delete_metric_descriptor( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Deletes a metric descriptor. Only user-created `custom metrics <https://cloud.google.com/monitoring/custom-metrics>`__ can be deleted. 
Example: >>> from google.cloud import monitoring_v3 >>> >>> client = monitoring_v3.MetricServiceClient() >>> >>> name = client.metric_descriptor_path('[PROJECT]', '[METRIC_DESCRIPTOR]') >>> >>> client.delete_metric_descriptor(name) Args: name (str): The metric descriptor on which to execute the request. The format is ``"projects/{project_id_or_number}/metricDescriptors/{metric_id}"``. An example of ``{metric_id}`` is: ``"custom.googleapis.com/my_test_metric"``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ if metadata is None: metadata = [] metadata = list(metadata) # Wrap the transport method to add retry and timeout logic. 
if "delete_metric_descriptor" not in self._inner_api_calls: self._inner_api_calls[ "delete_metric_descriptor" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_metric_descriptor, default_retry=self._method_configs["DeleteMetricDescriptor"].retry, default_timeout=self._method_configs["DeleteMetricDescriptor"].timeout, client_info=self._client_info, ) request = metric_service_pb2.DeleteMetricDescriptorRequest(name=name) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) self._inner_api_calls["delete_metric_descriptor"]( request, retry=retry, timeout=timeout, metadata=metadata ) def list_time_series( self, name, filter_, interval, view, aggregation=None, order_by=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Lists time series that match a filter. This method does not require a Stackdriver account. Example: >>> from google.cloud import monitoring_v3 >>> from google.cloud.monitoring_v3 import enums >>> >>> client = monitoring_v3.MetricServiceClient() >>> >>> name = client.project_path('[PROJECT]') >>> >>> # TODO: Initialize `filter_`: >>> filter_ = '' >>> >>> # TODO: Initialize `interval`: >>> interval = {} >>> >>> # TODO: Initialize `view`: >>> view = enums.ListTimeSeriesRequest.TimeSeriesView.FULL >>> >>> # Iterate over all results >>> for element in client.list_time_series(name, filter_, interval, view): ... # process element ... pass >>> >>> >>> # Alternatively: >>> >>> # Iterate over results one page at a time >>> for page in client.list_time_series(name, filter_, interval, view).pages: ... for element in page: ... # process element ... pass Args: name (str): The project on which to execute the request. 
The format is "projects/{project\_id\_or\_number}". filter_ (str): A `monitoring filter <https://cloud.google.com/monitoring/api/v3/filters>`__ that specifies which time series should be returned. The filter must specify a single metric type, and can additionally specify metric labels and other information. For example: :: metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND metric.label.instance_name = "my-instance-name" interval (Union[dict, ~google.cloud.monitoring_v3.types.TimeInterval]): The time interval for which results should be returned. Only time series that contain data points in the specified interval are included in the response. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.monitoring_v3.types.TimeInterval` view (~google.cloud.monitoring_v3.types.TimeSeriesView): Specifies which information is returned about the time series. aggregation (Union[dict, ~google.cloud.monitoring_v3.types.Aggregation]): By default, the raw time series data is returned. Use this field to combine multiple time series for different views of the data. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.monitoring_v3.types.Aggregation` order_by (str): Unsupported: must be left blank. The points in each time series are returned in reverse time order. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page streaming is performed per-page, this determines the maximum number of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. 
Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.api_core.page_iterator.PageIterator` instance. An iterable of :class:`~google.cloud.monitoring_v3.types.TimeSeries` instances. You can also iterate over the pages of the response using its `pages` property. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ if metadata is None: metadata = [] metadata = list(metadata) # Wrap the transport method to add retry and timeout logic. if "list_time_series" not in self._inner_api_calls: self._inner_api_calls[ "list_time_series" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_time_series, default_retry=self._method_configs["ListTimeSeries"].retry, default_timeout=self._method_configs["ListTimeSeries"].timeout, client_info=self._client_info, ) request = metric_service_pb2.ListTimeSeriesRequest( name=name, filter=filter_, interval=interval, view=view, aggregation=aggregation, order_by=order_by, page_size=page_size, ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls["list_time_series"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field="time_series", request_token_field="page_token", response_token_field="next_page_token", ) return iterator def create_time_series( self, name, time_series, retry=google.api_core.gapic_v1.method.DEFAULT, 
timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Creates or adds data to one or more time series. The response is empty if all time series in the request were written. If any time series could not be written, a corresponding failure message is included in the error response. Example: >>> from google.cloud import monitoring_v3 >>> >>> client = monitoring_v3.MetricServiceClient() >>> >>> name = client.project_path('[PROJECT]') >>> >>> # TODO: Initialize `time_series`: >>> time_series = [] >>> >>> client.create_time_series(name, time_series) Args: name (str): The project on which to execute the request. The format is ``"projects/{project_id_or_number}"``. time_series (list[Union[dict, ~google.cloud.monitoring_v3.types.TimeSeries]]): The new data to be added to a list of time series. Adds at most one data point to each of several time series. The new data point must be more recent than any other point in its time series. Each ``TimeSeries`` value must fully specify a unique time series by supplying all label values for the metric and the monitored resource. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.monitoring_v3.types.TimeSeries` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. 
""" if metadata is None: metadata = [] metadata = list(metadata) # Wrap the transport method to add retry and timeout logic. if "create_time_series" not in self._inner_api_calls: self._inner_api_calls[ "create_time_series" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_time_series, default_retry=self._method_configs["CreateTimeSeries"].retry, default_timeout=self._method_configs["CreateTimeSeries"].timeout, client_info=self._client_info, ) request = metric_service_pb2.CreateTimeSeriesRequest( name=name, time_series=time_series ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) self._inner_api_calls["create_time_series"]( request, retry=retry, timeout=timeout, metadata=metadata )
tseaver/google-cloud-python
monitoring/google/cloud/monitoring_v3/gapic/metric_service_client.py
Python
apache-2.0
43,991
import copy
import logging

from strategy import Strategy

# NOTE(review): configuring the root logger at import time is a module-level
# side effect; kept as-is for backward compatibility with existing callers.
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)


class PersistenceStrategy(Strategy):
    """Strategy specialization that matches events on their persistency flag."""

    def __init__(self, strategy):
        """Seed this instance with a deep copy of *strategy*'s attributes.

        :param strategy: a Strategy instance whose entire attribute dict
            (``__dict__``) is deep-copied onto this object.
        """
        self.__dict__ = copy.deepcopy(strategy.__dict__)

    def compatible(self, event):
        """Return True when *event* matches this strategy's persistency setting.

        A ``None`` event is treated as compatible with everything.

        :param event: event exposing a nested ``dimensions`` mapping, or None.
        :returns: bool
        """
        if event is None:
            return True
        # Single boolean return instead of the verbose if/else that returned
        # literal True/False; bool() preserves the exact True/False contract
        # even if the compared values override __eq__.
        return bool(event.dimensions["persistency"]["enabled"] ==
                    self.classification["description"]["enabled"])
ClockworkOrigins/m2etis
configurator/missile/classes/PersistenceStrategy.py
Python
apache-2.0
516
# Copyright (c) 2013 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

import webob.exc

from neutron.api import extensions
from neutron.api.v2 import base
from neutron.api.v2 import resource
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.extensions import agent
from neutron.i18n import _LE
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as service_constants
from neutron import policy
from neutron import wsgi


LOG = logging.getLogger(__name__)

# REST resource/collection names for the two scheduler sub-resources:
# routers hosted by an agent, and agents hosting a router.
L3_ROUTER = 'l3-router'
L3_ROUTERS = L3_ROUTER + 's'
L3_AGENT = 'l3-agent'
L3_AGENTS = L3_AGENT + 's'


class RouterSchedulerController(wsgi.Controller):
    """Controller for the agent -> routers sub-resource.

    Lists, adds and removes routers scheduled on a specific L3 agent;
    the parent resource in the URL is the agent.
    """

    def get_plugin(self):
        """Return the L3 routing service plugin, or raise HTTP 404."""
        plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        if not plugin:
            LOG.error(_LE('No plugin for L3 routing registered to handle '
                          'router scheduling'))
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)
        return plugin

    def index(self, request, **kwargs):
        """List the routers hosted by the agent identified in the URL."""
        plugin = self.get_plugin()
        # Authorization is checked against the collection-level policy rule.
        policy.enforce(request.context,
                       "get_%s" % L3_ROUTERS,
                       {})
        return plugin.list_routers_on_l3_agent(
            request.context, kwargs['agent_id'])

    def create(self, request, body, **kwargs):
        """Schedule the router named in *body* onto the agent in the URL."""
        plugin = self.get_plugin()
        policy.enforce(request.context,
                       "create_%s" % L3_ROUTER,
                       {})
        agent_id = kwargs['agent_id']
        router_id = body['router_id']
        result = plugin.add_router_to_l3_agent(request.context,
                                               agent_id,
                                               router_id)
        # Emit an oslo notification so external listeners see the event.
        notify(request.context, 'l3_agent.router.add', router_id, agent_id)
        return result

    def delete(self, request, id, **kwargs):
        """Unschedule router *id* from the agent identified in the URL."""
        plugin = self.get_plugin()
        policy.enforce(request.context,
                       "delete_%s" % L3_ROUTER,
                       {})
        agent_id = kwargs['agent_id']
        result = plugin.remove_router_from_l3_agent(request.context,
                                                    agent_id,
                                                    id)
        notify(request.context, 'l3_agent.router.remove', id, agent_id)
        return result


class L3AgentsHostingRouterController(wsgi.Controller):
    """Read-only controller for the router -> agents sub-resource."""

    def get_plugin(self):
        """Return the L3 routing service plugin, or raise HTTP 404."""
        plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        if not plugin:
            LOG.error(_LE('No plugin for L3 routing registered to handle '
                          'router scheduling'))
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)
        return plugin

    def index(self, request, **kwargs):
        """List the L3 agents hosting the router identified in the URL."""
        plugin = self.get_plugin()
        policy.enforce(request.context,
                       "get_%s" % L3_AGENTS,
                       {})
        return plugin.list_l3_agents_hosting_router(
            request.context, kwargs['router_id'])


class L3agentscheduler(extensions.ExtensionDescriptor):
    """Extension class supporting l3 agent scheduler.

    Registers the two sub-resource controllers above as API extensions.
    """

    @classmethod
    def get_name(cls):
        return "L3 Agent Scheduler"

    @classmethod
    def get_alias(cls):
        return constants.L3_AGENT_SCHEDULER_EXT_ALIAS

    @classmethod
    def get_description(cls):
        return "Schedule routers among l3 agents"

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/l3_agent_scheduler/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2013-02-07T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources.

        Builds two nested resources: /agents/{id}/l3-routers and
        /routers/{id}/l3-agents, each wired to its controller.
        """
        exts = []
        parent = dict(member_name="agent",
                      collection_name="agents")

        controller = resource.Resource(RouterSchedulerController(),
                                       base.FAULT_MAP)
        exts.append(extensions.ResourceExtension(
            L3_ROUTERS, controller, parent))

        parent = dict(member_name="router",
                      collection_name="routers")

        controller = resource.Resource(L3AgentsHostingRouterController(),
                                       base.FAULT_MAP)
        exts.append(extensions.ResourceExtension(
            L3_AGENTS, controller, parent))
        return exts

    def get_extended_resources(self, version):
        # This extension adds resources, not attributes on existing ones.
        return {}


class InvalidL3Agent(agent.AgentNotFound):
    """Raised when the target agent is not an (enabled) L3 agent."""
    message = _("Agent %(id)s is not a L3 Agent or has been disabled")


class RouterHostedByL3Agent(exceptions.Conflict):
    """Raised when the router is already scheduled on the given agent."""
    message = _("The router %(router_id)s has been already hosted"
                " by the L3 Agent %(agent_id)s.")


class RouterSchedulingFailed(exceptions.Conflict):
    """Raised when scheduling a router onto a specific agent fails."""
    message = _("Failed scheduling router %(router_id)s to"
                " the L3 Agent %(agent_id)s.")


class RouterReschedulingFailed(exceptions.Conflict):
    """Raised when no eligible agent can be found to take over a router."""
    message = _("Failed rescheduling router %(router_id)s: "
                "no eligible l3 agent found.")


class RouterNotHostedByL3Agent(exceptions.Conflict):
    """Raised when removing a router from an agent that does not host it."""
    message = _("The router %(router_id)s is not hosted"
                " by L3 agent %(agent_id)s.")


class RouterL3AgentMismatch(exceptions.Conflict):
    """Raised when router type and agent mode are incompatible."""
    message = _("Cannot host %(router_type)s router %(router_id)s "
                "on %(agent_mode)s L3 agent %(agent_id)s.")


class DVRL3CannotAssignToDvrAgent(exceptions.Conflict):
    """Raised on manual reassignment of a DVR router between agents."""
    message = _("Not allowed to manually assign a %(router_type)s "
                "router %(router_id)s from an existing DVR node "
                "to another L3 agent %(agent_id)s.")


class L3AgentSchedulerPluginBase(object):
    """REST API to operate the l3 agent scheduler.

    All of method must be in an admin context.
    """
    # NOTE(review): the @abc.abstractmethod markers below are not enforced
    # because this class has no ABCMeta metaclass (e.g. via
    # six.add_metaclass(abc.ABCMeta)) — confirm whether enforcement was
    # intended or is deliberately left to convention.

    @abc.abstractmethod
    def add_router_to_l3_agent(self, context, id, router_id):
        pass

    @abc.abstractmethod
    def remove_router_from_l3_agent(self, context, id, router_id):
        pass

    @abc.abstractmethod
    def list_routers_on_l3_agent(self, context, id):
        pass

    @abc.abstractmethod
    def list_l3_agents_hosting_router(self, context, router_id):
        pass


def notify(context, action, router_id, agent_id):
    """Send an oslo notification for a router (un)scheduling *action*."""
    info = {'id': agent_id, 'router_id': router_id}
    notifier = n_rpc.get_notifier('router')
    notifier.info(context, action,
                  {'agent': info})
cloudbase/neutron-virtualbox
neutron/extensions/l3agentscheduler.py
Python
apache-2.0
7,118
class CMEModule:
    """CrackMapExec module: report whether LSA protection (RunAsPPL) is set.

    Queries the ``RunAsPPL`` value under
    ``HKLM\\SYSTEM\\CurrentControlSet\\Control\\Lsa`` on the target and
    prints the raw ``reg query`` output.
    """

    name = 'runasppl'
    description = "Check if the registry value RunAsPPL is set or not"
    supported_protocols = ['smb']
    opsec_safe = True
    multiple_hosts = True

    def options(self, context, module_options):
        '''This module takes no options.'''

    def on_admin_login(self, context, connection):
        """Run the registry query after an administrative login.

        :param context: CME execution context (provides ``log``).
        :param connection: established connection used to execute commands.
        """
        # Raw string: the registry path contains backslash sequences
        # ("\S", "\C", "\L", "\ ") that are invalid escapes in a normal
        # literal (deprecated since Python 3.6, an error in the future).
        # The raw form produces byte-identical runtime text.
        command = r'reg query HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Lsa\ /v RunAsPPL'
        context.log.info('Executing command')
        p = connection.execute(command, True)
        context.log.highlight(p)
byt3bl33d3r/CrackMapExec
cme/modules/runasppl.py
Python
bsd-2-clause
542
from functools import reduce


def vartype(var):
    """Return a small integer code for *var*'s kind.

    1 = discrete, 2 = continuous, 3 = string, 0 = anything else.
    *var* is expected to expose boolean ``is_discrete`` /
    ``is_continuous`` / ``is_string`` attributes.
    """
    if var.is_discrete:
        return 1
    elif var.is_continuous:
        return 2
    elif var.is_string:
        return 3
    else:
        return 0


def progress_bar_milestones(count, iterations=100):
    """Return the set of indices at which a progress bar should advance.

    :param count: total number of work items.
    :param iterations: number of visual progress steps (default 100).
    :returns: set of int milestones in ``[0, count)``.
    """
    # Set comprehension instead of set([...]) around a list comprehension
    # (same values, no throwaway intermediate list).
    return {int(i * count / float(iterations)) for i in range(iterations)}


def getdeepattr(obj, attr, *arg, **kwarg):
    """Resolve a dotted attribute path on *obj*.

    For dicts, performs a single-key ``get`` lookup.  Otherwise follows
    the dot-separated *attr* chain with ``getattr``; on failure returns
    the positional fallback, then ``kwarg["default"]``, else re-raises.

    NOTE: the dict branch ignores the fallbacks and returns None for a
    missing key — preserved for backward compatibility.
    """
    if isinstance(obj, dict):
        return obj.get(attr)
    try:
        return reduce(getattr, attr.split("."), obj)
    except AttributeError:
        if arg:
            return arg[0]
        if kwarg:
            return kwarg["default"]
        raise


def getHtmlCompatibleString(strVal):
    """Escape comparison operators in *strVal* as HTML numeric entities."""
    # Order matters: two-character operators must be handled before their
    # single-character prefixes ("<=" before "<", ">=" before ">").
    replacements = (
        ("<=", "&#8804;"),
        (">=", "&#8805;"),
        ("<", "&#60;"),
        (">", "&#62;"),
        ("=\\=", "&#8800;"),
    )
    for old, new in replacements:
        strVal = strVal.replace(old, new)
    return strVal
marinkaz/orange3
Orange/widgets/utils/__init__.py
Python
bsd-2-clause
811
""" @package mi.instrument.seabird.sbe16plus_v2.test.test_driver @file mi/instrument/seabird/sbe16plus_v2/test/test_driver.py @author David Everett @brief Test cases for InstrumentDriver USAGE: Make tests verbose and provide stdout * From the IDK $ bin/test_driver $ bin/test_driver -u $ bin/test_driver -i $ bin/test_driver -q * From pyon $ bin/nosetests -s -v .../mi/instrument/seabird/sbe16plus_v2/ooicore $ bin/nosetests -s -v .../mi/instrument/seabird/sbe16plus_v2/ooicore -a UNIT $ bin/nosetests -s -v .../mi/instrument/seabird/sbe16plus_v2/ooicore -a INT $ bin/nosetests -s -v .../mi/instrument/seabird/sbe16plus_v2/ooicore -a QUAL """ __author__ = 'David Everett' __license__ = 'Apache 2.0' # Standard lib imports import time import unittest # 3rd party imports from nose.plugins.attrib import attr from mock import Mock # MI logger from mi.core.log import get_logger ; log = get_logger() from mi.core.instrument.instrument_driver import DriverAsyncEvent from mi.core.instrument.chunker import StringChunker # from interface.objects import AgentCommand from mi.idk.unit_test import DriverTestMixin from mi.idk.unit_test import ParameterTestConfigKey from mi.idk.unit_test import AgentCapabilityType from mi.core.exceptions import InstrumentParameterException from mi.core.exceptions import InstrumentProtocolException from mi.core.exceptions import InstrumentCommandException from mi.core.exceptions import InstrumentTimeoutException from mi.instrument.seabird.sbe16plus_v2.driver import SBE16Protocol from mi.instrument.seabird.sbe16plus_v2.driver import SBE16InstrumentDriver from mi.instrument.seabird.sbe16plus_v2.driver import DataParticleType from mi.instrument.seabird.sbe16plus_v2.driver import ConfirmedParameter from mi.instrument.seabird.sbe16plus_v2.driver import NEWLINE from mi.instrument.seabird.sbe16plus_v2.driver import SBE16DataParticleKey from mi.instrument.seabird.sbe16plus_v2.driver import SBE16StatusParticleKey from mi.instrument.seabird.sbe16plus_v2.driver import 
SBE16CalibrationParticleKey from mi.instrument.seabird.sbe16plus_v2.driver import ProtocolState from mi.instrument.seabird.sbe16plus_v2.driver import ProtocolEvent from mi.instrument.seabird.sbe16plus_v2.driver import ScheduledJob from mi.instrument.seabird.sbe16plus_v2.driver import Capability from mi.instrument.seabird.sbe16plus_v2.driver import Parameter from mi.instrument.seabird.sbe16plus_v2.driver import Command from mi.instrument.seabird.sbe16plus_v2.driver import Prompt from mi.instrument.seabird.driver import SBE_EPOCH from mi.instrument.seabird.test.test_driver import SeaBirdUnitTest from mi.instrument.seabird.test.test_driver import SeaBirdIntegrationTest from mi.instrument.seabird.test.test_driver import SeaBirdQualificationTest from mi.instrument.seabird.test.test_driver import SeaBirdPublicationTest from mi.core.instrument.instrument_driver import DriverConnectionState from mi.core.instrument.instrument_driver import DriverProtocolState from mi.core.instrument.instrument_driver import ResourceAgentState class SeaBird16plusMixin(DriverTestMixin): InstrumentDriver = SBE16InstrumentDriver ''' Mixin class used for storing data particle constants and common data assertion methods. ''' # Create some short names for the parameter test config TYPE = ParameterTestConfigKey.TYPE READONLY = ParameterTestConfigKey.READONLY STARTUP = ParameterTestConfigKey.STARTUP DA = ParameterTestConfigKey.DIRECT_ACCESS VALUE = ParameterTestConfigKey.VALUE REQUIRED = ParameterTestConfigKey.REQUIRED DEFAULT = ParameterTestConfigKey.DEFAULT STATES = ParameterTestConfigKey.STATES ### # Instrument output (driver input) Definitions ### VALID_SAMPLE = "#0409DB0A738C81747A84AC0006000A2E541E18BE6ED9" + NEWLINE VALID_SAMPLE2 = "0409DB0A738C81747A84AC0006000A2E541E18BE6ED9" + NEWLINE VALID_DS_RESPONSE = 'SBE 16plus V 2.5 SERIAL NO. 
6841 28 Feb 2013 16:39:31' + NEWLINE + \ 'vbatt = 23.4, vlith = 8.0, ioper = 61.4 ma, ipump = 0.3 ma,' + NEWLINE + \ 'status = not logging' + NEWLINE + \ 'samples = 0, free = 4386542' + NEWLINE + \ 'sample interval = 10 seconds, number of measurements per sample = 4' + NEWLINE + \ 'pump = run pump during sample, delay before sampling = 0.0 seconds, delay after sampling = 0.0 seconds' + NEWLINE + \ 'transmit real-time = yes' + NEWLINE + \ 'battery cutoff = 7.5 volts' + NEWLINE + \ 'pressure sensor = strain gauge, range = 160.0' + NEWLINE + \ 'SBE 38 = no, SBE 50 = no, WETLABS = no, OPTODE = no, SBE63 = no, Gas Tension Device = no' + NEWLINE + \ 'Ext Volt 0 = yes, Ext Volt 1 = yes' + NEWLINE + \ 'Ext Volt 2 = yes, Ext Volt 3 = yes' + NEWLINE + \ 'Ext Volt 4 = yes, Ext Volt 5 = yes' + NEWLINE + \ 'echo characters = yes' + NEWLINE + \ 'output format = raw HEX' + NEWLINE + \ 'serial sync mode disabled' + NEWLINE VALID_DCAL_QUARTZ = 'SBE 16plus V 2.5 SERIAL NO. 6841 28 Feb 2013 18:37:40' + NEWLINE + \ 'temperature: 18-May-12' + NEWLINE + \ ' TA0 = 1.561342e-03' + NEWLINE + \ ' TA1 = 2.561486e-04' + NEWLINE + \ ' TA2 = 1.896537e-07' + NEWLINE + \ ' TA3 = 1.301189e-07' + NEWLINE + \ ' TOFFSET = 0.000000e+00' + NEWLINE + \ 'conductivity: 18-May-11' + NEWLINE + \ ' G = -9.896568e-01' + NEWLINE + \ ' H = 1.316599e-01' + NEWLINE + \ ' I = -2.213854e-04' + NEWLINE + \ ' J = 3.292199e-05' + NEWLINE + \ ' CPCOR = -9.570000e-08' + NEWLINE + \ ' CTCOR = 3.250000e-06' + NEWLINE + \ ' CSLOPE = 1.000000e+00' + NEWLINE + \ 'pressure S/N = 125270, range = 1000 psia: 02-nov-12' + NEWLINE + \ ' PC1 = -4.642673e+03' + NEWLINE + \ ' PC2 = -4.611640e-03' + NEWLINE + \ ' PC3 = 8.921190e-04' + NEWLINE + \ ' PD1 = 7.024800e-02' + NEWLINE + \ ' PD2 = 0.000000e+00' + NEWLINE + \ ' PT1 = 3.022595e+01' + NEWLINE + \ ' PT2 = -1.549720e-04' + NEWLINE + \ ' PT3 = 2.677750e-06' + NEWLINE + \ ' PT4 = 1.705490e-09' + NEWLINE + \ ' PSLOPE = 1.000000e+00' + NEWLINE + \ ' POFFSET = 0.000000e+00' + NEWLINE + 
\ 'volt 0: offset = -4.650526e-02, slope = 1.246381e+00' + NEWLINE + \ 'volt 1: offset = -4.618105e-02, slope = 1.247197e+00' + NEWLINE + \ 'volt 2: offset = -4.659790e-02, slope = 1.247601e+00' + NEWLINE + \ 'volt 3: offset = -4.502421e-02, slope = 1.246911e+00' + NEWLINE + \ 'volt 4: offset = -4.589158e-02, slope = 1.246346e+00' + NEWLINE + \ 'volt 5: offset = -4.609895e-02, slope = 1.247868e+00' + NEWLINE + \ ' EXTFREQSF = 9.999949e-01' + NEWLINE VALID_DCAL_STRAIN ='SBE 16plus V 2.5 SERIAL NO. 6841 28 Feb 2013 18:37:40' + NEWLINE + \ 'temperature: 18-May-12' + NEWLINE + \ ' TA0 = 1.561342e-03' + NEWLINE + \ ' TA1 = 2.561486e-04' + NEWLINE + \ ' TA2 = 1.896537e-07' + NEWLINE + \ ' TA3 = 1.301189e-07' + NEWLINE + \ ' TOFFSET = 0.000000e+00' + NEWLINE + \ 'conductivity: 18-May-11' + NEWLINE + \ ' G = -9.896568e-01' + NEWLINE + \ ' H = 1.316599e-01' + NEWLINE + \ ' I = -2.213854e-04' + NEWLINE + \ ' J = 3.292199e-05' + NEWLINE + \ ' CPCOR = -9.570000e-08' + NEWLINE + \ ' CTCOR = 3.250000e-06' + NEWLINE + \ ' CSLOPE = 1.000000e+00' + NEWLINE + \ 'pressure S/N = 3230195, range = 160 psia: 11-May-11' + NEWLINE + \ ' PA0 = 4.960417e-02' + NEWLINE + \ ' PA1 = 4.883682e-04' + NEWLINE + \ ' PA2 = -5.687309e-12' + NEWLINE + \ ' PTCA0 = 5.249802e+05' + NEWLINE + \ ' PTCA1 = 7.595719e+00' + NEWLINE + \ ' PTCA2 = -1.322776e-01' + NEWLINE + \ ' PTCB0 = 2.503125e+01' + NEWLINE + \ ' PTCB1 = 5.000000e-05' + NEWLINE + \ ' PTCB2 = 0.000000e+00' + NEWLINE + \ ' PTEMPA0 = -6.431504e+01' + NEWLINE + \ ' PTEMPA1 = 5.168177e+01' + NEWLINE + \ ' PTEMPA2 = -2.847757e-01' + NEWLINE + \ ' POFFSET = 0.000000e+00' + NEWLINE + \ 'volt 0: offset = -4.650526e-02, slope = 1.246381e+00' + NEWLINE + \ 'volt 1: offset = -4.618105e-02, slope = 1.247197e+00' + NEWLINE + \ 'volt 2: offset = -4.659790e-02, slope = 1.247601e+00' + NEWLINE + \ 'volt 3: offset = -4.502421e-02, slope = 1.246911e+00' + NEWLINE + \ 'volt 4: offset = -4.589158e-02, slope = 1.246346e+00' + NEWLINE + \ 'volt 5: offset = 
-4.609895e-02, slope = 1.247868e+00' + NEWLINE + \ ' EXTFREQSF = 9.999949e-01' + NEWLINE ### # Parameter and Type Definitions ### _driver_parameters = { # Parameters defined in the IOS Parameter.DATE_TIME : {TYPE: str, READONLY: True, DA: False, STARTUP: False}, Parameter.ECHO : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: True, VALUE: True}, Parameter.OUTPUT_EXEC_TAG : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: True, VALUE: True}, Parameter.TXREALTIME : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: True, VALUE: True}, Parameter.PUMP_MODE : {TYPE: int, READONLY: False, DA: True, STARTUP: True, DEFAULT: 2, VALUE: 2}, Parameter.NCYCLES : {TYPE: int, READONLY: False, DA: False, STARTUP: True, DEFAULT: 4, VALUE: 4}, Parameter.INTERVAL : {TYPE: int, READONLY: False, DA: False, STARTUP: True, VALUE: 10}, Parameter.BIOWIPER : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: False, VALUE: False}, Parameter.PTYPE : {TYPE: int, READONLY: True, DA: True, STARTUP: True, DEFAULT: 1, VALUE: 1}, Parameter.VOLT0 : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: True, VALUE: True}, Parameter.VOLT1 : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: True, VALUE: True}, Parameter.VOLT2 : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: True, VALUE: True}, Parameter.VOLT3 : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: True, VALUE: True}, Parameter.VOLT4 : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: True, VALUE: True}, Parameter.VOLT5 : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: True, VALUE: True}, Parameter.DELAY_BEFORE_SAMPLE : {TYPE: float, READONLY: True, DA: True, STARTUP: True, DEFAULT: 0.0, VALUE: 0.0}, Parameter.DELAY_AFTER_SAMPLE : {TYPE: float, READONLY: True, DA: True, STARTUP: True, DEFAULT: 0.0, VALUE: 0.0}, Parameter.SBE63 : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: False, VALUE: 
False}, Parameter.SBE38 : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: False, VALUE: False}, Parameter.SBE50 : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: False, VALUE: False}, Parameter.WETLABS : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: False, VALUE: False}, Parameter.GTD : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: False, VALUE: False}, Parameter.OPTODE : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: False, VALUE: False}, Parameter.SYNCMODE : {TYPE: bool, READONLY: True, DA: True, STARTUP: True, DEFAULT: False, VALUE: False}, Parameter.SYNCWAIT : {TYPE: bool, READONLY: True, DA: False, STARTUP: False, DEFAULT: 0, VALUE: 0, REQUIRED: False}, Parameter.OUTPUT_FORMAT : {TYPE: int, READONLY: True, DA: True, STARTUP: True, DEFAULT: 0, VALUE: 0}, Parameter.LOGGING : {TYPE: bool, READONLY: True, DA: False, STARTUP: False}, } _driver_capabilities = { # capabilities defined in the IOS Capability.QUIT_SESSION : {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]}, Capability.START_AUTOSAMPLE : {STATES: [ProtocolState.COMMAND]}, Capability.STOP_AUTOSAMPLE : {STATES: [ProtocolState.AUTOSAMPLE]}, Capability.CLOCK_SYNC : {STATES: [ProtocolState.COMMAND]}, Capability.ACQUIRE_STATUS : {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]}, Capability.GET_CONFIGURATION : {STATES: [ProtocolState.COMMAND, ProtocolState.AUTOSAMPLE]}, Capability.TEST : {STATES: [ProtocolState.COMMAND]}, Capability.RESET_EC : {STATES: [ProtocolState.COMMAND]}, } _sample_parameters = { SBE16DataParticleKey.TEMP: {TYPE: int, VALUE: 264667, REQUIRED: True }, SBE16DataParticleKey.CONDUCTIVITY: {TYPE: int, VALUE: 684940, REQUIRED: True }, SBE16DataParticleKey.PRESSURE: {TYPE: int, VALUE: 8483962, REQUIRED: True }, SBE16DataParticleKey.PRESSURE_TEMP: {TYPE: int, VALUE: 33964, REQUIRED: True }, SBE16DataParticleKey.TIME: {TYPE: int, VALUE: 415133401, REQUIRED: True }, } _status_parameters = { 
SBE16StatusParticleKey.FIRMWARE_VERSION: {TYPE: unicode, VALUE: '2.5', REQUIRED: True }, SBE16StatusParticleKey.SERIAL_NUMBER: {TYPE: int, VALUE: 6841, REQUIRED: True }, SBE16StatusParticleKey.DATE_TIME: {TYPE: unicode, VALUE: '28 Feb 2013 16:39:31', REQUIRED: True }, SBE16StatusParticleKey.VBATT: {TYPE: float, VALUE: 23.4, REQUIRED: True }, SBE16StatusParticleKey.VLITH: {TYPE: float, VALUE: 8.0, REQUIRED: True }, SBE16StatusParticleKey.IOPER: {TYPE: float, VALUE: 61.4, REQUIRED: True }, SBE16StatusParticleKey.IPUMP: {TYPE: float, VALUE: 0.3, REQUIRED: True }, SBE16StatusParticleKey.STATUS: {TYPE: unicode, VALUE: 'not logging', REQUIRED: True }, SBE16StatusParticleKey.SAMPLES: {TYPE: int, VALUE: 0, REQUIRED: True }, SBE16StatusParticleKey.FREE: {TYPE: int, VALUE: 4386542, REQUIRED: True }, SBE16StatusParticleKey.SAMPLE_INTERVAL: {TYPE: int, VALUE: 10, REQUIRED: True }, SBE16StatusParticleKey.MEASUREMENTS_PER_SAMPLE: {TYPE: int, VALUE: 4, REQUIRED: True }, SBE16StatusParticleKey.PUMP_MODE: {TYPE: unicode, VALUE: 'run pump during sample', REQUIRED: True }, SBE16StatusParticleKey.DELAY_BEFORE_SAMPLING: {TYPE: float, VALUE: 0.0, REQUIRED: True }, SBE16StatusParticleKey.DELAY_AFTER_SAMPLING: {TYPE: float, VALUE: 0.0, REQUIRED: True }, SBE16StatusParticleKey.TX_REAL_TIME: {TYPE: int, VALUE: 1, REQUIRED: True }, SBE16StatusParticleKey.BATTERY_CUTOFF: {TYPE: float, VALUE: 7.5, REQUIRED: True }, SBE16StatusParticleKey.PRESSURE_SENSOR: {TYPE: unicode, VALUE: 'strain gauge', REQUIRED: True }, SBE16StatusParticleKey.RANGE: {TYPE: float, VALUE: 160, REQUIRED: True }, SBE16StatusParticleKey.SBE38: {TYPE: int, VALUE: 0, REQUIRED: True }, SBE16StatusParticleKey.SBE50: {TYPE: int, VALUE: 0, REQUIRED: True }, SBE16StatusParticleKey.WETLABS: {TYPE: int, VALUE: 0, REQUIRED: True }, SBE16StatusParticleKey.OPTODE: {TYPE: int, VALUE: 0, REQUIRED: True }, SBE16StatusParticleKey.GAS_TENSION_DEVICE: {TYPE: int, VALUE: 0, REQUIRED: True }, SBE16StatusParticleKey.EXT_VOLT_0: {TYPE: int, 
VALUE: 1, REQUIRED: True }, SBE16StatusParticleKey.EXT_VOLT_1: {TYPE: int, VALUE: 1, REQUIRED: True }, SBE16StatusParticleKey.EXT_VOLT_2: {TYPE: int, VALUE: 1, REQUIRED: True }, SBE16StatusParticleKey.EXT_VOLT_3: {TYPE: int, VALUE: 1, REQUIRED: True }, SBE16StatusParticleKey.EXT_VOLT_4: {TYPE: int, VALUE: 1, REQUIRED: True }, SBE16StatusParticleKey.EXT_VOLT_5: {TYPE: int, VALUE: 1, REQUIRED: True }, SBE16StatusParticleKey.ECHO_CHARACTERS: {TYPE: int, VALUE: 1, REQUIRED: True }, SBE16StatusParticleKey.OUTPUT_FORMAT: {TYPE: int, VALUE: 0, REQUIRED: True }, SBE16StatusParticleKey.OUTPUT_SALINITY: {TYPE: int, VALUE: 0, REQUIRED: False }, SBE16StatusParticleKey.OUTPUT_SOUND_VELOCITY: {TYPE: int, VALUE: 0, REQUIRED: False }, SBE16StatusParticleKey.SERIAL_SYNC_MODE: {TYPE: int, VALUE: 0, REQUIRED: True }, } # Base calibration structure, but exludes pressure sensor type. Those parameters are based # on ptype _calibration_parameters_base = { SBE16CalibrationParticleKey.FIRMWARE_VERSION: {TYPE: unicode, VALUE: "2.5", REQUIRED: True }, SBE16CalibrationParticleKey.SERIAL_NUMBER: {TYPE: int, VALUE: 6841, REQUIRED: True }, SBE16CalibrationParticleKey.DATE_TIME: {TYPE: unicode, VALUE: "28 Feb 2013 18:37:40", REQUIRED: True }, SBE16CalibrationParticleKey.TEMP_CAL_DATE: {TYPE: unicode, VALUE: "18-May-12", REQUIRED: True }, SBE16CalibrationParticleKey.TA0: {TYPE: float, VALUE: 1.561342e-03, REQUIRED: True }, SBE16CalibrationParticleKey.TA1: {TYPE: float, VALUE: 2.561486e-04, REQUIRED: True }, SBE16CalibrationParticleKey.TA2: {TYPE: float, VALUE: 1.896537e-07, REQUIRED: True }, SBE16CalibrationParticleKey.TA3: {TYPE: float, VALUE: 1.301189e-07, REQUIRED: True }, SBE16CalibrationParticleKey.TOFFSET: {TYPE: float, VALUE: 0.0, REQUIRED: True }, SBE16CalibrationParticleKey.COND_CAL_DATE: {TYPE: unicode, VALUE: '18-May-11', REQUIRED: True }, SBE16CalibrationParticleKey.CONDG: {TYPE: float, VALUE: -9.896568e-01, REQUIRED: True }, SBE16CalibrationParticleKey.CONDH: {TYPE: float, VALUE: 
1.316599e-01, REQUIRED: True }, SBE16CalibrationParticleKey.CONDI: {TYPE: float, VALUE: -2.213854e-04, REQUIRED: True }, SBE16CalibrationParticleKey.CONDJ: {TYPE: float, VALUE: 3.292199e-05, REQUIRED: True }, SBE16CalibrationParticleKey.CPCOR: {TYPE: float, VALUE: -9.570000e-08, REQUIRED: True }, SBE16CalibrationParticleKey.CTCOR: {TYPE: float, VALUE: 3.250000e-06, REQUIRED: True }, SBE16CalibrationParticleKey.CSLOPE: {TYPE: float, VALUE: 1.0, REQUIRED: True }, SBE16CalibrationParticleKey.EXT_VOLT0_OFFSET: {TYPE: float, VALUE: -4.650526e-02, REQUIRED: True }, SBE16CalibrationParticleKey.EXT_VOLT0_SLOPE: {TYPE: float, VALUE: 1.246381e+00, REQUIRED: True }, SBE16CalibrationParticleKey.EXT_VOLT1_OFFSET: {TYPE: float, VALUE: -4.618105e-02, REQUIRED: True }, SBE16CalibrationParticleKey.EXT_VOLT1_SLOPE: {TYPE: float, VALUE: 1.247197e+00, REQUIRED: True }, SBE16CalibrationParticleKey.EXT_VOLT2_OFFSET: {TYPE: float, VALUE: -4.659790e-02, REQUIRED: True }, SBE16CalibrationParticleKey.EXT_VOLT2_SLOPE: {TYPE: float, VALUE: 1.247601e+00, REQUIRED: True }, SBE16CalibrationParticleKey.EXT_VOLT3_OFFSET: {TYPE: float, VALUE: -4.502421e-02, REQUIRED: True }, SBE16CalibrationParticleKey.EXT_VOLT3_SLOPE: {TYPE: float, VALUE: 1.246911e+00, REQUIRED: True }, SBE16CalibrationParticleKey.EXT_VOLT4_OFFSET: {TYPE: float, VALUE: -4.589158e-02, REQUIRED: True }, SBE16CalibrationParticleKey.EXT_VOLT4_SLOPE: {TYPE: float, VALUE: 1.246346e+00, REQUIRED: True }, SBE16CalibrationParticleKey.EXT_VOLT5_OFFSET: {TYPE: float, VALUE: -4.609895e-02, REQUIRED: True }, SBE16CalibrationParticleKey.EXT_VOLT5_SLOPE: {TYPE: float, VALUE: 1.247868e+00, REQUIRED: True }, SBE16CalibrationParticleKey.EXT_FREQ: {TYPE: float, VALUE: 9.999949e-01, REQUIRED: True }, } # Calibration particle definition for a 16 with a quartz pressure sensor _calibration_parameters_quartz = dict( { SBE16CalibrationParticleKey.PRES_SERIAL_NUMBER: {TYPE: int, VALUE: 125270, REQUIRED: True }, SBE16CalibrationParticleKey.PRES_RANGE: 
{TYPE: int, VALUE: 1000, REQUIRED: True }, SBE16CalibrationParticleKey.PRES_CAL_DATE: {TYPE: unicode, VALUE: '02-nov-12', REQUIRED: True }, SBE16CalibrationParticleKey.PC1: {TYPE: float, VALUE: -4.642673e+03, REQUIRED: True }, SBE16CalibrationParticleKey.PC2: {TYPE: float, VALUE: -4.611640e-03, REQUIRED: True }, SBE16CalibrationParticleKey.PC3: {TYPE: float, VALUE: 8.921190e-04, REQUIRED: True }, SBE16CalibrationParticleKey.PD1: {TYPE: float, VALUE: 7.024800e-02, REQUIRED: True }, SBE16CalibrationParticleKey.PD2: {TYPE: float, VALUE: 0.000000e+00, REQUIRED: True }, SBE16CalibrationParticleKey.PT1: {TYPE: float, VALUE: 3.022595e+01, REQUIRED: True }, SBE16CalibrationParticleKey.PT2: {TYPE: float, VALUE: -1.549720e-04, REQUIRED: True }, SBE16CalibrationParticleKey.PT3: {TYPE: float, VALUE: 2.677750e-06, REQUIRED: True }, SBE16CalibrationParticleKey.PT4: {TYPE: float, VALUE: 1.705490e-09, REQUIRED: True }, SBE16CalibrationParticleKey.PSLOPE: {TYPE: float, VALUE: 1.000000e+00, REQUIRED: True }, SBE16CalibrationParticleKey.POFFSET: {TYPE: float, VALUE: 0.000000e+00, REQUIRED: True }, }, **_calibration_parameters_base ) # Calibration particle definition for a 16 with a stain gauge pressure sensor _calibration_parameters_strain = dict( { SBE16CalibrationParticleKey.PRES_SERIAL_NUMBER: {TYPE: int, VALUE: 3230195, REQUIRED: True }, SBE16CalibrationParticleKey.PRES_RANGE: {TYPE: int, VALUE: 160, REQUIRED: True }, SBE16CalibrationParticleKey.PRES_CAL_DATE: {TYPE: unicode, VALUE: '11-May-11', REQUIRED: True }, SBE16CalibrationParticleKey.PA0: {TYPE: float, VALUE: 4.960417e-02, REQUIRED: True }, SBE16CalibrationParticleKey.PA1: {TYPE: float, VALUE: 4.883682e-04, REQUIRED: True }, SBE16CalibrationParticleKey.PA2: {TYPE: float, VALUE: -5.687309e-12, REQUIRED: True }, SBE16CalibrationParticleKey.PTCA0: {TYPE: float, VALUE: 5.249802e+05, REQUIRED: True }, SBE16CalibrationParticleKey.PTCA1: {TYPE: float, VALUE: 7.595719e+00, REQUIRED: True }, SBE16CalibrationParticleKey.PTCA2: 
{TYPE: float, VALUE: -1.322776e-01, REQUIRED: True }, SBE16CalibrationParticleKey.PTCB0: {TYPE: float, VALUE: 2.503125e+01, REQUIRED: True }, SBE16CalibrationParticleKey.PTCB1: {TYPE: float, VALUE: 5.000000e-05, REQUIRED: True }, SBE16CalibrationParticleKey.PTCB2: {TYPE: float, VALUE: 0.000000e+003, REQUIRED: True }, SBE16CalibrationParticleKey.PTEMPA0: {TYPE: float, VALUE: -6.431504e+01, REQUIRED: True }, SBE16CalibrationParticleKey.PTEMPA1: {TYPE: float, VALUE: 5.168177e+01, REQUIRED: True }, SBE16CalibrationParticleKey.PTEMPA2: {TYPE: float, VALUE: -2.847757e-01, REQUIRED: True }, SBE16CalibrationParticleKey.POFFSET: {TYPE: float, VALUE: 0.000000e+00, REQUIRED: True }, }, **_calibration_parameters_base ) ### # Driver Parameter Methods ### def assert_driver_parameters(self, current_parameters, verify_values = False): """ Verify that all driver parameters are correct and potentially verify values. @param current_parameters: driver parameters read from the driver instance @param verify_values: should we verify values against definition? 
""" self.assert_parameters(current_parameters, self._driver_parameters, verify_values) def assert_particle_sample(self, data_particle, verify_values = False): ''' Verify sample particle @param data_particle: SBE16DataParticle data particle @param verify_values: bool, should we verify parameter values ''' self.assert_data_particle_keys(SBE16DataParticleKey, self._sample_parameters) self.assert_data_particle_header(data_particle, DataParticleType.CTD_PARSED, require_instrument_timestamp=True) self.assert_data_particle_parameters(data_particle, self._sample_parameters, verify_values) def assert_particle_status(self, data_particle, verify_values = False): ''' Verify status particle @param data_particle: SBE16StatusParticle data particle @param verify_values: bool, should we verify parameter values ''' self.assert_data_particle_keys(SBE16StatusParticleKey, self._status_parameters) self.assert_data_particle_header(data_particle, DataParticleType.DEVICE_STATUS) self.assert_data_particle_parameters(data_particle, self._status_parameters, verify_values) def assert_particle_calibration_quartz(self, data_particle, verify_values = False): ''' Verify calibration particle @param data_particle: SBE16CalibrationParticle data particle @param verify_values: bool, should we verify parameter values ''' # Have to skip this test because the parameter set is dynamic #self.assert_data_particle_keys(SBE16CalibrationParticleKey, self._calibration_parameters_quartz) self.assert_data_particle_header(data_particle, DataParticleType.DEVICE_CALIBRATION) self.assert_data_particle_parameters(data_particle, self._calibration_parameters_quartz, verify_values) def assert_particle_calibration_strain(self, data_particle, verify_values = False): ''' Verify calibration particle @param data_particle: SBE16CalibrationParticle data particle @param verify_values: bool, should we verify parameter values ''' # Have to skip this test because the parameter set is dynamic 
#self.assert_data_particle_keys(SBE16CalibrationParticleKey, self._calibration_parameters_strain) self.assert_data_particle_header(data_particle, DataParticleType.DEVICE_CALIBRATION) self.assert_data_particle_parameters(data_particle, self._calibration_parameters_strain, verify_values) def assert_granule_calibration_strain(self, granule, verify_values = False): ''' Verify calibration granule @param data_particle: SBE16CalibrationParticle data granule @param verify_values: bool, should we verify parameter values ''' # Have to skip this test because the parameter set is dynamic #self.assert_data_particle_keys(SBE16CalibrationParticleKey, self._calibration_parameters_strain) self.assert_data_particle_header(granule, DataParticleType.DEVICE_CALIBRATION) self.assert_data_particle_parameters(granule, self._calibration_parameters_strain, verify_values) #################################### RULES #################################### # # # Common capabilities in the base class # # # # Instrument specific stuff in the derived class # # # # Generator spits out either stubs or comments describing test this here, # # test that there. # # # # Qualification tests are driven through the instrument_agent # # # ############################################################################### ############################################################################### # UNIT TESTS # # Unit tests test the method calls and parameters using Mock. # ############################################################################### @attr('UNIT', group='mi') class SBEUnitTestCase(SeaBirdUnitTest, SeaBird16plusMixin): """Unit Test Driver""" def test_driver_enums(self): """ Verify that all driver enumeration has no duplicate values that might cause confusion. 
Also do a little extra validation for the Capabilites """ self.assert_enum_has_no_duplicates(Command()) self.assert_enum_has_no_duplicates(ScheduledJob()) self.assert_enum_has_no_duplicates(DataParticleType()) self.assert_enum_has_no_duplicates(ProtocolState()) self.assert_enum_has_no_duplicates(ProtocolEvent()) self.assert_enum_has_no_duplicates(Parameter()) self.assert_enum_complete(ConfirmedParameter(), Parameter()) # Test capabilites for duplicates, them verify that capabilities is a subset of proto events self.assert_enum_has_no_duplicates(Capability()) self.assert_enum_complete(Capability(), ProtocolEvent()) def test_driver_schema(self): """ get the driver schema and verify it is configured properly """ driver = self.InstrumentDriver(self._got_data_event_callback) self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities) def test_chunker(self): """ Test the chunker and verify the particles created. """ chunker = StringChunker(SBE16Protocol.sieve_function) self.assert_chunker_sample(chunker, self.VALID_SAMPLE) self.assert_chunker_sample_with_noise(chunker, self.VALID_SAMPLE) self.assert_chunker_fragmented_sample(chunker, self.VALID_SAMPLE) self.assert_chunker_combined_sample(chunker, self.VALID_SAMPLE) self.assert_chunker_sample(chunker, self.VALID_SAMPLE2) self.assert_chunker_sample_with_noise(chunker, self.VALID_SAMPLE2) self.assert_chunker_fragmented_sample(chunker, self.VALID_SAMPLE2) self.assert_chunker_combined_sample(chunker, self.VALID_SAMPLE2) self.assert_chunker_sample(chunker, self.VALID_DS_RESPONSE) self.assert_chunker_sample_with_noise(chunker, self.VALID_DS_RESPONSE) self.assert_chunker_fragmented_sample(chunker, self.VALID_DS_RESPONSE, 64) self.assert_chunker_combined_sample(chunker, self.VALID_DS_RESPONSE) self.assert_chunker_sample(chunker, self.VALID_DCAL_QUARTZ) self.assert_chunker_sample_with_noise(chunker, self.VALID_DCAL_QUARTZ) self.assert_chunker_fragmented_sample(chunker, self.VALID_DCAL_QUARTZ, 64) 
self.assert_chunker_combined_sample(chunker, self.VALID_DCAL_QUARTZ) self.assert_chunker_sample(chunker, self.VALID_DCAL_STRAIN) self.assert_chunker_sample_with_noise(chunker, self.VALID_DCAL_STRAIN) self.assert_chunker_fragmented_sample(chunker, self.VALID_DCAL_STRAIN, 64) self.assert_chunker_combined_sample(chunker, self.VALID_DCAL_STRAIN) @unittest.skip("passes with test_driver, fails with nosetest") def test_got_data(self): """ Verify sample data passed through the got data method produces the correct data particles """ # Create and initialize the instrument driver with a mock port agent driver = self.InstrumentDriver(self._got_data_event_callback) self.assert_initialize_driver(driver) self.assert_raw_particle_published(driver, True) # Start validating data particles self.assert_particle_published(driver, self.VALID_SAMPLE, self.assert_particle_sample, True) self.assert_particle_published(driver, self.VALID_SAMPLE2, self.assert_particle_sample, True) self.assert_particle_published(driver, self.VALID_DS_RESPONSE, self.assert_particle_status, True) self.assert_particle_published(driver, self.VALID_DCAL_QUARTZ, self.assert_particle_calibration_quartz, True) self.assert_particle_published(driver, self.VALID_DCAL_STRAIN, self.assert_particle_calibration_strain, True) def test_capabilities(self): """ Verify the FSM reports capabilities as expected. All states defined in this dict must also be defined in the protocol FSM. 
""" capabilities = { ProtocolState.UNKNOWN: ['DRIVER_EVENT_DISCOVER'], ProtocolState.TEST: ['DRIVER_EVENT_GET', 'DRIVER_EVENT_RUN_TEST'], ProtocolState.COMMAND: ['DRIVER_EVENT_ACQUIRE_SAMPLE', 'DRIVER_EVENT_ACQUIRE_STATUS', 'DRIVER_EVENT_CLOCK_SYNC', 'DRIVER_EVENT_GET', 'DRIVER_EVENT_SET', 'DRIVER_EVENT_TEST', 'DRIVER_EVENT_START_AUTOSAMPLE', 'DRIVER_EVENT_START_DIRECT', 'PROTOCOL_EVENT_GET_CONFIGURATION', 'PROTOCOL_EVENT_RESET_EC', 'PROTOCOL_EVENT_QUIT_SESSION', 'DRIVER_EVENT_SCHEDULED_CLOCK_SYNC'], ProtocolState.AUTOSAMPLE: ['DRIVER_EVENT_GET', 'PROTOCOL_EVENT_QUIT_SESSION', 'DRIVER_EVENT_STOP_AUTOSAMPLE', 'PROTOCOL_EVENT_GET_CONFIGURATION', 'DRIVER_EVENT_SCHEDULED_CLOCK_SYNC', 'DRIVER_EVENT_ACQUIRE_STATUS'], ProtocolState.DIRECT_ACCESS: ['DRIVER_EVENT_STOP_DIRECT', 'EXECUTE_DIRECT'] } driver = self.InstrumentDriver(self._got_data_event_callback) self.assert_capabilities(driver, capabilities) @unittest.skip("passes with test_driver, fails with nosetest") def test_parse_ds(self): """ Create a mock port agent """ driver = self.InstrumentDriver(self._got_data_event_callback) self.assert_initialize_driver(driver, ProtocolState.COMMAND) source = self.VALID_DS_RESPONSE baseline = driver._protocol._param_dict.get_current_timestamp() # First verify that parse ds sets all know parameters. driver._protocol._parse_dsdc_response(source, '<Executed/>') pd = driver._protocol._param_dict.get_all(baseline) log.debug("Param Dict Values: %s" % pd) log.debug("Param Sample: %s" % source) self.assert_driver_parameters(pd, True) # Now change some things and make sure they are parsed properly # Note: Only checking parameters that can change. 
# Logging source = source.replace("= not logging", "= logging") log.debug("Param Sample: %s" % source) driver._protocol._parse_dsdc_response(source, '<Executed/>') pd = driver._protocol._param_dict.get_all(baseline) self.assertTrue(pd.get(Parameter.LOGGING)) # Sync Mode source = source.replace("serial sync mode disabled", "serial sync mode enabled") log.debug("Param Sample: %s" % source) driver._protocol._parse_dsdc_response(source, '<Executed/>') pd = driver._protocol._param_dict.get_all(baseline) self.assertTrue(pd.get(Parameter.SYNCMODE)) # Pump Mode 0 source = source.replace("run pump during sample", "no pump") log.debug("Param Sample: %s" % source) driver._protocol._parse_dsdc_response(source, '<Executed/>') pd = driver._protocol._param_dict.get_all(baseline) self.assertEqual(pd.get(Parameter.PUMP_MODE), 0) # Pump Mode 1 source = source.replace("no pump", "run pump for 0.5 sec") log.debug("Param Sample: %s" % source) driver._protocol._parse_dsdc_response(source, '<Executed/>') pd = driver._protocol._param_dict.get_all(baseline) self.assertEqual(pd.get(Parameter.PUMP_MODE), 1) # Pressure Sensor type 2 source = source.replace("strain gauge", "quartz without temp comp") log.debug("Param Sample: %s" % source) driver._protocol._parse_dsdc_response(source, '<Executed/>') pd = driver._protocol._param_dict.get_all(baseline) self.assertEqual(pd.get(Parameter.PTYPE), 2) # Pressure Sensor type 3 source = source.replace("quartz without temp comp", "quartz with temp comp") log.debug("Param Sample: %s" % source) driver._protocol._parse_dsdc_response(source, '<Executed/>') pd = driver._protocol._param_dict.get_all(baseline) self.assertEqual(pd.get(Parameter.PTYPE), 3) def test_parse_set_response(self): """ Test response from set commands. 
""" driver = self.InstrumentDriver(self._got_data_event_callback) self.assert_initialize_driver(driver, ProtocolState.COMMAND) response = "Not an error" driver._protocol._parse_set_response(response, Prompt.EXECUTED) driver._protocol._parse_set_response(response, Prompt.COMMAND) with self.assertRaises(InstrumentProtocolException): driver._protocol._parse_set_response(response, Prompt.BAD_COMMAND) response = "<ERROR type='INVALID ARGUMENT' msg='out of range'/>" with self.assertRaises(InstrumentParameterException): driver._protocol._parse_set_response(response, Prompt.EXECUTED) ############################################################################### # INTEGRATION TESTS # # Integration test test the direct driver / instrument interaction # # but making direct calls via zeromq. # # - Common Integration tests test the driver through the instrument agent # # and common for all drivers (minmum requirement for ION ingestion) # ############################################################################### @attr('INT', group='mi') class SBEIntTestCase(SeaBirdIntegrationTest, SeaBird16plusMixin): """ Integration tests for the sbe16 driver. This class tests and shows use patterns for the sbe16 driver as a zmq driver process. """ def test_test(self): """ Test the hardware testing mode. """ self.assert_initialize_driver() start_time = time.time() timeout = time.time() + 300 reply = self.driver_client.cmd_dvr('execute_resource', ProtocolEvent.TEST) self.assert_current_state(ProtocolState.TEST) # Test the driver is in test state. state = self.driver_client.cmd_dvr('get_resource_state') while state != ProtocolState.COMMAND: time.sleep(5) elapsed = time.time() - start_time log.info('Device testing %f seconds elapsed.' % elapsed) state = self.driver_client.cmd_dvr('get_resource_state') self.assertLess(time.time(), timeout, msg="Timeout waiting for instrument to come out of test") # Verify we received the test result and it passed. 
test_results = [evt for evt in self.events if evt['type']==DriverAsyncEvent.RESULT] self.assertTrue(len(test_results) == 1) self.assertEqual(test_results[0]['value']['success'], 'Passed') def test_parameters(self): """ Test driver parameters and verify their type. Startup parameters also verify the parameter value. This test confirms that parameters are being read/converted properly and that the startup has been applied. """ self.assert_initialize_driver() reply = self.driver_client.cmd_dvr('get_resource', Parameter.ALL) self.assert_driver_parameters(reply, True) def test_set(self): """ Test all set commands. Verify all exception cases. """ self.assert_initialize_driver() # Verify we can set all parameters in bulk new_values = { Parameter.INTERVAL: 20, Parameter.PUMP_MODE: 0, Parameter.NCYCLES: 6 } self.assert_set_bulk(new_values) # Pump Mode # x=0: No pump. # x=1: Run pump for 0.5 sec before each sample. # x=2: Run pump during each sample. self.assert_set(Parameter.PUMP_MODE, 0) self.assert_set(Parameter.PUMP_MODE, 1) self.assert_set(Parameter.PUMP_MODE, 2) self.assert_set_exception(Parameter.PUMP_MODE, -1) self.assert_set_exception(Parameter.PUMP_MODE, 3) self.assert_set_exception(Parameter.PUMP_MODE, 'bad') # NCYCLE Range 1 - 100 self.assert_set(Parameter.NCYCLES, 1) self.assert_set(Parameter.NCYCLES, 100) self.assert_set_exception(Parameter.NCYCLES, 0) self.assert_set_exception(Parameter.NCYCLES, 101) self.assert_set_exception(Parameter.NCYCLES, -1) self.assert_set_exception(Parameter.NCYCLES, 0.1) self.assert_set_exception(Parameter.NCYCLES, 'bad') # SampleInterval Range 10 - 14,400 self.assert_set(Parameter.INTERVAL, 10) self.assert_set(Parameter.INTERVAL, 14400) self.assert_set_exception(Parameter.INTERVAL, 9) self.assert_set_exception(Parameter.INTERVAL, 14401) self.assert_set_exception(Parameter.INTERVAL, -1) self.assert_set_exception(Parameter.INTERVAL, 0.1) self.assert_set_exception(Parameter.INTERVAL, 'bad') # Read only parameters 
self.assert_set_readonly(Parameter.ECHO, False) self.assert_set_readonly(Parameter.OUTPUT_EXEC_TAG, False) self.assert_set_readonly(Parameter.TXREALTIME, False) self.assert_set_readonly(Parameter.BIOWIPER, False) self.assert_set_readonly(Parameter.PTYPE, 1) self.assert_set_readonly(Parameter.VOLT0, False) self.assert_set_readonly(Parameter.VOLT1, False) self.assert_set_readonly(Parameter.VOLT2, False) self.assert_set_readonly(Parameter.VOLT3, False) self.assert_set_readonly(Parameter.VOLT4, False) self.assert_set_readonly(Parameter.VOLT5, False) self.assert_set_readonly(Parameter.DELAY_BEFORE_SAMPLE, 1) self.assert_set_readonly(Parameter.DELAY_AFTER_SAMPLE, 1) self.assert_set_readonly(Parameter.SBE63, False) self.assert_set_readonly(Parameter.SBE38, False) self.assert_set_readonly(Parameter.SBE50, False) self.assert_set_readonly(Parameter.WETLABS, False) self.assert_set_readonly(Parameter.GTD, False) self.assert_set_readonly(Parameter.OPTODE, False) self.assert_set_readonly(Parameter.SYNCMODE, False) self.assert_set_readonly(Parameter.SYNCWAIT, 1) self.assert_set_readonly(Parameter.OUTPUT_FORMAT, 1) self.assert_set_readonly(Parameter.LOGGING, False) def test_startup_params(self): """ Verify that startup parameters are applied correctly. Generally this happens in the driver discovery method. """ # Explicitly verify these values after discover. They should match # what the startup values should be get_values = { Parameter.INTERVAL: 10, Parameter.PUMP_MODE: 2, Parameter.NCYCLES: 4 } # Change the values of these parameters to something before the # driver is reinitalized. They should be blown away on reinit. 
new_values = { Parameter.INTERVAL: 20, Parameter.PUMP_MODE: 0, Parameter.NCYCLES: 6 } self.assert_initialize_driver() self.assert_startup_parameters(self.assert_driver_parameters, new_values, get_values) # Start autosample and try again self.assert_set_bulk(new_values) self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1) self.assert_startup_parameters(self.assert_driver_parameters) self.assert_current_state(ProtocolState.AUTOSAMPLE) def test_commands(self): """ Run instrument commands from both command and streaming mode. """ self.assert_initialize_driver() #### # First test in command mode #### self.assert_driver_command(ProtocolEvent.CLOCK_SYNC) self.assert_driver_command(ProtocolEvent.SCHEDULED_CLOCK_SYNC) self.assert_driver_command(ProtocolEvent.QUIT_SESSION) self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1) self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1) self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS, regex=r'serial sync mode') self.assert_driver_command(ProtocolEvent.RESET_EC) self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS, regex=r'serial sync mode') self.assert_driver_command(ProtocolEvent.GET_CONFIGURATION, regex=r'EXTFREQSF =') #### # Test in streaming mode #### # Put us in streaming self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1) self.assert_driver_command(ProtocolEvent.SCHEDULED_CLOCK_SYNC) self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS, regex=r'serial sync mode') self.assert_driver_command(ProtocolEvent.GET_CONFIGURATION, regex=r'EXTFREQSF =') self.assert_driver_command(ProtocolEvent.QUIT_SESSION) self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1) #### # Test a bad command #### self.assert_driver_command_exception('ima_bad_command', exception_class=InstrumentCommandException) def 
test_autosample(self): """ Verify that we can enter streaming and that all particles are produced properly. Because we have to test for three different data particles we can't use the common assert_sample_autosample method """ self.assert_initialize_driver() self.assert_set(Parameter.INTERVAL, 10) self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1) self.assert_async_particle_generation(DataParticleType.CTD_PARSED, self.assert_particle_sample, timeout=60) self.assert_particle_generation(ProtocolEvent.ACQUIRE_STATUS, DataParticleType.DEVICE_STATUS, self.assert_particle_status) self.assert_particle_generation(ProtocolEvent.GET_CONFIGURATION, DataParticleType.DEVICE_CALIBRATION, self.assert_particle_calibration_strain) self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1) def test_polled(self): """ Test that we can generate particles with commands """ self.assert_initialize_driver() self.assert_particle_generation(ProtocolEvent.GET_CONFIGURATION, DataParticleType.DEVICE_CALIBRATION, self.assert_particle_calibration_strain) self.assert_particle_generation(ProtocolEvent.ACQUIRE_STATUS, DataParticleType.DEVICE_STATUS, self.assert_particle_status) self.assert_particle_generation(ProtocolEvent.ACQUIRE_SAMPLE, DataParticleType.CTD_PARSED, self.assert_particle_sample) ### # Test scheduled events ### def assert_calibration_coefficients(self): """ Verify a calibration particle was generated """ self.clear_events() self.assert_async_particle_generation(DataParticleType.DEVICE_CALIBRATION, self.assert_particle_calibration_strain, timeout=120) def test_scheduled_device_configuration_command(self): """ Verify the device configuration command can be triggered and run in command """ self.assert_scheduled_event(ScheduledJob.CONFIGURATION_DATA, self.assert_calibration_coefficients, delay=120) self.assert_current_state(ProtocolState.COMMAND) def test_scheduled_device_configuration_autosample(self): 
""" Verify the device configuration command can be triggered and run in autosample """ self.assert_scheduled_event(ScheduledJob.CONFIGURATION_DATA, self.assert_calibration_coefficients, autosample_command=ProtocolEvent.START_AUTOSAMPLE, delay=180) self.assert_current_state(ProtocolState.AUTOSAMPLE) self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE) def assert_acquire_status(self): """ Verify a status particle was generated """ self.clear_events() self.assert_async_particle_generation(DataParticleType.DEVICE_STATUS, self.assert_particle_status, timeout=120) def test_scheduled_device_status_command(self): """ Verify the device status command can be triggered and run in command """ self.assert_scheduled_event(ScheduledJob.ACQUIRE_STATUS, self.assert_acquire_status, delay=120) self.assert_current_state(ProtocolState.COMMAND) def test_scheduled_device_status_autosample(self): """ Verify the device status command can be triggered and run in autosample """ self.assert_scheduled_event(ScheduledJob.ACQUIRE_STATUS, self.assert_acquire_status, autosample_command=ProtocolEvent.START_AUTOSAMPLE, delay=180) self.assert_current_state(ProtocolState.AUTOSAMPLE) self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE) def test_scheduled_clock_sync_command(self): """ Verify the scheduled clock sync is triggered and functions as expected """ timeout = 120 self.assert_scheduled_event(ScheduledJob.CLOCK_SYNC, delay=timeout) self.assert_current_state(ProtocolState.COMMAND) # Set the clock to some time in the past # Need an easy way to do this now that DATE_TIME is read only #self.assert_set_clock(Parameter.DATE_TIME, time_override=SBE_EPOCH) # Check the clock until it is set correctly (by a schedued event) #self.assert_clock_set(Parameter.DATE_TIME, sync_clock_cmd=ProtocolEvent.GET_CONFIGURATION, timeout=timeout) def test_scheduled_clock_sync_autosample(self): """ Verify the scheduled clock sync is triggered and functions as expected """ timeout = 240 
self.assert_scheduled_event(ScheduledJob.CLOCK_SYNC, delay=timeout) self.assert_current_state(ProtocolState.COMMAND) # Set the clock to some time in the past # Need an easy way to do this now that DATE_TIME is read only #self.assert_set_clock(Parameter.DATE_TIME, time_override=SBE_EPOCH) self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE) # Check the clock until it is set correctly (by a scheduled event) #self.assert_clock_set(Parameter.DATE_TIME, sync_clock_cmd=ProtocolEvent.GET_CONFIGURATION, timeout=timeout, tolerance=10) def assert_cycle(self): self.assert_current_state(ProtocolState.COMMAND) self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE) self.assert_current_state(ProtocolState.AUTOSAMPLE) self.assert_async_particle_generation(DataParticleType.CTD_PARSED, self.assert_particle_sample, particle_count = 6, timeout=60) self.assert_particle_generation(ProtocolEvent.ACQUIRE_STATUS, DataParticleType.DEVICE_STATUS, self.assert_particle_status) self.assert_particle_generation(ProtocolEvent.GET_CONFIGURATION, DataParticleType.DEVICE_CALIBRATION, self.assert_particle_calibration_strain) self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE) self.assert_current_state(ProtocolState.COMMAND) def test_discover(self): """ Verify we can discover from both command and auto sample modes """ self.assert_initialize_driver() self.assert_cycle() self.assert_cycle() def test_metadata(self): metadata = self.driver_client.cmd_dvr('get_config_metadata') self.assertEqual(metadata, None) # must be connected self.assert_initialize_driver() metadata = self.driver_client.cmd_dvr('get_config_metadata') log.debug("Metadata: %s", metadata) self.assertTrue(isinstance(metadata, str)) ############################################################################### # QUALIFICATION TESTS # # Device specific qualification tests are for # # testing device specific capabilities # ############################################################################### @attr('QUAL', 
group='mi') class SBEQualTestCase(SeaBirdQualificationTest, SeaBird16plusMixin): """Qualification Test Container""" def test_autosample(self): """ Verify autosample works and data particles are created """ self.assert_enter_command_mode() self.assert_set_parameter(Parameter.INTERVAL, 10) self.assert_start_autosample() self.assert_particle_async(DataParticleType.CTD_PARSED, self.assert_particle_sample) self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_status, DataParticleType.DEVICE_STATUS, sample_count=1, timeout=20) self.assert_particle_polled(ProtocolEvent.GET_CONFIGURATION, self.assert_particle_calibration_strain, DataParticleType.DEVICE_CALIBRATION, sample_count=1, timeout=20) # Stop autosample and do run a couple commands. self.assert_stop_autosample() self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_status, DataParticleType.DEVICE_STATUS, sample_count=1) self.assert_particle_polled(ProtocolEvent.GET_CONFIGURATION, self.assert_particle_calibration_strain, DataParticleType.DEVICE_CALIBRATION, sample_count=1) # Restart autosample and gather a couple samples self.assert_sample_autosample(self.assert_particle_sample, DataParticleType.CTD_PARSED) def assert_cycle(self): self.assert_start_autosample() self.assert_particle_async(DataParticleType.CTD_PARSED, self.assert_particle_sample) self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_status, DataParticleType.DEVICE_STATUS, sample_count=1, timeout=20) self.assert_particle_polled(ProtocolEvent.GET_CONFIGURATION, self.assert_particle_calibration_strain, DataParticleType.DEVICE_CALIBRATION, sample_count=1, timeout=20) self.assert_stop_autosample() self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_status, DataParticleType.DEVICE_STATUS, sample_count=1) self.assert_particle_polled(ProtocolEvent.GET_CONFIGURATION, self.assert_particle_calibration_strain, DataParticleType.DEVICE_CALIBRATION, sample_count=1) 
    def test_cycle(self):
        """
        Verify we can bounce between command and streaming.  We try it a few
        times to see if we can find a timeout.
        """
        self.assert_enter_command_mode()

        # Repeated round trips hunt for intermittent timeouts in the
        # command <-> autosample transition.
        self.assert_cycle()
        self.assert_cycle()
        self.assert_cycle()
        self.assert_cycle()

    def test_poll(self):
        '''
        Verify that we can poll for a sample.  Take sample for this instrument
        Also poll for other engineering data streams.
        '''
        self.assert_enter_command_mode()

        # One particle of each type, generated on demand from command mode.
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_sample, DataParticleType.CTD_PARSED, sample_count=1)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_status, DataParticleType.DEVICE_STATUS, sample_count=1)
        self.assert_particle_polled(ProtocolEvent.GET_CONFIGURATION, self.assert_particle_calibration_strain, DataParticleType.DEVICE_CALIBRATION, sample_count=1)

    def test_direct_access_telnet_mode(self):
        """
        Manually verify that the Instrument Driver properly supports direct
        access to the physical instrument (telnet mode), and that a setting
        changed during direct access is restored afterwards.
        """
        self.assert_enter_command_mode()
        self.assert_set_parameter(Parameter.INTERVAL, 10)

        # go into direct access, and muck up a setting.
        self.assert_direct_access_start_telnet(timeout=600)

        self.tcp_client.send_data("%sampleinterval=97%s" % (NEWLINE, NEWLINE))
        self.tcp_client.expect(Prompt.EXECUTED)

        self.assert_direct_access_stop_telnet()

        # verify the setting got restored.
        self.assert_enter_command_mode()
        self.assert_get_parameter(Parameter.INTERVAL, 10)

    def test_execute_clock_sync(self):
        """
        Verify we can synchronize the instrument internal clock.
        """
        self.assert_enter_command_mode()

        self.assert_execute_resource(ProtocolEvent.CLOCK_SYNC)

        # get the time from the driver
        check_new_params = self.instrument_agent_client.get_resource([Parameter.DATE_TIME])

        # convert driver's time from formatted date/time string to seconds integer
        instrument_time = time.mktime(time.strptime(check_new_params.get(Parameter.DATE_TIME).lower(), "%d %b %Y %H:%M:%S"))

        # need to convert local machine's time to date/time string and back to
        # seconds to 'drop' the DST attribute so test passes
        # get time from local machine
        lt = time.strftime("%d %b %Y %H:%M:%S", time.gmtime(time.mktime(time.localtime())))

        # convert local time from formatted date/time string to seconds integer to drop DST
        local_time = time.mktime(time.strptime(lt, "%d %b %Y %H:%M:%S"))

        # Now verify that the time matches to within 15 seconds
        self.assertLessEqual(abs(instrument_time - local_time), 15)

    def test_get_capabilities(self):
        """
        Verify that the correct capabilities are returned from
        get_capabilities at various driver/agent states (command, streaming,
        direct access, uninitialized).
        """
        self.assert_enter_command_mode()

        ##################
        #  Command Mode
        ##################
        capabilities = {
            AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.COMMAND),
            AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
            AgentCapabilityType.RESOURCE_COMMAND: [
                ProtocolEvent.TEST,
                ProtocolEvent.GET,
                ProtocolEvent.SET,
                ProtocolEvent.RESET_EC,
                ProtocolEvent.CLOCK_SYNC,
                ProtocolEvent.QUIT_SESSION,
                ProtocolEvent.ACQUIRE_STATUS,
                ProtocolEvent.START_AUTOSAMPLE,
                ProtocolEvent.GET_CONFIGURATION,
            ],
            AgentCapabilityType.RESOURCE_INTERFACE: None,
            AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
        }

        self.assert_capabilities(capabilities)

        ##################
        #  Streaming Mode
        ##################
        # Only the autosample-safe subset of commands is available while streaming.
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.STREAMING)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [
            ProtocolEvent.GET,
            ProtocolEvent.STOP_AUTOSAMPLE,
            ProtocolEvent.QUIT_SESSION,
            ProtocolEvent.ACQUIRE_STATUS,
            ProtocolEvent.GET_CONFIGURATION,
        ]

        self.assert_start_autosample()
        self.assert_capabilities(capabilities)
        self.assert_stop_autosample()

        ##################
        #  DA Mode
        ##################
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.DIRECT_ACCESS)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = self._common_da_resource_commands()

        self.assert_direct_access_start_telnet()
        self.assert_capabilities(capabilities)
        self.assert_direct_access_stop_telnet()

        #######################
        #  Uninitialized Mode
        #######################
        # No resource commands/parameters are visible before initialization.
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.UNINITIALIZED)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
        capabilities[AgentCapabilityType.RESOURCE_INTERFACE] = []
        capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = []

        self.assert_reset()
        self.assert_capabilities(capabilities)
###############################################################################
#                             PUBLICATION TESTS                               #
# Device specific publication tests are for                                   #
# testing device specific capabilities                                        #
###############################################################################
@attr('PUB', group='mi')
class SBEPubTestCase(SeaBirdPublicationTest):
    """Publication tests: confirm a data granule is produced for each sample type."""

    def test_granule_generation(self):
        """
        Push each known sample through the driver and verify the matching
        data granule is generated.

        Currently these tests only verify that the data granule is generated,
        but the values are not tested.  We will eventually need to replace
        log.debug with a better callback function that actually tests the
        granule.
        """
        self.assert_initialize_driver()

        samples = (
            ("raw data", DataParticleType.RAW),
            (self.VALID_SAMPLE, DataParticleType.CTD_PARSED),
            (self.VALID_DS_RESPONSE, DataParticleType.DEVICE_STATUS),
            (self.VALID_DCAL_STRAIN, DataParticleType.DEVICE_CALIBRATION),
            (self.VALID_DCAL_QUARTZ, DataParticleType.DEVICE_CALIBRATION),
        )
        for raw_sample, stream_type in samples:
            self.assert_sample_async(raw_sample, log.debug, stream_type, timeout=10)
mikeh77/mi-instrument
mi/instrument/seabird/sbe16plus_v2/test/test_driver.py
Python
bsd-2-clause
61,730
#!/usr/bin/python3
# Copyright (C) 2015 Bitquant Research Laboratories (Asia) Limited
# Released under the Simplified BSD License

import my_path
import time
import zmq.green as zmq
import pprint
import algobroker
import msgpack


class Dispatcher(algobroker.Broker):
    """Fan incoming messages out to the downstream broker workers.

    Routing is keyed off the message's 'cmd' / 'type' / 'broker' fields:
    log entries go to the warning log, sms/web alerts go to the plivo/web
    brokers, and bitmex messages go to the bitmex broker.
    """

    def __init__(self):
        algobroker.Broker.__init__(self, "dispatcher")
        # send work
        endpoints = algobroker.ports['data']
        self.sms_sender = self._push_socket(endpoints['broker_plivo'])
        self.bitmex_sender = self._push_socket(endpoints['broker_bitmex'])
        self.web_sender = self._push_socket(endpoints['broker_web'])

    def _push_socket(self, endpoint):
        """Create a PUSH socket connected to *endpoint*."""
        sender = self.socket(zmq.PUSH)
        sender.connect(endpoint)
        return sender

    def process_data(self, data):
        """Route one decoded message to the matching downstream socket."""
        command = data['cmd']
        if command == "log":
            self.warning(pprint.pformat(data))
            return
        if command == 'alert' and data['type'] == 'sms':
            self.debug("sending sms")
            self.debug(pprint.pformat(data))
            self.sms_sender.send(msgpack.packb(data))
            return
        if command == 'alert' and data['type'] == 'web':
            self.debug("sending web")
            self.debug(pprint.pformat(data))
            self.web_sender.send(msgpack.packb(data))
            return
        if data.get('broker', None) == 'bitmex':
            self.debug("sending bitmex")
            self.debug(pprint.pformat(data))
            self.bitmex_sender.send(msgpack.packb(data))
            return
        self.error("unknown action")


if __name__ == "__main__":
    dispatcher = Dispatcher()
    dispatcher.run()
joequant/algobroker
algobroker/dispatcher.py
Python
bsd-2-clause
1,618
#!/bin/env python
import libsedml


def create_nested_task(file_name):
    """Build a SED-ML document containing a repeated (nested) task and write it out.

    The document contains:
      * a steady-state simulation using KiSAO term 0000282,
      * an SBML model sourced from ``oscli.xml``,
      * a plain task binding the model to the simulation, and
      * a repeated task that sweeps parameter ``J0_v0`` over a uniform range,
        resetting the model and re-running the plain task at each step.

    :param file_name: path the SED-ML XML document is written to.
    """
    doc = libsedml.SedDocument(1, 4)

    # create simulation
    sim = doc.createSteadyState()
    sim.setId("steady1")

    # need to set the correct KISAO Term
    alg = sim.createAlgorithm()
    alg.setKisaoID("KISAO:0000282")

    # create model
    model = doc.createModel()
    model.setId("model1")
    model.setLanguage("urn:sedml:language:sbml")
    model.setSource("oscli.xml")

    # create the basic task that the repeated task will iterate
    task = doc.createTask()
    task.setId("task0")
    task.setModelReference("model1")
    task.setSimulationReference("steady1")

    repeated_task = doc.createRepeatedTask()
    assert isinstance(repeated_task, libsedml.SedRepeatedTask)
    repeated_task.setId("task1")
    repeated_task.setResetModel(True)
    repeated_task.setRangeId("current")

    # uniform sweep (renamed from 'range' so the builtin is not shadowed)
    # NOTE(review): start == end == 0 makes a degenerate sweep — confirm intended.
    uniform_range = repeated_task.createUniformRange()
    assert isinstance(uniform_range, libsedml.SedUniformRange)
    uniform_range.setId("current")
    uniform_range.setStart(0)
    uniform_range.setEnd(0)
    uniform_range.setNumberOfSteps(100)
    uniform_range.setType("linear")

    # at every iteration, set J0_v0 to the current range value
    change = repeated_task.createTaskChange()
    assert isinstance(change, libsedml.SedSetValue)
    change.setModelReference("model1")
    change.setTarget("/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id=&quot;J0_v0&quot;]")
    change.setRange("current")
    change.setMath(libsedml.parseL3Formula("current"))

    subtask = repeated_task.createSubTask()
    subtask.setOrder(1)
    subtask.setTask("task0")

    # write doc
    libsedml.writeSedML(doc, file_name)


if __name__ == "__main__":
    create_nested_task('nested_task.xml')
fbergmann/libSEDML
examples/python/create_nested_task.py
Python
bsd-2-clause
1,555
import os import sys import unittest from unittest.mock import patch import mkdocs from mkdocs.config import config_options from mkdocs.config.base import Config from mkdocs.tests.base import tempdir class OptionallyRequiredTest(unittest.TestCase): def test_empty(self): option = config_options.OptionallyRequired() value = option.validate(None) self.assertEqual(value, None) self.assertEqual(option.is_required(), False) def test_required(self): option = config_options.OptionallyRequired(required=True) self.assertRaises(config_options.ValidationError, option.validate, None) self.assertEqual(option.is_required(), True) def test_required_no_default(self): option = config_options.OptionallyRequired(required=True) value = option.validate(2) self.assertEqual(2, value) def test_default(self): option = config_options.OptionallyRequired(default=1) value = option.validate(None) self.assertEqual(1, value) def test_replace_default(self): option = config_options.OptionallyRequired(default=1) value = option.validate(2) self.assertEqual(2, value) class TypeTest(unittest.TestCase): def test_single_type(self): option = config_options.Type(str) value = option.validate("Testing") self.assertEqual(value, "Testing") def test_multiple_types(self): option = config_options.Type((list, tuple)) value = option.validate([1, 2, 3]) self.assertEqual(value, [1, 2, 3]) value = option.validate((1, 2, 3)) self.assertEqual(value, (1, 2, 3)) self.assertRaises(config_options.ValidationError, option.validate, {'a': 1}) def test_length(self): option = config_options.Type(str, length=7) value = option.validate("Testing") self.assertEqual(value, "Testing") self.assertRaises(config_options.ValidationError, option.validate, "Testing Long") class ChoiceTest(unittest.TestCase): def test_valid_choice(self): option = config_options.Choice(('python', 'node')) value = option.validate('python') self.assertEqual(value, 'python') def test_invalid_choice(self): option = config_options.Choice(('python', 'node')) 
self.assertRaises( config_options.ValidationError, option.validate, 'go') def test_invalid_choices(self): self.assertRaises(ValueError, config_options.Choice, '') self.assertRaises(ValueError, config_options.Choice, []) self.assertRaises(ValueError, config_options.Choice, 5) class DeprecatedTest(unittest.TestCase): def test_deprecated_option_simple(self): option = config_options.Deprecated() option.pre_validation({'d': 'value'}, 'd') self.assertEqual(len(option.warnings), 1) option.validate('value') def test_deprecated_option_message(self): msg = 'custom message for {} key' option = config_options.Deprecated(message=msg) option.pre_validation({'d': 'value'}, 'd') self.assertEqual(len(option.warnings), 1) self.assertEqual(option.warnings[0], msg.format('d')) def test_deprecated_option_with_type(self): option = config_options.Deprecated(option_type=config_options.Type(str)) option.pre_validation({'d': 'value'}, 'd') self.assertEqual(len(option.warnings), 1) option.validate('value') def test_deprecated_option_with_invalid_type(self): option = config_options.Deprecated(option_type=config_options.Type(list)) config = {'d': 'string'} option.pre_validation({'d': 'value'}, 'd') self.assertEqual(len(option.warnings), 1) self.assertRaises( config_options.ValidationError, option.validate, config['d'] ) def test_deprecated_option_with_type_undefined(self): option = config_options.Deprecated(option_type=config_options.Type(str)) option.validate(None) def test_deprecated_option_move(self): option = config_options.Deprecated(moved_to='new') config = {'old': 'value'} option.pre_validation(config, 'old') self.assertEqual(len(option.warnings), 1) self.assertEqual(config, {'new': 'value'}) def test_deprecated_option_move_complex(self): option = config_options.Deprecated(moved_to='foo.bar') config = {'old': 'value'} option.pre_validation(config, 'old') self.assertEqual(len(option.warnings), 1) self.assertEqual(config, {'foo': {'bar': 'value'}}) def 
test_deprecated_option_move_existing(self): option = config_options.Deprecated(moved_to='foo.bar') config = {'old': 'value', 'foo': {'existing': 'existing'}} option.pre_validation(config, 'old') self.assertEqual(len(option.warnings), 1) self.assertEqual(config, {'foo': {'existing': 'existing', 'bar': 'value'}}) def test_deprecated_option_move_invalid(self): option = config_options.Deprecated(moved_to='foo.bar') config = {'old': 'value', 'foo': 'wrong type'} option.pre_validation(config, 'old') self.assertEqual(len(option.warnings), 1) self.assertEqual(config, {'old': 'value', 'foo': 'wrong type'}) class IpAddressTest(unittest.TestCase): def test_valid_address(self): addr = '127.0.0.1:8000' option = config_options.IpAddress() value = option.validate(addr) self.assertEqual(str(value), addr) self.assertEqual(value.host, '127.0.0.1') self.assertEqual(value.port, 8000) def test_valid_IPv6_address(self): addr = '::1:8000' option = config_options.IpAddress() value = option.validate(addr) self.assertEqual(str(value), addr) self.assertEqual(value.host, '::1') self.assertEqual(value.port, 8000) def test_named_address(self): addr = 'localhost:8000' option = config_options.IpAddress() value = option.validate(addr) self.assertEqual(str(value), addr) self.assertEqual(value.host, 'localhost') self.assertEqual(value.port, 8000) def test_default_address(self): addr = '127.0.0.1:8000' option = config_options.IpAddress(default=addr) value = option.validate(None) self.assertEqual(str(value), addr) self.assertEqual(value.host, '127.0.0.1') self.assertEqual(value.port, 8000) @unittest.skipIf( sys.version_info >= (3, 9, 5), "Leading zeros not allowed in IP addresses since Python3.9.5", ) def test_IP_normalization(self): addr = '127.000.000.001:8000' option = config_options.IpAddress(default=addr) value = option.validate(None) self.assertEqual(str(value), '127.0.0.1:8000') self.assertEqual(value.host, '127.0.0.1') self.assertEqual(value.port, 8000) @unittest.skipIf( sys.version_info < (3, 
9, 5), "Leading zeros allowed in IP addresses before Python3.9.5", ) def test_invalid_leading_zeros(self): addr = '127.000.000.001:8000' option = config_options.IpAddress(default=addr) self.assertRaises( config_options.ValidationError, option.validate, addr ) def test_invalid_address_range(self): option = config_options.IpAddress() self.assertRaises( config_options.ValidationError, option.validate, '277.0.0.1:8000' ) def test_invalid_address_format(self): option = config_options.IpAddress() self.assertRaises( config_options.ValidationError, option.validate, '127.0.0.18000' ) def test_invalid_address_type(self): option = config_options.IpAddress() self.assertRaises( config_options.ValidationError, option.validate, 123 ) def test_invalid_address_port(self): option = config_options.IpAddress() self.assertRaises( config_options.ValidationError, option.validate, '127.0.0.1:foo' ) def test_invalid_address_missing_port(self): option = config_options.IpAddress() self.assertRaises( config_options.ValidationError, option.validate, '127.0.0.1' ) def test_unsupported_address(self): option = config_options.IpAddress() value = option.validate('0.0.0.0:8000') option.post_validation({'dev_addr': value}, 'dev_addr') self.assertEqual(len(option.warnings), 1) def test_unsupported_IPv6_address(self): option = config_options.IpAddress() value = option.validate(':::8000') option.post_validation({'dev_addr': value}, 'dev_addr') self.assertEqual(len(option.warnings), 1) def test_invalid_IPv6_address(self): # The server will error out with this so we treat it as invalid. 
option = config_options.IpAddress() self.assertRaises( config_options.ValidationError, option.validate, '[::1]:8000' ) class URLTest(unittest.TestCase): def test_valid_url(self): url = "https://mkdocs.org" option = config_options.URL() value = option.validate(url) self.assertEqual(value, url) def test_invalid_url(self): option = config_options.URL() self.assertRaises(config_options.ValidationError, option.validate, "www.mkdocs.org") def test_invalid(self): option = config_options.URL() self.assertRaises(config_options.ValidationError, option.validate, 1) def test_url_is_dir(self): url = "https://mkdocs.org/" option = config_options.URL(is_dir=True) value = option.validate(url) self.assertEqual(value, url) def test_url_transform_to_dir(self): url = "https://mkdocs.org" option = config_options.URL(is_dir=True) value = option.validate(url) self.assertEqual(value, f'{url}/') class RepoURLTest(unittest.TestCase): def test_repo_name_github(self): option = config_options.RepoURL() config = {'repo_url': "https://github.com/mkdocs/mkdocs"} option.post_validation(config, 'repo_url') self.assertEqual(config['repo_name'], "GitHub") def test_repo_name_bitbucket(self): option = config_options.RepoURL() config = {'repo_url': "https://bitbucket.org/gutworth/six/"} option.post_validation(config, 'repo_url') self.assertEqual(config['repo_name'], "Bitbucket") def test_repo_name_gitlab(self): option = config_options.RepoURL() config = {'repo_url': "https://gitlab.com/gitlab-org/gitlab-ce/"} option.post_validation(config, 'repo_url') self.assertEqual(config['repo_name'], "GitLab") def test_repo_name_custom(self): option = config_options.RepoURL() config = {'repo_url': "https://launchpad.net/python-tuskarclient"} option.post_validation(config, 'repo_url') self.assertEqual(config['repo_name'], "Launchpad") def test_edit_uri_github(self): option = config_options.RepoURL() config = {'repo_url': "https://github.com/mkdocs/mkdocs"} option.post_validation(config, 'repo_url') 
self.assertEqual(config['edit_uri'], 'edit/master/docs/') def test_edit_uri_bitbucket(self): option = config_options.RepoURL() config = {'repo_url': "https://bitbucket.org/gutworth/six/"} option.post_validation(config, 'repo_url') self.assertEqual(config['edit_uri'], 'src/default/docs/') def test_edit_uri_gitlab(self): option = config_options.RepoURL() config = {'repo_url': "https://gitlab.com/gitlab-org/gitlab-ce/"} option.post_validation(config, 'repo_url') self.assertEqual(config['edit_uri'], 'edit/master/docs/') def test_edit_uri_custom(self): option = config_options.RepoURL() config = {'repo_url': "https://launchpad.net/python-tuskarclient"} option.post_validation(config, 'repo_url') self.assertEqual(config.get('edit_uri'), '') def test_repo_name_custom_and_empty_edit_uri(self): option = config_options.RepoURL() config = {'repo_url': "https://github.com/mkdocs/mkdocs", 'repo_name': 'mkdocs'} option.post_validation(config, 'repo_url') self.assertEqual(config.get('edit_uri'), 'edit/master/docs/') class DirTest(unittest.TestCase): def test_valid_dir(self): d = os.path.dirname(__file__) option = config_options.Dir(exists=True) value = option.validate(d) self.assertEqual(d, value) def test_missing_dir(self): d = os.path.join("not", "a", "real", "path", "I", "hope") option = config_options.Dir() value = option.validate(d) self.assertEqual(os.path.abspath(d), value) def test_missing_dir_but_required(self): d = os.path.join("not", "a", "real", "path", "I", "hope") option = config_options.Dir(exists=True) self.assertRaises(config_options.ValidationError, option.validate, d) def test_file(self): d = __file__ option = config_options.Dir(exists=True) self.assertRaises(config_options.ValidationError, option.validate, d) def test_incorrect_type_attribute_error(self): option = config_options.Dir() self.assertRaises(config_options.ValidationError, option.validate, 1) def test_incorrect_type_type_error(self): option = config_options.Dir() 
self.assertRaises(config_options.ValidationError, option.validate, []) def test_dir_unicode(self): cfg = Config( [('dir', config_options.Dir())], config_file_path=os.path.join(os.path.abspath('.'), 'mkdocs.yml'), ) test_config = { 'dir': 'юникод' } cfg.load_dict(test_config) fails, warns = cfg.validate() self.assertEqual(len(fails), 0) self.assertEqual(len(warns), 0) self.assertIsInstance(cfg['dir'], str) def test_dir_filesystemencoding(self): cfg = Config( [('dir', config_options.Dir())], config_file_path=os.path.join(os.path.abspath('.'), 'mkdocs.yml'), ) test_config = { 'dir': 'Übersicht'.encode(encoding=sys.getfilesystemencoding()) } cfg.load_dict(test_config) fails, warns = cfg.validate() # str does not include byte strings so validation fails self.assertEqual(len(fails), 1) self.assertEqual(len(warns), 0) def test_dir_bad_encoding_fails(self): cfg = Config( [('dir', config_options.Dir())], config_file_path=os.path.join(os.path.abspath('.'), 'mkdocs.yml'), ) test_config = { 'dir': 'юникод'.encode(encoding='ISO 8859-5') } cfg.load_dict(test_config) fails, warns = cfg.validate() self.assertEqual(len(fails), 1) self.assertEqual(len(warns), 0) def test_config_dir_prepended(self): base_path = os.path.abspath('.') cfg = Config( [('dir', config_options.Dir())], config_file_path=os.path.join(base_path, 'mkdocs.yml'), ) test_config = { 'dir': 'foo' } cfg.load_dict(test_config) fails, warns = cfg.validate() self.assertEqual(len(fails), 0) self.assertEqual(len(warns), 0) self.assertIsInstance(cfg['dir'], str) self.assertEqual(cfg['dir'], os.path.join(base_path, 'foo')) def test_dir_is_config_dir_fails(self): cfg = Config( [('dir', config_options.Dir())], config_file_path=os.path.join(os.path.abspath('.'), 'mkdocs.yml'), ) test_config = { 'dir': '.' 
} cfg.load_dict(test_config) fails, warns = cfg.validate() self.assertEqual(len(fails), 1) self.assertEqual(len(warns), 0) class SiteDirTest(unittest.TestCase): def validate_config(self, config): """ Given a config with values for site_dir and doc_dir, run site_dir post_validation. """ site_dir = config_options.SiteDir() docs_dir = config_options.Dir() fname = os.path.join(os.path.abspath('..'), 'mkdocs.yml') config['docs_dir'] = docs_dir.validate(config['docs_dir']) config['site_dir'] = site_dir.validate(config['site_dir']) schema = [ ('site_dir', site_dir), ('docs_dir', docs_dir), ] cfg = Config(schema, fname) cfg.load_dict(config) failed, warned = cfg.validate() if failed: raise config_options.ValidationError(failed) return True def test_doc_dir_in_site_dir(self): j = os.path.join # The parent dir is not the same on every system, so use the actual dir name parent_dir = mkdocs.__file__.split(os.sep)[-3] test_configs = ( {'docs_dir': j('site', 'docs'), 'site_dir': 'site'}, {'docs_dir': 'docs', 'site_dir': '.'}, {'docs_dir': '.', 'site_dir': '.'}, {'docs_dir': 'docs', 'site_dir': ''}, {'docs_dir': '', 'site_dir': ''}, {'docs_dir': j('..', parent_dir, 'docs'), 'site_dir': 'docs'}, {'docs_dir': 'docs', 'site_dir': '/'} ) for test_config in test_configs: self.assertRaises(config_options.ValidationError, self.validate_config, test_config) def test_site_dir_in_docs_dir(self): j = os.path.join test_configs = ( {'docs_dir': 'docs', 'site_dir': j('docs', 'site')}, {'docs_dir': '.', 'site_dir': 'site'}, {'docs_dir': '', 'site_dir': 'site'}, {'docs_dir': '/', 'site_dir': 'site'}, ) for test_config in test_configs: self.assertRaises(config_options.ValidationError, self.validate_config, test_config) def test_common_prefix(self): """ Legitimate settings with common prefixes should not fail validation. 
""" test_configs = ( {'docs_dir': 'docs', 'site_dir': 'docs-site'}, {'docs_dir': 'site-docs', 'site_dir': 'site'}, ) for test_config in test_configs: assert self.validate_config(test_config) class ThemeTest(unittest.TestCase): def test_theme_as_string(self): option = config_options.Theme() value = option.validate("mkdocs") self.assertEqual({'name': 'mkdocs'}, value) def test_uninstalled_theme_as_string(self): option = config_options.Theme() self.assertRaises(config_options.ValidationError, option.validate, "mkdocs2") def test_theme_default(self): option = config_options.Theme(default='mkdocs') value = option.validate(None) self.assertEqual({'name': 'mkdocs'}, value) def test_theme_as_simple_config(self): config = { 'name': 'mkdocs' } option = config_options.Theme() value = option.validate(config) self.assertEqual(config, value) def test_theme_as_complex_config(self): config = { 'name': 'mkdocs', 'custom_dir': 'custom', 'static_templates': ['sitemap.html'], 'show_sidebar': False } option = config_options.Theme() value = option.validate(config) self.assertEqual(config, value) def test_theme_name_is_none(self): config = { 'name': None } option = config_options.Theme() value = option.validate(config) self.assertEqual(config, value) def test_theme_config_missing_name(self): config = { 'custom_dir': 'custom', } option = config_options.Theme() self.assertRaises(config_options.ValidationError, option.validate, config) def test_uninstalled_theme_as_config(self): config = { 'name': 'mkdocs2' } option = config_options.Theme() self.assertRaises(config_options.ValidationError, option.validate, config) def test_theme_invalid_type(self): config = ['mkdocs2'] option = config_options.Theme() self.assertRaises(config_options.ValidationError, option.validate, config) def test_post_validation_none_theme_name_and_missing_custom_dir(self): config = { 'theme': { 'name': None } } option = config_options.Theme() self.assertRaises(config_options.ValidationError, option.post_validation, 
config, 'theme') @tempdir() def test_post_validation_inexisting_custom_dir(self, abs_base_path): config = { 'theme': { 'name': None, 'custom_dir': abs_base_path + '/inexisting_custom_dir', } } option = config_options.Theme() self.assertRaises(config_options.ValidationError, option.post_validation, config, 'theme') def test_post_validation_locale_none(self): config = { 'theme': { 'name': 'mkdocs', 'locale': None } } option = config_options.Theme() self.assertRaises(config_options.ValidationError, option.post_validation, config, 'theme') def test_post_validation_locale_invalid_type(self): config = { 'theme': { 'name': 'mkdocs', 'locale': 0 } } option = config_options.Theme() self.assertRaises(config_options.ValidationError, option.post_validation, config, 'theme') def test_post_validation_locale(self): config = { 'theme': { 'name': 'mkdocs', 'locale': 'fr' } } option = config_options.Theme() option.post_validation(config, 'theme') self.assertEqual('fr', config['theme']['locale'].language) class NavTest(unittest.TestCase): def test_old_format(self): option = config_options.Nav() self.assertRaises( config_options.ValidationError, option.validate, [['index.md', ], ] ) def test_provided_dict(self): option = config_options.Nav() value = option.validate([ 'index.md', {"Page": "page.md"} ]) self.assertEqual(['index.md', {'Page': 'page.md'}], value) option.post_validation({'extra_stuff': []}, 'extra_stuff') def test_provided_empty(self): option = config_options.Nav() value = option.validate([]) self.assertEqual(None, value) option.post_validation({'extra_stuff': []}, 'extra_stuff') def test_invalid_type(self): option = config_options.Nav() self.assertRaises(config_options.ValidationError, option.validate, {}) def test_invalid_config(self): option = config_options.Nav() self.assertRaises(config_options.ValidationError, option.validate, [[], 1]) class PrivateTest(unittest.TestCase): def test_defined(self): option = config_options.Private() 
self.assertRaises(config_options.ValidationError, option.validate, 'somevalue') class MarkdownExtensionsTest(unittest.TestCase): @patch('markdown.Markdown') def test_simple_list(self, mockMd): option = config_options.MarkdownExtensions() config = { 'markdown_extensions': ['foo', 'bar'] } config['markdown_extensions'] = option.validate(config['markdown_extensions']) option.post_validation(config, 'markdown_extensions') self.assertEqual({ 'markdown_extensions': ['foo', 'bar'], 'mdx_configs': {} }, config) @patch('markdown.Markdown') def test_list_dicts(self, mockMd): option = config_options.MarkdownExtensions() config = { 'markdown_extensions': [ {'foo': {'foo_option': 'foo value'}}, {'bar': {'bar_option': 'bar value'}}, {'baz': None} ] } config['markdown_extensions'] = option.validate(config['markdown_extensions']) option.post_validation(config, 'markdown_extensions') self.assertEqual({ 'markdown_extensions': ['foo', 'bar', 'baz'], 'mdx_configs': { 'foo': {'foo_option': 'foo value'}, 'bar': {'bar_option': 'bar value'} } }, config) @patch('markdown.Markdown') def test_mixed_list(self, mockMd): option = config_options.MarkdownExtensions() config = { 'markdown_extensions': [ 'foo', {'bar': {'bar_option': 'bar value'}} ] } config['markdown_extensions'] = option.validate(config['markdown_extensions']) option.post_validation(config, 'markdown_extensions') self.assertEqual({ 'markdown_extensions': ['foo', 'bar'], 'mdx_configs': { 'bar': {'bar_option': 'bar value'} } }, config) @patch('markdown.Markdown') def test_dict_of_dicts(self, mockMd): option = config_options.MarkdownExtensions() config = { 'markdown_extensions': { 'foo': {'foo_option': 'foo value'}, 'bar': {'bar_option': 'bar value'}, 'baz': {} } } config['markdown_extensions'] = option.validate(config['markdown_extensions']) option.post_validation(config, 'markdown_extensions') self.assertEqual({ 'markdown_extensions': ['foo', 'bar', 'baz'], 'mdx_configs': { 'foo': {'foo_option': 'foo value'}, 'bar': {'bar_option': 
'bar value'} } }, config) @patch('markdown.Markdown') def test_builtins(self, mockMd): option = config_options.MarkdownExtensions(builtins=['meta', 'toc']) config = { 'markdown_extensions': ['foo', 'bar'] } config['markdown_extensions'] = option.validate(config['markdown_extensions']) option.post_validation(config, 'markdown_extensions') self.assertEqual({ 'markdown_extensions': ['meta', 'toc', 'foo', 'bar'], 'mdx_configs': {} }, config) def test_duplicates(self): option = config_options.MarkdownExtensions(builtins=['meta', 'toc']) config = { 'markdown_extensions': ['meta', 'toc'] } config['markdown_extensions'] = option.validate(config['markdown_extensions']) option.post_validation(config, 'markdown_extensions') self.assertEqual({ 'markdown_extensions': ['meta', 'toc'], 'mdx_configs': {} }, config) def test_builtins_config(self): option = config_options.MarkdownExtensions(builtins=['meta', 'toc']) config = { 'markdown_extensions': [ {'toc': {'permalink': True}} ] } config['markdown_extensions'] = option.validate(config['markdown_extensions']) option.post_validation(config, 'markdown_extensions') self.assertEqual({ 'markdown_extensions': ['meta', 'toc'], 'mdx_configs': {'toc': {'permalink': True}} }, config) @patch('markdown.Markdown') def test_configkey(self, mockMd): option = config_options.MarkdownExtensions(configkey='bar') config = { 'markdown_extensions': [ {'foo': {'foo_option': 'foo value'}} ] } config['markdown_extensions'] = option.validate(config['markdown_extensions']) option.post_validation(config, 'markdown_extensions') self.assertEqual({ 'markdown_extensions': ['foo'], 'bar': { 'foo': {'foo_option': 'foo value'} } }, config) def test_none(self): option = config_options.MarkdownExtensions(default=[]) config = { 'markdown_extensions': None } config['markdown_extensions'] = option.validate(config['markdown_extensions']) option.post_validation(config, 'markdown_extensions') self.assertEqual({ 'markdown_extensions': [], 'mdx_configs': {} }, config) 
@patch('markdown.Markdown') def test_not_list(self, mockMd): option = config_options.MarkdownExtensions() self.assertRaises(config_options.ValidationError, option.validate, 'not a list') @patch('markdown.Markdown') def test_invalid_config_option(self, mockMd): option = config_options.MarkdownExtensions() config = { 'markdown_extensions': [ {'foo': 'not a dict'} ] } self.assertRaises( config_options.ValidationError, option.validate, config['markdown_extensions'] ) @patch('markdown.Markdown') def test_invalid_config_item(self, mockMd): option = config_options.MarkdownExtensions() config = { 'markdown_extensions': [ ['not a dict'] ] } self.assertRaises( config_options.ValidationError, option.validate, config['markdown_extensions'] ) @patch('markdown.Markdown') def test_invalid_dict_item(self, mockMd): option = config_options.MarkdownExtensions() config = { 'markdown_extensions': [ {'key1': 'value', 'key2': 'too many keys'} ] } self.assertRaises( config_options.ValidationError, option.validate, config['markdown_extensions'] ) def test_unknown_extension(self): option = config_options.MarkdownExtensions() config = { 'markdown_extensions': ['unknown'] } self.assertRaises( config_options.ValidationError, option.validate, config['markdown_extensions'] )
waylan/mkdocs
mkdocs/tests/config/config_options_tests.py
Python
bsd-2-clause
31,081
from io import StringIO from coaster.logger import RepeatValueIndicator, filtered_value, pprint_with_indent def test_filtered_value(): """Test for filtered values.""" # Doesn't touch normal key/value pairs assert filtered_value('normal', 'value') == 'value' assert filtered_value('also_normal', 123) == 123 # But does redact sensitive keys assert filtered_value('password', '123pass') != '123pass' # The returned value is an object that renders via repr and str as '[Filtered]' assert repr(filtered_value('password', '123pass')) == '[Filtered]' assert str(filtered_value('password', '123pass')) == '[Filtered]' # Also works on partial matches in the keys assert repr(filtered_value('confirm_password', '123pass')) == '[Filtered]' # The filter uses a verbose regex. Words in the middle of the regex also work assert repr(filtered_value('access_token', 'secret-here')) == '[Filtered]' # Filters are case insensitive assert repr(filtered_value('TELEGRAM_ERROR_APIKEY', 'api:key')) == '[Filtered]' # Keys with 'token' as a word are also filtered assert repr(filtered_value('SMS_TWILIO_TOKEN', 'api:key')) == '[Filtered]' # Numbers that look like card numbers are filtered assert ( filtered_value('anything', 'My number is 1234 5678 9012 3456') == 'My number is [Filtered]' ) # This works with any combination of spaces and dashes within the number assert ( filtered_value('anything', 'My number is 1234 5678-90123456') == 'My number is [Filtered]' ) def test_pprint_with_indent(): """Test pprint_with_indent does indentation.""" out = StringIO() data = { 12: 34, 'confirm_password': '12345qwerty', 'credentials': ['abc', 'def'], 'key': 'value', 'nested_dict': {'password': 'not_filtered'}, 'password': '12345qwerty', } pprint_with_indent(data, out) assert ( out.getvalue() == '''\ {12: 34, 'confirm_password': [Filtered], 'credentials': [Filtered], 'key': 'value', 'nested_dict': {'password': 'not_filtered'}, 'password': [Filtered]} ''' ) def test_repeat_value_indicator(): """Test RepeatValueIndicator 
class.""" assert repr(RepeatValueIndicator('key')) == "<same as prior 'key'>" assert str(RepeatValueIndicator('key')) == "<same as prior 'key'>"
hasgeek/coaster
tests/test_logger.py
Python
bsd-2-clause
2,411
# encoding: utf-8 """ IMPORTANT - COLOUR SUPPORT IS CURRENTLY EXTREMELY EXPERIMENTAL. THE API MAY CHANGE, AND NO DEFAULT WIDGETS CURRENTLY TAKE ADVANTAGE OF THEME SUPPORT AT ALL. """ import curses from . import global_options def disable_color(): global_options.DISABLE_ALL_COLORS = True def enable_color(): global_options.DISABLE_ALL_COLORS = False class ThemeManager(object): _colors_to_define = ( # DO NOT DEFINE THIS COLOR - THINGS BREAK #('WHITE_BLACK', DO_NOT_DO_THIS, DO_NOT_DO_THIS), ('BLACK_WHITE', curses.COLOR_BLACK, curses.COLOR_WHITE), #('BLACK_ON_DEFAULT', curses.COLOR_BLACK, -1), #('WHITE_ON_DEFAULT', curses.COLOR_WHITE, -1), ('BLUE_BLACK', curses.COLOR_BLUE, curses.COLOR_BLACK), ('CYAN_BLACK', curses.COLOR_CYAN, curses.COLOR_BLACK), ('GREEN_BLACK', curses.COLOR_GREEN, curses.COLOR_BLACK), ('MAGENTA_BLACK', curses.COLOR_MAGENTA, curses.COLOR_BLACK), ('RED_BLACK', curses.COLOR_RED, curses.COLOR_BLACK), ('YELLOW_BLACK', curses.COLOR_YELLOW, curses.COLOR_BLACK), ('BLACK_RED', curses.COLOR_BLACK, curses.COLOR_RED), ('BLACK_GREEN', curses.COLOR_BLACK, curses.COLOR_GREEN), ('BLACK_YELLOW', curses.COLOR_BLACK, curses.COLOR_YELLOW), ('BLUE_WHITE', curses.COLOR_BLUE, curses.COLOR_WHITE), ('CYAN_WHITE', curses.COLOR_CYAN, curses.COLOR_WHITE), ('GREEN_WHITE', curses.COLOR_GREEN, curses.COLOR_WHITE), ('MAGENTA_WHITE', curses.COLOR_MAGENTA, curses.COLOR_WHITE), ('RED_WHITE', curses.COLOR_RED, curses.COLOR_WHITE), ('YELLOW_WHITE', curses.COLOR_YELLOW, curses.COLOR_WHITE), ) default_colors = { 'DEFAULT' : 'WHITE_BLACK', 'FORMDEFAULT' : 'WHITE_BLACK', 'NO_EDIT' : 'BLUE_BLACK', 'STANDOUT' : 'CYAN_BLACK', 'CURSOR' : 'WHITE_BLACK', 'LABEL' : 'GREEN_BLACK', 'LABELBOLD' : 'WHITE_BLACK', 'CONTROL' : 'YELLOW_BLACK', 'IMPORTANT' : 'GREEN_BLACK', 'SAFE' : 'GREEN_BLACK', 'WARNING' : 'YELLOW_BLACK', 'DANGER' : 'RED_BLACK', 'CRITICAL' : 'BLACK_RED', 'GOOD' : 'GREEN_BLACK', 'GOODHL' : 'GREEN_BLACK', 'VERYGOOD' : 'BLACK_GREEN', 'CAUTION' : 'YELLOW_BLACK', 'CAUTIONHL' : 'BLACK_YELLOW', 
} def __init__(self): #curses.use_default_colors() self._defined_pairs = {} self._names = {} try: self._max_pairs = curses.COLOR_PAIRS - 1 do_color = True except AttributeError: # curses.start_color has failed or has not been called do_color = False # Disable all color use across the application disable_color() if do_color and curses.has_colors(): self.initialize_pairs() self.initialize_names() def find_pair(self, caller, request='DEFAULT'): if not curses.has_colors() or global_options.DISABLE_ALL_COLORS: return False if request == 'DEFAULT': request = caller.color # Locate the requested color pair. Default to default if not found. try: pair = self._defined_pairs[self._names[request]] except: pair = self._defined_pairs[self._names['DEFAULT']] # now make the actual attribute color_attribute = curses.color_pair(pair[0]) return color_attribute def set_default(self, caller): return False def initialize_pairs(self): # White on Black is fixed as color_pair 0 self._defined_pairs['WHITE_BLACK'] = (0, curses.COLOR_WHITE, curses.COLOR_BLACK) for cp in self.__class__._colors_to_define: if cp[0] == 'WHITE_BLACK': # silently protect the user from breaking things. continue self.initalize_pair(cp[0], cp[1], cp[2]) def initialize_names(self): self._names.update(self.__class__.default_colors) def initalize_pair(self, name, fg, bg): #Initialize a color_pair for the required color and return the number. #Raise an exception if this is not possible. if (len(list(self._defined_pairs.keys())) + 1) == self._max_pairs: raise Exception("Too many colors") _this_pair_number = len(list(self._defined_pairs.keys())) + 1 curses.init_pair(_this_pair_number, fg, bg) self._defined_pairs[name] = (_this_pair_number, fg, bg) return _this_pair_number def get_pair_number(self, name): return self._defined_pairs[name][0]
tescalada/npyscreen-restructure
npyscreen/ThemeManagers.py
Python
bsd-2-clause
4,810
""" This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. Written (W) 2013 Heiko Strathmann """ from kameleon_mcmc.distribution.Gaussian import Gaussian from kameleon_mcmc.experiments.SingleChainExperiment import SingleChainExperiment from kameleon_mcmc.gp.GPData import GPData from kameleon_mcmc.gp.mcmc.PseudoMarginalHyperparameterDistribution import PseudoMarginalHyperparameterDistribution from kameleon_mcmc.kernel.GaussianKernel import GaussianKernel from kameleon_mcmc.mcmc.MCMCChain import MCMCChain from kameleon_mcmc.mcmc.MCMCParams import MCMCParams from kameleon_mcmc.mcmc.output.PlottingOutput import PlottingOutput from kameleon_mcmc.mcmc.output.StatisticsOutput import StatisticsOutput from kameleon_mcmc.mcmc.samplers.AdaptiveMetropolisLearnScale import AdaptiveMetropolisLearnScale from kameleon_mcmc.mcmc.samplers.KameleonWindowLearnScale import KameleonWindowLearnScale from kameleon_mcmc.mcmc.samplers.StandardMetropolis import StandardMetropolis from numpy.lib.twodim_base import eye from numpy.linalg.linalg import cholesky from numpy.ma.core import mean, ones, shape, asarray, zeros from numpy.ma.extras import cov from numpy.random import permutation, seed from scipy.linalg.basic import solve_triangular from kameleon_mcmc.experiments.ClusterTools import ClusterTools import os import sys if __name__ == '__main__': if len(sys.argv) != 3: print "usage:", str(sys.argv[0]).split(os.sep)[-1], "<experiment_dir_base> <number_of_experiments>" print "example:" print "python " + str(sys.argv[0]).split(os.sep)[-1] + " /nfs/nhome/live/ucabhst/kameleon_experiments/ 3" exit() experiment_dir_base = str(sys.argv[1]) n = int(str(sys.argv[2])) # loop over parameters here experiment_dir = experiment_dir_base + str(os.path.abspath(sys.argv[0])).split(os.sep)[-1].split(".")[0] + os.sep print 
"running experiments", n, "times at base", experiment_dir # load data data,labels=GPData.get_glass_data() # normalise and whiten dataset data-=mean(data, 0) L=cholesky(cov(data.T)) data=solve_triangular(L, data.T, lower=True).T dim=shape(data)[1] # prior on theta and posterior target estimate theta_prior=Gaussian(mu=0*ones(dim), Sigma=eye(dim)*5) distribution=PseudoMarginalHyperparameterDistribution(data, labels, \ n_importance=100, prior=theta_prior, \ ridge=1e-3) sigma = 23.0 print "using sigma", sigma kernel = GaussianKernel(sigma=sigma) for i in range(n): mcmc_samplers = [] burnin=50000 num_iterations=500000 #mcmc_samplers.append(KameleonWindowLearnScale(distribution, kernel, stop_adapt=burnin)) #mean_est = zeros(distribution.dimension, dtype="float64") #cov_est = 1.0 * eye(distribution.dimension) #cov_est[0, 0] = distribution.V #mcmc_samplers.append(AdaptiveMetropolisLearnScale(distribution, mean_est=mean_est, cov_est=cov_est)) #mcmc_samplers.append(AdaptiveMetropolis(distribution, mean_est=mean_est, cov_est=cov_est)) mcmc_samplers.append(StandardMetropolis(distribution)) start = zeros(distribution.dimension, dtype="float64") mcmc_params = MCMCParams(start=start, num_iterations=num_iterations, burnin=burnin) mcmc_chains = [MCMCChain(mcmc_sampler, mcmc_params) for mcmc_sampler in mcmc_samplers] for mcmc_chain in mcmc_chains: mcmc_chain.append_mcmc_output(StatisticsOutput()) experiments = [SingleChainExperiment(mcmc_chain, experiment_dir) for mcmc_chain in mcmc_chains] for experiment in experiments: ClusterTools.submit_experiment(experiment)
karlnapf/kameleon-mcmc
kameleon_mcmc/experiments/scripts/glass_ard/glass_ard_ground_truth.py
Python
bsd-2-clause
4,033
__author__ = "Konstantin Osipov <kostja.osipov@gmail.com>" # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
import socket
import yaml
import sys
import re
from tarantool_connection import TarantoolConnection

# Terminator appended to each command sent to the admin console; the
# console protocol is line-oriented.
ADMIN_SEPARATOR = '\n'


class AdminConnection(TarantoolConnection):
    """Connection to a Tarantool administrative (text) console.

    Sends newline-terminated commands and reads back YAML-formatted
    replies terminated by a YAML document-end marker ("...").
    """

    def execute_no_reconnect(self, command, silent):
        """Send *command* to the admin console and return the raw reply.

        Does not attempt to reconnect on failure (hence the name).

        :param command: console command text; a falsy value is a no-op
        :param silent: when False, echo both the command and the reply
                       to stdout
        :returns: the raw YAML reply string (None for an empty command)
        """
        if not command:
            return
        if not silent:
            sys.stdout.write(command + ADMIN_SEPARATOR)
        # Collapse a multi-line command into a single console line.
        cmd = command.replace('\n', ' ') + ADMIN_SEPARATOR
        self.socket.sendall(cmd)
        bufsiz = 4096
        res = ""
        # Accumulate the reply until the peer closes the connection or the
        # YAML document-end marker is seen (with LF or CRLF line endings).
        # NOTE(review): concatenating recv() output into a str assumes
        # Python 2 byte-string semantics; under Python 3 this would need
        # explicit bytes handling — confirm the intended interpreter.
        while True:
            buf = self.socket.recv(bufsiz)
            if not buf:
                break
            res = res + buf
            if (res.rfind("\n...\n") >= 0 or res.rfind("\r\n...\r\n") >= 0):
                break
        # validate yaml by parsing it
        # NOTE(review): yaml.load() without an explicit Loader can run
        # arbitrary constructors on untrusted input; the console reply is
        # presumably trusted here, but yaml.safe_load() would be safer.
        try:
            yaml.load(res)
        finally:
            # The reply is echoed even if YAML parsing raises, so the
            # operator can see what the server actually sent.
            if not silent:
                sys.stdout.write(res.replace("\r\n", "\n"))
        return res

    def connect(self):
        """Open the underlying connection and consume the console greeting.

        :raises RuntimeError: if the greeting does not look like a
            Tarantool console handshake banner.
        """
        super(AdminConnection, self).connect()
        handshake = self.socket.recv(128)
        if not re.search(r'^Tarantool.*console.*', str(handshake)):
            raise RuntimeError('Broken tarantool console handshake')
nvoron23/tarantool
test/lib/admin_connection.py
Python
bsd-2-clause
2,482
import setuptools
import os

# Execute the package __init__ to pull __version__ into this module's
# globals without importing the package (importing would require the
# package's dependencies to already be installed).
with open("src/lsi/__init__.py") as f:
    exec(f.read())

# Read the pinned requirements inside a context manager so the file
# handle is closed deterministically (the original used a bare
# open(...).readlines(), which leaked the handle and passed entries with
# trailing newlines). Blank lines and comment lines are skipped.
with open('requirements.txt') as f:
    _requirements = [
        line.strip()
        for line in f
        if line.strip() and not line.lstrip().startswith('#')
    ]

setuptools.setup(
    name='lsi',
    version=__version__,  # noqa: F821 -- injected by the exec() above
    author="Narrative Science",
    author_email="anelson@narrativescience.com",
    url="https://github.com/NarrativeScience/lsi",
    package_dir={'': 'src'},
    packages=setuptools.find_packages('src'),
    provides=setuptools.find_packages('src'),
    install_requires=_requirements,
    entry_points={
        'console_scripts': ['lsi = lsi.lsi:main']
    }
)
NarrativeScience/lsi
setup.py
Python
bsd-2-clause
583
from unittest import mock

import pytest
import stripe
from model_mommy import mommy
from rest_framework.reverse import reverse

from restframework_stripe import models
from restframework_stripe.test import get_mock_resource


# NOTE: with stacked @mock.patch decorators, the mocks are passed to the
# test bottom-up: the innermost (last-applied) patch is the first argument.
# So ``customer_retrieve`` is the Customer.retrieve mock and
# ``customer_update`` is the Customer.save mock.
@mock.patch("stripe.Customer.save")
@mock.patch("stripe.Customer.retrieve")
@pytest.mark.django_db
def test_customer_update_bank_acct(customer_retrieve, customer_update, customer,
        bank_account, api_client):
    """PATCHing a customer's default_source to one of their bank accounts
    succeeds and is reflected on the local Customer record."""
    # The bank account must belong to the customer being updated.
    bank_account.owner = customer.owner
    bank_account.save()
    api_client.force_authenticate(customer.owner)
    data = {
        "default_source": bank_account.id,
        "default_source_type": "bank_account"
        }
    # Stripe returns the customer with the new default source attached.
    customer_retrieve.return_value = get_mock_resource("Customer")
    customer_update.return_value = get_mock_resource("Customer",
            default_source=bank_account.source)
    uri = reverse("rf_stripe:customer-detail", kwargs={"pk": customer.pk})
    response = api_client.patch(uri, data=data, format="json")
    customer.refresh_from_db()
    assert response.status_code == 200, response.data
    assert customer.default_source.id == bank_account.id


@mock.patch("stripe.Customer.save")
@mock.patch("stripe.Customer.retrieve")
@pytest.mark.django_db
def test_customer_update_card(customer_retrieve, customer_update, customer,
        card, api_client):
    """Same as the bank-account test above but with a card source."""
    card.owner = customer.owner
    card.save()
    api_client.force_authenticate(customer.owner)
    data = {
        "default_source": card.id,
        "default_source_type": "card"
        }
    customer_retrieve.return_value = get_mock_resource("Customer")
    customer_update.return_value = get_mock_resource("Customer",
            default_source=card.source)
    uri = reverse("rf_stripe:customer-detail", kwargs={"pk": customer.pk})
    response = api_client.patch(uri, data=data, format="json")
    customer.refresh_from_db()
    assert response.status_code == 200, response.data
    assert customer.default_source.id == card.id


@pytest.mark.django_db
def test_customer_to_record_with_card_as_source(card):
    """stripe_object_to_record resolves a card source to the local Card."""
    stripe_object = get_mock_resource("Customer", default_source=card.source)
    record = models.Customer.stripe_object_to_record(stripe_object)
    assert record["default_source"].id == card.id


@pytest.mark.django_db
def test_customer_to_record_with_bank_account_as_source(bank_account):
    """stripe_object_to_record resolves a bank-account source to the local
    BankAccount."""
    stripe_object = get_mock_resource("Customer",
            default_source=bank_account.source)
    record = models.Customer.stripe_object_to_record(stripe_object)
    assert record["default_source"].id == bank_account.id


@pytest.mark.django_db
def test_customer_to_record_with_string_as_source():
    """An unrecognised string source id is dropped from the record rather
    than raising."""
    stripe_object = get_mock_resource("Customer",
            default_source="bjkldjkfd532")
    record = models.Customer.stripe_object_to_record(stripe_object)
    assert record.get("default_source", None) is None


# Mock argument order (bottom-up): a_retrieve = Customer.retrieve,
# a_update = Customer.save, l_create = ListObject.create (sources list).
@mock.patch("stripe.ListObject.create")
@mock.patch("stripe.Customer.save")
@mock.patch("stripe.Customer.retrieve")
@pytest.mark.django_db
def test_customer_add_payment_method(a_retrieve, a_update, l_create, customer,
        api_client):
    """PATCHing a raw token as ``source`` creates a new Card record locally
    and updates the customer's email."""
    api_client.force_authenticate(customer.owner)
    data = {
        "source": "fkdsla;jfioewni3o2ndsa",
        "email": "test@test.com",
        }
    new_card = get_mock_resource("Card")
    # The stripe-side customer after the update: the token is consumed and
    # replaced by the newly created card as default_source.
    updated_data = data.copy()
    updated_data.pop("source")
    updated_data["default_source"] = new_card
    a_retrieve.return_value = get_mock_resource("Customer")
    l_create.return_value = new_card
    a_update.return_value = get_mock_resource("Customer", **updated_data)
    uri = reverse("rf_stripe:customer-detail", kwargs={"pk": customer.pk})
    response = api_client.patch(uri, data=data, format="json")
    customer.refresh_from_db()
    assert response.status_code == 200, response.data
    assert 0 < models.Card.objects.filter(owner=customer.owner).count()
    assert customer.source["email"] == data["email"]


@pytest.mark.django_db
def test_options(customer, api_client):
    """OPTIONS on the customer list endpoint is allowed for an
    authenticated owner."""
    api_client.force_authenticate(customer.owner)
    uri = reverse("rf_stripe:customer-list")
    response = api_client.options(uri)
    assert response.status_code == 200, response.data
andrewyoung1991/django-restframework-stripe
tests/test_customer.py
Python
bsd-2-clause
4,147
#! /usr/bin/env python # Copyright (c) 2014, Fundacion Dr. Manuel Sadosky # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Emulate a small x86/x86-64/ARM binary's ``main`` with BARF, hooking
``atoi`` and ``printf`` so the program runs without a libc."""
from __future__ import absolute_import
from __future__ import print_function

import sys
import time

from barf import BARF
from barf.arch import ARCH_ARM
from barf.arch import ARCH_X86
from barf.arch import ARCH_X86_MODE_32
from barf.arch import ARCH_X86_MODE_64
from barf.core.symbols import load_symbols
from barf.utils.cconv import ArmSystemV
from barf.utils.cconv import X86SystemV
from barf.utils.cconv import X86_64SystemV
from barf.utils.utils import read_c_string
from barf.utils.utils import write_c_string


def split_command_line(argv):
    """Split ``argv`` at the ``--`` separator into (tool options,
    emulated-program arguments). Without ``--`` everything after the
    program name is treated as tool options."""
    if '--' in argv:
        prg_options = argv[1:argv.index('--')]
        prg_arguments = argv[argv.index('--')+1:]
    else:
        prg_options = argv[1:]
        prg_arguments = []

    return prg_options, prg_arguments


def atoi_hook(emulator, state):
    """Hook emulating libc's ``int atoi(const char *nptr)``."""
    print("[+] atoi hooked!")

    # int atoi(const char *nptr);
    cc = state['cc']

    # Read parameters.
    nptr = cc.parameters[0]

    # Emulate function behavior.
    value = int(read_c_string(emulator, nptr, max_length=1024))

    # Save result.
    cc.return_value = value


def printf_hook(emulator, state):
    """Hook emulating libc's ``printf``.

    NOTE(review): only the format string itself is printed — varargs are
    not interpolated, so ``%d`` etc. appear literally in the output.
    """
    print("[+] printf hooked!")

    # int printf(const char *format, ...);
    cc = state["cc"]

    # Read parameters.
    fmt_ptr = cc.parameters[0]

    # Emulate function behavior.
    fmt = read_c_string(emulator, fmt_ptr, max_length=1024)
    out = fmt
    print(out)

    # Save result.
    cc.return_value = len(out)


def get_symbols(binary_path):
    """Load the binary's symbols and return them indexed both by address
    and by name: ({addr: (name, size, returns)}, {name: (addr, size,
    returns)})."""
    symbols_by_addr = load_symbols(binary_path)

    symbols_by_name = {}
    for addr in symbols_by_addr:
        name, size, returns = symbols_by_addr[addr]
        symbols_by_name[name] = (addr, size, returns)

    return symbols_by_addr, symbols_by_name


def setup_argv(emulator, argv, base_addr):
    """Write a C-style ``argv`` into emulated memory at ``base_addr``:
    first a null-terminated array of pointers, then the NUL-terminated
    argument strings immediately after it."""
    addr_size = emulator.arch_info.address_size // 8

    argv_entry_addr = {}

    # Copy arguments into the stack but first leave space for the argv
    # array (null-terminated).
    addr = base_addr + (len(argv) + 1) * addr_size
    for index, arg in enumerate(argv):
        argv_entry_addr[index] = addr
        write_c_string(emulator, addr, arg)
        addr += len(arg) + 1    # each argument is null-terminated

    # Build argv array.
    for index in range(len(argv)):
        addr = argv_entry_addr[index]
        emulator.write_memory(base_addr + index * addr_size, addr_size, addr)

    # Add null terminator.
    emulator.write_memory(base_addr + len(argv) * addr_size, addr_size, 0x0)


def setup_emulator(emulator, binary, args):
    """Prepare the emulator to run the binary's ``main``.

    Selects the calling convention for the target architecture, builds an
    argv in emulated memory, locates ``main`` via symbols and installs
    atoi/printf hooks at hardcoded libc-stub addresses.

    :returns: (initial register context, start address, end address,
               {addr: hook tuple}).
    """
    # Instantiate calling convention.
    if binary.architecture == ARCH_X86:
        if binary.architecture_mode == ARCH_X86_MODE_32:
            cc = X86SystemV(emulator)
        else:
            cc = X86_64SystemV(emulator)
    elif binary.architecture == ARCH_ARM:
        cc = ArmSystemV(emulator)

    arch = emulator.arch_info

    # NOTE(review): fixed stack pointer and argv base addresses — these
    # presumably match the sample binaries' memory layout; verify when
    # emulating anything else.
    sp = 0x1500
    base_argv = 0x2500

    emulator.registers[arch.stack_pointer_register()] = sp

    setup_argv(emulator, args, base_argv)

    # Setup main's parameters: argc, argv and envp.
    cc.parameters[0] = len(args)    # argc
    cc.parameters[1] = base_argv    # argv
    cc.parameters[2] = 0x0          # envp

    # Load symbols.
    print("[+] Loading symbols...")
    symbols_by_addr, symbols_by_name = get_symbols(binary.filename)

    start = symbols_by_name["main"][0]
    size = symbols_by_name["main"][1]

    # TODO Remove hardcoded addresses.
    # The atoi/printf PLT addresses below are specific to the shipped
    # sample binaries.
    if binary.architecture == ARCH_X86:
        end = start + size - 1

        if binary.architecture_mode == ARCH_X86_MODE_32:
            atoi_addr = 0x8048380
            printf_addr = 0x8048350

        if binary.architecture_mode == ARCH_X86_MODE_64:
            atoi_addr = 0x4004d0
            printf_addr = 0x4004a0

    if binary.architecture == ARCH_ARM:
        end = start + size - 8 - 8

        # A set low bit on the entry address marks a Thumb-mode symbol.
        if start & 0x1 == 0x1:
            # ARCH_ARM_MODE_THUMB
            atoi_addr = 0x10394
            printf_addr = 0x1035c
        if start & 0x1 == 0x0:
            # ARCH_ARM_MODE_ARM
            atoi_addr = 0x10388
            printf_addr = 0x10358

    state = {
        'cc': cc,
    }

    ctx_init = {
        'registers': {
            arch.flags_register(): arch.flags_default_value(),
            arch.stack_pointer_register(): sp,
        }
    }

    hooks = {
        atoi_addr: (atoi_hook, state, True, 0),
        printf_addr: (printf_hook, state, True, 0),
    }

    return ctx_init, start, end, hooks


def main():
    """Load the binary named after ``--``, set up the emulator and run
    ``main`` start-to-end, timing the whole process."""
    start_time = time.time()

    # Split program arguments.
    # ======================================================================== #
    prg_options, prg_arguments = split_command_line(sys.argv)

    binary_path = prg_arguments[0]

    # Loading binary.
    # ======================================================================== #
    print("[+] Loading binary...")
    barf = BARF(binary_path)

    if barf.binary.architecture not in [ARCH_X86, ARCH_ARM]:
        print("[-] Architecture not supported!")

        sys.exit(1)

    # Setup emulator.
    # ======================================================================== #
    ctx_init, start, end, hooks = setup_emulator(barf.emulator, barf.binary,
                                                 prg_arguments)

    # Emulate.
    # ======================================================================== #
    barf.emulate(context=ctx_init, start=start, end=end, hooks=hooks,
                 print_asm=False)

    end_time = time.time()
    total_time = end_time - start_time

    print("[+] Total processing time: {0:8.3f}s".format(total_time))


if __name__ == '__main__':
    # Expect exactly: <tool> -- <binary> <iters>
    if len(sys.argv) != 4:
        print("Usage: {} -- samples/bin/loop-simple1.[x86|x86_64|arm|arm_thumb] <iters>".format(sys.argv[0]))
        sys.exit(1)

    main()
programa-stic/barf-project
examples/misc/emulate_binary.py
Python
bsd-2-clause
7,099
# -*- mode: python; coding: utf-8 -*- # Copyright (c) 2018 Radio Astronomy Software Group # Licensed under the 2-clause BSD License """Primary container for radio interferometer calibration solutions.""" import copy import numpy as np import threading import warnings from ..uvbase import UVBase from .. import parameter as uvp from .. import telescopes as uvtel from .. import utils as uvutils from ..uvdata import UVData __all__ = ["UVCal"] class UVCal(UVBase): """ A class defining calibration solutions for interferometric data. Attributes ---------- UVParameter objects : For full list see the documentation on ReadTheDocs: http://pyuvdata.readthedocs.io/en/latest/. Some are always required, some are required for certain cal_types and cal_styles and others are always optional. """ def __init__(self): self._Nfreqs = uvp.UVParameter( "Nfreqs", description="Number of frequency channels", expected_type=int ) self._Njones = uvp.UVParameter( "Njones", description="Number of Jones calibration " "parameters (Number of Jones matrix elements " "calculated in calibration).", expected_type=int, ) desc = ( "Number of times with different calibrations calculated " "(if a calibration is calculated over a range of integrations, " "this gives the number of separate calibrations along the time axis)." ) self._Ntimes = uvp.UVParameter("Ntimes", description=desc, expected_type=int) self._history = uvp.UVParameter( "history", description="String of history, units English", form="str", expected_type=str, ) self._Nspws = uvp.UVParameter( "Nspws", description="Number of spectral windows " "(ie non-contiguous spectral chunks). ", expected_type=int, ) desc = "Name of telescope. e.g. HERA. String." self._telescope_name = uvp.UVParameter( "telescope_name", description=desc, form="str", expected_type=str ) desc = ( "Number of antennas that have data associated with them " "(i.e. length of ant_array), which may be smaller than the number" "of antennas in the telescope (i.e. 
length of antenna_numbers)." ) self._Nants_data = uvp.UVParameter( "Nants_data", description=desc, expected_type=int ) desc = ( "Number of antennas in the antenna_numbers array. May be larger " "than the number of antennas with gains associated with them." ) self._Nants_telescope = uvp.UVParameter( "Nants_telescope", description=desc, expected_type=int ) desc = ( "Telescope location: xyz in ITRF (earth-centered frame). " "Can also be accessed using telescope_location_lat_lon_alt or " "telescope_location_lat_lon_alt_degrees properties" ) self._telescope_location = uvp.LocationParameter( "telescope_location", description=desc, acceptable_range=(6.35e6, 6.39e6), tols=1e-3, required=False, ) desc = ( "Array of integer antenna numbers that appear in self.gain_array," " with shape (Nants_data,). " "This array is ordered to match the inherent ordering of the zeroth" " axis of self.gain_array." ) self._ant_array = uvp.UVParameter( "ant_array", description=desc, expected_type=int, form=("Nants_data",) ) desc = ( "Array of antenna names with shape (Nants_telescope,). " "Ordering of elements matches ordering of antenna_numbers." ) self._antenna_names = uvp.UVParameter( "antenna_names", description=desc, form=("Nants_telescope",), expected_type=str, ) desc = ( "Array of all integer-valued antenna numbers in the telescope with " "shape (Nants_telescope,). Ordering of elements matches that of " "antenna_names. This array is not necessarily identical to " "ant_array, in that this array holds all antenna numbers " "associated with the telescope, not just antennas with data, and " "has an in principle non-specific ordering." ) self._antenna_numbers = uvp.UVParameter( "antenna_numbers", description=desc, form=("Nants_telescope",), expected_type=int, ) desc = ( "Array giving coordinates of antennas relative to " "telescope_location (ITRF frame), shape (Nants_telescope, 3), " "units meters. 
See the tutorial page in the documentation " "for an example of how to convert this to topocentric frame." ) self._antenna_positions = uvp.UVParameter( "antenna_positions", description=desc, form=("Nants_telescope", 3), expected_type=float, tols=1e-3, # 1 mm required=False, ) desc = ( "Option to support 'wide-band' calibration solutions with gains or delays " "that apply over a range of frequencies rather than having distinct values " "at each frequency. Delay type cal solutions are always 'wide-band' if " "future_array_shapes is True. If it is True several other parameters are " "affected: future_array_shapes is also True; the data-like arrays have a " "spw axis that is Nspws long rather than a frequency axis that is Nfreqs " "long; the `freq_range` parameter is required and the `freq_array` " "parameter is not required." ) self._wide_band = uvp.UVParameter( "wide_band", description=desc, expected_type=bool, value=False, ) self._spw_array = uvp.UVParameter( "spw_array", description="Array of spectral window numbers, shape (Nspws).", form=("Nspws",), expected_type=int, ) # this dimensionality of freq_array does not allow for different spws # to have different numbers of channels desc = ( "Array of frequencies, center of the channel, " "shape (1, Nfreqs) or (Nfreqs,) if future_array_shapes=True, units Hz." "Not required if future_array_shapes=True and wide_band=True." ) # TODO: Spw axis to be collapsed in future release self._freq_array = uvp.UVParameter( "freq_array", description=desc, form=(1, "Nfreqs"), expected_type=float, tols=1e-3, ) # mHz desc = ( "Width of frequency channels (Hz). If flex_spw = False and " "future_array_shapes=False, then it is a " "single value of type = float, otherwise it is an array of shape " "(Nfreqs,), type = float." ) self._channel_width = uvp.UVParameter( "channel_width", description=desc, expected_type=float, tols=1e-3, ) # 1 mHz desc = ( "Required if cal_type='delay' or wide_band=True. 
Frequency range that " "solutions are valid for. If future_array_shapes is False it is a " "list: [start_frequency, end_frequency], otherwise it is an array of shape " "(Nspws, 2). Units are Hz." ) self._freq_range = uvp.UVParameter( "freq_range", required=False, description=desc, form=2, expected_type=float, tols=1e-3, ) desc = ( "Array of antenna polarization integers, shape (Njones). " "linear pols -5:-8 (jxx, jyy, jxy, jyx)." "circular pols -1:-4 (jrr, jll. jrl, jlr)." ) self._jones_array = uvp.UVParameter( "jones_array", description=desc, expected_type=int, acceptable_vals=list(np.arange(-8, 0)), form=("Njones",), ) desc = ( "Time range (in JD) that cal solutions are valid for." "list: [start_time, end_time] in JD. Should only be set in Ntimes is 1." ) self._time_range = uvp.UVParameter( "time_range", description=desc, form=2, expected_type=float, required=False ) desc = ( "Array of calibration solution times, center of integration, " "shape (Ntimes), units Julian Date" ) self._time_array = uvp.UVParameter( "time_array", description=desc, form=("Ntimes",), expected_type=float, tols=1e-3 / (60.0 * 60.0 * 24.0), ) # standard angle tolerance: 1 mas in radians. radian_tol = 1 * 2 * np.pi * 1e-3 / (60.0 * 60.0 * 360.0) desc = "Array of lsts, center of integration, shape (Ntimes), units radians" self._lst_array = uvp.UVParameter( "lst_array", description=desc, form=("Ntimes",), expected_type=float, tols=radian_tol, required=False, ) desc = ( "Integration time of a time bin, units seconds. " "If future_array_shapes=False, then it is a single value of type = float, " "otherwise it is an array of shape (Ntimes), type = float." ) self._integration_time = uvp.UVParameter( "integration_time", description=desc, expected_type=float, tols=1e-3 ) # 1ms desc = ( "The convention for applying the calibration solutions to data." 'Values are "divide" or "multiply", indicating that to calibrate ' "one should divide or multiply uncalibrated data by gains. 
" "Mathematically this indicates the alpha exponent in the equation: " "calibrated data = gain^alpha * uncalibrated data. A value of " '"divide" represents alpha=-1 and "multiply" represents alpha=1.' ) self._gain_convention = uvp.UVParameter( "gain_convention", form="str", expected_type=str, description=desc, acceptable_vals=["divide", "multiply"], ) desc = ( "Array of flags to be applied to calibrated data (logical OR " "of input and flag generated by calibration). True is flagged. " "Shape: (Nants_data, 1, Nfreqs, Ntimes, Njones) or " "(Nants_data, Nfreqs, Ntimes, Njones) if future_array_shapes=True and " "wide_band=False or (Nants_data, Nspws, Ntimes, Njones) if wide_band=True, " "type = bool." ) self._flag_array = uvp.UVParameter( "flag_array", description=desc, form=("Nants_data", 1, "Nfreqs", "Ntimes", "Njones"), expected_type=bool, ) desc = ( "Array of qualities of calibration solutions. " "The shape depends on cal_type, if the cal_type is 'gain' or " "'unknown', the shape is: (Nants_data, 1, Nfreqs, Ntimes, Njones) or " "(Nants_data, Nfreqs, Ntimes, Njones) if future_array_shapes=True and " "wide_band=False or (Nants_data, Nspws, Ntimes, Njones) if wide_band=True, " "if the cal_type is 'delay', the shape is " "(Nants_data, 1, 1, Ntimes, Njones) or (Nants_data, Nspws, Ntimes, Njones) " "if future_array_shapes=True. The type is float." ) self._quality_array = uvp.UVParameter( "quality_array", description=desc, form=("Nants_data", 1, "Nfreqs", "Ntimes", "Njones"), expected_type=float, ) desc = ( "Orientation of the physical dipole corresponding to what is " 'labelled as the x polarization. Options are "east" ' '(indicating east/west orientation) and "north" (indicating ' "north/south orientation)" ) self._x_orientation = uvp.UVParameter( "x_orientation", description=desc, expected_type=str, acceptable_vals=["east", "north"], ) # --- cal_type parameters --- desc = "cal type parameter. Values are delay, gain or unknown." 
self._cal_type = uvp.UVParameter( "cal_type", form="str", expected_type=str, value="unknown", description=desc, acceptable_vals=["delay", "gain", "unknown"], ) desc = ( 'Required if cal_type = "gain". Array of gains, ' "shape: (Nants_data, 1, Nfreqs, Ntimes, Njones) or " "(Nants_data, Nfreqs, Ntimes, Njones) if future_array_shapes=True, or " "(Nants_data, Nspws, Ntimes, Njones) if wide_band=True, " "type = complex float." ) self._gain_array = uvp.UVParameter( "gain_array", description=desc, required=False, form=("Nants_data", 1, "Nfreqs", "Ntimes", "Njones"), expected_type=complex, ) desc = ( 'Required if cal_type = "delay". Array of delays with units of seconds. ' "Shape: (Nants_data, 1, 1, Ntimes, Njones) or " "(Nants_data, Nspws, Ntimes, Njones) if future_array_shapes=True, " "type=float." ) self._delay_array = uvp.UVParameter( "delay_array", description=desc, required=False, form=("Nants_data", "Nspws", 1, "Ntimes", "Njones"), expected_type=float, ) # --- flexible spectral window information --- desc = ( "Option to construct a 'flexible spectral window', which stores" "all spectral channels across the frequency axis of data_array. " "Allows for spectral windows of variable sizes, and channels of " "varying widths." ) self._flex_spw = uvp.UVParameter( "flex_spw", description=desc, expected_type=bool, value=False, ) desc = ( "Required if flex_spw = True. Maps individual channels along the " "frequency axis to individual spectral windows, as listed in the " "spw_array. Shape (Nfreqs), type = int." ) self._flex_spw_id_array = uvp.UVParameter( "flex_spw_id_array", description=desc, form=("Nfreqs",), expected_type=int, required=False, ) desc = "Flag indicating that this object is using the future array shapes." self._future_array_shapes = uvp.UVParameter( "future_array_shapes", description=desc, expected_type=bool, value=False, ) # --- cal_style parameters --- desc = "Style of calibration. Values are sky or redundant." 
self._cal_style = uvp.UVParameter( "cal_style", form="str", expected_type=str, description=desc, acceptable_vals=["sky", "redundant"], ) desc = ( "Required if cal_style = 'sky'. Short string describing field " "center or dominant source." ) self._sky_field = uvp.UVParameter( "sky_field", form="str", required=False, expected_type=str, description=desc ) desc = 'Required if cal_style = "sky". Name of calibration catalog.' self._sky_catalog = uvp.UVParameter( "sky_catalog", form="str", required=False, expected_type=str, description=desc, ) desc = 'Required if cal_style = "sky". Phase reference antenna.' self._ref_antenna_name = uvp.UVParameter( "ref_antenna_name", form="str", required=False, expected_type=str, description=desc, ) desc = "Number of sources used." self._Nsources = uvp.UVParameter( "Nsources", required=False, expected_type=int, description=desc ) desc = "Range of baselines used for calibration." self._baseline_range = uvp.UVParameter( "baseline_range", form=2, required=False, expected_type=float, description=desc, ) desc = "Name of diffuse model." self._diffuse_model = uvp.UVParameter( "diffuse_model", form="str", required=False, expected_type=str, description=desc, ) # --- truly optional parameters --- desc = ( "The gain scale of the calibration, which indicates the units of the " "calibrated visibilities. For example, Jy or K str." ) self._gain_scale = uvp.UVParameter( "gain_scale", form="str", expected_type=str, description=desc, required=False, ) desc = ( "Array of input flags, True is flagged. shape: " "(Nants_data, 1, Nfreqs, Ntimes, Njones) or " "(Nants_data, Nfreqs, Ntimes, Njones) if future_array_shapes=True, " "type = bool." ) self._input_flag_array = uvp.UVParameter( "input_flag_array", description=desc, required=False, form=("Nants_data", 1, "Nfreqs", "Ntimes", "Njones"), expected_type=bool, ) desc = "Origin (on github for e.g) of calibration software. Url and branch." 
self._git_origin_cal = uvp.UVParameter( "git_origin_cal", form="str", expected_type=str, description=desc, required=False, ) desc = ( "Commit hash of calibration software (from git_origin_cal) used " "to generate solutions." ) self._git_hash_cal = uvp.UVParameter( "git_hash_cal", form="str", expected_type=str, description=desc, required=False, ) desc = "Name of observer who calculated solutions in this file." self._observer = uvp.UVParameter( "observer", form="str", description=desc, expected_type=str, required=False ) desc = ( "Array of qualities of the calibration for entire arrays. " "The shape depends on cal_type, if the cal_type is 'gain' or " "'unknown', the shape is: (1, Nfreqs, Ntimes, Njones) or " "(Nfreqs, Ntimes, Njones) if future_array_shapes=True, " "if the cal_type is 'delay', the shape is (1, 1, Ntimes, Njones) or " "(1, Ntimes, Njones) if future_array_shapes=True, type = float." ) self._total_quality_array = uvp.UVParameter( "total_quality_array", description=desc, form=(1, "Nfreqs", "Ntimes", "Njones"), expected_type=float, required=False, ) desc = ( "Any user supplied extra keywords, type=dict. Keys should be " "8 character or less strings if writing to calfits files. " "Use the special key 'comment' for long multi-line string comments." ) self._extra_keywords = uvp.UVParameter( "extra_keywords", required=False, description=desc, value={}, spoof_val={}, expected_type=dict, ) desc = ( "List of strings containing the unique basenames (not the full path) of " "input files." ) self._filename = uvp.UVParameter( "filename", required=False, description=desc, expected_type=str, ) super(UVCal, self).__init__() def _set_flex_spw(self): """ Set flex_spw to True, and adjust required parameters. This method should not be called directly by users; instead it is called by the file-reading methods to indicate that an object has multiple spectral windows concatenated together across the frequency axis. 
""" # Mark once-optional arrays as now required self.flex_spw = True self._flex_spw_id_array.required = True # Now make sure that chan_width is set to be an array self._channel_width.form = ("Nfreqs",) def _set_wide_band(self, wide_band=True): """ Set the wide_band parameter and adjust required parameters. The wide_band can only be set to True if future_array_shapes is True. This method should not be called directly by users; instead it is called by the file-reading methods to indicate that an object is a wide-band calibration solution which supports gain or delay values per spectral window. """ if wide_band: assert ( self.future_array_shapes ), "future_array_shapes must be True to set wide_band to True." elif self.future_array_shapes: assert self.cal_type != "delay", ( "delay objects cannot have wide_band=False if future_array_shapes is " "True" ) self.wide_band = wide_band if wide_band: self._freq_array.required = False self._channel_width.required = False self._freq_range.required = True data_shape_params = [ "gain_array", "delay_array", "flag_array", "input_flag_array", "quality_array", ] data_form = ("Nants_data", "Nspws", "Ntimes", "Njones") tot_qual_form = ("Nspws", "Ntimes", "Njones") for param_name in self._data_params: if param_name in data_shape_params: getattr(self, "_" + param_name).form = data_form elif param_name == "total_quality_array": getattr(self, "_" + param_name).form = tot_qual_form else: self._freq_array.required = True self._channel_width.required = True self._freq_range.required = False if self.future_array_shapes: # can only get here if not a delay solution data_shape_params = [ "gain_array", "flag_array", "input_flag_array", "quality_array", ] data_form = ("Nants_data", "Nfreqs", "Ntimes", "Njones") tot_qual_form = ("Nfreqs", "Ntimes", "Njones") for param_name in self._data_params: if param_name in data_shape_params: getattr(self, "_" + param_name).form = data_form elif param_name == "total_quality_array": getattr(self, "_" + 
param_name).form = tot_qual_form def _set_gain(self): """Set cal_type to 'gain' and adjust required parameters.""" self.cal_type = "gain" self._gain_array.required = True self._delay_array.required = False self._freq_range.required = False self._freq_array.required = True self._channel_width.required = True self._quality_array.form = self._gain_array.form self._total_quality_array.form = self._gain_array.form[1:] def _set_delay(self): """Set cal_type to 'delay' and adjust required parameters.""" self.cal_type = "delay" self._gain_array.required = False self._delay_array.required = True self._freq_range.required = True self._freq_array.required = False self._channel_width.required = False self._quality_array.form = self._delay_array.form self._total_quality_array.form = self._delay_array.form[1:] def _set_unknown_cal_type(self): """Set cal_type to 'unknown' and adjust required parameters.""" self.cal_type = "unknown" self._gain_array.required = False self._delay_array.required = False self._freq_range.required = False self._freq_array.required = True self._quality_array.form = self._gain_array.form self._total_quality_array.form = self._gain_array.form[1:] def _set_sky(self): """Set cal_style to 'sky' and adjust required parameters.""" self.cal_style = "sky" self._sky_field.required = True self._sky_catalog.required = True self._ref_antenna_name.required = True def _set_redundant(self): """Set cal_style to 'redundant' and adjust required parameters.""" self.cal_style = "redundant" self._sky_field.required = False self._sky_catalog.required = False self._ref_antenna_name.required = False @property def _data_params(self): """List of strings giving the data-like parameters.""" return [ "gain_array", "delay_array", "flag_array", "quality_array", "total_quality_array", "input_flag_array", ] @property def _required_data_params(self): """List of strings giving the required data-like parameters.""" cal_type = self._cal_type.value if cal_type is None: cal_type = "unknown" if 
cal_type == "gain": return ["gain_array", "flag_array", "quality_array"] elif cal_type == "delay": return ["delay_array", "flag_array", "quality_array"] else: return ["flag_array", "quality_array"] @property def data_like_parameters(self): """Iterate defined parameters which are data-like (not metadata-like).""" for key in self._data_params: if hasattr(self, key): yield getattr(self, key) @property def metadata_only(self): """ Property that determines whether this is a metadata only object. An object is metadata only if data_array, nsample_array and flag_array are all None. """ metadata_only = all(d is None for d in self.data_like_parameters) for param_name in self._required_data_params: getattr(self, "_" + param_name).required = not metadata_only return metadata_only def _set_future_array_shapes(self): """ Set future_array_shapes to True and adjust required parameters. This method should not be called directly by users; instead it is called by file-reading methods and `use_future_array_shapes` to indicate the `future_array_shapes` is True and define expected parameter shapes. 
""" self.future_array_shapes = True self._freq_array.form = ("Nfreqs",) self._channel_width.form = ("Nfreqs",) self._integration_time.form = ("Ntimes",) self._freq_range.form = ("Nspws", 2) data_shape_params = [ "gain_array", "flag_array", "input_flag_array", "quality_array", ] if self.cal_type == "delay": self._set_wide_band() data_shape_params.append("delay_array") if self.wide_band: data_form = ("Nants_data", "Nspws", "Ntimes", "Njones") tot_qual_form = ("Nspws", "Ntimes", "Njones") else: data_form = ("Nants_data", "Nfreqs", "Ntimes", "Njones") tot_qual_form = ("Nfreqs", "Ntimes", "Njones") for param_name in self._data_params: if param_name in data_shape_params: getattr(self, "_" + param_name).form = data_form if param_name == "delay_array": # only get here if cal_type is not "delay" self._delay_array.form = ("Nants_data", "Nspws", "Ntimes", "Njones") elif param_name == "total_quality_array": getattr(self, "_" + param_name).form = tot_qual_form def use_future_array_shapes(self): """ Change the array shapes of this object to match the planned future shapes. This method sets allows users to convert to the planned array shapes changes before the changes go into effect. This method sets the `future_array_shapes` parameter on this object to True. 
""" self._set_future_array_shapes() if not self.metadata_only: # remove the length-1 spw axis for all data-like parameters # except the delay array, which should have the length-1 freq axis removed for param_name in self._data_params: param_value = getattr(self, param_name) if param_value is None: continue if param_name == "delay_array": setattr(self, param_name, (param_value)[:, :, 0, :, :]) elif param_name == "total_quality_array": setattr(self, param_name, (param_value)[0, :, :, :]) else: setattr(self, param_name, (param_value)[:, 0, :, :, :]) if self.cal_type == "delay": warnings.warn( "When converting a delay-style cal to future array shapes the " "flag_array (and input_flag_array if it exists) must drop the " "frequency axis so that it will be the same shape as the " "delay_array. This will be done using the " "`pyuvdata.utils.and_collapse` function which will only flag an " "antpol-time if all of the frequecies are flagged for that " "antpol-time. To preserve the full flag information, create a " "UVFlag object from this cal object before this operation. " "In the future, these flag arrays will be removed from UVCal " "objects in favor of using UVFlag objects." 
) self.flag_array = uvutils.and_collapse(self.flag_array, axis=1)[ :, np.newaxis, :, : ] if self.input_flag_array is not None: self.input_flag_array = uvutils.and_collapse( self.input_flag_array, axis=1 )[:, np.newaxis, :, :] # remove the length-1 spw axis for the freq_array if self.freq_array is not None: self.freq_array = self.freq_array[0, :] if self.freq_range is not None: # force freq_range to have an spw axis self.freq_range = np.repeat( (np.asarray(self.freq_range))[np.newaxis, :], self.Nspws, axis=0 ) # force integration_time to be an array of length Ntimes self.integration_time = ( np.zeros(self.Ntimes, dtype=np.float64) + self.integration_time ) if not self.flex_spw and self.channel_width is not None: # make channel_width be an array of length Nfreqs rather than a single value # (not needed with flexible spws because this is already done in that case) self.channel_width = ( np.zeros(self.Nfreqs, dtype=np.float64) + self.channel_width ) def use_current_array_shapes(self): """ Change the array shapes of this object to match the current shapes. This method sets allows users to convert back to the current array shapes. This method sets the `future_array_shapes` parameter on this object to False. """ if self.Nspws > 1: raise ValueError("Cannot use current array shapes if Nspws > 1.") if self.cal_type != "delay" and self.wide_band: raise ValueError( "Cannot use current array shapes if cal_style is not 'delay' and " "wide_band is True." ) if not self.flex_spw: if self.channel_width is not None: unique_channel_widths = np.unique(self.channel_width) if unique_channel_widths.size > 1: raise ValueError( "channel_width parameter contains multiple unique values, but " "only one spectral window is present. Cannot collapse " "channel_width to a single value." 
) self._channel_width.form = () self.channel_width = unique_channel_widths[0] unique_integration_times = np.unique(self.integration_time) if unique_integration_times.size > 1: raise ValueError( "integration_time parameter contains multiple unique values. " "Cannot collapse integration_time to a single value." ) self._integration_time.form = () self.integration_time = unique_integration_times[0] self.future_array_shapes = False self.wide_band = False gain_shape_params = ["gain_array", "flag_array", "input_flag_array"] delay_shape_params = ["delay_array"] if self.cal_type == "delay": delay_shape_params.append("quality_array") else: gain_shape_params.append("quality_array") for param_name in self._data_params: if param_name in gain_shape_params: getattr(self, "_" + param_name).form = ( "Nants_data", 1, "Nfreqs", "Ntimes", "Njones", ) elif param_name in delay_shape_params: getattr(self, "_" + param_name).form = ( "Nants_data", "Nspws", 1, "Ntimes", "Njones", ) elif param_name == "total_quality_array": if self.cal_type == "delay": getattr(self, "_" + param_name).form = (1, 1, "Ntimes", "Njones") else: getattr(self, "_" + param_name).form = ( 1, "Nfreqs", "Ntimes", "Njones", ) if not self.metadata_only: for param_name in self._data_params: param_value = getattr(self, param_name) if param_value is None: continue if param_name == "delay_array": setattr( self, param_name, (getattr(self, param_name))[:, :, np.newaxis, :, :], ) elif param_name == "total_quality_array": setattr( self, param_name, (getattr(self, param_name))[np.newaxis, :, :, :], ) else: setattr( self, param_name, (getattr(self, param_name))[:, np.newaxis, :, :, :], ) if self.cal_type == "delay": # make the flag array have a frequency axis again self.flag_array = np.repeat(self.flag_array, self.Nfreqs, axis=2) if self.input_flag_array is not None: self.input_flag_array = np.repeat( self.input_flag_array, self.Nfreqs, axis=2 ) self._freq_array.form = (1, "Nfreqs") self.freq_array = self.freq_array[np.newaxis, :] 
self.freq_range = self.freq_range[0, :].tolist() self._freq_range.form = (2,) def set_telescope_params(self, overwrite=False): """ Set telescope related parameters. If the telescope_name is in the known_telescopes, set the telescope location to the value for the known telescope. Also set the antenna positions if they are not set on the object and are available for the telescope. Parameters ---------- overwrite : bool Option to overwrite existing telescope-associated parameters with the values from the known telescope. Raises ------ ValueError if the telescope_name is not in known telescopes """ telescope_obj = uvtel.get_telescope(self.telescope_name) if telescope_obj is not False: if self.telescope_location is None or overwrite is True: warnings.warn( "telescope_location is not set. Using known values " f"for {telescope_obj.telescope_name}." ) self.telescope_location = telescope_obj.telescope_location if telescope_obj.antenna_positions is not None and ( self.antenna_positions is None or overwrite is True ): ant_inds = [] telescope_ant_inds = [] # first try to match using names only for index, antname in enumerate(self.antenna_names): if antname in telescope_obj.antenna_names: ant_inds.append(index) telescope_ant_inds.append( np.where(telescope_obj.antenna_names == antname)[0][0] ) # next try using numbers if len(ant_inds) != self.Nants_telescope: for index, antnum in enumerate(self.antenna_numbers): # only update if not already found if ( index not in ant_inds and antnum in telescope_obj.antenna_numbers ): this_ant_ind = np.where( telescope_obj.antenna_numbers == antnum )[0][0] # make sure we don't already have this antenna associated # with another antenna if this_ant_ind not in telescope_ant_inds: ant_inds.append(index) telescope_ant_inds.append(this_ant_ind) if len(ant_inds) != self.Nants_telescope: warnings.warn( "Not all antennas have positions in the telescope object. " "Not setting antenna_positions." ) else: warnings.warn( "antenna_positions is not set. 
    def set_telescope_params(self, overwrite=False):
        """
        Set telescope related parameters.

        If the telescope_name is in the known_telescopes, set the telescope
        location to the value for the known telescope. Also set the antenna positions
        if they are not set on the object and are available for the telescope.

        Parameters
        ----------
        overwrite : bool
            Option to overwrite existing telescope-associated parameters with
            the values from the known telescope.

        Raises
        ------
        ValueError
            if the telescope_name is not in known telescopes
        """
        # uvtel.get_telescope returns False when the telescope is unknown
        telescope_obj = uvtel.get_telescope(self.telescope_name)
        if telescope_obj is not False:
            if self.telescope_location is None or overwrite is True:
                warnings.warn(
                    "telescope_location is not set. Using known values "
                    f"for {telescope_obj.telescope_name}."
                )
                self.telescope_location = telescope_obj.telescope_location

            if telescope_obj.antenna_positions is not None and (
                self.antenna_positions is None or overwrite is True
            ):
                # build parallel index lists: positions in self and in the
                # known-telescope object
                ant_inds = []
                telescope_ant_inds = []
                # first try to match using names only
                for index, antname in enumerate(self.antenna_names):
                    if antname in telescope_obj.antenna_names:
                        ant_inds.append(index)
                        telescope_ant_inds.append(
                            np.where(telescope_obj.antenna_names == antname)[0][0]
                        )
                # next try using numbers
                if len(ant_inds) != self.Nants_telescope:
                    for index, antnum in enumerate(self.antenna_numbers):
                        # only update if not already found
                        if (
                            index not in ant_inds
                            and antnum in telescope_obj.antenna_numbers
                        ):
                            this_ant_ind = np.where(
                                telescope_obj.antenna_numbers == antnum
                            )[0][0]
                            # make sure we don't already have this antenna associated
                            # with another antenna
                            if this_ant_ind not in telescope_ant_inds:
                                ant_inds.append(index)
                                telescope_ant_inds.append(this_ant_ind)
                if len(ant_inds) != self.Nants_telescope:
                    warnings.warn(
                        "Not all antennas have positions in the telescope object. "
                        "Not setting antenna_positions."
                    )
                else:
                    warnings.warn(
                        "antenna_positions is not set. Using known values "
                        f"for {telescope_obj.telescope_name}."
                    )
                    telescope_ant_inds = np.array(telescope_ant_inds)
                    self.antenna_positions = telescope_obj.antenna_positions[
                        telescope_ant_inds, :
                    ]
        else:
            raise ValueError(
                f"Telescope {self.telescope_name} is not in known_telescopes."
            )

    def _set_lsts_helper(self):
        """Compute LSTs from time_array and set lst_array (synchronous path)."""
        latitude, longitude, altitude = self.telescope_location_lat_lon_alt_degrees
        # compute each unique time only once, then broadcast back to the
        # original time_array ordering via the inverse indices
        unique_times, inverse_inds = np.unique(self.time_array, return_inverse=True)
        unique_lst_array = uvutils.get_lst_for_time(
            unique_times, latitude, longitude, altitude
        )
        self.lst_array = unique_lst_array[inverse_inds]
        return

    def set_lsts_from_time_array(self, background=False):
        """Set the lst_array based from the time_array.

        Parameters
        ----------
        background : bool, False
            When set to True, start the calculation on a threading.Thread in the
            background and return the thread to the user.

        Returns
        -------
        proc : None or threading.Thread instance
            When background is set to True, a thread is returned which must be
            joined before the lst_array exists on the UVCal object.
        """
        if not background:
            self._set_lsts_helper()
            return
        else:
            proc = threading.Thread(target=self._set_lsts_helper)
            proc.start()
            return proc

    def _check_flex_spw_contiguous(self):
        """
        Check if the spectral windows are contiguous for flex_spw datasets.

        This checks the flex_spw_id_array to make sure that all channels for each
        spectral window are together in one block, versus being interspersed (e.g.,
        channel #1 and #3 is in spw #1, channels #2 and #4 are in spw #2).
        """
        if self.flex_spw:
            # delegates to the shared utility; it raises on non-contiguous spws
            uvutils._check_flex_spw_contiguous(self.spw_array, self.flex_spw_id_array)
        else:
            # If this isn't a flex_spw data set, then there is only 1 spectral window,
            # which means that the check always passes
            pass

        return True
    def _check_freq_spacing(self, raise_errors=True):
        """
        Check if frequencies are evenly spaced and separated by their channel width.

        This is a requirement for writing calfits files.

        Parameters
        ----------
        raise_errors : bool
            Option to raise errors if the various checks do not pass.

        Returns
        -------
        spacing_error : bool
            Flag that channel spacings or channel widths are not equal.
        chanwidth_error : bool
            Flag that channel spacing does not match channel width.
        """
        # a single frequency (or none at all) trivially passes both checks
        if self.freq_array is None and self.Nfreqs == 1:
            return False, False
        return uvutils._check_freq_spacing(
            self.freq_array,
            self._freq_array.tols,
            self.channel_width,
            self._channel_width.tols,
            self.flex_spw,
            self.future_array_shapes,
            self.spw_array,
            self.flex_spw_id_array,
            raise_errors=raise_errors,
        )

    def check(
        self, check_extra=True, run_check_acceptability=True, check_freq_spacing=False
    ):
        """
        Add some extra checks on top of checks on UVBase class.

        Check that required parameters exist. Check that parameters have
        appropriate shapes and optionally that the values are acceptable.

        Parameters
        ----------
        check_extra : bool
            If true, check all parameters, otherwise only check required parameters.
        run_check_acceptability : bool
            Option to check if values in parameters are acceptable.
        check_freq_spacing : bool
            Option to check if frequencies are evenly spaced and the spacing is
            equal to their channel_width. This is not required for UVCal
            objects in general but is required to write to calfits files.

        Returns
        -------
        bool
            True if check passes

        Raises
        ------
        ValueError
            if parameter shapes or types are wrong or do not have acceptable
            values (if run_check_acceptability is True)
        """
        # Make sure requirements are set properly for cal_style
        if self.cal_style == "sky":
            self._set_sky()
        elif self.cal_style == "redundant":
            self._set_redundant()

        # If the telescope location is not set issue a deprecation warning
        if self.telescope_location is None:
            warnings.warn(
                "The telescope_location is not set. It will be a required "
                "parameter starting in pyuvdata version 2.3",
                category=DeprecationWarning,
            )

        # If the antenna positions parameter is not set issue a deprecation warning
        if self.antenna_positions is None:
            warnings.warn(
                "The antenna_positions parameter is not set. It will be a required "
                "parameter starting in pyuvdata version 2.3",
                category=DeprecationWarning,
            )

        # If the lst_array is not set issue a deprecation warning
        if self.lst_array is None:
            warnings.warn(
                "The lst_array is not set. It will be a required "
                "parameter starting in pyuvdata version 2.3",
                category=DeprecationWarning,
            )

        # if wide_band is True, Nfreqs must be 1.
        if self.wide_band:
            if self.Nfreqs != 1:
                warnings.warn(
                    "Nfreqs will be required to be 1 for wide_band cals (including "
                    "all delay cals) starting in version 3.0",
                    category=DeprecationWarning,
                )

        # first run the basic check from UVBase
        super(UVCal, self).check(
            check_extra=check_extra, run_check_acceptability=run_check_acceptability
        )

        # require that all entries in ant_array exist in antenna_numbers
        if not all(ant in self.antenna_numbers for ant in self.ant_array):
            raise ValueError("All antennas in ant_array must be in antenna_numbers.")

        # issue warning if extra_keywords keys are longer than 8 characters
        for key in self.extra_keywords.keys():
            if len(key) > 8:
                warnings.warn(
                    "key {key} in extra_keywords is longer than 8 "
                    "characters. It will be truncated to 8 if written "
                    "to a calfits file format.".format(key=key)
                )

        # issue warning if extra_keywords values are lists, arrays or dicts
        for key, value in self.extra_keywords.items():
            if isinstance(value, (list, dict, np.ndarray)):
                warnings.warn(
                    "{key} in extra_keywords is a list, array or dict, "
                    "which will raise an error when writing calfits "
                    "files".format(key=key)
                )

        if check_freq_spacing:
            self._check_freq_spacing()

        return True
Parameters ---------- metadata_only : bool If True, only copy the metadata of the object. Returns ------- UVCal Copy of self. """ if not metadata_only: return super(UVCal, self).copy() else: uv = UVCal() # include all attributes, not just UVParameter ones. for attr in self.__iter__(uvparams_only=False): # skip properties if isinstance(getattr(type(self), attr, None), property): continue # skip data like parameters # parameter names have a leading underscore we want to ignore if attr.lstrip("_") in self._data_params: continue setattr(uv, attr, copy.deepcopy(getattr(self, attr))) return uv def _has_key(self, antnum=None, jpol=None): """ Check if this UVCal has the requested antenna or polarization. Parameters ---------- antnum : int Antenna number to check. jpol : str or int Antenna polarization string or integer to check. Returns ------- bool Boolean indicator of whether the antenna and/or antenna polarization is present on this object. """ if antnum is not None: if antnum not in self.ant_array: return False if jpol is not None: if isinstance(jpol, (str, np.str_)): jpol = uvutils.jstr2num(jpol, x_orientation=self.x_orientation) if jpol not in self.jones_array: return False return True def ant2ind(self, antnum): """ Get the index in data arrays for an antenna number. Parameters ---------- antnum : int Antenna number to get index for. Returns ------- int Antenna index in data arrays. """ if not self._has_key(antnum=antnum): raise ValueError("{} not found in ant_array".format(antnum)) return np.argmin(np.abs(self.ant_array - antnum)) def jpol2ind(self, jpol): """ Get the index in data arrays for an antenna polarization. Parameters ---------- jpol : int or str Antenna polarization to get index for. 
Returns ------- int Antenna polarization index in data arrays """ if isinstance(jpol, (str, np.str_)): jpol = uvutils.jstr2num(jpol, x_orientation=self.x_orientation) if not self._has_key(jpol=jpol): raise ValueError("{} not found in jones_array".format(jpol)) return np.argmin(np.abs(self.jones_array - jpol)) def _slice_array(self, key, data_array, squeeze_pol=True): """ Slice a data array given a data key. Parameters ---------- key : int or length 2 tuple of ints or int and str Antenna or antenna and polarization to get slice for. If it's a length 2 tuple, the second value must be an antenna polarization int or string parsable by jpol2ind. data_array : :class: numpy ndarray Array to get slice of. Must have the shape of the gain_array or delay_array. squeeze_pol : bool Option to squeeze pol dimension if possible. Returns ------- :class: numpy ndarray Slice of the data_array for the key. """ key = uvutils._get_iterable(key) if len(key) == 1: # interpret as a single antenna if self.future_array_shapes: output = data_array[self.ant2ind(key[0]), :, :, :] else: output = data_array[self.ant2ind(key[0]), 0, :, :, :] if squeeze_pol and output.shape[-1] == 1: output = output[:, :, 0] return output elif len(key) == 2: # interpret as an antenna-pol pair if self.future_array_shapes: output = data_array[self.ant2ind(key[0]), :, :, self.jpol2ind(key[1])] else: output = data_array[ self.ant2ind(key[0]), 0, :, :, self.jpol2ind(key[1]) ] return output def _parse_key(self, ant, jpol=None): """ Parse key inputs and return a standard antenna-polarization key. Parameters ---------- ant : int or length 2 tuple of ints or int and str Antenna or antenna and polarization to get key for. If it's a length 2 tuple, the second value must be an antenna polarization int or string parsable by jpol2ind. jpol : int or str Antenna polarization int or string parsable by jpol2ind. Only used if `ant` is an integer. Returns ------- tuple Standard key tuple. 
""" if isinstance(ant, (list, tuple)): # interpret ant as (ant,) or (ant, jpol) key = tuple(ant) elif isinstance(ant, (int, np.integer)): # interpret ant as antenna number key = (ant,) # add jpol if fed if jpol is not None: key += (jpol,) return key def get_gains(self, ant, jpol=None, squeeze_pol=True): """ Get the gain associated with an antenna and/or polarization. Parameters ---------- ant : int or length 2 tuple of ints or int and str Antenna or antenna and polarization to get gains for. If it's a length 2 tuple, the second value must be an antenna polarization int or string parsable by jpol2ind. jpol : int or str, optional Instrumental polarization to request. Ex. 'Jxx' squeeze_pol : bool Option to squeeze pol dimension if possible. Returns ------- complex ndarray Gain solution of shape (Nfreqs, Ntimes, Njones) or (Nfreqs, Ntimes) if jpol is set or if squeeze_pol is True and Njones = 1. """ if self.cal_type != "gain": raise ValueError("cal_type must be 'gain' for get_gains() method") return self._slice_array( self._parse_key(ant, jpol=jpol), self.gain_array, squeeze_pol=squeeze_pol ) def get_flags(self, ant, jpol=None, squeeze_pol=True): """ Get the flags associated with an antenna and/or polarization. Parameters ---------- ant : int or length 2 tuple of ints or int and str Antenna or antenna and polarization to get gains for. If it's a length 2 tuple, the second value must be an antenna polarization int or string parsable by jpol2ind. jpol : int or str, optional Instrumental polarization to request. Ex. 'Jxx' squeeze_pol : bool Option to squeeze pol dimension if possible. Returns ------- boolean ndarray Flags of shape (Nfreqs, Ntimes, Njones) or (Nfreqs, Ntimes) if jpol is set or if squeeze_pol is True and Njones = 1. """ return self._slice_array( self._parse_key(ant, jpol=jpol), self.flag_array, squeeze_pol=squeeze_pol ) def get_quality(self, ant, jpol=None, squeeze_pol=True): """ Get the qualities associated with an antenna and/or polarization. 
    def get_quality(self, ant, jpol=None, squeeze_pol=True):
        """
        Get the qualities associated with an antenna and/or polarization.

        Parameters
        ----------
        ant : int or length 2 tuple of ints or int and str
            Antenna or antenna and polarization to get gains for. If it's a length
            2 tuple, the second value must be an antenna polarization int or string
            parsable by jpol2ind.
        jpol : int or str, optional
            Instrumental polarization to request. Ex. 'Jxx'
        squeeze_pol : bool
            Option to squeeze pol dimension if possible.

        Returns
        -------
        float ndarray
            Qualities of shape (Nfreqs, Ntimes, Njones) or (Nfreqs, Ntimes)
            if jpol is not None or if squeeze_pol is True and Njones = 1.
        """
        return self._slice_array(
            self._parse_key(ant, jpol=jpol), self.quality_array, squeeze_pol=squeeze_pol
        )

    def convert_to_gain(
        self,
        freq_array=None,
        channel_width=None,
        delay_convention="minus",
        run_check=True,
        check_extra=True,
        run_check_acceptability=True,
    ):
        """
        Convert non-gain cal_types to gains.

        For the delay cal_type the gain is calculated as:
            gain = 1 * exp((+/-) * 2 * pi * j * delay * frequency)
        where the (+/-) is dictated by the delay_convention

        Parameters
        ----------
        delay_convention : str
            Exponent sign to use in the conversion, can be "plus" or "minus".
        freq_array : array of float
            Frequencies to convert to gain at, units Hz. Not providing a freq_array is
            deprecated, but until version 3.0, if it is not provided and `freq_array`
            exists on the object, `freq_array` will be used.
        channel_width : float or array of float
            Channel widths for the new frequencies, units Hz. With the future
            array shapes this must be an array shaped like `freq_array`,
            otherwise a scalar. Like `freq_array`, falls back to the value on
            the object (with a deprecation warning) when not provided.
        run_check : bool
            Option to check for the existence and proper shapes of parameters after
            converting.
        check_extra : bool
            Option to check optional parameters as well as required ones.
        run_check_acceptability : bool
            Option to check acceptable range of the values of parameters after
            converting.
        """
        if self.cal_type == "gain":
            raise ValueError("The data is already a gain cal_type.")
        elif self.cal_type != "delay":
            raise ValueError("cal_type is unknown, cannot convert to gain")
        if self.Nspws > 1:
            raise ValueError(
                "convert_to_gain currently does not support multiple spectral windows"
            )

        # conversion sign applied in the exponent below
        if delay_convention == "minus":
            conv = -1
        elif delay_convention == "plus":
            conv = 1
        else:
            raise ValueError("delay_convention can only be 'minus' or 'plus'")

        if freq_array is None or channel_width is None:
            # deprecated fallback: use the arrays already on the object
            if self.freq_array is None or self.channel_width is None:
                raise ValueError(
                    "freq_array and channel_width must be provided if there is no "
                    "freq_array or no channel_width on the object."
                )
            warnings.warn(
                "In version 3.0 and later freq_array and channel_width will be "
                "required parameters. Using the freq_array and channel_width on the "
                "object.",
                category=DeprecationWarning,
            )
            if self.future_array_shapes:
                freq_array_use = self.freq_array
                channel_width = self.channel_width
            else:
                freq_array_use = self.freq_array[0, :]
                channel_width = self.channel_width
            Nfreqs_use = self.Nfreqs
        else:
            if freq_array.ndim > 1:
                raise ValueError("freq_array parameter must be a one dimensional array")
            if self.future_array_shapes:
                # future shapes require a per-channel width array
                if (
                    not isinstance(channel_width, np.ndarray)
                    or channel_width.shape != freq_array.shape
                ):
                    raise ValueError(
                        "This object is using the future array shapes, so the "
                        "channel_width parameter be an array shaped like the freq_array"
                    )
            else:
                # current shapes require a scalar channel width
                if isinstance(channel_width, np.ndarray):
                    if channel_width.size > 1:
                        raise ValueError(
                            "This object is using the current array shapes, so the "
                            "channel_width parameter must be a scalar value"
                        )
                    channel_width = channel_width[0]

            if self.freq_range is not None:
                # Already errored if more than one spw, so just use the first one here
                if isinstance(self.freq_range, list):
                    freq_range_use = np.asarray(self.freq_range)
                else:
                    freq_range_use = self.freq_range[0, :]
                if np.any(freq_array < freq_range_use[0]) or np.any(
                    freq_array > freq_range_use[1]
                ):
                    raise ValueError(
                        "freq_array contains values outside the freq_range."
                    )
            freq_array_use = freq_array
            Nfreqs_use = freq_array.size

        self.history += " Converted from delays to gains using pyuvdata."

        if self.future_array_shapes:
            phase_array = np.zeros(
                (self.Nants_data, Nfreqs_use, self.Ntimes, self.Njones)
            )
        else:
            phase_array = np.zeros(
                (self.Nants_data, 1, Nfreqs_use, self.Ntimes, self.Njones)
            )

        # outer product of delays and frequencies gives the per-channel phase
        if self.future_array_shapes:
            temp = (
                conv
                * 2
                * np.pi
                * np.dot(
                    self.delay_array[:, 0, :, :, np.newaxis],
                    freq_array_use[np.newaxis, :],
                )
            )
            temp = np.transpose(temp, (0, 3, 1, 2))
            phase_array = temp
        else:
            temp = (
                conv
                * 2
                * np.pi
                * np.dot(
                    self.delay_array[:, 0, 0, :, :, np.newaxis],
                    freq_array_use[np.newaxis, :],
                )
            )
            temp = np.transpose(temp, (0, 3, 1, 2))
            phase_array[:, 0, :, :, :] = temp

        gain_array = np.exp(1j * phase_array)

        if self.future_array_shapes:
            freq_axis = 1
        else:
            freq_axis = 2
        # broadcast the (single) delay quality to every new frequency
        new_quality = np.repeat(self.quality_array, Nfreqs_use, axis=freq_axis)

        self._set_gain()
        self._set_wide_band(wide_band=False)
        self.channel_width = channel_width
        self.gain_array = gain_array
        self.quality_array = new_quality
        self.delay_array = None

        if self.Nfreqs > 1 and not self.future_array_shapes:
            if (
                self.freq_array is None
                or self.Nfreqs != Nfreqs_use
                or not np.allclose(
                    self.freq_array,
                    freq_array_use,
                    rtol=self._freq_array.tols[0],
                    atol=self._freq_array.tols[1],
                )
            ):
                # existing per-frequency flags don't match the new frequencies:
                # collapse them (lossy) and re-broadcast
                warnings.warn(
                    "Existing flag array has a frequency axis of length > 1 but "
                    "frequencies do not match freq_array. The existing flag array "
                    "(and input_flag_array if it exists) will be collapsed using "
                    "the `pyuvdata.utils.and_collapse` function which will only "
                    "flag an antpol-time if all of the frequecies are flagged for "
                    "that antpol-time. Then it will be broadcast to all the new "
                    "frequencies. To preserve the original flag information, "
                    "create a UVFlag object from this cal object before this "
                    "operation. In the future, these flag arrays will be removed from "
                    "UVCal objects in favor of using UVFlag objects."
                )
                new_flag_array = np.expand_dims(
                    uvutils.and_collapse(self.flag_array, axis=freq_axis),
                    axis=freq_axis,
                )
                self.flag_array = np.repeat(new_flag_array, Nfreqs_use, axis=freq_axis)
                if self.input_flag_array is not None:
                    new_input_flag_array = np.expand_dims(
                        uvutils.and_collapse(self.input_flag_array, axis=freq_axis),
                        axis=freq_axis,
                    )
                    self.input_flag_array = np.repeat(
                        new_input_flag_array, Nfreqs_use, axis=freq_axis
                    )
        else:
            # flags have a length-1 frequency axis: just broadcast them
            new_flag_array = np.repeat(self.flag_array, Nfreqs_use, axis=freq_axis)
            self.flag_array = new_flag_array
            if self.input_flag_array is not None:
                new_input_flag_array = np.repeat(
                    self.input_flag_array, Nfreqs_use, axis=freq_axis
                )
                self.input_flag_array = new_input_flag_array

        if self.total_quality_array is not None:
            # total_quality_array has no antenna axis, so its frequency axis
            # is one position earlier
            if self.future_array_shapes:
                freq_axis = 0
            else:
                freq_axis = 1
            new_total_quality_array = np.repeat(
                self.total_quality_array, Nfreqs_use, axis=freq_axis
            )
            self.total_quality_array = new_total_quality_array

        if self.future_array_shapes:
            self.freq_array = freq_array_use
        else:
            self.freq_array = freq_array_use[np.newaxis, :]
        self.Nfreqs = Nfreqs_use

        # check if object is self-consistent
        if run_check:
            self.check(
                check_extra=check_extra,
                run_check_acceptability=run_check_acceptability,
            )
If False and if the histories for the two objects are different, the combined object will have the history of the first object and only the parts of the second object history that are unique (this is done word by word and can result in hard to interpret histories). run_check : bool Option to check for the existence and proper shapes of parameters after combining objects. check_extra : bool Option to check optional parameters as well as required ones. run_check_acceptability : bool Option to check acceptable range of the values of parameters after combining objects. inplace : bool Option to overwrite self as we go, otherwise create a third object as the sum of the two. """ if inplace: this = self else: this = self.copy() # Check that both objects are UVCal and valid this.check( check_extra=check_extra, run_check_acceptability=run_check_acceptability ) if not issubclass(other.__class__, this.__class__): if not issubclass(this.__class__, other.__class__): raise ValueError( "Only UVCal (or subclass) objects can be added to " "a UVCal (or subclass) object" ) other.check( check_extra=check_extra, run_check_acceptability=run_check_acceptability ) # Check to make sure that both objects are consistent w/ use of flex_spw if this.flex_spw != other.flex_spw: raise ValueError( "To combine these data, flex_spw must be set to the same " "value (True or False) for both objects." ) # check that both objects have the same array shapes if this.future_array_shapes != other.future_array_shapes: raise ValueError( "Both objects must have the same `future_array_shapes` parameter. " "Use the `use_future_array_shapes` or `use_current_array_shapes` " "methods to convert them." 
) # Check objects are compatible compatibility_params = [ "_cal_type", "_telescope_name", "_gain_convention", "_x_orientation", "_cal_style", "_ref_antenna_name", ] if not this.future_array_shapes: compatibility_params.append("_integration_time") if not this.flex_spw: compatibility_params.append("_channel_width") if this.cal_type == "delay": compatibility_params.append("_freq_range") warning_params = [ "_observer", "_git_hash_cal", "_sky_field", "_sky_catalog", "_Nsources", "_baseline_range", "_diffuse_model", ] for a in compatibility_params: if getattr(this, a) != getattr(other, a): msg = ( "UVParameter " + a[1:] + " does not match. Cannot combine objects." ) raise ValueError(msg) for a in warning_params: if getattr(this, a) != getattr(other, a): msg = "UVParameter " + a[1:] + " does not match. Combining anyway." warnings.warn(msg) # Build up history string history_update_string = " Combined data along " n_axes = 0 # Check we don't have overlapping data both_jones = np.intersect1d(this.jones_array, other.jones_array) both_times = np.intersect1d(this.time_array, other.time_array) if this.cal_type != "delay": # With flexible spectral window, the handling here becomes a bit funky, # because we are allowed to have channels with the same frequency *if* they # belong to different spectral windows (one real-life example: might want # to preserve guard bands in the correlator, which can have overlaping RF # frequency channels) if this.flex_spw: this_freq_ind = np.array([], dtype=np.int64) other_freq_ind = np.array([], dtype=np.int64) both_freq = np.array([], dtype=float) both_spw = np.intersect1d(this.spw_array, other.spw_array) for idx in both_spw: this_mask = np.where(this.flex_spw_id_array == idx)[0] other_mask = np.where(other.flex_spw_id_array == idx)[0] if this.future_array_shapes: both_spw_freq, this_spw_ind, other_spw_ind = np.intersect1d( this.freq_array[this_mask], other.freq_array[other_mask], return_indices=True, ) else: both_spw_freq, this_spw_ind, 
other_spw_ind = np.intersect1d( this.freq_array[0, this_mask], other.freq_array[0, other_mask], return_indices=True, ) this_freq_ind = np.append(this_freq_ind, this_mask[this_spw_ind]) other_freq_ind = np.append( other_freq_ind, other_mask[other_spw_ind] ) both_freq = np.append(both_freq, both_spw_freq) else: if this.future_array_shapes: both_freq, this_freq_ind, other_freq_ind = np.intersect1d( this.freq_array, other.freq_array, return_indices=True ) else: both_freq, this_freq_ind, other_freq_ind = np.intersect1d( this.freq_array[0, :], other.freq_array[0, :], return_indices=True, ) else: # delay type cal # Make a non-empty array so we raise an error if other data is duplicated both_freq = [0] both_ants = np.intersect1d(this.ant_array, other.ant_array) if len(both_jones) > 0: if len(both_times) > 0: if len(both_freq) > 0: if len(both_ants) > 0: raise ValueError( "These objects have overlapping data and" " cannot be combined." ) # Update filename parameter this.filename = uvutils._combine_filenames(this.filename, other.filename) if this.filename is not None: this._filename.form = (len(this.filename),) temp = np.nonzero(~np.in1d(other.ant_array, this.ant_array))[0] if len(temp) > 0: anew_inds = temp history_update_string += "antenna" n_axes += 1 else: anew_inds = [] temp = np.nonzero(~np.in1d(other.time_array, this.time_array))[0] if len(temp) > 0: tnew_inds = temp if n_axes > 0: history_update_string += ", time" else: history_update_string += "time" n_axes += 1 else: tnew_inds = [] # adding along frequency axis is not supported for delay-type cal files if this.cal_type == "gain": # find the freq indices in "other" but not in "this" if self.flex_spw: other_mask = np.ones_like(other.flex_spw_id_array, dtype=bool) for idx in np.intersect1d(this.spw_array, other.spw_array): if this.future_array_shapes: other_mask[other.flex_spw_id_array == idx] = np.isin( other.freq_array[other.flex_spw_id_array == idx], this.freq_array[this.flex_spw_id_array == idx], invert=True, ) 
else: other_mask[other.flex_spw_id_array == idx] = np.isin( other.freq_array[0, other.flex_spw_id_array == idx], this.freq_array[0, this.flex_spw_id_array == idx], invert=True, ) temp = np.where(other_mask)[0] else: if this.future_array_shapes: temp = np.nonzero(~np.in1d(other.freq_array, this.freq_array))[0] else: temp = np.nonzero( ~np.in1d(other.freq_array[0, :], this.freq_array[0, :]) )[0] if len(temp) > 0: fnew_inds = temp if n_axes > 0: history_update_string += ", frequency" else: history_update_string += "frequency" n_axes += 1 else: fnew_inds = [] else: # delay type, set fnew_inds to an empty list fnew_inds = [] temp = np.nonzero(~np.in1d(other.jones_array, this.jones_array))[0] if len(temp) > 0: jnew_inds = temp if n_axes > 0: history_update_string += ", jones" else: history_update_string += "jones" n_axes += 1 else: jnew_inds = [] # Initialize tqa variables can_combine_tqa = True if this.cal_type == "delay": Nf_tqa = 1 else: Nf_tqa = this.Nfreqs # Pad out self to accommodate new data if len(anew_inds) > 0: this.ant_array = np.concatenate( [this.ant_array, other.ant_array[anew_inds]] ) order = np.argsort(this.ant_array) this.ant_array = this.ant_array[order] if not self.metadata_only: if self.future_array_shapes: zero_pad_data = np.zeros( ( len(anew_inds), this.quality_array.shape[1], this.Ntimes, this.Njones, ) ) zero_pad_flags = np.zeros( ( len(anew_inds), this.quality_array.shape[1], this.Ntimes, this.Njones, ) ) else: zero_pad_data = np.zeros( ( len(anew_inds), 1, this.quality_array.shape[2], this.Ntimes, this.Njones, ) ) zero_pad_flags = np.zeros( (len(anew_inds), 1, this.Nfreqs, this.Ntimes, this.Njones,) ) if this.cal_type == "delay": this.delay_array = np.concatenate( [this.delay_array, zero_pad_data], axis=0 )[order] else: this.gain_array = np.concatenate( [this.gain_array, zero_pad_data], axis=0 )[order] this.flag_array = np.concatenate( [this.flag_array, 1 - zero_pad_flags], axis=0 ).astype(np.bool_)[order] this.quality_array = np.concatenate( 
[this.quality_array, zero_pad_data], axis=0 )[order] # If total_quality_array exists, we set it to None and warn the user if ( this.total_quality_array is not None or other.total_quality_array is not None ): warnings.warn( "Total quality array detected in at least one file; the " "array in the new object will be set to 'None' because " "whole-array values cannot be combined when adding antennas" ) this.total_quality_array = None can_combine_tqa = False if ( this.input_flag_array is not None or other.input_flag_array is not None ): if self.future_array_shapes: zero_pad = np.zeros( ( len(anew_inds), this.quality_array.shape[1], this.Ntimes, this.Njones, ) ) else: zero_pad = np.zeros( (len(anew_inds), 1, this.Nfreqs, this.Ntimes, this.Njones,) ) if this.input_flag_array is not None: this.input_flag_array = np.concatenate( [this.input_flag_array, 1 - zero_pad], axis=0 ).astype(np.bool_)[order] elif other.input_flag_array is not None: if self.future_array_shapes: this.input_flag_array = np.array( 1 - np.zeros( ( this.Nants_data, this.quality_array.shape[1], this.Ntimes, this.Njones, ) ) ).astype(np.bool_) else: this.input_flag_array = np.array( 1 - np.zeros( ( this.Nants_data, 1, this.Nfreqs, this.Ntimes, this.Njones, ) ) ).astype(np.bool_) this.input_flag_array = np.concatenate( [this.input_flag_array, 1 - zero_pad], axis=0 ).astype(np.bool_)[order] if len(fnew_inds) > 0: # Exploit the fact that quality array has the same dimensions as the # main data. # Also do not need to worry about different cases for gain v. 
delay type if self.future_array_shapes: zero_pad = np.zeros( ( this.quality_array.shape[0], len(fnew_inds), this.Ntimes, this.Njones, ) ) this.freq_array = np.concatenate( [this.freq_array, other.freq_array[fnew_inds]] ) else: zero_pad = np.zeros( ( this.quality_array.shape[0], 1, len(fnew_inds), this.Ntimes, this.Njones, ) ) this.freq_array = np.concatenate( [this.freq_array, other.freq_array[:, fnew_inds]], axis=1 ) if this.flex_spw: this.flex_spw_id_array = np.concatenate( [this.flex_spw_id_array, other.flex_spw_id_array[fnew_inds]] ) this.spw_array = np.concatenate([this.spw_array, other.spw_array]) # We want to preserve per-spw information based on first appearance # in the concatenated array. unique_index = np.sort(np.unique(this.spw_array, return_index=True)[1]) this.spw_array = this.spw_array[unique_index] if this.future_array_shapes: this.freq_range = np.concatenate( [this.freq_range, other.freq_range], axis=0 ) this.freq_range = this.freq_range[unique_index, :] this.Nspws = len(this.spw_array) # If we have a flex/multi-spw data set, need to sort out the order of # the individual windows first. order = np.concatenate( [ np.where(this.flex_spw_id_array == idx)[0] for idx in sorted(this.spw_array) ] ) # With spectral windows sorted, check and see if channels within # windows need sorting. If they are ordered in ascending or descending # fashion, leave them be. 
If not, sort in ascending order for idx in this.spw_array: select_mask = this.flex_spw_id_array[order] == idx check_freqs = ( this.freq_array[order[select_mask]] if this.future_array_shapes else this.freq_array[0, order[select_mask]] ) if (not np.all(check_freqs[1:] > check_freqs[:-1])) and ( not np.all(check_freqs[1:] < check_freqs[:-1]) ): subsort_order = order[select_mask] order[select_mask] = subsort_order[np.argsort(check_freqs)] this.flex_spw_id_array = this.flex_spw_id_array[order] this.spw_array = np.array(sorted(this.spw_array)) else: if this.future_array_shapes: order = np.argsort(this.freq_array) else: order = np.argsort(this.freq_array[0, :]) if this.future_array_shapes: this.freq_array = this.freq_array[order] else: this.freq_array = this.freq_array[:, order] if this.flex_spw or this.future_array_shapes: this.channel_width = np.concatenate( [this.channel_width, other.channel_width[fnew_inds]] ) this.channel_width = this.channel_width[order] if not self.metadata_only: if self.future_array_shapes: this.gain_array = np.concatenate( [this.gain_array, zero_pad], axis=1 )[:, order, :, :] this.flag_array = np.concatenate( [this.flag_array, 1 - zero_pad], axis=1 ).astype(np.bool_)[:, order, :, :] this.quality_array = np.concatenate( [this.quality_array, zero_pad], axis=1 )[:, order, :, :] if this.total_quality_array is not None and can_combine_tqa: zero_pad = np.zeros((len(fnew_inds), this.Ntimes, this.Njones)) this.total_quality_array = np.concatenate( [this.total_quality_array, zero_pad], axis=0 )[order, :, :] elif other.total_quality_array is not None and can_combine_tqa: zero_pad = np.zeros((len(fnew_inds), this.Ntimes, this.Njones)) this.total_quality_array = np.zeros( (Nf_tqa, this.Ntimes, this.Njones) ) this.total_quality_array = np.concatenate( [this.total_quality_array, zero_pad], axis=0 )[order, :, :] else: this.gain_array = np.concatenate( [this.gain_array, zero_pad], axis=2 )[:, :, order, :, :] this.flag_array = np.concatenate( [this.flag_array, 1 
- zero_pad], axis=2 ).astype(np.bool_)[:, :, order, :, :] this.quality_array = np.concatenate( [this.quality_array, zero_pad], axis=2 )[:, :, order, :, :] if this.total_quality_array is not None and can_combine_tqa: zero_pad = np.zeros( (1, len(fnew_inds), this.Ntimes, this.Njones) ) this.total_quality_array = np.concatenate( [this.total_quality_array, zero_pad], axis=1 )[:, order, :, :] elif other.total_quality_array is not None and can_combine_tqa: zero_pad = np.zeros( (1, len(fnew_inds), this.Ntimes, this.Njones) ) this.total_quality_array = np.zeros( (1, Nf_tqa, this.Ntimes, this.Njones) ) this.total_quality_array = np.concatenate( [this.total_quality_array, zero_pad], axis=1 )[:, order, :, :] if ( this.input_flag_array is not None or other.input_flag_array is not None ): if self.future_array_shapes: zero_pad = np.zeros( ( this.flag_array.shape[0], len(fnew_inds), this.Ntimes, this.Njones, ) ) if this.input_flag_array is not None: this.input_flag_array = np.concatenate( [this.input_flag_array, 1 - zero_pad], axis=1 ).astype(np.bool_)[:, order, :, :] elif other.input_flag_array is not None: this.input_flag_array = np.array( 1 - np.zeros( ( this.flag_array.shape[0], this.flag_array.shape[1], this.flag_array.shape[2], this.Njones, ) ) ).astype(np.bool_) this.input_flag_array = np.concatenate( [this.input_flag_array, 1 - zero_pad], axis=1 ).astype(np.bool_)[:, order, :, :] else: zero_pad = np.zeros( ( this.flag_array.shape[0], 1, len(fnew_inds), this.Ntimes, this.Njones, ) ) if this.input_flag_array is not None: this.input_flag_array = np.concatenate( [this.input_flag_array, 1 - zero_pad], axis=2 ).astype(np.bool_)[:, :, order, :, :] elif other.input_flag_array is not None: this.input_flag_array = np.array( 1 - np.zeros( ( this.flag_array.shape[0], 1, this.flag_array.shape[2], this.flag_array.shape[3], this.Njones, ) ) ).astype(np.bool_) this.input_flag_array = np.concatenate( [this.input_flag_array, 1 - zero_pad], axis=2 ).astype(np.bool_)[:, :, order, :, :] if 
len(tnew_inds) > 0: # Exploit the fact that quality array has the same dimensions as # the main data this.time_array = np.concatenate( [this.time_array, other.time_array[tnew_inds]] ) this.lst_array = np.concatenate( [this.lst_array, other.lst_array[tnew_inds]] ) order = np.argsort(this.time_array) this.time_array = this.time_array[order] this.lst_array = this.lst_array[order] if self.future_array_shapes: this.integration_time = np.concatenate( [this.integration_time, other.integration_time[tnew_inds]] ) this.integration_time = this.integration_time[order] if not self.metadata_only: if self.future_array_shapes: zero_pad_data = np.zeros( ( this.quality_array.shape[0], this.quality_array.shape[1], len(tnew_inds), this.Njones, ) ) zero_pad_flags = np.zeros( ( this.flag_array.shape[0], this.flag_array.shape[1], len(tnew_inds), this.Njones, ) ) if this.cal_type == "delay": this.delay_array = np.concatenate( [this.delay_array, zero_pad_data], axis=2 )[:, :, order, :] else: this.gain_array = np.concatenate( [this.gain_array, zero_pad_data], axis=2 )[:, :, order, :] this.flag_array = np.concatenate( [this.flag_array, 1 - zero_pad_flags], axis=2 ).astype(np.bool_)[:, :, order, :] this.quality_array = np.concatenate( [this.quality_array, zero_pad_data], axis=2 )[:, :, order, :] if this.total_quality_array is not None and can_combine_tqa: zero_pad = np.zeros( (this.quality_array.shape[1], len(tnew_inds), this.Njones,) ) this.total_quality_array = np.concatenate( [this.total_quality_array, zero_pad], axis=1 )[:, order, :] elif other.total_quality_array is not None and can_combine_tqa: zero_pad = np.zeros( (this.quality_array.shape[1], len(tnew_inds), this.Njones,) ) this.total_quality_array = np.zeros( (Nf_tqa, this.Ntimes, this.Njones) ) this.total_quality_array = np.concatenate( [this.total_quality_array, zero_pad], axis=1 )[:, order, :] if this.input_flag_array is not None: zero_pad = np.zeros( ( this.input_flag_array.shape[0], this.input_flag_array.shape[1], 
len(tnew_inds), this.Njones, ) ) this.input_flag_array = np.concatenate( [this.input_flag_array, 1 - zero_pad], axis=2 ).astype(np.bool_)[:, :, order, :] elif other.input_flag_array is not None: zero_pad = np.zeros( ( this.flag_array.shape[0], this.flag_array.shape[1], len(tnew_inds), this.Njones, ) ) this.input_flag_array = np.array( 1 - np.zeros( ( this.flag_array.shape[0], this.flag_array.shape[1], this.flag_array.shape[2], this.Njones, ) ) ).astype(np.bool_) this.input_flag_array = np.concatenate( [this.input_flag_array, 1 - zero_pad], axis=2 ).astype(np.bool_)[:, :, order, :] else: zero_pad_data = np.zeros( ( this.quality_array.shape[0], 1, this.quality_array.shape[2], len(tnew_inds), this.Njones, ) ) zero_pad_flags = np.zeros( ( this.flag_array.shape[0], 1, this.flag_array.shape[2], len(tnew_inds), this.Njones, ) ) if this.cal_type == "delay": this.delay_array = np.concatenate( [this.delay_array, zero_pad_data], axis=3 )[:, :, :, order, :] else: this.gain_array = np.concatenate( [this.gain_array, zero_pad_data], axis=3 )[:, :, :, order, :] this.flag_array = np.concatenate( [this.flag_array, 1 - zero_pad_flags], axis=3 ).astype(np.bool_)[:, :, :, order, :] this.quality_array = np.concatenate( [this.quality_array, zero_pad_data], axis=3 )[:, :, :, order, :] if this.total_quality_array is not None and can_combine_tqa: zero_pad = np.zeros( ( 1, this.quality_array.shape[2], len(tnew_inds), this.Njones, ) ) this.total_quality_array = np.concatenate( [this.total_quality_array, zero_pad], axis=2 )[:, :, order, :] elif other.total_quality_array is not None and can_combine_tqa: zero_pad = np.zeros( ( 1, this.quality_array.shape[2], len(tnew_inds), this.Njones, ) ) this.total_quality_array = np.zeros( (1, Nf_tqa, this.Ntimes, this.Njones) ) this.total_quality_array = np.concatenate( [this.total_quality_array, zero_pad], axis=2 )[:, :, order, :] if this.input_flag_array is not None: zero_pad = np.zeros( ( this.input_flag_array.shape[0], 1, this.input_flag_array.shape[2], 
len(tnew_inds), this.Njones, ) ) this.input_flag_array = np.concatenate( [this.input_flag_array, 1 - zero_pad], axis=3 ).astype(np.bool_)[:, :, :, order, :] elif other.input_flag_array is not None: zero_pad = np.zeros( ( this.flag_array.shape[0], 1, this.flag_array.shape[2], len(tnew_inds), this.Njones, ) ) this.input_flag_array = np.array( 1 - np.zeros( ( this.flag_array.shape[0], 1, this.flag_array.shape[2], this.flag_array.shape[3], this.Njones, ) ) ).astype(np.bool_) this.input_flag_array = np.concatenate( [this.input_flag_array, 1 - zero_pad], axis=3 ).astype(np.bool_)[:, :, :, order, :] if len(jnew_inds) > 0: # Exploit the fact that quality array has the same dimensions as # the main data this.jones_array = np.concatenate( [this.jones_array, other.jones_array[jnew_inds]] ) order = np.argsort(np.abs(this.jones_array)) this.jones_array = this.jones_array[order] if not self.metadata_only: if self.future_array_shapes: zero_pad_data = np.zeros( ( this.quality_array.shape[0], this.quality_array.shape[1], this.quality_array.shape[2], len(jnew_inds), ) ) zero_pad_flags = np.zeros( ( this.flag_array.shape[0], this.flag_array.shape[1], this.flag_array.shape[2], len(jnew_inds), ) ) if this.cal_type == "delay": this.delay_array = np.concatenate( [this.delay_array, zero_pad_data], axis=3 )[:, :, :, order] else: this.gain_array = np.concatenate( [this.gain_array, zero_pad_data], axis=3 )[:, :, :, order] this.flag_array = np.concatenate( [this.flag_array, 1 - zero_pad_flags], axis=3 ).astype(np.bool_)[:, :, :, order] this.quality_array = np.concatenate( [this.quality_array, zero_pad_data], axis=3 )[:, :, :, order] if this.total_quality_array is not None and can_combine_tqa: zero_pad = np.zeros( ( this.quality_array.shape[1], this.quality_array.shape[2], len(jnew_inds), ) ) this.total_quality_array = np.concatenate( [this.total_quality_array, zero_pad], axis=2 )[:, :, order] elif other.total_quality_array is not None and can_combine_tqa: zero_pad = np.zeros( ( 
this.quality_array.shape[1], this.quality_array.shape[2], len(jnew_inds), ) ) this.total_quality_array = np.zeros( (Nf_tqa, this.Ntimes, this.Njones) ) this.total_quality_array = np.concatenate( [this.total_quality_array, zero_pad], axis=2 )[:, :, order] if this.input_flag_array is not None: zero_pad = np.zeros( ( this.input_flag_array.shape[0], this.input_flag_array.shape[1], this.input_flag_array.shape[2], len(jnew_inds), ) ) this.input_flag_array = np.concatenate( [this.input_flag_array, 1 - zero_pad], axis=3 ).astype(np.bool_)[:, :, :, order] elif other.input_flag_array is not None: zero_pad = np.zeros( ( this.flag_array.shape[0], this.flag_array.shape[1], this.flag_array.shape[2], len(jnew_inds), ) ) this.input_flag_array = np.array( 1 - np.zeros( ( this.flag_array.shape[0], this.flag_array.shape[1], this.flag_array.shape[2], this.Njones, ) ) ).astype(np.bool_) this.input_flag_array = np.concatenate( [this.input_flag_array, 1 - zero_pad], axis=3 ).astype(np.bool_)[:, :, :, order] else: zero_pad_data = np.zeros( ( this.quality_array.shape[0], 1, this.quality_array.shape[2], this.quality_array.shape[3], len(jnew_inds), ) ) zero_pad_flags = np.zeros( ( this.flag_array.shape[0], 1, this.flag_array.shape[2], this.flag_array.shape[3], len(jnew_inds), ) ) if this.cal_type == "delay": this.delay_array = np.concatenate( [this.delay_array, zero_pad_data], axis=4 )[:, :, :, :, order] else: this.gain_array = np.concatenate( [this.gain_array, zero_pad_data], axis=4 )[:, :, :, :, order] this.flag_array = np.concatenate( [this.flag_array, 1 - zero_pad_flags], axis=4 ).astype(np.bool_)[:, :, :, :, order] this.quality_array = np.concatenate( [this.quality_array, zero_pad_data], axis=4 )[:, :, :, :, order] if this.total_quality_array is not None and can_combine_tqa: zero_pad = np.zeros( ( 1, this.quality_array.shape[2], this.quality_array.shape[3], len(jnew_inds), ) ) this.total_quality_array = np.concatenate( [this.total_quality_array, zero_pad], axis=3 )[:, :, :, order] elif 
other.total_quality_array is not None and can_combine_tqa: zero_pad = np.zeros( ( 1, this.quality_array.shape[2], this.quality_array.shape[3], len(jnew_inds), ) ) this.total_quality_array = np.zeros( (1, Nf_tqa, this.Ntimes, this.Njones) ) this.total_quality_array = np.concatenate( [this.total_quality_array, zero_pad], axis=3 )[:, :, :, order] if this.input_flag_array is not None: zero_pad = np.zeros( ( this.input_flag_array.shape[0], 1, this.input_flag_array.shape[2], this.input_flag_array.shape[3], len(jnew_inds), ) ) this.input_flag_array = np.concatenate( [this.input_flag_array, 1 - zero_pad], axis=4 ).astype(np.bool_)[:, :, :, :, order] elif other.input_flag_array is not None: zero_pad = np.zeros( ( this.flag_array.shape[0], 1, this.flag_array.shape[2], this.flag_array.shape[3], len(jnew_inds), ) ) this.input_flag_array = np.array( 1 - np.zeros( ( this.flag_array.shape[0], 1, this.flag_array.shape[2], this.flag_array.shape[3], this.Njones, ) ) ).astype(np.bool_) this.input_flag_array = np.concatenate( [this.input_flag_array, 1 - zero_pad], axis=4 ).astype(np.bool_)[:, :, :, :, order] # Now populate the data if not self.metadata_only: jones_t2o = np.nonzero(np.in1d(this.jones_array, other.jones_array))[0] times_t2o = np.nonzero(np.in1d(this.time_array, other.time_array))[0] if self.future_array_shapes: freqs_t2o = np.nonzero(np.in1d(this.freq_array, other.freq_array))[0] else: freqs_t2o = np.nonzero( np.in1d(this.freq_array[0, :], other.freq_array[0, :]) )[0] ants_t2o = np.nonzero(np.in1d(this.ant_array, other.ant_array))[0] if self.future_array_shapes: if this.cal_type == "delay": this.delay_array[ np.ix_(ants_t2o, [0], times_t2o, jones_t2o) ] = other.delay_array this.quality_array[ np.ix_(ants_t2o, [0], times_t2o, jones_t2o) ] = other.quality_array this.flag_array[ np.ix_(ants_t2o, [0], times_t2o, jones_t2o) ] = other.flag_array else: this.gain_array[ np.ix_(ants_t2o, freqs_t2o, times_t2o, jones_t2o) ] = other.gain_array this.quality_array[ np.ix_(ants_t2o, 
freqs_t2o, times_t2o, jones_t2o) ] = other.quality_array this.flag_array[ np.ix_(ants_t2o, freqs_t2o, times_t2o, jones_t2o) ] = other.flag_array if this.total_quality_array is not None: if other.total_quality_array is not None: if this.cal_type == "delay": this.total_quality_array[ np.ix_([0], times_t2o, jones_t2o) ] = other.total_quality_array else: this.total_quality_array[ np.ix_(freqs_t2o, times_t2o, jones_t2o) ] = other.total_quality_array if this.input_flag_array is not None: if other.input_flag_array is not None: if this.cal_type == "delay": this.input_flag_array[ np.ix_(ants_t2o, [0], times_t2o, jones_t2o) ] = other.input_flag_array else: this.input_flag_array[ np.ix_(ants_t2o, freqs_t2o, times_t2o, jones_t2o) ] = other.input_flag_array else: if this.cal_type == "delay": this.delay_array[ np.ix_(ants_t2o, [0], [0], times_t2o, jones_t2o) ] = other.delay_array this.quality_array[ np.ix_(ants_t2o, [0], [0], times_t2o, jones_t2o) ] = other.quality_array else: this.gain_array[ np.ix_(ants_t2o, [0], freqs_t2o, times_t2o, jones_t2o) ] = other.gain_array this.quality_array[ np.ix_(ants_t2o, [0], freqs_t2o, times_t2o, jones_t2o) ] = other.quality_array this.flag_array[ np.ix_(ants_t2o, [0], freqs_t2o, times_t2o, jones_t2o) ] = other.flag_array if this.total_quality_array is not None: if other.total_quality_array is not None: if this.cal_type == "delay": this.total_quality_array[ np.ix_([0], [0], times_t2o, jones_t2o) ] = other.total_quality_array else: this.total_quality_array[ np.ix_([0], freqs_t2o, times_t2o, jones_t2o) ] = other.total_quality_array if this.input_flag_array is not None: if other.input_flag_array is not None: this.input_flag_array[ np.ix_(ants_t2o, [0], freqs_t2o, times_t2o, jones_t2o) ] = other.input_flag_array # Update N parameters (e.g. 
        # Njones).  Update the axis counts to reflect the combined object.
        this.Njones = this.jones_array.shape[0]
        this.Ntimes = this.time_array.shape[0]
        if this.cal_type == "gain":
            # Nfreqs only applies to gain-type cals; delay cals have no freq axis.
            this.Nfreqs = this.freq_array.size
        this.Nants_data = len(
            np.unique(this.ant_array.tolist() + other.ant_array.tolist())
        )

        # Check specific requirements
        if this.cal_type == "gain" and this.Nfreqs > 1:
            spacing_error, chanwidth_error = this._check_freq_spacing(
                raise_errors=False
            )
            if spacing_error:
                warnings.warn(
                    "Combined frequencies are not evenly spaced or have differing "
                    "values of channel widths. This will make it impossible to write "
                    "this data out to some file types."
                )
            elif chanwidth_error:
                warnings.warn(
                    "Combined frequencies are separated by more than their "
                    "channel width. This will make it impossible to write this data "
                    "out to some file types."
                )

        if this.Njones > 2:
            if not uvutils._test_array_constant_spacing(this._jones_array):
                warnings.warn(
                    "Combined Jones elements are not evenly spaced. This will "
                    "make it impossible to write this data out to some file types."
                )

        # Only touch the history if something was actually concatenated.
        if n_axes > 0:
            history_update_string += " axis using pyuvdata."

            histories_match = uvutils._check_histories(this.history, other.history)

            this.history += history_update_string
            if not histories_match:
                if verbose_history:
                    this.history += " Next object history follows. " + other.history
                else:
                    extra_history = uvutils._combine_history_addition(
                        this.history, other.history
                    )
                    if extra_history is not None:
                        this.history += (
                            " Unique part of next object history follows. "
                            + extra_history
                        )

        # Check final object is self-consistent
        if run_check:
            this.check(
                check_extra=check_extra,
                run_check_acceptability=run_check_acceptability
            )

        if not inplace:
            return this

    def __iadd__(
        self, other, run_check=True, check_extra=True, run_check_acceptability=True,
    ):
        """
        Combine two UVCal objects in place.

        Along antenna, frequency, time, and/or Jones axis.

        Parameters
        ----------
        other : :class: UVCal
            Another UVCal object which will be added to self.
        run_check : bool
            Option to check for the existence and proper shapes of parameters
            after combining objects.
        check_extra : bool
            Option to check optional parameters as well as required ones.
        run_check_acceptability : bool
            Option to check acceptable range of the values of parameters after
            combining objects.

        """
        # Delegate to __add__ with inplace=True; __add__ returns None in that
        # case, so we return self to satisfy the in-place operator protocol.
        self.__add__(
            other,
            inplace=True,
            run_check=run_check,
            check_extra=check_extra,
            run_check_acceptability=run_check_acceptability,
        )
        return self

    def select(
        self,
        antenna_nums=None,
        antenna_names=None,
        frequencies=None,
        freq_chans=None,
        spws=None,
        times=None,
        jones=None,
        run_check=True,
        check_extra=True,
        run_check_acceptability=True,
        inplace=True,
    ):
        """
        Downselect data to keep on the object along various axes.

        Axes that can be selected along include antennas, frequencies, times and
        antenna polarization (jones).

        The history attribute on the object will be updated to identify the
        operations performed.

        Parameters
        ----------
        antenna_nums : array_like of int, optional
            The antennas numbers to keep in the object (antenna positions and
            names for the removed antennas will be retained).
            This cannot be provided if `antenna_names` is also provided.
        antenna_names : array_like of str, optional
            The antennas names to keep in the object (antenna positions and
            names for the removed antennas will be retained).
            This cannot be provided if `antenna_nums` is also provided.
        frequencies : array_like of float, optional
            The frequencies to keep in the object, each value passed here should
            exist in the freq_array.
        freq_chans : array_like of int, optional
            The frequency channel numbers to keep in the object.
        spws : array_like of int, optional
            The spectral window numbers to keep in the object. If this is not a
            wide-band object and `frequencies` or `freq_chans` is not None,
            frequencies that match any of the specifications will be kept (i.e.
            the selections will be OR'ed together).
        times : array_like of float, optional
            The times to keep in the object, each value passed here should
            exist in the time_array.
        jones : array_like of int or str, optional
            The antenna polarizations numbers to keep in the object, each value
            passed here should exist in the jones_array. If passing strings, the
            canonical polarization strings (e.g. "Jxx", "Jrr") are supported and
            if the `x_orientation` attribute is set, the physical dipole strings
            (e.g. "Jnn", "Jee") are also supported.
        run_check : bool
            Option to check for the existence and proper shapes of parameters
            after downselecting data on this object (the default is True,
            meaning the check will be run).
        check_extra : bool
            Option to check optional parameters as well as required ones (the
            default is True, meaning the optional parameters will be checked).
        run_check_acceptability : bool
            Option to check acceptable range of the values of parameters after
            downselecting data on this object (the default is True, meaning the
            acceptable range check will be done).
        inplace : bool
            Option to perform the select directly on self or return a new UVCal
            object with just the selected data (the default is True, meaning the
            select will be done on self).

        """
        # Work on self for inplace selection, otherwise on a full copy.
        if inplace:
            cal_object = self
        else:
            cal_object = self.copy()

        # build up history string as we go
        history_update_string = " Downselected to specific "
        n_selects = 0

        if antenna_names is not None:
            if antenna_nums is not None:
                raise ValueError(
                    "Only one of antenna_nums and antenna_names can be provided."
                )
            # Translate the requested names into numbers; from here on only
            # antenna_nums is used.
            antenna_names = uvutils._get_iterable(antenna_names)
            antenna_nums = []
            for s in antenna_names:
                if s not in cal_object.antenna_names:
                    raise ValueError(
                        f"Antenna name {s} is not present in the antenna_names array"
                    )
                ind = np.where(np.array(cal_object.antenna_names) == s)[0][0]
                antenna_nums.append(cal_object.antenna_numbers[ind])

        if antenna_nums is not None:
            antenna_nums = uvutils._get_iterable(antenna_nums)
            history_update_string += "antennas"
            n_selects += 1

            # Collect the ant_array indices for every requested antenna number.
            ant_inds = np.zeros(0, dtype=np.int64)
            for ant in antenna_nums:
                if ant in cal_object.ant_array:
                    ant_inds = np.append(
                        ant_inds, np.where(cal_object.ant_array == ant)[0]
                    )
                else:
                    raise ValueError(
                        f"Antenna number {ant} is not present in the array"
                    )

            # sorted(set(...)) both deduplicates and orders the indices.
            ant_inds = sorted(set(ant_inds))
            cal_object.Nants_data = len(ant_inds)
            cal_object.ant_array = cal_object.ant_array[ant_inds]
            if not self.metadata_only:
                # Antenna axis is axis 0 of all the per-antenna data arrays.
                cal_object.flag_array = cal_object.flag_array[ant_inds]
                cal_object.quality_array = cal_object.quality_array[ant_inds]
                if cal_object.cal_type == "delay":
                    cal_object.delay_array = cal_object.delay_array[ant_inds]
                else:
                    cal_object.gain_array = cal_object.gain_array[ant_inds]

                if cal_object.input_flag_array is not None:
                    cal_object.input_flag_array = cal_object.input_flag_array[ant_inds]

                if cal_object.total_quality_array is not None:
                    # total_quality_array is a whole-array quantity, so it cannot
                    # survive a change in the antenna set.
                    warnings.warn(
                        "Cannot preserve total_quality_array when changing "
                        "number of antennas; discarding"
                    )
                    cal_object.total_quality_array = None

        if times is not None:
            times = uvutils._get_iterable(times)
            if n_selects > 0:
                history_update_string += ", times"
            else:
                history_update_string += "times"
            n_selects += 1

            time_inds = np.zeros(0, dtype=np.int64)
            for jd in times:
                if jd in cal_object.time_array:
                    time_inds = np.append(
                        time_inds, np.where(cal_object.time_array == jd)[0]
                    )
                else:
                    raise ValueError(
                        "Time {t} is not present in the time_array".format(t=jd)
                    )

            time_inds = sorted(set(time_inds))
            cal_object.Ntimes = len(time_inds)
            cal_object.time_array = cal_object.time_array[time_inds]
            if cal_object.lst_array is not None:
                cal_object.lst_array = cal_object.lst_array[time_inds]
            if self.future_array_shapes:
                # integration_time is per-time only with future array shapes.
                cal_object.integration_time = cal_object.integration_time[time_inds]

            if cal_object.Ntimes > 1:
                if not uvutils._test_array_constant_spacing(cal_object._time_array):
                    warnings.warn(
                        "Selected times are not evenly spaced. This "
                        "is not supported by the calfits format."
                    )

            if not self.metadata_only:
                # Time axis position differs between the two array-shape
                # conventions: axis 2 (future shapes) vs axis 3 (current shapes,
                # which carry an extra length-1 spw axis at position 1).
                if self.future_array_shapes:
                    cal_object.flag_array = cal_object.flag_array[:, :, time_inds, :]
                    cal_object.quality_array = cal_object.quality_array[
                        :, :, time_inds, :
                    ]
                    if cal_object.cal_type == "delay":
                        cal_object.delay_array = cal_object.delay_array[
                            :, :, time_inds, :
                        ]
                    else:
                        cal_object.gain_array = cal_object.gain_array[
                            :, :, time_inds, :
                        ]

                    if cal_object.input_flag_array is not None:
                        cal_object.input_flag_array = cal_object.input_flag_array[
                            :, :, time_inds, :
                        ]

                    if cal_object.total_quality_array is not None:
                        cal_object.total_quality_array = cal_object.total_quality_array[
                            :, time_inds, :
                        ]
                else:
                    cal_object.flag_array = cal_object.flag_array[:, :, :, time_inds, :]
                    cal_object.quality_array = cal_object.quality_array[
                        :, :, :, time_inds, :
                    ]
                    if cal_object.cal_type == "delay":
                        cal_object.delay_array = cal_object.delay_array[
                            :, :, :, time_inds, :
                        ]
                    else:
                        cal_object.gain_array = cal_object.gain_array[
                            :, :, :, time_inds, :
                        ]

                    if cal_object.input_flag_array is not None:
                        cal_object.input_flag_array = cal_object.input_flag_array[
                            :, :, :, time_inds, :
                        ]

                    if cal_object.total_quality_array is not None:
                        cal_object.total_quality_array = cal_object.total_quality_array[
                            :, :, time_inds, :
                        ]

        if spws is not None:
            if cal_object.Nspws == 1:
                warnings.warn(
                    "Cannot select on spws if Nspws=1. Ignoring the spw parameter."
                )
            else:
                if not cal_object.wide_band:
                    assert cal_object.flex_spw is True, (
                        "The `flex_spw` parameter must be True if there are multiple "
                        "spectral windows and the `wide_band` parameter is not True."
                    )
                    # Translate the spws into frequencies
                    # (history/n_selects are not updated here; the frequency
                    # selection below handles that for this path).
                    if frequencies is None:
                        if self.future_array_shapes:
                            frequencies = self.freq_array[
                                np.isin(cal_object.flex_spw_id_array, spws)
                            ]
                        else:
                            frequencies = self.freq_array[
                                0, np.isin(cal_object.flex_spw_id_array, spws)
                            ]
                else:
                    assert self.future_array_shapes, (
                        "The `future_array_shapes` parameter must be True if the "
                        "`wide_band` parameter is True"
                    )
                    if n_selects > 0:
                        history_update_string += ", spectral windows"
                    else:
                        history_update_string += "spectral windows"
                    n_selects += 1

                    # Check and see that all requested spws are available
                    spw_check = np.isin(spws, cal_object.spw_array)
                    if not np.all(spw_check):
                        raise ValueError(
                            f"SPW number {spws[np.where(~spw_check)[0][0]]} is not "
                            "present in the spw_array"
                        )

                    spw_inds = np.where(np.isin(cal_object.spw_array, spws))[0]
                    spw_inds = sorted(set(spw_inds))
                    cal_object.Nspws = len(spw_inds)
                    cal_object.freq_range = cal_object.freq_range[spw_inds, :]
                    cal_object.spw_array = cal_object.spw_array[spw_inds]

                    if not cal_object.metadata_only:
                        # For wide-band objects axis 1 is the spw axis.
                        if cal_object.cal_type == "delay":
                            cal_object.delay_array = cal_object.delay_array[
                                :, spw_inds, :, :
                            ]
                        else:
                            cal_object.gain_array = cal_object.gain_array[
                                :, spw_inds, :, :
                            ]
                        cal_object.flag_array = cal_object.flag_array[:, spw_inds, :, :]
                        if cal_object.input_flag_array is not None:
                            cal_object.input_flag_array = cal_object.input_flag_array[
                                :, spw_inds, :, :
                            ]
                        cal_object.quality_array = cal_object.quality_array[
                            :, spw_inds, :, :
                        ]
                        if cal_object.total_quality_array is not None:
                            tqa = cal_object.total_quality_array[spw_inds, :, :]
                            cal_object.total_quality_array = tqa

        if freq_chans is not None:
            # Convert channel numbers into frequencies, OR'ing them together
            # with any explicitly requested frequencies.
            freq_chans = uvutils._get_iterable(freq_chans)
            if frequencies is None:
                if self.future_array_shapes:
                    frequencies = cal_object.freq_array[freq_chans]
                else:
                    frequencies = cal_object.freq_array[0, freq_chans]
            else:
                frequencies = uvutils._get_iterable(frequencies)
                if self.future_array_shapes:
                    frequencies = np.sort(
                        list(set(frequencies) | set(cal_object.freq_array[freq_chans]))
                    )
                else:
                    frequencies = np.sort(
                        list(
                            set(frequencies) | set(cal_object.freq_array[0, freq_chans])
                        )
                    )

        if frequencies is not None:
            frequencies = uvutils._get_iterable(frequencies)
            if n_selects > 0:
                history_update_string += ", frequencies"
            else:
                history_update_string += "frequencies"
            n_selects += 1

            if cal_object.future_array_shapes:
                freq_arr_use = self.freq_array
            else:
                freq_arr_use = self.freq_array[0, :]
            # Check and see that all requested freqs are available
            freq_check = np.isin(frequencies, freq_arr_use)
            if not np.all(freq_check):
                raise ValueError(
                    f"Frequency {frequencies[np.where(~freq_check)[0][0]]} is not "
                    "present in the freq_array"
                )
            freq_inds = np.where(np.isin(freq_arr_use, frequencies))[0]
            freq_inds = sorted(set(freq_inds))
            cal_object.Nfreqs = len(freq_inds)
            if cal_object.future_array_shapes:
                cal_object.freq_array = cal_object.freq_array[freq_inds]
            else:
                cal_object.freq_array = cal_object.freq_array[:, freq_inds]

            if cal_object.future_array_shapes or cal_object.flex_spw:
                # channel_width is per-channel in these conventions.
                cal_object.channel_width = cal_object.channel_width[freq_inds]

            if cal_object.flex_spw:
                # Drop spectral windows that no longer have any channels.
                cal_object.flex_spw_id_array = cal_object.flex_spw_id_array[freq_inds]
                spw_mask = np.isin(cal_object.spw_array, cal_object.flex_spw_id_array)
                cal_object.spw_array = cal_object.spw_array[spw_mask]
                if cal_object.freq_range is not None and cal_object.future_array_shapes:
                    cal_object.freq_range = cal_object.freq_range[spw_mask, :]
                cal_object.Nspws = len(cal_object.spw_array)

            if cal_object.Nfreqs > 1:
                spacing_error, chanwidth_error = cal_object._check_freq_spacing(
                    raise_errors=False
                )
                if spacing_error:
                    warnings.warn(
                        "Selected frequencies are not evenly spaced. This "
                        "will make it impossible to write this data out to "
                        "some file types"
                    )
                elif chanwidth_error:
                    warnings.warn(
                        "Selected frequencies are not contiguous. This "
                        "will make it impossible to write this data out to "
                        "some file types."
                    )

            if not cal_object.metadata_only:
                if not cal_object.future_array_shapes:
                    # Flags apply to delay-type objects too in the current
                    # shapes, so slice them before the cal_type branch below.
                    cal_object.flag_array = cal_object.flag_array[:, :, freq_inds, :, :]
                    if cal_object.input_flag_array is not None:
                        cal_object.input_flag_array = cal_object.input_flag_array[
                            :, :, freq_inds, :, :
                        ]

                if cal_object.cal_type == "delay":
                    # delay-type data have no frequency axis to slice.
                    pass
                else:
                    if cal_object.future_array_shapes:
                        cal_object.flag_array = cal_object.flag_array[
                            :, freq_inds, :, :
                        ]
                        if cal_object.input_flag_array is not None:
                            cal_object.input_flag_array = cal_object.input_flag_array[
                                :, freq_inds, :, :
                            ]
                        cal_object.quality_array = cal_object.quality_array[
                            :, freq_inds, :, :
                        ]
                        cal_object.gain_array = cal_object.gain_array[
                            :, freq_inds, :, :
                        ]
                        if cal_object.total_quality_array is not None:
                            tqa = cal_object.total_quality_array[freq_inds, :, :]
                            cal_object.total_quality_array = tqa
                    else:
                        cal_object.quality_array = cal_object.quality_array[
                            :, :, freq_inds, :, :
                        ]
                        cal_object.gain_array = cal_object.gain_array[
                            :, :, freq_inds, :, :
                        ]
                        if cal_object.total_quality_array is not None:
                            tqa = cal_object.total_quality_array[:, freq_inds, :, :]
                            cal_object.total_quality_array = tqa

        if jones is not None:
            jones = uvutils._get_iterable(jones)
            if np.array(jones).ndim > 1:
                jones = np.array(jones).flatten()
            if n_selects > 0:
                history_update_string += ", jones polarization terms"
            else:
                history_update_string += "jones polarization terms"
            n_selects += 1

            jones_inds = np.zeros(0, dtype=np.int64)
            for j in jones:
                # Strings are translated to Jones integers; ints pass through.
                if isinstance(j, str):
                    j_num = uvutils.jstr2num(j, x_orientation=self.x_orientation)
                else:
                    j_num = j
                if j_num in cal_object.jones_array:
                    jones_inds = np.append(
                        jones_inds, np.where(cal_object.jones_array == j_num)[0]
                    )
                else:
                    raise ValueError(
                        "Jones term {j} is not present in the jones_array".format(j=j)
                    )

            jones_inds = sorted(set(jones_inds))
            cal_object.Njones = len(jones_inds)
            cal_object.jones_array = cal_object.jones_array[jones_inds]
            if len(jones_inds) > 2:
                jones_separation = (
                    cal_object.jones_array[1:] - cal_object.jones_array[:-1]
                )
                if not uvutils._test_array_constant(jones_separation):
                    warnings.warn(
                        "Selected jones polarization terms are not evenly spaced. This "
                        "is not supported by the calfits format"
                    )

            if not cal_object.metadata_only:
                # Jones is the last axis in both shape conventions.
                if cal_object.future_array_shapes:
                    cal_object.flag_array = cal_object.flag_array[:, :, :, jones_inds]
                    cal_object.quality_array = cal_object.quality_array[
                        :, :, :, jones_inds
                    ]
                    if cal_object.cal_type == "delay":
                        cal_object.delay_array = cal_object.delay_array[
                            :, :, :, jones_inds
                        ]
                    else:
                        cal_object.gain_array = cal_object.gain_array[
                            :, :, :, jones_inds
                        ]

                    if cal_object.input_flag_array is not None:
                        cal_object.input_flag_array = cal_object.input_flag_array[
                            :, :, :, jones_inds
                        ]

                    if cal_object.total_quality_array is not None:
                        cal_object.total_quality_array = cal_object.total_quality_array[
                            :, :, jones_inds
                        ]
                else:
                    cal_object.flag_array = cal_object.flag_array[
                        :, :, :, :, jones_inds
                    ]
                    cal_object.quality_array = cal_object.quality_array[
                        :, :, :, :, jones_inds
                    ]
                    if cal_object.cal_type == "delay":
                        cal_object.delay_array = cal_object.delay_array[
                            :, :, :, :, jones_inds
                        ]
                    else:
                        cal_object.gain_array = cal_object.gain_array[
                            :, :, :, :, jones_inds
                        ]

                    if cal_object.input_flag_array is not None:
                        cal_object.input_flag_array = cal_object.input_flag_array[
                            :, :, :, :, jones_inds
                        ]

                    if cal_object.total_quality_array is not None:
                        cal_object.total_quality_array = cal_object.total_quality_array[
                            :, :, :, jones_inds
                        ]

        if n_selects > 0:
            history_update_string += " using pyuvdata."
            cal_object.history = cal_object.history + history_update_string

        # check if object is self-consistent
        if run_check:
            cal_object.check(
                check_extra=check_extra, run_check_acceptability=run_check_acceptability
            )

        if not inplace:
            return cal_object

    def _convert_from_filetype(self, other):
        """Copy all parameters from *other* onto self.

        NOTE(review): assumes iterating *other* yields its parameter names
        (the UVBase iteration convention) — confirm against the base class.
        """
        for p in other:
            param = getattr(other, p)
            setattr(self, p, param)

    def _convert_to_filetype(self, filetype):
        if filetype == "calfits":
            from .
import calfits other_obj = calfits.CALFITS() else: raise ValueError("filetype must be calfits.") for p in self: param = getattr(self, p) setattr(other_obj, p, param) return other_obj @classmethod def initialize_from_uvdata( cls, uvdata, gain_convention, cal_style, future_array_shapes=True, metadata_only=True, include_uvdata_history=True, cal_type="gain", times=None, integration_time=None, time_range=None, frequencies=None, channel_width=None, flex_spw=None, flex_spw_id_array=None, wide_band=None, freq_range=None, spw_array=None, jones=None, ref_antenna_name=None, sky_catalog=None, sky_field=None, diffuse_model=None, baseline_range=None, Nsources=None, # noqa observer=None, gain_scale=None, git_hash_cal=None, git_origin_cal=None, extra_keywords=None, ): """ Initialize this object based on a UVData object. Parameters ---------- uvdata : UVData object The UVData object to initialize from. gain_convention : str What gain convention the UVCal object should be initialized to ("multiply" or "divide"). cal_style : str What calibration style the UVCal object should be initialized to ("sky" or "redundant"). future_array_shapes : bool Option to use the future array shapes (see `use_future_array_shapes` for details). metadata_only : bool Option to only initialize the metadata. If False, this method also initializes the data-like arrays to zeros (or False for the flag_array) with the appropriate sizes. include_uvdata_history : bool Option to include the history from the uvdata object in the uvcal history. cal_type : str What cal_type the UVCal object should be initialized to ("gain", or "delay"). times : array_like of float, optional Calibration times in decimal Julian date. If None, use all unique times from uvdata. integration_time : float or array_like of float, optional Calibration integration time in seconds, an array of shape (Ntimes,) or a scalar if `future_array_shapes` is False. Required if `time_array` is not None, ignored otherwise. 
time_range : array_like of float, optional Range of times that calibration is valid for in decimal Julian dates, shape (2,). Should only be set if `time_array` is size (1,) frequencies : array_like of float, optional Calibration frequencies (units Hz), shape (Nfreqs,). Defaulted to the freq_array from uvdata if `cal_type="gain"` and `wide_band` is not set to `True`. Ignored if `cal_type="delay"` or `wide_band=True`. channel_width : float or array_like of float, optional Calibration channel width in Hz, an array of shape (Nfreqs,) or a scalar if `future_array_shapes` is False. Required if freq_array is not None and `cal_type="gain"` and `wide_band` is not set to `True`, ignored otherwise. flex_spw : bool, optional Option to use flexible spectral windows. Ignored if freq_array is None or `cal_type="delay"` or `wide_band=True`. flex_spw_id_array : array_like of int, optional Array giving the spectral window value for each frequency channel, shape (Nfreqs,). Ignored if freq_array is None or `cal_type="delay"` or `wide_band=True`. Required if freq_array is not None and flex_spw is True and `cal_type="gain"` and `wide_band` is not set to `True`. wide_band : bool, optional Option to use wide-band calibration. Requires `future_array_shapes` to be `True`. Defaulted to `True` if `future_array_shapes` is True and `cal_type="delay"`, defaulted to `False` otherwise. freq_range : array_like of float, optional Frequency range that solutions are valid for in Hz, shape (Nspws, 2) if `future_array_shapes` is True, shape (2,) otherwise. Defaulted to the min, max of freq_array if `wide_band` is True or `cal_type="delay"`. Defaulting is done per spectral window if uvdata has multiple spectral windows and `future_array_shapes` is True. spw_array : array_like of int, optional Array giving the spectral window numbers. Required if either `wide_band` is True or `cal_type="delay"` and if freq_range is not None and has multiple spectral windows, ignored otherwise. 
Defaulted to uvdata.spw_array if either `wide_band` is True or `cal_type="delay"` and if freq_range is None. jones : array_like of int, optional Calibration Jones elements. If None, defaults to [-5, -6] (jxx, jyy) if uvdata is in linear pol. [-1, -2] (jrr, jll) if uvdata is in circular pol. A ValueError is raised if jones_array is None and uvdata is in psuedo-stokes. ref_antenna_name : str, optional Phase reference antenna, required if cal_style = "sky". sky_catalog : str, optional Name of calibration catalog, required if cal_sky = "sky". sky_field : str, optional Short string describing field center or dominant source, required if cal_sky = "sky". diffuse_model : str, optional Name of diffuse model. baseline_range : array_like of float, optional Range of baselines used for calibration. Nsources : int, optional Number of sources used. observer : str, optional Name of observer who calculated calibration solutions. gain_scale : str, optional The gain scale of the calibration, which indicates the units of the calibrated visibilities. For example, Jy or K str. git_hash_cal : str, optional Commit hash of calibration software (from git_origin_cal) used to generate solutions. git_origin_cal : str, optional Origin (on github for e.g) of calibration software. Url and branch. extra_keywords : dict, optional Any user supplied extra keywords, type=dict. Raises ------ ValueError If cal_style is 'sky' and ref_antenna_name, sky_catalog or sky_field are not provided; if freq_array is not None, flex_spw is True and flex_spw_id_array is None; if freq_array and channel_width are None and the uvdata object does not use flexible spectral windows and the uvdata channel width varies; if time_array and integration_time are None and the uvdata integration time varies; if time_array is not None and integration_time is not specified or is the wrong type; if jones_array is None and uvdata is in psuedo-stokes. 
""" if not issubclass(type(uvdata), UVData): raise ValueError("uvdata must be a UVData (or subclassed) object.") uvc = cls() if cal_type not in ["delay", "gain"]: raise ValueError("cal_type must be either 'gain' or 'delay'.") if cal_type == "gain": uvc._set_gain() elif cal_type == "delay": uvc._set_delay() if future_array_shapes: uvc._set_future_array_shapes() if wide_band is not None: uvc._set_wide_band(wide_band=wide_band) uvc.cal_style = cal_style uvc.gain_convention = gain_convention if cal_style == "sky" and ( ref_antenna_name is None or sky_catalog is None or sky_field is None ): raise ValueError( "If cal_style is 'sky', ref_antenna_name, sky_catalog and sky_field " "must all be provided." ) if ref_antenna_name is not None: uvc.ref_antenna_name = ref_antenna_name if sky_catalog is not None: uvc.sky_catalog = sky_catalog if sky_field is not None: uvc.sky_field = sky_field if diffuse_model is not None: uvc.diffuse_model = diffuse_model if baseline_range is not None: uvc.baseline_range = baseline_range if Nsources is not None: uvc.Nsources = Nsources if observer is not None: uvc.observer = observer if gain_scale is not None: uvc.gain_scale = gain_scale if git_hash_cal is not None: uvc.git_hash_cal = git_hash_cal if git_origin_cal is not None: uvc.git_origin_cal = git_origin_cal if extra_keywords is not None: uvc.extra_keywords = extra_keywords params_to_copy = [ "telescope_name", "telescope_location", "antenna_numbers", "antenna_names", "antenna_positions", "Nants_telescope", "Nants_data", "x_orientation", ] if uvc.cal_type != "delay" and not uvc.wide_band: if frequencies is None: params_to_copy.extend(["Nfreqs", "flex_spw", "spw_array", "Nspws"]) if uvdata.flex_spw: uvc._set_flex_spw() if uvdata.future_array_shapes == uvc.future_array_shapes: params_to_copy.extend(["freq_array"]) else: if uvc.future_array_shapes: uvc.freq_array = uvdata.freq_array[0, :] else: uvc.freq_array = uvdata.freq_array[np.newaxis, :] if ( uvdata.flex_spw or uvdata.future_array_shapes == 
uvc.future_array_shapes ): params_to_copy.extend(["channel_width"]) else: if uvc.future_array_shapes: uvc.channel_width = np.full( uvc.freq_array.size, uvdata.channel_width, dtype=np.float64 ) else: uvdata_channel_widths = np.unique(uvdata.channel_width) if uvdata_channel_widths.size == 1: uvc.channel_width = uvdata_channel_widths[0] else: raise ValueError( "uvdata has varying channel widths but does not have " "flexible spectral windows and future_array_shapes is " "False. Please specify frequencies and channel_width." ) if uvdata.flex_spw: params_to_copy.extend(["flex_spw_id_array"]) else: if frequencies.ndim != 1: raise ValueError("Frequencies must be a 1 dimensional array") if future_array_shapes: uvc.freq_array = frequencies else: uvc.freq_array = frequencies[np.newaxis, :] uvc.Nfreqs = frequencies.size if flex_spw: uvc._set_flex_spw() if flex_spw_id_array is None: raise ValueError( "If frequencies is provided and flex_spw is True, a " "flex_spw_id_array must be provided." ) uvc.flex_spw_id_array = flex_spw_id_array uvc.spw_array = np.unique(uvc.flex_spw_id_array) uvc.Nspws = uvc.spw_array.size else: uvc.spw_array = np.array([0]) uvc.Nspws = 1 if channel_width is None: raise ValueError( "channel_width must be provided if frequencies is provided" ) if future_array_shapes or flex_spw: if isinstance(channel_width, (np.ndarray, list)): uvc.channel_width = np.asarray(channel_width) else: uvc.channel_width = np.full( uvc.Nfreqs, channel_width, dtype=np.float64 ) else: if isinstance(channel_width, (np.ndarray, list)): raise ValueError( "channel_width must be scalar if both future_array_shapes " "and flex_spw are False." 
) uvc.channel_width = channel_width else: uvc.Nfreqs = 1 if freq_range is None: if uvc.future_array_shapes: params_to_copy.extend(["spw_array", "Nspws"]) if uvdata.flex_spw: uvc.freq_range = np.zeros((uvdata.Nspws, 2), dtype=float) for spw_ind, spw in enumerate(uvdata.spw_array): if uvdata.future_array_shapes: freqs_in_spw = uvdata.freq_array[ np.nonzero(uvdata.flex_spw_id_array == spw) ] else: freqs_in_spw = uvdata.freq_array[ 0, np.nonzero(uvdata.flex_spw_id_array == spw) ] uvc.freq_range[spw_ind, :] = np.asarray( [np.min(freqs_in_spw), np.max(freqs_in_spw)] ) else: uvc.freq_range = np.asarray( [[np.min(uvdata.freq_array), np.max(uvdata.freq_array)]] ) else: uvc.Nspws = 1 uvc.spw_array = np.asarray([0]) uvc.freq_range = [ np.min(uvdata.freq_array), np.max(uvdata.freq_array), ] else: freq_range_use = np.asarray(freq_range) if future_array_shapes: if freq_range_use.shape == (2,): freq_range_use = freq_range_use[np.newaxis, :] if freq_range_use.ndim != 2 or freq_range_use.shape[1] != 2: raise ValueError( "if future_array_shapes is True, freq_range must be an " "array shaped like (Nspws, 2)." ) uvc.freq_range = freq_range_use uvc.Nspws = uvc.freq_range.shape[0] if uvc.Nspws > 1: if spw_array is None: raise ValueError( "An spw_array must be provided for delay or wide-band " "cals if freq_range has multiple spectral windows" ) uvc.spw_array = spw_array else: uvc.spw_array = np.asarray([0]) else: uvc.Nspws = 1 uvc.spw_array = np.asarray([0]) if freq_range_use.size == 2: uvc.freq_range = np.squeeze(freq_range_use).tolist() else: raise ValueError( "if future_array_shapes is False, freq_range must have " "2 elements." 
) for param_name in params_to_copy: setattr(uvc, param_name, getattr(uvdata, param_name)) # sort the antenna information (the order in the UVData object may be strange) ant_order = np.argsort(uvc.antenna_numbers) uvc.antenna_numbers = uvc.antenna_numbers[ant_order] uvc.antenna_names = ((np.asarray(uvc.antenna_names))[ant_order]).tolist() uvc.antenna_positions = uvc.antenna_positions[ant_order, :] if times is None: # get all unique times uvc.time_array = np.unique(uvdata.time_array) uvdata_int_times = np.unique(uvdata.integration_time) if uvdata_int_times.size == 1: uvdata_int_times = uvdata_int_times[0] if uvc.future_array_shapes: uvc.integration_time = np.full( uvc.time_array.size, uvdata_int_times, dtype=np.float64 ) else: uvc.integration_time = uvdata_int_times else: raise ValueError( "uvdata integration times vary. Please specify times and " "integration_time" ) else: uvc.time_array = times if integration_time is None: raise ValueError( "integation_time must be provided if times is provided" ) if future_array_shapes: if isinstance(integration_time, (np.ndarray, list)): uvc.integration_time = np.asarray(integration_time) else: uvc.integration_time = np.full( uvc.time_array.size, integration_time, dtype=np.float64 ) else: if isinstance(integration_time, (np.ndarray, list)): raise ValueError( "integration_time must be scalar if future_array_shapes is " "False." 
) uvc.integration_time = integration_time uvc.Ntimes = uvc.time_array.size uvc.set_lsts_from_time_array() if time_range is not None: uvc.time_range = time_range if jones is None: if np.all(uvdata.polarization_array < -4): if uvdata.Npols == 1 and uvdata.polarization_array[0] > -7: # single pol data, make a single pol cal object uvc.jones_array = uvdata.polarization_array else: uvc.jones_array = np.array([-5, -6]) elif np.all(uvdata.polarization_array < 0): if uvdata.Npols == 1 and uvdata.polarization_array[0] > -3: # single pol data, make a single pol cal object uvc.jones_array = uvdata.polarization_array else: uvc.jones_array = np.array([-1, -2]) else: raise ValueError( "jones parameter is None and uvdata object is in " "psuedo-stokes polarization. Please set jones." ) else: uvc.jones_array = np.asarray(jones) uvc.Njones = uvc.jones_array.size uvc.ant_array = np.union1d(uvdata.ant_1_array, uvdata.ant_2_array) uvc.history = "Initialized from a UVData object with pyuvdata." if include_uvdata_history: uvc.history += " UVData history is: " + uvdata.history if not metadata_only: for param in uvc._required_data_params: uvparam = getattr(uvc, "_" + param) expected_type = uvparam.expected_type # all data like params on UVCal have expected types that are tuples. # since uvc is re-initialized at the start of this method, the user # can't affect this, so don't need handling for non-tuples dtype_use = expected_type[0] setattr( uvc, param, np.zeros(uvparam.expected_shape(uvc), dtype=dtype_use), ) uvc.check() return uvc def read_calfits( self, filename, read_data=True, run_check=True, check_extra=True, run_check_acceptability=True, ): """ Read in data from calfits file(s). Parameters ---------- filename : str or list of str The calfits file(s) to read from. read_data : bool Read in the gains or delays, quality arrays and flag arrays. If set to False, only the metadata will be read in. Setting read_data to False results in a metadata only object. 
run_check : bool Option to check for the existence and proper shapes of parameters after reading in the file. check_extra : bool Option to check optional parameters as well as required ones. run_check_acceptability : bool Option to check acceptable range of the values of parameters after reading in the file. """ from . import calfits if isinstance(filename, (list, tuple)): self.read_calfits( filename[0], read_data=read_data, run_check=run_check, check_extra=check_extra, run_check_acceptability=run_check_acceptability, ) if len(filename) > 1: for f in filename[1:]: uvcal2 = UVCal() uvcal2.read_calfits( f, read_data=read_data, run_check=run_check, check_extra=check_extra, run_check_acceptability=run_check_acceptability, ) self += uvcal2 del uvcal2 else: calfits_obj = calfits.CALFITS() calfits_obj.read_calfits( filename, read_data=read_data, run_check=run_check, check_extra=check_extra, run_check_acceptability=run_check_acceptability, ) self._convert_from_filetype(calfits_obj) del calfits_obj def read_fhd_cal( self, cal_file, obs_file, layout_file=None, settings_file=None, raw=True, read_data=True, extra_history=None, run_check=True, check_extra=True, run_check_acceptability=True, ): """ Read data from an FHD cal.sav file. Parameters ---------- cal_file : str or list of str The cal.sav file or list of files to read from. obs_file : str or list of str The obs.sav file or list of files to read from. layout_file : str The FHD layout file. Required for antenna_positions to be set. settings_file : str or list of str, optional The settings_file or list of files to read from. Optional, but very useful for provenance. raw : bool Option to use the raw (per antenna, per frequency) solution or to use the fitted (polynomial over phase/amplitude) solution. Default is True (meaning use the raw solutions). read_data : bool Read in the gains, quality array and flag data. If set to False, only the metadata will be read in. Setting read_data to False results in a metadata only object. 
Note that if read_data is False, metadata is derived entirely from the obs_file, which may result in slightly different values than if it is derived from the cal file. extra_history : str or list of str, optional String(s) to add to the object's history parameter. run_check : bool Option to check for the existence and proper shapes of parameters after reading in the file. check_extra : bool Option to check optional parameters as well as required ones. run_check_acceptability : bool Option to check acceptable range of the values of parameters after reading in the file. """ from . import fhd_cal if isinstance(cal_file, (list, tuple)): if isinstance(obs_file, (list, tuple)): if len(obs_file) != len(cal_file): raise ValueError( "Number of obs_files must match number of cal_files" ) else: raise ValueError("Number of obs_files must match number of cal_files") if layout_file is not None: if isinstance(layout_file, (list, tuple)): if len(layout_file) != len(cal_file): raise ValueError( "Number of layout_files must match number of cal_files" ) else: raise ValueError( "Number of layout_files must match number of cal_files" ) layout_file_use = layout_file[0] else: layout_file_use = None if settings_file is not None: if isinstance(settings_file, (list, tuple)): if len(settings_file) != len(cal_file): raise ValueError( "Number of settings_files must match number of cal_files" ) else: raise ValueError( "Number of settings_files must match number of cal_files" ) settings_file_use = settings_file[0] else: settings_file_use = None self.read_fhd_cal( cal_file[0], obs_file[0], layout_file=layout_file_use, settings_file=settings_file_use, raw=raw, read_data=read_data, extra_history=extra_history, run_check=run_check, check_extra=check_extra, run_check_acceptability=run_check_acceptability, ) if len(cal_file) > 1: for ind, f in enumerate(cal_file[1:]): uvcal2 = UVCal() if settings_file is not None: settings_file_use = settings_file[ind + 1] if layout_file is not None: layout_file_use = 
layout_file[ind + 1] uvcal2.read_fhd_cal( f, obs_file[ind + 1], layout_file=layout_file_use, settings_file=settings_file_use, raw=raw, read_data=read_data, extra_history=extra_history, run_check=run_check, check_extra=check_extra, run_check_acceptability=run_check_acceptability, ) self += uvcal2 del uvcal2 else: if isinstance(obs_file, (list, tuple)): raise ValueError("Number of obs_files must match number of cal_files") if layout_file is not None: if isinstance(layout_file, (list, tuple)) and len(layout_file) > 1: raise ValueError( "Number of layout_files must match number of cal_files" ) if settings_file is not None: if isinstance(settings_file, (list, tuple)) and len(settings_file) > 1: raise ValueError( "Number of settings_files must match number of cal_files" ) fhd_cal_obj = fhd_cal.FHDCal() fhd_cal_obj.read_fhd_cal( cal_file, obs_file, layout_file=layout_file, settings_file=settings_file, raw=raw, read_data=read_data, extra_history=extra_history, run_check=run_check, check_extra=check_extra, run_check_acceptability=run_check_acceptability, ) self._convert_from_filetype(fhd_cal_obj) del fhd_cal_obj def write_calfits( self, filename, run_check=True, check_extra=True, run_check_acceptability=True, clobber=False, ): """ Write the data to a calfits file. Parameters ---------- filename : str The calfits file to write to. run_check : bool Option to check for the existence and proper shapes of parameters before writing the file. check_extra : bool Option to check optional parameters as well as required ones. run_check_acceptability : bool Option to check acceptable range of the values of parameters before writing the file. clobber : bool Option to overwrite the filename if the file already exists. Raises ------ ValueError If the UVCal object is a metadata only object. """ if self.metadata_only: raise ValueError( "Cannot write out metadata only objects to a calfits file." 
) calfits_obj = self._convert_to_filetype("calfits") calfits_obj.write_calfits( filename, run_check=run_check, check_extra=check_extra, run_check_acceptability=run_check_acceptability, clobber=clobber, ) del calfits_obj
HERA-Team/pyuvdata
pyuvdata/uvcal/uvcal.py
Python
bsd-2-clause
170,532
from django.apps import AppConfig
# NOTE(review): ugettext_lazy was removed in Django 4.0 (use gettext_lazy
# there) -- confirm the project's Django version before upgrading.
from django.utils.translation import ugettext_lazy as _


class AuthConfig(AppConfig):
    """Django application configuration for the ``esia_auth`` app."""

    # Dotted-path label Django uses to identify this application.
    name = 'esia_auth'
    # Human-readable (translatable) name shown e.g. in the Django admin.
    verbose_name = _('Esia META')
wolfstein9119/django-esia-auth
esia_auth/apps.py
Python
bsd-2-clause
178
import os, unicodedata

from django.utils.translation import ugettext_lazy as _
from django.core.files.storage import FileSystemStorage
from django.db.models.fields.files import FileField
from django.core.files.storage import default_storage
from django.conf import settings
from django.utils.safestring import mark_safe


class AdminThumbnailMixin(object):
    """Admin mixin that renders an easy-thumbnails preview column.

    Subclasses may override ``thumbnail_options`` (easy-thumbnails options
    dict), ``thumbnail_image_field_name`` (name of the model field holding
    the image) and ``thumbnail_alt_field_name`` (name of the model field
    supplying the ``alt`` text, or None for no alt attribute).
    """

    thumbnail_options = {'size': (60, 60)}
    thumbnail_image_field_name = 'image'
    thumbnail_alt_field_name = None

    def _thumb(self, image, options=None, alt=None):
        """Return a safe ``<img>`` tag for *image* thumbnailed with *options*.

        ``options`` defaults to ``{'size': (60, 60)}``; a ``None`` sentinel
        replaces the previous mutable-dict default argument (shared-state
        pitfall) while preserving the effective default.
        """
        from easy_thumbnails.files import get_thumbnailer
        if options is None:
            options = {'size': (60, 60)}
        media = getattr(settings, 'THUMBNAIL_MEDIA_URL', settings.MEDIA_URL)
        attrs = []
        try:
            src = "%s%s" % (media, get_thumbnailer(image).get_thumbnail(options))
        except Exception:
            # Thumbnail generation is best-effort; fall back to an empty src.
            # (Was a bare ``except:`` which also swallowed SystemExit.)
            src = ""
        if alt is not None:
            attrs.append('alt="%s"' % alt)
        return mark_safe('<img src="%s" %s />' % (src, " ".join(attrs)))

    def thumbnail(self, obj):
        """Admin list-display callable: thumbnail ``<img>`` tag for *obj*."""
        kwargs = {'options': self.thumbnail_options}
        if self.thumbnail_alt_field_name:
            kwargs['alt'] = getattr(obj, self.thumbnail_alt_field_name)
        return self._thumb(getattr(obj, self.thumbnail_image_field_name), **kwargs)

    thumbnail.allow_tags = True
    thumbnail.short_description = _('Thumbnail')


def file_cleanup(sender, **kwargs):
    """
    File cleanup callback used to emulate the old delete behavior using
    signals. Initially django deleted linked files when an object containing
    a File/ImageField was deleted.

    Usage:
    >>> from django.db.models.signals import post_delete
    >>> post_delete.connect(file_cleanup, sender=MyModel, dispatch_uid="mymodel.file_cleanup")
    """
    for fieldname in sender._meta.get_all_field_names():
        try:
            field = sender._meta.get_field(fieldname)
        except Exception:
            # Some names returned by get_all_field_names() (e.g. reverse
            # relations) raise here; treat them as "not a file field".
            field = None
        if field and isinstance(field, FileField):
            inst = kwargs['instance']
            f = getattr(inst, fieldname)
            m = inst.__class__._default_manager
            # Only delete the file when no other row still references the
            # same stored file (excluding the instance being deleted).
            if hasattr(f, 'path') and os.path.exists(f.path) \
            and not m.filter(**{'%s__exact' % fieldname: getattr(inst, fieldname)})\
            .exclude(pk=inst._get_pk_val()):
                try:
                    default_storage.delete(f.path)
                except Exception:
                    # Best-effort cleanup: a missing or undeletable file
                    # must not abort the model deletion itself.
                    pass


class ASCIISafeFileSystemStorage(FileSystemStorage):
    """
    Same as FileSystemStorage, but converts unicode characters in file name
    to ASCII characters before saving the file. This is mostly useful for
    the non-English world.

    Usage (settings.py):
    >>> DEFAULT_FILE_STORAGE = 'webcore.utils.storage.ASCIISafeFileSystemStorage'
    """
    def get_valid_name(self, name):
        # NFKD-decompose, replace spaces with underscores, then drop any
        # byte that does not survive the ASCII encoding. (Python 2 only:
        # uses the `unicode` builtin.)
        name = unicodedata.normalize('NFKD', unicode(name.replace(' ', '_'))).encode('ascii', 'ignore')
        return super(ASCIISafeFileSystemStorage, self).get_valid_name(name)
Krozark/django-slider
slider/utils.py
Python
bsd-2-clause
3,032
#
# This file is part of pysmi software.
#
# Copyright (c) 2015-2020, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysmi/license.html
#
class AbstractSearcher(object):
    """Base class for searcher backends.

    Concrete subclasses implement :meth:`fileExists`; this base only
    provides the fluent option-setting helper.
    """

    def setOptions(self, **kwargs):
        """Assign each keyword argument as an instance attribute.

        Returns self so calls can be chained.
        """
        for name, value in kwargs.items():
            setattr(self, name, value)
        return self

    def fileExists(self, mibname, mtime, rebuild=False):
        """Check for an existing artifact for *mibname*.

        Must be overridden by subclasses; presumably *mtime* is compared
        against the stored artifact's timestamp and *rebuild* forces a miss
        -- confirm against concrete searcher implementations.
        """
        raise NotImplementedError()
etingof/pysmi
pysmi/searcher/base.py
Python
bsd-2-clause
405
""" Test rest_framework_json_api's utils functions. """ from rest_framework_json_api import utils from ..serializers import EntrySerializer from ..tests import TestBase class GetRelatedResourceTests(TestBase): """ Ensure the `get_related_resource_type` function returns correct types. """ def test_reverse_relation(self): """ Ensure reverse foreign keys have their types identified correctly. """ serializer = EntrySerializer() field = serializer.fields['comments'] self.assertEqual(utils.get_related_resource_type(field), 'comments') def test_m2m_relation(self): """ Ensure m2ms have their types identified correctly. """ serializer = EntrySerializer() field = serializer.fields['authors'] self.assertEqual(utils.get_related_resource_type(field), 'authors')
Instawork/django-rest-framework-json-api
example/tests/test_utils.py
Python
bsd-2-clause
882
# created by Angus Clark 9/2/17 updated 27/2/17
# Receives a JSON measurement blob over TCP, tags it with the sender's IP,
# runs a hop-by-hop traceroute back to the sender, and stores the result as
# /home/ubuntu/data/<user id>/<timestamp>.json
import socket
import os
import json
import my_traceroute

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = '130.56.253.43'
port = 5201  # Change port (must enable security settings of server)
s.bind((host, port))
s.listen(5)
MAX_HOPS = 30  # max hops for traceroute

while True:
    c, addr = s.accept()  # accept incoming connection
    # Accumulate the payload in memory; the previous on-disk temp.json
    # round-trip (noted as unnecessary in the original TODO) also leaked a
    # file handle and was unsafe with concurrent senders.
    chunks = []
    l = c.recv(1024)
    while l:
        chunks.append(l)
        l = c.recv(1024)
    c.close()

    info = json.loads(b''.join(chunks))
    info["UserInfo"]["ip"] = addr[0]  # store ip address of sender

    # Trace back toward the sender one TTL at a time, stopping on failure or
    # when the same node answers twice in a row (route has converged).
    last_addr = '0.0.0.0'  # placeholder for first iteration
    for hop in range(1, MAX_HOPS):
        result = my_traceroute.traceroute(hop, info["UserInfo"]["ip"])
        if result == -1:
            break
        if result[1] == last_addr:
            break
        info["TRACEROUTE"][str(result[0])] = {'node': result[1], 'rtt': result[2]}
        last_addr = result[1]

    user_id = info["UserInfo"]["user id"]  # renamed: `id` shadowed a builtin
    timestamp = info["UserInfo"]["timestamp"]
    # Create the directory with os.makedirs instead of os.system("mkdir ...")
    # -- the shell call was vulnerable to command injection through the
    # attacker-controlled user id. basename() additionally blocks "../"
    # path traversal in the supplied id.
    user_dir = os.path.join('/home/ubuntu/data', os.path.basename(user_id))
    if not os.path.isdir(user_dir):
        os.makedirs(user_dir)
    with open(os.path.join(user_dir, timestamp + '.json'), 'w') as savefile:
        json.dump(info, savefile)
wmizzi/tn2capstone
ServerScript/recievestore.py
Python
bsd-2-clause
1,705
from datetime import timedelta
from propeller.cookie import Cookie
from propeller.options import Options
from propeller.template import Template
from propeller.util.dict import MultiDict
from urllib import quote

import httplib

import propeller


class Response(object):
    """An HTTP/1.0 response: status, headers, cookies and a body."""

    def __init__(self, body='', status_code=200, content_type='text/html'):
        self.body = body
        self.status_code = status_code
        self.headers = MultiDict()
        self.cookies = []
        self.headers['Content-Type'] = content_type

    def _get_status_code(self):
        return self._status_code

    def _set_status_code(self, status_code):
        # Validation inherited from the original: only 200-500 accepted.
        assert 200 <= status_code <= 500, \
            'status_code must be an int between 200 and 500'
        self._status_code = status_code

    def _get_body(self):
        return self._body

    def _set_body(self, body):
        assert isinstance(body, (basestring, Template)), \
            'body must be an instance of basestring or Template'
        # Templates are rendered to a string immediately on assignment.
        self._body = body if isinstance(body, basestring) else str(body)

    def _build_headers(self):
        """Serialize status line, headers and cookies into raw header text."""
        self.headers['Content-Length'] = len(self.body)
        # MultiDict values are lists; fall back to a default Content-Type
        # when the header is missing or its first value is empty.
        has_content_type = ('Content-Type' in self.headers and
                            self.headers['Content-Type'][0])
        if not has_content_type:
            self.headers['Content-Type'] = 'text/html; charset=utf-8'
        status_line = 'HTTP/1.0 %d %s' % (self.status_code,
                                          httplib.responses[self.status_code])
        lines = [status_line]
        lines.extend('%s: %s' % (k, v) for k, v in self.headers.items())
        lines.extend('Set-Cookie: %s' % str(c) for c in self.cookies)
        return '\r\n'.join(lines) + '\r\n\r\n'

    def _error_page(self, title, subtitle='', traceback=None):
        """Render the shared error template for error responses."""
        t = Options.tpl_env.get_template('error.html')
        return t.render(
            title=title,
            subtitle=subtitle,
            traceback=traceback,
            version=propeller.__version__
        )

    def set_cookie(self, name, value, domain=None, expires=None, path=None,
                   secure=False):
        """Queue a cookie to be emitted as a Set-Cookie header."""
        self.cookies.append(Cookie(name=name, value=value, domain=domain,
                                   expires=expires, path=path, secure=secure))

    def __str__(self):
        return self._build_headers() + self.body

    status_code = property(_get_status_code, _set_status_code)
    body = property(_get_body, _set_body)


class RedirectResponse(Response):
    """301/302 redirect to `redirect_url` (301 when `permanent`)."""

    def __init__(self, redirect_url, permanent=False, *args, **kwargs):
        status_code = 301 if permanent else 302
        super(RedirectResponse, self).__init__(status_code=status_code,
                                               *args, **kwargs)
        self.redirect_url = redirect_url

    def __str__(self):
        # Only set Location if the caller has not provided one explicitly.
        if 'Location' not in self.headers:
            self.headers['Location'] = self.redirect_url
        return super(RedirectResponse, self).__str__()


class BadRequestResponse(Response):
    """400 response; renders the error page in debug mode when body is empty."""

    def __init__(self, *args, **kwargs):
        super(BadRequestResponse, self).__init__(status_code=400,
                                                 *args, **kwargs)

    def __str__(self):
        if not self.body and Options.debug:
            self.body = self._error_page(httplib.responses[self.status_code])
        return super(BadRequestResponse, self).__str__()


class NotFoundResponse(Response):
    """404 response; shows the requested URL on the debug error page."""

    def __init__(self, url=None, *args, **kwargs):
        super(NotFoundResponse, self).__init__(status_code=404,
                                               *args, **kwargs)
        self.url = url

    def __str__(self):
        if not self.body and Options.debug:
            self.body = self._error_page(httplib.responses[self.status_code],
                                         self.url)
        return super(NotFoundResponse, self).__str__()


class InternalServerErrorResponse(Response):
    """500 response; shows title/subtitle/traceback on the debug error page."""

    def __init__(self, title, subtitle, traceback, *args, **kwargs):
        super(InternalServerErrorResponse, self).__init__(status_code=500,
                                                          *args, **kwargs)
        self.title = title
        self.subtitle = subtitle
        self.traceback = traceback

    def __str__(self):
        if not self.body and Options.debug:
            self.body = self._error_page(self.title, self.subtitle,
                                         self.traceback)
        return super(InternalServerErrorResponse, self).__str__()
bwind/propeller
propeller/response.py
Python
bsd-2-clause
4,589
import imp
import os.path
import pkgutil
import unittest

import six

import streamlink.plugins
from streamlink import Streamlink


class PluginTestMeta(type):
    """Metaclass that adds one ``test_<plugin>_load`` method per plugin found
    in the streamlink.plugins package."""

    def __new__(mcs, name, bases, namespace):
        plugin_path = os.path.dirname(streamlink.plugins.__file__)

        plugins = []
        for _loader, pname, _ispkg in pkgutil.iter_modules([plugin_path]):
            file, pathname, desc = imp.find_module(pname, [plugin_path])
            module = imp.load_module(pname, file, pathname, desc)
            if hasattr(module, "__plugin__"):
                plugins.append(pname)

        session = Streamlink()

        def gentest(pname):
            def load_plugin_test(self):
                # Reset file variable to ensure it is still open when doing
                # load_plugin else python might open the plugin source .py
                # using ascii encoding instead of utf-8.
                # See also open() call here: imp._HackedGetData.get_data
                file, pathname, desc = imp.find_module(pname, [plugin_path])
                session.load_plugin(pname, file, pathname, desc)
                # validate that can_handle_url does not fail
                session.plugins[pname].can_handle_url("http://test.com")

            return load_plugin_test

        for pname in plugins:
            namespace['test_{0}_load'.format(pname)] = gentest(pname)

        return type.__new__(mcs, name, bases, namespace)


@six.add_metaclass(PluginTestMeta)
class TestPlugins(unittest.TestCase):
    """
    Test that each plugin can be loaded and does not fail when calling
    can_handle_url.
    """
back-to/streamlink
tests/test_plugins.py
Python
bsd-2-clause
1,609
"""Shortest game. When we play games, we always bet in one of two ways in each game: - betting one chip - betting all-in Wins are paid equal to the wager, so if we bet C chips and wins, we get 2C chips back. Suppose yesterday was a lucky day for us, we won every game we played. Starting with 1 chip and leaving the game with N chips. And we played all-in no more than K times. Given the integers N and K, return the minimum number of games that are necessary for us to play. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function def shortest_game(N, K): # Apply top-down recursion, which is efficient with no repetition. if N <= 2 or K == 0: # Base cases: If N is 1 or 2, or K is 0, bet N-1 times of 1 chip. return N - 1 if N % 2 == 0: # If N is even, bet 1 all-in, and # continue playing game for N//2 with K-1 all-in opportunities. return 1 + shortest_game(N // 2, K - 1) else: # If N is odd, bet 1 chip, and # continue playing game for N-1 with K all-in opportunities. return 1 + shortest_game(N - 1, K) def main(): # Output: 7 N = 8 K = 0 print(shortest_game(N, K)) # Output: 6 N = 18 K = 2 print(shortest_game(N, K)) # Output: 4 N = 10 K = 10 print(shortest_game(N, K)) # Output: 0 N = 1 K = 0 print(shortest_game(N, K)) # Output: 8 N = 100 K = 5 print(shortest_game(N, K)) if __name__ == '__main__': main()
bowen0701/algorithms_data_structures
alg_shortest_game.py
Python
bsd-2-clause
1,558
import hashlib import inspect import json import logging import re from xml.sax.saxutils import escape as xml_escape import webob from wsgiservice import xmlserializer from wsgiservice.decorators import mount from wsgiservice.exceptions import (MultiValidationException, ResponseException, ValidationException) from wsgiservice.status import * logger = logging.getLogger(__name__) class Resource(object): """Base class for all WsgiService resources. A resource is a unique REST endpoint which accepts different methods for different actions. For each HTTP call the corresponding method (equal to the HTTP method) will be called. """ #: The root tag for generated XML output. Used by :func:`to_text_xml`. #: (Default: 'response') XML_ROOT_TAG = 'response' #: List of the known HTTP methods. Used by :func:`get_method` to handle #: methods that are not implemented. (Default: All methods defined by the #: HTTP 1.1 standard :rfc:`2616`) KNOWN_METHODS = ['OPTIONS', 'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'CONNECT'] #: List of tuples mapping file extensions to MIME types. The first item of #: the tuple is the extension and the second is the associated MIME type. #: Used by :func:`get_content_type` to determine the requested MIME type. #: (Default: '.xml' and '.json'). EXTENSION_MAP = [ ('.xml', 'text/xml'), ('.json', 'application/json'), ] #: A tuple of exceptions that should be treated as 404. An ideal candidate #: is KeyError if you do dictionary accesses. Used by :func:`call` which #: calls :func:`handle_exception_404` whenever an exception from this #: tuple occurs. (Default: Empty tuple) NOT_FOUND = () #: A tuple of absolute paths that should return a 404. By default this is #: used to ignored requests for favicon.ico and robots.txt so that #: browsers don't cause too many exceptions. IGNORED_PATHS = ('/favicon.ico', '/robots.txt') #: Whether the input parameters from GET and POST should be decoded #: according to the encoding specified by the request. 
This should only be #: changed to False if the input is supposed to be byte values. (Default: #: True) DECODE_PARAMS = True #: Object representing the current request. Set by the constructor. request = None #: Object representing the current response. Set by the constructor. response = None #: Dictionary with the path parameters. Set by the constructor. path_params = None #: String with the current path. Same as request.path except the extension #: is removed. So instead of `/movies.json` it is just `/movies`. Set by #: the constructor. request_path = None #: Reference to the application. Set by the constructor. application = None #: Charset to output in the Content-Type headers. Set to None to avoid #: sending this. charset = 'UTF-8' # Cache for the `data` property _data = None def __init__(self, request, response, path_params, application=None): """Constructor. Order of the parameters is not guaranteed, always used named parameters. :param request: Object representing the current request. :type request: :class:`webob.Request` :param response: Object representing the response to be sent. :type response: :class:`webob.Response` :param path_params: Dictionary of all parameters passed in via the path. This is the return value of :func:`Router.__call__`. :type path_params: dict :param application: Reference to the application which is calling this resource. Can be used to reference other resources or properties of the application itself. :type path_params: :class:`wsgiservice.Application` """ self.request = request self.response = response self.path_params = path_params self.application = application self.request_path = '' if request: self.request_path = request.path if path_params and path_params.get('_extension'): ext = path_params['_extension'] if self.request_path.endswith(ext): self.request_path = self.request_path[0:-len(ext)] def OPTIONS(self): """Default implementation of the OPTIONS verb. 
Outputs a list of allowed methods on this resource in the ``Allow`` response header. """ self.response.headers['Allow'] = self.get_allowed_methods() def __call__(self): """Main entry point for calling this resource. Handles the method dispatching, response conversion, etc. for this resource. Catches all exceptions: - :class:`webob.exceptions.ResponseException`: Replaces the instance's response attribute with the one from the exception. - For all exceptions in the :attr:`NOT_FOUND` tuple :func:`handle_exception_404` is called. - :class:`wsgiservice.exceptions.ValidationException`: :func:`handle_exception_400` is called. - For all other exceptions deriving from the :class:`Exception` base class, the :func:`handle_exception` method is called. """ self.type = self.get_content_type() try: self.method = self.get_method() self.handle_ignored_resources() self.assert_conditions() self.response.body_raw = self.call_method(self.method) except ResponseException, e: # a response was raised, catch it self.response = e.response r = e.response if r.status_int == 404 and not r.body and not hasattr(r, 'body_raw'): self.handle_exception_404(e) except self.NOT_FOUND, e: self.handle_exception_404(e) except ValidationException, e: self.handle_exception_400(e) except Exception, e: self.handle_exception(e) self.convert_response() self.set_response_headers() return self.response @property def data(self): """Returns the request data as a dictionary. Merges the path parameters, GET parameters and POST parameters (form-encoded or JSON dictionary). If a key is present in multiple of these, the first one defined is used. """ if self._data: return self._data retval = {} data = self.get_request_data() for subdata in data: for key, value in subdata.iteritems(): if not key in retval: retval[key] = value self._data = retval return retval def get_resource(self, resource, **kwargs): """Returns a new instance of the resource class passed in as resource. 
This is a helper to make future-compatibility easier when new arguments get added to the constructor. :param resource: Resource class to instantiate. Gets called with the named arguments as required for the constructor. :type resource: :class:`Resource` :param kwargs: Additional named arguments to pass to the constructor function. :type kwargs: dict """ return resource(request=self.request, response=self.response, path_params=self.path_params, application=self.application, **kwargs) def get_method(self, method=None): """Returns the method to call on this instance as a string. Raises a HTTP exception if no method can be found. Aborts with a 405 status code for known methods (based on the :attr:`KNOWN_METHODS` list) and a 501 status code for all other methods. :param method: Name of the method to return. Must be all-uppercase. :type method: str :raises: :class:`webob.exceptions.ResponseException` of status 405 or 501 if the method is not implemented on this resource. """ if method is None: method = self.request.method if hasattr(self, method) and callable(getattr(self, method)): return method elif method == 'HEAD': return self.get_method('GET') # Error: did not find any method, raise a 405 or 501 exception elif method in self.KNOWN_METHODS: # Known HTTP methods => 405 Method Not Allowed raise_405(self) else: # Unknown HTTP methods => 501 Not Implemented raise_501(self) def get_content_type(self): """Returns the Content Type to serve from either the extension or the Accept headers. Uses the :attr:`EXTENSION_MAP` list for all the configured MIME types. """ extension = self.path_params.get('_extension') for ext, mime in self.EXTENSION_MAP: if ext == extension: return mime # Else: use the Accept headers if self.response.vary is None: self.response.vary = ['Accept'] else: self.response.vary.append('Accept') types = [mime for ext, mime in self.EXTENSION_MAP] ct = self.request.accept.best_match(types) # No best match found. 
The specification allows us to either return a # 406 or just use another format in this case. # We pick the default format, though that may become a configurable # behavior in the future. if not ct: ct = types[0] return ct def handle_ignored_resources(self): """Ignore robots.txt and favicon.ico GET requests based on a list of absolute paths in :attr:`IGNORED_PATHS`. Aborts the request with a 404 status code. This is mostly a usability issue to avoid extra log entries for resources we are not interested in. :raises: :class:`webob.exceptions.ResponseException` of status 404 if the resource is ignored. """ if (self.method in ('GET', 'HEAD') and self.request.path_qs in self.IGNORED_PATHS): raise_404(self) def assert_conditions(self): """Handles various HTTP conditions and raises HTTP exceptions to abort the request. - Content-MD5 request header must match the MD5 hash of the full input (:func:`assert_condition_md5`). - If-Match and If-None-Match etags are checked against the ETag of this resource (:func:`assert_condition_etag`). - If-Modified-Since and If-Unmodified-Since are checked against the modification date of this resource (:func:`assert_condition_last_modified`). .. todo:: Return a 501 exception when any Content-* headers have been set in the request. (See :rfc:`2616`, section 9.6) """ self.assert_condition_md5() etag = self.clean_etag(self.call_method('get_etag')) self.response.last_modified = self.call_method('get_last_modified') self.assert_condition_etag() self.assert_condition_last_modified() def assert_condition_md5(self): """If the ``Content-MD5`` request header is present in the request it's verified against the MD5 hash of the request body. If they don't match, a 400 HTTP response is returned. :raises: :class:`webob.exceptions.ResponseException` of status 400 if the MD5 hash does not match the body. 
""" if 'Content-MD5' in self.request.headers: body_md5 = hashlib.md5(self.request.body).hexdigest() if body_md5 != self.request.headers['Content-MD5']: raise_400(self, msg='Invalid Content-MD5 request header.') def assert_condition_etag(self): """If the resource has an ETag (see :func:`get_etag`) the request headers ``If-Match`` and ``If-None-Match`` are verified. May abort the request with 304 or 412 response codes. :raises: - :class:`webob.exceptions.ResponseException` of status 304 if the ETag matches the ``If-None-Match`` request header (GET/HEAD requests only). - :class:`webob.exceptions.ResponseException` of status 412 if the ETag matches the ``If-None-Match`` request header (for requests other than GET/HEAD) or the ETag does not match the ``If-Match`` header. """ if self.response.etag: etag = self.response.etag.replace('"', '') if not etag in self.request.if_match: raise_412(self, 'If-Match request header does not the resource ETag.') if etag in self.request.if_none_match: if self.request.method in ('GET', 'HEAD'): raise_304(self) else: raise_412(self, 'If-None-Match request header matches resource ETag.') def assert_condition_last_modified(self): """If the resource has a last modified date (see :func:`get_last_modified`) the request headers ``If-Modified-Since`` and ``If-Unmodified-Since`` are verified. May abort the request with 304 or 412 response codes. :raises: - :class:`webob.exceptions.ResponseException` of status 304 if the ``If-Modified-Since`` is later than the last modified date. - :class:`webob.exceptions.ResponseException` of status 412 if the last modified date is later than the ``If-Unmodified-Since`` header. 
""" rq = self.request rs = self.response if rs.last_modified: rsl = rs.last_modified if rq.if_modified_since and rsl <= rq.if_modified_since: raise_304(self) if rq.if_unmodified_since and rsl > rq.if_unmodified_since: raise_412(self, 'Resource is newer than the ' 'If-Unmodified-Since request header.') def get_etag(self): """Returns a string to be used as the ETag for this resource. Used to set the ``ETag`` response headers and for conditional requests using the ``If-Match`` and ``If-None-Match`` request headers. """ return None def clean_etag(self, etag): """Cleans the ETag as returned by :func:`get_etag`. Will wrap it in quotes and append the extension for the current MIME type. """ if etag: etag = etag.replace('"', '') extension = None for ext, mime in self.EXTENSION_MAP: if mime == self.type: extension = ext[1:] break if extension: etag += '_' + extension self.response.etag = etag def get_last_modified(self): """Return a :class:`datetime.datetime` object of the when the resource was last modified. Used to set the ``Last-Modified`` response header and for conditional requests using the ``If-Modified-Since`` and ``If-Unmodified-Since`` request headers. :rtype: :class:`datetime.datetime` """ return None def get_allowed_methods(self): """Returns a coma-separated list of method names that are allowed on this instance. Useful to set the ``Allowed`` response header. """ return ", ".join([method for method in dir(self) if method.upper() == method and callable(getattr(self, method))]) def call_method(self, method_name): """Call an instance method filling in all the method parameters based on their names. The parameters are filled in from the following locations (in that order of precedence): 1. Path parameters from routing 2. GET parameters 3. POST parameters All values are validated using the method :func:`validate_param`. The return value of the method is returned unaltered. :param method_name: Name of the method on the current instance to call. 
:type method_name: str """ params = [] method = getattr(self, method_name) method_params, varargs, varkw, defaults = inspect.getargspec(method) if method_params and len(method_params) > 1: method_params.pop(0) # pop the self off data = self._merge_defaults(self.data, method_params, defaults) params = self._get_validated_params(method, method_params, data) return self._call_method(method, params, method_params) def _call_method(self, method, params, method_params): """ Override this method to add additional validation. :param method: Method to be called. :param params: Validated parameter values for calling the method. :param method_params: The list of parameters in the method's signature. :return: The result of calling the method. """ return method(*params) def _get_validated_params(self, method, method_params, data): """ Returns a list of params to call the method with. The parameters are returned in the order as they are expected by the method. All parameters are validated and converted to the requested datatype, based on the configuration from the `@validate` decorator. :param method_params: The introspected parameters the method needs. :type method_params: List of parameter names. :param data: All key/value pairs passed in with the request. :type data: dict """ errors = {} params = [] for param in method_params: value = data.get(param) try: self.validate_param(method, param, value) value = self.convert_param(method, param, value) except ValidationException, e: # Collect all errors, so we can raise them as one consolidated # validation exception. errors[param] = e else: params.append(value) if not errors: return params # Raise consolidated exceptions. The message of the first error is used # as message of the new consolidated exception. 
raise MultiValidationException(errors, unicode(errors.values()[0]).encode('utf-8')) def validate_param(self, method, param, value): """Validates the parameter according to the configurations in the _validations dictionary of either the method or the instance. This dictionaries are written by the decorator :func:`wsgiservice.decorators.validate`. .. todo:: Allow validation by type (e.g. header, post, query, etc.) :param method: A function to get the validation information from (done using :func:`_get_validation`). :type method: Python function :param param: Name of the parameter to validate the value for. :type param: str :param value: Value passed in for the given parameter. :type value: Any valid Python value :raises: :class:`wsgiservice.exceptions.ValidationException` if the value is invalid for the given method and parameter. """ rules = self._get_validation(method, param) if not rules: return if rules.get('mandatory') and ( value is None or (isinstance(value, basestring) and len(value) == 0)): raise ValidationException("Missing value for {0}.".format(param)) elif rules.get('re') and (rules.get('mandatory') or value is not None): if not re.search('^' + rules['re'] + '$', value): raise ValidationException("Invalid value for {0}.".format(param)) def convert_param(self, method, param, value): """Converts the parameter using the function 'convert' function of the validation rules. Same parameters as the `validate_param` method, so it might have just been added there. But lumping together the two functionalities would make overwriting harder. :param method: A function to get the validation information from (done using :func:`_get_validation`). :type method: Python function :param param: Name of the parameter to validate the value for. :type param: str :param value: Value passed in for the given parameter. :type value: Any valid Python value :raises: :class:`wsgiservice.exceptions.ValidationException` if the value is invalid for the given method and parameter. 
""" rules = self._get_validation(method, param) has_convert = rules and rules.get('convert') if not has_convert or (value is None and not rules['mandatory']): return value try: return rules['convert'](value) except ValueError: raise ValidationException("Invalid value for {0}.".format(param)) def _get_validation(self, method, param): """Return the correct validations dictionary for this parameter. First checks the method itself and then its class. If no validation is defined for this parameter, None is returned. :param method: A function to get the validation information from. :type method: Python function :param param: Name of the parameter to get validation information for. :type param: str """ if hasattr(method, '_validations') and param in method._validations: return method._validations[param] elif (hasattr(method.im_class, '_validations') and param in method.im_class._validations): return method.im_class._validations[param] else: return None def convert_response(self): """Finish filling the instance's response object so it's ready to be served to the client. This includes converting the body_raw property to the content type requested by the user if necessary. """ if hasattr(self.response, 'body_raw'): if self.response.body_raw is not None: to_type = re.sub('[^a-zA-Z_]', '_', self.type) to_type_method = 'to_' + to_type if hasattr(self, to_type_method): self.response.body = getattr(self, to_type_method)( self.response.body_raw) del self.response.body_raw def to_application_json(self, raw): """Returns the JSON version of the given raw Python object. :param raw: The return value of the resource method. :type raw: Any valid Python value :rtype: string """ return json.dumps(raw) def to_text_xml(self, raw): """Returns the XML string version of the given raw Python object. Uses :func:`_get_xml_value` which applies some heuristics for converting data to XML. 
The default root tag is 'response', but that can be overwritten by changing the :attr:`XML_ROOT_TAG` instance variable. Uses :func:`wsgiservice.xmlserializer.dumps()` for the actual work. :param raw: The return value of the resource method. :type raw: Any valid Python value :rtype: string """ return xmlserializer.dumps(raw, self.XML_ROOT_TAG) def handle_exception(self, e, status=500): """Handle the given exception. Log, sets the response code and output the exception message as an error message. :param e: Exception which is being handled. :type e: :class:`Exception` :param status: Status code to set. :type status: int """ logger.exception( "An exception occurred while handling the request: %s", e) self.response.body_raw = {'error': unicode(e).encode('utf-8')} self.response.status = status def handle_exception_400(self, e): """Handle the given validation exception. Log, sets the response code to 400 and output all errors as a dictionary. :param e: Exception which is being handled. :type e: :class:`ValidationException`, usually its subclass :class:`MultiValidationException`. """ if isinstance(e, MultiValidationException): errors = dict([ (param, unicode(exc).encode('utf-8')) for param, exc in e.errors.iteritems() ]) logger.info("A 400 Bad Request exception occurred while " "handling the request", exc_info=True, extra={'errors': errors, 'e': e}) first_error = unicode(e.errors.values()[0]).encode('utf-8') self.response.body_raw = {'errors': errors, 'error': first_error} self.response.status = 400 else: # Use normal `handle_exception` as fallback. This point is probably # reached by legacy code only. self.handle_exception(e, status=400) def handle_exception_404(self, e): """Handle the given exception. Log, sets the response code to 404 and output the exception message as an error message. :param e: Exception which is being handled. 
:type e: :class:`Exception` """ logger.debug("A 404 Not Found exception occurred while handling " "the request.") self.response.body_raw = {'error': 'Not Found'} self.response.status = 404 def set_response_headers(self): """Sets all the calculated response headers.""" self.set_response_content_type() self.set_response_content_md5() def set_response_content_type(self): """Set the Content-Type in the response. Uses the :attr:`type` instance attribute which was set by :func:`get_content_type`. Also declares a UTF-8 charset. """ if self.response.body: ct = self.type if self.charset: ct += '; charset=' + self.charset self.response.headers['Content-Type'] = ct elif 'Content-Type' in self.response.headers: del self.response.headers['Content-Type'] def set_response_content_md5(self): """Set the Content-MD5 response header. Calculated from the the response body by creating the MD5 hash from it. """ self.response.content_md5 = hashlib.md5(self.response.body).hexdigest() def get_request_data(self): """ Read the input values. Returns a list of dictionaries. These will be used to automatically pass them into the method. Additionally a combined dictionary is written to `self.data`. In the case of JSON input, that element in this list will be the parsed JSON value. That may not be a dictionary. """ request_data = [self.path_params, self.request.GET] content_type = self.request.headers.get('Content-Type', '')\ .split(';')[0].strip() if content_type == 'application/json' and self.request.body: try: post = json.loads(self.request.body) except ValueError: raise_400(self, msg='Invalid JSON content data') if isinstance(post, dict): request_data.append(post) else: request_data.append(self.request.POST) return request_data def _merge_defaults(self, data, method_params, defaults): """Helper method for adding default values to the data dictionary. The `defaults` are the default values inspected from the method that will be called. 
        For any values that are not present in the incoming data, the
        default value is added.
        """
        # NOTE(review): this is the tail of a method whose "def" line lies
        # above this chunk; indentation reconstructed as a Resource method —
        # confirm against the full file.
        if defaults:
            # Defaults belong to the trailing parameters of the signature
            # (positional parameters with defaults always come last).
            optional_args = method_params[-len(defaults):]
            for key, value in zip(optional_args, defaults):
                if not key in data:
                    data[key] = value
        return data


@mount('/_internal/help')
class Help(Resource):
    """Provides documentation for all resources of the current application.

    .. todo:: Allow documentation of output.

    .. todo:: Use first sentence of docstring for summary, add bigger
        version at the bottom.
    """
    # '.html' is prepended so the browser-friendly representation wins for
    # requests that don't specify an extension or Accept preference.
    EXTENSION_MAP = [('.html', 'text/html')] + Resource.EXTENSION_MAP
    XML_ROOT_TAG = 'help'

    def GET(self):
        """Returns documentation for the application."""
        retval = []
        for res in self.application._resources:
            retval.append({
                'name': res.__name__,
                'desc': self._get_doc(res),
                'properties': {
                    'XML_ROOT_TAG': res.XML_ROOT_TAG,
                    'KNOWN_METHODS': res.KNOWN_METHODS,
                    # Strip the leading dot from each extension key.
                    'EXTENSION_MAP': dict((key[1:], value)
                        for key, value in res.EXTENSION_MAP),
                    'NOT_FOUND': [ex.__name__ for ex in res.NOT_FOUND],
                },
                'methods': self._get_methods(res),
                'path': self.request.script_name + res._path,
            })
        # Sort by name (decorate-sort-undecorate on the resource name).
        retval = [(r['name'], r) for r in retval]
        retval.sort()
        retval = [r[1] for r in retval]
        return retval

    def _get_methods(self, res):
        """Return a dictionary of method descriptions for the given
        resource.

        :param res: Resource class to get all HTTP methods from.
        :type res: :class:`webob.resource.Resource`
        """
        retval = {}
        # Instantiate the resource with a blank request purely to ask it
        # which HTTP methods it allows.
        inst = res(request=webob.Request.blank('/'),
                   response=webob.Response(), path_params={})
        methods = [m.strip() for m in inst.get_allowed_methods().split(',')]
        for method_name in methods:
            method = getattr(res, method_name)
            retval[method_name] = {
                'desc': self._get_doc(method),
                'parameters': self._get_parameters(res, method)}
        return retval

    def _get_doc(self, obj):
        """Returns a slightly modified (stripped) docstring for the given
        Python object. Returns an empty string if the object doesn't have
        any documentation.

        :param obj: Python object to get the docstring from.
        :type obj: A method or class.
        """
        doc = obj.__doc__
        if doc:
            return doc.strip()
        else:
            return ''

    def _get_parameters(self, res, method):
        """Return a parameters dictionary for the given resource/method.

        :param res: Resource class to get all HTTP methods from.
        :type res: :class:`webob.resource.Resource`
        :param method: The method to get parameters from.
        :type method: Python function
        """
        method_params, varargs, varkw, defaults = inspect.getargspec(method)
        if method_params:
            method_params.pop(0)  # pop the self off
        self._add_path_parameters(method_params, res)
        retval = {}
        for param in method_params:
            is_path_param = '{' + param + '}' in res._path
            validation = self._get_validation(method, param)
            retval[param] = {
                'path_param': is_path_param,
                # Path parameters and validated parameters are mandatory.
                'mandatory': is_path_param or validation,
                'validate_re': None,
                'desc': '',
            }
            if validation:
                retval[param]['validate_re'] = validation['re']
                retval[param]['desc'] = validation['doc'] or ''
        return retval

    def _add_path_parameters(self, method_params, res):
        """Extract all path parameters as they are always required even
        though some methods may not have them in their definition.

        :param method_params: Current list of parameters from the method.
        :type method_params: Ordered list of method parameter names.
        :param res: Resource class to get the path from.
        :type res: :class:`webob.resource.Resource`
        """
        # Mutates method_params in place: appends every {placeholder} from
        # the route path that the method signature does not already list.
        for param in re.findall('{([^}]+)}', res._path):
            if param not in method_params:
                method_params.append(param)

    def _get_xml_value(self, value):
        """Overwritten _get_xml_value which uses the tag 'resource' for
        list children. Calls :func:`Resource._get_xml_value` for all
        non-list values.

        :param value: The value to convert to HTML.
        :type raw: Any valid Python value
        """
        if isinstance(value, list):
            retval = []
            # 'key' is the (unused) index; 'value' is rebound to each
            # list element, shadowing the parameter.
            for key, value in enumerate(value):
                retval.append('<resource>')
                retval.append(self._get_xml_value(value))
                retval.append('</resource>')
            return "".join(retval)
        else:
            return Resource._get_xml_value(self, value)

    def to_text_html(self, raw):
        """Returns the HTML string version of the given raw Python object.
        Hard-coded to return a nicely-presented service information
        document.

        :param raw: The return value of the resource method.
        :type raw: Any valid Python object
        :rtype: string

        .. todo:: Treat paragraphs and/or newlines better in output.
        """
        # NOTE(review): the original line breaks inside this template were
        # lost in extraction and have been reconstructed; leading spaces on
        # every output line are stripped by the re.sub at the end of this
        # method, so string indentation is cosmetic.
        retval = ["""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
<title>Help Example</title>
<style>
/* YUI reset.css */
html{color:#000;background:#FFF;}body,div,dl,dt,dd,ul,ol,li,h1,h2,h3,h4,h5,h6,pre,code,form,fieldset,legend,input,button,textarea,p,blockquote,th,td{margin:0;padding:0;}table{border-collapse:collapse;border-spacing:0;}fieldset,img{border:0;}address,caption,cite,code,dfn,em,strong,th,var,optgroup{font-style:inherit;font-weight:inherit;}del,ins{text-decoration:none;}li{list-style:none;}caption,th{text-align:left;}h1,h2,h3,h4,h5,h6{font-size:100%;font-weight:normal;}q:before,q:after{content:'';}abbr,acronym{border:0;font-variant:normal;}sup{vertical-align:baseline;}sub{vertical-align:baseline;}legend{color:#000;}input,button,textarea,select,optgroup,option{font-family:inherit;font-size:inherit;font-style:inherit;font-weight:inherit;}input,button,textarea,select{*font-size:100%;}
/* YUI fonts.css */
body{font:13px/1.231 arial,helvetica,clean,sans-serif;*font-size:small;*font:x-small;}select,input,button,textarea,button{font:99%
arial,helvetica,clean,sans-serif;}table{font-size:inherit;font:100%;}pre,code,kbd,samp,tt{font-family:monospace;*font-size:108%;line-height:100%;}
/* YUI base.css */
body{margin:10px;}h1{font-size:138.5%;}h2{font-size:123.1%;}h3{font-size:108%;}h1,h2,h3{margin:1em 0;}h1,h2,h3,h4,h5,h6,strong,dt{font-weight:bold;}optgroup{font-weight:normal;}abbr,acronym{border-bottom:1px dotted #000;cursor:help;}em{font-style:italic;}del{text-decoration:line-through;}blockquote,ul,ol,dl{margin:1em;}ol,ul,dl{margin-left:2em;}ol li{list-style:decimal outside;}ul li{list-style:disc outside;}dl dd{margin-left:1em;}th,td{border:1px solid #000;padding:.5em;}th{font-weight:bold;text-align:center;}caption{margin-bottom:.5em;text-align:center;}sup{vertical-align:super;}sub{vertical-align:sub;}p,fieldset,table,pre{margin-bottom:1em;}button,input[type="checkbox"],input[type="radio"],input[type="reset"],input[type="submit"]{padding:1px;}
h2 {margin-top: 0;}
.resource_details {padding-top: 2em;border-top: 1px dotted #ccc;margin-top: 2em;}
.method_details {margin-left: 2em;}
/* JS form */
form { padding: 1em; border: 1px solid #ccc; }
input.error { background: #FCECEC; color: red; border: 1px solid red; }
label { font-weight: bold; float: left; width: 10em; }
p.form_element, input.submit { clear: left; }
div.result { margin-top: 1em; }
h4 { margin-bottom: 0.5em; }
a.add_input { margin: 0.5em; }
.hidden { display: none !important; }
a.toggle_details { margin-bottom: 1em; display: block; }
</style>
<script>
/**
 * Adds a resource's method - a form to the current location with the ability
 * to submit a request to the service filling in all the parameters.
 */
function add_resource_method(target, resource, method_name, method) {
    new ResourceMethodForm(target, resource, method_name, method);
}

function ResourceMethodForm(target, resource, method_name, method) {
    this.targetName = target;
    this.target = document.getElementById(target);
    this.resource = resource;
    this.method_name = method_name;
    this.method = method;
    this.init();
}

var pr = ResourceMethodForm.prototype;

pr.init = function() {
    var fragment = document.createDocumentFragment();
    var form = this.create_form(fragment);
    var input_container = document.createElement('div');
    this.input_container = input_container;
    form.appendChild(input_container);
    this.create_form_params(input_container);
    this.create_form_buttons(form);
    this.create_result_field(form);
    this.target.appendChild(fragment);
};

pr.create_form = function(parent) {
    var form = document.createElement('form');
    form.action = '';
    form.target = '_blank';
    var that = this;
    form.onsubmit = function() {
        return that.on_submit();
    };
    var h4 = document.createElement('h4');
    h4.innerHTML = 'Debug form';
    form.appendChild(h4);
    parent.appendChild(form);
    return form;
};

pr.create_form_params = function(parent) {
    for (param in this.method.parameters) {
        this.create_form_field(parent, 'param', 'text', param);
    }
    // Accept header
    var mimes = [];
    var emap = this.resource.properties.EXTENSION_MAP;
    for (extension in emap) {
        mimes.push(emap[extension]);
    }
    this.create_form_field(parent, 'header', 'select', 'Accept', mimes);
};

pr.create_form_field = function(parent, type, field_type, name, options) {
    var id = type + '_' + this.resource['name'] + '_' + this.method_name + '_' + name;
    var d = document.createElement('div');
    d.className = type;
    d.id = id;
    var input_id = id + '_input';
    var lbl = document.createElement('label');
    lbl.innerHTML = name;
    if (type == 'header') {
        lbl.innerHTML += ' (Header)';
    }
    lbl.setAttribute('for', input_id);
    d.appendChild(lbl);
    var field = null;
    if (field_type == 'select') {
        field = document.createElement('select');
        for (var i = 0; i < options.length; i++) {
            var option = document.createElement('option');
            option.value = options[i];
            option.innerHTML = this.format(options[i]);
            field.appendChild(option);
        }
    } else {
        field = document.createElement('input');
        field.type = 'text';
    }
    field.id = input_id;
    field.name = name;
    d.appendChild(field);
    parent.appendChild(d);
};

pr.create_form_buttons = function(parent) {
    var subm = document.createElement('input');
    subm.type = 'submit';
    subm.value = 'Execute request (' + this.method_name + ')';
    subm.className = 'submit';
    parent.appendChild(subm);
    var that = this;
    var create_field = document.createElement('a');
    create_field.href = '#';
    create_field.className = 'add_input';
    create_field.innerHTML = 'Add parameter';
    create_field.onclick = function() {
        var name = prompt("Enter a field name:");
        if (name !== null) {
            that.create_form_field(that.input_container, 'param', 'text', name);
        }
        return false;
    };
    parent.appendChild(create_field);
    var create_header = document.createElement('a');
    create_header.href = '#';
    create_header.className = 'add_input';
    create_header.innerHTML = 'Add header';
    create_header.onclick = function() {
        var name = prompt("Enter a header name:");
        if (name !== null) {
            that.create_form_field(that.input_container, 'header', 'text', name);
        }
        return false;
    };
    parent.appendChild(create_header);
};

pr.create_result_field = function(parent) {
    this.result_node = document.createElement('div');
    this.result_node.className = 'result';
    parent.appendChild(this.result_node);
};

pr.on_submit = function() {
    var xhr = null;
    var that = this;
    this.result_node.innerHTML = 'Executing...';
    if (window.XMLHttpRequest) {
        xhr = new XMLHttpRequest();
    } else if (window.ActiveXObject) {
        xhr = new ActiveXObject("Microsoft.XMLHTTP");
    }
    xhr.onreadystatechange = function() {
        if (xhr.readyState == 4) {
            // Received
            var data = [
                '<h5>Status</h5>',
                xhr.status, ' ', that.get_status(xhr.status, xhr.statusText),
                '<h5>Response Headers</h5>',
                that.format(xhr.getAllResponseHeaders()),
                '<h5>Response Body</h5>',
                that.format(xhr.responseText),
                '(' + xhr.responseText.length + ' bytes)'
            ];
            that.result_node.innerHTML = '';
            for (var i = 0; i < data.length; i++) {
                that.result_node.innerHTML += data[i];
            }
        }
    };
    // Get parameters and fill them into path, query string and POST data
    var input = this.get_parameters();
    if (input['__error__']) {
        this.result_node.innerHTML = 'ERROR: Missing data.';
        return false;
    }
    var path = this.resource.path;
    var data = '';
    // Request parameters
    for (param_name in this.method.parameters) {
        var param = this.method.parameters[param_name];
        if (param['path_param']) {
            path = path.replace('{' + param_name + '}', input['params'][param_name]);
        } else {
            data += escape(param_name) + '=' + escape(input['params'][param_name]) + '&';
        }
    }
    if (data === '') {
        data = null;
    } else if (this.method_name == 'GET' || this.method_name == 'HEAD') {
        // Convert data to query string
        path += '?' + data;
        data = null;
    }
    xhr.open(this.method_name, path, true);
    if (data !== null) {
        xhr.setRequestHeader('Content-Type', 'application/x-www-form-urlencoded');
    }
    // Request headers
    for (header in input['headers']) {
        xhr.setRequestHeader(header, input['headers'][header]);
    }
    xhr.send(data);
    return false;
};

pr.format = function(str) {
    var str = str.replace(/&/g, "&amp;").replace(/</g, "&lt;").
        replace(/>/g, "&gt;");
    // Linkify HTTP URLs
    str = str.replace(/(http:\\/\\/[^ ]+)/g, '<a href="$1" target="_blank">$1</a>');
    str = '<pre>' + str + '</pre>';
    return str;
};

pr.get_status = function(status, statusText) {
    /* Need to get the status text manually as statusText is broken on
       Safari. */
    if (statusText == 'OK') {
        // Safari always uses OK, replace it manually
        var STATI = {
            200: 'OK', 201: 'Created', 202: 'Accepted',
            203: 'Non-Authoritative Information', 204: 'No Content',
            205: 'Reset Content', 206: 'Partial Content',
            300: 'Multiple Choices', 301: 'Moved Permanently',
            302: 'Found', 303: 'See Other', 304: 'Not Modified',
            305: 'Use Proxy', 307: 'Temporary Redirect',
            400: 'Bad Request', 401: 'Unauthorized',
            402: 'Payment Required', 403: 'Forbidden', 404: 'Not Found',
            405: 'Method Not Allowed', 406: 'Not Acceptable',
            407: 'Proxy Authentication Required', 408: 'Request Timeout',
            409: 'Conflict', 410: 'Gone', 411: 'Length Required',
            412: 'Precondition Failed', 413: 'Request Entity Too Large',
            414: 'Request-URI Too Long', 415: 'Unsupported Media Type',
            416: 'Requested Range Not Satisfiable',
            417: 'Expectation Failed', 500: 'Internal Server Error',
            501: 'Not Implemented', 502: 'Bad Gateway',
            503: 'Service Unavailable', 504: 'Gateway Timeout',
            505: 'HTTP Version Not Supported'
        };
        if (typeof(STATI[status]) !== 'undefined') {
            return STATI[status];
        }
    }
    return statusText;
};

pr.get_parameters = function() {
    var params = {'__error__': false, 'headers': {}, 'params': {}};
    var fields = [];
    // Get all fields
    var inputs = this.target.getElementsByTagName('input');
    for (var i = 0; i < inputs.length; i++) {
        fields.push(inputs[i]);
    }
    var selects = this.target.getElementsByTagName('select');
    for (var i = 0; i < selects.length; i++) {
        fields.push(selects[i]);
    }
    var inp = null;
    for (var i = 0; i < fields.length; i++) {
        inp = fields[i];
        var type = inp.parentNode.className;
        if (type == 'header') {
            params['headers'][inp.name] = inp.value;
        } else if (type == 'param') {
            inp.className = '';
            params['params'][inp.name] = inp.value;
            // Validate input
            if (this.method.parameters[inp.name]['mandatory'] && inp.value === '') {
                inp.className = 'error';
                params['__error__'] = true;
            }
        }
    }
    return params;
};

/* Hides all .resource_details elements and inserts a toggle link at their
   place. */
function toggle_visibility() {
    var divs = document.getElementsByTagName('div');
    var len = divs.length;
    for (var i = 0; i < len; i++) {
        var div = divs[i];
        if (div.className == 'method_details') {
            toggle_visibility_div(div);
        }
    }
}

function toggle_visibility_div(div) {
    div.className += ' hidden';
    var link = document.createElement('a');
    link.innerHTML = 'Show details';
    link.href = '#';
    link.className = 'toggle_details';
    link.onclick = function() {
        console.debug(div);
        if (link.innerHTML == 'Show details') {
            div.className = div.className.replace(' hidden', '');
            link.innerHTML = 'Hide details';
        } else {
            div.className += ' hidden';
            link.innerHTML = 'Show details';
        }
        return false;
    };
    div.parentNode.insertBefore(link, div);
}
</script>
</head>
<body>
<h1>WsgiService help</h1>
"""]
        self.to_text_html_overview(retval, raw)
        self.to_text_html_resources(retval, raw)
        retval.append('<script>toggle_visibility();</script>')
        retval.append('</body></html>')
        # Strip leading spaces from every output line.
        return re.compile('^ *', re.MULTILINE).sub('', "".join(retval))

    def to_text_html_overview(self, retval, raw):
        """Add the overview table to the HTML output.

        :param retval: The list of strings which is used to collect the
                       HTML response.
        :type retval: list
        :param raw: The original return value of this resources
                    :func:`GET` method.
        :type raw: Dictionary
        """
        retval.append('<table id="overview">')
        retval.append('<tr><th>Resource</th><th>Path</th><th>Description</th></tr>')
        for resource in raw:
            retval.append('<tr><td><a href="#{0}">{0}</a></td><td>{1}</td><td>{2}</td></tr>'.format(
                xml_escape(resource['name']), xml_escape(resource['path']),
                xml_escape(resource['desc'])))
        retval.append('</table>')

    def to_text_html_resources(self, retval, raw):
        """Add the resources details to the HTML output.

        :param retval: The list of strings which is used to collect the
                       HTML response.
        :type retval: list
        :param raw: The original return value of this resources
                    :func:`GET` method.
        :type raw: Dictionary
        """
        for resource in raw:
            retval.append('<div class="resource_details">')
            retval.append('<h2 id="{0}">{0}</h2>'.format(
                xml_escape(resource['name'])))
            if resource['desc']:
                retval.append('<p class="desc">{0}</p>'.format(xml_escape(resource['desc'])))
            retval.append('<table class="config">')
            retval.append('<tr><th>Path</th><td>{0}</td>'.format(xml_escape(
                resource['path'])))
            representations = [value + ' (.' + key + ')' for key, value in
                resource['properties']['EXTENSION_MAP'].iteritems()]
            retval.append('<tr><th>Representations</th><td>{0}</td>'.format(
                xml_escape(', '.join(representations))))
            retval.append('</table>')
            self.to_text_html_methods(retval, resource)
            retval.append('</div>')

    def to_text_html_methods(self, retval, resource):
        """Add the methods of this resource to the HTML output.

        :param retval: The list of strings which is used to collect the
                       HTML response.
        :type retval: list
        :param resource: The documentation of one resource.
        :type resource: Dictionary
        """
        for method_name, method in resource['methods'].iteritems():
            retval.append('<h3 id="{0}_{1}">{1}</h3>'.format(
                xml_escape(resource['name']), xml_escape(method_name)))
            retval.append('<div class="method_details" id="{0}_{1}_container">'.format(
                xml_escape(resource['name']), xml_escape(method_name)))
            if method['desc']:
                retval.append('<p class="desc">{0}</p>'.format(xml_escape(method['desc'])))
            if method['parameters']:
                retval.append('<table class="parameters">')
                retval.append('<tr><th>Name</th><th>Mandatory</th><th>Description</th><th>Validation</th>')
                for param_name, param in method['parameters'].iteritems():
                    # filter out any parameters that can't be written as html.
                    # can contain stuff in other encodings than ascii,
                    # so convert it to ascii
                    mandatory = '-'
                    description = param['desc'].encode('ascii', 'replace')
                    validation = ''
                    if param['mandatory']:
                        mandatory = 'Yes'
                        # the 'convert' key is a <type 'function'> that can't
                        # be written as a string, so convert it to one.
                        # NOTE(review): nesting of the following two ifs was
                        # reconstructed from mangled source — confirm against
                        # the original file.
                        if type(param['mandatory']) is dict and \
                                'convert' in param['mandatory']:
                            param['mandatory']['convert'] = \
                                str(param['mandatory']['convert'])
                    if param['path_param']:
                        mandatory += ' (Path parameter)'
                    if param['validate_re']:
                        validation = 'Regular expression: <tt>' + \
                            xml_escape(param['validate_re'].encode('ascii', 'replace')) + \
                            '</tt>'
                    retval.append('<tr><td>{0}</td><td>{1}</td><td>{2}</td>'
                        '<td>{3}</td>'.format(xml_escape(param_name),
                        xml_escape(mandatory), xml_escape(description),
                        validation))
                retval.append('</table>')
            retval.append('</div>')
            # Inject the interactive debug form for this method; all values
            # are JSON-serialized and XML-escaped before embedding.
            retval.append('<script>add_resource_method({0},{1},{2},{3});</script>'.format(
                xml_escape(json.dumps(resource['name']+'_'+method_name+'_container')),
                xml_escape(json.dumps(resource)),
                xml_escape(json.dumps(method_name)),
                xml_escape(json.dumps(method))))


# Fallback resource rendering a 404 response. Documented with comments (not
# docstrings) on purpose: docstrings are runtime data served by Help._get_doc.
class NotFoundResource(Resource):
    EXTENSION_MAP = [('.html', 'text/html')] + Resource.EXTENSION_MAP

    def GET(self):
        # Set the 404 status and return a serializable error body.
        self.response.status = 404
        return {'error': 'The requested resource does not exist.'}

    def get_method(self, method=None):
        # Always dispatch to GET, whatever HTTP verb the client used.
        return 'GET'

    def handle_ignored_resources(self):
        # Deliberate no-op: skips the base-class behavior (not visible in
        # this chunk) for this special resource.
        return

    def to_text_html(self, raw):
        # Hard-coded HTML 404 page; ignores 'raw'.
        return "".join([
            '<html>',
            '<head><title>404 Not Found</title></head>',
            '<body>',
            '<center><h1>404 Not Found</h1></center>',
            '<center>The requested resource does not exist.</center>',
            '</body></html>'
        ])
beekpr/wsgiservice
wsgiservice/resource.py
Python
bsd-2-clause
58,294