gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import or_
from sqlalchemy.orm import exc
from oslo.db import exception as db_exc
from neutron.common import constants as n_const
from neutron.db import api as db_api
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.extensions import portbindings
from neutron.i18n import _LE, _LI
from neutron import manager
from neutron.openstack.common import log
from neutron.openstack.common import uuidutils
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2 import models
LOG = log.getLogger(__name__)
# limit the number of port OR LIKE statements in one query
MAX_PORTS_PER_QUERY = 500
def _make_segment_dict(record):
    """Build an API-style segment dict from a NetworkSegment DB row."""
    pairs = ((api.ID, record.id),
             (api.NETWORK_TYPE, record.network_type),
             (api.PHYSICAL_NETWORK, record.physical_network),
             (api.SEGMENTATION_ID, record.segmentation_id))
    return dict(pairs)
def add_network_segment(session, network_id, segment, segment_index=0,
                        is_dynamic=False):
    """Persist a new NetworkSegment row for network_id.

    Side effect: the generated segment id is written back into
    segment[api.ID] for the caller.
    """
    fields = {'id': uuidutils.generate_uuid(),
              'network_id': network_id,
              'network_type': segment.get(api.NETWORK_TYPE),
              'physical_network': segment.get(api.PHYSICAL_NETWORK),
              'segmentation_id': segment.get(api.SEGMENTATION_ID),
              'segment_index': segment_index,
              'is_dynamic': is_dynamic}
    with session.begin(subtransactions=True):
        record = models.NetworkSegment(**fields)
        session.add(record)
        segment[api.ID] = record.id
    LOG.info(_LI("Added segment %(id)s of type %(network_type)s for network"
                 " %(network_id)s"),
             {'id': record.id,
              'network_type': record.network_type,
              'network_id': record.network_id})
def get_network_segments(session, network_id, filter_dynamic=False):
    """Return segment dicts for a network, ordered by segment_index.

    filter_dynamic=False returns only static segments, True only dynamic
    ones; pass None to return both kinds.
    """
    with session.begin(subtransactions=True):
        query = (session.query(models.NetworkSegment).
                 filter_by(network_id=network_id).
                 order_by(models.NetworkSegment.segment_index))
        if filter_dynamic is not None:
            query = query.filter_by(is_dynamic=filter_dynamic)
        return [_make_segment_dict(rec) for rec in query.all()]
def get_segment_by_id(session, segment_id):
    """Look up a single segment by id; return None when absent."""
    with session.begin(subtransactions=True):
        try:
            record = (session.query(models.NetworkSegment).
                      filter_by(id=segment_id).one())
        except exc.NoResultFound:
            return None
        return _make_segment_dict(record)
def get_dynamic_segment(session, network_id, physical_network=None,
                        segmentation_id=None):
    """Return a dynamic segment for the filters provided if one exists."""
    # Build the filter set up front instead of chaining conditionally.
    criteria = {'network_id': network_id, 'is_dynamic': True}
    if physical_network:
        criteria['physical_network'] = physical_network
    if segmentation_id:
        criteria['segmentation_id'] = segmentation_id
    with session.begin(subtransactions=True):
        record = (session.query(models.NetworkSegment).
                  filter_by(**criteria).first())
    if record:
        return _make_segment_dict(record)
    LOG.debug("No dynamic segment found for "
              "Network:%(network_id)s, "
              "Physical network:%(physnet)s, "
              "segmentation_id:%(segmentation_id)s",
              {'network_id': network_id,
               'physnet': physical_network,
               'segmentation_id': segmentation_id})
    return None
def delete_network_segment(session, segment_id):
    """Release a dynamic segment for the params provided if one exists."""
    with session.begin(subtransactions=True):
        query = session.query(models.NetworkSegment)
        query.filter_by(id=segment_id).delete()
def add_port_binding(session, port_id):
    """Create, persist and return an unbound PortBinding for port_id."""
    binding = models.PortBinding(
        port_id=port_id,
        vif_type=portbindings.VIF_TYPE_UNBOUND)
    with session.begin(subtransactions=True):
        session.add(binding)
    return binding
def get_locked_port_and_binding(session, port_id):
    """Get port and port binding records for update within transaction.

    Both rows are fetched with SELECT ... FOR UPDATE so the caller holds
    row locks on them for the remainder of the enclosing transaction.
    Returns (None, None) when either record is missing.
    """
    try:
        # REVISIT(rkukura): We need the Port and PortBinding records
        # to both be added to the session and locked for update. A
        # single joined query should work, but the combination of left
        # outer joins and postgresql doesn't seem to work.
        # Eager loading is disabled on both queries; presumably to keep
        # the locking statements simple -- TODO confirm.
        port = (session.query(models_v2.Port).
                enable_eagerloads(False).
                filter_by(id=port_id).
                with_lockmode('update').
                one())
        binding = (session.query(models.PortBinding).
                   enable_eagerloads(False).
                   filter_by(port_id=port_id).
                   with_lockmode('update').
                   one())
        return port, binding
    except exc.NoResultFound:
        return None, None
def ensure_dvr_port_binding(session, port_id, host, router_id=None):
    """Return the DVR binding for (port_id, host), creating it if absent.

    Creation can race with other workers: a DBDuplicateEntry means
    another worker inserted the row first, in which case the existing
    row is re-read and returned.
    """
    record = (session.query(models.DVRPortBinding).
              filter_by(port_id=port_id, host=host).first())
    if record:
        return record
    try:
        with session.begin(subtransactions=True):
            # New bindings start unbound and DOWN; they are updated later
            # by the binding machinery.
            record = models.DVRPortBinding(
                port_id=port_id,
                host=host,
                router_id=router_id,
                vif_type=portbindings.VIF_TYPE_UNBOUND,
                vnic_type=portbindings.VNIC_NORMAL,
                cap_port_filter=False,
                status=n_const.PORT_STATUS_DOWN)
            session.add(record)
        return record
    except db_exc.DBDuplicateEntry:
        # Lost the insert race: fetch the row the other worker created.
        LOG.debug("DVR Port %s already bound", port_id)
        return (session.query(models.DVRPortBinding).
                filter_by(port_id=port_id, host=host).one())
def delete_dvr_port_binding(session, port_id, host):
    """Remove any DVRPortBinding rows matching (port_id, host)."""
    with session.begin(subtransactions=True):
        query = session.query(models.DVRPortBinding)
        query.filter_by(port_id=port_id, host=host).delete(
            synchronize_session=False)
def delete_dvr_port_binding_if_stale(session, binding):
    """Delete a DVR binding that has no router and is DOWN (stale)."""
    is_stale = (not binding.router_id
                and binding.status == n_const.PORT_STATUS_DOWN)
    if is_stale:
        with session.begin(subtransactions=True):
            LOG.debug("DVR: Deleting binding %s", binding)
            session.delete(binding)
def get_port(session, port_id):
    """Get the port whose id matches port_id (exact id or unique prefix).

    Returns None when no port matches or when the prefix is ambiguous
    (matches more than one port).
    """
    with session.begin(subtransactions=True):
        try:
            # startswith() allows callers to pass a truncated port id.
            record = (session.query(models_v2.Port).
                      filter(models_v2.Port.id.startswith(port_id)).
                      one())
            return record
        except exc.NoResultFound:
            return
        except exc.MultipleResultsFound:
            # A short prefix may match several ports; treat as not found.
            LOG.error(_LE("Multiple ports have port_id starting with %s"),
                      port_id)
            return
def get_port_from_device_mac(device_mac):
    """Return the first port whose MAC address equals device_mac, or None."""
    LOG.debug("get_port_from_device_mac() called for mac %s", device_mac)
    session = db_api.get_session()
    return (session.query(models_v2.Port).
            filter_by(mac_address=device_mac).
            first())
def get_ports_and_sgs(port_ids):
    """Get ports from database with security group info."""
    # break large queries into smaller parts
    if len(port_ids) > MAX_PORTS_PER_QUERY:
        LOG.debug("Number of ports %(pcount)s exceeds the maximum per "
                  "query %(maxp)s. Partitioning queries.",
                  {'pcount': len(port_ids), 'maxp': MAX_PORTS_PER_QUERY})
        head = port_ids[:MAX_PORTS_PER_QUERY]
        tail = port_ids[MAX_PORTS_PER_QUERY:]
        return get_ports_and_sgs(head) + get_ports_and_sgs(tail)
    LOG.debug("get_ports_and_sgs() called for port_ids %s", port_ids)
    if not port_ids:
        # if port_ids is empty, avoid querying to DB to ask it for nothing
        return []
    result = []
    for port, sec_groups in get_sg_ids_grouped_by_port(port_ids).iteritems():
        result.append(make_port_dict_with_security_groups(port, sec_groups))
    return result
def get_sg_ids_grouped_by_port(port_ids):
    """Map Port rows to the list of security group ids bound to them."""
    grouped = {}
    session = db_api.get_session()
    sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
    with session.begin(subtransactions=True):
        # partial UUIDs must be individually matched with startswith.
        # full UUIDs may be matched directly in an IN statement
        partial_uuids = set(pid for pid in port_ids
                            if not uuidutils.is_uuid_like(pid))
        full_uuids = set(port_ids) - partial_uuids
        or_criteria = [models_v2.Port.id.startswith(pid)
                       for pid in partial_uuids]
        if full_uuids:
            or_criteria.append(models_v2.Port.id.in_(full_uuids))
        query = (session.query(
                     models_v2.Port,
                     sg_db.SecurityGroupPortBinding.security_group_id).
                 outerjoin(sg_db.SecurityGroupPortBinding,
                           models_v2.Port.id == sg_binding_port).
                 filter(or_(*or_criteria)))
        for port, sg_id in query:
            # The outer join yields sg_id=None for ports without groups;
            # still record the port with an empty list.
            port_sgs = grouped.setdefault(port, [])
            if sg_id:
                port_sgs.append(sg_id)
    return grouped
def make_port_dict_with_security_groups(port, sec_groups):
    """Convert a Port row to a dict and attach security group info."""
    plugin = manager.NeutronManager.get_plugin()
    port_dict = plugin._make_port_dict(port)
    port_dict.update({
        'security_groups': sec_groups,
        'security_group_rules': [],
        'security_group_source_groups': [],
        'fixed_ips': [ip['ip_address'] for ip in port['fixed_ips']],
    })
    return port_dict
def get_port_binding_host(port_id):
    """Return the host of the binding whose port id starts with port_id.

    Returns None when no binding matches or the prefix is ambiguous.
    """
    session = db_api.get_session()
    with session.begin(subtransactions=True):
        try:
            binding = (session.query(models.PortBinding).
                       filter(models.PortBinding.port_id.startswith(port_id)).
                       one())
        except exc.NoResultFound:
            LOG.debug("No binding found for port %(port_id)s",
                      {'port_id': port_id})
            return None
        except exc.MultipleResultsFound:
            LOG.error(_LE("Multiple ports have port_id starting with %s"),
                      port_id)
            return None
        return binding.host
def generate_dvr_port_status(session, port_id):
    # an OR'ed value of status assigned to parent port from the
    # dvrportbinding bucket
    bindings = (session.query(models.DVRPortBinding).
                filter(models.DVRPortBinding.port_id == port_id))
    result = n_const.PORT_STATUS_BUILD
    for binding in bindings:
        status = binding.status
        if status == n_const.PORT_STATUS_ACTIVE:
            # ACTIVE dominates: stop scanning as soon as one is found.
            return status
        if status == n_const.PORT_STATUS_DOWN:
            result = status
    return result
def get_dvr_port_binding_by_host(session, port_id, host):
    """Fetch the DVR binding matching (port_id prefix, host), if any."""
    with session.begin(subtransactions=True):
        binding = (session.query(models.DVRPortBinding).
                   filter(models.DVRPortBinding.port_id.startswith(port_id),
                          models.DVRPortBinding.host == host).
                   first())
    if binding is None:
        LOG.debug("No binding for DVR port %(port_id)s with host "
                  "%(host)s", {'port_id': port_id, 'host': host})
    return binding
def get_dvr_port_bindings(session, port_id):
    """Return every DVR binding whose port id starts with port_id."""
    with session.begin(subtransactions=True):
        bindings = (session.query(models.DVRPortBinding).
                    filter(models.DVRPortBinding.port_id.startswith(port_id)).
                    all())
    if not bindings:
        LOG.debug("No bindings for DVR port %s", port_id)
    return bindings
| |
"""
A pysaml2 frontend module for the satosa proxy
"""
import copy
import functools
import json
import logging
import re
from base64 import urlsafe_b64decode
from base64 import urlsafe_b64encode
from urllib.parse import quote
from urllib.parse import quote_plus
from urllib.parse import unquote
from urllib.parse import unquote_plus
from urllib.parse import urlparse
from http.cookies import SimpleCookie
from saml2 import SAMLError, xmldsig
from saml2.config import IdPConfig
from saml2.extension.mdui import NAMESPACE as UI_NAMESPACE
from saml2.metadata import create_metadata_string
from saml2.saml import NameID
from saml2.saml import NAMEID_FORMAT_TRANSIENT
from saml2.saml import NAMEID_FORMAT_PERSISTENT
from saml2.saml import NAMEID_FORMAT_EMAILADDRESS
from saml2.saml import NAMEID_FORMAT_UNSPECIFIED
from saml2.samlp import name_id_policy_from_string
from saml2.server import Server
from satosa.base import SAMLBaseModule
from satosa.context import Context
from .base import FrontendModule
from ..response import Response
from ..response import ServiceError
from ..saml_util import make_saml_response
from satosa.exception import SATOSAError
import satosa.util as util
import satosa.logging_util as lu
from satosa.internal import InternalData
# Module-level logger for this frontend.
logger = logging.getLogger(__name__)

# Mapping from internal subject-type names to SAML NameID formats.
# SAML formats map to themselves; "public" and "pairwise" (presumably
# OIDC-style subject types -- verify against backends) map to
# persistent and transient NameIDs respectively.
subject_type_map = {
    NAMEID_FORMAT_TRANSIENT: NAMEID_FORMAT_TRANSIENT,
    NAMEID_FORMAT_PERSISTENT: NAMEID_FORMAT_PERSISTENT,
    NAMEID_FORMAT_EMAILADDRESS: NAMEID_FORMAT_EMAILADDRESS,
    NAMEID_FORMAT_UNSPECIFIED: NAMEID_FORMAT_UNSPECIFIED,
    "public": NAMEID_FORMAT_PERSISTENT,
    "pairwise": NAMEID_FORMAT_TRANSIENT,
}
def subject_type_to_saml_nameid_format(subject_type):
    """Map an internal subject type to a SAML NameID format.

    Unknown subject types fall back to a persistent NameID format.
    """
    try:
        return subject_type_map[subject_type]
    except KeyError:
        return NAMEID_FORMAT_PERSISTENT
class SAMLFrontend(FrontendModule, SAMLBaseModule):
    """
    A pysaml2 frontend module
    """
    # Configuration keys recognized by this frontend.
    KEY_CUSTOM_ATTR_RELEASE = 'custom_attribute_release'
    KEY_ENDPOINTS = 'endpoints'
    KEY_IDP_CONFIG = 'idp_config'

    def __init__(self, auth_req_callback_func, internal_attributes, config, base_url, name):
        """Validate the module config and initialize frontend state.

        :param auth_req_callback_func: callback invoked once an authn
            request has been converted to internal data
        :param internal_attributes: internal attribute mapping config
        :param config: module config; must contain the idp_config and
            endpoints keys (enforced by _validate_config)
        :param base_url: base URL of the proxy
        :param name: name of this frontend instance
        """
        self._validate_config(config)
        super().__init__(auth_req_callback_func, internal_attributes, base_url, name)
        self.config = self.init_config(config)
        self.endpoints = config[self.KEY_ENDPOINTS]
        self.custom_attribute_release = config.get(
            self.KEY_CUSTOM_ATTR_RELEASE)
        # The pysaml2 IdP server; created later in register_endpoints().
        self.idp = None
    def handle_authn_response(self, context, internal_response):
        """
        See super class method satosa.frontends.base.FrontendModule#handle_authn_response
        :type context: satosa.context.Context
        :type internal_response: satosa.internal.InternalData
        :rtype satosa.response.Response
        """
        # Delegate to the shared implementation using this instance's IdP.
        return self._handle_authn_response(context, internal_response, self.idp)
    def handle_authn_request(self, context, binding_in):
        """
        This method is bound to the starting endpoint of the authentication.
        :type context: satosa.context.Context
        :type binding_in: str
        :rtype: satosa.response.Response
        :param context: The current context
        :param binding_in: The binding type (http post, http redirect, ...)
        :return: response
        """
        # Delegate to the shared implementation using this instance's IdP.
        return self._handle_authn_request(context, binding_in, self.idp)
    def handle_backend_error(self, exception):
        """
        See super class satosa.frontends.base.FrontendModule
        :type exception: satosa.exception.SATOSAError
        :rtype: satosa.response.Response
        """
        # Delegate to the shared implementation using this instance's IdP.
        return self._handle_backend_error(exception, self.idp)
    def register_endpoints(self, backend_names):
        """
        See super class satosa.frontends.base.FrontendModule
        :type backend_names: list[str]
        :rtype: list[(str, ((satosa.context.Context, Any) -> satosa.response.Response, Any))]
        """
        url_map = []
        if self.enable_metadata_reload():
            url_map.append(
                ("^%s/%s$" % (self.name, "reload-metadata"), self._reload_metadata))
        # Expand the configured endpoints with one URL per backend; the
        # IdP server must be built from the expanded config.
        self.idp_config = self._build_idp_config_endpoints(
            self.config[self.KEY_IDP_CONFIG], backend_names)
        # Create the idp
        idp_config = IdPConfig().load(copy.deepcopy(self.idp_config))
        self.idp = Server(config=idp_config)
        return self._register_endpoints(backend_names) + url_map
    def _create_state_data(self, context, resp_args, relay_state):
        """
        Returns a dict containing the state needed in the response flow.
        :type context: satosa.context.Context
        :type resp_args: dict[str, str | saml2.samlp.NameIDPolicy]
        :type relay_state: str
        :rtype: dict[str, dict[str, str] | str]
        :param context: The current context
        :param resp_args: Response arguments
        :param relay_state: Request relay state
        :return: A state as a dict
        """
        # Serialize the NameIDPolicy object to its XML string so the
        # state holds only plain strings; load_state() re-parses it.
        # Note: resp_args is mutated in place.
        if "name_id_policy" in resp_args and resp_args["name_id_policy"] is not None:
            resp_args["name_id_policy"] = resp_args["name_id_policy"].to_string().decode("utf-8")
        return {"resp_args": resp_args, "relay_state": relay_state}
    def load_state(self, state):
        """
        Loads a state from state
        :type state: satosa.state.State
        :rtype: dict[str, Any]
        :param state: The current state
        :return: The dictionary given by the save_state function
        """
        state_data = state[self.name]
        # Reverse of _create_state_data: the NameIDPolicy was stored as
        # an XML string, so parse it back into an object.
        if isinstance(state_data["resp_args"]["name_id_policy"], str):
            state_data["resp_args"]["name_id_policy"] = name_id_policy_from_string(
                state_data["resp_args"]["name_id_policy"])
        return state_data
def _validate_config(self, config):
"""
Validates some parts of the module config
:type config: dict[str, dict[str, Any] | str]
:param config: The module config
"""
required_keys = [
self.KEY_IDP_CONFIG,
self.KEY_ENDPOINTS,
]
if not config:
raise ValueError("No configuration given")
for key in required_keys:
try:
_val = config[key]
except KeyError as e:
raise ValueError("Missing configuration key: %s" % key) from e
def _handle_authn_request(self, context, binding_in, idp):
"""
See doc for handle_authn_request method.
:type context: satosa.context.Context
:type binding_in: str
:type idp: saml.server.Server
:rtype: satosa.response.Response
:param context: The current context
:param binding_in: The pysaml binding type
:param idp: The saml frontend idp server
:return: response
"""
req_info = idp.parse_authn_request(context.request["SAMLRequest"], binding_in)
authn_req = req_info.message
msg = "{}".format(authn_req)
logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
logger.debug(logline)
# keep the ForceAuthn value to be used by plugins
context.decorate(Context.KEY_FORCE_AUTHN, authn_req.force_authn)
try:
resp_args = idp.response_args(authn_req)
except SAMLError as e:
msg = "Could not find necessary info about entity: {}".format(e)
logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
logger.error(logline)
return ServiceError("Incorrect request from requester: %s" % e)
requester = resp_args["sp_entity_id"]
context.state[self.name] = self._create_state_data(context, idp.response_args(authn_req),
context.request.get("RelayState"))
subject = authn_req.subject
name_id_value = subject.name_id.text if subject else None
nameid_formats = {
"from_policy": authn_req.name_id_policy and authn_req.name_id_policy.format,
"from_response": subject and subject.name_id and subject.name_id.format,
"from_metadata": (
idp.metadata[requester]
.get("spsso_descriptor", [{}])[0]
.get("name_id_format", [{}])[0]
.get("text")
),
"default": NAMEID_FORMAT_TRANSIENT,
}
name_id_format = (
nameid_formats["from_policy"]
or (
nameid_formats["from_response"] != NAMEID_FORMAT_UNSPECIFIED
and nameid_formats["from_response"]
)
or nameid_formats["from_metadata"]
or nameid_formats["from_response"]
or nameid_formats["default"]
)
requester_name = self._get_sp_display_name(idp, requester)
internal_req = InternalData(
subject_id=name_id_value,
subject_type=name_id_format,
requester=requester,
requester_name=requester_name,
)
idp_policy = idp.config.getattr("policy", "idp")
if idp_policy:
internal_req.attributes = self._get_approved_attributes(
idp, idp_policy, requester, context.state
)
authn_context_class_ref_nodes = getattr(
authn_req.requested_authn_context, 'authn_context_class_ref', []
)
authn_context = [ref.text for ref in authn_context_class_ref_nodes]
context.decorate(Context.KEY_AUTHN_CONTEXT_CLASS_REF, authn_context)
context.decorate(Context.KEY_METADATA_STORE, self.idp.metadata)
return self.auth_req_callback_func(context, internal_req)
    def _get_approved_attributes(self, idp, idp_policy, sp_entity_id, state):
        """
        Returns a list of approved attributes
        :type idp: saml.server.Server
        :type idp_policy: saml2.assertion.Policy
        :type sp_entity_id: str
        :type state: satosa.state.State
        :rtype: list[str]
        :param idp: The saml frontend idp server
        :param idp_policy: The idp policy
        :param sp_entity_id: The requesting sp entity id
        :param state: The current state
        :return: A list containing approved attributes
        """
        name_format = idp_policy.get_name_form(sp_entity_id)
        attrconvs = idp.config.attribute_converters
        idp_policy.acs = attrconvs
        attribute_filter = []
        # Find the converter matching the policy's name format; take every
        # attribute it knows and restrict that set by the policy for this SP.
        for aconv in attrconvs:
            if aconv.name_format == name_format:
                all_attributes = {v: None for v in aconv._fro.values()}
                attribute_filter = list(idp_policy.restrict(all_attributes, sp_entity_id, idp.metadata).keys())
                break
        # Translate the SAML attribute names into internal attribute names.
        attribute_filter = self.converter.to_internal_filter(self.attribute_profile, attribute_filter)
        msg = "Filter: {}".format(attribute_filter)
        logline = lu.LOG_FMT.format(id=lu.get_session_id(state), message=msg)
        logger.debug(logline)
        return attribute_filter
def _filter_attributes(self, idp, internal_response, context,):
idp_policy = idp.config.getattr("policy", "idp")
attributes = {}
if idp_policy:
approved_attributes = self._get_approved_attributes(
idp, idp_policy, internal_response.requester, context.state
)
attributes = {
k: v
for k, v in internal_response.attributes.items()
if k in approved_attributes
}
return attributes
def _handle_authn_response(self, context, internal_response, idp):
"""
See super class satosa.frontends.base.FrontendModule
:type context: satosa.context.Context
:type internal_response: satosa.internal.InternalData
:type idp: saml.server.Server
:param context: The current context
:param internal_response: The internal response
:param idp: The saml frontend idp server
:return: A saml response
"""
request_state = self.load_state(context.state)
resp_args = request_state["resp_args"]
sp_entity_id = resp_args["sp_entity_id"]
internal_response.attributes = self._filter_attributes(
idp, internal_response, context)
ava = self.converter.from_internal(
self.attribute_profile, internal_response.attributes)
auth_info = {}
if self.acr_mapping:
auth_info["class_ref"] = self.acr_mapping.get(
internal_response.auth_info.issuer, self.acr_mapping[""])
else:
auth_info["class_ref"] = internal_response.auth_info.auth_class_ref
auth_info["authn_auth"] = internal_response.auth_info.issuer
if self.custom_attribute_release:
custom_release = util.get_dict_defaults(
self.custom_attribute_release,
internal_response.auth_info.issuer,
sp_entity_id)
attributes_to_remove = custom_release.get("exclude", [])
for k in attributes_to_remove:
ava.pop(k, None)
nameid_value = internal_response.subject_id
nameid_format = subject_type_to_saml_nameid_format(
internal_response.subject_type
)
# If the backend did not receive a SAML <NameID> and so
# name_id is set to None then do not create a NameID instance.
# Instead pass None as the name name_id to the IdP server
# instance and it will use its configured policy to construct
# a <NameID>, with the default to create a transient <NameID>.
name_id = None if not nameid_value else NameID(
text=nameid_value,
format=nameid_format,
sp_name_qualifier=None,
name_qualifier=None,
)
msg = "returning attributes {}".format(json.dumps(ava))
logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
logger.debug(logline)
idp_conf = self.idp_config.get('service', {}).get('idp', {})
policies = idp_conf.get('policy', {})
sp_policy = policies.get('default', {})
sp_policy.update(policies.get(sp_entity_id, {}))
sign_assertion = sp_policy.get('sign_assertion', False)
sign_response = sp_policy.get('sign_response', True)
encrypt_assertion = sp_policy.get('encrypt_assertion', False)
encrypted_advice_attributes = sp_policy.get('encrypted_advice_attributes', False)
signing_algorithm = idp_conf.get('signing_algorithm')
digest_algorithm = idp_conf.get('digest_algorithm')
sign_alg_attr = sp_policy.get('sign_alg', 'SIG_RSA_SHA256')
digest_alg_attr = sp_policy.get('digest_alg', 'DIGEST_SHA256')
# Construct arguments for method create_authn_response
# on IdP Server instance
args = {
# Add the SP details
**resp_args,
# AuthnResponse data
'identity': ava,
'name_id': name_id,
'authn': auth_info,
'sign_response': sign_response,
'sign_assertion': sign_assertion,
'encrypt_assertion': encrypt_assertion,
'encrypted_advice_attributes': encrypted_advice_attributes,
}
args['sign_alg'] = signing_algorithm
if not args['sign_alg']:
try:
args['sign_alg'] = getattr(xmldsig, sign_alg_attr)
except AttributeError as e:
msg = "Unsupported sign algorithm {}".format(sign_alg_attr)
logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
logger.error(logline)
raise Exception(msg) from e
msg = "signing with algorithm {}".format(args['sign_alg'])
logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
logger.debug(logline)
args['digest_alg'] = digest_algorithm
if not args['digest_alg']:
try:
args['digest_alg'] = getattr(xmldsig, digest_alg_attr)
except AttributeError as e:
msg = "Unsupported digest algorithm {}".format(digest_alg_attr)
logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
logger.error(logline)
raise Exception(msg) from e
msg = "using digest algorithm {}".format(args['digest_alg'])
logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
logger.debug(logline)
if sign_alg_attr or digest_alg_attr:
msg = (
"sign_alg and digest_alg are deprecated; "
"instead, use signing_algorithm and digest_algorithm "
"under the service/idp configuration path "
"(not under policy/default)."
)
logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
logger.warning(msg)
resp = idp.create_authn_response(**args)
http_args = idp.apply_binding(
resp_args["binding"], str(resp), resp_args["destination"],
request_state["relay_state"], response=True)
# Set the common domain cookie _saml_idp if so configured.
if self.config.get('common_domain_cookie'):
self._set_common_domain_cookie(internal_response, http_args, context)
del context.state[self.name]
return make_saml_response(resp_args["binding"], http_args)
def _handle_backend_error(self, exception, idp):
"""
See super class satosa.frontends.base.FrontendModule
:type exception: satosa.exception.SATOSAAuthenticationError
:type idp: saml.server.Server
:rtype: satosa.response.Response
:param exception: The SATOSAAuthenticationError
:param idp: The saml frontend idp server
:return: A response
"""
loaded_state = self.load_state(exception.state)
relay_state = loaded_state["relay_state"]
resp_args = loaded_state["resp_args"]
error_resp = idp.create_error_response(resp_args["in_response_to"],
resp_args["destination"],
Exception(exception.message))
http_args = idp.apply_binding(resp_args["binding"], str(error_resp), resp_args["destination"], relay_state,
response=True)
msg = "HTTPSards: {}".format(http_args)
logline = lu.LOG_FMT.format(id=lu.get_session_id(exception.state), message=msg)
logger.debug(logline)
return make_saml_response(resp_args["binding"], http_args)
    def _metadata_endpoint(self, context):
        """
        Endpoint for retrieving the backend metadata
        :type context: satosa.context.Context
        :rtype: satosa.response.Response
        :param context: The current context
        :return: response with metadata
        """
        msg = "Sending metadata response for entityId = {}".format(self.idp.config.entityid)
        logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
        logger.debug(logline)
        # Serve the IdP metadata as XML; the numeric argument is passed
        # straight to pysaml2's create_metadata_string (presumably a
        # validity period -- confirm against pysaml2 docs).
        metadata_string = create_metadata_string(None, self.idp.config, 4, None, None, None, None,
                                                 None).decode("utf-8")
        return Response(metadata_string, content="text/xml")
def _reload_metadata(self, context):
"""
Reload SAML metadata
"""
logger.debug("Reloading metadata")
res = self.idp.reload_metadata(
copy.deepcopy(self.config[SAMLFrontend.KEY_IDP_CONFIG]['metadata'])
)
message = "Metadata reload %s" % ("OK" if res else "failed")
status = "200 OK" if res else "500 FAILED"
return Response(message=message, status=status)
def _register_endpoints(self, providers):
"""
Register methods to endpoints
:type providers: list[str]
:rtype: list[(str, ((satosa.context.Context, Any) -> satosa.response.Response, Any))]
:param providers: A list of backend providers
:return: A list of endpoint/method pairs
"""
url_map = []
for endp_category in self.endpoints:
for binding, endp in self.endpoints[endp_category].items():
valid_providers = ""
for provider in providers:
valid_providers = "{}|^{}".format(valid_providers, provider)
valid_providers = valid_providers.lstrip("|")
parsed_endp = urlparse(endp)
url_map.append(("(%s)/%s$" % (valid_providers, parsed_endp.path),
functools.partial(self.handle_authn_request, binding_in=binding)))
if self.expose_entityid_endpoint():
logger.debug("Exposing frontend entity endpoint = {}".format(self.idp.config.entityid))
parsed_entity_id = urlparse(self.idp.config.entityid)
url_map.append(("^{0}".format(parsed_entity_id.path[1:]),
self._metadata_endpoint))
return url_map
    def _set_common_domain_cookie(self, internal_response, http_args, context):
        """Update the _saml_idp common domain cookie in http_args.

        The cookie value is a URL-quoted, space-separated list of
        URL-safe-base64-encoded IdP entity ids, most recently used last.
        The IdP from this flow is moved/appended to the end of the list
        and the Set-Cookie header is appended to http_args['headers'].
        """
        # Find any existing common domain cookie and deconsruct it to
        # obtain the list of IdPs.
        cookie = SimpleCookie(context.cookie)
        if '_saml_idp' in cookie:
            common_domain_cookie = cookie['_saml_idp']
            msg = "Found existing common domain cookie {}".format(common_domain_cookie)
            logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
            logger.debug(logline)
            space_separated_b64_idp_string = unquote(common_domain_cookie.value)
            b64_idp_list = space_separated_b64_idp_string.split()
            idp_list = [urlsafe_b64decode(b64_idp).decode('utf-8') for b64_idp in b64_idp_list]
        else:
            msg = "No existing common domain cookie found"
            logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
            logger.debug(logline)
            idp_list = []
        msg = "Common domain cookie list of IdPs is {}".format(idp_list)
        logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
        logger.debug(logline)
        # Identity the current IdP just used for authentication in this flow.
        this_flow_idp = internal_response.auth_info.issuer
        # Remove all occurrences of the current IdP from the list of IdPs.
        idp_list = [idp for idp in idp_list if idp != this_flow_idp]
        # Append the current IdP.
        idp_list.append(this_flow_idp)
        msg = "Added IdP {} to common domain cookie list of IdPs".format(this_flow_idp)
        logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
        logger.debug(logline)
        msg = "Common domain cookie list of IdPs is now {}".format(idp_list)
        logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
        logger.debug(logline)
        # Construct the cookie.
        b64_idp_list = [urlsafe_b64encode(idp.encode()).decode("utf-8") for idp in idp_list]
        space_separated_b64_idp_string = " ".join(b64_idp_list)
        url_encoded_space_separated_b64_idp_string = quote(space_separated_b64_idp_string)
        cookie = SimpleCookie()
        cookie['_saml_idp'] = url_encoded_space_separated_b64_idp_string
        cookie['_saml_idp']['path'] = '/'
        # Use the domain from configuration if present else use the domain
        # from the base URL for the front end.
        domain = urlparse(self.base_url).netloc
        if isinstance(self.config['common_domain_cookie'], dict):
            if 'domain' in self.config['common_domain_cookie']:
                domain = self.config['common_domain_cookie']['domain']
        # Ensure that the domain begins with a '.'
        if domain[0] != '.':
            domain = '.' + domain
        cookie['_saml_idp']['domain'] = domain
        cookie['_saml_idp']['secure'] = True
        # Set the cookie.
        msg = "Setting common domain cookie with {}".format(cookie.output())
        logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
        logger.debug(logline)
        # cookie.output() yields "Set-Cookie: <value>"; split it into the
        # (header, value) tuple expected in http_args['headers'].
        http_args['headers'].append(tuple(cookie.output().split(": ", 1)))
def _build_idp_config_endpoints(self, config, providers):
"""
Builds the final frontend module config
:type config: dict[str, Any]
:type providers: list[str]
:rtype: dict[str, Any]
:param config: The module config
:param providers: A list of backend names
:return: The final config
"""
# Add an endpoint to each provider
idp_endpoints = []
for endp_category in self.endpoints:
for func, endpoint in self.endpoints[endp_category].items():
for provider in providers:
_endpoint = "{base}/{provider}/{endpoint}".format(
base=self.base_url, provider=provider, endpoint=endpoint)
idp_endpoints.append((_endpoint, func))
config["service"]["idp"]["endpoints"][endp_category] = idp_endpoints
return config
def _get_sp_display_name(self, idp, entity_id):
extensions = idp.metadata.extension(entity_id, "spsso_descriptor", "{}&UIInfo".format(UI_NAMESPACE))
if not extensions:
return None
try:
return extensions[0]["display_name"]
except (IndexError, KeyError) as e:
pass
return None
class SAMLMirrorFrontend(SAMLFrontend):
    """
    Frontend module that uses dynamic entity id and partially dynamic endpoints.
    """

    def _load_endpoints_to_config(self, provider, target_entity_id, config=None):
        """
        Loads approved endpoints to the config.
        :type provider: str
        :type target_entity_id: str
        :type config: dict[str, Any] | None
        :rtype: dict[str, Any]
        :param provider: target backend name
        :param target_entity_id: frontend target entity id
        :param config: base IdP config; defaults to self.idp_config
        :return: IDP config with endpoints
        """
        # Work on a deep copy so the shared idp_config is never mutated.
        idp_conf = copy.deepcopy(config or self.idp_config)
        for service, endpoint in self.endpoints.items():
            idp_endpoints = []
            for binding, path in endpoint.items():
                # Endpoint URLs embed both the backend name and the
                # target entity id.
                url = "{base}/{provider}/{target_id}/{path}".format(
                    base=self.base_url, provider=provider,
                    target_id=target_entity_id, path=path)
                idp_endpoints.append((url, binding))
            idp_conf["service"]["idp"]["endpoints"][service] = idp_endpoints
        return idp_conf
def _load_idp_dynamic_endpoints(self, context):
"""
Loads an idp server that accepts the target backend name in the endpoint url
ex: /<backend_name>/sso/redirect
:type context: The current context
:rtype: saml.server.Server
:param context:
:return: An idp server
"""
target_entity_id = context.target_entity_id_from_path()
idp_conf_file = self._load_endpoints_to_config(context.target_backend, target_entity_id)
idp_config = IdPConfig().load(idp_conf_file)
return Server(config=idp_config)
def _load_idp_dynamic_entity_id(self, state):
"""
Loads an idp server with the entity id saved in state
:type state: satosa.state.State
:rtype: saml.server.Server
:param state: The current state
:return: An idp server
"""
# Change the idp entity id dynamically
idp_config_file = copy.deepcopy(self.idp_config)
idp_config_file["entityid"] = "{}/{}".format(self.idp_config["entityid"], state[self.name]["target_entity_id"])
idp_config = IdPConfig().load(idp_config_file)
return Server(config=idp_config)
def handle_authn_request(self, context, binding_in):
"""
Loads approved endpoints dynamically
See super class satosa.frontends.saml2.SAMLFrontend#handle_authn_request
:type context: satosa.context.Context
:type binding_in: str
:rtype: satosa.response.Response
"""
target_entity_id = context.target_entity_id_from_path()
target_entity_id = urlsafe_b64decode(target_entity_id).decode()
context.decorate(Context.KEY_TARGET_ENTITYID, target_entity_id)
idp = self._load_idp_dynamic_endpoints(context)
return self._handle_authn_request(context, binding_in, idp)
def _create_state_data(self, context, resp_args, relay_state):
"""
Adds the frontend idp entity id to state
See super class satosa.frontends.saml2.SAMLFrontend#save_state
:type context: satosa.context.Context
:type resp_args: dict[str, str | saml2.samlp.NameIDPolicy]
:type relay_state: str
:rtype: dict[str, dict[str, str] | str]
"""
state = super()._create_state_data(context, resp_args, relay_state)
state["target_entity_id"] = context.target_entity_id_from_path()
return state
def handle_backend_error(self, exception):
"""
Loads the frontend entity id dynamically.
See super class satosa.frontends.saml2.SAMLFrontend#handle_backend_error
:type exception: satosa.exception.SATOSAAuthenticationError
:rtype: satosa.response.Response
"""
idp = self._load_idp_dynamic_entity_id(exception.state)
return self._handle_backend_error(exception, idp)
def handle_authn_response(self, context, internal_response):
"""
See super class satosa.frontends.base.FrontendModule#handle_authn_response
:param context:
:param internal_response:
:return:
"""
idp = self._load_idp_dynamic_entity_id(context.state)
return self._handle_authn_response(context, internal_response, idp)
def _register_endpoints(self, providers):
"""
See super class satosa.frontends.base.FrontendModule#register_endpoints
:type providers: list[str]
:rtype list[(str, ((satosa.context.Context, Any) -> satosa.response.Response, Any))] |
list[(str, (satosa.context.Context) -> satosa.response.Response)]
:param providers: A list with backend names
:return: A list of url and endpoint function pairs
"""
url_map = []
for endp_category in self.endpoints:
for binding, endp in self.endpoints[endp_category].items():
valid_providers = "|^".join(providers)
parsed_endp = urlparse(endp)
url_map.append(
(
r"(^{})/\S+/{}".format(valid_providers, parsed_endp.path),
functools.partial(self.handle_authn_request, binding_in=binding)
)
)
return url_map
class SAMLVirtualCoFrontend(SAMLFrontend):
    """
    Frontend module that exposes multiple virtual SAML identity providers,
    each representing a collaborative organization or CO.
    """
    # Keys used in the frontend YAML config and in per-request state.
    KEY_CO = 'collaborative_organizations'
    KEY_CO_NAME = 'co_name'
    KEY_CO_ENTITY_ID = 'co_entity_id'
    KEY_CO_ATTRIBUTES = 'co_static_saml_attributes'
    KEY_CO_ATTRIBUTE_SCOPE = 'co_attribute_scope'
    KEY_CONTACT_PERSON = 'contact_person'
    KEY_ENCODEABLE_NAME = 'encodeable_name'
    KEY_ORGANIZATION = 'organization'
    KEY_ORGANIZATION_KEYS = ['display_name', 'name', 'url']

    def __init__(self, auth_req_callback_func, internal_attributes, config, base_url, name):
        # NOTE(review): set before super().__init__ since base-class
        # initialization may invoke _register_endpoints, which updates this
        # flag — confirm against the base class.
        self.has_multiple_backends = False
        super().__init__(auth_req_callback_func, internal_attributes, config, base_url, name)

    def handle_authn_request(self, context, binding_in):
        """
        See super class
        satosa.frontends.saml2.SAMLFrontend#handle_authn_request

        :type context: satosa.context.Context
        :type binding_in: str
        :rtype: satosa.response.Response
        """
        # Using the context of the current request dynamically create an
        # IdP instance and then use it to handle the authentication request.
        idp = self._create_co_virtual_idp(context)
        return self._handle_authn_request(context, binding_in, idp)

    def handle_authn_response(self, context, internal_response):
        """
        See super class satosa.frontends.base.
        FrontendModule#handle_authn_response

        :param context: The current context
        :param internal_response: The authentication response
        :return: A response
        """
        return self._handle_authn_response(context, internal_response)

    def _handle_authn_response(self, context, internal_response):
        """
        Merge any CO static attributes into the response, then hand it to the
        base class using a CO-specific virtual IdP.

        :type context: satosa.context.Context
        :rtype: satosa.response.Response

        :param context: The current context
        :param internal_response: The authentication response
        :return: A response
        """
        # Using the context of the current request and saved state from the
        # authentication request dynamically create an IdP instance.
        idp = self._create_co_virtual_idp(context)

        # Add any static attributes for the CO.
        co_config = self._get_co_config(context)

        if self.KEY_CO_ATTRIBUTES in co_config:
            attributes = internal_response.attributes
            for attribute, value in co_config[self.KEY_CO_ATTRIBUTES].items():
                # XXX This should be refactored when Python 3.4 support is
                # XXX no longer required to use isinstance(value, Iterable).
                try:
                    if iter(value) and not isinstance(value, str):
                        attributes[attribute] = value
                    else:
                        attributes[attribute] = [value]
                except TypeError:
                    # Non-iterable scalar: store as a single-element list.
                    attributes[attribute] = [value]

        # Handle the authentication response.
        return super()._handle_authn_response(context, internal_response, idp)

    def _create_state_data(self, context, resp_args, relay_state):
        """
        Adds the CO name to state
        See super class satosa.frontends.saml2.SAMLFrontend#save_state

        :type context: satosa.context.Context
        :type resp_args: dict[str, str | saml2.samlp.NameIDPolicy]
        :type relay_state: str
        :rtype: dict[str, dict[str, str] | str]
        """
        state = super()._create_state_data(context, resp_args, relay_state)
        state[self.KEY_CO_NAME] = context.get_decoration(self.KEY_CO_NAME)
        state[self.KEY_CO_ENTITY_ID] = context.get_decoration(
            self.KEY_CO_ENTITY_ID)

        co_config = self._get_co_config(context)
        state[self.KEY_CO_ATTRIBUTE_SCOPE] = co_config.get(
            self.KEY_CO_ATTRIBUTE_SCOPE,
            None)

        return state

    def _get_co_config(self, context):
        """
        Obtain the configuration for the CO.

        :type context: satosa.context.Context
        :rtype: dict

        :param context: The current context
        :return: CO configuration, or None when the CO is not configured
        """
        co_name = self._get_co_name(context)
        for co in self.config[self.KEY_CO]:
            if co[self.KEY_ENCODEABLE_NAME] == co_name:
                return co

    def _get_co_name_from_path(self, context):
        """
        The CO name is URL encoded and obtained from the request path
        for a request coming into one of the standard binding endpoints.
        For example the HTTP-Redirect binding request path will have the
        format

        {base}/{backend}/{co_name}/sso/redirect

        :type context: satosa.context.Context
        :rtype: str

        :param context: The current context
        """
        url_encoded_co_name = context.path.split("/")[1]
        co_name = unquote_plus(url_encoded_co_name)

        return co_name

    def _get_co_name(self, context):
        """
        Obtain the CO name previously saved in the request state, or if not set
        use the request path obtained from the current context to determine
        the target CO.

        :type context: satosa.context.Context
        :rtype: str

        :param context: The current context
        :return: CO name
        """
        try:
            co_name = context.state[self.name][self.KEY_CO_NAME]
            logline = "Found CO {} from state".format(co_name)
            logger.debug(logline)
        except KeyError:
            co_name = self._get_co_name_from_path(context)
            logline = "Found CO {} from request path".format(co_name)
            logger.debug(logline)

        return co_name

    def _add_endpoints_to_config(self, config, co_name, backend_name):
        """
        Use the request path from the context to determine the target backend,
        then construct mappings from bindings to endpoints for the virtual
        IdP for the CO.

        The endpoint URLs have the form

        {base}/{backend}/{co_name}/{path}

        :type config: satosa.satosa_config.SATOSAConfig
        :type co_name: str
        :type backend_name: str
        :rtype: satosa.satosa_config.SATOSAConfig

        :param config: satosa proxy config
        :param co_name: CO name
        :param backend_name: The target backend name
        :return: config with mappings for CO IdP
        """
        for service, endpoint in self.endpoints.items():
            idp_endpoints = []
            for binding, path in endpoint.items():
                url = "{base}/{backend}/{co_name}/{path}".format(
                    base=self.base_url,
                    backend=backend_name,
                    co_name=quote_plus(co_name),
                    path=path)
                mapping = (url, binding)
                idp_endpoints.append(mapping)

            # Overwrite the IdP config with the CO specific mappings between
            # SAML binding and URL endpoints.
            config["service"]["idp"]["endpoints"][service] = idp_endpoints

        return config

    def _add_entity_id(self, config, co_name, backend_name):
        """
        Use the CO name to construct the entity ID for the virtual IdP
        for the CO and add it to the config. Also add it to the
        context.

        The entity ID has the form

        {base_entity_id}/{backend_name}/{co_name}

        :type config: satosa.satosa_config.SATOSAConfig
        :type co_name: str
        :type backend_name: str
        :rtype: satosa.satosa_config.SATOSAConfig

        :param config: satosa proxy config
        :param co_name: CO name
        :param backend_name: Backend name
        :return: config with updated entity ID
        """
        base_entity_id = config['entityid']

        # If not using a template for entityid and there are not multiple
        # backends, then for backward compatibility append co_name at the end.
        if "<co_name>" not in base_entity_id and not self.has_multiple_backends:
            base_entity_id = "{}/{}".format(base_entity_id, "<co_name>")

        replace = [
            ("<backend_name>", quote_plus(backend_name)),
            ("<co_name>", quote_plus(co_name))
        ]
        for _replace in replace:
            base_entity_id = base_entity_id.replace(_replace[0], _replace[1])

        config['entityid'] = base_entity_id
        return config

    def _overlay_for_saml_metadata(self, config, co_name):
        """
        Overlay configuration details like organization and contact person
        from the front end configuration onto the IdP configuration to
        support SAML metadata generation.

        :type config: satosa.satosa_config.SATOSAConfig
        :type co_name: str
        :rtype: satosa.satosa_config.SATOSAConfig

        :param config: satosa proxy config
        :param co_name: CO name
        :return: config with updated details for SAML metadata
        """
        all_co_configs = self.config[self.KEY_CO]
        co_config = next(
            item for item in all_co_configs
            if item[self.KEY_ENCODEABLE_NAME] == co_name
        )

        key = self.KEY_ORGANIZATION
        if key in co_config:
            if key not in config:
                config[key] = {}
            for org_key in self.KEY_ORGANIZATION_KEYS:
                if org_key in co_config[key]:
                    config[key][org_key] = co_config[key][org_key]

        key = self.KEY_CONTACT_PERSON
        if key in co_config:
            config[key] = co_config[key]

        return config

    def _co_names_from_config(self):
        """
        Parse the configuration for the names of the COs for which to
        construct virtual IdPs.

        :rtype: [str]
        :return: list of CO names
        """
        co_names = [co[self.KEY_ENCODEABLE_NAME] for
                    co in self.config[self.KEY_CO]]
        return co_names

    def _create_co_virtual_idp(self, context, co_name=None):
        """
        Create a virtual IdP to represent the CO.

        :type context: satosa.context.Context
        :rtype: saml.server.Server

        :param context: The current context
        :param co_name: CO name; resolved from state/path when omitted
        :return: An idp server
        """
        co_name = co_name or self._get_co_name(context)
        context.decorate(self.KEY_CO_NAME, co_name)

        # Verify that we are configured for this CO. If the CO was not
        # configured most likely the endpoint used was not registered and
        # SATOSA core code threw an exception before getting here, but we
        # include this check in case later the regex used to register the
        # endpoints is relaxed.
        co_names = self._co_names_from_config()
        if co_name not in co_names:
            msg = "CO {} not in configured list of COs {}".format(co_name,
                                                                  co_names)
            logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg)
            # Fix: Logger.warn is a deprecated alias of Logger.warning.
            logger.warning(logline)
            raise SATOSAError(msg)

        # Make a copy of the general IdP config that we will then overwrite
        # with mappings between SAML bindings and CO specific URL endpoints,
        # and the entityID for the CO virtual IdP.
        backend_name = context.target_backend
        idp_config = copy.deepcopy(self.idp_config)
        idp_config = self._add_endpoints_to_config(
            idp_config, co_name, backend_name
        )
        idp_config = self._add_entity_id(idp_config, co_name, backend_name)
        context.decorate(self.KEY_CO_ENTITY_ID, idp_config['entityid'])

        # Use the overwritten IdP config to generate a pysaml2 config object
        # and from it a server object.
        pysaml2_idp_config = IdPConfig().load(idp_config)
        server = Server(config=pysaml2_idp_config)

        return server

    def _register_endpoints(self, backend_names):
        """
        See super class satosa.frontends.base.FrontendModule#register_endpoints

        Endpoints have the format

        {base}/{backend}/{co_name}/{binding path}

        For example the HTTP-Redirect binding request path will have the
        format

        {base}/{backend}/{co_name}/sso/redirect

        :type backend_names: list[str]
        :rtype list[(str, ((satosa.context.Context, Any) ->
                satosa.response.Response, Any))] |
                list[(str, (satosa.context.Context) ->
                satosa.response.Response)]

        :param backend_names: A list of backend names
        :return: A list of url and endpoint function pairs
        """
        # Throw an exception if there is a possibility of duplicate entity
        # ids when using co_names with multiple backends.
        self.has_multiple_backends = len(backend_names) > 1
        co_names = self._co_names_from_config()
        all_entity_ids = []
        for backend_name in backend_names:
            for co_name in co_names:
                all_entity_ids.append(self._add_entity_id(copy.deepcopy(self.idp_config), co_name, backend_name)['entityid'])

        if len(all_entity_ids) != len(set(all_entity_ids)):
            raise ValueError("Duplicate entities ids would be created for co-frontends, please make sure to make entity ids unique. "
                             "You can use <backend_name> and <co_name> to achieve it. See example yaml file.")

        # Create a regex pattern that will match any of the CO names. We
        # escape special characters like '+' and '.' that are valid
        # characters in an URL encoded string.
        url_encoded_co_names = [re.escape(quote_plus(name)) for name in
                                co_names]
        co_name_pattern = "|".join(url_encoded_co_names)

        # Create a regex pattern that will match any of the backend names.
        backend_url_pattern = "|^".join(backend_names)
        logline = "Input backend names are {}".format(backend_names)
        logger.debug(logline)
        logline = "Created backend regex '{}'".format(backend_url_pattern)
        logger.debug(logline)

        # Hold a list of tuples containing URL regex patterns and the callables
        # that handle them.
        url_to_callable_mappings = []

        # Loop over IdP endpoint categories, e.g., single_sign_on_service.
        for endpoint_category in self.endpoints:
            logline = "Examining endpoint category {}".format(endpoint_category)
            logger.debug(logline)

            # For each endpoint category loop of the bindings and their
            # assigned endpoints.
            for binding, endpoint in self.endpoints[endpoint_category].items():
                logline = "Found binding {} and endpoint {}".format(binding, endpoint)
                logger.debug(logline)

                # Parse out the path from the endpoint.
                endpoint_path = urlparse(endpoint).path
                logline = "Using path {}".format(endpoint_path)
                logger.debug(logline)

                # Use the backend URL pattern and the endpoint path to create
                # a regex that will match and that includes a pattern for
                # matching the URL encoded CO name.
                regex_pattern = "(^{})/({})/{}".format(
                    backend_url_pattern,
                    co_name_pattern,
                    endpoint_path)
                logline = "Created URL regex {}".format(regex_pattern)
                logger.debug(logline)

                # Map the regex pattern to a callable.
                the_callable = functools.partial(self.handle_authn_request,
                                                 binding_in=binding)
                # Consistency fix: use the same logline/debug pattern as the
                # other log statements in this method.
                logline = "Created callable {}".format(the_callable)
                logger.debug(logline)

                mapping = (regex_pattern, the_callable)
                url_to_callable_mappings.append(mapping)
                logline = "Adding mapping {}".format(mapping)
                logger.debug(logline)

        if self.expose_entityid_endpoint():
            for backend_name in backend_names:
                for co_name in co_names:
                    idp_config = self._add_entity_id(copy.deepcopy(self.idp_config), co_name, backend_name)
                    entity_id = idp_config['entityid']
                    logline = "Exposing frontend entity endpoint = {}".format(entity_id)
                    logger.debug(logline)
                    parsed_entity_id = urlparse(entity_id)
                    metadata_endpoint = "^{0}".format(parsed_entity_id.path[1:])
                    the_callable = functools.partial(self._metadata_endpoint, co_name=co_name)
                    url_to_callable_mappings.append((metadata_endpoint, the_callable))

        return url_to_callable_mappings

    def _metadata_endpoint(self, context, co_name):
        """
        Endpoint for retrieving the virtual frontend metadata

        :type context: satosa.context.Context
        :rtype: satosa.response.Response

        :param context: The current context
        :param co_name: CO whose virtual IdP metadata is requested
        :return: response with metadata
        """
        # Using the context of the current request and saved state from the
        # authentication request dynamically create an IdP instance.
        # self.idp is read by the base-class metadata endpoint.
        self.idp = self._create_co_virtual_idp(context, co_name=co_name)
        # Fix: removed a stray trailing semicolon.
        return super()._metadata_endpoint(context=context)
| |
import os
import shutil
import tempfile
from mock import patch
from ..hashindex import NSIndex
from ..helpers import Location, IntegrityError
from ..locking import UpgradableLock
from ..remote import RemoteRepository, InvalidRPCMethod
from ..repository import Repository
from . import BaseTestCase
class RepositoryTestCaseBase(BaseTestCase):
    """Shared fixture: each test runs against a fresh on-disk repository
    created inside a private temporary directory."""
    key_size = 32

    def open(self, create=False):
        repo_path = os.path.join(self.tmppath, 'repository')
        return Repository(repo_path, create=create)

    def setUp(self):
        self.tmppath = tempfile.mkdtemp()
        self.repository = self.open(create=True)

    def tearDown(self):
        self.repository.close()
        shutil.rmtree(self.tmppath)

    def reopen(self):
        # Close the current handle (if any) and re-open the same repository
        # from disk, forcing index/state to be reloaded.
        if self.repository:
            self.repository.close()
        self.repository = self.open()
class RepositoryTestCase(RepositoryTestCaseBase):
    """Basic get/put/delete/commit/list behaviour of a repository."""

    def test1(self):
        # Store 100 fixed-width (32-byte, space-padded) keys, delete one,
        # and verify the deletion survives commit, close and reopen.
        for x in range(100):
            self.repository.put(('%-32d' % x).encode('ascii'), b'SOMEDATA')
        key50 = ('%-32d' % 50).encode('ascii')
        self.assert_equal(self.repository.get(key50), b'SOMEDATA')
        self.repository.delete(key50)
        self.assert_raises(Repository.ObjectNotFound, lambda: self.repository.get(key50))
        self.repository.commit()
        self.repository.close()
        # A second, independent handle must see the committed state.
        repository2 = self.open()
        self.assert_raises(Repository.ObjectNotFound, lambda: repository2.get(key50))
        for x in range(100):
            if x == 50:
                continue
            self.assert_equal(repository2.get(('%-32d' % x).encode('ascii')), b'SOMEDATA')
        repository2.close()

    def test2(self):
        """Test multiple sequential transactions
        """
        self.repository.put(b'00000000000000000000000000000000', b'foo')
        self.repository.put(b'00000000000000000000000000000001', b'foo')
        self.repository.commit()
        # Second transaction: delete one key and replace the other.
        self.repository.delete(b'00000000000000000000000000000000')
        self.repository.put(b'00000000000000000000000000000001', b'bar')
        self.repository.commit()
        self.assert_equal(self.repository.get(b'00000000000000000000000000000001'), b'bar')

    def test_consistency(self):
        """Test cache consistency
        """
        # Repeated overwrites within one transaction must always read back
        # the latest value, and a delete must win over earlier puts.
        self.repository.put(b'00000000000000000000000000000000', b'foo')
        self.assert_equal(self.repository.get(b'00000000000000000000000000000000'), b'foo')
        self.repository.put(b'00000000000000000000000000000000', b'foo2')
        self.assert_equal(self.repository.get(b'00000000000000000000000000000000'), b'foo2')
        self.repository.put(b'00000000000000000000000000000000', b'bar')
        self.assert_equal(self.repository.get(b'00000000000000000000000000000000'), b'bar')
        self.repository.delete(b'00000000000000000000000000000000')
        self.assert_raises(Repository.ObjectNotFound, lambda: self.repository.get(b'00000000000000000000000000000000'))

    def test_consistency2(self):
        """Test cache consistency2
        """
        # An uncommitted overwrite must be visible until rollback, after
        # which the last committed value is restored.
        self.repository.put(b'00000000000000000000000000000000', b'foo')
        self.assert_equal(self.repository.get(b'00000000000000000000000000000000'), b'foo')
        self.repository.commit()
        self.repository.put(b'00000000000000000000000000000000', b'foo2')
        self.assert_equal(self.repository.get(b'00000000000000000000000000000000'), b'foo2')
        self.repository.rollback()
        self.assert_equal(self.repository.get(b'00000000000000000000000000000000'), b'foo')

    def test_overwrite_in_same_transaction(self):
        """Test cache consistency2
        """
        # Two puts to the same key in one transaction: the later one wins.
        self.repository.put(b'00000000000000000000000000000000', b'foo')
        self.repository.put(b'00000000000000000000000000000000', b'foo2')
        self.repository.commit()
        self.assert_equal(self.repository.get(b'00000000000000000000000000000000'), b'foo2')

    def test_single_kind_transactions(self):
        # Each transaction consists of exactly one operation kind
        # (put, replace, delete), across separate open/close cycles.
        # put
        self.repository.put(b'00000000000000000000000000000000', b'foo')
        self.repository.commit()
        self.repository.close()
        # replace
        self.repository = self.open()
        self.repository.put(b'00000000000000000000000000000000', b'bar')
        self.repository.commit()
        self.repository.close()
        # delete
        self.repository = self.open()
        self.repository.delete(b'00000000000000000000000000000000')
        self.repository.commit()

    def test_list(self):
        # list() with limit/marker must paginate consistently over all keys.
        for x in range(100):
            self.repository.put(('%-32d' % x).encode('ascii'), b'SOMEDATA')
        all = self.repository.list()
        self.assert_equal(len(all), 100)
        first_half = self.repository.list(limit=50)
        self.assert_equal(len(first_half), 50)
        self.assert_equal(first_half, all[:50])
        second_half = self.repository.list(marker=first_half[-1])
        self.assert_equal(len(second_half), 50)
        self.assert_equal(second_half, all[50:])
        self.assert_equal(len(self.repository.list(limit=50)), 50)
class RepositoryCommitTestCase(RepositoryTestCaseBase):
    """Recovery behaviour when a commit is interrupted or the index is lost."""

    def add_keys(self):
        # One committed transaction (3 keys), then uncommitted changes:
        # a replace, a new put, and a delete. Net committed+replayed
        # result is 3 live keys.
        self.repository.put(b'00000000000000000000000000000000', b'foo')
        self.repository.put(b'00000000000000000000000000000001', b'bar')
        self.repository.put(b'00000000000000000000000000000003', b'bar')
        self.repository.commit()
        self.repository.put(b'00000000000000000000000000000001', b'bar2')
        self.repository.put(b'00000000000000000000000000000002', b'boo')
        self.repository.delete(b'00000000000000000000000000000003')

    def test_replay_of_missing_index(self):
        # Deleting the index files forces a segment replay on reopen.
        self.add_keys()
        for name in os.listdir(self.repository.path):
            if name.startswith('index.'):
                os.unlink(os.path.join(self.repository.path, name))
        self.reopen()
        self.assert_equal(len(self.repository), 3)
        self.assert_equal(self.repository.check(), True)

    def test_crash_before_compact_segments(self):
        self.add_keys()
        # Sabotage compact_segments so commit dies with TypeError partway
        # through; reopen must then recover a consistent state.
        self.repository.compact_segments = None
        try:
            self.repository.commit()
        except TypeError:
            pass
        self.reopen()
        self.assert_equal(len(self.repository), 3)
        self.assert_equal(self.repository.check(), True)

    def test_replay_of_readonly_repository(self):
        # Replay needs an exclusive lock; when the lock upgrade fails the
        # repository must surface that error instead of replaying.
        self.add_keys()
        for name in os.listdir(self.repository.path):
            if name.startswith('index.'):
                os.unlink(os.path.join(self.repository.path, name))
        with patch.object(UpgradableLock, 'upgrade', side_effect=UpgradableLock.ExclusiveLockFailed) as upgrade:
            self.reopen()
            self.assert_raises(UpgradableLock.ExclusiveLockFailed, lambda: len(self.repository))
            upgrade.assert_called_once_with()

    def test_crash_before_write_index(self):
        self.add_keys()
        # Sabotage write_index so the commit fails before the index is
        # persisted; reopen must replay segments and recover.
        self.repository.write_index = None
        try:
            self.repository.commit()
        except TypeError:
            pass
        self.reopen()
        self.assert_equal(len(self.repository), 3)
        self.assert_equal(self.repository.check(), True)

    def test_crash_before_deleting_compacted_segments(self):
        self.add_keys()
        # Sabotage segment deletion so compacted-but-undeleted segments are
        # left on disk; recovery must still produce a consistent repository.
        self.repository.io.delete_segment = None
        try:
            self.repository.commit()
        except TypeError:
            pass
        self.reopen()
        self.assert_equal(len(self.repository), 3)
        self.assert_equal(self.repository.check(), True)
        self.assert_equal(len(self.repository), 3)
class RepositoryCheckTestCase(RepositoryTestCaseBase):
    """check()/repair behaviour against deliberately corrupted repositories."""

    def list_indices(self):
        # All on-disk index files of the repository.
        return [name for name in os.listdir(os.path.join(self.tmppath, 'repository')) if name.startswith('index.')]

    def check(self, repair=False, status=True):
        self.assert_equal(self.repository.check(repair=repair), status)
        # Make sure no tmp files are left behind
        self.assert_equal([name for name in os.listdir(os.path.join(self.tmppath, 'repository')) if 'tmp' in name], [], 'Found tmp files')

    def get_objects(self, *ids):
        # Fetch each object by its zero-padded numeric id; raises if missing.
        for id_ in ids:
            self.repository.get(('%032d' % id_).encode('ascii'))

    def add_objects(self, segments):
        # Each inner list becomes one committed transaction (one segment).
        for ids in segments:
            for id_ in ids:
                self.repository.put(('%032d' % id_).encode('ascii'), b'data')
            self.repository.commit()

    def get_head(self):
        # Highest numbered segment file in data/0 == current transaction id.
        return sorted(int(n) for n in os.listdir(os.path.join(self.tmppath, 'repository', 'data', '0')) if n.isdigit())[-1]

    def open_index(self):
        return NSIndex.read(os.path.join(self.tmppath, 'repository', 'index.{}'.format(self.get_head())))

    def corrupt_object(self, id_):
        # Overwrite the object's on-disk bytes at the offset recorded in the
        # index, so its checksum no longer matches.
        idx = self.open_index()
        segment, offset = idx[('%032d' % id_).encode('ascii')]
        with open(os.path.join(self.tmppath, 'repository', 'data', '0', str(segment)), 'r+b') as fd:
            fd.seek(offset)
            fd.write(b'BOOM')

    def delete_segment(self, segment):
        os.unlink(os.path.join(self.tmppath, 'repository', 'data', '0', str(segment)))

    def delete_index(self):
        os.unlink(os.path.join(self.tmppath, 'repository', 'index.{}'.format(self.get_head())))

    def rename_index(self, new_name):
        os.rename(os.path.join(self.tmppath, 'repository', 'index.{}'.format(self.get_head())),
                  os.path.join(self.tmppath, 'repository', new_name))

    def list_objects(self):
        # All live object ids as ints.
        return set(int(key) for key in self.repository.list())

    def test_repair_corrupted_segment(self):
        self.add_objects([[1, 2, 3], [4, 5, 6]])
        self.assert_equal(set([1, 2, 3, 4, 5, 6]), self.list_objects())
        self.check(status=True)
        self.corrupt_object(5)
        self.assert_raises(IntegrityError, lambda: self.get_objects(5))
        self.repository.rollback()
        # Make sure a regular check does not repair anything
        self.check(status=False)
        self.check(status=False)
        # Make sure a repair actually repairs the repo
        self.check(repair=True, status=True)
        self.get_objects(4)
        self.check(status=True)
        # Repair drops the corrupted object (5) but keeps the rest.
        self.assert_equal(set([1, 2, 3, 4, 6]), self.list_objects())

    def test_repair_missing_segment(self):
        self.add_objects([[1, 2, 3], [4, 5, 6]])
        self.assert_equal(set([1, 2, 3, 4, 5, 6]), self.list_objects())
        self.check(status=True)
        self.delete_segment(1)
        self.repository.rollback()
        self.check(repair=True, status=True)
        # Only the first segment's objects survive.
        self.assert_equal(set([1, 2, 3]), self.list_objects())

    def test_repair_missing_commit_segment(self):
        # Removing the segment holding the second commit rolls the repo
        # back to the first transaction.
        self.add_objects([[1, 2, 3], [4, 5, 6]])
        self.delete_segment(1)
        self.assert_raises(Repository.ObjectNotFound, lambda: self.get_objects(4))
        self.assert_equal(set([1, 2, 3]), self.list_objects())

    def test_repair_corrupted_commit_segment(self):
        self.add_objects([[1, 2, 3], [4, 5, 6]])
        # Truncate/clobber the last byte of the second commit segment.
        with open(os.path.join(self.tmppath, 'repository', 'data', '0', '1'), 'r+b') as fd:
            fd.seek(-1, os.SEEK_END)
            fd.write(b'X')
        self.assert_raises(Repository.ObjectNotFound, lambda: self.get_objects(4))
        self.check(status=True)
        self.get_objects(3)
        self.assert_equal(set([1, 2, 3]), self.list_objects())

    def test_repair_no_commits(self):
        self.add_objects([[1, 2, 3]])
        # Corrupt the only commit; the repository then has no valid commit.
        with open(os.path.join(self.tmppath, 'repository', 'data', '0', '0'), 'r+b') as fd:
            fd.seek(-1, os.SEEK_END)
            fd.write(b'X')
        self.assert_raises(Repository.CheckNeeded, lambda: self.get_objects(4))
        self.check(status=False)
        self.check(status=False)
        self.assert_equal(self.list_indices(), ['index.0'])
        # Repair rewrites the repository, advancing the index generation.
        self.check(repair=True, status=True)
        self.assert_equal(self.list_indices(), ['index.1'])
        self.check(status=True)
        self.get_objects(3)
        self.assert_equal(set([1, 2, 3]), self.list_objects())

    def test_repair_missing_index(self):
        # A missing index is rebuilt transparently; no data is lost.
        self.add_objects([[1, 2, 3], [4, 5, 6]])
        self.delete_index()
        self.check(status=True)
        self.get_objects(4)
        self.assert_equal(set([1, 2, 3, 4, 5, 6]), self.list_objects())

    def test_repair_index_too_new(self):
        # An index claiming a future transaction id is discarded and rebuilt.
        self.add_objects([[1, 2, 3], [4, 5, 6]])
        self.assert_equal(self.list_indices(), ['index.1'])
        self.rename_index('index.100')
        self.check(status=True)
        self.assert_equal(self.list_indices(), ['index.1'])
        self.get_objects(4)
        self.assert_equal(set([1, 2, 3, 4, 5, 6]), self.list_objects())

    def test_crash_before_compact(self):
        self.repository.put(bytes(32), b'data')
        self.repository.put(bytes(32), b'data2')
        # Simulate a crash before compact
        with patch.object(Repository, 'compact_segments') as compact:
            self.repository.commit()
            compact.assert_called_once_with()
        self.reopen()
        self.check(repair=True)
        # The later overwrite must win after recovery.
        self.assert_equal(self.repository.get(bytes(32)), b'data2')
class RemoteRepositoryTestCase(RepositoryTestCase):
    """Runs the full RepositoryTestCase suite against a RemoteRepository."""

    def open(self, create=False):
        # '__testsuite__' host — presumably routed to an in-process test
        # transport by RemoteRepository; confirm in the remote module.
        repo_location = Location('__testsuite__:' + os.path.join(self.tmppath, 'repository'))
        return RemoteRepository(repo_location, create=create)

    def test_invalid_rpc(self):
        # Calling a non-exposed method name over RPC must be rejected.
        self.assert_raises(InvalidRPCMethod, lambda: self.repository.call('__init__', None))
class RemoteRepositoryCheckTestCase(RepositoryCheckTestCase):
    """Runs the RepositoryCheckTestCase suite against a RemoteRepository."""

    def open(self, create=False):
        repo_location = Location('__testsuite__:' + os.path.join(self.tmppath, 'repository'))
        return RemoteRepository(repo_location, create=create)

    def test_crash_before_compact(self):
        # skip this test, we can't mock-patch a Repository class in another process!
        pass
| |
#!/usr/bin/env python3.9
import sys
import os
import re
import collections
from typing import Any, Dict, List, Set, Tuple
import utils
def quoteme(to_quote, quote_char):
    """Wrap *to_quote* in *quote_char* on both sides and return the result."""
    return quote_char + to_quote + quote_char
def quoteme_list(to_quote_list, quote_char):
    """Quote every element of *to_quote_list* with *quote_char*; returns a new list."""
    quoted = []
    for item in to_quote_list:
        quoted.append(quoteme(item, quote_char))
    return quoted
def quoteme_single(to_quote):
    """Wrap *to_quote* in single quotes."""
    return quoteme(to_quote, "'")
def quoteme_single_list(to_quote_list):
    """Wrap every element of *to_quote_list* in single quotes.

    (Fix: removed a stray trailing comma left in the parameter list.)
    """
    return quoteme_list(to_quote_list, "'")
def quoteme_double(to_quote):
    """Wrap *to_quote* in double quotes."""
    return quoteme(to_quote, '"')
def quoteme_double_list(to_quote_list):
    """Wrap every element of *to_quote_list* in double quotes."""
    return quoteme_list(to_quote_list, '"')
def quoteme_double_list_for_sql(to_quote_list):
    """Render *to_quote_list* as a SQL tuple of double-quoted values, e.g. ("a","b")."""
    inner = '","'.join(to_quote_list)
    return '("' + inner + '")'
def quoteme_single_list_for_sql(to_quote_list):
    """Render *to_quote_list* as a SQL tuple of single-quoted values, e.g. ('a','b')."""
    inner = "','".join(to_quote_list)
    return "('" + inner + "')"
# Matches any character that must be backslash-escaped inside a quoted
# literal: single quote, double quote, or backslash.
escape_quotations_re = re.compile("['\"\\\\]")


def escape_quotations(simple_string):
    """Backslash-escape every ', \" and \\ occurring in *simple_string*."""
    return escape_quotations_re.sub(lambda m: '\\' + m.group(0), simple_string)
def quoteme_raw_string(simple_string):
    """
    Render *simple_string* as a Python raw-string literal.

    Tries single/double and triple quotation marks in priority order;
    multiline text only gets triple quotes. When no quote style can legally
    wrap the text as a raw literal, falls back to a plain double-quoted
    literal with explicit escaping.
    """
    assert isinstance(simple_string, str), f"{simple_string} is not of type str"
    if not simple_string:
        return 'r""'

    simple_string = os.fspath(simple_string)
    if "\n" in simple_string:
        # multiline strings need triple quotation
        candidate_marks = ('"""', "'''")
    else:
        candidate_marks = ('"', "'", '"""', "'''")

    last_char = simple_string[-1]
    for mark in candidate_marks:
        # 1st priority is a raw string. A raw literal is only legal when the
        # mark does not occur in the text and the text ends neither with the
        # mark's closing character nor with a backslash.
        if mark not in simple_string and mark[-1] != last_char and last_char != '\\':
            return "".join(('r', mark, simple_string, mark))

    # All candidate marks are unusable — escape and emit a non-raw literal.
    return "".join(('"', escape_quotations(simple_string), '"'))
# bool/int/float render unambiguously via str() and need no quotation.
types_that_do_not_need_quotation = (int, float, bool)


def quoteme_raw_if_string(some_thing):
    """Return *some_thing* as source text: numbers/bools verbatim, anything else as a raw-string literal."""
    if isinstance(some_thing, types_that_do_not_need_quotation):
        return str(some_thing)
    return quoteme_raw_string(str(some_thing))
def quoteme_raw_by_type(some_thing, config_vars=None):
    """
    Recursively render *some_thing* as Python source text.

    Numbers/bools pass through via str(); strings become raw literals
    (optionally resolved through *config_vars* first); PathLike objects are
    converted to their path string; sequences and mappings are rendered
    element-wise. Returns None for unsupported types.
    """
    if isinstance(some_thing, types_that_do_not_need_quotation):
        return str(some_thing)
    if isinstance(some_thing, str):
        if config_vars is not None:
            some_thing = config_vars.resolve_str(some_thing)
        return quoteme_raw_string(some_thing)
    if isinstance(some_thing, os.PathLike):
        return quoteme_raw_by_type(os.fspath(some_thing), config_vars)
    if isinstance(some_thing, collections.abc.Sequence):
        rendered_items = ",".join(quoteme_raw_by_type(item, config_vars) for item in some_thing)
        return "[" + rendered_items + "]"
    if isinstance(some_thing, collections.abc.Mapping):
        # NOTE(review): keys are rendered without config_vars resolution,
        # unlike values — preserved from the original; confirm intended.
        rendered_pairs = (
            f"""{quoteme_raw_by_type(key)}:{quoteme_raw_by_type(val, config_vars)}"""
            for key, val in sorted(some_thing.items())
        )
        return "{" + ",".join(rendered_pairs) + "}"
    return None
def quoteme_raw_list(list_of_things):
    """Apply quoteme_raw_if_string to every element, returning a new list."""
    return [quoteme_raw_if_string(thing) for thing in list_of_things]
def quoteme_raw_if_list(list_of_things, one_element_list_as_string=False):
    """
    Render *list_of_things* as source text.

    Non-sequences and strings are rendered as a single item; other sequences
    become a bracketed list literal. With *one_element_list_as_string*, a
    single-element sequence is rendered as its lone element instead.
    """
    if isinstance(list_of_things, str) or not isinstance(list_of_things, collections.abc.Sequence):
        return quoteme_raw_if_string(list_of_things)
    if one_element_list_as_string and len(list_of_things) == 1:
        return quoteme_raw_if_string(list_of_things[0])
    quoted_items = quoteme_raw_list(list_of_things)
    return "".join(("[", ",".join(quoted_items), "]"))
def quote_path_properly(path_to_quote):
    """Wrap *path_to_quote* in quotes for use in a shell-like command line.

    Single quotes are preferred; double quotes are used when the path
    contains a single quote or a "${...}" variable reference.  If both kinds
    of quote appear in the path an Exception is raised.
    """
    needs_double_quotes = "'" in path_to_quote or "${" in path_to_quote
    if needs_double_quotes and '"' in path_to_quote:
        raise Exception(f"""both single quote and double quote found in {path_to_quote}""")
    chosen_quote = '"' if needs_double_quotes else "'"
    return chosen_quote + path_to_quote + chosen_quote
# Matches a leading single- or double-quoted span; the closing quote must be
# the same character as the opening one.  match() is anchored at the start
# only, so trailing text after the closing quote is ignored.
detect_quotations = re.compile("(?P<prefix>[\"'])(?P<the_unquoted_text>.+)(?P=prefix)")


def unquoteme(to_unquote):
    """Strip one layer of matching quotes from *to_unquote*, if present."""
    matched = detect_quotations.match(to_unquote)
    if matched:
        return matched['the_unquoted_text']
    return to_unquote
def unicodify(in_something, encoding='utf-8'):
    """Coerce *in_something* to str.

    None passes through; bytes are decoded with *encoding* (undecodable bytes
    become backslash escapes); anything else goes through str().
    """
    if in_something is None:
        return None
    if isinstance(in_something, bytes):
        return in_something.decode(encoding, errors='backslashreplace')
    if isinstance(in_something, str):
        return in_something
    return str(in_something)
def bytetify(in_something):
    """Coerce *in_something* to bytes.

    None passes through; bytes are returned unchanged; everything else is
    converted via str() and encoded with the default (utf-8) codec.
    """
    if in_something is None:
        return None
    if isinstance(in_something, bytes):
        return in_something
    return str(in_something).encode()
def bool_int_to_str(in_bool_int):
    """Translate a boolean-ish int to text: 0 -> "no", anything else -> "yes"."""
    return "no" if in_bool_int == 0 else "yes"
def str_to_bool_int(the_str):
    """Translate a yes/no-style string to 1 or 0 (case-insensitive).

    Raises ValueError for anything outside the two recognized word sets.
    """
    lowered = the_str.lower()
    if lowered in ("yes", "true", "y", "t", "1"):
        return 1
    if lowered in ("no", "false", "n", "f", "0"):
        return 0
    raise ValueError(f"Cannot translate {the_str} to bool-int")
def is_iterable_but_not_str(obj_to_check):
    """Return True for objects exposing __iter__ that are not strings."""
    if isinstance(obj_to_check, str):
        return False
    return hasattr(obj_to_check, '__iter__')
if __name__ == "__main__":
    # Ad-hoc manual check of the quoting/escaping behaviour above; kept for
    # developer experimentation, not exercised by any caller.
    #print(quoteme_raw_string(r'''"$(LOCAL_REPO_SYNC_DIR)/Mac/Utilities/plist/plist_creator.sh" "$(__Plist_for_native_instruments_1__)"'''))
    #print(quoteme_raw_string("""single-single(') triple-single(''') single-double(") single-triple(\"\"\")"""))
    # Escape every quote and backslash with a leading backslash and print it.
    rere = re.compile("['\"\\\\]")
    s = r"""A"B'C'''D'\\EFG"""
    rs = rere.sub(lambda matchobj: '\\'+matchobj.group(0), s)
    print(rs)
| |
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""Various utilities to help with unit and functional testing."""
from __future__ import absolute_import
import gc
import io
import socket
import sys
import os
import pwd
from operator import setitem, delitem
from collections import namedtuple
from contextlib import contextmanager
from random import random
import shutil
from signal import SIGKILL
from subprocess import check_call, check_output
from functools import wraps
from zope.interface import implementer
from zope.interface.verify import verifyClass, verifyObject
from ipaddr import IPAddress
from twisted.internet.interfaces import IProcessTransport, IReactorProcess
from twisted.python.filepath import FilePath, Permissions
from twisted.internet.task import Clock, deferLater
from twisted.internet.defer import maybeDeferred, Deferred
from twisted.internet.error import ConnectionDone
from twisted.internet import reactor
from twisted.cred.portal import IRealm, Portal
from twisted.conch.ssh.keys import Key
from twisted.conch.checkers import SSHPublicKeyDatabase
from twisted.conch.openssh_compat.factory import OpenSSHFactory
from twisted.conch.unix import UnixConchUser
from twisted.trial.unittest import SynchronousTestCase, SkipTest
from twisted.internet.protocol import Factory, Protocol
from characteristic import attributes
from . import __version__
from .common.script import (
FlockerScriptRunner, ICommandLineScript)
@implementer(IProcessTransport)
class FakeProcessTransport(object):
    """
    Mock process transport to observe signals sent to a process.

    @ivar signals: L{list} of signals sent to process.
    """
    def __init__(self):
        # Every signal name passed to signalProcess, in call order.
        self.signals = []

    def signalProcess(self, signal):
        # Record the signal instead of delivering it to a real process.
        self.signals.append(signal)
class SpawnProcessArguments(namedtuple(
        'ProcessData',
        'processProtocol executable args env path '
        'uid gid usePTY childFDs transport')):
    """
    Object recording the arguments passed to L{FakeProcessReactor.spawnProcess}
    as well as the L{IProcessTransport} that was connected to the protocol.

    @ivar transport: Fake transport connected to the protocol.
    @type transport: L{IProcessTransport}

    @see L{twisted.internet.interfaces.IReactorProcess.spawnProcess}
    """
# Sentinel so spawnProcess can distinguish "no env argument given" from an
# explicit ``env=None`` without sharing one mutable default dict.
_UNSPECIFIED_ENV = object()


@implementer(IReactorProcess)
class FakeProcessReactor(Clock):
    """
    Fake reactor implementing process support.

    @ivar processes: List of process that have been spawned
    @type processes: L{list} of L{SpawnProcessArguments}.
    """
    def __init__(self):
        Clock.__init__(self)
        self.processes = []

    def timeout(self):
        """Return seconds until the next scheduled call, or 0 if none is due."""
        if self.calls:
            return max(0, self.calls[0].getTime() - self.seconds())
        return 0

    def spawnProcess(self, processProtocol, executable, args=(),
                     env=_UNSPECIFIED_ENV, path=None, uid=None, gid=None,
                     usePTY=0, childFDs=None):
        """
        Record the spawn request and connect a L{FakeProcessTransport} to
        ``processProtocol``.

        :return: The fake transport connected to the protocol.
        """
        # The previous signature used the mutable default ``env={}``; a test
        # mutating the recorded dict would then have polluted the default for
        # every later call.  The sentinel also keeps an explicit ``env=None``
        # intact instead of silently replacing it.
        if env is _UNSPECIFIED_ENV:
            env = {}
        transport = FakeProcessTransport()
        self.processes.append(SpawnProcessArguments(
            processProtocol, executable, args, env, path, uid, gid, usePTY,
            childFDs, transport=transport))
        processProtocol.makeConnection(transport)
        return transport


verifyClass(IReactorProcess, FakeProcessReactor)
@contextmanager
def assertNoFDsLeaked(test_case):
    """Context manager that asserts no file descriptors are leaked.

    :param test_case: The ``TestCase`` running this unit test.
    """
    def open_fd_names():
        # Snapshot the names of this process's open FDs via /proc.
        return {entry.basename() for entry in FilePath(b"/proc/self/fd").children()}

    # Collect garbage first so FDs held only by unreachable objects do not
    # later show up as "leaks".
    gc.collect()
    before = open_fd_names()
    try:
        yield
    finally:
        test_case.assertEqual(open_fd_names(), before)
def loop_until(predicate):
    """Call predicate every 0.1 seconds, until it returns something ``Truthy``.

    :param predicate: Callable returning termination condition.
    :type predicate: 0-argument callable returning a Deferred.

    :return: A ``Deferred`` firing with the first ``Truthy`` response from
        ``predicate``.
    """
    outcome = maybeDeferred(predicate)

    def check(result):
        if result:
            return result
        # Not done yet: wait 100ms, poll again, and keep the chain going.
        retry = deferLater(reactor, 0.1, predicate)
        retry.addCallback(check)
        return retry

    outcome.addCallback(check)
    return outcome
def random_name():
    """Return a short, random name.

    :return name: A random ``unicode`` name.
    """
    number = int(random() * 1e12)
    return u"%d" % (number,)
def help_problems(command_name, help_text):
    """Identify and return a list of help text problems.

    :param unicode command_name: The name of the command which should appear in
        the help text.
    :param bytes help_text: The full help text to be inspected.

    :return: A list of problems found with the supplied ``help_text``.
    :rtype: list
    """
    expected_start = u'Usage: {command}'.format(
        command=command_name).encode('utf8')
    if help_text.startswith(expected_start):
        return []
    actual_start = help_text[:len(expected_start)]
    return [
        'Does not begin with {expected}. Found {actual} instead'.format(
            expected=repr(expected_start),
            actual=repr(actual_start)
        )
    ]
class FakeSysModule(object):
    """A ``sys`` like substitute.

    For use in testing the handling of `argv`, `stdout` and `stderr` by command
    line scripts.

    :ivar list argv: See ``__init__``
    :ivar stdout: A :py:class:`io.BytesIO` object representing standard output.
    :ivar stderr: A :py:class:`io.BytesIO` object representing standard error.
    """
    def __init__(self, argv=None):
        """Initialise the fake sys module.

        :param list argv: The arguments list which should be exposed as
            ``sys.argv``.
        """
        self.argv = [] if argv is None else argv
        # io.BytesIO is not quite the same as sys.stdout/stderr, particularly
        # with respect to unicode handling, so callers are expected to write
        # bytes only.
        self.stdout = io.BytesIO()
        self.stderr = io.BytesIO()
class FlockerScriptTestsMixin(object):
    """Common tests for scripts that can be run via L{FlockerScriptRunner}

    :ivar ICommandLineScript script: The script class under test.
    :ivar usage.Options options: The options parser class to use in the test.
    :ivar text command_name: The name of the command represented by ``script``.
    """
    script = None
    options = None
    command_name = None

    def test_interface(self):
        """
        A script that is meant to be run by ``FlockerScriptRunner`` must
        implement ``ICommandLineScript``.
        """
        self.assertTrue(verifyObject(ICommandLineScript, self.script()))

    def test_incorrect_arguments(self):
        """
        ``FlockerScriptRunner.main`` exits with status 1 and prints help to
        `stderr` if supplied with unexpected arguments.
        """
        fake_sys = FakeSysModule(
            argv=[self.command_name, b'--unexpected_argument'])
        runner = FlockerScriptRunner(
            reactor=None, script=self.script(), options=self.options(),
            sys_module=fake_sys)
        error = self.assertRaises(SystemExit, runner.main)
        stderr_text = fake_sys.stderr.getvalue()
        self.assertEqual(
            (1, []),
            (error.code, help_problems(self.command_name, stderr_text))
        )
class StandardOptionsTestsMixin(object):
    """Tests for classes decorated with ``flocker_standard_options``.

    Tests for the standard options that should be available on every flocker
    command.

    :ivar usage.Options options: The ``usage.Options`` class under test.
    """
    options = None

    def _patch_out_parse_args(self, options):
        # The command may otherwise give a UsageError
        # "Wrong number of arguments." if there are arguments required.
        # See https://github.com/ClusterHQ/flocker/issues/184 about a solution
        # which does not involve patching.
        self.patch(options, "parseArgs", lambda: None)

    def test_sys_module_default(self):
        """
        ``flocker_standard_options`` adds a ``_sys_module`` attribute which is
        ``sys`` by default.
        """
        self.assertIs(sys, self.options()._sys_module)

    def test_sys_module_override(self):
        """
        ``flocker_standard_options`` adds a ``sys_module`` argument to the
        initialiser which is assigned to ``_sys_module``.
        """
        fake_sys_module = object()
        self.assertIs(
            fake_sys_module,
            self.options(sys_module=fake_sys_module)._sys_module
        )

    def test_version(self):
        """
        Flocker commands have a `--version` option which prints the current
        version string to stdout and causes the command to exit with status
        `0`.
        """
        # Named ``fake_sys`` (not ``sys``) so the real module stays visible.
        fake_sys = FakeSysModule()
        error = self.assertRaises(
            SystemExit,
            self.options(sys_module=fake_sys).parseOptions,
            ['--version']
        )
        self.assertEqual(
            (__version__ + '\n', 0),
            (fake_sys.stdout.getvalue(), error.code)
        )

    def test_verbosity_default(self):
        """
        Flocker commands have `verbosity` of `0` by default.
        """
        self.assertEqual(0, self.options()['verbosity'])

    def test_verbosity_option(self):
        """
        Flocker commands have a `--verbose` option which increments the
        configured verbosity by `1`.
        """
        options = self.options()
        self._patch_out_parse_args(options)
        options.parseOptions(['--verbose'])
        self.assertEqual(1, options['verbosity'])

    def test_verbosity_option_short(self):
        """
        Flocker commands have a `-v` option which increments the configured
        verbosity by 1.
        """
        options = self.options()
        self._patch_out_parse_args(options)
        options.parseOptions(['-v'])
        self.assertEqual(1, options['verbosity'])

    def test_verbosity_multiple(self):
        """
        `--verbose` can be supplied multiple times to increase the verbosity.
        """
        options = self.options()
        self._patch_out_parse_args(options)
        options.parseOptions(['-v', '--verbose'])
        self.assertEqual(2, options['verbosity'])
class _InMemoryPublicKeyChecker(SSHPublicKeyDatabase):
    """
    Check SSH public keys in-memory.
    """
    def __init__(self, public_key):
        """
        :param Key public_key: The public key we will accept.
        """
        self._key = public_key

    def checkKey(self, credentials):
        """
        Validate some SSH key credentials.

        Access is granted only to root since that is the user we expect
        for connections from ``flocker-cli`` and ``flocker-changestate``.
        """
        is_root = credentials.username == b"root"
        key_matches = self._key.blob() == credentials.blob
        return key_matches and is_root
class _FixedHomeConchUser(UnixConchUser):
    """
    An SSH user with a fixed, configurable home directory.

    This is like a normal UNIX SSH user except the user's home directory is not
    determined by the ``pwd`` database.
    """
    def __init__(self, username, home):
        """
        :param username: The name of this user, passed through to
            L{UnixConchUser}.
        :param FilePath home: The path of the directory to use as this user's
            home directory.
        """
        UnixConchUser.__init__(self, username)
        self._home = home

    def getHomeDir(self):
        """
        Give back the pre-determined home directory.
        """
        return self._home.path

    def getUserGroupId(self):
        """
        Give back some not-strictly-legal ``None`` UID/GID
        identifiers. This prevents the Conch server from trying to
        switch IDs (which it can't do if it is not running as root).
        """
        return None, None
@implementer(IRealm)
class UnixSSHRealm(object):
    """
    An ``IRealm`` for a Conch server which gives out ``_FixedHomeConchUser``
    users.
    """
    def __init__(self, home):
        self.home = home

    def requestAvatar(self, username, mind, *interfaces):
        # Every user gets the same fixed home directory.
        avatar = _FixedHomeConchUser(username, self.home)
        return interfaces[0], avatar, avatar.logout
class _ConchServer(object):
    """
    A helper for a test fixture to run an SSH server using Twisted Conch.

    :ivar IPv4Address ip: The address the server is listening on.
    :ivar int port: The port number the server is listening on.
    :ivar _port: An object which provides ``IListeningPort`` and represents the
        listening Conch server.
    :ivar FilePath home_path: The path of the home directory of the user which
        is allowed to authenticate against this server.
    :ivar FilePath key_path: The path of an SSH private key which can be used
        to authenticate against the server.
    :ivar FilePath host_key_path: The path of the server's private host key.
    """
    def __init__(self, base_path):
        """
        :param FilePath base_path: The path beneath which all of the temporary
            SSH server-related files will be created.  An ``ssh`` directory
            will be created as a child of this directory to hold the key pair
            that is generated.  An ``sshd`` directory will also be created here
            to hold the generated host key.  A ``home`` directory is also
            created here and used as the home directory for shell logins to the
            server.
        """
        self.home = base_path.child(b"home")
        self.home.makedirs()
        ssh_path = base_path.child(b"ssh")
        ssh_path.makedirs()
        self.key_path = ssh_path.child(b"key")
        # Generate the client key pair with ssh-keygen.
        check_call(
            [b"ssh-keygen",
             # Specify the path where the generated key is written.
             b"-f", self.key_path.path,
             # Specify an empty passphrase.
             b"-N", b"",
             # Generate as little output as possible.
             b"-q"])
        key = Key.fromFile(self.key_path.path)
        sshd_path = base_path.child(b"sshd")
        sshd_path.makedirs()
        self.host_key_path = sshd_path.child(b"ssh_host_key")
        # Generate the server's host key the same way.
        check_call(
            [b"ssh-keygen",
             # See above for option explanations.
             b"-f", self.host_key_path.path,
             b"-N", b"",
             b"-q"])
        factory = OpenSSHFactory()
        realm = UnixSSHRealm(self.home)
        checker = _InMemoryPublicKeyChecker(public_key=key.public())
        factory.portal = Portal(realm, [checker])
        factory.dataRoot = sshd_path.path
        factory.moduliRoot = b"/etc/ssh"
        # Listen on an OS-assigned port, on loopback only.
        self._port = reactor.listenTCP(0, factory, interface=b"127.0.0.1")
        self.ip = IPAddress(self._port.getHost().host)
        self.port = self._port.getHost().port

    def restore(self):
        """
        Shut down the SSH server.

        :return: A ``Deferred`` that fires when this has been done.
        """
        return self._port.stopListening()
def create_ssh_server(base_path):
    """
    :py:func:`create_ssh_server` is a fixture which creates and runs a new SSH
    server and stops it later. Use the :py:meth:`restore` method of the
    returned object to stop the server.

    :param FilePath base_path: The path to a directory in which key material
        will be generated.

    :return: A started :py:class:`_ConchServer`.
    """
    return _ConchServer(base_path)
class _SSHAgent(object):
    """
    A helper for a test fixture to run an `ssh-agent` process.

    :ivar FilePath key_path: The path of an SSH private key which can be used
        to authenticate against the server.
    """
    def __init__(self, key_file):
        """
        Start an `ssh-agent` and add its socket path and pid to the global
        environment so that SSH sub-processes can use it for authentication.

        :param FilePath key_file: An SSH private key file which can be used
            when authenticating with SSH servers.
        """
        self._cleanups = []
        # The agent prints csh-style setenv commands, e.g.:
        # setenv SSH_AUTH_SOCK /tmp/ssh-5EfGti8RPQbQ/agent.6390;
        # setenv SSH_AGENT_PID 6391;
        # echo Agent pid 6391;
        output = check_output([b"ssh-agent", b"-c"]).splitlines()
        sock = output[0].split()[2][:-1]
        pid = output[1].split()[2][:-1]
        self._pid = int(pid)

        def patchdict(k, v):
            # Remember how to undo each environment change, then apply it.
            if k in os.environ:
                self._cleanups.append(
                    lambda old=os.environ[k]: setitem(os.environ, k, old))
            else:
                self._cleanups.append(lambda: delitem(os.environ, k))
            os.environ[k] = v
        # NOTE(review): bytes keys into os.environ are Python-2 specific --
        # confirm before porting this module to Python 3.
        patchdict(b"SSH_AUTH_SOCK", sock)
        patchdict(b"SSH_AGENT_PID", pid)
        with open(os.devnull, "w") as discard:
            # See https://github.com/clusterhq/flocker/issues/192
            check_call(
                [b"ssh-add", key_file.path],
                stdout=discard, stderr=discard)

    def restore(self):
        """
        Shut down the SSH agent and restore the test environment to its
        previous state.
        """
        for cleanup in self._cleanups:
            cleanup()
        os.kill(self._pid, SIGKILL)
def create_ssh_agent(key_file, testcase=None):
    """
    :py:func:`create_ssh_agent` is a fixture which creates and runs a new SSH
    agent and stops it later. Use the :py:meth:`restore` method of the
    returned object to stop the server.

    :param FilePath key_file: The path of an SSH private key which can be
        used when authenticating with SSH servers.
    :param TestCase testcase: The ``TestCase`` object requiring the SSH
        agent. Optional, adds a cleanup if supplied.

    :rtype: _SSHAgent
    """
    agent = _SSHAgent(key_file)
    if testcase:
        # Kill the agent and restore the environment when the test ends.
        testcase.addCleanup(agent.restore)
    return agent
def make_with_init_tests(record_type, kwargs, expected_defaults=None):
    """
    Return a ``TestCase`` which tests that ``record_type.__init__`` accepts the
    supplied ``kwargs`` and exposes them as public attributes.

    :param record_type: The class with an ``__init__`` method to be tested.
    :param kwargs: The keyword arguments which will be supplied to the
        ``__init__`` method.
    :param dict expected_defaults: The default keys and default values of
        arguments which have defaults and which may be omitted when calling the
        initialiser.

    :returns: A ``TestCase``.
    """
    if expected_defaults is None:
        expected_defaults = {}

    unknown_defaults = set(expected_defaults.keys()) - set(kwargs.keys())
    if unknown_defaults:
        raise TypeError(
            'expected_defaults contained the following unrecognized keys: '
            '{}'.format(tuple(unknown_defaults)))

    # The arguments that must always be supplied: everything without a
    # declared default.
    required_kwargs = kwargs.copy()
    for k in expected_defaults:
        required_kwargs.pop(k)

    class WithInitTests(SynchronousTestCase):
        """
        Tests for classes decorated with ``with_init``.
        """
        def test_init(self):
            """
            The record type accepts keyword arguments which are exposed as
            public attributes.
            """
            record = record_type(**kwargs)
            # list(...) so the comparison also works on Python 3, where
            # dict.values() returns a view that never equals a list.
            self.assertEqual(
                list(kwargs.values()),
                [getattr(record, key) for key in kwargs.keys()]
            )

        def test_optional_arguments(self):
            """
            The record type initialiser has arguments which may be omitted.
            """
            try:
                record = record_type(**required_kwargs)
            except ValueError as e:
                self.fail(
                    'One of the following arguments was expected to be '
                    'optional but appears to be required: %r. '
                    'Error was: %r' % (
                        list(expected_defaults.keys()), e))
            self.assertEqual(
                list(required_kwargs.values()),
                [getattr(record, key) for key in required_kwargs.keys()]
            )

        def test_optional_defaults(self):
            """
            The optional arguments have the expected defaults.
            """
            try:
                record = record_type(**required_kwargs)
            except ValueError as e:
                self.fail(
                    'One of the following arguments was expected to be '
                    'optional but appears to be required: %r. '
                    'Error was: %r' % (
                        list(expected_defaults.keys()), e))
            self.assertEqual(
                list(expected_defaults.values()),
                [getattr(record, key) for key in expected_defaults.keys()]
            )
    return WithInitTests
def find_free_port(interface='127.0.0.1', socket_family=socket.AF_INET,
                   socket_type=socket.SOCK_STREAM):
    """
    Ask the platform to allocate a free port on the specified interface, then
    release the socket and return the address which was allocated.

    Copied from ``twisted.internet.test.connectionmixins.findFreePort``.

    :param bytes interface: The local address to try to bind the port on.
    :param int socket_family: The socket family of port.
    :param int socket_type: The socket type of the port.

    :return: A two-tuple of address and port, like that returned by
        ``socket.getsockname``.
    """
    bind_address = socket.getaddrinfo(interface, 0)[0][4]
    probe_socket = socket.socket(socket_family, socket_type)
    try:
        # Binding to port 0 asks the OS to pick an unused port.
        probe_socket.bind(bind_address)
        allocated = probe_socket.getsockname()
    finally:
        probe_socket.close()
    return allocated
def make_capture_protocol():
    """
    Return a ``Deferred``, and a ``Protocol`` which will capture bytes and fire
    the ``Deferred`` when its connection is lost.

    :returns: A 2-tuple of ``Deferred`` and ``Protocol`` instance.
    :rtype: tuple
    """
    done = Deferred()
    chunks = []

    class Recorder(Protocol):
        def dataReceived(self, data):
            chunks.append(data)

        def connectionLost(self, reason):
            # A clean close delivers everything captured; any other reason
            # is propagated as a failure.
            if reason.check(ConnectionDone):
                done.callback(b''.join(chunks))
            else:
                done.errback(reason)

    return done, Recorder()
class ProtocolPoppingFactory(Factory):
    """
    A factory which only creates a fixed list of protocols.

    For example if in a test you want to ensure that a test server only handles
    a single connection, you'd supply a list of one ``Protocol``
    instance. Subsequent requests will result in an ``IndexError``.
    """
    def __init__(self, protocols):
        """
        :param list protocols: A list of ``Protocol`` instances which will be
            used for successive connections.
        """
        self.protocols = protocols

    def buildProtocol(self, addr):
        # Hand protocols out from the end of the list; once exhausted the
        # pop() raises IndexError, failing any unexpected extra connection.
        return self.protocols.pop()
@attributes(['test', 'source_dir'])
class DockerImageBuilder(object):
    """
    Build a docker image, tag it, and remove the image later.

    :ivar TestCase test: The test the builder is being used in.
    :ivar FilePath source_dir: The path to the directory containing a
        ``Dockerfile.in`` file.
    """
    def _process_template(self, template_file, target_file, replacements):
        """
        Fill in the placeholders in `template_file` with the `replacements` and
        write the result to `target_file`.

        :param FilePath template_file: The file containing the placeholders.
        :param FilePath target_file: The file to which the result will be
            written.
        :param dict replacements: A dictionary of variable names and
            replacement values.
        """
        with template_file.open() as f:
            template = f.read().decode('utf8')
        target_file.setContent(template.format(**replacements))

    def build(self, dockerfile_variables=None):
        """
        Build an image and tag it in the local Docker repository.

        :param dict dockerfile_variables: A dictionary of replacements which
            will be applied to a `Dockerfile.in` template file if such a file
            exists.

        :return: ``bytes`` with the tag name applied to the built image.
        """
        if dockerfile_variables is None:
            dockerfile_variables = {}
        working_dir = FilePath(self.test.mktemp())
        working_dir.makedirs()
        docker_dir = working_dir.child('docker')
        # Copy the build context so template expansion below does not touch
        # the original source directory.
        shutil.copytree(self.source_dir.path, docker_dir.path)
        template_file = docker_dir.child('Dockerfile.in')
        docker_file = docker_dir.child('Dockerfile')
        if template_file.exists() and not docker_file.exists():
            self._process_template(
                template_file, docker_file, dockerfile_variables)
        # NOTE(review): bytes + unicode concatenation and b'%s' formatting
        # below rely on Python 2 str semantics -- confirm before porting.
        tag = b"flockerlocaltests/" + random_name()
        # XXX: This dumps lots of debug output to stderr which messes up the
        # test results output. It's useful debug info incase of a test failure
        # so it would be better to send it to the test.log file. See
        # https://github.com/ClusterHQ/flocker/issues/171
        command = [
            b'docker', b'build',
            # Always clean up intermediate containers in case of failures.
            b'--force-rm',
            b'--tag=%s' % (tag,),
            docker_dir.path
        ]
        check_call(command)
        # XXX until https://github.com/ClusterHQ/flocker/issues/409 is
        # fixed we will often have a container lying around which is still
        # using the new image, so removing the image will fail.
        # self.test.addCleanup(check_call, [b"docker", b"rmi", tag])
        return tag
def skip_on_broken_permissions(test_method):
    """
    Skips the wrapped test when the temporary directory is on a
    filesystem with broken permissions.

    Virtualbox's shared folder (as used for :file:`/vagrant`) doesn't entirely
    respect changing permissions. For example, this test detects running on a
    shared folder by the fact that all permissions can't be removed from a
    file.

    :param callable test_method: Test method to wrap.
    :return: The wrapped method.
    :raise SkipTest: when the temporary directory is on a filesystem with
        broken permissions.
    """
    @wraps(test_method)
    def wrapper(case, *args, **kwargs):
        # Probe: strip all permissions from a scratch file and see whether
        # the filesystem actually honoured the change.
        probe = FilePath(case.mktemp())
        probe.touch()
        probe.chmod(0o000)
        observed = probe.getPermissions()
        probe.chmod(0o777)
        if observed != Permissions(0o000):
            raise SkipTest(
                "Can't run test on filesystem with broken permissions.")
        return test_method(case, *args, **kwargs)
    return wrapper
@contextmanager
def attempt_effective_uid(username, suppress_errors=False):
    """
    A context manager to temporarily change the effective user id.

    :param bytes username: The username whose uid will take effect.
    :param bool suppress_errors: Set to `True` to suppress `OSError`
        ("Operation not permitted") when running as a non-root user.
    """
    original_euid = os.geteuid()
    new_euid = pwd.getpwnam(username).pw_uid
    restore_euid = False

    if original_euid != new_euid:
        try:
            os.seteuid(new_euid)
        except OSError as e:
            # Only handle "Operation not permitted" errors;
            # e.errno == 1 is EPERM.
            if not suppress_errors or e.errno != 1:
                raise
        else:
            # seteuid succeeded, so it must be undone when the block exits.
            restore_euid = True
    try:
        yield
    finally:
        if restore_euid:
            os.seteuid(original_euid)
def assertContainsAll(haystack, needles, test_case):
    """
    Assert that all the terms in the needles list are found in the haystack.

    :param test_case: The ``TestCase`` instance on which to call assertions.
    :param list needles: A list of terms to search for in the ``haystack``.
    :param haystack: An iterable in which to search for the terms in needles.
    """
    # Unlike the previous implementation this no longer mutates the caller's
    # ``needles`` list; the failure message still lists exactly the terms
    # that were not found, in their original order.
    missing = [needle for needle in needles if needle not in haystack]
    if missing:
        test_case.fail(
            '{haystack} did not contain {needles}'.format(
                haystack=haystack, needles=missing
            )
        )
| |
from paddle.v2.framework.layer_helper import LayerHelper, unique_name
import paddle.v2.framework.core as core
from paddle.v2.framework.framework import OpProtoHolder, Variable, Program
from paddle.v2.framework.initializer import ConstantInitializer
import re
# Public layer-constructor API of this module; _create_op_func_ appends the
# names of auto-generated operator wrappers (mean, mul, ...) at import time.
__all__ = [
    'fc', 'data', 'cross_entropy', 'conv2d', 'pool2d', 'embedding', 'concat',
    'StaticRNN', 'cast', 'sequence_conv', 'sequence_pool', 'sums', 'cos_sim',
    'batch_norm', 'accuracy'
]
def fc(input,
       size,
       param_attr=None,
       bias_attr=True,
       name=None,
       act=None,
       num_flatten_dims=1,
       program=None,
       init_program=None):
    """Fully connected layer: multiplies each input by a learned weight,
    sums the products, adds a bias and applies the activation ``act``.

    :param input: Input variable(s) to project.
    :param size: Number of output units (last dimension of the weight).
    :param param_attr: Attributes for the weight parameter(s).
    :param bias_attr: Attributes for the bias term.
    :param name: Optional layer name.
    :param act: Optional activation appended to the result.
    :param num_flatten_dims: Number of leading input dimensions treated as
        the "row" part when flattening for the matrix multiply.
    :param program: Program to append ops to (framework plumbing).
    :param init_program: Program receiving parameter-initialization ops.
    """
    # Create helper; **locals() forwards every argument above, so no new
    # local variables may be introduced before this call.
    helper = LayerHelper('fc', **locals())
    dtype = helper.input_dtype()
    # One "mul" op per (input, parameter-attribute) pair.
    # NOTE(review): the loop variable rebinds the ``param_attr`` argument
    # with a per-input attribute object -- confirm this is intended.
    mul_results = []
    for input_var, param_attr in helper.iter_inputs_and_params():
        input_shape = input_var.shape
        # Weight shape: (product of the flattened input dims) x size.
        # NOTE(review): ``reduce`` here is the Python 2 builtin; Python 3
        # would need functools.reduce.
        param_shape = [
            reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1)
        ] + [size]
        w = helper.create_parameter(
            attr=param_attr, shape=param_shape, dtype=dtype)
        tmp = helper.create_tmp_variable(dtype)
        helper.append_op(
            type="mul",
            inputs={
                "X": input_var,
                "Y": w,
            },
            outputs={"Out": tmp},
            attrs={'x_num_col_dims': num_flatten_dims,
                   'y_num_col_dims': 1})
        mul_results.append(tmp)
    # Sum the per-input products; the "sum" op is skipped for a single input.
    if len(mul_results) == 1:
        pre_bias = mul_results[0]
    else:
        pre_bias = helper.create_tmp_variable(dtype)
        helper.append_op(
            type="sum", inputs={"X": mul_results}, outputs={"Out": pre_bias})
    # add bias
    pre_activation = helper.append_bias_op(pre_bias)
    # add activation
    return helper.append_activation(pre_activation)
def embedding(input,
              size,
              data_type='float32',
              is_sparse=False,
              param_attr=None,
              program=None,
              init_program=None):
    """Embedding lookup: maps each id in ``input`` to a row of the learned
    table ``W`` via the ``lookup_table`` op.

    :param input: Variable holding the ids to look up.
    :param size: Shape of the embedding table parameter.
    :param data_type: Data type of the table (default ``'float32'``).
    :param is_sparse: Passed through to the op as its ``is_sparse`` attribute.
    :param param_attr: Attributes for the table parameter.
    :param program: Program to append ops to (framework plumbing).
    :param init_program: Program receiving parameter-initialization ops.
    """
    # **locals() forwards all arguments; add no locals before this call.
    helper = LayerHelper('embedding', **locals())
    w = helper.create_parameter(
        attr=helper.param_attr, shape=size, dtype=data_type)
    tmp = helper.create_tmp_variable(data_type)
    helper.append_op(
        type='lookup_table',
        inputs={'Ids': input,
                'W': w},
        outputs={'Out': tmp},
        attrs={'is_sparse': is_sparse})
    return tmp
def data(name,
         shape,
         data_type='float32',
         type=core.VarDesc.VarType.LOD_TENSOR,
         append_batch_size=True,
         program=None,
         init_program=None):
    """Declare a global input variable for feeding data.

    :param name: Name of the variable.
    :param shape: Shape of one sample (without the batch dimension).
    :param data_type: Element data type (default ``'float32'``).
    :param type: Variable type; defaults to a LoD tensor.
    :param append_batch_size: When true, prepend -1 as a variable batch dim.
    :param program: Program to append to (framework plumbing).
    :param init_program: Initialization program (framework plumbing).
    """
    # **locals() forwards all arguments; add no locals before this call.
    helper = LayerHelper('data', **locals())
    if append_batch_size:
        shape = [-1] + shape  # append batch size as -1
    return helper.create_global_variable(
        name=name, shape=shape, dtype=data_type, type=type)
def _convert_(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def _create_op_func_(op_type):
    """Generate a python wrapper function for operator ``op_type`` and export
    it from this module (registers it in ``globals()`` and ``__all__``).

    Only ops with exactly one non-intermediate, non-duplicable output are
    supported.
    """
    op_proto = OpProtoHolder.instance().get_op_proto(op_type)
    # NOTE(review): len()/indexing on filter() results is Python 2 only;
    # Python 3 would need list(filter(...)).
    not_intermediate_outputs = \
        filter(lambda output: not output.intermediate, op_proto.outputs)
    intermediate_outputs = \
        filter(lambda output: output.intermediate, op_proto.outputs)
    if len(not_intermediate_outputs) != 1:
        raise ValueError(
            "Only one not intermediate output operator can be automatically generated"
        )
    if not_intermediate_outputs[0].duplicable:
        raise ValueError(
            "Only not duplicable op can be automatically generated")
    for output in intermediate_outputs:
        if output.duplicable:
            raise ValueError(
                "Only when all intermediate ops are not duplicable, "
                "this op can be automatically generated")
    o_name = not_intermediate_outputs[0].name
    intermediate_output_names = [output.name for output in intermediate_outputs]

    def func(**kwargs):
        # Declared inputs are popped from kwargs (by their snake_case
        # names); everything remaining becomes op attributes.
        helper = LayerHelper(op_type, **kwargs)
        inputs = dict()
        dtype = None
        for ipt in op_proto.inputs:
            name = _convert_(ipt.name)
            val = kwargs.pop(name, [])
            if not isinstance(val, list) and not isinstance(val, tuple):
                val = [val]
            for each in val:
                if not isinstance(each, Variable):
                    raise ValueError("input of {0} must be variable".format(
                        op_type))
                # All inputs of one op must share the same data type.
                if dtype is None:
                    dtype = each.data_type
                elif dtype != each.data_type:
                    raise ValueError(
                        "operator {0} must input same dtype".format(op_type))
            inputs[ipt.name] = val
        outputs = dict()
        out = helper.create_tmp_variable(dtype=dtype)
        outputs[o_name] = [out]
        for name in intermediate_output_names:
            outputs[name] = [helper.create_tmp_variable(dtype=dtype)]
        helper.append_op(
            type=op_type, inputs=inputs, outputs=outputs, attrs=kwargs)
        return helper.append_activation(out)
    # Export the generated wrapper under the operator's own name.
    func.__name__ = op_type
    globals()[op_type] = func
    global __all__
    __all__.append(op_type)
# Auto-generate and export the simple single-output op wrappers at import
# time; each call adds a module-level function and an __all__ entry.
_create_op_func_('mean')
_create_op_func_('mul')
_create_op_func_('elementwise_add')
_create_op_func_('dropout')
_create_op_func_('reshape')
def cast(x, data_type, program=None):
    """Cast variable ``x`` to ``data_type`` via the ``cast`` op.

    :param x: Variable to convert.
    :param data_type: Target data type of the result.
    :param program: Program to append to (framework plumbing).
    """
    # **locals() forwards all arguments; add no locals before this call.
    helper = LayerHelper('cast', **locals())
    out = helper.create_tmp_variable(dtype=data_type)
    helper.append_op(
        type='cast',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'in_data_type': x.data_type,
               'out_data_type': out.data_type})
    return out
def concat(input, axis, program=None, init_program=None):
    """Concatenate the variables in ``input`` along ``axis``.

    :param input: Variables to join.
    :param axis: Axis along which to concatenate.
    :param program: Program to append to (framework plumbing).
    :param init_program: Initialization program (framework plumbing).
    """
    # **locals() forwards all arguments; add no locals before this call.
    helper = LayerHelper('concat', **locals())
    out = helper.create_tmp_variable(dtype=helper.input_dtype())
    helper.append_op(
        type='concat',
        inputs={'X': input},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    return out
def sums(input, program=None, init_program=None):
    """Sum the variables in ``input`` via the ``sum`` op.

    :param input: Variables to add together.
    :param program: Program to append to (framework plumbing).
    :param init_program: Initialization program (framework plumbing).
    """
    # **locals() forwards all arguments; add no locals before this call.
    helper = LayerHelper('sum', **locals())
    out = helper.create_tmp_variable(dtype=helper.input_dtype())
    # NOTE(review): 'X' wraps ``input`` in a list and 'Out' is passed bare,
    # unlike the [out] style used elsewhere in this module -- confirm this
    # matches what the sum op expects.
    helper.append_op(type='sum', inputs={'X': [input]}, outputs={'Out': out})
    return out
def cos_sim(X, Y, program=None, init_program=None):
    """Cosine similarity between ``X`` and ``Y`` via the ``cos_sim`` op.

    :returns: A 3-tuple ``(out, xnorm, ynorm)`` -- the similarity plus the
        two norm outputs produced by the op.
    """
    # **locals() forwards all arguments; add no locals before this call.
    helper = LayerHelper('cos_sim', **locals())
    out = helper.create_tmp_variable(dtype=helper.input_dtype("X"))
    xnorm = helper.create_tmp_variable(dtype=helper.input_dtype("X"))
    ynorm = helper.create_tmp_variable(dtype=helper.input_dtype("X"))
    helper.append_op(
        type='cos_sim',
        inputs={'X': [X],
                'Y': [Y]},
        outputs={'Out': [out],
                 'XNorm': [xnorm],
                 'YNorm': [ynorm]})
    return out, xnorm, ynorm
def cross_entropy(input, label, **kwargs):
    """Cross-entropy cost between ``input`` and ``label``.

    Remaining keyword arguments are forwarded to the LayerHelper and also
    attached to the op as attributes.
    """
    helper = LayerHelper('cross_entropy', **kwargs)
    out = helper.create_tmp_variable(dtype=input.data_type)
    helper.append_op(
        type='cross_entropy',
        inputs={'X': [input],
                'Label': [label]},
        outputs={'Y': [out]},
        attrs=kwargs)
    return out
def square_error_cost(input, label, **kwargs):
    """Squared error cost: ``(input - label) ** 2``, built from an
    ``elementwise_sub`` op followed by a ``square`` op.
    """
    helper = LayerHelper('square_error_cost', **kwargs)
    # First compute the difference ...
    minus_out = helper.create_tmp_variable(dtype=input.data_type)
    helper.append_op(
        type='elementwise_sub',
        inputs={'X': [input],
                'Y': [label]},
        outputs={'Out': [minus_out]})
    # ... then square it element-wise.
    square_out = helper.create_tmp_variable(dtype=input.data_type)
    helper.append_op(
        type='square', inputs={'X': [minus_out]}, outputs={'Y': [square_out]})
    return square_out
def accuracy(input, label, k=1, **kwargs):
    """Top-k accuracy of predictions `input` against `label`."""
    helper = LayerHelper("accuracy", **kwargs)
    # First compute the top-k values and their indices ...
    topk_out = helper.create_tmp_variable(dtype=input.data_type)
    topk_indices = helper.create_tmp_variable(dtype="int64")
    helper.append_op(
        type="top_k",
        inputs={"X": [input]},
        outputs={"Out": [topk_out], "Indices": [topk_indices]},
        attrs={"k": k})
    # ... then compare the indices against the labels.
    acc_out = helper.create_tmp_variable(
        dtype=kwargs.get("out_dtype", "float32"))
    helper.append_op(
        type="accuracy",
        inputs={"Out": [topk_out],
                "Indices": [topk_indices],
                "Label": [label]},
        outputs={"Accuracy": [acc_out]})
    return acc_out
def sequence_conv(input,
                  num_filters,
                  filter_size=3,
                  stride=1,
                  padding=None,
                  bias_attr=None,
                  param_attr=None,
                  program=None,
                  init_program=None):
    """Sequence convolution: convolve each sequence over a context window of
    `filter_size` steps, producing `num_filters` output channels, then add a
    bias and apply the helper's activation.
    """
    # FIXME(dzh) : want to unify the argument of python layer
    # function. So we ignore some unecessary attributes.
    # such as, padding_trainable, context_start.
    helper = LayerHelper('sequence_conv', **locals())
    dtype = helper.input_dtype()
    # One filter row per (context step, input feature) pair.
    filter_shape = [filter_size * input.shape[1], num_filters]
    filter = helper.create_parameter(
        attr=helper.param_attr, shape=filter_shape, dtype=dtype)
    pre_bias = helper.create_tmp_variable(dtype)
    helper.append_op(
        type='sequence_conv',
        inputs={
            'X': [input],
            'Filter': [filter],
        },
        outputs={"Out": pre_bias},
        attrs={
            'context_stride': stride,
            # context_start is fixed at 0 (see FIXME above).
            'context_start': 0,
            'context_length': filter_size
        })
    pre_act = helper.append_bias_op(pre_bias)
    return helper.append_activation(pre_act)
def conv2d(input,
           num_filters,
           name=None,
           filter_size=[1, 1],
           act=None,
           groups=None,
           stride=[1, 1],
           padding=None,
           bias_attr=None,
           param_attr=None,
           program=None,
           init_program=None):
    """2-D convolution layer: convolve `input` (channels on axis 1) with
    `num_filters` learned filters, add a bias and apply activation `act`.

    Scalar filter_size/stride/padding values are expanded to [h, w] pairs.

    Raises:
        ValueError: if `groups` does not evenly divide the channel count.
    """
    helper = LayerHelper('conv2d', **locals())
    dtype = helper.input_dtype()
    num_channels = input.shape[1]
    if groups is None:
        num_filter_channels = num_channels
    else:
        # BUG FIX: was `num_channels % groups is not 0`, an identity
        # comparison against the int literal 0 that only works by accident
        # of CPython's small-int caching; compare by value instead.
        if num_channels % groups != 0:
            raise ValueError("num_channels must be divisible by groups.")
        # Floor division keeps the channel count an int on any Python version.
        num_filter_channels = num_channels // groups
    # Accept scalar filter/stride/padding and expand them to [h, w] pairs.
    if isinstance(filter_size, int):
        filter_size = [filter_size, filter_size]
    if isinstance(stride, int):
        stride = [stride, stride]
    if isinstance(padding, int):
        padding = [padding, padding]
    filter_shape = [num_filters, num_filter_channels] + filter_size
    filter = helper.create_parameter(
        attr=helper.param_attr, shape=filter_shape, dtype=dtype)
    pre_bias = helper.create_tmp_variable(dtype)
    helper.append_op(
        type='conv2d',
        inputs={
            'Input': input,
            'Filter': filter,
        },
        outputs={"Output": pre_bias},
        attrs={'strides': stride,
               'paddings': padding,
               'groups': groups})
    pre_act = helper.append_bias_op(pre_bias)
    return helper.append_activation(pre_act)
def sequence_pool(input, pool_type, **kwargs):
    """Pool each sequence in `input` down to a single value.

    `pool_type` must be one of MAX, AVG, SQRT, LAST or FIRST (any case).

    Raises:
        ValueError: if `pool_type` is not a supported pooling type.
    """
    ENUM_POOL_TYPE = set(["MAX", "AVG", "SQRT", "LAST", "FIRST"])
    if pool_type.upper() not in ENUM_POOL_TYPE:
        # BUG FIX: the message and its %-arguments were previously passed to
        # ValueError as separate tuple items (logging style), so the message
        # was never interpolated; format it explicitly.
        raise ValueError("Unknown pool_type: '%s'. It can only be %s." %
                         (str(pool_type), " ".join(ENUM_POOL_TYPE)))
    helper = LayerHelper('sequence_pool', **kwargs)
    dtype = helper.input_dtype()
    pool_out = helper.create_tmp_variable(dtype)
    helper.append_op(
        type="sequence_pool",
        inputs={"X": [input]},
        outputs={"Out": [pool_out]},
        attrs={"pooltype": pool_type.upper()})
    return pool_out
def pool2d(input,
           pool_size,
           pool_type,
           pool_stride=[1, 1],
           pool_padding=[0, 0],
           global_pooling=False,
           program=None,
           init_program=None):
    """2-D pooling layer over `input` using 'max' or 'avg' pooling.

    Scalar pool_size/pool_stride/pool_padding values are expanded to
    [h, w] pairs.

    Raises:
        ValueError: if `pool_type` is neither 'max' nor 'avg'.
    """
    if pool_type not in ["max", "avg"]:
        # BUG FIX: the message and its %-argument were previously passed to
        # ValueError as separate tuple items (logging style), so the message
        # was never interpolated; format it explicitly.
        raise ValueError(
            "Unknown pool_type: '%s'. It can only be 'max' or 'avg'." %
            str(pool_type))
    if isinstance(pool_size, int):
        pool_size = [pool_size, pool_size]
    if isinstance(pool_stride, int):
        pool_stride = [pool_stride, pool_stride]
    if isinstance(pool_padding, int):
        pool_padding = [pool_padding, pool_padding]
    helper = LayerHelper('pool2d', **locals())
    dtype = helper.input_dtype()
    pool_out = helper.create_tmp_variable(dtype)
    helper.append_op(
        type="pool2d",
        inputs={"X": input},
        outputs={"Out": pool_out},
        attrs={
            "poolingType": pool_type,
            "ksize": pool_size,
            "globalPooling": global_pooling,
            "strides": pool_stride,
            "paddings": pool_padding
        })
    return pool_out
def batch_norm(input,
               act=None,
               is_test=False,
               momentum=0.9,
               epsilon=1e-05,
               param_attr=None,
               bias_attr=None,
               data_layout='NCHW',
               program=None,
               init_program=None):
    """Batch normalization layer.

    Normalizes `input` over the channel axis (axis 1 for NCHW, last axis for
    NHWC) and applies the optional activation `act`.

    BUG FIX: the default epsilon was 1e05 (i.e. 100000.0), a sign typo that
    would swamp the variance; the conventional default is 1e-05.

    Raises:
        ValueError: if `data_layout` is neither 'NCHW' nor 'NHWC'.
    """
    helper = LayerHelper('batch_norm', **locals())
    dtype = helper.input_dtype()
    input_shape = input.shape
    # The channel axis depends on the data layout.
    if data_layout == 'NCHW':
        channel_num = input_shape[1]
    elif data_layout == 'NHWC':
        channel_num = input_shape[-1]
    else:
        raise ValueError("unsupported data layout:" + data_layout)

    def create_persistable_var(dtype, shape, initializer=None):
        # Create (and optionally initialize) the variable in init_program,
        # then mirror it into the main program under the same name so the
        # batch_norm op can reference it.
        name = unique_name(".".join([helper.name, "xxxx"]))
        var = init_program.global_block().create_var(
            dtype=dtype, shape=shape, name=name, persistable=True)
        if initializer is not None:
            initializer(var, var.block)
        return program.global_block().create_var(
            name=name, dtype=dtype, shape=shape, persistable=True)

    param_shape = [channel_num]
    # create parameter
    scale = helper.create_parameter(
        attr=helper.param_attr, shape=param_shape, dtype=dtype)
    # NOTE(review): bias is created from param_attr, which leaves the
    # bias_attr argument unused -- confirm whether this should use
    # helper.bias_attr instead.
    bias = helper.create_parameter(
        attr=helper.param_attr, shape=param_shape, dtype=dtype)
    # create input: running statistics, boot-strapped to mean 0 / variance 1.
    mean = create_persistable_var(dtype, param_shape, ConstantInitializer(0.0))
    variance = create_persistable_var(dtype, param_shape,
                                      ConstantInitializer(1.0))
    # create output
    # mean and mean_out share the same memory
    mean_out = mean
    # variance and variance out share the same memory
    variance_out = variance
    saved_mean = helper.create_tmp_variable(dtype)
    saved_variance = helper.create_tmp_variable(dtype)
    batch_norm_out = helper.create_tmp_variable(dtype)
    helper.append_op(
        type="batch_norm",
        inputs={
            "X": input,
            "Scale": scale,
            "Bias": bias,
            "Mean": mean,
            "Variance": variance
        },
        outputs={
            "Y": batch_norm_out,
            "MeanOut": mean_out,
            "VarianceOut": variance_out,
            "SavedMean": saved_mean,
            "SavedVariance": saved_variance
        },
        attrs={"momentum": momentum,
               "epsilon": epsilon,
               "is_test": is_test})
    return helper.append_activation(batch_norm_out)
class BlockGuard(object):
    """Context manager that creates a sub-block in `program` on entry and
    rolls back to the parent block on exit, using Python's `with` keyword.
    """

    def __init__(self, program):
        if not isinstance(program, Program):
            raise TypeError("BlockGuard takes a program")
        self.program = program

    def __enter__(self):
        self.program.create_block()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.program.rollback()
        # Returning False when an exception occurred lets it propagate.
        return exc_type is None
class StaticRNNGuard(BlockGuard):
    """BlockGuard that additionally tracks the StaticRNN build status and
    finalizes the RNN op when the `with` block exits.
    """

    def __init__(self, rnn):
        if not isinstance(rnn, StaticRNN):
            raise TypeError("StaticRNNGuard takes an StaticRNN")
        # Guard the block of the RNN's own program.
        super(StaticRNNGuard, self).__init__(rnn.helper.program)
        self.rnn = rnn

    def __enter__(self):
        self.rnn.status = StaticRNN.IN_RNN_BLOCK
        return super(StaticRNNGuard, self).__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Mark the RNN complete before emitting its op, then roll back the
        # program block via the parent class.
        self.rnn.status = StaticRNN.AFTER_RNN_BLOCK
        self.rnn.complete_rnn_op()
        return super(StaticRNNGuard, self).__exit__(exc_type, exc_val, exc_tb)
class StaticRNNMemoryLink(object):
    """Record linking a memory's boot value to its per-step variables.

    :param init: the initial variable for Memory
    :type init: Variable
    :param pre_mem: the memory variable in previous time step
    :type pre_mem: Variable
    :param mem: the memory variable in current time step
    :type mem: Variable
    """

    def __init__(self, init, pre_mem, mem=None):
        self.init = init
        self.pre_mem = pre_mem
        self.mem = mem
class StaticRNN(object):
    """Builder for a static (fixed sequence length) RNN.

    Memories, step inputs and step outputs may only be declared inside a
    `with rnn.step():` block; the finished RNN is invoked by calling the
    object, which returns its output variable(s).
    """

    # Build-status flags, managed by StaticRNNGuard.
    BEFORE_RNN_BLOCK = 0
    IN_RNN_BLOCK = 1
    AFTER_RNN_BLOCK = 2

    def __init__(self, name=None, program=None):
        self.helper = LayerHelper("static_rnn", name=name, program=program)
        self.memories = {}  # memory map, from pre_mem.name --> MemoryLink
        self.inputs = []  # input variable list in current block
        self.outputs = []  # output variable list in parent block
        self.status = StaticRNN.BEFORE_RNN_BLOCK  # status flag.
        # sequence length, since it is a static RNN, sequence length are fixed.
        self.seq_len = None

    def step(self):
        """Return a context manager opening the RNN step-building block."""
        return StaticRNNGuard(self)

    def _assert_in_rnn_block_(self, method):
        # Guard: `method` may only be used inside the `with rnn.step()` block.
        if self.status != StaticRNN.IN_RNN_BLOCK:
            raise ValueError("You must invoke {0} in rnn block".format(method))

    def memory(self, init=None, shape=None, dtype=None, init_value=0):
        """Declare a memory variable.

        Either pass `init` (a boot Variable), or `shape` and `dtype` so a
        fill_constant boot variable of `init_value` is created in the
        parent block.
        """
        self._assert_in_rnn_block_('memory')
        if init is None:
            if shape is None or dtype is None:
                raise ValueError(
                    "if init is None, memory at least need shape and dtype")
            parent_block = self.parent_block()
            var_name = unique_name("@".join([self.helper.name, "memory_boot"]))
            boot_var = parent_block.create_var(
                name=var_name, shape=shape, dtype=dtype, persistable=False)
            parent_block.append_op(
                type="fill_constant",
                inputs={},
                outputs={'Out': [boot_var]},
                attrs={
                    'value': init_value,
                    'shape': boot_var.shape,
                    'data_type': boot_var.data_type
                })
            # Recurse with the freshly created boot variable as `init`.
            return self.memory(init=boot_var)
        else:
            pre_mem = self.helper.create_variable(
                name=unique_name("@".join([self.helper.name, "mem"])),
                dtype=init.data_type,
                shape=init.shape)
            self.memories[pre_mem.name] = StaticRNNMemoryLink(
                init=init, pre_mem=pre_mem)
            return pre_mem

    def step_input(self, x):
        """Mark `x` as a per-step input and return its per-step view."""
        self._assert_in_rnn_block_('step_input')
        if not isinstance(x, Variable):
            raise TypeError("step input takes a Variable")
        if self.seq_len is None:
            # The first input fixes the static sequence length (axis 1).
            self.seq_len = x.shape[1]
        elif self.seq_len != x.shape[1]:
            raise ValueError("Static RNN only take fix seq_len input")
        # The per-step view drops the sequence axis.
        ipt = self.helper.create_variable(
            name=x.name,
            dtype=x.data_type,
            shape=[-1] + list(x.shape[2:]),
            type=x.type)
        self.inputs.append(ipt)
        return ipt

    def step_output(self, o):
        """Mark `o` as a per-step output to be collected across steps."""
        self._assert_in_rnn_block_('step_output')
        if not isinstance(o, Variable):
            raise TypeError("step output takes a Variable")
        # The collected output regains the sequence axis.
        out_var = self.parent_block().create_var(
            name=o.name,
            shape=[-1, self.seq_len] + list(o.shape[1:]),
            dtype=o.data_type)
        self.outputs.append(out_var)

    def output(self, *outputs):
        """Convenience wrapper: register several step outputs at once."""
        for each in outputs:
            self.step_output(each)

    def update_memory(self, mem, var):
        """Link memory `mem` to `var`, its value for the next time step."""
        if not isinstance(mem, Variable) or not isinstance(var, Variable):
            raise TypeError("update memory should take variables")
        self.memories[mem.name].mem = var

    def parent_block(self):
        """Return the block enclosing the current (RNN step) block."""
        prog = self.helper.program
        parent_idx = prog.current_block().parent_idx
        assert parent_idx >= 0
        parent_block = prog.block(parent_idx)
        return parent_block

    def __call__(self, *args, **kwargs):
        """Return the RNN's output variable(s); valid only after the block."""
        if self.status != StaticRNN.AFTER_RNN_BLOCK:
            raise ValueError("RNN output can only be retrieved after rnn block")
        if len(self.outputs) == 0:
            raise ValueError("RNN has no output")
        elif len(self.outputs) == 1:
            # Single output is returned unwrapped.
            return self.outputs[0]
        else:
            return self.outputs

    def complete_rnn_op(self):
        # TODO(yuyang18): Create RNN Op here.
        # Implement this method after RNN op complete.
        pass
| |
"""Config flow for Plex."""
import copy
import logging
from aiohttp import web_response
import plexapi.exceptions
from plexapi.gdm import GDM
from plexauth import PlexAuth
import requests.exceptions
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import http
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_HOST,
CONF_PORT,
CONF_SOURCE,
CONF_SSL,
CONF_TOKEN,
CONF_URL,
CONF_VERIFY_SSL,
)
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from .const import (
AUTH_CALLBACK_NAME,
AUTH_CALLBACK_PATH,
AUTOMATIC_SETUP_STRING,
CONF_IGNORE_NEW_SHARED_USERS,
CONF_IGNORE_PLEX_WEB_CLIENTS,
CONF_MONITORED_USERS,
CONF_SERVER,
CONF_SERVER_IDENTIFIER,
CONF_USE_EPISODE_ART,
DEFAULT_PORT,
DEFAULT_SSL,
DEFAULT_VERIFY_SSL,
DOMAIN,
MANUAL_SETUP_STRING,
PLEX_SERVER_CONFIG,
SERVERS,
X_PLEX_DEVICE_NAME,
X_PLEX_PLATFORM,
X_PLEX_PRODUCT,
X_PLEX_VERSION,
)
from .errors import NoServersFound, ServerNotSpecified
from .server import PlexServer
HEADER_FRONTEND_BASE = "HA-Frontend-Base"
_LOGGER = logging.getLogger(__package__)
@callback
def configured_servers(hass):
    """Return a set of the configured Plex servers."""
    identifiers = set()
    for entry in hass.config_entries.async_entries(DOMAIN):
        identifiers.add(entry.data[CONF_SERVER_IDENTIFIER])
    return identifiers
async def async_discover(hass):
    """Scan the local network for Plex servers and seed discovery flows."""
    gdm = GDM()
    # gdm.scan is blocking network I/O; run it in the executor.
    await hass.async_add_executor_job(gdm.scan)
    for entry_data in gdm.entries:
        await hass.config_entries.flow.async_init(
            DOMAIN,
            context={CONF_SOURCE: config_entries.SOURCE_INTEGRATION_DISCOVERY},
            data=entry_data,
        )
class PlexFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a Plex config flow."""

    VERSION = 1

    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return PlexOptionsFlowHandler(config_entry)

    def __init__(self):
        """Initialize the Plex flow."""
        # Credentials / server details gathered so far in this flow.
        self.current_login = {}
        # Servers reported by the Plex account when more than one exists.
        self.available_servers = None
        # plexauth.PlexAuth handle used during the external auth flow.
        self.plexauth = None
        # Token and client id produced by the external auth step.
        self.token = None
        self.client_id = None
        # True once the user chose manual setup (controls error re-routing).
        self._manual = False

    async def async_step_user(
        self, user_input=None, errors=None
    ): # pylint: disable=arguments-differ
        """Handle a flow initialized by the user."""
        if user_input is not None:
            return await self.async_step_plex_website_auth()
        if self.show_advanced_options:
            return await self.async_step_user_advanced(errors=errors)
        return self.async_show_form(step_id="user", errors=errors)

    async def async_step_user_advanced(self, user_input=None, errors=None):
        """Handle an advanced mode flow initialized by the user."""
        if user_input is not None:
            if user_input.get("setup_method") == MANUAL_SETUP_STRING:
                self._manual = True
                return await self.async_step_manual_setup()
            return await self.async_step_plex_website_auth()
        data_schema = vol.Schema(
            {
                vol.Required("setup_method", default=AUTOMATIC_SETUP_STRING): vol.In(
                    [AUTOMATIC_SETUP_STRING, MANUAL_SETUP_STRING]
                )
            }
        )
        return self.async_show_form(
            step_id="user_advanced", data_schema=data_schema, errors=errors
        )

    async def async_step_manual_setup(self, user_input=None, errors=None):
        """Begin manual configuration."""
        if user_input is not None and errors is None:
            user_input.pop(CONF_URL, None)
            # Build the URL from host/port/ssl; otherwise a token alone
            # suffices (servers are then discovered via the Plex account).
            if host := user_input.get(CONF_HOST):
                port = user_input[CONF_PORT]
                prefix = "https" if user_input.get(CONF_SSL) else "http"
                user_input[CONF_URL] = f"{prefix}://{host}:{port}"
            elif CONF_TOKEN not in user_input:
                # Neither a host nor a token: redisplay the form with an error.
                return await self.async_step_manual_setup(
                    user_input=user_input, errors={"base": "host_or_token"}
                )
            return await self.async_step_server_validate(user_input)
        previous_input = user_input or {}
        data_schema = vol.Schema(
            {
                vol.Optional(
                    CONF_HOST,
                    description={"suggested_value": previous_input.get(CONF_HOST)},
                ): str,
                vol.Required(
                    CONF_PORT, default=previous_input.get(CONF_PORT, DEFAULT_PORT)
                ): int,
                vol.Required(
                    CONF_SSL, default=previous_input.get(CONF_SSL, DEFAULT_SSL)
                ): bool,
                vol.Required(
                    CONF_VERIFY_SSL,
                    default=previous_input.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL),
                ): bool,
                vol.Optional(
                    CONF_TOKEN,
                    description={"suggested_value": previous_input.get(CONF_TOKEN)},
                ): str,
            }
        )
        return self.async_show_form(
            step_id="manual_setup", data_schema=data_schema, errors=errors
        )

    async def async_step_server_validate(self, server_config):
        """Validate a provided configuration."""
        errors = {}
        self.current_login = server_config
        plex_server = PlexServer(self.hass, server_config)
        try:
            # PlexServer.connect is blocking; run it in the executor.
            await self.hass.async_add_executor_job(plex_server.connect)
        except NoServersFound:
            _LOGGER.error("No servers linked to Plex account")
            errors["base"] = "no_servers"
        except (plexapi.exceptions.BadRequest, plexapi.exceptions.Unauthorized):
            _LOGGER.error("Invalid credentials provided, config not created")
            errors[CONF_TOKEN] = "faulty_credentials"
        except requests.exceptions.SSLError as error:
            _LOGGER.error("SSL certificate error: [%s]", error)
            errors["base"] = "ssl_error"
        except (plexapi.exceptions.NotFound, requests.exceptions.ConnectionError):
            server_identifier = (
                server_config.get(CONF_URL) or plex_server.server_choice or "Unknown"
            )
            _LOGGER.error("Plex server could not be reached: %s", server_identifier)
            errors[CONF_HOST] = "not_found"
        except ServerNotSpecified as available_servers:
            # Multiple servers on the account: let the user pick one.
            self.available_servers = available_servers.args[0]
            return await self.async_step_select_server()
        except Exception as error: # pylint: disable=broad-except
            _LOGGER.exception("Unknown error connecting to Plex server: %s", error)
            return self.async_abort(reason="unknown")
        if errors:
            # Route the user back to whichever form they came from.
            if self._manual:
                return await self.async_step_manual_setup(
                    user_input=server_config, errors=errors
                )
            return await self.async_step_user(errors=errors)
        server_id = plex_server.machine_identifier
        url = plex_server.url_in_use
        token = server_config.get(CONF_TOKEN)
        entry_config = {CONF_URL: url}
        if self.client_id:
            entry_config[CONF_CLIENT_ID] = self.client_id
        if token:
            entry_config[CONF_TOKEN] = token
        if url.startswith("https"):
            entry_config[CONF_VERIFY_SSL] = server_config.get(
                CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL
            )
        data = {
            CONF_SERVER: plex_server.friendly_name,
            CONF_SERVER_IDENTIFIER: server_id,
            PLEX_SERVER_CONFIG: entry_config,
        }
        entry = await self.async_set_unique_id(server_id)
        if self.context[CONF_SOURCE] == config_entries.SOURCE_REAUTH:
            # Reauth: update the existing entry in place and reload it.
            self.hass.config_entries.async_update_entry(entry, data=data)
            _LOGGER.debug("Updated config entry for %s", plex_server.friendly_name)
            await self.hass.config_entries.async_reload(entry.entry_id)
            return self.async_abort(reason="reauth_successful")
        self._abort_if_unique_id_configured()
        _LOGGER.debug("Valid config created for %s", plex_server.friendly_name)
        return self.async_create_entry(title=url, data=data)

    async def async_step_select_server(self, user_input=None):
        """Use selected Plex server."""
        config = dict(self.current_login)
        if user_input is not None:
            config[CONF_SERVER_IDENTIFIER] = user_input[CONF_SERVER_IDENTIFIER]
            return await self.async_step_server_validate(config)
        configured = configured_servers(self.hass)
        # Offer only servers that have not already been configured.
        available_servers = {
            server_id: f"{name} ({owner})" if owner else name
            for (name, server_id, owner) in self.available_servers
            if server_id not in configured
        }
        if not available_servers:
            return self.async_abort(reason="all_configured")
        if len(available_servers) == 1:
            # Exactly one candidate left: skip the selection form.
            config[CONF_SERVER_IDENTIFIER] = next(iter(available_servers))
            return await self.async_step_server_validate(config)
        return self.async_show_form(
            step_id="select_server",
            data_schema=vol.Schema(
                {vol.Required(CONF_SERVER_IDENTIFIER): vol.In(available_servers)}
            ),
            errors={},
        )

    async def async_step_integration_discovery(self, discovery_info):
        """Handle GDM discovery."""
        machine_identifier = discovery_info["data"]["Resource-Identifier"]
        # The machine identifier is the unique id; bail out if configured.
        await self.async_set_unique_id(machine_identifier)
        self._abort_if_unique_id_configured()
        host = f"{discovery_info['from'][0]}:{discovery_info['data']['Port']}"
        name = discovery_info["data"]["Name"]
        self.context["title_placeholders"] = {
            "host": host,
            "name": name,
        }
        return await self.async_step_user()

    async def async_step_plex_website_auth(self):
        """Begin external auth flow on Plex website."""
        self.hass.http.register_view(PlexAuthorizationCallbackView)
        if (req := http.current_request.get()) is None:
            raise RuntimeError("No current request in context")
        if (hass_url := req.headers.get(HEADER_FRONTEND_BASE)) is None:
            raise RuntimeError("No header in request")
        headers = {"Origin": hass_url}
        payload = {
            "X-Plex-Device-Name": X_PLEX_DEVICE_NAME,
            "X-Plex-Version": X_PLEX_VERSION,
            "X-Plex-Product": X_PLEX_PRODUCT,
            "X-Plex-Device": self.hass.config.location_name,
            "X-Plex-Platform": X_PLEX_PLATFORM,
            "X-Plex-Model": "Plex OAuth",
        }
        session = async_get_clientsession(self.hass)
        self.plexauth = PlexAuth(payload, session, headers)
        await self.plexauth.initiate_auth()
        # Plex redirects back to our callback view, which resumes this flow.
        forward_url = f"{hass_url}{AUTH_CALLBACK_PATH}?flow_id={self.flow_id}"
        auth_url = self.plexauth.auth_url(forward_url)
        return self.async_external_step(step_id="obtain_token", url=auth_url)

    async def async_step_obtain_token(self, user_input=None):
        """Obtain token after external auth completed."""
        # Poll Plex for the token for up to 10 seconds.
        token = await self.plexauth.token(10)
        if not token:
            return self.async_external_step_done(next_step_id="timed_out")
        self.token = token
        self.client_id = self.plexauth.client_identifier
        return self.async_external_step_done(next_step_id="use_external_token")

    async def async_step_timed_out(self, user_input=None):
        """Abort flow when time expires."""
        return self.async_abort(reason="token_request_timeout")

    async def async_step_use_external_token(self, user_input=None):
        """Continue server validation with external token."""
        server_config = {CONF_TOKEN: self.token}
        return await self.async_step_server_validate(server_config)

    async def async_step_reauth(self, data):
        """Handle a reauthorization flow request."""
        self.current_login = dict(data)
        return await self.async_step_user()
class PlexOptionsFlowHandler(config_entries.OptionsFlow):
    """Handle Plex options."""

    def __init__(self, config_entry):
        """Initialize Plex options flow."""
        # Deep-copy so edits do not mutate the stored entry options.
        self.options = copy.deepcopy(dict(config_entry.options))
        self.server_id = config_entry.data[CONF_SERVER_IDENTIFIER]

    async def async_step_init(self, user_input=None):
        """Manage the Plex options."""
        return await self.async_step_plex_mp_settings()

    async def async_step_plex_mp_settings(self, user_input=None):
        """Manage the Plex media_player options."""
        plex_server = self.hass.data[DOMAIN][SERVERS][self.server_id]
        if user_input is not None:
            self.options[MP_DOMAIN][CONF_USE_EPISODE_ART] = user_input[
                CONF_USE_EPISODE_ART
            ]
            self.options[MP_DOMAIN][CONF_IGNORE_NEW_SHARED_USERS] = user_input[
                CONF_IGNORE_NEW_SHARED_USERS
            ]
            self.options[MP_DOMAIN][CONF_IGNORE_PLEX_WEB_CLIENTS] = user_input[
                CONF_IGNORE_PLEX_WEB_CLIENTS
            ]
            # Persist per-account enabled flags keyed by account name.
            account_data = {
                user: {"enabled": bool(user in user_input[CONF_MONITORED_USERS])}
                for user in plex_server.accounts
            }
            self.options[MP_DOMAIN][CONF_MONITORED_USERS] = account_data
            return self.async_create_entry(title="", data=self.options)
        available_accounts = {name: name for name in plex_server.accounts}
        available_accounts[plex_server.owner] += " [Owner]"
        default_accounts = plex_server.accounts
        known_accounts = set(plex_server.option_monitored_users)
        if known_accounts:
            # Preselect only accounts previously enabled and still present.
            default_accounts = {
                user
                for user in plex_server.option_monitored_users
                if plex_server.option_monitored_users[user]["enabled"]
            }
            default_accounts.intersection_update(plex_server.accounts)
        # Label accounts that appeared since the options were last saved.
        for user in plex_server.accounts:
            if user not in known_accounts:
                available_accounts[user] += " [New]"
        if not plex_server.option_ignore_new_shared_users:
            # New shared users default to monitored unless explicitly ignored.
            for new_user in plex_server.accounts - known_accounts:
                default_accounts.add(new_user)
        return self.async_show_form(
            step_id="plex_mp_settings",
            data_schema=vol.Schema(
                {
                    vol.Required(
                        CONF_USE_EPISODE_ART,
                        default=plex_server.option_use_episode_art,
                    ): bool,
                    vol.Optional(
                        CONF_MONITORED_USERS, default=default_accounts
                    ): cv.multi_select(available_accounts),
                    vol.Required(
                        CONF_IGNORE_NEW_SHARED_USERS,
                        default=plex_server.option_ignore_new_shared_users,
                    ): bool,
                    vol.Required(
                        CONF_IGNORE_PLEX_WEB_CLIENTS,
                        default=plex_server.option_ignore_plexweb_clients,
                    ): bool,
                }
            ),
        )
class PlexAuthorizationCallbackView(HomeAssistantView):
    """Handle callback from external auth."""

    url = AUTH_CALLBACK_PATH
    name = AUTH_CALLBACK_NAME
    # The callback arrives from plex.tv, so it is unauthenticated by design.
    requires_auth = False

    async def get(self, request):
        """Receive authorization confirmation."""
        # pylint: disable=no-self-use
        hass = request.app["hass"]
        # Resume the config flow that launched the external auth step.
        await hass.config_entries.flow.async_configure(
            flow_id=request.query["flow_id"], user_input=None
        )
        return web_response.Response(
            headers={"content-type": "text/html"},
            text="<script>window.close()</script>Success! This window can be closed",
        )
| |
#! /usr/bin/env python
"""Test the arraymodule.
Roger E. Masse
"""
import unittest
from test import test_support
from weakref import proxy
import array, cStringIO
from cPickle import loads, dumps, HIGHEST_PROTOCOL
class ArraySubclass(array.array):
    """Trivial array.array subclass (used to check that pickling/copying
    preserves the subclass type and instance attributes)."""
    pass
class ArraySubclassWithKwargs(array.array):
    """Subclass whose __init__ accepts a keyword argument (helper for
    constructor keyword-handling regression tests)."""

    def __init__(self, typecode, newarg=None):
        array.array.__init__(self, typecode)
# Registry of TestCase classes; populated as each class is defined below
# and later handed to the test runner.
tests = [] # list to accumulate all tests
# Every typecode supported by the (Python 2) array module.
typecodes = "cubBhHiIlLfd"
class BadConstructorTest(unittest.TestCase):
    """Check that array.array rejects invalid constructor arguments."""

    def test_constructor(self):
        # (expected exception, positional args, keyword args)
        bad_calls = (
            (TypeError, (), {}),            # no typecode at all
            (TypeError, (), {'spam': 42}),  # unknown keyword argument
            (TypeError, ('xx',), {}),       # typecode must be one character
            (ValueError, ('x',), {}),       # one character, but not valid
        )
        for expected, args, kwargs in bad_calls:
            self.assertRaises(expected, array.array, *args, **kwargs)
# Register with the module-level test list.
tests.append(BadConstructorTest)
class BaseTest(unittest.TestCase):
    # Required class attributes (provided by subclasses):
# typecode: the typecode to test
# example: an initializer usable in the constructor for this type
# smallerexample: the same length as example, but smaller
# biggerexample: the same length as example, but bigger
# outside: An entry that is not in example
# minitemsize: the minimum guaranteed itemsize
    def assertEntryEqual(self, entry1, entry2):
        # Single-entry comparison hook; base uses exact equality, subclasses
        # may override (e.g. to loosen float comparisons).
        self.assertEqual(entry1, entry2)
def badtypecode(self):
# Return a typecode that is different from our own
return typecodes[(typecodes.index(self.typecode)+1) % len(typecodes)]
    def test_constructor(self):
        # A bare constructor yields an empty array of the requested typecode;
        # the optional initializer must be an iterable, not None.
        a = array.array(self.typecode)
        self.assertEqual(a.typecode, self.typecode)
        self.assertTrue(a.itemsize>=self.minitemsize)
        self.assertRaises(TypeError, array.array, self.typecode, None)
def test_len(self):
a = array.array(self.typecode)
a.append(self.example[0])
self.assertEqual(len(a), 1)
a = array.array(self.typecode, self.example)
self.assertEqual(len(a), len(self.example))
    def test_buffer_info(self):
        # buffer_info() takes no arguments and returns (address, length).
        a = array.array(self.typecode, self.example)
        self.assertRaises(TypeError, a.buffer_info, 42)
        bi = a.buffer_info()
        self.assertIsInstance(bi, tuple)
        self.assertEqual(len(bi), 2)
        self.assertIsInstance(bi[0], (int, long))
        self.assertIsInstance(bi[1], int)
        self.assertEqual(bi[1], len(a))
    def test_byteswap(self):
        a = array.array(self.typecode, self.example)
        self.assertRaises(TypeError, a.byteswap, 42)
        # byteswap is defined only for 1/2/4/8-byte items; swapping twice
        # must round-trip, and single-byte items are unchanged by one swap.
        if a.itemsize in (1, 2, 4, 8):
            b = array.array(self.typecode, self.example)
            b.byteswap()
            if a.itemsize==1:
                self.assertEqual(a, b)
            else:
                self.assertNotEqual(a, b)
            b.byteswap()
            self.assertEqual(a, b)
def test_copy(self):
import copy
a = array.array(self.typecode, self.example)
b = copy.copy(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
def test_deepcopy(self):
import copy
a = array.array(self.typecode, self.example)
b = copy.deepcopy(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
    def test_pickle(self):
        # Round-trip both a plain array and a subclass instance (carrying an
        # extra attribute) through every pickle protocol.
        for protocol in range(HIGHEST_PROTOCOL + 1):
            a = array.array(self.typecode, self.example)
            b = loads(dumps(a, protocol))
            self.assertNotEqual(id(a), id(b))
            self.assertEqual(a, b)
            a = ArraySubclass(self.typecode, self.example)
            a.x = 10
            b = loads(dumps(a, protocol))
            self.assertNotEqual(id(a), id(b))
            self.assertEqual(a, b)
            # Instance attribute and exact subclass type must survive.
            self.assertEqual(a.x, b.x)
            self.assertEqual(type(a), type(b))
    def test_pickle_for_empty_array(self):
        # Same round-trip as test_pickle but for empty arrays.
        for protocol in range(HIGHEST_PROTOCOL + 1):
            a = array.array(self.typecode)
            b = loads(dumps(a, protocol))
            self.assertNotEqual(id(a), id(b))
            self.assertEqual(a, b)
            a = ArraySubclass(self.typecode)
            a.x = 10
            b = loads(dumps(a, protocol))
            self.assertNotEqual(id(a), id(b))
            self.assertEqual(a, b)
            # Instance attribute and exact subclass type must survive.
            self.assertEqual(a.x, b.x)
            self.assertEqual(type(a), type(b))
    def test_insert(self):
        # Insert at index 0 duplicates the first entry.
        a = array.array(self.typecode, self.example)
        a.insert(0, self.example[0])
        self.assertEqual(len(a), 1+len(self.example))
        self.assertEqual(a[0], a[1])
        self.assertRaises(TypeError, a.insert)
        self.assertRaises(TypeError, a.insert, None)
        self.assertRaises(TypeError, a.insert, 0, None)
        # A negative index counts from the end.
        a = array.array(self.typecode, self.example)
        a.insert(-1, self.example[0])
        self.assertEqual(
            a,
            array.array(
                self.typecode,
                self.example[:-1] + self.example[:1] + self.example[-1:]
            )
        )
        # Out-of-range indices clamp to the ends, like list.insert.
        a = array.array(self.typecode, self.example)
        a.insert(-1000, self.example[0])
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[:1] + self.example)
        )
        a = array.array(self.typecode, self.example)
        a.insert(1000, self.example[0])
        self.assertEqual(
            a,
            array.array(self.typecode, self.example + self.example[:1])
        )
    def test_tofromfile(self):
        a = array.array(self.typecode, 2*self.example)
        self.assertRaises(TypeError, a.tofile)
        # tofile/fromfile require a real file object, not a file-like one.
        self.assertRaises(TypeError, a.tofile, cStringIO.StringIO())
        test_support.unlink(test_support.TESTFN)
        f = open(test_support.TESTFN, 'wb')
        try:
            a.tofile(f)
            f.close()
            b = array.array(self.typecode)
            f = open(test_support.TESTFN, 'rb')
            self.assertRaises(TypeError, b.fromfile)
            self.assertRaises(
                TypeError,
                b.fromfile,
                cStringIO.StringIO(), len(self.example)
            )
            # Read the written data back in two halves.
            b.fromfile(f, len(self.example))
            self.assertEqual(b, array.array(self.typecode, self.example))
            self.assertNotEqual(a, b)
            b.fromfile(f, len(self.example))
            self.assertEqual(a, b)
            # The file is exhausted: one more read must raise EOFError.
            self.assertRaises(EOFError, b.fromfile, f, 1)
            f.close()
        finally:
            if not f.closed:
                f.close()
            test_support.unlink(test_support.TESTFN)
    def test_fromfile_ioerror(self):
        # Issue #5395: Check if fromfile raises a proper IOError
        # instead of EOFError.
        a = array.array(self.typecode)
        # The file is opened write-only, so reading must fail with IOError.
        f = open(test_support.TESTFN, 'wb')
        try:
            self.assertRaises(IOError, a.fromfile, f, len(self.example))
        finally:
            f.close()
            test_support.unlink(test_support.TESTFN)
    def test_filewrite(self):
        # file.write must accept an array directly via the buffer interface.
        a = array.array(self.typecode, 2*self.example)
        f = open(test_support.TESTFN, 'wb')
        try:
            f.write(a)
            f.close()
            b = array.array(self.typecode)
            f = open(test_support.TESTFN, 'rb')
            # Read the written bytes back in two halves.
            b.fromfile(f, len(self.example))
            self.assertEqual(b, array.array(self.typecode, self.example))
            self.assertNotEqual(a, b)
            b.fromfile(f, len(self.example))
            self.assertEqual(a, b)
            f.close()
        finally:
            if not f.closed:
                f.close()
            test_support.unlink(test_support.TESTFN)
    def test_tofromlist(self):
        a = array.array(self.typecode, 2*self.example)
        b = array.array(self.typecode)
        self.assertRaises(TypeError, a.tolist, 42)
        self.assertRaises(TypeError, b.fromlist)
        self.assertRaises(TypeError, b.fromlist, 42)
        self.assertRaises(TypeError, b.fromlist, [None])
        # tolist/fromlist must round-trip exactly.
        b.fromlist(a.tolist())
        self.assertEqual(a, b)
    def test_tofromstring(self):
        a = array.array(self.typecode, 2*self.example)
        b = array.array(self.typecode)
        self.assertRaises(TypeError, a.tostring, 42)
        self.assertRaises(TypeError, b.fromstring)
        self.assertRaises(TypeError, b.fromstring, 42)
        # tostring/fromstring must round-trip exactly.
        b.fromstring(a.tostring())
        self.assertEqual(a, b)
        if a.itemsize>1:
            # A string whose length is not a multiple of itemsize is invalid.
            self.assertRaises(ValueError, b.fromstring, "x")
    def test_repr(self):
        # repr() must evaluate back to an equal array.
        a = array.array(self.typecode, 2*self.example)
        self.assertEqual(a, eval(repr(a), {"array": array.array}))
        a = array.array(self.typecode)
        self.assertEqual(repr(a), "array('%s')" % self.typecode)
    def test_str(self):
        # Smoke test: str() must not raise.
        a = array.array(self.typecode, 2*self.example)
        str(a)
    def test_cmp(self):
        a = array.array(self.typecode, self.example)
        # Against a non-array, only ==/!= are meaningful.
        self.assertTrue((a == 42) is False)
        self.assertTrue((a != 42) is True)
        # An array compares equal to itself ...
        self.assertTrue((a == a) is True)
        self.assertTrue((a != a) is False)
        self.assertTrue((a < a) is False)
        self.assertTrue((a <= a) is True)
        self.assertTrue((a > a) is False)
        self.assertTrue((a >= a) is True)
        al = array.array(self.typecode, self.smallerexample)
        ab = array.array(self.typecode, self.biggerexample)
        # ... a proper prefix sorts before its extension ...
        self.assertTrue((a == 2*a) is False)
        self.assertTrue((a != 2*a) is True)
        self.assertTrue((a < 2*a) is True)
        self.assertTrue((a <= 2*a) is True)
        self.assertTrue((a > 2*a) is False)
        self.assertTrue((a >= 2*a) is False)
        # ... and same-length arrays order by their first differing entry.
        self.assertTrue((a == al) is False)
        self.assertTrue((a != al) is True)
        self.assertTrue((a < al) is False)
        self.assertTrue((a <= al) is False)
        self.assertTrue((a > al) is True)
        self.assertTrue((a >= al) is True)
        self.assertTrue((a == ab) is False)
        self.assertTrue((a != ab) is True)
        self.assertTrue((a < ab) is True)
        self.assertTrue((a <= ab) is True)
        self.assertTrue((a > ab) is False)
        self.assertTrue((a >= ab) is False)
    def test_add(self):
        a = array.array(self.typecode, self.example) \
            + array.array(self.typecode, self.example[::-1])
        self.assertEqual(
            a,
            array.array(self.typecode, self.example + self.example[::-1])
        )
        # Adding an array of a different typecode, or a non-array, fails.
        b = array.array(self.badtypecode())
        with self.assertRaises(TypeError):
            a + b
        with self.assertRaises(TypeError):
            a + 'bad'
    def test_iadd(self):
        a = array.array(self.typecode, self.example[::-1])
        b = a
        a += array.array(self.typecode, 2*self.example)
        # += must extend in place, keeping the same object identity.
        self.assertTrue(a is b)
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[::-1]+2*self.example)
        )
        # Self-extension must work (no aliasing bug).
        a = array.array(self.typecode, self.example)
        a += a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example + self.example)
        )
        # Mismatched typecodes and non-arrays are rejected.
        b = array.array(self.badtypecode())
        with self.assertRaises(TypeError):
            a += b
        with self.assertRaises(TypeError):
            a += 'bad'
    def test_mul(self):
        """'*' repeats on either side; 0 or negative factors give an empty
        array; a non-integer factor raises TypeError."""
        a = 5*array.array(self.typecode, self.example)
        self.assertEqual(
            a,
            array.array(self.typecode, 5*self.example)
        )
        a = array.array(self.typecode, self.example)*5
        self.assertEqual(
            a,
            array.array(self.typecode, self.example*5)
        )
        a = 0*array.array(self.typecode, self.example)
        self.assertEqual(
            a,
            array.array(self.typecode)
        )
        a = (-1)*array.array(self.typecode, self.example)
        self.assertEqual(
            a,
            array.array(self.typecode)
        )
        with self.assertRaises(TypeError):
            a * 'bad'
    def test_imul(self):
        """'*=' repeats in place, preserving identity; 0 and negative
        factors empty the array; non-integer factors raise TypeError."""
        a = array.array(self.typecode, self.example)
        b = a
        a *= 5
        self.assertTrue(a is b)
        self.assertEqual(
            a,
            array.array(self.typecode, 5*self.example)
        )
        a *= 0
        self.assertTrue(a is b)
        self.assertEqual(a, array.array(self.typecode))
        # repeating an already-empty array stays empty
        a *= 1000
        self.assertTrue(a is b)
        self.assertEqual(a, array.array(self.typecode))
        a *= -1
        self.assertTrue(a is b)
        self.assertEqual(a, array.array(self.typecode))
        a = array.array(self.typecode, self.example)
        a *= -1
        self.assertEqual(a, array.array(self.typecode))
        with self.assertRaises(TypeError):
            a *= 'bad'
    def test_getitem(self):
        """Indexing with int and (Python 2) long, positive and negative;
        out-of-range indices raise IndexError."""
        a = array.array(self.typecode, self.example)
        self.assertEntryEqual(a[0], self.example[0])
        # 0L: Python 2 long index must behave like int
        self.assertEntryEqual(a[0L], self.example[0])
        self.assertEntryEqual(a[-1], self.example[-1])
        self.assertEntryEqual(a[-1L], self.example[-1])
        self.assertEntryEqual(a[len(self.example)-1], self.example[-1])
        self.assertEntryEqual(a[-len(self.example)], self.example[0])
        self.assertRaises(TypeError, a.__getitem__)
        self.assertRaises(IndexError, a.__getitem__, len(self.example))
        self.assertRaises(IndexError, a.__getitem__, -len(self.example)-1)
    def test_setitem(self):
        """Item assignment with int/long, positive/negative indices;
        bad index types, bad values and out-of-range indices raise."""
        a = array.array(self.typecode, self.example)
        a[0] = a[-1]
        self.assertEntryEqual(a[0], a[-1])
        a = array.array(self.typecode, self.example)
        # Python 2 long index
        a[0L] = a[-1]
        self.assertEntryEqual(a[0], a[-1])
        a = array.array(self.typecode, self.example)
        a[-1] = a[0]
        self.assertEntryEqual(a[0], a[-1])
        a = array.array(self.typecode, self.example)
        a[-1L] = a[0]
        self.assertEntryEqual(a[0], a[-1])
        a = array.array(self.typecode, self.example)
        a[len(self.example)-1] = a[0]
        self.assertEntryEqual(a[0], a[-1])
        a = array.array(self.typecode, self.example)
        a[-len(self.example)] = a[-1]
        self.assertEntryEqual(a[0], a[-1])
        self.assertRaises(TypeError, a.__setitem__)
        self.assertRaises(TypeError, a.__setitem__, None)
        self.assertRaises(TypeError, a.__setitem__, 0, None)
        self.assertRaises(
            IndexError,
            a.__setitem__,
            len(self.example), self.example[0]
        )
        self.assertRaises(
            IndexError,
            a.__setitem__,
            -len(self.example)-1, self.example[0]
        )
    def test_delitem(self):
        """del a[i] for first/last items via positive and negative
        indices; bad index types / out-of-range indices raise."""
        a = array.array(self.typecode, self.example)
        del a[0]
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[1:])
        )
        a = array.array(self.typecode, self.example)
        del a[-1]
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[:-1])
        )
        a = array.array(self.typecode, self.example)
        del a[len(self.example)-1]
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[:-1])
        )
        a = array.array(self.typecode, self.example)
        del a[-len(self.example)]
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[1:])
        )
        self.assertRaises(TypeError, a.__delitem__)
        self.assertRaises(TypeError, a.__delitem__, None)
        self.assertRaises(IndexError, a.__delitem__, len(self.example))
        self.assertRaises(IndexError, a.__delitem__, -len(self.example)-1)
    def test_getslice(self):
        """Simple (two-index) slicing, including empty, inverted and
        out-of-range bounds, which must clamp like list slicing."""
        a = array.array(self.typecode, self.example)
        self.assertEqual(a[:], a)
        self.assertEqual(
            a[1:],
            array.array(self.typecode, self.example[1:])
        )
        self.assertEqual(
            a[:1],
            array.array(self.typecode, self.example[:1])
        )
        self.assertEqual(
            a[:-1],
            array.array(self.typecode, self.example[:-1])
        )
        self.assertEqual(
            a[-1:],
            array.array(self.typecode, self.example[-1:])
        )
        self.assertEqual(
            a[-1:-1],
            array.array(self.typecode)
        )
        # start past stop yields an empty array
        self.assertEqual(
            a[2:1],
            array.array(self.typecode)
        )
        # out-of-range bounds clamp rather than raise
        self.assertEqual(
            a[1000:],
            array.array(self.typecode)
        )
        self.assertEqual(a[-1000:], a)
        self.assertEqual(a[:1000], a)
        self.assertEqual(
            a[:-1000],
            array.array(self.typecode)
        )
        self.assertEqual(a[-1000:1000], a)
        self.assertEqual(
            a[2000:1000],
            array.array(self.typecode)
        )
    def test_extended_getslice(self):
        """Extended (stepped) slicing must agree with list slicing for a
        grid of start/stop/step combinations."""
        # Test extended slicing by comparing with list slicing
        # (Assumes list conversion works correctly, too)
        a = array.array(self.typecode, self.example)
        indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
        for start in indices:
            for stop in indices:
                # Everything except the initial 0 (invalid step)
                for step in indices[1:]:
                    self.assertEqual(list(a[start:stop:step]),
                                     list(a)[start:stop:step])
    def test_setslice(self):
        """Simple-slice assignment, including self-assignment into every
        boundary position, clamped out-of-range bounds, and rejection of
        non-array values / incompatible typecodes."""
        a = array.array(self.typecode, self.example)
        a[:1] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example + self.example[1:])
        )
        a = array.array(self.typecode, self.example)
        a[:-1] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example + self.example[-1:])
        )
        a = array.array(self.typecode, self.example)
        a[-1:] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[:-1] + self.example)
        )
        a = array.array(self.typecode, self.example)
        a[1:] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[:1] + self.example)
        )
        a = array.array(self.typecode, self.example)
        a[1:-1] = a
        self.assertEqual(
            a,
            array.array(
                self.typecode,
                self.example[:1] + self.example + self.example[-1:]
            )
        )
        # out-of-range slice bounds clamp, so these append/prepend
        a = array.array(self.typecode, self.example)
        a[1000:] = a
        self.assertEqual(
            a,
            array.array(self.typecode, 2*self.example)
        )
        a = array.array(self.typecode, self.example)
        a[-1000:] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example)
        )
        a = array.array(self.typecode, self.example)
        a[:1000] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example)
        )
        a = array.array(self.typecode, self.example)
        a[:-1000] = a
        self.assertEqual(
            a,
            array.array(self.typecode, 2*self.example)
        )
        # inverted slices insert at the start position
        a = array.array(self.typecode, self.example)
        a[1:0] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[:1] + self.example + self.example[1:])
        )
        a = array.array(self.typecode, self.example)
        a[2000:1000] = a
        self.assertEqual(
            a,
            array.array(self.typecode, 2*self.example)
        )
        a = array.array(self.typecode, self.example)
        # __setslice__ is the legacy Python 2 slice-assignment protocol
        self.assertRaises(TypeError, a.__setslice__, 0, 0, None)
        self.assertRaises(TypeError, a.__setitem__, slice(0, 0), None)
        self.assertRaises(TypeError, a.__setitem__, slice(0, 1), None)
        b = array.array(self.badtypecode())
        self.assertRaises(TypeError, a.__setslice__, 0, 0, b)
        self.assertRaises(TypeError, a.__setitem__, slice(0, 0), b)
        self.assertRaises(TypeError, a.__setitem__, slice(0, 1), b)
    def test_extended_set_del_slice(self):
        """Stepped slice assignment and deletion must agree with the
        corresponding list operations over a grid of indices."""
        indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
        for start in indices:
            for stop in indices:
                # Everything except the initial 0 (invalid step)
                for step in indices[1:]:
                    a = array.array(self.typecode, self.example)
                    L = list(a)
                    # Make sure we have a slice of exactly the right length,
                    # but with (hopefully) different data.
                    data = L[start:stop:step]
                    data.reverse()
                    L[start:stop:step] = data
                    a[start:stop:step] = array.array(self.typecode, data)
                    self.assertEqual(a, array.array(self.typecode, L))
                    del L[start:stop:step]
                    del a[start:stop:step]
                    self.assertEqual(a, array.array(self.typecode, L))
def test_index(self):
example = 2*self.example
a = array.array(self.typecode, example)
self.assertRaises(TypeError, a.index)
for x in example:
self.assertEqual(a.index(x), example.index(x))
self.assertRaises(ValueError, a.index, None)
self.assertRaises(ValueError, a.index, self.outside)
def test_count(self):
example = 2*self.example
a = array.array(self.typecode, example)
self.assertRaises(TypeError, a.count)
for x in example:
self.assertEqual(a.count(x), example.count(x))
self.assertEqual(a.count(self.outside), 0)
self.assertEqual(a.count(None), 0)
    def test_remove(self):
        """remove() deletes only the first occurrence; absent values
        raise ValueError."""
        for x in self.example:
            example = 2*self.example
            a = array.array(self.typecode, example)
            pos = example.index(x)
            example2 = example[:pos] + example[pos+1:]
            a.remove(x)
            self.assertEqual(a, array.array(self.typecode, example2))
        a = array.array(self.typecode, self.example)
        self.assertRaises(ValueError, a.remove, self.outside)
        self.assertRaises(ValueError, a.remove, None)
    def test_pop(self):
        """pop() from empty raises IndexError; pop(i) honours positive and
        negative indices and defaults to the last item."""
        a = array.array(self.typecode)
        self.assertRaises(IndexError, a.pop)
        a = array.array(self.typecode, 2*self.example)
        self.assertRaises(TypeError, a.pop, 42, 42)
        self.assertRaises(TypeError, a.pop, None)
        self.assertRaises(IndexError, a.pop, len(a))
        self.assertRaises(IndexError, a.pop, -len(a)-1)
        self.assertEntryEqual(a.pop(0), self.example[0])
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[1:]+self.example)
        )
        self.assertEntryEqual(a.pop(1), self.example[2])
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[1:2]+self.example[3:]+self.example)
        )
        self.assertEntryEqual(a.pop(0), self.example[1])
        self.assertEntryEqual(a.pop(), self.example[-1])
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[3:]+self.example[:-1])
        )
def test_reverse(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.reverse, 42)
a.reverse()
self.assertEqual(
a,
array.array(self.typecode, self.example[::-1])
)
    def test_extend(self):
        """extend() accepts arrays (including self) and arbitrary
        iterables, but rejects an incompatible typecode."""
        a = array.array(self.typecode, self.example)
        self.assertRaises(TypeError, a.extend)
        a.extend(array.array(self.typecode, self.example[::-1]))
        self.assertEqual(
            a,
            array.array(self.typecode, self.example+self.example[::-1])
        )
        a = array.array(self.typecode, self.example)
        # extending with itself must not corrupt the buffer
        a.extend(a)
        self.assertEqual(
            a,
            array.array(self.typecode, self.example+self.example)
        )
        b = array.array(self.badtypecode())
        self.assertRaises(TypeError, a.extend, b)
        a = array.array(self.typecode, self.example)
        a.extend(self.example[::-1])
        self.assertEqual(
            a,
            array.array(self.typecode, self.example+self.example[::-1])
        )
    def test_constructor_with_iterable_argument(self):
        """The constructor accepts any iterable and propagates errors
        raised by __iter__ or next() unchanged."""
        a = array.array(self.typecode, iter(self.example))
        b = array.array(self.typecode, self.example)
        self.assertEqual(a, b)
        # non-iterable argument
        self.assertRaises(TypeError, array.array, self.typecode, 10)
        # pass through errors raised in __iter__
        class A:
            def __iter__(self):
                raise UnicodeError
        self.assertRaises(UnicodeError, array.array, self.typecode, A())
        # pass through errors raised in next()
        def B():
            raise UnicodeError
            yield None
        self.assertRaises(UnicodeError, array.array, self.typecode, B())
def test_coveritertraverse(self):
try:
import gc
except ImportError:
return
a = array.array(self.typecode)
l = [iter(a)]
l.append(l)
gc.collect()
    def test_buffer(self):
        """A Python 2 buffer() over an array exposes the raw bytes
        (buffer() itself triggers a py3k warning, hence the guard)."""
        a = array.array(self.typecode, self.example)
        with test_support.check_py3k_warnings():
            b = buffer(a)
        self.assertEqual(b[0], a.tostring()[0])
    def test_weakref(self):
        """Arrays are weak-referenceable; a proxy dies with the array."""
        s = array.array(self.typecode, self.example)
        p = proxy(s)
        self.assertEqual(p.tostring(), s.tostring())
        s = None
        # force collection so the proxy's referent is really gone
        test_support.gc_collect()
        self.assertRaises(ReferenceError, len, p)
    def test_bug_782369(self):
        """Regression for SF bug #782369: repeated array construction
        must not leak references to the int 10 (checked via refcounts)."""
        import sys
        if hasattr(sys, "getrefcount"):
            for i in range(10):
                b = array.array('B', range(64))
            rc = sys.getrefcount(10)
            for i in range(10):
                b = array.array('B', range(64))
            self.assertEqual(rc, sys.getrefcount(10))
    def test_subclass_with_kwargs(self):
        """Regression for SF bug #1486663: constructing a subclass with
        keyword arguments must not raise TypeError."""
        # SF bug #1486663 -- this used to erroneously raise a TypeError
        ArraySubclassWithKwargs('b', newarg=1)
class StringTest(BaseTest):
    """Extra checks shared by the character and unicode array types."""

    def test_setitem(self):
        # run the generic item-assignment checks first
        super(StringTest, self).test_setitem()
        arr = array.array(self.typecode, self.example)
        # a multi-character value is not a valid single item
        self.assertRaises(TypeError, arr.__setitem__, 0, self.example[:2])
class CharacterTest(StringTest):
    """Tests for the 'c' (Python 2 char) typecode."""
    typecode = 'c'
    example = '\x01azAZ\x00\xfe'
    smallerexample = '\x01azAY\x00\xfe'
    biggerexample = '\x01azAZ\x00\xff'
    outside = '\x33'
    minitemsize = 1
    def test_subbclassing(self):
        """Subclasses may add attributes and override methods; slicing
        assignment keeps the subclass usable."""
        class EditableString(array.array):
            def __new__(cls, s, *args, **kwargs):
                return array.array.__new__(cls, 'c', s)
            def __init__(self, s, color='blue'):
                self.color = color
            def strip(self):
                self[:] = array.array('c', self.tostring().strip())
            def __repr__(self):
                return 'EditableString(%r)' % self.tostring()
        s = EditableString("\ttest\r\n")
        s.strip()
        self.assertEqual(s.tostring(), "test")
        self.assertEqual(s.color, "blue")
        s.color = "red"
        self.assertEqual(s.color, "red")
        self.assertEqual(s.__dict__.keys(), ["color"])
    def test_nounicode(self):
        """'c' arrays reject the unicode conversion methods."""
        a = array.array(self.typecode, self.example)
        self.assertRaises(ValueError, a.fromunicode, unicode(''))
        self.assertRaises(ValueError, a.tounicode)
tests.append(CharacterTest)
# 'u' arrays only exist on builds with unicode support (Python 2)
if test_support.have_unicode:
    class UnicodeTest(StringTest):
        """Tests for the 'u' (unicode character) typecode."""
        typecode = 'u'
        example = unicode(r'\x01\u263a\x00\ufeff', 'unicode-escape')
        smallerexample = unicode(r'\x01\u263a\x00\ufefe', 'unicode-escape')
        biggerexample = unicode(r'\x01\u263a\x01\ufeff', 'unicode-escape')
        outside = unicode('\x33')
        minitemsize = 2
        def test_unicode(self):
            """fromunicode()/tounicode() round-trip; repr uses the u''
            literal form; byte strings are rejected."""
            self.assertRaises(TypeError, array.array, 'b', unicode('foo', 'ascii'))
            a = array.array('u', unicode(r'\xa0\xc2\u1234', 'unicode-escape'))
            a.fromunicode(unicode(' ', 'ascii'))
            a.fromunicode(unicode('', 'ascii'))
            a.fromunicode(unicode('', 'ascii'))
            a.fromunicode(unicode(r'\x11abc\xff\u1234', 'unicode-escape'))
            s = a.tounicode()
            self.assertEqual(
                s,
                unicode(r'\xa0\xc2\u1234 \x11abc\xff\u1234', 'unicode-escape')
            )
            s = unicode(r'\x00="\'a\\b\x80\xff\u0000\u0001\u1234', 'unicode-escape')
            a = array.array('u', s)
            self.assertEqual(
                repr(a),
                r"""array('u', u'\x00="\'a\\b\x80\xff\x00\x01\u1234')"""
            )
            self.assertRaises(TypeError, a.fromunicode)
    tests.append(UnicodeTest)
class NumberTest(BaseTest):
    """Checks shared by every numeric typecode (extended slicing,
    overflow helpers, subclassing with __slots__)."""
    def test_extslice(self):
        """Stepped slicing, including long indices and clamped bounds."""
        a = array.array(self.typecode, range(5))
        self.assertEqual(a[::], a)
        self.assertEqual(a[::2], array.array(self.typecode, [0,2,4]))
        self.assertEqual(a[1::2], array.array(self.typecode, [1,3]))
        self.assertEqual(a[::-1], array.array(self.typecode, [4,3,2,1,0]))
        self.assertEqual(a[::-2], array.array(self.typecode, [4,2,0]))
        self.assertEqual(a[3::-2], array.array(self.typecode, [3,1]))
        self.assertEqual(a[-100:100:], a)
        self.assertEqual(a[100:-100:-1], a[::-1])
        # Python 2 long slice components behave like ints
        self.assertEqual(a[-100L:100L:2L], array.array(self.typecode, [0,2,4]))
        self.assertEqual(a[1000:2000:2], array.array(self.typecode, []))
        self.assertEqual(a[-1000:-2000:-2], array.array(self.typecode, []))
    def test_delslice(self):
        """Stepped slice deletion, including huge steps (issue 7788)."""
        a = array.array(self.typecode, range(5))
        del a[::2]
        self.assertEqual(a, array.array(self.typecode, [1,3]))
        a = array.array(self.typecode, range(5))
        del a[1::2]
        self.assertEqual(a, array.array(self.typecode, [0,2,4]))
        a = array.array(self.typecode, range(5))
        del a[1::-2]
        self.assertEqual(a, array.array(self.typecode, [0,2,3,4]))
        a = array.array(self.typecode, range(10))
        del a[::1000]
        self.assertEqual(a, array.array(self.typecode, [1,2,3,4,5,6,7,8,9]))
        # test issue7788
        a = array.array(self.typecode, range(10))
        del a[9::1<<333]
    def test_assignment(self):
        """Stepped slice assignment, including reverse self-assignment
        and the three equivalent simple-slice spellings."""
        a = array.array(self.typecode, range(10))
        a[::2] = array.array(self.typecode, [42]*5)
        self.assertEqual(a, array.array(self.typecode, [42, 1, 42, 3, 42, 5, 42, 7, 42, 9]))
        a = array.array(self.typecode, range(10))
        a[::-4] = array.array(self.typecode, [10]*3)
        self.assertEqual(a, array.array(self.typecode, [0, 10, 2, 3, 4, 10, 6, 7, 8 ,10]))
        a = array.array(self.typecode, range(4))
        a[::-1] = a
        self.assertEqual(a, array.array(self.typecode, [3, 2, 1, 0]))
        a = array.array(self.typecode, range(10))
        b = a[:]
        c = a[:]
        ins = array.array(self.typecode, range(2))
        a[2:3] = ins
        b[slice(2,3)] = ins
        c[2:3:] = ins
    def test_iterationcontains(self):
        """Iteration order matches construction; 'in' uses values."""
        a = array.array(self.typecode, range(10))
        self.assertEqual(list(a), range(10))
        b = array.array(self.typecode, [20])
        self.assertEqual(a[-1] in a, True)
        self.assertEqual(b[0] not in a, True)
    def check_overflow(self, lower, upper):
        """Assert that values just outside [lower, upper] overflow and
        the limits themselves do not (used by numeric subclasses)."""
        # method to be used by subclasses
        # should not overflow assigning lower limit
        a = array.array(self.typecode, [lower])
        a[0] = lower
        # should overflow assigning less than lower limit
        self.assertRaises(OverflowError, array.array, self.typecode, [lower-1])
        self.assertRaises(OverflowError, a.__setitem__, 0, lower-1)
        # should not overflow assigning upper limit
        a = array.array(self.typecode, [upper])
        a[0] = upper
        # should overflow assigning more than upper limit
        self.assertRaises(OverflowError, array.array, self.typecode, [upper+1])
        self.assertRaises(OverflowError, a.__setitem__, 0, upper+1)
    def test_subclassing(self):
        """A __slots__ subclass works and rejects new attributes."""
        typecode = self.typecode
        class ExaggeratingArray(array.array):
            __slots__ = ['offset']
            def __new__(cls, typecode, data, offset):
                return array.array.__new__(cls, typecode, data)
            def __init__(self, typecode, data, offset):
                self.offset = offset
            def __getitem__(self, i):
                return array.array.__getitem__(self, i) + self.offset
        a = ExaggeratingArray(self.typecode, [3, 6, 7, 11], 4)
        self.assertEntryEqual(a[0], 7)
        self.assertRaises(AttributeError, setattr, a, "color", "blue")
class SignedNumberTest(NumberTest):
    """Shared data and overflow limits for signed integer typecodes."""
    example = [-1, 0, 1, 42, 0x7f]
    smallerexample = [-1, 0, 1, 42, 0x7e]
    biggerexample = [-1, 0, 1, 43, 0x7f]
    outside = 23
    def test_overflow(self):
        # limits derived from the item size: [-2**(n-1), 2**(n-1)-1]
        a = array.array(self.typecode)
        lower = -1 * long(pow(2, a.itemsize * 8 - 1))
        upper = long(pow(2, a.itemsize * 8 - 1)) - 1L
        self.check_overflow(lower, upper)
class UnsignedNumberTest(NumberTest):
    """Shared data and overflow limits for unsigned integer typecodes."""
    example = [0, 1, 17, 23, 42, 0xff]
    smallerexample = [0, 1, 17, 23, 42, 0xfe]
    biggerexample = [0, 1, 17, 23, 43, 0xff]
    outside = 0xaa
    def test_overflow(self):
        # limits derived from the item size: [0, 2**n - 1]
        a = array.array(self.typecode)
        lower = 0
        upper = long(pow(2, a.itemsize * 8)) - 1L
        self.check_overflow(lower, upper)
# Concrete integer typecodes.  minitemsize is the *minimum* guaranteed
# item size; the platform may use a larger one.
class ByteTest(SignedNumberTest):
    typecode = 'b'
    minitemsize = 1
tests.append(ByteTest)
class UnsignedByteTest(UnsignedNumberTest):
    typecode = 'B'
    minitemsize = 1
tests.append(UnsignedByteTest)
class ShortTest(SignedNumberTest):
    typecode = 'h'
    minitemsize = 2
tests.append(ShortTest)
class UnsignedShortTest(UnsignedNumberTest):
    typecode = 'H'
    minitemsize = 2
tests.append(UnsignedShortTest)
class IntTest(SignedNumberTest):
    typecode = 'i'
    minitemsize = 2
tests.append(IntTest)
class UnsignedIntTest(UnsignedNumberTest):
    typecode = 'I'
    minitemsize = 2
tests.append(UnsignedIntTest)
class LongTest(SignedNumberTest):
    typecode = 'l'
    minitemsize = 4
tests.append(LongTest)
class UnsignedLongTest(UnsignedNumberTest):
    typecode = 'L'
    minitemsize = 4
tests.append(UnsignedLongTest)
class FPTest(NumberTest):
    """Shared data and checks for the floating-point typecodes."""
    example = [-42.0, 0, 42, 1e5, -1e10]
    smallerexample = [-42.0, 0, 42, 1e5, -2e10]
    biggerexample = [-42.0, 0, 42, 1e5, 1e10]
    outside = 23
    def assertEntryEqual(self, entry1, entry2):
        # floats: compare approximately, not bit-for-bit
        self.assertAlmostEqual(entry1, entry2)
    def test_byteswap(self):
        """byteswap() is an involution; swapped multi-byte floats differ
        at the byte level."""
        a = array.array(self.typecode, self.example)
        self.assertRaises(TypeError, a.byteswap, 42)
        if a.itemsize in (1, 2, 4, 8):
            b = array.array(self.typecode, self.example)
            b.byteswap()
            if a.itemsize==1:
                self.assertEqual(a, b)
            else:
                # On alphas treating the byte swapped bit patters as
                # floats/doubles results in floating point exceptions
                # => compare the 8bit string values instead
                self.assertNotEqual(a.tostring(), b.tostring())
            b.byteswap()
            self.assertEqual(a, b)
# Single-precision float typecode.
class FloatTest(FPTest):
    typecode = 'f'
    minitemsize = 4
tests.append(FloatTest)
class DoubleTest(FPTest):
    """Double-precision float typecode, plus allocation-overflow checks."""
    typecode = 'd'
    minitemsize = 8
    def test_alloc_overflow(self):
        """Repeating an array past sys.maxsize items must raise
        MemoryError, for both '*=' and '*'."""
        from sys import maxsize
        a = array.array('d', [-1]*65536)
        try:
            a *= maxsize//65536 + 1
        except MemoryError:
            pass
        else:
            self.fail("Array of size > maxsize created - MemoryError expected")
        b = array.array('d', [ 2.71828183, 3.14159265, -1])
        try:
            b * (maxsize//3 + 1)
        except MemoryError:
            pass
        else:
            self.fail("Array of size > maxsize created - MemoryError expected")
tests.append(DoubleTest)
def test_main(verbose=None):
    """Run every registered test class; with verbose on a debug build,
    re-run five times and print total refcounts to spot leaks."""
    import sys
    test_support.run_unittest(*tests)
    # verify reference counting
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        counts = [None] * 5
        for i in xrange(len(counts)):
            test_support.run_unittest(*tests)
            gc.collect()
            counts[i] = sys.gettotalrefcount()
        print counts
# Script entry point: run with leak-checking enabled.
if __name__ == "__main__":
    test_main(verbose=True)
| |
#!/usr/bin/env python
from ansible.module_utils.hashivault import hashivault_argspec
from ansible.module_utils.hashivault import hashivault_auth_client
from ansible.module_utils.hashivault import hashivault_init
from ansible.module_utils.hashivault import hashiwrapper
ANSIBLE_METADATA = {'status': ['stableinterface'], 'supported_by': 'community', 'version': '1.1'}
DOCUMENTATION = '''
---
module: hashivault_identity_group
version_added: "3.17.7"
short_description: Hashicorp Vault identity group configuration module
description:
- Module to configure identity groups in Hashicorp Vault.
options:
mount_point:
description:
- location where this method/backend is mounted. also known as "path"
default: identity
name:
description:
- name of the group
default: None
id:
description:
- ID of the group
default: None
group_type:
description:
- Type of the group, internal or external. Defaults to internal
default: internal
metadata:
description:
- metadata to be associated with the group
default: None
policies:
description:
- policies to be tied to the group
default: None
member_group_ids:
description:
- group IDs to be assigned as group members
default: None
member_entity_ids:
description:
- entity IDs to be assigned as group members
default: None
state:
description:
- whether create/update or delete the entity
extends_documentation_fragment: hashivault
'''
EXAMPLES = '''
---
- hosts: localhost
tasks:
- hashivault_identity_group:
name: 'my-group'
policies:
- 'my-policy'
member_group_ids:
- 'group-id-xxxx'
member_entity_ids:
- 'entity-id-xxxx'
metadata:
'department': 'ops'
token: "{{ vault_token }}"
url: "{{ vault_url }}"
'''
def main():
    """Ansible entry point: declare the argument spec, run the module
    logic and report the result back to Ansible."""
    argspec = hashivault_argspec()
    argspec.update({
        'name': dict(required=False, type='str', default=None),
        'id': dict(required=False, type='str', default=None),
        'group_type': dict(required=False, type='str', default='internal'),
        'mount_point': dict(required=False, type='str', default='identity'),
        'metadata': dict(required=False, type='dict', default={}),
        'policies': dict(required=False, type='list', default=[]),
        'member_group_ids': dict(required=False, type='list', default=None),
        'member_entity_ids': dict(required=False, type='list', default=None),
        'state': dict(required=False, choices=['present', 'absent'], default='present'),
    })
    module = hashivault_init(argspec)
    result = hashivault_identity_group(module.params)
    if result.get('failed'):
        module.fail_json(**result)
    else:
        module.exit_json(**result)
def hashivault_identity_group_update(group_details, client, group_id, group_name, group_type, group_metadata,
                                     group_policies, group_member_group_ids, group_member_entity_ids, mount_point):
    """Update an existing identity group if the desired state differs.

    group_details is the 'data' payload previously read from Vault.
    Returns an Ansible-style result dict: {'changed': bool, ...} or
    {'failed': True, 'msg': ...} on error.
    """
    changed = False
    # if the groups were created without any entity members, group members, or policies,
    # then vault will return null for each respectively
    # if they were created with these and then all were removed it returns an empty list
    # for each respectively. The below is required to account for this
    # existing member_group_ids
    if group_details['member_group_ids'] is not None and group_member_group_ids is not None:
        if set(group_details['member_group_ids']) != set(group_member_group_ids):
            changed = True
    # new member_group_ids and none existing
    elif group_member_group_ids is not None and len(group_member_group_ids) > 0:
        changed = True
    # existing policies
    if group_details['policies'] is not None:
        if set(group_details['policies']) != set(group_policies):
            changed = True
    # new policies and none existing
    elif len(group_policies) > 0:
        changed = True
    # existing member_entity_ids
    if group_details['member_entity_ids'] is not None and group_member_entity_ids is not None:
        if set(group_details['member_entity_ids']) != set(group_member_entity_ids):
            changed = True
    # new member_entity_ids and none existing
    elif group_member_entity_ids is not None and len(group_member_entity_ids) > 0:
        changed = True
    # existing metadata
    if group_details['metadata'] is not None:
        if group_details['metadata'] != group_metadata:
            changed = True
    # new metadata and none existing
    elif len(group_metadata) > 0:
        changed = True
    # name/type changes also trigger an update
    if group_details['name'] != group_name or group_details['type'] != group_type or changed:
        try:
            response = client.secrets.identity.update_group(
                group_id=group_id,
                name=group_name,
                group_type=group_type,
                metadata=group_metadata,
                policies=group_policies,
                member_group_ids=group_member_group_ids,
                member_entity_ids=group_member_entity_ids,
                mount_point=mount_point
            )
        except Exception as e:
            return {'failed': True, 'msg': str(e)}
        # NOTE(review): assumes update_group returns an HTTP response
        # object (204 = no body); hvac may return a dict instead -- confirm
        if response.status_code == 204:
            return {'changed': True}
        return {'changed': True, 'data': response}
    return {'changed': False}
def hashivault_identity_group_create_or_update(params):
    """Create the identity group if missing, otherwise delegate to
    hashivault_identity_group_update.  Lookup is by id when given,
    else by name; one of the two is required.

    Returns an Ansible-style result dict.
    """
    client = hashivault_auth_client(params)
    group_name = params.get('name')
    group_id = params.get('id')
    group_type = params.get('group_type')
    mount_point = params.get('mount_point')
    group_metadata = params.get('metadata')
    group_policies = params.get('policies')
    group_member_group_ids = params.get('member_group_ids')
    group_member_entity_ids = params.get('member_entity_ids')
    if group_id is not None:
        # lookup by id: the group must already exist
        try:
            group_details = client.secrets.identity.read_group(group_id=group_id)
        except Exception as e:
            return {'failed': True, 'msg': str(e)}
        return hashivault_identity_group_update(group_details['data'], client, group_id, group_name, group_type,
                                                group_metadata, group_policies, group_member_group_ids,
                                                group_member_entity_ids, mount_point)
    elif group_name is not None:
        try:
            group_details = client.secrets.identity.read_group_by_name(name=group_name)
        except Exception:
            # NOTE(review): any read failure (not only "not found") is
            # treated as "group missing" and triggers creation
            response = client.secrets.identity.create_or_update_group_by_name(
                name=group_name,
                group_type=group_type,
                metadata=group_metadata,
                policies=group_policies,
                member_group_ids=group_member_group_ids,
                member_entity_ids=group_member_entity_ids,
                mount_point=mount_point
            )
            # hvac may hand back either a raw requests.Response or a
            # parsed dict; normalize to a dict
            from requests.models import Response
            if isinstance(response, Response):
                response = response.json()
            return {'changed': True, 'data': response['data']}
        return hashivault_identity_group_update(group_details['data'], client, group_name=group_name,
                                                group_id=group_details['data']['id'],
                                                group_type=group_type,
                                                group_metadata=group_metadata,
                                                group_policies=group_policies,
                                                group_member_group_ids=group_member_group_ids,
                                                group_member_entity_ids=group_member_entity_ids,
                                                mount_point=mount_point)
    return {'failed': True, 'msg': "Either name or id must be provided"}
def hashivault_identity_group_delete(params):
    """Delete an identity group, looked up by id (preferred) or name.

    Reads the group first so a missing group reports changed=False
    instead of failing.  Returns an Ansible-style result dict.
    """
    client = hashivault_auth_client(params)
    ident = params.get('id')
    name = params.get('name')
    if ident is not None:
        try:
            client.secrets.identity.read_group(group_id=ident)
        except Exception:
            return {'changed': False}
        client.secrets.identity.delete_group(group_id=ident)
        return {'changed': True}
    if name is not None:
        try:
            client.secrets.identity.read_group_by_name(name=name)
        except Exception:
            return {'changed': False}
        client.secrets.identity.delete_group_by_name(name=name)
        return {'changed': True}
    return {'failed': True, 'msg': "Either name or id must be provided"}
@hashiwrapper
def hashivault_identity_group(params):
    """Dispatch on the requested state: 'present' creates/updates the
    group, 'absent' deletes it."""
    action = params.get('state')
    if action == 'present':
        return hashivault_identity_group_create_or_update(params)
    if action == 'absent':
        return hashivault_identity_group_delete(params)
# Ansible executes the module file directly.
if __name__ == '__main__':
    main()
| |
'''
Handlers for command line subcommands
'''
from .. import api, bundle, readers, writers, refconverters, convert, manip
from ..misc import compact_elements
from .common import format_columns
def _bse_cli_list_basis_sets(args):
    '''Handles the list-basis-sets subcommand'''
    metadata = api.filter_basis_sets(args.substr, args.family, args.role, args.elements, args.data_dir)
    if args.no_description:
        lines = [entry['display_name'] for entry in metadata.values()]
    else:
        lines = format_columns([(entry['display_name'], entry['description'])
                                for entry in metadata.values()])
    return '\n'.join(lines)
def _bse_cli_list_families(args):
    '''Handles the list-families subcommand'''
    return '\n'.join(api.get_families(args.data_dir))
def _bse_cli_list_writer_formats(args):
    '''Handles the list-writer-formats subcommand'''
    fmts = writers.get_writer_formats()
    lines = fmts.keys() if args.no_description else format_columns(fmts.items())
    # writer formats are the only listing that is sorted for display
    return '\n'.join(sorted(lines))
def _bse_cli_list_reader_formats(args):
    '''Handles the list-reader-formats subcommand'''
    fmts = readers.get_reader_formats()
    lines = fmts.keys() if args.no_description else format_columns(fmts.items())
    return '\n'.join(lines)
def _bse_cli_list_formats(args):
    '''Handles the list-formats subcommand'''
    fmts = api.get_formats()
    lines = fmts.keys() if args.no_description else format_columns(fmts.items())
    return '\n'.join(lines)
def _bse_cli_list_ref_formats(args):
    '''Handles the list-ref-formats subcommand'''
    fmts = refconverters.get_reference_formats()
    lines = fmts.keys() if args.no_description else format_columns(fmts.items())
    return '\n'.join(lines)
def _bse_cli_list_roles(args):
    '''Handles the list-roles subcommand'''
    roles = api.get_roles()
    lines = roles.keys() if args.no_description else format_columns(roles.items())
    return '\n'.join(lines)
def _bse_cli_get_data_dir(args):
    '''Handles the get-data-dir subcommand'''
    # args is unused; the data dir is a library-level constant
    return api.get_data_dir()
def _bse_cli_lookup_by_role(args):
    '''Handles the lookup-by-role subcommand'''
    return api.lookup_basis_by_role(args.basis, args.role, args.data_dir)
def _bse_cli_get_basis(args):
    '''Handles the get-basis subcommand'''
    # straight pass-through of the CLI flags to the library call
    return api.get_basis(name=args.basis,
                         elements=args.elements,
                         version=args.version,
                         fmt=args.fmt,
                         uncontract_general=args.unc_gen,
                         uncontract_spdf=args.unc_spdf,
                         uncontract_segmented=args.unc_seg,
                         remove_free_primitives=args.rm_free,
                         make_general=args.make_gen,
                         optimize_general=args.opt_gen,
                         augment_diffuse=args.aug_diffuse,
                         augment_steep=args.aug_steep,
                         get_aux=args.get_aux,
                         data_dir=args.data_dir,
                         header=not args.noheader)
def _bse_cli_get_refs(args):
    '''Handles the get-refs subcommand'''
    return api.get_references(basis_name=args.basis,
                              elements=args.elements,
                              version=args.version,
                              fmt=args.reffmt,
                              data_dir=args.data_dir)
def _bse_cli_get_info(args):
    '''Handles the get-info subcommand'''
    # raises KeyError if args.basis is not a known basis set
    bs_meta = api.get_metadata(args.data_dir)[args.basis]
    ret = []
    ret.append('-' * 80)
    ret.append(args.basis)
    ret.append('-' * 80)
    ret.append('    Display Name: ' + bs_meta['display_name'])
    ret.append('     Description: ' + bs_meta['description'])
    ret.append('            Role: ' + bs_meta['role'])
    ret.append('          Family: ' + bs_meta['family'])
    ret.append('  Function Types: ' + ','.join(bs_meta['function_types']))
    ret.append('  Latest Version: ' + bs_meta['latest_version'])
    ret.append('')
    aux = bs_meta['auxiliaries']
    if not aux:
        ret.append('Auxiliary Basis Sets: None')
    else:
        ret.append('Auxiliary Basis Sets:')
        ret.extend(format_columns(list(aux.items()), '    '))
    ver = bs_meta['versions']
    ret.append('')
    ret.append('Versions:')
    # Print 4 columns - version, date, elements, revision description
    version_lines = format_columns([(k, v['revdate'], compact_elements(v['elements']), v['revdesc'])
                                    for k, v in ver.items()], '    ')
    ret.extend(version_lines)
    return '\n'.join(ret)
def _bse_cli_get_notes(args):
    '''Handles the get-notes subcommand'''
    return api.get_basis_notes(args.basis, args.data_dir)
def _bse_cli_get_family(args):
    '''Handles the get-family subcommand'''
    return api.get_basis_family(args.basis, args.data_dir)
def _bse_cli_get_versions(args):
    '''Handles the get-versions subcommand'''
    # basis set names are stored lowercase
    name = args.basis.lower()
    metadata = api.get_metadata(args.data_dir)
    if name not in metadata:
        raise KeyError(
            "Basis set {} does not exist. For a complete list of basis sets, use the 'list-basis-sets' command".format(
                name))
    versions = metadata[name]['versions']
    if args.no_description:
        lines = {ver: info['revdesc'] for ver, info in versions.items()}.keys()
    else:
        lines = format_columns([(ver, info['revdesc']) for ver, info in versions.items()])
    return '\n'.join(lines)
def _bse_cli_get_family_notes(args):
    '''Handles the get-family-notes subcommand'''
    return api.get_family_notes(args.family, args.data_dir)
def _bse_cli_convert_basis(args):
    '''Handles the convert-basis subcommand'''
    # We convert file -> file
    convert.convert_formatted_basis_file(args.input_file, args.output_file, args.in_fmt, args.out_fmt)
    return "Converted {} -> {}".format(args.input_file, args.output_file)
def _bse_cli_create_bundle(args):
    '''Handles the create-bundle subcommand'''
    bundle.create_bundle(args.bundle_file, args.fmt, args.reffmt, args.archive_type, args.data_dir)
    return "Created " + args.bundle_file
def _bse_cli_convert_basis(args):
    '''Handles the convert-basis subcommand'''
    # NOTE(review): this is a byte-for-byte duplicate of _bse_cli_convert_basis
    # defined earlier in this module; this later definition is the one that is
    # actually bound at import time. Consider removing one copy.
    # We convert file -> file
    convert.convert_formatted_basis_file(args.input_file, args.output_file, args.in_fmt, args.out_fmt)
    return "Converted {} -> {}".format(args.input_file, args.output_file)
def _bse_cli_autoaux_basis(args):
    '''Handles the autoaux-basis subcommand'''
    orbital = readers.read_formatted_basis_file(args.input_file, args.in_fmt)
    # The BSE file reader leaves these metadata fields unset, but the
    # downstream manipulation code expects them to exist
    for field in ('revision_description', 'version'):
        orbital[field] = ''
    # Build the AutoAux fitting basis and write it to the requested file
    autoaux = manip.autoaux_basis(orbital)
    writers.write_formatted_basis_file(autoaux, args.output_file, args.out_fmt)
    return "Orbital basis {} -> AutoAux basis {}".format(args.input_file, args.output_file)
def _bse_cli_autoabs_basis(args):
    '''Handles the autoabs-basis subcommand'''
    orbital = readers.read_formatted_basis_file(args.input_file, args.in_fmt)
    # The BSE file reader leaves these metadata fields unset, but the
    # downstream manipulation code expects them to exist
    for field in ('revision_description', 'version'):
        orbital[field] = ''
    # Build the AutoABS fitting basis and write it to the requested file
    autoabs = manip.autoabs_basis(orbital)
    writers.write_formatted_basis_file(autoabs, args.output_file, args.out_fmt)
    return "Orbital basis {} -> AutoABS basis {}".format(args.input_file, args.output_file)
def bse_cli_handle_subcmd(args):
    '''Dispatch args.subcmd to its handler function and return its output string.

    Raises KeyError if the subcommand is not recognized.
    '''
    handler_map = {
        # 'list-formats' is kept as an alias of 'list-writer-formats'
        'list-formats': _bse_cli_list_writer_formats,
        'list-writer-formats': _bse_cli_list_writer_formats,
        'list-reader-formats': _bse_cli_list_reader_formats,
        'list-ref-formats': _bse_cli_list_ref_formats,
        'list-roles': _bse_cli_list_roles,
        'get-data-dir': _bse_cli_get_data_dir,
        'list-basis-sets': _bse_cli_list_basis_sets,
        'list-families': _bse_cli_list_families,
        'lookup-by-role': _bse_cli_lookup_by_role,
        'get-basis': _bse_cli_get_basis,
        'get-refs': _bse_cli_get_refs,
        'get-info': _bse_cli_get_info,
        'get-notes': _bse_cli_get_notes,
        'get-family': _bse_cli_get_family,
        'get-versions': _bse_cli_get_versions,
        'get-family-notes': _bse_cli_get_family_notes,
        'convert-basis': _bse_cli_convert_basis,
        'create-bundle': _bse_cli_create_bundle,
        'autoaux-basis': _bse_cli_autoaux_basis,
        'autoabs-basis': _bse_cli_autoabs_basis
    }
    return handler_map[args.subcmd](args)
| |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exceptions raised by the Horizon code and the machinery for handling them.
"""
import logging
import os
import sys
import six
from django.core.management import color_style # noqa
from django.http import HttpRequest # noqa
from django.utils import encoding
from django.utils.translation import ugettext_lazy as _
from django.views.debug import CLEANSED_SUBSTITUTE # noqa
from django.views.debug import SafeExceptionReporterFilter # noqa
from horizon.conf import HORIZON_CONFIG # noqa
from horizon import messages
LOG = logging.getLogger(__name__)
class HorizonReporterFilter(SafeExceptionReporterFilter):
    """Error report filter that's always active, even in DEBUG mode."""
    def is_active(self, request):
        # Django's stock filter is active only when DEBUG is off; Horizon
        # always scrubs sensitive data from error reports.
        return True
    # TODO(gabriel): This bugfix is cribbed from Django's code. When 1.4.1
    # is available we can remove this code.
    def get_traceback_frame_variables(self, request, tb_frame):
        """Replaces the values of variables marked as sensitive with
        stars (*********).
        """
        # Loop through the frame's callers to see if the sensitive_variables
        # decorator was used.
        current_frame = tb_frame.f_back
        sensitive_variables = None
        while current_frame is not None:
            if (current_frame.f_code.co_name == 'sensitive_variables_wrapper'
                and 'sensitive_variables_wrapper'
                in current_frame.f_locals):
                # The sensitive_variables decorator was used, so we take note
                # of the sensitive variables' names.
                wrapper = current_frame.f_locals['sensitive_variables_wrapper']
                sensitive_variables = getattr(wrapper,
                                              'sensitive_variables',
                                              None)
                break
            current_frame = current_frame.f_back
        cleansed = []
        if self.is_active(request) and sensitive_variables:
            if sensitive_variables == '__ALL__':
                # Cleanse all variables
                for name, value in tb_frame.f_locals.items():
                    cleansed.append((name, CLEANSED_SUBSTITUTE))
                return cleansed
            else:
                # Cleanse specified variables
                for name, value in tb_frame.f_locals.items():
                    if name in sensitive_variables:
                        value = CLEANSED_SUBSTITUTE
                    elif isinstance(value, HttpRequest):
                        # Cleanse the request's POST parameters.
                        value = self.get_request_repr(value)
                    cleansed.append((name, value))
                return cleansed
        else:
            # Potentially cleanse only the request if it's one of the
            # frame variables.
            for name, value in tb_frame.f_locals.items():
                if isinstance(value, HttpRequest):
                    # Cleanse the request's POST parameters.
                    value = self.get_request_repr(value)
                cleansed.append((name, value))
            return cleansed
class HorizonException(Exception):
    """Common base class for all exceptions raised by Horizon itself.

    Lets ``except HorizonException`` distinguish our errors from
    third-party ones.
    """
class Http302(HorizonException):
    """Raised from within a handler to bail out early and redirect.

    Caught at the middleware level, which issues an HTTP redirect to
    ``location``.
    """
    status_code = 302

    def __init__(self, location, message=None):
        self.message = message
        self.location = location
class NotAuthorized(HorizonException):
    """Raised when a user lacks permission to access a resource.

    For example, when failing the :func:`~horizon.decorators.require_perms`
    decorator. The included :class:`~horizon.middleware.HorizonMiddleware`
    catches ``NotAuthorized`` and handles it gracefully by displaying an
    error message and redirecting the user to a login page.
    """
    # NOTE(review): 401 here (and 403 on NotAuthenticated) look swapped
    # relative to RFC HTTP semantics; presumably historical -- confirm with
    # the middleware before changing either value.
    status_code = 401
class NotAuthenticated(HorizonException):
    """Raised when a request is made by a user who is not logged in.

    The included :class:`~horizon.middleware.HorizonMiddleware` catches
    ``NotAuthenticated`` and handles it gracefully by displaying an error
    message and redirecting the user to a login page.
    """
    status_code = 403
class NotFound(HorizonException):
    """Uniform replacement for any "Not Found"-type API error."""
    status_code = 404
class Conflict(HorizonException):
    """Uniform replacement for any "Conflict"-type API error."""
    status_code = 409
class RecoverableError(HorizonException):
    """Uniform replacement for any "Recoverable"-type API error."""
    status_code = 100  # HTTP status code "Continue"
class ServiceCatalogException(HorizonException):
    """Raised when a requested service is missing from Keystone's catalog."""

    def __init__(self, service_name):
        super(ServiceCatalogException, self).__init__(
            'Invalid service catalog service: %s' % service_name)
class AlreadyExists(HorizonException):
    """Raised when creating an API resource that already exists."""

    def __init__(self, name, resource_type):
        self.attrs = {"name": name, "resource": resource_type}
        self.msg = _('A %(resource)s with the name "%(name)s" already exists.')

    def _format(self):
        # Single interpolation point shared by all the string dunders.
        return self.msg % self.attrs

    def __repr__(self):
        return self._format()

    def __str__(self):
        return self._format()

    def __unicode__(self):
        # Python 2 compatibility.
        return self._format()
class ConfigurationError(HorizonException):
    """Raised when invalid settings have been provided."""
class NotAvailable(HorizonException):
    """Raised when something is not available."""
class WorkflowError(HorizonException):
    """Raised when something goes wrong in a workflow."""
class WorkflowValidationError(HorizonException):
    """Raised during workflow validation.

    Signals that required data is missing or that existing data is invalid.
    """
class HandledException(HorizonException):
    """Internal marker wrapping an exception :func:`handle` already processed.

    Prevents duplicate user messages and logging when the same exception
    passes through :func:`horizon.exceptions.handle` more than once.
    """

    def __init__(self, wrapped):
        self.wrapped = wrapped
# Exception classifiers used by handle(); each tuple is seeded from the
# HORIZON_CONFIG settings so deployments can register extra exception types.
UNAUTHORIZED = tuple(HORIZON_CONFIG['exceptions']['unauthorized'])
NOT_FOUND = tuple(HORIZON_CONFIG['exceptions']['not_found'])
RECOVERABLE = (AlreadyExists, Conflict, NotAvailable, ServiceCatalogException)
RECOVERABLE += tuple(HORIZON_CONFIG['exceptions']['recoverable'])
def error_color(msg):
    """Wrap *msg* in the console style used for error output."""
    style = color_style()
    return style.ERROR_OUTPUT(msg)
def check_message(keywords, message):
    """Replace an in-flight exception's message if it matches *keywords*.

    Must be called from within an ``except`` block. If every word in
    ``keywords`` appears in the string form of the exception currently being
    handled, its ``message`` attribute is set to ``message`` and the original
    exception is re-raised; otherwise ``None`` is returned and the caller
    decides what to do. This allows selective control over API error
    messages.
    """
    # NOTE: the original docstring claimed an ``ActionError`` was raised;
    # the code has always re-raised the original exception instead.
    exc_type, exc_value, exc_traceback = sys.exc_info()
    if set(str(exc_value).split(" ")).issuperset(set(keywords)):
        exc_value.message = message
        raise
def handle_unauthorized(request, message, redirect, ignore, escalate, handled,
                        force_silence, force_log,
                        log_method, log_entry, log_level):
    """Process an UNAUTHORIZED-class exception for :func:`handle`."""
    if ignore:
        return NotAuthorized

    if not (force_silence or handled):
        log_method(error_color("Unauthorized: %s" % log_entry))

    if not handled:
        # Some clients return fairly useless error text, so fall back to a
        # generic message when the caller supplied none.
        fallback = _("Unauthorized. Please try logging in again.")
        if message:
            message = _("Unauthorized: %s") % message
        messages.error(request, message or fallback)

    if escalate:
        # Imported here to avoid a circular import: django.contrib.auth needs
        # openstack_dashboard.settings loaded (it touches settings.CACHES in
        # django.core.caches), which in turn needs django.contrib.auth while
        # importing openstack_auth.utils.
        from django.contrib.auth import logout  # noqa
        logout(request)
        # Log the user out and let the middleware redirect appropriately.
        raise NotAuthorized

    # Otherwise present our "unauthorized" message and resume normal flow.
    return NotAuthorized
def handle_notfound(request, message, redirect, ignore, escalate, handled,
                    force_silence, force_log,
                    log_method, log_entry, log_level):
    """Process a NOT_FOUND-class exception for :func:`handle`."""
    should_log = not force_silence and not handled and (not ignore or force_log)
    if should_log:
        log_method(error_color("Not Found: %s" % log_entry))
    if not (ignore or handled):
        messages.error(request, message or log_entry)
    if redirect:
        raise Http302(redirect)
    if not escalate:
        # Hand back the wrapper class so the caller resumes normal code flow.
        return NotFound
def handle_recoverable(request, message, redirect, ignore, escalate, handled,
                       force_silence, force_log,
                       log_method, log_entry, log_level):
    """Process a RECOVERABLE-class exception for :func:`handle`."""
    if not force_silence and not handled and (not ignore or force_log):
        # Recoverable errors default to WARN rather than the caller's level.
        warn_method = getattr(LOG, log_level or "warning")
        warn_method(error_color("Recoverable error: %s" % log_entry))
    if not (ignore or handled):
        messages.error(request, message or log_entry)
    if redirect:
        raise Http302(redirect)
    if not escalate:
        # Hand back the wrapper class so the caller resumes normal code flow.
        return RecoverableError
# Dispatch table consulted by handle(): each caught exception is tested
# against the 'exc' tuples in order and every matching handler runs.
# 'set_wrap' marks handlers whose exceptions are re-wrapped in
# HandledException when they escalate past handle().
HANDLE_EXC_METHODS = [
    {'exc': UNAUTHORIZED, 'handler': handle_unauthorized, 'set_wrap': False},
    {'exc': NOT_FOUND, 'handler': handle_notfound, 'set_wrap': True},
    {'exc': RECOVERABLE, 'handler': handle_recoverable, 'set_wrap': True},
]
def handle(request, message=None, redirect=None, ignore=False,
           escalate=False, log_level=None, force_log=None):
    """Centralized error handling for Horizon.
    Because Horizon consumes so many different APIs with completely
    different ``Exception`` types, it's necessary to have a centralized
    place for handling exceptions which may be raised.
    Exceptions are roughly divided into 3 types:
    #. ``UNAUTHORIZED``: Errors resulting from authentication or authorization
    problems. These result in being logged out and sent to the login screen.
    #. ``NOT_FOUND``: Errors resulting from objects which could not be
    located via the API. These generally result in a user-facing error
    message, but are otherwise returned to the normal code flow. Optionally
    a redirect value may be passed to the error handler so users are
    returned to a different view than the one requested in addition to the
    error message.
    #. RECOVERABLE: Generic API errors which generate a user-facing message
    but drop directly back to the regular code flow.
    All other exceptions bubble the stack as normal unless the ``ignore``
    argument is passed in as ``True``, in which case only unrecognized
    errors are bubbled.
    If the exception is not re-raised, an appropriate wrapper exception
    class indicating the type of exception that was encountered will be
    returned.
    """
    # Must be called from within an except block: the exception being
    # classified is taken from sys.exc_info().
    exc_type, exc_value, exc_traceback = sys.exc_info()
    log_method = getattr(LOG, log_level or "exception")
    force_log = force_log or os.environ.get("HORIZON_TEST_RUN", False)
    force_silence = getattr(exc_value, "silence_logging", False)
    # Because the same exception may travel through this method more than
    # once (if it's re-raised) we may want to treat it differently
    # the second time (e.g. no user messages/logging).
    handled = issubclass(exc_type, HandledException)
    wrap = False
    # Restore our original exception information, but re-wrap it at the end
    if handled:
        exc_type, exc_value, exc_traceback = exc_value.wrapped
        wrap = True
    log_entry = encoding.force_text(exc_value)
    # We trust messages from our own exceptions
    if issubclass(exc_type, HorizonException):
        message = exc_value
    # If the message has a placeholder for the exception, fill it in
    elif message and "%(exc)s" in message:
        message = encoding.force_text(message) % {"exc": log_entry}
    if message:
        message = encoding.force_text(message)
    # Run every matching classifier; a truthy handler return value is the
    # wrapper class handed back to the caller to resume normal code flow.
    for exc_handler in HANDLE_EXC_METHODS:
        if issubclass(exc_type, exc_handler['exc']):
            if exc_handler['set_wrap']:
                wrap = True
            handler = exc_handler['handler']
            ret = handler(request, message, redirect, ignore, escalate,
                          handled, force_silence, force_log,
                          log_method, log_entry, log_level)
            if ret:
                return ret  # return to normal code flow
    # If we've gotten here, time to wrap and/or raise our exception.
    if wrap:
        raise HandledException([exc_type, exc_value, exc_traceback])
    six.reraise(exc_type, exc_value, exc_traceback)
| |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.targets.exclude import Exclude
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.base.exceptions import TaskError
from pants.build_graph.build_graph import BuildGraph
from pants.goal.products import UnionProducts
class ClasspathEntry(object):
  """A single element of a java classpath.

  Wraps the pants-internal path of the element and supports exclude checks.
  """

  def __init__(self, path):
    self._path = path

  @property
  def path(self):
    """The pants internal path of this classpath entry.

    Suitable for use in constructing classpaths for pants executions and pants generated artifacts.

    :rtype: string
    """
    return self._path

  def is_excluded_by(self, excludes):
    """Return `True` if this classpath entry should be excluded given the `excludes` in play.

    Plain (internal) entries carry no artifact coordinate, so they are never excluded.

    :param excludes: The excludes to check this classpath entry against.
    :type excludes: list of :class:`pants.backend.jvm.targets.exclude.Exclude`
    :rtype: bool
    """
    return False

  def __hash__(self):
    return hash(self._path)

  def __eq__(self, other):
    return isinstance(other, ClasspathEntry) and self._path == other.path

  def __ne__(self, other):
    return not self == other

  def __repr__(self):
    return 'ClasspathEntry(path={!r})'.format(self._path)
class ArtifactClasspathEntry(ClasspathEntry):
  """A classpath entry for a resolved 3rdparty artifact."""

  def __init__(self, path, coordinate, cache_path):
    super(ArtifactClasspathEntry, self).__init__(path)
    self._coordinate = coordinate
    self._cache_path = cache_path

  @property
  def coordinate(self):
    """The maven coordinate that was used to resolve this entry's artifact.

    :rtype: :class:`pants.backend.jvm.jar_dependency_utils.M2Coordinate`
    """
    return self._coordinate

  @property
  def cache_path(self):
    """The external cache path of this classpath entry.

    For example, the `~/.m2/repository` or `~/.ivy2/cache` location of the resolved artifact for
    maven and ivy resolvers respectively.

    Suitable for use in constructing classpaths for external tools that should not be subject to
    potential volatility in pants own internal caches.

    :rtype: string
    """
    return self._cache_path

  def is_excluded_by(self, excludes):
    # Excluded exactly when any exclude covers this artifact's coordinate.
    return any(_matches_exclude(self._coordinate, exclude) for exclude in excludes)

  def __hash__(self):
    return hash((self.path, self._coordinate, self._cache_path))

  def __eq__(self, other):
    return (isinstance(other, ArtifactClasspathEntry) and
            self.path == other.path and
            self._coordinate == other.coordinate and
            self._cache_path == other.cache_path)

  def __ne__(self, other):
    return not self == other

  def __repr__(self):
    return ('ArtifactClasspathEntry(path={!r}, coordinate={!r}, cache_path={!r})'
            .format(self.path, self._coordinate, self._cache_path))
def _matches_exclude(coordinate, exclude):
if not coordinate.org == exclude.org:
return False
if not exclude.name:
return True
if coordinate.name == exclude.name:
return True
return False
def _not_excluded_filter(excludes):
def not_excluded(path_tuple):
conf, classpath_entry = path_tuple
return not classpath_entry.is_excluded_by(excludes)
return not_excluded
class ClasspathProducts(object):
  """Maps targets to the classpath entries and excludes they contribute."""
  def __init__(self, pants_workdir, classpaths=None, excludes=None):
    """
    :param string pants_workdir: The pants working directory; every classpath entry added must
                                 live under it (see `_validate_classpath_tuples`).
    :param classpaths: Optional pre-populated classpath products (used by `copy`).
    :param excludes: Optional pre-populated exclude products (used by `copy`).
    """
    self._classpaths = classpaths or UnionProducts()
    self._excludes = excludes or UnionProducts()
    self._pants_workdir = pants_workdir
  @staticmethod
  def init_func(pants_workdir):
    # Factory suitable for registering with the task product system.
    return lambda: ClasspathProducts(pants_workdir)
  def copy(self):
    """Returns a copy of this ClasspathProducts.
    Edits to the copy's classpaths or exclude associations will not affect the classpaths or
    excludes in the original. The copy is shallow though, so edits to the copy's product values
    will mutate the original's product values. See `UnionProducts.copy`.
    :rtype: :class:`ClasspathProducts`
    """
    return ClasspathProducts(pants_workdir=self._pants_workdir,
                             classpaths=self._classpaths.copy(),
                             excludes=self._excludes.copy())
  def add_for_targets(self, targets, classpath_elements):
    """Adds classpath path elements to the products of all the provided targets."""
    for target in targets:
      self.add_for_target(target, classpath_elements)
  def add_for_target(self, target, classpath_elements):
    """Adds classpath path elements to the products of the provided target."""
    self._add_elements_for_target(target, self._wrap_path_elements(classpath_elements))
  def add_jars_for_targets(self, targets, conf, resolved_jars):
    """Adds jar classpath elements to the products of the provided targets.
    The resolved jars are added in a way that works with excludes.
    """
    classpath_entries = []
    for jar in resolved_jars:
      # A jar without a resolved path cannot be put on a classpath.
      if not jar.pants_path:
        raise TaskError('Jar: {!s} has no specified path.'.format(jar.coordinate))
      cp_entry = ArtifactClasspathEntry(jar.pants_path, jar.coordinate, jar.cache_path)
      classpath_entries.append((conf, cp_entry))
    for target in targets:
      self._add_elements_for_target(target, classpath_entries)
  def add_excludes_for_targets(self, targets):
    """Add excludes from the provided targets.
    Does not look up transitive excludes.
    :param targets: The targets to add excludes for.
    :type targets: list of :class:`pants.build_graph.target.Target`
    """
    for target in targets:
      self._add_excludes_for_target(target)
  def remove_for_target(self, target, classpath_elements):
    """Removes the given entries for the target."""
    self._classpaths.remove_for_target(target, self._wrap_path_elements(classpath_elements))
  def get_for_target(self, target):
    """Gets the classpath products for the given target.
    Products are returned in order, respecting target excludes.
    :param target: The target to lookup classpath products for.
    :returns: The ordered (conf, path) tuples, with paths being either classfile directories or
      jars.
    :rtype: list of (string, string)
    """
    return self.get_for_targets([target])
  def get_for_targets(self, targets):
    """Gets the classpath products for the given targets.
    Products are returned in order, respecting target excludes.
    :param targets: The targets to lookup classpath products for.
    :returns: The ordered (conf, path) tuples, with paths being either classfile directories or
      jars.
    :rtype: list of (string, string)
    """
    cp_entries = self.get_classpath_entries_for_targets(targets)
    return [(conf, cp_entry.path) for conf, cp_entry in cp_entries]
  def get_classpath_entries_for_targets(self, targets, respect_excludes=True):
    """Gets the classpath products for the given targets.
    Products are returned in order, optionally respecting target excludes.
    :param targets: The targets to lookup classpath products for.
    :param bool respect_excludes: `True` to respect excludes; `False` to ignore them.
    :returns: The ordered (conf, classpath entry) tuples.
    :rtype: list of (string, :class:`ClasspathEntry`)
    """
    classpath_tuples = self._classpaths.get_for_targets(targets)
    if respect_excludes:
      return self._filter_by_excludes(classpath_tuples, targets)
    else:
      return classpath_tuples
  def get_artifact_classpath_entries_for_targets(self, targets, respect_excludes=True):
    """Gets the artifact classpath products for the given targets.
    Products are returned in order, optionally respecting target excludes, and the products only
    include external artifact classpath elements (ie: resolved jars).
    :param targets: The targets to lookup classpath products for.
    :param bool respect_excludes: `True` to respect excludes; `False` to ignore them.
    :returns: The ordered (conf, classpath entry) tuples.
    :rtype: list of (string, :class:`ArtifactClasspathEntry`)
    """
    classpath_tuples = self.get_classpath_entries_for_targets(targets,
                                                              respect_excludes=respect_excludes)
    return [(conf, cp_entry) for conf, cp_entry in classpath_tuples
            if isinstance(cp_entry, ArtifactClasspathEntry)]
  def get_internal_classpath_entries_for_targets(self, targets, respect_excludes=True):
    """Gets the internal classpath products for the given targets.
    Products are returned in order, optionally respecting target excludes, and the products only
    include internal artifact classpath elements (ie: no resolved jars).
    :param targets: The targets to lookup classpath products for.
    :param bool respect_excludes: `True` to respect excludes; `False` to ignore them.
    :returns: The ordered (conf, classpath entry) tuples.
    :rtype: list of (string, :class:`ClasspathEntry`)
    """
    classpath_tuples = self.get_classpath_entries_for_targets(targets,
                                                              respect_excludes=respect_excludes)
    return [(conf, cp_entry) for conf, cp_entry in classpath_tuples
            if not isinstance(cp_entry, ArtifactClasspathEntry)]
  def _filter_by_excludes(self, classpath_tuples, root_targets):
    # Excludes are always applied transitively, so regardless of whether a transitive
    # set of targets was included here, their closure must be included.
    closure = BuildGraph.closure(root_targets, bfs=True)
    excludes = self._excludes.get_for_targets(closure)
    # NOTE(review): under python 3 `filter` returns a lazy iterator, not a
    # list -- confirm callers only iterate the result once.
    return filter(_not_excluded_filter(excludes), classpath_tuples)
  def _add_excludes_for_target(self, target):
    # An exported target implicitly excludes its own published artifact so that
    # locally built classfiles win over a previously published jar.
    if target.is_exported:
      self._excludes.add_for_target(target, [Exclude(target.provides.org,
                                                     target.provides.name)])
    if isinstance(target, JvmTarget) and target.excludes:
      self._excludes.add_for_target(target, target.excludes)
  def _wrap_path_elements(self, classpath_elements):
    # Normalize raw (conf, path) tuples into (conf, ClasspathEntry) tuples.
    return [(element[0], ClasspathEntry(element[1])) for element in classpath_elements]
  def _add_elements_for_target(self, target, elements):
    self._validate_classpath_tuples(elements, target)
    self._classpaths.add_for_target(target, elements)
  def _validate_classpath_tuples(self, classpath, target):
    """Validates that all files are located within the working directory, to simplify relativization.
    :param classpath: The list of classpath tuples. Each tuple is a 2-tuple of ivy_conf and
      ClasspathEntry.
    :param target: The target that the classpath tuple is being registered for.
    :raises: `TaskError` when the path is outside the work directory
    """
    for classpath_tuple in classpath:
      conf, classpath_entry = classpath_tuple
      path = classpath_entry.path
      # A relative path that escapes the workdir starts with os.pardir ('..').
      if os.path.relpath(path, self._pants_workdir).startswith(os.pardir):
        raise TaskError(
          'Classpath entry {} for target {} is located outside the working directory "{}".'
          .format(path, target.address.spec, self._pants_workdir))
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.common import constants as const
from neutron.common import exceptions as n_exc
from neutron.extensions import l3
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.notifiers import nova
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as pconst
from oslo.config import cfg
from gbpservice.common import utils
from gbpservice.neutron.extensions import servicechain as sc_ext
from gbpservice.neutron.services.grouppolicy.common import exceptions as exc
LOG = logging.getLogger(__name__)
class LocalAPI(object):
    """API for interacting with the neutron Plugins directly."""
    @property
    def _nova_notifier(self):
        # NOTE(review): constructs a new nova Notifier on every access;
        # presumably cheap, but confirm if callers hit this in a tight loop.
        return nova.Notifier()
    @property
    def _core_plugin(self):
        # REVISIT(rkukura): Need initialization method after all
        # plugins are loaded to grab and store plugin.
        return manager.NeutronManager.get_plugin()
    @property
    def _l3_plugin(self):
        # REVISIT(rkukura): Need initialization method after all
        # plugins are loaded to grab and store plugin.
        plugins = manager.NeutronManager.get_service_plugins()
        l3_plugin = plugins.get(pconst.L3_ROUTER_NAT)
        if not l3_plugin:
            # A deployment without L3 routing cannot function; fail loudly.
            LOG.error(_("No L3 router service plugin found."))
            raise exc.GroupPolicyDeploymentError()
        return l3_plugin
    @property
    def _servicechain_plugin(self):
        # REVISIT(rkukura): Need initialization method after all
        # plugins are loaded to grab and store plugin.
        plugins = manager.NeutronManager.get_service_plugins()
        servicechain_plugin = plugins.get(pconst.SERVICECHAIN)
        if not servicechain_plugin:
            LOG.error(_("No Servicechain service plugin found."))
            raise exc.GroupPolicyDeploymentError()
        return servicechain_plugin
@property
def _dhcp_agent_notifier(self):
# REVISIT(rkukura): Need initialization method after all
# plugins are loaded to grab and store notifier.
if not self._cached_agent_notifier:
agent_notifiers = getattr(self._core_plugin, 'agent_notifiers', {})
self._cached_agent_notifier = (
agent_notifiers.get(const.AGENT_TYPE_DHCP) or
dhcp_rpc_agent_api.DhcpAgentNotifyAPI())
return self._cached_agent_notifier
    def _create_resource(self, plugin, context, resource, attrs):
        """Create a <resource> via *plugin*, emitting nova/DHCP notifications."""
        # REVISIT(rkukura): Do create.start notification?
        # REVISIT(rkukura): Check authorization?
        # REVISIT(rkukura): Do quota?
        with utils.clean_session(context.session):
            # Dispatch dynamically, e.g. resource='port' -> plugin.create_port.
            action = 'create_' + resource
            obj_creator = getattr(plugin, action)
            obj = obj_creator(context, {resource: attrs})
            self._nova_notifier.send_network_change(action, {},
                                                    {resource: obj})
            # REVISIT(rkukura): Do create.end notification?
            if cfg.CONF.dhcp_agent_notification:
                self._dhcp_agent_notifier.notify(context,
                                                 {resource: obj},
                                                 resource + '.create.end')
        return obj
    def _update_resource(self, plugin, context, resource, resource_id, attrs):
        """Update a <resource> via *plugin*, emitting nova/DHCP notifications."""
        # REVISIT(rkukura): Do update.start notification?
        # REVISIT(rkukura): Check authorization?
        with utils.clean_session(context.session):
            obj_getter = getattr(plugin, 'get_' + resource)
            # Fetch the pre-update state so nova can diff old vs. new.
            orig_obj = obj_getter(context, resource_id)
            action = 'update_' + resource
            obj_updater = getattr(plugin, action)
            obj = obj_updater(context, resource_id, {resource: attrs})
            self._nova_notifier.send_network_change(action, orig_obj,
                                                    {resource: obj})
            # REVISIT(rkukura): Do update.end notification?
            if cfg.CONF.dhcp_agent_notification:
                self._dhcp_agent_notifier.notify(context,
                                                 {resource: obj},
                                                 resource + '.update.end')
        return obj
    def _delete_resource(self, plugin, context, resource, resource_id):
        """Delete a <resource> via *plugin*, emitting nova/DHCP notifications."""
        # REVISIT(rkukura): Do delete.start notification?
        # REVISIT(rkukura): Check authorization?
        with utils.clean_session(context.session):
            obj_getter = getattr(plugin, 'get_' + resource)
            # Fetch the object first: the notifications need the deleted state.
            obj = obj_getter(context, resource_id)
            action = 'delete_' + resource
            obj_deleter = getattr(plugin, action)
            obj_deleter(context, resource_id)
            self._nova_notifier.send_network_change(action, {},
                                                    {resource: obj})
            # REVISIT(rkukura): Do delete.end notification?
            if cfg.CONF.dhcp_agent_notification:
                self._dhcp_agent_notifier.notify(context,
                                                 {resource: obj},
                                                 resource + '.delete.end')
    def _get_resource(self, plugin, context, resource, resource_id):
        """Fetch a single <resource> by id via *plugin*."""
        with utils.clean_session(context.session):
            obj_getter = getattr(plugin, 'get_' + resource)
            obj = obj_getter(context, resource_id)
        return obj
    def _get_resources(self, plugin, context, resource, filters=None):
        """List <resource>s matching *filters* via *plugin*."""
        # NOTE(review): naive pluralization ('get_' + resource + 's') works for
        # every resource used in this module but would break for irregular names.
        with utils.clean_session(context.session):
            obj_getter = getattr(plugin, 'get_' + resource + 's')
            obj = obj_getter(context, filters)
        return obj
    # The following methods perform the necessary subset of
    # functionality from neutron.api.v2.base.Controller.
    #
    # REVISIT(rkukura): Can we just use the WSGI Controller? Using
    # neutronclient is also a possibility, but presents significant
    # issues to unit testing as well as overhead and failure modes.
    # Port helpers: bind the generic CRUD helpers above to the core
    # plugin's 'port' resource.
    def _get_port(self, plugin_context, port_id):
        return self._get_resource(self._core_plugin, plugin_context, 'port',
                                  port_id)
    def _get_ports(self, plugin_context, filters=None):
        filters = filters or {}
        return self._get_resources(self._core_plugin, plugin_context, 'port',
                                   filters)
    def _create_port(self, plugin_context, attrs):
        return self._create_resource(self._core_plugin, plugin_context, 'port',
                                     attrs)
    def _update_port(self, plugin_context, port_id, attrs):
        return self._update_resource(self._core_plugin, plugin_context, 'port',
                                     port_id, attrs)
    def _delete_port(self, plugin_context, port_id):
        try:
            self._delete_resource(self._core_plugin,
                                  plugin_context, 'port', port_id)
        except n_exc.PortNotFound:
            # Deletion is idempotent: a concurrent delete is not an error.
            LOG.warn(_('Port %s already deleted'), port_id)
    # Subnet helpers.
    def _get_subnet(self, plugin_context, subnet_id):
        return self._get_resource(self._core_plugin, plugin_context, 'subnet',
                                  subnet_id)
    def _get_subnets(self, plugin_context, filters=None):
        filters = filters or {}
        return self._get_resources(self._core_plugin, plugin_context, 'subnet',
                                   filters)
    def _create_subnet(self, plugin_context, attrs):
        return self._create_resource(self._core_plugin, plugin_context,
                                     'subnet', attrs)
    def _update_subnet(self, plugin_context, subnet_id, attrs):
        return self._update_resource(self._core_plugin, plugin_context,
                                     'subnet', subnet_id, attrs)
    def _delete_subnet(self, plugin_context, subnet_id):
        try:
            self._delete_resource(self._core_plugin, plugin_context, 'subnet',
                                  subnet_id)
        except n_exc.SubnetNotFound:
            # Deletion is idempotent: a concurrent delete is not an error.
            LOG.warn(_('Subnet %s already deleted'), subnet_id)
    # Network helpers.
    def _get_network(self, plugin_context, network_id):
        return self._get_resource(self._core_plugin, plugin_context, 'network',
                                  network_id)
    def _get_networks(self, plugin_context, filters=None):
        filters = filters or {}
        return self._get_resources(
            self._core_plugin, plugin_context, 'network', filters)
    def _create_network(self, plugin_context, attrs):
        return self._create_resource(self._core_plugin, plugin_context,
                                     'network', attrs)
    def _delete_network(self, plugin_context, network_id):
        try:
            self._delete_resource(self._core_plugin, plugin_context,
                                  'network', network_id)
        except n_exc.NetworkNotFound:
            LOG.warn(_('Network %s already deleted'), network_id)
    def _get_router(self, plugin_context, router_id):
        "Fetch a single router by id via the L3 plugin."
        return self._get_resource(self._l3_plugin, plugin_context, 'router',
                                  router_id)
    def _get_routers(self, plugin_context, filters=None):
        "List routers via the L3 plugin, optionally filtered."
        filters = filters or {}
        return self._get_resources(self._l3_plugin, plugin_context, 'router',
                                   filters)
    def _create_router(self, plugin_context, attrs):
        "Create a router via the L3 plugin."
        return self._create_resource(self._l3_plugin, plugin_context, 'router',
                                     attrs)
    def _update_router(self, plugin_context, router_id, attrs):
        "Update an existing router via the L3 plugin."
        return self._update_resource(self._l3_plugin, plugin_context, 'router',
                                     router_id, attrs)
    def _add_router_interface(self, plugin_context, router_id, interface_info):
        "Attach an interface (port or subnet) to a router via the L3 plugin."
        self._l3_plugin.add_router_interface(plugin_context,
                                             router_id, interface_info)
    def _remove_router_interface(self, plugin_context, router_id,
                                 interface_info):
        "Detach an interface (port or subnet) from a router."
        self._l3_plugin.remove_router_interface(plugin_context, router_id,
                                                interface_info)
    def _add_router_gw_interface(self, plugin_context, router_id, gw_info):
        "Set the external gateway of a router; returns the updated router."
        return self._l3_plugin.update_router(
            plugin_context, router_id,
            {'router': {'external_gateway_info': gw_info}})
    def _remove_router_gw_interface(self, plugin_context, router_id,
                                    interface_info):
        # Clears the external gateway unconditionally; 'interface_info' is
        # accepted for signature symmetry with _add_router_gw_interface but
        # is intentionally unused.
        self._l3_plugin.update_router(
            plugin_context, router_id,
            {'router': {'external_gateway_info': None}})
    def _delete_router(self, plugin_context, router_id):
        "Delete a router; an already-deleted router is logged and ignored."
        try:
            self._delete_resource(self._l3_plugin, plugin_context, 'router',
                                  router_id)
        except l3.RouterNotFound:
            LOG.warn(_('Router %s already deleted'), router_id)
    def _get_sg(self, plugin_context, sg_id):
        "Fetch a single security group by id via the core plugin."
        return self._get_resource(
            self._core_plugin, plugin_context, 'security_group', sg_id)
    def _get_sgs(self, plugin_context, filters=None):
        "List security groups via the core plugin, optionally filtered."
        filters = filters or {}
        return self._get_resources(
            self._core_plugin, plugin_context, 'security_group', filters)
    def _create_sg(self, plugin_context, attrs):
        "Create a security group via the core plugin."
        return self._create_resource(self._core_plugin, plugin_context,
                                     'security_group', attrs)
    def _update_sg(self, plugin_context, sg_id, attrs):
        "Update an existing security group via the core plugin."
        return self._update_resource(self._core_plugin, plugin_context,
                                     'security_group', sg_id, attrs)
    def _delete_sg(self, plugin_context, sg_id):
        "Delete a security group; an already-deleted group is ignored."
        try:
            self._delete_resource(self._core_plugin, plugin_context,
                                  'security_group', sg_id)
        except ext_sg.SecurityGroupNotFound:
            LOG.warn(_('Security Group %s already deleted'), sg_id)
    def _get_sg_rule(self, plugin_context, sg_rule_id):
        "Fetch a single security group rule by id via the core plugin."
        return self._get_resource(
            self._core_plugin, plugin_context, 'security_group_rule',
            sg_rule_id)
    def _get_sg_rules(self, plugin_context, filters=None):
        "List security group rules via the core plugin, optionally filtered."
        filters = filters or {}
        return self._get_resources(
            self._core_plugin, plugin_context, 'security_group_rule', filters)
def _create_sg_rule(self, plugin_context, attrs):
try:
return self._create_resource(self._core_plugin, plugin_context,
'security_group_rule', attrs)
except ext_sg.SecurityGroupRuleExists as ex:
LOG.warn(_('Security Group already exists %s'), ex.message)
return
    def _update_sg_rule(self, plugin_context, sg_rule_id, attrs):
        "Update an existing security group rule via the core plugin."
        return self._update_resource(self._core_plugin, plugin_context,
                                     'security_group_rule', sg_rule_id,
                                     attrs)
    def _delete_sg_rule(self, plugin_context, sg_rule_id):
        "Delete a security group rule; an already-deleted rule is ignored."
        try:
            self._delete_resource(self._core_plugin, plugin_context,
                                  'security_group_rule', sg_rule_id)
        except ext_sg.SecurityGroupRuleNotFound:
            LOG.warn(_('Security Group Rule %s already deleted'), sg_rule_id)
    def _get_fip(self, plugin_context, fip_id):
        "Fetch a single floating IP by id via the L3 plugin."
        return self._get_resource(
            self._l3_plugin, plugin_context, 'floatingip', fip_id)
    def _get_fips(self, plugin_context, filters=None):
        "List floating IPs via the L3 plugin, optionally filtered."
        filters = filters or {}
        return self._get_resources(
            self._l3_plugin, plugin_context, 'floatingip', filters)
    def _create_fip(self, plugin_context, attrs):
        "Create a floating IP via the L3 plugin."
        return self._create_resource(self._l3_plugin, plugin_context,
                                     'floatingip', attrs)
    def _update_fip(self, plugin_context, fip_id, attrs):
        "Update an existing floating IP via the L3 plugin."
        return self._update_resource(self._l3_plugin, plugin_context,
                                     'floatingip', fip_id, attrs)
    def _delete_fip(self, plugin_context, fip_id):
        "Delete a floating IP; an already-deleted one is logged and ignored."
        try:
            self._delete_resource(self._l3_plugin, plugin_context,
                                  'floatingip', fip_id)
        except l3.FloatingIPNotFound:
            LOG.warn(_('Floating IP %s Already deleted'), fip_id)
    def _get_servicechain_instance(self, plugin_context, sci_id):
        "Fetch a service chain instance by id via the servicechain plugin."
        return self._get_resource(
            self._servicechain_plugin, plugin_context, 'servicechain_instance',
            sci_id)
    def _get_servicechain_instances(self, plugin_context, filters=None):
        "List service chain instances, optionally filtered."
        filters = filters or {}
        return self._get_resources(
            self._servicechain_plugin, plugin_context, 'servicechain_instance',
            filters)
    def _create_servicechain_instance(self, plugin_context, attrs):
        "Create a service chain instance via the servicechain plugin."
        return self._create_resource(
            self._servicechain_plugin, plugin_context,
            'servicechain_instance', attrs)
    def _update_servicechain_instance(self, plugin_context, sci_id, attrs):
        "Update an existing service chain instance."
        return self._update_resource(self._servicechain_plugin, plugin_context,
                                     'servicechain_instance', sci_id, attrs)
    def _delete_servicechain_instance(self, context, sci_id):
        # NOTE(review): unlike the sibling helpers this takes a driver
        # context (not a plugin context) and dereferences
        # context._plugin_context — confirm callers pass the right object.
        "Delete a service chain instance; already-deleted ones are ignored."
        try:
            self._delete_resource(self._servicechain_plugin,
                                  context._plugin_context,
                                  'servicechain_instance', sci_id)
        except sc_ext.ServiceChainInstanceNotFound:
            # SC could have been already deleted
            LOG.warn(_("servicechain %s already deleted"), sci_id)
    def _get_servicechain_spec(self, plugin_context, scs_id):
        "Fetch a service chain spec by id via the servicechain plugin."
        return self._get_resource(
            self._servicechain_plugin, plugin_context, 'servicechain_spec',
            scs_id)
    def _get_servicechain_specs(self, plugin_context, filters=None):
        "List service chain specs, optionally filtered."
        filters = filters or {}
        return self._get_resources(
            self._servicechain_plugin, plugin_context, 'servicechain_spec',
            filters)
    def _create_servicechain_spec(self, plugin_context, attrs):
        "Create a service chain spec via the servicechain plugin."
        return self._create_resource(
            self._servicechain_plugin, plugin_context,
            'servicechain_spec', attrs)
    def _update_servicechain_spec(self, plugin_context, scs_id, attrs):
        "Update an existing service chain spec."
        return self._update_resource(self._servicechain_plugin, plugin_context,
                                     'servicechain_spec', scs_id, attrs)
    def _delete_servicechain_spec(self, context, scs_id):
        # NOTE(review): takes a driver context and uses
        # context._plugin_context, mirroring _delete_servicechain_instance.
        "Delete a service chain spec; already-deleted specs are ignored."
        try:
            self._delete_resource(self._servicechain_plugin,
                                  context._plugin_context,
                                  'servicechain_spec', scs_id)
        except sc_ext.ServiceChainSpecNotFound:
            # SC could have been already deleted
            LOG.warn(_("servicechain spec %s already deleted"), scs_id)
| |
" Charon: Context handler for saving an entity. "
import logging
import couchdb
from . import constants
from . import utils
class Field(object):
    """Specification of a data field for an entity.

    Subclasses override 'process', 'check_valid' and the html_* helpers
    to implement type-specific conversion, validation and rendering.
    """
    type = 'text'

    def __init__(self, key, title=None, description=None,
                 mandatory=False, editable=True, default=None):
        assert key
        self.key = key
        self.title = title or key.capitalize().replace('_', ' ')
        self.description = description or self.__doc__
        self.mandatory = mandatory  # A non-None value is required.
        self.editable = editable    # Changeable once set?
        self.default = default
        self.none_value = u'None'

    def store(self, saver, data=None, check_only=False):
        """Check, convert and store the field value.
        If 'data' is None, then obtain the value from HTML form parameter.
        If 'check_only' is True, then just do validity checking, no update.
        """
        # Non-editable fields may only be set while the entity is new.
        if not saver.is_new() and not self.editable: return
        logging.debug("Field.store(%s)", data)
        value = self.get(saver, data=data)
        try:
            value = self.process(saver, value)
        # 'except E as name' works on Python 2.6+ and Python 3, unlike
        # the old 'except E, name' comma form.
        except ValueError as msg:
            raise ValueError("field '{0}': {1}".format(self.key, msg))
        if check_only: return
        if self.default is not None and value is None:
            value = self.default
        # Skip the no-op update so 'changed' records real changes only.
        if value == saver.doc.get(self.key):
            logging.debug("Field.store: '%s' value equal", self.key)
            return
        saver.doc[self.key] = value
        saver.changed[self.key] = value

    def get(self, saver, data=None):
        "Obtain the value from data, if given, else from HTML form parameter."
        if data is None:
            value = saver.rqh.get_argument(self.key, default=None)
            if value == self.none_value:
                return None
            else:
                return value
        else:
            try:
                return data[self.key]
            except KeyError:
                # Fall back to the value already stored on the saver.
                return saver.get(self.key)

    def process(self, saver, value):
        """Check validity and return converted to the appropriate type.
        Raise ValueError if there is a problem."""
        self.check_mandatory(saver, value)
        self.check_valid(saver, value)
        # Falsy values ('' etc) are normalized to None.
        return value or None

    def check_mandatory(self, saver, value):
        "Check that a value is provided when required."
        if self.mandatory and value is None:
            raise ValueError('a defined value is mandatory')

    def check_valid(self, saver, value):
        "Check that the value, if provided, is valid."
        pass

    def html_display(self, entity):
        "Return the field value as valid HTML."
        return str(entity.get(self.key) or '-')

    def html_create(self):
        "Return an appropriate HTML input field for a create form."
        return '<input type="text" name="{0}">'.format(self.key)

    def html_edit(self, entity):
        "Return an appropriate HTML input field for an edit form."
        if self.editable:
            return '<input type="text" name="{0}" value="{1}">'.\
                format(self.key, entity.get(self.key) or '')
        else:
            return entity.get(self.key) or '-'
class IdField(Field):
    "The identifier for the entity."
    type = 'identifier'

    def __init__(self, key, title=None, description=None):
        "An identifier is always mandatory and immutable once set."
        super(IdField, self).__init__(key, title=title,
                                      description=description,
                                      mandatory=True, editable=False)

    def check_valid(self, saver, value):
        "Only allow a subset of ordinary ASCII characters."
        logging.debug('IdField.check_valid')
        if constants.ALLOWED_ID_CHARS.match(value):
            return
        raise ValueError('invalid identifier value (disallowed characters)')
class SelectField(Field):
    """Select one of a set of values.

    The value must be one of 'options'; a non-mandatory field also
    accepts the special 'none_value' marker, meaning no value.
    """
    none_value = u'None'

    def __init__(self, key, title=None, description=None,
                 mandatory=False, editable=True, options=None):
        super(SelectField, self).__init__(key, title=title,
                                          description=description,
                                          mandatory=mandatory,
                                          editable=editable)
        # 'options' previously defaulted to a shared mutable list
        # (classic mutable-default pitfall); default to a fresh list.
        self.options = options if options is not None else []

    # NOTE: the 'get' method formerly defined here was a byte-for-byte
    # duplicate of Field.get and is inherited instead.

    def check_valid(self, saver, value):
        "Check that the value, if provided, is valid."
        if value is None or value == self.none_value: return
        if value not in self.options:
            logging.debug("invalid select value: %s", value)
            raise ValueError("invalid value '{0}; not among options for select".
                             format(value))

    def html_create(self):
        "Return the field HTML input field for a create form."
        options = ["<option>{0}</option>".format(o) for o in self.options]
        if not self.mandatory:
            options.insert(0, "<option>{0}</option>".format(self.none_value))
        # Join the option elements; formatting the list object directly
        # would embed its Python repr in the HTML.
        return '<select name="{0}">{1}</select>'.format(self.key,
                                                        ''.join(options))

    def html_edit(self, entity):
        "Return the field HTML input field for an edit form."
        value = entity.get(self.key)
        if self.editable:
            options = []
            if not self.mandatory:
                if value is None:
                    options.append("<option selected>{0}</option>".format(
                        self.none_value))
                else:
                    options.append("<option>{0}</option>".format(
                        self.none_value))
            for option in self.options:
                if value == option:
                    options.append("<option selected>{0}</option>".format(option))
                else:
                    options.append("<option>{0}</option>".format(option))
            # Join the option elements (see html_create).
            return '<select name="{0}">{1}</select>'.format(self.key,
                                                            ''.join(options))
        else:
            return value or '-'
class NameField(Field):
    "The name for the entity, unique if non-null."

    def __init__(self, key, title=None, description=None):
        "A name is optional; it is never required to be set."
        super(NameField, self).__init__(
            key, title=title, description=description, mandatory=False)
class FloatField(Field):
    "A floating point value field."
    type = 'float'

    def __init__(self, key, title=None, description=None,
                 mandatory=False, editable=True, default=None):
        super(FloatField, self).__init__(key,
                                         title=title,
                                         description=description,
                                         mandatory=mandatory,
                                         editable=editable,
                                         default=default)

    def process(self, saver, value):
        "Convert to float; None and the empty string both yield None."
        self.check_mandatory(saver, value)
        if value in (None, ''):
            return None
        return float(value)

    def html_display(self, entity):
        "Return the field value as valid HTML."
        value = entity.get(self.key)
        text = '-' if value is None else str(value)
        return '<span class="number">{0}</span>'.format(text)

    def html_edit(self, entity):
        "Return the field HTML input field for an edit form."
        value = entity.get(self.key)
        if not self.editable:
            return '-' if value is None else str(value)
        if value is None:
            return '<input type="text" name="{0}">'.format(self.key)
        return '<input type="text" name="{0}" value="{1}">'.format(
            self.key, value)
class RangeFloatField(FloatField):
    """A floating point value field, with an allowed range.

    'minimum' and 'maximum' are inclusive bounds; None disables the
    corresponding check.
    """

    def __init__(self, key, minimum=None, maximum=None,
                 title=None, description=None,
                 mandatory=False, editable=True, default=None):
        # 'default' is now forwarded to FloatField; previously it was
        # silently unavailable on range fields.
        super(RangeFloatField, self).__init__(key,
                                              title=title,
                                              description=description,
                                              mandatory=mandatory,
                                              editable=editable,
                                              default=default)
        self.minimum = minimum
        self.maximum = maximum

    def process(self, saver, value):
        "Convert to float and enforce the allowed range."
        value = super(RangeFloatField, self).process(saver, value)
        if value is None:
            return None
        if self.minimum is not None and value < self.minimum:
            raise ValueError('value too low')
        if self.maximum is not None and value > self.maximum:
            raise ValueError('value too high')
        return value
class Saver(object):
    "Context handler defining the fields of the entity and saving the data."
    # Subclasses must set 'doctype' and normally populate 'fields'.
    doctype = None
    fields = []
    field_keys = []
    def __init__(self, doc=None, rqh=None, db=None):
        """Wrap an existing document (doc) or start a new one.
        Exactly one of 'rqh' (request handler) or 'db' must be given;
        raises AttributeError otherwise.
        """
        # Map field key -> Field instance for dispatch in __setitem__.
        self.fields_lookup = dict([(f.key, f) for f in self.fields])
        assert self.doctype
        if rqh is not None:
            self.rqh = rqh
            self.db = rqh.db
            self.current_user = rqh.current_user
        elif db is not None:
            self.db = db
            self.current_user = dict()
        else:
            raise AttributeError('neither db nor rqh given')
        self.doc = doc or dict()
        self.changed = dict()
        if '_id' in self.doc:
            # Existing document: its doctype must match this saver class.
            assert self.doctype == self.doc[constants.DB_DOCTYPE]
        else:
            # New document: assign an IUID and stamp the doctype.
            self.doc['_id'] = utils.get_iuid()
            self.doc[constants.DB_DOCTYPE] = self.doctype
        self.initialize()
    def __enter__(self):
        return self
    def __exit__(self, type, value, tb):
        # On clean exit: finalize, save to CouchDB and write a log entry.
        # A revision conflict is surfaced as IOError.
        if type is not None: return False # No exceptions handled here
        self.finalize()
        try:
            self.db.save(self.doc)
        except couchdb.http.ResourceConflict:
            raise IOError('document revision update conflict')
        utils.log(self.db, self.doc,
                  changed=self.changed,
                  current_user=self.current_user)
    def __setitem__(self, key, value):
        "Update the key/value pair."
        try:
            field = self.fields_lookup[key]
        except KeyError:
            # Not a declared field: apply the subclass's optional
            # 'check_<key>' and 'convert_<key>' hooks, if present.
            try:
                checker = getattr(self, "check_{0}".format(key))
            except AttributeError:
                pass
            else:
                checker(value)
            try:
                converter = getattr(self, "convert_{0}".format(key))
            except AttributeError:
                pass
            else:
                value = converter(value)
            # Skip no-op updates so 'changed' tracks real changes only.
            try:
                if self.doc[key] == value: return
            except KeyError:
                pass
            self.doc[key] = value
            self.changed[key] = value
        else:
            # Declared field: delegate checking/conversion/storage.
            field.store(self, value)
    def __getitem__(self, key):
        return self.doc[key]
    def initialize(self):
        "Perform actions when creating the entity."
        self.doc['created'] = utils.timestamp()
    def is_new(self):
        "Is the entity new, i.e. not previously saved in the database?"
        return '_rev' not in self.doc
    def store(self, data=None, check_only=False):
        """Given the fields, store the data items.
        If data is None, then obtain the value from HTML form parameter.
        If 'check_only' is True, then just do validity checking, no update.
        """
        for field in self.fields:
            field.store(self, data=data, check_only=check_only)
    def finalize(self):
        "Perform any final modifications before saving the entity."
        self.doc['modified'] = utils.timestamp()
    def get(self, key, default=None):
        "Return the stored value for 'key', or 'default' if absent."
        try:
            return self[key]
        except KeyError:
            return default
| |
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from itertools import combinations
from itertools import product
import netaddr
from nailgun import consts
from nailgun import objects
from nailgun.objects.serializers.network_configuration \
import NetworkConfigurationSerializer
from nailgun.objects.serializers.network_configuration \
import NeutronNetworkConfigurationSerializer
from nailgun.objects.serializers.network_configuration \
import NovaNetworkConfigurationSerializer
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.task.helpers import TaskHelper
class NetworkCheck(object):
    def __init__(self, task, data):
        """Collect Network Groups data.

        Serializes the admin network group plus all cluster network
        groups, merges in any overrides from data['networks'] and
        data['networking_parameters'], and prepares the 'result' /
        'err_msgs' accumulators used by the check_* methods.
        Raises NetworkCheckError for an unknown network id in 'data'.
        """
        self.cluster = task.cluster
        self.task = task
        self.data = data
        self.net_man = objects.Cluster.get_network_manager(self.cluster)
        self.net_provider = self.cluster.net_provider
        admin_ng = self.net_man.get_admin_network_group()
        fields = NetworkGroup.__mapper__.columns.keys() + ['meta']
        net = NetworkConfigurationSerializer.serialize_network_group(admin_ng,
                                                                     fields)
        # change Admin name for UI
        net.update(name='admin (PXE)')
        self.networks = [net]
        for ng in self.cluster.network_groups:
            net = NetworkConfigurationSerializer.serialize_network_group(
                ng,
                fields)
            self.networks.append(net)
        # merge with data['networks']
        if 'networks' in data:
            for data_net in data['networks']:
                for net in self.networks:
                    if data_net['id'] == net['id']:
                        if data_net.get('meta'):
                            data_net.pop('meta')
                        net.update(data_net)
                        if data_net.get('name') == 'fuelweb_admin':
                            net.update(name='admin (PXE)')
                        break
                else:
                    # for-else: no serialized network matched this id.
                    raise errors.NetworkCheckError(
                        u"Invalid network ID: {0}".format(data_net['id']))
        # get common networking parameters
        serializer = {'neutron': NeutronNetworkConfigurationSerializer,
                      'nova_network': NovaNetworkConfigurationSerializer}
        self.network_config = serializer[self.net_provider].\
            serialize_network_params(self.cluster)
        self.network_config.update(data.get('networking_parameters', {}))
        self.result = []
        self.err_msgs = []
    def expose_error_messages(self):
        "Report accumulated check errors for this task via TaskHelper."
        TaskHelper.expose_network_check_error_messages(
            self.task,
            self.result,
            self.err_msgs)
    def check_untagged_intersection(self):
        """check if there are untagged networks on the same interface
        (both nova-net and neutron)
        """
        # A network is "untagged" when it has no VLAN and no external /
        # neutron VLAN range metadata.
        netw_untagged = lambda n: (n['vlan_start'] is None) \
            and (not n['meta'].get('ext_net_data')) \
            and (not n['meta'].get('neutron_vlan_range'))
        untagged_nets = dict([(n['id'], n['name']) for n in self.networks
                              if netw_untagged(n)])
        # check if nic have assign only one untagged network
        if len(untagged_nets) >= 2:
            logger.info(
                "Untagged networks found, "
                "checking intersection between them...")
            bond_interfaces = (
                objects.Cluster.get_bond_interfaces_for_all_nodes(
                    self.cluster,
                    untagged_nets.keys()))
            nic_interfaces = (
                objects.Cluster.get_nic_interfaces_for_all_nodes(
                    self.cluster,
                    untagged_nets.keys()))
            found_intersection = []
            all_interfaces = bond_interfaces + nic_interfaces
            for iface in all_interfaces:
                # network name is changed for Admin on UI
                nets = [[ng['name'] for ng in self.networks
                         if n.id == ng['id']][0]
                        for n in iface.assigned_networks_list]
                # More than one untagged network on one interface is an error.
                crossed_nets = set(nets) & set(untagged_nets.values())
                if len(crossed_nets) > 1:
                    err_net_names = ['"{0}"'.format(i)
                                     for i in crossed_nets]
                    found_intersection.append((objects.Node.get_by_mac_or_uid(
                        node_uid=iface.node_id).name,
                        err_net_names))
            if found_intersection:
                nodes_with_errors = [
                    u'{1} networks at node "{0}"'.format(
                        int_node,
                        ", ".join(int_nets)
                    ) for int_node, int_nets in found_intersection]
                self.err_msgs.append(
                    u"Some untagged networks are assigned to the same "
                    u"physical interface. You should assign them to "
                    u"different physical interfaces. Affected:\n{0}".format(
                        "\n".join(nodes_with_errors)))
                self.result.append({"ids": [],
                                    "errors": []})
        self.expose_error_messages()
    def check_network_address_spaces_intersection(self):
        """check intersection of networks address spaces for all networks
        (nova-net)
        """
        # Pairwise CIDR intersection between all networks that have one.
        nets_w_cidr = filter(lambda n: n['cidr'], self.networks)
        for ngs in combinations(nets_w_cidr, 2):
            addrs = [netaddr.IPNetwork(ngs[0]['cidr']).cidr,
                     netaddr.IPNetwork(ngs[1]['cidr']).cidr]
            if self.net_man.is_range_intersection(addrs[0], addrs[1]):
                self.err_msgs.append(
                    u"Address space intersection between "
                    "networks:\n{0}.".format(
                        ", ".join([ngs[0]['name'], ngs[1]['name']])
                    )
                )
                self.result.append({
                    "ids": [int(ngs[0]["id"]), int(ngs[1]["id"])],
                    "errors": ["cidr"]
                })
        # Check for intersection with 'fixed' networks
        fixed_cidr = netaddr.IPNetwork(
            self.network_config['fixed_networks_cidr']).cidr
        for ng in nets_w_cidr:
            if self.net_man.is_range_intersection(
                    fixed_cidr, netaddr.IPNetwork(ng['cidr']).cidr):
                self.err_msgs.append(
                    u"Address space intersection between "
                    "networks:\nfixed, {0}.".format(ng['name'])
                )
                self.result.append({
                    "ids": [int(ng["id"])],
                    "errors": ["cidr"]
                })
        # Check for intersection with floating ranges
        # ('public' is excluded: floating ranges live inside it by design)
        nets_w_cidr = [n for n in self.networks
                       if n.get('cidr') and n['name'] != 'public']
        fl_ranges = [netaddr.IPRange(v[0], v[1])
                     for v in self.network_config['floating_ranges']]
        for net_vs_range in product(nets_w_cidr, fl_ranges):
            cidr = netaddr.IPNetwork(net_vs_range[0]['cidr']).cidr
            if self.net_man.is_range_intersection(cidr, net_vs_range[1]):
                self.err_msgs.append(
                    u"Address space intersection between floating range '{0}'"
                    " and '{1}' network.".format(
                        net_vs_range[1], net_vs_range[0]['name'])
                )
                self.result.append({
                    "ids": [int(net_vs_range[0]["id"])],
                    "errors": ["cidr", "floating_ranges"]
                })
        self.expose_error_messages()
    def check_public_floating_ranges_intersection(self):
        """1. Check intersection of networks address spaces inside
        Public and Floating network
        2. Check that Public Gateway is in Public CIDR
        3. Check that Public IP ranges are in Public CIDR
        (nova-net)
        """
        pub = [ng for ng in self.networks
               if ng['name'] == 'public'][0]
        # Check intersection of networks address spaces inside
        # Public and Floating network
        pub_ranges_err = False
        nets = {
            'public': [netaddr.IPRange(v[0], v[1])
                       for v in pub['ip_ranges']],
            'floating': [netaddr.IPRange(v[0], v[1])
                         for v in self.network_config['floating_ranges']]
        }
        for name, ranges in nets.iteritems():
            ids = [pub['id']] if name == 'public' else []
            for npair in combinations(ranges, 2):
                if self.net_man.is_range_intersection(npair[0], npair[1]):
                    self.err_msgs.append(
                        u"Address space intersection between ranges "
                        u"of {0} network.".format(name)
                    )
                    self.result.append({"ids": ids,
                                        "errors": ["ip_ranges"]})
            for net in ranges:
                # Check intersection of public GW and pub/float IP ranges
                if netaddr.IPAddress(pub['gateway']) in net:
                    self.err_msgs.append(
                        u"Address intersection between "
                        u"public gateway and IP range "
                        u"of {0} network.".format(name)
                    )
                    self.result.append({"ids": ids,
                                        "errors": ["gateway",
                                                   "ip_ranges"]})
                # Check that public IP ranges are in public CIDR
                if name == 'public':
                    # pub_ranges_err ensures this message is added once.
                    if net not in netaddr.IPNetwork(pub['cidr']) \
                            and not pub_ranges_err:
                        pub_ranges_err = True
                        self.err_msgs.append(
                            u"Public gateway and public ranges "
                            u"are not in one CIDR."
                        )
                        self.result.append({"ids": ids,
                                            "errors": ["gateway",
                                                       "ip_ranges"]})
                    self.expose_error_messages()
        # Check intersection of public and floating ranges
        for npair in combinations(nets['public'] + nets['floating'], 2):
            if self.net_man.is_range_intersection(npair[0], npair[1]):
                self.err_msgs.append(
                    u"Address space intersection between range "
                    u"of public network and floating range."
                )
                self.result.append({"ids": [pub['id']],
                                    "errors": ["ip_ranges"]})
        self.expose_error_messages()
    def check_vlan_ids_range_and_intersection(self):
        """1. check intersection of networks VLAN IDs ranges
        2. check networks VLAN ID ranges are in allowed range
        (nova-net)
        """
        # Map: network name -> [first VLAN id, extra VLAN count].
        tagged_nets = dict(
            (n['name'], [int(n['vlan_start']), 0])
            for n in self.networks
            if n['vlan_start'] is not None)
        if self.network_config['fixed_networks_vlan_start']:
            # Fixed networks occupy a contiguous run of VLAN ids.
            tagged_nets['fixed'] = [
                self.network_config['fixed_networks_vlan_start'],
                self.network_config['fixed_networks_amount'] - 1]
        for name, vlan_range in tagged_nets.iteritems():
            # check VLAN ID range against [2-4094]
            if vlan_range[0] < 2 or vlan_range[0] + vlan_range[1] > 4094:
                self.err_msgs.append(
                    u"VLAN ID(s) is out of range for "
                    "{0} network.".format(name)
                )
                self.result.append({"ids": [int(n["id"]) for n in self.networks
                                            if n['name'] == name],
                                    "errors": ["vlan_start"]})
        # Pairwise overlap test between the [start, start+count] ranges.
        for net in combinations(tagged_nets.keys(), 2):
            range1 = tagged_nets[net[0]]
            range2 = tagged_nets[net[1]]
            if range1[0] <= range2[0] + range2[1] \
                    and range2[0] <= range1[0] + range1[1]:
                self.err_msgs.append(
                    u"{0} networks use the same VLAN ID(s). "
                    "You should assign different VLAN IDs "
                    "to every network.".format(", ".join(net)))
                self.result.append({"ids": [int(n["id"])
                                            for n in self.networks
                                            if n['name'] in net],
                                    "errors": ["vlan_start"]})
        self.expose_error_messages()
def check_networks_amount(self):
"""1. check number of fixed networks is one in case of FlatDHCPManager
2. check number of fixed networks fit in fixed CIDR and size of
one fixed network
(nova-net)
"""
netmanager = self.network_config['net_manager']
net_size = int(self.network_config['fixed_network_size'])
net_amount = int(self.network_config['fixed_networks_amount'])
net_cidr = netaddr.IPNetwork(
self.network_config['fixed_networks_cidr'])
if not netmanager == consts.NOVA_NET_MANAGERS.FlatDHCPManager and\
net_size * net_amount > net_cidr.size:
self.err_msgs.append(
u"Number of fixed networks ({0}) doesn't fit into "
u"fixed CIDR ({1}) and size of one fixed network "
u"({2}).".format(net_amount, net_cidr, net_size)
)
self.result.append({"ids": [],
"errors": ["fixed_network_size",
"fixed_networks_amount"]})
self.expose_error_messages()
    def neutron_check_segmentation_ids(self):
        """1. check networks VLAN IDs not in Neutron L2 private VLAN ID range
        for VLAN segmentation only
        2. check networks VLAN IDs should not intersect
        (neutron)
        """
        # Map: network name -> its VLAN tag (tagged networks only).
        tagged_nets = dict((n["name"], n["vlan_start"]) for n in filter(
            lambda n: (n["vlan_start"] is not None), self.networks))
        if tagged_nets:
            if self.task.cluster.network_config.segmentation_type == 'vlan':
                # check networks tags not in Neutron L2 private VLAN ID range
                vrange = self.network_config['vlan_range']
                net_intersect = [name for name, vlan in tagged_nets.iteritems()
                                 if vrange[0] <= vlan <= vrange[1]]
                if net_intersect:
                    nets_with_errors = ", ". \
                        join(net_intersect)
                    err_msg = u"VLAN tags of {0} network(s) intersect with " \
                              u"VLAN ID range defined for Neutron L2. " \
                              u"Networks VLAN tags must not intersect " \
                              u"with Neutron L2 VLAN ID range.". \
                        format(nets_with_errors)
                    raise errors.NetworkCheckError(err_msg)
            # check networks VLAN IDs should not intersect
            # (a tag used by two or more networks is reported once per user)
            net_intersect = [name for name, vlan in tagged_nets.iteritems()
                             if tagged_nets.values().count(vlan) >= 2]
            if net_intersect:
                err_msg = u"{0} networks use the same VLAN tags. " \
                          u"You should assign different VLAN tag " \
                          u"to every network.".format(", ".join(net_intersect))
                raise errors.NetworkCheckError(err_msg)
    def neutron_check_network_address_spaces_intersection(self):
        """Check intersection between address spaces of all networks
        including admin (neutron)
        """
        # check intersection of address ranges
        # between all networks
        for ngs in combinations(self.networks, 2):
            if ngs[0].get('cidr') and ngs[1].get('cidr'):
                cidr1 = netaddr.IPNetwork(ngs[0]['cidr'])
                cidr2 = netaddr.IPNetwork(ngs[1]['cidr'])
                if self.net_man.is_cidr_intersection(cidr1, cidr2):
                    self.err_msgs.append(
                        u"Address space intersection "
                        u"between networks:\n{0}".format(
                            ", ".join([ngs[0]['name'], ngs[1]['name']])
                        )
                    )
                    self.result.append({
                        "ids": [int(ngs[0]["id"]), int(ngs[1]["id"])],
                        "errors": ["cidr"]
                    })
        self.expose_error_messages()
        # check Floating Start and Stop IPs belong to Public CIDR
        public = filter(lambda ng: ng['name'] == 'public', self.networks)[0]
        public_cidr = netaddr.IPNetwork(public['cidr']).cidr
        fl_range = self.network_config['floating_ranges'][0]
        fl_ip_range = netaddr.IPRange(fl_range[0], fl_range[1])
        if fl_ip_range not in public_cidr:
            self.err_msgs.append(
                u"Floating address range {0}:{1} is not in public "
                u"address space {2}.".format(
                    netaddr.IPAddress(fl_range[0]),
                    netaddr.IPAddress(fl_range[1]),
                    public['cidr']
                )
            )
            # Fatal: result is replaced (not appended) and reported now.
            self.result = [{"ids": [int(public["id"])],
                            "errors": ["cidr", "ip_ranges"]}]
            self.expose_error_messages()
        # Check intersection of networks address spaces inside
        # Public network
        ranges = [netaddr.IPRange(v[0], v[1])
                  for v in public['ip_ranges']] + [fl_ip_range]
        public_gw = netaddr.IPAddress(public['gateway'])
        for npair in combinations(ranges, 2):
            if self.net_man.is_range_intersection(npair[0], npair[1]):
                if fl_ip_range in npair:
                    self.err_msgs.append(
                        u"Address space intersection between ranges "
                        u"of public and external network."
                    )
                else:
                    self.err_msgs.append(
                        u"Address space intersection between ranges "
                        u"of public network."
                    )
                self.result.append({"ids": [int(public["id"])],
                                    "errors": ["ip_ranges"]})
        for net in ranges:
            # Check intersection of public GW and public IP ranges
            if public_gw in net:
                self.err_msgs.append(
                    u"Address intersection between public gateway "
                    u"and IP range of public network."
                )
                self.result.append({"ids": [int(public["id"])],
                                    "errors": ["gateway", "ip_ranges"]})
            # Check that public IP ranges are in public CIDR
            if net not in public_cidr:
                self.err_msgs.append(
                    u"Public gateway and public ranges "
                    u"are not in one CIDR."
                )
                self.result.append({"ids": [int(public["id"])],
                                    "errors": ["gateway", "ip_ranges"]})
        self.expose_error_messages()
        # check internal Gateway is in Internal CIDR
        cidr = netaddr.IPNetwork(self.network_config['internal_cidr'])
        gw = netaddr.IPAddress(self.network_config['internal_gateway'])
        if gw not in cidr:
            self.result.append({"ids": [],
                                "name": ["internal"],
                                "errors": ["gateway"]})
            self.err_msgs.append(
                u"Internal gateway {0} is not in internal "
                u"address space {1}.".format(str(gw), str(cidr))
            )
        if self.net_man.is_range_intersection(fl_ip_range, cidr):
            self.result.append({"ids": [],
                                "name": ["internal", "external"],
                                "errors": ["cidr", "ip_ranges"]})
            self.err_msgs.append(
                u"Intersection between internal CIDR and floating range."
            )
        self.expose_error_messages()
    def neutron_check_l3_addresses_not_match_subnet_and_broadcast(self):
        """check virtual l3 network address ranges and gateway don't intersect
        with subnetwork address and broadcast address (neutron)
        """
        # External floating range must not contain the public network's
        # subnet or broadcast address.
        ext_fl = self.network_config['floating_ranges'][0]
        ext_fl_r = netaddr.IPRange(ext_fl[0], ext_fl[1])
        pub = filter(lambda n: n['name'] == 'public', self.networks)[0]
        pub_cidr = netaddr.IPNetwork(pub['cidr'])
        if pub_cidr.network in ext_fl_r or pub_cidr.broadcast in ext_fl_r:
            self.err_msgs.append(
                u"Neutron L3 external floating range [{0}] intersect with "
                u"either subnet address or broadcast address "
                u"of public network.".format(str(ext_fl_r))
            )
            self.result.append({"ids": [],
                                "name": ["external"],
                                "errors": ["ip_ranges"]})
        # Internal gateway must be a usable host address, not the subnet
        # or broadcast address of the internal CIDR.
        int_cidr = netaddr.IPNetwork(self.network_config['internal_cidr']).cidr
        int_gw = netaddr.IPAddress(self.network_config['internal_gateway'])
        if int_gw == int_cidr.network or int_gw == int_cidr.broadcast:
            self.err_msgs.append(
                u"Neutron L3 internal network gateway address is equal to "
                u"either subnet address or broadcast address of the network."
            )
            self.result.append({"ids": [],
                                "name": ["internal"],
                                "errors": ["gateway"]})
        self.expose_error_messages()
    def check_network_classes_exclude_loopback(self):
        """1. check network address space lies inside A,B or C network class
        address space
        2. check network address space doesn't lie inside loopback
        address space
        (both neutron and nova-net)
        """
        for n in self.networks:
            if n.get('cidr'):
                cidr = netaddr.IPNetwork(n['cidr']).cidr
                # 224.0.0.0/3 covers everything above class C
                # (multicast / reserved space).
                if cidr in netaddr.IPNetwork('224.0.0.0/3'):
                    self.err_msgs.append(
                        u"{0} network address space does not belong to "
                        u"A, B, C network classes. It must belong to either "
                        u"A, B or C network class.".format(n["name"])
                    )
                    self.result.append({"ids": [int(n["id"])],
                                        "errors": ["cidr", "ip_ranges"]})
                elif cidr in netaddr.IPNetwork('127.0.0.0/8'):
                    self.err_msgs.append(
                        u"{0} network address space is inside loopback range "
                        u"(127.0.0.0/8). It must have no intersection with "
                        u"loopback range.".format(n["name"])
                    )
                    self.result.append({"ids": [int(n["id"])],
                                        "errors": ["cidr", "ip_ranges"]})
        self.expose_error_messages()
    def check_network_addresses_not_match_subnet_and_broadcast(self):
        """check network address ranges and gateway don't intersect with
        subnetwork address and broadcast address (both neutron and nova-net)

        For every network configured via explicit IP ranges, three checks
        are run against the network's subnet and broadcast addresses:
        gateway equality, per-network IP range overlap, and floating IP
        range overlap.  Failures are collected into ``err_msgs`` and
        ``result`` and finally reported via ``expose_error_messages``.
        """
        for n in self.networks:
            # Only networks described by explicit IP ranges are validated;
            # other notations derive their addresses straight from the CIDR.
            if n['meta']['notation'] == 'ip_ranges':
                cidr = netaddr.IPNetwork(n['cidr']).cidr
                if n.get('gateway'):
                    gw = netaddr.IPAddress(n['gateway'])
                    # The gateway must be a usable host address: neither the
                    # subnet (all-zeros host) nor the broadcast (all-ones).
                    if gw == cidr.network or gw == cidr.broadcast:
                        self.err_msgs.append(
                            u"{0} network gateway address is equal to either "
                            u"subnet address or broadcast address "
                            u"of the network.".format(n["name"])
                        )
                        self.result.append({"ids": [int(n["id"])],
                                            "errors": ["gateway"]})
                if n.get('ip_ranges'):
                    # Each allocation range must not contain the subnet or
                    # broadcast address of its own network.
                    for r in n['ip_ranges']:
                        ipr = netaddr.IPRange(r[0], r[1])
                        if cidr.network in ipr or cidr.broadcast in ipr:
                            self.err_msgs.append(
                                u"{0} network IP range [{1}] intersect with "
                                u"either subnet address or broadcast address "
                                u"of the network.".format(n["name"], str(ipr))
                            )
                            self.result.append({"ids": [int(n["id"])],
                                                "errors": ["ip_ranges"]})
                # NOTE(review): the floating ranges are re-checked against
                # every ip_ranges network's subnet/broadcast addresses, not
                # just the public one -- presumably intentional so overlaps
                # are reported per network; confirm against callers.
                flt_range = self.network_config['floating_ranges']
                for r in flt_range:
                    ipr = netaddr.IPRange(r[0], r[1])
                    if cidr.network in ipr or cidr.broadcast in ipr:
                        self.err_msgs.append(
                            u"{0} network floating IP range [{1}] intersect "
                            u"with either subnet address or broadcast address "
                            u"of the network.".format(n["name"], str(ipr))
                        )
                        self.result.append({"ids": [int(n["id"])],
                                            "errors": ["ip_ranges"]})
        self.expose_error_messages()
def check_bond_slaves_speeds(self):
"""check bond slaves speeds are equal
"""
for node in self.cluster.nodes:
for bond in node.bond_interfaces:
slaves_speed = set(
[slave.current_speed for slave in bond.slaves])
if len(slaves_speed) != 1 or slaves_speed.pop() is None:
warn_msg = u"Node '{0}': interface '{1}' slave NICs " \
u"have different or unrecognized speeds". \
format(node.name, bond.name)
logger.warn(warn_msg)
self.err_msgs.append(warn_msg)
def check_configuration(self):
"""check network configuration parameters
"""
if self.net_provider == consts.CLUSTER_NET_PROVIDERS.neutron:
self.neutron_check_network_address_spaces_intersection()
self.neutron_check_segmentation_ids()
self.neutron_check_l3_addresses_not_match_subnet_and_broadcast()
else:
self.check_public_floating_ranges_intersection()
self.check_network_address_spaces_intersection()
self.check_networks_amount()
self.check_vlan_ids_range_and_intersection()
self.check_network_classes_exclude_loopback()
self.check_network_addresses_not_match_subnet_and_broadcast()
def check_interface_mapping(self):
"""check mapping of networks to NICs
"""
self.check_untagged_intersection()
self.check_bond_slaves_speeds()
return self.err_msgs
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
from datetime import datetime
import os.path
import pkg_resources
import sys
from trac.admin import *
from trac.core import *
from trac.wiki import model
from trac.wiki.api import WikiSystem
from trac.util import read_file
from trac.util.compat import any
from trac.util.datefmt import console_datetime_format, format_datetime, \
                              from_utimestamp, to_utimestamp, utc
from trac.util.text import to_unicode, unicode_quote, unicode_unquote, \
print_table, printout
from trac.util.translation import _
class WikiAdmin(Component):
    """trac-admin command provider for wiki administration."""

    implements(IAdminCommandProvider)

    # IAdminCommandProvider methods

    def get_admin_commands(self):
        """Yield the `wiki ...` trac-admin command descriptors.

        Each tuple is (command, argument help, description,
        tab-completion callback, handler).
        """
        yield ('wiki list', '',
               'List wiki pages',
               None, self._do_list)
        yield ('wiki rename', '<page> <new_name>',
               'Rename wiki page',
               self._complete_page, self._do_rename)
        yield ('wiki remove', '<page>',
               'Remove wiki page',
               self._complete_page, self._do_remove)
        yield ('wiki export', '<page> [file]',
               'Export wiki page to file or stdout',
               self._complete_import_export, self._do_export)
        yield ('wiki import', '<page> [file]',
               'Import wiki page from file or stdin',
               self._complete_import_export, self._do_import)
        yield ('wiki dump', '<directory> [page] [...]',
               """Export wiki pages to files named by title

               Individual wiki page names can be specified after the directory.
               A name ending with a * means that all wiki pages starting with
               that prefix should be dumped. If no name is specified, all wiki
               pages are dumped.""",
               self._complete_dump, self._do_dump)
        yield ('wiki load', '<path> [...]',
               """Import wiki pages from files

               If a given path is a file, it is imported as a page with the
               name of the file. If a path is a directory, all files in that
               directory are imported.""",
               self._complete_load_replace, self._do_load)
        yield ('wiki replace', '<path> [...]',
               """Replace the content of wiki pages from files (DANGEROUS!)

               This command replaces the content of the last version of one
               or more wiki pages with new content. The previous content is
               lost, and no new entry is created in the page history. The
               metadata of the page (time, author) is not changed either.

               If a given path is a file, it is imported as a page with the
               name of the file. If a path is a directory, all files in that
               directory are imported.

               WARNING: This operation results in the loss of the previous
               content and cannot be undone. It may be advisable to backup
               the current content using "wiki dump" beforehand.""",
               self._complete_load_replace, self._do_replace)
        yield ('wiki upgrade', '',
               'Upgrade default wiki pages to current version',
               None, self._do_upgrade)

    def get_wiki_list(self):
        """Return the names of all wiki pages as a list."""
        return list(WikiSystem(self.env).get_pages())

    def export_page(self, page, filename, cursor=None):
        """Write the latest text of `page` to `filename`, or to stdout
        when `filename` is empty.

        :param cursor: optional database cursor to reuse; a fresh one is
                       obtained when not given.
        :raises AdminCommandError: if the page does not exist or the
                                   target file already exists.
        """
        if cursor is None:
            db = self.env.get_db_cnx()
            cursor = db.cursor()
        # Fetch only the most recent version of the page.
        cursor.execute("SELECT text FROM wiki WHERE name=%s "
                       "ORDER BY version DESC LIMIT 1", (page,))
        for text, in cursor:
            break
        else:
            raise AdminCommandError(_("Page '%(page)s' not found", page=page))
        if not filename:
            printout(text)
        else:
            # Refuse to clobber an existing file.
            if os.path.isfile(filename):
                raise AdminCommandError(_("File '%(name)s' exists",
                                          name=filename))
            f = open(filename, 'w')
            try:
                f.write(text.encode('utf-8'))
            finally:
                f.close()

    def import_page(self, filename, title, create_only=None,
                    replace=False):
        """Import a page from `filename` (stdin when empty) as `title`.

        :param create_only: page names that must not be overwritten when
                            they already exist.
        :param replace: overwrite the latest version in place instead of
                        creating a new version.
        :return: `True` if the page was created or updated, `False` if it
                 was skipped (protected by `create_only` or already up to
                 date).
        """
        # Avoid the mutable-default-argument pitfall: normalize here.
        create_only = create_only or []
        if filename:
            if not os.path.isfile(filename):
                raise AdminCommandError(_("'%(name)s' is not a file",
                                          name=filename))
            data = read_file(filename)
        else:
            data = sys.stdin.read()
        data = to_unicode(data, 'utf-8')
        # Mutable cell so the nested transaction function can report back.
        result = [True]

        @self.env.with_transaction()
        def do_import(db):
            cursor = db.cursor()
            # Make sure we don't insert the exact same page twice
            cursor.execute("SELECT text FROM wiki WHERE name=%s "
                           "ORDER BY version DESC LIMIT 1",
                           (title,))
            old = list(cursor)
            if old and title in create_only:
                printout(_('  %(title)s already exists', title=title))
                result[0] = False
                return
            if old and data == old[0][0]:
                printout(_('  %(title)s is already up to date', title=title))
                result[0] = False
                return
            if replace and old:
                # Overwrite the latest version in place (no new history
                # entry, metadata untouched).
                cursor.execute("UPDATE wiki SET text=%s WHERE name=%s "
                               "  AND version=(SELECT max(version) FROM wiki "
                               "               WHERE name=%s)",
                               (data, title, title))
            else:
                cursor.execute("INSERT INTO wiki(version,name,time,author,"
                               "                 ipnr,text) "
                               "SELECT 1+COALESCE(max(version),0),%s,%s,"
                               "       'trac','127.0.0.1',%s FROM wiki "
                               "WHERE name=%s",
                               (title, to_utimestamp(datetime.now(utc)), data,
                                title))
            if not old:
                # Invalidate the cached page list for newly created pages.
                del WikiSystem(self.env).pages
        return result[0]

    def load_pages(self, dir, ignore=None, create_only=None, replace=False):
        """Import every file in `dir` as a wiki page named after the file.

        :param ignore: file names to skip entirely.
        :param create_only: page names that must not be overwritten.
        :param replace: passed through to `import_page`.
        """
        # Normalize mutable defaults once, outside the transaction.
        ignore = ignore or []
        create_only = create_only or []

        @self.env.with_transaction()
        def do_load(db):
            for page in os.listdir(dir):
                if page in ignore:
                    continue
                filename = os.path.join(dir, page)
                # File names are URL-quoted page names; decode them back.
                page = unicode_unquote(page.encode('utf-8'))
                if os.path.isfile(filename):
                    if self.import_page(filename, page, create_only, replace):
                        printout(_("  %(page)s imported from %(filename)s",
                                   filename=filename, page=page))

    def _complete_page(self, args):
        """Tab-completion: page name for single-page commands."""
        if len(args) == 1:
            return self.get_wiki_list()

    def _complete_import_export(self, args):
        """Tab-completion: page name, then file-system path."""
        if len(args) == 1:
            return self.get_wiki_list()
        elif len(args) == 2:
            return get_dir_list(args[-1])

    def _complete_dump(self, args):
        """Tab-completion: target directory, then page names."""
        if len(args) == 1:
            return get_dir_list(args[-1], dirs_only=True)
        elif len(args) >= 2:
            return self.get_wiki_list()

    def _complete_load_replace(self, args):
        """Tab-completion: file-system paths."""
        if len(args) >= 1:
            return get_dir_list(args[-1])

    def _do_list(self):
        """Print a table of all pages with edit count and last-modified."""
        db = self.env.get_db_cnx()
        cursor = db.cursor()
        cursor.execute("SELECT name, max(version), max(time) "
                       "FROM wiki GROUP BY name ORDER BY name")
        print_table([(r[0], int(r[1]),
                      format_datetime(from_utimestamp(r[2]),
                                      console_datetime_format))
                     for r in cursor],
                    [_('Title'), _('Edits'), _('Modified')])

    def _do_rename(self, name, new_name):
        """Rename page `name` to `new_name`, refusing collisions."""
        if new_name == name:
            return
        if not new_name:
            raise AdminCommandError(_('A new name is mandatory for a rename.'))

        @self.env.with_transaction()
        def do_rename(db):
            if model.WikiPage(self.env, new_name, db=db).exists:
                raise AdminCommandError(_('The page %(name)s already exists.',
                                          name=new_name))
            page = model.WikiPage(self.env, name, db=db)
            page.rename(new_name)

    def _do_remove(self, name):
        """Delete a page, or all pages with a given prefix when `name`
        ends with `*`."""
        @self.env.with_transaction()
        def do_transaction(db):
            if name.endswith('*'):
                # `*` alone means every page (prefix becomes None).
                pages = list(WikiSystem(self.env).get_pages(name.rstrip('*')
                                                            or None))
                for p in pages:
                    page = model.WikiPage(self.env, p, db=db)
                    page.delete()
                print_table(((p,) for p in pages), [_('Deleted pages')])
            else:
                page = model.WikiPage(self.env, name, db=db)
                page.delete()

    def _do_export(self, page, filename=None):
        self.export_page(page, filename)

    def _do_import(self, page, filename=None):
        self.import_page(filename, page)

    def _do_dump(self, directory, *names):
        """Export the selected pages (all by default) into `directory`."""
        if not names:
            names = ['*']
        pages = self.get_wiki_list()
        if not os.path.isdir(directory):
            if not os.path.exists(directory):
                os.mkdir(directory)
            else:
                raise AdminCommandError(_("'%(name)s' is not a directory",
                                          name=directory))
        db = self.env.get_db_cnx()
        cursor = db.cursor()
        for p in pages:
            if any(p == name or (name.endswith('*')
                                 and p.startswith(name[:-1]))
                   for name in names):
                # Quote the page name so it is a safe file name.
                dst = os.path.join(directory, unicode_quote(p, ''))
                printout('  %s => %s' % (p, dst))
                self.export_page(p, dst, cursor)

    def _load_or_replace(self, paths, replace):
        """Shared implementation of `wiki load` and `wiki replace`."""
        @self.env.with_transaction()
        def do_transaction(db):
            for path in paths:
                if os.path.isdir(path):
                    self.load_pages(path, replace=replace)
                else:
                    page = os.path.basename(path)
                    page = unicode_unquote(page.encode('utf-8'))
                    if self.import_page(path, page, replace=replace):
                        printout(_("  %(page)s imported from %(filename)s",
                                   filename=path, page=page))

    def _do_load(self, *paths):
        self._load_or_replace(paths, replace=False)

    def _do_replace(self, *paths):
        self._load_or_replace(paths, replace=True)

    def _do_upgrade(self):
        """Reload the bundled default pages, never touching WikiStart and
        creating InterMapTxt only if it does not exist yet."""
        self.load_pages(pkg_resources.resource_filename('trac.wiki',
                                                        'default-pages'),
                        ignore=['WikiStart', 'checkwiki.py'],
                        create_only=['InterMapTxt'])
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class wisite_translationinternalip_binding(base_resource) :
    """ Binding class showing the translationinternalip that can be bound to wisite.
    """
    def __init__(self) :
        # Backing fields for the attribute properties below; populated either
        # by the caller (before add/delete) or by response deserialization.
        self._translationinternalip = ""
        self._accesstype = ""
        self._translationinternalport = 0
        self._translationexternalip = ""
        self._translationexternalport = 0
        self._sitepath = ""
        # Number of bound resources, filled in by count-style requests.
        self.___count = 0

    @property
    def sitepath(self) :
        ur"""Path to the Web Interface site.<br/>Minimum length = 1<br/>Maximum length = 250.
        """
        try :
            return self._sitepath
        except Exception as e:
            raise e

    @sitepath.setter
    def sitepath(self, sitepath) :
        ur"""Path to the Web Interface site.<br/>Minimum length = 1<br/>Maximum length = 250
        """
        try :
            self._sitepath = sitepath
        except Exception as e:
            raise e

    @property
    def accesstype(self) :
        ur"""Type of access to the XenApp or XenDesktop server.
        Available settings function as follows:
        * User Device - Clients can use the translated address of the mapping entry to connect to the XenApp or XenDesktop server.
        * Gateway - Access Gateway can use the translated address of the mapping entry to connect to the XenApp or XenDesktop server.
        * User Device and Gateway - Both clients and Access Gateway can use the translated address of the mapping entry to connect to the XenApp or XenDesktop server.<br/>Default value: UserDevice<br/>Possible values = UserDevice, Gateway, UserDeviceAndGateway.
        """
        try :
            return self._accesstype
        except Exception as e:
            raise e

    @accesstype.setter
    def accesstype(self, accesstype) :
        ur"""Type of access to the XenApp or XenDesktop server.
        Available settings function as follows:
        * User Device - Clients can use the translated address of the mapping entry to connect to the XenApp or XenDesktop server.
        * Gateway - Access Gateway can use the translated address of the mapping entry to connect to the XenApp or XenDesktop server.
        * User Device and Gateway - Both clients and Access Gateway can use the translated address of the mapping entry to connect to the XenApp or XenDesktop server.<br/>Default value: UserDevice<br/>Possible values = UserDevice, Gateway, UserDeviceAndGateway
        """
        try :
            self._accesstype = accesstype
        except Exception as e:
            raise e

    @property
    def translationexternalport(self) :
        ur"""External port number associated with the server's port number.<br/>Range 1 - 65535.
        """
        try :
            return self._translationexternalport
        except Exception as e:
            raise e

    @translationexternalport.setter
    def translationexternalport(self, translationexternalport) :
        ur"""External port number associated with the server's port number.<br/>Range 1 - 65535
        """
        try :
            self._translationexternalport = translationexternalport
        except Exception as e:
            raise e

    @property
    def translationinternalip(self) :
        ur"""IP address of the server for which you want to associate an external IP address. (Clients access the server through the associated external address and port.).<br/>Default value: 0.
        """
        try :
            return self._translationinternalip
        except Exception as e:
            raise e

    @translationinternalip.setter
    def translationinternalip(self, translationinternalip) :
        ur"""IP address of the server for which you want to associate an external IP address. (Clients access the server through the associated external address and port.).<br/>Default value: 0
        """
        try :
            self._translationinternalip = translationinternalip
        except Exception as e:
            raise e

    @property
    def translationexternalip(self) :
        ur"""External IP address associated with server's IP address.
        """
        try :
            return self._translationexternalip
        except Exception as e:
            raise e

    @translationexternalip.setter
    def translationexternalip(self, translationexternalip) :
        ur"""External IP address associated with server's IP address.
        """
        try :
            self._translationexternalip = translationexternalip
        except Exception as e:
            raise e

    @property
    def translationinternalport(self) :
        ur"""Port number of the server for which you want to associate an external port. (Clients access the server through the associated external address and port.).<br/>Range 1 - 65535.
        """
        try :
            return self._translationinternalport
        except Exception as e:
            raise e

    @translationinternalport.setter
    def translationinternalport(self, translationinternalport) :
        ur"""Port number of the server for which you want to associate an external port. (Clients access the server through the associated external address and port.).<br/>Range 1 - 65535
        """
        try :
            self._translationinternalport = translationinternalport
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        ur""" converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(wisite_translationinternalip_binding_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # Error code 444 means the session is no longer valid; clear
                # it so the next request re-authenticates.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.wisite_translationinternalip_binding
        except Exception as e :
            raise e

    def _get_object_name(self) :
        ur""" Returns the value of object identifier argument
        """
        try :
            # The site path uniquely identifies this binding's parent wisite.
            if self.sitepath is not None :
                return str(self.sitepath)
            return None
        except Exception as e :
            raise e

    @classmethod
    def add(cls, client, resource) :
        # Accepts either a single resource or a list; lists are sent as one
        # bulk update request.
        try :
            if resource and type(resource) is not list :
                updateresource = wisite_translationinternalip_binding()
                updateresource.sitepath = resource.sitepath
                updateresource.translationinternalip = resource.translationinternalip
                updateresource.translationinternalport = resource.translationinternalport
                updateresource.translationexternalip = resource.translationexternalip
                updateresource.translationexternalport = resource.translationexternalport
                updateresource.accesstype = resource.accesstype
                return updateresource.update_resource(client)
            else :
                if resource and len(resource) > 0 :
                    updateresources = [wisite_translationinternalip_binding() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        updateresources[i].sitepath = resource[i].sitepath
                        updateresources[i].translationinternalip = resource[i].translationinternalip
                        updateresources[i].translationinternalport = resource[i].translationinternalport
                        updateresources[i].translationexternalip = resource[i].translationexternalip
                        updateresources[i].translationexternalport = resource[i].translationexternalport
                        updateresources[i].accesstype = resource[i].accesstype
                    return cls.update_bulk_request(client, updateresources)
        except Exception as e :
            raise e

    @classmethod
    def delete(cls, client, resource) :
        # Accepts either a single resource or a list; lists are sent as one
        # bulk delete request.  Note: accesstype is not part of the delete key.
        try :
            if resource and type(resource) is not list :
                deleteresource = wisite_translationinternalip_binding()
                deleteresource.sitepath = resource.sitepath
                deleteresource.translationinternalip = resource.translationinternalip
                deleteresource.translationinternalport = resource.translationinternalport
                deleteresource.translationexternalip = resource.translationexternalip
                deleteresource.translationexternalport = resource.translationexternalport
                return deleteresource.delete_resource(client)
            else :
                if resource and len(resource) > 0 :
                    deleteresources = [wisite_translationinternalip_binding() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        deleteresources[i].sitepath = resource[i].sitepath
                        deleteresources[i].translationinternalip = resource[i].translationinternalip
                        deleteresources[i].translationinternalport = resource[i].translationinternalport
                        deleteresources[i].translationexternalip = resource[i].translationexternalip
                        deleteresources[i].translationexternalport = resource[i].translationexternalport
                    return cls.delete_bulk_request(client, deleteresources)
        except Exception as e :
            raise e

    @classmethod
    def get(cls, service, sitepath) :
        ur""" Use this API to fetch wisite_translationinternalip_binding resources.
        """
        try :
            obj = wisite_translationinternalip_binding()
            obj.sitepath = sitepath
            response = obj.get_resources(service)
            return response
        except Exception as e:
            raise e

    @classmethod
    def get_filtered(cls, service, sitepath, filter_) :
        ur""" Use this API to fetch filtered set of wisite_translationinternalip_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = wisite_translationinternalip_binding()
            obj.sitepath = sitepath
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            return response
        except Exception as e:
            raise e

    @classmethod
    def count(cls, service, sitepath) :
        ur""" Use this API to count wisite_translationinternalip_binding resources configued on NetScaler.
        """
        try :
            obj = wisite_translationinternalip_binding()
            obj.sitepath = sitepath
            option_ = options()
            option_.count = True
            response = obj.get_resources(service, option_)
            # The count is carried on the first element of the response.
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    @classmethod
    def count_filtered(cls, service, sitepath, filter_) :
        ur""" Use this API to count the filtered set of wisite_translationinternalip_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = wisite_translationinternalip_binding()
            obj.sitepath = sitepath
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            # The count is carried on the first element of the response.
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    # Enumerations of the string constants accepted by the NITRO API for the
    # corresponding wisite attributes.
    class Accessmethod:
        Direct = "Direct"
        Alternate = "Alternate"
        Translated = "Translated"
        GatewayDirect = "GatewayDirect"
        GatewayAlternate = "GatewayAlternate"
        GatewayTranslated = "GatewayTranslated"

    class Accesstype:
        UserDevice = "UserDevice"
        Gateway = "Gateway"
        UserDeviceAndGateway = "UserDeviceAndGateway"

    class Transport:
        HTTP = "HTTP"
        HTTPS = "HTTPS"
        SSLRELAY = "SSLRELAY"

    class Loadbalance:
        ON = "ON"
        OFF = "OFF"

    class Recoveryfarm:
        ON = "ON"
        OFF = "OFF"
class wisite_translationinternalip_binding_response(base_response) :
    """Deserialization target for wisite_translationinternalip_binding
    NITRO responses.

    Carries the standard response envelope (errorcode, message, severity,
    sessionid) plus the list of binding objects returned by the appliance.
    """
    def __init__(self, length=1) :
        # Standard NITRO response envelope fields.
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate the expected number of binding objects; the original
        # code first assigned an empty list that was immediately overwritten.
        self.wisite_translationinternalip_binding = [wisite_translationinternalip_binding() for _ in range(length)]
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._reservation_transactions_operations import build_list_by_billing_profile_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ReservationTransactionsOperations:
    """ReservationTransactionsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.consumption.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def list(
        self,
        billing_account_id: str,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.ReservationTransactionsListResult"]:
        """List of transactions for reserved instances on billing account scope.

        :param billing_account_id: BillingAccount ID.
        :type billing_account_id: str
        :param filter: Filter reservation transactions by date range. The properties/EventDate for
         start date and end date. The filter supports 'le' and 'ge'.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ReservationTransactionsListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.consumption.models.ReservationTransactionsListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ReservationTransactionsListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the operation's templated URL; later pages
            # re-issue the request against the service-provided next_link.
            if not next_link:
                request = build_list_request(
                    billing_account_id=billing_account_id,
                    filter=filter,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    billing_account_id=billing_account_id,
                    filter=filter,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; return the next-page link and its items.
            deserialized = self._deserialize("ReservationTransactionsListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/providers/Microsoft.Consumption/reservationTransactions'}  # type: ignore

    @distributed_trace
    def list_by_billing_profile(
        self,
        billing_account_id: str,
        billing_profile_id: str,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.ModernReservationTransactionsListResult"]:
        """List of transactions for reserved instances on billing account scope.

        :param billing_account_id: BillingAccount ID.
        :type billing_account_id: str
        :param billing_profile_id: Azure Billing Profile ID.
        :type billing_profile_id: str
        :param filter: Filter reservation transactions by date range. The properties/EventDate for
         start date and end date. The filter supports 'le' and 'ge'.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ModernReservationTransactionsListResult or the
         result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.consumption.models.ModernReservationTransactionsListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ModernReservationTransactionsListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the operation's templated URL; later pages
            # re-issue the request against the service-provided next_link.
            if not next_link:
                request = build_list_by_billing_profile_request(
                    billing_account_id=billing_account_id,
                    billing_profile_id=billing_profile_id,
                    filter=filter,
                    template_url=self.list_by_billing_profile.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_billing_profile_request(
                    billing_account_id=billing_account_id,
                    billing_profile_id=billing_profile_id,
                    filter=filter,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; return the next-page link and its items.
            deserialized = self._deserialize("ModernReservationTransactionsListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_billing_profile.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}/providers/Microsoft.Consumption/reservationTransactions'}  # type: ignore
| |
# Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Singleton classes needed for authentication."""
import binascii
import json
from re import compile
from Crypto.Cipher import AES, ARC2
from Crypto.Hash import HMAC, SHA256
from Crypto.Random import random
from django.http import HttpResponseRedirect
from oauth2client.client import OAuth2WebServerFlow
from pinball.config.pinball_config import PinballConfig
__author__ = 'Tongbo Huang, Devin Lundberg'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = ['Tongbo Huang', 'Devin Lundberg']
__license__ = 'Apache'
__version__ = '2.0'
class SingletonType(type):
    """Metaclass that caches the first instance of each class it creates.

    A class using this metaclass is constructed at most once; every later
    call returns the instance built by the first call.
    """
    def __call__(cls, *args, **kwargs):
        try:
            instance = cls.__instance
        except AttributeError:
            instance = super(SingletonType, cls).__call__(*args, **kwargs)
            cls.__instance = instance
        return instance
class OAuth2Flow(object):
    """Singleton holding one google OAuth2WebServerFlow per authentication
    domain, built from the Pinball client credentials.

    You can created new ones from Google Cloud Console.
    """
    __metaclass__ = SingletonType

    # Requested scopes: the user's email plus read-only access to directory
    # group membership.
    _scope = 'https://www.googleapis.com/auth/userinfo.email '\
             'https://www.googleapis.com/auth/admin.directory.group.readonly'

    # Fall back to a local callback URL when no UI host is configured.
    if not PinballConfig.UI_HOST:
        _redirect_uri = 'http://localhost:8080/oauth2callback/'
    else:
        _redirect_uri = 'https://%s/oauth2callback/' % PinballConfig.UI_HOST

    # One flow per authorized authentication domain.
    _flow = {}
    for domain in PinballConfig.AUTHENTICATION_DOMAINS:
        _flow[domain] = OAuth2WebServerFlow(client_id=PinballConfig.GOOGLE_CLIENT_ID,
                                            client_secret=PinballConfig.GOOGLE_CLIENT_SECRET,
                                            scope=_scope,
                                            redirect_uri=_redirect_uri,
                                            hd=domain)

    def get_flow(self, domain):
        """Getter for the google OAuth2WebServerFlow object with
        Pinball client credentials.

        Returns:
            The google OAuth2WebServerFlow object with Pinball client
            credentials.
        """
        return self._flow[domain]

    def domain_authenticated(self, domain):
        """Check if the given domain is authorized with the application.

        Returns:
            True if the domain has an associated OAuth2 flow, False
            otherwise.
        """
        # Membership test on the dict itself; no need to materialize keys().
        return domain in self._flow

    def get_domains(self):
        """Get all authenticated domains.

        Returns:
            Authenticated domains
        """
        return PinballConfig.AUTHENTICATION_DOMAINS
class CryptoException(Exception):
    """Raised when encryption, decryption, or envelope deserialization fails."""
class Crypter(object):
    """Implements modern authenticated encryption for strings.

    The current version (1) uses HMAC-SHA256 and AES256-CBC using
    Encrypt-then-MAC. Should be secure >>>2030 according to NIST standards.
    See http://www.keylength.com/ for a summary of algorithms and expected
    security. The message is padded with null characters. This means that
    strings with null characters should not use this method unless a
    different padding scheme is added.

    AES in GCM mode is faster, but the pycrypto implementation is immature.
    This may be a better choice for future versions.

    There is currently legacy support for decryption under the old key
    using ARC2.

    NOTE(review): this class relies on Python 2 string semantics
    (str.encode('base64')) and will not run unmodified on Python 3.

    TODO(devinlundberg): remove legacy ARC2 decryption support
    """
    __metaclass__ = SingletonType
    # Remove legacy crypter in future.  ECB mode leaks plaintext structure;
    # kept only so data written under the old scheme can still be read.
    _legacy_crypter = ARC2.new(PinballConfig.SECRET_KEY, ARC2.MODE_ECB)
    _aes_block_size = 16  # AES block (and IV) size in bytes
    _padding_char = '\x00'  # messages are right-padded with NULs to a block boundary

    def _serialize(self, ciphertext, mac, **params):
        """Creates a serialized crypto object with current version and key.

        Args:
            ciphertext: raw encrypted bytes.
            mac: hex digest authenticating the ciphertext (and IV).
            **params: extra raw byte strings (e.g. iv) shipped alongside.
        Returns:
            A base64-encoded JSON envelope containing all of the above.
        """
        encoded_params = {k: v.encode('base64') for k, v in params.items()}
        return json.dumps({
            'version': PinballConfig.CRYPTO_VERSION,
            'ciphertext': ciphertext.encode('base64'),
            'auth': mac.encode('base64'),
            'params': encoded_params
        }).encode('base64')

    def _deserialize(self, encoded_ciphertext):
        """Gets version, ciphertext, auth, and params from serialized object.

        Raises:
            CryptoException: if the envelope is not valid base64/JSON or is
                missing any required field.
        """
        try:
            ciphertext_json = encoded_ciphertext.decode('base64')
        except binascii.Error:
            raise CryptoException('Invalid Base64')
        try:
            ciphertext_obj = json.loads(ciphertext_json)
        except ValueError:
            raise CryptoException('Invalid JSON format')
        if any(key not in ciphertext_obj
               for key in ('version', 'ciphertext', 'auth', 'params')):
            raise CryptoException('Invalid JSON parameters')
        version = ciphertext_obj['version']
        try:
            ciphertext = ciphertext_obj['ciphertext'].decode('base64')
            auth = ciphertext_obj['auth'].decode('base64')
            params = {k: v.decode('base64')
                      for k, v in ciphertext_obj['params'].items()}
        except binascii.Error:
            raise CryptoException('Invalid Base64')
        except AttributeError:
            # A field was not a string (e.g. a number or list).
            raise CryptoException('Unsupported types')
        return version, ciphertext, auth, params

    def _cbc_hmac_sha256_decrypt(self, ciphertext, auth, iv):
        """Authenticated decrypt using AES-CBC and HMAC SHA256,
        Encrypt-then-MAC: verify the MAC first, then decrypt."""
        hmac = HMAC.new(PinballConfig.HMAC_KEY, digestmod=SHA256)
        hmac.update(ciphertext)
        hmac.update(iv)
        # NOTE(review): '!=' is not a constant-time comparison; consider
        # hmac.compare_digest to rule out a timing side channel.
        if hmac.hexdigest() != auth:
            raise CryptoException('Decryption Failed')
        aes = AES.new(PinballConfig.AES_CBC_KEY, AES.MODE_CBC, iv)
        return aes.decrypt(ciphertext).rstrip(self._padding_char)

    def encrypt(self, message):
        """Encrypts string of any length using the current crypto version.

        Args:
            message: The string that needs to be encrypted.
        Returns:
            A serialized authenticated encrypted object.
        """
        # Fresh random IV, one AES block long, generated per message.
        iv = ''.join(chr(random.randint(0, 255))
                     for _ in range(self._aes_block_size))
        aes = AES.new(PinballConfig.AES_CBC_KEY, AES.MODE_CBC, iv)
        hmac = HMAC.new(PinballConfig.HMAC_KEY, digestmod=SHA256)
        # Pad up to the next block boundary; an exact multiple of the block
        # size gains a whole extra block of padding.
        padded_length = (len(message) + self._aes_block_size -
                         len(message) % (self._aes_block_size))
        padded_message = message.ljust(padded_length, self._padding_char)
        ciphertext = aes.encrypt(padded_message)
        # Encrypt-then-MAC over the ciphertext and the IV.
        hmac.update(ciphertext)
        hmac.update(iv)
        return self._serialize(ciphertext, hmac.hexdigest(), iv=iv)

    def decrypt(self, encoded_ciphertext):
        """Deserializes and decrypts a string with the current or legacy
        algorithms.

        Args:
            encoded_ciphertext: The string that needs to be decrypted.
        Returns:
            The decrypted message.
        Throws:
            CryptoException: on failed decryption.
        """
        try:
            version, ciphertext, auth, params = self._deserialize(encoded_ciphertext)
        except CryptoException:
            # Not a version-1 envelope: fall back to the legacy ARC2 scheme.
            # This should raise an exception when support for ARC2 ends.
            # NOTE(review): rstrip('0') strips the character '0', not NUL
            # ('\x00'); presumably this matches the legacy padding scheme --
            # confirm before changing.
            return self._legacy_crypter.decrypt(encoded_ciphertext).rstrip('0')
        if version == 1:
            if 'iv' not in params:
                raise CryptoException('Missing IV')
            return self._cbc_hmac_sha256_decrypt(ciphertext, auth, params['iv'])
        else:
            raise CryptoException('Unsupported Crypto Version')
class RequireLogin(object):
    """Middleware that requires a user to be authenticated to view any page.

    Exemptions to this requirement can optionally be specified in settings
    via a list of regular expressions in EXEMPT_URLS (which you can copy
    from your urls.py).

    Requires authentication middleware and template context processors to
    be loaded.  You'll get an error if they aren't.
    """

    # Perf fix: compiled once at class-definition time instead of on every
    # request.  'compile' here is re.compile (imported at module top).
    EXEMPT_URLS = [compile('/signin/'),
                   compile('/oauth2callback/'),
                   compile('/logout/'),
                   compile('/static/.*')]

    def process_request(self, request):
        """Redirect unauthenticated requests to the signin page.

        The Login Required middleware requires authentication middleware to
        be installed. Edit your MIDDLEWARE_CLASSES setting to insert
        'django.contrib.auth.middleware.AuthenticationMiddleware'.
        If that doesn't work, ensure your TEMPLATE_CONTEXT_PROCESSORS setting
        includes 'django.core.context_processors.auth'.

        Args:
            request: The request sent towards any application url.
        Returns:
            None if the URL is exempt or the user's credential cookies check
            out; otherwise a redirect to the signin page (also used when the
            client-side credential cookie is malformed).
        """
        domains = OAuth2Flow().get_domains()
        path = request.path_info
        if any(m.match(path) for m in self.EXEMPT_URLS):
            pass
        elif 'login' in request.COOKIES and 'user_id' in request.COOKIES \
                and 'domain_url' in request.COOKIES:
            crypter = Crypter()
            try:
                user_id = crypter.decrypt(request.COOKIES['login'])
                domain = crypter.decrypt(request.COOKIES['domain_url'])
            except CryptoException:
                return HttpResponseRedirect('/signin/')
            # The encrypted 'login' cookie must agree with the plaintext
            # 'user_id' cookie, and the domain must be an authorized one.
            if user_id == request.COOKIES['user_id'] and user_id != '' \
                    and domain in domains:
                pass
            else:
                return HttpResponseRedirect('/signin/')
        else:
            return HttpResponseRedirect('/signin/')
# ---------------------------------------------------------------------------
"""Test the Synapse command line client.
"""
import base64
import os
import pytest
from unittest.mock import call, Mock, patch, MagicMock
import synapseclient.__main__ as cmdline
from synapseclient.core.exceptions import SynapseAuthenticationError, SynapseNoCredentialsError
from synapseclient.entity import File
import synapseutils
def test_command_sync(syn):
    """Verify the sync subcommand's argparse wiring.

    The subcommand simply forwards its parsed arguments to
    `synapseutils.sync.syncToSynapse`, so we check the parsed defaults and
    that the forwarding call happens exactly once.
    """
    file_opener = MagicMock()
    with patch("argparse.FileType", return_value=file_opener):
        parsed = cmdline.build_parser().parse_args(['sync', '/tmp/foobarbaz.tsv'])

        file_opener.assert_called_once_with('/tmp/foobarbaz.tsv')
        assert parsed.manifestFile is file_opener.return_value
        assert parsed.dryRun is False
        assert parsed.sendMessages is False
        assert parsed.retries == 4

        with patch.object(synapseutils, "syncToSynapse") as sync_mock:
            cmdline.sync(parsed, syn)
            sync_mock.assert_called_once_with(
                syn,
                manifestFile=parsed.manifestFile,
                dryRun=parsed.dryRun,
                sendMessages=parsed.sendMessages,
                retries=parsed.retries)
def test_migrate__default_args(syn):
    """Parsing 'migrate' with only positional arguments should yield the
    documented defaults for every optional flag."""
    dest_storage_location_id = '98766'
    db_path = '/tmp/foo/bar'

    parsed = cmdline.build_parser().parse_args(
        ['migrate', 'syn12345', dest_storage_location_id, db_path])

    # positionals round-trip unchanged
    assert parsed.id == 'syn12345'
    assert parsed.dest_storage_location_id == dest_storage_location_id
    assert parsed.db_path == db_path
    # optional flags fall back to their defaults
    assert parsed.file_version_strategy == 'new'
    assert parsed.include_table_files is False
    assert parsed.continue_on_error is False
    assert parsed.dryRun is False
    assert parsed.force is False
    assert parsed.csv_log_path is None
def test_migrate__fully_specified_args(mocker, syn):
    """Test that the command line arguments are successfully passed to the
    migrate function when the arguments are fully specified."""
    entity_id = 'syn12345'
    dest_storage_location_id = '98766'
    source_storage_location_ids = ['12345', '23456']
    db_path = '/tmp/foo/bar'
    parser = cmdline.build_parser()
    # test w/ fully specified args
    args = parser.parse_args([
        'migrate',
        entity_id,
        dest_storage_location_id,
        db_path,
        '--source_storage_location_ids', *source_storage_location_ids,
        '--file_version_strategy', 'all',
        '--dryRun',
        '--include_table_files',
        '--continue_on_error',
        '--force',
        '--csv_log_path', '/tmp/foo/bar'
    ])
    # every flag should round-trip through argparse
    assert args.id == entity_id
    assert args.dest_storage_location_id == dest_storage_location_id
    assert args.source_storage_location_ids == source_storage_location_ids
    assert args.db_path == db_path
    assert args.file_version_strategy == 'all'
    assert args.include_table_files is True
    assert args.continue_on_error is True
    assert args.dryRun is True
    assert args.force is True
    assert args.csv_log_path == '/tmp/foo/bar'

    # verify args are passed through to the fn
    mock_index = mocker.patch.object(synapseutils, 'index_files_for_migration')
    mock_migrate = mocker.patch.object(synapseutils, 'migrate_indexed_files')
    cmdline.migrate(args, syn)
    mock_index.assert_called_once_with(
        syn,
        args.id,
        args.dest_storage_location_id,
        args.db_path,
        source_storage_location_ids=args.source_storage_location_ids,
        file_version_strategy='all',
        include_table_files=True,
        continue_on_error=True,
    )
    # during a dryRun the actual migration should not occur
    assert mock_migrate.called is False

    # without dryRun then migrate should also be called
    args.dryRun = False
    cmdline.migrate(args, syn)
    mock_migrate.assert_called_once_with(
        syn,
        args.db_path,
        create_table_snapshots=True,
        continue_on_error=True,
        force=True
    )
def test_migrate__dont_continue(mocker, syn):
    """Verify we exit gracefully if migrate returns no result (e.g. the user
    declined to continue with the migration after reading the result of the
    index)."""
    mocker.patch.object(synapseutils, 'index_files_for_migration')
    migrate_mock = mocker.patch.object(synapseutils, 'migrate_indexed_files')
    # migrate_indexed_files returns None when the user declines to continue
    migrate_mock.return_value = None

    parsed = cmdline.build_parser().parse_args(
        ['migrate', 'syn12345', '98766', '/tmp/foo/bar'])
    # must complete without raising
    cmdline.migrate(parsed, syn)
@patch.object(cmdline, 'synapseutils')
def test_get_manifest_option(mock_synapseutils):
    """
    Verify the create-manifest option works properly for its three choices
    ('all', 'root', 'suppress') and is forwarded to syncFromSynapse.
    """
    parser = cmdline.build_parser()
    syn = Mock()

    # without the flag, manifest defaults to 'all'
    args = parser.parse_args(['get', '-r', 'syn123'])
    assert args.manifest == 'all'
    cmdline.get(args, syn)
    mock_synapseutils.syncFromSynapse.assert_called_with(
        syn, 'syn123', './', followLink=False, manifest="all")

    # explicit choices are forwarded verbatim
    for choice in ('root', 'suppress'):
        args = parser.parse_args(['get', '-r', 'syn123', '--manifest', choice])
        assert args.manifest == choice
        cmdline.get(args, syn)
        mock_synapseutils.syncFromSynapse.assert_called_with(
            syn, 'syn123', './', followLink=False, manifest=choice)
def test_get_multi_threaded_flag():
    """The --multiThreaded flag parses to True and is also the default."""
    parser = cmdline.build_parser()

    explicit = parser.parse_args(['get', '--multiThreaded', 'syn123'])
    assert explicit.multiThreaded

    # defaults to True when the flag is omitted
    implicit = parser.parse_args(['get', 'syn123'])
    assert implicit.multiThreaded
def test_get_sts_token():
    """get-sts-token should fetch a token for the folder/permission pair and
    log it in the requested output format."""
    syn = Mock()
    syn.get_sts_storage_token.return_value = 'export foo=bar'

    parsed = cmdline.build_parser().parse_args(
        ['get-sts-token', 'syn_1', 'read_write', '-o', 'shell'])
    cmdline.get_sts_token(parsed, syn)

    syn.get_sts_storage_token.assert_called_with('syn_1', 'read_write',
                                                 output_format='shell')
    syn.logger.info.assert_called_once_with('export foo=bar')
def test_authenticate_login__username_password(syn):
    """Happy path: username and password are forwarded to syn.login unchanged."""
    with patch.object(syn, 'login') as mock_login:
        cmdline._authenticate_login(syn, 'foo', 'bar', rememberMe=True, silent=True)
        mock_login.assert_called_once_with('foo', password='bar',
                                           rememberMe=True, silent=True)
def test_authenticate_login__api_key(syn):
    """Verify attempting to authenticate when supplying an api key as the password.
    Should attempt to treat the password as an api key after the initial failures
    as a password and token."""
    username = 'foo'
    # api keys are base64, so this secret is also a plausible api key
    password = base64.b64encode(b'bar').decode('utf-8')
    login_kwargs = {'rememberMe': True}
    # expected fallback order: password -> auth token -> api key
    expected_login_calls = [
        call(username, password=password, **login_kwargs),
        call(username, authToken=password, **login_kwargs),
        call(username, apiKey=password, **login_kwargs),
    ]
    with patch.object(syn, 'login') as login:
        login.side_effect = SynapseAuthenticationError()
        # simulate failure both as password and as api key
        with pytest.raises(SynapseAuthenticationError):
            cmdline._authenticate_login(syn, username, password, **login_kwargs)
        assert expected_login_calls == login.call_args_list
        login.reset_mock()

        # now simulate success when used as an api key
        def login_side_effect(*args, **kwargs):
            api_key = kwargs.get('apiKey')
            if not api_key:
                raise SynapseAuthenticationError()
        login.side_effect = login_side_effect
        cmdline._authenticate_login(syn, username, password, **login_kwargs)
        assert expected_login_calls == login.call_args_list
def test_authenticate_login__auth_token(syn):
    """Verify attempting to authenticate when supplying an auth bearer token
    instead of a password (or api key).
    Should attempt to treat the password as token after the initial failure as
    a password."""
    username = 'foo'
    auth_token = 'auth_bearer_token'
    login_kwargs = {'rememberMe': True}
    # expected fallback order: password -> auth token (no api-key attempt)
    expected_login_calls = [
        call(username, password=auth_token, **login_kwargs),
        call(username, authToken=auth_token, **login_kwargs),
    ]
    with patch.object(syn, 'login') as login:
        login.side_effect = SynapseAuthenticationError()
        # simulate failure both as password and as auth token.
        # token is not a base 64 encoded string so we don't expect it to be
        # tried as an api key
        with pytest.raises(SynapseAuthenticationError):
            cmdline._authenticate_login(syn, username, auth_token, **login_kwargs)
        assert expected_login_calls == login.call_args_list
        login.reset_mock()

        def login_side_effect(*args, **kwargs):
            # simulate a failure when called with other than auth token
            passed_auth_token = kwargs.get('authToken')
            if not passed_auth_token:
                raise SynapseAuthenticationError()
        login.side_effect = login_side_effect
        cmdline._authenticate_login(syn, username, auth_token, **login_kwargs)
        assert expected_login_calls == login.call_args_list
def test_authenticate_login__no_input(mocker, syn):
    """Verify attempting to authenticate with a bare login command (i.e.
    expecting to derive credentials from config or cache).

    Bug fix: removed a stray no-op `call(**login_kwargs),` expression that
    built a mock.call object and discarded it.
    """
    login_kwargs = {'rememberMe': True}
    mock_login = mocker.patch.object(syn, 'login')
    cmdline._authenticate_login(syn, None, None, **login_kwargs)
    # with no username/password, login is invoked with None credentials
    mock_login.assert_called_once_with(None, **login_kwargs)
def test_authenticate_login__failure(mocker, syn):
    """Verify that a login with invalid credentials raises an error (the
    FIRST error when multiple login methods were attempted).

    Bug fix: removed a stray no-op `call(**login_kwargs),` expression that
    built a mock.call object and discarded it.
    """
    login_kwargs = {'rememberMe': True}
    mock_login = mocker.patch.object(syn, 'login')

    def login_side_effect(*args, **kwargs):
        # embed the attempt number so we can tell which error surfaced
        raise SynapseAuthenticationError("call{}".format(mock_login.call_count))

    mock_login.side_effect = login_side_effect

    with pytest.raises(SynapseAuthenticationError) as ex_cm:
        cmdline._authenticate_login(syn, None, None, **login_kwargs)
    # the first attempt's error is the one propagated
    assert str(ex_cm.value) == 'call1'
@patch.object(cmdline, '_authenticate_login')
def test_login_with_prompt(mock_authenticate_login, syn):
    """When username/password are supplied as command arguments, no prompting
    happens and the credentials go straight to _authenticate_login."""
    kwargs = {'rememberMe': False, 'silent': True, 'forced': True}
    cmdline.login_with_prompt(syn, 'foo', 'bar', **kwargs)
    mock_authenticate_login.assert_called_once_with(syn, 'foo', 'bar', **kwargs)
@pytest.mark.parametrize(
    'username,expected_pass_prompt',
    [
        # a username was typed: any of the three secret kinds may follow
        ('foo', 'Password, api key, or auth token for user foo:'),
        # blank username: only an auth token makes sense
        ('', 'Auth token:'),
    ]
)
def test_login_with_prompt__getpass(mocker, username, expected_pass_prompt, syn):
    """
    Verify logging in when entering username and a secret from the console.
    The secret prompt should be customized depending on whether a username was
    entered or not (if not, prompt for an auth token since a username is not
    required for an auth token).
    """
    mock_sys = mocker.patch.object(cmdline, 'sys')
    mock_getpass = mocker.patch.object(cmdline, 'getpass')
    mock_input = mocker.patch.object(cmdline, 'input')
    mock_authenticate_login = mocker.patch.object(cmdline, '_authenticate_login')
    password = 'bar'
    login_kwargs = {
        'rememberMe': False,
        'silent': True,
        'forced': True,
    }

    def authenticate_side_effect(*args, **kwargs):
        if mock_authenticate_login.call_count == 1:
            # the first authenticate call doesn't take any input from console
            # (i.e. tries to use cache or config); when that returns no
            # credentials it prompts for username and a secret
            raise SynapseNoCredentialsError()
        return

    mock_sys.stdin.isatty.return_value = True
    mock_authenticate_login.side_effect = authenticate_side_effect
    mock_input.return_value = username
    mock_getpass.getpass.return_value = password

    cmdline.login_with_prompt(syn, None, None, **login_kwargs)

    mock_input.assert_called_once_with("Synapse username (leave blank if using an auth token): ")
    mock_getpass.getpass.assert_called_once_with(expected_pass_prompt)
    # the second (prompted) attempt drops 'silent' so failures are reported
    expected_authenticate_calls = [
        call(syn, None, None, **login_kwargs),
        call(syn, username, password, **{k: v for k, v in login_kwargs.items() if k != 'silent'})
    ]
    assert expected_authenticate_calls == mock_authenticate_login.call_args_list
def test_syn_commandline_silent_mode():
    """
    The --silent command line flag parses to True; the default is False.
    """
    assert cmdline.build_parser().parse_args([]).silent is False
    assert cmdline.build_parser().parse_args(['--silent']).silent is True
@patch("synapseclient.Synapse")
def test_commandline_main(mock_syn):
"""
Test the main method
"""
configPath = os.path.join(os.path.expanduser('~'), '.synapseConfig')
args = cmdline.build_parser().parse_args(['-u', 'testUser', '--silent'])
with patch.object(cmdline, 'build_parser') as mock_build_parser:
mock_build_parser.return_value.parse_args.return_value = args
cmdline.main()
mock_syn.assert_called_once_with(debug=False, skip_checks=False,
configPath=configPath, silent=True)
@patch.object(cmdline, 'sys')
@patch.object(cmdline, 'input')
@patch.object(cmdline, '_authenticate_login')
def test_login_with_prompt_no_tty(mock_authenticate_login, mock_input, mock_sys, syn):
    """
    Verify login_with_prompt when the terminal is not a tty: we cannot read
    from standard input, so a SynapseAuthenticationError must be raised
    rather than prompting.
    """
    # credential lookup from cache/config fails ...
    mock_authenticate_login.side_effect = SynapseNoCredentialsError()
    # ... and there is no terminal to prompt on
    mock_sys.stdin.isatty.return_value = False
    mock_input.return_value = 'test_user'

    kwargs = {'rememberMe': False, 'silent': True, 'forced': True}
    with pytest.raises(SynapseAuthenticationError):
        cmdline.login_with_prompt(syn, None, None, **kwargs)
def test_login_with_prompt__user_supplied(mocker, syn):
    """
    If login_with_prompt already has a username, only the secret is prompted
    for — the username prompt must be skipped entirely.
    """
    secret = 'testpass'

    mock_sys = mocker.patch.object(cmdline, 'sys')
    mock_sys.isatty.return_value = True
    mock_getpass = mocker.patch.object(cmdline, 'getpass')
    mock_getpass.getpass.return_value = secret
    mock_input = mocker.patch.object(cmdline, 'input')
    mock_auth = mocker.patch.object(cmdline, '_authenticate_login')
    # first attempt (cache/config) finds nothing; the prompted retry succeeds
    mock_auth.side_effect = [SynapseNoCredentialsError(), None]

    cmdline.login_with_prompt(syn, 'shrek', None)

    assert not mock_input.called
    mock_auth.assert_called_with(
        syn,
        'shrek',
        secret,
        forced=False,
        rememberMe=False,
    )
@patch.object(cmdline, 'build_parser')
def test_no_command_print_help(mock_build_parser, syn):
    """
    Verify that a command without any function automatically prints out the
    help instructions.

    Bug fix: `mock_build_parser.call_count == 2` was a bare comparison with
    no `assert`, so it never checked anything.
    """
    args = cmdline.build_parser().parse_args(['-u', 'test_user'])
    mock_build_parser.assert_called_once_with()
    cmdline.perform_main(args, syn)
    # build_parser is called a second time to print the help text
    assert mock_build_parser.call_count == 2
    mock_build_parser.return_value.print_help.assert_called_once_with()
@patch.object(cmdline.sys, 'exit')
@patch.object(cmdline, 'login_with_prompt')
def test_command_auto_login(mock_login_with_prompt, mock_sys_exit, syn):
    """
    A subcommand that needs credentials triggers login_with_prompt
    automatically when the user did not log in explicitly.
    """
    mock_login_with_prompt.assert_not_called()
    parsed = cmdline.build_parser().parse_args(['-u', 'test_user', 'get'])
    cmdline.perform_main(parsed, syn)
    mock_login_with_prompt.assert_called_once_with(syn, 'test_user', None, silent=True)
    mock_sys_exit.assert_called_once_with(1)
class TestGetFunction:
    """Tests for the 'get' subcommand against a mocked Synapse client."""

    @patch('synapseclient.client.Synapse')
    def setup(self, mock_syn):
        # pytest-style setup hook: every test gets a fresh mocked client
        self.syn = mock_syn

    @patch.object(synapseutils, 'syncFromSynapse')
    def test_get__with_arg_recursive(self, mock_syncFromSynapse):
        # -r delegates to synapseutils.syncFromSynapse with defaults
        parser = cmdline.build_parser()
        args = parser.parse_args(['get', '-r', 'syn123'])
        cmdline.get(args, self.syn)
        mock_syncFromSynapse.assert_called_once_with(self.syn, 'syn123', './', followLink=False, manifest='all')

    @patch.object(cmdline, '_getIdsFromQuery')
    def test_get__with_arg_queryString(self, mock_getIdsFromQuery):
        # -q resolves the query to entity ids, then fetches each one
        parser = cmdline.build_parser()
        args = parser.parse_args(['get', '-q', 'test_query'])
        mock_getIdsFromQuery.return_value = ['syn123', 'syn456']
        cmdline.get(args, self.syn)
        mock_getIdsFromQuery.assert_called_once_with('test_query', self.syn, './')
        assert self.syn.get.call_args_list == [call('syn123', downloadLocation='./'),
                                               call('syn456', downloadLocation='./')]

    @patch.object(cmdline, 'os')
    def test_get__with_id_path(self, mock_os):
        # a local file path argument is looked up without downloading
        parser = cmdline.build_parser()
        args = parser.parse_args(['get', './temp/path'])
        mock_os.path.isfile.return_value = True
        self.syn.get.return_value = {}
        cmdline.get(args, self.syn)
        self.syn.get.assert_called_once_with('./temp/path', version=None, limitSearch=None, downloadFile=False)

    @patch.object(cmdline, 'os')
    def test_get__with_normal_id(self, mock_os):
        # first call: entity with no associated file -> warning is logged
        parser = cmdline.build_parser()
        args = parser.parse_args(['get', 'syn123'])
        mock_entity = MagicMock(id='syn123')
        mock_os.path.isfile.return_value = False
        self.syn.get.return_value = mock_entity
        cmdline.get(args, self.syn)
        self.syn.get.assert_called_once_with('syn123', version=None, followLink=False, downloadLocation='./')
        assert self.syn.logger.info.call_args_list == [call('WARNING: No files associated with entity %s\n', 'syn123'),
                                                       call(mock_entity)]
        # second call: a File entity -> download messages are logged as well
        mock_entity2 = File(path='./tmp_path', parent='syn123')
        self.syn.get.return_value = mock_entity2
        mock_os.path.exists.return_value = True
        mock_os.path.basename.return_value = "./base_tmp_path"
        cmdline.get(args, self.syn)
        assert self.syn.logger.info.call_args_list == [call('WARNING: No files associated with entity %s\n', 'syn123'),
                                                       call(mock_entity),
                                                       call('Downloaded file: %s', './base_tmp_path'),
                                                       call('Creating %s', './tmp_path')]

    def test_get__without_synapse_id(self):
        # 'get' without an id (with or without -r) must raise ValueError
        # test normal get command without synapse ID
        parser = cmdline.build_parser()
        with pytest.raises(ValueError) as ve:
            args = parser.parse_args(['get'])
            cmdline.get(args, self.syn)
        assert str(ve.value) == "Missing expected id argument for use with the get command"
        # test get command with -r but without synapse ID
        parser = cmdline.build_parser()
        with pytest.raises(ValueError) as ve:
            args = parser.parse_args(['get', '-r'])
            cmdline.get(args, self.syn)
        assert str(ve.value) == "Missing expected id argument for use with the get command"
class TestStoreFunction:
    """Tests for the 'store' subcommand against a mocked Synapse client."""

    @patch('synapseclient.client.Synapse')
    def setup(self, mock_syn):
        # pytest-style setup hook: every test gets a fresh mocked client
        self.syn = mock_syn

    def test_get__without_file_args(self):
        """'store' without a FILE argument must raise a ValueError."""
        parsed = cmdline.build_parser().parse_args(
            ['store', '--parentid', 'syn123', '--used', 'syn456'])
        with pytest.raises(ValueError) as ve:
            cmdline.store(parsed, self.syn)
        assert str(ve.value) == "store missing required FILE argument"
# ---------------------------------------------------------------------------
#
# Copyright (c) 2010, Nick Blundell
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Nick Blundell nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
#
# Author: Nick Blundell <blundeln [AT] gmail [DOT] com>
# Organisation: www.nickblundell.org.uk
#
# Description:
# Longer tests, which must have suffix '_test' to be picked up for automated
# testing. Some of these are based on tricky config file examples given in the Augeas system
# Note that these lenses may not be completely accurate but are an aid to testing.
#
from pylens import *
def auto_list_test():
    """Check auto_list behaviour: a single item is unwrapped from its list
    on GET and re-wrapped transparently on PUT."""
    lens = Repeat(AnyOf(nums, type=int), type=list, auto_list=True)

    d("GET")
    many = lens.get("123")
    single = lens.get("1")
    assert(many == [1, 2, 3])
    assert(single == 1)

    d("PUT")
    assert(lens.put([5, 6, 7]) == "567")
    assert(lens.put(5) == "5")

    # Round-trip a single item - this assertion fails if
    # list_source_meta_data is not preserved.
    assert(lens.put(lens.get("1")) == "1")
def dict_test() :
    # Test use of static labels: each item is mapped to a fixed dict key.
    lens = Group(AnyOf(nums, type=int, label="number") + AnyOf(alphas, type=str, label="character"), type=dict, alignment=SOURCE)
    d("GET")
    assert(lens.get("1a") == {"number":1, "character":"a"})
    d("PUT")
    assert(lens.put({"number":4, "character":"q"}, "1a") == "4q")
    # An unrecognised key means the labelled item cannot be consumed.
    with assert_raises(NoTokenToConsumeException) :
        lens.put({"number":4, "wrong_label":"q"}, "1a")

    # Test dynamic labels: the key comes from the parsed input itself.
    key_value_lens = Group(AnyOf(alphas, type=str, is_label=True) + AnyOf("*+-", default="*") + AnyOf(nums, type=int), type=list)
    lens = Repeat(key_value_lens, type=dict, alignment=SOURCE)
    d("GET")
    got = lens.get("a+3c-2z*7")
    d(got)
    assert(got == {"a":[3], "c":[2], "z":[7]})
    d("PUT")
    output = lens.put({"b":[9], "x":[5]})
    d(output)
    assert(output in ["b*9x*5","x*5b*9"]) # Could be any order.

    d("Test manipulation")
    got = lens.get("a+3c-2z*7")
    del got["c"]
    output = lens.put(got)
    assert(output == "a+3z*7") # Should have kept SOURCE alignment.

    d("Test with auto list, which should keep source state")
    key_value_lens = Group(AnyOf(alphas, type=str, is_label=True) + AnyOf("*+-", default="*") + AnyOf(nums, type=int), type=list, auto_list=True)
    lens = Repeat(key_value_lens, type=dict, alignment=SOURCE)
    d("GET")
    got = lens.get("a+3c-2z*7")
    d(got)
    assert(got == {"a":3, "c":2, "z":7})
    d("PUT")
    output = lens.put(got)
    assert(output == "a+3c-2z*7")
    # For now this will lose some concrete text, but later we will consider
    # user-implied alignment or at least label alignment rather than source
    # alignment.
    d("Test auto_list with modification.")
    got = lens.get("a+3c-2z*7")
    got["c"] = 4
    output = lens.put(got)
    # The modified item loses its source position and is appended.
    assert_equal(output, "a+3z*7c*4")
def consumption_test():
    test_description("Test input consumption")
    lens = Repeat(AnyOf(nums, type=int), type=list)

    # Leftover input ('abc') must be reported on both GET and PUT.
    with assert_raises(NotFullyConsumedException):
        lens.get("123abc")
    with assert_raises(NotFullyConsumedException):
        lens.put([1, 2], "123abc")

    test_description("Test container consumption")
    # Here the input is fully consumed, but "a" is left over in the
    # container, which must also be flagged.
    with assert_raises(NotFullyConsumedException):
        lens.put([1, 2, "a"], "67")
def list_test():
    """Basic Repeat-into-list round trips: GET, PUT, and GET-then-PUT."""
    lens = Repeat(AnyOf(nums, type=int), type=list)

    d("GET")
    assert(lens.get("123") == [1, 2, 3])

    d("PUT")
    assert(lens.put([5, 6, 7]) == "567")

    d("GET-PUT")
    assert(lens.put(lens.get("1")) == "1")
def model_ordered_matching_list_test() :
    # With MODEL alignment, the output order follows the (possibly
    # reordered) model list rather than the original source order.
    lens = Repeat(
        Group(AnyOf(alphas, type=str) + AnyOf("*+-", default="*") + AnyOf(nums, type=int), type=list),
        type=list, alignment=MODEL)
    d("GET")
    got = lens.get("a+3c-2z*7")
    assert(got == [["a",3],["c",2],["z",7]])

    # Move the front item to the end - should affect positional ordering.
    got.append(got.pop(0))
    output = lens.put(got)
    d(output)
    assert(output == "c-2z*7a+3")

    d("With deletion and creation")
    d("GET")
    got = lens.get("a+3c-2z*7")
    # Move the front item to the end - should affect positional ordering.
    got.append(got.pop(0))
    # Now remove the middle item
    del got[1] # z*7
    # And add a new item
    got.append(["m",6])
    output = lens.put(got)
    d(output)
    # The created item 'm' has no source, so it gets the default '*'.
    assert(output == "c-2a+3m*6")
def source_ordered_matching_list_test() :
    # With SOURCE alignment, items keep their original source positions
    # even when the model list is reordered.
    lens = Repeat(
        Group(AnyOf(alphas, type=str) + AnyOf("*+-", default="*") + AnyOf(nums, type=int), type=list),
        type=list, alignment=SOURCE)
    d("Without deletion")
    d("GET")
    got = lens.get("a+3c-2z*7")
    assert(got == [["a",3],["c",2],["z",7]])
    # Move the front item to the end - should affect positional ordering.
    got.append(got.pop(0))
    output = lens.put(got)
    d(output)
    # Output is unchanged: source order wins over model order.
    assert_equal(output, "a+3c-2z*7")

    d("With deletion and creation")
    d("GET")
    got = lens.get("a+3c-2z*7")
    # Move the front item to the end - should affect positional ordering.
    got.append(got.pop(0))
    # Now remove the middle item
    del got[1] # z*7
    # And add a new item
    got.append(["m",6])
    output = lens.put(got)
    d(output)
    # Surviving items keep source order; the new item is appended with
    # the default '*' operator.
    assert(output == "a+3c-2m*6")
def state_recovery_test():
    test_description("Test that the user's item's state is recovered after consumption.")
    INPUT = "x=y;p=q"
    lens = List(KeyValue(Word(alphas, is_label=True)+"="+Word(alphas, type=str)), ";", type=dict)
    got = lens.get(INPUT)
    my_dict = {}
    my_dict["beans"] = "yummy"
    my_dict["marmite"] = "eurgh"
    lens.put(my_dict)
    # PUT consumes items from the container; the user's dict must be left
    # intact afterwards.
    assert_equal(my_dict, {"beans":"yummy", "marmite":"eurgh"})
    # XXX: Actually, due to the DictContainer implementation, this state would
    # not be lost anyway, though a similar test with LensObject below flexes
    # this test case.  I will leave this test, should the implementation
    # change in some way to warrant this test case.
def lens_object_test():
    """
    Here we demonstrate the use of classes to define our data model which are
    related to a lens.
    """
    # Define our Person class, which internally defines its lens.
    class Person(LensObject) :
        __lens__ = "Person::" + List(
            KeyValue(Word(alphas+" ", is_label=True) + ":" + Word(alphas+" ", type=str)),
            ";",
            type=None # XXX: I should get rid of default list type on List
        )

        def __init__(self, name, last_name) :
            self.name, self.last_name = name, last_name

    test_description("GET")
    # Here we use the high-level API get() function, which is for convenience
    # and which equates to:
    #   lens = Group(Person.__lens__, type=Person)
    #   person = lens.get("Person::Name:nick;Last Name:blundell")
    person = get(Person, "Person::Name:nick;Last Name:blundell")
    assert(person.name == "nick" and person.last_name == "blundell")

    test_description("PUT")
    # Now we PUT it back with no modification and should get what we started
    # with.
    output = put(person)
    assert_equal(output, "Person::Name:nick;Last Name:blundell")
    # And we do this again to check the consumed state of person was restored
    # after the successful PUT.
    output = put(person)
    assert_equal(output, "Person::Name:nick;Last Name:blundell")

    test_description("CREATE")
    new_person = Person("james", "bond")
    output = put(new_person)
    # Test that consumed state is restored on a successful PUT.
    assert(new_person.name == "james" and new_person.last_name == "bond")
    # XXX: Would be nice to control the order, but need to think of a nice way
    # to do this - need to cache source info of a label, which we can use when
    # we lose source info; also when a user declares attributes we can
    # remember the order and force this as model order.
    assert(output == "Person::Last Name:bond;Name:james" or output == "Person::Name:james;Last Name:bond")
    got_person = get(Person, output)
    # If all went well, we should GET back what we PUT.
    assert(got_person.name == "james" and got_person.last_name == "bond")
def constrained_lens_object_test():
    """
    Show how a user can constrain the set of valid LensObject attributes.
    """
    # Not yet implemented.
    return # TODO
def advanced_lens_object_test() :
    """
    GET/PUT/CREATE a Debian-style network interfaces file through LensObjects.
    """
    # Ref: http://manpages.ubuntu.com/manpages/hardy/man5/interfaces.5.html
    # NOTE(review): indentation inside this literal was mangled in transit --
    # confirm the exact whitespace against the original test file.
    INPUT = """
iface eth0-home inet static
address 192.168.1.1
netmask 255.255.255.0
gateway 67.207.128.1
dns-nameservers 67.207.128.4 67.207.128.5
up flush-mail
auto lo eth0
# A comment
auto eth1
"""
    # Model of a single "iface" stanza; option keys in the file use '-',
    # object attributes use '_' (see the mapping hooks below).
    class NetworkInterface(LensObject) :
        __lens__ = "iface" + WS(" ") + Keyword(additional_chars="_-", is_label=True) + WS(" ") + \
                   Keyword(label="address_family") + WS(" ") + Keyword(label="method") + NL() + \
                   ZeroOrMore(
                       KeyValue(WS(" ") + Keyword(additional_chars="_-", is_label=True) + WS(" ") + Until(NL(), type=str) + NL())
                   )
        def __init__(self, **kargs) :
            # Allow arbitrary option keywords at construction time.
            for key, value in kargs.iteritems() :
                setattr(self, key, value)
        def _map_label_to_identifier(self, label) :
            # e.g. "dns-nameservers" -> "dns_nameservers"
            return label.replace("-","_")
        def _map_identifier_to_label(self, attribute_name) :
            # e.g. "dns_nameservers" -> "dns-nameservers"
            return attribute_name.replace("_", "-")
    GlobalSettings.check_consumption = False
    if True :
        test_description("Test GET NetworkInterface")
        interface = get(BlankLine() + NetworkInterface, INPUT)
        # Do some spot checks of our extracted object.
        assert_equal(interface._meta_data.singleton_meta_data.label, "eth0-home")
        assert_equal(interface.address_family, "inet")
        assert_equal(interface.method, "static")
        assert_equal(interface.dns_nameservers, "67.207.128.4 67.207.128.5")
        assert_equal(interface.up, "flush-mail")
        test_description("Test PUT NetworkInterface")
        # New attributes should be appended after the original options.
        interface.cheese_type = "cheshire"
        interface.address = "bananas"
        output = put(interface)
        assert_equal(output, """iface eth0-home inet static
netmask 255.255.255.0
gateway 67.207.128.1
dns-nameservers 67.207.128.4 67.207.128.5
up flush-mail
cheese-type cheshire
address bananas\n""")
        # Try creating from scratch.
        interface = NetworkInterface(address_family="inet", method="static", dns_nameservers="1.2.3.4 1.2.3.5", netmask="255.255.255.0")
        output = put(interface, label="wlan3")
        assert_equal(output, """iface wlan3 inet static
dns-nameservers 1.2.3.4 1.2.3.5
netmask 255.255.255.0\n""")
    #
    # Now let's create a class to represent the whole configuration.
    #
    class InterfaceConfiguration(LensObject) :
        auto_lens = Group("auto" + WS(" ") + List(Keyword(additional_chars="_-", type=str), WS(" "), type=None) + WS("") + NL(), type=list, name="auto_lens")
        __lens__ = ZeroOrMore(NetworkInterface | auto_lens | HashComment() | BlankLine())
        # Containers route parsed items into typed attributes.
        interfaces = Container(store_items_of_type=[NetworkInterface], type=dict)
        auto_interfaces = Container(store_items_from_lenses=[auto_lens], type=list)
    if True:
        test_description("GET InterfaceConfiguration")
        config = get(InterfaceConfiguration, INPUT)
        assert_equal(config.interfaces["eth0-home"].address, "192.168.1.1")
        assert_equal(config.auto_interfaces[0][1],"eth0")
        assert_equal(len(config.auto_interfaces),2)
        test_description("PUT InterfaceConfiguration")
        # Modify a nested interface option and splice a word into an auto line.
        config.interfaces["eth0-home"].netmask = "bananas"
        config.auto_interfaces[0].insert(1,"wlan2")
        output = put(config)
        assert_equal(output, """
iface eth0-home inet static
address 192.168.1.1
gateway 67.207.128.1
dns-nameservers 67.207.128.4 67.207.128.5
up flush-mail
netmask bananas
auto lo wlan2 eth0
# A comment
auto eth1
""")
    test_description("CREATE InterfaceConfiguration")
    GlobalSettings.check_consumption = True
    interface = NetworkInterface(address_family="inet", method="static", dns_nameservers="1.2.3.4 1.2.3.5", netmask="255.255.255.0")
    interface.some_thing = "something or another"
    config = InterfaceConfiguration()
    config.interfaces = {"eth3":interface}
    config.auto_interfaces = [["eth0"], ["wlan2", "eth2"]]
    output = put(config)
    assert_equal(output, """iface eth3 inet static
dns-nameservers 1.2.3.4 1.2.3.5
some-thing something or another
netmask 255.255.255.0
auto eth0
auto wlan2 eth2
""")
def init_test():
    """
    A few experiments showing how __new__ participates in object creation.
    """
    # Goal: create an object with some initial state regardless of whether the
    # constructor arguments were supplied.
    class Person(object):
        age = 10
        def __new__(cls, *args, **kargs) :
            # The args arrive here purely so creation can be customised on
            # them; Python passes the same args to __init__ after this call
            # during typical creation.
            instance = super(Person, cls).__new__(cls, *args, **kargs)
            # Seed the initial state.
            instance.name = None
            instance.surname = None
            instance.age = 3
            return instance
        def __init__(self, name, surname):
            d("Constructor called")
            self.name, self.surname = name, surname
        def __str__(self) :
            return "[%s, %s]" % (self.name, self.surname)

    person = Person("john", "smith")
    assert(person.name == "john" and person.surname == "smith")
    # Bypassing __init__: only the state seeded by __new__ is present.
    person = Person.__new__(Person)
    assert(person.name == None and person.surname == None)
    # Python falls back on the class attribute when no instance attribute of
    # the same name is found.
    d(person.__class__.__dict__)
    d(person.age)
    d(person.__class__.age)
| |
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
def _pass_through_property(name):
    """Return a pass-through property reading/writing ``_<name>``.

    Every generated accessor on FreezeFrameTest is an identical
    getter/setter pair with no validation, so a single factory replaces
    sixteen duplicated property definitions while keeping the exact same
    runtime behaviour (public attribute backed by a ``_``-prefixed slot).
    """
    private_name = '_' + name

    def _get(self):
        return getattr(self, private_name)

    def _set(self, value):
        setattr(self, private_name, value)

    return property(_get, _set,
                    doc="The %s of this FreezeFrameTest." % name)


class FreezeFrameTest(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types (dict): key is attribute name, value is attribute type.
    openapi_types = {
        'sensitivity': 'SensitivityType',
        'time_range_enabled': 'bool',
        'start_time': 'float',
        'end_time': 'float',
        'start_range_tolerance': 'float',
        'time_secs_or_frames': 'SecsOrFramesType',
        'end_range_enabled': 'bool',
        'end_range': 'float',
        'end_range_duration': 'float',
        'end_range_tolerance': 'float',
        'end_secs_or_frames': 'SecsOrFramesType',
        'not_at_any_other_time': 'bool',
        'max_time_allowed': 'float',
        'max_time_allowed_secs_or_frames': 'SecsOrFramesType',
        'reject_on_error': 'bool',
        'checked': 'bool'
    }

    # attribute_map (dict): key is attribute name, value is json key in
    # the API definition.
    attribute_map = {
        'sensitivity': 'sensitivity',
        'time_range_enabled': 'time_range_enabled',
        'start_time': 'start_time',
        'end_time': 'end_time',
        'start_range_tolerance': 'start_range_tolerance',
        'time_secs_or_frames': 'time_secs_or_frames',
        'end_range_enabled': 'end_range_enabled',
        'end_range': 'end_range',
        'end_range_duration': 'end_range_duration',
        'end_range_tolerance': 'end_range_tolerance',
        'end_secs_or_frames': 'end_secs_or_frames',
        'not_at_any_other_time': 'not_at_any_other_time',
        'max_time_allowed': 'max_time_allowed',
        'max_time_allowed_secs_or_frames': 'max_time_allowed_secs_or_frames',
        'reject_on_error': 'reject_on_error',
        'checked': 'checked'
    }

    def __init__(self, sensitivity=None, time_range_enabled=None, start_time=None, end_time=None, start_range_tolerance=None, time_secs_or_frames=None, end_range_enabled=None, end_range=None, end_range_duration=None, end_range_tolerance=None, end_secs_or_frames=None, not_at_any_other_time=None, max_time_allowed=None, max_time_allowed_secs_or_frames=None, reject_on_error=None, checked=None, local_vars_configuration=None):  # noqa: E501
        """FreezeFrameTest - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        # Snapshot the constructor arguments so they can be assigned
        # generically below (keys of openapi_types match parameter names).
        arguments = locals()
        self.discriminator = None
        for attr in self.openapi_types:
            # Initialise the private backing slot, then route any supplied
            # (non-None) value through the public property, exactly as the
            # expanded generated code did.
            setattr(self, '_' + attr, None)
            if arguments[attr] is not None:
                setattr(self, attr, arguments[attr])

    # Pass-through accessors for every OpenAPI attribute (see factory above).
    sensitivity = _pass_through_property('sensitivity')
    time_range_enabled = _pass_through_property('time_range_enabled')
    start_time = _pass_through_property('start_time')
    end_time = _pass_through_property('end_time')
    start_range_tolerance = _pass_through_property('start_range_tolerance')
    time_secs_or_frames = _pass_through_property('time_secs_or_frames')
    end_range_enabled = _pass_through_property('end_range_enabled')
    end_range = _pass_through_property('end_range')
    end_range_duration = _pass_through_property('end_range_duration')
    end_range_tolerance = _pass_through_property('end_range_tolerance')
    end_secs_or_frames = _pass_through_property('end_secs_or_frames')
    not_at_any_other_time = _pass_through_property('not_at_any_other_time')
    max_time_allowed = _pass_through_property('max_time_allowed')
    max_time_allowed_secs_or_frames = _pass_through_property('max_time_allowed_secs_or_frames')
    reject_on_error = _pass_through_property('reject_on_error')
    checked = _pass_through_property('checked')

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, FreezeFrameTest):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, FreezeFrameTest):
            return True
        return self.to_dict() != other.to_dict()
| |
from itertools import product as cartes
from sympy import (
limit, exp, oo, log, sqrt, Limit, sin, floor, cos, ceiling,
atan, gamma, Symbol, S, pi, Integral, Rational, I, EulerGamma,
tan, cot, integrate, Sum, sign, Function, subfactorial, symbols,
binomial, simplify, frac, Float)
from sympy.calculus.util import AccumBounds
from sympy.core.add import Add
from sympy.core.mul import Mul
from sympy.series.limits import heuristics
from sympy.series.order import Order
from sympy.utilities.pytest import XFAIL, raises
from sympy.abc import x, y, z, k
# Shared positive-integer symbol used by several tests below.
n = Symbol('n', integer=True, positive=True)


def test_basic1():
    """Limits of elementary expressions at 0 and +/-oo, in both directions."""
    assert limit(x, x, oo) == oo
    assert limit(x, x, -oo) == -oo
    assert limit(-x, x, oo) == -oo
    assert limit(x**2, x, -oo) == oo
    assert limit(-x**2, x, oo) == -oo
    assert limit(x*log(x), x, 0, dir="+") == 0
    assert limit(1/x, x, oo) == 0
    assert limit(exp(x), x, oo) == oo
    assert limit(-exp(x), x, oo) == -oo
    assert limit(exp(x)/x, x, oo) == oo
    assert limit(1/x - exp(-x), x, oo) == 0
    assert limit(x + 1/x, x, oo) == oo
    assert limit(x - x**2, x, oo) == -oo
    assert limit((1 + x)**(1 + sqrt(2)), x, 0) == 1
    assert limit((1 + x)**oo, x, 0) == oo
    assert limit((1 + x)**oo, x, 0, dir='-') == 0
    assert limit((1 + x + y)**oo, x, 0, dir='-') == (1 + y)**(oo)
    assert limit(y/x/log(x), x, 0) == -oo*sign(y)
    assert limit(cos(x + y)/x, x, 0) == sign(cos(y))*oo
    assert limit(gamma(1/x + 3), x, oo) == 2
    assert limit(S.NaN, x, -oo) == S.NaN
    assert limit(Order(2)*x, x, S.NaN) == S.NaN
    assert limit(1/(x - 1), x, 1, dir="+") == oo
    assert limit(1/(x - 1), x, 1, dir="-") == -oo
    assert limit(1/(5 - x)**3, x, 5, dir="+") == -oo
    assert limit(1/(5 - x)**3, x, 5, dir="-") == oo
    assert limit(1/sin(x), x, pi, dir="+") == -oo
    assert limit(1/sin(x), x, pi, dir="-") == oo
    assert limit(1/cos(x), x, pi/2, dir="+") == -oo
    assert limit(1/cos(x), x, pi/2, dir="-") == oo
    assert limit(1/tan(x**3), x, (2*pi)**(S(1)/3), dir="+") == oo
    assert limit(1/tan(x**3), x, (2*pi)**(S(1)/3), dir="-") == -oo
    assert limit(1/cot(x)**3, x, (3*pi/2), dir="+") == -oo
    assert limit(1/cot(x)**3, x, (3*pi/2), dir="-") == oo

    # approaching 0
    # from dir="+"
    assert limit(1 + 1/x, x, 0) == oo
    # from dir='-'
    # Add
    assert limit(1 + 1/x, x, 0, dir='-') == -oo
    # Pow
    assert limit(x**(-2), x, 0, dir='-') == oo
    assert limit(x**(-3), x, 0, dir='-') == -oo
    assert limit(1/sqrt(x), x, 0, dir='-') == (-oo)*I
    assert limit(x**2, x, 0, dir='-') == 0
    assert limit(sqrt(x), x, 0, dir='-') == 0
    assert limit(x**-pi, x, 0, dir='-') == oo*sign((-1)**(-pi))
    assert limit((1 + cos(x))**oo, x, 0) == oo
def test_basic2():
    """Limits dominated by exponential growth or decay."""
    assert limit(x**x, x, 0, dir="+") == 1
    assert limit((exp(x) - 1)/x, x, 0) == 1
    assert limit(1 + 1/x, x, oo) == 1
    assert limit(-exp(1/x), x, oo) == -1
    assert limit(x + exp(-x), x, oo) == oo
    assert limit(x + exp(-x**2), x, oo) == oo
    assert limit(x + exp(-exp(x)), x, oo) == oo
    assert limit(13 + 1/x - exp(-x), x, oo) == 13


def test_basic3():
    """One-sided limits of 1/x at zero."""
    assert limit(1/x, x, 0, dir="+") == oo
    assert limit(1/x, x, 0, dir="-") == -oo


def test_basic4():
    """Limits with a free symbolic parameter y in the result."""
    assert limit(2*x + y*x, x, 0) == 0
    assert limit(2*x + y*x, x, 1) == 2 + y
    assert limit(2*x**8 + y*x**(-3), x, -2) == 512 - y/8
    assert limit(sqrt(x + 1) - sqrt(x), x, oo) == 0
    assert integrate(1/(x**3 + 1), (x, 0, oo)) == 2*pi*sqrt(3)/9


def test_basic5():
    """An unevaluated Limit is returned when the function evaluates to nan."""
    class my(Function):
        @classmethod
        def eval(cls, arg):
            if arg is S.Infinity:
                return S.NaN
    assert limit(my(x), x, oo) == Limit(my(x), x, oo)


def test_issue_3885():
    """Limit distributes over a sum taken in a different variable."""
    assert limit(x*y + x*z, z, 2) == x*y + 2*x


def test_Limit():
    """Limit stays unevaluated until .doit() is called."""
    assert Limit(sin(x)/x, x, 0) != 1
    assert Limit(sin(x)/x, x, 0).doit() == 1


def test_floor():
    """One-sided limits of floor() around integer points."""
    assert limit(floor(x), x, -2, "+") == -2
    assert limit(floor(x), x, -2, "-") == -3
    assert limit(floor(x), x, -1, "+") == -1
    assert limit(floor(x), x, -1, "-") == -2
    assert limit(floor(x), x, 0, "+") == 0
    assert limit(floor(x), x, 0, "-") == -1
    assert limit(floor(x), x, 1, "+") == 1
    assert limit(floor(x), x, 1, "-") == 0
    assert limit(floor(x), x, 2, "+") == 2
    assert limit(floor(x), x, 2, "-") == 1
    assert limit(floor(x), x, 248, "+") == 248
    assert limit(floor(x), x, 248, "-") == 247


def test_floor_requires_robust_assumptions():
    """floor() limits that need knowledge of the inner function's sign."""
    assert limit(floor(sin(x)), x, 0, "+") == 0
    assert limit(floor(sin(x)), x, 0, "-") == -1
    assert limit(floor(cos(x)), x, 0, "+") == 0
    assert limit(floor(cos(x)), x, 0, "-") == 0
    assert limit(floor(5 + sin(x)), x, 0, "+") == 5
    assert limit(floor(5 + sin(x)), x, 0, "-") == 4
    assert limit(floor(5 + cos(x)), x, 0, "+") == 5
    assert limit(floor(5 + cos(x)), x, 0, "-") == 5
def test_ceiling():
    """One-sided limits of ceiling() around integer points."""
    assert limit(ceiling(x), x, -2, "+") == -1
    assert limit(ceiling(x), x, -2, "-") == -2
    assert limit(ceiling(x), x, -1, "+") == 0
    assert limit(ceiling(x), x, -1, "-") == -1
    assert limit(ceiling(x), x, 0, "+") == 1
    assert limit(ceiling(x), x, 0, "-") == 0
    assert limit(ceiling(x), x, 1, "+") == 2
    assert limit(ceiling(x), x, 1, "-") == 1
    assert limit(ceiling(x), x, 2, "+") == 3
    assert limit(ceiling(x), x, 2, "-") == 2
    assert limit(ceiling(x), x, 248, "+") == 249
    assert limit(ceiling(x), x, 248, "-") == 248


def test_ceiling_requires_robust_assumptions():
    """ceiling() limits that need knowledge of the inner function's sign."""
    assert limit(ceiling(sin(x)), x, 0, "+") == 1
    assert limit(ceiling(sin(x)), x, 0, "-") == 0
    assert limit(ceiling(cos(x)), x, 0, "+") == 1
    assert limit(ceiling(cos(x)), x, 0, "-") == 1
    assert limit(ceiling(5 + sin(x)), x, 0, "+") == 6
    assert limit(ceiling(5 + sin(x)), x, 0, "-") == 5
    assert limit(ceiling(5 + cos(x)), x, 0, "+") == 6
    assert limit(ceiling(5 + cos(x)), x, 0, "-") == 6


def test_atan():
    """Limits involving atan on a real symbol."""
    x = Symbol("x", real=True)
    assert limit(atan(x)*sin(1/x), x, 0) == 0
    assert limit(atan(x) + sqrt(x + 1) - sqrt(x), x, oo) == pi/2


def test_abs():
    """Limits of absolute-value expressions."""
    assert limit(abs(x), x, 0) == 0
    assert limit(abs(sin(x)), x, 0) == 0
    assert limit(abs(cos(x)), x, 0) == 1
    assert limit(abs(sin(x + 1)), x, 0) == sin(1)


def test_heuristic():
    """The heuristics fallback returns an AccumBounds for oscillating parts."""
    x = Symbol("x", real=True)
    assert heuristics(sin(1/x) + atan(x), x, 0, '+') == AccumBounds(-1, 1)
    assert limit(log(2 + sqrt(atan(x))*sqrt(sin(1/x))), x, 0) == log(2)


def test_issue_3871():
    """limit() and expr.limit() agree for an exponential decay."""
    z = Symbol("z", positive=True)
    f = -1/z*exp(-z*x)
    assert limit(f, x, oo) == 0
    assert f.limit(x, oo) == 0


def test_exponential():
    """Classic (1 + x/n)**n style limits converging to exp()."""
    n = Symbol('n')
    x = Symbol('x', real=True)
    assert limit((1 + x/n)**n, n, oo) == exp(x)
    assert limit((1 + x/(2*n))**n, n, oo) == exp(x/2)
    assert limit((1 + x/(2*n + 1))**n, n, oo) == exp(x/2)
    assert limit(((x - 1)/(x + 1))**x, x, oo) == exp(-2)
    assert limit(1 + (1 + 1/x)**x, x, oo) == 1 + S.Exp1
@XFAIL
def test_exponential2():
    """Known failure: exp-style limit with a sin(n) perturbation."""
    n = Symbol('n')
    assert limit((1 + x/(n + sin(n)))**n, n, oo) == exp(x)


def test_doit():
    """Limit.doit() evaluates a contained Integral."""
    f = Integral(2 * x, x)
    l = Limit(f, x, oo)
    assert l.doit() == oo


def test_AccumBounds():
    """Oscillating limits evaluate to AccumBounds intervals."""
    assert limit(sin(k) - sin(k + 1), k, oo) == AccumBounds(-2, 2)
    assert limit(cos(k) - cos(k + 1) + 1, k, oo) == AccumBounds(-1, 3)
    # not the exact bound
    assert limit(sin(k) - sin(k)*cos(k), k, oo) == AccumBounds(-2, 2)
    # test for issue #9934
    t1 = Mul(S(1)/2, 1/(-1 + cos(1)), Add(AccumBounds(-3, 1), cos(1)))
    assert limit(simplify(Sum(cos(n).rewrite(exp), (n, 0, k)).doit().rewrite(sin)), k, oo) == t1
    t2 = Mul(Add(AccumBounds(-2, 2), sin(1)), 1/(-2*cos(1) + 2))
    assert limit(simplify(Sum(sin(n).rewrite(exp), (n, 0, k)).doit().rewrite(sin)), k, oo) == t2
    assert limit(frac(x)**x, x, oo) == AccumBounds(0, oo)
    assert limit(((sin(x) + 1)/2)**x, x, oo) == AccumBounds(0, oo)


@XFAIL
def test_doit2():
    """Known failure: doit(deep=False) should leave the Limit untouched."""
    f = Integral(2 * x, x)
    l = Limit(f, x, oo)
    # limit() breaks on the contained Integral.
    assert l.doit(deep=False) == l


def test_issue_3792():
    """Limits of nested trigonometric expressions at finite points."""
    assert limit((1 - cos(x))/x**2, x, S(1)/2) == 4 - 4*cos(S(1)/2)
    assert limit(sin(sin(x + 1) + 1), x, 0) == sin(1 + sin(1))
    assert limit(abs(sin(x + 1) + 1), x, 0) == 1 + sin(1)


def test_issue_4090():
    """Continuous rational/log expressions evaluate by substitution."""
    assert limit(1/(x + 3), x, 2) == S(1)/5
    assert limit(1/(x + pi), x, 2) == S(1)/(2 + pi)
    assert limit(log(x)/(x**2 + 3), x, 2) == log(2)/7
    assert limit(log(x)/(x**2 + pi), x, 2) == log(2)/(4 + pi)


def test_issue_4547():
    """One-sided limits of cot()."""
    assert limit(cot(x), x, 0, dir='+') == oo
    assert limit(cot(x), x, pi/2, dir='+') == 0


def test_issue_5164():
    """Limits of float powers of x."""
    assert limit(x**0.5, x, oo) == oo**0.5 == oo
    assert limit(x**0.5, x, 16) == S(16)**0.5
    assert limit(x**0.5, x, 0) == 0
    assert limit(x**(-0.5), x, oo) == 0
    assert limit(x**(-0.5), x, 4) == S(4)**(-0.5)


def test_issue_5183():
    """Grid of one-sided limits of (+/-x)**(+/-e) at 0 for several exponents."""
    # using list(...) so py.test can recalculate values
    tests = list(cartes([x, -x],
                        [-1, 1],
                        [2, 3, Rational(1, 2), Rational(2, 3)],
                        ['-', '+']))
    results = (oo, oo, -oo, oo, -oo*I, oo, -oo*(-1)**Rational(1, 3), oo,
               0, 0, 0, 0, 0, 0, 0, 0,
               oo, oo, oo, -oo, oo, -oo*I, oo, -oo*(-1)**Rational(1, 3),
               0, 0, 0, 0, 0, 0, 0, 0)
    assert len(tests) == len(results)
    for i, (args, res) in enumerate(zip(tests, results)):
        y, s, e, d = args
        eq = y**(s*e)
        try:
            assert limit(eq, x, 0, dir=d) == res
        except AssertionError:
            if 0:  # change to 1 if you want to see the failing tests
                print()
                print(i, res, eq, d, limit(eq, x, 0, dir=d))
            else:
                # deliberately fail (assert None is always falsy)
                assert None
def test_issue_5184():
    """Limits involving gamma() and bounded/oscillating quotients."""
    assert limit(sin(x)/x, x, oo) == 0
    assert limit(atan(x), x, oo) == pi/2
    assert limit(gamma(x), x, oo) == oo
    assert limit(cos(x)/x, x, oo) == 0
    assert limit(gamma(x), x, Rational(1, 2)) == sqrt(pi)
    r = Symbol('r', real=True, finite=True)
    assert limit(r*sin(1/r), r, 0) == 0


def test_issue_5229():
    """(1 + y)**(1/y) approaches e as y -> 0."""
    assert limit((1 + y)**(1/y) - S.Exp1, y, 0) == 0


def test_issue_4546():
    """Grid of one-sided limits of cot and tan at multiples of pi/2."""
    # using list(...) so py.test can recalculate values
    tests = list(cartes([cot, tan],
                        [-pi/2, 0, pi/2, pi, 3*pi/2],
                        ['-', '+']))
    results = (0, 0, -oo, oo, 0, 0, -oo, oo, 0, 0,
               oo, -oo, 0, 0, oo, -oo, 0, 0, oo, -oo)
    assert len(tests) == len(results)
    for i, (args, res) in enumerate(zip(tests, results)):
        f, l, d = args
        eq = f(x)
        try:
            assert limit(eq, x, l, dir=d) == res
        except AssertionError:
            if 0:  # change to 1 if you want to see the failing tests
                print()
                print(i, res, eq, l, d, limit(eq, x, l, dir=d))
            else:
                # deliberately fail (assert None is always falsy)
                assert None


def test_issue_3934():
    """Limits of power towers built from 1/x exponents."""
    assert limit((1 + x**log(3))**(1/x), x, 0) == 1
    assert limit((5**(1/x) + 3**(1/x))**x, x, 0) == 5


def test_calculate_series():
    """Expressions requiring deep series expansion in gruntz."""
    # needs gruntz calculate_series to go to n = 32
    assert limit(x**(S(77)/3)/(1 + x**(S(77)/3)), x, oo) == 1
    # needs gruntz calculate_series to go to n = 128
    assert limit(x**101.1/(1 + x**101.1), x, oo) == 1


def test_issue_5955():
    """High-degree rational limits tending to 1 at infinity."""
    assert limit((x**16)/(1 + x**16), x, oo) == 1
    assert limit((x**100)/(1 + x**100), x, oo) == 1
    assert limit((x**1885)/(1 + x**1885), x, oo) == 1
    assert limit((x**1000/((x + 1)**1000 + exp(-x))), x, oo) == 1


def test_newissue():
    """exp(1/sin(x))/exp(cot(x)) tends to 1 at 0."""
    assert limit(exp(1/sin(x))/exp(cot(x)), x, 0) == 1


def test_extended_real_line():
    """Arithmetic with oo inside the limand on the extended real line."""
    assert limit(x - oo, x, oo) == -oo
    assert limit(oo - x, x, -oo) == oo
    assert limit(x**2/(x - 5) - oo, x, oo) == -oo
    assert limit(1/(x + sin(x)) - oo, x, 0) == -oo
    assert limit(oo/x, x, oo) == oo
    assert limit(x - oo + 1/x, x, oo) == -oo
    assert limit(x - oo + 1/x, x, 0) == -oo
@XFAIL
def test_order_oo():
x = Symbol('x', positive=True, finite=True)
assert Order(x)*oo != Order(1, x)
assert limit(oo/(x**2 - 4), x, oo) == oo
def test_issue_5436():
raises(NotImplementedError, lambda: limit(exp(x*y), x, oo))
raises(NotImplementedError, lambda: limit(exp(-x*y), x, oo))
def test_Limit_dir():
raises(TypeError, lambda: Limit(x, x, 0, dir=0))
raises(ValueError, lambda: Limit(x, x, 0, dir='0'))
def test_polynomial():
assert limit((x + 1)**1000/((x + 1)**1000 + 1), x, oo) == 1
assert limit((x + 1)**1000/((x + 1)**1000 + 1), x, -oo) == 1
def test_rational():
assert limit(1/y - (1/(y + x) + x/(y + x)/y)/z, x, oo) == (z - 1)/(y*z)
assert limit(1/y - (1/(y + x) + x/(y + x)/y)/z, x, -oo) == (z - 1)/(y*z)
def test_issue_5740():
assert limit(log(x)*z - log(2*x)*y, x, 0) == oo*sign(y - z)
def test_issue_6366():
n = Symbol('n', integer=True, positive=True)
r = (n + 1)*x**(n + 1)/(x**(n + 1) - 1) - x/(x - 1)
assert limit(r, x, 1).simplify() == n/2
def test_factorial():
from sympy import factorial, E
f = factorial(x)
assert limit(f, x, oo) == oo
assert limit(x/f, x, oo) == 0
# see Stirling's approximation:
# http://en.wikipedia.org/wiki/Stirling's_approximation
assert limit(f/(sqrt(2*pi*x)*(x/E)**x), x, oo) == 1
assert limit(f, x, -oo) == factorial(-oo)
assert limit(f, x, x**2) == factorial(x**2)
assert limit(f, x, -x**2) == factorial(-x**2)
def test_issue_6560():
e = (5*x**3/4 - 3*x/4 + (y*(3*x**2/2 - S(1)/2) +
35*x**4/8 - 15*x**2/4 + S(3)/8)/(2*(y + 1)))
assert limit(e, y, oo) == (5*x**3 + 3*x**2 - 3*x - 1)/4
def test_issue_5172():
    n = Symbol('n')
    r = Symbol('r', positive=True)
    c = Symbol('c')
    p = Symbol('p', positive=True)
    m = Symbol('m', negative=True)
    expr = ((2*n*(n - r + 1)/(n + r*(n - r + 1)))**c +
            (r - 1)*(n*(n - r + 2)/(n + r*(n - r + 1)))**c - n)/(n**c - n)
    expr = expr.subs(c, c + 1)
    # Without a sign assumption on the exponent the limit cannot be computed.
    raises(NotImplementedError, lambda: limit(expr, n, oo))
    assert limit(expr.subs(c, m), n, oo) == 1
    assert limit(expr.subs(c, p), n, oo).simplify() == \
        (2**(p + 1) + r - 1)/(r + 1)**(p + 1)


def test_issue_7088():
    a = Symbol('a')
    assert limit(sqrt(x/(x + a)), x, oo) == 1


def test_issue_6364():
    a = Symbol('a')
    e = z/(1 - sqrt(1 + z)*sin(a)**2 - sqrt(1 - z)*cos(a)**2)
    assert limit(e, z, 0).simplify() == 2/cos(2*a)


def test_issue_4099():
    a = Symbol('a')
    # Diverging limits carry the sign of the free coefficient.
    assert limit(a/x, x, 0) == oo*sign(a)
    assert limit(-a/x, x, 0) == -oo*sign(a)
    assert limit(-a*x, x, oo) == -oo*sign(a)
    assert limit(a*x, x, oo) == oo*sign(a)


def test_issue_4503():
    dx = Symbol('dx')
    # The difference quotient converges to d/dx sqrt(1 + exp(x)).
    assert limit((sqrt(1 + exp(x + dx)) - sqrt(1 + exp(x)))/dx, dx, 0) == \
        exp(x)/(2*sqrt(exp(x) + 1))
def test_issue_8730():
    # subfactorial grows without bound.
    assert limit(subfactorial(x), x, oo) == oo


def test_issue_10801():
    # make sure limits work with binomial
    assert limit(16**k / (k * binomial(2*k, k)**2), k, oo) == pi


def test_issue_9205():
    x, y, a = symbols('x, y, a')
    # Only the limit point (and untouched symbols) contribute free symbols;
    # the limit variable itself is bound.
    assert Limit(x, x, a).free_symbols == {a}
    assert Limit(x, x, a, '-').free_symbols == {a}
    assert Limit(x + y, x + y, a).free_symbols == {a}
    assert Limit(-x**2 + y, x**2, a).free_symbols == {y, a}


def test_limit_seq():
    # Limits of expressions containing symbolic Sums (sequence limits).
    assert limit(Sum(1/x, (x, 1, y)) - log(y), y, oo) == EulerGamma
    assert limit(Sum(1/x, (x, 1, y)) - 1/y, y, oo) == S.Infinity
    assert (limit(binomial(2*x, x) / Sum(binomial(2*y, y), (y, 1, x)), x, oo) ==
            S(3) / 4)
    assert (limit(Sum(y**2 * Sum(2**z/z, (z, 1, y)), (y, 1, x)) /
                  (2**x*x), x, oo) == 4)


def test_issue_11879():
    # Difference quotient of x**n reduces to the power-rule derivative.
    assert simplify(limit(((x+y)**n-x**n)/y, y, 0)) == n*x**(n-1)


def test_limit_with_Float():
    k = symbols("k")
    assert limit(1.0 ** k, k, oo) == 1
    assert limit(0.3*1.0**k, k, oo) == Float(0.3)
| |
from functools import partial
from sqlalchemy.ext.associationproxy import association_proxy, AssociationProxy
from sqlalchemy.orm import Query, aliased, mapper, relationship, synonym
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.orm.scoping import ScopedSession
from sqlalchemy.orm.session import Session, object_session
from sqlalchemy.schema import Column, ForeignKey, Table
from sqlalchemy.sql.expression import and_, bindparam, select, exists
from sqlalchemy.sql.operators import ColumnOperators
from sqlalchemy.types import Integer
from pokedex.db import markdown
class LocalAssociationProxy(AssociationProxy, ColumnOperators):
    """An association proxy for names in the default language

    Over the regular association_proxy, this provides sorting and filtering
    capabilities, implemented via SQL subqueries.
    """
    def __clause_element__(self):
        # Build a correlated scalar subquery selecting the proxied column for
        # the row whose language matches the session-supplied
        # `_default_language_id` bindparam; this lets the proxy be used
        # directly in e.g. order_by().
        q = select([self.remote_attr])
        q = q.where(self.target_class.foreign_id == self.owning_class.id)
        q = q.where(self.target_class.local_language_id == bindparam('_default_language_id'))
        return q

    def operate(self, op, *other, **kwargs):
        # Same correlated subquery as __clause_element__, but with the
        # requested comparison applied and wrapped in EXISTS, so the proxy
        # supports filtering (e.g. Foo.name == u'...').
        q = select([self.remote_attr])
        q = q.where(self.target_class.foreign_id == self.owning_class.id)
        q = q.where(self.target_class.local_language_id == bindparam('_default_language_id'))
        q = q.where(op(self.remote_attr, *other))
        return exists(q)
def _getset_factory_factory(column_name, string_getter):
"""Hello! I am a factory for creating getset_factory functions for SQLA.
I exist to avoid the closure-in-a-loop problem.
"""
def getset_factory(underlying_type, instance):
def getter(translations):
if translations is None:
return None
text = getattr(translations, column_name)
if text is None:
return text
session = object_session(translations)
language = translations.local_language
return string_getter(text, session, language)
def setter(translations, value):
# The string must be set on the Translation directly.
raise AttributeError("Cannot set %s" % column_name)
return getter, setter
return getset_factory
def create_translation_table(_table_name, foreign_class, relation_name,
                             language_class, relation_lazy='select', **kwargs):
    """Creates a table that represents some kind of data attached to the given
    foreign class, but translated across several languages.  Returns the new
    table's mapped class.  It won't be declarative, but it will have a
    `__table__` attribute so you can retrieve the Table object.

    `foreign_class` must have a `__singlename__`, currently only used to create
    the name of the foreign key column.

    Also supports the notion of a default language, which is attached to the
    session.  This is English by default, for historical and practical reasons.

    Usage looks like this:

        class Foo(Base): ...

        create_translation_table('foo_bars', Foo, 'bars',
            name = Column(...),
        )

        # Now you can do the following:
        foo.name
        foo.name_map['en']
        foo.foo_bars['en']

        foo.name_map['en'] = "new name"
        del foo.name_map['en']

        q.options(joinedload(Foo.bars_local))
        q.options(joinedload(Foo.bars))

    The following properties are added to the passed class:

    - `(relation_name)`, a relation to the new table.  It uses a dict-based
      collection class, where the keys are language identifiers and the values
      are rows in the created tables.
    - `(relation_name)_local`, a relation to the row in the new table that
      matches the current default language.
    - `(relation_name)_table`, the class created by this function.

    Note that these are distinct relations.  Even though the former necessarily
    includes the latter, SQLAlchemy doesn't treat them as linked; loading one
    will not load the other.  Modifying both within the same transaction has
    undefined behavior.

    For each column provided, the following additional attributes are added to
    Foo:

    - `(column)_map`, an association proxy onto `foo_bars`.
    - `(column)`, an association proxy onto `foo_bars_local`.

    Pardon the naming disparity, but the grammar suffers otherwise.

    Modifying these directly is not likely to be a good idea.

    For Markdown-formatted columns, `(column)_map` and `(column)` will give
    Markdown objects.
    """
    # n.b.: language_class only exists for the sake of tests, which sometimes
    # want to create tables entirely separate from the pokedex metadata
    foreign_key_name = foreign_class.__singlename__ + '_id'

    Translations = type(_table_name, (object,), {
        '_language_identifier': association_proxy('local_language', 'identifier'),
        'relation_name': relation_name,
        '__tablename__': _table_name,
    })

    # Create the table object
    table = Table(_table_name, foreign_class.__table__.metadata,
        Column(foreign_key_name, Integer, ForeignKey(foreign_class.id),
            primary_key=True, nullable=False,
            doc=u"ID of the %s these texts relate to" % foreign_class.__singlename__),
        Column('local_language_id', Integer, ForeignKey(language_class.id),
            primary_key=True, nullable=False,
            doc=u"Language these texts are in"),
    )
    Translations.__table__ = table

    # Add ye columns
    # Column objects have a _creation_order attribute in ascending order; use
    # this to get the (unordered) kwargs sorted correctly.
    # sorted() rather than list.sort() so this works on both Python 2 and 3
    # (dict.items() returns a non-sortable view on Python 3).
    kwitems = sorted(kwargs.items(), key=lambda kv: kv[1]._creation_order)
    for name, column in kwitems:
        column.name = name
        table.append_column(column)

    # Construct ye mapper
    mapper(Translations, table, properties={
        'foreign_id': synonym(foreign_key_name),
        'local_language': relationship(language_class,
            primaryjoin=table.c.local_language_id == language_class.id,
            innerjoin=True),
    })

    # Add full-table relations to the original class
    # Foo.bars_table
    setattr(foreign_class, relation_name + '_table', Translations)
    # Foo.bars
    setattr(foreign_class, relation_name, relationship(Translations,
        primaryjoin=foreign_class.id == Translations.foreign_id,
        collection_class=attribute_mapped_collection('local_language'),
    ))
    # Foo.bars_local
    # This is a bit clever; it uses bindparam() to make the join clause
    # modifiable on the fly. db sessions know the current language and
    # populate the bindparam.
    # The 'dummy' value is to trick SQLA; without it, SQLA thinks this
    # bindparam is just its own auto-generated clause and everything gets
    # fucked up.
    local_relation_name = relation_name + '_local'
    setattr(foreign_class, local_relation_name, relationship(Translations,
        primaryjoin=and_(
            Translations.foreign_id == foreign_class.id,
            Translations.local_language_id == bindparam('_default_language_id',
                value='dummy', type_=Integer, required=True),
        ),
        foreign_keys=[Translations.foreign_id, Translations.local_language_id],
        uselist=False,
        lazy=relation_lazy,
    ))

    # Add per-column proxies to the original class
    for name, column in kwitems:
        getset_factory = None
        string_getter = column.info.get('string_getter')
        if string_getter:
            getset_factory = _getset_factory_factory(
                column.name, string_getter)

        # Class.(column) -- accessor for the default language's value
        setattr(foreign_class, name,
            LocalAssociationProxy(local_relation_name, name,
                getset_factory=getset_factory))

        # Class.(column)_map -- accessor for the language dict
        # Need a custom creator since Translations doesn't have an init, and
        # these are passed as *args anyway.
        # `name` is bound as a default argument: `creator` is only invoked
        # later by the association proxy, so a plain closure would late-bind
        # and every `_map` would write to the *last* column in the loop.
        def creator(language, value, _column_name=name):
            row = Translations()
            row.local_language = language
            setattr(row, _column_name, value)
            return row
        setattr(foreign_class, name + '_map',
            association_proxy(relation_name, name, creator=creator,
                getset_factory=getset_factory))

    # Add to the list of translation classes
    foreign_class.translation_classes.append(Translations)

    # Done
    return Translations
class MultilangQuery(Query):
    """Query subclass that injects the session's default language id into
    the `_default_language_id` bindparam just before execution.
    """
    def __iter__(self):
        # 'dummy' is the placeholder planted by create_translation_table;
        # replace it (or an absent param) with the session's real language id.
        # NOTE(review): this touches the private Query._params attribute --
        # tied to the SQLAlchemy version in use.
        if '_default_language_id' not in self._params or self._params['_default_language_id'] == 'dummy':
            self._params = self._params.copy()
            self._params['_default_language_id'] = self.session.default_language_id
        return super(MultilangQuery, self).__iter__()
class MultilangSession(Session):
    """A tiny Session subclass that adds support for a default language.

    Needs to be used with `MultilangScopedSession`, below.
    """
    # Class-level defaults; instances may override via __init__ kwargs.
    default_language_id = None
    markdown_extension_class = markdown.PokedexLinkExtension

    def __init__(self, *args, **kwargs):
        # Pop our custom kwargs off before handing the rest to SQLAlchemy.
        if 'default_language_id' in kwargs:
            self.default_language_id = kwargs.pop('default_language_id')

        extension_cls = kwargs.pop(
            'markdown_extension_class', self.markdown_extension_class)
        self.markdown_extension = extension_cls(self)

        # Ensure queries pick up the default-language bindparam handling.
        kwargs.setdefault('query_cls', MultilangQuery)

        super(MultilangSession, self).__init__(*args, **kwargs)
class MultilangScopedSession(ScopedSession):
    """Dispatches language selection to the attached Session."""

    @property
    def default_language_id(self):
        """Passes the new default language id through to the current session.
        """
        # `registry()` yields the thread-local MultilangSession instance.
        return self.registry().default_language_id

    @default_language_id.setter
    def default_language_id(self, new):
        self.registry().default_language_id = new

    @property
    def markdown_extension(self):
        # Read-only passthrough to the current session's markdown extension.
        return self.registry().markdown_extension
| |
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
from tensorforce import util
from tensorforce.core.memories import Memory
class PrioritizedReplay(Memory):
    """
    Memory organized as a priority queue, which randomly retrieves experiences sampled according
    their priority values.
    """

    def __init__(
        self,
        states,
        internals,
        actions,
        include_next_states,
        capacity,
        prioritization_weight=1.0,
        buffer_size=100,
        scope='queue',
        summary_labels=None
    ):
        """
        Prioritized experience replay.

        Args:
            states: States specifiction.
            internals: Internal states specification.
            actions: Actions specification.
            include_next_states: Include subsequent state if true.
            capacity: Memory capacity.
            prioritization_weight: Prioritization weight.
            buffer_size: Buffer size. The buffer is used to insert experiences before experiences
                have been computed via updates.
            scope: TensorFlow variable scope for this memory's template.
            summary_labels: Optional summary labels forwarded to Memory.
        """
        super(PrioritizedReplay, self).__init__(
            states=states,
            internals=internals,
            actions=actions,
            include_next_states=include_next_states,
            scope=scope,
            summary_labels=summary_labels
        )
        self.capacity = capacity
        self.buffer_size = buffer_size
        self.prioritization_weight = prioritization_weight

        def custom_getter(getter, name, registered=False, **kwargs):
            # Record every (non-trainable) variable created inside the
            # template in self.variables so the base class can save/restore.
            variable = getter(name=name, registered=True, **kwargs)
            if not registered:
                assert not kwargs.get('trainable', False)
                self.variables[name] = variable
            return variable

        # Template so tf_retrieve_indices can be instantiated at several
        # graph sites while sharing one set of variables.
        self.retrieve_indices = tf.make_template(
            name_=(scope + '/retrieve_indices'),
            func_=self.tf_retrieve_indices,
            custom_getter_=custom_getter
        )
def tf_initialize(self):
# States
self.states_memory = dict()
for name, state in self.states_spec.items():
self.states_memory[name] = tf.get_variable(
name=('state-' + name),
shape=(self.capacity,) + tuple(state['shape']),
dtype=util.tf_dtype(state['type']),
trainable=False
)
# Internals
self.internals_memory = dict()
for name, internal in self.internals_spec.items():
self.internals_memory[name] = tf.get_variable(
name=('internal-' + name),
shape=(self.capacity,) + tuple(internal['shape']),
dtype=util.tf_dtype(internal['type']),
trainable=False
)
# Actions
self.actions_memory = dict()
for name, action in self.actions_spec.items():
self.actions_memory[name] = tf.get_variable(
name=('action-' + name),
shape=(self.capacity,) + tuple(action['shape']),
dtype=util.tf_dtype(action['type']),
trainable=False
)
# Terminal
self.terminal_memory = tf.get_variable(
name='terminal',
shape=(self.capacity,),
dtype=util.tf_dtype('bool'),
initializer=tf.constant_initializer(
value=tuple(n == self.capacity - 1 for n in range(self.capacity)),
dtype=util.tf_dtype('bool')
),
trainable=False
)
# Reward
self.reward_memory = tf.get_variable(
name='reward',
shape=(self.capacity,),
dtype=util.tf_dtype('float'),
trainable=False
)
# Memory index - current insertion index.
self.memory_index = tf.get_variable(
name='memory-index',
dtype=util.tf_dtype('int'),
initializer=0,
trainable=False
)
# Priorities
self.priorities = tf.get_variable(
name='priorities',
shape=(self.capacity,),
dtype=util.tf_dtype('float'),
trainable=False
)
# Buffer variables. The buffer is used to insert data for which we
# do not have priorities yet.
self.buffer_index = tf.get_variable(
name='buffer-index',
dtype=util.tf_dtype('int'),
initializer=0,
trainable=False
)
self.states_buffer = dict()
for name, state in self.states_spec.items():
self.states_buffer[name] = tf.get_variable(
name=('state-buffer-' + name),
shape=(self.buffer_size,) + tuple(state['shape']),
dtype=util.tf_dtype(state['type']),
trainable=False
)
# Internals
self.internals_buffer = dict()
for name, internal in self.internals_spec.items():
self.internals_buffer[name] = tf.get_variable(
name=('internal-buffer-' + name),
shape=(self.capacity,) + tuple(internal['shape']),
dtype=util.tf_dtype(internal['type']),
trainable=False
)
# Actions
self.actions_buffer = dict()
for name, action in self.actions_spec.items():
self.actions_buffer[name] = tf.get_variable(
name=('action-buffer-' + name),
shape=(self.buffer_size,) + tuple(action['shape']),
dtype=util.tf_dtype(action['type']),
trainable=False
)
# Terminal
self.terminal_buffer = tf.get_variable(
name='terminal-buffer',
shape=(self.capacity,),
dtype=util.tf_dtype('bool'),
initializer=tf.constant_initializer(
value=tuple(n == self.buffer_size - 1 for n in range(self.capacity)),
dtype=util.tf_dtype('bool')
),
trainable=False
)
# Reward
self.reward_buffer = tf.get_variable(
name='reward-buffer',
shape=(self.buffer_size,),
dtype=util.tf_dtype('float'),
trainable=False
)
# Indices of batch experiences in main memory.
self.batch_indices = tf.get_variable(
name='batch-indices',
dtype=util.tf_dtype('int'),
shape=(self.capacity,),
trainable=False
)
# Number of elements taken from the buffer in the last batch.
self.last_batch_buffer_elems = tf.get_variable(
name='last-batch-buffer-elems',
dtype=util.tf_dtype('int'),
initializer=0,
trainable=False
)
self.memory_size = tf.get_variable(
name='memory-size',
dtype=util.tf_dtype('int'),
initializer=0,
trainable=False
)
    def tf_store(self, states, internals, actions, terminal, reward):
        """Appends a batch of experiences to the insertion buffer.

        Args:
            states: Dict of state tensors (leading dim = batch).
            internals: Dict of internal-state tensors.
            actions: Dict of action tensors.
            terminal: 1-D bool tensor of terminal flags.
            reward: 1-D float tensor of rewards.

        Returns:
            A no-op sequenced after all buffer assignments.
        """
        # We first store new experiences into a buffer that is separate from main memory.
        # We insert these into the main memory once we have computed priorities on a given batch.
        num_instances = tf.shape(input=terminal)[0]
        start_index = self.buffer_index
        # TODO this is dangerous if the buffer is too small for the rate at which new
        # experiences arrive and are not taking out of the buffer
        end_index = self.buffer_index + num_instances

        # Assign new observations.
        assignments = list()
        for name, state in states.items():
            assignments.append(tf.assign(ref=self.states_buffer[name][start_index:end_index], value=state))
        for name, internal in internals.items():
            assignments.append(tf.assign(
                ref=self.internals_buffer[name][start_index:end_index],
                value=internal
            ))
        # start_index = tf.Print(start_index, [start_index], 'start buffer index in store=')
        # end_index = tf.Print(end_index, [end_index], 'end buffer index in store=')
        for name, action in actions.items():
            assignments.append(tf.assign(ref=self.actions_buffer[name][start_index:end_index], value=action))
        assignments.append(tf.assign(ref=self.terminal_buffer[start_index:end_index], value=terminal))
        assignments.append(tf.assign(ref=self.reward_buffer[start_index:end_index], value=reward))

        # Increment memory index.
        # NOTE(review): buffer_index is not wrapped modulo buffer_size here;
        # overflow is only avoided if tf_update_batch drains the buffer often
        # enough -- confirm buffer_size vs. insertion rate.
        with tf.control_dependencies(control_inputs=assignments):
            assignment = tf.assign(ref=self.buffer_index, value=(self.buffer_index + num_instances))

        with tf.control_dependencies(control_inputs=(assignment,)):
            return tf.no_op()
    def tf_retrieve_timesteps(self, n):
        """Samples `n` experiences: the most recent buffer entries first,
        topped up with priority-weighted samples from main memory.

        Args:
            n: Scalar int tensor, number of timesteps to retrieve.

        Returns:
            Batch of experiences (see tf_retrieve_indices).
        """
        num_buffer_elems = tf.minimum(x=self.buffer_index, y=n)
        # We can only sample from priority memory if buffer elements were previously inserted.
        num_priority_elements = tf.cond(
            pred=self.memory_size > 0,
            true_fn=lambda: n - num_buffer_elems,
            false_fn=lambda: 0
        )
        num_priority_elements = tf.Print(num_priority_elements, [num_priority_elements], 'num_priority_elements in retrieve = ')

        def sampling_fn():
            # Vectorized sampling: each sample walks the cumulative
            # normalized-priority distribution until it turns non-positive.
            sum_priorities = tf.reduce_sum(input_tensor=self.priorities, axis=0)
            sample = tf.random_uniform(shape=(num_priority_elements,), dtype=tf.float32)
            indices = tf.zeros(shape=(num_priority_elements,), dtype=tf.int32)

            def cond(loop_index, sample):
                # NOTE(review): this continues only while *all* samples are
                # <= 0, i.e. the loop exits as soon as any sample is still
                # positive -- that looks inverted (reduce_any(sample > 0.0)
                # seems intended); confirm against a runnable version.
                return tf.reduce_all(input_tensor=(sample <= 0.0))

            def sampling_body(loop_index, sample):
                priority = tf.gather(params=self.priorities, indices=loop_index)
                sample -= priority / sum_priorities
                # Advance only the indices whose sample is still positive.
                loop_index += tf.cast(
                    x=(sample > 0.0),
                    dtype=tf.int32,
                )

                return loop_index, sample

            priority_indices = tf.while_loop(
                cond=cond,
                body=sampling_body,
                loop_vars=(indices, sample)
            )[0]
            return priority_indices

        priority_indices = tf.cond(
            pred=num_priority_elements > 0,
            true_fn=sampling_fn,
            false_fn=lambda: tf.zeros(shape=(num_priority_elements,), dtype=tf.int32)
        )
        priority_terminal = tf.gather(params=self.terminal_memory, indices=priority_indices)
        # TODO
        priority_indices = tf.Print(priority_indices, [priority_indices], 'priority indices before masking in retrieve=', summarize=1000)
        # Drop sampled indices that point at terminal states.
        priority_indices = tf.boolean_mask(tensor=priority_indices, mask=tf.logical_not(x=priority_terminal))

        # Store how many elements we retrieved from the buffer for updating priorities.
        # Note that this is just the count, as we can reconstruct the indices from that.
        assignments = list()
        assignments.append(tf.assign(ref=self.last_batch_buffer_elems, value=num_buffer_elems))

        # Store indices used from priority memory. Note that these are the full indices
        # as they were not taken in order.
        priority_indices = tf.Print(priority_indices, [priority_indices], 'priority indices in retrieve=', summarize=1000)
        # Mark used slots with 1 so tf_update_batch can reconstruct them.
        assignments.append(tf.scatter_update(
            ref=self.batch_indices,
            indices=priority_indices,
            updates=tf.ones(shape=tf.shape(input=priority_indices), dtype=tf.int32))
        )
        # Fetch results.
        with tf.control_dependencies(control_inputs=assignments):
            return self.retrieve_indices(buffer_elements=num_buffer_elems, priority_indices=priority_indices)
    def tf_retrieve_indices(self, buffer_elements, priority_indices):
        """
        Fetches experiences for given indices by combining entries from buffer
        which have no priorities, and entries from priority memory.

        Args:
            buffer_elements: Number of buffer elements to retrieve
            priority_indices: Index tensor for priority memory

        Returns: Batch of experiences
        """
        states = dict()
        # Buffer entries are the most recent `buffer_elements` slots before
        # the current insertion index.
        # buffer_elements = tf.Print(buffer_elements, [buffer_elements], 'buffer elements in retrieve =', summarize=100)
        buffer_start = self.buffer_index - buffer_elements
        # buffer_start = tf.Print(buffer_start, [buffer_start], 'buffer start in retrieve =', summarize=100)
        buffer_end = self.buffer_index
        # buffer_end = tf.Print(buffer_end, [buffer_end], 'buffer_end in retrieve =', summarize=100)

        # Fetch entries from respective memories, concat.
        for name, state_memory in self.states_memory.items():
            buffer_state_memory = self.states_buffer[name]
            buffer_states = buffer_state_memory[buffer_start:buffer_end]
            memory_states = tf.gather(params=state_memory, indices=priority_indices)
            # buffer_states = tf.Print(buffer_states, [buffer_states], "buffer states=", summarize=100)
            # memory_states = tf.Print(memory_states, [memory_states], "memory states=", summarize=100)
            states[name] = tf.concat(values=(buffer_states, memory_states), axis=0)

        internals = dict()
        for name, internal_memory in self.internals_memory.items():
            internal_buffer_memory = self.internals_buffer[name]
            buffer_internals = internal_buffer_memory[buffer_start:buffer_end]
            memory_internals = tf.gather(params=internal_memory, indices=priority_indices)
            internals[name] = tf.concat(values=(buffer_internals, memory_internals), axis=0)

        actions = dict()
        for name, action_memory in self.actions_memory.items():
            action_buffer_memory = self.actions_buffer[name]
            buffer_action = action_buffer_memory[buffer_start:buffer_end]
            memory_action = tf.gather(params=action_memory, indices=priority_indices)
            actions[name] = tf.concat(values=(buffer_action, memory_action), axis=0)

        buffer_terminal = self.terminal_buffer[buffer_start:buffer_end]
        priority_terminal = tf.gather(params=self.terminal_memory, indices=priority_indices)
        terminal = tf.concat(values=(buffer_terminal, priority_terminal), axis=0)

        buffer_reward = self.reward_buffer[buffer_start:buffer_end]
        priority_reward = tf.gather(params=self.reward_memory, indices=priority_indices)
        reward = tf.concat(values=(buffer_reward, priority_reward), axis=0)

        if self.include_next_states:
            assert util.rank(priority_indices) == 1
            # Next states live at index + 1, wrapped modulo the respective
            # storage size.
            next_priority_indices = (priority_indices + 1) % self.capacity
            next_buffer_start = (buffer_start + 1) % self.buffer_size
            next_buffer_end = (buffer_end + 1) % self.buffer_size
            # else:
            #     next_indices = (indices[:, -1] + 1) % self.capacity

            next_states = dict()
            for name, state_memory in self.states_memory.items():
                buffer_state_memory = self.states_buffer[name]
                buffer_next_states = buffer_state_memory[next_buffer_start:next_buffer_end]
                memory_next_states = tf.gather(params=state_memory, indices=next_priority_indices)
                next_states[name] = tf.concat(values=(buffer_next_states, memory_next_states), axis=0)

            next_internals = dict()
            for name, internal_memory in self.internals_memory.items():
                buffer_internal_memory = self.internals_buffer[name]
                buffer_next_internals = buffer_internal_memory[next_buffer_start:next_buffer_end]
                memory_next_internals = tf.gather(params=internal_memory, indices=next_priority_indices)
                next_internals[name] = tf.concat(values=(buffer_next_internals, memory_next_internals), axis=0)

            return dict(
                states=states,
                internals=internals,
                actions=actions,
                terminal=terminal,
                reward=reward,
                next_states=next_states,
                next_internals=next_internals
            )
        else:
            return dict(
                states=states,
                internals=internals,
                actions=actions,
                terminal=terminal,
                reward=reward
            )
    def tf_update_batch(self, loss_per_instance):
        """
        Updates priority memory by performing the following steps:

        1. Use saved indices from prior retrieval to reconstruct the batch
           elements which will have their priorities updated.
        2. Compute priorities for these elements.
        3. Insert buffer elements to memory, potentially overwriting existing elements.
        4. Update priorities of existing memory elements
        5. Resort memory.
        6. Update buffer insertion index.

        Note that this implementation could be made more efficient by maintaining
        a sorted version via sum trees.

        :param loss_per_instance: Losses from recent batch to perform priority update
        """
        # 1. We reconstruct the batch from the buffer and the priority memory via
        # the TensorFlow variables holding the respective indices.
        mask = tf.not_equal(
            x=self.batch_indices,
            y=tf.zeros(shape=tf.shape(input=self.batch_indices), dtype=tf.int32)
        )
        priority_indices = tf.reshape(tensor=tf.where(condition=mask), shape=[-1])
        priority_indices = tf.Print(priority_indices, [priority_indices, tf.shape(priority_indices)], message="Priority indices in update")
        # These are elements from the buffer which first need to be inserted into the main memory.
        sampled_buffer_batch = self.tf_retrieve_indices(
            buffer_elements=self.last_batch_buffer_elems,
            priority_indices=priority_indices
        )

        # Extract batch elements.
        states = sampled_buffer_batch['states']
        internals = sampled_buffer_batch['internals']
        actions = sampled_buffer_batch['actions']
        terminal = sampled_buffer_batch['terminal']
        reward = sampled_buffer_batch['reward']

        # 2. Compute priorities for all batch elements.
        priorities = loss_per_instance ** self.prioritization_weight
        assignments = list()

        # Slice out priorities of buffer.
        buffer_priorities = priorities[0:self.last_batch_buffer_elems]
        buffer_priorities = tf.Print(buffer_priorities, [buffer_priorities, tf.shape(buffer_priorities)], 'buffer priorities =', summarize=1000)
        # 3. Insert the buffer elements from the recent batch into memory.
        start_index = self.memory_index
        # start_index = tf.Print(start_index, [start_index], 'memory start index in update =')
        end_index = (start_index + self.last_batch_buffer_elems) % self.capacity
        # end_index = tf.Print(end_index, [end_index], 'memory end index in update =')
        reward = tf.Print(reward, [reward, tf.shape(reward)], 'reward=', summarize=1000)
        for name, state in states.items():
            assignments.append(tf.assign(ref=self.states_memory[name][start_index:end_index], value=state))
        for name, internal in internals.items():
            # NOTE(review): this writes into internals_buffer while states and
            # actions go into *_memory -- internals_memory looks intended here;
            # confirm.
            assignments.append(tf.assign(
                ref=self.internals_buffer[name][start_index:end_index],
                value=internal
            ))
        assignments.append(tf.assign(ref=self.priorities[start_index:end_index], value=buffer_priorities))
        assignments.append(tf.assign(ref=self.terminal_memory[start_index:end_index], value=terminal))
        assignments.append(tf.assign(ref=self.reward_memory[start_index:end_index], value=reward))
        # NOTE(review): this slice was already assigned buffer_priorities
        # above; assigning the *full* `priorities` tensor into the same slice
        # looks like a duplicate/shape-mismatched leftover -- confirm which
        # assignment is intended.
        assignments.append(tf.assign(ref=self.priorities[start_index:end_index], value=priorities))
        for name, action in actions.items():
            assignments.append(tf.assign(ref=self.actions_memory[name][start_index:end_index], value=action))

        # 4.Update the priorities of the elements already in the memory.
        # TODO this could now overwrite priorities from inserted buffer elements?
        # Slice out remaining elements - [] if all batch elements were from buffer.
        main_memory_priorities = priorities[self.last_batch_buffer_elems:]
        assignments.append(tf.scatter_update(
            ref=self.priorities,
            indices=priority_indices,
            updates=main_memory_priorities
        ))

        with tf.control_dependencies(control_inputs=assignments):
            # 5. Re-sort memory according to priorities.
            assignments = list()
            # Obtain sorted order and indices.
            sorted_priorities, sorted_indices = tf.nn.top_k(
                input=self.priorities,
                k=self.capacity,
                sorted=True
            )
            # Re-assign elements according to priorities.
            # Priorities was the tensor we used to sort, so this can be directly assigned.
            assignments.append(tf.assign(ref=self.priorities, value=sorted_priorities))

            # All other memory variables are assigned via scatter updates using the indices
            # returned by the sort:
            # NOTE(review): none of the scatter_update ops below are appended
            # to `assignments`, so they are never placed in the control
            # dependency chain and may be pruned from the graph -- the
            # re-sort of everything except `priorities` likely never runs.
            tf.scatter_update(
                ref=self.terminal_memory,
                indices=sorted_indices,
                updates=self.terminal_memory  # TODO is ref = updates tensor allowed?
            )
            for name, state_memory in self.states_memory.items():
                tf.scatter_update(
                    ref=self.states_memory[name],
                    indices=sorted_indices,
                    updates=self.states_memory[name]
                )
            for name, action in self.actions_memory.items():
                tf.scatter_update(
                    ref=self.actions_memory[name],
                    indices=sorted_indices,
                    updates=self.actions_memory[name]
                )
            for name, internal in self.internals_memory.items():
                tf.scatter_update(
                    ref=self.internals_memory[name],
                    indices=sorted_indices,
                    updates=self.internals_memory[name]
                )
            tf.scatter_update(
                ref=self.reward_memory,
                indices=sorted_indices,
                updates=self.reward_memory
            )

        # 6. Reset buffer index and increment memory index by inserted elements.
        with tf.control_dependencies(control_inputs=assignments):
            assignments = list()
            # Decrement pointer of last elements used.
            assignments.append(tf.assign_sub(ref=self.buffer_index, value=self.last_batch_buffer_elems))

            # Increment memory insertion index.
            assignments.append(tf.assign(ref=self.memory_index, value=end_index))

            # Keep track of memory size as to know whether we can sample from the main memory.
            # NOTE(review): tf.maximum(end_index, capacity) always evaluates
            # to capacity, so memory_size jumps straight to capacity after the
            # first update; tf.minimum (or an accumulating min with capacity)
            # looks intended -- confirm.
            assignments.append(tf.assign(ref=self.memory_size, value=tf.maximum(x=end_index, y=self.capacity)))

            # Zero out processed elements from batch indices.
            assignments.append(tf.scatter_update(
                ref=self.batch_indices,
                indices=priority_indices,
                updates=tf.zeros(shape=tf.shape(priority_indices), dtype=tf.int32)
            ))
        with tf.control_dependencies(control_inputs=assignments):
            return tf.no_op()
    # These are not supported for prioritized replay currently.
    def tf_retrieve_episodes(self, n):
        # Episode-based retrieval is intentionally unimplemented for
        # prioritized replay; callers receive None.
        pass

    def tf_retrieve_sequences(self, n, sequence_length):
        # Sequence-based retrieval is intentionally unimplemented for
        # prioritized replay; callers receive None.
        pass
| |
"""
Please read README.md for usage instructions.
Extracts Caffe parameters from a given caffemodel/prototxt to a dictionary of numpy arrays,
ready for conversion to TensorFlow variables. Writes the dictionary to a .npy file.
"""
import argparse
import caffe
import numpy as np
import os
import tempfile
FLAGS = None  # argparse namespace; populated in the __main__ guard below
ARCHS = {
'C': {
'CAFFEMODEL': '../models/FlowNet2-C/FlowNet2-C_weights.caffemodel',
'DEPLOY_PROTOTXT': '../models/FlowNet2-C/FlowNet2-C_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
'conv1': 'FlowNetC/conv1',
'conv2': 'FlowNetC/conv2',
'conv3': 'FlowNetC/conv3',
'conv_redir': 'FlowNetC/conv_redir',
'conv3_1': 'FlowNetC/conv3_1',
'conv4': 'FlowNetC/conv4',
'conv4_1': 'FlowNetC/conv4_1',
'conv5': 'FlowNetC/conv5',
'conv5_1': 'FlowNetC/conv5_1',
'conv6': 'FlowNetC/conv6',
'conv6_1': 'FlowNetC/conv6_1',
'Convolution1': 'FlowNetC/predict_flow6',
'deconv5': 'FlowNetC/deconv5',
'upsample_flow6to5': 'FlowNetC/upsample_flow6to5',
'Convolution2': 'FlowNetC/predict_flow5',
'deconv4': 'FlowNetC/deconv4',
'upsample_flow5to4': 'FlowNetC/upsample_flow5to4',
'Convolution3': 'FlowNetC/predict_flow4',
'deconv3': 'FlowNetC/deconv3',
'upsample_flow4to3': 'FlowNetC/upsample_flow4to3',
'Convolution4': 'FlowNetC/predict_flow3',
'deconv2': 'FlowNetC/deconv2',
'upsample_flow3to2': 'FlowNetC/upsample_flow3to2',
'Convolution5': 'FlowNetC/predict_flow2',
}
},
'S': {
'CAFFEMODEL': '../models/FlowNet2-S/FlowNet2-S_weights.caffemodel.h5',
'DEPLOY_PROTOTXT': '../models/FlowNet2-S/FlowNet2-S_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
'conv1': 'FlowNetS/conv1',
'conv2': 'FlowNetS/conv2',
'conv3': 'FlowNetS/conv3',
'conv3_1': 'FlowNetS/conv3_1',
'conv4': 'FlowNetS/conv4',
'conv4_1': 'FlowNetS/conv4_1',
'conv5': 'FlowNetS/conv5',
'conv5_1': 'FlowNetS/conv5_1',
'conv6': 'FlowNetS/conv6',
'conv6_1': 'FlowNetS/conv6_1',
'Convolution1': 'FlowNetS/predict_flow6',
'deconv5': 'FlowNetS/deconv5',
'upsample_flow6to5': 'FlowNetS/upsample_flow6to5',
'Convolution2': 'FlowNetS/predict_flow5',
'deconv4': 'FlowNetS/deconv4',
'upsample_flow5to4': 'FlowNetS/upsample_flow5to4',
'Convolution3': 'FlowNetS/predict_flow4',
'deconv3': 'FlowNetS/deconv3',
'upsample_flow4to3': 'FlowNetS/upsample_flow4to3',
'Convolution4': 'FlowNetS/predict_flow3',
'deconv2': 'FlowNetS/deconv2',
'upsample_flow3to2': 'FlowNetS/upsample_flow3to2',
'Convolution5': 'FlowNetS/predict_flow2',
}
},
'CS': {
'CAFFEMODEL': '../models/FlowNet2-CS/FlowNet2-CS_weights.caffemodel',
'DEPLOY_PROTOTXT': '../models/FlowNet2-CS/FlowNet2-CS_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
# Net C
'conv1': 'FlowNetCS/FlowNetC/conv1',
'conv2': 'FlowNetCS/FlowNetC/conv2',
'conv3': 'FlowNetCS/FlowNetC/conv3',
'conv_redir': 'FlowNetCS/FlowNetC/conv_redir',
'conv3_1': 'FlowNetCS/FlowNetC/conv3_1',
'conv4': 'FlowNetCS/FlowNetC/conv4',
'conv4_1': 'FlowNetCS/FlowNetC/conv4_1',
'conv5': 'FlowNetCS/FlowNetC/conv5',
'conv5_1': 'FlowNetCS/FlowNetC/conv5_1',
'conv6': 'FlowNetCS/FlowNetC/conv6',
'conv6_1': 'FlowNetCS/FlowNetC/conv6_1',
'Convolution1': 'FlowNetCS/FlowNetC/predict_flow6',
'deconv5': 'FlowNetCS/FlowNetC/deconv5',
'upsample_flow6to5': 'FlowNetCS/FlowNetC/upsample_flow6to5',
'Convolution2': 'FlowNetCS/FlowNetC/predict_flow5',
'deconv4': 'FlowNetCS/FlowNetC/deconv4',
'upsample_flow5to4': 'FlowNetCS/FlowNetC/upsample_flow5to4',
'Convolution3': 'FlowNetCS/FlowNetC/predict_flow4',
'deconv3': 'FlowNetCS/FlowNetC/deconv3',
'upsample_flow4to3': 'FlowNetCS/FlowNetC/upsample_flow4to3',
'Convolution4': 'FlowNetCS/FlowNetC/predict_flow3',
'deconv2': 'FlowNetCS/FlowNetC/deconv2',
'upsample_flow3to2': 'FlowNetCS/FlowNetC/upsample_flow3to2',
'Convolution5': 'FlowNetCS/FlowNetC/predict_flow2',
# Net S
'net2_conv1': 'FlowNetCS/FlowNetS/conv1',
'net2_conv2': 'FlowNetCS/FlowNetS/conv2',
'net2_conv3': 'FlowNetCS/FlowNetS/conv3',
'net2_conv3_1': 'FlowNetCS/FlowNetS/conv3_1',
'net2_conv4': 'FlowNetCS/FlowNetS/conv4',
'net2_conv4_1': 'FlowNetCS/FlowNetS/conv4_1',
'net2_conv5': 'FlowNetCS/FlowNetS/conv5',
'net2_conv5_1': 'FlowNetCS/FlowNetS/conv5_1',
'net2_conv6': 'FlowNetCS/FlowNetS/conv6',
'net2_conv6_1': 'FlowNetCS/FlowNetS/conv6_1',
'net2_predict_conv6': 'FlowNetCS/FlowNetS/predict_flow6',
'net2_deconv5': 'FlowNetCS/FlowNetS/deconv5',
'net2_net2_upsample_flow6to5': 'FlowNetCS/FlowNetS/upsample_flow6to5',
'net2_predict_conv5': 'FlowNetCS/FlowNetS/predict_flow5',
'net2_deconv4': 'FlowNetCS/FlowNetS/deconv4',
'net2_net2_upsample_flow5to4': 'FlowNetCS/FlowNetS/upsample_flow5to4',
'net2_predict_conv4': 'FlowNetCS/FlowNetS/predict_flow4',
'net2_deconv3': 'FlowNetCS/FlowNetS/deconv3',
'net2_net2_upsample_flow4to3': 'FlowNetCS/FlowNetS/upsample_flow4to3',
'net2_predict_conv3': 'FlowNetCS/FlowNetS/predict_flow3',
'net2_deconv2': 'FlowNetCS/FlowNetS/deconv2',
'net2_net2_upsample_flow3to2': 'FlowNetCS/FlowNetS/upsample_flow3to2',
'net2_predict_conv2': 'FlowNetCS/FlowNetS/predict_flow2',
}
},
'CSS': {
'CAFFEMODEL': '../models/FlowNet2-CSS/FlowNet2-CSS_weights.caffemodel.h5',
'DEPLOY_PROTOTXT': '../models/FlowNet2-CSS/FlowNet2-CSS_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
# Net C
'conv1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv1',
'conv2': 'FlowNetCSS/FlowNetCS/FlowNetC/conv2',
'conv3': 'FlowNetCSS/FlowNetCS/FlowNetC/conv3',
'conv_redir': 'FlowNetCSS/FlowNetCS/FlowNetC/conv_redir',
'conv3_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv3_1',
'conv4': 'FlowNetCSS/FlowNetCS/FlowNetC/conv4',
'conv4_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv4_1',
'conv5': 'FlowNetCSS/FlowNetCS/FlowNetC/conv5',
'conv5_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv5_1',
'conv6': 'FlowNetCSS/FlowNetCS/FlowNetC/conv6',
'conv6_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv6_1',
'Convolution1': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow6',
'deconv5': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv5',
'upsample_flow6to5': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow6to5',
'Convolution2': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow5',
'deconv4': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv4',
'upsample_flow5to4': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow5to4',
'Convolution3': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow4',
'deconv3': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv3',
'upsample_flow4to3': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow4to3',
'Convolution4': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow3',
'deconv2': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv2',
'upsample_flow3to2': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow3to2',
'Convolution5': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow2',
# Net S 1
'net2_conv1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv1',
'net2_conv2': 'FlowNetCSS/FlowNetCS/FlowNetS/conv2',
'net2_conv3': 'FlowNetCSS/FlowNetCS/FlowNetS/conv3',
'net2_conv3_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv3_1',
'net2_conv4': 'FlowNetCSS/FlowNetCS/FlowNetS/conv4',
'net2_conv4_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv4_1',
'net2_conv5': 'FlowNetCSS/FlowNetCS/FlowNetS/conv5',
'net2_conv5_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv5_1',
'net2_conv6': 'FlowNetCSS/FlowNetCS/FlowNetS/conv6',
'net2_conv6_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv6_1',
'net2_predict_conv6': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow6',
'net2_deconv5': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv5',
'net2_net2_upsample_flow6to5': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow6to5',
'net2_predict_conv5': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow5',
'net2_deconv4': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv4',
'net2_net2_upsample_flow5to4': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow5to4',
'net2_predict_conv4': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow4',
'net2_deconv3': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv3',
'net2_net2_upsample_flow4to3': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow4to3',
'net2_predict_conv3': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow3',
'net2_deconv2': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv2',
'net2_net2_upsample_flow3to2': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow3to2',
'net2_predict_conv2': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow2',
# Net S 2
'net3_conv1': 'FlowNetCSS/FlowNetS/conv1',
'net3_conv2': 'FlowNetCSS/FlowNetS/conv2',
'net3_conv3': 'FlowNetCSS/FlowNetS/conv3',
'net3_conv3_1': 'FlowNetCSS/FlowNetS/conv3_1',
'net3_conv4': 'FlowNetCSS/FlowNetS/conv4',
'net3_conv4_1': 'FlowNetCSS/FlowNetS/conv4_1',
'net3_conv5': 'FlowNetCSS/FlowNetS/conv5',
'net3_conv5_1': 'FlowNetCSS/FlowNetS/conv5_1',
'net3_conv6': 'FlowNetCSS/FlowNetS/conv6',
'net3_conv6_1': 'FlowNetCSS/FlowNetS/conv6_1',
'net3_predict_conv6': 'FlowNetCSS/FlowNetS/predict_flow6',
'net3_deconv5': 'FlowNetCSS/FlowNetS/deconv5',
'net3_net3_upsample_flow6to5': 'FlowNetCSS/FlowNetS/upsample_flow6to5',
'net3_predict_conv5': 'FlowNetCSS/FlowNetS/predict_flow5',
'net3_deconv4': 'FlowNetCSS/FlowNetS/deconv4',
'net3_net3_upsample_flow5to4': 'FlowNetCSS/FlowNetS/upsample_flow5to4',
'net3_predict_conv4': 'FlowNetCSS/FlowNetS/predict_flow4',
'net3_deconv3': 'FlowNetCSS/FlowNetS/deconv3',
'net3_net3_upsample_flow4to3': 'FlowNetCSS/FlowNetS/upsample_flow4to3',
'net3_predict_conv3': 'FlowNetCSS/FlowNetS/predict_flow3',
'net3_deconv2': 'FlowNetCSS/FlowNetS/deconv2',
'net3_net3_upsample_flow3to2': 'FlowNetCSS/FlowNetS/upsample_flow3to2',
'net3_predict_conv2': 'FlowNetCSS/FlowNetS/predict_flow2',
},
},
'CSS-ft-sd': {
'CAFFEMODEL': '../models/FlowNet2-CSS-ft-sd/FlowNet2-CSS-ft-sd_weights.caffemodel.h5',
'DEPLOY_PROTOTXT': '../models/FlowNet2-CSS-ft-sd/FlowNet2-CSS-ft-sd_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
# Net C
'conv1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv1',
'conv2': 'FlowNetCSS/FlowNetCS/FlowNetC/conv2',
'conv3': 'FlowNetCSS/FlowNetCS/FlowNetC/conv3',
'conv_redir': 'FlowNetCSS/FlowNetCS/FlowNetC/conv_redir',
'conv3_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv3_1',
'conv4': 'FlowNetCSS/FlowNetCS/FlowNetC/conv4',
'conv4_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv4_1',
'conv5': 'FlowNetCSS/FlowNetCS/FlowNetC/conv5',
'conv5_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv5_1',
'conv6': 'FlowNetCSS/FlowNetCS/FlowNetC/conv6',
'conv6_1': 'FlowNetCSS/FlowNetCS/FlowNetC/conv6_1',
'Convolution1': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow6',
'deconv5': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv5',
'upsample_flow6to5': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow6to5',
'Convolution2': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow5',
'deconv4': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv4',
'upsample_flow5to4': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow5to4',
'Convolution3': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow4',
'deconv3': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv3',
'upsample_flow4to3': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow4to3',
'Convolution4': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow3',
'deconv2': 'FlowNetCSS/FlowNetCS/FlowNetC/deconv2',
'upsample_flow3to2': 'FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow3to2',
'Convolution5': 'FlowNetCSS/FlowNetCS/FlowNetC/predict_flow2',
# Net S 1
'net2_conv1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv1',
'net2_conv2': 'FlowNetCSS/FlowNetCS/FlowNetS/conv2',
'net2_conv3': 'FlowNetCSS/FlowNetCS/FlowNetS/conv3',
'net2_conv3_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv3_1',
'net2_conv4': 'FlowNetCSS/FlowNetCS/FlowNetS/conv4',
'net2_conv4_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv4_1',
'net2_conv5': 'FlowNetCSS/FlowNetCS/FlowNetS/conv5',
'net2_conv5_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv5_1',
'net2_conv6': 'FlowNetCSS/FlowNetCS/FlowNetS/conv6',
'net2_conv6_1': 'FlowNetCSS/FlowNetCS/FlowNetS/conv6_1',
'net2_predict_conv6': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow6',
'net2_deconv5': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv5',
'net2_net2_upsample_flow6to5': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow6to5',
'net2_predict_conv5': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow5',
'net2_deconv4': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv4',
'net2_net2_upsample_flow5to4': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow5to4',
'net2_predict_conv4': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow4',
'net2_deconv3': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv3',
'net2_net2_upsample_flow4to3': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow4to3',
'net2_predict_conv3': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow3',
'net2_deconv2': 'FlowNetCSS/FlowNetCS/FlowNetS/deconv2',
'net2_net2_upsample_flow3to2': 'FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow3to2',
'net2_predict_conv2': 'FlowNetCSS/FlowNetCS/FlowNetS/predict_flow2',
# Net S 2
'net3_conv1': 'FlowNetCSS/FlowNetS/conv1',
'net3_conv2': 'FlowNetCSS/FlowNetS/conv2',
'net3_conv3': 'FlowNetCSS/FlowNetS/conv3',
'net3_conv3_1': 'FlowNetCSS/FlowNetS/conv3_1',
'net3_conv4': 'FlowNetCSS/FlowNetS/conv4',
'net3_conv4_1': 'FlowNetCSS/FlowNetS/conv4_1',
'net3_conv5': 'FlowNetCSS/FlowNetS/conv5',
'net3_conv5_1': 'FlowNetCSS/FlowNetS/conv5_1',
'net3_conv6': 'FlowNetCSS/FlowNetS/conv6',
'net3_conv6_1': 'FlowNetCSS/FlowNetS/conv6_1',
'net3_predict_conv6': 'FlowNetCSS/FlowNetS/predict_flow6',
'net3_deconv5': 'FlowNetCSS/FlowNetS/deconv5',
'net3_net3_upsample_flow6to5': 'FlowNetCSS/FlowNetS/upsample_flow6to5',
'net3_predict_conv5': 'FlowNetCSS/FlowNetS/predict_flow5',
'net3_deconv4': 'FlowNetCSS/FlowNetS/deconv4',
'net3_net3_upsample_flow5to4': 'FlowNetCSS/FlowNetS/upsample_flow5to4',
'net3_predict_conv4': 'FlowNetCSS/FlowNetS/predict_flow4',
'net3_deconv3': 'FlowNetCSS/FlowNetS/deconv3',
'net3_net3_upsample_flow4to3': 'FlowNetCSS/FlowNetS/upsample_flow4to3',
'net3_predict_conv3': 'FlowNetCSS/FlowNetS/predict_flow3',
'net3_deconv2': 'FlowNetCSS/FlowNetS/deconv2',
'net3_net3_upsample_flow3to2': 'FlowNetCSS/FlowNetS/upsample_flow3to2',
'net3_predict_conv2': 'FlowNetCSS/FlowNetS/predict_flow2',
},
},
'SD': {
'CAFFEMODEL': '../models/FlowNet2-SD/FlowNet2-SD_weights.caffemodel.h5',
'DEPLOY_PROTOTXT': '../models/FlowNet2-SD/FlowNet2-SD_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
'conv0': 'FlowNetSD/conv0',
'conv1': 'FlowNetSD/conv1',
'conv1_1': 'FlowNetSD/conv1_1',
'conv2': 'FlowNetSD/conv2',
'conv2_1': 'FlowNetSD/conv2_1',
'conv3': 'FlowNetSD/conv3',
'conv3_1': 'FlowNetSD/conv3_1',
'conv4': 'FlowNetSD/conv4',
'conv4_1': 'FlowNetSD/conv4_1',
'conv5': 'FlowNetSD/conv5',
'conv5_1': 'FlowNetSD/conv5_1',
'conv6': 'FlowNetSD/conv6',
'conv6_1': 'FlowNetSD/conv6_1',
'Convolution1': 'FlowNetSD/predict_flow6',
'deconv5': 'FlowNetSD/deconv5',
'upsample_flow6to5': 'FlowNetSD/upsample_flow6to5',
'interconv5': 'FlowNetSD/interconv5',
'Convolution2': 'FlowNetSD/predict_flow5',
'deconv4': 'FlowNetSD/deconv4',
'upsample_flow5to4': 'FlowNetSD/upsample_flow5to4',
'interconv4': 'FlowNetSD/interconv4',
'Convolution3': 'FlowNetSD/predict_flow4',
'deconv3': 'FlowNetSD/deconv3',
'upsample_flow4to3': 'FlowNetSD/upsample_flow4to3',
'interconv3': 'FlowNetSD/interconv3',
'Convolution4': 'FlowNetSD/predict_flow3',
'deconv2': 'FlowNetSD/deconv2',
'upsample_flow3to2': 'FlowNetSD/upsample_flow3to2',
'interconv2': 'FlowNetSD/interconv2',
'Convolution5': 'FlowNetSD/predict_flow2',
},
},
'2': {
'CAFFEMODEL': '../models/FlowNet2/FlowNet2_weights.caffemodel.h5',
'DEPLOY_PROTOTXT': '../models/FlowNet2/FlowNet2_deploy.prototxt.template',
# Mappings between Caffe parameter names and TensorFlow variable names
'PARAMS': {
# Net C
'conv1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv1',
'conv2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv2',
'conv3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv3',
'conv_redir': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv_redir',
'conv3_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv3_1',
'conv4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv4',
'conv4_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv4_1',
'conv5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv5',
'conv5_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv5_1',
'conv6': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv6',
'conv6_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/conv6_1',
'Convolution1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/predict_flow6',
'deconv5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/deconv5',
'upsample_flow6to5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow6to5',
'Convolution2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/predict_flow5',
'deconv4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/deconv4',
'upsample_flow5to4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow5to4',
'Convolution3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/predict_flow4',
'deconv3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/deconv3',
'upsample_flow4to3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow4to3',
'Convolution4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/predict_flow3',
'deconv2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/deconv2',
'upsample_flow3to2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/upsample_flow3to2',
'Convolution5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetC/predict_flow2',
# Net S 1
'net2_conv1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv1',
'net2_conv2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv2',
'net2_conv3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv3',
'net2_conv3_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv3_1',
'net2_conv4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv4',
'net2_conv4_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv4_1',
'net2_conv5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv5',
'net2_conv5_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv5_1',
'net2_conv6': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv6',
'net2_conv6_1': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/conv6_1',
'net2_predict_conv6': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/predict_flow6',
'net2_deconv5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/deconv5',
'net2_net2_upsample_flow6to5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow6to5',
'net2_predict_conv5': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/predict_flow5',
'net2_deconv4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/deconv4',
'net2_net2_upsample_flow5to4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow5to4',
'net2_predict_conv4': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/predict_flow4',
'net2_deconv3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/deconv3',
'net2_net2_upsample_flow4to3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow4to3',
'net2_predict_conv3': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/predict_flow3',
'net2_deconv2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/deconv2',
'net2_net2_upsample_flow3to2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/upsample_flow3to2',
'net2_predict_conv2': 'FlowNet2/FlowNetCSS/FlowNetCS/FlowNetS/predict_flow2',
# Net S 2
'net3_conv1': 'FlowNet2/FlowNetCSS/FlowNetS/conv1',
'net3_conv2': 'FlowNet2/FlowNetCSS/FlowNetS/conv2',
'net3_conv3': 'FlowNet2/FlowNetCSS/FlowNetS/conv3',
'net3_conv3_1': 'FlowNet2/FlowNetCSS/FlowNetS/conv3_1',
'net3_conv4': 'FlowNet2/FlowNetCSS/FlowNetS/conv4',
'net3_conv4_1': 'FlowNet2/FlowNetCSS/FlowNetS/conv4_1',
'net3_conv5': 'FlowNet2/FlowNetCSS/FlowNetS/conv5',
'net3_conv5_1': 'FlowNet2/FlowNetCSS/FlowNetS/conv5_1',
'net3_conv6': 'FlowNet2/FlowNetCSS/FlowNetS/conv6',
'net3_conv6_1': 'FlowNet2/FlowNetCSS/FlowNetS/conv6_1',
'net3_predict_conv6': 'FlowNet2/FlowNetCSS/FlowNetS/predict_flow6',
'net3_deconv5': 'FlowNet2/FlowNetCSS/FlowNetS/deconv5',
'net3_net3_upsample_flow6to5': 'FlowNet2/FlowNetCSS/FlowNetS/upsample_flow6to5',
'net3_predict_conv5': 'FlowNet2/FlowNetCSS/FlowNetS/predict_flow5',
'net3_deconv4': 'FlowNet2/FlowNetCSS/FlowNetS/deconv4',
'net3_net3_upsample_flow5to4': 'FlowNet2/FlowNetCSS/FlowNetS/upsample_flow5to4',
'net3_predict_conv4': 'FlowNet2/FlowNetCSS/FlowNetS/predict_flow4',
'net3_deconv3': 'FlowNet2/FlowNetCSS/FlowNetS/deconv3',
'net3_net3_upsample_flow4to3': 'FlowNet2/FlowNetCSS/FlowNetS/upsample_flow4to3',
'net3_predict_conv3': 'FlowNet2/FlowNetCSS/FlowNetS/predict_flow3',
'net3_deconv2': 'FlowNet2/FlowNetCSS/FlowNetS/deconv2',
'net3_net3_upsample_flow3to2': 'FlowNet2/FlowNetCSS/FlowNetS/upsample_flow3to2',
'net3_predict_conv2': 'FlowNet2/FlowNetCSS/FlowNetS/predict_flow2',
# Net SD
'netsd_conv0': 'FlowNet2/FlowNetSD/conv0',
'netsd_conv1': 'FlowNet2/FlowNetSD/conv1',
'netsd_conv1_1': 'FlowNet2/FlowNetSD/conv1_1',
'netsd_conv2': 'FlowNet2/FlowNetSD/conv2',
'netsd_conv2_1': 'FlowNet2/FlowNetSD/conv2_1',
'netsd_conv3': 'FlowNet2/FlowNetSD/conv3',
'netsd_conv3_1': 'FlowNet2/FlowNetSD/conv3_1',
'netsd_conv4': 'FlowNet2/FlowNetSD/conv4',
'netsd_conv4_1': 'FlowNet2/FlowNetSD/conv4_1',
'netsd_conv5': 'FlowNet2/FlowNetSD/conv5',
'netsd_conv5_1': 'FlowNet2/FlowNetSD/conv5_1',
'netsd_conv6': 'FlowNet2/FlowNetSD/conv6',
'netsd_conv6_1': 'FlowNet2/FlowNetSD/conv6_1',
'netsd_Convolution1': 'FlowNet2/FlowNetSD/predict_flow6',
'netsd_deconv5': 'FlowNet2/FlowNetSD/deconv5',
'netsd_upsample_flow6to5': 'FlowNet2/FlowNetSD/upsample_flow6to5',
'netsd_interconv5': 'FlowNet2/FlowNetSD/interconv5',
'netsd_Convolution2': 'FlowNet2/FlowNetSD/predict_flow5',
'netsd_deconv4': 'FlowNet2/FlowNetSD/deconv4',
'netsd_upsample_flow5to4': 'FlowNet2/FlowNetSD/upsample_flow5to4',
'netsd_interconv4': 'FlowNet2/FlowNetSD/interconv4',
'netsd_Convolution3': 'FlowNet2/FlowNetSD/predict_flow4',
'netsd_deconv3': 'FlowNet2/FlowNetSD/deconv3',
'netsd_upsample_flow4to3': 'FlowNet2/FlowNetSD/upsample_flow4to3',
'netsd_interconv3': 'FlowNet2/FlowNetSD/interconv3',
'netsd_Convolution4': 'FlowNet2/FlowNetSD/predict_flow3',
'netsd_deconv2': 'FlowNet2/FlowNetSD/deconv2',
'netsd_upsample_flow3to2': 'FlowNet2/FlowNetSD/upsample_flow3to2',
'netsd_interconv2': 'FlowNet2/FlowNetSD/interconv2',
'netsd_Convolution5': 'FlowNet2/FlowNetSD/predict_flow2',
# Fusion Net
'fuse_conv0': 'FlowNet2/fuse_conv0',
'fuse_conv1': 'FlowNet2/fuse_conv1',
'fuse_conv1_1': 'FlowNet2/fuse_conv1_1',
'fuse_conv2': 'FlowNet2/fuse_conv2',
'fuse_conv2_1': 'FlowNet2/fuse_conv2_1',
'fuse__Convolution5': 'FlowNet2/predict_flow2',
'fuse_deconv1': 'FlowNet2/fuse_deconv1',
'fuse_upsample_flow2to1': 'FlowNet2/fuse_upsample_flow2to1',
'fuse_interconv1': 'FlowNet2/fuse_interconv1',
'fuse__Convolution6': 'FlowNet2/predict_flow1',
'fuse_deconv0': 'FlowNet2/fuse_deconv0',
'fuse_upsample_flow1to0': 'FlowNet2/fuse_upsample_flow1to0',
'fuse_interconv0': 'FlowNet2/fuse_interconv0',
'fuse__Convolution7': 'FlowNet2/predict_flow0',
}
},
}
arch = None  # selected ARCHS entry; assigned from --arch in the __main__ guard
# Setup variables to be injected into prototxt.template
# For now, use the dimensions of the Flying Chair Dataset
# NOTE(review): `vars` shadows the builtin of the same name; renaming it would
# require touching main(), which reads this module-level dict.
vars = {}
vars['TARGET_WIDTH'] = vars['ADAPTED_WIDTH'] = 512
vars['TARGET_HEIGHT'] = vars['ADAPTED_HEIGHT'] = 384
vars['SCALE_WIDTH'] = vars['SCALE_HEIGHT'] = 1.0
def main():
    """Extract Caffe parameters for the selected arch and save them as a
    dict of TF-ordered numpy arrays to FLAGS.out (.npy).

    Reads module globals `arch`, `vars` and `FLAGS` set by the __main__ guard.
    """
    # Create a tempfile to hold the prototxt with template variables injected.
    # `with` guarantees the temp file is deleted even on error (the original
    # left both the tempfile and the prototxt handle for the GC to close).
    with tempfile.NamedTemporaryFile(mode='w', delete=True) as tmp:
        # Parse prototxt and inject `vars`
        with open(arch['DEPLOY_PROTOTXT']) as proto_file:
            proto = proto_file.readlines()
        for line in proto:
            for key, value in vars.items():
                tag = "$%s$" % key
                line = line.replace(tag, str(value))
            tmp.write(line)
        tmp.flush()
        # Instantiate the Caffe model while the tempfile still exists on disk.
        net = caffe.Net(tmp.name, arch['CAFFEMODEL'], caffe.TEST)
    out = {}
    for (caffe_param, tf_param) in arch['PARAMS'].items():
        # Caffe stores weights as (channels_out, channels_in, h, w)
        # but TF expects (h, w, channels_in, channels_out)
        out[tf_param + '/weights'] = net.params[caffe_param][0].data.transpose((2, 3, 1, 0))
        out[tf_param + '/biases'] = net.params[caffe_param][1].data
    np.save(FLAGS.out, out)
if __name__ == '__main__':
    # Parse CLI flags, then populate the module globals (FLAGS, arch) that
    # main() reads before invoking it.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--out',
        type=str,
        required=True,
        help='Output file path, eg /foo/bar.npy'
    )
    parser.add_argument(
        '--arch',
        type=str,
        choices=['C', 'S', 'CS', 'CSS', 'CSS-ft-sd', 'SD', '2'],
        required=True,
        help='Name of the FlowNet arch: C, S, CS, CSS, CSS-ft-sd, SD or 2'
    )
    FLAGS = parser.parse_args()
    arch = ARCHS[FLAGS.arch]
    main()
| |
import logging
import numpy as np
def identity(x):
    """Return the input unchanged (no-op transform)."""
    return x
def relim(lo, hi, log=False):
    """Return slightly expanded axis limits around ``(lo, hi)``.

    In log mode, negative bounds are clamped (lo -> 1e-5, hi -> 1e5) and the
    range is padded multiplicatively; otherwise each side is padded by 2% of
    the span.
    """
    logging.getLogger(__name__).debug("Inputs to relim: %r %r", lo, hi)
    if log:
        x = 1e-5 if lo < 0 else lo
        y = 1e5 if hi < 0 else hi
        return x * .95, y * 1.05
    pad = .02 * (hi - lo)
    return (lo - pad, hi + pad)
def file_format(filename):
    """Return the lowercase extension of *filename*, or '' if it has none.

    For gzipped files the extension before '.gz' is reported
    (e.g. 'x.fits.gz' -> 'fits').
    """
    if '.' not in filename:
        return ''
    lowered = filename.lower()
    splits = 2 if lowered.endswith('.gz') else 1
    return lowered.rsplit('.', splits)[1]
def point_contour(x, y, data):
    """Calculate the contour that passes through (x,y) in data

    :param x: x location
    :param y: y location
    :param data: 2D image
    :type data: :class:`numpy.ndarray`

    Returns:

       * A (nrow, 2column) numpy array. The two columns give the x and
         y locations of the contour vertices
    """
    try:
        from scipy import ndimage
    except ImportError:
        raise ImportError("Image processing in Glue requires SciPy")

    level = data[y, x]
    # Label the connected regions at or above the clicked intensity and
    # keep only the region containing (x, y).
    labeled, _num = ndimage.label(data >= level)
    masked = data * (labeled == labeled[y, x])

    yy, xx = np.mgrid[0:data.shape[0], 0:data.shape[1]]

    from matplotlib import _cntr
    segments = _cntr.Cntr(xx, yy, masked).trace(level)
    if not segments:
        return None
    return segments[0]
def split_component_view(arg):
    """Split the input to data or subset.__getitem__ into its pieces.

    :param arg: The input passed to data or subset.__getitem__.
                Assumed to be either a scalar or tuple
    :rtype: tuple

    The first item is the Component selection (a ComponentID or string).
    The second item is a view (tuple of slices, slice scalar, or view
    object), or None when no view was given.
    """
    if not isinstance(arg, tuple):
        return arg, None
    if len(arg) == 1:
        raise TypeError("Expected a scalar or >length-1 tuple, "
                        "got length-1 tuple")
    component, view = arg[0], arg[1:]
    if len(view) == 1:
        # A 2-tuple carries a single view object, not a tuple view.
        view = view[0]
    return component, view
def join_component_view(component, view):
    """Pack a componentID and optional view into single tuple

    Returns an object compatible with data.__getitem__ and related
    methods. Handles edge cases of when view is None, a scalar, a
    tuple, etc.

    :param component: ComponentID
    :param view: view into data, or None
    """
    if view is None:
        return component
    try:
        return (component,) + tuple(view)
    except TypeError:  # view is a scalar
        return (component, view)
def view_shape(shape, view):
    """Return the shape of a view of an array

    :param shape: Tuple describing shape of the array
    :param view: View object -- a valid index into a numpy array, or None

    Returns equivalent of np.zeros(shape)[view].shape
    """
    if view is None:
        return shape
    # Build broadcast index grids instead of allocating a full array;
    # indexing the first grid with `view` yields the viewed shape.
    slices = tuple(slice(0, dim, 1) for dim in shape)
    grid = np.broadcast_arrays(*np.ogrid[slices])
    assert grid[0].shape == shape
    return grid[0][view].shape
def color2rgb(color):
    """Convert a matplotlib color specification to an (r, g, b) tuple."""
    from matplotlib.colors import ColorConverter
    return ColorConverter().to_rgb(color)
def facet_subsets(data_collection, cid, lo=None, hi=None, steps=5,
                  prefix=None, log=False):
    """Create a series of subsets that partition the values of
    a particular attribute into several bins

    This creates `steps` new subset groups, adds them to the data collection,
    and returns the list of newly created subset groups.

    :param data_collection: DataCollection object to use
    :type data_collection: :class:`~glue.core.data_collection.DataCollection`

    :param cid: ComponentID to facet on
    :type cid: :class:`~glue.core.data.ComponentID`

    :param lo: The lower bound for the faceting. Defaults to minimum value
               in data
    :type lo: float

    :param hi: The upper bound for the faceting. Defaults to maximum
               value in data
    :type hi: float

    :param steps: The number of subsets to create. Defaults to 5
    :type steps: int

    :param prefix: If present, the new subsets will be labeled `prefix_1`, etc.
    :type prefix: str

    :param log: If True, space divisions logarithmically. Default=False
    :type log: bool

    :returns: List of :class:`~glue.core.subset_group.SubsetGroup` instances
              added to `data_collection`

    :raises ValueError: if limits must be inferred but no dataset in the
                        collection has the attribute `cid`

    Example::

        facet_subset(data, data.id['mass'], lo=0, hi=10, steps=2)

    creates 2 new subsets. The first represents the constraint 0 <=
    mass < 5. The second represents 5 <= mass < 10::

        facet_subset(data, data.id['mass'], lo=10, hi=0, steps=2)

    Creates 2 new subsets. The first represents the constraint 10 >= x > 5
    The second represents 5 >= mass > 0::

        facet_subset(data, data.id['mass'], lo=0, hi=10, steps=2, prefix='m')

    Labels the subsets ``m_1`` and ``m_2``
    """
    from .exceptions import IncompatibleAttribute
    if lo is None or hi is None:
        # Infer limits from the first dataset that has this attribute.
        for data in data_collection:
            try:
                vals = data[cid]
                break
            except IncompatibleAttribute:
                continue
        else:
            # for/else: no dataset provided the attribute.
            raise ValueError("Cannot infer data limits for ComponentID %s"
                             % cid)
        if lo is None:
            lo = np.nanmin(vals)
        if hi is None:
            hi = np.nanmax(vals)

    prefix = prefix or cid.label

    # lo > hi means the caller wants descending bins.
    reverse = lo > hi
    if log:
        rng = np.logspace(np.log10(lo), np.log10(hi), steps + 1)
    else:
        rng = np.linspace(lo, hi, steps + 1)

    states = []
    for i in range(steps):
        # ComponentID comparison operators build SubsetState objects;
        # bins are half-open so adjacent facets don't overlap.
        if reverse:
            states.append((cid <= rng[i]) & (cid > rng[i + 1]))
        else:
            states.append((cid >= rng[i]) & (cid < rng[i + 1]))

    result = []
    for i, s in enumerate(states, start=1):
        result.append(data_collection.new_subset_group())
        result[-1].subset_state = s
        result[-1].label = "%s_%i" % (prefix, i)
    return result
def colorize_subsets(subsets, cmap, lo=0, hi=1):
    """Re-color a list of subsets according to a colormap

    :param subsets: List of subsets
    :param cmap: Matplotlib colormap instance
    :param lo: Start location in colormap. 0-1. Defaults to 0
    :param hi: End location in colormap. 0-1. Defaults to 1

    The colormap will be sampled at `len(subsets)` even intervals
    between `lo` and `hi`. The color at the `ith` interval will be
    applied to `subsets[i]`
    """
    from matplotlib import cm
    mappable = cm.ScalarMappable(cmap=cmap)
    mappable.norm.vmin = 0
    mappable.norm.vmax = 1
    samples = np.linspace(lo, hi, len(subsets))
    for rgba, subset in zip(mappable.to_rgba(samples), subsets):
        # Drop alpha and scale each channel to 0-255 for the hex string.
        channels = tuple(int(255 * c) for c in rgba[:3])
        subset.style.color = '#%2.2x%2.2x%2.2x' % channels
def coerce_numeric(arr):
    """Coerce an array into a numeric array, replacing
    non-numeric elements with nans.

    If the array is already a numeric type, it is returned
    unchanged

    :param arr: array to coerce
    :type arr: :class:`numpy.ndarray`
    :returns: array.
    """
    # already numeric type
    if np.issubdtype(arr.dtype, np.number):
        return arr

    # Booleans become 0/1. Use builtin int: the np.int alias was removed
    # in NumPy 1.24 and crashed here on modern NumPy.
    if np.issubdtype(arr.dtype, np.bool_):
        return arr.astype(int)

    # a string dtype (bytes 'S' or unicode 'U'); dtype.kind replaces the
    # np.character abstract type removed in NumPy 2.0
    if arr.dtype.kind in 'SU':
        lens = np.char.str_len(arr)
        lmax = lens.max()
        nonnull = lens > 0
        # Parse each element as a fixed-width field wider than any entry.
        coerced = np.genfromtxt(arr, delimiter=lmax + 1)
        has_missing = not nonnull.all()
        # builtin float (np.float alias also removed in NumPy 1.24);
        # float dtype is required to hold the NaN fill values.
        dtype = float if has_missing else coerced.dtype
        result = np.empty(arr.shape, dtype=dtype)
        result[nonnull] = coerced
        if has_missing:
            result[~nonnull] = np.nan
        return result

    return np.genfromtxt(arr)
def check_sorted(array):
    """Return True if the array is in non-decreasing order, else False.

    NaN comparisons are False, so NaNs concentrated at either end of the
    array are effectively ignored; a NaN/finite boundary in the interior
    may be missed.
    """
    violations = array[:-1] > array[1:]
    return not violations.any()
def lookup_class(ref):
    """Look up an object via its module string (e.g., 'glue.core.Data')

    :param ref: reference
    :type ref: str
    :rtype: object, or None if not found
    """
    parts = ref.split('.')
    try:
        obj = __import__(parts[0])
    except ImportError:
        return None
    try:
        for name in parts[1:]:
            obj = getattr(obj, name)
    except AttributeError:
        return None
    return obj
class PropertySetMixin(object):
    """An object that provides a set of properties that
    are meant to encapsulate state information

    This class exposes a properties attribute, which is a dict
    of all properties. Similarly, assigning to the properties dict
    will update the individual properties
    """
    # Subclasses list the names of their state properties here.
    _property_set = []

    @property
    def properties(self):
        """A dict mapping property names to their current values."""
        return dict((name, getattr(self, name)) for name in self._property_set)

    @properties.setter
    def properties(self, value):
        """Update the properties with a new dict.

        Keys in the new dict must be valid property names defined in
        the _property_set class level attribute"""
        unknown = set(value.keys()) - set(self._property_set)
        if unknown:
            raise ValueError("Invalid property values: %s" % unknown)
        for name in self._property_set:
            if name in value:
                setattr(self, name, value[name])
class CallbackMixin(object):
    """
    A mixin that provides a utility for attaching callback
    functions to methods
    """

    def __init__(self):
        # Callbacks invoked, in registration order, by notify().
        self._callbacks = []

    def add_callback(self, function):
        """Register `function` to be called by notify()."""
        self._callbacks.append(function)

    def remove_callback(self, function):
        """Unregister a previously added callback (raises if absent)."""
        self._callbacks.remove(function)

    def notify(self, *args, **kwargs):
        """Invoke every registered callback with the given arguments."""
        for callback in self._callbacks:
            callback(*args, **kwargs)
def as_list(x):
    """Return x if it is already a list, otherwise wrap it in one."""
    return x if isinstance(x, list) else [x]
class Pointer(object):
    """Descriptor that proxies attribute access through a dotted path.

    ``attr = Pointer('a.b')`` on a class makes ``obj.attr`` read and write
    ``obj.a.b``. Reads return None if any link in the chain is missing;
    writes require every link except the last to exist.
    """

    def __init__(self, key):
        # Dotted attribute path, e.g. 'style.color'
        self.key = key

    def __get__(self, instance, type=None):
        val = instance
        for k in self.key.split('.'):
            val = getattr(val, k, None)
        return val

    def __set__(self, instance, value):
        # Walk to the second-to-last object, then assign the final attribute.
        # (The original used `reduce`, which is a NameError on Python 3
        # since functools.reduce was never imported.)
        parts = self.key.split('.')
        target = instance
        for k in parts[:-1]:
            target = getattr(target, k)
        setattr(target, parts[-1], value)
| |
#!/usr/bin/env python
""" generated source for module EReader """
#
# Original file copyright original author(s).
# This file copyright Troy Melhase, troy@gci.net.
#
# WARNING: all changes to this file will be lost.
from ib.lib import Boolean, Double, DataInputStream, Integer, Long, StringBuffer, Thread
from ib.lib.overloading import overloaded
from ib.ext.Contract import Contract
from ib.ext.ContractDetails import ContractDetails
from ib.ext.ComboLeg import ComboLeg
from ib.ext.CommissionReport import CommissionReport
from ib.ext.EClientErrors import EClientErrors
from ib.ext.Execution import Execution
from ib.ext.Order import Order
from ib.ext.OrderComboLeg import OrderComboLeg
from ib.ext.OrderState import OrderState
from ib.ext.TagValue import TagValue
from ib.ext.TickType import TickType
from ib.ext.UnderComp import UnderComp
from ib.ext.Util import Util
#
# * EReader.java
# *
#
# package: com.ib.client
class EReader(Thread):
    """ Reader thread that decodes the incoming TWS/IB Gateway socket
    stream.  Every field on the wire is a NUL-terminated string; the
    read* helpers pop one field each, and processMsg dispatches on the
    integer message id, forwarding the decoded values to the parent
    connection's EWrapper.  (Generated source for class EReader.)
    """
    # incoming msg id's
    TICK_PRICE = 1
    TICK_SIZE = 2
    ORDER_STATUS = 3
    ERR_MSG = 4
    OPEN_ORDER = 5
    ACCT_VALUE = 6
    PORTFOLIO_VALUE = 7
    ACCT_UPDATE_TIME = 8
    NEXT_VALID_ID = 9
    CONTRACT_DATA = 10
    EXECUTION_DATA = 11
    MARKET_DEPTH = 12
    MARKET_DEPTH_L2 = 13
    NEWS_BULLETINS = 14
    MANAGED_ACCTS = 15
    RECEIVE_FA = 16
    HISTORICAL_DATA = 17
    BOND_CONTRACT_DATA = 18
    SCANNER_PARAMETERS = 19
    SCANNER_DATA = 20
    TICK_OPTION_COMPUTATION = 21
    TICK_GENERIC = 45
    TICK_STRING = 46
    TICK_EFP = 47
    CURRENT_TIME = 49
    REAL_TIME_BARS = 50
    FUNDAMENTAL_DATA = 51
    CONTRACT_DATA_END = 52
    OPEN_ORDER_END = 53
    ACCT_DOWNLOAD_END = 54
    EXECUTION_DATA_END = 55
    DELTA_NEUTRAL_VALIDATION = 56
    TICK_SNAPSHOT_END = 57
    MARKET_DATA_TYPE = 58
    COMMISSION_REPORT = 59
    POSITION = 61
    POSITION_END = 62
    ACCOUNT_SUMMARY = 63
    ACCOUNT_SUMMARY_END = 64
    # parent connection; provides wrapper(), error(), serverVersion(), close()
    m_parent = None
    # DataInputStream the reader consumes one byte at a time (see readStr)
    m_dis = None
    def parent(self):
        """ Return the parent connection object. """
        return self.m_parent
    def eWrapper(self):
        """ Return the parent's EWrapper callback sink. """
        return self.parent().wrapper()
    @overloaded
    def __init__(self, parent, dis):
        """ Overloaded constructor: delegates to __init___0 with the
        default thread name "EReader". """
        self.__init__("EReader", parent, dis)
    @__init__.register(object, str, object, DataInputStream)
    def __init___0(self, name, parent, dis):
        """ Named-thread constructor storing the parent connection and
        the input stream. """
        # NOTE(review): parent/dis are also forwarded to Thread.__init__;
        # confirm ib.lib.Thread accepts (and ignores) these extra args.
        Thread.__init__(self, name, parent, dis)
        self.setName(name)
        self.m_parent = parent
        self.m_dis = dis
    def run(self):
        """ Main loop: read message ids until interrupted or processMsg
        returns False, reporting errors to the wrapper and closing the
        stream on the way out. """
        try:
            # loop until thread is terminated
            while not self.isInterrupted() and self.processMsg(self.readInt()):
                pass
        except Exception as ex:
            if self.parent().isConnected():
                self.eWrapper().error(ex)
        if self.parent().isConnected():
            self.m_parent.close()
        try:
            self.m_dis.close()
            self.m_dis = None
        except Exception as e:
            pass
    # Overridden in subclass.
    def processMsg(self, msgId):
        """ Decode the body of one incoming message identified by msgId
        and invoke the matching EWrapper callback.  Returns False (which
        stops the reader loop) for msgId == -1 or an unknown id, True
        otherwise.  Field order within each branch mirrors the wire
        protocol and must not be changed. """
        if msgId == -1:
            return False
        if msgId == self.TICK_PRICE:
            version = self.readInt()
            tickerId = self.readInt()
            tickType = self.readInt()
            price = self.readDouble()
            size = 0
            if version >= 2:
                size = self.readInt()
            canAutoExecute = 0
            if version >= 3:
                canAutoExecute = self.readInt()
            self.eWrapper().tickPrice(tickerId, tickType, price, canAutoExecute)
            if version >= 2:
                # not a tick
                sizeTickType = -1
                if tickType == 1:
                    # BID
                    sizeTickType = 0
                    # BID_SIZE
                elif tickType == 2:
                    # ASK
                    sizeTickType = 3
                    # ASK_SIZE
                elif tickType == 4:
                    # LAST
                    sizeTickType = 5
                    # LAST_SIZE
                if sizeTickType != -1:
                    self.eWrapper().tickSize(tickerId, sizeTickType, size)
        elif msgId == self.TICK_SIZE:
            version = self.readInt()
            tickerId = self.readInt()
            tickType = self.readInt()
            size = self.readInt()
            self.eWrapper().tickSize(tickerId, tickType, size)
        elif msgId==self.POSITION:
            version = self.readInt()
            account = self.readStr()
            contract = Contract()
            contract.m_conId = self.readInt()
            contract.m_symbol = self.readStr()
            contract.m_secType = self.readStr()
            contract.m_expiry = self.readStr()
            contract.m_strike = self.readDouble()
            contract.m_right = self.readStr()
            contract.m_multiplier = self.readStr()
            contract.m_exchange = self.readStr()
            contract.m_currency = self.readStr()
            contract.m_localSymbol = self.readStr()
            if version >= 2:
                contract.m_tradingClass = self.readStr()
            pos = self.readInt()
            avgCost = 0
            if version >= 3:
                avgCost = self.readDouble()
            self.eWrapper().position(account, contract, pos, avgCost)
        elif msgId==self.POSITION_END:
            version = self.readInt()
            self.eWrapper().positionEnd()
        elif msgId==self.ACCOUNT_SUMMARY:
            version = self.readInt()
            reqId = self.readInt()
            account = self.readStr()
            tag = self.readStr()
            value = self.readStr()
            currency = self.readStr()
            self.eWrapper().accountSummary(reqId, account, tag, value, currency)
        elif msgId==self.ACCOUNT_SUMMARY_END:
            version = self.readInt()
            reqId = self.readInt()
            self.eWrapper().accountSummaryEnd(reqId)
        elif msgId == self.TICK_OPTION_COMPUTATION:
            version = self.readInt()
            tickerId = self.readInt()
            tickType = self.readInt()
            impliedVol = self.readDouble()
            if impliedVol < 0: # -1 is the "not yet computed" indicator
                impliedVol = Double.MAX_VALUE
            delta = self.readDouble()
            if abs(delta) > 1: # -2 is the "not yet computed" indicator
                delta = Double.MAX_VALUE
            optPrice = Double.MAX_VALUE
            pvDividend = Double.MAX_VALUE
            gamma = Double.MAX_VALUE
            vega = Double.MAX_VALUE
            theta = Double.MAX_VALUE
            undPrice = Double.MAX_VALUE
            if version >= 6 or (tickType == TickType.MODEL_OPTION):
                # introduced in version == 5
                optPrice = self.readDouble()
                if optPrice < 0: # -1 is the "not yet computed" indicator
                    optPrice = Double.MAX_VALUE
                pvDividend = self.readDouble()
                if pvDividend < 0: # -1 is the "not yet computed" indicator
                    pvDividend = Double.MAX_VALUE
            if version >= 6:
                gamma = self.readDouble()
                if abs(gamma) > 1: # -2 is the "not yet computed" indicator
                    gamma = Double.MAX_VALUE
                vega = self.readDouble()
                if abs(vega) > 1: # -2 is the "not yet computed" indicator
                    vega = Double.MAX_VALUE
                theta = self.readDouble()
                if abs(theta) > 1: # -2 is the "not yet computed" indicator
                    theta = Double.MAX_VALUE
                undPrice = self.readDouble()
                if undPrice < 0: # -1 is the "not yet computed" indicator
                    undPrice = Double.MAX_VALUE
            self.eWrapper().tickOptionComputation(tickerId, tickType, impliedVol, delta, optPrice, pvDividend, gamma, vega, theta, undPrice)
        elif msgId == self.TICK_GENERIC:
            version = self.readInt()
            tickerId = self.readInt()
            tickType = self.readInt()
            value = self.readDouble()
            self.eWrapper().tickGeneric(tickerId, tickType, value)
        elif msgId == self.TICK_STRING:
            version = self.readInt()
            tickerId = self.readInt()
            tickType = self.readInt()
            value = self.readStr()
            self.eWrapper().tickString(tickerId, tickType, value)
        elif msgId == self.TICK_EFP:
            version = self.readInt()
            tickerId = self.readInt()
            tickType = self.readInt()
            basisPoints = self.readDouble()
            formattedBasisPoints = self.readStr()
            impliedFuturesPrice = self.readDouble()
            holdDays = self.readInt()
            futureExpiry = self.readStr()
            dividendImpact = self.readDouble()
            dividendsToExpiry = self.readDouble()
            self.eWrapper().tickEFP(tickerId, tickType, basisPoints, formattedBasisPoints, impliedFuturesPrice, holdDays, futureExpiry, dividendImpact, dividendsToExpiry)
        elif msgId == self.ORDER_STATUS:
            version = self.readInt()
            id = self.readInt()
            status = self.readStr()
            filled = self.readInt()
            remaining = self.readInt()
            avgFillPrice = self.readDouble()
            permId = 0
            if version >= 2:
                permId = self.readInt()
            parentId = 0
            if version >= 3:
                parentId = self.readInt()
            lastFillPrice = 0
            if version >= 4:
                lastFillPrice = self.readDouble()
            clientId = 0
            if version >= 5:
                clientId = self.readInt()
            whyHeld = None
            if version >= 6:
                whyHeld = self.readStr()
            self.eWrapper().orderStatus(id, status, filled, remaining, avgFillPrice, permId, parentId, lastFillPrice, clientId, whyHeld)
        elif msgId == self.ACCT_VALUE:
            version = self.readInt()
            key = self.readStr()
            val = self.readStr()
            cur = self.readStr()
            accountName = None
            if version >= 2:
                accountName = self.readStr()
            self.eWrapper().updateAccountValue(key, val, cur, accountName)
        elif msgId == self.PORTFOLIO_VALUE:
            version = self.readInt()
            contract = Contract()
            if version >= 6:
                contract.m_conId = self.readInt()
            contract.m_symbol = self.readStr()
            contract.m_secType = self.readStr()
            contract.m_expiry = self.readStr()
            contract.m_strike = self.readDouble()
            contract.m_right = self.readStr()
            if version >= 7:
                contract.m_multiplier = self.readStr()
                contract.m_primaryExch = self.readStr()
            contract.m_currency = self.readStr()
            if version >= 2:
                contract.m_localSymbol = self.readStr()
            if version >= 8:
                contract.m_tradingClass = self.readStr()
            position = self.readInt()
            marketPrice = self.readDouble()
            marketValue = self.readDouble()
            averageCost = 0.0
            unrealizedPNL = 0.0
            realizedPNL = 0.0
            if version >= 3:
                averageCost = self.readDouble()
                unrealizedPNL = self.readDouble()
                realizedPNL = self.readDouble()
            accountName = None
            if version >= 4:
                accountName = self.readStr()
            if version == 6 and self.m_parent.serverVersion() == 39:
                contract.m_primaryExch = self.readStr()
            self.eWrapper().updatePortfolio(contract, position, marketPrice, marketValue, averageCost, unrealizedPNL, realizedPNL, accountName)
        elif msgId == self.ACCT_UPDATE_TIME:
            version = self.readInt()
            timeStamp = self.readStr()
            self.eWrapper().updateAccountTime(timeStamp)
        elif msgId == self.ERR_MSG:
            version = self.readInt()
            if version < 2:
                msg = self.readStr()
                self.m_parent.error(msg)
            else:
                id = self.readInt()
                errorCode = self.readInt()
                errorMsg = self.readStr()
                self.m_parent.error(id, errorCode, errorMsg)
        elif msgId == self.OPEN_ORDER:
            # read version
            version = self.readInt()
            # read order id
            order = Order()
            order.m_orderId = self.readInt()
            # read contract fields
            contract = Contract()
            if version >= 17:
                contract.m_conId = self.readInt()
            contract.m_symbol = self.readStr()
            contract.m_secType = self.readStr()
            contract.m_expiry = self.readStr()
            contract.m_strike = self.readDouble()
            contract.m_right = self.readStr()
            if version >= 32:
                contract.m_multiplier = self.readStr()
            contract.m_exchange = self.readStr()
            contract.m_currency = self.readStr()
            if version >= 2:
                contract.m_localSymbol = self.readStr()
                if version >= 32:
                    contract.m_tradingClass = self.readStr()
            # read order fields
            order.m_action = self.readStr()
            order.m_totalQuantity = self.readInt()
            order.m_orderType = self.readStr()
            if version < 29:
                order.m_lmtPrice = self.readDouble()
            else:
                order.m_lmtPrice = self.readDoubleMax()
            if version < 30:
                order.m_auxPrice = self.readDouble()
            else:
                order.m_auxPrice = self.readDoubleMax()
            order.m_tif = self.readStr()
            order.m_ocaGroup = self.readStr()
            order.m_account = self.readStr()
            order.m_openClose = self.readStr()
            order.m_origin = self.readInt()
            order.m_orderRef = self.readStr()
            if version >= 3:
                order.m_clientId = self.readInt()
            if version >= 4:
                order.m_permId = self.readInt()
                if version < 18:
                    # will never happen
                    # order.m_ignoreRth =
                    self.readBoolFromInt()
                else:
                    order.m_outsideRth = self.readBoolFromInt()
                order.m_hidden = self.readInt() == 1
                order.m_discretionaryAmt = self.readDouble()
            if version >= 5:
                order.m_goodAfterTime = self.readStr()
            if version >= 6:
                # skip deprecated sharesAllocation field
                self.readStr()
            if version >= 7:
                order.m_faGroup = self.readStr()
                order.m_faMethod = self.readStr()
                order.m_faPercentage = self.readStr()
                order.m_faProfile = self.readStr()
            if version >= 8:
                order.m_goodTillDate = self.readStr()
            if version >= 9:
                order.m_rule80A = self.readStr()
                order.m_percentOffset = self.readDoubleMax()
                order.m_settlingFirm = self.readStr()
                order.m_shortSaleSlot = self.readInt()
                order.m_designatedLocation = self.readStr()
                if self.m_parent.serverVersion() == 51:
                    self.readInt() # exemptCode
                elif version >= 23:
                    order.m_exemptCode = self.readInt()
                order.m_auctionStrategy = self.readInt()
                order.m_startingPrice = self.readDoubleMax()
                order.m_stockRefPrice = self.readDoubleMax()
                order.m_delta = self.readDoubleMax()
                order.m_stockRangeLower = self.readDoubleMax()
                order.m_stockRangeUpper = self.readDoubleMax()
                order.m_displaySize = self.readInt()
                if version < 18:
                    # will never happen
                    # order.m_rthOnly =
                    self.readBoolFromInt()
                order.m_blockOrder = self.readBoolFromInt()
                order.m_sweepToFill = self.readBoolFromInt()
                order.m_allOrNone = self.readBoolFromInt()
                order.m_minQty = self.readIntMax()
                order.m_ocaType = self.readInt()
                order.m_eTradeOnly = self.readBoolFromInt()
                order.m_firmQuoteOnly = self.readBoolFromInt()
                order.m_nbboPriceCap = self.readDoubleMax()
            if version >= 10:
                order.m_parentId = self.readInt()
                order.m_triggerMethod = self.readInt()
            if version >= 11:
                order.m_volatility = self.readDoubleMax()
                order.m_volatilityType = self.readInt()
                if version == 11:
                    receivedInt = self.readInt()
                    order.m_deltaNeutralOrderType = ("NONE" if (receivedInt == 0) else "MKT")
                else:
                    # version 12 and up
                    order.m_deltaNeutralOrderType = self.readStr()
                    order.m_deltaNeutralAuxPrice = self.readDoubleMax()
                    if version >= 27 and not Util.StringIsEmpty(order.m_deltaNeutralOrderType):
                        order.m_deltaNeutralConId = self.readInt()
                        order.m_deltaNeutralSettlingFirm = self.readStr()
                        order.m_deltaNeutralClearingAccount = self.readStr()
                        order.m_deltaNeutralClearingIntent = self.readStr()
                    if version >= 31 and not Util.StringIsEmpty(order.m_deltaNeutralOrderType):
                        order.m_deltaNeutralOpenClose = self.readStr()
                        order.m_deltaNeutralShortSale = self.readBoolFromInt()
                        order.m_deltaNeutralShortSaleSlot = self.readInt()
                        order.m_deltaNeutralDesignatedLocation = self.readStr()
                order.m_continuousUpdate = self.readInt()
                if self.m_parent.serverVersion() == 26:
                    order.m_stockRangeLower = self.readDouble()
                    order.m_stockRangeUpper = self.readDouble()
                order.m_referencePriceType = self.readInt()
            if version >= 13:
                order.m_trailStopPrice = self.readDoubleMax()
            if version >= 30:
                order.m_trailingPercent = self.readDoubleMax()
            if version >= 14:
                order.m_basisPoints = self.readDoubleMax()
                order.m_basisPointsType = self.readIntMax()
                contract.m_comboLegsDescrip = self.readStr()
            if version >= 29:
                comboLegsCount = self.readInt()
                if comboLegsCount > 0:
                    contract.m_comboLegs = []
                    i = 0
                    while i < comboLegsCount:
                        comboLeg = ComboLeg()
                        comboLeg.m_conId = self.readInt()
                        comboLeg.m_ratio = self.readInt()
                        comboLeg.m_action = self.readStr()
                        comboLeg.m_exchange = self.readStr()
                        comboLeg.m_openClose = self.readInt()
                        comboLeg.m_shortSaleSlot = self.readInt()
                        comboLeg.m_designatedLocation = self.readStr()
                        comboLeg.m_exemptCode = self.readInt()
                        contract.m_comboLegs.append(comboLeg)
                        i += 1
                orderComboLegsCount = self.readInt()
                if orderComboLegsCount > 0:
                    order.m_orderComboLegs = []
                    i = 0
                    while i < orderComboLegsCount:
                        price = self.readDoubleMax()
                        orderComboLeg = OrderComboLeg(price)
                        order.m_orderComboLegs.append(orderComboLeg)
                        i += 1
            if version >= 26:
                smartComboRoutingParamsCount = self.readInt()
                if smartComboRoutingParamsCount > 0:
                    order.m_smartComboRoutingParams = []
                    i = 0
                    while i < smartComboRoutingParamsCount:
                        tagValue = TagValue()
                        tagValue.m_tag = self.readStr()
                        tagValue.m_value = self.readStr()
                        order.m_smartComboRoutingParams.append(tagValue)
                        i += 1
            if version >= 15:
                if version >= 20:
                    order.m_scaleInitLevelSize = self.readIntMax()
                    order.m_scaleSubsLevelSize = self.readIntMax()
                else:
                    # int notSuppScaleNumComponents =
                    self.readIntMax()
                    order.m_scaleInitLevelSize = self.readIntMax()
                order.m_scalePriceIncrement = self.readDoubleMax()
            if version >= 28 and order.m_scalePriceIncrement > 0.0 and order.m_scalePriceIncrement != Double.MAX_VALUE:
                order.m_scalePriceAdjustValue = self.readDoubleMax()
                order.m_scalePriceAdjustInterval = self.readIntMax()
                order.m_scaleProfitOffset = self.readDoubleMax()
                order.m_scaleAutoReset = self.readBoolFromInt()
                order.m_scaleInitPosition = self.readIntMax()
                order.m_scaleInitFillQty = self.readIntMax()
                order.m_scaleRandomPercent = self.readBoolFromInt()
            if version >= 24:
                order.m_hedgeType = self.readStr()
                if not Util.StringIsEmpty(order.m_hedgeType):
                    order.m_hedgeParam = self.readStr()
            if version >= 25:
                order.m_optOutSmartRouting = self.readBoolFromInt()
            if version >= 19:
                order.m_clearingAccount = self.readStr()
                order.m_clearingIntent = self.readStr()
            if version >= 22:
                order.m_notHeld = self.readBoolFromInt()
            if version >= 20:
                if self.readBoolFromInt():
                    underComp = UnderComp()
                    underComp.m_conId = self.readInt()
                    underComp.m_delta = self.readDouble()
                    underComp.m_price = self.readDouble()
                    contract.m_underComp = underComp
            if version >= 21:
                order.m_algoStrategy = self.readStr()
                if not Util.StringIsEmpty(order.m_algoStrategy):
                    algoParamsCount = self.readInt()
                    if algoParamsCount > 0:
                        order.m_algoParams = []
                        i = 0
                        while i < algoParamsCount:
                            tagValue = TagValue()
                            tagValue.m_tag = self.readStr()
                            tagValue.m_value = self.readStr()
                            order.m_algoParams.append(tagValue)
                            i += 1
            orderState = OrderState()
            if version >= 16:
                order.m_whatIf = self.readBoolFromInt()
                orderState.m_status = self.readStr()
                orderState.m_initMargin = self.readStr()
                orderState.m_maintMargin = self.readStr()
                orderState.m_equityWithLoan = self.readStr()
                orderState.m_commission = self.readDoubleMax()
                orderState.m_minCommission = self.readDoubleMax()
                orderState.m_maxCommission = self.readDoubleMax()
                orderState.m_commissionCurrency = self.readStr()
                orderState.m_warningText = self.readStr()
            self.eWrapper().openOrder(order.m_orderId, contract, order, orderState)
        elif msgId == self.NEXT_VALID_ID:
            version = self.readInt()
            orderId = self.readInt()
            self.eWrapper().nextValidId(orderId)
        elif msgId == self.SCANNER_DATA:
            contract = ContractDetails()
            version = self.readInt()
            tickerId = self.readInt()
            numberOfElements = self.readInt()
            ctr = 0
            while ctr < numberOfElements:
                rank = self.readInt()
                if version >= 3:
                    contract.m_summary.m_conId = self.readInt()
                contract.m_summary.m_symbol = self.readStr()
                contract.m_summary.m_secType = self.readStr()
                contract.m_summary.m_expiry = self.readStr()
                contract.m_summary.m_strike = self.readDouble()
                contract.m_summary.m_right = self.readStr()
                contract.m_summary.m_exchange = self.readStr()
                contract.m_summary.m_currency = self.readStr()
                contract.m_summary.m_localSymbol = self.readStr()
                contract.m_marketName = self.readStr()
                contract.m_summary.m_tradingClass = self.readStr()
                distance = self.readStr()
                benchmark = self.readStr()
                projection = self.readStr()
                legsStr = None
                if version >= 2:
                    legsStr = self.readStr()
                self.eWrapper().scannerData(tickerId, rank, contract, distance, benchmark, projection, legsStr)
                ctr += 1
            self.eWrapper().scannerDataEnd(tickerId)
        elif msgId == self.CONTRACT_DATA:
            version = self.readInt()
            reqId = -1
            if version >= 3:
                reqId = self.readInt()
            contract = ContractDetails()
            contract.m_summary.m_symbol = self.readStr()
            contract.m_summary.m_secType = self.readStr()
            contract.m_summary.m_expiry = self.readStr()
            contract.m_summary.m_strike = self.readDouble()
            contract.m_summary.m_right = self.readStr()
            contract.m_summary.m_exchange = self.readStr()
            contract.m_summary.m_currency = self.readStr()
            contract.m_summary.m_localSymbol = self.readStr()
            contract.m_marketName = self.readStr()
            contract.m_summary.m_tradingClass = self.readStr()
            contract.m_summary.m_conId = self.readInt()
            contract.m_minTick = self.readDouble()
            contract.m_summary.m_multiplier = self.readStr()
            contract.m_orderTypes = self.readStr()
            contract.m_validExchanges = self.readStr()
            if version >= 2:
                contract.m_priceMagnifier = self.readInt()
            if version >= 4:
                contract.m_underConId = self.readInt()
            if version >= 5:
                contract.m_longName = self.readStr()
                contract.m_summary.m_primaryExch = self.readStr()
            if version >= 6:
                contract.m_contractMonth = self.readStr()
                contract.m_industry = self.readStr()
                contract.m_category = self.readStr()
                contract.m_subcategory = self.readStr()
                contract.m_timeZoneId = self.readStr()
                contract.m_tradingHours = self.readStr()
                contract.m_liquidHours = self.readStr()
            if version >= 8:
                contract.m_evRule = self.readStr()
                contract.m_evMultiplier = self.readDouble()
            if version >= 7:
                secIdListCount = self.readInt()
                if secIdListCount > 0:
                    contract.m_secIdList = []
                    i = 0
                    while i < secIdListCount:
                        tagValue = TagValue()
                        tagValue.m_tag = self.readStr()
                        tagValue.m_value = self.readStr()
                        contract.m_secIdList.append(tagValue)
                        i += 1
            self.eWrapper().contractDetails(reqId, contract)
        elif msgId == self.BOND_CONTRACT_DATA:
            version = self.readInt()
            reqId = -1
            if version >= 3:
                reqId = self.readInt()
            contract = ContractDetails()
            contract.m_summary.m_symbol = self.readStr()
            contract.m_summary.m_secType = self.readStr()
            contract.m_cusip = self.readStr()
            contract.m_coupon = self.readDouble()
            contract.m_maturity = self.readStr()
            contract.m_issueDate = self.readStr()
            contract.m_ratings = self.readStr()
            contract.m_bondType = self.readStr()
            contract.m_couponType = self.readStr()
            contract.m_convertible = self.readBoolFromInt()
            contract.m_callable = self.readBoolFromInt()
            contract.m_putable = self.readBoolFromInt()
            contract.m_descAppend = self.readStr()
            contract.m_summary.m_exchange = self.readStr()
            contract.m_summary.m_currency = self.readStr()
            contract.m_marketName = self.readStr()
            contract.m_summary.m_tradingClass = self.readStr()
            contract.m_summary.m_conId = self.readInt()
            contract.m_minTick = self.readDouble()
            contract.m_orderTypes = self.readStr()
            contract.m_validExchanges = self.readStr()
            if version >= 2:
                contract.m_nextOptionDate = self.readStr()
                contract.m_nextOptionType = self.readStr()
                contract.m_nextOptionPartial = self.readBoolFromInt()
                contract.m_notes = self.readStr()
            if version >= 4:
                contract.m_longName = self.readStr()
            if version >= 6:
                contract.m_evRule = self.readStr()
                contract.m_evMultiplier = self.readDouble()
            if version >= 5:
                secIdListCount = self.readInt()
                if secIdListCount > 0:
                    contract.m_secIdList = []
                    i = 0
                    while i < secIdListCount:
                        tagValue = TagValue()
                        tagValue.m_tag = self.readStr()
                        tagValue.m_value = self.readStr()
                        contract.m_secIdList.append(tagValue)
                        i += 1
            self.eWrapper().bondContractDetails(reqId, contract)
        elif msgId == self.EXECUTION_DATA:
            version = self.readInt()
            reqId = -1
            if version >= 7:
                reqId = self.readInt()
            orderId = self.readInt()
            contract = Contract()
            # read contract fields
            if version >= 5:
                contract.m_conId = self.readInt()
            contract.m_symbol = self.readStr()
            contract.m_secType = self.readStr()
            contract.m_expiry = self.readStr()
            contract.m_strike = self.readDouble()
            contract.m_right = self.readStr()
            if version >= 9:
                contract.m_multiplier = self.readStr()
            contract.m_exchange = self.readStr()
            contract.m_currency = self.readStr()
            contract.m_localSymbol = self.readStr()
            if version >= 10:
                contract.m_tradingClass = self.readStr()
            exec_ = Execution()
            exec_.m_orderId = orderId
            exec_.m_execId = self.readStr()
            exec_.m_time = self.readStr()
            exec_.m_acctNumber = self.readStr()
            exec_.m_exchange = self.readStr()
            exec_.m_side = self.readStr()
            exec_.m_shares = self.readInt()
            exec_.m_price = self.readDouble()
            if version >= 2:
                exec_.m_permId = self.readInt()
            if version >= 3:
                exec_.m_clientId = self.readInt()
            if version >= 4:
                exec_.m_liquidation = self.readInt()
            if version >= 6:
                exec_.m_cumQty = self.readInt()
                exec_.m_avgPrice = self.readDouble()
            if version >= 8:
                exec_.m_orderRef = self.readStr()
            if version >= 9:
                exec_.m_evRule = self.readStr()
                exec_.m_evMultiplier = self.readDouble()
            self.eWrapper().execDetails(reqId, contract, exec_)
        elif msgId == self.MARKET_DEPTH:
            version = self.readInt()
            id = self.readInt()
            position = self.readInt()
            operation = self.readInt()
            side = self.readInt()
            price = self.readDouble()
            size = self.readInt()
            self.eWrapper().updateMktDepth(id, position, operation, side, price, size)
        elif msgId == self.MARKET_DEPTH_L2:
            version = self.readInt()
            id = self.readInt()
            position = self.readInt()
            marketMaker = self.readStr()
            operation = self.readInt()
            side = self.readInt()
            price = self.readDouble()
            size = self.readInt()
            self.eWrapper().updateMktDepthL2(id, position, marketMaker, operation, side, price, size)
        elif msgId == self.NEWS_BULLETINS:
            version = self.readInt()
            newsMsgId = self.readInt()
            newsMsgType = self.readInt()
            newsMessage = self.readStr()
            originatingExch = self.readStr()
            self.eWrapper().updateNewsBulletin(newsMsgId, newsMsgType, newsMessage, originatingExch)
        elif msgId == self.MANAGED_ACCTS:
            version = self.readInt()
            accountsList = self.readStr()
            self.eWrapper().managedAccounts(accountsList)
        elif msgId == self.RECEIVE_FA:
            version = self.readInt()
            faDataType = self.readInt()
            xml = self.readStr()
            self.eWrapper().receiveFA(faDataType, xml)
        elif msgId == self.HISTORICAL_DATA:
            version = self.readInt()
            reqId = self.readInt()
            startDateStr = ""
            endDateStr = ""
            completedIndicator = "finished"
            if version >= 2:
                startDateStr = self.readStr()
                endDateStr = self.readStr()
                completedIndicator += "-" + startDateStr + "-" + endDateStr
            itemCount = self.readInt()
            ctr = 0
            while ctr < itemCount:
                date = self.readStr()
                open = self.readDouble()
                high = self.readDouble()
                low = self.readDouble()
                close = self.readDouble()
                volume = self.readInt()
                WAP = self.readDouble()
                hasGaps = self.readStr()
                barCount = -1
                if version >= 3:
                    barCount = self.readInt()
                self.eWrapper().historicalData(reqId, date, open, high, low, close, volume, barCount, WAP, Boolean.valueOf(hasGaps).booleanValue())
                ctr += 1
            # send end of dataset marker
            self.eWrapper().historicalData(reqId, completedIndicator, -1, -1, -1, -1, -1, -1, -1, False)
        elif msgId == self.SCANNER_PARAMETERS:
            version = self.readInt()
            xml = self.readStr()
            self.eWrapper().scannerParameters(xml)
        elif msgId == self.CURRENT_TIME:
            # int version =
            self.readInt()
            time = self.readLong()
            self.eWrapper().currentTime(time)
        elif msgId == self.REAL_TIME_BARS:
            # int version =
            self.readInt()
            reqId = self.readInt()
            time = self.readLong()
            open = self.readDouble()
            high = self.readDouble()
            low = self.readDouble()
            close = self.readDouble()
            volume = self.readLong()
            wap = self.readDouble()
            count = self.readInt()
            self.eWrapper().realtimeBar(reqId, time, open, high, low, close, volume, wap, count)
        elif msgId == self.FUNDAMENTAL_DATA:
            # int version =
            self.readInt()
            reqId = self.readInt()
            data = self.readStr()
            self.eWrapper().fundamentalData(reqId, data)
        elif msgId == self.CONTRACT_DATA_END:
            # int version =
            self.readInt()
            reqId = self.readInt()
            self.eWrapper().contractDetailsEnd(reqId)
        elif msgId == self.OPEN_ORDER_END:
            # int version =
            self.readInt()
            self.eWrapper().openOrderEnd()
        elif msgId == self.ACCT_DOWNLOAD_END:
            # int version =
            self.readInt()
            accountName = self.readStr()
            self.eWrapper().accountDownloadEnd(accountName)
        elif msgId == self.EXECUTION_DATA_END:
            # int version =
            self.readInt()
            reqId = self.readInt()
            self.eWrapper().execDetailsEnd(reqId)
        elif msgId == self.DELTA_NEUTRAL_VALIDATION:
            # int version =
            self.readInt()
            reqId = self.readInt()
            underComp = UnderComp()
            underComp.m_conId = self.readInt()
            underComp.m_delta = self.readDouble()
            underComp.m_price = self.readDouble()
            self.eWrapper().deltaNeutralValidation(reqId, underComp)
        elif msgId == self.TICK_SNAPSHOT_END:
            # int version =
            self.readInt()
            reqId = self.readInt()
            self.eWrapper().tickSnapshotEnd(reqId)
        elif msgId == self.MARKET_DATA_TYPE:
            # int version =
            self.readInt()
            reqId = self.readInt()
            marketDataType = self.readInt()
            self.eWrapper().marketDataType(reqId, marketDataType)
        elif msgId == self.COMMISSION_REPORT:
            # int version =
            self.readInt()
            commissionReport = CommissionReport()
            commissionReport.m_execId = self.readStr()
            commissionReport.m_commission = self.readDouble()
            commissionReport.m_currency = self.readStr()
            commissionReport.m_realizedPNL = self.readDouble()
            commissionReport.m_yield = self.readDouble()
            commissionReport.m_yieldRedemptionDate = self.readInt()
            self.eWrapper().commissionReport(commissionReport)
        else:
            self.m_parent.error(EClientErrors.NO_VALID_ID, EClientErrors.UNKNOWN_ID.code(), EClientErrors.UNKNOWN_ID.msg())
            return False
        return True
    def readStr(self):
        """ Read one NUL-terminated field from the stream; an empty
        field is returned as None. """
        buf = StringBuffer()
        while True:
            c = self.m_dis.readByte()
            if c == 0:
                break
            buf.append(c)
        strval = str(buf)
        return None if 0 == len(strval) else strval
    def readBoolFromInt(self):
        """ Read an int field; any non-zero value is True, a missing
        (None) field is False. """
        strval = self.readStr()
        return False if strval is None else (Integer.parseInt(strval) != 0)
    def readInt(self):
        """ Read an int field; a missing field decodes as 0. """
        strval = self.readStr()
        return 0 if strval is None else Integer.parseInt(strval)
    def readIntMax(self):
        """ Read an int field; missing/empty decodes as
        Integer.MAX_VALUE (the "unset" sentinel). """
        strval = self.readStr()
        return Integer.MAX_VALUE if (strval is None or 0 == len(strval)) else Integer.parseInt(strval)
    def readLong(self):
        """ Read a long field; a missing field decodes as 0. """
        strval = self.readStr()
        return 0l if strval is None else Long.parseLong(strval)
    def readDouble(self):
        """ Read a double field; a missing field decodes as 0. """
        strval = self.readStr()
        return 0 if strval is None else Double.parseDouble(strval)
    def readDoubleMax(self):
        """ Read a double field; missing/empty decodes as
        Double.MAX_VALUE (the "unset" sentinel). """
        strval = self.readStr()
        return Double.MAX_VALUE if (strval is None or 0 == len(strval)) else Double.parseDouble(strval)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for while_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import importer
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops import control_flow_v2_toggles
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops import while_v2
from tensorflow.python.ops.while_v2 import while_loop as while_loop_v2
from tensorflow.python.platform import test
def random_gamma(shape):  # pylint: disable=invalid-name
  """Sample a gamma tensor of the given shape with parameter 1.0."""
  alpha = 1.0
  return random_ops.random_gamma(shape, alpha)
def random_gamma_with_alpha_beta(shape):  # pylint: disable=invalid-name
  """Sample a gamma tensor with broadcast alpha/beta parameters."""
  alphas = [[1.], [3.], [5.], [6.]]
  betas = [[3., 4.]]
  return random_ops.random_gamma(shape, alpha=alphas, beta=betas)
def random_poisson_v2(shape):  # pylint: disable=invalid-name
  """Sample a Poisson tensor of the given shape with rate 1.0."""
  rate = 1.0
  return random_ops.random_poisson_v2(shape, rate)
def random_poisson_v2_with_lam(shape):  # pylint: disable=invalid-name
  """Sample a Poisson tensor with a vector of rates."""
  rates = [12.2, 3.3]
  return random_ops.random_poisson_v2(shape, rates)
def fill(shape):  # pylint: disable=invalid-name
  """Return a tensor of the given shape filled with 1.0."""
  value = 1.0
  return array_ops.fill(shape, value)
class WhileV2Test(test.TestCase, parameterized.TestCase):
  @test_util.run_deprecated_v1
  def testSingleLoopVar(self):
    """while_v2 with a single loop variable: forward value and gradient."""
    x = constant_op.constant(2.)
    # v: 2 -> 4 -> 16; the loop exits once v >= 8
    ret = while_loop_v2(
        lambda v: v < 8., lambda v: v * v, [x], return_same_structure=False)
    grad = gradients_impl.gradients(ret, [x])
    with self.cached_session():
      self.assertEqual(self.evaluate(ret), 16.)
      # two squarings give x**4, so d/dx = 4*x**3 = 32 at x=2
      self.assertSequenceEqual(self.evaluate(grad), [32.])
  @test_util.run_deprecated_v1
  def testSingleLoopVarBackPropFalse(self):
    """back_prop=False disables gradient construction for the loop."""
    x = constant_op.constant(2.)
    ret = while_loop_v2(
        lambda v: v < 8.,
        lambda v: v * v, [x],
        return_same_structure=False,
        back_prop=False)
    grad = gradients_impl.gradients(ret, [x])
    # No gradient graph was built, so gradients() returns [None].
    self.assertEqual(grad, [None])
    with self.cached_session():
      self.assertEqual(self.evaluate(ret), 16.)
  @test_util.run_deprecated_v1
  def testCustomGradient(self):
    """A loop body wrapped in custom_gradient (capturing a variable) works."""
    x = constant_op.constant(2.)
    n = constant_op.constant(1., name="const-n")
    m = variables.Variable(1.0)
    self.evaluate(variables.global_variables_initializer())
    def body_fn(v): # pylint: disable=invalid-name
      @custom_gradient.custom_gradient
      def inner_fn(v): # pylint: disable=invalid-name
        # Custom grad returns (input grads, variable grads); with n == m == 1
        # this matches the true gradient of v*v.
        def grad_fn(dy, variables=None): # pylint: disable=invalid-name, unused-argument, redefined-outer-name
          return dy * 2 * v * n * m, [v * v]
        return v * v * m, grad_fn
      return inner_fn(v)
    ret = while_loop_v2(
        lambda v: v < 8., body_fn, [x], return_same_structure=False)
    grad = gradients_impl.gradients(ret, [x])
    with self.cached_session():
      self.assertEqual(self.evaluate(ret), 16.)
      self.assertSequenceEqual(self.evaluate(grad), [32.])
@test_util.run_v1_only("b/120545219")
def testReturnSameStructureTrue(self):
x = constant_op.constant(2.)
ret = while_loop_v2(
lambda v: v < 8., lambda v: v * v, [x], return_same_structure=True)
grad = gradients_impl.gradients(ret, [x])
with self.cached_session() as sess:
eval_result = sess.run(ret)
self.assertIsInstance(eval_result, list)
self.assertLen(eval_result, 1)
self.assertEqual(16., eval_result[0])
self.assertSequenceEqual(sess.run(grad), [32.])
  def testVerifyInputOutputTypesMatch(self):
    """A body that changes a loop var's dtype must raise TypeError at trace."""
    @def_function.function
    def BuildWhile():
      x = constant_op.constant(1., dtypes.float32)
      def Body(x):
        # Returns float16 while the loop var entered as float32.
        return math_ops.cast(x, dtypes.float16) + 1
      while_loop_v2(lambda x: x < 10, Body, [x])
    with self.assertRaisesRegex(
        TypeError,
        r"Loop var Const:0 enters the loop with type <dtype: 'float32'> "
        r"but has type <dtype: 'float16'> after 1 iteration."):
      BuildWhile()
  @parameterized.parameters(dtypes.float32, dtypes.float64)
  def testGradientTapeResourceVariable(self, dtype):
    """GradientTape differentiates a loop that captures a resource variable."""
    with context.eager_mode():
      v = variables.Variable(1., dtype=dtype)
      @def_function.function
      def fnWithLoop(): # pylint: disable=invalid-name
        with backprop.GradientTape() as tape:
          _, x = while_loop_v2(
              lambda i, _: i < 2,
              lambda i, x: (i + 1, x * v),
              [0, constant_op.constant(2., dtype=dtype)])
        return tape.gradient(x, v)
      # x == 2*v*v after two iterations, so dx/dv == 4*v == 4.
      self.assertAllEqual(fnWithLoop(), 4.0)
  def testDeviceLabelsInherited(self):
    """Ops in the loop body inherit the device active at the while_loop call."""
    def _LoopBody(i, y):
      result = math_ops.cos(y)
      # Inherits the CPU:10 device set around the while_loop_v2 call below.
      self.assertIn("CPU:10", result.device)
      with ops.device("CPU:11"):
        result = array_ops.identity(result)
        # An explicit inner device scope still overrides the inherited one.
        self.assertIn("CPU:11", result.device)
      return i + 1, result
    @def_function.function
    def _FunctionWithWhileLoop():
      x = constant_op.constant(1.)
      with ops.device("CPU:10"):
        _, z = while_loop_v2(
            lambda i, _: i < 2,
            _LoopBody,
            [0, x])
      return z
    # The test assertion runs at trace time.
    _FunctionWithWhileLoop.get_concrete_function()
  def testExternalControlDependencies(self):
    """A control dependency on an external op is honored inside the loop."""
    with ops.Graph().as_default(), self.test_session():
      v = variables.Variable(1.)
      self.evaluate(v.initializer)
      op = v.assign_add(1.)
      def body_fn(i): # pylint: disable=invalid-name
        # The external assign_add must run before the body's increment.
        with ops.control_dependencies([op]):
          return i + 1
      loop = while_loop_v2(lambda i: i < 1, body_fn, [0])
      loop[0].op.run()
      # One iteration ran, so the assign_add fired exactly once: 1. + 1.
      self.assertAllEqual(self.evaluate(v), 2.0)
@test_util.run_deprecated_v1
def testMultipleLoopVarsBasic(self):
x = constant_op.constant(5.)
y = constant_op.constant(3.)
# x = 5.
# y = 3.
# while x < 45.:
# x = x * y
ret = while_loop_v2(
lambda v, _: v < 45.,
lambda v, w: (v * w, w), [x, y],
return_same_structure=False)
# ret = [x*y^2, y]
# Note: This is simply d_ret[0]/d_x since d_ret[1]/d_x is 0.
grad = gradients_impl.gradients(ret, [x]) # [2*x*y]
with self.cached_session():
self.assertSequenceEqual(self.evaluate(ret), [45., 3.])
self.assertSequenceEqual(self.evaluate(grad), [9.])
  @test_util.run_deprecated_v1
  def testMultipleLoopNonscalarCond(self):
    """Same as testMultipleLoopVarsBasic but the first var is a 1x1 tensor."""
    x = constant_op.constant([[5.]])
    y = constant_op.constant(3.)
    # x = 5.
    # y = 3.
    # while x < 45.:
    #   x = x * y
    ret = while_loop_v2(
        lambda v, _: v < 45.,
        lambda v, w: (v * w, w), [x, y],
        return_same_structure=False)
    # ret == [x*y^2, y]
    # Note: This is simply d_ret[0]/d_x since d_ret[1]/d_x is 0.
    grad = gradients_impl.gradients(ret, [x])  # [2*x*y]
    with self.cached_session():
      self.assertSequenceEqual(self.evaluate(ret), [45., 3.])
      self.assertSequenceEqual(self.evaluate(grad), [9.])
  @test_util.run_deprecated_v1
  def testMultipleLoopVars(self):
    """Both loop vars are updated; checks all partial derivatives."""
    x = constant_op.constant(5.)
    y = constant_op.constant(3.)
    # x = 5.
    # y = 3.
    # while x < 45.:
    #   x = x * y
    #   y = x + y
    ret = while_loop_v2(
        lambda v, _: v < 45.,
        lambda v, w: (v * w, v + w), [x, y],
        return_same_structure=False)
    # ret = [y*x**2 + x*y**2, x*y + x + y]
    gradx_0 = gradients_impl.gradients(ret[0], [x])  # [2*x*y + y**2]
    gradx_1 = gradients_impl.gradients(ret[1], [x])  # [y + 1]
    gradx_2 = gradients_impl.gradients(ret, [x])  # [2*x*y + y**2 + 2*y + 1]
    grady_0 = gradients_impl.gradients(ret[0], [y])  # [2*x*y + x**2]
    grady_1 = gradients_impl.gradients(ret[1], [y])  # [x + 1]
    grady_2 = gradients_impl.gradients(ret, [y])  # [2*x*y + x**2 + x + 1]
    with self.cached_session():
      self.assertSequenceEqual(self.evaluate(ret), [120., 23.])
      self.assertSequenceEqual(self.evaluate(gradx_0), [39.])
      self.assertSequenceEqual(self.evaluate(gradx_1), [4.])
      self.assertSequenceEqual(self.evaluate(gradx_2), [43.])
      self.assertSequenceEqual(self.evaluate(grady_0), [55.])
      self.assertSequenceEqual(self.evaluate(grady_1), [6.])
      self.assertSequenceEqual(self.evaluate(grady_2), [61.])
  @test_util.run_deprecated_v1
  def testGradientTape(self):
    """GradientTape in graph mode differentiates through the loop."""
    with backprop.GradientTape() as t:
      x = constant_op.constant(2.)
      t.watch(x)
      ret = while_loop_v2(
          lambda v: v < 4., lambda v: v * v, [x],
          return_same_structure=False)  # x**2
    grad = t.gradient(ret, x)
    with self.cached_session() as sess:
      # d(x**2)/dx at x=2 is 4.
      self.assertAllEqual(sess.run(grad), 4.0)
  @test_util.run_deprecated_v1
  def testMultipleWhileLoops(self):
    """Chained loops (output of one feeds the next) with second derivatives."""
    x = constant_op.constant(2.)
    ret1 = while_loop_v2(
        lambda v: v < 4., lambda v: v * v, [x],
        return_same_structure=False)  # x**2
    ret2 = while_loop_v2(
        lambda v: v < 16., lambda v: v * v, [ret1],
        return_same_structure=False)  # x**4
    grad = gradients_impl.gradients(ret2, [x])  # 4x**3
    grad_grad = gradients_impl.gradients(grad, [x])  # 12x**2
    with self.cached_session():
      self.assertSequenceEqual(self.evaluate(grad), [32.])
      self.assertSequenceEqual(self.evaluate(grad_grad), [48.])
  def testMultipleWhileLoopsWithFunc(self):
    """Independent stateless loops get StatelessWhile ops with no control deps."""
    x = constant_op.constant(2.)
    @def_function.function
    def Fn():
      ret1 = while_loop_v2(
          lambda v: v < 4.,
          lambda v: v * v, [x],
          return_same_structure=False,
          name="while_1")  # x**2
      ret2 = while_loop_v2(
          lambda v: v < 16.,
          lambda v: v * v, [x],
          return_same_structure=False,
          name="while_2")  # x**4
      return ret1, ret2
    concrete_fn = Fn.get_concrete_function()
    while_1 = concrete_fn.graph.get_operation_by_name("while_1")
    while_2 = concrete_fn.graph.get_operation_by_name("while_2")
    # Neither loop is stateful, so both lower to StatelessWhile and are not
    # sequenced against each other.
    self.assertEqual(while_1.type, "StatelessWhile")
    self.assertEqual(while_2.type, "StatelessWhile")
    self.assertEmpty(while_1.control_inputs)
    self.assertEmpty(while_2.control_inputs)
  def testMultipleWhileLoopsGradStateless(self):
    """Gradient loops of stateless loops are themselves stateless."""
    @def_function.function
    def Fn():
      x = constant_op.constant(2.)
      with backprop.GradientTape() as tape:
        tape.watch(x)
        ret1 = while_loop_v2(
            lambda v: v < 4.,
            lambda v: v * v, [x],
            return_same_structure=False,
            name="while_1")  # x**2
        ret2 = while_loop_v2(
            lambda v: v < 16.,
            lambda v: v * v, [x],
            return_same_structure=False,
            name="while_2")  # x**4
        loss = ret1 + ret2
      return tape.gradient(loss, x)
    graph = Fn.get_concrete_function().graph
    # 2 forward loops + 2 gradient loops.
    while_ops = [op for op in graph.get_operations() if "While" in op.type]
    self.assertAllEqual([op.type for op in while_ops], ["StatelessWhile"] * 4,
                        "Must have exactly 4 StatelessWhile ops.")
    for op in while_ops:
      self.assertEmpty(op.control_inputs,
                       "{} should not have any control inputs".format(op.name))
  def testMultipleWhileLoopsWithDeps(self):
    """Loops touching the same variable are auto-chained by a control edge."""
    x = variables.Variable(2.)
    c = constant_op.constant(2.)
    @def_function.function
    def Fn():
      def Body1(v):
        # Stateful op: makes the loop a (non-stateless) While.
        x.assign(x)
        return v * x
      ret1 = while_loop_v2(
          lambda v: v < 4.,
          Body1, [c],
          return_same_structure=False,
          name="while_1")  # 2x
      def Body2(v):
        x.assign(x)
        return v * x * x
      ret2 = while_loop_v2(
          lambda v: v < 16.,
          Body2, [c],
          return_same_structure=False,
          name="while_2")  # 4x
      return ret1, ret2
    concrete_fn = Fn.get_concrete_function()
    while_1 = concrete_fn.graph.get_operation_by_name("while_1")
    while_2 = concrete_fn.graph.get_operation_by_name("while_2")
    self.assertEqual(while_1.type, "While")
    self.assertEqual(while_2.type, "While")
    # Both loops assign `x`, so while_2 must wait on while_1.
    self.assertEmpty(while_1.control_inputs)
    self.assertLen(while_2.control_inputs, 1)
    self.assertIs(while_2.control_inputs[0], while_1)
  def testMultipleWhileLoopsWithVarsDeps(self):
    """Control edges are added only between loops sharing the same variable."""
    x1 = variables.Variable(2.)
    x2 = variables.Variable(3.)
    c = constant_op.constant(2.)
    @def_function.function
    def Fn():
      # while_1 and while_2 both touch x1; while_3 and while_4 both touch x2;
      # while_stateless touches no variable.
      def Body1(v):
        x1.assign(x1)
        return v * x1
      ret1 = while_loop_v2(
          lambda v: v < 4.,
          Body1, [c],
          return_same_structure=False,
          name="while_1")  # 2x
      def Body2(v):
        x1.assign(x1)
        return v * x1 * x1
      ret2 = while_loop_v2(
          lambda v: v < 16.,
          Body2, [c],
          return_same_structure=False,
          name="while_2")  # 4x
      def Body3(v):
        x2.assign(x2)
        return v * x2
      ret3 = while_loop_v2(
          lambda v: v < 4.,
          Body3, [c],
          return_same_structure=False,
          name="while_3")  # 3x
      def Body4(v):
        x2.assign(x2)
        return v * x2 * x2
      ret4 = while_loop_v2(
          lambda v: v < 16.,
          Body4, [c],
          return_same_structure=False,
          name="while_4")  # 9x
      ret5 = while_loop_v2(
          lambda v: v < 16.,
          lambda v: v * v, [c],
          return_same_structure=False,
          name="while_stateless")  # x**2
      return ret1, ret2, ret3, ret4, ret5
    concrete_fn = Fn.get_concrete_function()
    while_1 = concrete_fn.graph.get_operation_by_name("while_1")
    while_2 = concrete_fn.graph.get_operation_by_name("while_2")
    while_3 = concrete_fn.graph.get_operation_by_name("while_3")
    while_4 = concrete_fn.graph.get_operation_by_name("while_4")
    while_stateless = concrete_fn.graph.get_operation_by_name(
        "while_stateless")
    self.assertEqual(while_1.type, "While")
    self.assertEqual(while_2.type, "While")
    self.assertEqual(while_3.type, "While")
    self.assertEqual(while_4.type, "While")
    self.assertEqual(while_stateless.type, "StatelessWhile")
    # Chains only exist within each variable's group; the stateless loop is
    # fully independent.
    self.assertEmpty(while_1.control_inputs)
    self.assertLen(while_2.control_inputs, 1)
    self.assertIs(while_2.control_inputs[0], while_1)
    self.assertEmpty(while_3.control_inputs)
    self.assertLen(while_4.control_inputs, 1)
    self.assertIs(while_4.control_inputs[0], while_3)
    self.assertEmpty(while_stateless.control_inputs)
@test_util.run_deprecated_v1
def testDoubleDerivative(self):
x = constant_op.constant(2.)
ret = while_loop_v2(
lambda v: v < 8., lambda v: v**2, [x],
return_same_structure=False) # x**4
grad = gradients_impl.gradients(ret, [x]) # 4x**3
grad_grad = gradients_impl.gradients(grad, [x]) # 12x**2
with self.cached_session():
self.assertEqual(self.evaluate(ret), 16.)
self.assertSequenceEqual(self.evaluate(grad), [32.])
self.assertSequenceEqual(self.evaluate(grad_grad), [48.])
  @test_util.run_v2_only
  def testMultipleWhileLoopsEager(self):
    """Chained loops and their first/second derivatives inside a tf.function."""
    @def_function.function
    def Func():
      x = constant_op.constant(2.)
      ret1 = while_loop_v2(
          lambda v: v < 4., lambda v: v * v, [x],
          return_same_structure=False)  # x**2
      ret2 = while_loop_v2(
          lambda v: v < 16.,
          lambda v: v * v, [ret1],
          return_same_structure=False)  # x**4
      grad = gradients_impl.gradients(ret2, [x])[0]  # 4x**3
      grad_grad = gradients_impl.gradients(grad, [x])[0]  # 12x**2
      return grad, grad_grad
    grad, grad_grad = Func()
    self.assertEqual(grad.numpy(), 32.)
    self.assertEqual(grad_grad.numpy(), 48.)
  @test_util.run_v2_only
  def testDoubleDerivativeEager(self):
    """Second derivative of a single loop inside a tf.function (v2 mode)."""
    @def_function.function
    def Func():
      x = constant_op.constant(2.)
      ret = while_loop_v2(
          lambda v: v < 8., lambda v: v**2, [x],
          return_same_structure=False)  # x**4
      grad = gradients_impl.gradients(ret, [x])[0]  # 4x**3
      grad_grad = gradients_impl.gradients(grad, [x])[0]  # 12x**2
      return ret, grad, grad_grad
    ret, grad, grad_grad = Func()
    self.assertEqual(ret.numpy(), 16.)
    self.assertEqual(grad.numpy(), 32.)
    self.assertEqual(grad_grad.numpy(), 48.)
  def _testPruning(self):
    """Checks Grappler prunes an unused TensorList accumulator from the loop.

    GetOptimizedGraph is a module-level helper (defined elsewhere in this
    file) that runs Grappler on the current graph's train_op collection.
    """
    x = constant_op.constant(1)
    tensor_list = list_ops.empty_tensor_list(
        element_dtype=x.dtype, element_shape=x.shape)
    def Cond(x, tl):
      del tl  # Unused for Cond.
      return x < 5
    def Body(x, tl):
      return x + 1, list_ops.tensor_list_push_back(tl, x)
    outputs = control_flow_ops.while_loop(Cond, Body, [x, tensor_list])
    train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    # Only the scalar output is fetched; the TensorList is dead code.
    train_op.append(outputs[0])
    g = GetOptimizedGraph()
    # TODO(b/136034023): while_v2 adds an extra loop_counter which is not pruned
    # away, causing an extra Enter node.
    enter_count = 2 if control_flow_util.ENABLE_CONTROL_FLOW_V2 else 1
    self.assertLen([n for n in g.node if n.op == "Enter"], enter_count)
    # Test that the TensorList is pruned out.
    self.assertEmpty([
        n for n in g.node if n.op == "Enter" and
        n.attr["T"].type == dtypes.variant.as_datatype_enum
    ])
    self.assertEmpty([n for n in g.node if n.op == "TensorListPushBack"])
    # Now also fetch the stacked list, making the accumulator live.
    stack = list_ops.tensor_list_stack(outputs[1], element_dtype=x.dtype)
    train_op.append(stack)
    g = GetOptimizedGraph()
    # TODO(b/136034023): while_v2 adds an extra loop_counter which is not pruned
    # away, causing an extra Enter node.
    enter_count = 3 if control_flow_util.ENABLE_CONTROL_FLOW_V2 else 2
    self.assertLen([n for n in g.node if n.op == "Enter"], enter_count)
    # Test that the TensorList is not pruned out.
    self.assertNotEmpty([
        n for n in g.node if n.op == "Enter" and
        n.attr["T"].type == dtypes.variant.as_datatype_enum
    ])
    self.assertNotEmpty([n for n in g.node if n.op == "TensorListPushBack"])
  @test_util.run_deprecated_v1
  def testPruningV1(self):
    """Runs the pruning check with control flow v1."""
    self._testPruning()
  @test_util.enable_control_flow_v2
  @test_util.run_deprecated_v1
  def testPruningV2(self):
    """Runs the pruning check with control flow v2 enabled."""
    self._testPruning()
  def _testDoNotAccumulateInvariants(self):
    """Checks loop-invariant captures get no accumulator in the grad graph."""
    # v2 accumulates via TensorListPushBack; v1 uses StackPushV2.
    push_op = ("TensorListPushBack"
               if control_flow_v2_toggles.control_flow_v2_enabled() else
               "StackPushV2")
    # Tests that loop invariants, i.e., tensors that are "captured" by the
    # while loop and not passed as loop variables are not accumulated in
    # gradient computation.
    v = constant_op.constant(5.0, name="v")
    r = control_flow_ops.while_loop(
        lambda _: True, lambda x: v * x, [1.0], maximum_iterations=5)
    output = gradients_impl.gradients(r, v)[0]
    train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    train_op.append(output)
    g = GetOptimizedGraph()
    # The gradient for v * x requires the value of both v and x. Since v is a
    # loop invariant it is not accumulated so we have just one accumulator for
    # x.
    self.assertLen([n for n in g.node if n.op == push_op], 1)
  @test_util.run_deprecated_v1
  def testDoNotAccumulateInvariantsV1(self):
    """Runs the invariant-accumulation check with control flow v1."""
    self._testDoNotAccumulateInvariants()
  @test_util.run_deprecated_v1
  @test_util.enable_control_flow_v2
  def testDoNotAccumulateInvariantsV2(self):
    """Runs the invariant-accumulation check with control flow v2 enabled."""
    self._testDoNotAccumulateInvariants()
  @test_util.enable_control_flow_v2
  @test_util.run_deprecated_v1
  @test_util.enable_output_all_intermediates
  def testPruningNested(self):
    """Pruning of an unused TensorList threaded through a nested while loop."""
    assert control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
    x = constant_op.constant(0)
    tensor_list = list_ops.empty_tensor_list(
        element_dtype=x.dtype, element_shape=x.shape)
    def Cond(x, tl):
      del tl  # Unused for Cond.
      return x < 25
    def Body(x, tl):
      def InnerCond(inner_x, unused_outer_x, unused_tl):
        return inner_x < 5
      def InnerBody(inner_x, outer_x, tl):
        return inner_x + 1, outer_x + 1, list_ops.tensor_list_push_back(tl, x)
      inner_x = constant_op.constant(0)
      # Drop the inner counter; pass the updated outer_x and list outward.
      return control_flow_ops.while_loop(InnerCond, InnerBody,
                                         [inner_x, x, tl])[1:]
    outputs = control_flow_ops.while_loop(Cond, Body, [x, tensor_list])
    train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    train_op.append(outputs[0])
    g = GetOptimizedGraph()
    # TODO(b/136034023): while_v2 adds an extra loop_counter which is not pruned
    # away, causing an extra Enter node.
    # enter_count = 4 if control_flow_util.ENABLE_CONTROL_FLOW_V2 else 2
    # self.assertLen([n for n in g.node if n.op == "Enter"], enter_count)
    # Test that the TensorList is pruned out.
    self.assertEmpty([
        n for n in g.node if n.op == "Enter" and
        n.attr["T"].type == dtypes.variant.as_datatype_enum
    ])
    self.assertEmpty([n for n in g.node if n.op == "TensorListPushBack"])
    self.assertEmpty([n for n in g.node if n.op == "_While"])
    stack = list_ops.tensor_list_stack(outputs[1], element_dtype=x.dtype)
    train_op.append(stack)
    g = GetOptimizedGraph()
    # TODO(b/136034023): while_v2 adds an extra loop_counter which is not pruned
    # away, causing an extra Enter node.
    # enter_count = 3 if control_flow_util.ENABLE_CONTROL_FLOW_V2 else 2
    # self.assertLen([n for n in g.node if n.op == "Enter"], enter_count)
    # Test that the TensorList is not pruned out.
    self.assertNotEmpty([
        n for n in g.node if n.op == "Enter" and
        n.attr["T"].type == dtypes.variant.as_datatype_enum
    ])
    self.assertNotEmpty([n for n in g.node if n.op == "TensorListPushBack"])
  @test_util.enable_control_flow_v2
  @test_util.run_deprecated_v1
  @test_util.enable_output_all_intermediates
  def testPruningNested2(self):
    """Triple-nested loops with an inner gradient: only one accumulator stays."""
    assert control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
    v = constant_op.constant(5.0, name="v")
    p = array_ops.placeholder(dtype=dtypes.int32)
    def MidBodyBuilder(iterations):
      def MidBody(i, x):
        r = control_flow_ops.while_loop(
            lambda *_: True,
            lambda i, x: (i + 1, math_ops.multiply(v, x, name="my_mul")),
            (0, x),
            maximum_iterations=iterations,
            name="inner")
        return (i + 1, gradients_impl.gradients(x + r[1], v)[0])
      return MidBody
    def OuterBody(i, x):
      iterations = array_ops.size(p, name="iterations")
      return (i + 1, x + control_flow_ops.while_loop(
          lambda *_: True,
          MidBodyBuilder(iterations), (0, x),
          maximum_iterations=iterations,
          name="mid")[1])
    def CreateWhileLoop():
      with ops.device("/cpu:0"):
        r = control_flow_ops.while_loop(
            lambda *_: True,
            OuterBody, (0, 1.0),
            maximum_iterations=5,
            name="outer")
      return array_ops.identity(r[1])
    output = CreateWhileLoop()
    train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    train_op.append(output)
    g = GetOptimizedGraph()
    # After Grappler optimization exactly one accumulator push remains.
    self.assertLen([n for n in g.node if n.op == "TensorListPushBack"], 1)
  @test_util.enable_control_flow_v2
  @test_util.run_deprecated_v1
  @test_util.enable_output_all_intermediates
  def testPruningNested3(self):
    """Gradient of a simple loop leaves exactly one accumulator after pruning."""
    assert control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
    v = constant_op.constant(5.0, name="v")
    def CreateWhileLoop():
      r = control_flow_ops.while_loop(
          lambda _: True,
          lambda x: math_ops.multiply(v, x, name="my_mul"), [1.0],
          maximum_iterations=5,
          name="outer")
      return array_ops.identity(r)
    r = CreateWhileLoop()
    output = gradients_impl.gradients(r, v)[0]
    train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    train_op.append(output)
    g = GetOptimizedGraph()
    self.assertLen([n for n in g.node if n.op == "TensorListPushBack"], 1)
def _assertNotAccumulated(self, while_op, index):
"""Asserts that `while_op` input at `index` is not accumulated."""
body_graph = while_v2._get_graph(while_op, "body", "_body_graph")
placeholder = body_graph.inputs[index]
self.assertNotIn("TensorListPushBack",
[op.type for op in placeholder.consumers()])
  @test_util.enable_control_flow_v2
  @test_util.run_deprecated_v1
  @test_util.enable_output_all_intermediates
  def testDoNotOutputLoopCounterAsIntermediate(self):
    """The implicit loop counter (input 0) must not get an accumulator."""
    assert control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
    v = constant_op.constant(5.0, name="v")
    r = control_flow_ops.while_loop(
        lambda _: True, lambda x: v * x, [1.0], maximum_iterations=5)
    # Skip over Identity.
    while_op = r.op.inputs[0].op
    self._assertNotAccumulated(while_op, 0)
  @test_util.enable_control_flow_v2
  @test_util.run_deprecated_v1
  @test_util.enable_output_all_intermediates
  def testDoNotOutputLoopInvariantAsIntermediate(self):
    """A captured loop-invariant tensor must not get an accumulator."""
    assert control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
    def GetInputIndex(op, tensor):
      # NOTE(review): implicitly returns None when `tensor` is not among the
      # op's inputs; callers here rely on it always being found.
      for index, inp in enumerate(op.inputs):
        if inp is tensor:
          return index
    v = constant_op.constant(5.0, name="v")
    r = control_flow_ops.while_loop(
        lambda _: True, lambda x: v * x, [1.0], maximum_iterations=5)
    # Skip over Identity.
    while_op = r.op.inputs[0].op
    # We can't directly use while_op.inputs.index() because Tensors are not
    # hashable.
    index = GetInputIndex(while_op, v)
    self._assertNotAccumulated(while_op, index)
@test_util.run_deprecated_v1
def testCaptureExternalTensorInCond(self):
x = constant_op.constant(2.)
y = constant_op.constant(1.)
ret = while_loop_v2(
lambda v: v + y < 9.,
lambda v: v * 3., [x],
return_same_structure=False)
grad = gradients_impl.gradients(ret, [x])
with self.cached_session():
self.assertEqual(self.evaluate(ret), 18.)
self.assertSequenceEqual(self.evaluate(grad), [9.])
  @test_util.run_deprecated_v1
  def testCaptureExternalTensorInBody(self):
    """The body may capture an external tensor (`y`) not in the loop vars."""
    x = constant_op.constant(2.)
    y = constant_op.constant(3.)
    ret = while_loop_v2(
        lambda v: v < 8., lambda v: v * y, [x], return_same_structure=False)
    grad = gradients_impl.gradients(ret, [x])
    with self.cached_session():
      # 2 -> 6 -> 18; ret == 9*x so d(ret)/dx == 9.
      self.assertEqual(self.evaluate(ret), 18.)
      self.assertSequenceEqual(self.evaluate(grad), [9.])
  @test_util.run_deprecated_v1
  def testLoopWithTensorListPushBack(self):
    """A loop var of TensorList type (with pushes in the body) is supported.

    ScalarShape is a module-level helper defined elsewhere in this file.
    """
    x = constant_op.constant(2.)
    tensor_list = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=ScalarShape())
    def Cond(x, tl):
      del tl  # Unused for Cond.
      return x < 5.
    def Body(x, tl):
      tl = list_ops.tensor_list_push_back(tl, x)
      tl = list_ops.tensor_list_push_back(tl, constant_op.constant(100.))
      return x**2., tl
    ret = while_loop_v2(
        Cond, Body, [x, tensor_list], return_same_structure=False)
    grad = gradients_impl.gradients(ret[0], x)
    with self.cached_session() as sess:
      self.assertEqual(sess.run(ret[0]), 16.)
      self.assertSequenceEqual(self.evaluate(grad), [32.])
  @test_util.run_deprecated_v1
  def testDuplicateAccumulator(self):
    """An existing push-back in the body is reused instead of duplicated."""
    x = constant_op.constant(2.)
    tensor_list = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=ScalarShape())
    def Cond(x, tl):
      del tl  # Unused for Cond.
      return x < 5.
    def Body(x, tl):
      # There is an accumulator in the loop already so we should not add
      # another.
      tl = list_ops.tensor_list_push_back(tl, x)
      return x**2., tl
    ret = while_loop_v2(
        Cond, Body, [x, tensor_list], return_same_structure=False)
    for op in ops.get_default_graph().get_operations():
      if op.type == "While" or op.type == "StatelessWhile":
        while_op = op
    body_graph = while_v2._get_graph(while_op, "body", "_body_graph")
    x_input_index = [i for i, inp in enumerate(while_op.inputs) if inp == x][0]
    x_input_t = body_graph.inputs[x_input_index]
    accumulator_count = len(
        [c for c in x_input_t.consumers() if c.type == "TensorListPushBack"])
    # Exactly one push-back consumes x: the user-written one.
    self.assertEqual(accumulator_count, 1)
    grad = gradients_impl.gradients(ret[0], x)
    with self.cached_session() as sess:
      self.assertEqual(sess.run(ret[0]), 16.)
      self.assertSequenceEqual(self.evaluate(grad), [32.])
  @parameterized.named_parameters(
      ("UnknownShape", None),
      ("PartiallyDefinedShape", [None, 2]),
      ("FullyDefinedShape", [1, 2]),
  )
  @test_util.run_deprecated_v1
  def testAccumulatorElementShape(self, shape):
    """Accumulator TensorLists preserve the loop var's (partial) static shape."""
    def MatchShape(actual_tensor_shape):
      # Compare the shapes, treating None dimensions as equal. We do not
      # directly check actual_tensor_shape and tf.TensorShape(shape) for
      # equality because tf.Dimension.__eq__ returns None if either dimension is
      # None.
      if shape is None:
        self.assertIsNone(actual_tensor_shape.dims)
      else:
        self.assertListEqual(actual_tensor_shape.as_list(), shape)
    def GetAccumulatorForInputAtIndex(while_op, idx):
      # Follows the body input -> its TensorListPushBack -> the matching
      # while output to find the accumulator tensor.
      body_graph = while_v2._get_graph(while_op, "body", "_body_graph")
      y_input_t = body_graph.inputs[idx]
      push_back_node = [c for c in y_input_t.consumers()
                        if c.type == "TensorListPushBack"][0]
      output_idx = body_graph.outputs.index(push_back_node.outputs[0])
      return while_op.outputs[output_idx]
    x = array_ops.placeholder(dtype=dtypes.float32, shape=shape)
    y = array_ops.placeholder(dtype=dtypes.float32, shape=shape)
    # Forward pass.
    ret = while_loop_v2(lambda v, u: v < 8.,
                        lambda v, u: (math_ops.pow(v, u), u),
                        [x, y],
                        return_same_structure=True)
    while_op = ret[0].op.inputs[0].op
    # Gradient pass.
    grad = gradients_impl.gradients(ret[0], x)
    # Note: There is an Identity b/w grad[0] and the While op.
    grad_while_op = grad[0].op.inputs[0].op
    # Get the TensorList output of While op containing the accumulated values
    # of y.
    x_input_index = [i for i, inp in enumerate(while_op.inputs) if x == inp][0]
    output = GetAccumulatorForInputAtIndex(while_op, x_input_index)
    _, val = list_ops.tensor_list_pop_back(output,
                                           element_dtype=dtypes.float32)
    MatchShape(val.shape)
    # Take second derivative to generate intermediate grad_while_op outputs
    gradients_impl.gradients(grad, x)
    # Get the TensorList output of gradient While op containing the accumulated
    # values of grad_x (note that grad_x is needed by the second derivative).
    # grad_while_op.inputs:
    grad_output_index = grad_while_op.outputs.index(grad[0].op.inputs[0])
    grad_output = GetAccumulatorForInputAtIndex(grad_while_op,
                                                grad_output_index)
    _, val = list_ops.tensor_list_pop_back(grad_output,
                                           element_dtype=dtypes.float32)
    MatchShape(val.shape)
def _createWhile(self, name):
"""Helper function testDefaultName."""
output = while_v2.while_loop(
lambda i: i < 3,
lambda i: i + 1, [constant_op.constant(0)],
return_same_structure=False)
while_op = output.op.inputs[0].op
self.assertEqual(while_op.type, "StatelessWhile")
return while_op
  def testDefaultName(self):
    """While ops and their cond/body functions get default, uniquified names."""
    with ops.Graph().as_default():
      while_op = self._createWhile(None)
      self.assertEqual(while_op.name, "while")
      self.assertRegex(while_op.get_attr("cond").name, r"while_cond_\d*")
      self.assertRegex(while_op.get_attr("body").name, r"while_body_\d*")
    with ops.Graph().as_default():
      with ops.name_scope("foo"):
        # Name scopes prefix the op name and are flattened into the
        # cond/body function names.
        while1_op = self._createWhile("")
        self.assertEqual(while1_op.name, "foo/while")
        self.assertRegex(while1_op.get_attr("cond").name, r"foo_while_cond_\d*")
        self.assertRegex(while1_op.get_attr("body").name, r"foo_while_body_\d*")
        while2_op = self._createWhile(None)
        self.assertEqual(while2_op.name, "foo/while_1")
        self.assertRegex(
            while2_op.get_attr("cond").name, r"foo_while_1_cond_\d*")
        self.assertRegex(
            while2_op.get_attr("body").name, r"foo_while_1_body_\d*")
  @test_util.enable_control_flow_v2
  @test_util.run_deprecated_v1
  def testWhileAndTensorArray(self):
    """map_fn (TensorArray-backed) works and differentiates under while_v2."""
    param = constant_op.constant(2.0)
    y0 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
    # map_fn uses TensorArray internally.
    r = map_fn.map_fn(lambda x: math_ops.multiply(x, param), y0)
    grad = gradients_impl.gradients(r, param)[0]
    self.assertAllClose([2.0, 4.0, 6.0, 8.0, 10.0, 12.0], self.evaluate(r))
    # d(sum(param * y0))/d(param) == sum(y0) == 21.
    self.assertAllClose(21.0, self.evaluate(grad))
  @test_util.run_deprecated_v1
  def testNestedWhile(self):
    """A while loop nested inside another (power computed by inner loop)."""
    # Compute sum of geometric progression: n^0 + n^1 + ... + n^m
    # We compute the pow using a while loop.
    n = constant_op.constant(3.)
    m = constant_op.constant(5.)
    sum_of_powers = constant_op.constant(0.)
    def Body(i, previous_sum):
      prod = constant_op.constant(1.)
      # Inner loop computes n**i by repeated multiplication.
      return i - 1., previous_sum + while_loop_v2(
          lambda c, _: c > 0,
          lambda c, v: (c - 1., v * n), [i, prod],
          return_same_structure=False)[1]
    result = while_loop_v2(
        lambda i, _: i >= 0,
        Body, [m, sum_of_powers],
        return_same_structure=False)[1]
    grad = gradients_impl.gradients(result, [n])
    # 3^0 + ... + 3^5 == 364.
    self.assertEqual(self.evaluate(result), 364.)
    self.assertSequenceEqual(self.evaluate(grad), [547.])
  @test_util.run_deprecated_v1
  def testNestedWhileWithLegacyDefun(self):
    """Nested while where the inner body goes through a legacy function.Defun."""
    n = constant_op.constant(3.)
    m = constant_op.constant(5.)
    sum_of_powers = constant_op.constant(0.)
    def Body(i, previous_sum):
      prod = constant_op.constant(1.)
      def InnerBodyWrapper(c, v):
        @function.Defun(dtypes.float32, dtypes.float32)
        def InnerBody(c, v):
          return c - 1., v * n
        results = InnerBody(c, v)
        # Defun outputs lose static shape info; restore scalar shapes so the
        # while loop's shape invariants hold.
        results[0].set_shape([])
        results[1].set_shape([])
        return results
      return i - 1., previous_sum + while_loop_v2(
          lambda c, _: c > 0,
          InnerBodyWrapper, [i, prod],
          return_same_structure=False)[1]
    result = while_loop_v2(
        lambda i, _: i >= 0,
        Body, [m, sum_of_powers],
        return_same_structure=False)[1]
    grad = gradients_impl.gradients(result, [n])
    self.assertEqual(self.evaluate(result), 364.)
    self.assertSequenceEqual(self.evaluate(grad), [547.])
@test_util.run_deprecated_v1
def testIdentityNodeInBody(self):
def Body(v):
v = array_ops.identity(v)
v = array_ops.identity(v)
return v * v
x = constant_op.constant(2.)
ret = while_loop_v2(
lambda v: v < 8., Body, [x], return_same_structure=False)
grad = gradients_impl.gradients(ret, [x])
self.assertEqual(self.evaluate(ret), 16.)
self.assertSequenceEqual(self.evaluate(grad), [32.])
  @test_util.run_deprecated_v1
  def testForwardPassRewrite(self):
    """Gradient construction rewrites the forward While once (and only once)."""
    x = constant_op.constant(1.0, name="x")
    output = while_v2.while_loop(lambda x: x < 10.0,
                                 lambda x: x * 2.0,
                                 [x])[0]
    while_op = output.op.inputs[0].op
    self.assertEqual(while_op.type, "StatelessWhile")
    # outputs = [loop_counter, max_iters, x]
    self.assertLen(while_op.outputs, 3)
    gradients_impl.gradients(output, x)
    # while_op should have been rewritten to output intermediates.
    # outputs = [loop_counter, max_iters, x, x_accumulator]
    self.assertLen(while_op.outputs, 4)
    gradients_impl.gradients(output, x)
    # Computing the gradient again shouldn't rewrite while_op again.
    self.assertLen(while_op.outputs, 4)
  @parameterized.named_parameters(
      ("RandomUniform", random_ops.random_uniform, [5, 3]),
      ("RandomNormal", random_ops.random_normal, [5, 3]),
      ("ParameterizedTruncatedNormal",
       random_ops.parameterized_truncated_normal, [5, 3]),
      ("TruncatedNormal", random_ops.truncated_normal, [5, 3]),
      ("RandomGamma", random_gamma, [5, 3]),
      ("RandomPoissonV2", random_poisson_v2, [5, 3]),
      ("RandomGammaWithAlphaBeta", random_gamma_with_alpha_beta, [5, 3, 4, 2]),
      ("RandomPoissonV2WithLam", random_poisson_v2_with_lam, [5, 3, 2]),
      )
  @test_util.run_deprecated_v1
  def testRandomOpsShape(self, random_fn, expected_shape):
    """Random ops get fully-known static shapes inside a while body."""
    shape = constant_op.constant([3])
    def Body(i, u):
      # [5] ++ [3], possibly extended further by the random op's params.
      shape_extended = array_ops.concat([[5], shape], axis=0)
      u = random_fn(shape_extended)
      # Shape inference must resolve the full static shape at trace time.
      assert u.shape.as_list() == expected_shape, str(u.shape.as_list())
      return i + 1, u
    _, _ = while_loop_v2(
        cond=lambda i, _: i < 3,
        body=Body,
        loop_vars=[
            0,
            array_ops.zeros(expected_shape, dtype=dtypes.float32),
        ])
  @test_util.run_deprecated_v1
  def testReshapeShape(self):
    """Reshape to a concat-built shape is statically inferred in a while body."""
    shape = constant_op.constant([3, 4])
    def Body(i, u):
      shape_extended = array_ops.concat([[5], shape], axis=0)
      u = array_ops.reshape(u, [-1])
      assert u.shape.as_list() == [60], str(u.shape.as_list())
      u = array_ops.reshape(u, shape_extended)
      assert u.shape.as_list() == [5, 3, 4], str(u.shape.as_list())
      return i + 1, u
    _, _ = while_loop_v2(
        cond=lambda i, _: i < 3,
        body=Body,
        loop_vars=[
            0,
            array_ops.zeros([5, 3, 4], dtype=dtypes.float32),
        ])
  @parameterized.named_parameters(
      ("Zeros", array_ops.zeros),
      ("Ones", array_ops.ones),
      ("Fill", fill),
      )
  @test_util.run_deprecated_v1
  def testFillOpsShape(self, fill_fn):
    """zeros/ones/fill with a concat-built shape get full static shape info."""
    shape = constant_op.constant([3, 4])
    def Body(i, u):
      shape_extended = array_ops.concat([[5], shape], axis=0)
      u = fill_fn(shape_extended)
      assert u.shape.as_list() == [5, 3, 4], str(u.shape.as_list())
      return i + 1, u
    _, _ = while_loop_v2(
        cond=lambda i, _: i < 3,
        body=Body,
        loop_vars=[
            0,
            array_ops.zeros([5, 3, 4], dtype=dtypes.float32),
        ])
  @test_util.run_deprecated_v1
  def testExternalColocationGrad(self):
    """A body colocated with an external tensor still differentiates."""
    external_t = constant_op.constant(2.)
    v0 = constant_op.constant(2.)
    def Body(v):
      with ops.colocate_with(external_t):
        return v * v
    ret = while_loop_v2(lambda v: v < 8., Body, [v0])[0]
    grad = gradients_impl.gradients(ret, [v0])[0]
    self.assertAllEqual(ret, 16.)
    self.assertAllEqual(grad, 32.)
  @test_util.run_deprecated_v1
  def testDoNotAccumulateConstNodes(self):
    """Constants in the body need no accumulator for the gradient pass."""
    def Body(v):
      return v * 2.0
    v0 = constant_op.constant(2.)
    ret = while_loop_v2(lambda v: v < 8., Body, [v0])[0]
    # Gradients computation has the side-effect of updating the forward op
    # which is what we want to test.
    unused_grad = gradients_impl.gradients(ret, [v0])[0]
    # ret is separated from the `While` op by an `Identity` so we skip over
    # that.
    forward_while_op = ret.op.inputs[0].op
    body_graph = while_v2._get_graph(forward_while_op, "body", "_body_graph")
    push_back_nodes = [
        o for o in body_graph.get_operations() if o.type == "TensorListPushBack"
    ]
    # Gradient of `Mul` requires accumulating both its inputs. But since one
    # of those is a Const (2.0), we should have just one accumulator.
    self.assertLen(push_back_nodes, 1)
def testDoNotAccumulateForwardTensorsForReductionOps(self):
    """Shape/Rank/Size needed by a gradient are read from the forward graph.

    The custom gradient asserts that shape/rank/size ops land in the
    forward graph (no accumulation), while actual value computation
    (zeros) lands in the gradient graph.
    """

    @def_function.function
    def Fn():
        with backprop.GradientTape() as tape:
            x = constant_op.constant(2.)
            tape.watch(x)

            def Body(i, x):
                forward_graph = ops.get_default_graph()

                @custom_gradient.custom_gradient
                def SquaredWithZeroGrad(x):

                    def Grad(unused_g, variables=None):  # pylint: disable=redefined-outer-name
                        del variables
                        gradient_graph = ops.get_default_graph()
                        # Metadata ops should be hoisted into the forward graph.
                        shape = gen_array_ops.shape(x)
                        assert shape.graph is forward_graph
                        rank = gen_array_ops.rank(x)
                        assert rank.graph is forward_graph
                        size = gen_array_ops.size(x)
                        assert size.graph is forward_graph
                        # Value computation stays in the gradient graph.
                        zeros = array_ops.zeros(shape)
                        assert zeros.graph is gradient_graph
                        return zeros

                    return x * 2, Grad

                return i + 1, SquaredWithZeroGrad(x)

            _, result = while_loop_v2(lambda i, _: i < 2, Body, [0, x])
        grad = tape.gradient(result, x)
        return grad

    Fn()
@test_util.run_v2_only
def testInheritParentNameScope(self):
    """cond/body graphs inherit the surrounding name scope ("foo/while/...")."""

    @def_function.function
    def F():
        with ops.name_scope("foo"):

            def Cond(unused_i):
                with ops.name_scope("cond"):
                    actual_name_scope = ops.get_name_scope()
                    expected_name_scope = "foo/while/cond"
                    assert actual_name_scope == expected_name_scope, (
                        "%s does not match %s" %
                        (actual_name_scope, expected_name_scope))
                return False

            def Body(i):
                with ops.name_scope("body"):
                    actual_name_scope = ops.get_name_scope()
                    expected_name_scope = "foo/while/body"
                    assert actual_name_scope == expected_name_scope, (
                        "%s does not match %s" %
                        (actual_name_scope, expected_name_scope))
                return i

            return while_v2.while_loop(Cond, Body, [0.])

    F()
@test_util.run_deprecated_v1  # Need to pass RunMetadata.
def testDisableLowering(self):
    """With lowering disabled, no Switch/Merge nodes should execute.

    Flips the module-level `_DISABLE_LOWER_USING_SWITCH_MERGE` flag, runs a
    simple squaring loop, and inspects RunMetadata node stats for "switch"
    nodes.

    Fix: restore the module-global flag in a `finally` block.  Previously a
    failing assertion (or session error) left the flag set to True, leaking
    state into every test run afterwards in the same process.
    """
    old = control_flow_util_v2._DISABLE_LOWER_USING_SWITCH_MERGE
    control_flow_util_v2._DISABLE_LOWER_USING_SWITCH_MERGE = True
    try:
        with self.session() as sess:
            x = constant_op.constant(2.)
            ret = while_loop_v2(
                lambda v: v < 8., lambda v: v * v, [x],
                return_same_structure=False)

            opts = config_pb2.RunOptions(
                trace_level=config_pb2.RunOptions.FULL_TRACE)
            run_metadata = config_pb2.RunMetadata()
            self.assertEqual(
                sess.run(ret, options=opts, run_metadata=run_metadata), 16)
            for dev_stat in run_metadata.step_stats.dev_stats:
                for ns in dev_stat.node_stats:
                    # Lowered loops would execute Switch/Merge nodes.
                    self.assertNotIn("switch", ns.node_name)
    finally:
        control_flow_util_v2._DISABLE_LOWER_USING_SWITCH_MERGE = old
def _runBasicWithConfig(self, config):
    """Runs a trivial 1000-iteration counting loop under `config` and checks the result."""
    with ops.device("/cpu:0"):
        x = constant_op.constant(0)
        ret, = while_loop_v2(lambda x: x < 1000, lambda x: x + 1, [x])
    with self.cached_session(config=config):
        self.assertEqual(1000, self.evaluate(ret))
@test_util.run_deprecated_v1
def testRunKernelsInline(self):
    """Loop runs correctly with inline kernel execution (inter_op threads = -1)."""
    config = config_pb2.ConfigProto()
    config.inter_op_parallelism_threads = -1
    self._runBasicWithConfig(config)
@test_util.run_deprecated_v1
def testSingleThreadedExecution(self):
    """Loop runs correctly under the single-threaded executor."""
    config = config_pb2.ConfigProto()
    config.experimental.executor_type = "SINGLE_THREADED_EXECUTOR"
    self._runBasicWithConfig(config)
def testIsControlFlowGraph(self):
    """Both cond and body graphs report is_control_flow_graph = True."""
    x = constant_op.constant(0)

    @def_function.function
    def F(c):

        def Cond(i):
            self.assertTrue(i.graph.is_control_flow_graph)
            return i < 2

        def Body(i):
            i = i + 1
            self.assertTrue(i.graph.is_control_flow_graph)
            return i

        return while_loop_v2(Cond, Body, [c])

    ret, = F(x)
    self.assertEqual(2, self.evaluate(ret))
def testImportFromSerializedWithFunctionInBody(self):
    """Gradients work for a StatelessWhile imported from a serialized GraphDef.

    The serialized graph below was produced by the snippet documented after
    the literal: a while loop whose body calls a tf.function
    (PartitionedCall inside while_body_822).  After importing it, gradients
    through the imported `while` op must still be computable.
    """
    serialized = """node {
name: "Const"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
}
float_val: 1.0
}
}
}
}
node {
name: "while/maximum_iterations"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: -1
}
}
}
}
node {
name: "while/loop_counter"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 0
}
}
}
}
node {
name: "while"
op: "StatelessWhile"
input: "while/loop_counter"
input: "while/maximum_iterations"
input: "Const"
attr {
key: "T"
value {
list {
type: DT_INT32
type: DT_INT32
type: DT_FLOAT
}
}
}
attr {
key: "_lower_using_switch_merge"
value {
b: true
}
}
attr {
key: "_num_original_outputs"
value {
i: 3
}
}
attr {
key: "_read_only_resource_inputs"
value {
list {
}
}
}
attr {
key: "body"
value {
func {
name: "while_body_822"
}
}
}
attr {
key: "cond"
value {
func {
name: "while_cond_821"
}
}
}
attr {
key: "output_shapes"
value {
list {
shape {
}
shape {
}
shape {
}
}
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
}
node {
name: "while/Identity"
op: "Identity"
input: "while"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Identity_1"
op: "Identity"
input: "while:1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Identity_2"
op: "Identity"
input: "while:2"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
library {
function {
signature {
name: "while_body_822"
input_arg {
name: "while_loop_counter"
type: DT_INT32
}
input_arg {
name: "while_maximum_iterations_0"
type: DT_INT32
}
input_arg {
name: "placeholder"
type: DT_FLOAT
}
output_arg {
name: "add"
type: DT_INT32
}
output_arg {
name: "while_maximum_iterations"
type: DT_INT32
}
output_arg {
name: "partitionedcall"
type: DT_FLOAT
}
}
node_def {
name: "PartitionedCall"
op: "PartitionedCall"
input: "placeholder"
attr {
key: "Tin"
value {
list {
type: DT_FLOAT
}
}
}
attr {
key: "Tout"
value {
list {
type: DT_FLOAT
}
}
}
attr {
key: "_collective_manager_ids"
value {
list {
}
}
}
attr {
key: "_read_only_resource_inputs"
value {
list {
}
}
}
attr {
key: "config"
value {
s: ""
}
}
attr {
key: "config_proto"
value {
s: ""
}
}
attr {
key: "executor_type"
value {
s: ""
}
}
attr {
key: "f"
value {
func {
name: "__inference_f_841"
}
}
}
experimental_debug_info {
original_node_names: "PartitionedCall"
}
}
node_def {
name: "add/y"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 1
}
}
}
experimental_debug_info {
original_node_names: "add/y"
}
}
node_def {
name: "add_0"
op: "AddV2"
input: "while_loop_counter"
input: "add/y:output:0"
attr {
key: "T"
value {
type: DT_INT32
}
}
experimental_debug_info {
original_node_names: "add"
}
}
ret {
key: "add"
value: "add_0:z:0"
}
ret {
key: "partitionedcall"
value: "PartitionedCall:output:0"
}
ret {
key: "while_maximum_iterations"
value: "while_maximum_iterations_0"
}
arg_attr {
key: 0
value {
attr {
key: "_output_shapes"
value {
list {
shape {
}
}
}
}
}
}
arg_attr {
key: 1
value {
attr {
key: "_output_shapes"
value {
list {
shape {
}
}
}
}
}
}
arg_attr {
key: 2
value {
attr {
key: "_output_shapes"
value {
list {
shape {
}
}
}
}
}
}
}
function {
signature {
name: "while_cond_821"
input_arg {
name: "while_loop_counter"
type: DT_INT32
}
input_arg {
name: "while_maximum_iterations"
type: DT_INT32
}
input_arg {
name: "placeholder"
type: DT_FLOAT
}
output_arg {
name: "less"
type: DT_BOOL
}
}
node_def {
name: "Less/y"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
}
float_val: 5.0
}
}
}
experimental_debug_info {
original_node_names: "Less/y"
}
}
node_def {
name: "Less"
op: "Less"
input: "placeholder"
input: "Less/y:output:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
experimental_debug_info {
original_node_names: "Less"
}
}
ret {
key: "less"
value: "Less:z:0"
}
arg_attr {
key: 0
value {
attr {
key: "_output_shapes"
value {
list {
shape {
}
}
}
}
}
}
arg_attr {
key: 1
value {
attr {
key: "_output_shapes"
value {
list {
shape {
}
}
}
}
}
}
arg_attr {
key: 2
value {
attr {
key: "_output_shapes"
value {
list {
shape {
}
}
}
}
}
}
}
function {
signature {
name: "__inference_f_841"
input_arg {
name: "mul_placeholder"
type: DT_FLOAT
}
output_arg {
name: "identity"
type: DT_FLOAT
}
}
node_def {
name: "mul/y"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
}
float_val: 2.0
}
}
}
experimental_debug_info {
original_node_names: "mul/y"
}
}
node_def {
name: "mul"
op: "Mul"
input: "mul_placeholder"
input: "mul/y:output:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
experimental_debug_info {
original_node_names: "mul"
}
}
node_def {
name: "Identity"
op: "Identity"
input: "mul:z:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
experimental_debug_info {
original_node_names: "Identity"
}
}
ret {
key: "identity"
value: "Identity:output:0"
}
arg_attr {
key: 0
value {
attr {
key: "_output_shapes"
value {
list {
shape {
}
}
}
}
}
}
}
}
versions {
producer: 399
min_consumer: 12
}
"""
    # Code for generating above graph:
    #
    # def Body(i):
    #   @tf.function
    #   def f():
    #     return i * 2
    #   return f()
    # tf.while_loop(lambda i: i < 5., Body, [tf.constant(1.)])
    graph_def = graph_pb2.GraphDef()
    text_format.Parse(serialized, graph_def)

    @def_function.function
    def F():
        x, y = importer.import_graph_def(
            graph_def, return_elements=["Const:0", "while:2"])
        grad_out, = gradients_impl.gradients(y, x)
        return grad_out

    # Loop runs 3 times (1 -> 2 -> 4 -> 8), so dy/dx = 2**3 = 8.
    self.assertAllEqual(F(), 8.0)
def testIndexedSlicesInIncomingGrads(self):
    """Gradient through a gather of a while_v2 output (IndexedSlices incoming grad)."""

    @def_function.function
    def F():
        x = constant_op.constant([2.])
        # Computes x^4
        ret = while_loop_v2(
            lambda _: True, lambda v: v * v, [x], return_same_structure=False,
            maximum_iterations=2)
        # gather makes the incoming gradient an IndexedSlices instance.
        v = array_ops.gather(ret, [0])
        return gradients_impl.gradients(v, [x])[0]  # 4*x^3

    self.assertAllEqual(self.evaluate(F()), [32.])
def ScalarShape():
    """Returns an empty int32 tensor: the shape vector of a scalar."""
    return ops.convert_to_tensor([], dtype=dtypes.int32)
def GetOptimizedGraph():
    """Runs Grappler on the default graph and returns the optimized GraphDef.

    Constant folding is disabled and memory optimization is set to MANUAL so
    the rewrites under test are isolated from those passes.
    """
    mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
    config = config_pb2.ConfigProto()
    config.graph_options.rewrite_options.CopyFrom(
        rewriter_config_pb2.RewriterConfig(
            constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
            memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL))
    return tf_optimizer.OptimizeGraph(config, mg)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
    test.main()
| |
#######################################################################
# This file is part of Pyblosxom.
#
# Copyright (C) 2008-2011 by the Pyblosxom team. See AUTHORS.
#
# Pyblosxom is distributed under the MIT license. See the file
# LICENSE for distribution details.
#######################################################################
"""
This module holds commandline related stuff. Installation
verification, blog creation, commandline argument parsing, ...
"""
import os
import os.path
import sys
import random
import time
from optparse import OptionParser
from Pyblosxom import __version__
from Pyblosxom.pyblosxom import Pyblosxom
from Pyblosxom.tools import run_callback, pwrap, pwrap_error
from Pyblosxom import plugin_utils
USAGE = "%prog [options] [command] [command-options]"
VERSION = "%prog " + __version__
def build_pyblosxom():
    """Imports config.py and builds an empty Pyblosxom object.

    :returns: a ``Pyblosxom`` instance built from the imported config, or
        ``None`` if config.py could not be imported (an error message is
        printed in that case).
    """
    pwrap("Trying to import the config module....")
    try:
        from config import py as cfg
    except StandardError:
        # Python 2 StandardError also covers errors raised while
        # executing config.py itself, not just a missing file.
        h, t = os.path.split(sys.argv[0])
        script_name = t or h

        pwrap_error("ERROR: Cannot find your config.py file. Please execute "
                    "%s in the directory with the config.py file in it or use "
                    "the --config flag.\n\n"
                    "See \"%s --help\" for more details." % (script_name,
                                                             script_name))
        return None

    return Pyblosxom(cfg, {})
def build_parser(usage):
    """Create the OptionParser shared by every Pyblosxom subcommand."""
    quiet_help = ("If the quiet flag is specified, then Pyblosxom "
                  "will run quietly.")
    config_help = ("This specifies the directory that the config.py "
                   "for the blog you want to work with is in. If the "
                   "config.py file is in the current directory, then "
                   "you don't need to specify this. All commands except "
                   "the 'create' command need a config.py file.")

    parser = OptionParser(usage=usage, version=VERSION)
    parser.add_option("-q", "--quiet", action="store_false",
                      dest="verbose", default=True, help=quiet_help)
    parser.add_option("--config", help=config_help)
    return parser
def generate_entries(command, argv):
"""
This function is primarily for testing purposes. It creates
a bunch of blog entries with random text in them.
"""
parser = build_parser("%prog entries [options] <num_entries>")
(options, args) = parser.parse_args()
if args:
try:
num_entries = int(args[0])
assert num_entries > 0
except ValueError:
pwrap_error("ERROR: num_entries must be a positive integer.")
return 0
else:
num_entries = 5
verbose = options.verbose
p = build_pyblosxom()
if not p:
return 0
datadir = p.get_request().config["datadir"]
sm_para = "<p>Lorem ipsum dolor sit amet.</p>"
med_para = """<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus
in mi lacus, sed interdum nisi. Vestibulum commodo urna et libero
vestibulum gravida. Vivamus hendrerit justo quis lorem auctor
consectetur. Aenean ornare, tortor in sollicitudin imperdiet, neque
diam pellentesque risus, vitae.
</p>"""
lg_para = """<p>
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Mauris
dictum tortor orci. Lorem ipsum dolor sit amet, consectetur
adipiscing elit. Etiam quis lectus vel odio convallis tincidunt sed
et magna. Suspendisse at dolor suscipit eros ullamcorper iaculis. In
aliquet ornare libero eget rhoncus. Sed ac ipsum eget eros fringilla
aliquet ut eget velit. Curabitur dui nibh, eleifend non suscipit at,
laoreet ac purus. Morbi id sem diam. Cras sit amet ante lacus, nec
euismod urna. Curabitur iaculis, lorem at fringilla malesuada, nunc
ligula eleifend nisi, at bibendum libero est quis
tellus. Pellentesque habitant morbi tristique senectus et netus et
malesuada.
</p>"""
paras = [sm_para, med_para, lg_para]
if verbose:
print "Creating %d entries" % num_entries
now = time.time()
for i in range(num_entries):
title = "post number %d\n" % (i + 1)
body = []
for _ in range(random.randrange(1, 6)):
body.append(random.choice(paras))
fn = os.path.join(datadir, "post%d.txt" % (i + 1))
f = open(fn, "w")
f.write(title)
f.write("\n".join(body))
f.close()
mtime = now - ((num_entries - i) * 3600)
os.utime(fn, (mtime, mtime))
if verbose:
print "Creating '%s'..." % fn
if verbose:
print "Done!"
return 0
def test_installation(command, argv):
    """
    This function gets called when someone starts up pyblosxom.cgi
    from the command line with no REQUEST_METHOD environment variable.
    It:

    1. verifies config.py file properties
    2. initializes all the plugins they have installed
    3. runs ``cb_verify_installation``--plugins can print out whether
       they are installed correctly (i.e. have valid config property
       settings and can read/write to data files)

    The goal is to be as useful and informative to the user as we can
    be without being overly verbose and confusing.

    This is designed to make it easier for a user to verify their
    Pyblosxom installation is working and also to install new plugins
    and verify that their configuration is correct.

    :returns: 1 if any plugins failed to load, otherwise None.
    """
    parser = build_parser("%prog test [options]")
    parser.parse_args()

    p = build_pyblosxom()
    if not p:
        return 0

    request = p.get_request()
    config = request.config

    # --- environment summary ---
    pwrap("System Information")
    pwrap("==================")
    pwrap("")
    pwrap("- pyblosxom: %s" % __version__)
    pwrap("- sys.version: %s" % sys.version.replace("\n", " "))
    pwrap("- os.name: %s" % os.name)
    codebase = os.path.dirname(os.path.dirname(__file__))
    pwrap("- codebase: %s" % config.get("codebase", codebase))
    pwrap("")

    # --- config.py sanity checks ---
    pwrap("Checking config.py file")
    pwrap("=======================")
    pwrap("- properties set: %s" % len(config))

    config_keys = config.keys()

    # datadir is required; flavourdir is only recommended.
    if "datadir" not in config_keys:
        pwrap_error("- ERROR: 'datadir' must be set. Refer to installation "
                    "documentation.")

    elif not os.path.isdir(config["datadir"]):
        pwrap_error("- ERROR: datadir '%s' does not exist."
                    " You need to create your datadir and give it "
                    " appropriate permissions." % config["datadir"])
    else:
        pwrap("- datadir '%s' exists." % config["datadir"])

    if "flavourdir" not in config_keys:
        pwrap("- WARNING: You should consider setting flavourdir and putting "
              "your flavour templates there. See the documentation for "
              "more details.")
    elif not os.path.isdir(config["flavourdir"]):
        pwrap_error("- ERROR: flavourdir '%s' does not exist."
                    " You need to create your flavourdir and give it "
                    " appropriate permissions." % config["flavourdir"])
    else:
        pwrap("- flavourdir '%s' exists." % config["flavourdir"])

    if (("blog_encoding" in config_keys
         and config["blog_encoding"].lower() != "utf-8")):
        pwrap_error("- WARNING: 'blog_encoding' is set to something other "
                    "than 'utf-8'. As of Pyblosxom 1.5, "
                    "this isn't a good idea unless you're absolutely certain "
                    "it's going to work for your blog.")

    pwrap("")

    # --- per-plugin verification via verify_installation() ---
    pwrap("Checking plugin configuration")
    pwrap("=============================")

    import traceback

    no_verification_support = []

    if len(plugin_utils.plugins) + len(plugin_utils.bad_plugins) == 0:
        pwrap(" - There are no plugins installed.")

    else:
        if len(plugin_utils.bad_plugins) > 0:
            pwrap("- Some plugins failed to load.")
            pwrap("")
            pwrap("----")
            for mem in plugin_utils.bad_plugins:
                pwrap("plugin: %s" % mem[0])
                # mem[1] holds the load-failure traceback text.
                print "%s" % mem[1]
                pwrap("----")
            pwrap_error("FAIL")
            return(1)

        if len(plugin_utils.plugins) > 0:
            pwrap("- This goes through your plugins and asks each of them "
                  "to verify configuration and installation.")
            pwrap("")
            pwrap("----")
            for mem in plugin_utils.plugins:
                if hasattr(mem, "verify_installation"):
                    pwrap("plugin: %s" % mem.__name__)
                    print "file: %s" % mem.__file__
                    print "version: %s" % (str(getattr(mem, "__version__")))

                    try:
                        # Plugins signal success by returning 1.
                        if mem.verify_installation(request) == 1:
                            pwrap("PASS")
                        else:
                            pwrap_error("FAIL")
                    except StandardError:
                        pwrap_error("FAIL: Exception thrown:")
                        traceback.print_exc(file=sys.stdout)

                    pwrap("----")
                else:
                    mn = mem.__name__
                    mf = mem.__file__
                    no_verification_support.append("'%s' (%s)" % (mn, mf))

            if len(no_verification_support) > 0:
                pwrap("")
                pwrap("The following plugins do not support installation "
                      "verification:")
                no_verification_support.sort()
                for mem in no_verification_support:
                    print "- %s" % mem

    pwrap("")
    pwrap("Verification complete. Correct any errors and warnings above.")
def create_blog(command, argv):
    """
    Creates a blog in the specified directory. Mostly this involves
    copying things over, but there are a few cases where we expand
    template variables.

    Handler for the "create" command.  Builds the directory skeleton
    (entries/, plugins/, flavours/), copies the default flavour
    templates, copies config.py/blog.ini/pyblosxom.cgi with %(basedir)s
    and %(codedir)s expanded, and writes a first post.

    :returns: 0
    """
    parser = build_parser("%prog create [options] <dir>")
    (options, args) = parser.parse_args()

    if args:
        d = args[0]
    else:
        d = "."

    if d == ".":
        d = "." + os.sep + "blog"

    d = os.path.abspath(d)

    verbose = options.verbose

    # Refuse to clobber anything that already exists at the target path.
    if os.path.isfile(d) or os.path.isdir(d):
        pwrap_error("ERROR: Cannot create '%s'--something is in the way." % d)
        return 0

    def _mkdir(d):
        # Helper: create a directory (and parents), logging when verbose.
        if verbose:
            print "Creating '%s'..." % d
        os.makedirs(d)

    _mkdir(d)
    _mkdir(os.path.join(d, "entries"))
    _mkdir(os.path.join(d, "plugins"))

    source = os.path.join(os.path.dirname(__file__), "flavours")

    # Mirror the bundled flavours directory tree into the new blog.
    for root, dirs, files in os.walk(source):
        if ".svn" in root:
            continue

        dest = os.path.join(d, "flavours", root[len(source)+1:])
        if not os.path.isdir(dest):
            if verbose:
                print "Creating '%s'..." % dest
            os.mkdir(dest)

        for mem in files:
            if verbose:
                print "Creating file '%s'..." % os.path.join(dest, mem)
            fpin = open(os.path.join(root, mem), "r")
            fpout = open(os.path.join(dest, mem), "w")
            fpout.write(fpin.read())
            fpout.close()
            fpin.close()

    def _copyfile(frompath, topath, fn, fix=False):
        # Helper: copy one file; when fix=True, expand %(basedir)s and
        # %(codedir)s template variables in each line.
        if verbose:
            print "Creating file '%s'..." % os.path.join(topath, fn)
        fp = open(os.path.join(frompath, fn), "r")
        filedata = fp.readlines()
        fp.close()

        if fix:
            basedir = topath
            if not basedir.endswith(os.sep):
                basedir = basedir + os.sep
            if os.sep == "\\":
                # Escape backslashes so Windows paths survive %-expansion.
                basedir = basedir.replace(os.sep, os.sep + os.sep)
            datamap = {"basedir": basedir,
                       "codedir": os.path.dirname(os.path.dirname(__file__))}
            filedata = [line % datamap for line in filedata]

        fp = open(os.path.join(topath, fn), "w")
        fp.write("".join(filedata))
        fp.close()

    source = os.path.join(os.path.dirname(__file__), "data")

    _copyfile(source, d, "config.py", fix=True)
    _copyfile(source, d, "blog.ini", fix=True)
    _copyfile(source, d, "pyblosxom.cgi", fix=True)

    datadir = os.path.join(d, "entries")
    firstpost = os.path.join(datadir, "firstpost.txt")

    if verbose:
        print "Creating file '%s'..." % firstpost
    fp = open(firstpost, "w")
    fp.write("""First post!
<p>
This is your first post! If you can see this with a web-browser,
then it's likely that everything's working nicely!
</p>
""")
    fp.close()

    if verbose:
        print "Done!"
    return 0
def render_url(command, argv):
    """Renders a single url.

    Handler for the "renderurl" command.  Each url given on the command
    line is rendered with a freshly built Pyblosxom instance; the
    configured ``base_url`` prefix is stripped first.  With --headers,
    the HTTP headers are rendered too.

    :returns: 0
    """
    parser = build_parser("%prog renderurl [options] <url> [<url>...]")

    parser.add_option("--headers",
                      action="store_true", dest="headers", default=False,
                      help="Option that causes headers to be displayed "
                           "when rendering a single url.")

    (options, args) = parser.parse_args()

    if not args:
        parser.print_help()
        return 0

    for url in args:
        p = build_pyblosxom()
        if not p:
            # build_pyblosxom() already printed an error about the missing
            # config.py.  Bail out like every other command handler does
            # instead of crashing with an AttributeError on None.
            return 0

        base_url = p.get_request().config.get("base_url", "")
        if url.startswith(base_url):
            url = url[len(base_url):]
        p.run_render_one(url, options.headers)

    return 0
def run_static_renderer(command, argv):
    """Handler for the "staticrender" command: renders the blog to static HTML.

    :returns: whatever ``Pyblosxom.run_static_renderer`` returns, or 0 if
        config.py could not be imported.
    """
    parser = build_parser("%prog staticrender [options]")
    parser.add_option("--incremental",
                      action="store_true", dest="incremental", default=False,
                      help="Option that causes static rendering to be "
                           "incremental.")
    (options, args) = parser.parse_args()

    # Turn on memcache.
    from Pyblosxom import memcache
    memcache.usecache = True

    p = build_pyblosxom()
    if not p:
        return 0

    return p.run_static_renderer(options.incremental)
# Built-in commandline subcommands as (name, handler-function, help-text)
# tuples.  Plugins can add to or override these via the "commandline"
# callback (see get_handlers below).
DEFAULT_HANDLERS = (
    ("create", create_blog, "Creates directory structure for a new blog."),
    ("test", test_installation,
     "Tests installation and configuration for a blog."),
    ("staticrender", run_static_renderer,
     "Statically renders your blog into an HTML site."),
    ("renderurl", render_url, "Renders a single url of your blog."),
    ("generate", generate_entries, "Generates random entries--helps "
     "with blog setup.")
)
def get_handlers():
    """Build the list of available (command, function, help) handler tuples.

    Starts from ``DEFAULT_HANDLERS``, lets plugins modify the mapping via
    the "commandline" callback, and drops any malformed plugin entries
    (with a warning) before returning.
    """
    try:
        from config import py as cfg
        plugin_utils.initialize_plugins(cfg.get("plugin_dirs", []),
                                        cfg.get("load_plugins", None))
    except ImportError:
        # No config.py in sight--run with just the built-in handlers.
        pass

    handlers_dict = dict([(v[0], (v[1], v[2])) for v in DEFAULT_HANDLERS])
    handlers_dict = run_callback("commandline", handlers_dict,
                                 mappingfunc=lambda x, y: y,
                                 defaultfunc=lambda x: x)

    # test the handlers, drop any that aren't the right return type,
    # and print a warning.
    handlers = []
    for k, v in handlers_dict.items():
        if not len(v) == 2 or not callable(v[0]) or not isinstance(v[1], str):
            print "Plugin returned '%s' for commandline." % ((k, v),)
            continue
        handlers.append((k, v[0], v[1]))

    return handlers
def command_line_handler(scriptname, argv):
    """Main commandline entry point: dispatches argv to a command handler.

    Handles the global --silent and --config flags itself, then looks up
    the first positional argument in the handlers from ``get_handlers()``.

    :returns: 0 on success, 1 on usage errors or unknown commands.
    """
    if "--silent" in argv:
        # Discard all stdout output for the rest of the run.
        sys.stdout = open(os.devnull, "w")
        argv.remove("--silent")

    print "%s version %s" % (scriptname, __version__)

    # slurp off the config file setting and add it to sys.path.
    # this needs to be first to pick up plugin-based command handlers.
    config_dir = None
    for i, mem in enumerate(argv):
        if mem.startswith("--config"):
            # Accept both "--config=DIR" and "--config DIR" forms.
            if "=" in mem:
                _, config_dir = mem.split("=")
                break
            else:
                try:
                    config_dir = argv[i+1]
                    break
                except IndexError:
                    pwrap_error("Error: no config file argument specified.")
                    pwrap_error("Exiting.")
                    return 1

    if config_dir is not None:
        # Allow pointing directly at config.py; strip the filename.
        if config_dir.endswith("config.py"):
            config_dir = config_dir[0:-9]

        if not os.path.exists(config_dir):
            pwrap_error("ERROR: '%s' does not exist--cannot find config.py "
                        "file." % config_dir)
            pwrap_error("Exiting.")
            return 1

        if not "config.py" in os.listdir(config_dir):
            pwrap_error("Error: config.py not in '%s'. "
                        "Cannot find config.py file." % config_dir)
            pwrap_error("Exiting.")
            return 1

        sys.path.insert(0, config_dir)
        print "Inserting %s to beginning of sys.path...." % config_dir

    handlers = get_handlers()

    # No command (or -h/--help): show the general help plus command list.
    if len(argv) == 1 or (len(argv) == 2 and argv[1] in ("-h", "--help")):
        parser = build_parser("%prog [command]")
        parser.print_help()
        print ""
        print "Commands:"
        for command_str, _, command_help in handlers:
            print " %-14s %s" % (command_str, command_help)
        return 0

    if argv[1] == "--version":
        # Version was already printed above.
        return 0

    # then we execute the named command with options, or print help
    if argv[1].startswith("-"):
        pwrap_error("Command '%s' does not exist." % argv[1])
        pwrap_error('')
        pwrap_error("Commands:")
        for command_str, _, command_help in handlers:
            pwrap_error(" %-14s %s" % (command_str, command_help))
        return 1

    command = argv.pop(1)
    for (c, f, h) in handlers:
        if c == command:
            return f(command, argv)

    pwrap_error("Command '%s' does not exist." % command)
    for command_str, command_func, command_help in handlers:
        pwrap_error(" %-14s %s" % (command_str, command_help))
    return 1
| |
from __future__ import unicode_literals
import datetime
import os
import re
import sys
import types
from django.conf import settings
from django.core.urlresolvers import resolve, Resolver404
from django.http import (HttpResponse, HttpResponseNotFound, HttpRequest,
build_request_repr)
from django.template import Template, Context, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.template.engine import Engine
from django.utils.datastructures import MultiValueDict
from django.utils.html import escape
from django.utils.encoding import force_bytes, smart_text
from django.utils import lru_cache
from django.utils.module_loading import import_string
from django.utils import six
from django.utils.translation import ugettext as _
# Any settings key matching one of these substrings is treated as sensitive
# and its value is replaced with CLEANSED_SUBSTITUTE in debug output
# (see cleanse_setting / get_safe_settings below).
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE')
CLEANSED_SUBSTITUTE = '********************'
def linebreak_iter(template_source):
    """Yield the offset of the start of each line in ``template_source``.

    Always yields 0 first, then the offset just past every newline, and
    finally ``len(template_source) + 1`` as an end-of-source sentinel.
    """
    yield 0
    position = 0
    while True:
        newline_at = template_source.find('\n', position)
        if newline_at < 0:
            break
        position = newline_at + 1
        yield position
    yield len(template_source) + 1
class CallableSettingWrapper(object):
    """Proxy that hides a callable appearing in settings from the debug page.

    * Not to call in the debug page (#21345).
    * Not to break the debug page if the callable forbidding to set
      attributes (#23070).
    """

    def __init__(self, callable_setting):
        self._wrapped = callable_setting

    def __repr__(self):
        # Delegate so the debug page shows the underlying callable's repr.
        return repr(self._wrapped)
def cleanse_setting(key, value):
    """Cleanse an individual setting key/value of sensitive content.

    Values under keys matching HIDDEN_SETTINGS are replaced with
    CLEANSED_SUBSTITUTE; dict values are cleansed recursively; callables
    are wrapped so the debug page never invokes them.
    """
    try:
        is_sensitive = HIDDEN_SETTINGS.search(key)
    except TypeError:
        # If the key isn't regex-able, just return as-is.
        cleansed = value
    else:
        if is_sensitive:
            cleansed = CLEANSED_SUBSTITUTE
        elif isinstance(value, dict):
            cleansed = {k: cleanse_setting(k, v) for k, v in value.items()}
        else:
            cleansed = value

    if callable(cleansed):
        # For fixing #21345 and #23070
        cleansed = CallableSettingWrapper(cleansed)
    return cleansed
def get_safe_settings():
    "Returns a dictionary of the settings module, with sensitive settings blurred out."
    return dict(
        (name, cleanse_setting(name, getattr(settings, name)))
        for name in dir(settings) if name.isupper())
def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
    """
    Create a technical server error response. The last three arguments are
    the values returned from sys.exc_info() and friends.
    """
    reporter = ExceptionReporter(request, exc_type, exc_value, tb)
    if request.is_ajax():
        # AJAX callers get a plain-text traceback instead of the HTML page.
        text = reporter.get_traceback_text()
        return HttpResponse(text, status=status_code, content_type='text/plain')
    else:
        html = reporter.get_traceback_html()
        return HttpResponse(html, status=status_code, content_type='text/html')
@lru_cache.lru_cache()
def get_default_exception_reporter_filter():
    # Instantiate the default filter for the first time and cache it.
    # DEFAULT_EXCEPTION_REPORTER_FILTER is a dotted path to a filter class.
    return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()
def get_exception_reporter_filter(request):
    """Return the filter attached to ``request``, or the cached default one."""
    fallback = get_default_exception_reporter_filter()
    return getattr(request, 'exception_reporter_filter', fallback)
class ExceptionReporterFilter(object):
    """
    Base for all exception reporter filter classes. All overridable hooks
    contain lenient default behaviors.
    """

    def get_request_repr(self, request):
        """Repr of the request with POST run through get_post_parameters()."""
        if request is None:
            return repr(None)
        return build_request_repr(
            request, POST_override=self.get_post_parameters(request))

    def get_post_parameters(self, request):
        """Default: expose the POST parameters unmodified."""
        return {} if request is None else request.POST

    def get_traceback_frame_variables(self, request, tb_frame):
        """Default: expose every local variable of the frame."""
        return list(six.iteritems(tb_frame.f_locals))
class SafeExceptionReporterFilter(ExceptionReporterFilter):
    """
    Use annotations made by the sensitive_post_parameters and
    sensitive_variables decorators to filter out sensitive information.
    """

    def is_active(self, request):
        """
        This filter is to add safety in production environments (i.e. DEBUG
        is False). If DEBUG is True then your site is not safe anyway.
        This hook is provided as a convenience to easily activate or
        deactivate the filter on a per request basis.
        """
        return settings.DEBUG is False

    def get_cleansed_multivaluedict(self, request, multivaluedict):
        """
        Replaces the keys in a MultiValueDict marked as sensitive with stars.
        This mitigates leaking sensitive POST parameters if something like
        request.POST['nonexistent_key'] throws an exception (#21098).
        """
        sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
        if self.is_active(request) and sensitive_post_parameters:
            # Copy before mutating so the real request data is untouched.
            multivaluedict = multivaluedict.copy()
            for param in sensitive_post_parameters:
                if param in multivaluedict:
                    multivaluedict[param] = CLEANSED_SUBSTITUTE
        return multivaluedict

    def get_post_parameters(self, request):
        """
        Replaces the values of POST parameters marked as sensitive with
        stars (*********).
        """
        if request is None:
            return {}
        else:
            sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
            if self.is_active(request) and sensitive_post_parameters:
                cleansed = request.POST.copy()
                if sensitive_post_parameters == '__ALL__':
                    # Cleanse all parameters.
                    for k, v in cleansed.items():
                        cleansed[k] = CLEANSED_SUBSTITUTE
                    return cleansed
                else:
                    # Cleanse only the specified parameters.
                    for param in sensitive_post_parameters:
                        if param in cleansed:
                            cleansed[param] = CLEANSED_SUBSTITUTE
                    return cleansed
            else:
                return request.POST

    def cleanse_special_types(self, request, value):
        # Cleanse values whose repr would otherwise leak request data.
        if isinstance(value, HttpRequest):
            # Cleanse the request's POST parameters.
            value = self.get_request_repr(value)
        elif isinstance(value, MultiValueDict):
            # Cleanse MultiValueDicts (request.POST is the one we usually care about)
            value = self.get_cleansed_multivaluedict(request, value)
        return value

    def get_traceback_frame_variables(self, request, tb_frame):
        """
        Replaces the values of variables marked as sensitive with
        stars (*********).
        """
        # Loop through the frame's callers to see if the sensitive_variables
        # decorator was used.
        current_frame = tb_frame.f_back
        sensitive_variables = None
        while current_frame is not None:
            if (current_frame.f_code.co_name == 'sensitive_variables_wrapper'
                    and 'sensitive_variables_wrapper' in current_frame.f_locals):
                # The sensitive_variables decorator was used, so we take note
                # of the sensitive variables' names.
                wrapper = current_frame.f_locals['sensitive_variables_wrapper']
                sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
                break
            current_frame = current_frame.f_back

        cleansed = {}
        if self.is_active(request) and sensitive_variables:
            if sensitive_variables == '__ALL__':
                # Cleanse all variables
                for name, value in tb_frame.f_locals.items():
                    cleansed[name] = CLEANSED_SUBSTITUTE
            else:
                # Cleanse specified variables
                for name, value in tb_frame.f_locals.items():
                    if name in sensitive_variables:
                        value = CLEANSED_SUBSTITUTE
                    else:
                        value = self.cleanse_special_types(request, value)
                    cleansed[name] = value
        else:
            # Potentially cleanse the request and any MultiValueDicts if they
            # are one of the frame variables.
            for name, value in tb_frame.f_locals.items():
                cleansed[name] = self.cleanse_special_types(request, value)

        if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper'
                and 'sensitive_variables_wrapper' in tb_frame.f_locals):
            # For good measure, obfuscate the decorated function's arguments in
            # the sensitive_variables decorator's frame, in case the variables
            # associated with those arguments were meant to be obfuscated from
            # the decorated function's frame.
            cleansed['func_args'] = CLEANSED_SUBSTITUTE
            cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE

        return cleansed.items()
class ExceptionReporter(object):
    """
    A class to organize and coordinate reporting on exceptions.
    """
    def __init__(self, request, exc_type, exc_value, tb, is_email=False):
        # request may be None (e.g. errors raised outside request handling).
        self.request = request
        self.filter = get_exception_reporter_filter(self.request)
        self.exc_type = exc_type
        self.exc_value = exc_value
        self.tb = tb
        self.is_email = is_email

        # Filled in lazily by get_traceback_data() / helpers below.
        self.template_info = None
        self.template_does_not_exist = False
        self.loader_debug_info = None

        # Handle deprecated string exceptions
        if isinstance(self.exc_type, six.string_types):
            self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
            self.exc_type = type(self.exc_value)

    def format_path_status(self, path):
        """Return a short human-readable status for a candidate template path."""
        if not os.path.exists(path):
            return "File does not exist"
        if not os.path.isfile(path):
            return "Not a file"
        if not os.access(path, os.R_OK):
            return "File is not readable"
        return "File exists"

    def get_traceback_data(self):
        """Return a dictionary containing traceback information."""
        # TODO: handle multiple template engines.
        template_engine = Engine.get_default()

        if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
            self.template_does_not_exist = True
            self.loader_debug_info = []
            # If Django fails in get_template_loaders, provide an empty list
            # for the following loop to not fail.
            try:
                template_loaders = template_engine.template_loaders
            except Exception:
                template_loaders = []
            for loader in template_loaders:
                try:
                    source_list_func = loader.get_template_sources
                    # NOTE: This assumes exc_value is the name of the template that
                    # the loader attempted to load.
                    template_list = [{
                        'name': t,
                        'status': self.format_path_status(t),
                    } for t in source_list_func(str(self.exc_value))]
                except AttributeError:
                    template_list = []
                loader_name = loader.__module__ + '.' + loader.__class__.__name__
                self.loader_debug_info.append({
                    'loader': loader_name,
                    'templates': template_list,
                })
        if (template_engine.debug and
                hasattr(self.exc_value, 'django_template_source')):
            self.get_template_exception_info()

        frames = self.get_traceback_frames()
        for i, frame in enumerate(frames):
            if 'vars' in frame:
                frame_vars = []
                for k, v in frame['vars']:
                    v = pprint(v)
                    # The force_escape filter assume unicode, make sure that works
                    if isinstance(v, six.binary_type):
                        v = v.decode('utf-8', 'replace')  # don't choke on non-utf-8 input
                    # Trim large blobs of data
                    if len(v) > 4096:
                        v = '%s... <trimmed %d bytes string>' % (v[0:4096], len(v))
                    frame_vars.append((k, force_escape(v)))
                frame['vars'] = frame_vars
            frames[i] = frame

        unicode_hint = ''
        if self.exc_type and issubclass(self.exc_type, UnicodeError):
            # Show a few characters around the offending range to help debug
            # encode/decode errors.
            start = getattr(self.exc_value, 'start', None)
            end = getattr(self.exc_value, 'end', None)
            if start is not None and end is not None:
                unicode_str = self.exc_value.args[1]
                unicode_hint = smart_text(
                    unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))],
                    'ascii', errors='replace'
                )
        from django import get_version
        c = {
            'is_email': self.is_email,
            'unicode_hint': unicode_hint,
            'frames': frames,
            'request': self.request,
            'filtered_POST': self.filter.get_post_parameters(self.request),
            'settings': get_safe_settings(),
            'sys_executable': sys.executable,
            'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
            'server_time': datetime.datetime.now(),
            'django_version_info': get_version(),
            'sys_path': sys.path,
            'template_info': self.template_info,
            'template_does_not_exist': self.template_does_not_exist,
            'loader_debug_info': self.loader_debug_info,
        }
        # Check whether exception info is available
        if self.exc_type:
            c['exception_type'] = self.exc_type.__name__
        if self.exc_value:
            c['exception_value'] = smart_text(self.exc_value, errors='replace')
        if frames:
            c['lastframe'] = frames[-1]
        return c

    def get_traceback_html(self):
        "Return HTML version of debug 500 HTTP error page."
        t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')
        c = Context(self.get_traceback_data(), use_l10n=False)
        return t.render(c)

    def get_traceback_text(self):
        "Return plain text version of debug 500 HTTP error page."
        t = Template(TECHNICAL_500_TEXT_TEMPLATE, name='Technical 500 template')
        c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
        return t.render(c)

    def get_template_exception_info(self):
        """
        Populate self.template_info from exc_value.django_template_source so
        the debug page can highlight the failing template line.
        """
        origin, (start, end) = self.exc_value.django_template_source
        template_source = origin.reload()
        context_lines = 10
        line = 0
        upto = 0
        source_lines = []
        before = during = after = ""
        # `next_pos` is the offset just past the current template line
        # (renamed from `next` to avoid shadowing the builtin).
        for num, next_pos in enumerate(linebreak_iter(template_source)):
            if start >= upto and end <= next_pos:
                line = num
                before = escape(template_source[upto:start])
                during = escape(template_source[start:end])
                after = escape(template_source[end:next_pos])
            source_lines.append((num, escape(template_source[upto:next_pos])))
            upto = next_pos
        total = len(source_lines)

        top = max(1, line - context_lines)
        bottom = min(total, line + 1 + context_lines)

        # In some rare cases, exc_value.args might be empty.
        try:
            message = self.exc_value.args[0]
        except IndexError:
            message = '(Could not get exception message)'

        self.template_info = {
            'message': message,
            'source_lines': source_lines[top:bottom],
            'before': before,
            'during': during,
            'after': after,
            'top': top,
            'bottom': bottom,
            'total': total,
            'line': line,
            'name': origin.name,
        }

    def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
        """
        Returns context_lines before and after lineno from file.
        Returns (pre_context_lineno, pre_context, context_line, post_context).
        """
        source = None
        if loader is not None and hasattr(loader, "get_source"):
            try:
                source = loader.get_source(module_name)
            except ImportError:
                pass
            if source is not None:
                source = source.splitlines()
        if source is None:
            try:
                with open(filename, 'rb') as fp:
                    source = fp.read().splitlines()
            except (OSError, IOError):
                pass
        # `not source` (rather than `is None`) also covers an empty file or
        # empty loader source, which would otherwise crash on source[0] below.
        if not source:
            return None, [], None, []

        # If we just read the source from a file, or if the loader did not
        # apply tokenize.detect_encoding to decode the source into a Unicode
        # string, then we should do that ourselves.
        if isinstance(source[0], six.binary_type):
            encoding = 'ascii'
            for line in source[:2]:
                # File coding may be specified. Match pattern from PEP-263
                # (http://www.python.org/dev/peps/pep-0263/)
                match = re.search(br'coding[:=]\s*([-\w.]+)', line)
                if match:
                    encoding = match.group(1).decode('ascii')
                    break
            source = [six.text_type(sline, encoding, 'replace') for sline in source]

        # The reported line may be beyond the end of the file on disk when the
        # source no longer matches the running code (e.g. a stale .pyc); bail
        # out instead of raising IndexError while rendering the error page.
        if lineno >= len(source):
            return None, [], None, []

        lower_bound = max(0, lineno - context_lines)
        upper_bound = lineno + context_lines

        pre_context = source[lower_bound:lineno]
        context_line = source[lineno]
        post_context = source[lineno + 1:upper_bound]

        return lower_bound, pre_context, context_line, post_context

    def get_traceback_frames(self):
        """Build the list of per-frame dicts consumed by the 500 templates."""
        frames = []
        tb = self.tb
        while tb is not None:
            # Support for __traceback_hide__ which is used by a few libraries
            # to hide internal frames.
            if tb.tb_frame.f_locals.get('__traceback_hide__'):
                tb = tb.tb_next
                continue
            filename = tb.tb_frame.f_code.co_filename
            function = tb.tb_frame.f_code.co_name
            lineno = tb.tb_lineno - 1
            loader = tb.tb_frame.f_globals.get('__loader__')
            module_name = tb.tb_frame.f_globals.get('__name__') or ''
            pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(
                filename, lineno, 7, loader, module_name,
            )
            if pre_context_lineno is not None:
                frames.append({
                    'tb': tb,
                    'type': 'django' if module_name.startswith('django.') else 'user',
                    'filename': filename,
                    'function': function,
                    'lineno': lineno + 1,
                    'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
                    'id': id(tb),
                    'pre_context': pre_context,
                    'context_line': context_line,
                    'post_context': post_context,
                    'pre_context_lineno': pre_context_lineno + 1,
                })
            tb = tb.tb_next
        return frames

    def format_exception(self):
        """
        Return the same data as from traceback.format_exception.
        """
        import traceback
        frames = self.get_traceback_frames()
        tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames]
        # `frame_list` renamed from `list` to avoid shadowing the builtin.
        frame_list = ['Traceback (most recent call last):\n']
        frame_list += traceback.format_list(tb)
        frame_list += traceback.format_exception_only(self.exc_type, self.exc_value)
        return frame_list
def technical_404_response(request, exception):
    "Create a technical 404 error response. The exception should be the Http404."
    # The Http404 raised by the URL resolver carries a dict with 'path' and
    # 'tried'; any other Http404 falls back to the request's own path.
    try:
        error_url = exception.args[0]['path']
    except (IndexError, TypeError, KeyError):
        error_url = request.path_info[1:]  # Trim leading slash

    try:
        tried = exception.args[0]['tried']
    except (IndexError, TypeError, KeyError):
        tried = []
    else:
        # Show the "It worked!" page when the URLconf is empty, or when it
        # only contains the default admin pattern and '/' was requested.
        only_default_admin = (
            request.path == '/'
            and len(tried) == 1  # default URLconf
            and len(tried[0]) == 1
            and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin'
        )
        if not tried or only_default_admin:  # empty URLconf
            return default_urlconf(request)

    urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
    if isinstance(urlconf, types.ModuleType):
        urlconf = urlconf.__name__

    # Work out a dotted name for the view that raised the 404, if any.
    caller = ''
    try:
        resolver_match = resolve(request.path)
    except Resolver404:
        pass
    else:
        obj = resolver_match.func
        for candidate in (obj, getattr(obj, '__class__', None)):
            if hasattr(candidate, '__name__'):
                caller = candidate.__name__
                break
        if hasattr(obj, '__module__'):
            caller = '%s.%s' % (obj.__module__, caller)

    t = Template(TECHNICAL_404_TEMPLATE, name='Technical 404 template')
    c = Context({
        'urlconf': urlconf,
        'root_urlconf': settings.ROOT_URLCONF,
        'request_path': error_url,
        'urlpatterns': tried,
        'reason': force_bytes(exception, errors='replace'),
        'request': request,
        'settings': get_safe_settings(),
        'raising_view_name': caller,
    })
    return HttpResponseNotFound(t.render(c), content_type='text/html')
def default_urlconf(request):
    "Create an empty URLconf 404 error response."
    # Rendered when DEBUG is on and the project has no URL patterns yet.
    context = Context({
        "title": _("Welcome to Django"),
        "heading": _("It worked!"),
        "subheading": _("Congratulations on your first Django-powered page."),
        "instructions": _("Of course, you haven't actually done any work yet. "
            "Next, start your first app by running <code>python manage.py startapp [app_label]</code>."),
        "explanation": _("You're seeing this message because you have <code>DEBUG = True</code> in your "
            "Django settings file and you haven't configured any URLs. Get to work!"),
    })
    template = Template(DEFAULT_URLCONF_TEMPLATE, name='Default URLconf template')
    return HttpResponse(template.render(context), content_type='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = ("""
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
"""{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; white-space: pre-wrap; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 0 20px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block': 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.innerHTML = link.innerHTML == s1 ? s2: s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
"""{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">"""
"""{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception message supplied{% endif %}"""
"""</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if loader_debug_info %}
<p>Django tried loading these templates, in this order:</p>
<ul>
{% for loader in loader_debug_info %}
<li>Using loader <code>{{ loader.loader }}</code>:
<ul>
{% for t in loader.templates %}<li><code>{{ t.name }}</code> ({{ t.status }})</li>{% endfor %}
</ul>
</li>
{% endfor %}
</ul>
{% else %}
<p>Django couldn't find any templates because your <code>TEMPLATE_LOADERS</code> setting is empty!</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Error during template rendering</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}
{% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}">
{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>
{{ template_info.before }}
<span class="specific">{{ template_info.during }}</span>
{{ template_info.after }}
</td>
</tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endifequal %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">
Switch to copy-and-paste view</a></span>{% endif %}
</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">
{% for line in frame.pre_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>
{% endfor %}
</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line">
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>
{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">
{% for line in frame.post_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>
{% endfor %}
</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title"
value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template Loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} ({{ t.status }})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}
Traceback:
{% for frame in frames %}File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}
{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public Web site">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in filtered_POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.FILES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard page generated by the handler for this status code.
</p>
</div>
{% endif %}
</body>
</html>
""")
# Plain-text twin of TECHNICAL_500_TEMPLATE, used for error emails and
# non-HTML clients; rendered with autoescape off.
TECHNICAL_500_TEXT_TEMPLATE = """{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %}
{% firstof exception_value 'No exception message supplied' %}
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri }}{% endif %}
Django Version: {{ django_version_info }}
Python Executable: {{ sys_executable }}
Python Version: {{ sys_version_info }}
Python Path: {{ sys_path }}
Server time: {{server_time|date:"r"}}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} ({{ t.status }})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}{% if frames %}
Traceback:
{% for frame in frames %}File "{{ frame.filename }}" in {{ frame.function }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %}
{% endfor %}
{% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %}
{% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %}
{% if request %}Request information:
GET:{% for k, v in request.GET.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %}
POST:{% for k, v in filtered_POST.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %}
FILES:{% for k, v in request.FILES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %}
COOKIES:{% for k, v in request.COOKIES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %}
META:{% for k, v in request.META.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% else %}Request data not supplied
{% endif %}
Settings:
Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
You're seeing this error because you have DEBUG = True in your
Django settings file. Change that to False, and Django will
display a standard page generated by the handler for this status code.
"""
# HTML template for the technical 404 page shown when DEBUG is on; lists the
# URL patterns that were tried and the view (if any) that raised Http404.
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% if raising_view_name %}
<tr>
<th>Raised by:</th>
<td>{{ raising_view_name }}</td>
</tr>
{% endif %}
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
# HTML template for the "It worked!" welcome page shown when DEBUG is on and
# no URL patterns have been configured yet (see default_urlconf()).
DEFAULT_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>{{ title }}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>{{ heading }}</h1>
<h2>{{ subheading }}</h2>
</div>
<div id="instructions">
<p>
{{ instructions|safe }}
</p>
</div>
<div id="explanation">
<p>
{{ explanation|safe }}
</p>
</div>
</body></html>
"""
| |
# Copyright 2015-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from botocore.client import ClientError
from collections import Counter
from concurrent.futures import as_completed
from dateutil.parser import parse
import itertools
import time
from c7n.actions import Action
from c7n.exceptions import PolicyValidationError
from c7n.filters import ValueFilter, AgeFilter, Filter
from c7n.filters.offhours import OffHour, OnHour
import c7n.filters.vpc as net_filters
from c7n.manager import resources
from c7n import query
from c7n.resources.securityhub import PostFinding
from c7n.tags import TagActionFilter, DEFAULT_TAG, TagCountFilter, TagTrim, TagDelayedAction
from c7n.utils import (
local_session, type_schema, chunks, get_retry, select_keys)
from .ec2 import deserialize_user_data
@resources.register('asg')
class ASG(query.QueryResourceManager):
    """Resource manager for EC2 Auto Scaling Groups."""

    class resource_type(query.TypeInfo):
        # Metadata telling c7n how to discover and identify ASGs.
        service = 'autoscaling'
        arn = 'AutoScalingGroupARN'
        arn_type = 'autoScalingGroup'
        # ASG ARNs separate type and name with ':' rather than '/'.
        arn_separator = ":"
        # The group name serves as both the resource id and display name.
        id = name = 'AutoScalingGroupName'
        date = 'CreatedTime'
        dimension = 'AutoScalingGroupName'
        enum_spec = ('describe_auto_scaling_groups', 'AutoScalingGroups', None)
        # Server-side filter parameter for the describe call.
        filter_name = 'AutoScalingGroupNames'
        filter_type = 'list'
        config_type = 'AWS::AutoScaling::AutoScalingGroup'
        cfn_type = 'AWS::AutoScaling::AutoScalingGroup'
        # Columns emitted by the `custodian report` subcommand.
        default_report_fields = (
            'AutoScalingGroupName',
            'CreatedTime',
            'LaunchConfigurationName',
            'count:Instances',
            'DesiredCapacity',
            'HealthCheckType',
            'list:LoadBalancerNames',
        )

    # Retry wrapper for autoscaling API calls hitting transient errors.
    retry = staticmethod(get_retry(('ResourceInUse', 'Throttling',)))
# Register generic filters shared with other resource types.
ASG.filter_registry.register('offhour', OffHour)
ASG.filter_registry.register('onhour', OnHour)
ASG.filter_registry.register('tag-count', TagCountFilter)
ASG.filter_registry.register('marked-for-op', TagActionFilter)
ASG.filter_registry.register('network-location', net_filters.NetworkLocation)
class LaunchInfo:
    """Uniform view over an asg's launch configuration or launch template.

    ASGs may reference either a launch configuration, a launch template
    (directly or via a mixed instances policy). This helper resolves both
    forms for a set of asgs and exposes lookups keyed per asg.
    """

    permissions = ("ec2:DescribeLaunchTemplateVersions",
                   "autoscaling:DescribeLaunchConfigurations",)

    def __init__(self, manager):
        self.manager = manager

    def initialize(self, asgs):
        # Resolve both launch templates and launch configs up front;
        # returns self so callers can chain construction + init.
        self.templates = self.get_launch_templates(asgs)
        self.configs = self.get_launch_configs(asgs)
        return self

    def get_launch_templates(self, asgs):
        """Return mapping of (template id, version) -> launch template data."""
        tmpl_mgr = self.manager.get_resource_manager('launch-template-version')
        # template ids include version identifiers
        template_ids = list(tmpl_mgr.get_asg_templates(asgs))
        if not template_ids:
            return {}
        return {
            (t['LaunchTemplateId'],
             str(t.get('c7n:VersionAlias', t['VersionNumber']))): t['LaunchTemplateData']
            for t in tmpl_mgr.get_resources(template_ids)}

    def get_launch_configs(self, asgs):
        """Return a mapping of launch configs for the given set of asgs"""
        config_names = set()
        for a in asgs:
            if 'LaunchConfigurationName' not in a:
                continue
            config_names.add(a['LaunchConfigurationName'])
        if not config_names:
            return {}
        lc_resources = self.manager.get_resource_manager('launch-config')
        # For a handful of configs fetch by name; beyond that a full
        # enumeration is cheaper than many individual describes.
        if len(config_names) < 5:
            configs = lc_resources.get_resources(list(config_names))
        else:
            configs = lc_resources.resources()
        return {
            cfg['LaunchConfigurationName']: cfg for cfg in configs
            if cfg['LaunchConfigurationName'] in config_names}

    def get_launch_id(self, asg):
        """Return the launch config name, or (template id, version) tuple."""
        lid = asg.get('LaunchConfigurationName')
        if lid is not None:
            # We've noticed trailing white space allowed in some asgs
            return lid.strip()

        lid = asg.get('LaunchTemplate')
        if lid is not None:
            return (lid['LaunchTemplateId'], lid.get('Version', '$Default'))

        if 'MixedInstancesPolicy' in asg:
            mip_spec = asg['MixedInstancesPolicy'][
                'LaunchTemplate']['LaunchTemplateSpecification']
            return (mip_spec['LaunchTemplateId'], mip_spec.get('Version', '$Default'))

        # we've noticed some corner cases where the asg name is the lc name, but not
        # explicitly specified as launchconfiguration attribute.
        lid = asg['AutoScalingGroupName']
        return lid

    def get(self, asg):
        # Return launch config/template data for the asg, or None when the
        # referenced config/template could not be resolved.
        lid = self.get_launch_id(asg)
        if isinstance(lid, tuple):
            return self.templates.get(lid)
        else:
            return self.configs.get(lid)

    def items(self):
        # Iterate (launch id, data) pairs across both configs and templates.
        return itertools.chain(*(
            self.configs.items(), self.templates.items()))

    def get_image_ids(self):
        # Mapping of image id -> launch ids that reference it.
        image_ids = {}
        for cid, c in self.items():
            if c.get('ImageId'):
                image_ids.setdefault(c['ImageId'], []).append(cid)
        return image_ids

    def get_image_map(self):
        """Return mapping of image id -> image for referenced amis."""
        # The describe_images api historically would return errors
        # on an unknown ami in the set of images ids passed in.
        # It now just silently drops those items, which is actually
        # ideal for our use case.
        #
        # We used to do some balancing of picking up our asgs using
        # the resource manager abstraction to take advantage of
        # resource caching, but then we needed to do separate api
        # calls to intersect with third party amis. Given the new
        # describe behavior, we'll just do the api call to fetch the
        # amis, it doesn't seem to have any upper bound on number of
        # ImageIds to pass (Tested with 1k+ ImageIds)
        #
        # Explicitly use a describe source. Can't use a config source
        # since it won't have state for third party ami, we auto
        # propagate source normally. Can't use a cache either as they're
        # not in the account.
        return {i['ImageId']: i for i in
                self.manager.get_resource_manager(
                    'ami').get_source('describe').get_resources(
                        list(self.get_image_ids()), cache=False)}

    def get_security_group_ids(self):
        # return set of security group ids for given asg
        sg_ids = set()
        for k, v in self.items():
            sg_ids.update(v.get('SecurityGroupIds', ()))
            sg_ids.update(v.get('SecurityGroups', ()))
        return sg_ids
@ASG.filter_registry.register('security-group')
class SecurityGroupFilter(net_filters.SecurityGroupFilter):
    """Filter asgs by security groups on their launch config/template."""

    # Related ids come from resolved launch info, not a jmespath expression.
    RelatedIdsExpression = ""
    permissions = ('ec2:DescribeSecurityGroups',) + LaunchInfo.permissions

    def get_related_ids(self, asgs):
        # Security groups are collected across all resolved launch info.
        return self.launch_info.get_security_group_ids()

    def process(self, asgs, event=None):
        # Resolve launch configs/templates once for the whole asg set.
        self.launch_info = LaunchInfo(self.manager).initialize(asgs)
        return super(SecurityGroupFilter, self).process(asgs, event)
@ASG.filter_registry.register('subnet')
class SubnetFilter(net_filters.SubnetFilter):
    """Filter asgs by the subnets referenced in their VPCZoneIdentifier."""

    RelatedIdsExpression = ""

    def get_related_ids(self, asgs):
        # VPCZoneIdentifier is a comma separated string of subnet ids.
        related = set()
        for group in asgs:
            zone_ids = group.get('VPCZoneIdentifier', '')
            related.update(s.strip() for s in zone_ids.split(','))
        return related
@ASG.filter_registry.register('launch-config')
class LaunchConfigFilter(ValueFilter):
    """Filter asg by launch config attributes.

    This will also filter to launch template data in addition
    to launch configurations.

    :example:

    .. code-block:: yaml

        policies:
          - name: launch-configs-with-public-address
            resource: asg
            filters:
              - type: launch-config
                key: AssociatePublicIpAddress
                value: true
    """
    schema = type_schema(
        'launch-config', rinherit=ValueFilter.schema)
    schema_alias = False
    permissions = ("autoscaling:DescribeLaunchConfigurations",)

    def process(self, asgs, event=None):
        # Resolve launch configs/templates once for the whole asg set.
        self.launch_info = LaunchInfo(self.manager).initialize(asgs)
        return super(LaunchConfigFilter, self).process(asgs, event)

    def __call__(self, asg):
        # The value filter is evaluated against the resolved launch data.
        return self.match(self.launch_info.get(asg))
class ConfigValidFilter(Filter):
    """Base filter checking the structural validity of an asg's config.

    Verifies that the subnets, security groups, key pairs, elbs, target
    groups, snapshots and amis referenced by an asg's launch
    configuration/template all still exist. Subclassed by the ``valid``
    and ``invalid`` filters.
    """

    def get_permissions(self):
        # Union of permissions for every resource type we cross-check.
        return list(itertools.chain(*[
            self.manager.get_resource_manager(m).get_permissions()
            for m in ('subnet', 'security-group', 'key-pair', 'elb',
                      'app-elb-target-group', 'ebs-snapshot', 'ami')]))

    def validate(self):
        # This filter enumerates several whole resource types, which is
        # too slow/chatty for lambda execution modes.
        if self.manager.data.get('mode'):
            raise PolicyValidationError(
                "invalid-config makes too many queries to be run in lambda")
        return self

    def initialize(self, asgs):
        """Prefetch all referenced resource id sets for validity checks."""
        self.launch_info = LaunchInfo(self.manager).initialize(asgs)
        # pylint: disable=attribute-defined-outside-init
        self.subnets = self.get_subnets()
        self.security_groups = self.get_security_groups()
        self.key_pairs = self.get_key_pairs()
        self.elbs = self.get_elbs()
        self.appelb_target_groups = self.get_appelb_target_groups()
        self.snapshots = self.get_snapshots()
        self.images, self.image_snaps = self.get_images()

    def get_subnets(self):
        manager = self.manager.get_resource_manager('subnet')
        return {s['SubnetId'] for s in manager.resources()}

    def get_security_groups(self):
        manager = self.manager.get_resource_manager('security-group')
        return {s['GroupId'] for s in manager.resources()}

    def get_key_pairs(self):
        manager = self.manager.get_resource_manager('key-pair')
        return {k['KeyName'] for k in manager.resources()}

    def get_elbs(self):
        manager = self.manager.get_resource_manager('elb')
        return {e['LoadBalancerName'] for e in manager.resources()}

    def get_appelb_target_groups(self):
        manager = self.manager.get_resource_manager('app-elb-target-group')
        return {a['TargetGroupArn'] for a in manager.resources()}

    def get_images(self):
        """Return (image id set, set of snapshot ids referenced by images)."""
        images = self.launch_info.get_image_map()
        image_snaps = set()

        for a in images.values():
            # Capture any snapshots, images strongly reference their
            # snapshots, and some of these will be third party in the
            # case of a third party image.
            for bd in a.get('BlockDeviceMappings', ()):
                if 'Ebs' not in bd or 'SnapshotId' not in bd['Ebs']:
                    continue
                image_snaps.add(bd['Ebs']['SnapshotId'].strip())
        return set(images), image_snaps

    def get_snapshots(self):
        """Return the set of extant snapshot ids referenced by launch data."""
        snaps = set()
        for cid, cfg in self.launch_info.items():
            for bd in cfg.get('BlockDeviceMappings', ()):
                if 'Ebs' not in bd or 'SnapshotId' not in bd['Ebs']:
                    continue
                snaps.add(bd['Ebs']['SnapshotId'].strip())
        manager = self.manager.get_resource_manager('ebs-snapshot')
        return {s['SnapshotId'] for s in manager.get_resources(
            list(snaps), cache=False)}

    def process(self, asgs, event=None):
        self.initialize(asgs)
        return super(ConfigValidFilter, self).process(asgs, event)

    def get_asg_errors(self, asg):
        """Return a list of (error-type, identifier) tuples for an asg."""
        errors = []
        subnets = asg.get('VPCZoneIdentifier', '').split(',')

        for subnet in subnets:
            subnet = subnet.strip()
            if subnet not in self.subnets:
                errors.append(('invalid-subnet', subnet))

        for elb in asg['LoadBalancerNames']:
            elb = elb.strip()
            if elb not in self.elbs:
                errors.append(('invalid-elb', elb))

        for appelb_target in asg.get('TargetGroupARNs', []):
            appelb_target = appelb_target.strip()
            if appelb_target not in self.appelb_target_groups:
                errors.append(('invalid-appelb-target-group', appelb_target))

        cfg_id = self.launch_info.get_launch_id(asg)
        cfg = self.launch_info.get(asg)

        if cfg is None:
            errors.append(('invalid-config', cfg_id))
            self.log.debug(
                "asg:%s no launch config or template found" % asg['AutoScalingGroupName'])
            asg['Invalid'] = errors
            # Bug fix: return the error list (truthy, like the bare True
            # previously returned) so callers that annotate with the
            # return value record the error detail rather than `True`.
            return errors

        for sg in itertools.chain(*(
                cfg.get('SecurityGroups', ()), cfg.get('SecurityGroupIds', ()))):
            sg = sg.strip()
            if sg not in self.security_groups:
                errors.append(('invalid-security-group', sg))

        if cfg.get('KeyName') and cfg['KeyName'].strip() not in self.key_pairs:
            errors.append(('invalid-key-pair', cfg['KeyName']))

        if cfg.get('ImageId') and cfg['ImageId'].strip() not in self.images:
            errors.append(('invalid-image', cfg['ImageId']))

        for bd in cfg.get('BlockDeviceMappings', ()):
            if 'Ebs' not in bd or 'SnapshotId' not in bd['Ebs']:
                continue
            snapshot_id = bd['Ebs']['SnapshotId'].strip()
            # Snapshots strongly referenced by an image were validated there.
            if snapshot_id in self.image_snaps:
                continue
            if snapshot_id not in self.snapshots:
                errors.append(('invalid-snapshot', bd['Ebs']['SnapshotId']))
        return errors
@ASG.filter_registry.register('valid')
class ValidConfigFilter(ConfigValidFilter):
    """Filters autoscale groups to find those that are structurally valid.

    This operates as the inverse of the invalid filter for multi-step
    workflows.

    See details on the invalid filter for a list of checks made.

    :example:

    .. code-block:: yaml

        policies:
          - name: asg-valid-config
            resource: asg
            filters:
              - valid
    """

    schema = type_schema('valid')

    def __call__(self, asg):
        # An asg is valid exactly when no structural errors are found.
        return not self.get_asg_errors(asg)
@ASG.filter_registry.register('invalid')
class InvalidConfigFilter(ConfigValidFilter):
    """Filter autoscale groups to find those that are structurally invalid.

    Structurally invalid means that the auto scale group will not be able
    to launch an instance successfully as the configuration has

    - invalid subnets
    - invalid security groups
    - invalid key pair name
    - invalid launch config volume snapshots
    - invalid amis
    - invalid health check elb (slower)

    Internally this tries to reuse other resource managers for better
    cache utilization.

    :example:

    .. code-block:: yaml

        policies:
          - name: asg-invalid-config
            resource: asg
            filters:
              - invalid
    """
    schema = type_schema('invalid')

    def __call__(self, asg):
        # Annotate the asg with the error detail for later reporting;
        # falls through (returns None, i.e. falsy) when there are none.
        errors = self.get_asg_errors(asg)
        if errors:
            asg['Invalid'] = errors
            return True
@ASG.filter_registry.register('not-encrypted')
class NotEncryptedFilter(Filter):
    """Check if an ASG is configured to have unencrypted volumes.

    Checks both the ami snapshots and the launch configuration.

    :example:

    .. code-block:: yaml

        policies:
          - name: asg-unencrypted
            resource: asg
            filters:
              - type: not-encrypted
                exclude_image: true
    """
    schema = type_schema('not-encrypted', exclude_image={'type': 'boolean'})
    permissions = (
        'ec2:DescribeImages',
        'ec2:DescribeSnapshots',
        'autoscaling:DescribeLaunchConfigurations')

    # Populated in process(); class-level defaults keep attribute access safe.
    # NOTE(review): process() assigns `self.unencrypted_launch`, not
    # `unencrypted_configs` -- the latter default looks vestigial; confirm.
    images = unencrypted_configs = unencrypted_images = None

    # TODO: resource-manager, notfound err mgr
    def process(self, asgs, event=None):
        self.launch_info = LaunchInfo(self.manager).initialize(asgs)
        self.images = self.launch_info.get_image_map()
        if not self.data.get('exclude_image'):
            self.unencrypted_images = self.get_unencrypted_images()
        self.unencrypted_launch = self.get_unencrypted_configs()
        return super(NotEncryptedFilter, self).process(asgs, event)

    def __call__(self, asg):
        launch = self.launch_info.get(asg)
        if not launch:
            self.log.warning(
                "ASG %s instances: %d has missing config or template",
                asg['AutoScalingGroupName'], len(asg['Instances']))
            return False
        launch_id = self.launch_info.get_launch_id(asg)
        unencrypted = []
        if not self.data.get('exclude_image'):
            if launch['ImageId'] in self.unencrypted_images:
                unencrypted.append('Image')
        if launch_id in self.unencrypted_launch:
            unencrypted.append('LaunchConfig')
        if unencrypted:
            # Annotate which pieces were unencrypted for reporting.
            asg['Unencrypted'] = unencrypted
        return bool(unencrypted)

    def get_unencrypted_images(self):
        """retrieve images which have unencrypted snapshots referenced."""
        unencrypted_images = set()
        for i in self.images.values():
            for bd in i['BlockDeviceMappings']:
                if 'Ebs' in bd and not bd['Ebs'].get('Encrypted'):
                    unencrypted_images.add(i['ImageId'])
                    break
        return unencrypted_images

    def get_unencrypted_configs(self):
        """retrieve configs that have unencrypted ebs volumes referenced."""
        unencrypted_configs = set()
        snaps = {}
        for cid, c in self.launch_info.items():
            image = self.images.get(c.get('ImageId', ''))
            # image deregistered/unavailable or exclude_image set
            if image is not None:
                image_block_devs = {
                    bd['DeviceName'] for bd in
                    image['BlockDeviceMappings'] if 'Ebs' in bd}
            else:
                image_block_devs = set()
            for bd in c.get('BlockDeviceMappings', ()):
                if 'Ebs' not in bd:
                    continue
                # Launch configs can shadow image devices, images have
                # precedence.
                if bd['DeviceName'] in image_block_devs:
                    continue
                if 'SnapshotId' in bd['Ebs']:
                    # Encryption state must be read from the snapshot itself;
                    # collect ids for a single batched lookup below.
                    snaps.setdefault(
                        bd['Ebs']['SnapshotId'].strip(), []).append(cid)
                elif not bd['Ebs'].get('Encrypted'):
                    unencrypted_configs.add(cid)
        if not snaps:
            return unencrypted_configs
        for s in self.get_snapshots(list(snaps.keys())):
            if not s.get('Encrypted'):
                unencrypted_configs.update(snaps[s['SnapshotId']])
        return unencrypted_configs

    def get_snapshots(self, snap_ids):
        """get snapshots corresponding to id, but tolerant of invalid id's."""
        return self.manager.get_resource_manager('ebs-snapshot').get_resources(
            snap_ids, cache=False)
@ASG.filter_registry.register('image-age')
class ImageAgeFilter(AgeFilter):
    """Filter asg by image age (in days).

    :example:

    .. code-block:: yaml

        policies:
          - name: asg-older-image
            resource: asg
            filters:
              - type: image-age
                days: 90
                op: ge
    """
    permissions = (
        "ec2:DescribeImages",
        "autoscaling:DescribeLaunchConfigurations")
    date_attribute = "CreationDate"
    schema = type_schema(
        'image-age',
        op={'$ref': '#/definitions/filters_common/comparison_operators'},
        days={'type': 'number'})

    def process(self, asgs, event=None):
        # Resolve launch data and images once so per-asg lookups are cheap.
        self.launch_info = LaunchInfo(self.manager).initialize(asgs)
        self.images = self.launch_info.get_image_map()
        return super(ImageAgeFilter, self).process(asgs, event)

    def get_resource_date(self, asg):
        # Fall back to a fixed ancient date when the launch data or image
        # is unavailable, so such groups register as very old.
        cfg = self.launch_info.get(asg) or {}
        ami = self.images.get(cfg.get('ImageId'), {})
        return parse(ami.get(
            self.date_attribute, "2000-01-01T01:01:01.000Z"))
@ASG.filter_registry.register('image')
class ImageFilter(ValueFilter):
    """Filter asg by image

    :example:

    .. code-block:: yaml

        policies:
          - name: non-windows-asg
            resource: asg
            filters:
              - type: image
                key: Platform
                value: Windows
                op: ne
    """
    permissions = (
        "ec2:DescribeImages",
        "autoscaling:DescribeLaunchConfigurations")
    schema = type_schema('image', rinherit=ValueFilter.schema)
    schema_alias = True

    def process(self, asgs, event=None):
        # Resolve launch data and images once for the whole asg set.
        self.launch_info = LaunchInfo(self.manager).initialize(asgs)
        self.images = self.launch_info.get_image_map()
        return super(ImageFilter, self).process(asgs, event)

    def __call__(self, i):
        # Bug fix: launch info may be unresolvable (None), and asg
        # resources carry no InstanceId/ImageId keys -- the previous
        # warning raised AttributeError/KeyError instead of logging.
        cfg = self.launch_info.get(i) or {}
        image = self.images.get(cfg.get('ImageId'))
        # Finally, if we have no image...
        if not image:
            self.log.warning(
                "Could not locate image for asg:%s ami:%s" % (
                    i['AutoScalingGroupName'], cfg.get('ImageId')))
            # Match instead on empty skeleton?
            return False
        return self.match(image)
@ASG.filter_registry.register('vpc-id')
class VpcIdFilter(ValueFilter):
    """Filters ASG based on the VpcId

    This filter is available as a ValueFilter as the vpc-id is not natively
    associated to the results from describing the autoscaling groups.

    :example:

    .. code-block:: yaml

        policies:
          - name: asg-vpc-xyz
            resource: asg
            filters:
              - type: vpc-id
                value: vpc-12ab34cd
    """

    schema = type_schema(
        'vpc-id', rinherit=ValueFilter.schema)
    # Key is fixed to the derived VpcId annotation; users only supply value.
    schema['properties'].pop('key')
    schema_alias = False
    permissions = ('ec2:DescribeSubnets',)

    # TODO: annotation
    def __init__(self, data, manager=None):
        super(VpcIdFilter, self).__init__(data, manager)
        self.data['key'] = 'VpcId'

    def process(self, asgs, event=None):
        # Group asgs by their first subnet, resolve that subnet's vpc, and
        # annotate each asg with VpcId before the value filter evaluates.
        subnets = {}
        for a in asgs:
            subnet_ids = a.get('VPCZoneIdentifier', '')
            if not subnet_ids:
                continue
            subnets.setdefault(subnet_ids.split(',')[0], []).append(a)

        subnet_manager = self.manager.get_resource_manager('subnet')
        # Invalid subnets on asgs happen, so query all
        all_subnets = {s['SubnetId']: s for s in subnet_manager.resources()}

        for s, s_asgs in subnets.items():
            if s not in all_subnets:
                self.log.warning(
                    "invalid subnet %s for asgs: %s",
                    s, [a['AutoScalingGroupName'] for a in s_asgs])
                continue
            for a in s_asgs:
                a['VpcId'] = all_subnets[s]['VpcId']
        return super(VpcIdFilter, self).process(asgs)
@ASG.filter_registry.register('progagated-tags')  # compatibility
@ASG.filter_registry.register('propagated-tags')
class PropagatedTagFilter(Filter):
    """Filter ASG based on propagated tags

    This filter is designed to find all autoscaling groups that have a list
    of tag keys (provided) that are set to propagate to new instances. Using
    this will allow for easy validation of asg tag sets are in place across an
    account for compliance.

    :example:

    .. code-block:: yaml

        policies:
          - name: asg-non-propagated-tags
            resource: asg
            filters:
              - type: propagated-tags
                keys: ["ABC", "BCD"]
                match: false
                propagate: true
    """
    schema = type_schema(
        'progagated-tags',
        aliases=('propagated-tags',),
        keys={'type': 'array', 'items': {'type': 'string'}},
        match={'type': 'boolean'},
        propagate={'type': 'boolean'})
    permissions = (
        "autoscaling:DescribeLaunchConfigurations",
        "autoscaling:DescribeAutoScalingGroups")

    def process(self, asgs, event=None):
        keys = self.data.get('keys', [])
        match = self.data.get('match', True)
        # When propagate is true we look at tags flagged for launch
        # propagation; when false, at tags explicitly not propagated.
        want_propagated = self.data.get('propagate', True)

        results = []
        for asg in asgs:
            present = [
                t['Key'] for t in asg.get('Tags', [])
                if t['Key'] in keys
                and bool(t['PropagateAtLaunch']) == want_propagated]
            # Keep the asg when its all-keys-present state agrees with
            # the requested match polarity.
            found = all(k in present for k in keys)
            if found == bool(match):
                results.append(asg)
        return results
@ASG.action_registry.register('post-finding')
class AsgPostFinding(PostFinding):
    """Format and post ASG resources as Security Hub findings."""

    resource_type = 'AwsAutoScalingAutoScalingGroup'
    # Only get_launch_id is used here, which needs no resource manager.
    launch_info = LaunchInfo(None)

    def format_resource(self, r):
        envelope, payload = self.format_envelope(r)
        details = select_keys(r, [
            'CreatedTime', 'HealthCheckType', 'HealthCheckGracePeriod', 'LoadBalancerNames'])
        lid = self.launch_info.get_launch_id(r)
        if isinstance(lid, tuple):
            # Launch template id/version pair -> "id:version" string.
            lid = "%s:%s" % lid
        details['CreatedTime'] = details['CreatedTime'].isoformat()
        # let's arbitrarily cut off key information per security hub's restrictions...
        details['LaunchConfigurationName'] = lid[:32]
        payload.update(details)
        return envelope
@ASG.action_registry.register('tag-trim')
class GroupTagTrim(TagTrim):
    """Trim an asg's tags down to stay under the service tag limit.

    :example:

    .. code-block:: yaml

        policies:
          - name: asg-tag-trim
            resource: asg
            filters:
              - type: tag-count
                count: 10
            actions:
              - type: tag-trim
                space: 1
                preserve:
                  - OwnerName
                  - OwnerContact
    """

    max_tag_count = 10
    permissions = ('autoscaling:DeleteTags',)

    def process_tag_removal(self, client, resource, candidates):
        # Build one delete request covering every candidate key.
        client.delete_tags(Tags=[
            {'Key': key,
             'ResourceType': 'auto-scaling-group',
             'ResourceId': resource['AutoScalingGroupName']}
            for key in candidates])
@ASG.filter_registry.register('capacity-delta')
class CapacityDelta(Filter):
    """Filter returns ASG that have less instances than desired or required

    :example:

    .. code-block:: yaml

        policies:
          - name: asg-capacity-delta
            resource: asg
            filters:
              - capacity-delta
    """

    schema = type_schema('capacity-delta')

    def process(self, asgs, event=None):
        def below_target(asg):
            # Running instance count short of either desired or minimum.
            running = len(asg['Instances'])
            return running < asg['DesiredCapacity'] or running < asg['MinSize']

        return [a for a in asgs if below_target(a)]
@ASG.filter_registry.register('user-data')
class UserDataFilter(ValueFilter):
    """Filter on ASG's whose launch configs have matching userdata.
    Note: It is highly recommended to use regexes with the ?sm flags, since Custodian
    uses re.match() and userdata spans multiple lines.

    :example:

    .. code-block:: yaml

        policies:
          - name: lc_userdata
            resource: asg
            filters:
              - type: user-data
                op: regex
                value: (?smi).*password=
            actions:
              - delete
    """
    schema = type_schema('user-data', rinherit=ValueFilter.schema)
    schema_alias = False
    batch_size = 50
    # Decoded userdata is cached on the asg under this key.
    annotation = 'c7n:user-data'

    def __init__(self, data, manager):
        super(UserDataFilter, self).__init__(data, manager)
        self.data['key'] = '"c7n:user-data"'

    def get_permissions(self):
        return self.manager.get_resource_manager('asg').get_permissions()

    def process(self, asgs, event=None):
        '''Get list of autoscaling groups whose launch configs match the
        user-data filter.

        :return: List of ASG's with matching launch configs
        '''
        self.data['key'] = '"c7n:user-data"'
        launch_info = LaunchInfo(self.manager).initialize(asgs)

        results = []
        for asg in asgs:
            launch_config = launch_info.get(asg)
            if launch_config is None:
                # Bug fix: dangling launch config/template reference --
                # there is no userdata to match; previously this raised
                # TypeError on the membership test below.
                continue
            if self.annotation not in launch_config:
                if not launch_config.get('UserData'):
                    asg[self.annotation] = None
                else:
                    asg[self.annotation] = deserialize_user_data(
                        launch_config['UserData'])
            if self.match(asg):
                results.append(asg)
        return results
@ASG.action_registry.register('resize')
class Resize(Action):
    """Action to resize the min/max/desired instances in an ASG

    There are several ways to use this action:

    1. set min/desired to current running instances

    .. code-block:: yaml

        policies:
          - name: asg-resize
            resource: asg
            filters:
              - capacity-delta
            actions:
              - type: resize
                desired-size: "current"

    2. apply a fixed resize of min, max or desired, optionally saving the
       previous values to a named tag (for restoring later):

    .. code-block:: yaml

        policies:
          - name: offhours-asg-off
            resource: asg
            filters:
              - type: offhour
                offhour: 19
                default_tz: bst
            actions:
              - type: resize
                min-size: 0
                desired-size: 0
                save-options-tag: OffHoursPrevious

    3. restore previous values for min/max/desired from a tag:

    .. code-block:: yaml

        policies:
          - name: offhours-asg-on
            resource: asg
            filters:
              - type: onhour
                onhour: 8
                default_tz: bst
            actions:
              - type: resize
                restore-options-tag: OffHoursPrevious
    """

    schema = type_schema(
        'resize',
        **{
            'min-size': {'type': 'integer', 'minimum': 0},
            'max-size': {'type': 'integer', 'minimum': 0},
            'desired-size': {
                "anyOf": [
                    {'enum': ["current"]},
                    {'type': 'integer', 'minimum': 0}
                ]
            },
            # support previous key name with underscore
            'desired_size': {
                "anyOf": [
                    {'enum': ["current"]},
                    {'type': 'integer', 'minimum': 0}
                ]
            },
            'save-options-tag': {'type': 'string'},
            'restore-options-tag': {'type': 'string'},
        }
    )
    permissions = (
        'autoscaling:UpdateAutoScalingGroup',
        'autoscaling:CreateOrUpdateTags'
    )

    def process(self, asgs):
        # ASG parameters to save to/restore from a tag
        asg_params = ['MinSize', 'MaxSize', 'DesiredCapacity']

        # support previous param desired_size when desired-size is not present
        if 'desired_size' in self.data and 'desired-size' not in self.data:
            self.data['desired-size'] = self.data['desired_size']

        client = local_session(self.manager.session_factory).client(
            'autoscaling')
        for a in asgs:
            tag_map = {t['Key']: t['Value'] for t in a.get('Tags', [])}
            update = {}
            current_size = len(a['Instances'])

            if 'restore-options-tag' in self.data:
                # we want to restore all ASG size params from saved data
                self.log.debug(
                    'Want to restore ASG %s size from tag %s' %
                    (a['AutoScalingGroupName'], self.data['restore-options-tag']))
                if self.data['restore-options-tag'] in tag_map:
                    # Saved format: "MinSize=N;MaxSize=N;DesiredCapacity=N"
                    for field in tag_map[self.data['restore-options-tag']].split(';'):
                        (param, value) = field.split('=')
                        if param in asg_params:
                            update[param] = int(value)
            else:
                # we want to resize, parse provided params
                if 'min-size' in self.data:
                    update['MinSize'] = self.data['min-size']
                if 'max-size' in self.data:
                    update['MaxSize'] = self.data['max-size']
                if 'desired-size' in self.data:
                    if self.data['desired-size'] == 'current':
                        update['DesiredCapacity'] = min(current_size, a['DesiredCapacity'])
                        if 'MinSize' not in update:
                            # unless we were given a new value for min_size then
                            # ensure it is at least as low as current_size
                            update['MinSize'] = min(current_size, a['MinSize'])
                    # idiom: isinstance rather than type() comparison
                    elif isinstance(self.data['desired-size'], int):
                        update['DesiredCapacity'] = self.data['desired-size']

            if update:
                self.log.debug('ASG %s size: current=%d, min=%d, max=%d, desired=%d'
                               % (a['AutoScalingGroupName'], current_size, a['MinSize'],
                                  a['MaxSize'], a['DesiredCapacity']))

                if 'save-options-tag' in self.data:
                    # save existing ASG params to a tag before changing them
                    self.log.debug('Saving ASG %s size to tag %s' %
                                   (a['AutoScalingGroupName'], self.data['save-options-tag']))
                    tags = [dict(
                        Key=self.data['save-options-tag'],
                        PropagateAtLaunch=False,
                        Value=';'.join({'%s=%d' % (param, a[param]) for param in asg_params}),
                        ResourceId=a['AutoScalingGroupName'],
                        ResourceType='auto-scaling-group',
                    )]
                    self.manager.retry(client.create_or_update_tags, Tags=tags)

                self.log.debug('Resizing ASG %s with %s' % (a['AutoScalingGroupName'],
                                                            str(update)))
                self.manager.retry(
                    client.update_auto_scaling_group,
                    AutoScalingGroupName=a['AutoScalingGroupName'],
                    **update)
            else:
                self.log.debug('nothing to resize')
@ASG.action_registry.register('remove-tag')
@ASG.action_registry.register('untag')  # compatibility
@ASG.action_registry.register('unmark')  # compatibility
class RemoveTag(Action):
    """Action to remove tag/tags from an ASG

    :example:

    .. code-block:: yaml

        policies:
          - name: asg-remove-unnecessary-tags
            resource: asg
            filters:
              - "tag:UnnecessaryTag": present
            actions:
              - type: remove-tag
                key: UnnecessaryTag
    """

    schema = type_schema(
        'remove-tag',
        aliases=('untag', 'unmark'),
        tags={'type': 'array', 'items': {'type': 'string'}},
        key={'type': 'string'})

    permissions = ('autoscaling:DeleteTags',)
    batch_size = 1

    def process(self, asgs):
        """Delete the configured tag keys from each asg in parallel batches."""
        error = False
        tags = self.data.get('tags', [])
        if not tags:
            # Fall back to the single `key` (or the default marker tag).
            tags = [self.data.get('key', DEFAULT_TAG)]
        client = local_session(self.manager.session_factory).client('autoscaling')
        with self.executor_factory(max_workers=2) as w:
            futures = {}
            for asg_set in chunks(asgs, self.batch_size):
                futures[w.submit(
                    self.process_resource_set, client, asg_set, tags)] = asg_set
            for f in as_completed(futures):
                asg_set = futures[f]
                if f.exception():
                    # Remember the (last) failure; re-raised only after
                    # all batches have been attempted.
                    error = f.exception()
                    self.log.exception(
                        "Exception untagging asg:%s tag:%s error:%s" % (
                            ", ".join([a['AutoScalingGroupName']
                                       for a in asg_set]),
                            self.data.get('key', DEFAULT_TAG),
                            f.exception()))
        if error:
            raise error

    def process_resource_set(self, client, asgs, tags):
        # One delete request per batch covering every asg x tag pair.
        tag_set = []
        for a in asgs:
            for t in tags:
                tag_set.append(dict(
                    Key=t, ResourceType='auto-scaling-group',
                    ResourceId=a['AutoScalingGroupName']))
        self.manager.retry(client.delete_tags, Tags=tag_set)
@ASG.action_registry.register('tag')
@ASG.action_registry.register('mark')
class Tag(Action):
    """Action to add a tag to an ASG

    The *propagate* parameter can be used to specify that the tag being added
    will need to be propagated down to each ASG instance associated or simply
    to the ASG itself.

    :example:

    .. code-block:: yaml

        policies:
          - name: asg-add-owner-tag
            resource: asg
            filters:
              - "tag:OwnerName": absent
            actions:
              - type: tag
                key: OwnerName
                value: OwnerName
                propagate: true
    """

    schema = type_schema(
        'tag',
        key={'type': 'string'},
        value={'type': 'string'},
        tags={'type': 'object'},
        # Backwards compatibility
        tag={'type': 'string'},
        msg={'type': 'string'},
        propagate={'type': 'boolean'},
        aliases=('mark',)
    )
    permissions = ('autoscaling:CreateOrUpdateTags',)
    batch_size = 1

    def get_tag_set(self):
        """Build the tag list from key/value (with legacy tag/msg aliases)
        plus any entries in the `tags` mapping."""
        tags = []
        key = self.data.get('key', self.data.get('tag', DEFAULT_TAG))
        value = self.data.get(
            'value', self.data.get(
                'msg', 'AutoScaleGroup does not meet policy guidelines'))
        if key and value:
            tags.append({'Key': key, 'Value': value})

        for k, v in self.data.get('tags', {}).items():
            tags.append({'Key': k, 'Value': v})

        return tags

    def process(self, asgs):
        """Apply the tag set to each asg in parallel batches."""
        tags = self.get_tag_set()
        error = None

        client = self.get_client()
        with self.executor_factory(max_workers=3) as w:
            futures = {}
            for asg_set in chunks(asgs, self.batch_size):
                futures[w.submit(
                    self.process_resource_set, client, asg_set, tags)] = asg_set
            for f in as_completed(futures):
                asg_set = futures[f]
                if f.exception():
                    # Bug fix: record the failure so it is re-raised after
                    # all batches complete; previously `error` was never
                    # assigned and tagging failures were silently swallowed
                    # (inconsistent with RemoveTag.process).
                    error = f.exception()
                    self.log.exception(
                        "Exception tagging tag:%s error:%s asg:%s" % (
                            tags,
                            f.exception(),
                            ", ".join([a['AutoScalingGroupName']
                                       for a in asg_set])))
        if error:
            raise error

    def process_resource_set(self, client, asgs, tags):
        tag_params = []
        propagate = self.data.get('propagate', False)
        # Default the propagation flag on tags that don't carry one.
        for t in tags:
            if 'PropagateAtLaunch' not in t:
                t['PropagateAtLaunch'] = propagate
        for t in tags:
            for a in asgs:
                atags = dict(t)
                atags['ResourceType'] = 'auto-scaling-group'
                atags['ResourceId'] = a['AutoScalingGroupName']
                tag_params.append(atags)
        self.manager.retry(client.create_or_update_tags, Tags=tag_params)

    def get_client(self):
        return local_session(self.manager.session_factory).client('autoscaling')
@ASG.action_registry.register('propagate-tags')
class PropagateTags(Action):
"""Propagate tags to an asg instances.
In AWS changing an asg tag does not propagate to instances.
This action exists to do that, and can also trim older tags
not present on the asg anymore that are present on instances.
:example:
.. code-block:: yaml
policies:
- name: asg-propagate-required
resource: asg
filters:
- "tag:OwnerName": present
actions:
- type: propagate-tags
tags:
- OwnerName
"""
schema = type_schema(
'propagate-tags',
tags={'type': 'array', 'items': {'type': 'string'}},
trim={'type': 'boolean'})
permissions = ('ec2:DeleteTags', 'ec2:CreateTags')
def validate(self):
if not isinstance(self.data.get('tags', []), (list, tuple)):
raise ValueError("No tags specified")
return self
def process(self, asgs):
if not asgs:
return
if self.data.get('trim', False):
self.instance_map = self.get_instance_map(asgs)
with self.executor_factory(max_workers=3) as w:
instance_count = sum(list(w.map(self.process_asg, asgs)))
self.log.info("Applied tags to %d instances" % instance_count)
def process_asg(self, asg):
client = local_session(self.manager.session_factory).client('ec2')
instance_ids = [i['InstanceId'] for i in asg['Instances']]
tag_map = {t['Key']: t['Value'] for t in asg.get('Tags', [])
if t['PropagateAtLaunch'] and not t['Key'].startswith('aws:')}
if self.data.get('tags'):
tag_map = {
k: v for k, v in tag_map.items()
if k in self.data['tags']}
tag_set = set(tag_map)
if self.data.get('trim', False):
instances = [self.instance_map[i] for i in instance_ids]
self.prune_instance_tags(client, asg, tag_set, instances)
if not self.manager.config.dryrun and instances:
client.create_tags(
Resources=instance_ids,
Tags=[{'Key': k, 'Value': v} for k, v in tag_map.items()])
return len(instance_ids)
def prune_instance_tags(self, client, asg, tag_set, instances):
"""Remove tags present on all asg instances which are not present
on the asg.
"""
instance_tags = Counter()
instance_count = len(instances)
remove_tags = []
extra_tags = []
for i in instances:
instance_tags.update([
t['Key'] for t in i['Tags']
if not t['Key'].startswith('aws:')])
for k, v in instance_tags.items():
if not v >= instance_count:
extra_tags.append(k)
continue
if k not in tag_set:
remove_tags.append(k)
if remove_tags:
self.log.debug("Pruning asg:%s instances:%d of old tags: %s" % (
asg['AutoScalingGroupName'], instance_count, remove_tags))
if extra_tags:
self.log.debug("Asg: %s has uneven tags population: %s" % (
asg['AutoScalingGroupName'], instance_tags))
# Remove orphan tags
remove_tags.extend(extra_tags)
if not self.manager.config.dryrun:
client.delete_tags(
Resources=[i['InstanceId'] for i in instances],
Tags=[{'Key': t} for t in remove_tags])
def get_instance_map(self, asgs):
    """Return a mapping of instance id -> instance record for all asg
    members across the given asgs.
    """
    member_ids = []
    for group in asgs:
        if not group['Instances']:
            continue
        member_ids.extend(i['InstanceId'] for i in group['Instances'])
    if not member_ids:
        return {}
    records = self.manager.get_resource_manager(
        'ec2').get_resources(member_ids)
    return {r['InstanceId']: r for r in records}
@ASG.action_registry.register('rename-tag')
class RenameTag(Action):
    """Rename a tag on an AutoScaleGroup.

    :example:

    .. code-block:: yaml

            policies:
              - name: asg-rename-owner-tag
                resource: asg
                filters:
                  - "tag:OwnerNames": present
                actions:
                  - type: rename-tag
                    propagate: true
                    source: OwnerNames
                    dest: OwnerName
    """

    schema = type_schema(
        'rename-tag', required=['source', 'dest'],
        propagate={'type': 'boolean'},
        source={'type': 'string'},
        dest={'type': 'string'})

    def get_permissions(self):
        """asg tag perms always; ec2 tag perms only when propagating."""
        permissions = (
            'autoscaling:CreateOrUpdateTags',
            'autoscaling:DeleteTags')
        if self.data.get('propagate', True):
            permissions += ('ec2:CreateTags', 'ec2:DeleteTags')
        return permissions

    def process(self, asgs):
        source = self.data.get('source')
        dest = self.data.get('dest')
        count = len(asgs)

        # Only operate on asgs actually carrying the source tag.
        # BUG FIX: default missing 'Tags' to an empty sequence -- an asg
        # without any tags previously raised TypeError (iterating None).
        filtered = []
        for a in asgs:
            for t in a.get('Tags', ()):
                if t['Key'] == source:
                    filtered.append(a)
                    break
        asgs = filtered
        self.log.info("Filtered from %d asgs to %d", count, len(asgs))
        self.log.info(
            "Renaming %s to %s on %d asgs", source, dest, len(filtered))
        with self.executor_factory(max_workers=3) as w:
            list(w.map(self.process_asg, asgs))

    def process_asg(self, asg):
        """Move source tag to destination tag.

        Check tag count on asg
        Create new tag tag
        Delete old tag
        Check tag count on instance
        Create new tag
        Delete old tag
        """
        source_tag = self.data.get('source')
        tag_map = {t['Key']: t for t in asg.get('Tags', [])}
        source = tag_map[source_tag]
        destination_tag = self.data.get('dest')
        propagate = self.data.get('propagate', True)
        client = local_session(
            self.manager.session_factory).client('autoscaling')
        # technically safer to create first, but running into
        # max tags constraints, otherwise.
        #
        # delete_first = len([t for t in tag_map if not t.startswith('aws:')])
        client.delete_tags(Tags=[
            {'ResourceId': asg['AutoScalingGroupName'],
             'ResourceType': 'auto-scaling-group',
             'Key': source_tag,
             'Value': source['Value']}])
        client.create_or_update_tags(Tags=[
            {'ResourceId': asg['AutoScalingGroupName'],
             'ResourceType': 'auto-scaling-group',
             'PropagateAtLaunch': propagate,
             'Key': destination_tag,
             'Value': source['Value']}])
        if propagate:
            self.propagate_instance_tag(source, destination_tag, asg)

    def propagate_instance_tag(self, source, destination_tag, asg):
        """Apply the rename to the asg's current ec2 instances."""
        client = local_session(self.manager.session_factory).client('ec2')
        client.delete_tags(
            Resources=[i['InstanceId'] for i in asg['Instances']],
            Tags=[{"Key": source['Key']}])
        client.create_tags(
            Resources=[i['InstanceId'] for i in asg['Instances']],
            Tags=[{'Key': destination_tag, 'Value': source['Value']}])
@ASG.action_registry.register('mark-for-op')
class MarkForOp(TagDelayedAction):
    """Tag an ASG so that a later policy run performs the named op on it.

    :example:

    .. code-block:: yaml

            policies:
              - name: asg-suspend-schedule
                resource: asg
                filters:
                  - type: value
                    key: MinSize
                    value: 2
                actions:
                  - type: mark-for-op
                    tag: custodian_suspend
                    message: "Suspending: {op}@{action_date}"
                    op: suspend
                    days: 7
    """

    schema = type_schema(
        'mark-for-op',
        op={'type': 'string'},
        key={'type': 'string'},
        tag={'type': 'string'},
        tz={'type': 'string'},
        msg={'type': 'string'},
        message={'type': 'string'},
        days={'type': 'number', 'minimum': 0},
        hours={'type': 'number', 'minimum': 0})
    schema_alias = False
    default_template = (
        'AutoScaleGroup does not meet org policy: {op}@{action_date}')

    def get_config_values(self):
        """Resolve effective settings, preferring 'key' over 'tag' and
        'message' over 'msg', and compute the scheduled action date.
        """
        data = self.data
        cfg = {}
        cfg['op'] = data.get('op', 'stop')
        cfg['tag'] = data.get('key', data.get('tag', DEFAULT_TAG))
        cfg['msg'] = data.get(
            'message', data.get('msg', self.default_template))
        cfg['tz'] = data.get('tz', 'utc')
        cfg['days'] = data.get('days', 0)
        cfg['hours'] = data.get('hours', 0)
        cfg['action_date'] = self.generate_timestamp(
            cfg['days'], cfg['hours'])
        return cfg
@ASG.action_registry.register('suspend')
class Suspend(Action):
    """Action to suspend ASG processes and instances

    AWS ASG suspend/resume and process docs
    https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html

    :example:

    .. code-block:: yaml

            policies:
              - name: asg-suspend-processes
                resource: asg
                filters:
                  - "tag:SuspendTag": present
                actions:
                  - type: suspend
    """
    permissions = ("autoscaling:SuspendProcesses", "ec2:StopInstances")

    # Full set of suspendable scaling processes; kept as a list here so
    # the schema enum below can reference it.
    ASG_PROCESSES = [
        "Launch",
        "Terminate",
        "HealthCheck",
        "ReplaceUnhealthy",
        "AZRebalance",
        "AlarmNotification",
        "ScheduledActions",
        "AddToLoadBalancer"]

    schema = type_schema(
        'suspend',
        exclude={
            'type': 'array',
            'title': 'ASG Processes to not suspend',
            'items': {'enum': ASG_PROCESSES}})

    # Rebound as a set *after* the schema enum captured the list form.
    ASG_PROCESSES = set(ASG_PROCESSES)

    def process(self, asgs):
        with self.executor_factory(max_workers=3) as w:
            list(w.map(self.process_asg, asgs))

    def process_asg(self, asg):
        """Multistep process to stop an asg aprori of setup

        - suspend processes
        - stop instances
        """
        session = local_session(self.manager.session_factory)
        asg_client = session.client('autoscaling')
        # Suspend everything except the user-excluded processes.
        processes = list(self.ASG_PROCESSES.difference(
            self.data.get('exclude', ())))

        try:
            self.manager.retry(
                asg_client.suspend_processes,
                ScalingProcesses=processes,
                AutoScalingGroupName=asg['AutoScalingGroupName'])
        except ClientError as e:
            # ValidationError: asg was deleted out from under us.
            if e.response['Error']['Code'] == 'ValidationError':
                return
            raise
        ec2_client = session.client('ec2')
        try:
            instance_ids = [i['InstanceId'] for i in asg['Instances']]
            if not instance_ids:
                return
            retry = get_retry((
                'RequestLimitExceeded', 'Client.RequestLimitExceeded'))
            retry(ec2_client.stop_instances, InstanceIds=instance_ids)
        except ClientError as e:
            # Instances already gone or mid-transition: best effort, warn.
            if e.response['Error']['Code'] in (
                    'InvalidInstanceID.NotFound',
                    'IncorrectInstanceState'):
                self.log.warning("Erroring stopping asg instances %s %s" % (
                    asg['AutoScalingGroupName'], e))
                return
            raise
@ASG.action_registry.register('resume')
class Resume(Action):
    """Resume a suspended autoscale group and its instances

    Parameter 'delay' is the amount of time (in seconds) to wait
    between resuming instances in the asg, and restarting the internal
    asg processed which gives some grace period before health checks
    turn on within the ASG (default value: 30)

    :example:

    .. code-block:: yaml

            policies:
              - name: asg-resume-processes
                resource: asg
                filters:
                  - "tag:Resume": present
                actions:
                  - type: resume
                    delay: 300
    """

    schema = type_schema('resume', delay={'type': 'number'})
    permissions = ("autoscaling:ResumeProcesses", "ec2:StartInstances")

    def process(self, asgs):
        # Only asgs with suspended processes need any work.
        original_count = len(asgs)
        asgs = [a for a in asgs if a['SuspendedProcesses']]
        self.delay = self.data.get('delay', 30)
        self.log.debug("Filtered from %d to %d suspended asgs",
                       original_count, len(asgs))
        session = local_session(self.manager.session_factory)
        ec2_client = session.client('ec2')
        asg_client = session.client('autoscaling')

        # Phase 1: start the member instances.
        with self.executor_factory(max_workers=3) as w:
            futures = {}
            for a in asgs:
                futures[w.submit(self.resume_asg_instances, ec2_client, a)] = a
            for f in as_completed(futures):
                if f.exception():
                    self.log.error("Traceback resume asg:%s instances error:%s" % (
                        futures[f]['AutoScalingGroupName'],
                        f.exception()))
                    continue

        # Grace period so instances come up before health checks resume.
        self.log.debug("Sleeping for asg health check grace")
        time.sleep(self.delay)

        # Phase 2: resume the asg's internal scaling processes.
        with self.executor_factory(max_workers=3) as w:
            futures = {}
            for a in asgs:
                futures[w.submit(self.resume_asg, asg_client, a)] = a
            for f in as_completed(futures):
                if f.exception():
                    self.log.error("Traceback resume asg:%s error:%s" % (
                        futures[f]['AutoScalingGroupName'],
                        f.exception()))

    def resume_asg_instances(self, ec2_client, asg):
        """Resume asg instances.
        """
        instance_ids = [i['InstanceId'] for i in asg['Instances']]
        if not instance_ids:
            return
        retry = get_retry((
            'RequestLimitExceeded', 'Client.RequestLimitExceeded'))
        retry(ec2_client.start_instances, InstanceIds=instance_ids)

    def resume_asg(self, asg_client, asg):
        """Resume asg processes.
        """
        self.manager.retry(
            asg_client.resume_processes,
            AutoScalingGroupName=asg['AutoScalingGroupName'])
@ASG.action_registry.register('delete')
class Delete(Action):
    """Action to delete an ASG

    The 'force' parameter is needed when deleting an ASG that has instances
    attached to it.

    :example:

    .. code-block:: yaml

            policies:
              - name: asg-delete-bad-encryption
                resource: asg
                filters:
                  - type: not-encrypted
                    exclude_image: true
                actions:
                  - type: delete
                    force: true
    """

    schema = type_schema('delete', force={'type': 'boolean'})
    permissions = ("autoscaling:DeleteAutoScalingGroup",)

    def process(self, asgs):
        client = local_session(
            self.manager.session_factory).client('autoscaling')
        for group in asgs:
            self.process_asg(client, group)

    def process_asg(self, client, asg):
        should_force = self.data.get('force', False)
        try:
            self.manager.retry(
                client.delete_auto_scaling_group,
                AutoScalingGroupName=asg['AutoScalingGroupName'],
                ForceDelete=should_force)
        except ClientError as e:
            # ValidationError means the asg is already gone.
            if e.response['Error']['Code'] != 'ValidationError':
                raise
@resources.register('launch-config')
class LaunchConfig(query.QueryResourceManager):
    """Resource manager for EC2 AutoScaling launch configurations."""

    class resource_type(query.TypeInfo):
        # Declarative metadata consumed by the generic query machinery.
        service = 'autoscaling'
        arn_type = 'launchConfiguration'
        id = name = 'LaunchConfigurationName'
        date = 'CreatedTime'
        enum_spec = (
            'describe_launch_configurations', 'LaunchConfigurations', None)
        filter_name = 'LaunchConfigurationNames'
        filter_type = 'list'
        cfn_type = config_type = 'AWS::AutoScaling::LaunchConfiguration'
@LaunchConfig.filter_registry.register('age')
class LaunchConfigAge(AgeFilter):
    """Filter ASG launch configuration by age (in days)

    :example:

    .. code-block:: yaml

            policies:
              - name: asg-launch-config-old
                resource: launch-config
                filters:
                  - type: age
                    days: 90
                    op: ge
    """

    # Age is measured from the launch configuration's creation time.
    date_attribute = "CreatedTime"
    schema = type_schema(
        'age',
        op={'$ref': '#/definitions/filters_common/comparison_operators'},
        days={'type': 'number'})
@LaunchConfig.filter_registry.register('unused')
class UnusedLaunchConfig(Filter):
    """Filter launch configurations that exist but are referenced by no ASG.

    :example:

    .. code-block:: yaml

            policies:
              - name: asg-unused-launch-config
                resource: launch-config
                filters:
                  - unused
    """

    schema = type_schema('unused')

    def get_permissions(self):
        # Needs asg visibility to determine which configs are in use.
        return self.manager.get_resource_manager('asg').get_permissions()

    def process(self, configs, event=None):
        asgs = self.manager.get_resource_manager('asg').resources()
        in_use = set()
        for group in asgs:
            # Launch-template based asgs reference no launch config.
            if group.get('LaunchTemplate'):
                continue
            in_use.add(group.get(
                'LaunchConfigurationName', group['AutoScalingGroupName']))
        return [c for c in configs
                if c['LaunchConfigurationName'] not in in_use]
@LaunchConfig.action_registry.register('delete')
class LaunchConfigDelete(Action):
    """Delete launch configurations (typically the unused ones).

    :example:

    .. code-block:: yaml

            policies:
              - name: asg-unused-launch-config-delete
                resource: launch-config
                filters:
                  - unused
                actions:
                  - delete
    """

    schema = type_schema('delete')
    permissions = ("autoscaling:DeleteLaunchConfiguration",)

    def process(self, configs):
        client = local_session(self.manager.session_factory).client('autoscaling')
        for cfg in configs:
            self.process_config(client, cfg)

    def process_config(self, client, config):
        try:
            client.delete_launch_configuration(
                LaunchConfigurationName=config[
                    'LaunchConfigurationName'])
        except ClientError as e:
            # Catch already deleted
            if e.response['Error']['Code'] != 'ValidationError':
                raise
| |
from __future__ import print_function
import os.path as op
from nose.tools import assert_true, assert_raises
import warnings
from copy import deepcopy
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
from scipy.fftpack import fft
from mne.datasets import sample
from mne import (stats, SourceEstimate, VolSourceEstimate, Label,
read_source_spaces)
from mne import read_source_estimate, morph_data, extract_label_time_course
from mne.source_estimate import (spatio_temporal_tris_connectivity,
spatio_temporal_src_connectivity,
compute_morph_matrix, grade_to_vertices)
from mne.minimum_norm import read_inverse_operator
from mne.label import read_labels_from_annot, label_sign_flip
from mne.utils import (_TempDir, requires_pandas, requires_sklearn,
requires_pytables)
warnings.simplefilter('always')  # enable b/c these tests throw warnings

# Paths into the MNE sample dataset (not downloaded here; tests that
# need it are gated by @sample.requires_sample_data).
data_path = sample.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-lh.stc')
fname_inv = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis-meg-oct-6-meg-inv.fif')
fname_vol = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis-grad-vol-7-fwd-sensmap-vol.w')
fname_vsrc = op.join(data_path, 'MEG', 'sample',
                     'sample_audvis-meg-vol-7-fwd.fif')
fname_t1 = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')

# Scratch directory shared by all tests in this module.
tempdir = _TempDir()
@sample.requires_sample_data
def test_volume_stc():
    """Test volume STCs
    """
    N = 100
    data = np.arange(N)[:, np.newaxis]
    # Three data/vertno pairings, including a 2d vertno column that
    # should be read back flattened (see vertno_reads).
    datas = [data, data, np.arange(2)[:, np.newaxis]]
    vertno = np.arange(N)
    vertnos = [vertno, vertno[:, np.newaxis], np.arange(2)[:, np.newaxis]]
    vertno_reads = [vertno, vertno, np.arange(2)]
    for data, vertno, vertno_read in zip(datas, vertnos, vertno_reads):
        stc = VolSourceEstimate(data, vertno, 0, 1)
        fname_temp = op.join(tempdir, 'temp-vl.stc')
        stc_new = stc
        # Save/read twice so a round-tripped stc is checked to re-save
        # identically.
        for _ in range(2):
            stc_new.save(fname_temp)
            stc_new = read_source_estimate(fname_temp)
            assert_true(isinstance(stc_new, VolSourceEstimate))
            assert_array_equal(vertno_read, stc_new.vertno)
            assert_array_almost_equal(stc.data, stc_new.data)

    # now let's actually read a MNE-C processed file
    stc = read_source_estimate(fname_vol, 'sample')
    assert_true(isinstance(stc, VolSourceEstimate))
    assert_true('sample' in repr(stc))
    stc_new = stc
    assert_raises(ValueError, stc.save, fname_vol, ftype='whatever')
    # Same double round-trip, this time in 'w' format.
    for _ in range(2):
        fname_temp = op.join(tempdir, 'temp-vol.w')
        stc_new.save(fname_temp, ftype='w')
        stc_new = read_source_estimate(fname_temp)
        assert_true(isinstance(stc_new, VolSourceEstimate))
        assert_array_equal(stc.vertno, stc_new.vertno)
        assert_array_almost_equal(stc.data, stc_new.data)

    # save the stc as a nifti file and export
    try:
        import nibabel as nib
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always')
            src = read_source_spaces(fname_vsrc)
        vol_fname = op.join(tempdir, 'stc.nii.gz')
        # Source-space resolution export.
        stc.save_as_volume(vol_fname, src,
                           dest='surf', mri_resolution=False)
        with warnings.catch_warnings(record=True):  # nib<->numpy
            img = nib.load(vol_fname)
        assert_true(img.shape == src[0]['shape'] + (len(stc.times),))

        with warnings.catch_warnings(record=True):  # nib<->numpy
            t1_img = nib.load(fname_t1)
        # MRI-resolution export must match the T1's geometry.
        stc.save_as_volume(op.join(tempdir, 'stc.nii.gz'), src,
                           dest='mri', mri_resolution=True)
        with warnings.catch_warnings(record=True):  # nib<->numpy
            img = nib.load(vol_fname)
        assert_true(img.shape == t1_img.shape + (len(stc.times),))
        assert_array_almost_equal(img.get_affine(), t1_img.get_affine(),
                                  decimal=5)

        # export without saving
        img = stc.as_volume(src, dest='mri', mri_resolution=True)
        assert_true(img.shape == t1_img.shape + (len(stc.times),))
        assert_array_almost_equal(img.get_affine(), t1_img.get_affine(),
                                  decimal=5)
    except ImportError:
        print('Save as nifti test skipped, needs NiBabel')
@sample.requires_sample_data
def test_expand():
    """Test rebuilding a label-restricted stc via expand()."""
    stc = read_source_estimate(fname, 'sample')
    assert_true('sample' in repr(stc))
    labels_lh = read_labels_from_annot('sample', 'aparc', 'lh',
                                       subjects_dir=subjects_dir)

    # restrict to two labels, then rebuild by expanding each label back
    combined = labels_lh[0] + labels_lh[1]
    stc_limited = stc.in_label(combined)
    stc_new = stc_limited.copy()
    stc_new.data.fill(0)
    for label in labels_lh[:2]:
        stc_new += stc.in_label(label).expand(stc_limited.vertno)

    # addition must fail when the vertno don't agree
    assert_raises(ValueError, stc.__add__, stc.in_label(labels_lh[0]))
def _fake_stc(n_time=10):
    """Build a random 100-vertex surface SourceEstimate for IO tests."""
    vertices = [np.arange(10), np.arange(90)]
    data = np.random.rand(100, n_time)
    return SourceEstimate(data, vertices, 0, 1e-1, 'foo')
def test_io_stc():
    """Test that an stc survives a save/read round trip."""
    stc = _fake_stc()
    stc_path = op.join(tempdir, "tmp.stc")
    stc.save(stc_path)
    stc2 = read_source_estimate(stc_path)

    assert_array_almost_equal(stc.data, stc2.data)
    assert_array_almost_equal(stc.tmin, stc2.tmin)
    assert_equal(len(stc.vertno), len(stc2.vertno))
    for v1, v2 in zip(stc.vertno, stc2.vertno):
        assert_array_almost_equal(v1, v2)
    assert_array_almost_equal(stc.tstep, stc2.tstep)
@requires_pytables()
def test_io_stc_h5():
    """Test IO for STC files using HDF5
    """
    stc = _fake_stc()
    # unknown ftype is rejected
    assert_raises(ValueError, stc.save, op.join(tempdir, 'tmp'), ftype='foo')
    out_name = op.join(tempdir, 'tmp')
    stc.save(out_name, ftype='h5')
    # both the bare name and the fully-suffixed name must be readable
    stc3 = read_source_estimate(out_name)
    stc4 = read_source_estimate(out_name + '-stc.h5')
    # mismatching subject must be rejected
    assert_raises(RuntimeError, read_source_estimate, out_name, subject='bar')
    for stc_new in stc3, stc4:
        assert_equal(stc_new.subject, stc.subject)
        assert_array_equal(stc_new.data, stc.data)
        assert_array_equal(stc_new.tmin, stc.tmin)
        assert_array_equal(stc_new.tstep, stc.tstep)
        assert_equal(len(stc_new.vertno), len(stc.vertno))
        for v1, v2 in zip(stc_new.vertno, stc.vertno):
            assert_array_equal(v1, v2)
def test_io_w():
    """Round-trip IO for (single time point) w files."""
    stc = _fake_stc(n_time=1)  # w files hold exactly one time point
    base = op.join(tempdir, 'fake')
    stc.save(base, ftype='w')
    first = read_source_estimate(base)
    second_base = op.join(tempdir, 'tmp')
    first.save(second_base, ftype='w')
    second = read_source_estimate(second_base + '-lh.w')
    assert_array_almost_equal(first.data, second.data)
    assert_array_almost_equal(first.lh_vertno, second.lh_vertno)
    assert_array_almost_equal(first.rh_vertno, second.rh_vertno)
def test_stc_arithmetic():
    """Test arithmetic for STC files
    """
    stc = _fake_stc()
    data = stc.data.copy()

    out = list()
    # Apply the identical op sequence to the raw ndarray and the stc;
    # the results must agree element-wise.
    for a in [data, stc]:
        a = a + a * 3 + 3 * a - a ** 2 / 2

        a += a
        a -= a
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always')
            a /= 2 * a  # data is all zeros here; 0/0 warning is expected
        a *= -a

        a += 2
        a -= 1
        a *= -1
        a /= 2
        a **= 3
        out.append(a)
    assert_array_equal(out[0], out[1].data)
    assert_array_equal(stc.sqrt().data, np.sqrt(stc.data))

    # mean over time keeps a singleton time axis
    stc_mean = stc.mean()
    assert_array_equal(stc_mean.data, np.mean(stc.data, 1)[:, None])
@sample.requires_sample_data
def test_stc_methods():
    """Test stc methods lh_data, rh_data, bin(), center_of_mass(), resample()
    """
    # NOTE: deliberately shadows the module-level fname (the -lh.stc path).
    fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg')
    stc = read_source_estimate(fname)

    # lh_data / rh_data: hemisphere views of the stacked data array
    assert_array_equal(stc.lh_data, stc.data[:len(stc.lh_vertno)])
    assert_array_equal(stc.rh_data, stc.data[len(stc.lh_vertno):])

    # bin: first bin must equal the mean of the samples before .12 s
    bin = stc.bin(.12)
    a = np.array((1,), dtype=stc.data.dtype)
    a[0] = np.mean(stc.data[0, stc.times < .12])
    assert a[0] == bin.data[0, 0]

    # center_of_mass raises with data in both hemispheres; zeroing lh
    # puts the center of mass in rh (hemi == 1)
    assert_raises(ValueError, stc.center_of_mass, 'sample')
    stc.lh_data[:] = 0
    vertex, hemi, t = stc.center_of_mass('sample', subjects_dir=subjects_dir)
    assert_true(hemi == 1)
    # XXX Should design a fool-proof test case, but here were the results:
    assert_true(vertex == 90186)
    assert_true(np.round(t, 3) == 0.123)

    # resample: upsample then downsample must round-trip the data
    stc = read_source_estimate(fname)
    stc_new = deepcopy(stc)
    o_sfreq = 1.0 / stc.tstep
    # note that using no padding for this STC reduces edge ringing...
    stc_new.resample(2 * o_sfreq, npad=0, n_jobs=2)
    assert_true(stc_new.data.shape[1] == 2 * stc.data.shape[1])
    assert_true(stc_new.tstep == stc.tstep / 2)
    stc_new.resample(o_sfreq, npad=0)
    assert_true(stc_new.data.shape[1] == stc.data.shape[1])
    assert_true(stc_new.tstep == stc.tstep)
    assert_array_almost_equal(stc_new.data, stc.data, 5)
@sample.requires_sample_data
def test_extract_label_time_course():
    """Test extraction of label time courses from stc
    """
    n_stcs = 3
    n_times = 50

    src = read_inverse_operator(fname_inv)['src']
    vertices = [src[0]['vertno'], src[1]['vertno']]
    n_verts = len(vertices[0]) + len(vertices[1])

    # get some labels
    labels_lh = read_labels_from_annot('sample', hemi='lh',
                                       subjects_dir=subjects_dir)
    labels_rh = read_labels_from_annot('sample', hemi='rh',
                                       subjects_dir=subjects_dir)
    labels = list()
    labels.extend(labels_lh[:5])
    labels.extend(labels_rh[:4])

    n_labels = len(labels)

    # Label j is filled with the constant value j below, so the expected
    # per-label mean and max are both j for all time points.
    label_means = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))
    label_maxs = np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))

    # compute the mean with sign flip
    label_means_flipped = np.zeros_like(label_means)
    for i, label in enumerate(labels):
        label_means_flipped[i] = i * np.mean(label_sign_flip(label, src))

    # generate some stc's with known data
    stcs = list()
    for i in range(n_stcs):
        data = np.zeros((n_verts, n_times))
        # set the value of the stc within each label
        for j, label in enumerate(labels):
            if label.hemi == 'lh':
                idx = np.intersect1d(vertices[0], label.vertices)
                idx = np.searchsorted(vertices[0], idx)
            elif label.hemi == 'rh':
                # rh rows are offset by the lh vertex count
                idx = np.intersect1d(vertices[1], label.vertices)
                idx = len(vertices[0]) + np.searchsorted(vertices[1], idx)
            data[idx] = label_means[j]

        this_stc = SourceEstimate(data, vertices, 0, 1)
        stcs.append(this_stc)

    # test some invalid inputs
    assert_raises(ValueError, extract_label_time_course, stcs, labels,
                  src, mode='notamode')

    # have an empty label
    empty_label = labels[0].copy()
    empty_label.vertices += 1000000
    assert_raises(ValueError, extract_label_time_course, stcs, empty_label,
                  src, mode='mean')

    # but this works:
    tc = extract_label_time_course(stcs, empty_label, src, mode='mean',
                                   allow_empty=True)
    for arr in tc:
        assert_true(arr.shape == (1, n_times))
        assert_array_equal(arr, np.zeros((1, n_times)))

    # test the different modes
    modes = ['mean', 'mean_flip', 'pca_flip', 'max']

    for mode in modes:
        # free function and method must agree
        label_tc = extract_label_time_course(stcs, labels, src, mode=mode)
        label_tc_method = [stc.extract_label_time_course(labels, src,
                           mode=mode) for stc in stcs]
        assert_true(len(label_tc) == n_stcs)
        assert_true(len(label_tc_method) == n_stcs)
        for tc1, tc2 in zip(label_tc, label_tc_method):
            assert_true(tc1.shape == (n_labels, n_times))
            assert_true(tc2.shape == (n_labels, n_times))
            assert_true(np.allclose(tc1, tc2, rtol=1e-8, atol=1e-16))
        if mode == 'mean':
            assert_array_almost_equal(tc1, label_means)
        if mode == 'mean_flip':
            assert_array_almost_equal(tc1, label_means_flipped)
        if mode == 'max':
            assert_array_almost_equal(tc1, label_maxs)

    # test label with very few vertices (check SVD conditionals)
    label = Label(vertices=src[0]['vertno'][:2], hemi='lh')
    x = label_sign_flip(label, src)
    assert_true(len(x) == 2)
    label = Label(vertices=[], hemi='lh')
    x = label_sign_flip(label, src)
    assert_true(x.size == 0)
@sample.requires_sample_data
def test_morph_data():
    """Test morphing of data
    """
    subject_from = 'sample'
    subject_to = 'fsaverage'
    fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg')
    stc_from = read_source_estimate(fname, subject='sample')
    fname = op.join(data_path, 'MEG', 'sample', 'fsaverage_audvis-meg')
    stc_to = read_source_estimate(fname)
    # make sure we can specify grade
    stc_from.crop(0.09, 0.1)  # for faster computation
    stc_to.crop(0.09, 0.1)  # for faster computation
    stc_to1 = stc_from.morph(subject_to, grade=3, smooth=12, buffer_size=1000,
                             subjects_dir=subjects_dir)
    stc_to1.save(op.join(tempdir, '%s_audvis-meg' % subject_to))
    # make sure we can specify vertices
    vertices_to = grade_to_vertices(subject_to, grade=3,
                                    subjects_dir=subjects_dir)
    stc_to2 = morph_data(subject_from, subject_to, stc_from,
                         grade=vertices_to, smooth=12, buffer_size=1000,
                         subjects_dir=subjects_dir)
    # make sure we can use different buffer_size
    stc_to3 = morph_data(subject_from, subject_to, stc_from,
                         grade=vertices_to, smooth=12, buffer_size=3,
                         subjects_dir=subjects_dir)
    # all morph paths must agree with each other and with the reference
    assert_array_almost_equal(stc_to.data, stc_to1.data, 5)
    assert_array_almost_equal(stc_to1.data, stc_to2.data)
    assert_array_almost_equal(stc_to1.data, stc_to3.data)
    # make sure precomputed morph matrices work
    morph_mat = compute_morph_matrix(subject_from, subject_to,
                                     stc_from.vertno, vertices_to,
                                     smooth=12, subjects_dir=subjects_dir)
    stc_to3 = stc_from.morph_precomputed(subject_to, vertices_to, morph_mat)
    assert_array_almost_equal(stc_to1.data, stc_to3.data)

    # morphing should roughly preserve the mean time course
    mean_from = stc_from.data.mean(axis=0)
    mean_to = stc_to1.data.mean(axis=0)
    assert_true(np.corrcoef(mean_to, mean_from).min() > 0.999)

    # make sure we can fill by morphing
    stc_to5 = morph_data(subject_from, subject_to, stc_from, grade=None,
                         smooth=12, buffer_size=3, subjects_dir=subjects_dir)
    assert_true(stc_to5.data.shape[0] == 163842 + 163842)

    # test morphing to the same subject
    stc_to6 = stc_from.morph(subject_from, grade=stc_from.vertno, smooth=1,
                             subjects_dir=subjects_dir)
    mask = np.ones(stc_from.data.shape[0], dtype=np.bool)
    # XXX: there is a bug somewhere that causes a difference at 2 vertices..
    mask[6799] = False
    mask[6800] = False
    assert_array_almost_equal(stc_from.data[mask], stc_to6.data[mask], 5)

    # Morph sparse data
    # Make a sparse stc
    stc_from.vertno[0] = stc_from.vertno[0][[100, 500]]
    stc_from.vertno[1] = stc_from.vertno[1][[200]]
    stc_from._data = stc_from._data[:3]
    # sparse morphing requires grade=None
    assert_raises(RuntimeError, stc_from.morph, subject_to, sparse=True,
                  grade=5, subjects_dir=subjects_dir)
    stc_to_sparse = stc_from.morph(subject_to, grade=None, sparse=True,
                                   subjects_dir=subjects_dir)
    assert_array_almost_equal(np.sort(stc_from.data.sum(axis=1)),
                              np.sort(stc_to_sparse.data.sum(axis=1)))
    assert_equal(len(stc_from.rh_vertno), len(stc_to_sparse.rh_vertno))
    assert_equal(len(stc_from.lh_vertno), len(stc_to_sparse.lh_vertno))
    assert_equal(stc_to_sparse.subject, subject_to)
    # NOTE(review): these two compare stc_from against itself; likely
    # meant stc_to_sparse.tmin/tstep vs stc_from -- confirm and fix.
    assert_equal(stc_from.tmin, stc_from.tmin)
    assert_equal(stc_from.tstep, stc_from.tstep)

    # sparse morphing with one hemisphere emptied out
    stc_from.vertno[0] = np.array([], dtype=np.int64)
    stc_from._data = stc_from._data[:1]
    stc_to_sparse = stc_from.morph(subject_to, grade=None, sparse=True,
                                   subjects_dir=subjects_dir)
    assert_array_almost_equal(np.sort(stc_from.data.sum(axis=1)),
                              np.sort(stc_to_sparse.data.sum(axis=1)))
    assert_equal(len(stc_from.rh_vertno), len(stc_to_sparse.rh_vertno))
    assert_equal(len(stc_from.lh_vertno), len(stc_to_sparse.lh_vertno))
    assert_equal(stc_to_sparse.subject, subject_to)
    assert_equal(stc_from.tmin, stc_from.tmin)
    assert_equal(stc_from.tstep, stc_from.tstep)
def _my_trans(data):
"""FFT that adds an additional dimension by repeating result"""
data_t = fft(data)
data_t = np.concatenate([data_t[:, :, None], data_t[:, :, None]], axis=2)
return data_t, None
def test_transform_data():
    """Test applying linear (time) transform to data"""
    # make up some data
    n_sensors, n_vertices, n_times = 10, 20, 4
    kernel = np.random.randn(n_vertices, n_sensors)
    sens_data = np.random.randn(n_sensors, n_times)

    vertices = np.arange(n_vertices)
    data = np.dot(kernel, sens_data)

    # transform with and without vertex/time restrictions
    for idx, tmin_idx, tmax_idx in\
            zip([None, np.arange(n_vertices // 2, n_vertices)],
                [None, 1], [None, 3]):
        if idx is None:
            idx_use = slice(None, None)
        else:
            idx_use = idx

        # expected result, computed directly on the restricted array
        data_f, _ = _my_trans(data[idx_use, tmin_idx:tmax_idx])

        # dense data and the (kernel, sens_data) factored form must give
        # identical transformed output
        for stc_data in (data, (kernel, sens_data)):
            stc = VolSourceEstimate(stc_data, vertices=vertices,
                                    tmin=0., tstep=1.)
            stc_data_t = stc.transform_data(_my_trans, idx=idx,
                                            tmin_idx=tmin_idx,
                                            tmax_idx=tmax_idx)
            assert_allclose(data_f, stc_data_t)
def test_transform():
    """Test applying linear (time) transform to data"""
    # make up some data
    n_verts_lh, n_verts_rh, n_times = 10, 10, 10
    vertices = [np.arange(n_verts_lh), n_verts_lh + np.arange(n_verts_rh)]
    data = np.random.randn(n_verts_lh + n_verts_rh, n_times)
    stc = SourceEstimate(data, vertices=vertices, tmin=-0.1, tstep=0.1)

    # data_t.ndim > 2 & copy is True
    stcs_t = stc.transform(_my_trans, copy=True)
    assert_true(isinstance(stcs_t, list))
    assert_array_equal(stc.times, stcs_t[0].times)
    assert_equal(stc.vertno, stcs_t[0].vertno)

    data = np.concatenate((stcs_t[0].data[:, :, None],
                           stcs_t[1].data[:, :, None]), axis=2)
    data_t = stc.transform_data(_my_trans)
    assert_array_equal(data, data_t)  # check against stc.transform_data()

    # data_t.ndim > 2 & copy is False
    assert_raises(ValueError, stc.transform, _my_trans, copy=False)

    # data_t.ndim = 2 & copy is True
    tmp = deepcopy(stc)
    stc_t = stc.transform(np.abs, copy=True)
    assert_true(isinstance(stc_t, SourceEstimate))
    assert_array_equal(stc.data, tmp.data)  # xfrm doesn't modify original?

    # data_t.ndim = 2 & copy is False (in-place; stc is mutated below)
    times = np.round(1000 * stc.times)
    verts = np.arange(len(stc.lh_vertno),
                      len(stc.lh_vertno) + len(stc.rh_vertno), 1)
    verts_rh = stc.rh_vertno
    t_idx = [np.where(times >= -50)[0][0], np.where(times <= 500)[0][-1]]
    data_t = stc.transform_data(np.abs, idx=verts, tmin_idx=t_idx[0],
                                tmax_idx=t_idx[-1])
    stc.transform(np.abs, idx=verts, tmin=-50, tmax=500, copy=False)
    assert_true(isinstance(stc, SourceEstimate))
    # restricting to rh vertices drops lh entirely and re-bases tmin
    assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.5))
    assert_true(len(stc.vertno[0]) == 0)
    assert_equal(stc.vertno[1], verts_rh)
    assert_array_equal(stc.data, data_t)

    # second in-place pass, now only a time restriction
    times = np.round(1000 * stc.times)
    t_idx = [np.where(times >= 0)[0][0], np.where(times <= 250)[0][-1]]
    data_t = stc.transform_data(np.abs, tmin_idx=t_idx[0], tmax_idx=t_idx[-1])
    stc.transform(np.abs, tmin=0, tmax=250, copy=False)
    assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.2))
    assert_array_equal(stc.data, data_t)
@requires_sklearn
def test_spatio_temporal_tris_connectivity():
    """Test spatio-temporal connectivity from triangles"""
    tris = np.array([[0, 1, 2], [3, 4, 5]])
    connectivity = spatio_temporal_tris_connectivity(tris, 2)
    x = [1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
    components = stats.cluster_level._get_components(np.array(x), connectivity)
    # _get_components works differently now...
    old_fmt = [0, 0, -2, -2, -2, -2, 0, -2, -2, -2, -2, 1]
    new_fmt = np.array(old_fmt)
    # convert old component labeling into per-component index arrays
    new_fmt = [np.nonzero(new_fmt == v)[0]
               for v in np.unique(new_fmt[new_fmt >= 0])]
    # BUG FIX: was assert_true(len(new_fmt), len(components)), which
    # treats the second argument as a failure *message* and never
    # compares the two lengths.
    assert_equal(len(new_fmt), len(components))
    for c, n in zip(components, new_fmt):
        assert_array_equal(c, n)
@sample.requires_sample_data
def test_spatio_temporal_src_connectivity():
    """Test spatio-temporal connectivity from source spaces"""
    tris = np.array([[0, 1, 2], [3, 4, 5]])
    src = [dict(), dict()]
    connectivity = spatio_temporal_tris_connectivity(tris, 2)
    src[0]['use_tris'] = np.array([[0, 1, 2]])
    src[1]['use_tris'] = np.array([[0, 1, 2]])
    src[0]['vertno'] = np.array([0, 1, 2])
    src[1]['vertno'] = np.array([0, 1, 2])
    # tris-based and src-based construction must agree
    connectivity2 = spatio_temporal_src_connectivity(src, 2)
    assert_array_equal(connectivity.todense(), connectivity2.todense())
    # add test for dist connectivity
    src[0]['dist'] = np.ones((3, 3)) - np.eye(3)
    src[1]['dist'] = np.ones((3, 3)) - np.eye(3)
    src[0]['vertno'] = [0, 1, 2]
    src[1]['vertno'] = [0, 1, 2]
    connectivity3 = spatio_temporal_src_connectivity(src, 2, dist=2)
    assert_array_equal(connectivity.todense(), connectivity3.todense())
    # add test for source space connectivity with omitted vertices
    inverse_operator = read_inverse_operator(fname_inv)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        src_ = inverse_operator['src']
        connectivity = spatio_temporal_src_connectivity(src_, n_times=2)
        assert len(w) == 1  # omitted vertices warn exactly once
    # rows per time step must equal the total used vertices
    a = connectivity.shape[0] / 2
    b = sum([s['nuse'] for s in inverse_operator['src']])
    assert_true(a == b)
@requires_pandas
def test_as_data_frame():
    """Test stc Pandas exporter"""
    n_vert, n_times = 10, 5
    vertices = [np.arange(n_vert, dtype=np.int), np.empty(0, dtype=np.int)]
    data = np.random.randn(n_vert, n_times)
    stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
                              subject='sample')
    stc_vol = VolSourceEstimate(data, vertices=vertices[0], tmin=0, tstep=1,
                                subject='sample')
    for stc in [stc_surf, stc_vol]:
        assert_raises(ValueError, stc.as_data_frame, index=['foo', 'bar'])
        for ncat, ind in zip([1, 0], ['time', ['subject', 'time']]):
            df = stc.as_data_frame(index=ind)
            # BUG FIX: the conditional expression previously bound to the
            # comparison (`(names == ind) if isinstance(...) else [ind]`),
            # so the non-list branch asserted a truthy literal list and
            # never checked the index names. Normalize first, then compare.
            expected = ind if isinstance(ind, list) else [ind]
            assert_true(df.index.names == expected)
            assert_array_equal(df.values.T[ncat:], stc.data)
            # test that non-indexed data were present as categorial variables
            with warnings.catch_warnings(record=True):  # pandas
                # NOTE(review): this comparison has no assert and is a
                # no-op as written -- confirm intent before strengthening.
                df.reset_index().columns[:3] == ['subject', 'time']
def test_get_peak():
    """Test peak getter for surface and volume source estimates."""
    n_vert, n_times = 10, 5
    vertices = [np.arange(n_vert, dtype=np.int), np.empty(0, dtype=np.int)]
    data = np.random.randn(n_vert, n_times)
    stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1,
                              subject='sample')
    stc_vol = VolSourceEstimate(data, vertices=vertices[0], tmin=0, tstep=1,
                                subject='sample')
    for ii, stc in enumerate([stc_surf, stc_vol]):
        assert_raises(ValueError, stc.get_peak, tmin=-100)
        assert_raises(ValueError, stc.get_peak, tmax=90)
        assert_raises(ValueError, stc.get_peak, tmin=0.002, tmax=0.001)
        vert_idx, time_idx = stc.get_peak()
        vertno = np.concatenate(stc.vertno) if ii == 0 else stc.vertno
        assert_true(vert_idx in vertno)
        assert_true(time_idx in stc.times)
        ch_idx, time_idx = stc.get_peak(vert_as_index=True,
                                        time_as_index=True)
        # Check the index-mode results: previously this re-asserted on
        # ``vert_idx`` (a vertex *number* from the call above), leaving the
        # freshly returned ``ch_idx`` row index unused.
        assert_true(ch_idx < stc.data.shape[0])
        assert_true(time_idx < len(stc.times))
| |
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import webob.exc
from neutron.db import db_base_plugin_v2
from neutron.db import extradhcpopt_db as edo_db
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.tests.unit.db import test_db_base_plugin_v2
# Fully-qualified class path of the test plugin below; handed to the base
# test case so the test infrastructure loads it as the core plugin.
DB_PLUGIN_KLASS = (
    'neutron.tests.unit.extensions.test_extra_dhcp_opt.ExtraDhcpOptTestPlugin')
class ExtraDhcpOptTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
                             edo_db.ExtraDhcpOptMixin):
    """Test plugin that implements necessary calls on create/delete port for
    associating ports with extra dhcp options.
    """

    supported_extension_aliases = ["extra_dhcp_opt"]

    def create_port(self, context, port):
        """Create a port and persist its extra DHCP options atomically."""
        with context.session.begin(subtransactions=True):
            # Read the options before the super() call, which may strip
            # extension attributes from the request dict.
            edos = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
            new_port = super(ExtraDhcpOptTestPlugin, self).create_port(
                context, port)
            # Same transaction: options and port are committed together.
            self._process_port_create_extra_dhcp_opts(context, new_port, edos)
        return new_port

    def update_port(self, context, id, port):
        """Update a port and reconcile its extra DHCP options atomically."""
        with context.session.begin(subtransactions=True):
            rtn_port = super(ExtraDhcpOptTestPlugin, self).update_port(
                context, id, port)
            self._update_extra_dhcp_opts_on_port(context, id, port, rtn_port)
        return rtn_port
class ExtraDhcpOptDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
    """Base test case that boots the extra-DHCP-option test plugin."""

    def setUp(self, plugin=DB_PLUGIN_KLASS):
        super(ExtraDhcpOptDBTestCase, self).setUp(plugin=plugin)
class TestExtraDhcpOpt(ExtraDhcpOptDBTestCase):
    """Tests for creating and updating ports with extra DHCP options."""

    def _check_opts(self, expected, returned):
        """Assert ``returned`` contains exactly the options in ``expected``.

        Options are matched by name and IP version; an expected entry
        without 'ip_version' defaults to version 4.
        """
        self.assertEqual(len(expected), len(returned))
        for opt in returned:
            name = opt['opt_name']
            for exp in expected:
                if (name == exp['opt_name']
                        and opt['ip_version'] == exp.get('ip_version', 4)):
                    self.assertEqual(exp['opt_value'], opt['opt_value'])
                    break
            else:
                # Previously an unmatched option either raised NameError on
                # the unbound local ``val`` or silently compared against the
                # value left over from the previous iteration.
                self.fail('DHCP option %s was not expected' % name)

    def test_create_port_with_extradhcpopts(self):
        """Options supplied at create time are stored and returned."""
        opt_list = [{'opt_name': 'bootfile-name',
                     'opt_value': 'pxelinux.0'},
                    {'opt_name': 'server-ip-address',
                     'opt_value': '123.123.123.456'},
                    {'opt_name': 'tftp-server',
                     'opt_value': '123.123.123.123'}]
        params = {edo_ext.EXTRADHCPOPTS: opt_list,
                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}

        with self.port(**params) as port:
            self._check_opts(opt_list,
                             port['port'][edo_ext.EXTRADHCPOPTS])

    def test_create_port_with_none_extradhcpopts(self):
        """Options with a None value are dropped at create time."""
        opt_list = [{'opt_name': 'bootfile-name',
                     'opt_value': None},
                    {'opt_name': 'server-ip-address',
                     'opt_value': '123.123.123.456'},
                    {'opt_name': 'tftp-server',
                     'opt_value': '123.123.123.123'}]
        expected = [{'opt_name': 'server-ip-address',
                     'opt_value': '123.123.123.456'},
                    {'opt_name': 'tftp-server',
                     'opt_value': '123.123.123.123'}]
        params = {edo_ext.EXTRADHCPOPTS: opt_list,
                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}

        with self.port(**params) as port:
            self._check_opts(expected,
                             port['port'][edo_ext.EXTRADHCPOPTS])

    def test_create_port_with_empty_router_extradhcpopts(self):
        """An empty 'router' option value is accepted and preserved."""
        opt_list = [{'opt_name': 'router',
                     'opt_value': ''},
                    {'opt_name': 'server-ip-address',
                     'opt_value': '123.123.123.456'},
                    {'opt_name': 'tftp-server',
                     'opt_value': '123.123.123.123'}]
        params = {edo_ext.EXTRADHCPOPTS: opt_list,
                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}

        with self.port(**params) as port:
            self._check_opts(opt_list,
                             port['port'][edo_ext.EXTRADHCPOPTS])

    def test_create_port_with_extradhcpopts_ipv4_opt_version(self):
        """Explicit ip_version=4 on every option round-trips."""
        opt_list = [{'opt_name': 'bootfile-name',
                     'opt_value': 'pxelinux.0',
                     'ip_version': 4},
                    {'opt_name': 'server-ip-address',
                     'opt_value': '123.123.123.456',
                     'ip_version': 4},
                    {'opt_name': 'tftp-server',
                     'opt_value': '123.123.123.123',
                     'ip_version': 4}]
        params = {edo_ext.EXTRADHCPOPTS: opt_list,
                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}

        with self.port(**params) as port:
            self._check_opts(opt_list,
                             port['port'][edo_ext.EXTRADHCPOPTS])

    def test_create_port_with_extradhcpopts_ipv6_opt_version(self):
        """ip_version=6 options round-trip."""
        opt_list = [{'opt_name': 'bootfile-name',
                     'opt_value': 'pxelinux.0',
                     'ip_version': 6},
                    {'opt_name': 'tftp-server',
                     'opt_value': '2001:192:168::1',
                     'ip_version': 6}]
        params = {edo_ext.EXTRADHCPOPTS: opt_list,
                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}

        with self.port(**params) as port:
            self._check_opts(opt_list,
                             port['port'][edo_ext.EXTRADHCPOPTS])

    def _test_update_port_with_extradhcpopts(self, opt_list, upd_opts,
                                             expected_opts):
        """Create a port with ``opt_list``, apply ``upd_opts`` via a port
        update, and verify the result matches ``expected_opts``.
        """
        params = {edo_ext.EXTRADHCPOPTS: opt_list,
                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}

        with self.port(**params) as port:
            update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}}

            req = self.new_update_request('ports', update_port,
                                          port['port']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPOk.code, res.status_int)
            port = self.deserialize('json', res)
            self._check_opts(expected_opts,
                             port['port'][edo_ext.EXTRADHCPOPTS])

    def test_update_port_with_extradhcpopts_with_same(self):
        """Updating an existing option replaces only its value."""
        opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
                    {'opt_name': 'tftp-server',
                     'opt_value': '123.123.123.123'},
                    {'opt_name': 'server-ip-address',
                     'opt_value': '123.123.123.456'}]
        upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}]
        # Deep copy (matching the sibling tests): the previous shallow
        # ``opt_list[:]`` shared the option dicts, so mutating the expected
        # value also mutated opt_list and the port was created with the
        # post-update value, making the update a no-op.
        expected_opts = copy.deepcopy(opt_list)
        for i in expected_opts:
            if i['opt_name'] == upd_opts[0]['opt_name']:
                i['opt_value'] = upd_opts[0]['opt_value']
                break
        self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
                                                  expected_opts)

    def test_update_port_with_additional_extradhcpopt(self):
        """Updating with a new option name appends it."""
        opt_list = [{'opt_name': 'tftp-server',
                     'opt_value': '123.123.123.123'},
                    {'opt_name': 'server-ip-address',
                     'opt_value': '123.123.123.456'}]
        upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}]
        expected_opts = copy.deepcopy(opt_list)
        expected_opts.append(upd_opts[0])
        self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
                                                  expected_opts)

    def test_update_port_with_extradhcpopts(self):
        """Updating one of several options leaves the rest untouched."""
        opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
                    {'opt_name': 'tftp-server',
                     'opt_value': '123.123.123.123'},
                    {'opt_name': 'server-ip-address',
                     'opt_value': '123.123.123.456'}]
        upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}]
        expected_opts = copy.deepcopy(opt_list)
        for i in expected_opts:
            if i['opt_name'] == upd_opts[0]['opt_name']:
                i['opt_value'] = upd_opts[0]['opt_value']
                break
        self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
                                                  expected_opts)

    def test_update_port_with_extradhcpopt_delete(self):
        """Updating an option to a None value removes it."""
        opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
                    {'opt_name': 'tftp-server',
                     'opt_value': '123.123.123.123'},
                    {'opt_name': 'server-ip-address',
                     'opt_value': '123.123.123.456'}]
        upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': None}]
        expected_opts = [opt for opt in opt_list
                         if opt['opt_name'] != 'bootfile-name']
        self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
                                                  expected_opts)

    def test_update_port_without_extradhcpopt_delete(self):
        """Deleting an option that was never set is a harmless no-op."""
        opt_list = []
        upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': None}]
        expected_opts = []
        self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
                                                  expected_opts)

    def test_update_port_adding_extradhcpopts(self):
        """Options can be added to a port that had none."""
        opt_list = []
        upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
                    {'opt_name': 'tftp-server',
                     'opt_value': '123.123.123.123'},
                    {'opt_name': 'server-ip-address',
                     'opt_value': '123.123.123.456'}]
        expected_opts = copy.deepcopy(upd_opts)
        self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
                                                  expected_opts)

    def test_update_port_with_blank_string_extradhcpopt(self):
        """A whitespace-only option value is rejected with 400."""
        opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
                    {'opt_name': 'tftp-server',
                     'opt_value': '123.123.123.123'},
                    {'opt_name': 'server-ip-address',
                     'opt_value': '123.123.123.456'}]
        upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': '    '}]
        params = {edo_ext.EXTRADHCPOPTS: opt_list,
                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}

        with self.port(**params) as port:
            update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}}

            req = self.new_update_request('ports', update_port,
                                          port['port']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)

    def test_update_port_with_blank_name_extradhcpopt(self):
        """A whitespace-only option name is rejected with 400."""
        opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
                    {'opt_name': 'tftp-server',
                     'opt_value': '123.123.123.123'},
                    {'opt_name': 'server-ip-address',
                     'opt_value': '123.123.123.456'}]
        upd_opts = [{'opt_name': '     ', 'opt_value': 'pxelinux.0'}]
        params = {edo_ext.EXTRADHCPOPTS: opt_list,
                  'arg_list': (edo_ext.EXTRADHCPOPTS,)}

        with self.port(**params) as port:
            update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}}

            req = self.new_update_request('ports', update_port,
                                          port['port']['id'])
            res = req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int)

    def test_update_port_with_blank_router_extradhcpopt(self):
        """The 'router' option may be blanked out via update."""
        opt_list = [{'opt_name': 'bootfile-name',
                     'opt_value': 'pxelinux.0',
                     'ip_version': 4},
                    {'opt_name': 'tftp-server',
                     'opt_value': '123.123.123.123',
                     'ip_version': 4},
                    {'opt_name': 'router',
                     'opt_value': '123.123.123.1',
                     'ip_version': 4}]
        upd_opts = [{'opt_name': 'router',
                     'opt_value': '',
                     'ip_version': 4}]
        expected_opts = copy.deepcopy(opt_list)
        for i in expected_opts:
            if i['opt_name'] == upd_opts[0]['opt_name']:
                i['opt_value'] = upd_opts[0]['opt_value']
                break
        self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
                                                  expected_opts)

    def test_update_port_with_extradhcpopts_ipv6_change_value(self):
        """An IPv6 option's value can be updated in place."""
        opt_list = [{'opt_name': 'bootfile-name',
                     'opt_value': 'pxelinux.0',
                     'ip_version': 6},
                    {'opt_name': 'tftp-server',
                     'opt_value': '2001:192:168::1',
                     'ip_version': 6}]
        upd_opts = [{'opt_name': 'tftp-server',
                     'opt_value': '2001:192:168::2',
                     'ip_version': 6}]
        expected_opts = copy.deepcopy(opt_list)
        for i in expected_opts:
            if i['opt_name'] == upd_opts[0]['opt_name']:
                i['opt_value'] = upd_opts[0]['opt_value']
                break
        self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
                                                  expected_opts)

    def test_update_port_with_extradhcpopts_add_another_ver_opt(self):
        """The same option name may exist once per IP version."""
        opt_list = [{'opt_name': 'bootfile-name',
                     'opt_value': 'pxelinux.0',
                     'ip_version': 6},
                    {'opt_name': 'tftp-server',
                     'opt_value': '2001:192:168::1',
                     'ip_version': 6}]
        upd_opts = [{'opt_name': 'tftp-server',
                     'opt_value': '123.123.123.123',
                     'ip_version': 4}]
        expected_opts = copy.deepcopy(opt_list)
        expected_opts.extend(upd_opts)
        self._test_update_port_with_extradhcpopts(opt_list, upd_opts,
                                                  expected_opts)
| |
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class FeatureFlags(object):
    """NOTE: This class is auto generated by OpenAPI Generator.

    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Maps attribute name -> declared OpenAPI type.
    openapi_types = {
        'account_id': 'int',
        'loyalty': 'bool',
        'coupons_without_count': 'bool',
        'beta_effects': 'bool'
    }

    # Maps attribute name -> JSON key used in the API definition.
    attribute_map = {
        'account_id': 'accountId',
        'loyalty': 'loyalty',
        'coupons_without_count': 'coupons_without_count',
        'beta_effects': 'betaEffects'
    }

    def __init__(self, account_id=None, loyalty=None, coupons_without_count=None, beta_effects=None, local_vars_configuration=None):  # noqa: E501
        """FeatureFlags - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for the properties below.
        self._account_id = None
        self._loyalty = None
        self._coupons_without_count = None
        self._beta_effects = None
        self.discriminator = None

        # account_id is required; the remaining flags are optional and only
        # assigned when explicitly provided.
        self.account_id = account_id
        if loyalty is not None:
            self.loyalty = loyalty
        if coupons_without_count is not None:
            self.coupons_without_count = coupons_without_count
        if beta_effects is not None:
            self.beta_effects = beta_effects

    @property
    def account_id(self):
        """The ID of the account that owns this entity.

        :rtype: int
        """
        return self._account_id

    @account_id.setter
    def account_id(self, account_id):
        """Set account_id; required, so None is rejected when client-side
        validation is enabled.

        :type: int
        """
        if self.local_vars_configuration.client_side_validation and account_id is None:  # noqa: E501
            raise ValueError("Invalid value for `account_id`, must not be `None`")  # noqa: E501
        self._account_id = account_id

    @property
    def loyalty(self):
        """Whether the account has access to the loyalty features or not.

        :rtype: bool
        """
        return self._loyalty

    @loyalty.setter
    def loyalty(self, loyalty):
        """Set the loyalty feature flag.

        :type: bool
        """
        self._loyalty = loyalty

    @property
    def coupons_without_count(self):
        """Whether the account queries coupons with or without total result
        size.

        :rtype: bool
        """
        return self._coupons_without_count

    @coupons_without_count.setter
    def coupons_without_count(self, coupons_without_count):
        """Set the coupons_without_count feature flag.

        :type: bool
        """
        self._coupons_without_count = coupons_without_count

    @property
    def beta_effects(self):
        """Whether the account can test beta effects or not.

        :rtype: bool
        """
        return self._beta_effects

    @beta_effects.setter
    def beta_effects(self, beta_effects):
        """Set the beta_effects feature flag.

        :type: bool
        """
        self._beta_effects = beta_effects

    def to_dict(self):
        """Return the model properties as a dict."""
        def _convert(value):
            # Recursively serialize nested models inside lists and dicts.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        return {attr: _convert(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return true if both objects are equal."""
        if not isinstance(other, FeatureFlags):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Return true if both objects are not equal."""
        return not self == other
| |
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Server interface.
"""
import base64
import os
try:
import json
except ImportError:
import simplejson as json
try:
from oslo_utils import encodeutils
except ImportError:
from oslo.utils import encodeutils
import six
from six.moves.urllib import parse
from .. import base
from .. import crypto
# Reboot type constants accepted by Server.reboot() / ServerManager.reboot().
REBOOT_SOFT, REBOOT_HARD = 'SOFT', 'HARD'
class Server(base.Resource):
    """Client-side representation of a baremetal server.

    Every method is a thin delegator to the corresponding
    :class:`ServerManager` call, passing this resource as the server.
    """

    HUMAN_ID = True

    def __repr__(self):
        return '<Server: %s>' % getattr(self, 'name', 'unknown-name')

    def delete(self):
        """
        Delete (i.e. shut down and delete the image) this server.
        """
        self.manager.delete(self)

    def update(self, name=None):
        """
        Update the name for this server.

        :param name: Update the server's name.
        """
        self.manager.update(self, name=name)

    def get_console_output(self, length=None):
        """
        Get text console log output from Server.

        :param length: The number of lines you would like to retrieve (as int)
        """
        return self.manager.get_console_output(self, length)

    def get_management_console(self):
        """
        Get management console for a Server.
        """
        return self.manager.get_management_console(self)

    def get_vnc_console(self, console_type):
        """
        Get vnc console for a Server.

        :param console_type: Type of console ('novnc' or 'xvpvnc')
        """
        return self.manager.get_vnc_console(self, console_type)

    def get_spice_console(self, console_type):
        """
        Get spice console for a Server.

        :param console_type: Type of console ('spice-html5')
        """
        return self.manager.get_spice_console(self, console_type)

    def get_rdp_console(self, console_type):
        """
        Get rdp console for a Server.

        :param console_type: Type of console ('rdp-html5')
        """
        return self.manager.get_rdp_console(self, console_type)

    def get_serial_console(self, console_type):
        """
        Get serial console for a Server.

        :param console_type: Type of console ('serial')
        """
        return self.manager.get_serial_console(self, console_type)

    def get_password(self, private_key=None):
        """
        Get password for a Server.

        Returns the clear password of the baremetal if private_key is
        provided, returns the ciphered password otherwise.

        :param private_key: Path to private key file for decryption
                            (optional)
        """
        return self.manager.get_password(self, private_key)

    def clear_password(self):
        """
        Clear the stored password for a Server.
        """
        return self.manager.clear_password(self)

    def add_fixed_ip(self, network_id):
        """
        Add an IP address on a network.

        :param network_id: The ID of the network the IP should be on.
        """
        self.manager.add_fixed_ip(self, network_id)

    def add_floating_ip(self, address, fixed_address=None):
        """
        Add floating IP to a baremetal server.

        :param address: The IP address or FloatingIP to add to the baremetal
                        server
        :param fixed_address: The fixedIP address the FloatingIP is to be
               associated with (optional)
        """
        self.manager.add_floating_ip(self, address, fixed_address)

    def remove_floating_ip(self, address):
        """
        Remove floating IP from a baremetal server.

        :param address: The IP address or FloatingIP to remove
        """
        self.manager.remove_floating_ip(self, address)

    def stop(self):
        """
        Stop -- Stop the running server.
        """
        self.manager.stop(self)

    def force_delete(self):
        """
        Force delete -- Force delete a server.
        """
        self.manager.force_delete(self)

    def restore(self):
        """
        Restore -- Restore a server in 'soft-deleted' state.
        """
        self.manager.restore(self)

    def start(self):
        """
        Start -- Start the paused server.
        """
        self.manager.start(self)

    def pause(self):
        """
        Pause -- Pause the running server.
        """
        self.manager.pause(self)

    def unpause(self):
        """
        Unpause -- Unpause the paused server.
        """
        self.manager.unpause(self)

    def lock(self):
        """
        Lock -- Lock the baremetal from certain operations.
        """
        self.manager.lock(self)

    def unlock(self):
        """
        Unlock -- Remove baremetal lock.
        """
        self.manager.unlock(self)

    def suspend(self):
        """
        Suspend -- Suspend the running server.
        """
        self.manager.suspend(self)

    def resume(self):
        """
        Resume -- Resume the suspended server.
        """
        self.manager.resume(self)

    def rescue(self, password=None, image=None):
        """
        Rescue -- Rescue the problematic server.

        :param password: The admin password to be set in the rescue baremetal.
        :param image: The :class:`Image` to rescue with.
        """
        return self.manager.rescue(self, password, image)

    def unrescue(self):
        """
        Unrescue -- Unrescue the rescued server.
        """
        self.manager.unrescue(self)

    def shelve(self):
        """
        Shelve -- Shelve the server.
        """
        self.manager.shelve(self)

    def shelve_offload(self):
        """
        Shelve_offload -- Remove a shelved server from the compute node.
        """
        self.manager.shelve_offload(self)

    def unshelve(self):
        """
        Unshelve -- Unshelve the server.
        """
        self.manager.unshelve(self)

    def diagnostics(self):
        """Diagnostics -- Retrieve server diagnostics."""
        return self.manager.diagnostics(self)

    def migrate(self):
        """
        Migrate a server to a new host.
        """
        self.manager.migrate(self)

    def remove_fixed_ip(self, address):
        """
        Remove an IP address.

        :param address: The IP address to remove.
        """
        self.manager.remove_fixed_ip(self, address)

    def change_password(self, password):
        """
        Update the admin password for a server.

        :param password: string to set as the admin password on the server
        """
        self.manager.change_password(self, password)

    def reboot(self, reboot_type=REBOOT_SOFT):
        """
        Reboot the server.

        :param reboot_type: either :data:`REBOOT_SOFT` for a software-level
                reboot, or `REBOOT_HARD` for a virtual power cycle hard reboot.
        """
        self.manager.reboot(self, reboot_type)

    def rebuild(self, image, password=None, preserve_ephemeral=False,
                **kwargs):
        """
        Rebuild -- shut down and then re-image -- this server.

        :param image: the :class:`Image` (or its ID) to re-image with.
        :param password: string to set as the admin password on the rebuilt
                         server.
        :param preserve_ephemeral: If True, request that any ephemeral device
            be preserved when rebuilding the baremetal. Defaults to False.
        """
        return self.manager.rebuild(self, image, password=password,
                                    preserve_ephemeral=preserve_ephemeral,
                                    **kwargs)

    def resize(self, flavor, **kwargs):
        """
        Resize the server's resources.

        :param flavor: the :class:`Flavor` (or its ID) to resize to.

        Until a resize event is confirmed with :meth:`confirm_resize`, the old
        server will be kept around and you'll be able to roll back to the old
        flavor quickly with :meth:`revert_resize`. All resizes are
        automatically confirmed after 24 hours.
        """
        self.manager.resize(self, flavor, **kwargs)

    def create_image(self, image_name, metadata=None):
        """
        Create an image based on this server.

        :param image_name: The name to assign the newly create image.
        :param metadata: Metadata to assign to the image.
        """
        return self.manager.create_image(self, image_name, metadata)

    def backup(self, backup_name, backup_type, rotation):
        """
        Backup a baremetal server.

        :param backup_name: Name of the backup image
        :param backup_type: The backup type, like 'daily' or 'weekly'
        :param rotation: Int parameter representing how many backups to
                         keep around.
        """
        self.manager.backup(self, backup_name, backup_type, rotation)

    def confirm_resize(self):
        """
        Confirm that the resize worked, thus removing the original server.
        """
        self.manager.confirm_resize(self)

    def revert_resize(self):
        """
        Revert a previous resize, switching back to the old server.
        """
        self.manager.revert_resize(self)

    @property
    def networks(self):
        """
        Generate a simplified list of addresses per network label.
        """
        networks = {}
        try:
            for network_label, address_list in self.addresses.items():
                networks[network_label] = [a['addr'] for a in address_list]
            return networks
        except Exception:
            # NOTE(review): deliberately best-effort -- any failure (missing
            # 'addresses' attribute, unexpected shape) yields an empty dict
            # rather than propagating.
            return {}

    def live_migrate(self, host=None,
                     block_migration=False,
                     disk_over_commit=False):
        """
        Migrates a running baremetal to a new machine.
        """
        self.manager.live_migrate(self, host,
                                  block_migration,
                                  disk_over_commit)

    def reset_state(self, state='error'):
        """
        Reset the state of a baremetal to active or error.
        """
        self.manager.reset_state(self, state)

    def reset_network(self):
        """
        Reset network of a baremetal server.
        """
        self.manager.reset_network(self)

    def add_security_group(self, security_group):
        """
        Add a security group to a baremetal server.
        """
        self.manager.add_security_group(self, security_group)

    def remove_security_group(self, security_group):
        """
        Remove a security group from a baremetal server.
        """
        self.manager.remove_security_group(self, security_group)

    def list_security_group(self):
        """
        List security group(s) of a baremetal server.
        """
        return self.manager.list_security_group(self)

    def evacuate(self, host=None, on_shared_storage=True, password=None):
        """
        Evacuate a baremetal server from a failed host to a specified host.

        :param host: Name of the target host
        :param on_shared_storage: Specifies whether baremetal snapshot located
                                  on shared storage
        :param password: string to set as admin password on the evacuated
                         server.
        """
        return self.manager.evacuate(self, host, on_shared_storage, password)

    def interface_list(self):
        """
        List interfaces attached to a baremetal server.
        """
        return self.manager.interface_list(self)

    def interface_attach(self, port_id, net_id, fixed_ip):
        """
        Attach a network interface to a baremetal server.
        """
        return self.manager.interface_attach(self, port_id, net_id, fixed_ip)

    def interface_detach(self, port_id):
        """
        Detach a network interface from a baremetal server.
        """
        return self.manager.interface_detach(self, port_id)
class ServerManager(base.BootingManagerWithFind):
resource_class = Server
def _boot(self, resource_url, response_key, name, image, flavor,
meta=None,
files=None,
userdata=None,
return_raw=False,
key_name=None,
availability_zone=None,
nics=None,
admin_pass=None,
disk_config=None, **kwargs):
"""
Create (boot) a new baremetal server.
"""
body = {
"server": {
"name": name,
# "imageRef": str(base.getid(image)) if image else '',
"flavorRef": str(base.getid(flavor)),
}
}
image = str(base.getid(image))
if image:
body['server'].update({'imageRef': image})
if userdata:
if os.path.exists(userdata):
with open(userdata, "r") as fuserdata:
userdata = fuserdata.read()
if six.PY3:
userdata = userdata.encode("utf-8")
else:
userdata = encodeutils.safe_encode(userdata)
userdata_b64 = base64.b64encode(userdata).decode('utf-8')
body["server"]["user_data"] = userdata_b64
if meta:
body["server"]["metadata"] = meta
# if reservation_id:
# body["server"]["reservation_id"] = reservation_id
if key_name:
body["server"]["key_name"] = key_name
# if scheduler_hints:
# body['os:scheduler_hints'] = scheduler_hints
# if config_drive:
# body["server"]["config_drive"] = config_drive
if admin_pass:
body["server"]["adminPass"] = admin_pass
# if not min_count:
# min_count = 1
# if not max_count:
# max_count = min_count
# body["server"]["min_count"] = min_count
# body["server"]["max_count"] = max_count
# if security_groups:
# body["server"]["security_groups"] = [{'name': sg}
# for sg in security_groups]
# Files are a slight bit tricky. They're passed in a "personality"
# list to the POST. Each item is a dict giving a file name and the
# base64-encoded contents of the file. We want to allow passing
# either an open file *or* some contents as files here.
if files:
personality = body['server']['personality'] = []
for filepath, file_or_string in sorted(files.items(),
key=lambda x: x[0]):
if hasattr(file_or_string, 'read'):
data = file_or_string.read()
else:
data = file_or_string
if six.PY3 and isinstance(data, str):
data = data.encode('utf-8')
cont = base64.b64encode(data).decode('utf-8')
personality.append({
'path': filepath,
'contents': cont,
})
if availability_zone:
body["server"]["availability_zone"] = availability_zone
# Block device mappings are passed as a list of dictionaries
# if block_device_mapping:
# body['server']['block_device_mapping'] = \
# self._parse_block_device_mapping(block_device_mapping)
# elif block_device_mapping_v2:
# body['server']['block_device_mapping_v2'] = block_device_mapping_v2
# if nics is not None:
# # NOTE(tr3buchet): nics can be an empty list
# all_net_data = []
# for nic_info in nics:
# net_data = {}
# # if value is empty string, do not send value in body
# if nic_info.get('net-id'):
# net_data['uuid'] = nic_info['net-id']
# if (nic_info.get('v4-fixed-ip') and
# nic_info.get('v6-fixed-ip')):
# raise base.exceptions.CommandError(_(
# "Only one of 'v4-fixed-ip' and 'v6-fixed-ip' may be"
# " provided."))
# elif nic_info.get('v4-fixed-ip'):
# net_data['fixed_ip'] = nic_info['v4-fixed-ip']
# elif nic_info.get('v6-fixed-ip'):
# net_data['fixed_ip'] = nic_info['v6-fixed-ip']
# if nic_info.get('port-id'):
# net_data['port'] = nic_info['port-id']
# all_net_data.append(net_data)
# body['server']['networks'] = all_net_data
if nics is not None:
body['server']['networks'] = nics
if disk_config is not None:
disk_config_dict = json.loads(disk_config)
# body['server']['OS-DCF:diskConfig'] = disk_config
for k, v in disk_config_dict.items():
body['server'][k] = v
return self._create(resource_url, body, response_key,
return_raw=return_raw, **kwargs)
def get(self, server):
"""
Get a server.
:param server: ID of the :class:`Server` to get.
:rtype: :class:`Server`
"""
return self._get("/servers/%s" % base.getid(server), "server")
def list(self, detailed=True, search_opts=None, marker=None, limit=None,
sort_keys=None, sort_dirs=None):
"""
Get a list of baremetal servers.
:param detailed: Whether to return detailed server info (optional).
:param search_opts: Search options to filter out servers (optional).
:param marker: Begin returning servers that appear later in the server
list than that represented by this server id (optional).
:param limit: Maximum number of servers to return (optional).
:param sort_keys: List of sort keys
:param sort_dirs: List of sort directions
:rtype: list of :class:`Server`
"""
if search_opts is None:
search_opts = {}
qparams = {}
for opt, val in six.iteritems(search_opts):
if val:
qparams[opt] = val
if marker:
qparams['marker'] = marker
if limit:
qparams['limit'] = limit
# Transform the dict to a sequence of two-element tuples in fixed
# order, then the encoded string will be consistent in Python 2&3.
if qparams or sort_keys or sort_dirs:
# sort keys and directions are unique since the same parameter
# key is repeated for each associated value
# (ie, &sort_key=key1&sort_key=key2&sort_key=key3)
items = list(qparams.items())
if sort_keys:
items.extend(('sort_key', sort_key) for sort_key in sort_keys)
if sort_dirs:
items.extend(('sort_dir', sort_dir) for sort_dir in sort_dirs)
new_qparams = sorted(items, key=lambda x: x[0])
query_string = "?%s" % parse.urlencode(new_qparams)
else:
query_string = ""
detail = ""
if detailed:
detail = "/detail"
return self._list("/servers%s%s" % (detail, query_string), "servers")
def add_fixed_ip(self, server, network_id):
"""
Add an IP address on a network.
:param server: The :class:`Server` (or its ID) to add an IP to.
:param network_id: The ID of the network the IP should be on.
"""
self._action('addFixedIp', server, {'networkId': network_id})
def remove_fixed_ip(self, server, address):
"""
Remove an IP address.
:param server: The :class:`Server` (or its ID) to add an IP to.
:param address: The IP address to remove.
"""
self._action('removeFixedIp', server, {'address': address})
def add_floating_ip(self, server, address, fixed_address=None):
"""
Add a floating IP to an baremetal
:param server: The :class:`Server` (or its ID) to add an IP to.
:param address: The FloatingIP or string floating address to add.
:param fixed_address: The FixedIP the floatingIP should be
associated with (optional)
"""
address = address.ip if hasattr(address, 'ip') else address
if fixed_address:
if hasattr(fixed_address, 'ip'):
fixed_address = fixed_address.ip
self._action('addFloatingIp', server,
{'address': address, 'fixed_address': fixed_address})
else:
self._action('addFloatingIp', server, {'address': address})
def remove_floating_ip(self, server, address):
"""
Remove a floating IP address.
:param server: The :class:`Server` (or its ID) to remove an IP from.
:param address: The FloatingIP or string floating address to remove.
"""
address = address.ip if hasattr(address, 'ip') else address
self._action('removeFloatingIp', server, {'address': address})
def get_management_console(self, server):
"""
Get a management console for an baremetal
:param server: The :class:`Server` (or its ID) to add an IP to.
"""
return self._action('os-getManagementConsole', server)[1]
def get_vnc_console(self, server, console_type):
"""
Get a vnc console for an baremetal
:param server: The :class:`Server` (or its ID) to add an IP to.
:param console_type: Type of vnc console to get ('novnc' or 'xvpvnc')
"""
return self._action('os-getVNCConsole', server,
{'type': console_type})[1]
def get_spice_console(self, server, console_type):
"""
Get a spice console for an baremetal
:param server: The :class:`Server` (or its ID) to add an IP to.
:param console_type: Type of spice console to get ('spice-html5')
"""
return self._action('os-getSPICEConsole', server,
{'type': console_type})[1]
def get_rdp_console(self, server, console_type):
"""
Get a rdp console for an baremetal
:param server: The :class:`Server` (or its ID) to add an IP to.
:param console_type: Type of rdp console to get ('rdp-html5')
"""
return self._action('os-getRDPConsole', server,
{'type': console_type})[1]
def get_serial_console(self, server, console_type):
"""
Get a serial console for an baremetal
:param server: The :class:`Server` (or its ID) to add an IP to.
:param console_type: Type of serial console to get ('serial')
"""
return self._action('os-getSerialConsole', server,
{'type': console_type})[1]
def get_password(self, server, private_key=None):
"""
Get admin password of an baremetal
Returns the admin password of an baremetal in the clear if private_key
is provided, returns the ciphered password otherwise.
Requires that openssl is installed and in the path
:param server: The :class:`Server` (or its ID) for which the admin
password is to be returned
:param private_key: The private key to decrypt password
(optional)
"""
_resp, body = self.api.client.get("/servers/%s/os-server-password"
% base.getid(server))
ciphered_pw = body.get('password', '') if body else ''
if private_key and ciphered_pw:
try:
return crypto.decrypt_password(private_key, ciphered_pw)
except Exception as exc:
return '%sFailed to decrypt:\n%s' % (exc, ciphered_pw)
return ciphered_pw
def clear_password(self, server):
"""
Clear the admin password of an baremetal
Remove the admin password for an baremetal from the metadata server.
:param server: The :class:`Server` (or its ID) for which the admin
password is to be cleared
"""
return self._delete("/servers/%s/os-server-password"
% base.getid(server))
def stop(self, server, type):
"""
Stop the server.
:param server: The :class:`Server` (or its ID) to share onto.
:param type: Server shutdown mode(HARD or SOFT).
Default value is SOFT.
"""
self._action('os-stop', server, {'type': type})
def force_delete(self, server):
"""
Force delete the server.
"""
return self._action('forceDelete', server, None)
def restore(self, server):
"""
Restore soft-deleted server.
"""
return self._action('restore', server, None)
def start(self, server):
"""
Start the server.
"""
self._action('os-start', server, None)
def pause(self, server):
"""
Pause the server.
"""
self._action('pause', server, None)
def unpause(self, server):
"""
Unpause the server.
"""
self._action('unpause', server, None)
def lock(self, server):
"""
Lock the server.
"""
self._action('lock', server, None)
def unlock(self, server):
"""
Unlock the server.
"""
self._action('unlock', server, None)
def suspend(self, server):
"""
Suspend the server.
"""
self._action('suspend', server, None)
def resume(self, server):
"""
Resume the server.
"""
self._action('resume', server, None)
def rescue(self, server, password=None, image=None):
"""
Rescue the server.
:param server: The :class:`Server` to rescue.
:param password: The admin password to be set in the rescue baremetal.
:param image: The :class:`Image` to rescue with.
"""
info = {}
if password:
info['adminPass'] = password
if image:
info['rescue_image_ref'] = base.getid(image)
return self._action('rescue', server, info or None)
def unrescue(self, server):
"""
Unrescue the server.
"""
self._action('unrescue', server, None)
def shelve(self, server):
"""
Shelve the server.
"""
self._action('shelve', server, None)
def shelve_offload(self, server):
"""
Remove a shelved baremetal from the compute node.
"""
self._action('shelveOffload', server, None)
def unshelve(self, server):
"""
Unshelve the server.
"""
self._action('unshelve', server, None)
def diagnostics(self, server):
"""Retrieve server diagnostics."""
return self.api.client.get("/servers/%s/diagnostics" %
base.getid(server))
def create(self, name, image, flavor, meta=None, files=None,
reservation_id=None, min_count=None,
max_count=None, security_groups=None, userdata=None,
key_name=None, availability_zone=None,
block_device_mapping=None, block_device_mapping_v2=None,
nics=None, scheduler_hints=None,
config_drive=None, disk_config=None, **kwargs):
# TODO(anthony): indicate in doc string if param is an extension
# and/or optional
"""
Create (boot) a new server.
:param name: Something to name the server.
:param image: The :class:`Image` to boot with.
:param flavor: The :class:`Flavor` to boot onto.
:param meta: A dict of arbitrary key/value metadata to store for this
server. A maximum of five entries is allowed, and both
keys and values must be 255 characters or less.
:param files: A dict of files to overrwrite on the server upon boot.
Keys are file names (i.e. ``/etc/passwd``) and values
are the file contents (either as a string or as a
file-like object). A maximum of five entries is allowed,
and each file must be 10k or less.
:param reservation_id: a UUID for the set of servers being requested.
:param min_count: (optional extension) The minimum number of
servers to launch.
:param max_count: (optional extension) The maximum number of
servers to launch.
:param security_groups: A list of security group names
:param userdata: user data to pass to be exposed by the metadata
server this can be a file type object as well or a
string.
:param key_name: (optional extension) name of previously created
keypair to inject into the baremetal.
:param availability_zone: Name of the availability zone for baremetal
placement.
:param block_device_mapping: (optional extension) A dict of block
device mappings for this server.
:param block_device_mapping_v2: (optional extension) A dict of block
device mappings for this server.
:param nics: (optional extension) an ordered list of nics to be
added to this server, with information about
connected networks, fixed IPs, port etc.
:param scheduler_hints: (optional extension) arbitrary key-value pairs
specified by the client to help boot an baremetal
:param config_drive: (optional extension) value for config drive
either boolean, or volume-id
:param disk_config: (optional extension) control how the disk is
partitioned when the server is created. possible
values are 'AUTO' or 'MANUAL'.
"""
if not min_count:
min_count = 1
if not max_count:
max_count = min_count
if min_count > max_count:
min_count = max_count
boot_args = [name, image, flavor]
boot_kwargs = dict(
meta=meta, files=files, userdata=userdata,
reservation_id=reservation_id, min_count=min_count,
max_count=max_count, security_groups=security_groups,
key_name=key_name, availability_zone=availability_zone,
scheduler_hints=scheduler_hints, config_drive=config_drive,
disk_config=disk_config, **kwargs)
if block_device_mapping:
resource_url = "/os-volumes_boot"
boot_kwargs['block_device_mapping'] = block_device_mapping
elif block_device_mapping_v2:
resource_url = "/os-volumes_boot"
boot_kwargs['block_device_mapping_v2'] = block_device_mapping_v2
else:
resource_url = "/servers"
if nics:
boot_kwargs['nics'] = nics
response_key = "server"
return self._boot(resource_url, response_key, *boot_args,
**boot_kwargs)
def update(self, server, name=None):
"""
Update the name or the password for a server.
:param server: The :class:`Server` (or its ID) to update.
:param name: Update the server's name.
"""
if name is None:
return
body = {
"server": {
"name": name,
},
}
return self._update("/servers/%s" % base.getid(server), body, "server")
def change_password(self, server, password):
"""
Update the password for a server.
"""
self._action("changePassword", server, {"adminPass": password})
def delete(self, server):
"""
Delete (i.e. shut down and delete the image) this server.
"""
self._delete("/servers/%s" % base.getid(server))
def reboot(self, server, reboot_type=REBOOT_SOFT):
"""
Reboot a server.
:param server: The :class:`Server` (or its ID) to share onto.
:param reboot_type: either :data:`REBOOT_SOFT` for a software-level
reboot, or `REBOOT_HARD` for a virtual power cycle hard reboot.
"""
self._action('reboot', server, {'type': reboot_type})
def update_boot_mode(self, server, reboot_type, boot_mode):
"""
Update baremetal server bootmode.
:param server: The :class:`Server` (or its ID) to share onto.
:param reboot_type: either :data:`REBOOT_SOFT` for a software-level
reboot, or `REBOOT_HARD` for a virtual power cycle hard reboot.
:param boot_mode: Server boot mode(LEGACY, DISK, PXE or ISO).
"""
self._action('boot_mode', server, {'type': reboot_type, 'boot_mode': boot_mode})
def rebuild(self, server, image, password=None, disk_config=None,
preserve_ephemeral=False, name=None, meta=None, files=None,
**kwargs):
"""
Rebuild -- shut down and then re-image -- a server.
:param server: The :class:`Server` (or its ID) to share onto.
:param image: the :class:`Image` (or its ID) to re-image with.
:param password: string to set as password on the rebuilt server.
:param disk_config: partitioning mode to use on the rebuilt server.
Valid values are 'AUTO' or 'MANUAL'
:param preserve_ephemeral: If True, request that any ephemeral device
be preserved when rebuilding the baremetal. Defaults to False.
:param name: Something to name the server.
:param meta: A dict of arbitrary key/value metadata to store for this
server. A maximum of five entries is allowed, and both
keys and values must be 255 characters or less.
:param files: A dict of files to overwrite on the server upon boot.
Keys are file names (i.e. ``/etc/passwd``) and values
are the file contents (either as a string or as a
file-like object). A maximum of five entries is allowed,
and each file must be 10k or less.
"""
body = {'imageRef': base.getid(image)}
if password is not None:
body['adminPass'] = password
if disk_config is not None:
body['OS-DCF:diskConfig'] = disk_config
if preserve_ephemeral is not False:
body['preserve_ephemeral'] = True
if name is not None:
body['name'] = name
if meta:
body['metadata'] = meta
if files:
personality = body['personality'] = []
for filepath, file_or_string in sorted(files.items(),
key=lambda x: x[0]):
if hasattr(file_or_string, 'read'):
data = file_or_string.read()
else:
data = file_or_string
cont = base64.b64encode(data.encode('utf-8')).decode('utf-8')
personality.append({
'path': filepath,
'contents': cont,
})
_resp, body = self._action('rebuild', server, body, **kwargs)
return Server(self, body['server'])
def migrate(self, server):
"""
Migrate a server to a new host.
:param server: The :class:`Server` (or its ID).
"""
self._action('migrate', server)
def resize(self, server, flavor, disk_config=None, **kwargs):
"""
Resize a server's resources.
:param server: The :class:`Server` (or its ID) to share onto.
:param flavor: the :class:`Flavor` (or its ID) to resize to.
:param disk_config: partitioning mode to use on the rebuilt server.
Valid values are 'AUTO' or 'MANUAL'
Until a resize event is confirmed with :meth:`confirm_resize`, the old
server will be kept around and you'll be able to roll back to the old
flavor quickly with :meth:`revert_resize`. All resizes are
automatically confirmed after 24 hours.
"""
info = {'flavorRef': base.getid(flavor)}
if disk_config is not None:
info['OS-DCF:diskConfig'] = disk_config
self._action('resize', server, info=info, **kwargs)
def confirm_resize(self, server):
"""
Confirm that the resize worked, thus removing the original server.
:param server: The :class:`Server` (or its ID) to share onto.
"""
self._action('confirmResize', server)
def revert_resize(self, server):
"""
Revert a previous resize, switching back to the old server.
:param server: The :class:`Server` (or its ID) to share onto.
"""
self._action('revertResize', server)
def create_image(self, server, image_name, metadata=None):
"""
Snapshot a server.
:param server: The :class:`Server` (or its ID) to share onto.
:param image_name: Name to give the snapshot image
:param metadata: Metadata to give newly-created image entity
"""
body = {'name': image_name, 'metadata': metadata or {}}
resp = self._action('createImage', server, body)[0]
location = resp.headers['location']
image_uuid = location.split('/')[-1]
return image_uuid
def backup(self, server, backup_name, backup_type, rotation):
"""
Backup a baremetal server.
:param server: The :class:`Server` (or its ID) to share onto.
:param backup_name: Name of the backup image
:param backup_type: The backup type, like 'daily' or 'weekly'
:param rotation: Int parameter representing how many backups to
keep around.
"""
body = {'name': backup_name,
'backup_type': backup_type,
'rotation': rotation}
self._action('createBackup', server, body)
def set_meta(self, server, metadata):
"""
Set a servers metadata
:param server: The :class:`Server` to add metadata to
:param metadata: A dict of metadata to add to the server
"""
body = {'metadata': metadata}
return self._create("/servers/%s/metadata" % base.getid(server),
body, "metadata")
def set_meta_item(self, server, key, value):
"""
Updates an item of server metadata
:param server: The :class:`Server` to add metadata to
:param key: metadata key to update
:param value: string value
"""
body = {'meta': {key: value}}
return self._update("/servers/%s/metadata/%s" %
(base.getid(server), key), body)
def set_meta_update_all(self, server, metadata_dict):
"""
Updates an item of server metadata
:param server: The :class:`Server` to add metadata to
:param key: metadata key to update
:param value: string value
"""
body = {'metadata': metadata_dict}
return self._update("/servers/%s/metadata" % base.getid(server), body)
def get_console_output(self, server, length=None):
"""
Get text console log output from Server.
:param server: The :class:`Server` (or its ID) whose console output
you would like to retrieve.
:param length: The number of tail loglines you would like to retrieve.
"""
return self._action('os-getConsoleOutput',
server,
{'length': length})[1]['output']
def delete_meta(self, server, keys):
"""
Delete metadata from an server
:param server: The :class:`Server` to add metadata to
:param keys: A list of metadata keys to delete from the server
"""
for k in keys:
self._delete("/servers/%s/metadata/%s" % (base.getid(server), k))
def live_migrate(self, server, host, block_migration, disk_over_commit):
"""
Migrates a running baremetal to a new machine.
:param server: baremetal id which comes from bear list.
:param host: destination host name.
:param block_migration: if True, do block_migration.
:param disk_over_commit: if True, Allow overcommit.
"""
self._action('os-migrateLive', server,
{'host': host,
'block_migration': block_migration,
'disk_over_commit': disk_over_commit})
def reset_state(self, server, state='error'):
"""
Reset the state of an baremetal to active or error.
:param server: ID of the baremetal to reset the state of.
:param state: Desired state; either 'active' or 'error'.
Defaults to 'error'.
"""
self._action('os-resetState', server, dict(state=state))
def reset_network(self, server):
"""
Reset network of an baremetal.
"""
self._action('resetNetwork', server)
# def add_security_group(self, server, security_group):
# """
# Add a Security Group to an baremetal
# :param server: ID of the baremetal.
# :param security_group: The name of security group to add.
# """
# self._action('addSecurityGroup', server, {'name': security_group})
# def remove_security_group(self, server, security_group):
# """
# Add a Security Group to an baremetal
# :param server: ID of the baremetal.
# :param security_group: The name of security group to remove.
# """
# self._action('removeSecurityGroup', server, {'name': security_group})
# def list_security_group(self, server):
# """
# List Security Group(s) of an baremetal
# :param server: ID of the baremetal.
# """
# return self._list('/servers/%s/os-security-groups' %
# base.getid(server), 'security_groups',
# security_groups.SecurityGroup)
def evacuate(self, server, host=None, on_shared_storage=True,
password=None):
"""
Evacuate a server baremetal.
:param server: The :class:`Server` (or its ID) to share onto.
:param host: Name of the target host.
:param on_shared_storage: Specifies whether instance files located
on shared storage
:param password: string to set as password on the evacuated server.
"""
body = {'onSharedStorage': on_shared_storage}
if host is not None:
body['host'] = host
if password is not None:
body['adminPass'] = password
return self._action('evacuate', server, body)
def interface_list(self, server):
"""
List attached network interfaces
:param server: The :class:`Server` (or its ID) to query.
"""
return self._list('/servers/%s/os-interface' % base.getid(server),
'interfaceAttachments')
def interface_attach(self, server, port_id, net_id, fixed_ip):
"""
Attach a network_interface to an baremetal.
:param server: The :class:`Server` (or its ID) to attach to.
:param port_id: The port to attach.
"""
body = {'interfaceAttachment': {}}
if port_id:
body['interfaceAttachment']['port_id'] = port_id
if net_id:
body['interfaceAttachment']['net_id'] = net_id
if fixed_ip:
body['interfaceAttachment']['fixed_ips'] = [
{'ip_address': fixed_ip}]
return self._create('/servers/%s/os-interface' % base.getid(server),
body, 'interfaceAttachment')
def interface_detach(self, server, port_id):
"""
Detach a network_interface from an baremetal.
:param server: The :class:`Server` (or its ID) to detach from.
:param port_id: The port to detach.
"""
self._delete('/servers/%s/os-interface/%s' % (base.getid(server),
port_id))
def _action(self, action, server, info=None, **kwargs):
"""
Perform a server "action" -- reboot/rebuild/resize/etc.
"""
body = {action: info}
self.run_hooks('modify_body_for_action', body, **kwargs)
url = '/servers/%s/action' % base.getid(server)
return self.api.client.post(url, body=body)
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the base NDData class.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import collections
import numpy as np
from .nddata_base import NDDataBase
from ..units import Unit, Quantity
from .. import log
from ..utils.compat.odict import OrderedDict
from ..config import ConfigAlias
# Public API of this module.
__all__ = ['NDData']
# The NDData class docstring contains example code that needs the full
# astropy package, so its doctests are skipped.
__doctest_skip__ = ['NDData']
# Deprecated alias kept for backwards compatibility with the old
# configuration item location (moved in astropy 0.4).
WARN_UNSUPPORTED_CORRELATED = ConfigAlias(
    '0.4', 'WARN_UNSUPPORTED_CORRELATED', 'warn_unsupported_correlated',
    'astropy.nddata.nddata', 'astropy.nddata')
class NDData(NDDataBase):
    """
    A basic class for array-based data.

    The key distinction from raw numpy arrays is the presence of
    additional metadata such as uncertainties, a mask, units,
    and/or a coordinate system.

    Parameters
    ----------
    data : `~numpy.ndarray`, `~numpy.ndarray`-like, or `NDData`
        The actual data contained in this `NDData` object. If possible, data
        will not be copied, so you should make a copy of the ``data`` before
        passing it in if that's the desired behavior.
    uncertainty : any type, optional
        Uncertainty on the data. The uncertainty *must* have a string attribute
        named ``uncertainty_type``, but there is otherwise no restriction.
    mask : `~numpy.ndarray`-like, optional
        Mask for the data. The values must be ``False`` where
        the data is *valid* and ``True`` when it is not (like Numpy
        masked arrays). If ``data`` is a numpy masked array, providing
        ``mask`` here will cause the mask from the masked array to be
        ignored.
    wcs : undefined, optional
        WCS-object containing the world coordinate system for the data.
    meta : `dict`-like object, optional
        Metadata for this object. Must be dict-like but no further restriction
        is placed on meta.
    unit : `~astropy.units.UnitBase` instance or str, optional
        The units of the data. If data is an `~astropy.units.Quantity` then
        ``unit`` is set to the unit of the data; if a unit is also explicitly
        provided an error is raised.

    Notes
    -----
    The data in a `NDData` object should be accessed through the data
    attribute.
    For example::
        >>> from astropy.nddata import NDData
        >>> x = NDData([1,2,3])
        >>> x.data
        array([1, 2, 3])
    """
    def __init__(self, data, uncertainty=None, mask=None, wcs=None,
                 meta=None, unit=None):
        """Initialize from raw data, or copy attributes from another NDData
        (explicit keyword arguments override the copied values, except
        ``unit`` which must agree with the source)."""
        super(NDData, self).__init__()
        if isinstance(data, NDData):  # don't use self.__class__ (issue #4137)
            # No need to check the data because data must have successfully
            # initialized.
            self._data = data._data
            self._unit = data.unit  # must set before uncert for NDDataArray
            self.uncertainty = data.uncertainty
            self._mask = data.mask
            self._wcs = data.wcs
            self._meta = data.meta
            if uncertainty is not None:
                # Bypass the uncertainty setter deliberately — the caller's
                # value is taken verbatim.
                self._uncertainty = uncertainty
                log.info("Overwriting NDData's current uncertainty being"
                         " overwritten with specified uncertainty")
            if mask is not None:
                self._mask = mask
                log.info("Overwriting NDData's current "
                         "mask with specified mask")
            if wcs is not None:
                self._wcs = wcs
                log.info("Overwriting NDData's current wcs with specified wcs")
            if meta is not None:
                self._meta = meta
                log.info("Overwriting NDData's current meta "
                         "with specified meta")
            if unit is not None and unit is not data.unit:
                raise ValueError('Unit provided in initializer does not '
                                 'match data unit.')
        else:
            if hasattr(data, 'mask'):
                # Masked-array-like input: split it into plain data and the
                # mask (an explicit mask argument wins over the array's own).
                self._data = np.array(data.data, subok=True, copy=False)
                if mask is not None:
                    self._mask = mask
                    log.info("NDData was created with a masked array, and a "
                             "mask was explicitly provided to NDData. The "
                             "explicitly passed-in mask will be used and the "
                             "masked array's mask will be ignored.")
                else:
                    self._mask = data.mask
            elif isinstance(data, Quantity):
                # Quantity input: keep the bare values; the unit is picked
                # up from the Quantity further below.
                self._data = np.array(data.value, subok=True, copy=False)
                self._mask = mask
            elif (not hasattr(data, 'shape') or
                  not hasattr(data, '__getitem__') or
                  not hasattr(data, '__array__')):
                # Data doesn't look like a numpy array, try converting it to
                # one.
                self._data = np.array(data, subok=True, copy=False)
                # Quick check to see if what we got out looks like an array
                # rather than an object (since numpy will convert a
                # non-numerical input to an array of objects).
                if self._data.dtype == 'O':
                    raise TypeError("Could not convert data to numpy array.")
                self._mask = mask
            else:
                # Already array-like enough; store it without copying.
                self._data = data  # np.array(data, subok=True, copy=False)
                self._mask = mask
            self._wcs = wcs
            if meta is None:
                self._meta = OrderedDict()
            # NOTE(review): collections.Mapping moved to collections.abc in
            # Python 3.3 — confirm the supported Python versions.
            elif not isinstance(meta, collections.Mapping):
                raise TypeError("meta attribute must be dict-like")
            else:
                self._meta = meta
            if isinstance(data, Quantity):
                if unit is not None:
                    raise ValueError("Cannot use the unit argument when data "
                                     "is a Quantity")
                else:
                    self._unit = data.unit
            else:
                if unit is not None:
                    self._unit = Unit(unit)
                else:
                    self._unit = None
            # This must come after self's unit has been set so that the unit
            # of the uncertainty, if any, can be converted to the unit of the
            # unit of self.
            self.uncertainty = uncertainty
    def __str__(self):
        """Return the plain string form of the underlying data array."""
        return str(self.data)
    def __repr__(self):
        """Return ``ClassName(<array repr>)`` for debugging."""
        prefix = self.__class__.__name__ + '('
        body = np.array2string(self.data, separator=', ', prefix=prefix)
        return ''.join([prefix, body, ')'])
    @property
    def data(self):
        """The stored dataset (read-only)."""
        return self._data
    @property
    def mask(self):
        """Mask for the data; ``True`` marks invalid values."""
        return self._mask
    @mask.setter
    def mask(self, value):
        self._mask = value
    @property
    def unit(self):
        """Unit of the data (`~astropy.units.UnitBase` or None, read-only)."""
        return self._unit
    @property
    def wcs(self):
        """World coordinate system for the data, if any (read-only)."""
        return self._wcs
    @property
    def meta(self):
        """Dict-like metadata for this object (read-only)."""
        return self._meta
| |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
from ambari_commons import OSCheck
'''
from stacks.utils.RMFTestCase import *
from mock.mock import MagicMock, patch, call
import time
from resource_management.core import sudo
import glob
@patch.object(glob, "glob", new = MagicMock(return_value=["/tmp"]))
@patch.object(sudo, "read_file", new = MagicMock(return_value='{"interpreterSettings":[]}'))
class TestZeppelinMaster(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "ZEPPELIN/0.6.0.2.5/package"
STACK_VERSION = "2.5"
  def assert_configure_default(self):
    """Assert the exact, ordered resource sequence produced by
    Master.configure() under the default (non-kerberized) config."""
    # Log, run and install directories must exist with zeppelin ownership.
    self.assertResourceCalled('Directory', '/var/log/zeppelin',
                              owner = 'zeppelin',
                              group = 'zeppelin',
                              create_parents = True,
                              mode = 0755,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/run/zeppelin',
                              owner = 'zeppelin',
                              create_parents = True,
                              group = 'zeppelin',
                              mode = 0755,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/usr/hdp/current/zeppelin-server',
                              owner = 'zeppelin',
                              group = 'zeppelin',
                              create_parents = True,
                              mode = 0755,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Execute', ('chown', '-R', u'zeppelin:zeppelin', '/var/run/zeppelin'), sudo = True)
    # Configuration files rendered from the stack configs.
    self.assertResourceCalled('XmlConfig', 'zeppelin-site.xml',
                              owner = 'zeppelin',
                              group = 'zeppelin',
                              conf_dir = '/etc/zeppelin/conf',
                              configurations = self.getConfig()['configurations']['zeppelin-config'],
                              )
    self.assertResourceCalled('File', '/etc/zeppelin/conf/zeppelin-env.sh',
                              owner = 'zeppelin',
                              content = InlineTemplate(self.getConfig()['configurations']['zeppelin-env']['zeppelin_env_content']),
                              group = 'zeppelin',
                              )
    self.assertResourceCalled('File', '/etc/zeppelin/conf/shiro.ini',
                              owner = 'zeppelin',
                              content = InlineTemplate(self.getConfig()['configurations']['zeppelin-shiro-ini']['shiro_ini_content']),
                              group = 'zeppelin',
                              )
    self.assertResourceCalled('File', '/etc/zeppelin/conf/log4j.properties',
                              owner = u'zeppelin',
                              content = u'log4j.rootLogger = INFO, dailyfile',
                              group = u'zeppelin',
                              )
    self.assertResourceCalled('Directory', '/etc/zeppelin/conf/external-dependency-conf',
                              owner = 'zeppelin',
                              group = 'zeppelin',
                              create_parents = True,
                              mode = 0755,
                              cd_access = 'a',
                              )
  def assert_configure_secured(self):
    """Assert the resource sequence produced by Master.configure() under
    the kerberized config. NOTE(review): currently identical to
    assert_configure_default — security-specific differences show up in
    the start flow, not in configure()."""
    self.assertResourceCalled('Directory', '/var/log/zeppelin',
                              owner = 'zeppelin',
                              group = 'zeppelin',
                              create_parents = True,
                              mode = 0755,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/run/zeppelin',
                              owner = 'zeppelin',
                              create_parents = True,
                              group = 'zeppelin',
                              mode = 0755,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/usr/hdp/current/zeppelin-server',
                              owner = 'zeppelin',
                              group = 'zeppelin',
                              create_parents = True,
                              mode = 0755,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Execute', ('chown', '-R', u'zeppelin:zeppelin', '/var/run/zeppelin'), sudo = True)
    self.assertResourceCalled('XmlConfig', 'zeppelin-site.xml',
                              owner = 'zeppelin',
                              group = 'zeppelin',
                              conf_dir = '/etc/zeppelin/conf',
                              configurations = self.getConfig()['configurations']['zeppelin-config'],
                              )
    self.assertResourceCalled('File', '/etc/zeppelin/conf/zeppelin-env.sh',
                              owner = 'zeppelin',
                              content = InlineTemplate(self.getConfig()['configurations']['zeppelin-env']['zeppelin_env_content']),
                              group = 'zeppelin',
                              )
    self.assertResourceCalled('File', '/etc/zeppelin/conf/shiro.ini',
                              owner = 'zeppelin',
                              content = InlineTemplate(self.getConfig()['configurations']['zeppelin-shiro-ini']['shiro_ini_content']),
                              group = 'zeppelin',
                              )
    self.assertResourceCalled('File', '/etc/zeppelin/conf/log4j.properties',
                              owner = u'zeppelin',
                              content = u'log4j.rootLogger = INFO, dailyfile',
                              group = u'zeppelin',
                              )
    self.assertResourceCalled('Directory', '/etc/zeppelin/conf/external-dependency-conf',
                              owner = 'zeppelin',
                              group = 'zeppelin',
                              create_parents = True,
                              mode = 0755,
                              cd_access = 'a',
                              )
  def test_configure_default(self):
    """Run Master.configure() with default.json and verify the full
    resource sequence, with no extra resources."""
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/master.py",
                       classname = "Master",
                       command = "configure",
                       config_file = "default.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assert_configure_default()
    self.assertNoMoreResources()
  def test_configure_secured(self):
    """Run Master.configure() with secured.json and verify the full
    resource sequence, with no extra resources."""
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/master.py",
                       classname = "Master",
                       command = "configure",
                       config_file = "secured.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assert_configure_secured()
    self.assertNoMoreResources()
  def test_stop_secured(self):
    """Stop on a kerberized cluster: the log dir is ensured, run-dir
    ownership fixed, then the daemon stop script is invoked."""
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/master.py",
                       classname = "Master",
                       command = "stop",
                       config_file = "secured.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Directory', '/var/log/zeppelin',
                              owner = 'zeppelin',
                              group = 'zeppelin',
                              create_parents = True,
                              mode = 0755,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Execute', ('chown', '-R', u'zeppelin:zeppelin', '/var/run/zeppelin'),
                              sudo = True,
                              )
    self.assertResourceCalled('Execute', '/usr/hdp/current/zeppelin-server/bin/zeppelin-daemon.sh stop >> /var/log/zeppelin/zeppelin-setup.log',
                              user = 'zeppelin',
                              )
    self.assertNoMoreResources()
  def test_stop_default(self):
    """Stop with the default config: same flow as the secured stop —
    ensure log dir, fix run-dir ownership, invoke the stop script."""
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/master.py",
                       classname = "Master",
                       command = "stop",
                       config_file = "default.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Directory', '/var/log/zeppelin',
                              owner = 'zeppelin',
                              group = 'zeppelin',
                              create_parents = True,
                              mode = 0755,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Execute', ('chown', '-R', u'zeppelin:zeppelin', '/var/run/zeppelin'),
                              sudo = True,
                              )
    self.assertResourceCalled('Execute', '/usr/hdp/current/zeppelin-server/bin/zeppelin-daemon.sh stop >> /var/log/zeppelin/zeppelin-setup.log',
                              user = 'zeppelin',
                              )
    self.assertNoMoreResources()
  def test_start_default(self):
    """Start with the default config: expects the full configure()
    sequence followed by a recursive chown of /etc/zeppelin.
    NOTE(review): unlike the stop tests, this does not assert the daemon
    start command nor call assertNoMoreResources() — any trailing
    resources are left unverified."""
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/master.py",
                       classname = "Master",
                       command = "start",
                       config_file = "default.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assert_configure_default()
    self.assertResourceCalled('Execute', ('chown', '-R', u'zeppelin:zeppelin', '/etc/zeppelin'),
                              sudo = True,
                              )
  @patch('os.path.exists', return_value = True)
  def test_start_secured(self, os_path_exists_mock):
    """Start on a Kerberized cluster.

    Expected sequence: configure, fix ownership, kinit as the zeppelin
    principal, create the HDFS directories/files, write interpreter.json,
    then restart the daemon.
    """
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/master.py",
                       classname = "Master",
                       command = "start",
                       config_file = "secured.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assert_configure_secured()
    self.assertResourceCalled('Execute', ('chown', '-R', u'zeppelin:zeppelin', '/etc/zeppelin'),
                              sudo = True,
                              )
    self.assertResourceCalled('Execute', ('chown', '-R', 'zeppelin:zeppelin', '/usr/hdp/current/zeppelin-server/notebook'),
                              sudo = True,
                              )
    # Kerberos ticket is obtained before any HDFS interaction.
    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/zeppelin.server.kerberos.keytab zeppelin@EXAMPLE.COM; ',
                              user = 'zeppelin',
                              )
    # User home directory in HDFS, owned by zeppelin.
    self.assertResourceCalled('HdfsResource', '/user/zeppelin',
                              security_enabled = True,
                              hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
                              keytab = UnknownConfigurationMock(),
                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
                              hdfs_resource_ignore_file = '/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                              hdfs_site = {u'a': u'b'},
                              kinit_path_local = '/usr/bin/kinit',
                              principal_name = UnknownConfigurationMock(),
                              user = 'hdfs',
                              owner = 'zeppelin',
                              recursive_chown = True,
                              hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
                              type = 'directory',
                              action = ['create_on_execute'],
                              recursive_chmod = True,
                              )
    self.assertResourceCalled('HdfsResource', '/user/zeppelin/test',
                              security_enabled = True,
                              hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
                              keytab = UnknownConfigurationMock(),
                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
                              hdfs_resource_ignore_file = '/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                              hdfs_site = {u'a': u'b'},
                              kinit_path_local = '/usr/bin/kinit',
                              principal_name = UnknownConfigurationMock(),
                              user = 'hdfs',
                              owner = 'zeppelin',
                              recursive_chown = True,
                              hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
                              type = 'directory',
                              action = ['create_on_execute'],
                              recursive_chmod = True,
                              )
    # Application directory for zeppelin artifacts.
    self.assertResourceCalled('HdfsResource', '/apps/zeppelin',
                              security_enabled = True,
                              hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
                              keytab = UnknownConfigurationMock(),
                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
                              hdfs_resource_ignore_file = '/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                              hdfs_site = {u'a': u'b'},
                              kinit_path_local = '/usr/bin/kinit',
                              principal_name = UnknownConfigurationMock(),
                              user = 'hdfs',
                              owner = 'zeppelin',
                              recursive_chown = True,
                              hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
                              type = 'directory',
                              action = ['create_on_execute'],
                              recursive_chmod = True,
                              )
    # File upload: existing file is replaced and made world-readable.
    self.assertResourceCalled('HdfsResource', '/apps/zeppelin/tmp',
                              security_enabled = True,
                              hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
                              keytab = UnknownConfigurationMock(),
                              source = '/tmp',
                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
                              replace_existing_files = True,
                              hdfs_resource_ignore_file = '/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                              hdfs_site = {u'a': u'b'},
                              kinit_path_local = '/usr/bin/kinit',
                              principal_name = UnknownConfigurationMock(),
                              user = 'hdfs',
                              owner = 'zeppelin',
                              group = 'zeppelin',
                              hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
                              type = 'file',
                              action = ['create_on_execute'],
                              mode = 0444,
                              )
    # Flush of pending HDFS operations.
    self.assertResourceCalled('HdfsResource', None,
                              security_enabled = True,
                              hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
                              keytab = UnknownConfigurationMock(),
                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
                              hdfs_resource_ignore_file = '/var/lib/ambari-agent/data/.hdfs_resource_ignore',
                              hdfs_site = {u'a': u'b'},
                              kinit_path_local = '/usr/bin/kinit',
                              principal_name = UnknownConfigurationMock(),
                              user = 'hdfs',
                              action = ['execute'],
                              hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
                              )
    self.assertResourceCalled('File', '/etc/zeppelin/conf/interpreter.json',
                              content = '{\n "interpreterSettings": []\n}',
                              owner = 'zeppelin',
                              group = 'zeppelin',
                              )
    self.assertResourceCalled('Execute', '/usr/hdp/current/zeppelin-server/bin/zeppelin-daemon.sh restart >> /var/log/zeppelin/zeppelin-setup.log',
                              user = 'zeppelin',
                              )
    self.assertNoMoreResources()
| |
"""Selectors for Home Assistant."""
from __future__ import annotations
from collections.abc import Callable
from datetime import time as time_sys
from typing import Any, cast
import voluptuous as vol
from homeassistant.const import CONF_MODE, CONF_UNIT_OF_MEASUREMENT
from homeassistant.core import split_entity_id
from homeassistant.util import decorator
from . import config_validation as cv
SELECTORS: decorator.Registry[str, type[Selector]] = decorator.Registry()
def _get_selector_class(config: Any) -> type[Selector]:
    """Return the registered Selector subclass for *config*'s single key."""
    if not isinstance(config, dict):
        raise vol.Invalid("Expected a dictionary")
    if len(config) != 1:
        raise vol.Invalid(f"Only one type can be specified. Found {', '.join(config)}")
    # Exactly one key at this point: unpack it directly.
    (selector_type,) = config
    selector_class = SELECTORS.get(selector_type)
    if selector_class is None:
        raise vol.Invalid(f"Unknown selector type {selector_type} found")
    return selector_class
def selector(config: Any) -> Selector:
    """Instantiate a selector from its configuration dict."""
    selector_class = _get_selector_class(config)
    (selector_type,) = config
    # Selectors can be empty
    if config[selector_type] is None:
        config = {selector_type: {}}
    return selector_class(config)
def validate_selector(config: Any) -> dict:
    """Validate a selector config and return it in normalized form."""
    selector_class = _get_selector_class(config)
    (selector_type,) = config
    inner = config[selector_type]
    # Selectors can be empty
    if inner is None:
        return {selector_type: {}}
    validated = cast(dict, selector_class.CONFIG_SCHEMA(inner))
    return {selector_type: validated}
class Selector:
    """Base class for selectors.

    Subclasses set ``selector_type`` and ``CONFIG_SCHEMA``; instantiating
    one validates the nested configuration for that type.
    """

    CONFIG_SCHEMA: Callable
    config: Any
    selector_type: str

    def __init__(self, config: Any) -> None:
        """Validate and store this selector's own configuration."""
        self.config = self.CONFIG_SCHEMA(config[self.selector_type])

    def serialize(self) -> Any:
        """Serialize Selector for voluptuous_serialize."""
        serialized = {"selector": {self.selector_type: self.config}}
        return serialized
@SELECTORS.register("entity")
class EntitySelector(Selector):
    """Selector of a single entity."""

    selector_type = "entity"

    CONFIG_SCHEMA = vol.Schema(
        {
            # Integration that provided the entity
            vol.Optional("integration"): str,
            # Domain the entity belongs to
            vol.Optional("domain"): str,
            # Device class of the entity
            vol.Optional("device_class"): str,
        }
    )

    def __call__(self, data: Any) -> str:
        """Validate the passed selection.

        Accepts a full entity_id (checked against the configured domain,
        if any) or, failing that, an entity-id-or-uuid string.
        """
        try:
            entity_id = cv.entity_id(data)
            domain = split_entity_id(entity_id)[0]
        except vol.Invalid:
            # Not a valid entity_id, maybe it's an entity entry id
            return cv.entity_id_or_uuid(cv.string(data))
        else:
            # Domain filter only applies to real entity_ids.
            if "domain" in self.config and domain != self.config["domain"]:
                raise vol.Invalid(
                    f"Entity {entity_id} belongs to domain {domain}, "
                    f"expected {self.config['domain']}"
                )
            return entity_id
@SELECTORS.register("device")
class DeviceSelector(Selector):
    """Selector of a single device."""

    selector_type = "device"

    CONFIG_SCHEMA = vol.Schema(
        {
            # Integration linked to it with a config entry
            vol.Optional("integration"): str,
            # Manufacturer of device
            vol.Optional("manufacturer"): str,
            # Model of device
            vol.Optional("model"): str,
            # Device has to contain entities matching this selector
            vol.Optional("entity"): EntitySelector.CONFIG_SCHEMA,
        }
    )

    def __call__(self, data: Any) -> str:
        """Validate the passed selection (coerced to a plain string)."""
        return cv.string(data)
@SELECTORS.register("area")
class AreaSelector(Selector):
    """Selector of a single area."""

    selector_type = "area"

    CONFIG_SCHEMA = vol.Schema(
        {
            # presumably filters areas by contained entities/devices —
            # confirm against frontend usage.
            vol.Optional("entity"): EntitySelector.CONFIG_SCHEMA,
            vol.Optional("device"): DeviceSelector.CONFIG_SCHEMA,
        }
    )

    def __call__(self, data: Any) -> str:
        """Validate the passed selection (coerced to a plain string)."""
        return cv.string(data)
@SELECTORS.register("number")
class NumberSelector(Selector):
    """Selector of a numeric value."""

    selector_type = "number"

    CONFIG_SCHEMA = vol.Schema(
        {
            vol.Required("min"): vol.Coerce(float),
            vol.Required("max"): vol.Coerce(float),
            # Steps smaller than 1e-3 are rejected by the schema.
            vol.Optional("step", default=1): vol.All(
                vol.Coerce(float), vol.Range(min=1e-3)
            ),
            vol.Optional(CONF_UNIT_OF_MEASUREMENT): str,
            # UI rendering mode; defaults to a slider.
            vol.Optional(CONF_MODE, default="slider"): vol.In(["box", "slider"]),
        }
    )

    def __call__(self, data: Any) -> float:
        """Validate the passed selection: a float inside [min, max]."""
        value: float = vol.Coerce(float)(data)
        if not self.config["min"] <= value <= self.config["max"]:
            raise vol.Invalid(f"Value {value} is too small or too large")
        return value
@SELECTORS.register("addon")
class AddonSelector(Selector):
    """Selector of a add-on."""

    selector_type = "addon"

    CONFIG_SCHEMA = vol.Schema({})

    def __call__(self, data: Any) -> str:
        """Coerce the selection to a string."""
        name: str = cv.string(data)
        return name
@SELECTORS.register("boolean")
class BooleanSelector(Selector):
    """Selector of a boolean value."""

    selector_type = "boolean"

    CONFIG_SCHEMA = vol.Schema({})

    def __call__(self, data: Any) -> bool:
        """Coerce the selection to a bool."""
        return cast(bool, vol.Coerce(bool)(data))
@SELECTORS.register("time")
class TimeSelector(Selector):
    """Selector of a time value."""

    selector_type = "time"

    CONFIG_SCHEMA = vol.Schema({})

    def __call__(self, data: Any) -> time_sys:
        """Validate and return the selection as a time value."""
        parsed: time_sys = cv.time(data)
        return parsed
@SELECTORS.register("target")
class TargetSelector(Selector):
    """Selector of a target value (area ID, device ID, entity ID etc).

    Value should follow cv.TARGET_SERVICE_FIELDS format.
    """

    selector_type = "target"

    CONFIG_SCHEMA = vol.Schema(
        {
            vol.Optional("entity"): EntitySelector.CONFIG_SCHEMA,
            vol.Optional("device"): DeviceSelector.CONFIG_SCHEMA,
        }
    )

    # Schema for the selected value itself (not the selector config).
    TARGET_SELECTION_SCHEMA = vol.Schema(cv.TARGET_SERVICE_FIELDS)

    def __call__(self, data: Any) -> dict[str, list[str]]:
        """Validate the passed selection against TARGET_SERVICE_FIELDS."""
        target: dict[str, list[str]] = self.TARGET_SELECTION_SCHEMA(data)
        return target
@SELECTORS.register("action")
class ActionSelector(Selector):
    """Selector of an action sequence (script syntax)."""

    selector_type = "action"

    CONFIG_SCHEMA = vol.Schema({})

    def __call__(self, data: Any) -> Any:
        """Return the selection unchanged."""
        return data
@SELECTORS.register("object")
class ObjectSelector(Selector):
    """Selector for an arbitrary object."""

    selector_type = "object"

    CONFIG_SCHEMA = vol.Schema({})

    def __call__(self, data: Any) -> Any:
        """Return the selection unchanged."""
        return data
@SELECTORS.register("text")
class StringSelector(Selector):
    """Selector for a multi-line text string."""

    selector_type = "text"

    CONFIG_SCHEMA = vol.Schema({vol.Optional("multiline", default=False): bool})

    def __call__(self, data: Any) -> str:
        """Coerce the selection to a string."""
        return cv.string(data)
@SELECTORS.register("select")
class SelectSelector(Selector):
    """Selector for an single-choice input select."""

    selector_type = "select"

    CONFIG_SCHEMA = vol.Schema(
        {vol.Required("options"): vol.All([str], vol.Length(min=1))}
    )

    def __call__(self, data: Any) -> Any:
        """Validate that the selection is one of the configured options."""
        choice = cv.string(data)
        return vol.In(self.config["options"])(choice)
| |
'''
Router script. Handles all requests and passes them to the automatically
imported controllers. This is also required by WSGI
server (`--wsgi-file` option for uWSGI).
'''
import os
import re
import sys
import importlib
import jinja2
import functools
from .el.misc import utils
from . import consts
from bottle import request, hook, default_app, get, post, \
route, debug, response, abort, url, redirect, run
from . import jinja2htmlcompress
# Template environment: views directory with HTML whitespace compression.
env = jinja2.Environment(extensions=[jinja2htmlcompress.HTMLCompress],
                         loader=jinja2.FileSystemLoader(str(consts.VIEWS)))
# Make constants global, the regex is pretty self-explanatory
# (only UPPER_CASE names from consts are exported to templates).
_const = re.compile('^[A-Z_]+$')
env.globals.update((k, v) for k, v in vars(consts).items() if _const.match(k))
from .el.accounts import auth, profile
from .el.accounts.records import Record
from .el.notifications import load as load_notifications
from . import controllers
@hook('before_request')
def set_environ():
    """Normalize the per-request environ before routing."""
    environ = request.environ
    environ['PATH_INFO'] = environ['PATH_INFO'].rstrip('/')
    # HTTP_HOST seems to be appropriate for the job,
    # I see no need for SERVER_NAME
    host = environ.get("HTTP_HOST")
    environ['DOTTED_DOMAIN'] = '.{host}'.format(host=host) if host is not None else ''
def view(name):
    """Decorator: render template *name* with the handler's dict result."""
    presets = {}

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            tpl_vars = dict(presets)
            if result:
                tpl_vars.update(result.copy())
            return env.get_template(name).render(**tpl_vars)
        return wrapper
    return decorator
def common(json=False):
    """Decorator adding authentication/account context to a handler.

    When *json* is true the wrapped handler's result is returned verbatim;
    otherwise dict results are merged into the template parameters.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*a, **kw):
            # Update the arguments with those we need in the function
            params = kw.copy()
            # Resolve the current session from the auth cookies.
            this = auth.Current(acid=request.cookies.get('acid'),
                                token=request.cookies.get('token'))
            # ?switch=<name> switches the active account, redirecting to
            # whichever page fits the target account's state.
            switch = request.query.get('switch')
            if switch is not None:
                _route_name = request['bottle.route'].name
                acct = Record(name=switch)
                if not acct.good():
                    if _route_name != 'sign_up':
                        redirect(url('sign_up', switch=switch))
                elif acct == this.record:
                    if _route_name != 'main':
                        redirect(url('main'))
                elif acct in this.records:
                    this.switch_to(acct)
                    if _route_name != 'main':
                        redirect(url('main'))
                else:
                    # Account exists but is not linked to this session.
                    if _route_name != 'sign_in':
                        redirect(url('sign_in', switch=switch))
            if this.loggedin:
                # That's right, set the ACID cookie on every request
                response.set_cookie(name='acid', path='/', max_age=this.acid[1],
                                    value=this.acid[0],
                                    domain=request.environ.get('DOTTED_DOMAIN'))
            if this.set_token:
                response.set_cookie(name='token', max_age=this.token[1], value=this.token[0],
                                    domain=request.environ.get('DOTTED_DOMAIN'), path='/')
            if this.loggedin:
                nots = {x.id: x.get_html(env.get_template('macros/notifications.html'))
                        for x in load_notifications(this.record)}
            else:
                nots = {}
            params.update(
                current=this,
                current_profile=profile.profile(this.record) if this.loggedin else {},
                notifications=nots,
                accounts=[x.add({'profile': profile.profile(x)}) for x in this.records],
                loggedin=this.loggedin,
                env=env  # TODO: Not sure. Most certainly not the best solution
            )
            result = func(*a, **params)
            if json:
                return result
            elif isinstance(result, dict):
                params.update(result)
                return params
            else:
                return result
        return wrapper
    return decorator
@get('/', name='main')
@view('main-logged.html')
@common()
def index_page(**ka):
    """GET / — main page."""
    return controllers.index(**ka)
@route('/auth/sign-in', method=['POST', 'GET'], name='sign_in')
@view('/auth/sign-in.html')
@common()
def sign_in_page(**ka):
    """GET/POST /auth/sign-in — sign-in form and submission."""
    return controllers.sign_in(**ka)
@route('/auth/sign-up', method=['POST', 'GET'], name='sign_up')
@view('/auth/sign-up.html')
@common()
def sign_up_page(**ka):
    """GET/POST /auth/sign-up — sign-up form and submission."""
    return controllers.sign_up(**ka)
@route('/people', method=['GET'])
@view('people.html')
@common()
def people_page(**ka):
    """GET /people — people listing."""
    return controllers.people(**ka)
@route('/images', method=['GET'])
@view('images.html')
@common()
def images_page(**ka):
    """GET /images — image gallery; the image macro template is injected."""
    return controllers.images_page(template=env.get_template('macros/image.html'),
                                   **ka)
@route('/<name>', method=['GET'])
@view('profile.html')
@common()
def profile_page(**ka):
    """GET /<name> — a user's profile page (catch-all route, keep it last)."""
    return controllers.profile_page(template=env.get_template('macros/image.html'),
                                    **ka)
@get('/account')
@view('account/main.html')
@common()
def account_main(**ka):
    """GET /account — account overview."""
    return controllers.accounts.main(**ka)
@get('/account/profile')
@view('account/profile.html')
@common()
def account_main(**ka):
    """GET /account/profile — account profile settings.

    NOTE(review): this redefines ``account_main`` (bottle registers the
    route at decoration time so it still works, but the duplicate function
    names should be made unique).
    """
    return controllers.accounts.profile(**ka)
@get('/account/security')
@view('account/security.html')
@common()
def account_main(**ka):
    """GET /account/security — account security settings.

    NOTE(review): duplicate function name ``account_main`` — works because
    bottle registers routes at decoration time, but should be renamed.
    """
    return controllers.accounts.security(**ka)
@get('/auth/sign-out')
@common()
def account_main(**ka):
    """GET /auth/sign-out — end the session.

    NOTE(review): duplicate function name ``account_main`` — works because
    bottle registers routes at decoration time, but should be renamed.
    """
    return controllers.accounts.sign_out(**ka)
@route('/auth/reset', method=['POST', 'GET'])
@view('auth/reset.html')
@common()
def reset_no_key(**ka):
    """GET/POST /auth/reset — request a password reset (no key yet)."""
    return controllers.accounts.reset_no_key(credentials=request.POST.reset,
                                             **ka)
@route('/auth/reset/<key>', method=['POST', 'GET'])
@view('auth/reset-key.html')
@common()
def reset_key(**ka):
    """GET/POST /auth/reset/<key> — complete a password reset with a key."""
    return controllers.accounts.reset_with_key(**ka)
@route('/api/<version>/<part>', ['PUT', 'GET', 'POST', 'DELETE'])
@route('/api/<version>/<part>/<action>', ['PUT', 'GET', 'POST', 'DELETE'])
def main_api(version, part, action=None):
    """Dispatch /api/<version>/<part>[/<action>].

    The handler is resolved as ``<part>_<http-method>`` inside the module
    ``controllers.api.api_v<version>``; unknown versions or handlers 404.
    """
    try:
        api = importlib.import_module('.controllers.api.api_v{}'.format(version), 'app')
        pieces = [part, request.method.lower()]
        func = getattr(api, '_'.join(pieces))
    except (ImportError, AttributeError):
        abort(404)
    else:
        # API responses bypass template rendering but keep the common context.
        return common(json=True)(func)(action=action)
debug(True)  # NOTE(review): debug mode is always on — gate for production.
# WSGI entry point (see `--wsgi-file` note in the module docstring).
application = default_app()
if __name__ == '__main__':
    # When run directly, allow overriding constants from a JSON config file.
    import argparse
    import json
    from pathlib import Path
    parser = argparse.ArgumentParser(description='TODO')
    parser.add_argument('--config')
    args = parser.parse_args()
    if args.config is not None:
        config = Path(args.config).resolve()
        with config.open() as file:
            consts.update_ext(json.load(file))
| |
# Copyright 2015 Joe H. Rahme <joehakimrahme@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import os
import flask
import mimerender
import six
if six.PY2:
    from exceptions import IOError
    import sys
    # Force UTF-8 as the Python 2 default encoding so non-ASCII article
    # text doesn't raise UnicodeDecodeError.
    reload(sys)  # noqa
    sys.setdefaultencoding('utf-8')
    import builder
    import context
    import utils
else:
    # Python 3: use package-qualified imports.
    import blogstrap.builder as builder
    import blogstrap.context as context
    import blogstrap.utils as utils
class ArticleNotFound(IOError):
    """Raised when the requested article file cannot be opened."""
class ArticleHidden(Exception):
    """Raised when the requested article is hidden (dotfile name)."""
class ArticleReader(object):
    """Context manager that loads an article file and splits it into
    ``content`` and ``metadata`` via ``utils.parse_metadata``.

    :raises ArticleNotFound: when the file cannot be opened.
    """

    def __init__(self, path):
        # Keep the try body minimal: only the file access may raise the
        # IOError we translate; a failure in parse_metadata should not be
        # misreported as a missing article.
        try:
            with open(path) as article_file:
                text = article_file.read()
        except IOError:
            raise ArticleNotFound(path)
        text_dict = utils.parse_metadata(text)
        self.content = text_dict['content']
        self.metadata = text_dict['metadata']

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Nothing to release: the file is fully read in __init__.
        pass
class DefaultConfig(object):
    """Default Flask configuration, overridable via a config pyfile."""
    AUTHOR = "Blogstrap"
    DESCRIPTION = "Powered By Blogstrap"
    DEBUG = True
    BLOGROOT = "."  # directory containing article files
    BLOGTITLE = "Powered by Blogstrap"
    DEFAULT_LANG = "en"
    NAVBAR_LINKS = []
    STATIC_DIR = "images"  # served under /<basename(STATIC_DIR)>/<image>
    TOC_BLACKLIST = []
# Registering markdown as a valid MIME.
# More info: https://tools.ietf.org/html/rfc7763
mimerender.register_mime('markdown', ('text/markdown',))
# NOTE(review): this rebinds the module name `mimerender` to the Flask
# renderer instance; the module itself is unreachable afterwards.
mimerender = mimerender.FlaskMimeRender()
def create_app(config_file=None):
    """Create and configure the Blogstrap Flask application.

    :param config_file: optional path to a pyfile overriding DefaultConfig.
    :returns: the configured flask.Flask instance.
    """
    app = flask.Flask(__name__, static_url_path="/framework/static")
    app.config.from_object(DefaultConfig)
    if config_file:
        app.config.from_pyfile(config_file)
    # default static files directory
    staticdir = app.config.get('STATIC_DIR').rstrip("/")
    staticdir_route = os.path.basename(staticdir)

    def _render(template, message=None):
        # Render the Jinja template, then substitute any "{{ key }}"
        # placeholders that survived rendering.
        ctx = context.context(app, message)
        result = flask.render_template(template, **ctx)
        for key, value in ctx.items():
            result = result.replace("{{ %s }}" % key, str(value))
        return result

    def render_html(message):
        return _render("index.html", message)

    def render_html_exception(exception):
        return _render("404.html")

    def render_markdown(message):
        return _render("index.md", message)

    def render_md_exception(exception):
        return _render("404.md")

    @app.route("/")
    def nothing():
        if 'HOMEPAGE' in app.config:
            return serve_blog(blogpost=app.config['HOMEPAGE'])
        # no homepage defined return HTTP 204 No Content
        return ('', 204)

    @app.route("/%s/<image>" % staticdir_route)
    def serve_static(image):
        full_directory = os.path.join(os.getcwd(), staticdir)
        if os.path.exists(os.path.join(full_directory, image)):
            return flask.send_from_directory(full_directory, image)
        # Fix: the previous version fell through and returned None, which
        # Flask reports as a 500 error; a missing image is a 404.
        flask.abort(404)

    @app.route("/<blogpost>", strict_slashes=False)
    @mimerender.map_exceptions(
        mapping=(
            (ArticleNotFound, '404 Article Not Found'),
            (ArticleHidden, '404 Article Hidden'),
        ),
        default='markdown',
        markdown=render_md_exception,
        html=render_html_exception,
    )
    @mimerender(
        default='markdown',
        html=render_html,
        markdown=render_markdown)
    def serve_blog(blogpost):
        # Dotfiles are never served.
        if blogpost.startswith("."):
            raise ArticleHidden()
        root_directory = app.config['BLOGROOT']
        blogpost = "/".join((root_directory, blogpost))
        # Content negotiation: prefer an .html variant for HTML clients,
        # otherwise fall back to an .md variant when present.
        accept_header = flask.request.headers.get('Accept', [])
        suffix = ""
        if "text/html" in accept_header:
            if os.path.exists(blogpost + ".html"):
                suffix = ".html"
        else:
            if os.path.exists(blogpost + ".md"):
                suffix = ".md"
        blogpost += suffix
        with ArticleReader(blogpost) as article:
            return {
                'message': {
                    'content': article.content,
                    'metadata': article.metadata,
                }
            }
    return app
def build_parser():
    """Builds the argument parser."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(help='Blogstrap commands')

    # 'init' scaffolds a new Blogstrap directory.
    init_parser = subparsers.add_parser(
        'init',
        help='Initialize the Blogstrap directory')
    init_parser.set_defaults(func=init)
    init_parser.add_argument(
        '-t', '--target',
        dest='target',
        type=str,
        default='.',
        help='Target folder to generate files in')
    init_parser.add_argument(
        '--no-homepage',
        action='store_true',
        default=False,
        help='if specified, no homepage will be created')

    # 'run' starts the Flask development server.
    run_parser = subparsers.add_parser(
        'run', help="Run the Flask development server")
    run_parser.set_defaults(func=run)
    run_parser.add_argument(
        '-c', '--config',
        dest='config',
        type=str,
        default=None,
        help='path to a config file')
    return parser
def main():
    """CLI entry point: parse arguments and dispatch to the subcommand."""
    parser = build_parser()
    args = parser.parse_args()
    func = getattr(args, 'func', None)
    if func is None:
        # No subcommand given: Python 3 argparse does not require one, so
        # fail with a usage message instead of an AttributeError traceback.
        parser.error('a subcommand is required (init or run)')
    func(args)
def init(args):
    """Handler for the 'init' subcommand: scaffold a Blogstrap directory."""
    builder.build(args)
def run(args):
    """Handler for the 'run' subcommand: start the Flask dev server."""
    # identify which config file to use first
    config = args.config
    if config is None:
        # if no config file are defined on the cli, try to look for one
        # in the default location ".blogstrap/blogstrap.conf"
        candidate = os.path.join(os.getcwd(),
                                 ".blogstrap/blogstrap.conf")
        if os.path.exists(candidate):
            config = candidate
    elif not os.path.isabs(config):
        # make sure any relative path is resolved relative to the
        # current working dir
        config = os.path.join(os.getcwd(), config)
    application = create_app(config)
    application.run()
| |
#!/usr/bin/env python3
import unittest as ut
import subtest_fix
import tempfile
import os
import argparse
from collections import OrderedDict as odict
from c4.cmany import conf, flags, util, args as c4args
# -----------------------------------------------------------------------------
class Test00FlagState(ut.TestCase):
    """Sanity: every test-case compiler appears in each flag's compiler list."""

    def test00CompilersShouldBeListedInFlags(self):
        for tcn, (comps, cflags, yml) in test_cases.items():
            with self.subTest(test_case=tcn):
                for k, v in cflags.items():
                    for c in comps:
                        self.assertTrue(c in v.compilers, str(c) + " is not listed in " + str(v.compilers))
# -----------------------------------------------------------------------------
class Test01Dump(ut.TestCase):
    """dump_yml must produce exactly the expected YAML text per test case."""

    def test(self):
        for tcn, (comps, cflags, yml) in test_cases.items():
            with self.subTest(test_case=tcn):
                txt = flags.dump_yml(comps, cflags)
                self.assertEqual(yml, txt, tcn)
# -----------------------------------------------------------------------------
class Test02Load(ut.TestCase):
    """load_txt must reconstruct the compilers and flags from the YAML dump."""

    def test00RunTestCases(self):
        for tcn, (rcomps, rflags, yml) in test_cases.items():
            with self.subTest(test_case=tcn):
                (comps, cflags) = flags.load_txt(yml)
                #print()
                #print(tcn, rcomps, comps)
                #print(tcn, rflags, cflags)
                #for (k1, v1), (k2, v2) in zip(rflags.items(), cflags.items()):
                #    print(tcn, "k1v1", k1, v1)
                #    print(tcn, "k2v2", k2, v2)
                #    for c1, c2 in zip(v1.compilers, v2.compilers):
                #        print(tcn, "comp", c1, c2)
                self.assertEqual(comps, rcomps, tcn)
                self.assertTrue(same_flags(cflags, rflags), tcn)

    def test01DoCommasWork(self):
        """A comma-separated compiler key must fan the flag out to each one."""
        yml = """c++11:
gcc,clang,icc: -std=c++11
vs: ''
"""
        c, f = flags.load_txt(yml)
        self.assertTrue('c++11' in f)
        f11 = f['c++11']
        self.assertTrue(hasattr(f11, 'gcc'))
        self.assertTrue(hasattr(f11, 'icc'))
        self.assertTrue(hasattr(f11, 'clang'))
        self.assertEqual(f11.gcc, '-std=c++11')
        self.assertEqual(f11.icc, '-std=c++11')
        self.assertEqual(f11.clang, '-std=c++11')
# -----------------------------------------------------------------------------
class Test03Merge(ut.TestCase):
    """Merging flag sets must be symmetric with the expected combined case."""

    def test(self):
        self._run('gcc-g3', 'clang-g3', 'gcc, clang-g3', 'clang, gcc-g3')

    def _run(self, tc1, tc2, r1into2, r2into1):
        # Merge in both directions; each direction has its own expected case.
        for v1, v2, ref in ((tc1, tc2, r1into2), (tc2, tc1, r2into1)):
            tcn = "merge " + v1 + " into " + v2 + ": should be same as " + ref
            with self.subTest(test_case=tcn):
                comps1, cflags1, yml1 = test_cases[v1]
                comps2, cflags2, yml2 = test_cases[v2]
                compsr, cflagsr, ymlr = test_cases[ref]
                cflagsv = flags.merge(cflags2, cflags1)
                compsv = flags.get_all_compilers(cflagsv)
                ymlv = flags.dump_yml(compsv, cflagsv)
                self.assertTrue(same_elements_in_list(compsv, compsr), tcn)
                self.assertEqual(compsv, compsr, tcn)
                self.assertEqual(ymlv, ymlr, tcn)
# -----------------------------------------------------------------------------
class Test04FlagsIO(ut.TestCase):
    """Round-trip flags through files on disk."""

    def setUp(self):
        # Baseline flags shipped with cmany.
        with open(os.path.join(conf.CONF_DIR, "cmany.yml"), "r") as f:
            self.comps, self.flags = flags.load_txt(f.read())

    @staticmethod
    def _do_save(comps_, flags_, filename):
        """Dump compilers+flags as YAML into *filename*."""
        with open(filename, 'w') as f:
            yml = flags.dump_yml(comps_, flags_)
            f.write(yml)

    @staticmethod
    def _do_load(filename):
        """Load compilers+flags back from the YAML file *filename*."""
        with open(filename, 'r') as f:
            yml = f.read()
            comps_, flags_ = flags.load_txt(yml)
            return comps_, flags_

    def test00SavedIsSameAsOriginal(self):
        for tcn, (rcomps, rflags, yml) in test_cases.items():
            with self.subTest(test_case=tcn):
                fh_, fn = tmpfile()
                fh = os.fdopen(fh_)
                __class__._do_save(rcomps, rflags, fn)
                c, f = __class__._do_load(fn)
                self.assertEqual(c, rcomps)
                self.assertEqual(len(f), len(rflags))
                self.assertEqual(list(f.keys()), list(rflags.keys()))
                for (rname, rf), (vname, vf) in zip(rflags.items(), f.items()):
                    self.assertEqual(rcomps, vf.compilers)
                    # NOTE(review): iterates self.comps (the baseline set),
                    # not rcomps — confirm this is intended.
                    for kc in self.comps:
                        #print(rname, c, rf.get(c), vf.get(c))
                        self.assertEqual(rf.get(kc), vf.get(kc))
                fh.close()
                os.remove(fn)
                del fh

    def test01WriteReadWriteIsSame(self):
        for tcn, (rcomps, rflags, yml) in test_cases.items():
            with self.subTest(test_case=tcn):
                frefh, fn = tmpfile()
                fvalh, fn_out = tmpfile()
                fref = os.fdopen(frefh)
                fval = os.fdopen(fvalh)
                # Save, load, save again: both files must match line by line.
                __class__._do_save(rcomps, rflags, fn)
                c, f = __class__._do_load(fn)
                __class__._do_save(c, f, fn_out)
                ref = list(fref.readlines())
                val = list(fval.readlines())
                lr = len(ref)
                lv = len(val)
                self.assertEqual(lr, lv)
                for i in range(0, max(lr, lv)):
                    if i < lr and i < lv:
                        self.assertEqual(ref[i], val[i])
                    else:
                        break
                del fref, frefh
                del fval, fvalh
                os.remove(fn)
                os.remove(fn_out)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class Test10Flags(ut.TestCase):
    """Command-line parsing of the flag-carrying options (-V/-D/-X/-C)."""

    def test01_cmake_vars(self):
        self._do_separate_test('-V', '--cmake-vars')

    def test02_defines(self):
        self._do_separate_test('-D', '--defines')

    def test03_cxxflags(self):
        self._do_separate_test('-X', '--cxxflags')

    def test04_cflags(self):
        self._do_separate_test('-C', '--cflags')

    def _do_separate_test(self, shortopt, longopt):
        """Exercise one option (short and long form) with every arg shape:
        repeated options, comma-separated lists, escaped commas (\\,),
        KEY=VALUE pairs, and quoted values containing spaces."""
        n = longopt[2:].replace('-', '_')
        for o in (shortopt, longopt):
            def _c(a, r): self.check_one(n, o, a, r)
            _c('{} VAR1', ['VAR1'])
            _c('{} VAR2,VAR3', ['VAR2', 'VAR3'])
            _c('{} VAR4 {} VAR5', ['VAR4', 'VAR5'])
            _c('{} VAR6,VAR7 {} VAR8,VAR9', ['VAR6', 'VAR7', 'VAR8', 'VAR9'])
            _c('{} VAR\,1', ['VAR,1'])
            _c('{} VAR\,2,VAR\,3', ['VAR,2', 'VAR,3'])
            _c('{} VAR\,4 {} VAR\,5', ['VAR,4', 'VAR,5'])
            _c('{} VAR\,6,VAR\,7 {} VAR\,8,VAR\,9', ['VAR,6', 'VAR,7', 'VAR,8', 'VAR,9'])
            _c('{} VAR1=1', ['VAR1=1'])
            _c('{} VAR2=2,VAR3=3', ['VAR2=2', 'VAR3=3'])
            _c('{} VAR4=4 {} VAR5=5', ['VAR4=4', 'VAR5=5'])
            _c('{} VAR6=6,VAR7=7 {} VAR8=8,VAR9=9', ['VAR6=6', 'VAR7=7', 'VAR8=8', 'VAR9=9'])
            _c('{} VAR1=1\,a', ['VAR1=1,a'])
            _c('{} VAR2=2\,a,VAR3=3\,a', ['VAR2=2,a', 'VAR3=3,a'])
            _c('{} VAR4=4\,a {} VAR5=5\,a', ['VAR4=4,a', 'VAR5=5,a'])
            _c('{} VAR6=6\,a,VAR7=7\,a {} VAR8=8\,a,VAR9=9\,a',
               ['VAR6=6,a', 'VAR7=7,a', 'VAR8=8,a', 'VAR9=9,a'])
            _c(['{}', 'VAR1="1 with spaces"'], ['VAR1="1 with spaces"'])
            _c(['{}', 'VAR2="2 with spaces",VAR3="3 with spaces"'],
               ['VAR2="2 with spaces"', 'VAR3="3 with spaces"'])
            _c(['{}', 'VAR4="4 with spaces"', '{}', 'VAR5="5 with spaces"'],
               ['VAR4="4 with spaces"', 'VAR5="5 with spaces"'])
            _c(['{}', 'VAR6="6 with spaces",VAR7="7 with spaces"', '{}',
                'VAR8="8 with spaces",VAR9="9 with spaces"'],
               ['VAR6="6 with spaces"', 'VAR7="7 with spaces"', 'VAR8="8 with spaces"',
                'VAR9="9 with spaces"'])
            _c(['{}', 'VAR1="1\,a with spaces"'], ['VAR1="1,a with spaces"'])
            _c(['{}', 'VAR2="2\,a with spaces",VAR3="3\,a with spaces"'],
               ['VAR2="2,a with spaces"', 'VAR3="3,a with spaces"'])
            _c(['{}', 'VAR4="4\,a with spaces"', '{}', 'VAR5="5\,a with spaces"'],
               ['VAR4="4,a with spaces"', 'VAR5="5,a with spaces"'])
            _c(['{}', 'VAR6="6\,a with spaces",VAR7="7\,a with spaces"', '{}',
                'VAR8="8\,a with spaces",VAR9="9\,a with spaces"'],
               ['VAR6="6,a with spaces"', 'VAR7="7,a with spaces"', 'VAR8="8,a with spaces"',
                'VAR9="9,a with spaces"'])
            _c('{} "-fPIC,-Wall,-O3,-Os"', ['-fPIC', '-Wall', '-O3', '-Os'])
            _c("{} '-fPIC,-Wall,-O3,-Os'", ['-fPIC', '-Wall', '-O3', '-Os'])
            _c('{} "-fPIC","-Wall","-O3","-Os"', ['-fPIC', '-Wall', '-O3', '-Os'])
            _c("{} '-fPIC','-Wall','-O3','-Os'", ['-fPIC', '-Wall', '-O3', '-Os'])

    def check_one(self, name, opt, args, ref):
        """Parse *args* (with '{}' replaced by *opt*) and compare attribute
        *name* of the result to *ref*."""
        if isinstance(args, str):
            args = args.split(' ')
        args_ = args
        args = []
        for a in args_:
            args.append(a.format(opt))
        p = argparse.ArgumentParser()
        c4args.add_bundle_flags(p)
        out = p.parse_args(args)
        # print(out, kwargs)
        a = getattr(out, name)
        self.assertEqual(ref, a)
        del out

    def check_many(self, args, **ref):
        """Parse a full command line and compare several attributes at once."""
        if isinstance(args, str):
            args = util.splitesc_quoted(args, ' ')
        p = argparse.ArgumentParser()
        c4args.add_bundle_flags(p)
        out = p.parse_args(args)
        # print(out, kwargs)
        for k, refval in ref.items():
            result = getattr(out, k)
            self.assertEqual(result, refval)
        del out

    def test10_mixed0(self):
        self.check_many('-X "-fPIC" -D VARIANT1', cmake_vars=[], cxxflags=['-fPIC'], cflags=[],
                        defines=['VARIANT1'])
        self.check_many('-X "-Wall" -D VARIANT2', cmake_vars=[], cxxflags=['-Wall'], cflags=[],
                        defines=['VARIANT2'])
        self.check_many('-X nortti,c++14 -D VARIANT3', cmake_vars=[], cxxflags=['nortti', 'c++14'], cflags=[],
                        defines=['VARIANT3'])
        self.check_many('-X "-fPIC,-Wl\,-rpath" -D VARIANT1', cmake_vars=[], cxxflags=['-fPIC', '-Wl,-rpath'],
                        cflags=[], defines=['VARIANT1'])

    def test11_mixed1(self):
        self.check_many('-X "-fPIC" -D VARIANT1,VARIANT_TYPE=1', cmake_vars=[], cxxflags=['-fPIC'], cflags=[],
                        defines=['VARIANT1', 'VARIANT_TYPE=1'])
        self.check_many('-X "-Wall" -D VARIANT2,VARIANT_TYPE=2', cmake_vars=[], cxxflags=['-Wall'], cflags=[],
                        defines=['VARIANT2', 'VARIANT_TYPE=2'])
        self.check_many('-X nortti,c++14 -D VARIANT3,VARIANT_TYPE=3', cmake_vars=[], cxxflags=['nortti', 'c++14'],
                        cflags=[], defines=['VARIANT3', 'VARIANT_TYPE=3'])

    def test12_mixed2(self):
        self.check_many('-X "-fPIC" -D VARIANT1,"VARIANT_TYPE=1"', cmake_vars=[], cxxflags=['-fPIC'], cflags=[],
                        defines=['VARIANT1', 'VARIANT_TYPE=1'])
        self.check_many('-X "-Wall" -D VARIANT2,"VARIANT_TYPE=2"', cmake_vars=[], cxxflags=['-Wall'], cflags=[],
                        defines=['VARIANT2', 'VARIANT_TYPE=2'])
        self.check_many('-X nortti,c++14 -D VARIANT3,"VARIANT_TYPE=3"', cmake_vars=[],
                        cxxflags=['nortti', 'c++14'], cflags=[], defines=['VARIANT3', 'VARIANT_TYPE=3'])

    def test13_mixed3(self):
        self.check_many('-X "-fPIC" -D "VARIANT1,VARIANT_TYPE=1"', cmake_vars=[], cxxflags=['-fPIC'], cflags=[],
                        defines=['VARIANT1', 'VARIANT_TYPE=1'])
        self.check_many('-X "-Wall" -D "VARIANT2,VARIANT_TYPE=2"', cmake_vars=[], cxxflags=['-Wall'], cflags=[],
                        defines=['VARIANT2', 'VARIANT_TYPE=2'])
        self.check_many('-X nortti,c++14 -D "VARIANT3,VARIANT_TYPE=3"', cmake_vars=[],
                        cxxflags=['nortti', 'c++14'], cflags=[], defines=['VARIANT3', 'VARIANT_TYPE=3'])
# -----------------------------------------------------------------------------
def tmpfile():
    """Create a temporary .yml file; returns (os-level fd, path)."""
    return tempfile.mkstemp(suffix='.yml', prefix='cmany_flags-')
def same_elements_in_list(l1, l2):
    """Return True when every element of `l1` occurs in `l2` and vice versa.

    Order and multiplicity are ignored. The previous implementation zipped
    the two sequences, so any elements beyond the length of the shorter
    sequence were never checked and lists of different lengths could wrongly
    compare as equal.
    """
    # Materialize so dict views / generators can be traversed twice.
    left = list(l1)
    right = list(l2)
    return (all(k in right for k in left)
            and all(k in left for k in right))
def same_flags(f1, f2):
    """Return True when the two flag dicts describe equivalent flags.

    Both dicts must have the same size, the same keys, and for each flag the
    same compilers with the same per-compiler values.
    """
    if len(f1) != len(f2):
        return False
    if not same_elements_in_list(f1.keys(), f2.keys()):
        return False
    for (name1, flag1), (name2, flag2) in zip(f1.items(), f2.items()):
        if not same_elements_in_list(flag1.compilers, flag2.compilers):
            return False
        if any(flag1.get(comp) != flag2.get(comp)
               for comp in flag1.compilers):
            return False
    return True
# -----------------------------------------------------------------------------
def tc(name, comps, flags, **kwargs):
    """Build a ``(name, (compilers, flags, yml))`` test-case tuple."""
    payload = (comps, flags, kwargs['yml'])
    return name, payload
def d(*args):
    """Build an ordered dict from ``(key, value)`` pair arguments."""
    return odict(list(args))
def f(name, *args, **kwargs):
    """Build a ``(name, CFlag)`` pair suitable for passing to ``d()``."""
    flag = flags.CFlag(name, *args, **kwargs)
    return name, flag
# Load the known flags shipped with cmany and round-trip them through the
# YAML dumper so they can serve as the 'known_flags' test case below.
with open(os.path.join(conf.CONF_DIR, "cmany.yml")) as fconf:
    kc, kf = flags.load_txt(fconf.read())
kyml = flags.dump_yml(kc, kf)
# Table of test cases: name -> (compilers, expected flags, expected YAML).
test_cases = d(
    tc('gcc-g', ['gcc'], d(f('g', gcc='-g')), yml="""\
g:
    gcc: -g
"""),
    tc('gcc-g3', ['gcc'], d(f('g3', gcc='-g3')), yml="""\
g3:
    gcc: -g3
"""),
    tc('clang-g3', ['clang'], d(f('g3', clang='-g3')), yml="""\
g3:
    clang: -g3
"""),
    tc('gcc, clang-g3', ['gcc', 'clang'], d(f('g3', gcc='-g3', clang='-g3')), yml="""\
g3:
    gcc,clang: -g3
"""),
    tc('clang, gcc-g3', ['clang', 'gcc'], d(f('g3', clang='-g3', gcc='-g3')), yml="""\
g3:
    clang,gcc: -g3
"""),
    tc('known_flags', kc, kf, yml=kyml)
)
| |
"""
Contains functions and classes for parsing and storing the results of a `net use` command on
Windows. This table describes what the mounted UNC paths.
"""
from copy import deepcopy
from win_unc.disk_drive import DiskDrive
from win_unc.errors import InvalidDiskDriveError
from win_unc.internal.utils import (
dict_map, drop_while, take_while, first, rfirst, not_,
rekey_dict, remove_nones_in_dict, subdict_matches)
from win_unc.unc_directory import UncDirectory
class NetUseColumn(object):
    """
    Describes a single column of `NET USE` output: the column's name plus the
    character range its data occupies in each row.
    """
    def __init__(self, name, start, end):
        """
        `name` is the column's name.
        `start` is the index in a row where this column's data begins.
        `end` is the index in a row where this column's data stops, or `None`
        to consume the rest of the line.
        """
        self.name = name
        self.start = start
        self.end = end

    def extract(self, string):
        """
        Slice this column's cell out of the row `string` and trim whitespace.
        """
        cell = string[self.start:self.end]
        return cell.strip()

    def __repr__(self):
        return '<{cls} "{name}": {start}-{end}>'.format(
            cls=self.__class__.__name__, name=self.name,
            start=self.start, end=self.end)
class NetUseTable(object):
    """
    Holds rows parsed from `NET USE` output and offers convenience accessors.
    """
    def __init__(self):
        self.rows = []

    def add_row(self, row):
        """
        Standardizes `row`, appends it to the table, and returns the
        standardized row.
        """
        new_row = standardize_row(row)
        self.rows.append(new_row)
        return new_row

    def get_column(self, column):
        """
        Returns the value of `column` from every row, in row order.
        """
        return [entry[column] for entry in self.rows]

    def get_connected_paths(self):
        return self.get_column('remote')

    def get_connected_devices(self):
        return [drive for drive in self.get_column('local') if drive]

    def get_matching_rows(self, local=None, remote=None, status=None):
        """
        Returns the rows matching all of the given criteria; criteria that
        are `None` are ignored.
        """
        if isinstance(remote, UncDirectory):
            # Compare by path only, ignoring any credentials on `remote`.
            remote = UncDirectory(remote.get_path())
        criteria = {'local': local, 'remote': remote, 'status': status}
        test_row = construct_row_values(remove_nones_in_dict(criteria))
        return [row for row in self.rows if subdict_matches(row, test_row)]
# Message `NET USE` prints when there are no connections to list.
EMPTY_TABLE_INDICATOR = 'There are no entries in the list.'
# Trailing status line that marks the end of the table body.
LAST_TABLE_LINE = 'The command completed successfully.'
# This dictionary maps from the column names in the output of `NET USE` to standardized column
# names that should never change. This allows the output of `NET USE` to change without forcing
# the users of this module to change their code.
MAP_RAW_COLUMNS_TO_STANDARD_COLUMNS = {
    'Local': 'local',
    'Remote': 'remote',
    'Status': 'status',
}
# Constructors that turn a raw cell string into the typed value stored in a
# table row; empty cells become `None`.
COLUMN_CONSTRUCTORS = {
    'local': lambda x: DiskDrive(x) if x else None,
    'remote': lambda x: UncDirectory(x) if x else None,
    'status': lambda x: str(x).lower() if x else None,
}
def standardize_row(row):
    """Return `row` with standardized keys and constructed column values."""
    return construct_row_values(standardize_row_keys(row))
def standardize_row_keys(row):
    """Rename raw `NET USE` column names in `row` to their standardized names."""
    return rekey_dict(row, MAP_RAW_COLUMNS_TO_STANDARD_COLUMNS)
def construct_row_values(row):
    """Convert each raw cell string in `row` into its typed value (or `None`)."""
    return dict_map(row, COLUMN_CONSTRUCTORS)
def is_line_separator(line):
    """
    Returns `True` when `line` is a line separator in a "net use" table.

    A separator is a non-empty line consisting solely of `-` characters.
    The result is always a `bool`; the previous version returned `''`/`None`
    for falsy input because `and` short-circuits to its first operand.
    """
    return bool(line) and all(char == '-' for char in line)
def get_columns(lines):
    """
    Parses the column headers from a "net use" table into a list of `NetUseColumn` objects.
    `lines` is a list of strings from the output of `NET USE`.
    """
    # Everything above the dashed separator line is header material.
    header_iter = take_while(not_(is_line_separator), lines)
    # The headings row is the last header line that starts with a letter.
    headings = rfirst(lambda x: x and x[0].isalpha(), header_iter)
    names = headings.split()
    # Each column starts where its heading starts and ends one character
    # before the next heading starts; the last column runs to end-of-line.
    # NOTE(review): `headings.index(name)` assumes no heading occurs as a
    # substring earlier in the headings line — confirm against real output.
    starts = [headings.index(name) for name in names]
    ends = [right - 1 for right in starts[1:]] + [None]
    return [NetUseColumn(name, start, end)
            for name, start, end in zip(names, starts, ends)]
def get_body(lines):
    """
    Extracts only the body of the "net use" table. The body is everything between the column
    headers and the end of the output.
    `lines` is a list of strings from the output of `NET USE`.
    """
    remainder = drop_while(not_(is_line_separator), lines)
    if len(remainder) <= 1:
        return []
    # Keep body lines until the blank line or final status line is reached.
    before_end = lambda x: x and x != LAST_TABLE_LINE
    return take_while(before_end, remainder[1:])
def parse_singleline_row(line, columns):
    """
    Converts one single-line row of a "net use" table into a dict mapping
    standardized column names to the raw cell text.
    `line` must contain the entire row on one line; rows that `NET USE`
    wraps across two lines must be joined before calling this.
    `columns` must be a list of `NetUseColumn` objects matching the table.
    """
    result = {}
    for column in columns:
        result[column.name] = column.extract(line)
    return result
def parse_multiline_row(line1, line2, columns):
    """
    Parses a table row that `NET USE` wrapped onto two physical lines.
    `line1` and `line2` are the first and second lines of the row.
    `columns` is the list of `NetUseColumn`s that parses single-line rows; a
    locally adjusted deep copy is used so that the last two columns line up
    with the joined text.
    """
    joined = line1 + ' ' + line2.strip()
    adjusted = deepcopy(columns)
    adjusted[-2].end = len(line1)
    adjusted[-1].start = len(line1) + 1
    return parse_singleline_row(joined, adjusted)
def build_net_use_table_from_parts(columns, body_lines):
    """
    Returns a new `NetUseTable` built from `columns` and `body_lines`.
    `columns` is a list of `NetUseColumn` objects.
    `body_lines` is a list of raw row strings; a single logical row may span
    two physical lines (the continuation line starts with a space).
    """
    table = NetUseTable()
    followers = body_lines[1:] + ['']
    for current, following in zip(body_lines, followers):
        if current.startswith(' '):
            # Continuation line; it was consumed together with its row.
            continue
        if following.startswith(' '):
            row_dict = parse_multiline_row(current, following, columns)
        else:
            row_dict = parse_singleline_row(current, columns)
        # Ignore invalid disk drives as they are probably printer mappings.
        try:
            table.add_row(row_dict)
        except InvalidDiskDriveError:
            pass
    return table
def parse_populated_net_use_table(string):
    """
    Parses a non-empty table from the output of `NET USE` and returns a `NetUseTable`.
    """
    stripped = [raw.rstrip() for raw in string.split('\n')]
    columns = get_columns(stripped)
    body = get_body(stripped)
    return build_net_use_table_from_parts(columns, body)
def parse_net_use_table(string):
    """
    Parses `string` into a `NetUseTable` and returns it.
    """
    if EMPTY_TABLE_INDICATOR not in string:
        return parse_populated_net_use_table(string)
    # Nothing mounted: return an empty table.
    return NetUseTable()
| |
import numpy
import chainer
from chainer.backends import cuda
from chainer.functions.activation import lstm
from chainer.functions.array import reshape
from chainer.functions.array import stack
from chainer.functions.connection import linear
from chainer.functions.connection import n_step_rnn
from chainer.utils import argument
# Bind the cuDNN modules only when CuPy reports cuDNN support; these names
# are referenced solely on the GPU/cuDNN code path below.
if cuda.cudnn_enabled:
    cudnn = cuda.cudnn
    libcudnn = cuda.cuda.cudnn
def _stack_weight(ws):
    # TODO(unno): Input of the current LSTM implementation is shuffled
    stacked = stack.stack(ws, axis=1)
    first, second = stacked.shape[0], stacked.shape[1]
    # Fold the stacking axis into the leading axis.
    return reshape.reshape(stacked, (first * second,) + stacked.shape[2:])
class NStepLSTM(n_step_rnn.BaseNStepRNN):
    """Uni-directional stacked LSTM function (used on the cuDNN path)."""
    def __init__(self, n_layers, states, lengths):
        n_step_rnn.BaseNStepRNN.__init__(
            self, n_layers, states, lengths,
            rnn_dir='uni', rnn_mode='lstm')
class NStepBiLSTM(n_step_rnn.BaseNStepRNN):
    """Bi-directional stacked LSTM function (used on the cuDNN path)."""
    def __init__(self, n_layers, states, lengths):
        n_step_rnn.BaseNStepRNN.__init__(
            self, n_layers, states, lengths,
            rnn_dir='bi', rnn_mode='lstm')
def n_step_lstm(
        n_layers, dropout_ratio, hx, cx, ws, bs, xs, **kwargs):
    """n_step_lstm(n_layers, dropout_ratio, hx, cx, ws, bs, xs)
    Stacked Uni-directional Long Short-Term Memory function.
    This function calculates stacked Uni-directional LSTM with sequences.
    This function gets an initial hidden state :math:`h_0`, an initial cell
    state :math:`c_0`, an input sequence :math:`x`, weight matrices :math:`W`,
    and bias vectors :math:`b`.
    This function calculates hidden states :math:`h_t` and :math:`c_t` for each
    time :math:`t` from input :math:`x_t`.
    .. math::
       i_t &= \\sigma(W_0 x_t + W_4 h_{t-1} + b_0 + b_4) \\\\
       f_t &= \\sigma(W_1 x_t + W_5 h_{t-1} + b_1 + b_5) \\\\
       o_t &= \\sigma(W_2 x_t + W_6 h_{t-1} + b_2 + b_6) \\\\
       a_t &= \\tanh(W_3 x_t + W_7 h_{t-1} + b_3 + b_7) \\\\
       c_t &= f_t \\cdot c_{t-1} + i_t \\cdot a_t \\\\
       h_t &= o_t \\cdot \\tanh(c_t)
    As the function accepts a sequence, it calculates :math:`h_t` for all
    :math:`t` with one call. Eight weight matrices and eight bias vectors are
    required for each layer. So, when :math:`S` layers exist, you need to
    prepare :math:`8S` weight matrices and :math:`8S` bias vectors.
    If the number of layers ``n_layers`` is greater than :math:`1`, the input
    of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
    Note that all input variables except the first layer may have different
    shape from the first layer.
    .. warning::
       ``train`` and ``use_cudnn`` arguments are not supported anymore since
       v2.
       Instead, use ``chainer.using_config('train', train)`` and
       ``chainer.using_config('use_cudnn', use_cudnn)`` respectively.
       See :func:`chainer.using_config`.
    Args:
        n_layers(int): The number of layers.
        dropout_ratio(float): Dropout ratio.
        hx (~chainer.Variable): Variable holding stacked hidden states.
            Its shape is ``(S, B, N)`` where ``S`` is the number of layers and
            is equal to ``n_layers``, ``B`` is the mini-batch size, and ``N``
            is the dimension of the hidden units.
        cx (~chainer.Variable): Variable holding stacked cell states.
            It has the same shape as ``hx``.
        ws (list of list of :class:`~chainer.Variable`): Weight matrices.
            ``ws[i]`` represents the weights for the i-th layer.
            Each ``ws[i]`` is a list containing eight matrices.
            ``ws[i][j]`` corresponds to :math:`W_j` in the equation.
            Only ``ws[0][j]`` where ``0 <= j < 4`` are ``(N, I)``-shaped as
            they are multiplied with input variables, where ``I`` is the size
            of the input and ``N`` is the dimension of the hidden units. All
            other matrices are ``(N, N)``-shaped.
        bs (list of list of :class:`~chainer.Variable`): Bias vectors.
            ``bs[i]`` represents the biases for the i-th layer.
            Each ``bs[i]`` is a list containing eight vectors.
            ``bs[i][j]`` corresponds to :math:`b_j` in the equation.
            The shape of each matrix is ``(N,)`` where ``N`` is the dimension
            of the hidden units.
        xs (list of :class:`~chainer.Variable`):
            A list of :class:`~chainer.Variable`
            holding input values. Each element ``xs[t]`` holds input value
            for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is the
            mini-batch size for time ``t``. The sequences must be transposed.
            :func:`~chainer.functions.transpose_sequence` can be used to
            transpose a list of :class:`~chainer.Variable`\\ s each
            representing a sequence.
            When sequences have different lengths, they must be
            sorted in descending order of their lengths before transposing.
            So ``xs`` needs to satisfy
            ``xs[t].shape[0] >= xs[t + 1].shape[0]``.
    Returns:
        tuple: This function returns a tuple containing three elements,
        ``hy``, ``cy`` and ``ys``.
        - ``hy`` is an updated hidden states whose shape is the same as
          ``hx``.
        - ``cy`` is an updated cell states whose shape is the same as
          ``cx``.
        - ``ys`` is a list of :class:`~chainer.Variable` . Each element
          ``ys[t]`` holds hidden states of the last layer corresponding
          to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
          the mini-batch size for time ``t``, and ``N`` is size of hidden
          units. Note that ``B_t`` is the same value as ``xs[t].shape[0]``.
    .. note::
       The dimension of hidden units is limited to only one size ``N``. If you
       want to use variable dimension of hidden units, please use
       :class:`chainer.functions.lstm`.
    .. seealso::
       :func:`chainer.functions.lstm`
    .. admonition:: Example
        >>> batchs = [3, 2, 1]  # support variable length sequences
        >>> in_size, out_size, n_layers = 3, 2, 2
        >>> dropout_ratio = 0.0
        >>> xs = [np.ones((b, in_size)).astype(np.float32) for b in batchs]
        >>> [x.shape for x in xs]
        [(3, 3), (2, 3), (1, 3)]
        >>> h_shape = (n_layers, batchs[0], out_size)
        >>> hx = np.ones(h_shape).astype(np.float32)
        >>> cx = np.ones(h_shape).astype(np.float32)
        >>> w_in = lambda i, j: in_size if i == 0 and j < 4 else out_size
        >>> ws = []
        >>> bs = []
        >>> for n in range(n_layers):
        ...     ws.append([np.ones((out_size, w_in(n, i))).astype(np.float32) \
for i in range(8)])
        ...     bs.append([np.ones((out_size,)).astype(np.float32) \
for _ in range(8)])
        ...
        >>> ws[0][0].shape  # ws[0][:4].shape are (out_size, in_size)
        (2, 3)
        >>> ws[1][0].shape  # others are (out_size, out_size)
        (2, 2)
        >>> bs[0][0].shape
        (2,)
        >>> hy, cy, ys = F.n_step_lstm(
        ...     n_layers, dropout_ratio, hx, cx, ws, bs, xs)
        >>> hy.shape
        (2, 3, 2)
        >>> cy.shape
        (2, 3, 2)
        >>> [y.shape for y in ys]
        [(3, 2), (2, 2), (1, 2)]
    """
    # Delegate to the shared implementation with bi-direction disabled.
    return n_step_lstm_base(n_layers, dropout_ratio, hx, cx, ws, bs, xs,
                            use_bi_direction=False, **kwargs)
def n_step_bilstm(
        n_layers, dropout_ratio, hx, cx, ws, bs, xs, **kwargs):
    """n_step_bilstm(n_layers, dropout_ratio, hx, cx, ws, bs, xs)
    Stacked Bi-directional Long Short-Term Memory function.
    This function calculates stacked Bi-directional LSTM with sequences.
    This function gets an initial hidden state :math:`h_0`, an initial cell
    state :math:`c_0`, an input sequence :math:`x`, weight matrices :math:`W`,
    and bias vectors :math:`b`.
    This function calculates hidden states :math:`h_t` and :math:`c_t` for each
    time :math:`t` from input :math:`x_t`.
    .. math::
        i^{f}_t &=& \\sigma(W^{f}_0 x_t + W^{f}_4 h_{t-1} + b^{f}_0 + b^{f}_4),
        \\\\
        f^{f}_t &=& \\sigma(W^{f}_1 x_t + W^{f}_5 h_{t-1} + b^{f}_1 + b^{f}_5),
        \\\\
        o^{f}_t &=& \\sigma(W^{f}_2 x_t + W^{f}_6 h_{t-1} + b^{f}_2 + b^{f}_6),
        \\\\
        a^{f}_t &=& \\tanh(W^{f}_3 x_t + W^{f}_7 h_{t-1} + b^{f}_3 + b^{f}_7),
        \\\\
        c^{f}_t &=& f^{f}_t \\cdot c^{f}_{t-1} + i^{f}_t \\cdot a^{f}_t,
        \\\\
        h^{f}_t &=& o^{f}_t \\cdot \\tanh(c^{f}_t),
        \\\\
        i^{b}_t &=& \\sigma(W^{b}_0 x_t + W^{b}_4 h_{t-1} + b^{b}_0 + b^{b}_4),
        \\\\
        f^{b}_t &=& \\sigma(W^{b}_1 x_t + W^{b}_5 h_{t-1} + b^{b}_1 + b^{b}_5),
        \\\\
        o^{b}_t &=& \\sigma(W^{b}_2 x_t + W^{b}_6 h_{t-1} + b^{b}_2 + b^{b}_6),
        \\\\
        a^{b}_t &=& \\tanh(W^{b}_3 x_t + W^{b}_7 h_{t-1} + b^{b}_3 + b^{b}_7),
        \\\\
        c^{b}_t &=& f^{b}_t \\cdot c^{b}_{t-1} + i^{b}_t \\cdot a^{b}_t, \\\\
        h^{b}_t &=& o^{b}_t \\cdot \\tanh(c^{b}_t), \\\\
        h_t &=& [h^{f}_t; h^{b}_t]
    where :math:`W^{f}` is the weight matrices for forward-LSTM, :math:`W^{b}`
    is weight matrices for backward-LSTM.
    As the function accepts a sequence, it calculates :math:`h_t` for all
    :math:`t` with one call. Eight weight matrices and eight bias vectors are
    required for each layer of each direction. So, when :math:`S` layers
    exist, you need to prepare :math:`16S` weight matrices and :math:`16S`
    bias vectors.
    If the number of layers ``n_layers`` is greater than :math:`1`, the input
    of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
    Note that all input variables except the first layer may have different
    shape from the first layer.
    .. warning::
       ``train`` and ``use_cudnn`` arguments are not supported anymore since
       v2.
       Instead, use ``chainer.using_config('train', train)`` and
       ``chainer.using_config('use_cudnn', use_cudnn)`` respectively.
       See :func:`chainer.using_config`.
    Args:
        n_layers(int): The number of layers.
        dropout_ratio(float): Dropout ratio.
        hx (~chainer.Variable): Variable holding stacked hidden states.
            Its shape is ``(2S, B, N)`` where ``S`` is the number of layers and
            is equal to ``n_layers``, ``B`` is the mini-batch size, and ``N``
            is the dimension of the hidden units. Because of bi-direction, the
            first dimension length is ``2S``.
        cx (~chainer.Variable): Variable holding stacked cell states.
            It has the same shape as ``hx``.
        ws (list of list of :class:`~chainer.Variable`): Weight matrices.
            ``ws[2 * l + m]`` represents the weights for the l-th layer of
            the m-th direction. (``m == 0`` means the forward direction and
            ``m == 1`` means the backward direction.) Each ``ws[i]`` is a
            list containing eight matrices. ``ws[i][j]`` corresponds to
            :math:`W_j` in the equation. ``ws[0][j]`` and ``ws[1][j]`` where
            ``0 <= j < 4`` are ``(N, I)``-shaped because they are multiplied
            with input variables, where ``I`` is the size of the input.
            ``ws[i][j]`` where ``2 <= i`` and ``0 <= j < 4`` are
            ``(N, 2N)``-shaped because they are multiplied with two hidden
            layers :math:`h_t = [h^{f}_t; h^{b}_t]`. All other matrices are
            ``(N, N)``-shaped.
        bs (list of list of :class:`~chainer.Variable`): Bias vectors.
            ``bs[2 * l + m]`` represents the weights for the l-th layer of
            m-th direction. (``m == 0`` means the forward direction and
            ``m == 1`` means the backward direction.)
            Each ``bs[i]`` is a list containing eight vectors.
            ``bs[i][j]`` corresponds to :math:`b_j` in the equation.
            The shape of each matrix is ``(N,)``.
        xs (list of :class:`~chainer.Variable`):
            A list of :class:`~chainer.Variable`
            holding input values. Each element ``xs[t]`` holds input value
            for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is the
            mini-batch size for time ``t``. The sequences must be transposed.
            :func:`~chainer.functions.transpose_sequence` can be used to
            transpose a list of :class:`~chainer.Variable`\\ s each
            representing a sequence.
            When sequences have different lengths, they must be
            sorted in descending order of their lengths before transposing.
            So ``xs`` needs to satisfy
            ``xs[t].shape[0] >= xs[t + 1].shape[0]``.
    Returns:
        tuple: This function returns a tuple containing three elements,
        ``hy``, ``cy`` and ``ys``.
        - ``hy`` is an updated hidden states whose shape is the same as
          ``hx``.
        - ``cy`` is an updated cell states whose shape is the same as
          ``cx``.
        - ``ys`` is a list of :class:`~chainer.Variable` . Each element
          ``ys[t]`` holds hidden states of the last layer corresponding
          to an input ``xs[t]``. Its shape is ``(B_t, 2N)`` where ``B_t``
          is the mini-batch size for time ``t``, and ``N`` is size of
          hidden units. Note that ``B_t`` is the same value as
          ``xs[t].shape[0]``.
    .. admonition:: Example
        >>> batchs = [3, 2, 1]  # support variable length sequences
        >>> in_size, out_size, n_layers = 3, 2, 2
        >>> dropout_ratio = 0.0
        >>> xs = [np.ones((b, in_size)).astype(np.float32) for b in batchs]
        >>> [x.shape for x in xs]
        [(3, 3), (2, 3), (1, 3)]
        >>> h_shape = (n_layers * 2, batchs[0], out_size)
        >>> hx = np.ones(h_shape).astype(np.float32)
        >>> cx = np.ones(h_shape).astype(np.float32)
        >>> def w_in(i, j):
        ...     if i == 0 and j < 4:
        ...         return in_size
        ...     elif i > 0 and j < 4:
        ...         return out_size * 2
        ...     else:
        ...         return out_size
        ...
        >>> ws = []
        >>> bs = []
        >>> for n in range(n_layers):
        ...     for direction in (0, 1):
        ...         ws.append([np.ones((out_size, w_in(n, i))).\
astype(np.float32) for i in range(8)])
        ...         bs.append([np.ones((out_size,)).astype(np.float32) \
for _ in range(8)])
        ...
        >>> ws[0][0].shape  # ws[0:2][:4].shape are (out_size, in_size)
        (2, 3)
        >>> ws[2][0].shape  # ws[2:][:4].shape are (out_size, 2 * out_size)
        (2, 4)
        >>> ws[0][4].shape  # others are (out_size, out_size)
        (2, 2)
        >>> bs[0][0].shape
        (2,)
        >>> hy, cy, ys = F.n_step_bilstm(
        ...     n_layers, dropout_ratio, hx, cx, ws, bs, xs)
        >>> hy.shape
        (4, 3, 2)
        >>> cy.shape
        (4, 3, 2)
        >>> [y.shape for y in ys]
        [(3, 4), (2, 4), (1, 4)]
    """
    # Delegate to the shared implementation with bi-direction enabled.
    return n_step_lstm_base(n_layers, dropout_ratio, hx, cx, ws, bs, xs,
                            use_bi_direction=True, **kwargs)
def n_step_lstm_base(
        n_layers, dropout_ratio, hx, cx, ws, bs, xs, use_bi_direction,
        **kwargs):
    """Base function for Stack LSTM/BiLSTM functions.
    This function is used at :func:`chainer.functions.n_step_lstm` and
    :func:`chainer.functions.n_step_bilstm`.
    This function's behavior depends on following arguments,
    ``activation`` and ``use_bi_direction``.
    Args:
        n_layers(int): The number of layers.
        dropout_ratio(float): Dropout ratio.
        hx (~chainer.Variable): Variable holding stacked hidden states.
            Its shape is ``(S, B, N)`` where ``S`` is the number of layers and
            is equal to ``n_layers``, ``B`` is the mini-batch size, and ``N``
            is the dimension of the hidden units.
        cx (~chainer.Variable): Variable holding stacked cell states.
            It has the same shape as ``hx``.
        ws (list of list of :class:`~chainer.Variable`): Weight matrices.
            ``ws[i]`` represents the weights for the i-th layer.
            Each ``ws[i]`` is a list containing eight matrices.
            ``ws[i][j]`` corresponds to :math:`W_j` in the equation.
            Only ``ws[0][j]`` where ``0 <= j < 4`` are ``(N, I)``-shape as they
            are multiplied with input variables, where ``I`` is the size of
            the input and ``N`` is the dimension of the hidden units. All
            other matrices are ``(N, N)``-shaped.
        bs (list of list of :class:`~chainer.Variable`): Bias vectors.
            ``bs[i]`` represents the biases for the i-th layer.
            Each ``bs[i]`` is a list containing eight vectors.
            ``bs[i][j]`` corresponds to :math:`b_j` in the equation.
            The shape of each matrix is ``(N,)``.
        xs (list of :class:`~chainer.Variable`):
            A list of :class:`~chainer.Variable`
            holding input values. Each element ``xs[t]`` holds input value
            for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is the
            mini-batch size for time ``t``. The sequences must be transposed.
            :func:`~chainer.functions.transpose_sequence` can be used to
            transpose a list of :class:`~chainer.Variable`\\ s each
            representing a sequence.
            When sequences have different lengths, they must be
            sorted in descending order of their lengths before transposing.
            So ``xs`` needs to satisfy
            ``xs[t].shape[0] >= xs[t + 1].shape[0]``.
        use_bi_direction (bool): If ``True``, this function uses Bi-directional
            LSTM.
    Returns:
        tuple: This function returns a tuple containing three elements,
        ``hy``, ``cy`` and ``ys``.
        - ``hy`` is an updated hidden states whose shape is the same as
          ``hx``.
        - ``cy`` is an updated cell states whose shape is the same as
          ``cx``.
        - ``ys`` is a list of :class:`~chainer.Variable` . Each element
          ``ys[t]`` holds hidden states of the last layer corresponding
          to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
          the mini-batch size for time ``t``. Note that ``B_t`` is the same
          value as ``xs[t].shape[0]``.
    .. seealso::
       :func:`chainer.functions.n_step_lstm`
       :func:`chainer.functions.n_step_bilstm`
    """
    # Reject the pre-v2 `train`/`use_cudnn` keyword arguments with a helpful
    # message; any other unexpected keyword also raises here.
    if kwargs:
        argument.check_unexpected_kwargs(
            kwargs, train='train argument is not supported anymore. '
            'Use chainer.using_config',
            use_cudnn='use_cudnn argument is not supported anymore. '
            'Use chainer.using_config')
        argument.assert_kwargs_empty(kwargs)
    xp = cuda.get_array_module(hx, hx.data)
    if xp is not numpy and chainer.should_use_cudnn('>=auto', 5000):
        # cuDNN path: concatenate the per-step mini-batches into one array,
        # fuse the per-layer weights/biases, and run the whole stacked
        # (Bi)LSTM in a single cuDNN call.
        handle = cudnn.get_handle()
        states = cuda.get_cudnn_dropout_states()
        cudnn.set_dropout_descriptor(states._desc, handle, dropout_ratio)
        lengths = [len(x) for x in xs]
        xs = chainer.functions.concat(xs, axis=0)
        w = n_step_rnn.cudnn_rnn_weight_concat(
            n_layers, states, use_bi_direction, 'lstm', ws, bs)
        if use_bi_direction:
            rnn = NStepBiLSTM
        else:
            rnn = NStepLSTM
        hy, cy, ys = rnn(n_layers, states, lengths)(hx, cx, w, xs)
        # Split the concatenated output back into per-time-step variables.
        sections = numpy.cumsum(lengths[:-1])
        ys = chainer.functions.split_axis(ys, sections, 0)
        return hy, cy, ys
    else:
        # Fallback path built from per-step `_lstm` applications.
        return n_step_rnn.n_step_rnn_impl(
            _lstm, n_layers, dropout_ratio, hx, cx, ws, bs, xs,
            use_bi_direction)
def _lstm(x, h, c, w, b):
    """One LSTM step for the non-cuDNN fallback path.

    Reorders the eight per-gate weights/biases into the gate layout expected
    by :func:`chainer.functions.activation.lstm` and applies one fused step.
    """
    gate_order = (2, 0, 1, 3)
    xw = _stack_weight([w[i] for i in gate_order])
    hw = _stack_weight([w[i + 4] for i in gate_order])
    xb = _stack_weight([b[i] for i in gate_order])
    hb = _stack_weight([b[i + 4] for i in gate_order])
    lstm_in = linear.linear(x, xw, xb) + linear.linear(h, hw, hb)
    c_bar, h_bar = lstm.lstm(c, lstm_in)
    return h_bar, c_bar
| |
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import functools
import itertools
import random
from neutron_lib.api.definitions import availability_zone as az_def
from neutron_lib import constants as lib_const
from neutron_lib.db import api as lib_db_api
from neutron_lib.exceptions import l3 as l3_exc
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import six
from neutron.common import utils
from neutron.conf.db import l3_hamode_db
from neutron.db.models import l3agent as rb_model
from neutron.objects import l3agent as rb_obj
LOG = logging.getLogger(__name__)
# Register the L3 HA options (e.g. max_l3_agents_per_router) so schedulers
# can read them from cfg.CONF.
cfg.CONF.register_opts(l3_hamode_db.L3_HA_OPTS)
@six.add_metaclass(abc.ABCMeta)
class L3Scheduler(object):
    """Base class for L3 agent schedulers.

    Subclasses supply the agent-selection policy by implementing
    `_choose_router_agent` and `_choose_router_agents_for_ha`.
    """
    def __init__(self):
        # Upper bound on the number of L3 agents an HA router is scheduled
        # to; a falsy value means "use all candidate agents" (see
        # _get_num_of_agents_for_ha).
        self.max_ha_agents = cfg.CONF.max_l3_agents_per_router
    def schedule(self, plugin, context, router_id, candidates=None):
        """Schedule the router to an active L3 agent.
        Schedule the router only if it is not already scheduled.
        `candidates`, when given, restricts the set of agents considered.
        """
        return self._schedule_router(
            plugin, context, router_id, candidates=candidates)
def _router_has_binding(self, context, router_id, l3_agent_id):
router_binding_model = rb_model.RouterL3AgentBinding
query = context.session.query(router_binding_model)
query = query.filter(router_binding_model.router_id == router_id,
router_binding_model.l3_agent_id == l3_agent_id)
return query.count() > 0
def _get_routers_can_schedule(self, plugin, context, routers, l3_agent):
"""Get the subset of routers that can be scheduled on the L3 agent."""
ids_to_discard = set()
for router in routers:
# check if the l3 agent is compatible with the router
candidates = plugin.get_l3_agent_candidates(
context, router, [l3_agent])
if not candidates:
ids_to_discard.add(router['id'])
return [r for r in routers if r['id'] not in ids_to_discard]
def auto_schedule_routers(self, plugin, context, host):
"""Schedule under-scheduled routers to L3 Agents.
An under-scheduled router is a router that is either completely
un-scheduled (scheduled to 0 agents), or an HA router that is
under-scheduled (scheduled to less than max_l3_agents configuration
option. The function finds all the under-scheduled routers and
schedules them.
:param host: if unspecified, under-scheduled routers are scheduled to
all agents (not necessarily from the requesting host). If
specified, under-scheduled routers are scheduled only to
the agent on 'host'.
"""
l3_agent = plugin.get_enabled_agent_on_host(
context, lib_const.AGENT_TYPE_L3, host)
if not l3_agent:
return
underscheduled_routers = self._get_underscheduled_routers(
plugin, context)
target_routers = self._get_routers_can_schedule(
plugin, context, underscheduled_routers, l3_agent)
for router in target_routers:
self.schedule(plugin, context, router['id'], candidates=[l3_agent])
def _get_underscheduled_routers(self, plugin, context):
underscheduled_routers = []
max_agents_for_ha = plugin.get_number_of_agents_for_scheduling(context)
for router, count in plugin.get_routers_l3_agents_count(context):
if (count < 1 or
router.get('ha', False) and count < max_agents_for_ha):
# Either the router was un-scheduled (scheduled to 0 agents),
# or it's an HA router and it was under-scheduled (scheduled to
# less than max_agents_for_ha). Either way, it should be added
# to the list of routers we want to handle.
underscheduled_routers.append(router)
return underscheduled_routers
def _get_candidates(self, plugin, context, sync_router):
"""Return L3 agents where a router could be scheduled."""
is_ha = sync_router.get('ha', False)
with context.session.begin(subtransactions=True):
# allow one router is hosted by just
# one enabled l3 agent hosting since active is just a
# timing problem. Non-active l3 agent can return to
# active any time
current_l3_agents = plugin.get_l3_agents_hosting_routers(
context, [sync_router['id']], admin_state_up=True)
if current_l3_agents and not is_ha:
LOG.debug('Router %(router_id)s has already been hosted '
'by L3 agent %(agent_id)s',
{'router_id': sync_router['id'],
'agent_id': current_l3_agents[0]['id']})
return []
active_l3_agents = plugin.get_l3_agents(context, active=True)
if not active_l3_agents:
LOG.warning('No active L3 agents')
return []
candidates = plugin.get_l3_agent_candidates(context,
sync_router,
active_l3_agents)
if not candidates:
LOG.warning('No L3 agents can host the router %s',
sync_router['id'])
return candidates
def _bind_routers(self, plugin, context, routers, l3_agent):
for router in routers:
if router.get('ha'):
if not self._router_has_binding(context, router['id'],
l3_agent.id):
self.create_ha_port_and_bind(
plugin, context, router['id'],
router['tenant_id'], l3_agent)
else:
self.bind_router(plugin, context, router['id'], l3_agent.id)
    @lib_db_api.retry_db_errors
    def bind_router(self, plugin, context, router_id, agent_id,
                    is_manual_scheduling=False, is_ha=False):
        """Bind the router to the l3 agent which has been chosen.
        The function tries to create a RouterL3AgentBinding object and add it
        to the database. It returns the binding that was created or None if it
        failed to create it due to some conflict.
        In the HA router case, when creating a RouterL3AgentBinding (with some
        binding_index) fails because some other RouterL3AgentBinding was
        concurrently created using the same binding_index, then the function
        will retry to create an entry with a new binding_index. This creation
        will be retried up to db_api.MAX_RETRIES times.
        If, still in the HA router case, the creation failed because the
        router has already been bound to the l3 agent in question or has been
        removed (by a concurrent operation), then no further attempts will be
        made and the function will return None.
        Note that for non-HA routers, the function will always perform exactly
        one try, regardless of the error preventing the addition of a new
        RouterL3AgentBinding object to the database.
        """
        # Idempotence guard: the (router, agent) pair may already be bound.
        if rb_obj.RouterL3AgentBinding.objects_exist(
                context, router_id=router_id, l3_agent_id=agent_id):
            LOG.debug('Router %(router_id)s has already been scheduled '
                      'to L3 agent %(agent_id)s.',
                      {'router_id': router_id, 'agent_id': agent_id})
            return
        if not is_ha:
            # Non-HA routers always occupy the lowest binding_index; if that
            # slot is taken the router is already scheduled somewhere.
            binding_index = rb_model.LOWEST_BINDING_INDEX
            if rb_obj.RouterL3AgentBinding.objects_exist(
                    context, router_id=router_id, binding_index=binding_index):
                LOG.debug('Non-HA router %s has already been scheduled',
                          router_id)
                return
        else:
            # HA routers take the next vacant binding_index slot (one slot
            # per hosting agent).
            binding_index = plugin.get_vacant_binding_index(
                context, router_id, is_manual_scheduling)
            if binding_index < rb_model.LOWEST_BINDING_INDEX:
                LOG.debug('Unable to find a vacant binding_index for '
                          'router %(router_id)s and agent %(agent_id)s',
                          {'router_id': router_id,
                           'agent_id': agent_id})
                return
        try:
            binding = rb_obj.RouterL3AgentBinding(
                context, l3_agent_id=agent_id,
                router_id=router_id, binding_index=binding_index)
            binding.create()
            LOG.debug('Router %(router_id)s is scheduled to L3 agent '
                      '%(agent_id)s with binding_index %(binding_index)d',
                      {'router_id': router_id,
                       'agent_id': agent_id,
                       'binding_index': binding_index})
            return binding
        except db_exc.DBReferenceError:
            # The router row disappeared while we were creating the binding;
            # per the docstring, no further attempts are made.
            LOG.debug('Router %s has already been removed '
                      'by concurrent operation', router_id)
def _schedule_router(self, plugin, context, router_id,
candidates=None):
if not plugin.router_supports_scheduling(context, router_id):
return
sync_router = plugin.get_router(context, router_id)
candidates = candidates or self._get_candidates(
plugin, context, sync_router)
if not candidates:
return
elif sync_router.get('ha', False):
chosen_agents = self._bind_ha_router(plugin, context,
router_id,
sync_router.get('tenant_id'),
candidates)
if not chosen_agents:
return
chosen_agent = chosen_agents[-1]
else:
chosen_agent = self._choose_router_agent(
plugin, context, candidates)
self.bind_router(plugin, context, router_id, chosen_agent.id)
return chosen_agent
    @abc.abstractmethod
    def _choose_router_agent(self, plugin, context, candidates):
        """Choose an agent from candidates based on a specific policy.

        :param plugin: the L3 service plugin.
        :param context: request context.
        :param candidates: eligible agents to pick from.
        :returns: the single agent chosen to host the router.
        """
        pass
    @abc.abstractmethod
    def _choose_router_agents_for_ha(self, plugin, context, candidates):
        """Choose agents from candidates based on a specific policy.

        :param plugin: the L3 service plugin.
        :param context: request context.
        :param candidates: eligible agents to pick from.
        :returns: the list of agents chosen to host an HA router.
        """
        pass
def _get_num_of_agents_for_ha(self, candidates_count):
return (min(self.max_ha_agents, candidates_count) if self.max_ha_agents
else candidates_count)
def _add_port_from_net_and_ensure_vr_id(self, plugin, ctxt, router_db,
tenant_id, ha_net):
plugin._ensure_vr_id(ctxt, router_db, ha_net)
return plugin.add_ha_port(ctxt, router_db.id, ha_net.network_id,
tenant_id)
    def create_ha_port_and_bind(self, plugin, context, router_id,
                                tenant_id, agent, is_manual_scheduling=False):
        """Creates and binds a new HA port for this agent.

        The router/agent binding is attempted first (it fails fast under
        concurrent scheduling); only then is the HA port created together
        with its HA-network dependency.  On duplicate binding the port is
        deleted again; if the router disappeared concurrently, the HA
        network is cleaned up if possible.
        """
        ctxt = context.elevated()
        router_db = plugin._get_router(ctxt, router_id)
        # create_object_with_dependency() uses these partials: `creator`
        # builds the HA port once the HA network (fetched by `dep_getter`
        # or built by `dep_creator`) exists; `dep_deleter` rolls it back.
        creator = functools.partial(self._add_port_from_net_and_ensure_vr_id,
                                    plugin, ctxt, router_db, tenant_id)
        dep_getter = functools.partial(plugin.get_ha_network, ctxt, tenant_id)
        dep_creator = functools.partial(plugin._create_ha_network,
                                        ctxt, tenant_id)
        dep_deleter = functools.partial(plugin._delete_ha_network, ctxt)
        dep_id_attr = 'network_id'
        # This might fail in case of concurrent calls, which is good for us
        # as we can skip the rest of this function.
        binding = self.bind_router(
            plugin, context, router_id, agent['id'],
            is_manual_scheduling=is_manual_scheduling, is_ha=True)
        if not binding:
            return
        try:
            port_binding = utils.create_object_with_dependency(
                creator, dep_getter, dep_creator,
                dep_id_attr, dep_deleter)[0]
            # Attach the freshly created port binding to this agent inside
            # a nested transaction.
            with lib_db_api.autonested_transaction(context.session):
                port_binding.l3_agent_id = agent['id']
        except db_exc.DBDuplicateEntry:
            LOG.debug("Router %(router)s already scheduled for agent "
                      "%(agent)s", {'router': router_id,
                                    'agent': agent['id']})
            port_id = port_binding.port_id
            # Below call will also delete entry from L3HARouterAgentPortBinding
            # and RouterPort tables
            plugin._core_plugin.delete_port(context, port_id,
                                            l3_port_check=False)
        except l3_exc.RouterNotFound:
            LOG.debug('Router %s has already been removed '
                      'by concurrent operation', router_id)
            # we try to clear the HA network here in case the port we created
            # blocked the concurrent router delete operation from getting rid
            # of the HA network
            ha_net = plugin.get_ha_network(ctxt, tenant_id)
            if ha_net:
                plugin.safe_delete_ha_network(ctxt, ha_net, tenant_id)
def _filter_scheduled_agents(self, plugin, context, router_id, candidates):
hosting = plugin.get_l3_agents_hosting_routers(context, [router_id])
# convert to comparable types
hosting_list = [tuple(host) for host in hosting]
return list(set(candidates) - set(hosting_list))
def _bind_ha_router(self, plugin, context, router_id,
tenant_id, candidates):
"""Bind a HA router to agents based on a specific policy."""
candidates = self._filter_scheduled_agents(plugin, context, router_id,
candidates)
chosen_agents = self._choose_router_agents_for_ha(
plugin, context, candidates)
for agent in chosen_agents:
self.create_ha_port_and_bind(plugin, context, router_id,
tenant_id, agent)
return chosen_agents
class ChanceScheduler(L3Scheduler):
    """Randomly allocate an L3 agent for a router."""

    def _choose_router_agent(self, plugin, context, candidates):
        """Pick one candidate agent uniformly at random."""
        return random.choice(candidates)

    def _choose_router_agents_for_ha(self, plugin, context, candidates):
        """Pick a random subset of candidates sized for HA scheduling."""
        count = self._get_num_of_agents_for_ha(len(candidates))
        return random.sample(candidates, count)
class LeastRoutersScheduler(L3Scheduler):
    """Allocate to an L3 agent with the least number of routers bound."""

    def _choose_router_agent(self, plugin, context, candidates):
        """Return the candidate currently hosting the fewest routers."""
        candidate_ids = [agent['id'] for agent in candidates]
        return plugin.get_l3_agent_with_min_routers(context, candidate_ids)

    def _choose_router_agents_for_ha(self, plugin, context, candidates):
        """Return the least-loaded candidates, up to the HA agent count."""
        count = self._get_num_of_agents_for_ha(len(candidates))
        ranked = plugin.get_l3_agents_ordered_by_num_routers(
            context, [agent['id'] for agent in candidates])
        return ranked[:count]
class AZLeastRoutersScheduler(LeastRoutersScheduler):
    """Availability zone aware scheduler.

    If a router is an HA router, allocate L3 agents distributed across AZs
    according to the router's az_hints.
    """
    def _get_az_hints(self, router):
        # Fall back to the configured default AZs when the router carries
        # no explicit hints.
        return (router.get(az_def.AZ_HINTS) or
                cfg.CONF.default_availability_zones)
    def _get_routers_can_schedule(self, plugin, context, routers, l3_agent):
        """Overwrite L3Scheduler's method to filter by availability zone."""
        target_routers = []
        for r in routers:
            az_hints = self._get_az_hints(r)
            # A router is eligible for this agent when it has no AZ hints
            # or the agent's AZ is among them.
            if not az_hints or l3_agent['availability_zone'] in az_hints:
                target_routers.append(r)
        if not target_routers:
            return []
        return super(AZLeastRoutersScheduler, self)._get_routers_can_schedule(
            plugin, context, target_routers, l3_agent)
    def _get_candidates(self, plugin, context, sync_router):
        """Overwrite L3Scheduler's method to filter by availability zone."""
        all_candidates = (
            super(AZLeastRoutersScheduler, self)._get_candidates(
                plugin, context, sync_router))
        candidates = []
        az_hints = self._get_az_hints(sync_router)
        for agent in all_candidates:
            if not az_hints or agent['availability_zone'] in az_hints:
                candidates.append(agent)
        return candidates
    def _choose_router_agents_for_ha(self, plugin, context, candidates):
        """Pick agents round-robin across AZs, least-loaded first in each."""
        ordered_agents = plugin.get_l3_agents_ordered_by_num_routers(
            context, [candidate['id'] for candidate in candidates])
        num_agents = self._get_num_of_agents_for_ha(len(ordered_agents))
        # Order is kept in each az
        group_by_az = collections.defaultdict(list)
        for agent in ordered_agents:
            az = agent['availability_zone']
            group_by_az[az].append(agent)
        selected_agents = []
        # Cycle through the AZs, taking the least-loaded remaining agent
        # from each in turn.  num_agents never exceeds the total number of
        # agents, so the loop terminates before all per-AZ lists empty out.
        for az, agents in itertools.cycle(group_by_az.items()):
            if not agents:
                continue
            selected_agents.append(agents.pop(0))
            if len(selected_agents) >= num_agents:
                break
        return selected_agents
| |
# -*- coding: utf-8 -*-
from os.path import dirname, join, isfile
from docutils import nodes
from docutils.statemachine import ViewList
from sphinx.util.compat import Directive
from sphinx.util.nodes import nested_parse_with_titles
from yaml import load, dump
# -- Helper functions --------------------------------------------------
def load_api(file_name):
    """Loads a YAML API file and returns the parsed structure."""
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects.  Fine for trusted, project-local API files,
    # but switch to yaml.safe_load if the input is ever untrusted.
    with open(file_name) as f:
        return load(f)
def flatmap(f, xs):
    """Map *f* over *xs* and concatenate the results into one list.

    Uses a comprehension instead of ``sum(map(f, xs), [])``: it accepts any
    iterable returned by *f* (not only lists) and avoids the quadratic
    behaviour of repeated list concatenation on long inputs.
    """
    return [y for x in xs for y in f(x)]
def find_by_path(path, api):
    """Finds an object by path in an API object. Path names are separated by dots.

    Starts from a synthetic root wrapping *api* as its members and narrows
    down one path component at a time; may resolve to several objects.
    """
    # `reduce` is a builtin on Python 2 but lives in functools on Python 3;
    # importing it locally keeps both interpreters working.
    from functools import reduce
    names = path.split('.')
    return reduce(lambda r, x: flatmap(lambda y: find_by_name(x, y.get('members', [])), r),
                  names,
                  [{"members": api}])
def find_by_name(name, members):
    """Finds an object by name in the API. May resolve to more than one.

    Returns a concrete list (Python 2 ``filter`` returned a list; Python 3
    returns a lazy iterator, which breaks concatenation and re-iteration).
    """
    return [member for member in members if member['name'] == name]
def filter_by_type(kind, members):
    """Filters a list of members by type.

    Returns a concrete list (identical to Python 2 ``filter``; on Python 3
    a bare ``filter`` would be a one-shot iterator).
    """
    return [member for member in members if member['type'] == kind]
def get_parent(path):
    """Return the dotted path of *path*'s parent, or None at the top level."""
    parent = '.'.join(path.split('.')[:-1])
    return parent or None
def group_by_category(members):
    """Groups members by their categories.

    Members without a category are grouped under the key ``None``.
    """
    grouped = {}
    for member in members:
        grouped.setdefault(member.get("category"), []).append(member)
    return grouped
def bool_option(arg):
    """Used to convert flag options in a directive.

    Only the flag's presence matters, so any value maps to True.
    """
    return True
def qualify_name(kind, parent, name):
    """Join *name* onto *parent*, inserting 'prototype.' for instance members."""
    if parent is None:
        return name
    prefix = 'prototype.' if kind in ('method', 'attribute') else ''
    return '{0}.{1}{2}'.format(parent, prefix, name)
def without_nones(xs):
    """Return the items of *xs* that are not None, as a list.

    A concrete list (not a lazy ``filter`` object) so callers can iterate
    it more than once on Python 3; identical behaviour on Python 2.
    """
    return [x for x in xs if x is not None]
def identity(a):
    """Return *a* unchanged; used as the default transform in maybe()."""
    return a
def maybe(v, t, f = identity):
    """Apply *t* to *v*, unless *v* is None — then apply *f* (identity by default)."""
    return f(v) if v is None else t(v)
def normalise_options(data):
    """Render data['meta'] as an options block, or None when absent."""
    meta = data.get('meta')
    if meta is None:
        return None
    return PrettyBlock([PrettyOptions(meta), PrettyText('')])
def normalise_signature(data):
    """Render data['signature'] as a code block, or None when absent."""
    sig = data.get('signature')
    if sig is None:
        return None
    return rst_signature(sig)
def normalise_data(data, parent):
    """Build the normalised view of *data*: qualified name, meta and signature."""
    qualified = qualify_name(data['type'], parent, data['name'])
    meta = normalise_options(data)
    signature = normalise_signature(data)
    return {"name": qualified, "meta": meta, "signature": signature}
def toc_name(data):
    """Return the table-of-contents label for *data*, styled by member kind."""
    templates = {
        "staticmethod": ".{0}()",
        "method": "#{0}()",
        "attribute": "#{0}",
        "function": "{0}()",
    }
    # Anything else (modules, classes, data, ...) is shown by bare name.
    return templates.get(data['type'], "{0}").format(data['name'])
# -- Pretty printing ---------------------------------------------------
class PrettyPrinter(object):
    """Base class for renderable text nodes; subclasses implement render()."""
    pass
class PrettyText(PrettyPrinter):
    """A literal chunk of text, rendered verbatim."""

    def __init__(self, value):
        self.value = value

    def render(self):
        """Return the stored text unchanged."""
        return self.value
class PrettySeq(PrettyPrinter):
    """A sequence of nodes rendered back-to-back with no separator."""

    def __init__(self, values):
        self.values = values

    def render(self):
        """Concatenate the rendering of every child node."""
        return ''.join(node.render() for node in self.values)
class PrettyBlock(PrettyPrinter):
    """Nodes joined with newlines, each output line indented uniformly."""

    def __init__(self, values, indent = 0):
        self.values = values
        self.indent = indent

    def render(self):
        """Render children, then prepend the indent to every resulting line."""
        joined = '\n'.join(node.render() for node in self.values)
        pad = ' ' * self.indent
        return '\n'.join(pad + line for line in joined.split('\n'))
class PrettyOptions(PrettyPrinter):
    """A reST field list (``:name:`` entries); empty values are dropped."""

    def __init__(self, values):
        self.values = values

    def render(self):
        """Render each non-empty option as ``:name:`` with an indented body."""
        chunks = []
        for name, value in self.values.items():
            if value == '':
                continue
            body = '\n'.join('   ' + line for line in value.split('\n'))
            chunks.append(':' + name + ':' + '\n' + body)
        return '\n'.join(chunks)
# -- Rendering functions -----------------------------------------------
def rst_directive(name, arg = '', opts = None, content = None):
    """Render a reST directive ``.. name:: arg`` with optional options/body.

    *opts* is a PrettyOptions (or None to omit); *content* is the indented
    body, defaulting to empty text.
    """
    # Build the default body lazily: a module-level default instance would
    # be shared between calls (the mutable-default-argument pitfall).
    if content is None:
        content = PrettyText('')
    return PrettyBlock([
        PrettySeq([
            PrettyText('.. '),
            PrettyText(name),
            PrettyText(':: '),
            PrettyText(arg)
        ]),
        PrettyBlock(without_nones([
            opts,
            PrettyText(''), # We need a blank line between the two
            content,
            PrettyText('')
        ]), 3)
    ])
def rst_title(title, fill = '-'):
    """Render a reST section title underlined with *fill* characters."""
    underline = fill * len(title)
    return PrettyBlock([
        PrettyText(''),
        PrettyText(title),
        PrettyText(underline),
        PrettyText('')
    ])
def rst_signature(sig):
    """Render *sig* inside a Haskell ``code-block`` directive."""
    body = PrettyText(sig)
    return rst_directive('code-block', 'haskell', content = body)
def has_page(source, name):
    """True when a ``<name>.rst`` page exists beside the *source* document."""
    page = join(dirname(source), '{0}.rst'.format(name))
    return isfile(page)
def rst_link(title, doc, source = None):
    """Render a ``:doc:`` link to *doc*, or None when the page doesn't exist.

    With no *source* the link is emitted unconditionally; otherwise it is
    only emitted when ``<doc>.rst`` exists next to *source* (delegated to
    the shared ``has_page`` helper instead of re-implementing the check).
    """
    link = rst_directive(
        'rst-class', 'detail-link',
        content = PrettyText(':doc:`' + title + ' <' + doc + '>`')
    )
    if source is None or has_page(source, doc):
        return link
    return None
def rst_module(data, parent = None, more_content = None, brief = False, **kwargs):
    """Render *data* as a ``.. module::`` directive with synopsis and members."""
    # NOTE(review): `brief` is accepted for signature parity with the other
    # renderers (rst_class/rst_object) but is unused here — confirm intended.
    x = normalise_data(data, parent)
    if more_content is not None:
        # Trailing blank line separates the extra content from what follows.
        more_content = PrettyText(more_content + '\n\n')
    return PrettyBlock(without_nones([
        rst_directive(
            'module',
            x['name'],
            PrettyOptions({
                "synopsis": data.get('synopsis', ''),
                "platform": data.get('platform', '')
            })
        ),
        x['meta'],
        x['signature'],
        PrettyText(data.get('synopsis', '')),
        PrettyText(''),
        more_content,
        rst_members(x['name'], data.get('members'), **kwargs)
    ]))
def rst_class(data, parent = None, more_content = None, brief = True, **kwargs):
    """Render *data* as a ``.. class::`` directive.

    In *brief* mode only a summary plus a '+' detail link is emitted (with
    ``:noindex:`` when a dedicated page exists); otherwise the class is
    rendered in full, members included.
    """
    x = normalise_data(data, parent)
    source = kwargs.get('source')
    if more_content is not None:
        more_content = PrettyText(more_content + '\n\n')
    if brief:
        # Avoid double-indexing when the class has its own page.
        meta = PrettyOptions({ "noindex": "" }) if has_page(source, data['name']) else None
        link = rst_link('+', data['name'], kwargs.get('source'))
        mems = None
    else:
        meta = None
        link = None
        mems = rst_members(data['name'], data.get('members'), **kwargs)
    return PrettyBlock(without_nones([
        rst_directive(
            'class',
            x['name'],
            meta,
            PrettyBlock(without_nones([
                x['meta'],
                x['signature'],
                PrettyText(data.get('synopsis', '')),
                PrettyText(''),
                more_content,
                link
            ]))
        ),
        PrettyText(''),
        mems
    ]))
def rst_object(data, parent = None, more_content = None, brief = True, **kwargs):
    """Render *data* as a directive of its own type (function, method, ...).

    In *brief* mode a hidden-heading preamble and a '+' detail link are
    emitted instead of the member list.
    """
    x = normalise_data(data, parent)
    # 'header' may override the displayed name while keeping the real name
    # for links and members.
    name = qualify_name(data['type'], parent, data.get('header', data['name']))
    if more_content is not None:
        more_content = PrettyText(more_content + '\n\n')
    if brief:
        meta = PrettyOptions({ "noindex": "" })
        link = rst_link('+', data['name'], kwargs.get('source'))
        mems = None
        # Hidden heading keeps the entry in the TOC without a visible title.
        preamble = PrettyBlock([
            PrettyText('.. rst-class:: hidden-heading'),
            PrettyText(''),
            rst_title(toc_name(data), '~')
        ])
    else:
        meta = None
        link = None
        mems = rst_members(x['name'], data.get('members'), **kwargs)
        preamble = None
    return PrettyBlock(without_nones([
        preamble,
        rst_directive(
            data['type'],
            name,
            meta,
            PrettyBlock(without_nones([
                x['meta'],
                x['signature'],
                PrettyText(data.get('synopsis', '')),
                PrettyText(''),
                more_content,
                link,
                mems
            ]))
        )
    ]))
def rst_members(parent, members, **kwargs):
    """Render *members* grouped into titled category sections, or None."""
    def render_category(pair):
        # One titled section per category; uncategorised members get a
        # fallback heading.
        (cat, mems) = pair
        return PrettyBlock([
            rst_title(cat or "Uncategorised"),
            rst_dirlist(parent, mems, **kwargs)
        ])
    if members is not None:
        # NOTE(review): category keys may mix None and str; sorting such a
        # mix works on Python 2 but raises TypeError on Python 3 — confirm
        # the target interpreter.
        items = sorted(group_by_category(members).items(), key=lambda x: x[0])
        return PrettyBlock(map(render_category, items))
    else:
        return None
def rst_dirlist(parent, members, **kwargs):
    """Render each member as an object block nested under *parent*."""
    return PrettyBlock([rst_object(member, parent, **kwargs)
                        for member in members])
# -- Directives --------------------------------------------------------
class ApiDirective(Directive):
    """Sphinx directive that renders an object from the YAML API file.

    The directive name ('apimodule', 'apiclass', ...) selects the renderer;
    the single argument is the dotted path of the object to document.
    """
    has_content = True
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    # Maps the object kind (directive name minus the 'api' prefix) to the
    # renderer that produces its reST.
    handlers = {
        "module": rst_module,
        "function": rst_object,
        "method": rst_object,
        "staticmethod": rst_object,
        "attribute": rst_object,
        "data": rst_object,
        "class": rst_class
    }
    def run(self):
        """Resolve the path in the API data, render reST, and parse it."""
        config = self.state.document.settings.env.config
        reporter = self.state.document.reporter
        api = load_api(config['api_path'])
        # Directive names are 'api<kind>', so strip the 'api' prefix.
        obj_type = self.name[3:]
        objs = filter_by_type(obj_type, find_by_path(self.arguments[0], api))
        parent = get_parent(self.arguments[0])
        render_fn = self.get_writer(obj_type)
        text = '\n'.join(self.content)
        content = '\n\n'.join(map(lambda x: render_fn(x, parent=parent, more_content=text, brief=False, source = reporter.source).render(), objs))
        # Re-parse the generated reST so Sphinx handles titles and markup.
        node = nodes.section()
        node.document = self.state.document
        nested_parse_with_titles(self.state, ViewList(content.split('\n')), node)
        return node.children
    def get_writer(self, obj_type):
        """Return the render function registered for *obj_type*."""
        return ApiDirective.handlers[obj_type]
def setup(app):
    """Register the api_path config value and every api* directive."""
    app.add_config_value('api_path', None, True)
    for kind in ('module', 'class', 'function', 'method',
                 'staticmethod', 'attribute', 'data'):
        app.add_directive('api' + kind, ApiDirective)
| |
"""
homeassistant.components.mqtt
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MQTT component, using paho-mqtt. This component needs an MQTT broker like
Mosquitto or Mosca. The Eclipse Foundation is running a public MQTT server
at iot.eclipse.org. If you prefer to use that one, keep in mind to adjust
the topic/client ID and that your messages are public.
Configuration:
To use MQTT you will need to add something like the following to your
config/configuration.yaml.
mqtt:
broker: 127.0.0.1
Or, if you want more options:
mqtt:
broker: 127.0.0.1
port: 1883
client_id: home-assistant-1
keepalive: 60
username: your_username
password: your_secret_password
Variables:
broker
*Required
This is the IP address of your MQTT broker, e.g. 192.168.1.32.
port
*Optional
The network port to connect to. Default is 1883.
client_id
*Optional
Client ID that Home Assistant will use. Has to be unique on the server.
Default is a random generated one.
keepalive
*Optional
The keep alive in seconds for this client. Default is 60.
"""
import logging
import socket
from homeassistant.core import HomeAssistantError
import homeassistant.util as util
from homeassistant.helpers import validate_config
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP)
_LOGGER = logging.getLogger(__name__)
# Component domain and the module-level singleton client set up by setup().
DOMAIN = "mqtt"
MQTT_CLIENT = None
# Connection defaults (see the module docstring for the config options).
DEFAULT_PORT = 1883
DEFAULT_KEEPALIVE = 60
DEFAULT_QOS = 0
# Service and event names exposed by this component.
SERVICE_PUBLISH = 'publish'
EVENT_MQTT_MESSAGE_RECEIVED = 'MQTT_MESSAGE_RECEIVED'
DEPENDENCIES = []
REQUIREMENTS = ['paho-mqtt>=1.1']
# Configuration keys.
CONF_BROKER = 'broker'
CONF_PORT = 'port'
CONF_CLIENT_ID = 'client_id'
CONF_KEEPALIVE = 'keepalive'
CONF_USERNAME = 'username'
CONF_PASSWORD = 'password'
# Event-data keys used on EVENT_MQTT_MESSAGE_RECEIVED and the publish service.
ATTR_QOS = 'qos'
ATTR_TOPIC = 'topic'
ATTR_PAYLOAD = 'payload'
def publish(hass, topic, payload):
    """ Send an MQTT message by calling the mqtt.publish service. """
    hass.services.call(DOMAIN, SERVICE_PUBLISH, {
        ATTR_TOPIC: topic,
        ATTR_PAYLOAD: payload,
    })
def subscribe(hass, topic, callback, qos=0):
    """ Subscribe to a topic and invoke *callback* on matching messages. """
    def _forward_matching(event):
        """ Invoke the callback when the event topic matches the filter. """
        received_topic = event.data[ATTR_TOPIC]
        if _match_topic(topic, received_topic):
            callback(received_topic, event.data[ATTR_PAYLOAD],
                     event.data[ATTR_QOS])
    hass.bus.listen(EVENT_MQTT_MESSAGE_RECEIVED, _forward_matching)
    # Only issue a broker subscription the first time this topic is seen.
    if topic not in MQTT_CLIENT.topics:
        MQTT_CLIENT.subscribe(topic, qos)
def setup(hass, config):
    """ Get the MQTT protocol service.

    Validates the configuration, connects the module-level MQTT client,
    and wires the start/stop lifecycle plus the mqtt.publish service.
    Returns False when configuration is invalid or the broker is
    unreachable.
    """
    if not validate_config(config, {DOMAIN: ['broker']}, _LOGGER):
        return False
    conf = config[DOMAIN]
    broker = conf[CONF_BROKER]
    # util.convert coerces the raw config value, falling back to the
    # default when the key is absent or the conversion fails.
    port = util.convert(conf.get(CONF_PORT), int, DEFAULT_PORT)
    client_id = util.convert(conf.get(CONF_CLIENT_ID), str)
    keepalive = util.convert(conf.get(CONF_KEEPALIVE), int, DEFAULT_KEEPALIVE)
    username = util.convert(conf.get(CONF_USERNAME), str)
    password = util.convert(conf.get(CONF_PASSWORD), str)
    global MQTT_CLIENT
    try:
        MQTT_CLIENT = MQTT(hass, broker, port, client_id, keepalive, username,
                           password)
    except socket.error:
        _LOGGER.exception("Can't connect to the broker. "
                          "Please check your settings and the broker "
                          "itself.")
        return False
    def stop_mqtt(event):
        """ Stop MQTT component. """
        MQTT_CLIENT.stop()
    def start_mqtt(event):
        """ Launch MQTT component when Home Assistant starts up. """
        MQTT_CLIENT.start()
        # Register the stop hook only once the client is actually running.
        hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_mqtt)
    def publish_service(call):
        """ Handle MQTT publish service calls. """
        msg_topic = call.data.get(ATTR_TOPIC)
        payload = call.data.get(ATTR_PAYLOAD)
        if msg_topic is None or payload is None:
            return
        MQTT_CLIENT.publish(msg_topic, payload)
    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_mqtt)
    hass.services.register(DOMAIN, SERVICE_PUBLISH, publish_service)
    return True
# This is based on one of the paho-mqtt examples:
# http://git.eclipse.org/c/paho/org.eclipse.paho.mqtt.python.git/tree/examples/sub-class.py
# pylint: disable=too-many-arguments
class MQTT(object):  # pragma: no cover
    """ Implements messaging service for MQTT.

    Wraps a paho-mqtt client: tracks subscriptions in ``self.topics``
    (topic -> granted qos, or None while the subscribe is in flight) and
    fires EVENT_MQTT_MESSAGE_RECEIVED on the Home Assistant bus for every
    incoming message.
    """
    def __init__(self, hass, broker, port, client_id, keepalive, username,
                 password):
        import paho.mqtt.client as mqtt
        self.hass = hass
        # mid -> topic for subscribe/unsubscribe requests awaiting their ack.
        self._progress = {}
        self.topics = {}
        if client_id is None:
            self._mqttc = mqtt.Client()
        else:
            self._mqttc = mqtt.Client(client_id)
        if username is not None:
            self._mqttc.username_pw_set(username, password)
        self._mqttc.on_subscribe = self._mqtt_on_subscribe
        self._mqttc.on_unsubscribe = self._mqtt_on_unsubscribe
        self._mqttc.on_connect = self._mqtt_on_connect
        self._mqttc.on_message = self._mqtt_on_message
        self._mqttc.connect(broker, port, keepalive)
    def publish(self, topic, payload):
        """ Publish a MQTT message. """
        self._mqttc.publish(topic, payload)
    def unsubscribe(self, topic):
        """ Unsubscribe from topic. """
        result, mid = self._mqttc.unsubscribe(topic)
        _raise_on_error(result)
        self._progress[mid] = topic
    def start(self):
        """ Run the MQTT client in its background network thread. """
        self._mqttc.loop_start()
    def stop(self):
        """ Stop the MQTT client. """
        self._mqttc.loop_stop()
    def subscribe(self, topic, qos):
        """ Subscribe to a topic (no-op when already subscribed). """
        if topic in self.topics:
            return
        result, mid = self._mqttc.subscribe(topic, qos)
        _raise_on_error(result)
        self._progress[mid] = topic
        # None marks the subscription as pending until the ack arrives.
        self.topics[topic] = None
    def _mqtt_on_connect(self, mqttc, obj, flags, result_code):
        """ On connect, resubscribe to all topics we were subscribed to. """
        old_topics = self.topics
        self._progress = {}
        self.topics = {}
        for topic, qos in old_topics.items():
            # qos is None if we were in process of subscribing
            if qos is not None:
                self._mqttc.subscribe(topic, qos)
    def _mqtt_on_subscribe(self, mqttc, obj, mid, granted_qos):
        """ Called when a subscribe is successful: record the granted qos. """
        topic = self._progress.pop(mid, None)
        if topic is None:
            return
        self.topics[topic] = granted_qos
    def _mqtt_on_unsubscribe(self, mqttc, obj, mid, granted_qos):
        """ Called when an unsubscribe is successful: forget the topic. """
        # NOTE(review): paho's on_unsubscribe callback passes
        # (client, userdata, mid) only; the extra granted_qos parameter
        # looks suspect — confirm against the paho-mqtt version pinned in
        # REQUIREMENTS.
        topic = self._progress.pop(mid, None)
        if topic is None:
            return
        self.topics.pop(topic, None)
    def _mqtt_on_message(self, mqttc, obj, msg):
        """ Message callback: forward the message onto the HA event bus. """
        self.hass.bus.fire(EVENT_MQTT_MESSAGE_RECEIVED, {
            ATTR_TOPIC: msg.topic,
            ATTR_QOS: msg.qos,
            ATTR_PAYLOAD: msg.payload.decode('utf-8'),
        })
def _raise_on_error(result): # pragma: no cover
""" Raise error if error result. """
if result != 0:
raise HomeAssistantError('Error talking to MQTT: {}'.format(result))
def _match_topic(subscription, topic):
""" Returns if topic matches subscription. """
if subscription.endswith('#'):
return (subscription[:-2] == topic or
topic.startswith(subscription[:-1]))
sub_parts = subscription.split('/')
topic_parts = topic.split('/')
return (len(sub_parts) == len(topic_parts) and
all(a == b for a, b in zip(sub_parts, topic_parts) if a != '+'))
| |
# Copyright 2011 OpenStack Foundation
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import time
import eventlet
import greenlet
from oslo.config import cfg
import six
from oslo.log.openstack.common import excutils
from oslo.log.openstack.common.gettextutils import _, _LE, _LI
from oslo.log.openstack.common import importutils
from oslo.log.openstack.common import jsonutils
from oslo.log.openstack.common import log as logging
from oslo.log.openstack.common.rpc import amqp as rpc_amqp
from oslo.log.openstack.common.rpc import common as rpc_common
# The qpid libraries are optional: try_import returns None when they are
# missing, and Connection.__init__ raises ImportError in that case.
qpid_codec = importutils.try_import("qpid.codec010")
qpid_messaging = importutils.try_import("qpid.messaging")
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
LOG = logging.getLogger(__name__)
# Configuration options for the qpid RPC backend.
qpid_opts = [
    cfg.StrOpt('qpid_hostname',
               default='localhost',
               help='Qpid broker hostname'),
    cfg.IntOpt('qpid_port',
               default=5672,
               help='Qpid broker port'),
    cfg.ListOpt('qpid_hosts',
                default=['$qpid_hostname:$qpid_port'],
                help='Qpid HA cluster host:port pairs'),
    cfg.StrOpt('qpid_username',
               default='',
               help='Username for qpid connection'),
    cfg.StrOpt('qpid_password',
               default='',
               help='Password for qpid connection',
               secret=True),
    cfg.StrOpt('qpid_sasl_mechanisms',
               default='',
               help='Space separated list of SASL mechanisms to use for auth'),
    cfg.IntOpt('qpid_heartbeat',
               default=60,
               help='Seconds between connection keepalive heartbeats'),
    cfg.StrOpt('qpid_protocol',
               default='tcp',
               help="Transport to use, either 'tcp' or 'ssl'"),
    cfg.BoolOpt('qpid_tcp_nodelay',
                default=True,
                help='Disable Nagle algorithm'),
    # NOTE(russellb) If any additional versions are added (beyond 1 and 2),
    # this file could probably use some additional refactoring so that the
    # differences between each version are split into different classes.
    cfg.IntOpt('qpid_topology_version',
               default=1,
               help="The qpid topology version to use. Version 1 is what "
                    "was originally used by impl_qpid. Version 2 includes "
                    "some backwards-incompatible changes that allow broker "
                    "federation to work. Users should update to version 2 "
                    "when they are able to take everything down, as it "
                    "requires a clean break."),
]
cfg.CONF.register_opts(qpid_opts)
# Content type used when a payload has to be JSON-serialized (see
# Publisher._pack_json_msg / ConsumerBase._unpack_json_msg).
JSON_CONTENT_TYPE = 'application/json; charset=utf8'
def raise_invalid_topology_version(conf=None):
    """Log and raise for an unsupported qpid_topology_version value.

    :param conf: config object carrying qpid_topology_version; defaults to
        the global cfg.CONF.  Every call site in this module invokes this
        function with no arguments, which raised TypeError with the old
        required parameter — the default makes those calls work while
        remaining backward compatible for callers that pass conf.
    :raises Exception: always.
    """
    if conf is None:
        conf = cfg.CONF
    msg = (_("Invalid value for qpid_topology_version: %d") %
           conf.qpid_topology_version)
    LOG.error(msg)
    raise Exception(msg)
class ConsumerBase(object):
    """Consumer base class.

    Builds the Qpid address string for a queue once, then (re)declares a
    receiver on the current session via connect()/reconnect().
    """
    def __init__(self, conf, session, callback, node_name, node_opts,
                 link_name, link_opts):
        """Declare a queue on an amqp session.
        'session' is the amqp session to use
        'callback' is the callback to call when messages are received
        'node_name' is the first part of the Qpid address string, before ';'
        'node_opts' will be applied to the "x-declare" section of "node"
        in the address string.
        'link_name' goes into the "name" field of the "link" in the address
        string
        'link_opts' will be applied to the "x-declare" section of "link"
        in the address string.
        """
        self.callback = callback
        self.receiver = None
        self.session = None
        if conf.qpid_topology_version == 1:
            addr_opts = {
                "create": "always",
                "node": {
                    "type": "topic",
                    "x-declare": {
                        "durable": True,
                        "auto-delete": True,
                    },
                },
                "link": {
                    "durable": True,
                    "x-declare": {
                        "durable": False,
                        "auto-delete": True,
                        "exclusive": False,
                    },
                },
            }
            addr_opts["node"]["x-declare"].update(node_opts)
        elif conf.qpid_topology_version == 2:
            # Version 2 relies on pre-existing amq.* exchanges, so only the
            # link needs declaring.
            addr_opts = {
                "link": {
                    "x-declare": {
                        "auto-delete": True,
                        "exclusive": False,
                    },
                },
            }
        else:
            raise_invalid_topology_version()
        addr_opts["link"]["x-declare"].update(link_opts)
        if link_name:
            addr_opts["link"]["name"] = link_name
        # Qpid address string: "<node> ; <json options>".
        self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
        self.connect(session)
    def connect(self, session):
        """Declare the receiver on connect."""
        self._declare_receiver(session)
    def reconnect(self, session):
        """Re-declare the receiver after a qpid reconnect."""
        self._declare_receiver(session)
    def _declare_receiver(self, session):
        # Create the receiver on the given session; capacity 1 means one
        # message is prefetched at a time.
        self.session = session
        self.receiver = session.receiver(self.address)
        self.receiver.capacity = 1
    def _unpack_json_msg(self, msg):
        """Load the JSON data in msg if msg.content_type indicates that it
        is necessary. Put the loaded data back into msg.content and
        update msg.content_type appropriately.
        A Qpid Message containing a dict will have a content_type of
        'amqp/map', whereas one containing a string that needs to be converted
        back from JSON will have a content_type of JSON_CONTENT_TYPE.
        :param msg: a Qpid Message object
        :returns: None
        """
        if msg.content_type == JSON_CONTENT_TYPE:
            msg.content = jsonutils.loads(msg.content)
            msg.content_type = 'amqp/map'
    def consume(self):
        """Fetch the message and pass it to the callback object."""
        message = self.receiver.fetch()
        try:
            self._unpack_json_msg(message)
            msg = rpc_common.deserialize_msg(message.content)
            self.callback(msg)
        except Exception:
            LOG.exception(_LE("Failed to process message... skipping it."))
        finally:
            # TODO(sandy): Need support for optional ack_on_error.
            self.session.acknowledge(message)
    def get_receiver(self):
        """Return the underlying Qpid receiver object."""
        return self.receiver
    def get_node_name(self):
        """Return the node part of the address (the text before ';')."""
        return self.address.split(';')[0]
class DirectConsumer(ConsumerBase):
    """Queue/consumer class for 'direct'."""
    def __init__(self, conf, session, msg_id, callback):
        """Init a 'direct' queue.
        'session' is the amqp session to use
        'msg_id' is the msg_id to listen on
        'callback' is the callback to call when messages are received
        """
        link_opts = {
            "auto-delete": conf.amqp_auto_delete,
            "exclusive": True,
            "durable": conf.amqp_durable_queues,
        }
        # Topology v1 declares its own direct exchange; v2 uses the
        # built-in amq.direct exchange.
        if conf.qpid_topology_version == 1:
            node_name = "%s/%s" % (msg_id, msg_id)
            node_opts = {"type": "direct"}
            link_name = msg_id
        elif conf.qpid_topology_version == 2:
            node_name = "amq.direct/%s" % msg_id
            node_opts = {}
            link_name = msg_id
        else:
            raise_invalid_topology_version()
        super(DirectConsumer, self).__init__(conf, session, callback,
                                             node_name, node_opts, link_name,
                                             link_opts)
class TopicConsumer(ConsumerBase):
    """Consumer class for 'topic'."""
    def __init__(self, conf, session, topic, callback, name=None,
                 exchange_name=None):
        """Init a 'topic' queue.
        :param session: the amqp session to use
        :param topic: is the topic to listen on
        :paramtype topic: str
        :param callback: the callback to call when messages are received
        :param name: optional queue name, defaults to topic
        :param exchange_name: optional exchange; defaults to the control
            exchange from configuration
        """
        exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
        link_opts = {
            "auto-delete": conf.amqp_auto_delete,
            "durable": conf.amqp_durable_queues,
        }
        if conf.qpid_topology_version == 1:
            node_name = "%s/%s" % (exchange_name, topic)
        elif conf.qpid_topology_version == 2:
            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
        else:
            raise_invalid_topology_version()
        super(TopicConsumer, self).__init__(conf, session, callback, node_name,
                                            {}, name or topic, link_opts)
class FanoutConsumer(ConsumerBase):
    """Consumer class for 'fanout'."""
    def __init__(self, conf, session, topic, callback):
        """Init a 'fanout' queue.
        'session' is the amqp session to use
        'topic' is the topic to listen on
        'callback' is the callback to call when messages are received
        """
        self.conf = conf
        # Exclusive: every fanout consumer gets its own queue.
        link_opts = {"exclusive": True}
        if conf.qpid_topology_version == 1:
            node_name = "%s_fanout" % topic
            node_opts = {"durable": False, "type": "fanout"}
        elif conf.qpid_topology_version == 2:
            node_name = "amq.topic/fanout/%s" % topic
            node_opts = {}
        else:
            raise_invalid_topology_version()
        super(FanoutConsumer, self).__init__(conf, session, callback,
                                             node_name, node_opts, None,
                                             link_opts)
class Publisher(object):
    """Base Publisher class.

    Builds the Qpid address for the destination exchange and keeps a
    sender on the current session (re-created on reconnect).
    """
    def __init__(self, conf, session, node_name, node_opts=None):
        """Init the Publisher class with the exchange_name, routing_key,
        and other options
        """
        self.sender = None
        self.session = session
        if conf.qpid_topology_version == 1:
            addr_opts = {
                "create": "always",
                "node": {
                    "type": "topic",
                    "x-declare": {
                        "durable": False,
                        # auto-delete isn't implemented for exchanges in qpid,
                        # but put in here anyway
                        "auto-delete": True,
                    },
                },
            }
            if node_opts:
                addr_opts["node"]["x-declare"].update(node_opts)
            self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
        elif conf.qpid_topology_version == 2:
            # Version 2 publishes straight to pre-existing amq.* exchanges.
            self.address = node_name
        else:
            raise_invalid_topology_version()
        self.reconnect(session)
    def reconnect(self, session):
        """Re-establish the Sender after a reconnection."""
        self.sender = session.sender(self.address)
    def _pack_json_msg(self, msg):
        """Qpid cannot serialize dicts containing strings longer than 65535
        characters. This function dumps the message content to a JSON
        string, which Qpid is able to handle.
        :param msg: May be either a Qpid Message object or a bare dict.
        :returns: A Qpid Message with its content field JSON encoded.
        """
        try:
            msg.content = jsonutils.dumps(msg.content)
        except AttributeError:
            # Need to have a Qpid message so we can set the content_type.
            msg = qpid_messaging.Message(jsonutils.dumps(msg))
        msg.content_type = JSON_CONTENT_TYPE
        return msg
    def send(self, msg):
        """Send a message, JSON-encoding it first if Qpid can't encode it."""
        try:
            # Check if Qpid can encode the message
            check_msg = msg
            if not hasattr(check_msg, 'content_type'):
                check_msg = qpid_messaging.Message(msg)
            content_type = check_msg.content_type
            enc, dec = qpid_messaging.message.get_codec(content_type)
            enc(check_msg.content)
        except qpid_codec.CodecException:
            # This means the message couldn't be serialized as a dict.
            msg = self._pack_json_msg(msg)
        self.sender.send(msg)
class DirectPublisher(Publisher):
    """Publisher class for 'direct'."""

    def __init__(self, conf, session, msg_id):
        """Init a 'direct' publisher for *msg_id*."""
        version = conf.qpid_topology_version
        if version == 1:
            node_name = "%s/%s" % (msg_id, msg_id)
            node_opts = {"type": "direct"}
        elif version == 2:
            node_name = "amq.direct/%s" % msg_id
            node_opts = {}
        else:
            raise_invalid_topology_version()
        super(DirectPublisher, self).__init__(conf, session, node_name,
                                              node_opts)
class TopicPublisher(Publisher):
    """Publisher for 'topic' addresses on the control exchange."""

    def __init__(self, conf, session, topic):
        """Set up a topic publisher for the given topic."""
        exchange_name = rpc_amqp.get_control_exchange(conf)
        topology = conf.qpid_topology_version
        if topology == 1:
            target = "%s/%s" % (exchange_name, topic)
        elif topology == 2:
            target = "amq.topic/topic/%s/%s" % (exchange_name, topic)
        else:
            raise_invalid_topology_version()

        super(TopicPublisher, self).__init__(conf, session, target)
class FanoutPublisher(Publisher):
    """Publisher for 'fanout' (broadcast) addresses."""

    def __init__(self, conf, session, topic):
        """Set up a fanout publisher for the given topic."""
        topology = conf.qpid_topology_version
        if topology == 1:
            target = "%s_fanout" % topic
            declare_opts = {"type": "fanout"}
        elif topology == 2:
            target = "amq.topic/fanout/%s" % topic
            declare_opts = {}
        else:
            raise_invalid_topology_version()

        super(FanoutPublisher, self).__init__(conf, session, target,
                                              declare_opts)
class NotifyPublisher(Publisher):
    """Publisher for notifications; declares a durable node."""

    def __init__(self, conf, session, topic):
        """Set up a durable topic publisher for notifications."""
        exchange_name = rpc_amqp.get_control_exchange(conf)
        declare_opts = {"durable": True}
        topology = conf.qpid_topology_version
        if topology == 1:
            target = "%s/%s" % (exchange_name, topic)
        elif topology == 2:
            target = "amq.topic/topic/%s/%s" % (exchange_name, topic)
        else:
            raise_invalid_topology_version()

        super(NotifyPublisher, self).__init__(conf, session, target,
                                              declare_opts)
class Connection(object):
    """Connection object.

    Owns a single qpid connection plus its session and tracks the
    consumers registered on that session so they can be re-established
    after a reconnect.
    """

    pool = None

    def __init__(self, conf, server_params=None):
        if not qpid_messaging:
            raise ImportError("Failed to import qpid.messaging")

        self.connection = None
        self.session = None
        self.consumers = {}
        self.consumer_thread = None
        self.proxy_callbacks = []
        self.conf = conf

        if server_params and 'hostname' in server_params:
            # NOTE(russellb) This enables support for cast_to_server.
            server_params['qpid_hosts'] = [
                '%s:%d' % (server_params['hostname'],
                           server_params.get('port', 5672))
            ]

        params = {
            'qpid_hosts': self.conf.qpid_hosts,
            'username': self.conf.qpid_username,
            'password': self.conf.qpid_password,
        }
        params.update(server_params or {})

        self.brokers = params['qpid_hosts']
        self.username = params['username']
        self.password = params['password']

        # Round-robin over the configured brokers on every (re)connect.
        brokers_count = len(self.brokers)
        self.next_broker_indices = itertools.cycle(range(brokers_count))

        self.reconnect()

    def connection_create(self, broker):
        """Create (but do not open) a qpid connection to `broker`."""
        self.connection = qpid_messaging.Connection(broker)

        # Check if flags are set and if so set them for the connection
        # before we call open
        self.connection.username = self.username
        self.connection.password = self.password

        self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
        # Reconnection is done by self.reconnect()
        self.connection.reconnect = False
        self.connection.heartbeat = self.conf.qpid_heartbeat
        self.connection.transport = self.conf.qpid_protocol
        self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay

    def _register_consumer(self, consumer):
        # Keyed by str(receiver) so session.next_receiver() results can
        # be mapped back to the owning consumer in _lookup_consumer().
        self.consumers[str(consumer.get_receiver())] = consumer

    def _lookup_consumer(self, receiver):
        return self.consumers[str(receiver)]

    def reconnect(self):
        """Handles reconnecting and re-establishing sessions and queues."""
        delay = 1
        while True:
            # Close the session if necessary
            if self.connection is not None and self.connection.opened():
                try:
                    self.connection.close()
                except qpid_exceptions.MessagingError:
                    pass

            broker = self.brokers[next(self.next_broker_indices)]

            try:
                self.connection_create(broker)
                self.connection.open()
            except qpid_exceptions.MessagingError as e:
                msg_dict = dict(e=e, delay=delay)
                msg = _LE("Unable to connect to AMQP server: %(e)s. "
                          "Sleeping %(delay)s seconds") % msg_dict
                LOG.error(msg)
                time.sleep(delay)
                # Linear backoff, capped at 5 seconds between attempts.
                delay = min(delay + 1, 5)
            else:
                LOG.info(_LI('Connected to AMQP server on %s'), broker)
                break

        self.session = self.connection.session()

        if self.consumers:
            consumers = self.consumers
            self.consumers = {}

            for consumer in six.itervalues(consumers):
                consumer.reconnect(self.session)
                self._register_consumer(consumer)

            LOG.debug("Re-established AMQP queues")

    def ensure(self, error_callback, method, *args, **kwargs):
        """Run `method`, reconnecting and retrying on messaging errors."""
        while True:
            try:
                return method(*args, **kwargs)
            except (qpid_exceptions.Empty,
                    qpid_exceptions.MessagingError) as e:
                if error_callback:
                    error_callback(e)
                self.reconnect()

    def close(self):
        """Close/release this connection."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        try:
            self.connection.close()
        except Exception:
            # NOTE(dripton) Logging exceptions that happen during cleanup just
            # causes confusion; there's really nothing useful we can do with
            # them.
            pass
        self.connection = None

    def reset(self):
        """Reset a connection so it can be used again."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.session.close()
        self.session = self.connection.session()
        self.consumers = {}

    def declare_consumer(self, consumer_cls, topic, callback):
        """Create a Consumer using the class that was passed in and
        add it to our list of consumers
        """
        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': exc}
            LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
                          "%(err_str)s") % log_info)

        def _declare_consumer():
            consumer = consumer_cls(self.conf, self.session, topic, callback)
            self._register_consumer(consumer)
            return consumer

        return self.ensure(_connect_error, _declare_consumer)

    def iterconsume(self, limit=None, timeout=None):
        """Return an iterator that will consume from all queues/consumers."""

        def _error_callback(exc):
            if isinstance(exc, qpid_exceptions.Empty):
                LOG.debug('Timed out waiting for RPC response: %s' %
                          exc)
                raise rpc_common.Timeout()
            else:
                LOG.exception(_LE('Failed to consume message from queue: %s') %
                              exc)

        def _consume():
            nxt_receiver = self.session.next_receiver(timeout=timeout)
            try:
                self._lookup_consumer(nxt_receiver).consume()
            except Exception:
                LOG.exception(_LE("Error processing message. Skipping it."))

        for iteration in itertools.count(0):
            if limit and iteration >= limit:
                # BUG FIX: 'raise StopIteration' inside a generator is
                # converted to RuntimeError by PEP 479 (Python 3.7+);
                # a plain return ends the generator on all versions.
                return
            yield self.ensure(_error_callback, _consume)

    def cancel_consumer_thread(self):
        """Cancel a consumer thread."""
        if self.consumer_thread is not None:
            self.consumer_thread.kill()
            try:
                self.consumer_thread.wait()
            except greenlet.GreenletExit:
                pass
            self.consumer_thread = None

    def wait_on_proxy_callbacks(self):
        """Wait for all proxy callback threads to exit."""
        for proxy_cb in self.proxy_callbacks:
            proxy_cb.wait()

    def publisher_send(self, cls, topic, msg):
        """Send to a publisher based on the publisher class."""
        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': exc}
            LOG.exception(_LE("Failed to publish message to topic "
                              "'%(topic)s': %(err_str)s") % log_info)

        def _publisher_send():
            publisher = cls(self.conf, self.session, topic)
            publisher.send(msg)

        return self.ensure(_connect_error, _publisher_send)

    def declare_direct_consumer(self, topic, callback):
        """Create a 'direct' queue.
        In nova's use, this is generally a msg_id queue used for
        responses for call/multicall
        """
        self.declare_consumer(DirectConsumer, topic, callback)

    def declare_topic_consumer(self, topic, callback=None, queue_name=None,
                               exchange_name=None):
        """Create a 'topic' consumer."""
        self.declare_consumer(functools.partial(TopicConsumer,
                                                name=queue_name,
                                                exchange_name=exchange_name,
                                                ),
                              topic, callback)

    def declare_fanout_consumer(self, topic, callback):
        """Create a 'fanout' consumer."""
        self.declare_consumer(FanoutConsumer, topic, callback)

    def direct_send(self, msg_id, msg):
        """Send a 'direct' message."""
        self.publisher_send(DirectPublisher, msg_id, msg)

    def topic_send(self, topic, msg, timeout=None):
        """Send a 'topic' message."""
        #
        # We want to create a message with attributes, e.g. a TTL. We
        # don't really need to keep 'msg' in its JSON format any longer
        # so let's create an actual qpid message here and get some
        # value-add on the go.
        #
        # WARNING: Request timeout happens to be in the same units as
        # qpid's TTL (seconds). If this changes in the future, then this
        # will need to be altered accordingly.
        #
        qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
        self.publisher_send(TopicPublisher, topic, qpid_message)

    def fanout_send(self, topic, msg):
        """Send a 'fanout' message."""
        self.publisher_send(FanoutPublisher, topic, msg)

    def notify_send(self, topic, msg, **kwargs):
        """Send a notify message on a topic."""
        self.publisher_send(NotifyPublisher, topic, msg)

    def consume(self, limit=None):
        """Consume from all queues/consumers."""
        it = self.iterconsume(limit=limit)
        while True:
            try:
                six.next(it)
            except StopIteration:
                return

    def consume_in_thread(self):
        """Consumer from all queues/consumers in a greenthread."""
        @excutils.forever_retry_uncaught_exceptions
        def _consumer_thread():
            try:
                self.consume()
            except greenlet.GreenletExit:
                return
        if self.consumer_thread is None:
            self.consumer_thread = eventlet.spawn(_consumer_thread)
        return self.consumer_thread

    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)

        if fanout:
            consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
        else:
            consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb)

        self._register_consumer(consumer)

        return consumer

    def create_worker(self, topic, proxy, pool_name):
        """Create a worker that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)

        consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
                                 name=pool_name)

        self._register_consumer(consumer)

        return consumer

    def join_consumer_pool(self, callback, pool_name, topic,
                           exchange_name=None, ack_on_error=True):
        """Register as a member of a group of consumers for a given topic from
        the specified exchange.

        Exactly one member of a given pool will receive each message.

        A message will be delivered to multiple pools, if more than
        one is created.
        """
        callback_wrapper = rpc_amqp.CallbackWrapper(
            conf=self.conf,
            callback=callback,
            connection_pool=rpc_amqp.get_connection_pool(self.conf,
                                                         Connection),
            wait_for_consumers=not ack_on_error
        )
        self.proxy_callbacks.append(callback_wrapper)

        consumer = TopicConsumer(conf=self.conf,
                                 session=self.session,
                                 topic=topic,
                                 callback=callback_wrapper,
                                 name=pool_name,
                                 exchange_name=exchange_name)

        self._register_consumer(consumer)
        return consumer
def create_connection(conf, new=True):
    """Create a connection."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.create_connection(conf, new, pool)
def multicall(conf, context, topic, msg, timeout=None):
    """Make a call that returns multiple times."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.multicall(conf, context, topic, msg, timeout, pool)
def call(conf, context, topic, msg, timeout=None):
    """Sends a message on a topic and wait for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.call(conf, context, topic, msg, timeout, pool)
def cast(conf, context, topic, msg):
    """Sends a message on a topic without waiting for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.cast(conf, context, topic, msg, pool)
def fanout_cast(conf, context, topic, msg):
    """Sends a message on a fanout exchange without waiting for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.fanout_cast(conf, context, topic, msg, pool)
def cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a topic to a specific server."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.cast_to_server(conf, context, server_params, topic, msg,
                                   pool)
def fanout_cast_to_server(conf, context, server_params, topic, msg):
    """Sends a message on a fanout exchange to a specific server."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.fanout_cast_to_server(conf, context, server_params, topic,
                                          msg, pool)
def notify(conf, context, topic, msg, envelope):
    """Sends a notification event on a topic."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.notify(conf, context, topic, msg, pool, envelope)
def cleanup():
    # Dispose of the module-level connection pool shared by all helpers.
    return rpc_amqp.cleanup(Connection.pool)
| |
"""
Copyright (C) 2010 - 2013 TopCoder Inc., All Rights Reserved.
This module defines the SQL generator, which works according to the visitor
pattern, and converts a syntax tree to the SQLite 3 dialect. Conversion
details are covered in method documentations.
@version 1.0 (Healthcare Fraud Prevention - Query Parsing and Query Generation)
@author: TCSASSEMBLER
"""
from query.common_visitor import CommonVisitor
class SQLGenerator(CommonVisitor):
    """
    This implementation of SQLGenerator is thread safe and performs read-only
    operations. A single instance of this class can be used concurrently for
    multiple syntax trees and by multiple clients.
    """

    def visit(self, node):
        """
        Invoke this method to start generating SQL for node.
        """
        return node.accept(self)

    def visit_query(self, query):
        """
        Query -> Clause | Group | And_sequence | Or_sequence

        Parameter `query` is an instance of treenode.Query, which has one
        member `query`.
        """
        return query.query.accept(self)

    def visit_and_sequence(self, sequence):
        """
        And_sequence -> Group "AND" And_sequence_tail

        Parameter `sequence` is an instance of treenode.AndSequence, which has
        two members `group` and `tail`.
        """
        return sequence.group.accept(self) + " AND " + \
            sequence.tail.accept(self)

    def visit_and_sequence_tail(self, tail):
        """
        And_sequence_tail -> Group | Group "AND" And_sequence_tail

        Parameter `tail` is an instance of treenode.AndSequenceTail, which has
        two members `group` and `tail`, where `tail` could be None.
        """
        if tail.tail is None:
            return tail.group.accept(self)
        else:
            return tail.group.accept(self) + " AND " + tail.tail.accept(self)

    def visit_or_sequence(self, sequence):
        """
        Or_sequence -> Group "OR" Or_sequence_tail

        Parameter `sequence` is an instance of treenode.OrSequence, which has
        two members `group` and `tail`.
        """
        return sequence.group.accept(self) + " OR " + \
            sequence.tail.accept(self)

    def visit_or_sequence_tail(self, tail):
        """
        Or_sequence_tail -> Group | Group "OR" Or_sequence_tail

        Parameter `tail` is an instance of treenode.OrSequenceTail, which has
        two members `group` and `tail`, where `tail` could be None.
        """
        if tail.tail is None:
            return tail.group.accept(self)
        else:
            return tail.group.accept(self) + " OR " + tail.tail.accept(self)

    def visit_group(self, group):
        """
        Group -> Affirmative_group | Negated_group

        Parameter `group` is an instance of treenode.Group, which has one
        member `group`.
        """
        return group.group.accept(self)

    def visit_affirmative_group(self, group):
        """
        Affirmative_group -> "(" Query ")"

        Parameter `group` is an instance of treenode.AffirmativeGroup, which
        has one member `query`.
        """
        return "(" + group.query.accept(self) + ")"

    def visit_negated_group(self, group):
        """
        Negated_group -> "NOT" "(" Query ")"

        Parameter `group` is an instance of treenode.NegatedGroup, which has
        one member `query`.
        """
        return "NOT(" + group.query.accept(self) + ")"

    def visit_clause(self, clause):
        """
        Clause -> Numerical_clause | Logical_clause | Textual_clause

        Parameter `clause` is an instance of treenode.Clause, which has one
        member `clause`.
        """
        return clause.clause.accept(self)

    def visit_numerical_clause(self, clause):
        """
        Numerical_clause ->
            Numerical_attribute Numerical_operator Numerical_value

        Parameter `clause` is an instance of treenode.NumericalClause, which
        has three members `attribute`, `operator`, and `value`.
        """
        return clause.attribute.accept(self) + " " + \
            clause.operator.accept(self) + " " + \
            clause.value.accept(self)

    def visit_numerical_attribute(self, attribute):
        """
        Numerical_attribute -> "County code" | "State code" | ...

        Parameter `attribute` is an instance of treenode.NumericalAttribute,
        which has two members `attribute` and `code`.
        """
        return attribute.code

    def visit_numerical_operator(self, operator):
        """
        Numerical_operator -> "equal to" | "less than"
            | "less than or equal to" | "greater than"
            | "greater than or equal to"

        Parameter `operator` is an instance of treenode.NumericalOperator,
        which has two members `operator` and `code`.
        """
        return operator.code

    def visit_numerical_value(self, value):
        """
        Numerical_value -> Integer_value | Negative_integer_value
            | Real_value | Negative_real_value

        Parameter `value` is an instance of treenode.NumericalValue, which has
        one member `value`.
        """
        return value.value.accept(self)

    def visit_negative_integer_value(self, value):
        """
        Negative_integer_value -> "-" Integer_value

        Parameter `value` is an instance of treenode.NegativeIntegerValue,
        which has one member `integer_value`.
        """
        return "-" + value.integer_value.accept(self)

    def visit_negative_real_value(self, value):
        """
        Negative_real_value -> "-" Real_value

        Parameter `value` is an instance of treenode.NegativeRealValue, which
        has one member `real_value`.
        """
        return "-" + value.real_value.accept(self)

    def visit_real_value(self, value):
        """
        Real_value -> Integer_value "." Integer_value

        Parameter `value` is an instance of treenode.RealValue, which has two
        members `integer_part` and `fractional_part`.
        """
        return value.integer_part.accept(self) + "." + \
            value.fractional_part.accept(self)

    def visit_integer_value(self, value):
        """
        Integer_value -> Digit+
        Digit -> "0" | "1" | "2" | "3" | "4" | "5" | "6" | "7" | "8" | "9"

        Parameter `value` is an instance of treenode.IntegerValue, which has
        one member `digits` of string type. The parser guarantees that all the
        characters in `digits` are indeed numerical digits, so there is no need
        to perform any kind of escaping here.
        """
        return value.digits

    def visit_logical_clause(self, clause):
        """
        Logical_clause -> Logical_attribute "is" Logical_value

        Parameter `clause` is an instance of treenode.LogicalClause, which has
        three members `attribute`, `operator`, and `value`. Although currently
        the only valid operator is "is", we still have the member `operator`
        for extension purposes in the future. However, `operator` does not
        have its own syntax tree node, but is a vanilla Python string "is".
        """
        return clause.attribute.accept(self) + " " + clause.operator + " " + \
            clause.value.accept(self)

    def visit_logical_attribute(self, attribute):
        """
        Logical_attribute -> "End stage renal disease indicator" | ...

        Parameter `attribute` is an instance of treenode.LogicalAttribute,
        which has two members `attribute` and `code`.
        """
        return attribute.code

    def visit_logical_value(self, value):
        """
        Logical_value -> "true" | "false"

        Parameter `value` is an instance of treenode.LogicalValue, which has
        one member `value` of string type.

        According to the description of SQLite datatypes, which is available at
        http://www.sqlite.org/datatype3.html, SQLite does not have a separate
        Boolean storage class. Instead, Boolean values are stored as integers
        0 (false) and 1 (true).
        """
        if value.value == "false":
            return "0"
        if value.value == "true":
            return "1"
        raise ValueError("unexpected logical value " + value.value)

    def visit_textual_clause(self, clause):
        """
        Textual_clause -> Textual_attribute Textual_operator Textual_value

        Parameter `clause` is an instance of treenode.TextualClause, which has
        three attributes `attribute`, `operator`, and `value`.
        """
        return clause.attribute.accept(self) + " " + \
            clause.operator.accept(self) + " " + \
            clause.value.accept(self)

    def visit_textual_attribute(self, attribute):
        """
        Textual_attribute -> "Beneficiary code" | "Date of birth" | ...

        Parameter `attribute` is an instance of treenode.TextualAttribute,
        which has two members `attribute` and `code`.
        """
        return attribute.code

    def visit_textual_operator(self, operator):
        """
        Textual_operator -> "is" | "matches"

        Parameter `operator` is an instance of treenode.TextualOperator, which
        has one member `operator` of string type. "matches" is translated to
        "like" according to http://www.sqlite.org/lang_expr.html
        """
        if operator.operator == "is":
            return "="
        if operator.operator == "matches":
            return "LIKE"
        raise ValueError("unexpected textual operator " + operator.operator)

    def visit_textual_value(self, value):
        """
        According to the definition of the SQLite query language, which is
        available at http://www.sqlite.org/lang_expr.html, a string constant is
        formed by enclosing the string in single quotes ('), and a single quote
        within the string can be encoded by putting two single quotes in a row.
        C-style escapes using the backslash character are not supported because
        they are not standard SQL.

        However, other databases may support other string escape mechanisms.
        MySQL, for instance, also permits using the backslash character to
        escape a quote. Pay attention to this when migrating to other systems.
        """
        return "'" + self.sqlite_quote_string(value.characters) + "'"

    def sqlite_quote_string(self, s):
        """
        This is a helper function, which escapes single quote characters in the
        given string parameter `s` by doubling them, as SQLite string-literal
        syntax requires. The escaped string is returned.
        """
        # str.replace does the doubling in a single C-level pass; the previous
        # character-by-character `+=` loop was quadratic in len(s).
        return s.replace("'", "''")

    def visit_between_clause(self, between):
        """
        Between_clause -> Attribute "BETWEEN" Value "AND" Value

        Parameter `between` has three members `attribute`, `value1`, and
        `value2`; the generated SQL uses the standard BETWEEN operator.
        """
        return between.attribute.accept(self) + " BETWEEN " + \
            between.value1.accept(self) + " AND " + \
            between.value2.accept(self)
if __name__ == "__main__":
    import sys

    from .parser import Parser, ParserError

    # Translate the single command-line argument into SQL, or report a
    # syntax error on malformed input.
    parser = Parser()
    generator = SQLGenerator()
    try:
        syntax_tree = parser.parse(str(sys.argv[1]))
        print(generator.visit(syntax_tree))
    except ParserError:
        print("syntax error")
| |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test base for tf.data service tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from tensorflow.core.protobuf import service_config_pb2
from tensorflow.python.data.experimental.ops import data_service_ops
from tensorflow.python.data.experimental.service import server_lib
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.platform import googletest
# This will be resolved to a tmp directory by `start_dispatch_server`.
TMP_WORK_DIR = "tmp_work_dir_placeholder"
# `""` indicates not to use a work directory.
NO_WORK_DIR = ""
# We use a faster than normal heartbeat interval so that tests run faster.
TEST_HEARTBEAT_INTERVAL_MS = 100
# How long test workers wait on dispatcher RPCs before timing out.
TEST_DISPATCHER_TIMEOUT_MS = 1000
# Transport protocol used by all test servers.
PROTOCOL = "grpc"
# Some clusters may take a long time to shut down due to blocked outstanding
# RPCs. We store the clusters here so that they are destroyed at end of process
# instead of slowing down unit tests.
GLOBAL_CLUSTERS = set()
def all_cluster_configurations():
  """Returns test combinations: with a work dir (both fault-tolerance modes)
  and without one (fault tolerance off)."""
  with_dir = combinations.combine(
      work_dir=TMP_WORK_DIR, fault_tolerant_mode=[True, False])
  no_dir = combinations.combine(
      work_dir=NO_WORK_DIR, fault_tolerant_mode=False)
  return with_dir + no_dir
def _make_worker(dispatcher_address, shutdown_quiet_period_ms=0, port=0):
  """Creates a worker server.

  Args:
    dispatcher_address: Address of the dispatcher the worker registers with.
    shutdown_quiet_period_ms: How long the worker waits for outstanding
      requests when shutting down.
    port: Port to bind the worker to; 0 picks a free port.

  Returns:
    An unstarted `server_lib.WorkerServer` (caller must call `start()`).
  """
  # WorkerConfig defaults are used only to derive the worker address.
  defaults = server_lib.WorkerConfig(dispatcher_address=dispatcher_address)
  config_proto = service_config_pb2.WorkerConfig(
      dispatcher_address=dispatcher_address,
      worker_address=defaults.worker_address,
      port=port,
      protocol=PROTOCOL,
      heartbeat_interval_ms=TEST_HEARTBEAT_INTERVAL_MS,
      dispatcher_timeout_ms=TEST_DISPATCHER_TIMEOUT_MS,
      data_transfer_protocol=None,
      shutdown_quiet_period_ms=shutdown_quiet_period_ms)
  return server_lib.WorkerServer(config_proto, start=False)
class TestWorker(object):
  """A tf.data service worker used by tests; supports restart with the
  same or a fresh port."""

  def __init__(self, dispatcher_address, shutdown_quiet_period_ms):
    self._dispatcher_address = dispatcher_address
    self._shutdown_quiet_period_ms = shutdown_quiet_period_ms
    self._server = _make_worker(dispatcher_address, shutdown_quiet_period_ms)
    self._running = False

  def stop(self):
    """Stops the worker server."""
    # pylint: disable=protected-access
    self._server._stop()
    self._running = False

  def start(self):
    """Starts the worker server and records the port it bound."""
    self._server.start()
    # pylint: disable=protected-access
    self._port = int(self._server._address.split(":")[1])
    self._running = True

  def restart(self, use_same_port=True):
    """Restarts the worker, stopping it first if it is already running."""
    if self._running:
      self.stop()
    port = self._port if use_same_port else 0
    self._server = _make_worker(self._dispatcher_address,
                                self._shutdown_quiet_period_ms, port)
    self._server.start()
    # pylint: disable=protected-access
    self._port = int(self._server._address.split(":")[1])
    self._running = True

  def num_tasks(self):
    """Returns the number of tasks currently assigned to this worker."""
    # pylint: disable=protected-access
    return self._server._num_tasks()
class TestCluster(object):
  """Test tf.data service cluster."""

  def __init__(self,
               num_workers,
               dispatcher_port=0,
               work_dir=TMP_WORK_DIR,
               fault_tolerant_mode=True,
               job_gc_check_interval_ms=None,
               job_gc_timeout_ms=None,
               worker_shutdown_quiet_period_ms=0,
               start=True):
    """Creates a tf.data service test cluster.

    Args:
      num_workers: The number of workers to initially add to the cluster.
      dispatcher_port: The port to use for the dispatcher.
      work_dir: The work directory to use for the dispatcher. If set to
        `TMP_WORK_DIR`, the cluster will create a new temporary directory to use
        as the work directory. If set to `NO_WORK_DIR`, no work directory will
        be used.
      fault_tolerant_mode: Whether the dispatcher should write its state to a
        journal so that it can recover from restarts.
      job_gc_check_interval_ms: How often the dispatcher should scan through to
        delete old and unused jobs, in milliseconds.
      job_gc_timeout_ms: How long a job needs to be unused before it becomes a
        candidate for garbage collection, in milliseconds.
      worker_shutdown_quiet_period_ms: When shutting down a worker, how long to
        wait for the gRPC server to process the final requests.
      start: Whether to immediately start the servers in the cluster. If
        `False`, the servers can be started later by calling
        `start_dispatcher()` and `start_workers()`.
    """
    if work_dir == TMP_WORK_DIR:
      work_dir = tempfile.mkdtemp(dir=googletest.GetTempDir())
    self._worker_shutdown_quiet_period_ms = worker_shutdown_quiet_period_ms
    self.dispatcher = server_lib.DispatchServer(
        server_lib.DispatcherConfig(
            port=dispatcher_port,
            work_dir=work_dir,
            protocol=PROTOCOL,
            fault_tolerant_mode=fault_tolerant_mode,
            job_gc_check_interval_ms=job_gc_check_interval_ms,
            job_gc_timeout_ms=job_gc_timeout_ms),
        start=start)

    self.workers = []
    for _ in range(num_workers):
      self.add_worker(start=start)

  def dispatcher_address(self):
    """Returns `host:port` of the dispatcher (protocol prefix stripped)."""
    return self.dispatcher.target.split("://")[1]

  def add_worker(self, start=True):
    """Adds a worker pointed at this cluster's dispatcher."""
    worker = TestWorker(self.dispatcher_address(),
                        self._worker_shutdown_quiet_period_ms)
    if start:
      worker.start()
    self.workers.append(worker)

  def start_dispatcher(self):
    self.dispatcher.start()

  def start_workers(self):
    for worker in self.workers:
      worker.start()

  def stop_dispatcher(self):
    # pylint: disable=protected-access
    self.dispatcher._stop()

  # pylint: disable=protected-access
  def restart_dispatcher(self):
    """Stops `dispatcher` and creates a new dispatcher with the same port.

    Restarting is supported only when the dispatcher is configured with
    `fault_tolerant_mode=True`.
    """
    if not self.dispatcher._config.fault_tolerant_mode:
      raise ValueError(
          "Trying to restart the dispatcher without fault-tolerance.")
    # Reuse the old port and work dir so the restarted dispatcher can
    # recover its journaled state and clients can reconnect.
    port = int(self.dispatcher_address().split(":")[1])
    self.dispatcher._stop()
    self.dispatcher = server_lib.DispatchServer(
        server_lib.DispatcherConfig(
            port=port,
            work_dir=self.dispatcher._config.work_dir,
            protocol=PROTOCOL,
            fault_tolerant_mode=self.dispatcher._config.fault_tolerant_mode))

  def num_registered_workers(self):
    """Returns how many workers the dispatcher currently knows about."""
    return self.dispatcher._num_workers()

  def __del__(self):
    # Destroy workers before the dispatcher for clean shutdown.
    self.workers.clear()
    del self.dispatcher
class TestBase(test_base.DatasetTestBase):
  """Base class for tf.data service tests."""

  def make_distributed_dataset(self,
                               dataset,
                               cluster,
                               processing_mode="parallel_epochs",
                               job_name=None,
                               consumer_index=None,
                               num_consumers=None,
                               max_outstanding_requests=None,
                               compression="AUTO"):
    """Wraps `dataset` so it is processed by `cluster`'s tf.data service."""
    # pylint: disable=protected-access
    return dataset.apply(
        data_service_ops._distribute(
            processing_mode,
            cluster.dispatcher_address(),
            job_name=job_name,
            consumer_index=consumer_index,
            num_consumers=num_consumers,
            max_outstanding_requests=max_outstanding_requests,
            task_refresh_interval_hint_ms=20,
            compression=compression))

  def make_distributed_range_dataset(self,
                                     num_elements,
                                     cluster,
                                     processing_mode="parallel_epochs",
                                     job_name=None,
                                     max_outstanding_requests=None,
                                     compression="AUTO"):
    """Convenience wrapper distributing `Dataset.range(num_elements)`."""
    dataset = dataset_ops.Dataset.range(num_elements)
    return self.make_distributed_dataset(
        dataset,
        cluster,
        processing_mode=processing_mode,
        job_name=job_name,
        max_outstanding_requests=max_outstanding_requests,
        compression=compression)

  def make_round_robin_dataset(self, cluster, num_consumers):
    """Creates a dataset that performs round-robin reads.

    The dataset simulates `num_consumers` consumers by using parallel
    interleave to read with `num_consumers` threads, one for each consumer. The
    nth element of the dataset is produced by consumer `n % num_consumers`.
    The dataset executed on each worker counts upwards from 0.

    Args:
      cluster: A tf.data service `TestCluster`.
      num_consumers: The number of consumers to simulate.

    Returns:
      A dataset that simulates reading with `num_consumers` consumers.
    """
    # Effectively-infinite source; round-robin reads never exhaust it.
    ds = dataset_ops.Dataset.range(100000000).repeat()
    consumers = []
    for consumer_index in range(num_consumers):
      consumers.append(
          self.make_distributed_dataset(
              ds,
              cluster,
              job_name="test",
              consumer_index=consumer_index,
              num_consumers=num_consumers))
    # Use parallel interleave to read from consumers in parallel.
    ds = dataset_ops.Dataset.from_tensor_slices(consumers)
    ds = ds.interleave(
        lambda x: x,
        cycle_length=num_consumers,
        num_parallel_calls=num_consumers)
    return ds

  def checkRoundRobinGroups(self, results, num_consumers):
    """Asserts `results` form consecutive groups of `num_consumers` values.

    Each group of `num_consumers` elements must be `[k, k+1, ...]` where
    `k` is a multiple of `num_consumers`.
    """
    groups = [
        results[start:start + num_consumers]
        for start in range(0, len(results), num_consumers)
    ]
    incorrect_groups = []
    for group in groups:
      # Each group must start at a multiple of num_consumers.
      if group[0] % num_consumers != 0:
        incorrect_groups.append(group)
        break
      # Check that each group of `num_consumers` results are consecutive.
      for offset in range(1, len(group)):
        if group[0] + offset != group[offset]:
          incorrect_groups.append(group)
          break

    self.assertEmpty(
        incorrect_groups,
        "Incorrect groups: {}.\nAll groups: {}".format(incorrect_groups,
                                                       groups))

  def read(self, get_next, results, count):
    """Evaluates `get_next` `count` times, appending each value to `results`."""
    for _ in range(count):
      results.append(self.evaluate(get_next()))
| |
import cgi
import codecs
import copy
from io import BytesIO
from itertools import chain
from urllib.parse import quote, urlencode, urljoin, urlsplit
from django.conf import settings
from django.core import signing
from django.core.exceptions import (
DisallowedHost, ImproperlyConfigured, RequestDataTooBig,
)
from django.core.files import uploadhandler
from django.http.multipartparser import MultiPartParser, MultiPartParserError
from django.utils.datastructures import (
CaseInsensitiveMapping, ImmutableList, MultiValueDict,
)
from django.utils.encoding import escape_uri_path, iri_to_uri
from django.utils.functional import cached_property
from django.utils.http import is_same_domain, limited_parse_qsl
from django.utils.regex_helper import _lazy_re_compile
# Unique sentinel; NOTE(review): appears to mark "raise instead of returning
# a default" in request helpers outside this chunk -- confirm at use sites.
RAISE_ERROR = object()
# Host header validation: lowercase hostname/IPv4 characters or a bracketed
# IPv6 literal, optionally followed by a numeric ":port" suffix.
host_validation_re = _lazy_re_compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9\.:]+\])(:\d+)?$")
class UnreadablePostError(OSError):
    """OSError subclass; presumably raised when the request body cannot be
    read (name-based -- confirm at the raise sites outside this chunk)."""
    pass
class RawPostDataException(Exception):
    """
    You cannot access raw_post_data from a request that has
    multipart/* POST data if it has been accessed via POST,
    FILES, etc.
    """
    pass
class HttpRequest:
    """A basic HTTP request."""

    # The encoding used in GET/POST dicts. None means use default setting.
    _encoding = None
    _upload_handlers = []

    def __init__(self):
        # WARNING: The `WSGIRequest` subclass doesn't call `super`.
        # Any variable assignment made here should also happen in
        # `WSGIRequest.__init__()`.
        self.GET = QueryDict(mutable=True)
        self.POST = QueryDict(mutable=True)
        self.COOKIES = {}
        self.META = {}
        self.FILES = MultiValueDict()
        self.path = ''
        self.path_info = ''
        self.method = None
        self.resolver_match = None
        self.content_type = None
        self.content_params = None

    def __repr__(self):
        if self.method is None or not self.get_full_path():
            return '<%s>' % self.__class__.__name__
        return '<%s: %s %r>' % (self.__class__.__name__, self.method, self.get_full_path())

    @cached_property
    def headers(self):
        # Lazily build a case-insensitive view of the HTTP headers in META.
        return HttpHeaders(self.META)

    def _set_content_type_params(self, meta):
        """Set content_type, content_params, and encoding."""
        self.content_type, self.content_params = cgi.parse_header(meta.get('CONTENT_TYPE', ''))
        if 'charset' in self.content_params:
            try:
                codecs.lookup(self.content_params['charset'])
            except LookupError:
                # Ignore an unknown/invalid charset rather than failing.
                pass
            else:
                self.encoding = self.content_params['charset']

    def _get_raw_host(self):
        """
        Return the HTTP host using the environment or request headers. Skip
        allowed hosts protection, so may return an insecure host.
        """
        # We try three options, in order of decreasing preference.
        if settings.USE_X_FORWARDED_HOST and (
                'HTTP_X_FORWARDED_HOST' in self.META):
            host = self.META['HTTP_X_FORWARDED_HOST']
        elif 'HTTP_HOST' in self.META:
            host = self.META['HTTP_HOST']
        else:
            # Reconstruct the host using the algorithm from PEP 333.
            host = self.META['SERVER_NAME']
            server_port = self.get_port()
            # Only append the port when it isn't the scheme's default.
            if server_port != ('443' if self.is_secure() else '80'):
                host = '%s:%s' % (host, server_port)
        return host

    def get_host(self):
        """Return the HTTP host using the environment or request headers."""
        host = self._get_raw_host()
        # Allow variants of localhost if ALLOWED_HOSTS is empty and DEBUG=True.
        allowed_hosts = settings.ALLOWED_HOSTS
        if settings.DEBUG and not allowed_hosts:
            allowed_hosts = ['localhost', '127.0.0.1', '[::1]']
        domain, port = split_domain_port(host)
        if domain and validate_host(domain, allowed_hosts):
            return host
        else:
            msg = "Invalid HTTP_HOST header: %r." % host
            if domain:
                msg += " You may need to add %r to ALLOWED_HOSTS." % domain
            else:
                msg += " The domain name provided is not valid according to RFC 1034/1035."
            raise DisallowedHost(msg)

    def get_port(self):
        """Return the port number for the request as a string."""
        if settings.USE_X_FORWARDED_PORT and 'HTTP_X_FORWARDED_PORT' in self.META:
            port = self.META['HTTP_X_FORWARDED_PORT']
        else:
            port = self.META['SERVER_PORT']
        return str(port)

    def get_full_path(self, force_append_slash=False):
        """Return the path plus an appended query string, if any."""
        return self._get_full_path(self.path, force_append_slash)

    def get_full_path_info(self, force_append_slash=False):
        """Like get_full_path(), but based on path_info."""
        return self._get_full_path(self.path_info, force_append_slash)

    def _get_full_path(self, path, force_append_slash):
        # RFC 3986 requires query string arguments to be in the ASCII range.
        # Rather than crash if this doesn't happen, we encode defensively.
        return '%s%s%s' % (
            escape_uri_path(path),
            '/' if force_append_slash and not path.endswith('/') else '',
            ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) if self.META.get('QUERY_STRING', '') else ''
        )

    def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None):
        """
        Attempt to return a signed cookie. If the signature fails or the
        cookie has expired, raise an exception, unless the `default` argument
        is provided, in which case return that value.
        """
        try:
            cookie_value = self.COOKIES[key]
        except KeyError:
            if default is not RAISE_ERROR:
                return default
            else:
                raise
        try:
            # The cookie name participates in the signing salt.
            value = signing.get_cookie_signer(salt=key + salt).unsign(
                cookie_value, max_age=max_age)
        except signing.BadSignature:
            if default is not RAISE_ERROR:
                return default
            else:
                raise
        return value

    def get_raw_uri(self):
        """
        Return an absolute URI from variables available in this request. Skip
        allowed hosts protection, so may return insecure URI.
        """
        return '{scheme}://{host}{path}'.format(
            scheme=self.scheme,
            host=self._get_raw_host(),
            path=self.get_full_path(),
        )

    def build_absolute_uri(self, location=None):
        """
        Build an absolute URI from the location and the variables available in
        this request. If no ``location`` is specified, build the absolute URI
        using request.get_full_path(). If the location is absolute, convert it
        to an RFC 3987 compliant URI and return it. If location is relative or
        is scheme-relative (i.e., ``//example.com/``), urljoin() it to a base
        URL constructed from the request variables.
        """
        if location is None:
            # Make it an absolute url (but schemeless and domainless) for the
            # edge case that the path starts with '//'.
            location = '//%s' % self.get_full_path()
        bits = urlsplit(location)
        if not (bits.scheme and bits.netloc):
            # Handle the simple, most common case. If the location is absolute
            # and a scheme or host (netloc) isn't provided, skip an expensive
            # urljoin() as long as no path segments are '.' or '..'.
            if (bits.path.startswith('/') and not bits.scheme and not bits.netloc and
                    '/./' not in bits.path and '/../' not in bits.path):
                # If location starts with '//' but has no netloc, reuse the
                # schema and netloc from the current request. Strip the double
                # slashes and continue as if it wasn't specified.
                if location.startswith('//'):
                    location = location[2:]
                location = self._current_scheme_host + location
            else:
                # Join the constructed URL with the provided location, which
                # allows the provided location to apply query strings to the
                # base path.
                location = urljoin(self._current_scheme_host + self.path, location)
        return iri_to_uri(location)

    @cached_property
    def _current_scheme_host(self):
        # e.g. 'https://example.com' — cached since scheme/host don't change
        # for the lifetime of a request.
        return '{}://{}'.format(self.scheme, self.get_host())

    def _get_scheme(self):
        """
        Hook for subclasses like WSGIRequest to implement. Return 'http' by
        default.
        """
        return 'http'

    @property
    def scheme(self):
        if settings.SECURE_PROXY_SSL_HEADER:
            try:
                header, secure_value = settings.SECURE_PROXY_SSL_HEADER
            except ValueError:
                raise ImproperlyConfigured(
                    'The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.'
                )
            header_value = self.META.get(header)
            if header_value is not None:
                # Trust the proxy header only when it matches exactly.
                return 'https' if header_value == secure_value else 'http'
        return self._get_scheme()

    def is_secure(self):
        """Return True if the request was made over HTTPS."""
        return self.scheme == 'https'

    def is_ajax(self):
        """Return True if the request carries the XMLHttpRequest marker header."""
        return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'

    @property
    def encoding(self):
        return self._encoding

    @encoding.setter
    def encoding(self, val):
        """
        Set the encoding used for GET/POST accesses. If the GET or POST
        dictionary has already been created, remove and recreate it on the
        next access (so that it is decoded correctly).
        """
        self._encoding = val
        if hasattr(self, 'GET'):
            del self.GET
        if hasattr(self, '_post'):
            del self._post

    def _initialize_handlers(self):
        """Build the upload handler chain from FILE_UPLOAD_HANDLERS."""
        self._upload_handlers = [uploadhandler.load_handler(handler, self)
                                 for handler in settings.FILE_UPLOAD_HANDLERS]

    @property
    def upload_handlers(self):
        if not self._upload_handlers:
            # If there are no upload handlers defined, initialize them from settings.
            self._initialize_handlers()
        return self._upload_handlers

    @upload_handlers.setter
    def upload_handlers(self, upload_handlers):
        if hasattr(self, '_files'):
            raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
        self._upload_handlers = upload_handlers

    def parse_file_upload(self, META, post_data):
        """Return a tuple of (POST QueryDict, FILES MultiValueDict)."""
        # Freeze the handler list so it can't change mid-parse.
        self.upload_handlers = ImmutableList(
            self.upload_handlers,
            warning="You cannot alter upload handlers after the upload has been processed."
        )
        parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
        return parser.parse()

    @property
    def body(self):
        """Return the raw request body as bytes, reading and caching it once."""
        if not hasattr(self, '_body'):
            if self._read_started:
                raise RawPostDataException("You cannot access body after reading from request's data stream")
            # Limit the maximum request data size that will be handled in-memory.
            if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and
                    int(self.META.get('CONTENT_LENGTH') or 0) > settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
                raise RequestDataTooBig('Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.')
            try:
                self._body = self.read()
            except OSError as e:
                raise UnreadablePostError(*e.args) from e
            # Replace the exhausted stream so later reads see the cached body.
            self._stream = BytesIO(self._body)
        return self._body

    def _mark_post_parse_error(self):
        # Leave empty-but-present POST/FILES so error handlers that touch
        # request.POST don't retrigger parsing.
        self._post = QueryDict()
        self._files = MultiValueDict()

    def _load_post_and_files(self):
        """Populate self._post and self._files if the content-type is a form type"""
        if self.method != 'POST':
            self._post, self._files = QueryDict(encoding=self._encoding), MultiValueDict()
            return
        if self._read_started and not hasattr(self, '_body'):
            self._mark_post_parse_error()
            return
        if self.content_type == 'multipart/form-data':
            if hasattr(self, '_body'):
                # Use already read data
                data = BytesIO(self._body)
            else:
                data = self
            try:
                self._post, self._files = self.parse_file_upload(self.META, data)
            except MultiPartParserError:
                # An error occurred while parsing POST data. Since when
                # formatting the error the request handler might access
                # self.POST, set self._post and self._file to prevent
                # attempts to parse POST data again.
                self._mark_post_parse_error()
                raise
        elif self.content_type == 'application/x-www-form-urlencoded':
            self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict()
        else:
            self._post, self._files = QueryDict(encoding=self._encoding), MultiValueDict()

    def close(self):
        """Close any uploaded files attached to the request."""
        if hasattr(self, '_files'):
            for f in chain.from_iterable(l[1] for l in self._files.lists()):
                f.close()

    # File-like and iterator interface.
    #
    # Expects self._stream to be set to an appropriate source of bytes by
    # a corresponding request subclass (e.g. WSGIRequest).
    # Also when request data has already been read by request.POST or
    # request.body, self._stream points to a BytesIO instance
    # containing that data.

    def read(self, *args, **kwargs):
        self._read_started = True
        try:
            return self._stream.read(*args, **kwargs)
        except OSError as e:
            raise UnreadablePostError(*e.args) from e

    def readline(self, *args, **kwargs):
        self._read_started = True
        try:
            return self._stream.readline(*args, **kwargs)
        except OSError as e:
            raise UnreadablePostError(*e.args) from e

    def __iter__(self):
        # Iterate the body line by line until EOF (empty bytes sentinel).
        return iter(self.readline, b'')

    def readlines(self):
        return list(self)
class HttpHeaders(CaseInsensitiveMapping):
    """Expose the HTTP headers of a WSGI environ as a case-insensitive mapping."""
    HTTP_PREFIX = 'HTTP_'
    # PEP 333 gives two headers which aren't prepended with HTTP_.
    UNPREFIXED_HEADERS = {'CONTENT_TYPE', 'CONTENT_LENGTH'}

    def __init__(self, environ):
        # Keep only environ keys that parse to a header name; non-header
        # environ entries yield None and are dropped.
        parsed = (
            (self.parse_header_name(key), value)
            for key, value in environ.items()
        )
        super().__init__({name: value for name, value in parsed if name})

    def __getitem__(self, key):
        """Allow header lookup using underscores in place of hyphens."""
        return super().__getitem__(key.replace('_', '-'))

    @classmethod
    def parse_header_name(cls, header):
        """Turn an environ key into a header name, or return None to skip it."""
        prefix = cls.HTTP_PREFIX
        if header.startswith(prefix):
            header = header[len(prefix):]
        elif header not in cls.UNPREFIXED_HEADERS:
            return None
        return header.replace('_', '-').title()
class QueryDict(MultiValueDict):
    """
    A specialized MultiValueDict which represents a query string.

    A QueryDict can be used to represent GET or POST data. It subclasses
    MultiValueDict since keys in such data can be repeated, for instance
    in the data from a form with a <select multiple> field.

    By default QueryDicts are immutable, though the copy() method
    will always return a mutable copy.

    Both keys and values set on this class are converted from the given encoding
    (DEFAULT_CHARSET by default) to str.
    """

    # These are both reset in __init__, but is specified here at the class
    # level so that unpickling will have valid values
    _mutable = True
    _encoding = None

    def __init__(self, query_string=None, mutable=False, encoding=None):
        super().__init__()
        self.encoding = encoding or settings.DEFAULT_CHARSET
        query_string = query_string or ''
        parse_qsl_kwargs = {
            'keep_blank_values': True,
            # Guard against DoS via a huge number of form fields.
            'fields_limit': settings.DATA_UPLOAD_MAX_NUMBER_FIELDS,
            'encoding': self.encoding,
        }
        if isinstance(query_string, bytes):
            # query_string normally contains URL-encoded data, a subset of ASCII.
            try:
                query_string = query_string.decode(self.encoding)
            except UnicodeDecodeError:
                # ... but some user agents are misbehaving :-(
                query_string = query_string.decode('iso-8859-1')
        for key, value in limited_parse_qsl(query_string, **parse_qsl_kwargs):
            self.appendlist(key, value)
        # Only flip mutability after parsing so appendlist() above works.
        self._mutable = mutable

    @classmethod
    def fromkeys(cls, iterable, value='', mutable=False, encoding=None):
        """
        Return a new QueryDict with keys (may be repeated) from an iterable and
        values from value.
        """
        q = cls('', mutable=True, encoding=encoding)
        for key in iterable:
            q.appendlist(key, value)
        if not mutable:
            q._mutable = False
        return q

    @property
    def encoding(self):
        if self._encoding is None:
            self._encoding = settings.DEFAULT_CHARSET
        return self._encoding

    @encoding.setter
    def encoding(self, value):
        self._encoding = value

    def _assert_mutable(self):
        # Every mutating method calls this first.
        if not self._mutable:
            raise AttributeError("This QueryDict instance is immutable")

    def __setitem__(self, key, value):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        value = bytes_to_text(value, self.encoding)
        super().__setitem__(key, value)

    def __delitem__(self, key):
        self._assert_mutable()
        super().__delitem__(key)

    def __copy__(self):
        # Copies are always mutable regardless of the source's mutability.
        result = self.__class__('', mutable=True, encoding=self.encoding)
        for key, value in self.lists():
            result.setlist(key, value)
        return result

    def __deepcopy__(self, memo):
        result = self.__class__('', mutable=True, encoding=self.encoding)
        memo[id(self)] = result
        for key, value in self.lists():
            result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo))
        return result

    def setlist(self, key, list_):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        list_ = [bytes_to_text(elt, self.encoding) for elt in list_]
        super().setlist(key, list_)

    def setlistdefault(self, key, default_list=None):
        self._assert_mutable()
        return super().setlistdefault(key, default_list)

    def appendlist(self, key, value):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        value = bytes_to_text(value, self.encoding)
        super().appendlist(key, value)

    def pop(self, key, *args):
        self._assert_mutable()
        return super().pop(key, *args)

    def popitem(self):
        self._assert_mutable()
        return super().popitem()

    def clear(self):
        self._assert_mutable()
        super().clear()

    def setdefault(self, key, default=None):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        default = bytes_to_text(default, self.encoding)
        return super().setdefault(key, default)

    def copy(self):
        """Return a mutable copy of this object."""
        return self.__deepcopy__({})

    def urlencode(self, safe=None):
        """
        Return an encoded string of all query string arguments.

        `safe` specifies characters which don't require quoting, for example::

            >>> q = QueryDict(mutable=True)
            >>> q['next'] = '/a&b/'
            >>> q.urlencode()
            'next=%2Fa%26b%2F'
            >>> q.urlencode(safe='/')
            'next=/a%26b/'
        """
        output = []
        if safe:
            safe = safe.encode(self.encoding)

            def encode(k, v):
                return '%s=%s' % ((quote(k, safe), quote(v, safe)))
        else:
            def encode(k, v):
                return urlencode({k: v})
        # Keys/values are encoded to bytes before quoting so the configured
        # charset (not urllib's default) is used.
        for k, list_ in self.lists():
            output.extend(
                encode(k.encode(self.encoding), str(v).encode(self.encoding))
                for v in list_
            )
        return '&'.join(output)
# It's neither necessary nor appropriate to use
# django.utils.encoding.force_str() for parsing URLs and form inputs. Thus,
# this slightly more restricted function, used by QueryDict.
def bytes_to_text(s, encoding):
    """
    Decode a bytes object to str using the given encoding, replacing any
    illegally encoded input characters with the Unicode "unknown" codepoint
    (\ufffd).

    Any non-bytes object is returned unchanged.
    """
    if not isinstance(s, bytes):
        return s
    return str(s, encoding, 'replace')
def split_domain_port(host):
    """
    Split *host* into a (domain, port) tuple.

    The returned domain is lowercased with any single trailing dot removed.
    If the host fails validation, both parts are empty strings.
    """
    host = host.lower()
    if not host_validation_re.match(host):
        return '', ''
    if host.endswith(']'):
        # A bracketed IPv6 address without a port.
        return host, ''
    domain, sep, port = host.rpartition(':')
    if not sep:
        # No port present.
        domain, port = host, ''
    # Remove a trailing dot (if present) from the domain.
    if domain.endswith('.'):
        domain = domain[:-1]
    return domain, port
def validate_host(host, allowed_hosts):
    """
    Validate the given host for this site.

    Check that the host looks valid and matches a host or host pattern in the
    given list of ``allowed_hosts``. Any pattern beginning with a period
    matches a domain and all its subdomains (e.g. ``.example.com`` matches
    ``example.com`` and any subdomain), ``*`` matches anything, and anything
    else must match exactly.

    Note: This function assumes that the given host is lowercased and has
    already had the port, if any, stripped off.

    Return ``True`` for a valid host, ``False`` otherwise.
    """
    for pattern in allowed_hosts:
        if pattern == '*' or is_same_domain(host, pattern):
            return True
    return False
| |
import os
import re
import json
import copy
import codecs
from lxml import etree
from lxml.etree import ElementTree
from operator import itemgetter
from pyxform import builder
## {{{ http://code.activestate.com/recipes/573463/ (r7)
class XmlDictObject(dict):
    """
    Adds object like functionality to the standard dictionary.
    """
    # NOTE(review): this module uses dict.iteritems()/basestring throughout,
    # so it targets Python 2.

    def __init__(self, initdict=None):
        if initdict is None:
            initdict = {}
        dict.__init__(self, initdict)

    def __getattr__(self, item):
        # Attribute access falls through to dictionary lookup.
        return self.__getitem__(item)

    def __setattr__(self, item, value):
        # Attribute assignment stores into the dictionary.
        self.__setitem__(item, value)

    def __str__(self):
        # '_text' holds an element's text content, when present.
        if '_text' in self:
            return self.__getitem__('_text')
        else:
            return ''

    @staticmethod
    def Wrap(x):
        """
        Static method to wrap a dictionary recursively as an XmlDictObject
        """
        if isinstance(x, dict):
            return XmlDictObject(
                (k, XmlDictObject.Wrap(v)) for (k, v) in x.iteritems())
        elif isinstance(x, list):
            return [XmlDictObject.Wrap(v) for v in x]
        else:
            return x

    @staticmethod
    def _UnWrap(x):
        # Inverse of Wrap(): recursively convert back to plain dicts/lists.
        if isinstance(x, dict):
            return dict(
                (k, XmlDictObject._UnWrap(v)) for (k, v) in x.iteritems())
        elif isinstance(x, list):
            return [XmlDictObject._UnWrap(v) for v in x]
        else:
            return x

    def UnWrap(self):
        """
        Recursively converts an XmlDictObject to a standard dictionary
        and returns the result.
        """
        return XmlDictObject._UnWrap(self)
def _ConvertDictToXmlRecurse(parent, dictitem):
    """Recursively attach `dictitem` to `parent` as XML children/text."""
    # NOTE(review): `ElementTree` here comes from `from lxml.etree import
    # ElementTree` at module top; `ElementTree.Element` looks unresolved on
    # that object — confirm this helper is actually exercised.
    assert not isinstance(dictitem, list)
    if isinstance(dictitem, dict):
        for (tag, child) in dictitem.iteritems():
            if str(tag) == '_text':
                # '_text' is the element's own text, not a child tag.
                parent.text = str(child)
            elif isinstance(child, list):
                # iterate through the array and convert
                for listchild in child:
                    elem = ElementTree.Element(tag)
                    parent.append(elem)
                    _ConvertDictToXmlRecurse(elem, listchild)
            else:
                elem = ElementTree.Element(tag)
                parent.append(elem)
                _ConvertDictToXmlRecurse(elem, child)
    else:
        # Leaf value: becomes the element's text.
        parent.text = str(dictitem)
def ConvertDictToXml(xmldict):
    """
    Converts a dictionary to an XML ElementTree Element
    """
    # Python 2 idiom: keys() returns a list, so [0] picks the single root tag.
    roottag = xmldict.keys()[0]
    root = ElementTree.Element(roottag)
    _ConvertDictToXmlRecurse(root, xmldict[roottag])
    return root
def _ConvertXmlToDictRecurse(node, dictclass):
    """Recursively convert an etree node into a `dictclass` instance.

    Attributes become dict entries, repeated child tags become lists, and
    element text is stored under '_text' (or returned directly for leaves).
    """
    nodedict = dictclass()
    if len(node.items()) > 0:
        # if we have attributes, set them
        nodedict.update(dict(node.items()))
    for child in node:
        # recursively add the element's children
        newitem = _ConvertXmlToDictRecurse(child, dictclass)
        # if tag in between text node, capture the tail end
        if child.tail is not None and child.tail.strip() != '':
            newitem['tail'] = child.tail
        if child.tag in nodedict:
            # found duplicate tag, force a list
            if isinstance(nodedict[child.tag], list):
                # append to existing list
                nodedict[child.tag].append(newitem)
            else:
                # convert to list
                nodedict[child.tag] = [nodedict[child.tag], newitem]
        else:
            # only one, directly set the dictionary
            nodedict[child.tag] = newitem
    if node.text is None:
        text = ''
    else:
        text = node.text.strip()
    if len(nodedict) > 0:
        # if we have a dictionary
        # add the text as a dictionary value (if there is any)
        if len(text) > 0:
            nodedict['_text'] = text
    else:
        # if we don't have child nodes or attributes, just set the text
        nodedict = text
    return nodedict
def ConvertXmlToDict(root, dictclass=XmlDictObject):
    """
    Converts an XML file or ElementTree Element to a dictionary
    """
    # If a string is passed in, try to open it as a file
    if isinstance(root, basestring):  # Python 2 str/unicode check
        if os.path.exists(root):
            root = etree.parse(root).getroot()
        else:
            # Not a path on disk: treat the string as raw XML markup.
            root = etree.fromstring(root)
    elif not isinstance(root, etree._Element):
        raise TypeError('Expected ElementTree.Element or file path string')
    return dictclass({root.tag: _ConvertXmlToDictRecurse(root, dictclass)})
## end of http://code.activestate.com/recipes/573463/ }}}
class XFormToDict:
    """Parse an XForm (file path, XML string, or Element) into a dict."""

    def __init__(self, root):
        if isinstance(root, basestring):
            parser = etree.XMLParser(remove_comments=True)
            # Short strings that exist on disk are treated as file paths;
            # anything else is parsed as raw XML markup.
            if len(root) < 255 and os.path.exists(root):
                self._root = etree.parse(root, parser=parser).getroot()
            else:
                self._root = etree.fromstring(root, parser)
            self._dict = ConvertXmlToDict(self._root)
        elif not isinstance(root, etree.Element):
            raise TypeError('Expected ElementTree.Element or file path string')

    def get_dict(self):
        """Return the parsed dict with namespace URIs stripped from tags."""
        # Round-trip through JSON so the '{namespace}' prefixes can be
        # removed with plain string replacement.
        json_str = json.dumps(self._dict)
        for k in self._root.nsmap:
            json_str = json_str.replace('{%s}' % self._root.nsmap[k], '')
        return json.loads(json_str)
def create_survey_element_from_xml(xml_file):
    """Build a pyxform survey element from an XForm XML file."""
    return XFormToDictBuilder(xml_file).survey()
class XFormToDictBuilder:
'''Experimental XFORM xml to XFORM JSON'''
QUESTION_TYPES = {
'select': 'select all that apply',
'select1': 'select one',
'int': 'integer',
'dateTime': 'datetime',
'string': 'text'
}
def __init__(self, xml_file):
doc_as_dict = XFormToDict(xml_file).get_dict()
self._xmldict = doc_as_dict
assert 'html' in doc_as_dict
assert 'body' in doc_as_dict['html']
assert 'head' in doc_as_dict['html']
assert 'model' in doc_as_dict['html']['head']
assert 'title' in doc_as_dict['html']['head']
assert 'bind' in doc_as_dict['html']['head']['model']
self.body = doc_as_dict['html']['body']
self.model = doc_as_dict['html']['head']['model']
self.bindings = copy.deepcopy(self.model['bind'])
self._bind_list = copy.deepcopy(self.model['bind'])
self.title = doc_as_dict['html']['head']['title']
self.new_doc = {
"type": "survey",
"title": self.title,
"children": [],
"id_string": self.title,
"sms_keyword": self.title,
"default_language": "default",
}
self._set_submission_info()
self._set_survey_name()
self.children = []
self.ordered_binding_refs = []
self._set_binding_order()
# set self.translations
self._set_translations()
for key, obj in self.body.iteritems():
if isinstance(obj, dict):
self.children.append(
self._get_question_from_object(obj, type=key))
elif isinstance(obj, list):
for item in obj:
self.children.append(
self._get_question_from_object(item, type=key))
self._cleanup_bind_list()
self._cleanup_children()
self.new_doc['children'] = self.children
def _set_binding_order(self):
self.ordered_binding_refs = []
for bind in self.bindings:
self.ordered_binding_refs.append(bind['nodeset'])
def _set_survey_name(self):
obj = self.bindings[0]
name = obj['nodeset'].split('/')[1]
self.new_doc['name'] = name
self.new_doc['id_string'] = self.model['instance'][name]['id']
def _set_submission_info(self):
if 'submission' in self.model:
submission = self.model['submission']
if 'action' in submission:
self.new_doc['submission_url'] = submission['action']
if 'base64RsaPublicKey' in submission:
self.new_doc['public_key'] = submission['base64RsaPublicKey']
def _cleanup_children(self):
def remove_refs(children):
for child in children:
if isinstance(child, dict):
if 'nodeset' in child:
del child['nodeset']
if 'ref' in child:
del child['ref']
if '__order' in child:
del child['__order']
if 'children' in child:
remove_refs(child['children'])
# do some ordering, order is specified by bindings
def order_children(children):
if isinstance(children, list):
try:
children.sort(key=itemgetter('__order'))
except KeyError:
pass
for child in children:
if isinstance(child, dict) and 'children' in child:
order_children(child['children'])
order_children(self.children)
remove_refs(self.children)
def _cleanup_bind_list(self):
for item in self._bind_list:
ref = item['nodeset']
name = self._get_name_from_ref(ref)
parent_ref = ref[:ref.find('/%s' % name)]
question = self._get_question_params_from_bindings(ref)
question['name'] = name
question['__order'] = self._get_question_order(ref)
if 'calculate' in item:
question['type'] = 'calculate'
if ref.split('/').__len__() == 3:
# just append on root node, has no group
question['ref'] = ref
self.children.append(question)
continue
for child in self.children:
if child['ref'] == parent_ref:
question['ref'] = ref
updated = False
for c in child['children']:
if isinstance(c, dict) \
and 'ref' in c and c['ref'] == ref:
c.update(question)
updated = True
if not updated:
child['children'].append(question)
if 'ref' not in question:
new_ref = u'/'.join(ref.split('/')[2:])
root_ref = u'/'.join(ref.split('/')[:2])
q = self._get_item_func(root_ref, new_ref, item)
if 'type' not in q and 'type' in question:
q.update(question)
if q['type'] == 'group' and q['name'] == 'meta':
q['control'] = {'bodyless': True}
q['__order'] = self._get_question_order(ref)
self.children.append(q)
self._bind_list.append(item)
break
if self._bind_list:
self._cleanup_bind_list()
def _get_item_func(self, ref, name, item):
rs = {}
name_splits = name.split('/')
rs['name'] = name_splits[0]
ref = '%s/%s' % (ref, rs['name'])
rs['ref'] = ref
if name_splits.__len__() > 1:
rs['type'] = 'group'
rs['children'] = [
self._get_item_func(ref, '/'.join(name_splits[1:]), item)]
return rs
def survey(self):
new_doc = json.dumps(self.new_doc)
_survey = builder.create_survey_element_from_json(new_doc)
return _survey
def _get_question_order(self, ref):
try:
return self.ordered_binding_refs.index(ref)
except ValueError:
# likely a group
for i in self.ordered_binding_refs:
if i.startswith(ref):
return self.ordered_binding_refs.index(i) + 1
return self.ordered_binding_refs.__len__() + 1
def _get_question_from_object(self, obj, type=None):
ref = None
try:
ref = obj['ref']
except KeyError:
try:
ref = obj['nodeset']
except KeyError:
raise TypeError(
'cannot find "ref" or "nodeset" in {}'.format(repr(obj)))
question = {'ref': ref, '__order': self._get_question_order(ref)}
question['name'] = self._get_name_from_ref(ref)
if 'hint' in obj:
k, v = self._get_label(obj['hint'], 'hint')
question[k] = v
if 'label' in obj:
k, v = self._get_label(obj['label'])
if isinstance(v, dict) and 'label' in v.keys() \
and 'media' in v.keys():
for _k, _v in v.iteritems():
question[_k] = _v
else:
question[k] = v
if 'autoplay' in obj or 'appearance' in obj \
or 'count' in obj or 'rows' in obj:
question['control'] = {}
if 'appearance' in obj:
question["control"].update({'appearance': obj['appearance']})
if 'rows' in obj:
question['control'].update({'rows': obj['rows']})
if 'autoplay' in obj:
question['control'].update({'autoplay': obj['autoplay']})
question_params = self._get_question_params_from_bindings(ref)
if isinstance(question_params, dict):
for k, v in question_params.iteritems():
question[k] = v
# has to come after the above block
if 'mediatype' in obj:
question['type'] = obj['mediatype'].replace('/*', '')
if 'item' in obj:
children = []
for i in obj['item']:
if isinstance(i, dict) and\
'label' in i.keys() and 'value' in i.keys():
k, v = self._get_label(i['label'])
children.append(
{'name': i['value'], k: v})
question['children'] = children
question_type = question['type'] if 'type' in question else type
if question_type == 'text' and 'bind' in question \
and 'readonly' in question['bind']:
question_type = question['type'] = 'note'
del question['bind']['readonly']
if len(question['bind'].keys()) == 0:
del question['bind']
if question_type in ['group', 'repeat']:
if question_type == 'group' and 'repeat' in obj:
question['children'] = \
self._get_children_questions(obj['repeat'])
question_type = 'repeat'
if 'count' in obj['repeat']:
if 'control' not in question:
question['control'] = {}
question['control'].update(
{'jr:count':
self._shorten_xpaths_in_string(
obj['repeat']['count'].strip())})
else:
question['children'] = self._get_children_questions(obj)
question['type'] = question_type
if type == 'trigger':
question['type'] = 'acknowledge'
if question_type == 'geopoint' and 'hint' in question:
del question['hint']
if 'type' not in question and type:
question['type'] = question_type
return question
def _get_children_questions(self, obj):
children = []
for k, v in obj.iteritems():
if k in ['ref', 'label', 'nodeset']:
continue
if isinstance(v, dict):
child = self._get_question_from_object(v, type=k)
children.append(child)
elif isinstance(v, list):
for i in v:
child = self._get_question_from_object(i, type=k)
children.append(child)
return children
def _get_question_params_from_bindings(self, ref):
for item in self.bindings:
if item['nodeset'] == ref:
try:
self._bind_list.remove(item)
except ValueError:
pass
rs = {}
for k, v in item.iteritems():
if k == 'nodeset':
continue
if k == 'type':
v = self._get_question_type(v)
if k in ['relevant', 'required', 'constraint',
'constraintMsg', 'readonly', 'calculate',
'noAppErrorString', 'requiredMsg']:
if k == 'noAppErrorString':
k = 'jr:noAppErrorString'
if k == 'requiredMsg':
k = 'jr:requiredMsg'
if k == 'constraintMsg':
k = "jr:constraintMsg"
v = self._get_constraintMsg(v)
if k == 'required':
if v == 'true()':
v = 'yes'
elif v == 'false()':
v = 'no'
if k in ['constraint', 'relevant', 'calculate']:
v = self._shorten_xpaths_in_string(v)
if 'bind' not in rs:
rs['bind'] = {}
rs['bind'][k] = v
continue
rs[k] = v
if 'preloadParams' in rs and 'preload' in rs:
rs['type'] = rs['preloadParams']
del rs['preloadParams']
del rs['preload']
return rs
return None
def _get_question_type(self, type):
if type in self.QUESTION_TYPES.keys():
return self.QUESTION_TYPES[type]
return type
def _set_translations(self):
if 'itext' not in self.model:
self.translations = []
return
assert 'translation' in self.model['itext']
self.translations = self.model['itext']['translation']
if isinstance(self.translations, dict):
self.translations = [self.translations]
assert 'text' in self.translations[0]
assert 'lang' in self.translations[0]
def _get_label(self, label_obj, key='label'):
if isinstance(label_obj, dict):
try:
ref = label_obj['ref'].replace(
'jr:itext(\'', '').replace('\')', '')
except KeyError:
return key, self._get_output_text(label_obj)
else:
return self._get_text_from_translation(ref, key)
return key, label_obj
def _get_output_text(self, value):
text = ''
if 'output' in value and '_text' in value:
v = [value['_text']]
v.append(self._get_bracketed_name(
value['output']['value']))
text = u' '.join(v)
if 'tail' in value['output']:
text = u''.join(
[text, value['output']['tail']])
elif 'output' in value and '_text' not in value:
text = self._get_bracketed_name(
value['output']['value'])
else:
return value
return text
def _get_text_from_translation(self, ref, key='label'):
label = {}
for translation in self.translations:
lang = translation['lang']
label_list = translation['text']
for l in label_list:
if l['value'] == '-': # skip blank label
continue
if l['id'] == ref:
text = value = l['value']
if isinstance(value, dict):
if 'output' in value:
text = self._get_output_text(value)
if 'form' in value and '_text' in value:
key = u'media'
v = value['_text']
if value['form'] == 'image':
v = v.replace('jr://images/', '')
else:
v = v.replace('jr://%s/' % value['form'], '')
if v == '-': # skip blank
continue
text = {value['form']: v}
if isinstance(value, list):
for item in value:
if 'form' in item and '_text' in item:
k = u'media'
m_type = item['form']
v = item['_text']
if m_type == 'image':
v = v.replace('jr://images/', '')
else:
v = v.replace('jr://%s/' % m_type, '')
if v == '-':
continue
if k not in label:
label[k] = {}
if m_type not in label[k]:
label[k][m_type] = {}
label[k][m_type][lang] = v
continue
if isinstance(item, basestring):
if item == '-':
continue
if 'label' not in label:
label['label'] = {}
label['label'][lang] = item
continue
label[lang] = text
break
if key == u'media' and label.keys() == ['default']:
label = label['default']
return key, label
def _get_bracketed_name(self, ref):
name = self._get_name_from_ref(ref)
return u''.join([u'${', name.strip(), u'}'])
    def _get_constraintMsg(self, constraintMsg):
        """Resolve a constraint message, following a jr:constraintMsg
        itext reference into the translations when present.

        Non-reference messages (and non-string values) pass through
        unchanged.
        """
        if isinstance(constraintMsg, basestring):
            if constraintMsg.find(':jr:constraintMsg') != -1:
                # strip the jr:itext('...') wrapper to get the bare itext id
                ref = constraintMsg.replace(
                    'jr:itext(\'', '').replace('\')', '')
                k, constraintMsg = self._get_text_from_translation(ref)
        return constraintMsg
def _get_name_from_ref(self, ref):
'''given /xlsform_spec_test/launch,
return the string after the last occurance of the character '/'
'''
pos = ref.rfind('/')
if pos == -1:
return ref
else:
return ref[pos + 1:].strip()
    def _expand_child(self, obj_list):
        # identity pass-through; presumably an override point for
        # subclasses that need to expand grouped children — TODO confirm
        return obj_list
def _shorten_xpaths_in_string(self, text):
def get_last_item(xpathStr):
l = xpathStr.split("/")
return l[len(l) - 1].strip()
def replace_function(match):
return "${%s}" % get_last_item(match.group())
#moving re flags into compile for python 2.6 compat
pattern = "( /[a-z0-9\-_]+(?:/[a-z0-9\-_]+)+ )"
text = re.compile(pattern, flags=re.I).sub(replace_function, text)
pattern = "(/[a-z0-9\-_]+(?:/[a-z0-9\-_]+)+)"
text = re.compile(pattern, flags=re.I).sub(replace_function, text)
return text
def write_object_to_file(filename, obj):
f = codecs.open(filename, 'w', encoding='utf-8')
f.write(json.dumps(obj, indent=2))
f.close()
print "object written to file: ", filename
| |
# orm/unitofwork.py
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The internals for the Unit Of Work system.
Includes hooks into the attributes package enabling the routing of
change events to Unit Of Work objects, as well as the flush()
mechanism which creates a dependency structure that executes change
operations.
A Unit of Work is essentially a system of maintaining a graph of
in-memory objects and their modified state. Objects are maintained as
unique against their primary key identity using an *identity map*
pattern. The Unit of Work then maintains lists of objects that are
new, dirty, or deleted and provides the capability to flush all those
changes at once.
"""
from sqlalchemy import util, log, topological
from sqlalchemy.orm import attributes, interfaces
from sqlalchemy.orm import util as mapperutil
from sqlalchemy.orm.mapper import _state_mapper
# Load lazily
object_session = None
_state_session = None
class UOWEventHandler(interfaces.AttributeExtension):
    """An event handler added to all relation attributes which handles
    session cascade operations.
    """
    active_history = False
    def __init__(self, key):
        self.key = key
    def append(self, state, item, initiator):
        """Cascade "save_update" when *item* is appended to a collection."""
        session = _state_session(state)
        if session:
            prop = _state_mapper(state).get_property(self.key)
            if prop.cascade.save_update and item not in session:
                session.add(item)
        return item
    def remove(self, state, item, initiator):
        """Expunge a pending orphan when *item* is removed."""
        session = _state_session(state)
        if not session:
            return
        prop = _state_mapper(state).get_property(self.key)
        # expunge pending orphans
        if (prop.cascade.delete_orphan and
                item in session.new and
                prop.mapper._is_orphan(attributes.instance_state(item))):
            session.expunge(item)
    def set(self, state, newvalue, oldvalue, initiator):
        """Cascade "save_update" when a scalar relation is re-assigned."""
        if oldvalue is newvalue:
            # no-op reassignment; nothing to cascade
            return newvalue
        session = _state_session(state)
        if session:
            prop = _state_mapper(state).get_property(self.key)
            cascade = prop.cascade
            if newvalue is not None and cascade.save_update and newvalue not in session:
                session.add(newvalue)
            if (cascade.delete_orphan and oldvalue in session.new and
                    prop.mapper._is_orphan(attributes.instance_state(oldvalue))):
                session.expunge(oldvalue)
        return newvalue
class UOWTransaction(object):
    """Handles the details of organizing and executing transaction
    tasks during a UnitOfWork object's flush() operation.
    The central operation is to form a graph of nodes represented by the
    ``UOWTask`` class, which is then traversed by a ``UOWExecutor`` object
    that issues SQL and instance-synchronizing operations via the related
    packages.
    """
    def __init__(self, session):
        # the Session whose flush() created this transaction
        self.session = session
        self.mapper_flush_opts = session._mapper_flush_opts
        # stores tuples of mapper/dependent mapper pairs,
        # representing a partial ordering fed into topological sort
        self.dependencies = set()
        # dictionary of mappers to UOWTasks
        self.tasks = {}
        # dictionary used by external actors to store arbitrary state
        # information.
        self.attributes = {}
        # mappers whose DependencyProcessors have already been
        # registered with this transaction (see register_object())
        self.processors = set()
    def get_attribute_history(self, state, key, passive=True):
        """Return attribute change history for *key* on *state*,
        caching per (state, key) and upgrading a cached passive
        lookup to non-passive when requested."""
        hashkey = ("history", state, key)
        # cache the objects, not the states; the strong reference here
        # prevents newly loaded objects from being dereferenced during the
        # flush process
        if hashkey in self.attributes:
            (history, cached_passive) = self.attributes[hashkey]
            # if the cached lookup was "passive" and now we want non-passive, do a non-passive
            # lookup and re-cache
            if cached_passive and not passive:
                history = attributes.get_state_history(state, key, passive=False)
                self.attributes[hashkey] = (history, passive)
        else:
            history = attributes.get_state_history(state, key, passive=passive)
            self.attributes[hashkey] = (history, passive)
        if not history or not state.get_impl(key).uses_objects:
            return history
        else:
            # object-valued attribute: convert to InstanceState-based history
            return history.as_state()
    def register_object(self, state, isdelete=False,
                            listonly=False, postupdate=False, post_update_cols=None):
        """Add *state* to the flush, routing it to the UOWTask for its
        mapper and ensuring dependency processors are registered."""
        # if object is not in the overall session, do nothing
        if not self.session._contains_state(state):
            return
        mapper = _state_mapper(state)
        task = self.get_task_by_mapper(mapper)
        if postupdate:
            task.append_postupdate(state, post_update_cols)
        else:
            task.append(state, listonly=listonly, isdelete=isdelete)
        # ensure the mapper for this object has had its
        # DependencyProcessors added.
        if mapper not in self.processors:
            mapper._register_processors(self)
            self.processors.add(mapper)
            if mapper.base_mapper not in self.processors:
                mapper.base_mapper._register_processors(self)
                self.processors.add(mapper.base_mapper)
    def set_row_switch(self, state):
        """mark a deleted object as a 'row switch'.
        this indicates that an INSERT statement elsewhere corresponds to this DELETE;
        the INSERT is converted to an UPDATE and the DELETE does not occur.
        """
        mapper = _state_mapper(state)
        task = self.get_task_by_mapper(mapper)
        taskelement = task._objects[state]
        taskelement.isdelete = "rowswitch"
    def is_deleted(self, state):
        """return true if the given state is marked as deleted within this UOWTransaction."""
        mapper = _state_mapper(state)
        task = self.get_task_by_mapper(mapper)
        return task.is_deleted(state)
    def get_task_by_mapper(self, mapper, dontcreate=False):
        """return UOWTask element corresponding to the given mapper.
        Will create a new UOWTask, including a UOWTask corresponding to the
        "base" inherited mapper, if needed, unless the dontcreate flag is True.
        """
        try:
            return self.tasks[mapper]
        except KeyError:
            if dontcreate:
                return None
            # ensure the base (parent-most) mapper's task exists first;
            # inheriting tasks attach themselves to it
            base_mapper = mapper.base_mapper
            if base_mapper in self.tasks:
                base_task = self.tasks[base_mapper]
            else:
                self.tasks[base_mapper] = base_task = UOWTask(self, base_mapper)
                base_mapper._register_dependencies(self)
            if mapper not in self.tasks:
                self.tasks[mapper] = task = UOWTask(self, mapper, base_task=base_task)
                mapper._register_dependencies(self)
            else:
                task = self.tasks[mapper]
            return task
    def register_dependency(self, mapper, dependency):
        """register a dependency between two mappers.
        Called by ``mapper.PropertyLoader`` to register the objects
        handled by one mapper being dependent on the objects handled
        by another.
        """
        # correct for primary mapper
        # also convert to the "base mapper", the parentmost task at the top of an inheritance chain
        # dependency sorting is done via non-inheriting mappers only, dependencies between mappers
        # in the same inheritance chain is done at the per-object level
        mapper = mapper.primary_mapper().base_mapper
        dependency = dependency.primary_mapper().base_mapper
        self.dependencies.add((mapper, dependency))
    def register_processor(self, mapper, processor, mapperfrom):
        """register a dependency processor, corresponding to
        operations which occur between two mappers.
        """
        # correct for primary mapper
        mapper = mapper.primary_mapper()
        mapperfrom = mapperfrom.primary_mapper()
        task = self.get_task_by_mapper(mapper)
        targettask = self.get_task_by_mapper(mapperfrom)
        up = UOWDependencyProcessor(processor, targettask)
        task.dependencies.add(up)
    def execute(self):
        """Execute this UOWTransaction.
        This will organize all collected UOWTasks into a dependency-sorted
        list which is then traversed using the traversal scheme
        encoded in the UOWExecutor class. Operations to mappers and dependency
        processors are fired off in order to issue SQL to the database and
        synchronize instance attributes with database values and related
        foreign key values."""
        # pre-execute dependency processors.  this process may
        # result in new tasks, objects and/or dependency processors being added,
        # particularly with 'delete-orphan' cascade rules.
        # keep running through the full list of tasks until all
        # objects have been processed.
        while True:
            ret = False
            for task in self.tasks.values():
                for up in list(task.dependencies):
                    if up.preexecute(self):
                        ret = True
            if not ret:
                break
        tasks = self._sort_dependencies()
        # NOTE: self.logger / _should_log_info are supplied by the
        # log.class_logger(UOWTransaction) instrumentation at module level
        if self._should_log_info():
            self.logger.info("Task dump:\n%s", self._dump(tasks))
        UOWExecutor().execute(self, tasks)
        self.logger.info("Execute Complete")
    def _dump(self, tasks):
        # debug helper: render the task tree as text
        from uowdumper import UOWDumper
        return UOWDumper.dump(tasks)
    @property
    def elements(self):
        """Iterate UOWTaskElements."""
        for task in self.tasks.itervalues():
            for elem in task.elements:
                yield elem
    def finalize_flush_changes(self):
        """mark processed objects as clean / deleted after a successful flush().
        this method is called within the flush() method after the
        execute() method has succeeded and the transaction has been committed.
        """
        for elem in self.elements:
            if elem.isdelete:
                self.session._remove_newly_deleted(elem.state)
            elif not elem.listonly:
                self.session._register_newly_persistent(elem.state)
    def _sort_dependencies(self):
        """Topologically sort base-mapper tasks; tasks involved in
        dependency cycles are expanded per-instance via
        UOWTask._sort_circular_dependencies()."""
        nodes = topological.sort_with_cycles(self.dependencies,
            [t.mapper for t in self.tasks.itervalues() if t.base_task is t]
        )
        ret = []
        for item, cycles in nodes:
            task = self.get_task_by_mapper(item)
            if cycles:
                for t in task._sort_circular_dependencies(
                    self,
                    [self.get_task_by_mapper(i) for i in cycles]
                ):
                    ret.append(t)
            else:
                ret.append(task)
        return ret
# instrument UOWTransaction with a class-level logger — presumably this
# provides the self.logger / self._should_log_info used in execute()
log.class_logger(UOWTransaction)
class UOWTask(object):
    """A collection of mapped states corresponding to a particular mapper."""
    def __init__(self, uowtransaction, mapper, base_task=None):
        # owning UOWTransaction
        self.uowtransaction = uowtransaction
        # base_task is the UOWTask which represents the "base mapper"
        # in our mapper's inheritance chain. if the mapper does not
        # inherit from any other mapper, the base_task is self.
        # the _inheriting_tasks dictionary is a dictionary present only
        # on the "base_task"-holding UOWTask, which maps all mappers within
        # an inheritance hierarchy to their corresponding UOWTask instances.
        if base_task is None:
            self.base_task = self
            self._inheriting_tasks = {mapper:self}
        else:
            self.base_task = base_task
            base_task._inheriting_tasks[mapper] = self
        # the Mapper which this UOWTask corresponds to
        self.mapper = mapper
        # mapping of InstanceState -> UOWTaskElement
        self._objects = {}
        # child tasks produced by cyclical-dependency expansion
        self.dependent_tasks = []
        # UOWDependencyProcessors between this mapper and others
        self.dependencies = set()
        # per-instance dependency processors (set during cycle sorting)
        self.cyclical_dependencies = set()
    @util.memoized_property
    def inheriting_mappers(self):
        # depth-first list of this mapper plus all inheriting mappers
        return list(self.mapper.polymorphic_iterator())
    @property
    def polymorphic_tasks(self):
        """Return an iterator of UOWTask objects corresponding to the
        inheritance sequence of this UOWTask's mapper.
        e.g. if mapper B and mapper C inherit from mapper A, and
        mapper D inherits from B:
        mapperA -> mapperB -> mapperD
                -> mapperC
        the inheritance sequence starting at mapper A is a depth-first
        traversal:
        [mapperA, mapperB, mapperD, mapperC]
        this method will therefore return
        [UOWTask(mapperA), UOWTask(mapperB), UOWTask(mapperD),
        UOWTask(mapperC)]
        The concept of "polymporphic iteration" is adapted into
        several property-based iterators which return object
        instances, UOWTaskElements and UOWDependencyProcessors in an
        order corresponding to this sequence of parent UOWTasks. This
        is used to issue operations related to inheritance-chains of
        mappers in the proper order based on dependencies between
        those mappers.
        """
        for mapper in self.inheriting_mappers:
            t = self.base_task._inheriting_tasks.get(mapper, None)
            if t is not None:
                yield t
    def is_empty(self):
        """return True if this UOWTask is 'empty', meaning it has no child items.
        used only for debugging output.
        """
        return not self._objects and not self.dependencies
    def append(self, state, listonly=False, isdelete=False):
        """Add *state* to this task (or upgrade its existing element's
        listonly/isdelete flags)."""
        if state not in self._objects:
            self._objects[state] = rec = UOWTaskElement(state)
        else:
            rec = self._objects[state]
        rec.update(listonly, isdelete)
    def append_postupdate(self, state, post_update_cols):
        """issue a 'post update' UPDATE statement via this object's mapper immediately.
        this operation is used only with relations that specify the `post_update=True`
        flag.
        """
        # postupdates are UPDATED immeditely (for now)
        # convert post_update_cols list to a Set so that __hash__() is used to compare columns
        # instead of __eq__()
        self.mapper._save_obj([state], self.uowtransaction, postupdate=True, post_update_cols=set(post_update_cols))
    def __contains__(self, state):
        """return True if the given object is contained within this UOWTask or inheriting tasks."""
        for task in self.polymorphic_tasks:
            if state in task._objects:
                return True
        else:
            # for/else: only reached when no task matched
            return False
    def is_deleted(self, state):
        """return True if the given object is marked as to be deleted within this UOWTask."""
        try:
            return self._objects[state].isdelete
        except KeyError:
            return False
    def _polymorphic_collection(fn):
        """return a property that will adapt the collection returned by the
        given callable into a polymorphic traversal."""
        # NOTE: plain function used as a decorator at class-definition time
        @property
        def collection(self):
            for task in self.polymorphic_tasks:
                for rec in fn(task):
                    yield rec
        return collection
    def _polymorphic_collection_filtered(fn):
        # like _polymorphic_collection, but restricted to tasks whose
        # mapper is in the given set; returns a method, not a property
        def collection(self, mappers):
            for task in self.polymorphic_tasks:
                if task.mapper in mappers:
                    for rec in fn(task):
                        yield rec
        return collection
    @property
    def elements(self):
        # UOWTaskElements local to this task only
        return self._objects.values()
    @_polymorphic_collection
    def polymorphic_elements(self):
        # UOWTaskElements across the inheritance chain
        return self.elements
    @_polymorphic_collection_filtered
    def filter_polymorphic_elements(self):
        return self.elements
    @property
    def polymorphic_tosave_elements(self):
        return [rec for rec in self.polymorphic_elements if not rec.isdelete]
    @property
    def polymorphic_todelete_elements(self):
        return [rec for rec in self.polymorphic_elements if rec.isdelete]
    @property
    def polymorphic_tosave_objects(self):
        # note: excludes "rowswitch" elements (isdelete is the string
        # "rowswitch", not False/True) per the identity comparisons below
        return [
            rec.state for rec in self.polymorphic_elements
            if rec.state is not None and not rec.listonly and rec.isdelete is False
        ]
    @property
    def polymorphic_todelete_objects(self):
        return [
            rec.state for rec in self.polymorphic_elements
            if rec.state is not None and not rec.listonly and rec.isdelete is True
        ]
    @_polymorphic_collection
    def polymorphic_dependencies(self):
        return self.dependencies
    @_polymorphic_collection
    def polymorphic_cyclical_dependencies(self):
        return self.cyclical_dependencies
    def _sort_circular_dependencies(self, trans, cycles):
        """Topologically sort individual entities with row-level dependencies.
        Builds a modified UOWTask structure, and is invoked when the
        per-mapper topological structure is found to have cycles.
        """
        # per-state mapping of {dependency processor -> per-instance UOWDP}
        dependencies = {}
        def set_processor_for_state(state, depprocessor, target_state, isdelete):
            if state not in dependencies:
                dependencies[state] = {}
            tasks = dependencies[state]
            if depprocessor not in tasks:
                tasks[depprocessor] = UOWDependencyProcessor(
                                            depprocessor.processor,
                                            UOWTask(self.uowtransaction, depprocessor.targettask.mapper)
                                        )
            tasks[depprocessor].targettask.append(target_state, isdelete=isdelete)
        cycles = set(cycles)
        def dependency_in_cycles(dep):
            # a dep participates in the cycle only when both its source
            # and target base-mapper tasks are members of the cycle
            proctask = trans.get_task_by_mapper(dep.processor.mapper.base_mapper, True)
            targettask = trans.get_task_by_mapper(dep.targettask.mapper.base_mapper, True)
            return targettask in cycles and (proctask is not None and proctask in cycles)
        deps_by_targettask = {}
        extradeplist = []
        for task in cycles:
            for dep in task.polymorphic_dependencies:
                if not dependency_in_cycles(dep):
                    # deps outside the cycle are carried over wholesale
                    extradeplist.append(dep)
                for t in dep.targettask.polymorphic_tasks:
                    l = deps_by_targettask.setdefault(t, [])
                    l.append(dep)
        object_to_original_task = {}
        tuples = []
        for task in cycles:
            for subtask in task.polymorphic_tasks:
                for taskelement in subtask.elements:
                    state = taskelement.state
                    object_to_original_task[state] = subtask
                    if subtask not in deps_by_targettask:
                        continue
                    for dep in deps_by_targettask[subtask]:
                        if not dep.processor.has_dependencies or not dependency_in_cycles(dep):
                            continue
                        (processor, targettask) = (dep.processor, dep.targettask)
                        isdelete = taskelement.isdelete
                        # list of dependent objects from this object
                        (added, unchanged, deleted) = dep.get_object_dependencies(state, trans, passive=True)
                        if not added and not unchanged and not deleted:
                            continue
                        # the task corresponding to saving/deleting of those dependent objects
                        childtask = trans.get_task_by_mapper(processor.mapper)
                        childlist = added + unchanged + deleted
                        for o in childlist:
                            if o is None:
                                continue
                            if o not in childtask:
                                childtask.append(o, listonly=True)
                                object_to_original_task[o] = childtask
                            whosdep = dep.whose_dependent_on_who(state, o)
                            if whosdep is not None:
                                tuples.append(whosdep)
                                if whosdep[0] is state:
                                    set_processor_for_state(whosdep[0], dep, whosdep[0], isdelete=isdelete)
                                else:
                                    set_processor_for_state(whosdep[0], dep, whosdep[1], isdelete=isdelete)
                            else:
                                # TODO: no test coverage here
                                set_processor_for_state(state, dep, state, isdelete=isdelete)
        t = UOWTask(self.uowtransaction, self.mapper)
        t.dependencies.update(extradeplist)
        used_tasks = set()
        # rationale for "tree" sort as opposed to a straight
        # dependency - keep non-dependent objects
        # grouped together, so that insert ordering as determined
        # by session.add() is maintained.
        # An alternative might be to represent the "insert order"
        # as part of the topological sort itself, which would
        # eliminate the need for this step (but may make the original
        # topological sort more expensive)
        head = topological.sort_as_tree(tuples, object_to_original_task.iterkeys())
        if head is not None:
            original_to_tasks = {}
            stack = [(head, t)]
            while stack:
                ((state, cycles, children), parenttask) = stack.pop()
                originating_task = object_to_original_task[state]
                used_tasks.add(originating_task)
                if (parenttask, originating_task) not in original_to_tasks:
                    task = UOWTask(self.uowtransaction, originating_task.mapper)
                    original_to_tasks[(parenttask, originating_task)] = task
                    parenttask.dependent_tasks.append(task)
                else:
                    task = original_to_tasks[(parenttask, originating_task)]
                task.append(state, originating_task._objects[state].listonly, isdelete=originating_task._objects[state].isdelete)
                if state in dependencies:
                    task.cyclical_dependencies.update(dependencies[state].itervalues())
                stack += [(n, task) for n in children]
        ret = [t]
        # add tasks that were in the cycle, but didnt get assembled
        # into the cyclical tree, to the start of the list
        for t2 in cycles:
            if t2 not in used_tasks and t2 is not self:
                localtask = UOWTask(self.uowtransaction, t2.mapper)
                # NOTE(review): `t2.elements` yields UOWTaskElements (not
                # states) and UOWTask has no `listonly` attribute — this
                # branch looks like a latent bug; confirm against upstream
                for state in t2.elements:
                    localtask.append(state, t2.listonly, isdelete=t2._objects[state].isdelete)
                for dep in t2.dependencies:
                    localtask.dependencies.add(dep)
                ret.insert(0, localtask)
        return ret
    def __repr__(self):
        return ("UOWTask(%s) Mapper: '%r'" % (hex(id(self)), self.mapper))
class UOWTaskElement(object):
    """Corresponds to a single InstanceState to be saved, deleted,
    or otherwise marked as having dependencies.  A collection of
    UOWTaskElements are held by a UOWTask.
    """
    def __init__(self, state):
        self.state = state
        # a new element starts as list-only (no SQL) and not deleted
        self.listonly = True
        self.isdelete = False
        # set of dependency processors that have already handled this
        # element; cleared whenever its intent is upgraded
        self.preprocessed = set()
    def update(self, listonly, isdelete):
        """Upgrade this element's flags; any upgrade invalidates
        previous preprocessing."""
        if self.listonly and not listonly:
            self.listonly = False
            self.preprocessed.clear()
        if isdelete and not self.isdelete:
            self.isdelete = True
            self.preprocessed.clear()
    def __repr__(self):
        if self.listonly:
            mode = 'listonly'
        elif self.isdelete:
            mode = 'delete'
        else:
            mode = 'save'
        return "UOWTaskElement/%d: %s/%d %s" % (
            id(self),
            self.state.class_.__name__,
            id(self.state.obj()),
            mode
        )
class UOWDependencyProcessor(object):
    """In between the saving and deleting of objects, process
    dependent data, such as filling in a foreign key on a child item
    from a new primary key, or deleting association rows before a
    delete. This object acts as a proxy to a DependencyProcessor.
    """
    def __init__(self, processor, targettask):
        self.processor = processor
        self.targettask = targettask
        prop = processor.prop
        # define a set of mappers which
        # will filter the lists of entities
        # this UOWDP processes. this allows
        # MapperProperties to be overridden
        # at least for concrete mappers.
        self._mappers = set([
            m
            for m in self.processor.parent.polymorphic_iterator()
            if m._props[prop.key] is prop
        ]).union(self.processor.mapper.polymorphic_iterator())
    def __repr__(self):
        return "UOWDependencyProcessor(%s, %s)" % (str(self.processor), str(self.targettask))
    # equality/hash by (processor, targettask) identity so UOWDPs
    # deduplicate correctly inside UOWTask.dependencies sets
    def __eq__(self, other):
        return other.processor is self.processor and other.targettask is self.targettask
    def __hash__(self):
        return hash((self.processor, self.targettask))
    def preexecute(self, trans):
        """preprocess all objects contained within this ``UOWDependencyProcessor``s target task.
        This may locate additional objects which should be part of the
        transaction, such as those affected deletes, orphans to be
        deleted, etc.
        Once an object is preprocessed, its ``UOWTaskElement`` is marked as processed. If subsequent
        changes occur to the ``UOWTaskElement``, its processed flag is reset, and will require processing
        again.
        Return True if any objects were preprocessed, or False if no
        objects were preprocessed. If True is returned, the parent ``UOWTransaction`` will
        ultimately call ``preexecute()`` again on all processors until no new objects are processed.
        """
        def getobj(elem):
            # side effect: mark the element as handled by this processor
            elem.preprocessed.add(self)
            return elem.state
        ret = False
        # first pass: non-delete elements
        elements = [getobj(elem) for elem in
                    self.targettask.filter_polymorphic_elements(self._mappers)
                    if self not in elem.preprocessed and not elem.isdelete]
        if elements:
            ret = True
            self.processor.preprocess_dependencies(self.targettask, elements, trans, delete=False)
        # second pass: delete elements
        elements = [getobj(elem) for elem in
                    self.targettask.filter_polymorphic_elements(self._mappers)
                    if self not in elem.preprocessed and elem.isdelete]
        if elements:
            ret = True
            self.processor.preprocess_dependencies(self.targettask, elements, trans, delete=True)
        return ret
    def execute(self, trans, delete):
        """process all objects contained within this ``UOWDependencyProcessor``s target task."""
        # bool() coercion groups "rowswitch" with the delete phase
        elements = [e for e in
                    self.targettask.filter_polymorphic_elements(self._mappers)
                    if bool(e.isdelete)==delete]
        self.processor.process_dependencies(
            self.targettask,
            [elem.state for elem in elements],
            trans,
            delete=delete)
    def get_object_dependencies(self, state, trans, passive):
        # delegates to the transaction's cached attribute history
        return trans.get_attribute_history(state, self.processor.key, passive=passive)
    def whose_dependent_on_who(self, state1, state2):
        """establish which object is operationally dependent amongst a parent/child
        using the semantics stated by the dependency processor.
        This method is used to establish a partial ordering (set of dependency tuples)
        when toplogically sorting on a per-instance basis.
        """
        return self.processor.whose_dependent_on_who(state1, state2)
class UOWExecutor(object):
    """Encapsulates the execution traversal of a UOWTransaction structure."""
    def execute(self, trans, tasks, isdelete=None):
        """Run save steps forward, then delete steps in reverse.

        isdelete=None runs both phases; True restricts to deletes,
        False restricts to saves.
        """
        if isdelete is not True:
            for each in tasks:
                self.execute_save_steps(trans, each)
        if isdelete is not False:
            for each in reversed(tasks):
                self.execute_delete_steps(trans, each)
    def save_objects(self, trans, task):
        """Flush the task's save list through its mapper."""
        task.mapper._save_obj(task.polymorphic_tosave_objects, trans)
    def delete_objects(self, trans, task):
        """Flush the task's delete list through its mapper."""
        task.mapper._delete_obj(task.polymorphic_todelete_objects, trans)
    def execute_dependency(self, trans, dep, isdelete):
        dep.execute(trans, isdelete)
    def execute_save_steps(self, trans, task):
        self.save_objects(trans, task)
        # per-instance (cyclical) processors: save phase, then delete phase
        for processor in task.polymorphic_cyclical_dependencies:
            self.execute_dependency(trans, processor, False)
        for processor in task.polymorphic_cyclical_dependencies:
            self.execute_dependency(trans, processor, True)
        self.execute_cyclical_dependencies(trans, task, False)
        self.execute_dependencies(trans, task)
    def execute_delete_steps(self, trans, task):
        self.execute_cyclical_dependencies(trans, task, True)
        self.delete_objects(trans, task)
    def execute_dependencies(self, trans, task):
        ordered = list(task.polymorphic_dependencies)
        for processor in ordered:
            self.execute_dependency(trans, processor, False)
        for processor in reversed(ordered):
            self.execute_dependency(trans, processor, True)
    def execute_cyclical_dependencies(self, trans, task, isdelete):
        # recurse into tasks produced by per-instance cycle sorting
        for child in task.dependent_tasks:
            self.execute(trans, [child], isdelete)
| |
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import xml.sax
import time
import boto
from boto.connection import AWSAuthConnection
from boto import handler
from boto.cloudfront.distribution import Distribution, DistributionSummary, DistributionConfig
from boto.cloudfront.distribution import StreamingDistribution, StreamingDistributionSummary, StreamingDistributionConfig
from boto.cloudfront.identity import OriginAccessIdentity
from boto.cloudfront.identity import OriginAccessIdentitySummary
from boto.cloudfront.identity import OriginAccessIdentityConfig
from boto.cloudfront.invalidation import InvalidationBatch, InvalidationSummary, InvalidationListResultSet
from boto.resultset import ResultSet
from boto.cloudfront.exception import CloudFrontServerError
class CloudFrontConnection(AWSAuthConnection):
DefaultHost = 'cloudfront.amazonaws.com'
Version = '2010-11-01'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
port=None, proxy=None, proxy_port=None,
host=DefaultHost, debug=0, security_token=None,
validate_certs=True):
super(CloudFrontConnection, self).__init__(host,
aws_access_key_id, aws_secret_access_key,
True, port, proxy, proxy_port, debug=debug,
security_token=security_token,
validate_certs=validate_certs)
def get_etag(self, response):
response_headers = response.msg
for key in response_headers.keys():
if key.lower() == 'etag':
return response_headers[key]
return None
    def _required_auth_capability(self):
        """Return the auth-handler capability name for this service."""
        return ['cloudfront']
# Generics
def _get_all_objects(self, resource, tags, result_set_class=None,
result_set_kwargs=None):
if not tags:
tags = [('DistributionSummary', DistributionSummary)]
response = self.make_request('GET', '/%s/%s' % (self.Version,
resource))
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise CloudFrontServerError(response.status, response.reason, body)
rs_class = result_set_class or ResultSet
rs_kwargs = result_set_kwargs or dict()
rs = rs_class(tags, **rs_kwargs)
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
return rs
def _get_info(self, id, resource, dist_class):
uri = '/%s/%s/%s' % (self.Version, resource, id)
response = self.make_request('GET', uri)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise CloudFrontServerError(response.status, response.reason, body)
d = dist_class(connection=self)
response_headers = response.msg
for key in response_headers.keys():
if key.lower() == 'etag':
d.etag = response_headers[key]
h = handler.XmlHandler(d, self)
xml.sax.parseString(body, h)
return d
def _get_config(self, id, resource, config_class):
uri = '/%s/%s/%s/config' % (self.Version, resource, id)
response = self.make_request('GET', uri)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise CloudFrontServerError(response.status, response.reason, body)
d = config_class(connection=self)
d.etag = self.get_etag(response)
h = handler.XmlHandler(d, self)
xml.sax.parseString(body, h)
return d
def _set_config(self, distribution_id, etag, config):
if isinstance(config, StreamingDistributionConfig):
resource = 'streaming-distribution'
else:
resource = 'distribution'
uri = '/%s/%s/%s/config' % (self.Version, resource, distribution_id)
headers = {'If-Match': etag, 'Content-Type': 'text/xml'}
response = self.make_request('PUT', uri, headers, config.to_xml())
body = response.read()
boto.log.debug(body)
if response.status != 200:
raise CloudFrontServerError(response.status, response.reason, body)
return self.get_etag(response)
def _create_object(self, config, resource, dist_class):
response = self.make_request('POST', '/%s/%s' % (self.Version,
resource),
{'Content-Type': 'text/xml'},
data=config.to_xml())
body = response.read()
boto.log.debug(body)
if response.status == 201:
d = dist_class(connection=self)
h = handler.XmlHandler(d, self)
xml.sax.parseString(body, h)
d.etag = self.get_etag(response)
return d
else:
raise CloudFrontServerError(response.status, response.reason, body)
def _delete_object(self, id, etag, resource):
uri = '/%s/%s/%s' % (self.Version, resource, id)
response = self.make_request('DELETE', uri, {'If-Match': etag})
body = response.read()
boto.log.debug(body)
if response.status != 204:
raise CloudFrontServerError(response.status, response.reason, body)
# Distributions
    def get_all_distributions(self):
        """List all download distributions as DistributionSummary objects."""
        tags = [('DistributionSummary', DistributionSummary)]
        return self._get_all_objects('distribution', tags)
    def get_distribution_info(self, distribution_id):
        """Fetch a single Distribution (with etag) by id."""
        return self._get_info(distribution_id, 'distribution', Distribution)
def get_distribution_config(self, distribution_id):
return self._get_config(distribution_id, 'distribution',
DistributionConfig)
    def set_distribution_config(self, distribution_id, etag, config):
        """Replace a distribution's config; returns the new etag."""
        return self._set_config(distribution_id, etag, config)
def create_distribution(self, origin, enabled, caller_reference='',
cnames=None, comment='', trusted_signers=None):
config = DistributionConfig(origin=origin, enabled=enabled,
caller_reference=caller_reference,
cnames=cnames, comment=comment,
trusted_signers=trusted_signers)
return self._create_object(config, 'distribution', Distribution)
    def delete_distribution(self, distribution_id, etag):
        """Delete a distribution; *etag* must match the stored config."""
        return self._delete_object(distribution_id, etag, 'distribution')
# Streaming Distributions
    def get_all_streaming_distributions(self):
        """List all streaming distributions as summary objects."""
        tags = [('StreamingDistributionSummary', StreamingDistributionSummary)]
        return self._get_all_objects('streaming-distribution', tags)
def get_streaming_distribution_info(self, distribution_id):
return self._get_info(distribution_id, 'streaming-distribution',
StreamingDistribution)
def get_streaming_distribution_config(self, distribution_id):
return self._get_config(distribution_id, 'streaming-distribution',
StreamingDistributionConfig)
    def set_streaming_distribution_config(self, distribution_id, etag, config):
        """Replace a streaming distribution's config; returns the new etag."""
        return self._set_config(distribution_id, etag, config)
def create_streaming_distribution(self, origin, enabled,
caller_reference='',
cnames=None, comment='',
trusted_signers=None):
config = StreamingDistributionConfig(origin=origin, enabled=enabled,
caller_reference=caller_reference,
cnames=cnames, comment=comment,
trusted_signers=trusted_signers)
return self._create_object(config, 'streaming-distribution',
StreamingDistribution)
def delete_streaming_distribution(self, distribution_id, etag):
return self._delete_object(distribution_id, etag,
'streaming-distribution')
# Origin Access Identity
def get_all_origin_access_identity(self):
tags = [('CloudFrontOriginAccessIdentitySummary',
OriginAccessIdentitySummary)]
return self._get_all_objects('origin-access-identity/cloudfront', tags)
def get_origin_access_identity_info(self, access_id):
return self._get_info(access_id, 'origin-access-identity/cloudfront',
OriginAccessIdentity)
def get_origin_access_identity_config(self, access_id):
return self._get_config(access_id,
'origin-access-identity/cloudfront',
OriginAccessIdentityConfig)
def set_origin_access_identity_config(self, access_id,
etag, config):
return self._set_config(access_id, etag, config)
def create_origin_access_identity(self, caller_reference='', comment=''):
config = OriginAccessIdentityConfig(caller_reference=caller_reference,
comment=comment)
return self._create_object(config, 'origin-access-identity/cloudfront',
OriginAccessIdentity)
def delete_origin_access_identity(self, access_id, etag):
return self._delete_object(access_id, etag,
'origin-access-identity/cloudfront')
# Object Invalidation
def create_invalidation_request(self, distribution_id, paths,
caller_reference=None):
"""Creates a new invalidation request
:see: http://goo.gl/8vECq
"""
# We allow you to pass in either an array or
# an InvalidationBatch object
if not isinstance(paths, InvalidationBatch):
paths = InvalidationBatch(paths)
paths.connection = self
uri = '/%s/distribution/%s/invalidation' % (self.Version,
distribution_id)
response = self.make_request('POST', uri,
{'Content-Type': 'text/xml'},
data=paths.to_xml())
body = response.read()
if response.status == 201:
h = handler.XmlHandler(paths, self)
xml.sax.parseString(body, h)
return paths
else:
raise CloudFrontServerError(response.status, response.reason, body)
def invalidation_request_status(self, distribution_id,
request_id, caller_reference=None):
uri = '/%s/distribution/%s/invalidation/%s' % (self.Version,
distribution_id,
request_id)
response = self.make_request('GET', uri, {'Content-Type': 'text/xml'})
body = response.read()
if response.status == 200:
paths = InvalidationBatch([])
h = handler.XmlHandler(paths, self)
xml.sax.parseString(body, h)
return paths
else:
raise CloudFrontServerError(response.status, response.reason, body)
def get_invalidation_requests(self, distribution_id, marker=None,
max_items=None):
"""
Get all invalidation requests for a given CloudFront distribution.
This returns an instance of an InvalidationListResultSet that
automatically handles all of the result paging, etc. from CF - you just
need to keep iterating until there are no more results.
:type distribution_id: string
:param distribution_id: The id of the CloudFront distribution
:type marker: string
:param marker: Use this only when paginating results and only in
follow-up request after you've received a response where
the results are truncated. Set this to the value of the
Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results and only in a
follow-up request to indicate the maximum number of
invalidation requests you want in the response. You
will need to pass the next_marker property from the
previous InvalidationListResultSet response in the
follow-up request in order to get the next 'page' of
results.
:rtype: :class:`boto.cloudfront.invalidation.InvalidationListResultSet`
:returns: An InvalidationListResultSet iterator that lists invalidation
requests for a given CloudFront distribution. Automatically
handles paging the results.
"""
uri = 'distribution/%s/invalidation' % distribution_id
params = dict()
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
if params:
uri += '?%s=%s' % params.popitem()
for k, v in params.items():
uri += '&%s=%s' % (k, v)
tags=[('InvalidationSummary', InvalidationSummary)]
rs_class = InvalidationListResultSet
rs_kwargs = dict(connection=self, distribution_id=distribution_id,
max_items=max_items, marker=marker)
return self._get_all_objects(uri, tags, result_set_class=rs_class,
result_set_kwargs=rs_kwargs)
| |
#!/usr/bin/python
# Command-line front end for the cnidaria k-mer comparison engine (Python 2).
import os
import sys
import argparse
# The SWIG-generated extension module lives in a 'build' directory next to
# this script; make it importable before the import below.
basedir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(basedir, 'build'))
#print sys.path
#import reloader
print basedir
import cnidariapy
# Import-time smoke test of the native module.
print cnidariapy.fact(3)
cnidariapy.version()
# Example shell usage of this script:
#for i in `seq 1 20`; do
# echo $i
# ../../cnidaria.py -n -thr 3 -np 20 -pn $i ../../tmp_21/test10/test10.cne ../../data/raw/external/new/*.21.jf &
#done
#../../cnidaria.py -np 20 --merge_only ../../tmp_21/test10/test10.cne ../../data/raw/external/new/*.21.jf
#def send_data(*args):
#def merge_data(*args):
class cnidaria(object):
EXT_COMPLETE = '.cne'
EXT_SUMMARY = '.cns'
EXT_MATRIX = '.cnm'
EXT_MATRIXJ = '.json'
EXT_JELLYFISH = '.jf'
def __init__(self, infiles, out_file, num_threads=1, minVal=2, save_every=1, export_complete=False, export_summary=False, export_matrix=True, num_pieces=1, piece_num=None, dry_run=False):
self.infiles = infiles
self.out_file = out_file
self.num_threads = num_threads
self.minVal = minVal
self.save_every = save_every
self.export_complete = export_complete
self.export_summary = export_summary
self.export_matrix = export_matrix
self.num_pieces = num_pieces
self.piece_num = piece_num
self.dry_run = dry_run
self.prefixes = []
self.srcfiles_complete = []
self.srcfiles_matrix = []
self.srcfiles_matrixj = []
self.ofiles = []
self.gen_names()
print "prefixes " , self.prefixes
print "srcfiles_complete" , self.srcfiles_complete
print "srcfiles_matrix " , self.srcfiles_matrix
print "srcfiles_matrixj " , self.srcfiles_matrixj
if self.piece_num is not None:
if self.piece_num == -1:
self.merge_pieces()
else:
self.run_piece(piece_num)
def dump(self):
cnidariapy.dump(self.infiles)
def run(self, do_merge=True):
for piece in xrange(self.num_pieces):
self.run_piece(piece)
if do_merge:
self.merge_pieces()
def run_piece(self, piece_num):
exists = True
for files, exp in [[self.srcfiles_complete,self.export_complete], [self.srcfiles_matrix, self.export_matrix], [self.srcfiles_matrixj, self.export_matrix]]:
if exp:
fn = files[piece_num-1]
e = os.path.exists(fn)
exists = exists and e
print "piece %d ( required) output file %s: " % (piece_num, fn), ("" if e else "does not"), "exists"
else:
fn = files[piece_num-1]
e = os.path.exists(fn)
exists = exists and e
print "piece %d (not required) output file %s: " % (piece_num, fn), ("" if e else "does not"), "exists"
if not exists:
print "piece num %d does not exists exists. running" % piece_num
if not self.dry_run:
cnidariapy.send_data(self.infiles, self.prefixes[piece_num-1], self.num_threads, self.minVal, self.save_every, self.export_complete, self.export_summary, self.export_matrix, self.num_pieces, piece_num-1)
else:
print "piece num %d already exists" % piece_num
def merge_pieces(self):
exists = True
for piece_num in xrange(1, self.num_pieces+1):
for files, exp in [
[self.srcfiles_complete,self.export_complete],
[self.srcfiles_matrix , self.export_matrix ],
[self.srcfiles_matrixj , self.export_matrix ]
]:
if exp:
fn = files[piece_num-1]
e = os.path.exists(fn)
exists = exists and e
print "piece %d output file %s: " % (piece_num, fn), ("" if e else "does not"), "exists"
if not exists:
print "merge requested but not all files exists"
exit(1)
exists = False
for ofile in self.ofiles:
e = os.path.exists(ofile)
exists = exists or e
print "merge output file %s: " % (ofile), ("" if e else "does not"), "exists"
if exists:
print "merging output file already exists."
sys.exit(1)
print "all input files exists. merging"
if not self.dry_run:
if self.num_pieces == 1:
print "one piece. no merging needed"
if self.export_complete:
print "symlinking %s to %s" % (self.srcfiles_complete[0], self.ofiles[0])
os.symlink( self.srcfiles_complete[0], self.ofiles[0] )
if self.export_matrix:
print "symlinking %s to %s" % (self.srcfiles_matrix[0] , self.ofiles[1])
os.symlink( self.srcfiles_matrix[0] , self.ofiles[1] )
print "symlinking %s to %s" % (self.srcfiles_matrixj[0], self.ofiles[2])
os.symlink( self.srcfiles_matrixj[0], self.ofiles[2] )
else:
print "more than one piece. merging"
print self.out_file, self.srcfiles_complete, self.srcfiles_matrix, self.srcfiles_matrixj
cnidariapy.merge_data(self.out_file, self.srcfiles_complete, self.srcfiles_matrix, self.srcfiles_matrixj, self.export_complete, self.export_matrix)
print "merged"
def gen_names(self):
self.prefixes = [None]*self.num_pieces
self.srcfiles_complete = [None]*self.num_pieces
self.srcfiles_matrix = [None]*self.num_pieces
self.srcfiles_matrixj = [None]*self.num_pieces
for j in xrange(1, self.num_pieces+1):
i = j-1
self.prefixes[ i ] = "%s_%04d_%04d" % (self.out_file, j, self.num_pieces)
#if self.export_complete:
self.srcfiles_complete[ i ] = self.prefixes[i] + cnidaria.EXT_COMPLETE
#if self.export_matrix:
self.srcfiles_matrix[ i ] = self.prefixes[i] + cnidaria.EXT_MATRIX
self.srcfiles_matrixj[ i ] = self.prefixes[i] + cnidaria.EXT_MATRIXJ
#if self.export_complete:
self.ofiles.append( self.out_file + cnidaria.EXT_COMPLETE )
#if self.export_matrix:
self.ofiles.append( self.out_file + cnidaria.EXT_MATRIX )
self.ofiles.append( self.out_file + cnidaria.EXT_MATRIXJ )
def test():
    """Exercise the pipeline on three hard-coded Jellyfish databases."""
    plants = "/mnt/scratch/aflit001/nobackup/phylogenomics_raw/external/Plants"
    infiles = [
        plants + "/Arabidopsis_lyrata.fasta.31.jf",
        plants + "/Arabidopsis_thaliana_TAIR10_genome.fas.31.jf",
        plants + "/Citrus_sinensis.fasta.31.jf",
    ]
    options = dict(
        num_threads=1,
        minVal=2,
        save_every=1,
        export_complete=True,
        export_summary=False,
        export_matrix=True,
        num_pieces=2,
    )
    cni = cnidaria(infiles, "pytest", **options)
    cni.run()
def validate_args(args):
    """Sanity-check the parsed CLI arguments.

    Prints a human-readable message and terminates the process with
    exit status 1 on the first violated constraint.
    """
    # At least two databases are needed to compare (dump works on fewer).
    if len(args.infiles) < 2 and not args.dump_only:
        print "not enought input files. Cnidaria needs at least 2 databases"
        sys.exit(1)
    # Every input must exist and carry a known database extension.
    for infile in args.infiles:
        if not os.path.exists( infile ):
            print "input file %s does not exists" % infile
            sys.exit(1)
        if not (infile.endswith(cnidaria.EXT_JELLYFISH) or infile.endswith(cnidaria.EXT_COMPLETE)):
            print "input file %s is not a jellyfish database or does not ends in %s or %s" % (infile, cnidaria.EXT_JELLYFISH, cnidaria.EXT_COMPLETE)
            sys.exit(1)
    # Normalize input paths to absolute form for later processing.
    args.infiles = [os.path.abspath(x) for x in args.infiles]
    # Refuse to clobber any output file the requested exports would write.
    for ext, exp in [ [cnidaria.EXT_COMPLETE, args.export_complete], [cnidaria.EXT_MATRIX, args.export_matrix], [cnidaria.EXT_MATRIXJ, args.export_matrix] ]:
        out_file = args.out_file + ext
        if exp and os.path.exists( out_file ):
            print "output file %s already exists" % out_file
            sys.exit(1)
    # All numeric options must be >= 1.
    if args.num_threads <=0:
        print "invalid number of threads: %d should be >=1" % args.num_threads
        sys.exit(1)
    # The COMPLETE export only works single-threaded; parallelism must
    # come from pieces instead (see message below).
    if args.export_complete and args.num_threads > 1:
        print "export complete is incompatible with multiple threads. use multiple pieces"
        sys.exit(1)
    if args.minVal <=0:
        print "invalid minimum number of shared k-mers: %d should be >=1" % args.minVal
        sys.exit(1)
    if args.save_every <=0:
        print "invalid number of k-mers to skip: %d should be >=1" % args.save_every
        sys.exit(1)
    if args.num_pieces <=0:
        print "invalid number of pieces: %d should be >=1" % args.num_pieces
        sys.exit(1)
    # piece_num, when given, is 1-based and must fall within num_pieces.
    if args.piece_num is not None:
        if args.piece_num <=0:
            print "invalid piece number: %d should be >=1" % args.piece_num
            sys.exit(1)
        if args.piece_num > args.num_pieces:
            print "invalid piece number: %d should be <= piece_num (%d)" % ( args.piece_num, args.num_pieces )
            sys.exit(1)
        if args.merge_only:
            print "merge only is not compatible with piece number"
            sys.exit(1)
    if args.merge_only and not args.do_merge:
        print "merge only and no merge options are mutually exclusive"
        sys.exit(1)
    print "all arguments are valid"
def main():
    """Parse the command line, validate it and dispatch the requested action."""
    parser = argparse.ArgumentParser( description='Cnidaria Merger', formatter_class=argparse.ArgumentDefaultsHelpFormatter )
    parser.add_argument('infiles', nargs='+', type=str, help='Input Jellyfish databases')
    parser.add_argument('-out' , '--outfile' , dest='out_file' , default='cnidaria_db', type=str , nargs='?', help='Prefix of output file')
    parser.add_argument('-thr' , '--threads' , dest='num_threads' , default=1 , type=int , nargs='?', help='Number of threads. Not compatible with COMPLETE')
    parser.add_argument('-min' , '--min-val' , dest='minVal' , default=2 , type=int , nargs='?', help='Minimum number of shared species to start counting')
    parser.add_argument('-se' , '--save-every' , dest='save_every' , default=1 , type=int , nargs='?', help='Count every N k-mers. Speeds analysis while skipping data')
    parser.add_argument('-np' , '--num-pieces' , dest='num_pieces' , default=1 , type=int , nargs='?', help='Number of pieces')
    parser.add_argument('-pn' , '--piece-num' , dest='piece_num' , default=None , type=int , nargs='?', help='Piece number')
    parser.add_argument('-n' , '-dry' , '--dry-run' , dest='dry_run' , action='store_true' , help='Dry run')
    parser.add_argument('-d' , '-dump' , '--dump-only' , dest='dump_only' , action='store_true' , help='Dump only')
    parser.add_argument('-me' , '-merge' , '--merge-only' , dest='merge_only' , action='store_true' , help='Merge only')
    parser.add_argument('-nm' , '-nomerge' , '--do-not-merge' , dest='do_merge' , action='store_false', help='Do not merge')
    parser.add_argument('-ec' , '-complete', '--export-complete' , dest='export_complete', action='store_true' , help='Export COMPLETE database')
    #parser.add_argument('-es' , '-summary' , '--export_summary' , dest='export_summary' , action='store_true' , help='Export SUMMARY database. (default: False)')
    parser.add_argument('-nem' , '-nomatrix', '--no-export-matrix', dest='export_matrix' , action='store_false', help='DO NOT Export MATRIX database')
    args = parser.parse_args()
    #this is disabled
    args.export_summary = False
    #print args
    validate_args(args)
    #if args.piece_num is not None:
    # #piece num is zero based
    # args.piece_num -= 1
    print "running %d files" % len(args.infiles)
    # piece_num is intentionally not forwarded to the constructor; the
    # dispatch below decides what to run.
    cni = cnidaria(args.infiles, args.out_file, num_threads=args.num_threads, minVal=args.minVal, save_every=args.save_every, export_complete=args.export_complete, export_summary=args.export_summary, export_matrix=args.export_matrix, num_pieces=args.num_pieces, dry_run=args.dry_run)
    # Dispatch: dump, merge-only, a single piece, or the full pipeline.
    if args.dump_only:
        print "dumping"
        cni.dump()
        print "dump"
    elif args.merge_only:
        print "merging"
        cni.merge_pieces()
        print "merged"
    elif args.piece_num is not None:
        print "running piece %d/%d" % (args.piece_num, args.num_pieces)
        cni.run_piece(args.piece_num)
        print "runned piece %d/%d" % (args.piece_num, args.num_pieces)
    else:
        print "running all %d pieces" % (args.num_pieces)
        cni.run(do_merge=args.do_merge)
        print "runned all %d pieces" % (args.num_pieces)
#/mnt/scratch/aflit001/nobackup/phylogenomics_raw/external/Plants/Arabidopsis_lyrata.fasta.31.jf /mnt/scratch/aflit001/nobackup/phylogenomics_raw/external/Plants/Arabidopsis_thaliana_TAIR10_genome.fas.31.jf /mnt/scratch/aflit001/nobackup/phylogenomics_raw/external/Plants/Citrus_sinensis.fasta.31.jf
#test()
# Script entry point.
if __name__ == '__main__':
    main()
#def fact(*args):
#def openoutfile(*args):
#def openinfile(*args):
#def merge_complete(*args):
#def merge_complete_parallel(*args):
#def merge_complete_parallel_piece(*args):
#def merge_matrix(*args):
#def merge_matrixj(*args):
#class piece_data(_object):
# __swig_setmethods__["srcfiles"] = _cnidariapy.piece_data_srcfiles_set
# __swig_setmethods__["out_file"] = _cnidariapy.piece_data_out_file_set
# __swig_setmethods__["num_threads"] = _cnidariapy.piece_data_num_threads_set
# __swig_setmethods__["minVal"] = _cnidariapy.piece_data_minVal_set
# __swig_setmethods__["save_every"] = _cnidariapy.piece_data_save_every_set
# __swig_setmethods__["export_complete"] = _cnidariapy.piece_data_export_complete_set
# __swig_setmethods__["export_summary"] = _cnidariapy.piece_data_export_summary_set
# __swig_setmethods__["export_matrix"] = _cnidariapy.piece_data_export_matrix_set
# __swig_setmethods__["num_pieces"] = _cnidariapy.piece_data_num_pieces_set
# __swig_setmethods__["piece_num"] = _cnidariapy.piece_data_piece_num_set
# __swig_setmethods__["merger"] = _cnidariapy.piece_data_merger_set
# __swig_setmethods__["locker"] = _cnidariapy.piece_data_locker_set
#def send_data(*args):
#def merge_data(*args):
#def send_pieces(*args):
#def send_piece(*args):
#int fact(int n) {
#void openoutfile( std::ofstream &outfile_, string_t filename ) {
#void openinfile( std::ifstream &infile_ , string_t filename ) {
#void merge_complete( string_t out_file, string_vec_t cfiles ) {
#void merge_complete_parallel( string_t out_file, string_vec_t cfiles, baseInt num_threads = 5 ) {
#void merge_complete_parallel_piece( string_t out_file, baseInt numCFiles, baseInt fileCount, pos_type begin_pos, string_t infile ) {
#void merge_matrix( string_t out_file, string_vec_t cfiles ) {
#void merge_matrixj( string_t out_file, string_vec_t cfiles ) {
#struct piece_data {
# string_vec_t srcfiles;
# string_t out_file;
# baseInt num_threads;
# baseInt minVal;
# baseInt save_every;
# bool export_complete;
# bool export_summary;
# bool export_matrix;
# baseInt num_pieces;
# baseInt piece_num;
# merge_jfs *merger;
# boost::recursive_mutex *locker;
# piece_data(
# string_vec_t &srcfiles_,
# string_t &out_file_,
# baseInt num_threads_,
# baseInt minVal_,
# baseInt save_every_,
# bool export_complete_,
# bool export_summary_,
# bool export_matrix_,
# baseInt num_pieces_,
# baseInt piece_num_,
# merge_jfs *merger_,
# boost::recursive_mutex *locker_
# ):
# srcfiles( srcfiles_),
# out_file( out_file_),
# num_threads( num_threads_),
# minVal( minVal_),
# save_every( save_every_),
# export_complete( export_complete_),
# export_summary( export_summary_),
# export_matrix( export_matrix_),
# num_pieces( num_pieces_),
# piece_num( piece_num_),
# merger( merger_),
# locker( locker_) {}
#};
#void send_data(
# string_vec_t &srcfiles,
# string_t &out_file,
# baseInt num_threads,
# baseInt minVal,
# baseInt save_every,
# bool export_complete,
# bool export_summary,
# bool export_matrix,
# baseInt num_pieces,
# baseInt piece_num
# ) {
#void merge_data( string_t out_file, string_vec_t srcfiles_complete, string_vec_t srcfiles_matrix, string_vec_t srcfiles_matrixj ) {
#void send_pieces( piece_data_vec_t data ) {
#void send_piece( piece_data data ) {
##
## DEBUG
##
#void run_test_pieces( string_vec_t srcfiles, string_t out_file, uint_t num_pieces, uint_t piece_num ) {
# if ( piece_num == num_pieces+1 ) {
# run_test_pieces_merge( out_file, num_pieces );
# } else {
# run_test_pieces_split( srcfiles, out_file, num_pieces, piece_num );
# }
#}
#
#void run_test_pieces_merge( string_t out_file, uint_t num_pieces ) {
# std::cout << out_file << " merge" << std::endl;
#
# string_vec_t srcfiles_complete;
# string_vec_t srcfiles_matrix;
# string_vec_t srcfiles_matrixj;
#
# for ( uint_t piece_num = 0; piece_num < num_pieces; ++piece_num ) {
# string_t name_complete = (boost::format("%s_%04d_%04d%s") % out_file % piece_num % num_pieces % EXT_COMPLETE).str();
# std::cout << "adding " << name_complete << " to merging" << std::endl;
# srcfiles_complete.push_back( name_complete );
#
# string_t name_matrix = (boost::format("%s_%04d_%04d%s") % out_file % piece_num % num_pieces % EXT_MATRIX ).str();
# std::cout << "adding " << name_matrix << " to merging" << std::endl;
# srcfiles_matrix.push_back( name_matrix );
#
# string_t name_matrixj = (boost::format("%s_%04d_%04d%s") % out_file % piece_num % num_pieces % EXT_JMATRIX ).str();
# std::cout << "adding " << name_matrixj << " to merging" << std::endl;
# srcfiles_matrixj.push_back( name_matrixj );
# }
#
# merge_complete( out_file, srcfiles_complete );
# //merge_complete_parallel( out_file, srcfiles_complete );
# merge_matrix( out_file, srcfiles_matrix );
# merge_matrixj( out_file, srcfiles_matrixj );
#}
#
#void run_test_pieces_split( string_vec_t srcfiles, string_t out_file, uint_t num_pieces, uint_t piece_num ) {
# std::cout << out_file << std::endl;
# std::cout << out_file << " :: reading jf test 2" << std::endl;
# progressBar progressl1("read jf " + out_file, 0, 1000);
# progressl1.print( 1 );
#
# baseInt num_threads = 1;
# baseInt minVal = 2;
# baseInt save_every = 1;
# //baseInt save_every = 100000;
# bool export_complete = true;
# bool export_summary = false;
# bool export_matrix = true;
# boost::recursive_mutex g_guard_s;
#
#
# piece_data_vec_t pieces;
#
# string_t name = (boost::format("%s_%04d_%04d") % out_file % piece_num % num_pieces).str();
#
# piece_data d = piece_data( srcfiles, name, num_threads, minVal, save_every, export_complete, export_summary, export_matrix, num_pieces, piece_num, new merge_jfs( srcfiles, name ), &g_guard_s );
#
# send_piece( d );
#
# progressl1.print( 500 );
#
# std::cout << out_file << " :: exporting" << std::endl;
#
# d.merger->save_all( name );
#
# progressl1.print( 1000 );
#
# std::cout << out_file << " :: read jf test 2 FINISHED" << std::endl;
#}
#
#void test1( uint_t num_pieces, uint_t piece_num ) {
# string_vec_t srcfiles;
# string_t outfile = "test1";
#
# {
# srcfiles.push_back( "/mnt/scratch/aflit001/nobackup/phylogenomics_raw/external/Plants/Arabidopsis_lyrata.fasta.31.jf" );
# srcfiles.push_back( "/mnt/scratch/aflit001/nobackup/phylogenomics_raw/external/Plants/Arabidopsis_thaliana_TAIR10_genome.fas.31.jf" );
# srcfiles.push_back( "/mnt/scratch/aflit001/nobackup/phylogenomics_raw/external/Plants/Citrus_sinensis.fasta.31.jf" );
# srcfiles.push_back( "/mnt/scratch/aflit001/nobackup/phylogenomics_raw/external/Plants/Glycine_max.fasta.31.jf" );
# srcfiles.push_back( "/mnt/scratch/aflit001/nobackup/phylogenomics_raw/external/Plants/Malus_domestica.fasta.31.jf" );
# srcfiles.push_back( "/mnt/scratch/aflit001/nobackup/phylogenomics_raw/external/Plants/Nicotiana_benthamiana_Niben.genome.v0.4.4.scaffolds.nrcontigs.fasta.31.jf" );
# srcfiles.push_back( "/mnt/scratch/aflit001/nobackup/phylogenomics_raw/external/Plants/Nicotiana_tabacum_tobacco_genome_sequences_assembly.fasta.31.jf" );
# srcfiles.push_back( "/mnt/scratch/aflit001/nobackup/phylogenomics_raw/external/Plants/Oryza_brachyantha.fasta.31.jf" );
# srcfiles.push_back( "/mnt/scratch/aflit001/nobackup/phylogenomics_raw/external/Plants/Oryza_sativa_build_5.00_IRGSPb5.fa.31.jf" );
# srcfiles.push_back( "/mnt/scratch/aflit001/nobackup/phylogenomics_raw/external/Plants/Populus_trichocarpa.fasta.31.jf" );
# srcfiles.push_back( "/mnt/scratch/aflit001/nobackup/phylogenomics_raw/external/Plants/S_lycopersicum_chromosomes.2.40.fa.31.jf" );
# srcfiles.push_back( "/mnt/scratch/aflit001/nobackup/phylogenomics_raw/external/Plants/Solanum_peruvianum_Speru_denovo.fa.31.jf" );
# srcfiles.push_back( "/mnt/scratch/aflit001/nobackup/phylogenomics_raw/external/Plants/Solanum_tuberosum_PGSC_DM_v3_superscaffolds.fasta.31.jf" );
# srcfiles.push_back( "/mnt/scratch/aflit001/nobackup/phylogenomics_raw/external/Plants/Spimpinellifolium_genome.contigs.fasta.31.jf" );
# srcfiles.push_back( "/mnt/scratch/aflit001/nobackup/phylogenomics_raw/external/Plants/Vitis_vinifera_Genoscope_12X_2010_02_12_scaffolds.fa.31.jf" );
# srcfiles.push_back( "/mnt/scratch/aflit001/nobackup/phylogenomics_raw/external/Plants/Zea_mays.fasta.31.jf" );
# }
#
# run_test_pieces( srcfiles, outfile, num_pieces, piece_num );
# //run_test_single( srcfiles, outfile );
#}
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
# On Python 3 the Python 2 names ``basestring`` and ``long`` no longer
# exist; alias them so the isinstance() checks below work on both majors.
# FIX: compare sys.version_info tuples instead of the sys.version string;
# lexicographic string comparison misorders two-digit versions (e.g. "10").
if sys.version_info >= (3,):
    basestring = str
    long = int
from pyspark import since
from pyspark.context import SparkContext
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.types import *
# Public names exported by ``from pyspark.sql.column import *``.
# NOTE(review): SchemaRDD is listed but not defined in this module chunk --
# presumably a backwards-compatibility re-export; confirm before changing.
__all__ = ["DataFrame", "Column", "SchemaRDD", "DataFrameNaFunctions",
           "DataFrameStatFunctions"]
def _create_column_from_literal(literal):
    """Wrap a plain Python value in a JVM literal Column (``lit``)."""
    context = SparkContext._active_spark_context
    return context._jvm.functions.lit(literal)
def _create_column_from_name(name):
    """Resolve a column name string into a JVM Column (``col``)."""
    context = SparkContext._active_spark_context
    return context._jvm.functions.col(name)
def _to_java_column(col):
    """Return the JVM column behind *col*, resolving name strings too."""
    if isinstance(col, Column):
        return col._jc
    return _create_column_from_name(col)
def _to_seq(sc, cols, converter=None):
    """Convert a list of Column (or names) into a JVM Seq of Column.

    When *converter* is given it is applied to every element of *cols*
    first (typically ``_to_java_column``).
    """
    items = [converter(c) for c in cols] if converter else cols
    return sc._jvm.PythonUtils.toSeq(items)
def _to_list(sc, cols, converter=None):
    """Convert a list of Column (or names) into a JVM (Scala) List of Column.

    When *converter* is given it is applied to every element of *cols*
    first (typically ``_to_java_column``).
    """
    items = [converter(c) for c in cols] if converter else cols
    return sc._jvm.PythonUtils.toList(items)
def _unary_op(name, doc="unary operator"):
    """Build a method that calls the same-named JVM column method."""
    def _(self):
        return Column(getattr(self._jc, name)())
    _.__doc__ = doc
    return _
def _func_op(name, doc=''):
    """Build a method that applies ``functions.<name>`` to this column."""
    def _(self):
        context = SparkContext._active_spark_context
        return Column(getattr(context._jvm.functions, name)(self._jc))
    _.__doc__ = doc
    return _
def _bin_func_op(name, reverse=False, doc="binary function"):
    """Build a method backed by ``functions.<name>(left, right)``.

    With ``reverse=True`` this object becomes the right-hand operand.
    """
    def _(self, other):
        context = SparkContext._active_spark_context
        fn = getattr(context._jvm.functions, name)
        if isinstance(other, Column):
            jother = other._jc
        else:
            jother = _create_column_from_literal(other)
        args = (jother, self._jc) if reverse else (self._jc, jother)
        return Column(fn(*args))
    _.__doc__ = doc
    return _
def _bin_op(name, doc="binary operator"):
    """Build a method delegating to the JVM column method *name*.

    Non-Column operands are passed through unchanged (py4j converts them).
    """
    def _(self, other):
        jother = other._jc if isinstance(other, Column) else other
        return Column(getattr(self._jc, name)(jother))
    _.__doc__ = doc
    return _
def _reverse_op(name, doc="binary operator"):
    """Build a reflected operator method (this object on the right side)."""
    def _(self, other):
        # Lift the plain operand into a literal Column, then invoke the
        # operator on it with ourselves as the right-hand argument.
        left = _create_column_from_literal(other)
        return Column(getattr(left, name)(self._jc))
    _.__doc__ = doc
    return _
class Column(object):
"""
A column in a DataFrame.
:class:`Column` instances can be created by::
# 1. Select a column out of a DataFrame
df.colName
df["colName"]
# 2. Create from an expression
df.colName + 1
1 / df.colName
.. note:: Experimental
.. versionadded:: 1.3
"""
    def __init__(self, jc):
        # Wrap the py4j handle of the JVM column expression; every
        # operator below delegates to it.
        self._jc = jc
# arithmetic operators
__neg__ = _func_op("negate")
__add__ = _bin_op("plus")
__sub__ = _bin_op("minus")
__mul__ = _bin_op("multiply")
__div__ = _bin_op("divide")
__truediv__ = _bin_op("divide")
__mod__ = _bin_op("mod")
__radd__ = _bin_op("plus")
__rsub__ = _reverse_op("minus")
__rmul__ = _bin_op("multiply")
__rdiv__ = _reverse_op("divide")
__rtruediv__ = _reverse_op("divide")
__rmod__ = _reverse_op("mod")
__pow__ = _bin_func_op("pow")
__rpow__ = _bin_func_op("pow", reverse=True)
# logistic operators
__eq__ = _bin_op("equalTo")
__ne__ = _bin_op("notEqual")
__lt__ = _bin_op("lt")
__le__ = _bin_op("leq")
__ge__ = _bin_op("geq")
__gt__ = _bin_op("gt")
# `and`, `or`, `not` cannot be overloaded in Python,
# so use bitwise operators as boolean operators
__and__ = _bin_op('and')
__or__ = _bin_op('or')
__invert__ = _func_op('not')
__rand__ = _bin_op("and")
__ror__ = _bin_op("or")
# container operators
__contains__ = _bin_op("contains")
__getitem__ = _bin_op("apply")
# bitwise operators
bitwiseOR = _bin_op("bitwiseOR")
bitwiseAND = _bin_op("bitwiseAND")
bitwiseXOR = _bin_op("bitwiseXOR")
    @since(1.3)
    def getItem(self, key):
        """
        An expression that gets an item at position ``key`` out of a list,
        or gets an item by key out of a dict.

        :param key: an int position (for array columns) or a dict key.

        >>> df = sc.parallelize([([1, 2], {"key": "value"})]).toDF(["l", "d"])
        >>> df.select(df.l.getItem(0), df.d.getItem("key")).show()
        +----+------+
        |l[0]|d[key]|
        +----+------+
        |   1| value|
        +----+------+
        >>> df.select(df.l[0], df.d["key"]).show()
        +----+------+
        |l[0]|d[key]|
        +----+------+
        |   1| value|
        +----+------+
        """
        # Equivalent to self[key], i.e. the JVM "apply" method.
        return self[key]
    @since(1.3)
    def getField(self, name):
        """
        An expression that gets a field by name in a StructField.

        :param name: the name of the nested struct field to extract.

        >>> from pyspark.sql import Row
        >>> df = sc.parallelize([Row(r=Row(a=1, b="b"))]).toDF()
        >>> df.select(df.r.getField("b")).show()
        +----+
        |r[b]|
        +----+
        |   b|
        +----+
        >>> df.select(df.r.a).show()
        +----+
        |r[a]|
        +----+
        |   1|
        +----+
        """
        # Delegates to __getitem__, i.e. the JVM "apply" method.
        return self[name]
def __getattr__(self, item):
if item.startswith("__"):
raise AttributeError(item)
return self.getField(item)
    def __iter__(self):
        # Explicitly unsupported: without this, Python would fall back to
        # __getitem__-based legacy iteration, which never terminates
        # because __getitem__ returns a Column for any index.
        raise TypeError("Column is not iterable")
# string methods
rlike = _bin_op("rlike")
like = _bin_op("like")
startswith = _bin_op("startsWith")
endswith = _bin_op("endsWith")
    @ignore_unicode_prefix
    @since(1.3)
    def substr(self, startPos, length):
        """
        Return a :class:`Column` which is a substring of the column.

        Both arguments must be of the same type: either plain ints or
        :class:`Column` expressions (mixing the two is rejected).

        :param startPos: start position (int or Column)
        :param length:  length of the substring (int or Column)

        >>> df.select(df.name.substr(1, 3).alias("col")).collect()
        [Row(col=u'Ali'), Row(col=u'Bob')]
        """
        if type(startPos) != type(length):
            raise TypeError("Can not mix the type")
        if isinstance(startPos, (int, long)):
            jc = self._jc.substr(startPos, length)
        elif isinstance(startPos, Column):
            jc = self._jc.substr(startPos._jc, length._jc)
        else:
            raise TypeError("Unexpected type: %s" % type(startPos))
        return Column(jc)

    # Supports the (Python 2 only) slice syntax df.name[1:3].
    __getslice__ = substr
    @ignore_unicode_prefix
    @since(1.3)
    def inSet(self, *cols):
        """
        A boolean expression that is evaluated to true if the value of this
        expression is contained by the evaluated values of the arguments.

        >>> df[df.name.inSet("Bob", "Mike")].collect()
        [Row(age=5, name=u'Bob')]
        >>> df[df.age.inSet([1, 2, 3])].collect()
        [Row(age=2, name=u'Alice')]

        .. note:: Deprecated in 1.5, use :func:`Column.isin` instead.
        """
        # Deprecated thin wrapper: warn, then forward everything to isin().
        warnings.warn("inSet is deprecated. Use isin() instead.")
        return self.isin(*cols)
@ignore_unicode_prefix
@since(1.5)
def isin(self, *cols):
"""
A boolean expression that is evaluated to true if the value of this
expression is contained by the evaluated values of the arguments.
>>> df[df.name.isin("Bob", "Mike")].collect()
[Row(age=5, name=u'Bob')]
>>> df[df.age.isin([1, 2, 3])].collect()
[Row(age=2, name=u'Alice')]
"""
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
cols = [c._jc if isinstance(c, Column) else _create_column_from_literal(c) for c in cols]
sc = SparkContext._active_spark_context
jc = getattr(self._jc, "isin")(_to_seq(sc, cols))
return Column(jc)
# order
asc = _unary_op("asc", "Returns a sort expression based on the"
" ascending order of the given column name.")
desc = _unary_op("desc", "Returns a sort expression based on the"
" descending order of the given column name.")
isNull = _unary_op("isNull", "True if the current expression is null.")
isNotNull = _unary_op("isNotNull", "True if the current expression is not null.")
@since(1.3)
def alias(self, *alias):
"""
Returns this column aliased with a new name or names (in the case of expressions that
return more than one column, such as explode).
>>> df.select(df.age.alias("age2")).collect()
[Row(age2=2), Row(age2=5)]
"""
if len(alias) == 1:
return Column(getattr(self._jc, "as")(alias[0]))
else:
sc = SparkContext._active_spark_context
return Column(getattr(self._jc, "as")(_to_seq(sc, list(alias))))
@ignore_unicode_prefix
@since(1.3)
def cast(self, dataType):
""" Convert the column into type ``dataType``.
>>> df.select(df.age.cast("string").alias('ages')).collect()
[Row(ages=u'2'), Row(ages=u'5')]
>>> df.select(df.age.cast(StringType()).alias('ages')).collect()
[Row(ages=u'2'), Row(ages=u'5')]
"""
if isinstance(dataType, basestring):
jc = self._jc.cast(dataType)
elif isinstance(dataType, DataType):
sc = SparkContext._active_spark_context
ssql_ctx = sc._jvm.SQLContext(sc._jsc.sc())
jdt = ssql_ctx.parseDataType(dataType.json())
jc = self._jc.cast(jdt)
else:
raise TypeError("unexpected type: %s" % type(dataType))
return Column(jc)
astype = cast
@since(1.3)
def between(self, lowerBound, upperBound):
"""
A boolean expression that is evaluated to true if the value of this
expression is between the given columns.
>>> df.select(df.name, df.age.between(2, 4)).show()
+-----+--------------------------+
| name|((age >= 2) && (age <= 4))|
+-----+--------------------------+
|Alice| true|
| Bob| false|
+-----+--------------------------+
"""
return (self >= lowerBound) & (self <= upperBound)
@since(1.4)
def when(self, condition, value):
"""
Evaluates a list of conditions and returns one of multiple possible result expressions.
If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
See :func:`pyspark.sql.functions.when` for example usage.
:param condition: a boolean :class:`Column` expression.
:param value: a literal value, or a :class:`Column` expression.
>>> from pyspark.sql import functions as F
>>> df.select(df.name, F.when(df.age > 4, 1).when(df.age < 3, -1).otherwise(0)).show()
+-----+--------------------------------------------------------+
| name|CASE WHEN (age > 4) THEN 1 WHEN (age < 3) THEN -1 ELSE 0|
+-----+--------------------------------------------------------+
|Alice| -1|
| Bob| 1|
+-----+--------------------------------------------------------+
"""
if not isinstance(condition, Column):
raise TypeError("condition should be a Column")
v = value._jc if isinstance(value, Column) else value
jc = self._jc.when(condition._jc, v)
return Column(jc)
@since(1.4)
def otherwise(self, value):
"""
Evaluates a list of conditions and returns one of multiple possible result expressions.
If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
See :func:`pyspark.sql.functions.when` for example usage.
:param value: a literal value, or a :class:`Column` expression.
>>> from pyspark.sql import functions as F
>>> df.select(df.name, F.when(df.age > 3, 1).otherwise(0)).show()
+-----+---------------------------------+
| name|CASE WHEN (age > 3) THEN 1 ELSE 0|
+-----+---------------------------------+
|Alice| 0|
| Bob| 1|
+-----+---------------------------------+
"""
v = value._jc if isinstance(value, Column) else value
jc = self._jc.otherwise(v)
return Column(jc)
@since(1.4)
def over(self, window):
"""
Define a windowing column.
:param window: a :class:`WindowSpec`
:return: a Column
>>> from pyspark.sql import Window
>>> window = Window.partitionBy("name").orderBy("age").rowsBetween(-1, 1)
>>> from pyspark.sql.functions import rank, min
>>> # df.select(rank().over(window), min('age').over(window))
.. note:: Window functions is only supported with HiveContext in 1.4
"""
from pyspark.sql.window import WindowSpec
if not isinstance(window, WindowSpec):
raise TypeError("window should be WindowSpec")
jc = self._jc.over(window._jspec)
return Column(jc)
    def __nonzero__(self):
        # A Column is a lazy expression, so coercing it to a Python bool is
        # always an error; users must build boolean expressions with the
        # bitwise operators instead of and/or/not.
        raise ValueError("Cannot convert column into bool: please use '&' for 'and', '|' for 'or', "
                         "'~' for 'not' when building DataFrame boolean expressions.")
    # Python 3 name for the same truth-value hook.
    __bool__ = __nonzero__
    def __repr__(self):
        # Delegate to the JVM column's string form, e.g. Column<age>.
        return 'Column<%s>' % self._jc.toString().encode('utf8')
def _test():
    """Run this module's doctests against a local SparkContext."""
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import SQLContext
    import pyspark.sql.column

    # The doctests refer to sc, sqlContext and df by name.
    globs = pyspark.sql.column.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['sqlContext'] = SQLContext(sc)
    schema = StructType([StructField('age', IntegerType()),
                         StructField('name', StringType())])
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]).toDF(schema)

    failure_count, test_count = doctest.testmod(
        pyspark.sql.column, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    globs['sc'].stop()
    if failure_count:
        exit(-1)
# Allow running the doctest suite directly: python pyspark/sql/column.py
if __name__ == "__main__":
    _test()
| |
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 05 14:05:24 2013
Aug 15 2020: add brunnermunzel, rank_compare_2indep
Author: Josef Perktold
"""
from statsmodels.compat.python import lzip
import numpy as np
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_approx_equal, assert_)
from scipy import stats
import pytest
from statsmodels.stats.contingency_tables import (
mcnemar, cochrans_q, SquareTable)
from statsmodels.sandbox.stats.runs import (Runs,
runstest_1samp, runstest_2samp)
from statsmodels.sandbox.stats.runs import mcnemar as sbmcnemar
from statsmodels.stats.nonparametric import (
rank_compare_2indep, rank_compare_2ordinal, prob_larger_continuous,
cohensd2problarger)
from statsmodels.tools.testing import Holder
def _expand_table(table):
'''expand a 2 by 2 contingency table to observations
'''
return np.repeat([[1, 1], [1, 0], [0, 1], [0, 0]], table.ravel(), axis=0)
def test_mcnemar_exact():
    """Exact McNemar test against reference values.

    p-values from the Vassar college online computation.
    """
    cases = [
        (np.array([[101, 121], [59, 33]]), 59, 0.000004),
        (np.array([[101, 70], [59, 33]]), 59, 0.378688),
        (np.array([[101, 80], [59, 33]]), 59, 0.089452),
        (np.array([[101, 30], [60, 33]]), 30, 0.00206),
        (np.array([[101, 10], [30, 33]]), 10, 0.002221),
        (np.array([[101, 10], [10, 33]]), 10, 1.),
    ]
    for table, stat_expected, pvalue_expected in cases:
        stat = mcnemar(table, exact=True)
        assert_almost_equal([stat.statistic, stat.pvalue],
                            [stat_expected, pvalue_expected], decimal=6)
def test_mcnemar_chisquare():
    """Chi-square McNemar test, with and without continuity correction.

    Reference values from R:
    #> mcn = mcnemar.test(matrix(c(101, 121, 59, 33),nrow=2))
    """
    tables = [np.array([[101, 121], [59, 33]]),
              np.array([[101, 70], [59, 33]]),
              np.array([[101, 80], [59, 33]])]
    expected_corrected = [[2.067222e01, 5.450095e-06],
                          [0.7751938, 0.3786151],
                          [2.87769784, 0.08981434]]
    expected_uncorrected = [[2.135556e01, 3.815136e-06],
                            [0.9379845, 0.3327967],
                            [3.17266187, 0.07488031]]
    for table, expected in zip(tables, expected_corrected):
        res = mcnemar(table, exact=False)
        assert_allclose([res.statistic, res.pvalue], expected, rtol=1e-6)
    # test correction = False
    for table, expected in zip(tables, expected_uncorrected):
        res = mcnemar(table, exact=False, correction=False)
        assert_allclose([res.statistic, res.pvalue], expected, rtol=1e-6)
def test_mcnemar_vectorized(reset_randomstate):
    """Vectorized sandbox mcnemar matches per-table results on a stack of tables."""
    ttk = np.random.randint(5, 15, size=(2, 2, 3))
    # Same check for every supported option combination; sbmcnemar is
    # deprecated, so every call is wrapped in pytest.deprecated_call.
    for kwargs in ({'exact': False},
                   {'exact': False, 'correction': False},
                   {'exact': True}):
        with pytest.deprecated_call():
            res = sbmcnemar(ttk, **kwargs)
        with pytest.deprecated_call():
            res1 = lzip(*[sbmcnemar(ttk[:, :, i], **kwargs) for i in range(3)])
        assert_allclose(res, res1, rtol=1e-13)
def test_symmetry_bowker():
    """Bowker's symmetry test on 5x5 tables against mcnemar reference values."""
    table1 = np.array([0, 3, 4, 4, 2, 4, 1, 2, 4, 3, 5, 3, 0, 0, 2, 2, 3, 0, 0,
                       1, 5, 5, 5, 5, 5]).reshape(5, 5)
    table2 = np.array([2, 2, 3, 6, 2, 3, 4, 3, 6, 6, 6, 7, 1, 9, 6, 7, 1, 1, 9,
                       8, 0, 1, 8, 9, 4]).reshape(5, 5)
    # (table, expected statistic, expected pvalue, rtol); the shifted
    # variants add 1 everywhere to avoid zero cells.
    cases = [
        (table1, 7.001587, 0.7252951, 1e-7),
        (1 + table1, 5.355988, 0.8661652, 1e-7),
        (table2, 18.76432, 0.04336035, 1.5e-7),
        (1 + table2, 14.55256, 0.1492461, 1e-7),
    ]
    for tbl, statistic, pvalue, rtol in cases:
        res = SquareTable(tbl, shift_zeros=False).symmetry()
        assert_allclose([res.statistic, res.pvalue], [statistic, pvalue],
                        rtol=rtol)
def test_cochransq():
    """Cochran's Q test on the dataplot example, plus the two-sample
    equivalence with the uncorrected chi-square McNemar test."""
    #example from dataplot docs, Conover p. 253
    #http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/cochran.htm
    x = np.array([[1, 1, 1],
                  [1, 1, 1],
                  [0, 1, 0],
                  [1, 1, 0],
                  [0, 0, 0],
                  [1, 1, 1],
                  [1, 1, 1],
                  [1, 1, 0],
                  [0, 0, 1],
                  [0, 1, 0],
                  [1, 1, 1],
                  [1, 1, 1]])
    res_qstat = 2.8
    res_pvalue = 0.246597
    res = cochrans_q(x)
    assert_almost_equal([res.statistic, res.pvalue], [res_qstat, res_pvalue])

    #equivalence of mcnemar and cochranq for 2 samples
    a, b = x[:, :2].T
    res = cochrans_q(x[:, :2])
    with pytest.deprecated_call():
        assert_almost_equal(sbmcnemar(a, b, exact=False, correction=False),
                            [res.statistic, res.pvalue])
def test_cochransq2():
    """Cochran's Q on a second example; verifies the statistic 13.286."""
    # from an example found on web, verifies 13.286
    data = np.array('''
        0 0 0 1
        0 0 0 1
        0 0 0 1
        1 1 1 1
        1 0 0 1
        0 1 0 1
        1 0 0 1
        0 0 0 1
        0 1 0 0
        0 0 0 0
        1 0 0 1
        0 0 1 1'''.split(), int).reshape(-1, 4)

    res = cochrans_q(data)
    assert_allclose([res.statistic, res.pvalue], [13.2857143, 0.00405776], rtol=1e-6)
def test_cochransq3():
    """Cochran's Q on a three-treatment example, compared to SAS."""
    # The original data is in frequency-weight format: one row per distinct
    # response pattern for treatments (A, B, C) with a count, F=favorable
    # and U=unfavorable:
    #   F F F: 6,  U F F: 2,  F F U: 16, U F U: 4,
    #   F U F: 2,  U U F: 6,  F U U: 4,  U U U: 6
    # Below, U is coded as 1 and each pattern is repeated by its count.
    # (The previously constructed structured array `dta` was never used.)
    cases = np.array([[0, 0, 0],
                      [1, 0, 0],
                      [0, 0, 1],
                      [1, 0, 1],
                      [0, 1, 0],
                      [1, 1, 0],
                      [0, 1, 1],
                      [1, 1, 1]])
    count = np.array([6, 2, 16, 4, 2, 6, 4, 6])
    data = np.repeat(cases, count, 0)

    res = cochrans_q(data)
    # SAS reference values
    assert_allclose([res.statistic, res.pvalue], [8.4706, 0.0145], atol=5e-5)
def test_runstest(reset_randomstate):
    """Runs test: Runs and runstest_1samp against R tseries runs.test values."""
    #comparison numbers from R, tseries, runs.test
    #currently only 2-sided used
    x = np.array([1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1])

    z_twosided = 1.386750
    pvalue_twosided = 0.1655179
    # one-sided reference values; currently unused by the assertions below
    z_greater = 1.386750
    pvalue_greater = 0.08275893
    z_less = 1.386750
    pvalue_less = 0.917241

    #print Runs(x).runs_test(correction=False)
    assert_almost_equal(np.array(Runs(x).runs_test(correction=False)),
                        [z_twosided, pvalue_twosided], decimal=6)

    # compare with runstest_1samp which should have same indicator
    assert_almost_equal(runstest_1samp(x, correction=False),
                        [z_twosided, pvalue_twosided], decimal=6)

    # jittered version of x around 0; every cutoff choice below should
    # reproduce the same 0/1 indicator and hence the same result
    x2 = x - 0.5 + np.random.uniform(-0.1, 0.1, size=len(x))
    assert_almost_equal(runstest_1samp(x2, cutoff=0, correction=False),
                        [z_twosided, pvalue_twosided], decimal=6)
    assert_almost_equal(runstest_1samp(x2, cutoff='mean', correction=False),
                        [z_twosided, pvalue_twosided], decimal=6)
    assert_almost_equal(runstest_1samp(x2, cutoff=x2.mean(), correction=False),
                        [z_twosided, pvalue_twosided], decimal=6)

    # check median
    assert_almost_equal(runstest_1samp(x2, cutoff='median', correction=False),
                        runstest_1samp(x2, cutoff=np.median(x2), correction=False),
                        decimal=6)
def test_runstest_2sample():
    """Two-sample runs test, plus consistency with the one-sample version."""
    # regression test, checked with MonteCarlo and looks reasonable
    x = [31.8, 32.8, 39.2, 36, 30, 34.5, 37.4]
    y = [35.5, 27.6, 21.3, 24.8, 36.7, 30]
    y[-1] += 1e-6  # avoid tie that creates warning
    groups = np.concatenate((np.zeros(len(x)), np.ones(len(y))))
    xy = np.concatenate((x, y))

    res = runstest_2samp(x, y)
    res1 = (0.022428065200812752, 0.98210649318649212)
    assert_allclose(res, res1, rtol=1e-6)

    # check as stacked array: previously this repeated runstest_2samp(x, y)
    # and never used `groups`; the stacked form must pass the group labels.
    res2 = runstest_2samp(xy, groups=groups)
    assert_allclose(res2, res, rtol=1e-6)

    # the one-sample test on the stacked data agrees as well
    res_1s = runstest_1samp(xy)
    assert_allclose(res_1s, res1, rtol=1e-6)
    # check cutoff
    res2_1s = runstest_1samp(xy, xy.mean())
    assert_allclose(res2_1s, res_1s, rtol=1e-6)
def test_brunnermunzel_one_sided():
    """One-sided Brunner-Munzel test, compared to R's lawstat package."""
    # copied from scipy with adjustment
    x = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1]
    y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
    significant = 13

    # reverse direction to match our definition
    x, y = y, x

    # Results are compared with R's lawstat package.
    # Swapping the samples and the alternative must give identical p-values.
    u1, p1 = rank_compare_2indep(x, y
                                 ).test_prob_superior(alternative='smaller')
    u2, p2 = rank_compare_2indep(y, x
                                 ).test_prob_superior(alternative='larger')
    u3, p3 = rank_compare_2indep(x, y
                                 ).test_prob_superior(alternative='larger')
    u4, p4 = rank_compare_2indep(y, x
                                 ).test_prob_superior(alternative='smaller')

    assert_approx_equal(p1, p2, significant=significant)
    assert_approx_equal(p3, p4, significant=significant)
    assert_(p1 != p3)
    assert_approx_equal(u1, 3.1374674823029505,
                        significant=significant)
    assert_approx_equal(u2, -3.1374674823029505,
                        significant=significant)
    assert_approx_equal(u3, 3.1374674823029505,
                        significant=significant)
    assert_approx_equal(u4, -3.1374674823029505,
                        significant=significant)

    # Note: scipy and lawstat tail is reversed compared to test statistic
    assert_approx_equal(p3, 0.0028931043330757342,
                        significant=significant)
    assert_approx_equal(p1, 0.99710689566692423,
                        significant=significant)
def test_brunnermunzel_two_sided():
    """Two-sided Brunner-Munzel test, compared to R's lawstat package."""
    # copied from scipy with adjustment
    x = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1]
    y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
    significant = 13

    # reverse direction to match our definition
    x, y = y, x

    # Results are compared with R's lawstat package.
    # Swapping the samples flips the statistic's sign, not the p-value.
    res1 = rank_compare_2indep(x, y)
    u1, p1 = res1
    t1 = res1.test_prob_superior(alternative='two-sided')
    res2 = rank_compare_2indep(y, x)
    u2, p2 = res2
    t2 = res2.test_prob_superior(alternative='two-sided')

    assert_approx_equal(p1, p2, significant=significant)
    assert_approx_equal(u1, 3.1374674823029505,
                        significant=significant)
    assert_approx_equal(u2, -3.1374674823029505,
                        significant=significant)
    assert_approx_equal(p2, 0.0057862086661515377,
                        significant=significant)

    # the explicit two-sided test matches the default result tuple
    assert_allclose(t1[0], u1, rtol=1e-13)
    assert_allclose(t2[0], u2, rtol=1e-13)
    assert_allclose(t1[1], p1, rtol=1e-13)
    assert_allclose(t2[1], p2, rtol=1e-13)
def test_rank_compare_2indep1():
    """rank_compare_2indep against lawstat's brunner.munzel.test, plus
    internal consistency of test, confint, tost and effect-size helpers."""
    # Example from Munzel and Hauschke 2003
    # data is given by counts, expand to observations
    levels = [-2, -1, 0, 1, 2]
    new = [24, 37, 21, 19, 6]
    active = [11, 51, 22, 21, 7]
    x1 = np.repeat(levels, new)
    x2 = np.repeat(levels, active)

    # using lawstat
    # > brunner.munzel.test(xn, xa)  #brunnermunzel.test(x, y)
    res2_t = Holder(statistic=1.1757561456582,
                    df=204.2984239868,
                    pvalue=0.2410606649547,
                    ci=[0.4700629827705593, 0.6183882855872511],
                    prob=0.5442256341789052)

    res = rank_compare_2indep(x1, x2, use_t=False)
    # our ranking direction is reversed relative to lawstat, hence the
    # sign flip and the 1 - prob complements below
    assert_allclose(res.statistic, -res2_t.statistic, rtol=1e-13)
    assert_allclose(res.prob1, 1 - res2_t.prob, rtol=1e-13)
    assert_allclose(res.prob2, res2_t.prob, rtol=1e-13)
    tt = res.test_prob_superior()
    # TODO: return HolderTuple
    # assert_allclose(tt.statistic, res2_t.statistic)
    # TODO: check sign/direction in lawstat
    assert_allclose(tt[0], -res2_t.statistic, rtol=1e-13)

    ci = res.conf_int(alpha=0.05)
    # we compare normal confint with t confint, lower rtol
    assert_allclose(ci, 1 - np.array(res2_t.ci)[::-1], rtol=0.005)
    # test consistency of test and confint
    res_lb = res.test_prob_superior(value=ci[0])
    assert_allclose(res_lb[1], 0.05, rtol=1e-13)
    res_ub = res.test_prob_superior(value=ci[1])
    assert_allclose(res_ub[1], 0.05, rtol=1e-13)
    # test consistency of tost and confint
    res_tost = res.tost_prob_superior(*ci)
    assert_allclose(res_tost.results_smaller.pvalue, 0.025, rtol=1e-13)
    assert_allclose(res_tost.results_larger.pvalue, 0.025, rtol=1e-13)

    # use t-distribution
    # our ranking is defined as reversed from lawstat, and BM article
    # reverse direction to match our definition
    x1, x2 = x2, x1
    res = rank_compare_2indep(x1, x2, use_t=True)
    assert_allclose(res.statistic, res2_t.statistic, rtol=1e-13)
    tt = res.test_prob_superior()
    # TODO: return HolderTuple
    # assert_allclose(tt.statistic, res2_t.statistic)
    # TODO: check sign/direction in lawstat, reversed from ours
    assert_allclose(tt[0], res2_t.statistic, rtol=1e-13)
    assert_allclose(tt[1], res2_t.pvalue, rtol=1e-13)
    assert_allclose(res.pvalue, res2_t.pvalue, rtol=1e-13)
    assert_allclose(res.df, res2_t.df, rtol=1e-13)

    ci = res.conf_int(alpha=0.05)
    assert_allclose(ci, res2_t.ci, rtol=1e-11)
    # test consistency of test and confint
    res_lb = res.test_prob_superior(value=ci[0])
    assert_allclose(res_lb[1], 0.05, rtol=1e-11)
    res_ub = res.test_prob_superior(value=ci[1])
    assert_allclose(res_ub[1], 0.05, rtol=1e-11)
    # test consistency of tost and confint
    res_tost = res.tost_prob_superior(*ci)
    assert_allclose(res_tost.results_smaller.pvalue, 0.025, rtol=1e-11)
    assert_allclose(res_tost.results_larger.pvalue, 0.025, rtol=1e-11)

    # extras
    # cohen's d
    esd = res.effectsize_normal()
    p = prob_larger_continuous(stats.norm(loc=esd), stats.norm)
    # round trip
    assert_allclose(p, res.prob1, rtol=1e-13)
    # round trip with cohen's d
    pc = cohensd2problarger(esd)
    assert_allclose(pc, res.prob1, rtol=1e-13)

    ci_tr = res.confint_lintransf(1, -1)
    assert_allclose(ci_tr, 1 - np.array(res2_t.ci)[::-1], rtol=0.005)
def test_rank_compare_ord():
    """The ordinal-count implementation agrees with the full-data one."""
    # Example from Munzel and Hauschke 2003; data given as counts per level,
    # expanded to raw observations for the full version.
    levels = [-2, -1, 0, 1, 2]
    new = [24, 37, 21, 19, 6]
    active = [11, 51, 22, 21, 7]
    x1 = np.repeat(levels, new)
    x2 = np.repeat(levels, active)
    for use_t in (False, True):
        res_full = rank_compare_2indep(x1, x2, use_t=use_t)
        res_ord = rank_compare_2ordinal(new, active, use_t=use_t)
        assert_allclose(res_full.prob1, res_ord.prob1, rtol=1e-13)
        assert_allclose(res_full.var_prob, res_ord.var_prob, rtol=1e-13)
        summary_ord = str(res_ord.summary())
        summary_full = str(res_full.summary())
        assert summary_ord == summary_full
def test_rank_compare_vectorized():
    """Vectorized rank_compare_2indep over columns matches per-column results."""
    np.random.seed(987126)
    x1 = np.random.randint(0, 20, (50, 3))
    x2 = np.random.randint(5, 25, (50, 3))
    res = rank_compare_2indep(x1, x2)
    tst = res.test_prob_superior(0.5)
    tost = res.tost_prob_superior(0.4, 0.6)

    # smoke test for summary
    res.summary()

    # each column of the vectorized result equals the scalar computation
    for i in range(3):
        res_i = rank_compare_2indep(x1[:, i], x2[:, i])
        assert_allclose(res.statistic[i], res_i.statistic, rtol=1e-14)
        assert_allclose(res.pvalue[i], res_i.pvalue, rtol=1e-14)
        assert_allclose(res.prob1[i], res_i.prob1, rtol=1e-14)

        tst_i = res_i.test_prob_superior(0.5)
        assert_allclose(tst.statistic[i], tst_i.statistic, rtol=1e-14)
        assert_allclose(tst.pvalue[i], tst_i.pvalue, rtol=1e-14)

        tost_i = res_i.tost_prob_superior(0.4, 0.6)
        assert_allclose(tost.statistic[i], tost_i.statistic, rtol=1e-14)
        assert_allclose(tost.pvalue[i], tost_i.pvalue, rtol=1e-14)
| |
# -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Django settings for stackdio project.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
from __future__ import unicode_literals
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import logging
import os
import dj_database_url
from celery.schedules import crontab
from django.contrib.messages import constants as messages
from stackdio.core.config import StackdioConfig, StackdioConfigException
# Module-level logger for this settings module.
logger = logging.getLogger(__name__)

# Grab a stackdio config object
STACKDIO_CONFIG = StackdioConfig()

# The delimiter used in state execution results
STATE_EXECUTION_DELIMITER = '_|-'

# The fields packed into the state execution result
STATE_EXECUTION_FIELDS = ('module', 'declaration_id', 'name', 'func')

##
# The Django local storage directory for storing its data
##
FILE_STORAGE_DIRECTORY = STACKDIO_CONFIG.storage_dir

# Optional feature sections from the stackdio config; each defaults to
# disabled when its section is absent.
LDAP_CONFIG = STACKDIO_CONFIG.get('ldap', {})
LDAP_ENABLED = LDAP_CONFIG.get('enabled', False)

OPBEAT_CONFIG = STACKDIO_CONFIG.get('opbeat', {})
OPBEAT_ENABLED = OPBEAT_CONFIG.get('enabled', False)

##
# Some convenience variables
##
# Four dirname() calls up from this file's absolute path.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
    os.path.abspath(__file__)
))))

# Set DEBUG things to False here, override to True in the development.py settings
DEBUG = False
JAVASCRIPT_DEBUG = False

# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.9/ref/settings/#allowed-hosts
# NOTE(review): '*' accepts any Host header; consider tightening in production.
ALLOWED_HOSTS = ['*']

# Secret key comes from the stackdio config, not from source control.
SECRET_KEY = STACKDIO_CONFIG.django_secret_key
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'actstream',
    'guardian',
    'stackdio.core',
    'stackdio.core.notifications',
    'stackdio.api.users',
    'stackdio.api.cloud',
    'stackdio.api.stacks',
    'stackdio.api.environments',
    'stackdio.api.volumes',
    'stackdio.api.blueprints',
    'stackdio.api.formulas',
    'stackdio.ui',
    'rest_framework',
    'rest_framework.authtoken',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'stackdio.core.middleware.LoginRedirectMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

# Model-level auth plus guardian's object-level permissions.
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'guardian.backends.ObjectPermissionBackend',
)

# Add the LDAP backend if we're enabled
if LDAP_ENABLED:
    AUTHENTICATION_BACKENDS += ('django_auth_ldap.backend.LDAPBackend',)

# For guardian - we don't need the anonymous user
ANONYMOUS_USER_NAME = None
# Template engine configuration; templates are discovered inside each
# installed app ('APP_DIRS': True), no extra directories.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'debug': DEBUG,
            'context_processors': [
                'django.template.context_processors.csrf',
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.template.context_processors.tz',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'stackdio.server.wsgi.application'

ROOT_URLCONF = 'stackdio.server.urls'

##
# Define your admin tuples like ('full name', 'email@address.com')
##
ADMINS = ()
MANAGERS = ADMINS

##
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
#
# We're using dj-database-url to simplify
# the required settings and instead of pulling the DSN from an
# environment variable, we're loading it from the stackdio config
##
DATABASES = {
    'default': dj_database_url.parse(STACKDIO_CONFIG.database_url, conn_max_age=600)
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Login settings
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_URL = '/logout/'

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = '%s/static/' % FILE_STORAGE_DIRECTORY

# Additional locations of static files
STATICFILES_DIRS = ()

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = '%s/media/' % FILE_STORAGE_DIRECTORY

# Override message tags for bootstrap
MESSAGE_TAGS = {
    messages.ERROR: 'danger',
}

# Caching - only do 1 minute
CACHE_MIDDLEWARE_SECONDS = 60

CACHES = {
    'default': {
        'BACKEND': 'redis_cache.RedisCache',
        'LOCATION': STACKDIO_CONFIG.redis_url,
    }
}

# Use the cache session engine
# NOTE(review): 'cached_db' is the write-through cache+database session
# backend, not the pure cache backend the comment above suggests.
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'default': {
            'format': '%(asctime)s %(levelname)s [%(threadName)s] %(name)s - %(message)s',
        }
    },
    'handlers': {
        # Email site admins on errors, but only when DEBUG is off.
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        # Discards records; used below to silence chatty loggers.
        'null': {
            'level': 'DEBUG',
            'class': 'logging.NullHandler',
        },
        'console': {
            'level': 'DEBUG',
            'formatter': 'default',
            'class': 'logging.StreamHandler',
        },
        # Rotating file log: 5 MB per file, 5 backups kept.
        'file': {
            'level': 'DEBUG',
            'formatter': 'default',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(STACKDIO_CONFIG.log_dir, 'django.log'),
            'maxBytes': 5242880,
            'backupCount': 5,
        },
    },
    'loggers': {
        'django.db.backends': {
            'handlers': ['null'],
            'level': 'WARNING',
            'propagate': True,
        },
        'django_auth_ldap': {
            'handlers': ['console', 'file'],
            'level': 'INFO',
            'propagate': False,
        },
        'MARKDOWN': {
            'handlers': ['console', 'file'],
            'level': 'WARNING',
            'propagate': False,
        },
        'pip': {
            'handlers': ['console', 'file'],
            'level': 'INFO',
            'propagate': False,
        },
        'boto': {
            'handlers': ['null'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'amqp': {
            'handlers': ['null'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'stackdio.core.permissions': {
            'handlers': ['null'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'git.cmd': {
            'handlers': ['console', 'file'],
            'level': 'INFO',
            'propagate': False,
        },
        'salt.config': {
            'handlers': ['console', 'file'],
            'level': 'INFO',
            'propagate': False,
        },
        # Root logger: everything not matched above goes to console + file.
        '': {
            'handlers': ['console', 'file'],
            'level': 'WARNING',
            'propagate': False,
        },
    }
}
##
# Django REST Framework configuration
##
REST_FRAMEWORK = {
    'PAGE_SIZE': 50,

    # Filtering
    'DEFAULT_FILTER_BACKENDS': (
        'rest_framework.filters.DjangoFilterBackend',
    ),

    # Authentication
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.TokenAuthentication',
        'rest_framework.authentication.BasicAuthentication',
    ),

    # All endpoints require authentication
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),

    # Parsers - enable FormParser to get nice forms in the browsable API
    'DEFAULT_PARSER_CLASSES': (
        'rest_framework.parsers.JSONParser',
        'rest_framework.parsers.FormParser',
    ),

    # Enable the browsable API - comment out the BrowsableAPIRenderer line to only return json
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
        'rest_framework.renderers.BrowsableAPIRenderer',
        'rest_framework.renderers.AdminRenderer',
    ),
}
##
# Available cloud providers - pull from config file
##
CLOUD_PROVIDERS = STACKDIO_CONFIG.cloud_providers

##
# Celery & RabbitMQ
##
BROKER_URL = STACKDIO_CONFIG.celery_broker_url
CELERY_REDIRECT_STDOUTS = False
CELERY_DEFAULT_QUEUE = 'default'

# Make sure workers don't prefetch tasks - otherwise you can end up with a single worker
# claiming multiple orchestration tasks, and it will only run 1 at a time even though
# there are other idle workers
CELERYD_PREFETCH_MULTIPLIER = 1

# Serializer settings
# We'll use json since pickle can sometimes be insecure
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TASK_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']

# Configure queues
# Long-running environment/stack work goes to dedicated queues; quick
# tasks are routed to the 'short' queue.
CELERY_ROUTES = {
    'environments.finish_environment': {'queue': 'environments'},
    'environments.highstate': {'queue': 'environments'},
    'environments.orchestrate': {'queue': 'environments'},
    'environments.propagate_ssh': {'queue': 'environments'},
    'environments.single_sls': {'queue': 'environments'},
    'environments.sync_all': {'queue': 'environments'},
    'notifications.generate_notifications': {'queue': 'short'},
    'notifications.resend_failed_notifications': {'queue': 'short'},
    'notifications.send_notification': {'queue': 'short'},
    'notifications.send_bulk_notifications': {'queue': 'short'},
    'stacks.destroy_hosts': {'queue': 'stacks'},
    'stacks.destroy_stack': {'queue': 'stacks'},
    'stacks.execute_action': {'queue': 'short'},
    'stacks.finish_stack': {'queue': 'stacks'},
    'stacks.global_orchestrate': {'queue': 'stacks'},
    'stacks.highstate': {'queue': 'stacks'},
    'stacks.launch_hosts': {'queue': 'stacks'},
    'stacks.orchestrate': {'queue': 'stacks'},
    'stacks.ping': {'queue': 'stacks'},
    'stacks.propagate_ssh': {'queue': 'stacks'},
    'stacks.register_dns': {'queue': 'stacks'},
    'stacks.register_volume_delete': {'queue': 'stacks'},
    'stacks.run_command': {'queue': 'short'},
    'stacks.single_sls': {'queue': 'stacks'},
    'stacks.sync_all': {'queue': 'stacks'},
    'stacks.tag_infrastructure': {'queue': 'stacks'},
    'stacks.unregister_dns': {'queue': 'stacks'},
    'stacks.update_host_info': {'queue': 'short'},
    'stacks.update_metadata': {'queue': 'stacks'},
}

# Periodic tasks driven by celery beat.
CELERYBEAT_SCHEDULE = {
    'update-host-info': {
        'task': 'stacks.update_host_info',
        'schedule': crontab(minute='*/5'),  # Execute every 5 minutes
        'args': (),
    },
    'resend-failed-notifications': {
        'task': 'notifications.resend_failed_notifications',
        'schedule': crontab(minute='*/10'),  # Execute every 10 minutes
        'args': (),
    }
}
USER_AGENT_WHITELIST = STACKDIO_CONFIG.get('user_agent_whitelist', [])

# opbeat things
if OPBEAT_ENABLED:
    # Install the opbeat app and put its APM middleware first in the chain.
    INSTALLED_APPS += ('opbeat.contrib.django',)

    MIDDLEWARE_CLASSES = (
        'opbeat.contrib.django.middleware.OpbeatAPMMiddleware',
    ) + MIDDLEWARE_CLASSES

    # Credentials come from the 'opbeat' section of the stackdio config.
    OPBEAT = {
        'ORGANIZATION_ID': OPBEAT_CONFIG.get('organization_id'),
        'APP_ID': OPBEAT_CONFIG.get('app_id'),
        'SECRET_TOKEN': OPBEAT_CONFIG.get('secret_token'),
    }

    # Set up the logging
    LOGGING['handlers']['opbeat'] = {
        'level': 'WARNING',
        'class': 'opbeat.contrib.django.handlers.OpbeatHandler',
    }
    LOGGING['loggers']['opbeat.errors'] = {
        'level': 'ERROR',
        'handlers': ['console'],
        'propagate': False,
    }
    LOGGING['loggers']['opbeat.instrumentation.packages.base'] = {
        'level': 'WARNING',
        'handlers': ['console'],
        'propagate': False,
    }
##
# LDAP configuration. To enable this, you should set ldap: enabled: true in your config file.
##

# Throw in the rest of our LDAP config if ldap is enabled
if LDAP_ENABLED:
    try:
        import ldap
        import django_auth_ldap.config
        from django_auth_ldap.config import LDAPSearch
    except ImportError:
        raise StackdioConfigException('LDAP is enabled, but django_auth_ldap is missing. '
                                      'Please install django_auth_ldap.')

    # Config keys whose values resolve against django_auth_ldap.config
    # instead of the ldap module.
    auth_ldap_search = ('group_type',)
    # Config keys whose resolved value must be called to produce the setting.
    call_value = ('group_type',)
def get_from_ldap_module(attr, module=ldap, fail_on_error=False):
try:
return getattr(module, attr)
except (AttributeError, TypeError):
if fail_on_error:
raise StackdioConfigException('Invalid config value: {}'.format(attr))
else:
# if we get an exception, just return the raw attribute
return attr
def get_search_object(user_or_group):
search_base = LDAP_CONFIG.get('{}_search_base'.format(user_or_group))
if not search_base:
raise StackdioConfigException('Missing ldap.{}_search_base '
'config parameter'.format(user_or_group))
search_scope_str = LDAP_CONFIG.get('{}_search_scope'.format(user_or_group), 'SCOPE_SUBTREE')
search_scope = get_from_ldap_module(search_scope_str, fail_on_error=True)
search_filter = LDAP_CONFIG.get('{}_search_filter'.format(user_or_group))
if search_filter is None:
return LDAPSearch(search_base, search_scope)
else:
return LDAPSearch(search_base, search_scope, search_filter)
    # Set the search objects
    AUTH_LDAP_USER_SEARCH = get_search_object('user')
    AUTH_LDAP_GROUP_SEARCH = get_search_object('group')

    # Translate every remaining entry in the ldap config section into an
    # AUTH_LDAP_* setting on this module.
    for key, value in LDAP_CONFIG.items():
        if key == 'enabled':
            # 'enabled' is our own flag, not a django_auth_ldap setting.
            continue
        settings_key = 'AUTH_LDAP_{}'.format(key.upper())
        # Keys listed in auth_ldap_search resolve against
        # django_auth_ldap.config; everything else against the ldap module.
        if key in auth_ldap_search:
            search_module = django_auth_ldap.config
        else:
            search_module = ldap
        if isinstance(value, dict):
            # Resolve both keys and values of mapping-style settings
            # (e.g. AUTH_LDAP_USER_ATTR_MAP-like dicts).
            settings_value = {}
            for k, v in value.items():
                sub_key = get_from_ldap_module(k, search_module)
                sub_value = get_from_ldap_module(v, search_module)
                settings_value[sub_key] = sub_value
        else:
            settings_value = get_from_ldap_module(value, search_module)
        if key in call_value:
            # Some resolved values (e.g. group_type classes) must be
            # instantiated before use.
            settings_value = settings_value()
        # Set the attribute on this settings module
        vars()[settings_key] = settings_value
| |
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base classes for storage engines
"""
import abc
from oslo_config import cfg
from oslo_db import api as db_api
import six
# Map the 'sqlalchemy' backend name to the module that implements it.
_BACKEND_MAPPING = {'sqlalchemy': 'ironic.db.sqlalchemy.api'}

# lazy=True defers importing the backend module until first use.
IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING,
                                lazy=True)


def get_instance():
    """Return a DB API instance."""
    return IMPL
@six.add_metaclass(abc.ABCMeta)
class Connection(object):
    """Base class for storage system connections."""

    @abc.abstractmethod
    def __init__(self):
        """Constructor."""

    @abc.abstractmethod
    def get_nodeinfo_list(self, columns=None, filters=None, limit=None,
                          marker=None, sort_key=None, sort_dir=None):
        """Get specific columns for matching nodes.

        Return a list of the specified columns for all nodes that match the
        specified filters.

        :param columns: List of column names to return.
                        Defaults to 'id' column when columns == None.
        :param filters: Filters to apply. Defaults to None.

                        :associated: True | False
                        :reserved: True | False
                        :reserved_by_any_of: [conductor1, conductor2]
                        :maintenance: True | False
                        :chassis_uuid: uuid of chassis
                        :driver: driver's name
                        :provision_state: provision state of node
                        :provisioned_before:
                            nodes with provision_updated_at field before this
                            interval in seconds
        :param limit: Maximum number of nodes to return.
        :param marker: the last item of the previous page; we return the next
                       result set.
        :param sort_key: Attribute by which results should be sorted.
        :param sort_dir: direction in which results should be sorted.
                         (asc, desc)
        :returns: A list of tuples of the specified columns.
        """

    @abc.abstractmethod
    def get_node_list(self, filters=None, limit=None, marker=None,
                      sort_key=None, sort_dir=None):
        """Return a list of nodes.

        :param filters: Filters to apply. Defaults to None.

                        :associated: True | False
                        :reserved: True | False
                        :maintenance: True | False
                        :chassis_uuid: uuid of chassis
                        :driver: driver's name
                        :provision_state: provision state of node
                        :provisioned_before:
                            nodes with provision_updated_at field before this
                            interval in seconds
        :param limit: Maximum number of nodes to return.
        :param marker: the last item of the previous page; we return the next
                       result set.
        :param sort_key: Attribute by which results should be sorted.
        :param sort_dir: direction in which results should be sorted.
                         (asc, desc)
        """

    @abc.abstractmethod
    def reserve_node(self, tag, node_id):
        """Reserve a node.

        To prevent other ManagerServices from manipulating the given
        Node while a Task is performed, mark it reserved by this host.

        :param tag: A string uniquely identifying the reservation holder.
        :param node_id: A node id or uuid.
        :returns: A Node object.
        :raises: NodeNotFound if the node is not found.
        :raises: NodeLocked if the node is already reserved.
        """

    @abc.abstractmethod
    def release_node(self, tag, node_id):
        """Release the reservation on a node.

        :param tag: A string uniquely identifying the reservation holder.
        :param node_id: A node id or uuid.
        :raises: NodeNotFound if the node is not found.
        :raises: NodeLocked if the node is reserved by another host.
        :raises: NodeNotLocked if the node was found to not have a
                 reservation at all.
        """

    @abc.abstractmethod
    def create_node(self, values):
        """Create a new node.

        :param values: A dict containing several items used to identify
                       and track the node, and several dicts which are passed
                       into the Drivers when managing this node. For example:

                       ::

                        {
                         'uuid': uuidutils.generate_uuid(),
                         'instance_uuid': None,
                         'power_state': states.POWER_OFF,
                         'provision_state': states.AVAILABLE,
                         'driver': 'pxe_ipmitool',
                         'driver_info': { ... },
                         'properties': { ... },
                         'extra': { ... },
                        }
        :returns: A node.
        """

    @abc.abstractmethod
    def get_node_by_id(self, node_id):
        """Return a node.

        :param node_id: The id of a node.
        :returns: A node.
        """

    @abc.abstractmethod
    def get_node_by_uuid(self, node_uuid):
        """Return a node.

        :param node_uuid: The uuid of a node.
        :returns: A node.
        """

    @abc.abstractmethod
    def get_node_by_name(self, node_name):
        """Return a node.

        :param node_name: The logical name of a node.
        :returns: A node.
        """

    @abc.abstractmethod
    def get_node_by_instance(self, instance):
        """Return a node.

        :param instance: The instance uuid to search for.
        :returns: A node.
        :raises: InstanceNotFound if the instance is not found.
        :raises: InvalidUUID if the instance uuid is invalid.
        """

    @abc.abstractmethod
    def destroy_node(self, node_id):
        """Destroy a node and all associated interfaces.

        :param node_id: The id or uuid of a node.
        """

    @abc.abstractmethod
    def update_node(self, node_id, values):
        """Update properties of a node.

        :param node_id: The id or uuid of a node.
        :param values: Dict of values to update.
                       May be a partial list, eg. when setting the
                       properties for a driver. For example:

                       ::

                        {
                         'driver_info':
                             {
                              'my-field-1': val1,
                              'my-field-2': val2,
                             }
                        }
        :returns: A node.
        :raises: NodeAssociated
        :raises: NodeNotFound
        """

    @abc.abstractmethod
    def get_port_by_id(self, port_id):
        """Return a network port representation.

        :param port_id: The id of a port.
        :returns: A port.
        """

    @abc.abstractmethod
    def get_port_by_uuid(self, port_uuid):
        """Return a network port representation.

        :param port_uuid: The uuid of a port.
        :returns: A port.
        """

    @abc.abstractmethod
    def get_port_by_address(self, address):
        """Return a network port representation.

        :param address: The MAC address of a port.
        :returns: A port.
        """

    @abc.abstractmethod
    def get_port_list(self, limit=None, marker=None,
                      sort_key=None, sort_dir=None):
        """Return a list of ports.

        :param limit: Maximum number of ports to return.
        :param marker: the last item of the previous page; we return the next
                       result set.
        :param sort_key: Attribute by which results should be sorted.
        :param sort_dir: direction in which results should be sorted.
                         (asc, desc)
        """

    @abc.abstractmethod
    def get_ports_by_node_id(self, node_id, limit=None, marker=None,
                             sort_key=None, sort_dir=None):
        """List all the ports for a given node.

        :param node_id: The integer node ID.
        :param limit: Maximum number of ports to return.
        :param marker: the last item of the previous page; we return the next
                       result set.
        :param sort_key: Attribute by which results should be sorted
        :param sort_dir: direction in which results should be sorted
                         (asc, desc)
        :returns: A list of ports.
        """

    @abc.abstractmethod
    def get_ports_by_portgroup_id(self, portgroup_id, limit=None, marker=None,
                                  sort_key=None, sort_dir=None):
        """List all the ports for a given portgroup.

        :param portgroup_id: The integer portgroup ID.
        :param limit: Maximum number of ports to return.
        :param marker: The last item of the previous page; we return the next
                       result set.
        :param sort_key: Attribute by which results should be sorted
        :param sort_dir: Direction in which results should be sorted
                         (asc, desc)
        :returns: A list of ports.
        """

    @abc.abstractmethod
    def create_port(self, values):
        """Create a new port.

        :param values: Dict of values.
        """

    @abc.abstractmethod
    def update_port(self, port_id, values):
        """Update properties of a port.

        :param port_id: The id or MAC of a port.
        :param values: Dict of values to update.
        :returns: A port.
        """

    @abc.abstractmethod
    def destroy_port(self, port_id):
        """Destroy a port.

        :param port_id: The id or MAC of a port.
        """

    @abc.abstractmethod
    def get_portgroup_by_id(self, portgroup_id):
        """Return a network portgroup representation.

        :param portgroup_id: The id of a portgroup.
        :returns: A portgroup.
        :raises: PortgroupNotFound
        """

    @abc.abstractmethod
    def get_portgroup_by_uuid(self, portgroup_uuid):
        """Return a network portgroup representation.

        :param portgroup_uuid: The uuid of a portgroup.
        :returns: A portgroup.
        :raises: PortgroupNotFound
        """

    @abc.abstractmethod
    def get_portgroup_by_address(self, address):
        """Return a network portgroup representation.

        :param address: The MAC address of a portgroup.
        :returns: A portgroup.
        :raises: PortgroupNotFound
        """

    @abc.abstractmethod
    def get_portgroup_by_name(self, name):
        """Return a network portgroup representation.

        :param name: The logical name of a portgroup.
        :returns: A portgroup.
        :raises: PortgroupNotFound
        """

    @abc.abstractmethod
    def get_portgroup_list(self, limit=None, marker=None,
                           sort_key=None, sort_dir=None):
        """Return a list of portgroups.

        :param limit: Maximum number of portgroups to return.
        :param marker: The last item of the previous page; we return the next
                       result set.
        :param sort_key: Attribute by which results should be sorted.
        :param sort_dir: Direction in which results should be sorted.
                         (asc, desc)
        :returns: A list of portgroups.
        """

    @abc.abstractmethod
    def get_portgroups_by_node_id(self, node_id, limit=None, marker=None,
                                  sort_key=None, sort_dir=None):
        """List all the portgroups for a given node.

        :param node_id: The integer node ID.
        :param limit: Maximum number of portgroups to return.
        :param marker: The last item of the previous page; we return the next
                       result set.
        :param sort_key: Attribute by which results should be sorted
        :param sort_dir: Direction in which results should be sorted
                         (asc, desc)
        :returns: A list of portgroups.
        """

    @abc.abstractmethod
    def create_portgroup(self, values):
        """Create a new portgroup.

        :param values: Dict of values with the following keys:
                       'id'
                       'uuid'
                       'name'
                       'node_id'
                       'address'
                       'extra'
                       'created_at'
                       'updated_at'
        :returns: A portgroup
        :raises: PortgroupDuplicateName
        :raises: PortgroupMACAlreadyExists
        :raises: PortgroupAlreadyExists
        """

    @abc.abstractmethod
    def update_portgroup(self, portgroup_id, values):
        """Update properties of a portgroup.

        :param portgroup_id: The UUID or MAC of a portgroup.
        :param values: Dict of values to update.
                       May contain the following keys:
                       'uuid'
                       'name'
                       'node_id'
                       'address'
                       'extra'
                       'created_at'
                       'updated_at'
        :returns: A portgroup.
        :raises: InvalidParameterValue
        :raises: PortgroupNotFound
        :raises: PortgroupDuplicateName
        :raises: PortgroupMACAlreadyExists
        """

    @abc.abstractmethod
    def destroy_portgroup(self, portgroup_id):
        """Destroy a portgroup.

        :param portgroup_id: The UUID or MAC of a portgroup.
        :raises: PortgroupNotEmpty
        :raises: PortgroupNotFound
        """

    @abc.abstractmethod
    def create_chassis(self, values):
        """Create a new chassis.

        :param values: Dict of values.
        """

    @abc.abstractmethod
    def get_chassis_by_id(self, chassis_id):
        """Return a chassis representation.

        :param chassis_id: The id of a chassis.
        :returns: A chassis.
        """

    @abc.abstractmethod
    def get_chassis_by_uuid(self, chassis_uuid):
        """Return a chassis representation.

        :param chassis_uuid: The uuid of a chassis.
        :returns: A chassis.
        """

    @abc.abstractmethod
    def get_chassis_list(self, limit=None, marker=None,
                         sort_key=None, sort_dir=None):
        """Return a list of chassis.

        :param limit: Maximum number of chassis to return.
        :param marker: the last item of the previous page; we return the next
                       result set.
        :param sort_key: Attribute by which results should be sorted.
        :param sort_dir: direction in which results should be sorted.
                         (asc, desc)
        """

    @abc.abstractmethod
    def update_chassis(self, chassis_id, values):
        """Update properties of a chassis.

        :param chassis_id: The id or the uuid of a chassis.
        :param values: Dict of values to update.
        :returns: A chassis.
        """

    @abc.abstractmethod
    def destroy_chassis(self, chassis_id):
        """Destroy a chassis.

        :param chassis_id: The id or the uuid of a chassis.
        """

    @abc.abstractmethod
    def register_conductor(self, values, update_existing=False):
        """Register an active conductor with the cluster.

        :param values: A dict of values which must contain the following:

                       ::

                        {
                         'hostname': the unique hostname which identifies
                                     this Conductor service.
                         'drivers': a list of supported drivers.
                        }
        :param update_existing: When false, registration will raise an
                                exception when a conflicting online record
                                is found. When true, will overwrite the
                                existing record. Default: False.
        :returns: A conductor.
        :raises: ConductorAlreadyRegistered
        """

    @abc.abstractmethod
    def get_conductor(self, hostname):
        """Retrieve a conductor's service record from the database.

        :param hostname: The hostname of the conductor service.
        :returns: A conductor.
        :raises: ConductorNotFound
        """

    @abc.abstractmethod
    def unregister_conductor(self, hostname):
        """Remove this conductor from the service registry immediately.

        :param hostname: The hostname of this conductor service.
        :raises: ConductorNotFound
        """

    @abc.abstractmethod
    def touch_conductor(self, hostname):
        """Mark a conductor as active by updating its 'updated_at' property.

        :param hostname: The hostname of this conductor service.
        :raises: ConductorNotFound
        """

    @abc.abstractmethod
    def get_active_driver_dict(self, interval):
        """Retrieve drivers for the registered and active conductors.

        :param interval: Seconds since last check-in of a conductor.
        :returns: A dict which maps driver names to the set of hosts
                  which support them. For example:

                  ::

                   {driverA: set([host1, host2]),
                    driverB: set([host2, host3])}
        """

    @abc.abstractmethod
    def get_offline_conductors(self):
        """Get a list of conductor hostnames that are offline (dead).

        :returns: A list of conductor hostnames.
        """

    @abc.abstractmethod
    def touch_node_provisioning(self, node_id):
        """Mark the node's provisioning as running.

        Mark the node's provisioning as running by updating its
        'provision_updated_at' property.

        :param node_id: The id of a node.
        :raises: NodeNotFound
        """

    @abc.abstractmethod
    def set_node_tags(self, node_id, tags):
        """Replace all of the node tags with specified list of tags.

        This ignores duplicate tags in the specified list.

        :param node_id: The id of a node.
        :param tags: List of tags.
        :returns: A list of NodeTag objects.
        :raises: NodeNotFound if the node is not found.
        """

    @abc.abstractmethod
    def unset_node_tags(self, node_id):
        """Remove all tags of the node.

        :param node_id: The id of a node.
        :raises: NodeNotFound if the node is not found.
        """

    @abc.abstractmethod
    def get_node_tags_by_node_id(self, node_id):
        """Get node tags based on its id.

        :param node_id: The id of a node.
        :returns: A list of NodeTag objects.
        :raises: NodeNotFound if the node is not found.
        """

    @abc.abstractmethod
    def add_node_tag(self, node_id, tag):
        """Add tag to the node.

        If the node_id and tag pair already exists, this should still
        succeed.

        :param node_id: The id of a node.
        :param tag: A tag string.
        :returns: the NodeTag object.
        :raises: NodeNotFound if the node is not found.
        """

    @abc.abstractmethod
    def delete_node_tag(self, node_id, tag):
        """Delete specified tag from the node.

        :param node_id: The id of a node.
        :param tag: A tag string.
        :raises: NodeNotFound if the node is not found.
        :raises: NodeTagNotFound if the tag is not found.
        """

    @abc.abstractmethod
    def node_tag_exists(self, node_id, tag):
        """Check if the specified tag exist on the node.

        :param node_id: The id of a node.
        :param tag: A tag string.
        :returns: True if the tag exists otherwise False.
        """
| |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Represent granular events that can be used to trigger callbacks.
Bokeh documents and applications are capable of supporting various kinds of
interactions. These are often associated with events, such as mouse or touch
events, interactive downsampling mode activation, widget or tool events, and
others. The classes in this module represent these different events, so that
callbacks can be attached and executed when they occur.
It is possible to respond to events with ``CustomJS`` callbacks, which will
function with or without a Bokeh server. This can be accomplished by passing
an event class, and a ``CustomJS`` model to the
:func:`~bokeh.model.Model.js_on_event` method. When the ``CustomJS`` is
executed in the browser, its ``cb_obj`` argument will contain the concrete
event object that triggered the callback.
.. code-block:: python
from bokeh.events import ButtonClick
from bokeh.models import Button, CustomJS
button = Button()
button.js_on_event(ButtonClick, CustomJS(code='console.log("JS:Click")'))
Alternatively it is possible to trigger Python code to run when events
happen, in the context of a Bokeh application running on a Bokeh server.
This can be accomplished by passing an event class, and a callback function
to the :func:`~bokeh.model.Model.on_event` method. The callback should
accept a single argument ``event``, which will be passed the concrete
event object that triggered the callback.
.. code-block:: python
from bokeh.events import ButtonClick
from bokeh.models import Button
button = Button()
def callback(event):
print('Python:Click')
button.on_event(ButtonClick, callback)
.. note ::
There is no throttling of events. Some events such as ``MouseMove``
may trigger at a very high rate.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import (
TYPE_CHECKING,
ClassVar,
Dict,
Literal,
Type,
TypedDict,
)
# Bokeh imports
from .core.serialization import Deserializer
if TYPE_CHECKING:
from .core.types import GeometryData, Unknown
from .model import Model
from .models.plots import Plot
from .models.widgets.buttons import AbstractButton
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Explicit public API: the event base classes and every concrete event.
__all__ = (
    'ButtonClick',
    'DocumentEvent',
    'DocumentReady',
    'DoubleTap',
    'Event',
    'LODStart',
    'LODEnd',
    'MenuItemClick',
    'ModelEvent',
    'MouseEnter',
    'MouseLeave',
    'MouseMove',
    'MouseWheel',
    'Pan',
    'PanEnd',
    'PanStart',
    'Pinch',
    'PinchEnd',
    'PinchStart',
    'RangesUpdate',
    'Rotate',
    'RotateEnd',
    'RotateStart',
    'PlotEvent',
    'PointEvent',
    'Press',
    'PressUp',
    'Reset',
    'SelectionGeometry',
    'Tap',
)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
# Registry mapping event_name -> concrete Event subclass; populated
# automatically by Event.__init_subclass__ as subclasses are defined.
_CONCRETE_EVENT_CLASSES: Dict[str, Type[Event]] = {}
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class EventRep(TypedDict):
    ''' Serialized (wire-format) representation of an :class:`Event`. '''
    # discriminator tag, always the string "event"
    type: Literal["event"]
    # the event_name of the concrete Event subclass
    name: str
    # keyword arguments used to reconstruct the event instance
    values: Dict[str, Unknown]
class Event:
    ''' Base class for all Bokeh events.

    This base class is not typically useful to instantiate on its own.

    '''

    # set only on concrete subclasses; its presence is what marks a
    # subclass as concrete (see __init_subclass__)
    event_name: ClassVar[str]

    @classmethod
    def __init_subclass__(cls):
        super().__init_subclass__()
        # Register only concrete subclasses (those declaring an event_name)
        # so they can be resolved by name during deserialization.
        if hasattr(cls, "event_name"):
            _CONCRETE_EVENT_CLASSES[cls.event_name] = cls

    @classmethod
    def from_serializable(cls, rep: EventRep, decoder: Deserializer) -> Event:
        ''' Reconstruct an event from its serialized representation.

        Args:
            rep: the serialized event representation
            decoder: deserializer used to decode the event's values and to
                report malformed input (``decoder.error`` raises)

        '''
        if not ("name" in rep and "values" in rep):
            decoder.error("invalid representation")

        name = rep.get("name")
        if name is None:
            decoder.error("'name' field is missing")

        values = rep.get("values")
        if values is None:
            decoder.error("'values' field is missing")

        # BUGFIX: use a distinct local name instead of shadowing the ``cls``
        # argument of this classmethod.
        event_cls = _CONCRETE_EVENT_CLASSES.get(name)
        if event_cls is None:
            decoder.error(f"can't resolve event '{name}'")

        decoded_values = decoder.decode(values)
        event = event_cls(**decoded_values)
        return event
class DocumentEvent(Event):
    ''' Base class for all Bokeh Document events.

    This base class is not typically useful to instantiate on its own.

    '''

class DocumentReady(DocumentEvent):
    ''' Announce when a Document is fully idle.

    '''
    event_name = 'document_ready'
class ModelEvent(Event):
    ''' Base class for all Bokeh Model events.

    This base class is not typically useful to instantiate on its own.

    '''

    # the Bokeh model this event is associated with (may be None)
    model: Model | None

    def __init__(self, model: Model | None) -> None:
        ''' Create a new base event.

        Args:
            model (Model) : a Bokeh model to register event callbacks on

        '''
        self.model = model
class ButtonClick(ModelEvent):
    ''' Announce a button click event on a Bokeh button widget.

    '''
    event_name = 'button_click'

    def __init__(self, model: AbstractButton | None) -> None:
        # Local import avoids a circular dependency at module load time.
        from .models.widgets import AbstractButton, ButtonGroup

        # None is acceptable; anything else must be a button-like model.
        acceptable = model is None or isinstance(model, (AbstractButton, ButtonGroup))
        if not acceptable:
            clsname = self.__class__.__name__
            raise ValueError(f"{clsname} event only applies to button and button group models")

        super().__init__(model=model)
class MenuItemClick(ModelEvent):
    ''' Announce a button click event on a Bokeh menu item.

    '''
    event_name = 'menu_item_click'

    def __init__(self, model: Model, item: str | None = None) -> None:
        # Let the base class record the model, then remember which
        # menu item was clicked.
        super().__init__(model=model)
        self.item = item
class PlotEvent(ModelEvent):
    ''' The base class for all events applicable to Plot models.

    '''
    def __init__(self, model: Plot | None) -> None:
        # Local import avoids a circular dependency at module load time.
        from .models import Plot

        # None is acceptable; anything else must be a Plot.
        acceptable = model is None or isinstance(model, Plot)
        if not acceptable:
            raise ValueError(f"{self.__class__.__name__} event only applies to Plot models")

        super().__init__(model)
class LODStart(PlotEvent):
    ''' Announce the start of "interactive level-of-detail" mode on a plot.

    During interactive actions such as panning or zooming, Bokeh can
    optionally, temporarily draw a reduced set of the data, in order to
    maintain high interactive rates. This is referred to as interactive
    Level-of-Detail (LOD) mode. This event fires whenever a LOD mode
    has just begun.

    '''
    event_name = 'lodstart'

class LODEnd(PlotEvent):
    ''' Announce the end of "interactive level-of-detail" mode on a plot.

    During interactive actions such as panning or zooming, Bokeh can
    optionally, temporarily draw a reduced set of the data, in order to
    maintain high interactive rates. This is referred to as interactive
    Level-of-Detail (LOD) mode. This event fires whenever a LOD mode
    has just ended.

    '''
    event_name = 'lodend'
class RangesUpdate(PlotEvent):
    ''' Announce combined range updates in a single event.

    Attributes:
        x0 (float) : start x-coordinate for the default x-range
        x1 (float) : end x-coordinate for the default x-range
        y0 (float) : start y-coordinate for the default y-range
        y1 (float) : end y-coordinate for the default y-range

    Callbacks may be added to range ``start`` and ``end`` properties to respond
    to range changes, but this can result in multiple callbacks being invoked
    for a single logical operation (e.g. a pan or zoom). This event is emitted
    by supported tools when the entire range update is complete, in order to
    afford a *single* event that can be responded to.

    '''
    event_name = 'rangesupdate'

    def __init__(self, model: Plot | None, *,
            x0: float | None = None,
            x1: float | None = None,
            y0: float | None = None,
            y1: float | None = None):
        self.x0 = x0
        self.x1 = x1
        self.y0 = y0
        self.y1 = y1
        super().__init__(model=model)
class SelectionGeometry(PlotEvent):
    ''' Announce the coordinates of a selection event on a plot.

    Attributes:
        geometry (dict) : a dictionary containing the coordinates of the
            selection event.
        final (bool) : whether the selection event is the last selection event
            in the case of selections on every mousemove.

    '''
    event_name = "selectiongeometry"

    def __init__(self, model: Plot | None, geometry: GeometryData | None = None, final: bool = True) -> None:
        # Validate/record the model first, then attach the selection payload.
        super().__init__(model=model)
        self.geometry = geometry
        self.final = final
class Reset(PlotEvent):
    ''' Announce a button click event on a plot ``ResetTool``.

    '''
    event_name = "reset"

    def __init__(self, model: Plot | None) -> None:
        # Reset carries no payload beyond the plot model itself.
        super().__init__(model=model)
class PointEvent(PlotEvent):
    ''' Base class for UI events associated with a specific (x,y) point.

    Attributes:
        sx (float) : x-coordinate of the event in *screen* space
        sy (float) : y-coordinate of the event in *screen* space
        x (float) : x-coordinate of the event in *data* space
        y (float) : y-coordinate of the event in *data* space

    Note that data space coordinates are relative to the default range, not
    any extra ranges, and the screen space origin is at the top left of
    the HTML canvas.

    '''
    def __init__(self, model: Plot | None, sx: float | None = None, sy:
            float | None = None, x: float | None = None, y: float | None = None):
        self.sx = sx
        self.sy = sy
        self.x = x
        self.y = y
        super().__init__(model=model)
class Tap(PointEvent):
    ''' Announce a tap or click event on a Bokeh plot.

    Attributes:
        sx (float) : x-coordinate of the event in *screen* space
        sy (float) : y-coordinate of the event in *screen* space
        x (float) : x-coordinate of the event in *data* space
        y (float) : y-coordinate of the event in *data* space

    '''
    event_name = 'tap'

class DoubleTap(PointEvent):
    ''' Announce a double-tap or double-click event on a Bokeh plot.

    Attributes:
        sx (float) : x-coordinate of the event in *screen* space
        sy (float) : y-coordinate of the event in *screen* space
        x (float) : x-coordinate of the event in *data* space
        y (float) : y-coordinate of the event in *data* space

    '''
    event_name = 'doubletap'

class Press(PointEvent):
    ''' Announce a press event on a Bokeh plot.

    Attributes:
        sx (float) : x-coordinate of the event in *screen* space
        sy (float) : y-coordinate of the event in *screen* space
        x (float) : x-coordinate of the event in *data* space
        y (float) : y-coordinate of the event in *data* space

    '''
    event_name = 'press'

class PressUp(PointEvent):
    ''' Announce a pressup event on a Bokeh plot.

    Attributes:
        sx (float) : x-coordinate of the event in *screen* space
        sy (float) : y-coordinate of the event in *screen* space
        x (float) : x-coordinate of the event in *data* space
        y (float) : y-coordinate of the event in *data* space

    '''
    event_name = 'pressup'
class MouseEnter(PointEvent):
    ''' Announce a mouse enter event onto a Bokeh plot.

    Attributes:
        sx (float) : x-coordinate of the event in *screen* space
        sy (float) : y-coordinate of the event in *screen* space
        x (float) : x-coordinate of the event in *data* space
        y (float) : y-coordinate of the event in *data* space

    .. note::
        The enter event is generated when the mouse enters the entire Plot
        canvas, including any border padding and space for axes or legends.

    '''
    event_name = 'mouseenter'

class MouseLeave(PointEvent):
    ''' Announce a mouse leave event from a Bokeh plot.

    Attributes:
        sx (float) : x-coordinate of the event in *screen* space
        sy (float) : y-coordinate of the event in *screen* space
        x (float) : x-coordinate of the event in *data* space
        y (float) : y-coordinate of the event in *data* space

    .. note::
        The leave event is generated when the mouse leaves the entire Plot
        canvas, including any border padding and space for axes or legends.

    '''
    event_name = 'mouseleave'

class MouseMove(PointEvent):
    ''' Announce a mouse movement event over a Bokeh plot.

    Attributes:
        sx (float) : x-coordinate of the event in *screen* space
        sy (float) : y-coordinate of the event in *screen* space
        x (float) : x-coordinate of the event in *data* space
        y (float) : y-coordinate of the event in *data* space

    .. note::
        This event can fire at a very high rate, potentially increasing network
        traffic or CPU load.

    '''
    event_name = 'mousemove'
class MouseWheel(PointEvent):
    ''' Announce a mouse wheel event on a Bokeh plot.

    Attributes:
        delta (float) : the (signed) scroll speed
        sx (float) : x-coordinate of the event in *screen* space
        sy (float) : y-coordinate of the event in *screen* space
        x (float) : x-coordinate of the event in *data* space
        y (float) : y-coordinate of the event in *data* space

    .. note::
        By default, Bokeh plots do not prevent default scroll events unless a
        ``WheelZoomTool`` or ``WheelPanTool`` is active. This may change in
        future releases.

    '''
    event_name = 'wheel'

    def __init__(self, model: Plot | None, *,
                 delta: float | None = None,
                 sx: float | None = None,
                 sy: float | None = None,
                 x: float | None = None,
                 y: float | None = None):
        # Common point-event bookkeeping (coordinates, model checks) is
        # handled by PointEvent; this class only adds the scroll delta.
        super().__init__(model, sx=sx, sy=sy, x=x, y=y)
        self.delta = delta
class Pan(PointEvent):
    ''' Announce a pan event on a Bokeh plot.

    Attributes:
        delta_x (float) : the amount of scroll in the x direction
        delta_y (float) : the amount of scroll in the y direction
        direction (float) : the direction of scroll (1 or -1)
        sx (float) : x-coordinate of the event in *screen* space
        sy (float) : y-coordinate of the event in *screen* space
        x (float) : x-coordinate of the event in *data* space
        y (float) : y-coordinate of the event in *data* space

    '''
    event_name = 'pan'

    def __init__(self,
                 model: Plot | None,
                 *,
                 delta_x: float | None = None,
                 delta_y: float | None = None,
                 # BUGFIX: was Literal[-1, -1]; direction is 1 or -1 per the
                 # class docstring.
                 direction: Literal[-1, 1] | None = None,
                 sx: float | None = None,
                 sy: float | None = None,
                 x: float | None = None,
                 y: float | None = None):
        self.delta_x = delta_x
        self.delta_y = delta_y
        self.direction = direction
        super().__init__(model, sx=sx, sy=sy, x=x, y=y)
class PanEnd(PointEvent):
    ''' Announce the end of a pan event on a Bokeh plot.

    Attributes:
        sx (float) : x-coordinate of the event in *screen* space
        sy (float) : y-coordinate of the event in *screen* space
        x (float) : x-coordinate of the event in *data* space
        y (float) : y-coordinate of the event in *data* space

    '''
    event_name = 'panend'

class PanStart(PointEvent):
    ''' Announce the start of a pan event on a Bokeh plot.

    Attributes:
        sx (float) : x-coordinate of the event in *screen* space
        sy (float) : y-coordinate of the event in *screen* space
        x (float) : x-coordinate of the event in *data* space
        y (float) : y-coordinate of the event in *data* space

    '''
    event_name = 'panstart'
class Pinch(PointEvent):
    ''' Announce a pinch event on a Bokeh plot.

    Attributes:
        scale (float) : the (signed) amount of scaling
        sx (float) : x-coordinate of the event in *screen* space
        sy (float) : y-coordinate of the event in *screen* space
        x (float) : x-coordinate of the event in *data* space
        y (float) : y-coordinate of the event in *data* space

    .. note::
        This event is only applicable for touch-enabled devices.

    '''
    event_name = 'pinch'

    def __init__(self,
            model: Plot | None,
            *,
            scale: float | None = None,
            sx: float | None = None,
            sy: float | None = None,
            x: float | None = None,
            y: float | None = None):
        # Store the pinch-specific payload, then let PointEvent handle the
        # shared screen/data coordinates.
        self.scale = scale
        super().__init__(model, sx=sx, sy=sy, x=x, y=y)
class PinchEnd(PointEvent):
    ''' Announce that a pinch gesture on a Bokeh plot has finished.

    Attributes:
        sx (float) : x-coordinate of the event in *screen* space
        sy (float) : y-coordinate of the event in *screen* space
        x (float) : x-coordinate of the event in *data* space
        y (float) : y-coordinate of the event in *data* space

    .. note::
        This event is only applicable for touch-enabled devices.

    '''
    event_name = 'pinchend'
class PinchStart(PointEvent):
    ''' Announce that a pinch gesture on a Bokeh plot has begun.

    Attributes:
        sx (float) : x-coordinate of the event in *screen* space
        sy (float) : y-coordinate of the event in *screen* space
        x (float) : x-coordinate of the event in *data* space
        y (float) : y-coordinate of the event in *data* space

    .. note::
        This event is only applicable for touch-enabled devices.

    '''
    event_name = 'pinchstart'
class Rotate(PointEvent):
    ''' Announce a rotate event on a Bokeh plot.

    Attributes:
        rotation (float) : the rotation that has been done (in deg)
        sx (float) : x-coordinate of the event in *screen* space
        sy (float) : y-coordinate of the event in *screen* space
        x (float) : x-coordinate of the event in *data* space
        y (float) : y-coordinate of the event in *data* space

    .. note::
        This event is only applicable for touch-enabled devices.

    '''
    event_name = 'rotate'

    def __init__(self,
            model: Plot | None,
            *,
            rotation: float | None = None,
            sx: float | None = None,
            sy: float | None = None,
            x: float | None = None,
            y: float | None = None):
        # Store the rotation-specific payload, then let PointEvent handle the
        # shared screen/data coordinates.
        self.rotation = rotation
        super().__init__(model, sx=sx, sy=sy, x=x, y=y)
class RotateEnd(PointEvent):
    ''' Announce that a rotate gesture on a Bokeh plot has finished.

    Attributes:
        sx (float) : x-coordinate of the event in *screen* space
        sy (float) : y-coordinate of the event in *screen* space
        x (float) : x-coordinate of the event in *data* space
        y (float) : y-coordinate of the event in *data* space

    .. note::
        This event is only applicable for touch-enabled devices.

    '''
    event_name = 'rotateend'
class RotateStart(PointEvent):
    ''' Announce that a rotate gesture on a Bokeh plot has begun.

    Attributes:
        sx (float) : x-coordinate of the event in *screen* space
        sy (float) : y-coordinate of the event in *screen* space
        x (float) : x-coordinate of the event in *data* space
        y (float) : y-coordinate of the event in *data* space

    .. note::
        This event is only applicable for touch-enabled devices.

    '''
    event_name = 'rotatestart'
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# Register the generic "event" hook so serialized Event payloads can be
# reconstructed into Event instances during deserialization.
Deserializer.register("event", Event.from_serializable)
| |
# all the imports
import sqlite3,re,json,ast
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash
from contextlib import closing
# configuration
# NOTE(review): DEBUG=True, a hard-coded secret key, and literal credentials
# are development-only settings — confirm these are never deployed as-is.
DATABASE = '/home/flavorshare/mysite/flavorshare.db'
DEBUG = True
SECRET_KEY = 'development key'  # signs the Flask session cookie
USERNAME = 'admin'
PASSWORD = 'default'
# Module-level mutable scratch state; purpose not evident from this file —
# presumably leftovers from development. TODO confirm still needed.
CId=2
value=1
entries = []
# create our little application :)
app = Flask(__name__)
app.config.from_object(__name__)  # pulls the UPPERCASE names above into app.config
def connect_db():
    """Open a new SQLite connection to the configured database file."""
    database_path = app.config['DATABASE']
    return sqlite3.connect(database_path)
def init_db():
    """(Re)create the database schema by executing schema.sql."""
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql', mode='r') as schema_file:
            db.cursor().executescript(schema_file.read())
        db.commit()
@app.before_request
def before_request():
    """Open a per-request database connection on the request context."""
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the per-request database connection, if one was opened."""
    connection = getattr(g, 'db', None)
    if connection is not None:
        connection.close()
@app.route('/')
def main_page():
    """Landing page: home for logged-in users, login/register chooser otherwise."""
    error = None  # unused; kept to mirror the structure of the other views
    if session.get('logged_in'):
        return redirect(url_for('homePage'))
    return render_template('login_or_register.html')
@app.route('/', methods=['POST'])
def login_or_register():
    """Dispatch the landing-page form to the login or register page.

    Fixes a latent NameError: the original left the target name unbound
    when the submitted value was neither "Login" nor "Register".
    """
    targets = {'Login': 'loginPage', 'Register': 'registerPage'}
    page_function_name = targets.get(request.form.get('login_register'))
    if page_function_name is None:
        # Unknown or missing submit value: fall back to the landing page
        # instead of crashing with an unbound local.
        return redirect(url_for('main_page'))
    return redirect(url_for(page_function_name))
@app.route('/register')
def registerPage():
    """Registration form, or home if a session is already active."""
    error = None  # unused; kept to mirror the structure of the other views
    if session.get('logged_in'):
        return redirect(url_for('homePage'))
    return render_template('register.html')
# Loose e-mail shape check: something@domain.tld with no whitespace or extra
# '@'/'|' characters on either side. Intentionally simple, not RFC 5322.
EMAIL_REGEX = re.compile(r"[^@|\s]+@[^@]+\.[^@|\s]+")
@app.route('/register', methods=['POST'])
def register():
    """Create a new user from the registration form and log them in.

    On invalid input the registration form is re-rendered with an error.
    Fixes a crash: the original called ``redirect(url_for('register'),
    error=error)``, but ``redirect`` accepts no ``error`` keyword
    (TypeError at runtime).
    """
    error = None
    if request.form['register'] == "Register":
        passwords_match = (request.form['password'] ==
                           request.form['confirm_password'])
        if passwords_match and EMAIL_REGEX.match(request.form['email']):
            # NOTE(review): the password is stored in plain text — it should
            # be hashed (e.g. werkzeug.security.generate_password_hash).
            g.db.execute(
                'insert into users (name, email, password) values (?, ?, ?)',
                [request.form['name'], request.form['email'],
                 request.form['password']])
            g.db.commit()
            session['username'] = request.form['email']
            session['logged_in'] = True
            flash('Successfully Registered')
            return redirect(url_for('homePage'))
        else:
            error = 'Incorrect Details'
            # Re-render the form with the error instead of the broken redirect.
            return render_template('register.html', error=error)
@app.route('/login')
def loginPage():
    """Login form, or home if a session is already active."""
    error = None  # unused; kept to mirror the structure of the other views
    if session.get('logged_in'):
        return redirect(url_for('homePage'))
    return render_template('login.html')
@app.route('/login', methods=['POST'])
def login():
    """Validate credentials and start a session.

    Fixed: the original concatenated the username/password into the SQL
    text (classic SQL injection); use bound parameters instead.
    """
    error = None
    if request.form['login'] == "Login":
        cur = g.db.execute(
            'select email, password from users where email = ? and password = ?',
            [request.form['username'], request.form['password']])
        user_detail = cur.fetchall()
        if user_detail:
            flash('Successfully Logged In')
            session['username'] = request.form['username']
            session['logged_in'] = True
            return redirect(url_for('homePage'))
        error = 'Invalid Login Details'
        return render_template('login.html', error=error)
@app.route('/home')
def homePage():
    """Render the home page with the user's display name and a notification flag.

    Fixed: the session e-mail was concatenated into the SQL text (injection
    risk); both queries are now parameterized.
    """
    error = None
    if not session.get('logged_in'):
        return render_template('login_or_register.html')
    cur = g.db.execute('select name from users where email = ?',
                       [session.get('username')])
    display_name = cur.fetchall()[0][0]
    login = True
    cur_count = g.db.execute(
        'select count(*) from notification where mid_assignee in '
        '(select mid from users where email = ?)',
        [session.get('username')])
    # count(*) always yields exactly one row, so notification is always bound.
    notification = False
    for row in cur_count:
        notification = row[0] != 0
    return render_template('home.html', display_name=display_name,
                           login=login, notification=notification)
@app.route('/logout')
def logout():
    """End the current session and return to the landing page."""
    # Drop the session markers if present; pop with a default never raises.
    for key in ('username', 'logged_in'):
        session.pop(key, None)
    return redirect(url_for('main_page'))
@app.route('/notification', methods=['GET'])
def notificationPage():
    """List pending notifications for the logged-in user.

    Fixed: all lookups were built by string concatenation (SQL injection
    risk); they are now parameterized.
    """
    cur = g.db.execute(
        'select mid_assignor,gid,description,nid from notification '
        'where mid_assignee in (select mid from users where email = ?)',
        [session.get('username')])
    notification_list = []
    for row in cur.fetchall():
        cur_name = g.db.execute('select name from users where mid = ?', [row[0]])
        cur_group = g.db.execute('select name from groups where gid = ?', [row[1]])
        for sender in cur_name:
            for group in cur_group:
                # Each entry is a one-element list of a dict, as the template
                # expects.
                notification_list.append([dict(name=str(sender[0]),
                                               group=str(group[0]),
                                               desc=str(row[2]),
                                               nid=str(row[3]))])
    return render_template('notification.html',
                           notification_list=notification_list)
@app.route('/notification', methods=['POST'])
def notification():
    """Delete a dismissed notification, then re-render the remaining ones.

    Fixed: the delete and all lookups interpolated values straight into the
    SQL text (injection risk); they are now parameterized.
    """
    if "delete" in request.form:
        g.db.execute('delete from notification where nid = ?',
                     [request.form["delete"]])
        g.db.commit()
    cur = g.db.execute(
        'select mid_assignor,gid,description,nid from notification '
        'where mid_assignee in (select mid from users where email = ?)',
        [session.get('username')])
    notification_list = []
    for row in cur.fetchall():
        cur_name = g.db.execute('select name from users where mid = ?', [row[0]])
        cur_group = g.db.execute('select name from groups where gid = ?', [row[1]])
        for sender in cur_name:
            for group in cur_group:
                notification_list.append([dict(name=str(sender[0]),
                                               group=str(group[0]),
                                               desc=str(row[2]),
                                               nid=str(row[3]))])
    return render_template('notification.html',
                           notification_list=notification_list)
@app.route('/myProfile')
def myProfile():
    """Show the logged-in user's name and e-mail.

    Fixed: parameterized the user lookup (the session e-mail was previously
    concatenated into the SQL text).
    """
    if not session.get('logged_in'):
        return render_template('login_or_register.html')
    user_email = session.get('username')
    cur_users = g.db.execute('select name from users where email = ?',
                             [user_email])
    user_name = cur_users.fetchall()[0]
    return render_template('my_profile.html', user_name=user_name,
                           user_email=user_email)
@app.route('/group_listing')
def group_listingPage():
    """List every group the current user is a member of.

    Fixed: parameterized both queries (previously string-concatenated).
    """
    cur_users = g.db.execute('select mid from users where email = ?',
                             [session.get('username')])
    mid = cur_users.fetchall()[0]
    cur_groups = g.db.execute(
        'select name from groups where gid in '
        '(select gid from group_members where mid = ?)', [mid[0]])
    group_names = cur_groups.fetchall()
    return render_template('group_listing.html', group_names=group_names)
@app.route('/group_listing', methods=['GET', 'POST'])
def group_listing():
    """GET: list groups administered by the user. POST: open a group or the
    add-group page.

    Fixes: parameterized SQL (injection risk), and removed the block of
    queries whose results were computed and then discarded before the
    redirect to group_summary_init (which re-queries everything itself).
    """
    if request.method == 'GET':
        cur_users = g.db.execute('select mid from users where email = ?',
                                 [session.get('username')])
        mid = cur_users.fetchall()[0]
        cur_groups = g.db.execute(
            'select name from groups where admin_id = ?', [mid[0]])
        group_names = cur_groups.fetchall()
        return render_template('group_listing.html', group_names=group_names)
    if 'listing' in request.form:
        if request.form['listing'] == "add_group":
            return redirect(url_for('add_group'))
        # The clicked button carries the group name; the summary view
        # re-queries every detail it needs, so just redirect there.
        return redirect(url_for('group_summary_init',
                                groups=request.form['listing']))
@app.route('/add_group', methods=['GET', 'POST'])
def add_group():
    """GET: show the group-creation form. POST: create the group and continue
    to the member page.

    Fixed: the admin_id was %-interpolated into the SQL text; all values are
    now bound parameters.
    """
    if request.method == 'GET':
        return render_template('add_group.html')
    if request.form['group_members'] == "Next":
        cur = g.db.execute('select mid from users where email = ?',
                           [session.get('username')])
        mid = cur.fetchall()[0]
        # Remember the group being configured for the follow-up pages.
        session['grpname'] = request.form['name']
        g.db.execute(
            'insert into groups (name, admin_id, description, venue, eventdate) '
            'values (?, ?, ?, ?, ?)',
            [request.form['name'], mid[0], request.form['description'],
             request.form['venue'], request.form['eventdate']])
        g.db.commit()
        return redirect(url_for('group_membersPage'))
    flash('Try Again')
    return redirect(url_for('add_group'))
@app.route('/group_members_summary')
def group_members_summaryPage():
error = None
print session['grpname']
return render_template('group_members_summary.html')
@app.route('/group_members_summary', methods=['POST'])
def group_members_summary():
    """Add the submitted e-mail addresses as members of the current group.

    Fixed: the e-mail and group name were concatenated into the SQL text
    (injection risk); both inserts are now parameterized.
    """
    if request.form['display_group_members'] == "next":
        number_of_members = int(request.form['number_members'])
        for i in range(1, number_of_members + 1):
            field = "email{0}".format(i)
            g.db.execute(
                'insert into group_members(mid, gid) values '
                '((select mid from users where email = ?), '
                '(select gid from groups where name = ?))',
                [request.form[field], session['grpname']])
        g.db.commit()
        flash('Group Members Added Successfully')
        # NOTE(review): this reads session['gname'] while this flow stores
        # the name under session['grpname'] — confirm 'gname' is set (it is
        # set by group_summary_init, the usual entry to this page).
        return redirect(url_for('group_summary_init', groups=session['gname']))
@app.route('/group_members')
def group_membersPage():
    """Add the group admin as the first member, then show the member form.

    Fixed: the group name came from the session straight into the SQL text;
    it is now a bound parameter.
    """
    g.db.execute(
        'insert into group_members(mid, gid) values '
        '((select mid from users where mid in '
        '(select admin_id from groups where name = ?)), '
        '(select gid from groups where name = ?))',
        [session['grpname'], session['grpname']])
    g.db.commit()
    return render_template('group_members.html')
@app.route('/group_members', methods=['POST'])
def group_members():
    """Add submitted members to the group and send each a notification.

    Fixed: e-mails, the username, and the group name were concatenated into
    the SQL text (injection risk); both inserts are now parameterized.
    """
    if request.form['display_group_members'] == "next":
        number_of_members = int(request.form['number_members'])
        for i in range(1, number_of_members + 1):
            field = "email{0}".format(i)
            g.db.execute(
                'insert into group_members(mid, gid) values '
                '((select mid from users where email = ?), '
                '(select gid from groups where name = ?))',
                [request.form[field], session['grpname']])
            g.db.execute(
                'insert into notification(mid_assignee, mid_assignor, gid, description) '
                'values ((select mid from users where email = ?), '
                '(select mid from users where email = ?), '
                '(select gid from groups where name = ?), (?))',
                [request.form[field], session.get('username'),
                 session['grpname'], "You have been added to a group!!"])
        g.db.commit()
        flash('Group Members Added Successfully')
        return redirect(url_for('display_group_membersPage'))
@app.route('/display_group_members')
def display_group_membersPage():
    """Show the current membership of the group being configured.

    Fixes: parameterized the lookup (group name was concatenated into the
    SQL), dropped a no-op commit after a SELECT, and removed the redundant
    request.method check (this route only accepts GET).
    """
    cur = g.db.execute(
        'select name from users where mid in '
        '(select mid from group_members where gid in '
        '(select gid from groups where name = ?))',
        [session['grpname']])
    entries = [dict(name=row[0]) for row in cur.fetchall()]
    return render_template('display_group_members.html', entries=entries)
@app.route('/display_group_members', methods=['POST'])
def display_group_members():
    """Route the member-list buttons: add more members or continue to config.

    Fixed: removed a dead assignment ('group_config.html' was assigned but
    the branch redirected elsewhere).
    """
    choice = request.form['redirect_to']
    if choice == "add_more":
        return render_template('group_members.html')
    if choice == "next":
        return redirect(url_for('group_configPage'))
@app.route('/group_summary')
def group_summary_init():
    """Build the group-summary page: members, details, and category/recipe
    state for the group named in the ``groups`` query argument.

    Admins get the editable template; other members get the read-only one.
    Fixed: every query previously concatenated the group name / ids into the
    SQL text (injection risk); all are now parameterized.
    """
    group = request.args['groups']
    session['gname'] = group  # remembered for the POST handlers
    cur = g.db.execute(
        'select name from users where mid in '
        '(select mid from group_members where gid in '
        '(select gid from groups where name = ?))', [group])
    names = [dict(name=row[0]) for row in cur.fetchall()]
    cur_details = g.db.execute(
        'select description,venue,eventdate from groups where name = ?',
        [group])
    detail_rows = cur_details.fetchall()
    desc = [dict(desc=row[0]) for row in detail_rows]
    venue = [dict(venue=row[1]) for row in detail_rows]
    eventdate = [dict(eventdate=row[2]) for row in detail_rows]
    groups = [dict(gname=group)]
    # Category name -> configured item count, for categories with a quota.
    category_details = g.db.execute(
        'select category.name,group_category.no_of_items '
        'from category,group_category '
        'where category.cid=group_category.cid '
        'and group_category.gid in (select gid from groups where name = ?) '
        'and group_category.no_of_items>0', [group])
    cat_name = {row[0]: row[1] for row in category_details.fetchall()}
    # Category name -> list of recipe names already attached (cids
    # 307-310 are excluded, matching the rest of this module).
    category_recipe_details = g.db.execute(
        'select category.name,recipes.name '
        'from category,group_category_recipes,recipes '
        'where category.cid=group_category_recipes.cid '
        'and recipes.rid=group_category_recipes.rid '
        'and gid in (select gid from groups where name = ?) '
        'and category.cid not in (307,308,309,310)', [group])
    category_recipe_list = {}
    for cat, recipe_name in category_recipe_details.fetchall():
        category_recipe_list.setdefault(cat, []).append(recipe_name)
    cur_user = g.db.execute('select mid from users where email = ?',
                            [session.get('username')])
    user_id = cur_user.fetchall()[0]
    cur_admin = g.db.execute('select admin_id from groups where name = ?',
                             [group])
    admin_id = cur_admin.fetchall()[0]
    cur_admin_name = g.db.execute('select name from users where mid = ?',
                                  [admin_id[0]])
    admin_name = cur_admin_name.fetchall()[0]
    a_name = [dict(aname=admin_name[0])]
    # Admins see the editable summary; everyone else the read-only one.
    template = ('group_summary.html' if admin_id[0] == user_id[0]
                else 'group_summary_normal.html')
    return render_template(template, groups=groups, names=names, desc=desc,
                           venue=venue, eventdate=eventdate, cat_name=cat_name,
                           category_recipe_list=category_recipe_list,
                           a_name=a_name)
@app.route('/group_summary', methods=['POST'])
def group_summary():
    """Handle the group-summary action buttons (admin view): remove a member,
    edit members, remove checked recipes, finish, or open the add-recipe page.

    Fixed: all statements previously concatenated user-controlled names into
    the SQL text (injection risk); they are now parameterized.
    """
    group = session['gname']
    if 'member' in request.form:
        member_name = request.form['member']
        g.db.execute(
            'delete from group_members where gid in '
            '(select gid from groups where name = ?) '
            'and mid in (select mid from users where name = ?)',
            [group, member_name])
        g.db.commit()
        flash('Group Member Deleted Successfully')
        return redirect(url_for('group_summary_init', groups=group))
    elif 'edit' in request.form:
        return redirect(url_for('group_members_summaryPage'))
    elif 'remove_recipe' in request.form:
        checked_recipes = request.form.getlist('checkbox-recipe')
        for recipe_name in checked_recipes:
            g.db.execute(
                'delete from group_category_recipes where gid in '
                '(select gid from groups where name = ?) '
                'and rid in (select rid from recipes where name = ?)',
                [group, recipe_name])
        g.db.commit()
        flash('Recipes Deleted Successfully')
        return redirect(url_for('group_summary_init', groups=group))
    elif 'done' in request.form:
        return redirect(url_for('group_listingPage'))
    elif 'addrecipe' in request.form:
        # Categories that still have open slots: configured count minus the
        # number of recipes already attached must be positive.
        cur_category_name = g.db.execute(
            'select name from category where cid in '
            '(select A.cid from '
            '(select cid,no_of_items from group_category '
            'where cid not in (307,308,309,310) '
            'and gid in (select gid from groups where name = ?)) A '
            'LEFT OUTER JOIN '
            '(select cid, count(rid) as C from group_category_recipes '
            'where gid in (select gid from groups where name = ?) '
            'group by cid) B '
            'ON A.cid=B.cid '
            'where (A.no_of_items - ifnull(B.C,0)) > 0 )',
            [group, group])
        category = cur_category_name.fetchall()
        recipe_list = {}
        for name in category:
            cur_recipe = g.db.execute(
                'select name from recipes where cid in '
                '(select cid from category where name = ?)', [str(name[0])])
            recipe_list[name[0]] = [row[0] for row in cur_recipe.fetchall()]
        jsondump = json.dumps(recipe_list)
        return render_template('add_recipe.html', category=category,
                               jsondump=jsondump, recipe_list=recipe_list)
@app.route('/add_recipe')
def add_recipePage():
    """Render the add-recipe form."""
    error = None  # unused; kept to mirror the structure of the other views
    return render_template('add_recipe.html')
@app.route('/add_recipe', methods=['POST'])
def add_recipe():
    """Attach the selected recipe to the chosen category for the current group.

    Fixed: every lookup and the insert concatenated names/ids into the SQL
    text (injection risk); all statements are now parameterized.
    """
    if request.form['add_recipe'] == "save":
        category_name = request.form['select-group']
        recipe_name = request.form['select-members']
        group = session['gname']
        # Resolve each natural key to its id; each fetchall()[0] is a 1-tuple.
        mid = g.db.execute('select mid from users where email = ?',
                           [session.get('username')]).fetchall()[0]
        gid = g.db.execute('select gid from groups where name = ?',
                           [group]).fetchall()[0]
        cid = g.db.execute('select cid from category where name = ?',
                           [category_name]).fetchall()[0]
        rid = g.db.execute('select rid from recipes where name = ?',
                           [recipe_name]).fetchall()[0]
        g.db.execute(
            'insert into group_category_recipes(gid, cid, rid, mid) '
            'values (?, ?, ?, ?)', [gid[0], cid[0], rid[0], mid[0]])
        g.db.commit()
        flash('Recipe Added Successfully')
        return redirect(url_for('group_summary_init', groups=group))
@app.route('/group_config')
def group_configPage():
error = None
if request.method == 'GET':
groups = [dict(gname=session['grpname'])]
print groups
return render_template('group_config.html',groups=groups)
@app.route('/group_config', methods=['POST'])
def group_config():
    """Store the requested number of items per category (cids 301-310).

    Fixes: the form value was concatenated into the SQL text (injection
    risk); also, the original field name "category{000}".format(i) relied on
    "{000}" resolving to positional argument 0 — written explicitly now.
    """
    if request.form['finish_group'] == "save":
        for cid in range(301, 311):
            field = "category{0}".format(cid)
            g.db.execute(
                'insert into group_category(gid, cid, no_of_items) values '
                '((select gid from groups where name = ?), ?, ?)',
                [session['grpname'], cid, request.form[field]])
        g.db.commit()
        flash('Group Created Successfully')
        return redirect(url_for('homePage'))
@app.route('/saved_recipes')
def savedRecipesPage():
    """List recipes the user has attached to any group category.

    Fixed: parameterized both queries (previously string-concatenated).
    """
    cur = g.db.execute('select mid from users where email = ?',
                       [session.get('username')])
    mid = cur.fetchall()[0]
    cur_recipe = g.db.execute(
        'select name from recipes where rid in '
        '(select rid from group_category_recipes where mid = ?)', [mid[0]])
    recipe_names = cur_recipe.fetchall()
    return render_template('saved_recipes.html', recipe_names=recipe_names)
@app.route('/recipe/<recipe_name>')
def recipe(recipe_name):
    """Show one recipe: rating, timing, servings, instructions, ingredients.

    Fixed: the URL-supplied recipe name went straight into the SQL text
    (injection risk); both queries are now parameterized. Dropped the unused
    ``cid`` local.
    """
    cur = g.db.execute('select * from recipes where name = ?', [recipe_name])
    recipe_details = cur.fetchall()[0]
    # Column layout per the indexing below: 0=rid, 3=instructions, 4=rating,
    # 5=cook_time, 6=servings.
    rid = recipe_details[0]
    rating = recipe_details[4]
    cook_time = recipe_details[5]
    servings = recipe_details[6]
    instructions = recipe_details[3]
    cur_ingredients = g.db.execute(
        'select name,quantity from ingredients,recipe_ingredients '
        'where rid = ? and recipe_ingredients.iid = ingredients.iid', [rid])
    ingredient_list = cur_ingredients.fetchall()
    return render_template('recipe.html', recipe_name=recipe_name,
                           rating=rating, cook_time=cook_time,
                           servings=servings, instructions=instructions,
                           ingredient_list=ingredient_list)
@app.route('/recipe/<recipe_name>', methods=['POST'])
def recipePost(recipe_name):
    """Save the checked ingredients to the user's bag, or open the share form.

    Fixed: every statement concatenated user-controlled values into the SQL
    text (injection risk); all are now parameterized.
    """
    checked = request.form.getlist('ingredients')
    mid = g.db.execute('select mid from users where email = ?',
                       [session.get('username')]).fetchall()[0]
    rid = g.db.execute('select rid from recipes where name = ?',
                       [recipe_name]).fetchall()[0]
    if request.form['save_or_share'] == "Save":
        for ingredient in checked:
            g.db.execute(
                'insert into my_saved_bag(mid, rid, ingredient) '
                'values (?, ?, ?)', [mid[0], rid[0], ingredient])
        g.db.commit()
        flash('Ingredients Saved to My Bag')
        return redirect(url_for('showBag'))
    elif request.form['save_or_share'] == "Share":
        # Build group name -> other members, for the share form's pickers.
        cur_group_names = g.db.execute(
            'select name from groups where gid in '
            '(select gid from group_members where mid = ?)', [mid[0]])
        group_list = {}
        for name in cur_group_names.fetchall():
            cur_group_members = g.db.execute(
                'select name from users where mid != ? and mid in '
                '(select mid from group_members where gid = '
                '(select gid from groups where name = ?))',
                [mid[0], str(name[0])])
            group_list[name[0]] = [m[0] for m in cur_group_members.fetchall()]
        jsonGroupList = json.dumps(group_list)
        return render_template('share_ingredients.html', ingredients=checked,
                               group_list=group_list,
                               jsonGroupList=jsonGroupList,
                               recipe_name=recipe_name)
@app.route('/share', methods=['POST'])
def share():
    """Share the selected ingredients (plus a notification) with one member
    of one of the user's groups.

    Fixed: all lookups and inserts concatenated user-controlled values into
    the SQL text (injection risk); everything is now parameterized.
    """
    ingredient_list = request.form['ingredients']
    group_name = request.form['select-group']
    group_member = request.form['select-members']
    recipe_name = request.form['recipe_name']
    # The ingredient field arrives as the repr of a Python list; parse it
    # with ast.literal_eval (safe: literals only) and trim whitespace.
    ingredients = [i.strip() for i in ast.literal_eval(ingredient_list)]
    mid = g.db.execute('select mid from users where email = ?',
                       [session.get('username')]).fetchall()[0]
    rid = g.db.execute('select rid from recipes where name = ?',
                       [recipe_name]).fetchall()[0]
    gid = g.db.execute('select gid from groups where name = ?',
                       [group_name]).fetchall()[0]
    mid_assignee = g.db.execute('select mid from users where name = ?',
                                [group_member]).fetchall()[0]
    for ingredient in ingredients:
        g.db.execute(
            'insert into my_shared_bag(mid_assignee, mid_assignor, rid, gid, '
            'ingredient) values (?, ?, ?, ?, ?)',
            [mid_assignee[0], mid[0], rid[0], gid[0], ingredient])
        # One notification per shared ingredient, matching the original.
        g.db.execute(
            'insert into notification(mid_assignee, mid_assignor, gid, '
            'description) values (?, ?, ?, ?)',
            [mid_assignee[0], mid[0], gid[0],
             "A bag has been shared with you!!!"])
    g.db.commit()
    flash('Ingredients Shared Successfully')
    return redirect(url_for('homePage'))
@app.route('/showBag')
def showBag():
    """Show the user's saved ingredients plus items others shared with them.

    Fixed: parameterized all queries (previously string-concatenated).
    """
    mid = g.db.execute('select mid from users where email = ?',
                       [session.get('username')]).fetchall()[0]
    cur_saved = g.db.execute(
        'select ingredient from my_saved_bag where mid = ?', [mid[0]])
    saved_bag = [row[0] for row in cur_saved.fetchall()]
    cur_shared = g.db.execute(
        'select mid_assignor,gid,ingredient from my_shared_bag '
        'where mid_assignee = ?', [mid[0]])
    shared_bag = []
    for assignor, gid, ingredient in cur_shared.fetchall():
        # Resolve ids to display names for the template.
        group_name = g.db.execute('select name from groups where gid = ?',
                                  [gid]).fetchall()[0][0]
        member_name = g.db.execute('select name from users where mid = ?',
                                   [assignor]).fetchall()[0][0]
        shared_bag.append((ingredient, group_name, member_name))
    return render_template('showBag.html', saved_bag=saved_bag,
                           shared_bag=shared_bag)
@app.route('/showBag', methods=['POST'])
def showBagPost():
    """Delete one ingredient from the saved or shared bag, then re-show it.

    Fixes: parameterized the deletes (the ingredient text was concatenated
    into the SQL — injection risk) and removed the unused read of
    session['gname'], which could raise KeyError when no group had been
    visited yet.
    """
    if 'saved_ingredient' in request.form:
        mid = g.db.execute('select mid from users where email = ?',
                           [session.get('username')]).fetchall()[0]
        g.db.execute(
            'delete from my_saved_bag where mid = ? and ingredient = ?',
            [mid[0], request.form['saved_ingredient']])
        g.db.commit()
        return redirect(url_for('showBag'))
    elif 'shared_ingredient' in request.form:
        mid = g.db.execute('select mid from users where email = ?',
                           [session.get('username')]).fetchall()[0]
        g.db.execute(
            'delete from my_shared_bag where mid_assignee = ? '
            'and ingredient = ?',
            [mid[0], request.form['shared_ingredient']])
        g.db.commit()
        return redirect(url_for('showBag'))
if __name__ == '__main__':
    # NOTE(review): debug mode (interactive Werkzeug debugger) combined with
    # binding to 0.0.0.0 exposes code execution to the whole network —
    # confirm this entry point is development-only.
    app.debug = True
    app.run(host='0.0.0.0')
| |
#!/usr/bin/env python
from __future__ import print_function
import io
import os
import sys
import json
import time
import logging
import argparse
import requests
from requests_oauthlib import OAuth1Session
# Python 2/3 compatibility shims: the configparser module was renamed from
# ConfigParser, and raw_input() became input().
try:
    import configparser  # Python 3
except ImportError:
    import ConfigParser as configparser  # Python 2
if sys.version_info[:2] <= (2, 7):
    # Python 2
    get_input = raw_input
else:
    # Python 3
    get_input = input
def main():
    """
    The twarc command line.

    Parses arguments, resolves Twitter API credentials (command-line flags,
    then environment variables, then a config file, then an interactive
    prompt, in that order) and writes the resulting tweets to stdout as
    JSON lines.
    """
    parser = argparse.ArgumentParser("twarc")
    parser.add_argument("--search", dest="search",
                        help="search for tweets matching a query")
    parser.add_argument("--max_id", dest="max_id",
                        help="maximum tweet id to search for")
    parser.add_argument("--since_id", dest="since_id",
                        help="smallest id to search for")
    parser.add_argument("--stream", dest="stream",
                        help="stream tweets matching filter")
    parser.add_argument("--hydrate", dest="hydrate",
                        help="rehydrate tweets from a file of tweet ids")
    parser.add_argument("--log", dest="log",
                        default="twarc.log", help="log file")
    parser.add_argument("--consumer_key",
                        default=None, help="Twitter API consumer key")
    parser.add_argument("--consumer_secret",
                        default=None, help="Twitter API consumer secret")
    parser.add_argument("--access_token",
                        default=None, help="Twitter API access key")
    parser.add_argument("--access_token_secret",
                        default=None, help="Twitter API access token secret")
    parser.add_argument('-c', '--config',
                        default=default_config_filename(),
                        help="Config file containing Twitter keys and secrets")
    parser.add_argument('-p', '--profile', default='main',
                        help="Name of a profile in your configuration file")
    args = parser.parse_args()

    logging.basicConfig(
        filename=args.log,
        level=logging.INFO,
        format="%(asctime)s %(levelname)s %(message)s"
    )

    # flags win over environment variables
    consumer_key = args.consumer_key or os.environ.get('CONSUMER_KEY')
    consumer_secret = args.consumer_secret or os.environ.get('CONSUMER_SECRET')
    access_token = args.access_token or os.environ.get('ACCESS_TOKEN')
    access_token_secret = args.access_token_secret or os.environ.get("ACCESS_TOKEN_SECRET")

    if not (consumer_key and consumer_secret and
            access_token and access_token_secret):
        credentials = load_config(args.config, args.profile)
        if credentials:
            consumer_key = credentials['consumer_key']
            consumer_secret = credentials['consumer_secret']
            access_token = credentials['access_token']
            access_token_secret = credentials['access_token_secret']
        else:
            # last resort: prompt the user and remember the answers
            print("Please enter Twitter authentication credentials")
            consumer_key = get_input('consumer key: ')
            consumer_secret = get_input('consumer secret: ')
            access_token = get_input('access_token: ')
            access_token_secret = get_input('access token secret: ')
            save_keys(args.profile, consumer_key, consumer_secret,
                      access_token, access_token_secret)

    t = Twarc(consumer_key=consumer_key,
              consumer_secret=consumer_secret,
              access_token=access_token,
              access_token_secret=access_token_secret)

    if args.search:
        tweets = t.search(
            args.search,
            since_id=args.since_id,
            max_id=args.max_id
        )
    elif args.stream:
        tweets = t.stream(args.stream)
    elif args.hydrate:
        # BUG FIX: mode 'rU' was removed in Python 3.11; plain 'r' gives
        # universal newlines on Python 3 and also works on Python 2.
        tweets = t.hydrate(open(args.hydrate, 'r'))
    else:
        raise argparse.ArgumentTypeError(
            "must supply one of: --search --stream or --hydrate")

    # iterate through the tweets and write them to stdout
    for tweet in tweets:
        if "id_str" in tweet:
            logging.info("archived %s", tweet["id_str"])
        print(json.dumps(tweet))
def load_config(filename, profile):
    """Read Twitter credentials for *profile* from the ini file *filename*.

    Returns a dict with the four credential keys, or None when the file
    does not exist. Exits with a message when the profile or a key is
    missing from the file.
    """
    if not os.path.isfile(filename):
        return None
    parser = configparser.ConfigParser()
    parser.read(filename)
    wanted = ('access_token', 'access_token_secret', 'consumer_key', 'consumer_secret')
    credentials = {}
    for key in wanted:
        try:
            credentials[key] = parser.get(profile, key)
        except configparser.NoSectionError:
            sys.exit("no such profile %s in %s" % (profile, filename))
        except configparser.NoOptionError:
            sys.exit("missing %s from profile %s in %s" % (key, profile, filename))
    return credentials
def save_config(filename, profile,
                consumer_key, consumer_secret,
                access_token, access_token_secret):
    """Write the four Twitter credentials to *filename* under [profile]."""
    parser = configparser.ConfigParser()
    parser.add_section(profile)
    pairs = (('consumer_key', consumer_key),
             ('consumer_secret', consumer_secret),
             ('access_token', access_token),
             ('access_token_secret', access_token_secret))
    for key, value in pairs:
        parser.set(profile, key, value)
    with open(filename, 'w') as config_file:
        parser.write(config_file)
def default_config_filename():
    """
    Return the default filename for storing Twitter keys.
    """
    return os.path.join(os.path.expanduser("~"), ".twarc")
def save_keys(profile, consumer_key, consumer_secret,
              access_token, access_token_secret):
    """
    Save keys to ~/.twarc
    """
    target = default_config_filename()
    save_config(target, profile,
                consumer_key, consumer_secret,
                access_token, access_token_secret)
    print("Keys saved to", target)
def rate_limit(f):
    """
    A decorator to handle rate limiting from the Twitter API. If
    a rate limit error is encountered we will sleep until we can
    issue the API call again.

    Fixes: uses functools.wraps so the wrapped function keeps its
    name/docstring, and logging.warning instead of the deprecated
    logging.warn alias (removed in Python 3.13).
    """
    @functools.wraps(f)
    def new_f(*args, **kwargs):
        while True:
            resp = f(*args, **kwargs)
            if resp.status_code == 200:
                return resp
            elif resp.status_code == 429:
                # sleep until Twitter's advertised reset time, plus margin
                reset = int(resp.headers['x-rate-limit-reset'])
                now = time.time()
                seconds = reset - now + 10
                if seconds < 1:
                    seconds = 10
                logging.warning("rate limit exceeded: sleeping %s secs", seconds)
                time.sleep(seconds)
            elif resp.status_code == 503:
                seconds = 60
                logging.warning("503 from Twitter API, sleeping %s", seconds)
                time.sleep(seconds)
            else:
                # anything else is fatal: surface it to the caller
                resp.raise_for_status()
    return new_f
class Twarc(object):
    """
    Your friendly neighborhood Twitter archiving class. Twarc allows
    you to search for existing tweets, stream live tweets that match
    a filter query and lookup (hydrate) a list of tweet ids.

    Each method search, stream and hydrate returns a tweet iterator which
    allows you to do what you want with the data. Twarc handles rate limiting
    in the API, so it will go to sleep when Twitter tells it to, and wake back
    up when it is able to get more data from the API.
    """

    def __init__(self, consumer_key, consumer_secret, access_token,
                 access_token_secret):
        """
        Instantiate a Twarc instance. Make sure your environment variables
        are set.
        """
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.access_token = access_token
        self.access_token_secret = access_token_secret
        self._connect()

    def search(self, q, max_id=None, since_id=None):
        """
        Pass in a query with optional max_id and min_id and get back
        an iterator for decoded tweets.
        """
        logging.info("starting search for %s", q)
        url = "https://api.twitter.com/1.1/search/tweets.json"
        params = {
            "count": 100,
            "q": q
        }
        while True:
            if since_id:
                params['since_id'] = since_id
            if max_id:
                params['max_id'] = max_id
            resp = self.get(url, params=params)
            statuses = resp.json()["statuses"]
            if len(statuses) == 0:
                logging.info("no new tweets matching %s", params)
                break
            for status in statuses:
                yield status
            # page backwards: next request asks for ids below the last one seen
            max_id = str(int(status["id_str"]) - 1)

    def stream(self, query):
        """
        Returns an iterator for tweets that match a given filter query from
        the livestream of tweets happening right now.
        """
        url = 'https://stream.twitter.com/1.1/statuses/filter.json'
        params = {"track": query}
        headers = {'accept-encoding': 'deflate, gzip'}
        errors = 0
        while True:
            try:
                logging.info("connecting to filter stream for %s", query)
                resp = self.post(url, params, headers=headers, stream=True)
                errors = 0
                for line in resp.iter_lines(chunk_size=512):
                    try:
                        yield json.loads(line.decode())
                    except Exception as e:
                        logging.error("json parse error: %s - %s", e, line)
            except requests.exceptions.HTTPError as e:
                errors += 1
                logging.error(e)
                if e.response.status_code == 420:
                    # "enhance your calm": back off more aggressively
                    t = errors * 60
                    logging.info("sleeping %s", t)
                    time.sleep(t)
                else:
                    t = errors * 5
                    logging.info("sleeping %s", t)
                    time.sleep(t)
            except Exception as e:
                errors += 1
                t = errors * 1
                logging.error(e)
                logging.info("sleeping %s", t)
                time.sleep(t)

    def hydrate(self, iterator):
        """
        Pass in an iterator of tweet ids and get back an iterator for the
        decoded JSON for each corresponding tweet.
        """
        ids = []
        url = "https://api.twitter.com/1.1/statuses/lookup.json"
        # lookup 100 tweets at a time
        for tweet_id in iterator:
            tweet_id = tweet_id.strip()  # remove new line if present
            ids.append(tweet_id)
            if len(ids) == 100:
                logging.info("hydrating %s ids", len(ids))
                resp = self.post(url, data={"id": ','.join(ids)})
                tweets = resp.json()
                tweets.sort(key=lambda t: t['id_str'])
                for tweet in tweets:
                    yield tweet
                ids = []
        # hydrate any remaining ones
        if len(ids) > 0:
            logging.info("hydrating %s", ids)
            # BUG FIX: previously called self.client.post directly, which
            # bypassed the rate-limit / reconnect handling in self.post; it
            # also skipped the id_str sort applied to full batches above.
            resp = self.post(url, data={"id": ','.join(ids)})
            tweets = resp.json()
            tweets.sort(key=lambda t: t['id_str'])
            for tweet in tweets:
                yield tweet

    @rate_limit
    def get(self, *args, **kwargs):
        """HTTP GET through the OAuth session, reconnecting on connection errors."""
        try:
            return self.client.get(*args, **kwargs)
        except requests.exceptions.ConnectionError as e:
            logging.error("caught connection error %s", e)
            self._connect()
            return self.get(*args, **kwargs)

    @rate_limit
    def post(self, *args, **kwargs):
        """HTTP POST through the OAuth session, reconnecting on connection errors."""
        try:
            return self.client.post(*args, **kwargs)
        except requests.exceptions.ConnectionError as e:
            logging.error("caught connection error %s", e)
            self._connect()
            return self.post(*args, **kwargs)

    def _connect(self):
        """(Re)create the OAuth1 HTTP session from the stored credentials."""
        logging.info("creating http session")
        self.client = OAuth1Session(
            client_key=self.consumer_key,
            client_secret=self.consumer_secret,
            resource_owner_key=self.access_token,
            resource_owner_secret=self.access_token_secret
        )
# Entry point: run the command line interface when executed directly.
if __name__ == "__main__":
    main()
| |
# coding: utf-8
# In[1]:
import tensorflow as tf
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn import metrics
from math import sqrt
import time
import random
from RNNUtil import RNNUtil
from RNNCell import RNNCell
from DataPreprocessor import DataPreprocessor
import csv
# In[2]:
# generic class for running student data
# pilot dataset is assistments
# generic rnn parameters ...
# input vector size => input does not have to be one-hot
# output vector size
# state vector size => for each hidden layer => len(state_size) is number of hidden layers
# cell type for each hidden layer
# activation => for each hidden layer
# dropout probability => for each hidden layer
# batch size
# num_steps => number of maximum time steps
# training or testing
# inject prediction function => TODO
# inject loss function => TODO
# inject trainer => TODO
class Generic_RNN(object):
    """TF1-style generic RNN graph for student-interaction (knowledge tracing) data.

    The graph is parameterized through the dict built by make_rnn_param_config().
    When is_training is True the loss, cost, optimizer and train op are also
    built; otherwise only the prediction node and a no-op eval op exist.
    """

    # use this method to create rnn
    @staticmethod
    def make_rnn_param_config(input_size,
                              output_size,
                              state_size,
                              cell_type,
                              activation,
                              keep_in,
                              keep_out,
                              keep_states,
                              num_steps,
                              is_training):
        """Bundle the RNN hyper-parameters into a config dict (no validation here)."""
        config = {
            'input_size': input_size,    # int
            'output_size': output_size,  # int
            'state_size': state_size,    # list of ints
            'cell_type': cell_type,      # list of types
            'activation': activation,    # list of strings
            'keep_in': keep_in,          # list of floats
            'keep_out': keep_out,        # list of floats
            'keep_states': keep_states,  # list of floats
            'num_steps': num_steps,      # int
            'is_training': is_training   # boolean
        }
        return config

    # method to pre-check the validity of config
    # only checks types and length
    @staticmethod
    def check_rnn_param_config_validity(config):
        """Return True when every config entry has the expected type and length."""
        input_size = config.get('input_size', None)
        output_size = config.get('output_size', None)
        state_size = config.get('state_size', None)
        cell_type = config.get('cell_type', None)
        activation = config.get('activation', None)
        keep_in = config.get('keep_in', None)
        keep_out = config.get('keep_out', None)
        keep_states = config.get('keep_states', None)
        num_steps = config.get('num_steps', None)
        is_training = config.get('is_training', None)
        # state_size's length fixes the number of hidden layers; all the
        # per-layer lists must have that same length
        is_valid = RNNUtil.is_type_length_valid(var=state_size, var_type_list=[list])
        # NOTE(review): if state_size is missing or not a list, len() below
        # raises before is_valid is consulted — confirm intended.
        hidden_layer_num = len(state_size)
        is_valid = (
            is_valid
            and RNNUtil.is_type_length_valid(var=input_size, var_type_list=[int])
            and RNNUtil.is_type_length_valid(var=output_size, var_type_list=[int])
            and RNNUtil.is_type_length_valid(var=cell_type, var_type_list=[list], length=hidden_layer_num)
            and RNNUtil.is_type_length_valid(var=activation, var_type_list=[list], length=hidden_layer_num)
            and RNNUtil.is_type_length_valid(var=keep_in, var_type_list=[list], length=hidden_layer_num)
            and RNNUtil.is_type_length_valid(var=keep_out, var_type_list=[list], length=hidden_layer_num)
            and RNNUtil.is_type_length_valid(var=keep_states, var_type_list=[list], length=hidden_layer_num)
            and RNNUtil.is_type_length_valid(var=num_steps, var_type_list=[int])
            and RNNUtil.is_type_length_valid(var=is_training, var_type_list=[bool])
        )
        return is_valid

    def __init__(self, rnn_param_config):
        """Validate the config, store the parameters and build the graph."""
        is_valid = __class__.check_rnn_param_config_validity(rnn_param_config)
        if (not is_valid):
            print('rnn_param_config is not valid')
            print(rnn_param_config)
            exit(1)
        self.set_rnn_params(rnn_param_config)
        self.set_graph()

    def set_rnn_params(self, param_config):
        """Copy config entries onto the instance for use during graph building."""
        self.param_config = param_config
        self.num_features = param_config.get('input_size', None)
        self.num_classes = param_config.get('output_size', None)
        self.cell_type = param_config.get('cell_type', None)
        self.state_size = param_config.get('state_size', None)
        self.num_steps = param_config.get('num_steps', None)
        self.is_training = param_config.get('is_training', None)
        self.activation_str = param_config.get('activation', None)
        self.keep_in = param_config.get('keep_in', None)
        self.keep_states = param_config.get('keep_states', None)
        self.keep_out = param_config.get('keep_out', None)

    def set_graph(self):
        """Build the graph: feed-forward part plus, when training, loss/optimizer."""
        logits = self.build_feedforward_graph()
        if (self.is_training):
            # prediction and loss nodes
            self.pred, self.loss = pred, loss = self.predict_and_loss(logits)
            # cost
            self.cost = cost = self.get_cost_from_loss(loss, 'reduce_sum')
            # TODO: more oop trainer
            # get optimizer
            starting_learning_rate = 0.1
            init = tf.constant(dtype=tf.int32, value=0)
            global_step = tf.get_variable(name='global_step', dtype=tf.int32, trainable=False,
                                          initializer=init)
            # decay learning rate by 0.96 every 3000 steps
            learning_rate = tf.train.exponential_decay(starting_learning_rate, global_step, 3000, 0.96, staircase=True)
            epsilon = 0.1
            self.optimizer = RNNUtil.get_trainer('adamOptimizer', learning_rate, epsilon)
            # cap gradient
            max_grad_norm = 20.0
            grads_and_vars = self.optimizer.compute_gradients(cost)
            grads_and_vars = [(tf.clip_by_norm(g, max_grad_norm), v) for g, v in grads_and_vars if g is not None]
            # optimization
            self.eval_op = self.optimizer.apply_gradients(grads_and_vars, name='train_op', global_step=global_step)
        else:
            # evaluation graph: prediction only; eval_op is a no-op so
            # session.run([pred, eval_op]) works for both modes
            self.pred = pred = self.predict(logits)
            self.eval_op = tf.no_op()

    # build rnn inputs, sequence length, cells, cell, outputs, states, weight, bias, logits
    def build_feedforward_graph(self):
        """Build placeholders, the (multi-layer) RNN and the output projection.

        Returns logits with shape [batch_size, num_steps, num_classes].
        """
        # inputs: [batch_size, num_steps, num_features]
        self.inputs = inputs = tf.placeholder(dtype=tf.float32,
                                              shape=[None, self.num_steps, self.num_features])
        # seq_len: [batch_size]
        self.seq_len = seq_len = tf.placeholder(dtype=tf.int32, shape=[None])
        hidden_layer_num = len(self.state_size)
        # cells refer to individual cells
        self.cells = cells = [RNNCell.makeRNNCell(self.cell_type[layer_index],
                                                  self.state_size[layer_index],
                                                  self.is_training,
                                                  activation_str=self.activation_str[layer_index],
                                                  keep_in=self.keep_in[layer_index],
                                                  keep_states=self.keep_states[layer_index],
                                                  keep_out=self.keep_out[layer_index])
                              for layer_index in range(hidden_layer_num)]
        # cell is multi rnn cell
        self.multi_cell = multi_cell = tf.contrib.rnn.MultiRNNCell(cells=cells,
                                                                   state_is_tuple=True)
        # outputs and states
        # outputs: [batch_size, num_steps, state_size[-1]]
        self.outputs, self.states = outputs, states = tf.nn.dynamic_rnn(cell=multi_cell,
                                                                        inputs=self.inputs,
                                                                        sequence_length=self.seq_len,
                                                                        dtype=tf.float32)
        # outputs: num_steps x [batch_size, state_size[-1]]
        outputs = [tf.squeeze(output, axis=1)
                   for output in tf.split(value=outputs,
                                          num_or_size_splits=self.num_steps,
                                          axis=1)]
        # weight and bias
        # weight: [state_size[-1], num_classes]
        # bias: [num_classes]
        self.weight = weight = tf.get_variable('weight', [self.state_size[-1], self.num_classes])
        self.bias = bias = tf.get_variable('bias', [self.num_classes])
        # produce logit outputs
        # logits: num_steps x [batch_size, num_classes]
        logits = [tf.matmul(outputs[i], weight) + bias for i in range(self.num_steps)]
        # stack logits: [num_steps, batch_size, num_classes]
        logits = tf.stack(logits)
        # transpose logits: [batch_size, num_steps, num_classes]
        logits = tf.transpose(logits, [1, 0, 2])
        return logits

    # need to input target_id
    def predict(self, logits):
        """Gather the logits addressed by the flat target_id placeholder and
        return their sigmoid (evaluation path, no loss)."""
        # reshape logits: [batch_size x num_steps x num_classes]
        logits = tf.reshape(logits, [-1])
        # target_id[batch_i x num_steps + step_i] = batch_i x num_steps x num_classes + step_i x num_classes + class_i
        self.target_id = target_id = tf.placeholder(dtype=tf.int32, shape=[None])
        selected_logits = tf.gather(params=logits, indices=target_id)
        return tf.nn.sigmoid(selected_logits)

    # need to input target_id, target_correctness
    def predict_and_loss(self, logits):
        """Return (sigmoid predictions, per-target weighted cross-entropy loss).

        Each loss term is divided by skill_seq_len so interactions tagged with
        multiple skills are not over-weighted.
        """
        # reshape logits: [batch_size x num_steps x num_classes]
        logits = tf.reshape(logits, [-1])
        # target_id[batch_i x num_steps + step_i] = batch_i x num_steps x num_classes + step_i x num_classes + class_i
        self.target_id = target_id = tf.placeholder(dtype=tf.int32, shape=[None])
        selected_logits = tf.gather(params=logits, indices=target_id)
        # define skill_seq_len
        # denominator for each selected_logits element
        # its size is the same as target_id
        # logits[target_id[i]] = logits[target_id[i]] / skill_seq_len[i]
        # => skill_seq_len[batch_i * num_steps + step_i] = N: number of skills for each interaction
        self.skill_seq_len = skill_seq_len = tf.placeholder(dtype=tf.float32, shape=[None])
        self.target_correctness = target_correctness = tf.placeholder(dtype=tf.float32, shape=[None])
        unweighted_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=selected_logits,
                                                                  labels=target_correctness)
        weighted_loss = tf.divide(unweighted_loss, skill_seq_len)
        return tf.nn.sigmoid(selected_logits), weighted_loss

    def get_cost_from_loss(self, loss, policy):
        """Reduce the per-target loss to a scalar cost by the named policy."""
        if ('reduce_mean' == policy):
            cost = tf.reduce_mean(loss)
        elif ('reduce_sum' == policy):
            cost = tf.reduce_sum(loss)
        else:
            print('{} policy not yet realized'.format(policy))
            exit(1)
        return cost
# In[3]:
# RNN using Datapreprocessor
class RNN_Datapreprocessor(object):
    """Driver that loads preprocessed student data, builds train/test
    Generic_RNN models and runs the training/evaluation epochs (DKT)."""

    def __init__(self):
        # model name used as the checkpoint prefix by tf.train.Saver
        self.model_name = 'DKT'

    # run once for specific dataset config
    def run(self, run_config, rnn_param_config, datapreprocessor_config, data_config, ext):
        """Load data, build the train/test graphs and run all epochs.

        ext selects the serialized data format; only 'pkl' is implemented.
        """
        # run_config includes ...
        # num_epochs: default to 150
        # init_scale: default to 0.05
        # batch_size: default to 100
        # rnn_param_config includes every config except input_size, output_size, num_steps, is_training
        # set datapreprocessor
        self.datapreprocessor = self.load_datapreprocessor(datapreprocessor_config)
        self.data_config = data_config
        # load rnn data
        self.train = self.load_rnn_data(data_config, is_training=True, ext=ext)
        self.test = self.load_rnn_data(data_config, is_training=False, ext=ext)
        #self.train_students, train_num_steps, train_num_skills = self.read_data_from_csv_file('../data/csv_rnn_data/0910_b_train.csv')
        #self.test_students, test_num_steps, test_num_skills = self.read_data_from_csv_file('../data/csv_rnn_data/0910_b_test.csv')
        if ('pkl' == ext):
            self.train_students = self.train['students']
            train_num_steps = int(self.train['num_steps'])
            train_num_skills = int(self.train['num_skills'])
            self.test_students = self.test['students']
            test_num_steps = int(self.test['num_steps'])
            test_num_skills = int(self.test['num_skills'])
            # input is the one-hot (skill, correctness) pair => 2 * num_skills;
            # num_steps - 1 because the last step has no next-step target
            train_rnn_param_config = Generic_RNN.make_rnn_param_config(input_size=train_num_skills*2,
                                                                       output_size=train_num_skills,
                                                                       num_steps=train_num_steps-1,
                                                                       is_training=True,
                                                                       state_size=rnn_param_config.get('state_size', [200]),
                                                                       cell_type=rnn_param_config.get('cell_type', ['LSTM']),
                                                                       activation=rnn_param_config.get('activation', ['tanh']),
                                                                       keep_in=rnn_param_config.get('keep_in', [1.]),
                                                                       keep_out=rnn_param_config.get('keep_out', [0.6]),
                                                                       keep_states=rnn_param_config.get('keep_states', [1.]))
            # test graph uses keep probabilities of 1 (no dropout)
            test_rnn_param_config = Generic_RNN.make_rnn_param_config(input_size=test_num_skills*2,
                                                                      output_size=test_num_skills,
                                                                      num_steps=test_num_steps-1,
                                                                      is_training=False,
                                                                      state_size=rnn_param_config.get('state_size', [200]),
                                                                      cell_type=rnn_param_config.get('cell_type', ['LSTM']),
                                                                      activation=rnn_param_config.get('activation', ['tanh']),
                                                                      keep_in=rnn_param_config.get('keep_in', [1.]),
                                                                      keep_out=rnn_param_config.get('keep_out', [1.]),
                                                                      keep_states=rnn_param_config.get('keep_states', [1.]))
        else:
            print('loading from {} not yet realized'.format(ext))
            exit(1)
        # start graph
        with tf.Graph().as_default():
            session_conf = tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)
            session_conf.gpu_options.allow_growth = True
            # start session
            with tf.Session(config=session_conf) as sess:
                init_scale = run_config.get('init_scale', 0.05)
                num_epochs = run_config.get('num_epochs', 150)
                batch_size = run_config.get('batch_size', 100)
                shuffle_every_epoch = run_config.get('shuffle_every_epoch', True)
                result_path = self.get_result_path(run_config)
                print('result_path: ', result_path)
                initializer = tf.random_uniform_initializer(-init_scale, init_scale)
                # set up train and test models; same variable scope so the
                # test model reuses the trained weights
                with tf.variable_scope('model', reuse=None, initializer=initializer):
                    self.train_model = Generic_RNN(train_rnn_param_config)
                with tf.variable_scope('model', reuse=True, initializer=initializer):
                    self.test_model = Generic_RNN(test_rnn_param_config)
                # run epochs
                self.run_epochs(sess, batch_size, num_epochs, shuffle_every_epoch, result_path, eval_interval=5)

    def run_epochs(self, sess, batch_size, num_epochs, shuffle_every_epoch, result_file_path, eval_interval=5):
        """Train for num_epochs epochs, appending metrics to result_file_path
        and evaluating/checkpointing every eval_interval epochs."""
        # initialize
        sess.run(tf.global_variables_initializer())
        # saver
        saver = tf.train.Saver(tf.global_variables())
        train_students = self.train_students
        if (not shuffle_every_epoch):
            # shuffle once up front instead of per epoch
            random.shuffle(train_students)
        for epoch in range(num_epochs):
            if (shuffle_every_epoch):
                random.shuffle(train_students)
            rmse, auc, r2 = self.run_epoch(sess, batch_size, train_students, is_training=True)
            print("Epoch: %d Train Metrics:\n rmse: %.3f \t auc: %.3f \t r2: %.3f \n" % (epoch + 1, rmse, auc, r2))
            with open(result_file_path, "a+") as f:
                f.write("Epoch: %d Train Metrics:\n rmse: %.3f \t auc: %.3f \t r2: %.3f \n" % (epoch + 1, rmse, auc, r2))
                f.write("\n")
                print("*"*10)
                f.write("\n")
            # save testing results
            if (0 == (epoch + 1) % eval_interval):
                save_path = saver.save(sess, self.model_name)
                print('*' * 10)
                print('Start to test model')
                test_students = self.test_students
                # shuffle test students
                #random.shuffle(test_students)
                rmse, auc, r2 = self.run_epoch(sess, batch_size, test_students, is_training=False)
                print("Epoch: %d Test Metrics:\n rmse: %.3f \t auc: %.3f \t r2: %.3f" % (epoch + 1, rmse, auc, r2))
                with open(result_file_path, "a+") as f:
                    f.write("Epoch: %d Test Metrics:\n rmse: %.3f \t auc: %.3f \t r2: %.3f" % (epoch + 1, rmse, auc, r2))
                    f.write("\n")
                    print("*"*10)
                    f.write("\n")

    # run rnn for one_hot and not_one_hot
    def run_epoch(self, sess, batch_size, students, is_training):
        """Run one pass over students in mini-batches; return (rmse, auc, r2)."""
        if (is_training):
            model = self.train_model
        else:
            model = self.test_model
        batch_start_i = 0
        pred_labels = []
        actual_labels = []
        # not_one_hot: multi-skill encoding without one-hot targets
        not_one_hot = (not self.data_config.get('one_hot', None)) and self.data_config.get('allow_multi_skills', None)
        is_one_hot = not not_one_hot
        # NOTE(review): the loop guard skips the final partial batch entirely,
        # so up to batch_size-1 students are never evaluated — confirm intended.
        while (batch_start_i + batch_size < len(students)):
            # NOTE(review): this if duplicates the loop guard (always true),
            # and mini_batch_size is then overwritten with the constant 100
            # below, shadowing batch_size — looks like leftover debug code.
            if (batch_start_i + batch_size < len(students)):
                mini_batch_size = batch_size
            else:
                mini_batch_size = len(students) - batch_start_i
            mini_batch_size = 100
            pred, actual = self.run_mini_batch(sess,
                                               mini_batch_size,
                                               batch_start_i,
                                               model,
                                               students,
                                               actual_labels,
                                               pred_labels,
                                               is_training,
                                               is_one_hot)
            pred_labels.extend(pred)
            actual_labels.extend(actual)
            batch_start_i += batch_size
        # print pred labels
        #print('len(actual_labels): ', len(actual_labels))
        #print('len(pred_lables): ', len(pred_labels))
        rmse = sqrt(mean_squared_error(actual_labels, pred_labels))
        fpr, tpr, thresholds = metrics.roc_curve(actual_labels, pred_labels, pos_label=1)
        auc = metrics.auc(fpr, tpr)
        # calculate r2
        r2 = r2_score(actual_labels, pred_labels)
        return rmse, auc, r2

    # return pred labels and actual labels to append
    def run_mini_batch(self,
                       sess,
                       batch_size,
                       batch_start_i,
                       model,
                       students,
                       actual_labels,
                       pred_labels,
                       is_training,
                       is_one_hot=True):
        """Encode one mini-batch, run the model and return (pred, actual) labels.

        Each student record is (header, skill_ids, correctness) where
        header[0] holds the interaction count; targets are shifted one step.
        NOTE(review): the actual_labels/pred_labels parameters are immediately
        rebound to fresh lists, so the passed-in lists are never mutated here.
        """
        actual_labels = []
        pred_labels = []
        input_x = np.zeros((batch_size, model.num_steps, model.num_features))
        target_id = []
        target_correctness = []
        # seq_len[student_i]: number of steps for each student
        # skill_seq_len: number of skills for each interaction. constructed as 1d
        seq_len = np.empty(dtype=np.int32, shape=[batch_size])
        skill_seq_len = []
        for student_i in range(batch_size):
            student = students[batch_start_i + student_i]
            seq_len[student_i] = steps = int(student[0][0]) - 1
            skill_ids = student[1]
            correctness = student[2]
            # one_hot
            if (is_one_hot):
                # skill ids is a list of integers for one_hot
                for step_i in range(steps):
                    skill_id = int(skill_ids[step_i])
                    is_correct = int(correctness[step_i])
                    # student_i x num_steps x num_classes + step_i x num_classes + class_i
                    target_id.append(student_i * model.num_steps * model.num_classes
                                     + step_i * model.num_classes + int(skill_ids[step_i + 1]))
                    skill_seq_len.append(1)
                    target_correctness.append(int(correctness[step_i + 1]))
                    actual_labels.append(int(correctness[step_i + 1]))
                    # encode (skill, correct) as index skill_id, (skill, wrong)
                    # as index skill_id + num_classes
                    if (is_correct):
                        input_x[student_i, step_i, skill_id] = 1
                    else:
                        input_x[student_i, step_i, skill_id + model.num_classes] = 1
            # not_one_hot
            else:
                # skill ids is a list of lists of integers for not_one_hot
                # target correctness should be duplicated with the length same as multiple skills
                for step_i in range(steps):
                    skill_ids_input = skill_ids[step_i]
                    is_correct = int(correctness[step_i])
                    # student_i x num_steps x num_classes + step_i x num_classes + k-th skill to predict
                    skill_ids_to_predict = skill_ids[step_i + 1]
                    for skill_id_to_predict in skill_ids_to_predict:
                        target_id.append(student_i * model.num_steps * model.num_classes
                                         + step_i * model.num_classes + int(skill_id_to_predict))
                        skill_seq_len.append(len(skill_ids_to_predict))
                        target_correctness.append(int(correctness[step_i + 1]))
                        actual_labels.append(int(correctness[step_i + 1]))
                    for skill_id in skill_ids_input:
                        # NOTE(review): bare except silently tolerates
                        # out-of-range skill ids after printing debug info —
                        # confirm this is deliberate best-effort handling.
                        try:
                            if (is_correct):
                                input_x[student_i, step_i, skill_id] = 1
                            else:
                                input_x[student_i, step_i, skill_id + model.num_classes] = 1
                        except:
                            print('student_i: ', student_i)
                            print('step_i: ', step_i)
                            print('skill_id: ', skill_id)
                            print('num_classes: ', model.num_classes)
                            print('input_x.shape: ', input_x.shape)
        feed_dict = {
            model.inputs: input_x, model.target_id: target_id,
            model.seq_len: seq_len
        }
        if (is_training):
            # loss placeholders only exist on the training graph
            feed_dict[model.target_correctness] = target_correctness
            feed_dict[model.skill_seq_len] = skill_seq_len
        pred, _ = sess.run([model.pred, model.eval_op], feed_dict=feed_dict)
        # since prediction is always float, does not have to consider one_hot or not
        for p in pred:
            pred_labels.append(p)
        return pred_labels, actual_labels

    def load_datapreprocessor(self, datapreprocessor_config):
        """Instantiate the DataPreprocessor for the configured dataset/version."""
        dataset = datapreprocessor_config.get('dataset', 'Assistments')
        version = datapreprocessor_config.get('version', '2009')
        return DataPreprocessor(dataset, version)

    def load_rnn_data(self, data_config, is_training, ext='pkl'):
        """Load preprocessed RNN data through the datapreprocessor; requires
        that load_datapreprocessor was called first."""
        if ('datapreprocessor' not in self.__dict__):
            print('please set datapreprocessor first')
            exit(1)
        else:
            self.datapreprocessor.set_config(data_config)
            return self.datapreprocessor.load_rnn_data(is_training=is_training, ext=ext)

    def read_data_from_csv_file(self, fileName):
        """Read the 3-lines-per-student CSV format (count / skills / correctness).

        Returns (tuple_rows, max_num_problems, max_skill_num + 1); students
        with 2 or fewer problems are dropped and the result is shuffled.
        """
        inputs = []
        targets = []
        rows = []
        max_skill_num = 0
        max_num_problems = 0
        with open(fileName, "r") as csvfile:
            reader = csv.reader(csvfile, delimiter=',')
            for row in reader:
                rows.append(row)
        index = 0
        i = 0
        print("the number of rows is " + str(len(rows)))
        tuple_rows = []
        #turn list to tuple
        while(index < len(rows)-1):
            problems_num = int(rows[index][0])
            tmp_max_skill = max(map(int, rows[index+1]))
            if(tmp_max_skill > max_skill_num):
                max_skill_num = tmp_max_skill
            if(problems_num <= 2):
                # too short to train on: skip this student's 3 rows
                index += 3
            else:
                if problems_num > max_num_problems:
                    max_num_problems = problems_num
                tup = (rows[index], rows[index+1], rows[index+2])
                tuple_rows.append(tup)
                index += 3
        #shuffle the tuple
        random.shuffle(tuple_rows)
        print("The number of students is " + str(len(tuple_rows)))
        print("Finish reading data")
        return tuple_rows, max_num_problems, max_skill_num+1

    def get_result_path(self, run_config):
        """Build the log-file path encoding dataset, method and config flags.

        config_code is four '0'/'1' chars for has_scaffolding,
        count_no_skill_id, has_test_mode, allow_multi_skills.
        """
        result_folder = '/home/data/jleeae/ML/e_learning/KnowledgeTracing/results/'
        version = self.datapreprocessor.version
        dataset = self.datapreprocessor.dataset
        data_config = self.datapreprocessor.config
        shuffle_every_epoch = run_config.get('shuffle_every_epoch', True)
        split_rate = data_config.get('split_rate', 0.2)
        split_rate = str(int(split_rate * 100))
        method = data_config.get('method', 'default')
        has_scaffolding = data_config.get('has_scaffolding', None)
        count_no_skill_id = data_config.get('count_no_skill_id', None)
        has_test_mode = data_config.get('has_test_mode', None)
        allow_multi_skills = data_config.get('allow_multi_skills', None)
        one_hot = data_config.get('one_hot')
        config_code = ''
        if (has_scaffolding):
            config_code += '1'
        else:
            config_code += '0'
        if (count_no_skill_id):
            config_code += '1'
        else:
            config_code += '0'
        if (has_test_mode):
            config_code += '1'
        else:
            config_code += '0'
        if (allow_multi_skills):
            config_code += '1'
        else:
            config_code += '0'
        result_path = result_folder
        if (shuffle_every_epoch):
            result_path += 'shuffle_every_epoch/'
        else:
            result_path += 'shuffle_once/'
        result_path += ('split_' + split_rate + '/')
        if ('Assistments' == dataset):
            result_path += ('A' + version + '/')
        if (one_hot):
            result_path += 'one_hot/'
        else:
            result_path += 'not_one_hot/'
        result_path += (method + '/')
        if ('sliding_window' == method):
            test_format = data_config.get('test_format', None)
            result_path += test_format
            result_path += '/'
            window_length = data_config.get('window_length', None)
            result_path += ('window_' + str(window_length) + '_')
        batch_size = run_config.get('batch_size', None)
        result_path += ('batch_size_' + str(batch_size) + '_')
        num_epochs = run_config.get('num_epochs', None)
        result_path += ('epochs_' + str(num_epochs) + '_' + config_code + '.log')
        return result_path
# In[4]:
def run_2009_one_hot_default_all(split_rate=0.2, shuffle_every_epoch=True):
    """Run DKT on Assistments 2009 (one-hot inputs, 'default' method).

    Sweeps data-config flag combinations (currently only index 15 = all True),
    skipping any configuration whose run fails.

    Args:
        split_rate: fraction of data held out for testing.
        shuffle_every_epoch: reshuffle training students each epoch.
    """
    # make datapreprocessor_config
    datapreprocessor_config = {
        'dataset': 'Assistments',
        'version': '2009'
    }
    # make rnn_param_config
    rnn_param_config = {
        'state_size': [200],
        'cell_type': ['LSTM'],
        'activation': ['tanh'],
        'keep_in': [1.],
        'keep_out': [0.6],
        'keep_states': [1.]
    }
    # make run config
    run_config = {
        'shuffle_every_epoch': shuffle_every_epoch,
        'num_epochs': 30,
        'batch_size': 100,
        'init_scale': 0.05
    }
    rnn_assistments_instance = RNN_Datapreprocessor()
    for config_index in range(15, 16):
        # 4 binary digits -> 4 boolean flags
        binary_index = format(config_index, '04b')
        config_arr = [bool(int(bit)) for bit in binary_index]
        data_config = {
            'split_rate': split_rate,
            'method': 'default',
            'has_scaffolding': config_arr[0],
            'count_no_skill_id': config_arr[1],
            'has_test_mode': config_arr[2],
            'allow_multi_skills': config_arr[3],
            'one_hot': True
        }
        try:
            rnn_assistments_instance.run(run_config=run_config,
                                         rnn_param_config=rnn_param_config,
                                         datapreprocessor_config=datapreprocessor_config,
                                         data_config=data_config,
                                         ext='pkl')
        except Exception as e:
            # Was a bare 'except: continue', which also swallowed
            # SystemExit/KeyboardInterrupt; log the failure and move on.
            print('config {} failed: {}'.format(binary_index, e))
            continue
# In[5]:
def run_2009_one_hot_sliding_window_all(split_rate=0.2, shuffle_every_epoch=True):
    """Run DKT on Assistments 2009 (one-hot inputs, 'sliding_window' method).

    For each of the 16 flag combinations, runs the four test formats
    (overlapping_last_element, partition, same_as_training, default);
    failures in the latter three are logged and skipped.

    Args:
        split_rate: fraction of data held out for testing.
        shuffle_every_epoch: reshuffle training students each epoch.
    """
    # make datapreprocessor_config
    datapreprocessor_config = {
        'dataset': 'Assistments',
        'version': '2009'
    }
    # make rnn_param_config
    rnn_param_config = {
        'state_size': [200],
        'cell_type': ['LSTM'],
        'activation': ['tanh'],
        'keep_in': [1.],
        'keep_out': [0.6],
        'keep_states': [1.]
    }
    # make run config
    run_config = {
        'shuffle_every_epoch': shuffle_every_epoch,
        'num_epochs': 30,
        'batch_size': 100,
        'init_scale': 0.05
    }
    rnn_assistments_instance = RNN_Datapreprocessor()
    for config_index in range(16):
        # 4 binary digits -> 4 boolean flags
        binary_index = format(config_index, '04b')
        config_arr = [bool(int(bit)) for bit in binary_index]
        data_config = {
            'split_rate': split_rate,
            'method': 'sliding_window',
            'has_scaffolding': config_arr[0],
            'count_no_skill_id': config_arr[1],
            'has_test_mode': config_arr[2],
            'allow_multi_skills': config_arr[3],
            'window_length': 10,
            'test_format': 'overlapping_last_element',
            'one_hot': True
        }
        rnn_assistments_instance.run(run_config=run_config,
                                     rnn_param_config=rnn_param_config,
                                     datapreprocessor_config=datapreprocessor_config,
                                     data_config=data_config,
                                     ext='pkl')
        # remaining test formats are best-effort: log failures and keep going
        # (was bare 'except:', which also swallowed SystemExit/KeyboardInterrupt)
        for test_format in ('partition', 'same_as_training', 'default'):
            data_config['test_format'] = test_format
            try:
                rnn_assistments_instance.run(run_config=run_config,
                                             rnn_param_config=rnn_param_config,
                                             datapreprocessor_config=datapreprocessor_config,
                                             data_config=data_config,
                                             ext='pkl')
            except Exception as e:
                print('config {} / {} failed: {}'.format(binary_index, test_format, e))
# In[6]:
def run_2009_not_one_hot_default_all(split_rate=0.2, shuffle_every_epoch=True):
    """Run every non-one-hot 'default' configuration on Assistments 2009.

    Sweeps all 8 combinations of the three boolean data options
    (scaffolding, no-skill-id counting, test mode); 'allow_multi_skills'
    is fixed to True and inputs are not one-hot encoded.

    Args:
        split_rate: fraction of the data held out for testing.
        shuffle_every_epoch: whether to reshuffle training data each epoch.
    """
    # Dataset selection for the preprocessor.
    datapreprocessor_config = {
        'dataset': 'Assistments',
        'version': '2009'
    }
    # RNN hyper-parameters (single LSTM layer, dropout on the output).
    rnn_param_config = {
        'state_size': [200],
        'cell_type': ['LSTM'],
        'activation': ['tanh'],
        'keep_in': [1.],
        'keep_out': [0.6],
        'keep_states': [1.]
    }
    # Training-loop settings.
    run_config = {
        'shuffle_every_epoch': shuffle_every_epoch,
        'num_epochs': 30,
        'batch_size': 100,
        'init_scale': 0.05
    }
    rnn_assistments_instance = RNN_Datapreprocessor()
    for config_index in range(8):
        # Decode the index into three booleans, one per data option.
        config_arr = [bit == '1' for bit in format(config_index, '03b')]
        data_config = {
            'split_rate': split_rate,
            'method': 'default',
            'has_scaffolding': config_arr[0],
            'count_no_skill_id': config_arr[1],
            'has_test_mode': config_arr[2],
            'allow_multi_skills': True,
            'one_hot': False
        }
        try:
            rnn_assistments_instance.run(run_config=run_config,
                                         rnn_param_config=rnn_param_config,
                                         datapreprocessor_config=datapreprocessor_config,
                                         data_config=data_config,
                                         ext='pkl')
        except Exception:
            # Best-effort sweep: skip configurations that fail. Catch
            # `Exception` (not a bare `except:`) so KeyboardInterrupt and
            # SystemExit still propagate.
            continue
# In[7]:
def run_2009_not_one_hot_sliding_window_all(split_rate=0.2, shuffle_every_epoch=True):
    """Run every non-one-hot sliding-window configuration on Assistments 2009.

    Sweeps all 8 combinations of the three boolean data options
    (scaffolding, no-skill-id counting, test mode) and, for each
    combination, trains once per test format. 'allow_multi_skills' is
    fixed to True and inputs are not one-hot encoded.

    Args:
        split_rate: fraction of the data held out for testing.
        shuffle_every_epoch: whether to reshuffle training data each epoch.
    """
    # Dataset selection for the preprocessor.
    datapreprocessor_config = {
        'dataset': 'Assistments',
        'version': '2009'
    }
    # RNN hyper-parameters (single LSTM layer, dropout on the output).
    rnn_param_config = {
        'state_size': [200],
        'cell_type': ['LSTM'],
        'activation': ['tanh'],
        'keep_in': [1.],
        'keep_out': [0.6],
        'keep_states': [1.]
    }
    # Training-loop settings.
    run_config = {
        'shuffle_every_epoch': shuffle_every_epoch,
        'num_epochs': 30,
        'batch_size': 100,
        'init_scale': 0.05
    }
    rnn_assistments_instance = RNN_Datapreprocessor()
    for config_index in range(8):
        # Decode the index into three booleans, one per data option.
        config_arr = [bit == '1' for bit in format(config_index, '03b')]
        data_config = {
            'split_rate': split_rate,
            'method': 'sliding_window',
            'has_scaffolding': config_arr[0],
            'count_no_skill_id': config_arr[1],
            'has_test_mode': config_arr[2],
            'allow_multi_skills': True,
            'window_length': 10,
            'test_format': 'overlapping_last_element',
            'one_hot': False
        }
        # The first format runs unguarded, matching the original flow:
        # a failure here aborts the whole sweep.
        rnn_assistments_instance.run(run_config=run_config,
                                     rnn_param_config=rnn_param_config,
                                     datapreprocessor_config=datapreprocessor_config,
                                     data_config=data_config,
                                     ext='pkl')
        # The remaining formats are best-effort: a failure skips to the
        # next format. Catch `Exception` (not a bare `except:`) so
        # KeyboardInterrupt/SystemExit still propagate.
        for test_format in ('partition', 'same_as_training', 'default'):
            data_config['test_format'] = test_format
            try:
                rnn_assistments_instance.run(run_config=run_config,
                                             rnn_param_config=rnn_param_config,
                                             datapreprocessor_config=datapreprocessor_config,
                                             data_config=data_config,
                                             ext='pkl')
            except Exception:
                continue
# In[17]:
if ('__main__' == __name__):
    # Experiment entry point. Only the one-hot/default sweep is currently
    # enabled; the other three sweeps are kept commented out so they can
    # be switched on by hand.
    split_rate=0.1
    shuffle_every_epoch=False
    #run_2009_not_one_hot_default_all(split_rate=split_rate, shuffle_every_epoch=shuffle_every_epoch)
    #run_2009_not_one_hot_sliding_window_all(split_rate=split_rate, shuffle_every_epoch=shuffle_every_epoch)
    run_2009_one_hot_default_all(split_rate=split_rate, shuffle_every_epoch=shuffle_every_epoch)
    #run_2009_one_hot_sliding_window_all(split_rate=split_rate, shuffle_every_epoch=shuffle_every_epoch)
| |
# Copyright 2018 RedHat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests the Multiple VMware Datastore backend store"""
import hashlib
from unittest import mock
import uuid
from oslo_config import cfg
from oslo_utils import secretutils
from oslo_utils import units
from oslo_vmware import api
from oslo_vmware import exceptions as vmware_exceptions
from oslo_vmware.objects import datacenter as oslo_datacenter
from oslo_vmware.objects import datastore as oslo_datastore
import six
import glance_store as store
import glance_store._drivers.vmware_datastore as vm_store
from glance_store import exceptions
from glance_store import location
from glance_store.tests import base
from glance_store.tests.unit import test_store_capabilities
from glance_store.tests import utils
# Stable fake image id reused by the test URIs below.
FAKE_UUID = str(uuid.uuid4())
# Canonical payload size used by the add/read tests.
FIVE_KB = 5 * units.Ki
# Reference VMware settings; tests read these when building expected
# store locations (see format_location calls in TestMultiStore).
VMWARE_DS = {
    'debug': True,
    'vmware_server_host': '127.0.0.1',
    'vmware_server_username': 'username',
    'vmware_server_password': 'password',
    'vmware_store_image_dir': '/openstack_glance',
    'vmware_insecure': 'True',
    'vmware_datastores': ['a:b:0'],
}
def format_location(host_ip, folder_name, image_id, datastores):
    """Return a VMware Datastore store URI built from its component pieces.

    Only the first entry of ``datastores`` is used. Each entry has the
    form ``'datacenter_path:datastore_name:weight'``; the weight does not
    appear in the URI and is ignored.
    """
    scheme = 'vsphere'
    # `_weight` is unpacked only to validate the 3-part entry format.
    (datacenter_path, datastore_name, _weight) = datastores[0].split(':')
    return ("%s://%s/folder%s/%s?dcPath=%s&dsName=%s"
            % (scheme, host_ip, folder_name,
               image_id, datacenter_path, datastore_name))
def fake_datastore_obj(*args, **kwargs):
    """Build a fake oslo Datastore.

    ``args[0]`` becomes the datacenter path and ``args[1]`` the datastore
    name; both objects carry a placeholder managed-object reference.
    """
    datacenter = oslo_datacenter.Datacenter(ref='fake-ref',
                                            name='fake-name')
    datacenter.path = args[0]
    datastore = oslo_datastore.Datastore(ref='fake-ref',
                                         datacenter=datacenter,
                                         name=args[1])
    return datastore
class TestMultiStore(base.MultiStoreBaseTest,
                     test_store_capabilities.TestStoreCapabilitiesChecking):
    """Exercise the VMware datastore driver with two configured backends.

    All network traffic is mocked: VMware API sessions via
    ``oslo_vmware.api.VMwareAPISession`` patches and HTTP via
    ``requests.Session.request``.
    """
    # NOTE(flaper87): temporary until we
    # can move to a fully-local lib.
    # (Swift store's fault)
    _CONF = cfg.ConfigOpts()
    @mock.patch.object(vm_store.Store, '_get_datastore')
    @mock.patch('oslo_vmware.api.VMwareAPISession')
    def setUp(self, mock_api_session, mock_get_datastore):
        """Establish a clean test environment."""
        super(TestMultiStore, self).setUp()
        # Two VMware backends are registered; 'vmware1' is the default
        # store that self.store is built on.
        enabled_backends = {
            "vmware1": "vmware",
            "vmware2": "vmware"
        }
        self.hash_algo = 'sha256'
        self.conf = self._CONF
        self.conf(args=[])
        self.conf.register_opt(cfg.DictOpt('enabled_backends'))
        self.config(enabled_backends=enabled_backends)
        store.register_store_opts(self.conf)
        self.config(default_backend='vmware1', group='glance_store')
        # set vmware related config options
        self.config(group='vmware1',
                    vmware_server_username='admin',
                    vmware_server_password='admin',
                    vmware_server_host='127.0.0.1',
                    vmware_insecure='True',
                    vmware_datastores=['a:b:0'],
                    vmware_store_image_dir='/openstack_glance')
        self.config(group='vmware2',
                    vmware_server_username='admin',
                    vmware_server_password='admin',
                    vmware_server_host='127.0.0.1',
                    vmware_insecure='True',
                    vmware_datastores=['a:b:1'],
                    vmware_store_image_dir='/openstack_glance_1')
        # Ensure stores + locations cleared
        location.SCHEME_TO_CLS_BACKEND_MAP = {}
        store.create_multi_stores(self.conf)
        self.addCleanup(setattr, location, 'SCHEME_TO_CLS_BACKEND_MAP',
                        dict())
        self.addCleanup(self.conf.reset)
        # Small chunk size so chunked reads are exercised with tiny data.
        vm_store.Store.CHUNKSIZE = 2
        mock_get_datastore.side_effect = fake_datastore_obj
        self.store = vm_store.Store(self.conf, backend="vmware1")
        self.store.configure()
    def _mock_http_connection(self):
        # Helper used where an http.client-level mock is needed instead
        # of the requests-level one.
        return mock.patch('six.moves.http_client.HTTPConnection')
    def test_location_url_prefix_is_set(self):
        expected_url_prefix = "vsphere://127.0.0.1/openstack_glance"
        self.assertEqual(expected_url_prefix, self.store.url_prefix)
    @mock.patch('oslo_vmware.api.VMwareAPISession')
    def test_get(self, mock_api_session):
        """Test a "normal" retrieval of an image in chunks."""
        expected_image_size = 31
        expected_returns = ['I am a teapot, short and stout\n']
        loc = location.get_location_from_uri_and_backend(
            "vsphere://127.0.0.1/folder/openstack_glance/%s"
            "?dsName=ds1&dcPath=dc1" % FAKE_UUID, "vmware1", conf=self.conf)
        with mock.patch('requests.Session.request') as HttpConn:
            HttpConn.return_value = utils.fake_response()
            (image_file, image_size) = self.store.get(loc)
            self.assertEqual(expected_image_size, image_size)
            chunks = [c for c in image_file]
            self.assertEqual(expected_returns, chunks)
    @mock.patch('oslo_vmware.api.VMwareAPISession')
    def test_get_non_existing(self, mock_api_session):
        """
        Test that trying to retrieve an image that doesn't exist
        raises an error
        """
        loc = location.get_location_from_uri_and_backend(
            "vsphere://127.0.0.1/folder/openstack_glan"
            "ce/%s?dsName=ds1&dcPath=dc1" % FAKE_UUID, "vmware1",
            conf=self.conf)
        with mock.patch('requests.Session.request') as HttpConn:
            HttpConn.return_value = utils.fake_response(status_code=404)
            self.assertRaises(exceptions.NotFound, self.store.get, loc)
    @mock.patch.object(vm_store.Store, '_build_vim_cookie_header')
    @mock.patch.object(vm_store.Store, 'select_datastore')
    @mock.patch.object(vm_store._Reader, 'size')
    @mock.patch.object(api, 'VMwareAPISession')
    def test_add(self, fake_api_session, fake_size, fake_select_datastore,
                 fake_cookie):
        """Test that we can add an image via the VMware backend."""
        fake_select_datastore.return_value = self.store.datastores[0][0]
        expected_image_id = str(uuid.uuid4())
        expected_size = FIVE_KB
        expected_contents = b"*" * expected_size
        hash_code = secretutils.md5(expected_contents, usedforsecurity=False)
        expected_checksum = hash_code.hexdigest()
        fake_size.__get__ = mock.Mock(return_value=expected_size)
        expected_cookie = 'vmware_soap_session=fake-uuid'
        fake_cookie.return_value = expected_cookie
        expected_headers = {'Content-Length': six.text_type(expected_size),
                            'Cookie': expected_cookie}
        with mock.patch('hashlib.md5') as md5:
            md5.return_value = hash_code
            expected_location = format_location(
                VMWARE_DS['vmware_server_host'],
                VMWARE_DS['vmware_store_image_dir'],
                expected_image_id,
                VMWARE_DS['vmware_datastores'])
            image = six.BytesIO(expected_contents)
            with mock.patch('requests.Session.request') as HttpConn:
                HttpConn.return_value = utils.fake_response()
                location, size, checksum, metadata = self.store.add(
                    expected_image_id, image, expected_size)
                _, kwargs = HttpConn.call_args
                self.assertEqual(expected_headers, kwargs['headers'])
        self.assertEqual("vmware1", metadata["store"])
        self.assertEqual(utils.sort_url_by_qs_keys(expected_location),
                         utils.sort_url_by_qs_keys(location))
        self.assertEqual(expected_size, size)
        self.assertEqual(expected_checksum, checksum)
    @mock.patch.object(vm_store.Store, 'select_datastore')
    @mock.patch.object(vm_store._Reader, 'size')
    @mock.patch('oslo_vmware.api.VMwareAPISession')
    def test_add_size_zero(self, mock_api_session, fake_size,
                           fake_select_datastore):
        """
        Test that when specifying size zero for the image to add,
        the actual size of the image is returned.
        """
        fake_select_datastore.return_value = self.store.datastores[0][0]
        expected_image_id = str(uuid.uuid4())
        expected_size = FIVE_KB
        expected_contents = b"*" * expected_size
        hash_code = secretutils.md5(expected_contents, usedforsecurity=False)
        expected_checksum = hash_code.hexdigest()
        fake_size.__get__ = mock.Mock(return_value=expected_size)
        with mock.patch('hashlib.md5') as md5:
            md5.return_value = hash_code
            expected_location = format_location(
                VMWARE_DS['vmware_server_host'],
                VMWARE_DS['vmware_store_image_dir'],
                expected_image_id,
                VMWARE_DS['vmware_datastores'])
            image = six.BytesIO(expected_contents)
            with mock.patch('requests.Session.request') as HttpConn:
                HttpConn.return_value = utils.fake_response()
                location, size, checksum, metadata = self.store.add(
                    expected_image_id, image, 0)
        self.assertEqual("vmware1", metadata["store"])
        self.assertEqual(utils.sort_url_by_qs_keys(expected_location),
                         utils.sort_url_by_qs_keys(location))
        self.assertEqual(expected_size, size)
        self.assertEqual(expected_checksum, checksum)
    @mock.patch.object(vm_store.Store, 'select_datastore')
    @mock.patch('glance_store._drivers.vmware_datastore._Reader')
    def test_add_with_verifier(self, fake_reader, fake_select_datastore):
        """Test that the verifier is passed to the _Reader during add."""
        verifier = mock.MagicMock(name='mock_verifier')
        image_id = str(uuid.uuid4())
        size = FIVE_KB
        contents = b"*" * size
        image = six.BytesIO(contents)
        with mock.patch('requests.Session.request') as HttpConn:
            HttpConn.return_value = utils.fake_response()
            location, size, checksum, multihash, metadata = self.store.add(
                image_id, image, size, self.hash_algo, verifier=verifier)
            self.assertEqual("vmware1", metadata["store"])
        fake_reader.assert_called_with(image, self.hash_algo, verifier)
    @mock.patch.object(vm_store.Store, 'select_datastore')
    @mock.patch('glance_store._drivers.vmware_datastore._Reader')
    def test_add_with_verifier_size_zero(self, fake_reader, fake_select_ds):
        """Test that the verifier is passed to the _ChunkReader during add."""
        verifier = mock.MagicMock(name='mock_verifier')
        image_id = str(uuid.uuid4())
        size = FIVE_KB
        contents = b"*" * size
        image = six.BytesIO(contents)
        with mock.patch('requests.Session.request') as HttpConn:
            HttpConn.return_value = utils.fake_response()
            location, size, checksum, multihash, metadata = self.store.add(
                image_id, image, 0, self.hash_algo, verifier=verifier)
            self.assertEqual("vmware1", metadata["store"])
        fake_reader.assert_called_with(image, self.hash_algo, verifier)
    @mock.patch('oslo_vmware.api.VMwareAPISession')
    def test_delete(self, mock_api_session):
        """Test we can delete an existing image in the VMware store."""
        loc = location.get_location_from_uri_and_backend(
            "vsphere://127.0.0.1/folder/openstack_glance/%s?"
            "dsName=ds1&dcPath=dc1" % FAKE_UUID, "vmware1", conf=self.conf)
        with mock.patch('requests.Session.request') as HttpConn:
            HttpConn.return_value = utils.fake_response()
            vm_store.Store._service_content = mock.Mock()
            self.store.delete(loc)
        with mock.patch('requests.Session.request') as HttpConn:
            HttpConn.return_value = utils.fake_response(status_code=404)
            self.assertRaises(exceptions.NotFound, self.store.get, loc)
    @mock.patch('oslo_vmware.api.VMwareAPISession')
    def test_delete_non_existing(self, mock_api_session):
        """
        Test that trying to delete an image that doesn't exist raises an error
        """
        loc = location.get_location_from_uri_and_backend(
            "vsphere://127.0.0.1/folder/openstack_glance/%s?"
            "dsName=ds1&dcPath=dc1" % FAKE_UUID,
            "vmware1", conf=self.conf)
        with mock.patch.object(self.store.session,
                               'wait_for_task') as mock_task:
            mock_task.side_effect = vmware_exceptions.FileNotFoundException
            self.assertRaises(exceptions.NotFound, self.store.delete, loc)
    @mock.patch('oslo_vmware.api.VMwareAPISession')
    def test_get_size(self, mock_api_session):
        """
        Test we can get the size of an existing image in the VMware store
        """
        loc = location.get_location_from_uri_and_backend(
            "vsphere://127.0.0.1/folder/openstack_glance/%s"
            "?dsName=ds1&dcPath=dc1" % FAKE_UUID, "vmware1", conf=self.conf)
        with mock.patch('requests.Session.request') as HttpConn:
            HttpConn.return_value = utils.fake_response()
            image_size = self.store.get_size(loc)
            self.assertEqual(image_size, 31)
    @mock.patch('oslo_vmware.api.VMwareAPISession')
    def test_get_size_non_existing(self, mock_api_session):
        """
        Test that trying to retrieve an image size that doesn't exist
        raises an error
        """
        loc = location.get_location_from_uri_and_backend(
            "vsphere://127.0.0.1/folder/openstack_glan"
            "ce/%s?dsName=ds1&dcPath=dc1" % FAKE_UUID,
            "vmware1", conf=self.conf)
        with mock.patch('requests.Session.request') as HttpConn:
            HttpConn.return_value = utils.fake_response(status_code=404)
            self.assertRaises(exceptions.NotFound, self.store.get_size, loc)
    def test_reader_full(self):
        # _Reader must report content, md5 checksum, multihash and size
        # after a full read.
        content = b'XXX'
        image = six.BytesIO(content)
        expected_checksum = secretutils.md5(content,
                                            usedforsecurity=False).hexdigest()
        expected_multihash = hashlib.sha256(content).hexdigest()
        reader = vm_store._Reader(image, self.hash_algo)
        ret = reader.read()
        self.assertEqual(content, ret)
        self.assertEqual(expected_checksum, reader.checksum.hexdigest())
        self.assertEqual(expected_multihash, reader.os_hash_value.hexdigest())
        self.assertEqual(len(content), reader.size)
    def test_reader_partial(self):
        # Hashes and size reflect only the bytes actually read.
        content = b'XXX'
        image = six.BytesIO(content)
        expected_checksum = secretutils.md5(b'X',
                                            usedforsecurity=False).hexdigest()
        expected_multihash = hashlib.sha256(b'X').hexdigest()
        reader = vm_store._Reader(image, self.hash_algo)
        ret = reader.read(1)
        self.assertEqual(b'X', ret)
        self.assertEqual(expected_checksum, reader.checksum.hexdigest())
        self.assertEqual(expected_multihash, reader.os_hash_value.hexdigest())
        self.assertEqual(1, reader.size)
    def test_reader_with_verifier(self):
        content = b'XXX'
        image = six.BytesIO(content)
        verifier = mock.MagicMock(name='mock_verifier')
        reader = vm_store._Reader(image, self.hash_algo, verifier)
        reader.read()
        verifier.update.assert_called_with(content)
    def test_sanity_check_multiple_datastores(self):
        self.config(group='vmware1', vmware_api_retry_count=1)
        self.config(group='vmware1', vmware_task_poll_interval=1)
        self.config(group='vmware1', vmware_datastores=['a:b:0', 'a:d:0'])
        try:
            self.store._sanity_check()
        except exceptions.BadStoreConfiguration:
            self.fail()
    def test_parse_datastore_info_and_weight_less_opts(self):
        datastore = 'a'
        self.assertRaises(exceptions.BadStoreConfiguration,
                          self.store._parse_datastore_info_and_weight,
                          datastore)
    def test_parse_datastore_info_and_weight_invalid_weight(self):
        datastore = 'a:b:c'
        self.assertRaises(exceptions.BadStoreConfiguration,
                          self.store._parse_datastore_info_and_weight,
                          datastore)
    def test_parse_datastore_info_and_weight_empty_opts(self):
        datastore = 'a: :0'
        self.assertRaises(exceptions.BadStoreConfiguration,
                          self.store._parse_datastore_info_and_weight,
                          datastore)
        datastore = ':b:0'
        self.assertRaises(exceptions.BadStoreConfiguration,
                          self.store._parse_datastore_info_and_weight,
                          datastore)
    def test_parse_datastore_info_and_weight(self):
        datastore = 'a:b:100'
        parts = self.store._parse_datastore_info_and_weight(datastore)
        self.assertEqual('a', parts[0])
        self.assertEqual('b', parts[1])
        self.assertEqual(100, parts[2])
    def test_parse_datastore_info_and_weight_default_weight(self):
        # Weight defaults to 0 when the third component is omitted.
        datastore = 'a:b'
        parts = self.store._parse_datastore_info_and_weight(datastore)
        self.assertEqual('a', parts[0])
        self.assertEqual('b', parts[1])
        self.assertEqual(0, parts[2])
    @mock.patch.object(vm_store.Store, 'select_datastore')
    @mock.patch.object(api, 'VMwareAPISession')
    def test_unexpected_status(self, mock_api_session, mock_select_datastore):
        expected_image_id = str(uuid.uuid4())
        expected_size = FIVE_KB
        expected_contents = b"*" * expected_size
        image = six.BytesIO(expected_contents)
        self.session = mock.Mock()
        with mock.patch('requests.Session.request') as HttpConn:
            HttpConn.return_value = utils.fake_response(status_code=401)
            self.assertRaises(exceptions.BackendException,
                              self.store.add,
                              expected_image_id, image, expected_size)
    @mock.patch.object(vm_store.Store, 'select_datastore')
    @mock.patch.object(api, 'VMwareAPISession')
    def test_unexpected_status_no_response_body(self, mock_api_session,
                                                mock_select_datastore):
        expected_image_id = str(uuid.uuid4())
        expected_size = FIVE_KB
        expected_contents = b"*" * expected_size
        image = six.BytesIO(expected_contents)
        self.session = mock.Mock()
        with self._mock_http_connection() as HttpConn:
            HttpConn.return_value = utils.fake_response(status_code=500,
                                                        no_response_body=True)
            self.assertRaises(exceptions.BackendException,
                              self.store.add,
                              expected_image_id, image, expected_size)
    @mock.patch.object(api, 'VMwareAPISession')
    def test_reset_session(self, mock_api_session):
        self.store.reset_session()
        self.assertTrue(mock_api_session.called)
    @mock.patch.object(api, 'VMwareAPISession')
    def test_build_vim_cookie_header_active(self, mock_api_session):
        self.store.session.is_current_session_active = mock.Mock()
        self.store.session.is_current_session_active.return_value = True
        self.store._build_vim_cookie_header(True)
        self.assertFalse(mock_api_session.called)
    @mock.patch.object(api, 'VMwareAPISession')
    def test_build_vim_cookie_header_expired(self, mock_api_session):
        self.store.session.is_current_session_active = mock.Mock()
        self.store.session.is_current_session_active.return_value = False
        self.store._build_vim_cookie_header(True)
        self.assertTrue(mock_api_session.called)
    @mock.patch.object(api, 'VMwareAPISession')
    def test_build_vim_cookie_header_expired_noverify(self, mock_api_session):
        self.store.session.is_current_session_active = mock.Mock()
        self.store.session.is_current_session_active.return_value = False
        self.store._build_vim_cookie_header()
        self.assertFalse(mock_api_session.called)
    @mock.patch.object(vm_store.Store, 'select_datastore')
    @mock.patch.object(api, 'VMwareAPISession')
    def test_add_ioerror(self, mock_api_session, mock_select_datastore):
        mock_select_datastore.return_value = self.store.datastores[0][0]
        expected_image_id = str(uuid.uuid4())
        expected_size = FIVE_KB
        expected_contents = b"*" * expected_size
        image = six.BytesIO(expected_contents)
        self.session = mock.Mock()
        with mock.patch('requests.Session.request') as HttpConn:
            HttpConn.request.side_effect = IOError
            self.assertRaises(exceptions.BackendException,
                              self.store.add,
                              expected_image_id, image, expected_size)
    def test_qs_sort_with_literal_question_mark(self):
        url = 'scheme://example.com/path?key2=val2&key1=val1?sort=true'
        exp_url = 'scheme://example.com/path?key1=val1%3Fsort%3Dtrue&key2=val2'
        self.assertEqual(exp_url,
                         utils.sort_url_by_qs_keys(url))
    @mock.patch.object(vm_store.Store, '_get_datastore')
    @mock.patch.object(api, 'VMwareAPISession')
    def test_build_datastore_weighted_map(self, mock_api_session, mock_ds_obj):
        datastores = ['a:b:100', 'c:d:100', 'e:f:200']
        mock_ds_obj.side_effect = fake_datastore_obj
        ret = self.store._build_datastore_weighted_map(datastores)
        ds = ret[200]
        self.assertEqual('e', ds[0].datacenter.path)
        self.assertEqual('f', ds[0].name)
        ds = ret[100]
        self.assertEqual(2, len(ds))
    @mock.patch.object(vm_store.Store, '_get_datastore')
    @mock.patch.object(api, 'VMwareAPISession')
    def test_build_datastore_weighted_map_equal_weight(self, mock_api_session,
                                                       mock_ds_obj):
        datastores = ['a:b:200', 'a:b:200']
        mock_ds_obj.side_effect = fake_datastore_obj
        ret = self.store._build_datastore_weighted_map(datastores)
        ds = ret[200]
        self.assertEqual(2, len(ds))
    @mock.patch.object(vm_store.Store, '_get_datastore')
    @mock.patch.object(api, 'VMwareAPISession')
    def test_build_datastore_weighted_map_empty_list(self, mock_api_session,
                                                     mock_ds_ref):
        datastores = []
        ret = self.store._build_datastore_weighted_map(datastores)
        self.assertEqual({}, ret)
    @mock.patch.object(vm_store.Store, '_get_datastore')
    @mock.patch.object(vm_store.Store, '_get_freespace')
    def test_select_datastore_insufficient_freespace(self, mock_get_freespace,
                                                     mock_ds_ref):
        datastores = ['a:b:100', 'c:d:100', 'e:f:200']
        image_size = 10
        self.store.datastores = (
            self.store._build_datastore_weighted_map(datastores))
        freespaces = [5, 5, 5]
        def fake_get_fp(*args, **kwargs):
            return freespaces.pop(0)
        mock_get_freespace.side_effect = fake_get_fp
        self.assertRaises(exceptions.StorageFull,
                          self.store.select_datastore, image_size)
    @mock.patch.object(vm_store.Store, '_get_datastore')
    @mock.patch.object(vm_store.Store, '_get_freespace')
    def test_select_datastore_insufficient_fs_one_ds(self, mock_get_freespace,
                                                     mock_ds_ref):
        # Tests if fs is updated with just one datastore.
        datastores = ['a:b:100']
        image_size = 10
        self.store.datastores = (
            self.store._build_datastore_weighted_map(datastores))
        freespaces = [5]
        def fake_get_fp(*args, **kwargs):
            return freespaces.pop(0)
        mock_get_freespace.side_effect = fake_get_fp
        self.assertRaises(exceptions.StorageFull,
                          self.store.select_datastore, image_size)
    @mock.patch.object(vm_store.Store, '_get_datastore')
    @mock.patch.object(vm_store.Store, '_get_freespace')
    def test_select_datastore_equal_freespace(self, mock_get_freespace,
                                              mock_ds_obj):
        datastores = ['a:b:100', 'c:d:100', 'e:f:200']
        image_size = 10
        mock_ds_obj.side_effect = fake_datastore_obj
        self.store.datastores = (
            self.store._build_datastore_weighted_map(datastores))
        freespaces = [11, 11, 11]
        def fake_get_fp(*args, **kwargs):
            return freespaces.pop(0)
        mock_get_freespace.side_effect = fake_get_fp
        ds = self.store.select_datastore(image_size)
        self.assertEqual('e', ds.datacenter.path)
        self.assertEqual('f', ds.name)
    @mock.patch.object(vm_store.Store, '_get_datastore')
    @mock.patch.object(vm_store.Store, '_get_freespace')
    def test_select_datastore_contention(self, mock_get_freespace,
                                         mock_ds_obj):
        datastores = ['a:b:100', 'c:d:100', 'e:f:200']
        image_size = 10
        mock_ds_obj.side_effect = fake_datastore_obj
        self.store.datastores = (
            self.store._build_datastore_weighted_map(datastores))
        freespaces = [5, 11, 12]
        def fake_get_fp(*args, **kwargs):
            return freespaces.pop(0)
        mock_get_freespace.side_effect = fake_get_fp
        ds = self.store.select_datastore(image_size)
        self.assertEqual('c', ds.datacenter.path)
        self.assertEqual('d', ds.name)
    def test_select_datastore_empty_list(self):
        datastores = []
        self.store.datastores = (
            self.store._build_datastore_weighted_map(datastores))
        self.assertRaises(exceptions.StorageFull,
                          self.store.select_datastore, 10)
    @mock.patch('oslo_vmware.api.VMwareAPISession')
    def test_get_datacenter_ref(self, mock_api_session):
        datacenter_path = 'Datacenter1'
        self.store._get_datacenter(datacenter_path)
        self.store.session.invoke_api.assert_called_with(
            self.store.session.vim,
            'FindByInventoryPath',
            self.store.session.vim.service_content.searchIndex,
            inventoryPath=datacenter_path)
    @mock.patch('oslo_vmware.api.VMwareAPISession')
    def test_http_get_redirect(self, mock_api_session):
        # Add two layers of redirects to the response stack, which will
        # return the default 200 OK with the expected data after resolving
        # both redirects.
        redirect1 = {"location": "https://example.com?dsName=ds1&dcPath=dc1"}
        redirect2 = {"location": "https://example.com?dsName=ds2&dcPath=dc2"}
        responses = [utils.fake_response(),
                     utils.fake_response(status_code=302, headers=redirect1),
                     utils.fake_response(status_code=301, headers=redirect2)]
        def getresponse(*args, **kwargs):
            return responses.pop()
        expected_image_size = 31
        expected_returns = ['I am a teapot, short and stout\n']
        loc = location.get_location_from_uri_and_backend(
            "vsphere://127.0.0.1/folder/openstack_glance/%s"
            "?dsName=ds1&dcPath=dc1" % FAKE_UUID, "vmware1", conf=self.conf)
        with mock.patch('requests.Session.request') as HttpConn:
            HttpConn.side_effect = getresponse
            (image_file, image_size) = self.store.get(loc)
            self.assertEqual(expected_image_size, image_size)
            chunks = [c for c in image_file]
            self.assertEqual(expected_returns, chunks)
    @mock.patch('oslo_vmware.api.VMwareAPISession')
    def test_http_get_max_redirects(self, mock_api_session):
        redirect = {"location": "https://example.com?dsName=ds1&dcPath=dc1"}
        responses = ([utils.fake_response(status_code=302, headers=redirect)]
                     * (vm_store.MAX_REDIRECTS + 1))
        def getresponse(*args, **kwargs):
            return responses.pop()
        loc = location.get_location_from_uri_and_backend(
            "vsphere://127.0.0.1/folder/openstack_glance/%s"
            "?dsName=ds1&dcPath=dc1" % FAKE_UUID, "vmware1", conf=self.conf)
        with mock.patch('requests.Session.request') as HttpConn:
            HttpConn.side_effect = getresponse
            self.assertRaises(exceptions.MaxRedirectsExceeded, self.store.get,
                              loc)
    @mock.patch('oslo_vmware.api.VMwareAPISession')
    def test_http_get_redirect_invalid(self, mock_api_session):
        # 307 is not followed by the store; it must surface as BadStoreUri.
        redirect = {"location": "https://example.com?dsName=ds1&dcPath=dc1"}
        loc = location.get_location_from_uri_and_backend(
            "vsphere://127.0.0.1/folder/openstack_glance/%s"
            "?dsName=ds1&dcPath=dc1" % FAKE_UUID, "vmware1", conf=self.conf)
        with mock.patch('requests.Session.request') as HttpConn:
            HttpConn.return_value = utils.fake_response(status_code=307,
                                                        headers=redirect)
            self.assertRaises(exceptions.BadStoreUri, self.store.get, loc)
| |
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.utils import ConfigDict
from mmdet.models.utils.transformer import (AdaptivePadding,
DetrTransformerDecoder,
DetrTransformerEncoder, PatchEmbed,
PatchMerging, Transformer)
def test_adaptive_padding():
    """AdaptivePadding should pad H/W up to multiples of the effective
    kernel footprint under both 'same' and 'corner' modes, and reject any
    other padding argument."""
    for padding in ('same', 'corner'):
        kernel_size = 16
        stride = 16
        dilation = 1
        input = torch.rand(1, 1, 15, 17)
        pool = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        out = pool(input)
        # padding to divisible by 16
        assert (out.shape[2], out.shape[3]) == (16, 32)
        input = torch.rand(1, 1, 16, 17)
        out = pool(input)
        # padding to divisible by 16
        assert (out.shape[2], out.shape[3]) == (16, 32)
        kernel_size = (2, 2)
        stride = (2, 2)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        input = torch.rand(1, 1, 11, 13)
        out = adap_pad(input)
        # padding to divisible by 2
        assert (out.shape[2], out.shape[3]) == (12, 14)
        kernel_size = (2, 2)
        stride = (10, 10)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        input = torch.rand(1, 1, 10, 13)
        out = adap_pad(input)
        # no padding
        assert (out.shape[2], out.shape[3]) == (10, 13)
        kernel_size = (11, 11)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        input = torch.rand(1, 1, 11, 13)
        out = adap_pad(input)
        # all padding
        assert (out.shape[2], out.shape[3]) == (21, 21)
        # test padding as kernel is (7,9)
        input = torch.rand(1, 1, 11, 13)
        stride = (3, 4)
        kernel_size = (4, 5)
        dilation = (2, 2)
        # actually (7, 9)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        dilation_out = adap_pad(input)
        assert (dilation_out.shape[2], dilation_out.shape[3]) == (16, 21)
        # a dilated (4,5) kernel and a plain (7,9) kernel have the same
        # effective footprint, so output shapes must match
        kernel_size = (7, 9)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding)
        kernel79_out = adap_pad(input)
        assert (kernel79_out.shape[2], kernel79_out.shape[3]) == (16, 21)
        assert kernel79_out.shape == dilation_out.shape
    # assert only support "same" "corner"
    with pytest.raises(AssertionError):
        AdaptivePadding(
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=1)
def test_patch_embed():
    """Unit test for PatchEmbed.

    Checks: (a) the (B, L, C) token-sequence output shape, (b) the reported
    spatial out_size, (c) that L == out_h * out_w, (d) init_out_size matches
    the conv output-size formula, and (e) adaptive 'same'/'corner' padding.
    """
    # --- basic conv embedding, no padding, no norm ---
    B = 2
    H = 3
    W = 4
    C = 3
    embed_dims = 10
    kernel_size = 3
    stride = 1
    dummy_input = torch.rand(B, C, H, W)
    patch_merge_1 = PatchEmbed(
        in_channels=C,
        embed_dims=embed_dims,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=1,
        norm_cfg=None)
    x1, shape = patch_merge_1(dummy_input)
    # test out shape
    assert x1.shape == (2, 2, 10)
    # test outsize is correct
    assert shape == (1, 2)
    # test L = out_h * out_w
    assert shape[0] * shape[1] == x1.shape[1]
    # --- dilated embedding ---
    B = 2
    H = 10
    W = 10
    C = 3
    embed_dims = 10
    kernel_size = 5
    stride = 2
    dummy_input = torch.rand(B, C, H, W)
    # test dilation
    patch_merge_2 = PatchEmbed(
        in_channels=C,
        embed_dims=embed_dims,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=2,
        norm_cfg=None,
    )
    x2, shape = patch_merge_2(dummy_input)
    # test out shape
    assert x2.shape == (2, 1, 10)
    # test outsize is correct
    assert shape == (1, 1)
    # test L = out_h * out_w
    assert shape[0] * shape[1] == x2.shape[1]
    stride = 2
    input_size = (10, 10)
    dummy_input = torch.rand(B, C, H, W)
    # test stride and norm
    patch_merge_3 = PatchEmbed(
        in_channels=C,
        embed_dims=embed_dims,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=2,
        norm_cfg=dict(type='LN'),
        input_size=input_size)
    x3, shape = patch_merge_3(dummy_input)
    # test out shape
    assert x3.shape == (2, 1, 10)
    # test outsize is correct
    assert shape == (1, 1)
    # test L = out_h * out_w
    assert shape[0] * shape[1] == x3.shape[1]
    # test the init_out_size with nn.Unfold
    # conv output size: (in - dilation*(k-1) - 1) // stride + 1, here
    # dilation*(k-1) == 2*4.
    assert patch_merge_3.init_out_size[1] == (input_size[0] - 2 * 4 -
                                              1) // 2 + 1
    assert patch_merge_3.init_out_size[0] == (input_size[0] - 2 * 4 -
                                              1) // 2 + 1
    # --- init_out_size must agree with the real forward output ---
    H = 11
    W = 12
    input_size = (H, W)
    dummy_input = torch.rand(B, C, H, W)
    # test stride and norm
    patch_merge_3 = PatchEmbed(
        in_channels=C,
        embed_dims=embed_dims,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=2,
        norm_cfg=dict(type='LN'),
        input_size=input_size)
    _, shape = patch_merge_3(dummy_input)
    # when input_size equal to real input
    # the out_size should be equal to `init_out_size`
    assert shape == patch_merge_3.init_out_size
    # NOTE(review): this repeats the previous check verbatim — possibly a
    # copy/paste leftover; confirm against upstream before removing.
    input_size = (H, W)
    dummy_input = torch.rand(B, C, H, W)
    # test stride and norm
    patch_merge_3 = PatchEmbed(
        in_channels=C,
        embed_dims=embed_dims,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=2,
        norm_cfg=dict(type='LN'),
        input_size=input_size)
    _, shape = patch_merge_3(dummy_input)
    # when input_size equal to real input
    # the out_size should be equal to `init_out_size`
    assert shape == patch_merge_3.init_out_size
    # test adap padding
    for padding in ('same', 'corner'):
        in_c = 2
        embed_dims = 3
        B = 2
        # test stride is 1
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (1, 1)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(
            in_channels=in_c,
            embed_dims=embed_dims,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_embed(x)
        assert x_out.size() == (B, 25, 3)
        assert out_size == (5, 5)
        assert x_out.size(1) == out_size[0] * out_size[1]
        # test kernel_size == stride
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(
            in_channels=in_c,
            embed_dims=embed_dims,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_embed(x)
        assert x_out.size() == (B, 1, 3)
        assert out_size == (1, 1)
        assert x_out.size(1) == out_size[0] * out_size[1]
        # test kernel_size == stride
        input_size = (6, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(
            in_channels=in_c,
            embed_dims=embed_dims,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_embed(x)
        assert x_out.size() == (B, 2, 3)
        assert out_size == (2, 1)
        assert x_out.size(1) == out_size[0] * out_size[1]
        # test different kernel_size with different stride
        input_size = (6, 5)
        kernel_size = (6, 2)
        stride = (6, 2)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(
            in_channels=in_c,
            embed_dims=embed_dims,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_embed(x)
        assert x_out.size() == (B, 3, 3)
        assert out_size == (1, 3)
        assert x_out.size(1) == out_size[0] * out_size[1]
def test_patch_merging():
    """Unit test for PatchMerging.

    Verifies the merged token-sequence shape, the reported out_size, and
    the L == out_h * out_w invariant, for integer padding as well as
    adaptive 'same'/'corner' padding.
    """
    # Test the model with int padding
    in_c = 3
    out_c = 4
    kernel_size = 3
    stride = 3
    padding = 1
    dilation = 1
    bias = False
    # test the case `pad_to_stride` is False
    patch_merge = PatchMerging(
        in_channels=in_c,
        out_channels=out_c,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias)
    # inputs are token sequences: (batch, L, channels) with L = 10 * 10
    B, L, C = 1, 100, 3
    input_size = (10, 10)
    x = torch.rand(B, L, C)
    x_out, out_size = patch_merge(x, input_size)
    assert x_out.size() == (1, 16, 4)
    assert out_size == (4, 4)
    # assert out size is consistent with real output
    assert x_out.size(1) == out_size[0] * out_size[1]
    # larger kernel with dilation
    in_c = 4
    out_c = 5
    kernel_size = 6
    stride = 3
    padding = 2
    dilation = 2
    bias = False
    patch_merge = PatchMerging(
        in_channels=in_c,
        out_channels=out_c,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias)
    B, L, C = 1, 100, 4
    input_size = (10, 10)
    x = torch.rand(B, L, C)
    x_out, out_size = patch_merge(x, input_size)
    assert x_out.size() == (1, 4, 5)
    assert out_size == (2, 2)
    # assert out size is consistent with real output
    assert x_out.size(1) == out_size[0] * out_size[1]
    # Test with adaptive padding
    for padding in ('same', 'corner'):
        in_c = 2
        out_c = 3
        B = 2
        # test stride is 1
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (1, 1)
        dilation = 1
        bias = False
        L = input_size[0] * input_size[1]
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(
            in_channels=in_c,
            out_channels=out_c,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_merge(x, input_size)
        assert x_out.size() == (B, 25, 3)
        assert out_size == (5, 5)
        assert x_out.size(1) == out_size[0] * out_size[1]
        # test kernel_size == stride
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        L = input_size[0] * input_size[1]
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(
            in_channels=in_c,
            out_channels=out_c,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_merge(x, input_size)
        assert x_out.size() == (B, 1, 3)
        assert out_size == (1, 1)
        assert x_out.size(1) == out_size[0] * out_size[1]
        # test kernel_size == stride
        input_size = (6, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        L = input_size[0] * input_size[1]
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(
            in_channels=in_c,
            out_channels=out_c,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_merge(x, input_size)
        assert x_out.size() == (B, 2, 3)
        assert out_size == (2, 1)
        assert x_out.size(1) == out_size[0] * out_size[1]
        # test different kernel_size with different stride
        input_size = (6, 5)
        kernel_size = (6, 2)
        stride = (6, 2)
        dilation = 1
        bias = False
        L = input_size[0] * input_size[1]
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(
            in_channels=in_c,
            out_channels=out_c,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias)
        x_out, out_size = patch_merge(x, input_size)
        assert x_out.size() == (B, 3, 3)
        assert out_size == (1, 3)
        assert x_out.size(1) == out_size[0] * out_size[1]
def test_detr_transformer_dencoder_encoder_layer():
    """Unit test for DetrTransformerDecoder/DetrTransformerEncoder configs.

    NOTE(review): "dencoder" in the test name is a typo for "decoder";
    left unchanged so test discovery/references keep working.
    """
    # A valid decoder config: operation_order starts with 'norm' => pre_norm.
    config = ConfigDict(
        dict(
            return_intermediate=True,
            num_layers=6,
            transformerlayers=dict(
                type='DetrTransformerDecoderLayer',
                attn_cfgs=dict(
                    type='MultiheadAttention',
                    embed_dims=256,
                    num_heads=8,
                    dropout=0.1),
                feedforward_channels=2048,
                ffn_dropout=0.1,
                operation_order=(
                    'norm',
                    'self_attn',
                    'norm',
                    'cross_attn',
                    'norm',
                    'ffn',
                ))))
    assert DetrTransformerDecoder(**config).layers[0].pre_norm
    assert len(DetrTransformerDecoder(**config).layers) == 6
    DetrTransformerDecoder(**config)
    with pytest.raises(AssertionError):
        # A list of 5 layer configs contradicts num_layers=6.
        config = ConfigDict(
            dict(
                return_intermediate=True,
                num_layers=6,
                transformerlayers=[
                    dict(
                        type='DetrTransformerDecoderLayer',
                        attn_cfgs=dict(
                            type='MultiheadAttention',
                            embed_dims=256,
                            num_heads=8,
                            dropout=0.1),
                        feedforward_channels=2048,
                        ffn_dropout=0.1,
                        operation_order=('self_attn', 'norm', 'cross_attn',
                                         'norm', 'ffn', 'norm'))
                ] * 5))
        DetrTransformerDecoder(**config)
    # Decoder layer config (7 ops incl. cross_attn) fed to an *encoder*
    # must be rejected.
    config = ConfigDict(
        dict(
            num_layers=6,
            transformerlayers=dict(
                type='DetrTransformerDecoderLayer',
                attn_cfgs=dict(
                    type='MultiheadAttention',
                    embed_dims=256,
                    num_heads=8,
                    dropout=0.1),
                feedforward_channels=2048,
                ffn_dropout=0.1,
                operation_order=('norm', 'self_attn', 'norm', 'cross_attn',
                                 'norm', 'ffn', 'norm'))))
    with pytest.raises(AssertionError):
        # len(operation_order) == 6
        DetrTransformerEncoder(**config)
def test_transformer():
    """Smoke test: a full DETR-style Transformer (encoder + decoder) can be
    constructed from config and its weights initialized."""
    config = ConfigDict(
        dict(
            encoder=dict(
                type='DetrTransformerEncoder',
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=[
                        dict(
                            type='MultiheadAttention',
                            embed_dims=256,
                            num_heads=8,
                            dropout=0.1)
                    ],
                    feedforward_channels=2048,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
            decoder=dict(
                type='DetrTransformerDecoder',
                return_intermediate=True,
                num_layers=6,
                transformerlayers=dict(
                    type='DetrTransformerDecoderLayer',
                    attn_cfgs=dict(
                        type='MultiheadAttention',
                        embed_dims=256,
                        num_heads=8,
                        dropout=0.1),
                    feedforward_channels=2048,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
                                     'ffn', 'norm')),
            )))
    transformer = Transformer(**config)
    # must not raise
    transformer.init_weights()
| |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes, json
class Profile:
	"""
	A profile object is created at the beginning of every request with details of the user.
	The global profile object is `webnotes.user`
	"""
	def __init__(self, name=''):
		# Lazily populated by get_defaults() / build_permissions().
		self.defaults = None
		# Fall back to the logged-in session user when no name is given.
		self.name = name or webnotes.session.get('user')
		self.roles = []
		# Permission buckets, filled by build_permissions().
		self.all_read = []
		self.can_create = []
		self.can_read = []
		self.can_write = []
		self.can_cancel = []
		self.can_search = []
		self.can_get_report = []
		self.allow_modules = []
		self.in_create = []
	def get_roles(self):
		"""Return (and cache) the list of roles for this user."""
		if not self.roles:
			self.roles = get_roles(self.name)
		return self.roles
	def build_doctype_map(self):
		"""Build a map of special doctype properties keyed by doctype name."""
		self.doctype_map = {}
		for r in webnotes.conn.sql("""select name, in_create, issingle, istable,
			read_only, module from tabDocType""", as_dict=1):
			self.doctype_map[r['name']] = r
	def build_perm_map(self):
		"""Build a map of permissions at level 0, keyed by doctype.

		NOTE(review): the role list is interpolated into the SQL string
		rather than parameterized; roles come from the database, but a role
		name containing a quote would break this query.
		"""
		self.perm_map = {}
		for r in webnotes.conn.sql("""select parent, `read`, `write`, `create`, `submit`, `cancel`, `report`
			from tabDocPerm where docstatus=0
			and ifnull(permlevel,0)=0
			and parent not like "old_parent:%%"
			and role in ('%s')""" % "','".join(self.get_roles()), as_dict=1):
			dt = r['parent']
			if not dt in self.perm_map:
				self.perm_map[dt] = {}
			# A right granted by any role wins (logical OR across roles).
			for k in ('read', 'write', 'create', 'submit', 'cancel', 'report'):
				if not self.perm_map[dt].get(k):
					self.perm_map[dt][k] = r.get(k)
	def build_permissions(self):
		"""build lists of what the user can read / write / create
		quirks:
			read_only => Not in Search
			in_create => Not in create
		"""
		self.build_doctype_map()
		self.build_perm_map()
		for dt in self.doctype_map:
			dtp = self.doctype_map[dt]
			p = self.perm_map.get(dt, {})
			if not dtp.get('istable'):
				# buckets are mutually exclusive here; widened below.
				if p.get('create') and not dtp.get('issingle'):
					if dtp.get('in_create'):
						self.in_create.append(dt)
					else:
						self.can_create.append(dt)
				elif p.get('write'):
					self.can_write.append(dt)
				elif p.get('read'):
					if dtp.get('read_only'):
						self.all_read.append(dt)
					else:
						self.can_read.append(dt)
			if p.get('cancel'):
				self.can_cancel.append(dt)
			if (p.get('read') or p.get('write') or p.get('create')):
				if p.get('report'):
					self.can_get_report.append(dt)
				if not dtp.get('istable'):
					if not dtp.get('issingle') and not dtp.get('read_only'):
						self.can_search.append(dt)
				if not dtp.get('module') in self.allow_modules:
					self.allow_modules.append(dtp.get('module'))
		# create implies write implies read implies "readable at all".
		self.can_write += self.can_create
		self.can_write += self.in_create
		self.can_read += self.can_write
		self.all_read += self.can_read
	def get_defaults(self):
		"""Load and cache this user's default values."""
		import webnotes.defaults
		self.defaults = webnotes.defaults.get_defaults(self.name)
		return self.defaults
	# update recent documents
	def update_recent(self, dt, dn):
		"""Push [dt, dn] to the front of the cached recent-documents list,
		de-duplicating and keeping at most 20 entries."""
		rdl = webnotes.cache().get_value("recent:" + self.name) or []
		new_rd = [dt, dn]
		# clear if exists
		for i in range(len(rdl)):
			rd = rdl[i]
			if rd==new_rd:
				del rdl[i]
				break
		if len(rdl) > 19:
			rdl = rdl[:19]
		rdl = [new_rd] + rdl
		r = webnotes.cache().set_value("recent:" + self.name, rdl)
	def get_can_read(self):
		"""Return the list of doctypes that the user can read."""
		if not self.can_read:
			self.build_permissions()
		return self.can_read
	def load_profile(self):
		"""Return a dict of profile details + permission lists for the client.

		Raises IndexError if no tabProfile row exists for this user.
		"""
		d = webnotes.conn.sql("""select email, first_name, last_name,
			email_signature, background_image, user_type
			from tabProfile where name = %s""", self.name, as_dict=1)[0]
		if not self.can_read:
			self.build_permissions()
		d.name = self.name
		d.recent = json.dumps(webnotes.cache().get_value("recent:" + self.name) or [])
		d['roles'] = self.get_roles()
		d['defaults'] = self.get_defaults()
		d['can_create'] = self.can_create
		d['can_write'] = self.can_write
		d['can_read'] = list(set(self.can_read))
		d['can_cancel'] = list(set(self.can_cancel))
		d['can_get_report'] = list(set(self.can_get_report))
		d['allow_modules'] = self.allow_modules
		d['all_read'] = self.all_read
		d['can_search'] = list(set(self.can_search))
		d['in_create'] = self.in_create
		return d
def get_user_fullname(user):
	"""Return the user's full name ("First Last"), or '' if unknown.

	Replaces the fragile `cond and a or b` idiom with an explicit
	conditional: the old form silently yields '' whenever the fetched
	name itself is falsy (None or ''), which is the intended fallback,
	but the pattern is a well-known trap and reads ambiguously.
	"""
	fullname = webnotes.conn.sql("SELECT CONCAT_WS(' ', first_name, last_name) FROM `tabProfile` WHERE name=%s", user)
	# Row may be missing, or CONCAT_WS may produce ''/None - fall back to ''.
	if fullname and fullname[0][0]:
		return fullname[0][0]
	return ''
def get_system_managers():
	"""Return the names of all enabled, non-built-in profiles that hold
	the System Manager role."""
	rows = webnotes.conn.sql("""select distinct name
		from tabProfile p
		where docstatus < 2 and enabled = 1
		and name not in ("Administrator", "Guest")
		and exists (select * from tabUserRole ur
			where ur.parent = p.name and ur.role="System Manager")""")
	# each row is a 1-tuple (name,)
	return [name for (name,) in rows]
def add_role(profile, role):
	"""Append `role` to the profile's user_roles child table and save."""
	bean = webnotes.bean("Profile", profile)
	role_row = {
		"doctype": "UserRole",
		"parentfield": "user_roles",
		"role": role
	}
	bean.doclist.append(role_row)
	bean.save()
def add_system_manager(email, first_name=None, last_name=None):
	"""Create an enabled Profile for `email` and grant it every role
	except the built-in Administrator/Guest/All roles."""
	bean = webnotes.new_bean("Profile")
	fields = {
		"name": email,
		"email": email,
		"enabled": 1,
		"first_name": first_name or email,
		"last_name": last_name
	}
	bean.doc.fields.update(fields)
	bean.insert()
	# grant every non-built-in role
	roles = webnotes.conn.sql_list("""select name from `tabRole`
		where name not in ("Administrator", "Guest", "All")""")
	bean.make_controller().add_roles(*roles)
def get_roles(username=None, with_standard=True):
	"""Return the roles of `username` (defaults to the session user).

	'All' is always appended; Guest only ever has the Guest role. When
	`with_standard` is False, the standard roles are stripped out.
	"""
	if not username:
		username = webnotes.session.user
	if username=='Guest':
		return ['Guest']
	rows = webnotes.conn.sql("""select role from tabUserRole
		where parent=%s and role!='All'""", username)
	roles = [row[0] for row in rows] + ['All']
	# filter standard if required
	if not with_standard:
		roles = [role for role in roles
			if role not in ['All', 'Guest', 'Administrator']]
	return roles
| |
#!/usr/bin/env python
"""
Performix
~~~~~~~
    :copyright: (c) 2014 by Biometix; see AUTHORS for more details.
:license:
"""
import os,sys
import redis
import copy,datetime
import urlparse
import sqlite3
from werkzeug.wrappers import Request, Response
from werkzeug.routing import Map, Rule
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.wsgi import SharedDataMiddleware
from werkzeug.utils import redirect
from werkzeug.utils import cached_property
from werkzeug.security import generate_password_hash,check_password_hash,pbkdf2_hex
from werkzeug.wrappers import BaseRequest
from werkzeug.contrib.securecookie import SecureCookie
COOKIE_SECRET = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea'
from modules.util import remove_private,returnJson,test_url, test_finished
from modules.data import PxData
from modules.bias import Verify
from modules.obb import OpenBiometricBroker
import argparse
VERSION = "0.1b"
class Request(BaseRequest):
    """Request with access to the signed client session cookie.

    NOTE(review): this deliberately shadows the `Request` imported from
    werkzeug.wrappers earlier in this module.
    """
    @cached_property
    def client_session(self):
        # Load (and cache per-request) the SecureCookie signed with COOKIE_SECRET.
        return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET)
import json,datetime
from jinja2 import Environment, FileSystemLoader
def base36_encode(number):
    """Encode a non-negative integer as a lowercase base-36 string."""
    assert number >= 0, 'positive integer required'
    digits = '0123456789abcdefghijklmnopqrstuvwxyz'
    if number == 0:
        return '0'
    encoded = []
    while number:
        number, rem = divmod(number, 36)
        encoded.append(digits[rem])
    encoded.reverse()
    return ''.join(encoded)
def is_valid_url(url):
    """Return True when the URL uses the http or https scheme."""
    scheme = urlparse.urlparse(url).scheme
    return scheme in ('http', 'https')
def get_hostname(url):
    """Return the network location (host[:port]) component of a URL."""
    parts = urlparse.urlparse(url)
    return parts.netloc
class PxLogin(object):
    """Login page routes plugged into IdentityVerificationService.

    NOTE(review): both rules map '/login'; with werkzeug's Map the first
    match wins, so the 'login_user' endpoint is unreachable as written -
    confirm whether a method restriction or second path was intended.
    """
    def __init__(self):
        self.url_map = [
            Rule('/login', endpoint='login_page'),
            Rule('/login', endpoint='login_user')
        ]
    def on_login_page(self,request):
        # render_template is injected by IdentityVerificationService at dispatch time.
        response = self.render_template('link_view.html',url=[{'n':'h1'},{'n':'h2'}])
        return response
    #def on_login_user(self, ):
    #    request.client_session["test4"]="h2"
from jsonrpc import JSONRPCResponseManager, dispatcher
class Struct(object):
    """Recursively wrap a dict so its keys become object attributes.

    Nested dicts become Structs; containers are rebuilt with their
    elements wrapped. (Python 2 only: relies on dict.iteritems().)
    """
    def __init__(self, data):
        for name, value in data.iteritems():
            setattr(self, name, self._wrap(value))
    def _wrap(self, value):
        if isinstance(value, dict):
            return Struct(value)
        if isinstance(value, (tuple, list, set, frozenset)):
            # preserve the concrete container type
            return type(value)([self._wrap(v) for v in value])
        return value
class IdentityVerificationService(object):
    """WSGI application: first tries each request as a JSON-RPC call, then
    falls back to werkzeug URL routing over the registered Px* modules."""
    def __init__(self, CONFIG):
        # CONFIG is the argparse namespace built in __main__.
        self.redis = redis.Redis(CONFIG.redis_host, int(CONFIG.redis_port))
        template_path = os.path.join(os.path.dirname(__file__), 'templates')
        self.jinja_env = Environment(loader=FileSystemLoader(template_path),
                                     autoescape=True)
        self.jinja_env.filters['hostname'] = get_hostname
        # Pluggable feature modules; each contributes rules via .url_map.
        self.pxClasses = [PxData(self.redis),
                          PxLogin(),
                          OpenBiometricBroker(CONFIG,self),
                          Verify(CONFIG,self)
                          ]
        self.urls = [
            Rule('/', endpoint='index'),
            #Rule('/new', endpoint='new_url'),
            # NOTE(review): 'sessonid' looks like a typo for 'sessionid',
            # but it is the live route - do not change silently.
            Rule('/sessonid', endpoint='session_id'),
            Rule('/gitpull', endpoint='gitpull')
        ]
        for c in self.pxClasses:
            self.urls.extend(c.url_map)
        self.url_map = Map(self.urls)
    @cached_property
    def read_index(self, ):
        # Read once, cached for the life of the process.
        return open("templates/index.html").read()
    def on_index(self, request ):
        return Response(self.read_index,mimetype='text/html')
    def error_404(self,request):
        # First retry the same URL with '.html' appended; only then 404.
        if not request.url.endswith('.html'):
            return redirect(request.url+'.html')
        response = self.render_template('404.html')
        response.status_code = 404
        return response
    def render_template(self, template_name, **context):
        t = self.jinja_env.get_template(template_name)
        return Response(t.render(context), mimetype='text/html')
    def dispatch_request(self, request):
        print "hello"
        # JSON RPC Handler
        response = JSONRPCResponseManager.handle(
            request.data, dispatcher)
        # print "dispatch", response.json,response.error
        if response.error:
            # if the error is a parse json error (-32700) means it is not an RPC call
            if response.error['code']!=-32700:
                # return the error json
                return returnJson(response.json)
            else:
                pass # continue to process request below
        else:
            #print "*********#### "+help(request)
            print request.remote_addr, request.remote_user #.host_url+" "+request.url
            return returnJson(response.json)
        adapter = self.url_map.bind_to_environ(request.environ)
        # Normal Request Handler
        try:
            endpoint, values = adapter.match()
            # Handlers on this object win over the plug-in modules.
            if hasattr(self, 'on_' + endpoint):
                return getattr(self, 'on_' + endpoint)(request, **values)
            else:
                for pxClass in self.pxClasses:
                    if hasattr(pxClass, 'on_' + endpoint):
                        # Inject render_template so modules can render pages.
                        pxClass.render_template=self.render_template
                        # '?docs' returns the handler's docstring instead of running it.
                        if 'docs' in request.args:
                            return Response("<pre>Documentation"+str(dir(request))+"\n"+request.path+'\n'+request.remote_addr+
                                            getattr(pxClass, 'on_' + endpoint).__doc__+"</pre>",
                                            mimetype='text/html')
                        result = getattr(pxClass, 'on_' + endpoint)(request, **values)
                        # Non-Response results are serialized as JSON.
                        if result.__class__!=Response:
                            return returnJson(result)
                        return result
        except NotFound, e:
            return self.error_404(request)
        except HTTPException, e:
            return e
    def on_session_id(self, request):
        # Dump the current signed-session values as JSON.
        return returnJson(request.client_session.values())
    def on_gitpull(self, request):
        # Runs `git pull` on the server's own checkout (self-update endpoint).
        from subprocess import check_output,STDOUT
        pull_out = check_output('git pull',stderr=STDOUT, shell=True)
        status_out = check_output('git status',stderr=STDOUT, shell=True)
        return returnJson({'result':pull_out, 'git status':status_out})
    def wsgi_app(self, environ, start_response):
        request = Request(environ)
        response = self.dispatch_request(request)
        # Persist any session changes on the outgoing response.
        request.client_session.save_cookie(response)
        return response(environ, start_response)
    def __call__(self, environ, start_response):
        return self.wsgi_app(environ, start_response)
def create_app(CONFIG, with_static=True):
    """Build the WSGI app; optionally serve /static via SharedDataMiddleware."""
    app = IdentityVerificationService(CONFIG)
    if not with_static:
        return app
    static_dir = os.path.join(os.path.dirname(__file__), 'static')
    app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {'/static': static_dir})
    return app
if __name__ == '__main__':
    from werkzeug.serving import run_simple
    import argparse
    # Parse command line args
    # note that args can be read from a file using the @ command
    parser = argparse.ArgumentParser(description='Identity Verification Service',fromfile_prefix_chars='@')
    parser.add_argument('--port','-p', default=8000,
                        help='set the port (default 8000)')
    parser.add_argument('--redis_host', default='localhost',
                        help='set the redis url (default localhost)')
    parser.add_argument('--redis_port', default=6379,
                        help='set the redis port (default 6379)')
    parser.add_argument('--hub_url','-u', default=False,
                        help='Set the hub address')
    parser.add_argument('--no_matcher', default=False,
                        help='Do not enable matcher')
    parser.add_argument('--type','-t', default="DLA",
                        help='Set the type of System (DLA=Drivers Licence, PASS=Passport')
    parser.add_argument('--country','-c', default="AU",
                        help='Set the country code (default=AU)')
    parser.add_argument('--location','-l', default="unknown",
                        help='Set location (default=unknown)')
    parser.add_argument('--name','-n', default=None,
                        help='Set name (default=unknown)')
    parser.add_argument('--logo', default='',
                        help='Set logo url')
    parser.add_argument('--bgcolor', default='',
                        help='Set background color')
    parser.add_argument('--version', action='version', version='%(prog)s '+str(VERSION))
    args = parser.parse_args()
    import time,random
    threaded = True;
    # When registering with a hub: stagger startup by a random delay and
    # run single-threaded (presumably to serialize hub registration -
    # TODO confirm intent).
    if (args.hub_url):
        time.sleep(2+random.random()*3)
        threaded = False
    app = create_app(args)
    run_simple('0.0.0.0', int(args.port), app, use_debugger=True, use_reloader=True, threaded = threaded) #, ssl_context = 'adhoc')
| |
#!/usr/bin/env python
import roslib
roslib.load_manifest('q50_tablet_backend')
import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import time
import zmq
from math import sqrt
# Stop flag shared with main()'s capture loop. NOTE(review): a `global`
# statement at module level is a no-op; main() re-declares it where it matters.
global done_working
done_working = False
# ZMQ port the diagnostics publisher connects to, and the port this
# backend binds for incoming commands.
ZMQ_DIAGNOSTICS_SEND_PORT = 5000
ZMQ_Q50_TABLET_BACKEND_PORT = 5001
class RosTopicManager:
    """Subscribes to writer-ack, GPS and controller ROS topics and caches
    the latest values so main()'s loop can poll them."""
    def __init__(self):
        self.bridge = CvBridge()
        self.lastImageTime = 0
        self.lastImage = None
        # Set when an INSCOV header is seen; the *next* message carries the data.
        self.nextLineInsCov = False
        self.gps_markpvaa_counter = 0
        # Translation/rotation std-devs (from INSCOV) and last GPS fix.
        (self.tx, self.ty, self.tz, self.rx, self.ry, self.rz) = (0,0,0,0,0,0)
        (self.lat, self.lon) = (0,0)
        #self.image_sub = rospy.Subscriber("/fwd_left/image_raw",Image,self.image_callback)
        # One ack counter + subscriber per camera writer node.
        self.writer_ack_ov_601_counter = 0
        self.writer_sub_ov_601 = rospy.Subscriber("/ov_601_writer/writer_ack",String,self.writer_ack_ov_601_callback)
        self.writer_ack_ov_602_counter = 0
        self.writer_sub_ov_602 = rospy.Subscriber("/ov_602_writer/writer_ack",String,self.writer_ack_ov_602_callback)
        self.writer_ack_ov_603_counter = 0
        self.writer_sub_ov_603 = rospy.Subscriber("/ov_603_writer/writer_ack",String,self.writer_ack_ov_603_callback)
        self.writer_ack_ov_604_counter = 0
        self.writer_sub_ov_604 = rospy.Subscriber("/ov_604_writer/writer_ack",String,self.writer_ack_ov_604_callback)
        self.controller = rospy.Subscriber("/arduino/camera_gps_controller", String, self.controller_callback)
        """
        self.writer_ack_fwd_left_counter = 0
        self.writer_sub_fwd_left = rospy.Subscriber("/fwd_left_writer/writer_ack",String,self.writer_ack_fwd_left_callback)
        self.writer_ack_fwd_right_counter = 0
        self.writer_sub_fwd_right = rospy.Subscriber("/fwd_right_writer/writer_ack",String,self.writer_ack_fwd_right_callback)
        """
        """
        self.writer_ack_wfov_front_counter = 0
        self.writer_sub_wfov_front = rospy.Subscriber("/wfov_front_writer/writer_ack",String,self.writer_ack_wfov_front_callback)
        self.writer_ack_wfov_right_counter = 0
        self.writer_sub_wfov_right = rospy.Subscriber("/wfov_right_writer/writer_ack",String,self.writer_ack_wfov_right_callback)
        self.writer_ack_wfov_left_counter = 0
        self.writer_sub_wfov_left = rospy.Subscriber("/wfov_left_writer/writer_ack",String,self.writer_ack_wfov_left_callback)
        self.writer_ack_wfov_back_counter = 0
        self.writer_sub_wfov_back = rospy.Subscriber("/wfov_back_writer/writer_ack",String,self.writer_ack_wfov_back_callback)
        """
        self.gps_sub = rospy.Subscriber("/novatel_port_out",String,self.gps_callback)
    def getImage(self):
        # Latest downsampled camera frame, or None if none received yet.
        return self.lastImage
    def getWriterAckCount(self):
        """Return the per-writer ack counters plus the GPS mark counter."""
        #return (self.writer_ack_fwd_left_counter, self.writer_ack_fwd_right_counter, self.writer_ack_wfov_front_counter, self.writer_ack_wfov_left_counter, self.writer_ack_wfov_right_counter, self.writer_ack_wfov_back_counter, self.gps_markpvaa_counter)
        return (self.writer_ack_ov_601_counter, self.writer_ack_ov_602_counter, self.writer_ack_ov_603_counter, self.writer_ack_ov_604_counter, self.gps_markpvaa_counter)
    def getLatLong(self):
        # Formatted "lat,lon" of the last fix (0,0 until one arrives).
        return '%.4f,%.4f' % (self.lat, self.lon)
    def getGPSUncertainty(self):
        # Two lines: translation std-devs, then rotation std-devs.
        tokens = (self.tx, self.ty, self.tz, self.rx, self.ry, self.rz)
        return '%.2f,%.2f,%.2f\n%.2f,%.2f,%.2f' % tokens
    """
    def writer_ack_fwd_left_callback(self, data):
        self.writer_ack_fwd_left_counter += 1
    def writer_ack_fwd_right_callback(self, data):
        self.writer_ack_fwd_right_counter += 1
    """
    def writer_ack_ov_601_callback(self, data):
        self.writer_ack_ov_601_counter += 1
    def writer_ack_ov_602_callback(self, data):
        self.writer_ack_ov_602_counter += 1
    def writer_ack_ov_603_callback(self, data):
        self.writer_ack_ov_603_counter += 1
    def writer_ack_ov_604_callback(self, data):
        self.writer_ack_ov_604_counter += 1
    def controller_callback(self, msg):
        # A TRIGGER from the camera/GPS controller restarts the mark count.
        if 'TRIGGER' in msg.data:
            self.gps_markpvaa_counter = 0
    """
    def writer_ack_wfov_front_callback(self, data):
        self.writer_ack_wfov_front_counter += 2
    def writer_ack_wfov_left_callback(self, data):
        self.writer_ack_wfov_left_counter += 2
    def writer_ack_wfov_right_callback(self, data):
        self.writer_ack_wfov_right_counter += 2
    def writer_ack_wfov_back_callback(self, data):
        self.writer_ack_wfov_back_counter += 2
    """
    def gps_callback(self, msg):
        # Parses raw NovAtel ASCII logs forwarded on /novatel_port_out.
        # Assumes MARK2PVAA carries lat/lon at fields 3/4 after the header,
        # and that the message *after* an INSCOV header carries a 3x3
        # translation + 3x3 rotation covariance (diagonals at offsets
        # 0/4/8) - TODO confirm against the NovAtel log reference.
        header = msg.data.split(',')[0]
        if 'MARK2PVAA' in header:
            self.gps_markpvaa_counter += 1
            header_tokens = msg.data.split(';')
            gps_data = header_tokens[1]
            tokens = gps_data.split(',')
            self.lat = float(tokens[3])
            self.lon = float(tokens[4])
        if 'INSCOV' in header:
            self.nextLineInsCov = True
            return
        if self.nextLineInsCov:
            self.nextLineInsCov = False
            tokens = msg.data.split(' ')
            offset = 7
            # std-dev = sqrt of the covariance diagonal entries
            self.tx = sqrt(float(tokens[offset + 0]))
            self.ty = sqrt(float(tokens[offset + 4]))
            self.tz = sqrt(float(tokens[offset + 8]))
            offset += 9
            self.rx = sqrt(float(tokens[offset + 0]))
            self.ry = sqrt(float(tokens[offset + 4]))
            self.rz = sqrt(float(tokens[offset + 8]))
    def image_callback(self,data):
        # Throttle to at most ~2 frames/sec; keep a 4x-downsampled copy.
        if time.time() - self.lastImageTime < 0.5:
            return
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "passthrough")
        except CvBridgeError, e:
            print e
        cv_image = cv2.pyrDown(cv_image)
        cv_image = cv2.pyrDown(cv_image)
        self.lastImage = cv_image
        self.lastImageTime = time.time()
def poll_msg(sock):
try:
msg = sock.recv(zmq.NOBLOCK)
except zmq.ZMQError, e:
if e.errno == zmq.EAGAIN:
pass
else:
raise
else:
return msg
def getStartDriversCommand():
    """Shell command that launches the sensor drivers."""
    cmd = 'rosrun q50_launch start_drivers.sh'
    return cmd
def getStopDriversCommand():
    """Shell command that stops the sensor drivers."""
    cmd = 'rosrun q50_launch stop_drivers.sh'
    return cmd
def getStartCollectionCommand(basename):
    """Shell command that starts data collection recording under `basename`."""
    cmd = 'rosrun q50_launch start_data_collection.sh ' + basename
    return cmd
def getStopCollectionCommand():
    """Shell command that stops data collection."""
    cmd = 'rosrun q50_launch stop_data_collection.sh'
    return cmd
def sendDiagnosticsMessage(pub_sock, msg):
    """Publish `msg` on the diagnostics socket; echo everything except
    camera-frame messages (those starting with 'C') to stdout."""
    if msg[0] != 'C':
        print msg
    pub_sock.send(msg, zmq.NOBLOCK)
def main(args):
    """Run the capture session: start the recorders, stream diagnostics
    (capture rates, GPS, camera preview) over ZMQ until the user sends
    TERMINATE or the frame budget expires, then stop the recorders.

    Returns 1 when stopped by the user, 0 on timeout.
    """
    global done_working
    rospy.init_node('q50_tablet_backend')
    basename = rospy.get_param('~basename', 'NONAME')
    maxframes = int(rospy.get_param('~maxframes', '9999999999'))
    print basename, maxframes
    zmq_context = zmq.Context()
    # PUB socket toward the tablet UI; SUB socket for incoming commands.
    diagnostics_pub = zmq_context.socket(zmq.PUB)
    diagnostics_pub.connect("tcp://localhost:"+str(ZMQ_DIAGNOSTICS_SEND_PORT))
    sendMessage = lambda x: sendDiagnosticsMessage(diagnostics_pub, x)
    my_commands = zmq_context.socket(zmq.SUB)
    my_commands.bind("tcp://*:"+str(ZMQ_Q50_TABLET_BACKEND_PORT))
    my_commands.setsockopt(zmq.SUBSCRIBE, '') # subscribe to all messages
    rt = RosTopicManager()
    import subprocess
    """
    sendMessage('WARN:Starting Drivers')
    start_drivers_p = subprocess.Popen(getStartDriversCommand(), shell=True)
    time.sleep(4)
    """
    sendMessage('WARN:Starting Recorders')
    start_collection_p = subprocess.Popen(getStartCollectionCommand(basename), shell=True)
    time.sleep(4)
    # presumably 50 frames/sec capture rate - TODO confirm
    total_execution_time = maxframes / 50;
    start_time = time.time()
    done_working = False
    quit_via_user_input = False
    while not done_working:
        # check if we should terminate. Two cases: user hits stop or time expires
        msg = poll_msg(my_commands)
        if msg == 'TERMINATE':
            done_working = True
            quit_via_user_input = True
        if time.time() - start_time > total_execution_time:
            done_working = True
        # Forward a preview frame when one is available.
        if rt.getImage() is not None:
            (success, img_data) = cv2.imencode('.png', rt.getImage())
            if success:
                sendMessage('CAM:' + img_data.tostring())
        sendMessage('INFOCAPTURERATE:' + str(rt.getWriterAckCount()))
        sendMessage('GPS:' + str(rt.getLatLong()))
        sendMessage('GPSUNCERTAINTY:'+ str(rt.getGPSUncertainty()))
        time.sleep(1.0)
    sendMessage('WARN:Stopping Recorders')
    stop_collection_p = subprocess.Popen(getStopCollectionCommand(), shell=True)
    time.sleep(8)
    """
    sendMessage('WARN:Stopping Drivers')
    stop_drivers_p = subprocess.Popen(getStopDriversCommand(), shell=True)
    """
    # Reap both shell scripts before reporting the final counts.
    start_collection_p.wait()
    print 'start collect proc fin'
    #start_drivers_p.wait()
    #print 'start driver proc fin'
    stop_collection_p.wait()
    print 'stop collect proc fin'
    #stop_drivers_p.wait()
    #print 'stop drivers proc fin'
    sendMessage('INFOCAPTURERATE:' + str(rt.getWriterAckCount()))
    if quit_via_user_input:
        return 1
    else:
        return 0
if __name__ == '__main__':
    # Exit status: 1 = stopped by user, 0 = frame budget exhausted.
    retval = main(sys.argv)
    print 'received retval = ', retval
    sys.exit(retval)
| |
import warnings
from django.conf import settings, UserSettingsHolder
from django.core import mail
from django.test.signals import template_rendered, setting_changed
from django.template import Template, loader, TemplateDoesNotExist
from django.template.loaders import cached
from django.utils.translation import deactivate
from django.utils.functional import wraps
__all__ = (
'Approximate', 'ContextList', 'get_runner', 'override_settings',
'setup_test_environment', 'teardown_test_environment',
)
RESTORE_LOADERS_ATTR = '_original_template_source_loaders'
class Approximate(object):
    """Wraps a value so equality tolerates rounding error up to `places`
    decimal places (mirrors assertAlmostEqual semantics)."""
    def __init__(self, val, places=7):
        self.val = val
        self.places = places
    def __repr__(self):
        return repr(self.val)
    def __eq__(self, other):
        # Exact match short-circuits; otherwise compare within tolerance.
        return self.val == other or round(abs(self.val - other), self.places) == 0
class ContextList(list):
    """A wrapper that provides direct key access to context items contained
    in a list of context objects: cl['key'] returns the value from the
    first context that contains 'key'.
    """
    def __getitem__(self, key):
        # Non-string keys behave like ordinary list indexing/slicing.
        if not isinstance(key, basestring):
            return super(ContextList, self).__getitem__(key)
        for subcontext in self:
            if key in subcontext:
                return subcontext[key]
        raise KeyError(key)
    def __contains__(self, key):
        try:
            self[key]
        except KeyError:
            return False
        return True
def instrumented_test_render(self, context):
    """
    An instrumented Template render method, providing a signal
    that can be intercepted by the test system Client
    """
    # Fire the signal first so listeners see the template/context pair
    # even if rendering below raises.
    template_rendered.send(sender=self, template=self, context=context)
    return self.nodelist.render(context)
def setup_test_environment():
    """Perform any global pre-test setup.

    - Install the instrumented Template renderer so rendered templates can
      be observed by the test Client.
    - Swap the email backend for the in-memory locmem backend and reset the
      shared outbox.
    - Deactivate any active translation so tests run under LANGUAGE_CODE.
    """
    # Keep the originals around so teardown_test_environment() can restore them.
    Template.original_render = Template._render
    Template._render = instrumented_test_render

    mail.original_email_backend = settings.EMAIL_BACKEND
    settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
    mail.outbox = []

    deactivate()
def teardown_test_environment():
    """Undo everything setup_test_environment() did.

    Restores the original Template renderer and email backend, then drops
    the bookkeeping attributes and the test outbox.
    """
    Template._render = Template.original_render
    del Template.original_render

    settings.EMAIL_BACKEND = mail.original_email_backend
    del mail.original_email_backend
    del mail.outbox
def get_warnings_state():
    """Return a snapshot of the warnings module's filter list.

    There is no public interface for this, but copying ``warnings.filters``
    works on Python 2.4 to 2.7; pass the result to
    restore_warnings_state() to roll back.
    """
    return list(warnings.filters)
def restore_warnings_state(state):
    """Reset ``warnings.filters`` to a snapshot taken by
    get_warnings_state()."""
    warnings.filters = list(state)
def get_runner(settings, test_runner_class=None):
    """Import and return the test-runner class named by a dotted path.

    Falls back to ``settings.TEST_RUNNER`` when no explicit path is given.
    """
    if not test_runner_class:
        test_runner_class = settings.TEST_RUNNER
    module_path, _sep, class_name = test_runner_class.rpartition('.')
    if not module_path:
        # Allow for Python 2.5 relative paths
        module_path = '.'
    test_module = __import__(module_path, {}, {}, class_name)
    return getattr(test_module, class_name)
def setup_test_template_loader(templates_dict, use_cached_loader=False):
    """Restrict Django template loading to an in-memory dictionary.

    Each key of ``templates_dict`` is a template name and each value the
    template source to return for it.  Use restore_template_loaders() to
    reinstate the original loaders.
    """
    if hasattr(loader, RESTORE_LOADERS_ATTR):
        raise Exception("loader.%s already exists" % RESTORE_LOADERS_ATTR)

    def test_template_loader(template_name, template_dirs=None):
        "A custom template loader that loads templates from a dictionary."
        try:
            source = templates_dict[template_name]
        except KeyError:
            raise TemplateDoesNotExist(template_name)
        return (source, "test:%s" % template_name)

    if use_cached_loader:
        template_loader = cached.Loader(('test_template_loader',))
        template_loader._cached_loaders = (test_template_loader,)
    else:
        template_loader = test_template_loader

    # Stash the real loaders so they can be restored later.
    setattr(loader, RESTORE_LOADERS_ATTR, loader.template_source_loaders)
    loader.template_source_loaders = (template_loader,)
    return template_loader
def restore_template_loaders():
    """Reinstate the template loaders that setup_test_template_loader()
    replaced, and drop the stash attribute."""
    loader.template_source_loaders = getattr(loader, RESTORE_LOADERS_ATTR)
    delattr(loader, RESTORE_LOADERS_ATTR)
class override_settings(object):
    """
    Acts as either a decorator, or a context manager. If it's a decorator it
    takes a function and returns a wrapped function. If it's a contextmanager
    it's used with the ``with`` statement. In either event entering/exiting
    are called before and after, respectively, the function/block is executed.
    """
    def __init__(self, **kwargs):
        # Settings to override, plus the settings holder active right now so
        # disable() can restore it later.
        self.options = kwargs
        self.wrapped = settings._wrapped
    def __enter__(self):
        self.enable()
    def __exit__(self, exc_type, exc_value, traceback):
        self.disable()
    def __call__(self, test_func):
        # Imported here to avoid a circular import at module load time.
        from django.test import TransactionTestCase
        if isinstance(test_func, type) and issubclass(test_func, TransactionTestCase):
            # Decorating a test-case class: wrap its setup/teardown hooks so
            # the overrides are active for the whole test case.
            original_pre_setup = test_func._pre_setup
            original_post_teardown = test_func._post_teardown
            def _pre_setup(innerself):
                self.enable()
                original_pre_setup(innerself)
            def _post_teardown(innerself):
                original_post_teardown(innerself)
                self.disable()
            test_func._pre_setup = _pre_setup
            test_func._post_teardown = _post_teardown
            return test_func
        else:
            # Decorating a plain callable: run it inside the context manager.
            @wraps(test_func)
            def inner(*args, **kwargs):
                with self:
                    return test_func(*args, **kwargs)
            return inner
    def enable(self):
        # Layer the overrides on top of the current settings, then notify
        # setting_changed listeners of each changed key.
        override = UserSettingsHolder(settings._wrapped)
        for key, new_value in self.options.items():
            setattr(override, key, new_value)
        settings._wrapped = override
        for key, new_value in self.options.items():
            setting_changed.send(sender=settings._wrapped.__class__,
                                 setting=key, value=new_value)
    def disable(self):
        # Restore the settings holder captured in __init__ and re-announce
        # the (now-restored) values to listeners.
        settings._wrapped = self.wrapped
        for key in self.options:
            new_value = getattr(settings, key, None)
            setting_changed.send(sender=settings._wrapped.__class__,
                                 setting=key, value=new_value)
def str_prefix(s):
    """Substitute the native-string repr prefix ('u' here) for %(_)s markers."""
    return s % dict(_='u')
| |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import division, print_function
"""
QMTest classes to support SCons' testing and Aegis-inspired workflow.
Thanks to Stefan Seefeld for the initial code.
"""
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
########################################################################
# Imports
########################################################################
import qm
import qm.common
import qm.test.base
from qm.fields import *
from qm.executable import *
from qm.test import database
from qm.test import test
from qm.test import resource
from qm.test import suite
from qm.test.result import Result
from qm.test.file_result_stream import FileResultStream
from qm.test.classes.text_result_stream import TextResultStream
from qm.test.classes.xml_result_stream import XMLResultStream
from qm.test.directory_suite import DirectorySuite
from qm.extension import get_extension_class_name, get_class_arguments_as_dictionary

import dircache
import imp
import os
import sys
import time
# Device that Trace() writes to: the controlling terminal, so trace output
# does not pollute the captured result streams.
if sys.platform == 'win32':
    console = 'con'
else:
    console = '/dev/tty'

def Trace(msg):
    """Write a debugging message directly to the console device."""
    # Use a context manager so the console handle is closed promptly; the
    # previous open(...).write(...) leaked one file object per call.
    with open(console, 'w') as f:
        f.write(msg)

# QMTest 2.3 hard-codes how it captures the beginning and end time by
# calling the qm.common.format_time_iso() function, which canonicalizes
# the time stamp in one-second granularity ISO format.  In order to get
# sub-second granularity, as well as to use the more precise time.clock()
# function on Windows, we must replace that function with our own.
orig_format_time_iso = qm.common.format_time_iso
if sys.platform == 'win32':
    time_func = time.clock
else:
    time_func = time.time

def my_format_time(time_secs=None):
    """Sub-second-precision timestamp (signature mandated by QMTest)."""
    return str(time_func())

qm.common.format_time_iso = my_format_time
########################################################################
# Classes
########################################################################
def get_explicit_arguments(e):
    """Return the subset of extension arguments set explicitly on *e*.

    This function can be removed once QMTest 2.4 is out.
    """
    arguments = get_class_arguments_as_dictionary(e.__class__)
    # An argument was given explicitly iff it appears in the instance dict;
    # computed fields are never recorded.
    return dict((name, e.__dict__[name])
                for name, field in arguments.items()
                if not field.IsComputed() and name in e.__dict__)
def check_exit_status(result, prefix, desc, status):
    """Record the exit status of a child process in *result*.

    Returns True when the process exited with status 0; otherwise marks the
    result as failed, annotates it with the exit code or fatal signal, and
    returns False.

    This function can be removed once QMTest 2.4 is out.
    """
    on_windows = sys.platform == "win32"
    if on_windows or os.WIFEXITED(status):
        # On Windows the status is already the exit code.
        exit_code = status if on_windows else os.WEXITSTATUS(status)
        if exit_code != 0:
            result.Fail("%s failed with exit code %d." % (desc, exit_code))
            result[prefix + "exit_code"] = str(exit_code)
            return False
    elif os.WIFSIGNALED(status):
        signal = os.WTERMSIG(status)
        result.Fail("%s received fatal signal %d." % (desc, signal))
        result[prefix + "signal"] = str(signal)
        return False
    else:
        # A process should only be able to stop by exiting, or
        # by being terminated with a signal.
        assert None
    return True
class Null:
    """Sentinel type: lets the helpers below tell "missing" apart from None."""
    pass

# Shared sentinel instance used by the annotation helpers below.
_null = Null()

# Interpreter attributes included in the test-run annotations.
sys_attributes = [
    'byteorder',
    'exec_prefix',
    'executable',
    'maxint',
    'maxunicode',
    'platform',
    'prefix',
    'version',
    'version_info',
]

def get_sys_values():
    """Return 'name=repr(value)' lines for the sys attributes present."""
    sys_attributes.sort()
    result = [(k, getattr(sys, k, _null)) for k in sys_attributes]
    result = [t for t in result if not t[1] is _null]
    result = [t[0] + '=' + repr(t[1]) for t in result]
    return '\n    '.join(result)

# Version/build attributes extracted from SCons modules for annotations.
module_attributes = [
    '__version__',
    '__build__',
    '__buildsys__',
    '__date__',
    '__developer__',
]

def get_module_info(module):
    """Return 'name=repr(value)' lines for the module attributes present."""
    module_attributes.sort()
    result = [(k, getattr(module, k, _null)) for k in module_attributes]
    result = [t for t in result if not t[1] is _null]
    result = [t[0] + '=' + repr(t[1]) for t in result]
    return '\n    '.join(result)

# Environment variables included in the test-run annotations.
environ_keys = [
    'PATH',
    'SCONS',
    'SCONSFLAGS',
    'SCONS_LIB_DIR',
    'PYTHON_ROOT',
    'QTDIR',
    'COMSPEC',
    'INTEL_LICENSE_FILE',
    'INCLUDE',
    'LIB',
    'MSDEVDIR',
    'OS',
    'PATHEXT',
    'SystemRoot',
    'TEMP',
    'TMP',
    'USERNAME',
    'VXDOMNTOOLS',
    'WINDIR',
    'XYZZY',  # BUG FIX: a missing comma used to merge this with 'ENV'.
    'ENV',
    'HOME',
    'LANG',
    'LANGUAGE',
    'LC_ALL',
    'LC_MESSAGES',
    'LOGNAME',
    'MACHINE',
    'OLDPWD',
    'PWD',
    'OPSYS',
    'SHELL',
    'TMPDIR',
    'USER',
]

def get_environment():
    """Return 'NAME=value' lines for the environment variables present."""
    environ_keys.sort()
    result = [(k, os.environ.get(k, _null)) for k in environ_keys]
    result = [t for t in result if not t[1] is _null]
    # Use '=' as the separator for consistency with get_sys_values() and
    # get_module_info() (this previously used '-').
    result = [t[0] + '=' + t[1] for t in result]
    return '\n    '.join(result)
class SConsXMLResultStream(XMLResultStream):
    """XML result stream that also records SCons version/build annotations."""
    def __init__(self, *args, **kw):
        super(SConsXMLResultStream, self).__init__(*args, **kw)
    def WriteAllAnnotations(self, context):
        # Load (by hand) the SCons modules we just unwrapped so we can
        # extract their version information.
        src_engine = os.environ.get('SCONS_LIB_DIR')
        if not src_engine:
            src_engine = os.path.join('src', 'engine')
        # imp.load_module() does not close the file object for us, so close
        # each one explicitly (the originals for 'SCons' and 'Script' were
        # leaked).  fp is None when find_module() returns a package.
        fp, pname, desc = imp.find_module('SCons', [src_engine])
        try:
            SCons = imp.load_module('SCons', fp, pname, desc)
        finally:
            if fp:
                fp.close()
        # Override SCons.Script.main() with a do-nothing function, because
        # loading the 'scons' script would actually try to execute SCons...
        src_engine_SCons = os.path.join(src_engine, 'SCons')
        fp, pname, desc = imp.find_module('Script', [src_engine_SCons])
        try:
            SCons.Script = imp.load_module('Script', fp, pname, desc)
        finally:
            if fp:
                fp.close()
        def do_nothing():
            pass
        SCons.Script.main = do_nothing
        scons_file = os.environ.get('SCONS')
        if scons_file:
            src_script, scons_py = os.path.split(scons_file)
            scons = os.path.splitext(scons_py)[0]
        else:
            src_script = os.path.join('src', 'script')
            scons = 'scons'
        fp, pname, desc = imp.find_module(scons, [src_script])
        try:
            scons = imp.load_module('scons', fp, pname, desc)
        finally:
            if fp:
                fp.close()
        self.WriteAnnotation("scons_test.engine", get_module_info(SCons))
        self.WriteAnnotation("scons_test.script", get_module_info(scons))
        self.WriteAnnotation("scons_test.sys", get_sys_values())
        self.WriteAnnotation("scons_test.os.environ", get_environment())
class AegisStream(TextResultStream):
    """Base text result stream that reports using Aegis outcome names."""
    arguments = [
        qm.fields.IntegerField(
            name = "print_time",
            title = "print individual test times",
            description = """
            """,
            default_value = 0,
        ),
    ]
    def __init__(self, *args, **kw):
        super(AegisStream, self).__init__(*args, **kw)
        self._num_tests = 0
        self._outcomes = {}
        self._outcome_counts = {}
        # Start every Aegis outcome at zero so _percent() is always defined
        # for outcomes that never occurred.
        for outcome in AegisTest.aegis_outcomes:
            self._outcome_counts[outcome] = 0
        self.format = "full"
    def _percent(self, outcome):
        # Share (0-100) of all recorded tests that ended with this outcome.
        return 100. * self._outcome_counts[outcome] / self._num_tests
    def _aegis_no_result(self, result):
        # Under the Aegis convention an exit code of 2 means NO_RESULT even
        # though QMTest records the outcome as FAIL.
        outcome = result.GetOutcome()
        return (outcome == Result.FAIL and result.get('Test.exit_code') == '2')
    def _DisplayText(self, text):
        # qm.common.html_to_text() uses htmllib, which sticks an extra
        # '\n' on the front of the text. Strip it and only display
        # the text if there's anything to display.
        # NOTE(review): assumes html_to_text() never returns '' -- text[0]
        # would raise IndexError otherwise; confirm against qm.common.
        text = qm.common.html_to_text(text)
        if text[0] == '\n':
            text = text[1:]
        if text:
            lines = text.splitlines()
            if lines[-1] == '':
                lines = lines[:-1]
            self.file.write('    ' + '\n    '.join(lines) + '\n\n')
    def _DisplayResult(self, result, format):
        # Translate the QMTest outcome into the Aegis name before writing.
        test_id = result.GetId()
        kind = result.GetKind()
        if self._aegis_no_result(result):
            outcome = "NO_RESULT"
        else:
            outcome = result.GetOutcome()
        self._WriteOutcome(test_id, kind, outcome)
        self.file.write('\n')
    def _DisplayAnnotations(self, result):
        # Show captured stdout/stderr when present, then the elapsed time
        # when the print_time argument was set.
        try:
            self._DisplayText(result["Test.stdout"])
        except KeyError:
            pass
        try:
            self._DisplayText(result["Test.stderr"])
        except KeyError:
            pass
        if self.print_time:
            start = float(result['qmtest.start_time'])
            end = float(result['qmtest.end_time'])
            fmt = "    Total execution time: %.1f seconds\n\n"
            self.file.write(fmt % (end - start))
class AegisChangeStream(AegisStream):
    """Result stream for change ('aegis test') runs: counts every outcome."""
    def WriteResult(self, result):
        # Exit code 2 counts as NO_RESULT under the Aegis convention.
        if self._aegis_no_result(result):
            outcome = AegisTest.NO_RESULT
        else:
            outcome = result.GetOutcome()
        self._num_tests += 1
        self._outcome_counts[outcome] += 1
        super(AegisStream, self).WriteResult(result)
    def _SummarizeTestStats(self):
        self.file.write("\n")
        self._DisplayHeading("STATISTICS")
        if self._num_tests != 0:
            # We'd like to use the _FormatStatistics() method to do
            # this, but it's wrapped around the list in Result.outcomes,
            # so it's simpler to just do it ourselves.
            # BUG FIX: the statistics used to leak to stdout via bare
            # print() calls while the heading went to self.file; write
            # everything to self.file.
            self.file.write("    %6d        tests total\n\n" % self._num_tests)
            for outcome in AegisTest.aegis_outcomes:
                if self._outcome_counts[outcome] != 0:
                    self.file.write("    %6d (%3.0f%%) tests %s\n" % (
                        self._outcome_counts[outcome],
                        self._percent(outcome),
                        outcome
                    ))
class AegisBaselineStream(AegisStream):
    """Result stream for baseline runs, where FAIL is the expected outcome."""
    def WriteResult(self, result):
        test_id = result.GetId()
        if self._aegis_no_result(result):
            # Aegis NO_RESULT: the next change run should expect a PASS.
            outcome = AegisTest.NO_RESULT
            self.expected_outcomes[test_id] = Result.PASS
            self._outcome_counts[outcome] += 1
        else:
            # Baseline runs expect failure; count anything else as
            # unexpected.
            self.expected_outcomes[test_id] = Result.FAIL
            outcome = result.GetOutcome()
            # BUG FIX: compare against the outcome constant Result.FAIL,
            # not the method Result.Fail (a string is never equal to a
            # method, so the old test was always true).
            if outcome != Result.FAIL:
                self._outcome_counts[outcome] += 1
        self._num_tests += 1
        super(AegisStream, self).WriteResult(result)
    def _SummarizeRelativeTestStats(self):
        self.file.write("\n")
        self._DisplayHeading("STATISTICS")
        if self._num_tests != 0:
            # We'd like to use the _FormatStatistics() method to do
            # this, but it's wrapped around the list in Result.outcomes,
            # so it's simpler to just do it ourselves.
            # BUG FIX: write to self.file instead of leaking the stats to
            # stdout via bare print() calls.
            if self._outcome_counts[AegisTest.FAIL]:
                self.file.write("    %6d (%3.0f%%) tests as expected\n" % (
                    self._outcome_counts[AegisTest.FAIL],
                    self._percent(AegisTest.FAIL),
                ))
            non_fail_outcomes = list(AegisTest.aegis_outcomes[:])
            non_fail_outcomes.remove(AegisTest.FAIL)
            for outcome in non_fail_outcomes:
                if self._outcome_counts[outcome] != 0:
                    self.file.write("    %6d (%3.0f%%) tests unexpected %s\n" % (
                        self._outcome_counts[outcome],
                        self._percent(outcome),
                        outcome,
                    ))
class AegisBatchStream(FileResultStream):
    """Result stream that emits an Aegis batch-format test_result listing."""
    def __init__(self, arguments):
        super(AegisBatchStream, self).__init__(arguments)
        self._outcomes = {}
    def WriteResult(self, result):
        # Only the per-test exit status matters for the batch listing.
        exit_status = '0'
        if result.GetOutcome() == Result.FAIL:
            exit_status = result.get('Test.exit_code')
        self._outcomes[result.GetId()] = exit_status
    def Summarize(self):
        # Emit the collected statuses as an Aegis-readable structure, with
        # test names sorted and normalized to forward slashes.
        self.file.write('test_result = [\n')
        for file_name in sorted(self._outcomes):
            exit_status = self._outcomes[file_name]
            normalized = file_name.replace('\\', '/')
            self.file.write('    { file_name = "%s";\n' % normalized)
            self.file.write('      exit_status = %s; },\n' % exit_status)
        self.file.write('];\n')
class AegisTest(test.Test):
    """Base test class defining the Aegis test outcomes.

    The previous version carried this description as a stray string
    expression after ``aegis_outcomes``, where it was silently discarded;
    it is now a proper class docstring.
    """
    PASS = "PASS"
    FAIL = "FAIL"
    NO_RESULT = "NO_RESULT"
    ERROR = "ERROR"
    UNTESTED = "UNTESTED"
    # All Aegis test outcomes, in reporting order.
    aegis_outcomes = (
        PASS, FAIL, NO_RESULT, ERROR, UNTESTED,
    )
class Test(AegisTest):
    """Simple test that runs a python script and checks the status
    to determine whether the test passes."""
    # Paths supplied by the Database when the test is instantiated.
    script = TextField(title="Script to test")
    topdir = TextField(title="Top source directory")
    def Run(self, context, result):
        """Run the test. The test passes if the command exits with status=0,
        and fails otherwise. The program output is logged, but not validated."""
        command = RedirectedExecutable()
        # '-tt' makes Python reject inconsistent tab/space indentation.
        args = [context.get('python', sys.executable), '-tt', self.script]
        status = command.Run(args, os.environ)
        if not check_exit_status(result, 'Test.', self.script, status):
            # In case of failure record exit code, stdout, and stderr.
            # NOTE(review): check_exit_status() has already called
            # result.Fail() with a more specific message at this point.
            result.Fail("Non-zero exit_code.")
            result["Test.stdout"] = result.Quote(command.stdout)
            result["Test.stderr"] = result.Quote(command.stderr)
class Database(database.Database):
    """Scons test database.

    * The 'src' and 'test' directories are explicit suites.
    * Their subdirectories are implicit suites.
    * All files under 'src/' ending with 'Tests.py' contain tests.
    * All files under 'test/' with extension '.py' contain tests.
    * Right now there is only a single test class, which simply runs
      the specified python interpreter on the given script.  To be refined...
    """
    srcdir = TextField(title = "Source Directory",
                       description = "The root of the test suite's source tree.")
    _is_generic_database = True
    # NOTE: the next two are plain functions deliberately stored in the
    # class-level is_a_test dict; they are fetched via self.is_a_test[...]
    # and called unbound, so they take no 'self'.
    def is_a_test_under_test(path, t):
        return os.path.splitext(t)[1] == '.py' \
               and os.path.isfile(os.path.join(path, t))
    def is_a_test_under_src(path, t):
        return t[-8:] == 'Tests.py' \
               and os.path.isfile(os.path.join(path, t))
    # Maps the top-level suite name to the predicate that recognizes its
    # test files.
    is_a_test = {
        'src' : is_a_test_under_src,
        'test' : is_a_test_under_test,
    }
    # Version-control bookkeeping directories that are never suites.
    exclude_subdirs = {
        '.svn' : 1,
        'CVS' : 1,
    }
    def is_a_test_subdir(path, subdir):
        # NOTE(review): unused, and the bare exclude_subdirs reference would
        # not resolve if this were ever called; kept for reference.
        if exclude_subdirs.get(subdir):
            return None
        return os.path.isdir(os.path.join(path, subdir))
    def __init__(self, path, arguments):
        self.label_class = "file_label.FileLabel"
        self.modifiable = "false"
        # Initialize the base class.
        super(Database, self).__init__(path, arguments)
    def GetRoot(self):
        """Return the root of the test-suite source tree."""
        return self.srcdir
    def GetSubdirectories(self, directory):
        """Return the sorted subdirectory names of *directory*."""
        components = self.GetLabelComponents(directory)
        path = os.path.join(self.GetRoot(), *components)
        if directory:
            dirs = [d for d in dircache.listdir(path)
                    if os.path.isdir(os.path.join(path, d))]
        else:
            # Top level: only the explicit 'src' and 'test' suites.
            dirs = list(self.is_a_test.keys())
        dirs.sort()
        return dirs
    def GetIds(self, kind, directory = "", scan_subdirs = 1):
        """Return the labels of all tests/suites under *directory*."""
        components = self.GetLabelComponents(directory)
        path = os.path.join(self.GetRoot(), *components)
        if kind == database.Database.TEST:
            if not components:
                return []
            # Use the per-tree predicate keyed by the top-level directory.
            ids = [self.JoinLabels(directory, t)
                   for t in dircache.listdir(path)
                   if self.is_a_test[components[0]](path, t)]
        elif kind == Database.RESOURCE:
            return [] # no resources yet
        else: # SUITE
            if directory:
                ids = [self.JoinLabels(directory, d)
                       for d in dircache.listdir(path)
                       if os.path.isdir(os.path.join(path, d))]
            else:
                ids = list(self.is_a_test.keys())
        if scan_subdirs:
            for d in dircache.listdir(path):
                # BUG FIX: test the entry relative to 'path', not relative
                # to the current working directory.
                if os.path.isdir(os.path.join(path, d)):
                    ids.extend(self.GetIds(kind,
                                           self.JoinLabels(directory, d),
                                           True))
        return ids
    def GetExtension(self, id):
        """Return the suite or test object for *id* (None if neither)."""
        if not id:
            return DirectorySuite(self, id)
        components = self.GetLabelComponents(id)
        path = os.path.join(self.GetRoot(), *components)
        if os.path.isdir(path): # a directory
            return DirectorySuite(self, id)
        elif os.path.isfile(path): # a test
            arguments = {}
            arguments['script'] = path
            arguments['topdir'] = self.GetRoot()
            return Test(arguments, qmtest_id = id, qmtest_database = self)
        else: # nothing else to offer
            return None
    def GetTest(self, test_id):
        """This method can be removed once QMTest 2.4 is out."""
        t = self.GetExtension(test_id)
        if isinstance(t, test.Test):
            return database.TestDescriptor(self,
                                           test_id,
                                           get_extension_class_name(t.__class__),
                                           get_explicit_arguments(t))
        raise database.NoSuchTestError(test_id)
    def GetSuite(self, suite_id):
        """This method can be removed once QMTest 2.4 is out."""
        if suite_id == "":
            return DirectorySuite(self, "")
        s = self.GetExtension(suite_id)
        if isinstance(s, suite.Suite):
            return s
        raise database.NoSuchSuiteError(suite_id)
    def GetResource(self, resource_id):
        """This method can be removed once QMTest 2.4 is out."""
        r = self.GetExtension(resource_id)
        if isinstance(r, resource.Resource):
            # BUG FIX: ResourceDescriptor lives in qm.test.database; the
            # bare name raised NameError.
            return database.ResourceDescriptor(self,
                                               resource_id,
                                               get_extension_class_name(r.__class__),
                                               get_explicit_arguments(r))
        raise database.NoSuchResourceError(resource_id)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| |
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data types util shared for orchestration."""
from typing import Dict, Iterable, List, Mapping, Optional
from tfx import types
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import artifact_utils
from tfx.utils import json_utils
from tfx.utils import proto_utils
from google.protobuf import message
from ml_metadata.proto import metadata_store_pb2
from ml_metadata.proto import metadata_store_service_pb2
def build_artifact_dict(
    proto_dict: Mapping[str, metadata_store_service_pb2.ArtifactStructList]
) -> Dict[str, List[types.Artifact]]:
  """Deserializes an ArtifactStructList map into lists of TFX Artifacts.

  Args:
    proto_dict: Mapping from input/output key to ArtifactStructList.

  Returns:
    Mapping from the same keys to lists of deserialized artifacts.

  Raises:
    RuntimeError: If an element is not the 'artifact' oneof variant.
  """
  result = {}
  for key, struct_list in proto_dict.items():
    artifacts = []
    for element in struct_list.elements:
      if not element.HasField('artifact'):
        raise RuntimeError('Only support artifact oneof field')
      pair = element.artifact
      artifacts.append(
          artifact_utils.deserialize_artifact(pair.type, pair.artifact))
    result[key] = artifacts
  return result
def build_artifact_struct_dict(
    artifact_dict: Mapping[str, Iterable[types.Artifact]]
) -> Dict[str, metadata_store_service_pb2.ArtifactStructList]:
  """Serializes TFX Artifact lists into an ArtifactStructList map."""
  result = {}
  if not artifact_dict:
    return result
  for key, artifacts in artifact_dict.items():
    struct_list = metadata_store_service_pb2.ArtifactStructList()
    struct_list.elements.extend(
        metadata_store_service_pb2.ArtifactStruct(
            artifact=metadata_store_service_pb2.ArtifactAndType(
                artifact=a.mlmd_artifact, type=a.artifact_type))
        for a in artifacts)
    result[key] = struct_list
  return result
def build_value_dict(
    metadata_value_dict: Mapping[str, metadata_store_pb2.Value]
) -> Dict[str, types.Property]:
  """Converts MLMD value dict into plain value dict."""
  return {
      key: getattr(value, value.WhichOneof('value'))
      for key, value in metadata_value_dict.items()
  }
def build_metadata_value_dict(
    value_dict: Mapping[str, types.ExecPropertyTypes]
) -> Dict[str, metadata_store_pb2.Value]:
  """Converts plain value dict into MLMD value dict; None values are skipped."""
  result = {}
  if not value_dict:
    return result
  for key, raw in value_dict.items():
    if raw is None:
      continue
    result[key] = set_metadata_value(metadata_store_pb2.Value(), raw)
  return result
def build_pipeline_value_dict(
    value_dict: Dict[str, types.ExecPropertyTypes]
) -> Dict[str, pipeline_pb2.Value]:
  """Converts plain value dict into pipeline_pb2.Value dict; skips None."""
  result = {}
  if not value_dict:
    return result
  for key, raw in value_dict.items():
    if raw is None:
      continue
    result[key] = set_parameter_value(pipeline_pb2.Value(), raw)
  return result
def get_parsed_value(
    value: metadata_store_pb2.Value,
    schema: Optional[pipeline_pb2.Value.Schema]) -> types.ExecPropertyTypes:
  """Converts MLMD value into parsed (non-)primitive value.

  String values carrying a schema are decoded back into booleans, lists or
  proto messages; anything else is returned as the raw oneof value.
  """

  def _parse(raw: str,
             value_type: pipeline_pb2.Value.Schema.ValueType
            ) -> types.ExecPropertyTypes:
    # Lists are JSON-encoded; recurse to decode each element.
    if value_type.HasField('list_type'):
      return [_parse(item, value_type.list_type)
              for item in json_utils.loads(raw)]
    if value_type.HasField('proto_type'):
      return proto_utils.deserialize_proto_message(
          raw, value_type.proto_type.message_type,
          value_type.proto_type.file_descriptors)
    return raw

  if not (schema and value.HasField('string_value')):
    return getattr(value, value.WhichOneof('value'))
  if schema.value_type.HasField('boolean_type'):
    return json_utils.loads(value.string_value)
  return _parse(value.string_value, schema.value_type)
def build_parsed_value_dict(
    value_dict: Mapping[str, pipeline_pb2.Value]
) -> Dict[str, types.ExecPropertyTypes]:
  """Converts MLMD value into parsed (non-)primitive value dict."""
  result = {}
  if not value_dict:
    return result
  for key, wrapped in value_dict.items():
    if not wrapped.HasField('field_value'):
      raise RuntimeError('Field value missing for %s' % key)
    schema = wrapped.schema if wrapped.HasField('schema') else None
    result[key] = get_parsed_value(wrapped.field_value, schema)
  return result
def get_metadata_value_type(
    value: types.ExecPropertyTypes) -> metadata_store_pb2.PropertyType:
  """Gets the metadata property type of a property value from a value.

  Args:
    value: The property value represented by pipeline_pb2.Value or a primitive
      property value type.

  Returns:
    A metadata_store_pb2.PropertyType.

  Raises:
    RuntimeError: If property value is still in RuntimeParameter form
    ValueError: The value type is not supported.
  """
  # Note: bools reach the first branch because bool subclasses int.
  if isinstance(value, int):
    return metadata_store_pb2.INT
  if isinstance(value, float):
    return metadata_store_pb2.DOUBLE
  if isinstance(value, pipeline_pb2.Value):
    which = value.WhichOneof('value')
    if which != 'field_value':
      raise RuntimeError('Expecting field_value but got %s.' % value)
    oneof_name = value.field_value.WhichOneof('value')
    oneof_to_type = {
        'int_value': metadata_store_pb2.INT,
        'double_value': metadata_store_pb2.DOUBLE,
        'string_value': metadata_store_pb2.STRING,
    }
    if oneof_name not in oneof_to_type:
      raise ValueError('Unexpected value type %s' % oneof_name)
    return oneof_to_type[oneof_name]
  if isinstance(value, (str, bool, message.Message, list)):
    return metadata_store_pb2.STRING
  raise ValueError('Unexpected value type %s' % type(value))
def get_value(tfx_value: pipeline_pb2.Value) -> types.Property:
  """Gets the primitive type value of a pipeline_pb2.Value instance.

  Args:
    tfx_value: A pipeline_pb2.Value message.

  Returns:
    The primitive type value of the tfx value.

  Raises:
    RuntimeError: when the value is still in RuntimeParameter form.
  """
  if tfx_value.WhichOneof('value') != 'field_value':
    raise RuntimeError('Expecting field_value but got %s.' % tfx_value)
  field = tfx_value.field_value
  return getattr(field, field.WhichOneof('value'))
def get_metadata_value(
    value: metadata_store_pb2.Value) -> Optional[types.Property]:
  """Gets the primitive type value of a metadata_store_pb2.Value instance.

  Args:
    value: A metadata_store_pb2.Value message.

  Returns:
    The primitive type value of metadata_store_pb2.Value instance if set,
    `None` otherwise.
  """
  which = value.WhichOneof('value')
  if which is None:
    return None
  return getattr(value, which)
def set_metadata_value(
    metadata_value: metadata_store_pb2.Value,
    value: types.ExecPropertyTypes) -> metadata_store_pb2.Value:
  """Sets metadata property based on tfx value.

  Args:
    metadata_value: A metadata_store_pb2.Value message to be set.
    value: The value of the property in pipeline_pb2.Value form.

  Returns:
    A Value proto filled with the provided value.

  Raises:
    ValueError: If value type is not supported or is still RuntimeParameter.
  """
  # Reuse the pipeline-value conversion, keeping only the field_value part
  # (metadata_store_pb2.Value has no schema to carry).
  wrapper = pipeline_pb2.Value()
  set_parameter_value(wrapper, value, set_schema=False)
  metadata_value.CopyFrom(wrapper.field_value)
  return metadata_value
def set_parameter_value(
    parameter_value: pipeline_pb2.Value,
    value: types.ExecPropertyTypes,
    set_schema: Optional[bool] = True) -> pipeline_pb2.Value:
  """Sets field value and schema based on tfx value.

  Args:
    parameter_value: A pipeline_pb2.Value message to be set.
    value: The value of the property.
    set_schema: Boolean value indicating whether to set schema in
      pipeline_pb2.Value.

  Returns:
    A pipeline_pb2.Value proto with field_value and optionally schema filled
    based on input property.

  Raises:
    ValueError: If value type is not supported.
  """
  def get_value_and_set_type(
      value: types.ExecPropertyTypes,
      value_type: pipeline_pb2.Value.Schema.ValueType) -> types.Property:
    """Returns serialized value and sets value_type."""
    # Order matters: bool must be tested before int (bool subclasses int)
    # so booleans are serialized as JSON strings, not stored as ints.
    if isinstance(value, bool):
      if set_schema:
        value_type.boolean_type.SetInParent()
      return value
    elif isinstance(value, message.Message):
      # TODO(b/171794016): Investigate if file descripter set is needed for
      # tfx-owned proto already build in the launcher binary.
      if set_schema:
        proto_type = value_type.proto_type
        proto_type.message_type = type(value).DESCRIPTOR.full_name
        proto_utils.build_file_descriptor_set(value,
                                              proto_type.file_descriptors)
      return proto_utils.proto_to_json(value)
    elif isinstance(value, list) and len(value):
      # Non-empty lists are serialized element-by-element (recursively)
      # and stored as a single JSON string.
      if set_schema:
        value_type.list_type.SetInParent()
      value = [
          get_value_and_set_type(val, value_type.list_type) for val in value
      ]
      return json_utils.dumps(value)
    elif isinstance(value, (int, float, str)):
      return value
    else:
      raise ValueError('Unexpected type %s' % type(value))
  # Top-level dispatch.  The bool exclusion on the int branch mirrors the
  # ordering concern above: bools must fall through to the bool branch.
  if isinstance(value, int) and not isinstance(value, bool):
    parameter_value.field_value.int_value = value
  elif isinstance(value, float):
    parameter_value.field_value.double_value = value
  elif isinstance(value, str):
    parameter_value.field_value.string_value = value
  elif isinstance(value, pipeline_pb2.Value):
    which = value.WhichOneof('value')
    if which != 'field_value':
      raise ValueError('Expecting field_value but got %s.' % value)
    parameter_value.field_value.CopyFrom(value.field_value)
  elif isinstance(value, bool):
    # NOTE(review): this branch sets the schema unconditionally, ignoring
    # set_schema -- confirm whether that asymmetry is intentional.
    parameter_value.schema.value_type.boolean_type.SetInParent()
    parameter_value.field_value.string_value = json_utils.dumps(value)
  elif isinstance(value, (list, message.Message)):
    parameter_value.field_value.string_value = get_value_and_set_type(
        value, parameter_value.schema.value_type)
  else:
    raise ValueError('Unexpected type %s' % type(value))
  return parameter_value
| |
#!/usr/bin/env python
"""
B.5 Macros for text
"""
from plasTeX import Command, Environment, sourceChildren
# Spacing-mode toggles: they affect TeX's internal state only and render
# to nothing in the output stream.
class frenchspacing(Command):
    unicode = u''
class nonfrenchspacing(Command):
    unicode = u''
class normalbaselines(Command):
    unicode = u''
# Left and right single quotation marks (U+2018 / U+2019).
class lq(Command):
    unicode = unichr(8216)
class rq(Command):
    unicode = unichr(8217)
# Literal bracket characters.
class lbrack(Command):
    unicode = u'['
class rbrack(Command):
    unicode = u']'
# An explicit space; \empty and \null expand to nothing.
class space(Command):
    unicode = u' '
class empty(Command):
    unicode = u''
class null(Command):
    unicode = u''
class bgroup(Command):
    """Implicit '{': opens a grouping level and absorbs the group's tokens."""
    def invoke(self, tex):
        # Entering the group pushes a new context scope.
        self.ownerDocument.context.push()
    def digest(self, tokens):
        # Absorb the tokens that belong to us
        for item in tokens:
            if item.nodeType == Command.ELEMENT_NODE:
                # A sectioning command implicitly ends the group; put it
                # back on the stream for the enclosing handler.
                if item.level < self.ENDSECTIONS_LEVEL:
                    tokens.push(item)
                    break
                # An explicit '}' or \endgroup terminates the group.
                if isinstance(item, (egroup,endgroup)):
                    break
            # A token from a shallower context also ends the group.
            if item.contextDepth < self.contextDepth:
                tokens.push(item)
                break
            item.parentNode = self
            item.digest(tokens)
            self.appendChild(item)
        self.paragraphs(force=False)
    @property
    def source(self):
        # Reconstruct the original TeX source for this group.
        if self.hasChildNodes():
            return '{%s}' % sourceChildren(self)
        return '{'
# \begingroup behaves exactly like an implicit '{'.
class begingroup(bgroup):
    pass
class egroup(Command):
    """Implicit '}': closes the grouping level opened by bgroup."""
    unicode = u''
    def invoke(self, tex):
        # Leaving the group pops the context scope pushed by bgroup.
        self.ownerDocument.context.pop()
    @property
    def source(self):
        return '}'
    def digest(self, tokens):
        # Group contents are absorbed by bgroup.digest(); nothing to do here.
        return
# \endgroup behaves exactly like an implicit '}'.
class endgroup(egroup):
    unicode = u''
# Remaining plain-TeX text macros.  Classes with only `pass` inherit the
# default Command rendering; `args` declares the macro's argument spec and
# `macroName` overrides the control-sequence name where the Python identifier
# cannot match it (reserved word or active character).
class obeyspaces(Command):
    unicode = u''

class loop(Command):
    args = 'var:Tok'
    unicode = u''

class iterate(Command):
    unicode = u''

class repeat(Command):
    unicode = u''

class enskip(Command):
    pass

class enspace(Command):
    pass

class quad(Command):
    pass

class qquad(Command):
    pass

class thinspace(Command):
    pass

class negthinspace(Command):
    pass

class hglue(Command):
    pass

class vglue(Command):
    pass

class topglue(Command):
    pass

class nointerlineskip(Command):
    pass

class offinterlineskip(Command):
    pass

class smallskip(Command):
    pass

class medskip(Command):
    pass

class bigskip(Command):
    pass

# \break -- 'break' is a Python keyword, hence macroName.
class TeXBreak(Command):
    macroName = 'break'
    unicode = u''

class allowbreak(Command):
    unicode = u''

# Active tilde (non-breaking space).
class ControlSpace(Command):
    macroName = 'active::~'

class slash(Command):
    pass

class filbreak(Command):
    pass

class goodbreak(Command):
    pass

class eject(Command):
    unicode = u''

class supereject(Command):
    unicode = u''

class removelastskip(Command):
    pass

class smallbreak(Command):
    pass

class medbreak(Command):
    pass

class bigbreak(Command):
    pass

class line(Command):
    pass

class leftline(Command):
    args = 'self'

class llap(Command):
    args = 'self'

class centerline(Command):
    args = 'self'

class underbar(Command):
    args = 'self'

class hang(Command):
    pass

class textindent(Command):
    args = 'self'

class narrower(Command):
    pass

class raggedright(Environment):
    pass

#
# Accents are done in the LaTeX package
#

# Horizontal ellipsis.  NOTE(review): unichr is Python 2 only.
class dots(Command):
    unicode = unichr(8230)
| |
from __future__ import absolute_import
import logging
from constantpool import ConstantPool
from classtypes import CodeAttribute, Method
from classconstants import ACC_STATIC, ACC_NATIVE, ACC_INTERFACE, void, null
from descriptor import parse_descriptor
class NoSuchMethodException(Exception):
    """Raised when method lookup on a class (and its superclasses) fails."""
# A static method with descriptor '()V' (no arguments, void return) and an
# empty CodeAttribute; presumably used as a no-op placeholder elsewhere — it
# is not referenced in this chunk.
EMPTY_METHOD = Method(ACC_STATIC, '', '()V', [CodeAttribute(0, 0, [], [], [])], '', '')
class Class(object):
    """In-memory representation of a loaded Java class for this toy JVM.

    Holds the parsed class metadata (constant pool, fields, methods,
    interfaces, super class) plus interpreter bookkeeping such as native
    method overrides.  This is Python 2 code (xrange/long below).
    """

    def __init__(self,
            name=None,
            super_class=None,
            vm=None):
        self._klass = None
        self.primitive = False
        # Fall back to the Python subclass name (used by NativeClass shims).
        self.name = name if name else self.__class__.__name__
        self.major_version, self.minor_version = -1, -1
        self.constant_pool = ConstantPool(0)
        self.access_flags = 0
        self.super_class = super_class
        self.interfaces = []
        self.fields = {}
        self.field_values = {}
        self.field_overrides = {}
        self.methods = {}
        self.method_overrides = {}
        self.attributes = []
        self.java_instance = None

    @classmethod
    def array_factory(cls, class_name):
        """Create a Class describing an array type (name starts with '[')."""
        assert class_name[0] == '['
        array = cls(class_name)
        array.super_class = None # this is later changed to java/lang/Class
        return array

    @property
    def is_array(self):
        return self.name[0] == '['

    def get_method(self, method_name, type_signature):
        """Return (defining_class, method) for the given name/signature.

        Searches this class first, then the super-class chain (except for
        class initializers, which are never inherited).

        Raises:
            NoSuchMethodException: if no class in the chain defines it.
        """
        built_method_name = Class.method_name(method_name, type_signature)
        if built_method_name in self.methods:
            return self, self.methods[built_method_name]
        # lookup in super class
        if '<clinit>' != method_name and self.super_class:
            try:
                return self.super_class.get_method(
                    method_name, type_signature)
            except NoSuchMethodException:
                pass
        # TODO: implement lookup for default methods in interfaces
        raise NoSuchMethodException('No such method %s.%s (%s)' % (
            self.name, method_name, type_signature) )

    def get_field(self, field_name):
        """Return the field record, walking up the super-class chain.

        NOTE(review): if no class in the chain defines the field this loops
        until klass becomes None and raises AttributeError — callers
        presumably guarantee existence.
        """
        klass = self
        while field_name not in klass.fields:
            klass = klass.super_class
        return klass.fields[field_name]

    @property
    def is_interface(self):
        return self.access_flags & ACC_INTERFACE

    def implements(self, interface):
        """Return True if this class/interface transitively implements it."""
        if self is interface:
            return True
        for interf in self.interfaces:
            if interf.implements(interface):
                return True
        return False

    def is_subclass(self, instance):
        """Return whether `instance` is an instance of this class.

        A Class object itself only matches java/lang/Class or Object;
        otherwise the instance's class chain is walked upward.
        """
        if isinstance(instance, Class):
            return self.name in ('java/lang/Class', 'java/lang/Object')
        else:
            klass = instance._klass
            while klass != self and klass != klass.super_class:
                klass = klass.super_class
            return klass == self

    def instantiate(self, size=None):
        """Create an instance; arrays require an explicit size."""
        if self.is_array:
            assert size is not None
            return ArrayClass(self, size)
        return ClassInstance(self.name, self)

    @staticmethod
    def fetch_native_method(class_name, method):
        """Resolve an ACC_NATIVE method to its Python implementation.

        Implementations live in the classes_with_natives registry;
        registerNatives defaults to a no-op when absent.  A dummy
        CodeAttribute is appended so the interpreter can build a frame.
        """
        assert method.access_flags & ACC_NATIVE
        assert not hasattr(method, 'code')
        try:
            if class_name not in classes_with_natives and \
                    method.name == 'registerNatives':
                native_method = (lambda *args: void)
            else:
                module = classes_with_natives[class_name]
                if method.name == 'registerNatives' and \
                        not 'registerNatives' in module.__dict__:
                    native_method = (lambda *args: void)
                else:
                    native_method = getattr(module, method.name)
        except (KeyError, AttributeError):
            raise Exception('Missing method %s on class %s' % (
                method.name, class_name))
        method.attributes.append(CodeAttribute(100, 100, [], [], []))
        return native_method

    def run_method(self, vm, method, method_descriptor):
        """Pop the method's arguments off the current frame and push a new one."""
        native_method = None
        # handle native methods
        if (method.access_flags & ACC_NATIVE) != 0:
            native_method = Class.fetch_native_method(self.name, method)
        code = get_attribute(method, 'Code')
        # may contain an instance argument (not STATIC
        is_static = method.access_flags & ACC_STATIC != 0
        num_args = len(method.parameters) + (0 if is_static else 1)
        # Arguments were pushed left-to-right, so reverse after popping.
        arguments = [vm.frame_stack[-1].pop() for i in xrange(num_args)][::-1]
        for argument in arguments:
            assert argument is null or isinstance(argument,
                (int, long, ClassInstance, float)), 'not valid type '+str(type(argument))
        if not is_static:
            instance = arguments[0]
            assert instance is not null, '%s is null' % str(arguments[0])
            # Per-instance overrides (virtual dispatch) beat the static pick.
            if method.name in instance._klass.method_overrides:
                native_method = instance._klass.method_overrides[method.name]
        # Parenthesized so the statements also parse under Python 3; output
        # is unchanged on Python 2 (a single parenthesized expression).
        print('adding method %s.%s to stack' % (self.name, method.name))
        print('with arguments %s' % repr(arguments))
        frame = Frame(
            parameters=arguments,
            max_stack=code.max_stack,
            max_locals=code.max_locals,
            code=code,
            method=method,
            native_method=native_method,
            klass=self)
        vm.frame_stack.append(frame)

    def __repr__(self):
        return '<Class %s%s> ' % (self.name, ' array' if self.is_array else '')

    @staticmethod
    def method_name(*args):
        """Build the mangled lookup key 'name__descriptor'."""
        if isinstance(args[0], Method):
            return '%s__%s' % (args[0].name, args[0].descriptor)
        elif len(args) == 2:
            return '%s__%s' % args
        raise Exception

    def override_native_method(self, name):
        """Decorator factory registering f as the override for `name`.

        BUG FIX: the wrapper previously returned None, so the decorated
        function was rebound to None in the caller's namespace; it now
        returns f (the registration side effect is unchanged).
        """
        def wrapper(f):
            self.method_overrides[name] = f
            return f
        return wrapper
class NativeClass(Class):
    """A class implemented entirely in Python: methods are plain attributes.

    NOTE(review): run_method here uses vm.stack while Class.run_method uses
    vm.frame_stack — confirm the VM exposes both, or this is a latent bug.
    """

    def get_method(self, method_name, type_signature):
        # Constructors/initializers get a synthetic empty static method.
        if method_name in ('<init>', '<clinit>'):
            return self, Method(ACC_STATIC, method_name, type_signature, [], '', '')
        if method_name in self.methods:
            return self, self.methods[method_name]
        raise NoSuchMethodException('No such method %s.%s (%s)' % (self, method_name, type_signature) )

    def run_method(self, vm, method, method_descriptor):
        """Invoke the Python implementation directly and return its result."""
        args = []
        if (method.access_flags & ACC_STATIC ) == 0:
            # method is not static so load instance
            args.append(vm.stack[-1].pop())
            if args[-1] == null:
                raise Exception('nullpointerexception')
        # parse argument list and return type
        method.parameters, method.return_type = parse_descriptor(method_descriptor)
        # pop the declared arguments off the operand stack
        for arg_type in method.parameters:
            arg = vm.stack[-1].pop()
            args.append(arg)
        # Initializers are no-ops for native classes.
        if method.name in ('<init>', '<clinit>'):
            return void
        return_value = getattr(self, method.name, None)(*args)
        if method.return_type == 'V':
            return void
        return return_value
class ClassInstance(object):
    """A single instance of a (non-array) Java class.

    Holds a reference to the defining Class, its name, and a dict
    (presumably field name -> value) in _values.
    """
    # Class-level defaults; overwritten per instance in __init__.
    _klass = None
    _klass_name = None
    _values = None

    def __init__(self, klass_name, klass):
        assert isinstance(klass, Class)
        assert klass_name == klass.name
        self._values = {}
        self._klass = klass
        self._klass_name = klass_name
        self.natives = {}

    def __repr__(self):
        # java/lang/Class instances display their wrapped class name.
        if self._klass_name == 'java/lang/Class':
            return '<JavaClass %s>' % self._values['class_name']
        # Strings display their decoded character array.
        # NOTE(review): unichr is Python 2 only.
        if self._klass_name == 'java/lang/String' and self._values.get('value'):
            return '<String "%s">' % (''.join(unichr(x)
                for x in self._values['value'].array))
        return '<Instance of "%s">' % (
            self._klass_name)
        # BUG FIX: a second return statement (which also printed self._values)
        # followed the one above and was unreachable; removed as dead code.
class ArrayClass(ClassInstance):
    """An array *instance* (despite the name); see the ArrayInstance alias."""
    _klass = None

    def __init__(self, klass, size):
        # Deliberately does not call ClassInstance.__init__ (no _values dict).
        assert isinstance(klass, Class)
        self._klass = klass
        self._klass_name = klass.name
        # Primitive arrays default to 0; reference arrays to null.
        if klass.primitive:
            self.array = [0] * size
        else:
            self.array = [null] * size

    def __repr__(self):
        # Show full contents only for small arrays.
        if len(self.array) < 10:
            return '<Array %s %s> ' % (self._klass.name, self.array)
        return '<Array %s size=%d> ' % (self._klass.name, len(self.array))

    def __len__(self):
        return len(self.array)

    @property
    def size(self):
        # Convenience alias for len(self).
        return len(self.array)

# Both names refer to the same class.
ArrayInstance = ArrayClass
from frame import Frame
from klasses import classes_with_natives
from utils import get_attribute
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.training.python.training import sampling_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
class StratifiedSampleTest(test.TestCase):
  """Tests for tf.contrib.training.stratified_sample.

  Written against TF 1.x graph-mode APIs (sessions, queue runners).
  """

  def testGraphBuildAssertionFailures(self):
    # Each call below violates one documented precondition and must raise
    # at graph-construction time.
    val = [array_ops.zeros([1, 3]), array_ops.ones([1, 5])]
    label = constant_op.constant([1], shape=[1])  # must have batch dimension
    probs = [.2] * 5
    init_probs = [.1, .3, .1, .3, .2]
    batch_size = 16
    # Label must have only batch dimension if enqueue_many is True.
    with self.assertRaises(ValueError):
      sampling_ops.stratified_sample(
          val,
          array_ops.zeros([]),
          probs,
          batch_size,
          init_probs,
          enqueue_many=True)
    with self.assertRaises(ValueError):
      sampling_ops.stratified_sample(
          val,
          array_ops.zeros([1, 1]),
          probs,
          batch_size,
          init_probs,
          enqueue_many=True)
    # Label must not be one-hot.
    with self.assertRaises(ValueError):
      sampling_ops.stratified_sample(val,
                                     constant_op.constant([0, 1, 0, 0, 0]),
                                     probs, batch_size, init_probs)
    # Data must be list, not singleton tensor.
    with self.assertRaises(TypeError):
      sampling_ops.stratified_sample(
          array_ops.zeros([1, 3]), label, probs, batch_size, init_probs)
    # Data must have batch dimension if enqueue_many is True.
    with self.assertRaises(ValueError):
      sampling_ops.stratified_sample(
          val,
          constant_op.constant(1),
          probs,
          batch_size,
          init_probs,
          enqueue_many=True)
    # Batch dimensions on data and labels should be equal.
    with self.assertRaises(ValueError):
      sampling_ops.stratified_sample(
          [array_ops.zeros([2, 1])],
          label,
          probs,
          batch_size,
          init_probs,
          enqueue_many=True)
    # Probabilities must be numpy array, python list, or tensor.
    with self.assertRaises(ValueError):
      sampling_ops.stratified_sample(val, label, 1, batch_size, init_probs)
    # Probabilities shape must be fully defined.
    with self.assertRaises(ValueError):
      sampling_ops.stratified_sample(
          val,
          label,
          array_ops.placeholder(
              dtypes.float32, shape=[None]),
          batch_size,
          init_probs)
    # In the rejection sampling case, make sure that probability lengths are
    # the same.
    with self.assertRaises(ValueError):
      sampling_ops.stratified_sample(
          val, label, [.1] * 10, batch_size, init_probs=[.2] * 5)
    # In the rejection sampling case, make sure that zero initial probability
    # classes also have zero target probability.
    with self.assertRaises(ValueError):
      sampling_ops.stratified_sample(
          val, label, [.2, .4, .4], batch_size, init_probs=[0, .5, .5])

  def testRuntimeAssertionFailures(self):
    # Illegal labels/probabilities pass graph construction but must fail
    # the runtime check ops added by _verify_input.
    valid_probs = [.2] * 5
    valid_labels = [1, 2, 3]
    vals = [array_ops.zeros([3, 1])]
    illegal_labels = [
        [0, -1, 1],  # classes must be nonnegative
        [5, 1, 1],  # classes must be less than number of classes
        [2, 3],  # data and label batch size must be the same
    ]
    illegal_probs = [
        [.1] * 5,  # probabilities must sum to one
        [-.5, .5, .5, .4, .1],  # probabilities must be non-negative
    ]
    # Set up graph with illegal label vector.
    label_ph = array_ops.placeholder(dtypes.int32, shape=[None])
    probs_ph = array_ops.placeholder(
        dtypes.float32, shape=[5])  # shape must be defined
    val_tf, lbl_tf, prob_tf = sampling_ops._verify_input(  # pylint: disable=protected-access
        vals, label_ph, [probs_ph])
    for illegal_label in illegal_labels:
      # Run session that should fail.
      with self.test_session() as sess:
        with self.assertRaises(errors_impl.InvalidArgumentError):
          sess.run([val_tf, lbl_tf],
                   feed_dict={label_ph: illegal_label,
                              probs_ph: valid_probs})
    for illegal_prob in illegal_probs:
      # Run session that should fail.
      with self.test_session() as sess:
        with self.assertRaises(errors_impl.InvalidArgumentError):
          sess.run([prob_tf],
                   feed_dict={label_ph: valid_labels,
                              probs_ph: illegal_prob})

  def testCanBeCalledMultipleTimes(self):
    # Two stratified_sample ops in one graph must coexist and run.
    batch_size = 20
    val_input_batch = [array_ops.zeros([2, 3, 4])]
    lbl_input_batch = array_ops.ones([], dtype=dtypes.int32)
    probs = np.array([0, 1, 0, 0, 0])
    batches = sampling_ops.stratified_sample(
        val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
    batches += sampling_ops.stratified_sample(
        val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
    summary_op = logging_ops.merge_summary(
        ops.get_collection(ops.GraphKeys.SUMMARIES))
    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(coord=coord)
      sess.run(batches + (summary_op,))
      coord.request_stop()
      coord.join(threads)

  def testRejectionBatchingBehavior(self):
    # enqueue_many input whose batch size differs from the output batch size.
    batch_size = 20
    input_batch_size = 11
    val_input_batch = [array_ops.zeros([input_batch_size, 2, 3, 4])]
    lbl_input_batch = control_flow_ops.cond(
        math_ops.greater(.5, random_ops.random_uniform([])),
        lambda: array_ops.ones([input_batch_size], dtype=dtypes.int32) * 1,
        lambda: array_ops.ones([input_batch_size], dtype=dtypes.int32) * 3)
    probs = np.array([0, .2, 0, .8, 0])
    data_batch, labels = sampling_ops.stratified_sample(
        val_input_batch,
        lbl_input_batch,
        probs,
        batch_size,
        init_probs=[0, .3, 0, .7, 0],
        enqueue_many=True)
    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(coord=coord)
      sess.run([data_batch, labels])
      coord.request_stop()
      coord.join(threads)

  def testBatchDimensionNotRequired(self):
    classes = 5
    # Probs must be a tensor, since we pass it directly to _verify_input.
    probs = constant_op.constant([1.0 / classes] * classes)
    # Make sure that these vals/labels pairs don't throw any runtime exceptions.
    legal_input_pairs = [
        (np.zeros([2, 3]), [x % classes for x in range(2)]),  # batch dim 2
        (np.zeros([4, 15]), [x % classes for x in range(4)]),  # batch dim 4
        (np.zeros([10, 1]), [x % classes for x in range(10)]),  # batch dim 10
    ]
    # Set up graph with placeholders.
    vals_ph = array_ops.placeholder(
        dtypes.float32)  # completely undefined shape
    labels_ph = array_ops.placeholder(
        dtypes.int32)  # completely undefined shape
    val_tf, labels_tf, _ = sampling_ops._verify_input(  # pylint: disable=protected-access
        [vals_ph], labels_ph, [probs])
    # Run graph to make sure there are no shape-related runtime errors.
    for vals, labels in legal_input_pairs:
      with self.test_session() as sess:
        sess.run([val_tf, labels_tf],
                 feed_dict={vals_ph: vals,
                            labels_ph: labels})

  def testRejectionDataListInput(self):
    # A list of several tensors must come back as a list of the same length.
    batch_size = 20
    val_input_batch = [
        array_ops.zeros([2, 3, 4]), array_ops.ones([2, 4]), array_ops.ones(2) *
        3
    ]
    lbl_input_batch = array_ops.ones([], dtype=dtypes.int32)
    probs = np.array([0, 1, 0, 0, 0])
    val_list, lbls = sampling_ops.stratified_sample(
        val_input_batch,
        lbl_input_batch,
        probs,
        batch_size,
        init_probs=[0, 1, 0, 0, 0])
    # Check output shapes.
    self.assertTrue(isinstance(val_list, list))
    self.assertEqual(len(val_list), len(val_input_batch))
    self.assertTrue(isinstance(lbls, ops.Tensor))
    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(coord=coord)
      out = sess.run(val_list + [lbls])
      coord.request_stop()
      coord.join(threads)
    # Check output shapes.
    self.assertEqual(len(out), len(val_input_batch) + 1)

  def normalBehaviorHelper(self, sampler):
    """Shared statistical check: sampled labels match the target probs."""
    # Set up graph.
    random_seed.set_random_seed(1234)
    lbl1 = 0
    lbl2 = 3
    # This cond allows the necessary class queues to be populated.
    label = control_flow_ops.cond(
        math_ops.greater(.5, random_ops.random_uniform([])),
        lambda: constant_op.constant(lbl1), lambda: constant_op.constant(lbl2))
    val = [np.array([1, 4]) * label]
    probs = np.array([.8, 0, 0, .2, 0])
    batch_size = 16
    data_batch, labels = sampler(val, label, probs, batch_size)
    # Run session and keep track of how frequently the labels and values appear.
    data_l = []
    label_l = []
    with self.test_session() as sess:
      # Need to initialize variables that keep running total of classes seen.
      variables.global_variables_initializer().run()
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(coord=coord)
      for _ in range(20):
        [data], lbls = sess.run([data_batch, labels])
        data_l.append(data)
        label_l.append(lbls)
      coord.request_stop()
      coord.join(threads)
    # First check that the data matches the labels.
    for lbl, data in zip(label_l, data_l):
      for i in range(batch_size):
        self.assertListEqual(list(np.array([1, 4]) * lbl[i]), list(data[i, :]))
    # Check that the labels are approximately correct.
    expected_label = probs[0] * lbl1 + probs[3] * lbl2
    lbl_list = range(len(probs))
    lbl_std_dev = np.sqrt(np.sum((np.square(lbl_list - expected_label))))
    lbl_std_dev_of_mean = lbl_std_dev / np.sqrt(len(label_l))  # CLT
    actual_lbl = np.mean(label_l)
    # Tolerance is 3 standard deviations of the mean. According to the central
    # limit theorem, this should cover 99.7% of cases. Note that since the seed
    # is fixed, for a given implementation, this test will pass or fail 100% of
    # the time. This use of assertNear is to cover cases where someone changes
    # an implementation detail, which would cause the random behavior to differ.
    self.assertNear(actual_lbl, expected_label, 3 * lbl_std_dev_of_mean)

  def testRejectionNormalBehavior(self):
    initial_p = [.7, 0, 0, .3, 0]

    def curried_sampler(val, lbls, probs, batch, enqueue_many=False):
      return sampling_ops.stratified_sample(
          val,
          lbls,
          probs,
          batch,
          init_probs=initial_p,
          enqueue_many=enqueue_many)

    self.normalBehaviorHelper(curried_sampler)

  def testRejectionNormalBehaviorWithOnlineInitPEstimate(self):
    # init_probs=None makes the op estimate initial probabilities online.

    def curried_sampler(val, lbls, probs, batch, enqueue_many=False):
      return sampling_ops.stratified_sample(
          val, lbls, probs, batch, init_probs=None, enqueue_many=enqueue_many)

    self.normalBehaviorHelper(curried_sampler)
class RejectionSampleTest(test.TestCase):
  """Tests for tf.contrib.training.rejection_sample (TF 1.x graph mode)."""

  def testGraphConstructionFailures(self):
    accept_prob_fn = lambda _: constant_op.constant(1.0)
    batch_size = 32
    # Data must have batch dimension if `enqueue_many` is `True`.
    with self.assertRaises(ValueError):
      sampling_ops.rejection_sample(
          [array_ops.zeros([])], accept_prob_fn, batch_size, enqueue_many=True)
    # Batch dimensions should be equal if `enqueue_many` is `True`.
    with self.assertRaises(ValueError):
      sampling_ops.rejection_sample(
          [array_ops.zeros([5, 1]), array_ops.zeros([4, 1])],
          accept_prob_fn,
          batch_size,
          enqueue_many=True)

  def testRuntimeFailures(self):
    # Acceptance probabilities outside [0, 1] must trip the runtime checks.
    prob_ph = array_ops.placeholder(dtypes.float32, [])
    accept_prob_fn = lambda _: prob_ph
    batch_size = 32
    # Set up graph.
    random_seed.set_random_seed(1234)
    sampling_ops.rejection_sample(
        [array_ops.zeros([])],
        accept_prob_fn,
        batch_size,
        runtime_checks=True,
        name='rejection_sample')
    prob_tensor = ops.get_default_graph().get_tensor_by_name(
        'rejection_sample/prob_with_checks:0')
    # Run session that should fail.
    with self.test_session() as sess:
      for illegal_prob in [-0.1, 1.1]:
        with self.assertRaises(errors_impl.InvalidArgumentError):
          sess.run(prob_tensor, feed_dict={prob_ph: illegal_prob})

  def testNormalBehavior(self):
    # accept_prob_fn accepts 2.0 with prob 1 and 1.0 with prob 0, so the
    # output batch must contain only 2.0.
    tensor_list = [
        control_flow_ops.cond(
            math_ops.greater(.5, random_ops.random_uniform([])),
            lambda: constant_op.constant(1.0),
            lambda: constant_op.constant(2.0))
    ]
    accept_prob_fn = lambda x: x[0] - 1.0
    batch_size = 10
    # Set up graph.
    sample = sampling_ops.rejection_sample(tensor_list, accept_prob_fn,
                                           batch_size)
    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(coord=coord)
      for _ in range(5):
        sample_np = sess.run(sample)[0]
        self.assertListEqual([2.0] * batch_size, list(sample_np))
      coord.request_stop()
      coord.join(threads)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| |
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Repository module to handle different types of repositories."""
from __future__ import print_function
import constants
import os
import re
import shutil
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import git
from chromite.lib import osutils
from chromite.lib import path_util
from chromite.lib import retry_util
from chromite.lib import rewrite_git_alternates
# File that marks a buildroot as being used by a trybot
_TRYBOT_MARKER = '.trybot'


class SrcCheckOutException(Exception):
  """Raised when syncing sources fails."""
def IsARepoRoot(directory):
  """Return whether |directory| is the root of a repo checkout."""
  # A repo root is identified by the presence of a .repo entry.
  marker = os.path.join(directory, '.repo')
  return os.path.exists(marker)
def IsInternalRepoCheckout(root):
  """Returns whether root houses an internal 'repo' checkout.

  Compares the basename (sans extension) of the manifest remote URL against
  that of constants.MANIFEST_INT_URL.
  """
  manifest_dir = os.path.join(root, '.repo', 'manifests')
  manifest_url = git.RunGit(
      manifest_dir, ['config', 'remote.origin.url']).output.strip()
  return (os.path.splitext(os.path.basename(manifest_url))[0]
          == os.path.splitext(os.path.basename(constants.MANIFEST_INT_URL))[0])
def CloneGitRepo(working_dir, repo_url, reference=None, bare=False,
                 mirror=False, depth=None, branch=None, single_branch=False):
  """Clone given git repo

  Args:
    working_dir: location where it should be cloned to
    repo_url: git repo to clone
    reference: If given, pathway to a git repository to access git objects
      from.  Note that the reference must exist as long as the newly created
      repo is to be usable.
    bare: Clone a bare checkout.
    mirror: Clone a mirror checkout.
    depth: If given, do a shallow clone limiting the objects pulled to just
      that # of revs of history.  This option is mutually exclusive to
      reference.
    branch: If given, clone the given branch from the parent repository.
    single_branch: Clone only the requested branch.
  """
  osutils.SafeMakedirs(working_dir)
  cmd = ['clone', repo_url, working_dir]
  if reference:
    if depth:
      raise ValueError("reference and depth are mutually exclusive "
                       "options; please pick one or the other.")
    cmd += ['--reference', reference]
  if bare:
    cmd += ['--bare']
  if mirror:
    cmd += ['--mirror']
  if depth:
    # int() both validates and normalizes the value before str().
    cmd += ['--depth', str(int(depth))]
  if branch:
    cmd += ['--branch', branch]
  if single_branch:
    cmd += ['--single-branch']
  git.RunGit(working_dir, cmd)
def UpdateGitRepo(working_dir, repo_url, **kwargs):
  """Update the given git repo, blowing away any local changes.

  If the repo does not exist, clone it from scratch.

  Args:
    working_dir: location where it should be cloned to
    repo_url: git repo to clone
    **kwargs: See CloneGitRepo.
  """
  assert not kwargs.get('bare'), 'Bare checkouts are not supported'
  if git.IsGitRepo(working_dir):
    try:
      git.CleanAndCheckoutUpstream(working_dir)
    except cros_build_lib.RunCommandError:
      # Recovery path: an unupdatable checkout is wiped and re-cloned.
      logging.warning('Could not update %s', working_dir, exc_info=True)
      shutil.rmtree(working_dir)
      CloneGitRepo(working_dir, repo_url, **kwargs)
  else:
    CloneGitRepo(working_dir, repo_url, **kwargs)
def GetTrybotMarkerPath(buildroot):
  """Get path to trybot marker file given the buildroot."""
  return os.path.join(buildroot, _TRYBOT_MARKER)


def CreateTrybotMarker(buildroot):
  """Create the file that identifies a buildroot as being used by a trybot."""
  osutils.WriteFile(GetTrybotMarkerPath(buildroot), '')
def ClearBuildRoot(buildroot, preserve_paths=()):
  """Remove and recreate the buildroot while preserving the trybot marker."""
  # Remember whether the marker existed so we can recreate it afterwards.
  marker_exists = os.path.exists(GetTrybotMarkerPath(buildroot))
  if os.path.exists(buildroot):
    # Delete every top-level entry except the preserved names, via
    # find(1): `! -name A -a ! -name B ... -exec rm -rf {} +`.
    find_cmd = ['find', buildroot, '-mindepth', '1', '-maxdepth', '1']
    filters = []
    for name in preserve_paths:
      if filters:
        filters.append('-a')
      filters += ['!', '-name', name]
    find_cmd += filters + ['-exec', 'rm', '-rf', '{}', '+']
    cros_build_lib.SudoRunCommand(find_cmd)
  else:
    os.makedirs(buildroot)
  if marker_exists:
    CreateTrybotMarker(buildroot)
def PrepManifestForRepo(git_repo, manifest):
  """Use this to store a local manifest in a git repo suitable for repo.

  The repo tool can only fetch manifests from git repositories. So, to use
  a local manifest file as the basis for a checkout, it must be checked into
  a local git repository.

  Common Usage:
    manifest = CreateOrFetchWondrousManifest()
    with osutils.TempDir() as manifest_git_dir:
      PrepManifestForRepo(manifest_git_dir, manifest)
      repo = RepoRepository(manifest_git_dir, repo_dir)
      repo.Sync()

  Args:
    git_repo: Path at which to create the git repository (directory created, if
      needed). If a tempdir, then cleanup is owned by the caller.
    manifest: Path to existing manifest file to copy into the new git
      repository.
  """
  if not git.IsGitRepo(git_repo):
    git.Init(git_repo)
  # The manifest is installed under repo's default name so `repo init`
  # finds it without extra flags.
  new_manifest = os.path.join(git_repo, constants.DEFAULT_MANIFEST)
  shutil.copyfile(manifest, new_manifest)
  git.AddPath(new_manifest)
  message = 'Local repository holding: %s' % manifest
  # Commit new manifest. allow_empty in case it's the same as last manifest.
  git.Commit(git_repo, message, allow_empty=True)
class RepoRepository(object):
"""A Class that encapsulates a repo repository."""
# If a repo hasn't been used in the last 5 runs, wipe it.
LRU_THRESHOLD = 5
  def __init__(self, manifest_repo_url, directory, branch=None,
               referenced_repo=None, manifest=constants.DEFAULT_MANIFEST,
               depth=None, repo_url=constants.REPO_URL, repo_branch=None,
               groups=None, repo_cmd='repo'):
    """Initialize.

    Args:
      manifest_repo_url: URL to fetch repo manifest from.
      directory: local path where to checkout the repository.
      branch: Branch to check out the manifest at.
      referenced_repo: Repository to reference for git objects, if possible.
      manifest: Which manifest.xml within the branch to use.  Effectively
        default.xml if not given.
      depth: Mutually exclusive option to referenced_repo; this limits the
        checkout to a max commit history of the given integer.
      repo_url: URL to fetch repo tool from.
      repo_branch: Branch to check out the repo tool at.
      groups: Only sync projects that match this filter.
      repo_cmd: Name of repo_cmd to use.

    Raises:
      ValueError: if both referenced_repo and depth are given, or if
        directory is inside (but not the root of) an existing checkout.
    """
    self.manifest_repo_url = manifest_repo_url
    self.repo_url = repo_url
    self.repo_branch = repo_branch
    self.directory = directory
    self.branch = branch
    self.groups = groups
    self.repo_cmd = repo_cmd
    # It's perfectly acceptable to pass in a reference pathway that isn't
    # usable.  Detect it, and suppress the setting so that any depth
    # settings aren't disabled due to it.
    if referenced_repo is not None:
      if depth is not None:
        raise ValueError("referenced_repo and depth are mutually exclusive "
                         "options; please pick one or the other.")
      if not IsARepoRoot(referenced_repo):
        referenced_repo = None
    self._referenced_repo = referenced_repo
    self._manifest = manifest
    # If the repo exists already, force a selfupdate as the first step.
    self._repo_update_needed = IsARepoRoot(self.directory)
    if not self._repo_update_needed and git.FindRepoDir(self.directory):
      raise ValueError('Given directory %s is not the root of a repository.'
                       % self.directory)
    self._depth = int(depth) if depth is not None else None
  def _SwitchToLocalManifest(self, local_manifest):
    """Reinitializes the repository if the manifest has changed."""
    logging.debug('Moving to manifest defined by %s', local_manifest)
    # TODO: use upstream repo's manifest logic when we bump repo version.
    manifest_path = self.GetRelativePath('.repo/manifest.xml')
    # Remove first: manifest.xml may be a symlink that copyfile would follow.
    os.unlink(manifest_path)
    shutil.copyfile(local_manifest, manifest_path)
  def Initialize(self, local_manifest=None, extra_args=()):
    """Initializes a repository.  Optionally forces a local manifest.

    Args:
      local_manifest: The absolute path to a custom manifest to use.  This
        will replace .repo/manifest.xml.
      extra_args: Extra args to pass to 'repo init'
    """
    # Do a sanity check on the repo; if it exists and we can't pull a
    # manifest from it, we know it's fairly screwed up and needs a fresh
    # rebuild.
    if os.path.exists(os.path.join(self.directory, '.repo', 'manifest.xml')):
      cmd = [self.repo_cmd, 'manifest']
      try:
        cros_build_lib.RunCommand(cmd, cwd=self.directory, capture_output=True)
      except cros_build_lib.RunCommandError:
        logging.warning("Wiping %r due to `repo manifest` failure",
                        self.directory)
        paths = [os.path.join(self.directory, '.repo', x) for x in
                 ('manifest.xml', 'manifests.git', 'manifests', 'repo')]
        cros_build_lib.SudoRunCommand(['rm', '-rf'] + paths)
        self._repo_update_needed = False
    # Wipe local_manifest.xml if it exists- it can interfere w/ things in
    # bad ways (duplicate projects, etc); we control this repository, thus
    # we can destroy it.
    osutils.SafeUnlink(os.path.join(self.directory, 'local_manifest.xml'))
    # Force a repo self update first; during reinit, repo doesn't do the
    # update itself, but we could be doing the init on a repo version less
    # then v1.9.4, which didn't have proper support for doing reinit that
    # involved changing the manifest branch in use; thus selfupdate.
    # Additionally, if the self update fails for *any* reason, wipe the repo
    # innards and force repo init to redownload it; same end result, just
    # less efficient.
    # Additionally, note that this method may be called multiple times;
    # thus code appropriately.
    if self._repo_update_needed:
      cmd = [self.repo_cmd, 'selfupdate']
      try:
        cros_build_lib.RunCommand(cmd, cwd=self.directory)
      except cros_build_lib.RunCommandError:
        osutils.RmDir(os.path.join(self.directory, '.repo', 'repo'),
                      ignore_missing=True)
      self._repo_update_needed = False
    # Use our own repo, in case android.kernel.org (the default location) is
    # down.
    init_cmd = [self.repo_cmd, 'init',
                '--repo-url', self.repo_url,
                '--manifest-url', self.manifest_repo_url]
    if self._referenced_repo:
      init_cmd.extend(['--reference', self._referenced_repo])
    if self._manifest:
      init_cmd.extend(['--manifest-name', self._manifest])
    if self._depth is not None:
      init_cmd.extend(['--depth', str(self._depth)])
    init_cmd.extend(extra_args)
    # Handle branch / manifest options.
    if self.branch:
      init_cmd.extend(['--manifest-branch', self.branch])
    if self.repo_branch:
      init_cmd.extend(['--repo-branch', self.repo_branch])
    if self.groups:
      init_cmd.extend(['--groups', self.groups])
    # input feeds repo init's interactive prompts (accept defaults / yes).
    cros_build_lib.RunCommand(init_cmd, cwd=self.directory, input='\n\ny\n')
    if local_manifest and local_manifest != self._manifest:
      self._SwitchToLocalManifest(local_manifest)
@property
def _ManifestConfig(self):
return os.path.join(self.directory, '.repo', 'manifests.git', 'config')
def _EnsureMirroring(self, post_sync=False):
    """Ensure git is usable from w/in the chroot if --references is enabled.

    repo init --references hardcodes the abspath to parent; this pathway
    however isn't usable from the chroot (it doesn't exist).  As such the
    pathway is rewritten to use relative pathways pointing at the root of
    the repo, which via I84988630 enter_chroot sets up a helper bind mount
    allowing git/repo to access the actual referenced repo.

    This has to be invoked prior to a repo sync of the target trybot to
    fix any pathways that may have been broken by the parent repo moving
    on disk, and needs to be invoked after the sync has completed to rewrite
    any new project's abspath to relative.

    Args:
      post_sync: If True, additionally rewrite using the chroot-visible path
        so the checkout works from inside the chroot.
    """
    reference = self._referenced_repo
    if not reference:
        return
    # Nothing to rewrite until the first sync has created project-objects.
    projects_dir = os.path.join(self.directory, '.repo', 'project-objects')
    if not os.path.exists(projects_dir):
        return
    rewrite_git_alternates.RebuildRepoCheckout(self.directory, reference)
    if post_sync:
        chroot_reference = path_util.ToChrootPath(
            os.path.join(reference, '.repo', 'chroot', 'external'))
        rewrite_git_alternates.RebuildRepoCheckout(
            self.directory, reference, chroot_reference)
    # Finally, force the git config marker that enter_chroot looks for
    # to know when to do bind mounting trickery; this normally will exist,
    # but if we're converting a pre-existing repo checkout, it's possible
    # that it was invoked w/out the reference arg.  Note this must be
    # an absolute path to the source repo- enter_chroot uses that to know
    # what to bind mount into the chroot.
    git.RunGit('.', ['config', '--file', self._ManifestConfig,
                     'repo.reference', reference])
def Sync(self, local_manifest=None, jobs=None, all_branches=True,
         network_only=False):
    """Sync/update the source.  Changes manifest if specified.

    Performs a network fetch first (retried), then the local checkout; if
    the local half fails (e.g. git submodule transition issues) the project
    directories are wiped and the local sync is retried once.

    Args:
      local_manifest: If true, checks out source to manifest.  DEFAULT_MANIFEST
        may be used to set it back to the default manifest.
      jobs: May be set to override the default sync parallelism defined by
        the manifest.
      all_branches: If False, a repo sync -c is performed; this saves on
        sync'ing via grabbing only what is needed for the manifest specified
        branch.  Defaults to True.  TODO(davidjames): Set the default back to
        False once we've fixed http://crbug.com/368722 .
      network_only: If true, perform only the network half of the sync; skip
        the checkout.  Primarily of use to validate a manifest (although
        if the manifest has bad copyfile statements, via skipping checkout
        the broken copyfile tag won't be spotted), or of use when the
        invoking code is fine w/ operating on bare repos, ie .repo/projects/*.

    Raises:
      SrcCheckOutException: On any RunCommandError from the underlying
        repo invocations.
    """
    try:
        # Always re-initialize to the current branch.
        self.Initialize(local_manifest)
        # Fix existing broken mirroring configurations.
        self._EnsureMirroring()
        cmd = [self.repo_cmd, '--time', 'sync']
        if jobs:
            cmd += ['--jobs', str(jobs)]
        if not all_branches or self._depth is not None:
            # Note that this option can break kernel checkouts. crbug.com/464536
            cmd.append('-c')
        # Do the network half of the sync; retry as necessary to get
        # the content.
        retry_util.RunCommandWithRetries(constants.SYNC_RETRIES, cmd + ['-n'],
                                         cwd=self.directory)
        if network_only:
            return
        # Do the local sync; note that there is a couple of corner cases where
        # the new manifest cannot transition from the old checkout cleanly-
        # primarily involving git submodules.  Thus we intercept, and do
        # a forced wipe, then a retry.
        try:
            cros_build_lib.RunCommand(cmd + ['-l'], cwd=self.directory)
        except cros_build_lib.RunCommandError:
            manifest = git.ManifestCheckout.Cached(self.directory)
            # Only the top-level directory of each checkout path is wiped.
            targets = set(project['path'].split('/', 1)[0]
                          for project in manifest.ListCheckouts())
            if not targets:
                # No directories to wipe, thus nothing we can fix.
                raise
            cros_build_lib.SudoRunCommand(['rm', '-rf'] + sorted(targets),
                                          cwd=self.directory)
            # Retry the sync now; if it fails, let the exception propagate.
            cros_build_lib.RunCommand(cmd + ['-l'], cwd=self.directory)
        # We do a second run to fix any new repositories created by repo to
        # use relative object pathways.  Note that cros_sdk also triggers the
        # same cleanup- we however kick it erring on the side of caution.
        self._EnsureMirroring(True)
        self._DoCleanup()
    except cros_build_lib.RunCommandError as e:
        err_msg = e.Stringify(error=False, output=False)
        logging.error(err_msg)
        raise SrcCheckOutException(err_msg)
def _DoCleanup(self):
    """Wipe unused repositories.

    Maintains an LRU-style age file (.repo/project.lru): each project's age
    is incremented every sync, reset to 0 if the project appears in the
    current manifest, and the project is deleted once its age exceeds
    LRU_THRESHOLD.
    """
    # NOTE: this method relies on Python 2 semantics (dict.iteritems and
    # eager map() used for its side effect).
    # Find all projects, even if they're not in the manifest.  Note the find
    # trickery this is done to keep it as fast as possible.
    repo_path = os.path.join(self.directory, '.repo', 'projects')
    current = set(cros_build_lib.RunCommand(
        ['find', repo_path, '-type', 'd', '-name', '*.git', '-printf', '%P\n',
         '-a', '!', '-wholename', '*.git/*', '-prune'],
        print_cmd=False, capture_output=True).output.splitlines())
    # Every project found on disk starts at age 0.
    data = {}.fromkeys(current, 0)
    path = os.path.join(self.directory, '.repo', 'project.lru')
    if os.path.exists(path):
        # Merge in recorded ages, dropping entries for projects that no
        # longer exist on disk.
        existing = [x.strip().split(None, 1)
                    for x in osutils.ReadFile(path).splitlines()]
        data.update((k, int(v)) for k, v in existing if k in current)
    # Increment it all...
    data.update((k, v + 1) for k, v in data.iteritems())
    # Zero out what is now used.
    checkouts = git.ManifestCheckout.Cached(self.directory).ListCheckouts()
    data.update(('%s.git' % x['path'], 0) for x in checkouts)
    # Finally... wipe anything that's greater than our threshold.
    wipes = [k for k, v in data.iteritems() if v > self.LRU_THRESHOLD]
    if wipes:
        cros_build_lib.SudoRunCommand(
            ['rm', '-rf'] + [os.path.join(repo_path, proj) for proj in wipes])
        # Drop the wiped entries from the age map (Py2: map() is eager).
        map(data.pop, wipes)
    # Persist the updated ages for the next sync.
    osutils.WriteFile(path, "\n".join('%s %i' % x for x in data.iteritems()))
def GetRelativePath(self, path):
    """Returns full path including source directory of path in repo."""
    full_path = os.path.join(self.directory, path)
    return full_path
def ExportManifest(self, mark_revision=False, revisions=True):
    """Export the revision locked manifest.

    Args:
      mark_revision: If True, then the sha1 of manifest.git is recorded
        into the resultant manifest tag as a version attribute.
        Specifically, if manifests.git is at 1234, <manifest> becomes
        <manifest revision="1234">.
      revisions: If True, then rewrite all branches/tags into a specific
        sha1 revision.  If False, don't.

    Returns:
      The manifest as a string.
    """
    manifest_cmd = [self.repo_cmd, 'manifest', '-o', '-']
    if revisions:
        manifest_cmd.append('-r')
    result = cros_build_lib.RunCommand(
        manifest_cmd, cwd=self.directory, print_cmd=False,
        capture_output=True, extra_env={'PAGER': 'cat'})
    manifest_xml = result.output
    if not mark_revision:
        return manifest_xml
    # Record the manifests.git HEAD sha1 as a revision attribute.
    rev_list = git.RunGit(os.path.join(self.directory, '.repo/manifests'),
                          ['rev-list', '-n1', 'HEAD'])
    assert rev_list.output
    revision_tag = '<manifest revision="%s">' % rev_list.output.strip()
    return manifest_xml.replace('<manifest>', revision_tag)
def IsManifestDifferent(self, other_manifest):
    """Checks whether this manifest is different than another.

    May blacklists certain repos as part of the diff.

    Args:
      other_manifest: Second manifest file to compare against.

    Returns:
      True: If the manifests are different
      False: If the manifests are same
    """
    # BUG FIX: plain zip() stops at the shorter sequence, so a manifest that
    # was a strict prefix of the other compared as equal.  zip_longest with
    # an empty-string fill value makes extra lines on either side count as a
    # difference.  (izip_longest is the Python 2 spelling.)
    try:
        from itertools import izip_longest as zip_longest
    except ImportError:
        from itertools import zip_longest
    logging.debug('Calling IsManifestDifferent against %s', other_manifest)
    black_list = ['="chromium/']
    blacklist_pattern = re.compile(r'|'.join(black_list))
    manifest_revision_pattern = re.compile(r'<manifest revision="[a-f0-9]+">',
                                           re.I)
    current = self.ExportManifest()
    with open(other_manifest, 'r') as manifest2_fh:
        for (line1, line2) in zip_longest(current.splitlines(), manifest2_fh,
                                          fillvalue=''):
            line1 = line1.strip()
            line2 = line2.strip()
            if blacklist_pattern.search(line1):
                logging.debug('%s ignored %s', line1, line2)
                continue
            if line1 != line2:
                logging.debug('Current and other manifest differ.')
                logging.debug('current: "%s"', line1)
                logging.debug('other  : "%s"', line2)
                # Ignore revision differences on the manifest line.  The
                # revision of the manifest.git repo is uninteresting when
                # determining if the current manifest describes the same
                # sources as the other manifest.
                if manifest_revision_pattern.search(line2):
                    logging.debug('Ignoring difference in manifest revision.')
                    continue
                return True
    return False
| |
import angr
import claripy
from angr.sim_type import SimTypeString, SimTypeInt
from angr.errors import SimProcedureError
import logging
l = logging.getLogger("angr.procedures.libc.strtol")
# note: this does not handle skipping white space
class strtol(angr.SimProcedure):
    """Symbolic model of libc strtol.

    Builds symbolic expressions covering every feasible parse (prefix,
    length, and — for base 0 — the detected base) rather than concretizing
    the input string.
    """

    @staticmethod
    def strtol_inner(s, state, region, base, signed, read_length=None):
        """
        :param s: the string address/offset
        :param state: SimState
        :param region: memory, file, etc
        :param base: the base to use to interpret the number
            note: all numbers may start with +/- and base 16 may start with 0x
        :param signed: boolean, true means the result will be signed,
            otherwise unsigned
        :param read_length: int, the number of bytes parsed in strtol
        :return: expression, value, num_bytes
            the returned expression is a symbolic boolean indicating success,
            value will be set to 0 on failure
            value is the returned value (set to min/max on overflow)
            num_bytes is the number of bytes read in the string
        """
        # sanity check
        if base < 2 or base > 36:
            raise SimProcedureError("base should be in the range [2,36]")
        # order matters here since we will use an if then else tree, and -0x
        # will have precedence over -
        prefixes = ["-", "+", ""]
        if base == 16:
            prefixes = ["0x", "-0x", "+0x"] + prefixes
        cases = []
        conditions = []
        possible_num_bytes = []
        # One (condition, value, num_bytes) triple per possible prefix.
        for prefix in prefixes:
            condition, value, num_bytes = strtol._load_num_with_prefix(prefix, s, region, state, base, signed, read_length)
            conditions.append(condition)
            cases.append((condition, value))
            possible_num_bytes.append(num_bytes)
        # only one of the cases needed to match
        result = state.se.ite_cases(cases[:-1], cases[-1][1])
        expression = state.se.Or(*conditions)
        num_bytes = state.se.ite_cases(zip(conditions, possible_num_bytes), 0)
        return expression, result, num_bytes

    @staticmethod
    def _load_num_with_prefix(prefix, addr, region, state, base, signed, read_length=None):
        """
        loads a number from addr, and returns a condition that addr must
        start with the prefix
        """
        length = len(prefix)
        # NOTE(review): a read_length of 0 is falsy, so it falls through to
        # None (unbounded read) here — confirm callers never pass 0.
        read_length = (read_length-length) if read_length else None
        condition, value, num_bytes = strtol._string_to_int(addr+length, state, region, base, signed, read_length)
        # the prefix must match
        if len(prefix) > 0:
            loaded_prefix = region.load(addr, length)
            condition = state.se.And(loaded_prefix == state.se.BVV(prefix), condition)
        total_num_bytes = num_bytes + length
        # negatives: two's-complement negate the parsed magnitude
        if prefix.startswith("-"):
            value = state.se.BVV(0, state.arch.bits) - value
        return condition, value, total_num_bytes

    @staticmethod
    def _string_to_int(s, state, region, base, signed, read_length=None):
        """
        reads values from s and generates the symbolic number that it would
        equal; the first char is either a number in the given base, or the
        result is 0.  expression indicates whether or not it was successful
        """
        # if length wasn't provided, read the maximum bytes
        cutoff = (read_length == None)
        length = state.libc.max_strtol_len if cutoff else read_length
        # expression whether or not it was valid at all (first char only)
        expression, _ = strtol._char_to_val(region.load(s, 1), base)
        cases = []
        # to detect overflows we keep it in a larger bv and extract it at
        # the end
        num_bits = min(state.arch.bits*2, 128)
        current_val = state.se.BVV(0, num_bits)
        # num_bytes is a free symbol; constrained below to the position of
        # the first invalid character.
        num_bytes = state.se.BVS("num_bytes", state.arch.bits)
        constraints_num_bytes = []
        conditions = []
        # we need all the conditions to hold except the last one to have
        # found a value
        for i in range(length):
            char = region.load(s + i, 1)
            condition, value = strtol._char_to_val(char, base)
            # if it was the end we'll get the current val
            cases.append((num_bytes == i, current_val))
            case_constraints = conditions + [state.se.Not(condition)] + [num_bytes == i]
            constraints_num_bytes.append(state.se.And(*case_constraints))
            # add the value and the condition
            current_val = current_val*base + value.zero_extend(num_bits-8)
            conditions.append(condition)
        # the last one is unterminated, let's ignore it
        if not cutoff:
            cases.append((num_bytes == length, current_val))
            case_constraints = conditions + [num_bytes == length]
            constraints_num_bytes.append(state.se.And(*case_constraints))
        # only one of the constraints need to hold
        # since the constraints look like (num_bytes == 2 and the first 2
        # chars are valid, and the 3rd isn't)
        state.add_constraints(state.se.Or(*constraints_num_bytes))
        result = state.se.ite_cases(cases, 0)
        # overflow check: saturate to max_val and truncate back to arch width
        max_bits = state.arch.bits-1 if signed else state.arch.bits
        max_val = 2**max_bits - 1
        result = state.se.If(result < max_val, state.se.Extract(state.arch.bits-1, 0, result),
                             state.se.BVV(max_val, state.arch.bits))
        return expression, result, num_bytes

    @staticmethod
    def _char_to_val(char, base):
        """
        converts a symbolic char to a number in the given base
        returns expression, result
        expression is a symbolic boolean indicating whether or not it was a
        valid number, result is the value
        """
        cases = []
        # 0-9
        max_digit = claripy.BVV("9", 8)
        min_digit = claripy.BVV("0", 8)
        if base < 10:
            # NOTE(review): chr(ord("0") + base) with char <= max_digit
            # appears to admit the digit equal to the base itself (e.g. '8'
            # in octal) — looks off-by-one; confirm against libc strtol.
            max_digit = claripy.BVV(chr(ord("0") + base), 8)
        is_digit = claripy.And(char >= min_digit, char <= max_digit)
        # return early here so we don't add unnecessary statements
        if base <= 10:
            return is_digit, char - min_digit
        # handle alphabetic chars
        max_char_lower = claripy.BVV(chr(ord("a") + base-10 - 1), 8)
        max_char_upper = claripy.BVV(chr(ord("A") + base-10 - 1), 8)
        min_char_lower = claripy.BVV(chr(ord("a")), 8)
        min_char_upper = claripy.BVV(chr(ord("A")), 8)
        cases.append((is_digit, char - min_digit))
        is_alpha_lower = claripy.And(char >= min_char_lower, char <= max_char_lower)
        cases.append((is_alpha_lower, char - min_char_lower + 10))
        is_alpha_upper = claripy.And(char >= min_char_upper, char <= max_char_upper)
        cases.append((is_alpha_upper, char - min_char_upper + 10))
        expression = claripy.Or(is_digit, is_alpha_lower, is_alpha_upper)
        # use the last case as the default, the expression will encode
        # whether or not it's satisfiable
        result = claripy.ite_cases(cases[:-1], cases[-1][1])
        return expression, result

    def run(self, nptr, endptr, base):
        self.argument_types = {0: self.ty_ptr(SimTypeString()),
                               1: self.ty_ptr(self.ty_ptr(SimTypeString())),
                               2: SimTypeInt(self.state.arch, True)}
        self.return_type = SimTypeInt(self.state.arch, True)
        # A symbolic base would blow up the case analysis; pin it.
        if self.state.se.symbolic(base):
            l.warning("Concretizing symbolic base in strtol")
            base_concrete = self.state.se.eval(base)
            self.state.add_constraints(base == base_concrete)
            base = self.state.se.eval(base)
        if base == 0:
            # in this case the base is 16 if it starts with 0x, 8 if it
            # starts with 0, 10 otherwise; here's the possibilities
            base_16_pred = self.state.se.Or(
                self.state.memory.load(nptr, 2) == self.state.se.BVV("0x"),
                self.state.memory.load(nptr, 3) == self.state.se.BVV("+0x"),
                self.state.memory.load(nptr, 3) == self.state.se.BVV("-0x"))
            base_8_pred = self.state.se.And(
                self.state.se.Or(
                    self.state.memory.load(nptr, 1) == self.state.se.BVV("0"),
                    self.state.memory.load(nptr, 2) == self.state.se.BVV("+0"),
                    self.state.memory.load(nptr, 2) == self.state.se.BVV("-0")),
                self.state.se.Not(base_16_pred))
            base_10_pred = self.state.se.And(
                self.state.se.Not(base_16_pred),
                self.state.se.Not(base_8_pred)
            )
            expressions = []
            values = []
            num_bytes_arr = []
            # read a string to long for each possibility
            pred_base = zip([base_16_pred, base_10_pred, base_8_pred], [16, 10, 8])
            for pred, base in pred_base:
                expression, value, num_bytes = self.strtol_inner(nptr, self.state, self.state.memory, base, True)
                # a parse only counts when its base predicate also holds
                expressions.append(self.state.se.And(expression, pred))
                values.append(value)
                num_bytes_arr.append(num_bytes)
            # we would return the Or(expressions) as the indicator whether or
            # not it succeeded, but it's not needed for strtol
            # expression = self.state.se.Or(expressions)
            value = self.state.se.ite_cases(zip(expressions, values), 0)
            num_bytes = self.state.se.ite_cases(zip(expressions, num_bytes_arr), 0)
            # *endptr = nptr + num_bytes, but only when endptr is non-NULL
            self.state.memory.store(endptr, nptr+num_bytes,
                                    condition=(endptr != 0), endness=self.state.arch.memory_endness)
            return value
        expression, value, num_bytes = self.strtol_inner(nptr, self.state, self.state.memory, base, True)
        self.state.memory.store(endptr, nptr+num_bytes, condition=(endptr != 0), endness=self.state.arch.memory_endness)
        # on failure strtol returns 0
        return self.state.se.If(expression, value, 0)
| |
"""Utilities for fast persistence of big data, with optional compression."""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import pickle
import os
import sys
import warnings
try:
from pathlib import Path
except ImportError:
Path = None
from .compressor import lz4, LZ4_NOT_INSTALLED_ERROR
from .compressor import _COMPRESSORS, register_compressor, BinaryZlibFile
from .compressor import (ZlibCompressorWrapper, GzipCompressorWrapper,
BZ2CompressorWrapper, LZMACompressorWrapper,
XZCompressorWrapper, LZ4CompressorWrapper)
from .numpy_pickle_utils import Unpickler, Pickler
from .numpy_pickle_utils import _read_fileobject, _write_fileobject
from .numpy_pickle_utils import _read_bytes, BUFFER_SIZE
from .numpy_pickle_compat import load_compatibility
from .numpy_pickle_compat import NDArrayWrapper
# For compatibility with old versions of joblib, we need ZNDArrayWrapper
# to be visible in the current namespace.
# Explicitly skipping next line from flake8 as it triggers an F401 warning
# which we don't care.
from .numpy_pickle_compat import ZNDArrayWrapper # noqa
from ._compat import _basestring, PY3_OR_LATER
from .backports import make_memmap
# Register supported compressors.
# NOTE: runs at import time and populates the shared _COMPRESSORS registry
# (imported above) that dump()/load() consult for name- and file-extension-
# based compressor lookup.
register_compressor('zlib', ZlibCompressorWrapper())
register_compressor('gzip', GzipCompressorWrapper())
register_compressor('bz2', BZ2CompressorWrapper())
register_compressor('lzma', LZMACompressorWrapper())
register_compressor('xz', XZCompressorWrapper())
register_compressor('lz4', LZ4CompressorWrapper())
###############################################################################
# Utility objects for persistence.
class NumpyArrayWrapper(object):
    """An object to be persisted instead of numpy arrays.

    This object is used to hack into the pickle machinery and read numpy
    array data from our custom persistence format.

    More precisely, this object is used for:
    * carrying the information of the persisted array: subclass, shape, order,
      dtype. Those ndarray metadata are used to correctly reconstruct the
      array with low level numpy functions.
    * determining if memmap is allowed on the array.
    * reading the array bytes from a file.
    * reading the array using memorymap from a file.
    * writing the array bytes to a file.

    Attributes
    ----------
    subclass: numpy.ndarray subclass
        Determine the subclass of the wrapped array.
    shape: numpy.ndarray shape
        Determine the shape of the wrapped array.
    order: {'C', 'F'}
        Determine the order of wrapped array data. 'C' is for C order, 'F' is
        for fortran order.
    dtype: numpy.ndarray dtype
        Determine the data type of the wrapped array.
    allow_mmap: bool
        Determine if memory mapping is allowed on the wrapped array.
        Default: False.
    """

    def __init__(self, subclass, shape, order, dtype, allow_mmap=False):
        """Constructor. Store the useful information for later."""
        self.subclass = subclass
        self.shape = shape
        self.order = order
        self.dtype = dtype
        self.allow_mmap = allow_mmap

    def write_array(self, array, pickler):
        """Write array bytes to pickler file handle.

        This function is an adaptation of the numpy write_array function
        available in version 1.10.1 in numpy/lib/format.py.
        """
        # Set buffer size to 16 MiB to hide the Python loop overhead.
        buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
        if array.dtype.hasobject:
            # We contain Python objects so we cannot write out the data
            # directly. Instead, we will pickle it out with version 2 of the
            # pickle protocol.
            pickle.dump(array, pickler.file_handle, protocol=2)
        else:
            # nditer streams the array in buffered chunks, following the
            # recorded memory order, without materializing a full copy.
            for chunk in pickler.np.nditer(array,
                                           flags=['external_loop',
                                                  'buffered',
                                                  'zerosize_ok'],
                                           buffersize=buffersize,
                                           order=self.order):
                pickler.file_handle.write(chunk.tostring('C'))

    def read_array(self, unpickler):
        """Read array from unpickler file handle.

        This function is an adaptation of the numpy read_array function
        available in version 1.10.1 in numpy/lib/format.py.
        """
        # A zero-dim array still carries exactly one element.
        if len(self.shape) == 0:
            count = 1
        else:
            count = unpickler.np.multiply.reduce(self.shape)
        # Now read the actual data.
        if self.dtype.hasobject:
            # The array contained Python objects. We need to unpickle the
            # data.
            array = pickle.load(unpickler.file_handle)
        else:
            if (not PY3_OR_LATER and
                    unpickler.np.compat.isfileobj(unpickler.file_handle)):
                # In python 2, gzip.GzipFile is considered as a file so one
                # can use numpy.fromfile().
                # For file objects, use np.fromfile function.
                # This function is faster than the memory-intensive
                # method below.
                array = unpickler.np.fromfile(unpickler.file_handle,
                                              dtype=self.dtype, count=count)
            else:
                # This is not a real file. We have to read it the
                # memory-intensive way.
                # crc32 module fails on reads greater than 2 ** 32 bytes,
                # breaking large reads from gzip streams. Chunk reads to
                # BUFFER_SIZE bytes to avoid issue and reduce memory overhead
                # of the read. In non-chunked case count < max_read_count, so
                # only one read is performed.
                max_read_count = BUFFER_SIZE // min(BUFFER_SIZE,
                                                    self.dtype.itemsize)
                array = unpickler.np.empty(count, dtype=self.dtype)
                for i in range(0, count, max_read_count):
                    read_count = min(max_read_count, count - i)
                    read_size = int(read_count * self.dtype.itemsize)
                    data = _read_bytes(unpickler.file_handle,
                                       read_size, "array data")
                    array[i:i + read_count] = \
                        unpickler.np.frombuffer(data, dtype=self.dtype,
                                                count=read_count)
                    # Free each chunk eagerly to keep peak memory down.
                    del data
            if self.order == 'F':
                # Data was written in Fortran order; reshape reversed then
                # transpose to recover the original layout without copying.
                array.shape = self.shape[::-1]
                array = array.transpose()
            else:
                array.shape = self.shape
        return array

    def read_mmap(self, unpickler):
        """Read an array using numpy memmap."""
        offset = unpickler.file_handle.tell()
        if unpickler.mmap_mode == 'w+':
            # 'w+' would truncate the file; downgrade to read-write.
            unpickler.mmap_mode = 'r+'
        marray = make_memmap(unpickler.filename,
                             dtype=self.dtype,
                             shape=self.shape,
                             order=self.order,
                             mode=unpickler.mmap_mode,
                             offset=offset)
        # update the offset so that it corresponds to the end of the read
        # array
        unpickler.file_handle.seek(offset + marray.nbytes)
        return marray

    def read(self, unpickler):
        """Read the array corresponding to this wrapper.

        Use the unpickler to get all information to correctly read the array.

        Parameters
        ----------
        unpickler: NumpyUnpickler

        Returns
        -------
        array: numpy.ndarray
        """
        # When requested, only use memmap mode if allowed.
        if unpickler.mmap_mode is not None and self.allow_mmap:
            array = self.read_mmap(unpickler)
        else:
            array = self.read_array(unpickler)
        # Manage array subclass case
        if (hasattr(array, '__array_prepare__') and
            self.subclass not in (unpickler.np.ndarray,
                                  unpickler.np.memmap)):
            # We need to reconstruct another subclass
            new_array = unpickler.np.core.multiarray._reconstruct(
                self.subclass, (0,), 'b')
            return new_array.__array_prepare__(array)
        else:
            return array
###############################################################################
# Pickler classes
class NumpyPickler(Pickler):
    """A pickler to persist big data efficiently.

    The main features of this object are:
    * persistence of numpy arrays in a single file.
    * optional compression with a special care on avoiding memory copies.

    Attributes
    ----------
    fp: file
        File object handle used for serializing the input object.
    protocol: int, optional
        Pickle protocol used. Default is pickle.DEFAULT_PROTOCOL under
        python 3, pickle.HIGHEST_PROTOCOL otherwise.
    """

    dispatch = Pickler.dispatch.copy()

    def __init__(self, fp, protocol=None):
        self.file_handle = fp
        self.buffered = isinstance(self.file_handle, BinaryZlibFile)
        if protocol is None:
            # Default to a protocol that only changes with the major python
            # version, not the minor one.
            protocol = (pickle.DEFAULT_PROTOCOL if PY3_OR_LATER
                        else pickle.HIGHEST_PROTOCOL)
        Pickler.__init__(self, self.file_handle, protocol=protocol)
        # numpy is imported lazily to avoid tight coupling.
        try:
            import numpy as np
        except ImportError:
            np = None
        self.np = np

    def _create_array_wrapper(self, array):
        """Create and returns a numpy array wrapper from a numpy array."""
        if array.flags.f_contiguous and not array.flags.c_contiguous:
            order = 'F'
        else:
            order = 'C'
        # Memmapping is pointless through a compressing stream, and
        # impossible for object arrays.
        allow_mmap = not self.buffered and not array.dtype.hasobject
        return NumpyArrayWrapper(type(array), array.shape, order,
                                 array.dtype, allow_mmap=allow_mmap)

    def save(self, obj):
        """Subclass the Pickler `save` method.

        This is a total abuse of the Pickler class in order to use the numpy
        persistence function `save` instead of the default pickle
        implementation. The numpy array is replaced by a custom wrapper in
        the pickle persistence stack and the serialized array is written
        right after in the file. Warning: the file produced does not follow
        the pickle format. As such it can not be read with `pickle.load`.
        """
        np = self.np
        handled_types = () if np is None else (np.ndarray, np.matrix,
                                               np.memmap)
        if type(obj) not in handled_types:
            # Anything that is not a plain numpy array goes through the
            # regular pickle machinery.
            return Pickler.save(self, obj)
        if type(obj) is np.memmap:
            # Pickling doesn't work with memmapped arrays.
            obj = np.asanyarray(obj)
        # The array wrapper is pickled instead of the real array.
        wrapper = self._create_array_wrapper(obj)
        Pickler.save(self, wrapper)
        if self.proto >= 4:
            # A framer was introduced with pickle protocol 4; flush it so
            # the wrapper object lands before the raw array bytes in the
            # stream. See https://www.python.org/dev/peps/pep-3154/#framing
            # for more information on the framer behavior.
            self.framer.commit_frame(force=True)
        # And then array bytes are written right after the wrapper.
        wrapper.write_array(obj, self)
class NumpyUnpickler(Unpickler):
    """A subclass of the Unpickler to unpickle our numpy pickles.

    Attributes
    ----------
    mmap_mode: str
        The memorymap mode to use for reading numpy arrays.
    file_handle: file_like
        File object to unpickle from.
    filename: str
        Name of the file to unpickle from. It should correspond to
        file_handle. This parameter is required when using mmap_mode.
    np: module
        Reference to numpy module if numpy is installed else None.
    """

    dispatch = Unpickler.dispatch.copy()

    def __init__(self, filename, file_handle, mmap_mode=None):
        # The next line is for backward compatibility with pickle generated
        # with joblib versions less than 0.10.
        self._dirname = os.path.dirname(filename)
        self.mmap_mode = mmap_mode
        self.file_handle = file_handle
        # filename is required for numpy mmap mode.
        self.filename = filename
        # Flipped to True by load_build() when a pre-0.10 NDArrayWrapper is
        # encountered; callers use it to emit a DeprecationWarning.
        self.compat_mode = False
        Unpickler.__init__(self, self.file_handle)
        # numpy is imported lazily to avoid a hard dependency.
        try:
            import numpy as np
        except ImportError:
            np = None
        self.np = np

    def load_build(self):
        """Called to set the state of a newly created object.

        We capture it to replace our place-holder objects, NDArrayWrapper or
        NumpyArrayWrapper, by the array we are interested in. We
        replace them directly in the stack of pickler.
        NDArrayWrapper is used for backward compatibility with joblib <= 0.9.
        """
        Unpickler.load_build(self)
        # For backward compatibility, we support NDArrayWrapper objects.
        if isinstance(self.stack[-1], (NDArrayWrapper, NumpyArrayWrapper)):
            if self.np is None:
                raise ImportError("Trying to unpickle an ndarray, "
                                  "but numpy didn't import correctly")
            array_wrapper = self.stack.pop()
            # If any NDArrayWrapper is found, we switch to compatibility
            # mode, this will be used to raise a DeprecationWarning to the
            # user at the end of the unpickling.
            if isinstance(array_wrapper, NDArrayWrapper):
                self.compat_mode = True
            # Replace the wrapper on the stack with the real array.
            self.stack.append(array_wrapper.read(self))

    # Be careful to register our new method.
    # Python 3's Unpickler dispatch is keyed on the opcode byte value,
    # Python 2's on the one-character opcode string.
    if PY3_OR_LATER:
        dispatch[pickle.BUILD[0]] = load_build
    else:
        dispatch[pickle.BUILD] = load_build
###############################################################################
# Utility functions
def dump(value, filename, compress=0, protocol=None, cache_size=None):
    """Persist an arbitrary Python object into one file.

    Read more in the :ref:`User Guide <persistence>`.

    Parameters
    -----------
    value: any Python object
        The object to store to disk.
    filename: str, pathlib.Path, or file object.
        The file object or path of the file in which it is to be stored.
        The compression method corresponding to one of the supported filename
        extensions ('.z', '.gz', '.bz2', '.xz' or '.lzma') will be used
        automatically.
    compress: int from 0 to 9 or bool or 2-tuple, optional
        Optional compression level for the data. 0 or False is no
        compression. Higher value means more compression, but also slower
        read and write times. Using a value of 3 is often a good compromise.
        See the notes for more details.
        If compress is True, the compression level used is 3.
        If compress is a 2-tuple, the first element must correspond to a
        string between supported compressors (e.g 'zlib', 'gzip', 'bz2',
        'lzma' 'xz'), the second element must be an integer from 0 to 9,
        corresponding to the compression level.
    protocol: int, optional
        Pickle protocol, see pickle.dump documentation for more details.
    cache_size: positive int, optional
        This option is deprecated in 0.10 and has no effect.

    Returns
    -------
    filenames: list of strings
        The list of file names in which the data is stored. If
        compress is false, each array is stored in a different file.

    See Also
    --------
    joblib.load : corresponding loader

    Notes
    -----
    Memmapping on load cannot be used for compressed files. Thus
    using compression can significantly slow down loading. In
    addition, compressed files take extra extra memory during
    dump and load.
    """
    # Normalize pathlib.Path to a plain string path.
    if Path is not None and isinstance(filename, Path):
        filename = str(filename)

    is_filename = isinstance(filename, _basestring)
    is_fileobj = hasattr(filename, "write")

    # --- Normalize the `compress` argument to (method, level). ---
    compress_method = 'zlib'  # zlib is the default compression method.
    if compress is True:
        # By default, if compress is enabled, we want the default compress
        # level of the compressor.
        compress_level = None
    elif isinstance(compress, tuple):
        # a 2-tuple was set in compress
        if len(compress) != 2:
            raise ValueError(
                'Compress argument tuple should contain exactly 2 elements: '
                '(compress method, compress level), you passed {}'
                .format(compress))
        compress_method, compress_level = compress
    elif isinstance(compress, _basestring):
        compress_method = compress
        compress_level = None  # Use default compress level
        compress = (compress_method, compress_level)
    else:
        compress_level = compress

    # LZ4 compression is only supported and installation checked with
    # python 3+.
    if compress_method == 'lz4' and lz4 is None and PY3_OR_LATER:
        raise ValueError(LZ4_NOT_INSTALLED_ERROR)

    if (compress_level is not None and
            compress_level is not False and
            compress_level not in range(10)):
        # Raising an error if a non valid compress level is given.
        raise ValueError(
            'Non valid compress level given: "{}". Possible values are '
            '{}.'.format(compress_level, list(range(10))))

    if compress_method not in _COMPRESSORS:
        # Raising an error if an unsupported compression method is given.
        raise ValueError(
            'Non valid compression method given: "{}". Possible values are '
            '{}.'.format(compress_method, _COMPRESSORS))

    if not is_filename and not is_fileobj:
        # People keep inverting arguments, and the resulting error is
        # incomprehensible
        raise ValueError(
            'Second argument should be a filename or a file-like object, '
            '%s (type %s) was given.'
            % (filename, type(filename))
        )

    if is_filename and not isinstance(compress, tuple):
        # In case no explicit compression was requested using both
        # compression method and level in a tuple and the filename has an
        # explicit extension, we select the corresponding compressor.

        # unset the variable to be sure no compression level is set
        # afterwards.
        compress_method = None
        for name, compressor in _COMPRESSORS.items():
            if filename.endswith(compressor.extension):
                compress_method = name

        if compress_method in _COMPRESSORS and compress_level == 0:
            # we choose the default compress_level in case it was not given
            # as an argument (using compress).
            compress_level = None

    if not PY3_OR_LATER and compress_method in ('lzma', 'xz'):
        raise NotImplementedError("{} compression is only available for "
                                  "python version >= 3.3. You are using "
                                  "{}.{}".format(compress_method,
                                                 sys.version_info[0],
                                                 sys.version_info[1]))

    if cache_size is not None:
        # Cache size is deprecated starting from version 0.10
        warnings.warn("Please do not set 'cache_size' in joblib.dump, "
                      "this parameter has no effect and will be removed. "
                      "You used 'cache_size={}'".format(cache_size),
                      DeprecationWarning, stacklevel=2)

    # --- Perform the actual dump, compressed or not. ---
    # NOTE: compress_level may be None here (compressor default), which is
    # != 0 and therefore takes the compressed branch.
    if compress_level != 0:
        with _write_fileobject(filename, compress=(compress_method,
                                                   compress_level)) as f:
            NumpyPickler(f, protocol=protocol).dump(value)
    elif is_filename:
        with open(filename, 'wb') as f:
            NumpyPickler(f, protocol=protocol).dump(value)
    else:
        NumpyPickler(filename, protocol=protocol).dump(value)

    # If the target container is a file object, nothing is returned.
    if is_fileobj:
        return

    # For compatibility, the list of created filenames (e.g with one element
    # after 0.10.0) is returned by default.
    return [filename]
def _unpickle(fobj, filename="", mmap_mode=None):
    """Load a pickled object from an already-open file handle.

    Parameters
    ----------
    fobj: file object
        Opened early and kept open so that a concurrent rename of the
        file cannot race with the read. Companion files produced by the
        old persistence format can still race if the containing
        directory is moved while loading.
    filename: str, optional
        Used for warning messages and by the unpickler to locate
        companion files of the legacy format.
    mmap_mode: str or None, optional
        Memory-mapping mode forwarded to NumpyUnpickler.
    """
    reader = NumpyUnpickler(filename, fobj, mmap_mode=mmap_mode)
    result = None
    try:
        result = reader.load()
        if reader.compat_mode:
            # Legacy (< 0.10) pickles are still readable but deprecated.
            warnings.warn("The file '%s' has been generated with a "
                          "joblib version less than 0.10. "
                          "Please regenerate this pickle file."
                          % filename,
                          DeprecationWarning, stacklevel=3)
    except UnicodeDecodeError as exc:
        if not PY3_OR_LATER:
            # Python 2: nothing helpful to add, propagate unchanged.
            raise
        # Python 3 reading a Python 2 pickle: raise a friendlier error.
        new_exc = ValueError(
            'You may be trying to read with '
            'python 3 a joblib pickle generated with python 2. '
            'This feature is not supported by joblib.')
        new_exc.__cause__ = exc
        raise new_exc
    return result
def load(filename, mmap_mode=None):
    """Rebuild a Python object persisted with :func:`joblib.dump`.

    Read more in the :ref:`User Guide <persistence>`.

    Parameters
    ----------
    filename: str, pathlib.Path, or file object.
        Path of the persisted object, or an already-open file object
        containing it.
    mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
        When given, numpy arrays stored in the file are memory-mapped
        from disk instead of loaded into memory. This mode has no
        effect for compressed files, and the reconstructed object may
        then no longer match exactly the originally pickled one.

    Returns
    -------
    result: any Python object
        The object stored in the file.

    See Also
    --------
    joblib.dump : function to save an object

    Notes
    -----
    Numpy arrays saved as separate companion files during the dump are
    loaded as well. With mmap_mode they come back as memmaps, so the
    result may not match the original pickled object exactly. Arrays
    stored in compressed files cannot be memory-mapped.
    """
    if Path is not None and isinstance(filename, Path):
        filename = str(filename)
    if hasattr(filename, "read"):
        # Caller handed us an open file-like object directly.
        stream = filename
        filename = getattr(stream, 'name', '')
        with _read_fileobject(stream, filename, mmap_mode) as handle:
            return _unpickle(handle)
    with open(filename, 'rb') as raw:
        with _read_fileobject(raw, filename, mmap_mode) as handle:
            if isinstance(handle, _basestring):
                # _read_fileobject handed back a path string: this pickle
                # predates joblib 0.10, so delegate to the compatibility
                # loader.
                return load_compatibility(handle)
            return _unpickle(handle, filename, mmap_mode)
| |
from . import gen
from coda import types
class Python3VisitorGenerator(gen.Python3Generator):
  '''Code generator that emits a Python 3 visitor class for each requested
     struct type. The roots of the visitor hierarchies are named via the
     'visitor' option (comma-separated, fully-qualified struct names); for
     each root a <Name>Visitor class is generated with one visit method per
     subtype and a typeId-based dispatch table.'''
  def __init__(self, options):
    super().__init__(options)
    # Fully-qualified names of the structs that should get a visitor class.
    self.visitorNames = set(cls.strip() for cls in options.getOption('visitor').split(','))
  def calcSourcePath(self, fd, options, decl):
    '''@type fd: coda.descriptors.FileDescriptor
       @type options: FileOptions
       Method to calculate the file path to write a descriptor to.'''
    return self.calcSourcePathWithoutExtension(fd, options, decl) + 'visitor.py'
  def genFilesForModule(self, fd, options):
    # Override to only generate a visitor if it contains one of the requested classes
    self.baseClasses = []
    def collectVisitorRoots(prefix, structs):
      # Recursively scan (nested) structs, collecting those whose qualified
      # name was listed in the 'visitor' option.
      for st in structs:
        name = prefix + '.' + st.getName()
        if name in self.visitorNames:
          self.baseClasses.append(st)
        collectVisitorRoots(name, st.getStructs())
    collectVisitorRoots(fd.getPackage(), fd.getStructs())
    if self.baseClasses:
      super().genFilesForModule(fd, options)
  def genImports(self, fd):
    '''@type fd: coda.descriptors.FileDescriptor'''
    # No imports are currently emitted for visitor modules.
    #self.writeLn('import coda.runtime')
    # Generate an import to the descriptor module
    # package = self.getScopedOption(fd.getOptions().getPackage(), None)
    # if package:
    #   self.writeImport(package)
    # self.writeLn()
  def genEnum(self, fd, en):
    # Enums need no visitor support.
    pass
  def genStruct(self, fd, struct):
    # Only generate code for structs that were requested as visitor roots.
    if struct in self.baseClasses:
      super().genStruct(fd, struct)
  def getStructSectionOrder(self):
    '''Return the order in which definitions appear in the generated class.'''
    return (
        self.genStructVisitorMethods,
        self.genStructVistorDispatcher)
  def beginStruct(self, fd, struct):
    '''@type fd: coda.descriptors.FileDescriptor
       @type struct: coda.descriptors.StructType'''
    # Attempt to locate all structs that inherit from this one
    self.subtypes = []
    def findInheritingStructs(structs):
      # Walk each struct's base-type chain; anything that reaches 'struct'
      # is a subtype and gets its own visit method.
      for st in structs:
        base = st.getBaseType()
        while base:
          if base == struct:
            self.subtypes.append(st)
            break
          base = base.getBaseType()
        findInheritingStructs(st.getStructs())
    for file in self.allFiles:
      findInheritingStructs(file.getStructs())
    self.genStructVisitorClassHeader(fd, struct)
  def genStructVisitorClassHeader(self, fd, struct):
    # Emit a banner comment followed by the visitor class declaration.
    self.writeLn('# ' + '=' * 77)
    self.writeLnFmt('# {0}', self.visitorClassName(struct))
    self.writeLn('# ' + '=' * 77)
    self.writeLn()
    self.writeLnFmt('class {0}:', self.visitorClassName(struct))
  def genStructVisitorMethods(self, fd, struct):
    '''@type fd: coda.descriptors.FileDescriptor
       @type struct: coda.descriptors.StructType
       Emit the root visit method, the visitUnknown fallback, and one visit
       method per known subtype.'''
    valueName = self.getValueName(struct)
    self.writeLnFmt('def {0}(self, {1}, *args):', self.getVisitorMethodName(struct), valueName)
    self.indent()
    for field in struct.getFields():
      if not field.getOptions().isNovisit():
        self.genVisitField(fd, struct, field, valueName)
    self.writeLnFmt('return {0}', valueName)
    self.unindent()
    self.writeLn()
    # Fallback invoked when the dispatch table has no entry for a typeId.
    self.writeLnFmt('def visitUnknown{1}(self, {0}, *args):', valueName, struct.getName())
    self.indent()
    self.writeLnFmt("assert False, 'Unknown {0} type \\\'' + type({1}).__name__ + '\\\' with id: ' + str({1}.typeId())",
                    struct.getName(), valueName)
    self.unindent()
    self.writeLn()
    for subtype in self.subtypes:
      self.genVisitMethod(fd, struct, subtype, valueName)
  def genVisitMethod(self, fd, struct, subtype, valueName):
    # Emit a visit method for one subtype: visit its own fields, then chain
    # to the base type's visit method.
    self.writeLnFmt('def {0}(self, {1}, *args):', self.getVisitorMethodName(subtype), valueName)
    self.indent()
    visitable = [field for field in subtype.getFields() if not field.getOptions().isNovisit()]
    for field in visitable:
      self.genVisitField(fd, struct, field, valueName)
    self.writeLnFmt('return self.{0}({1}, *args)',
                    self.getVisitorMethodName(subtype.getBaseType()), valueName)
    self.unindent()
    self.writeLn()
  def genVisitField(self, fd, struct, field, valueName):
    '''@type fd: coda.descriptors.FileDescriptor
       @type struct: coda.descriptors.StructType
       @type field: coda.descriptors.StructType.Field
       Emit traversal code for one field, recursing into struct-typed
       values (including list/set/map elements) that belong to this
       visitor's type hierarchy.'''
    fieldType = field.getType()
    fmtParams = {
        'struct': struct.getName(),
        'field': self.capitalize(field.getName()),
        'value': valueName
    }
    # Unwrap modified (e.g. const/shared) types to their element type.
    if fieldType.typeId() == types.TypeKind.MODIFIED:
      fieldType = fieldType.getElementType()
    if fieldType.typeId() == types.TypeKind.STRUCT and self.isSubType(fieldType, struct):
      if field.getOptions().isNullable():
        self.writeLnFmt('if {value}.has{field}() and {value}.get{field}():', **fmtParams)
      else:
        self.writeLnFmt('if {value}.has{field}():', **fmtParams)
      self.indent()
      self.writeLnFmt('self.traverse{struct}({value}.get{field}(), *args)', **fmtParams)
      self.unindent()
    elif fieldType.typeId() == types.TypeKind.LIST:
      if self.isSubType(fieldType.getElementType(), struct):
        self.writeLnFmt('for item in {value}.get{field}():', **fmtParams)
        self.indent()
        self.writeLnFmt('self.traverse{struct}(item, *args)', **fmtParams)
        self.unindent()
    elif fieldType.typeId() == types.TypeKind.SET:
      if self.isSubType(fieldType.getElementType(), struct):
        self.writeLnFmt('for item in {value}.get{field}():', **fmtParams)
        self.indent()
        self.writeLnFmt('self.traverse{struct}(item, *args)', **fmtParams)
        self.unindent()
    elif fieldType.typeId() == types.TypeKind.MAP:
      # Maps may hold visitable structs in keys, values, or both.
      if self.isSubType(fieldType.getKeyType(), struct):
        self.writeLnFmt('for item in {value}.get{field}().keys():', **fmtParams)
        self.indent()
        self.writeLnFmt('self.traverse{struct}(item, *args)', **fmtParams)
        self.unindent()
      if self.isSubType(fieldType.getValueType(), struct):
        self.writeLnFmt('for item in {value}.get{field}().values():', **fmtParams)
        self.indent()
        self.writeLnFmt('self.traverse{struct}(item, *args)', **fmtParams)
        self.unindent()
  def genStructVistorDispatcher(self, fd, struct):
    '''@type fd: coda.descriptors.FileDescriptor
       @type struct: coda.descriptors.StructType
       Emit the traverse/validate helpers, the __call__ dispatcher, and a
       constructor that builds the typeId -> visit-method dispatch table.
       NOTE(review): "Vistor" spelling matches getStructSectionOrder;
       renaming would have to change both together.'''
    self.writeLnFmt('def traverse{0}(self, value, *args):', struct.getName())
    self.indent()
    self.writeLnFmt(
        'result = self.__dispatch.get(value.typeId(), self.visitUnknown{0})(value, *args)',
        struct.getName())
    self.writeLnFmt('self.validate{0}Result(value, result)', struct.getName())
    self.writeLn('return result')
    self.unindent()
    self.writeLn()
    self.writeLnFmt('def traverse{0}List(self, valueList, *args):', struct.getName())
    self.indent()
    self.writeLnFmt(
        'return [self.traverse{0}(value, *args) for value in valueList]', struct.getName())
    self.unindent()
    self.writeLn()
    # Validation hook: generated as a no-op for subclasses to override.
    self.writeLnFmt('def validate{0}Result(self, value, result):', struct.getName())
    self.indent()
    self.writeLn('pass')
    self.unindent()
    self.writeLn()
    # Dispatch function (call operator)
    self.writeLnFmt('def __call__(self, value, *args):')
    self.indent()
    self.writeLnFmt(
        'return self.__dispatch.get(value.typeId(), self.visitUnknown{0})(value, *args)',
        struct.getName())
    self.unindent()
    self.writeLn()
    # Constructor - initializes dispatch table
    self.writeLnFmt('def __init__(self):')
    self.indent()
    self.writeLn('super().__init__()')
    self.writeLn('self.__dispatch = {')
    self.indent()
    self.writeLnFmt('{0}: self.{1},', struct.getTypeId(), self.getVisitorMethodName(struct))
    for subtype in self.subtypes:
      self.writeLnFmt('{0}: self.{1},', subtype.getTypeId(), self.getVisitorMethodName(subtype))
    self.unindent()
    self.writeLn('}')
    self.unindent()
    self.writeLn()
  def visitorClassName(self, struct):
    # Name of the generated visitor class for a struct.
    return struct.getName() + "Visitor"
  def endFile(self, fd):
    pass
  def isSubType(self, ty, base):
    # True when 'ty' (after unwrapping a MODIFIED wrapper) is 'base' or
    # inherits from it; non-struct types never match.
    if ty.typeId() == types.TypeKind.MODIFIED:
      ty = ty.getElementType()
    if ty.typeId() != types.TypeKind.STRUCT:
      return False
    while ty:
      if ty is base:
        return True;
      elif ty.hasBaseType():
        ty = ty.getBaseType()
      else:
        break
    return False
  def getVisitorMethodName(self, struct):
    # Build 'visit' + the names of all enclosing types + the struct's name,
    # e.g. visitOuterInner for Inner nested in Outer.
    name = struct.getName()
    while struct.getEnclosingType():
      struct = struct.getEnclosingType()
      name = struct.getName() + name
    return 'visit' + name
  def getValueName(self, struct):
    # Pick a parameter name for the visited value, walking up the base-type
    # chain when the candidate collides with reserved/ambient names.
    # NOTE(review): the assignment below reads struct.getBaseType() on every
    # pass while 'st' is the variable that walks up the chain -- this looks
    # like it was meant to be st.getBaseType().getName(); confirm intent.
    name = self.decapitalize(struct.getName())
    st = struct
    while name == struct.getName() or name == 'args' or\
          name == 'item' or name in self.RESERVED_WORDS:
      if st.getBaseType():
        st = st.getBaseType()
        name = self.decapitalize(struct.getBaseType().getName())
      else:
        return 'value'
    return name
| |
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import Categorical, DataFrame, Index, MultiIndex, Series, date_range, isna
import pandas._testing as tm
class TestDataFrameSelectReindex:
    # These are specific reindex-based tests; other indexing tests should go in
    # test_indexing
    def test_merge_join_different_levels(self):
        # GH 9455
        # first dataframe
        df1 = DataFrame(columns=["a", "b"], data=[[1, 11], [0, 22]])
        # second dataframe
        columns = MultiIndex.from_tuples([("a", ""), ("c", "c1")])
        df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]])
        # merge
        columns = ["a", "b", ("c", "c1")]
        expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]])
        with tm.assert_produces_warning(UserWarning):
            result = pd.merge(df1, df2, on="a")
        tm.assert_frame_equal(result, expected)
        # join, see discussion in GH 12219
        columns = ["a", "b", ("a", ""), ("c", "c1")]
        expected = DataFrame(columns=columns, data=[[1, 11, 0, 44], [0, 22, 1, 33]])
        with tm.assert_produces_warning(UserWarning):
            result = df1.join(df2, on="a")
        tm.assert_frame_equal(result, expected)
    def test_reindex(self, float_frame):
        datetime_series = tm.makeTimeSeries(nper=30)
        # row reindex: values survive on shared labels, NaN on new labels
        newFrame = float_frame.reindex(datetime_series.index)
        for col in newFrame.columns:
            for idx, val in newFrame[col].items():
                if idx in float_frame.index:
                    if np.isnan(val):
                        assert np.isnan(float_frame[col][idx])
                    else:
                        assert val == float_frame[col][idx]
                else:
                    assert np.isnan(val)
        for col, series in newFrame.items():
            assert tm.equalContents(series.index, newFrame.index)
        emptyFrame = float_frame.reindex(Index([]))
        assert len(emptyFrame.index) == 0
        # Cython code should be unit-tested directly
        nonContigFrame = float_frame.reindex(datetime_series.index[::2])
        for col in nonContigFrame.columns:
            for idx, val in nonContigFrame[col].items():
                if idx in float_frame.index:
                    if np.isnan(val):
                        assert np.isnan(float_frame[col][idx])
                    else:
                        assert val == float_frame[col][idx]
                else:
                    assert np.isnan(val)
        for col, series in nonContigFrame.items():
            assert tm.equalContents(series.index, nonContigFrame.index)
        # corner cases
        # Same index, copies values but not index if copy=False
        newFrame = float_frame.reindex(float_frame.index, copy=False)
        assert newFrame.index is float_frame.index
        # length zero
        newFrame = float_frame.reindex([])
        assert newFrame.empty
        assert len(newFrame.columns) == len(float_frame.columns)
        # length zero with columns reindexed with non-empty index
        newFrame = float_frame.reindex([])
        newFrame = newFrame.reindex(float_frame.index)
        assert len(newFrame.index) == len(float_frame.index)
        assert len(newFrame.columns) == len(float_frame.columns)
        # pass non-Index
        newFrame = float_frame.reindex(list(datetime_series.index))
        expected = datetime_series.index._with_freq(None)
        tm.assert_index_equal(newFrame.index, expected)
        # copy with no axes
        result = float_frame.reindex()
        tm.assert_frame_equal(result, float_frame)
        assert result is not float_frame
    def test_reindex_nan(self):
        # reindexing with NaN present among the index labels
        df = pd.DataFrame(
            [[1, 2], [3, 5], [7, 11], [9, 23]],
            index=[2, np.nan, 1, 5],
            columns=["joe", "jim"],
        )
        i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]
        tm.assert_frame_equal(df.reindex(i), df.iloc[j])
        df.index = df.index.astype("object")
        tm.assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)
        # GH10388
        df = pd.DataFrame(
            {
                "other": ["a", "b", np.nan, "c"],
                "date": ["2015-03-22", np.nan, "2012-01-08", np.nan],
                "amount": [2, 3, 4, 5],
            }
        )
        df["date"] = pd.to_datetime(df.date)
        df["delta"] = (pd.to_datetime("2015-06-18") - df["date"]).shift(1)
        left = df.set_index(["delta", "other", "date"]).reset_index()
        right = df.reindex(columns=["delta", "other", "date", "amount"])
        tm.assert_frame_equal(left, right)
    def test_reindex_name_remains(self):
        # index/columns name should survive a reindex
        s = Series(np.random.rand(10))
        df = DataFrame(s, index=np.arange(len(s)))
        i = Series(np.arange(10), name="iname")
        df = df.reindex(i)
        assert df.index.name == "iname"
        df = df.reindex(Index(np.arange(10), name="tmpname"))
        assert df.index.name == "tmpname"
        s = Series(np.random.rand(10))
        df = DataFrame(s.T, index=np.arange(len(s)))
        i = Series(np.arange(10), name="iname")
        df = df.reindex(columns=i)
        assert df.columns.name == "iname"
    def test_reindex_int(self, int_frame):
        smaller = int_frame.reindex(int_frame.index[::2])
        assert smaller["A"].dtype == np.int64
        # introducing missing values upcasts int64 -> float64
        bigger = smaller.reindex(int_frame.index)
        assert bigger["A"].dtype == np.float64
        smaller = int_frame.reindex(columns=["A", "B"])
        assert smaller["A"].dtype == np.int64
    def test_reindex_columns(self, float_frame):
        new_frame = float_frame.reindex(columns=["A", "B", "E"])
        tm.assert_series_equal(new_frame["B"], float_frame["B"])
        assert np.isnan(new_frame["E"]).all()
        assert "C" not in new_frame
        # Length zero
        new_frame = float_frame.reindex(columns=[])
        assert new_frame.empty
    def test_reindex_columns_method(self):
        # GH 14992, reindexing over columns ignored method
        df = DataFrame(
            data=[[11, 12, 13], [21, 22, 23], [31, 32, 33]],
            index=[1, 2, 4],
            columns=[1, 2, 4],
            dtype=float,
        )
        # default method
        result = df.reindex(columns=range(6))
        expected = DataFrame(
            data=[
                [np.nan, 11, 12, np.nan, 13, np.nan],
                [np.nan, 21, 22, np.nan, 23, np.nan],
                [np.nan, 31, 32, np.nan, 33, np.nan],
            ],
            index=[1, 2, 4],
            columns=range(6),
            dtype=float,
        )
        tm.assert_frame_equal(result, expected)
        # method='ffill'
        result = df.reindex(columns=range(6), method="ffill")
        expected = DataFrame(
            data=[
                [np.nan, 11, 12, 12, 13, 13],
                [np.nan, 21, 22, 22, 23, 23],
                [np.nan, 31, 32, 32, 33, 33],
            ],
            index=[1, 2, 4],
            columns=range(6),
            dtype=float,
        )
        tm.assert_frame_equal(result, expected)
        # method='bfill'
        result = df.reindex(columns=range(6), method="bfill")
        expected = DataFrame(
            data=[
                [11, 11, 12, 13, 13, np.nan],
                [21, 21, 22, 23, 23, np.nan],
                [31, 31, 32, 33, 33, np.nan],
            ],
            index=[1, 2, 4],
            columns=range(6),
            dtype=float,
        )
        tm.assert_frame_equal(result, expected)
    def test_reindex_axes(self):
        # GH 3317, reindexing by both axes loses freq of the index
        df = DataFrame(
            np.ones((3, 3)),
            index=[datetime(2012, 1, 1), datetime(2012, 1, 2), datetime(2012, 1, 3)],
            columns=["a", "b", "c"],
        )
        time_freq = date_range("2012-01-01", "2012-01-03", freq="d")
        some_cols = ["a", "b"]
        index_freq = df.reindex(index=time_freq).index.freq
        both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
        seq_freq = df.reindex(index=time_freq).reindex(columns=some_cols).index.freq
        assert index_freq == both_freq
        assert index_freq == seq_freq
    def test_reindex_fill_value(self):
        df = DataFrame(np.random.randn(10, 4))
        # axis=0
        result = df.reindex(list(range(15)))
        assert np.isnan(result.values[-5:]).all()
        result = df.reindex(range(15), fill_value=0)
        expected = df.reindex(range(15)).fillna(0)
        tm.assert_frame_equal(result, expected)
        # axis=1
        result = df.reindex(columns=range(5), fill_value=0.0)
        expected = df.copy()
        expected[4] = 0.0
        tm.assert_frame_equal(result, expected)
        result = df.reindex(columns=range(5), fill_value=0)
        expected = df.copy()
        expected[4] = 0
        tm.assert_frame_equal(result, expected)
        result = df.reindex(columns=range(5), fill_value="foo")
        expected = df.copy()
        expected[4] = "foo"
        tm.assert_frame_equal(result, expected)
        # other dtypes
        df["foo"] = "foo"
        result = df.reindex(range(15), fill_value=0)
        expected = df.reindex(range(15)).fillna(0)
        tm.assert_frame_equal(result, expected)
    def test_reindex_dups(self):
        # GH4746, reindex on duplicate index error messages
        arr = np.random.randn(10)
        df = DataFrame(arr, index=[1, 2, 3, 4, 5, 1, 2, 3, 4, 5])
        # set index is ok
        result = df.copy()
        result.index = list(range(len(df)))
        expected = DataFrame(arr, index=list(range(len(df))))
        tm.assert_frame_equal(result, expected)
        # reindex fails
        msg = "cannot reindex from a duplicate axis"
        with pytest.raises(ValueError, match=msg):
            df.reindex(index=list(range(len(df))))
    def test_reindex_axis_style(self):
        # https://github.com/pandas-dev/pandas/issues/12392
        df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
        expected = pd.DataFrame(
            {"A": [1, 2, np.nan], "B": [4, 5, np.nan]}, index=[0, 1, 3]
        )
        result = df.reindex([0, 1, 3])
        tm.assert_frame_equal(result, expected)
        result = df.reindex([0, 1, 3], axis=0)
        tm.assert_frame_equal(result, expected)
        result = df.reindex([0, 1, 3], axis="index")
        tm.assert_frame_equal(result, expected)
    def test_reindex_positional_warns(self):
        # https://github.com/pandas-dev/pandas/issues/12392
        # positional labels for both axes are deprecated
        df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
        expected = pd.DataFrame({"A": [1.0, 2], "B": [4.0, 5], "C": [np.nan, np.nan]})
        with tm.assert_produces_warning(FutureWarning):
            result = df.reindex([0, 1], ["A", "B", "C"])
        tm.assert_frame_equal(result, expected)
    def test_reindex_axis_style_raises(self):
        # https://github.com/pandas-dev/pandas/issues/12392
        # conflicting/over-specified axis arguments must raise
        df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
        with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
            df.reindex([0, 1], ["A"], axis=1)
        with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
            df.reindex([0, 1], ["A"], axis="index")
        with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
            df.reindex(index=[0, 1], axis="index")
        with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
            df.reindex(index=[0, 1], axis="columns")
        with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
            df.reindex(columns=[0, 1], axis="columns")
        with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
            df.reindex(index=[0, 1], columns=[0, 1], axis="columns")
        with pytest.raises(TypeError, match="Cannot specify all"):
            df.reindex([0, 1], [0], ["A"])
        # Mixing styles
        with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
            df.reindex(index=[0, 1], axis="index")
        with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
            df.reindex(index=[0, 1], axis="columns")
        # Duplicates
        with pytest.raises(TypeError, match="multiple values"):
            df.reindex([0, 1], labels=[0, 1])
    def test_reindex_single_named_indexer(self):
        # https://github.com/pandas-dev/pandas/issues/12392
        df = pd.DataFrame({"A": [1, 2, 3], "B": [1, 2, 3]})
        result = df.reindex([0, 1], columns=["A"])
        expected = pd.DataFrame({"A": [1, 2]})
        tm.assert_frame_equal(result, expected)
    def test_reindex_api_equivalence(self):
        # https://github.com/pandas-dev/pandas/issues/12392
        # equivalence of the labels/axis and index/columns API's
        df = DataFrame(
            [[1, 2, 3], [3, 4, 5], [5, 6, 7]],
            index=["a", "b", "c"],
            columns=["d", "e", "f"],
        )
        res1 = df.reindex(["b", "a"])
        res2 = df.reindex(index=["b", "a"])
        res3 = df.reindex(labels=["b", "a"])
        res4 = df.reindex(labels=["b", "a"], axis=0)
        res5 = df.reindex(["b", "a"], axis=0)
        for res in [res2, res3, res4, res5]:
            tm.assert_frame_equal(res1, res)
        res1 = df.reindex(columns=["e", "d"])
        res2 = df.reindex(["e", "d"], axis=1)
        res3 = df.reindex(labels=["e", "d"], axis=1)
        for res in [res2, res3]:
            tm.assert_frame_equal(res1, res)
        with tm.assert_produces_warning(FutureWarning) as m:
            res1 = df.reindex(["b", "a"], ["e", "d"])
        assert "reindex" in str(m[0].message)
        res2 = df.reindex(columns=["e", "d"], index=["b", "a"])
        res3 = df.reindex(labels=["b", "a"], axis=0).reindex(labels=["e", "d"], axis=1)
        for res in [res2, res3]:
            tm.assert_frame_equal(res1, res)
    def test_align_int_fill_bug(self):
        # GH #910
        X = np.arange(10 * 10, dtype="float64").reshape(10, 10)
        Y = np.ones((10, 1), dtype=int)
        df1 = DataFrame(X)
        df1["0.X"] = Y.squeeze()
        df2 = df1.astype(float)
        result = df1 - df1.mean()
        expected = df2 - df2.mean()
        tm.assert_frame_equal(result, expected)
    def test_reindex_boolean(self):
        # bool values upcast to object when NaNs are introduced
        frame = DataFrame(
            np.ones((10, 2), dtype=bool), index=np.arange(0, 20, 2), columns=[0, 2]
        )
        reindexed = frame.reindex(np.arange(10))
        assert reindexed.values.dtype == np.object_
        assert isna(reindexed[0][1])
        reindexed = frame.reindex(columns=range(3))
        assert reindexed.values.dtype == np.object_
        assert isna(reindexed[1]).all()
    def test_reindex_objects(self, float_string_frame):
        reindexed = float_string_frame.reindex(columns=["foo", "A", "B"])
        assert "foo" in reindexed
        reindexed = float_string_frame.reindex(columns=["A", "B"])
        assert "foo" not in reindexed
    def test_reindex_corner(self, int_frame):
        index = Index(["a", "b", "c"])
        dm = DataFrame({}).reindex(index=[1, 2, 3])
        reindexed = dm.reindex(columns=index)
        tm.assert_index_equal(reindexed.columns, index)
        # ints are weird
        smaller = int_frame.reindex(columns=["A", "B", "E"])
        assert smaller["E"].dtype == np.float64
    def test_reindex_with_nans(self):
        # NaN index labels should not match any requested label
        df = DataFrame(
            [[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],
            columns=["a", "b"],
            index=[100.0, 101.0, np.nan, 102.0, 103.0],
        )
        result = df.reindex(index=[101.0, 102.0, 103.0])
        expected = df.iloc[[1, 3, 4]]
        tm.assert_frame_equal(result, expected)
        result = df.reindex(index=[103.0])
        expected = df.iloc[[4]]
        tm.assert_frame_equal(result, expected)
        result = df.reindex(index=[101.0])
        expected = df.iloc[[1]]
        tm.assert_frame_equal(result, expected)
    def test_reindex_multi(self):
        # simultaneous index+columns reindex equals sequential reindexes
        df = DataFrame(np.random.randn(3, 3))
        result = df.reindex(index=range(4), columns=range(4))
        expected = df.reindex(list(range(4))).reindex(columns=range(4))
        tm.assert_frame_equal(result, expected)
        df = DataFrame(np.random.randint(0, 10, (3, 3)))
        result = df.reindex(index=range(4), columns=range(4))
        expected = df.reindex(list(range(4))).reindex(columns=range(4))
        tm.assert_frame_equal(result, expected)
        df = DataFrame(np.random.randint(0, 10, (3, 3)))
        result = df.reindex(index=range(2), columns=range(2))
        expected = df.reindex(range(2)).reindex(columns=range(2))
        tm.assert_frame_equal(result, expected)
        df = DataFrame(np.random.randn(5, 3) + 1j, columns=["a", "b", "c"])
        result = df.reindex(index=[0, 1], columns=["a", "b"])
        expected = df.reindex([0, 1]).reindex(columns=["a", "b"])
        tm.assert_frame_equal(result, expected)
    def test_reindex_multi_categorical_time(self):
        # https://github.com/pandas-dev/pandas/issues/21390
        midx = pd.MultiIndex.from_product(
            [
                Categorical(["a", "b", "c"]),
                Categorical(date_range("2012-01-01", periods=3, freq="H")),
            ]
        )
        df = pd.DataFrame({"a": range(len(midx))}, index=midx)
        df2 = df.iloc[[0, 1, 2, 3, 4, 5, 6, 8]]
        result = df2.reindex(midx)
        expected = pd.DataFrame({"a": [0, 1, 2, 3, 4, 5, 6, np.nan, 8]}, index=midx)
        tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize(
        "operation", ["__iadd__", "__isub__", "__imul__", "__ipow__"]
    )
    @pytest.mark.parametrize("inplace", [False, True])
    def test_inplace_drop_and_operation(self, operation, inplace):
        # GH 30484
        df = pd.DataFrame({"x": range(5)})
        expected = df.copy()
        df["y"] = range(5)
        y = df["y"]
        with tm.assert_produces_warning(None):
            if inplace:
                df.drop("y", axis=1, inplace=inplace)
            else:
                df = df.drop("y", axis=1, inplace=inplace)
            # Perform operation and check result
            getattr(y, operation)(1)
            tm.assert_frame_equal(df, expected)
| |
import numpy as np
from multi_sbm_helpers import comp_edge_cts, softmax
from cgs_llhds import diri_multi_llhd
from dcsbm_helpers import GD, BD2, samp_shape_post_step
from multi_dcsbm_helpers import mdcsbm_samp_rate_post_step
from itertools import compress
import dcsbm
class aug_dcsbm_cgs(dcsbm.cgs):
    """
    Augmented collapsed gibbs sampler for the DCSBM.
    Supports methods to add and remove graphs.
    Basic idea is that we're dealing with the case where A is the sum of the adjacency matrices of many graphs (so it's
    a simultaneous DCSBM on many graphs), and we need methods to add and remove graphs to the sum of A.
    Used for the multi-DCSBM collapsed gibbs sampler
    """
    def __init__(self, A, z, n_comm, alpha, kap_base, lam_base, gam_base, n_graphs=1):
        """
        :param A: adjacency matrix of (multi) graph (sum of participant graphs)
        :param z: community identities of vertices
        :param n_comm: number of communities (in case some communities are empty at init)
        :param alpha: dirichlet prior parameter for community memberships
        :param kap_base: scalar, gamma dist param; for single graph
        :param lam_base: scalar, gamma dist param; for single graph
        :param gam_base: scalar, param for deg correction dirichlet dist; for single graph.
            Basic block model recovered in gamma->inf limit
        :param n_graphs: int, number of graphs that have been aggregated into A
        """
        # The *_base parameters are the per-graph prior values; they are kept
        # so that q() can fall back to the pure prior when n_graphs == 0, and
        # so lam can be rescaled whenever graphs are added or removed.
        self.kap_base = kap_base
        self.lam_base = lam_base
        self.gam_base = gam_base
        self.n_graphs = n_graphs
        # the DCSBM for A = sum_i A_i
        if n_graphs != 0:
            dcsbm.cgs.__init__(self, A, z, n_comm, alpha, kap_base, 1. * lam_base / n_graphs, gam_base)
        else:
            # this will cause all of the sufficient stats to initialize appropriately, but it's not
            # really quite the right thing... might cause problems if used incautiously
            A = np.zeros([len(z), len(z)])
            dcsbm.cgs.__init__(self, A, z, n_comm, alpha, kap_base, lam_base, gam_base)
    def remove_graph(self, A_cut, A_cut_diags):
        """
        In the case that self.A is actually the sum of many (simple) adjacency matrices, this cuts out one of those
        :param A_cut: nparray. graph to be removed
        :param A_cut_diags: (imputed) diagonal entries of A_cut
        """
        self.n_graphs -= 1  # 1 less possible edge between any pair of vertices
        if self.n_graphs == 0:
            self.lam = -1.  # sentinel; this should never be invoked anywhere...
        else:
            self.lam = 1. * self.lam_base / self.n_graphs
        self.A = self.A - A_cut
        self.diags = self.diags - A_cut_diags
        # edge_cts[k,l] is number of edges between community k and community l
        self.edge_cts = comp_edge_cts(self.A, self.comm_idxs)
        self.degs = np.sum(self.A, axis=1)
    def add_graph(self, A_add, A_add_diags):
        """
        In the case that self.A is actually the sum of many (simple) adjacency matrices, this adds one to the sum
        :param A_add: graph to be added
        :param A_add_diags: (imputed) diagonal entries of A_add
        """
        self.n_graphs += 1  # 1 more possible edge between any pair of vertices
        self.lam = 1. * self.lam_base / self.n_graphs
        self.A = self.A + A_add
        self.diags = self.diags + A_add_diags
        # edge_cts[k,l] is number of edges between community k and community l (ignoring self edges)
        self.edge_cts = comp_edge_cts(self.A, self.comm_idxs)
        self.degs = np.sum(self.A, axis=1)
    def q(self, A_new, A_new_diags):
        """
        Computes q(A_new, self.z) where, for A an adjacency matrix of a single (multi)-graph,
        log(P(A|z,lam,gam,kap)) = q(A, z, lam, gam, kap) + C(A) where C(A) depends only on A (and thus carries no info
        about how compatible A is with this graph model)
        More precisely, C(A) = -sum_i\lej gammaln(A_ij) - log(2)*sum_i A_ii
        Expression for P(A|z,lam,gam,kap) from DCSBM paper
        This computation is used in the gibbs update for the type assignments of graphs in the multi-DCSBM model
        :param A_new: nparray, adjacency matrix
        :param A_new_diags: nparray, the self edges of A_new (which ought to be imputed at an earlier stage?)
        :return: float, q(A_new)
        """
        # Compute the sufficient stats, then use these to compute llhd based on expression from DCSBM
        tot_trials = 1. * np.outer(self.n, self.n)  # number of pairs of vertices between comm k and l
        tot_trials[np.diag_indices_from(tot_trials)] = self.n ** 2 / 2.  # possible connections to comm [l,l] is smaller
        if self.n_graphs == 0:
            # if we're considering a type that currently contains no graphs,
            # fall back to the per-graph prior parameters
            kap_post = self.kap_base + np.zeros([self.n_comm, self.n_comm])
            lam_post = self.lam_base + tot_trials
            gam_post = self.gam_base + np.zeros(len(self.z))
        else:
            # sample new self edges of adjacency matrix for the current multigraph model
            self.sample_diags()
            # add in the contribution from the self edges to the sufficient stats
            # self_edges[l] is number of self edges in community l
            self_edges = np.array([np.sum(self.diags[comm_idx]) for comm_idx in self.comm_idxs])
            self.edge_cts[np.diag_indices_from(self.edge_cts)] += self_edges
            self.degs += self.diags
            kap_post = self.kap + self.edge_cts
            lam_post = self.n_graphs * (self.lam + tot_trials)
            # NOTE(review): degs already includes diags here (added just above),
            # so diags enter gam_post twice -- confirm this double count is the
            # intended "2 termini per self edge" accounting.
            gam_post = self.gam + self.diags + self.degs
        # compute sufficient stats of A_new
        # note that sufficient stats for terms that only depend on comm identity are the same as for aggregate graph
        # for debugging
        # A_new_diags = np.random.binomial(self.diags, 1. / self.n_graphs)
        A_self_edges = np.array([np.sum(A_new_diags[comm_idx]) for comm_idx in self.comm_idxs])
        A_ec = comp_edge_cts(A_new, self.comm_idxs)
        A_ec[np.diag_indices_from(A_ec)] += A_self_edges
        # number of termini incident on each vertex
        A_terms = np.sum(A_new, axis=1) + 2 * A_new_diags
        q = 0
        for k in range(self.n_comm):
            for l in range(k, self.n_comm):
                q += GD(kap_post[k, l], lam_post[k, l], A_ec[k, l], tot_trials[k, l])
        for l in range(self.n_comm):
            comm_idx = self.comm_idxs[l]
            if len(comm_idx) != 0:
                q += BD2(gam_post[comm_idx], A_terms[comm_idx]) \
                    + np.sum(A_terms[comm_idx]) * np.log(self.n[l])
        # clean up the self edges
        if self.n_graphs != 0:
            self.edge_cts[np.diag_indices_from(self.edge_cts)] -= self_edges
            self.degs -= self.diags
        # np.float was deprecated in NumPy 1.20 and removed in 1.24; it was
        # always just an alias of the builtin float, so use that directly.
        return float(q)
class cgs(aug_dcsbm_cgs):
    """
    Collapsed Gibbs sampling for the multi-DCSBM model with no covariates.

    One aggregate DCSBM sampler (``aug_dcsbm_cgs``) is maintained per graph
    type; Gibbs sweeps alternate between resampling the community labels
    inside each type and resampling the type assignment of each graph.
    """
    def __init__(self, As, n_types, ts, n_comms, zs, alpha, kap, lam, gam, beta):
        """
        :param As: list of adjacency matrices of simple graphs on a common vertex set
        :param n_types: int, number of distinct graph model types
        :param ts: [len(As)], type identity of the graphs
        :param n_comms: [n_types], number of communities in each graph type
        :param zs: [n_types, n_verts], z[t,v] is community membership of vertex v in type t
        :param alpha: dirichlet prior parameter for community memberships, common to all types
        :param kap: scalar, gamma dist param, common to all communities and types
        :param lam: scalar, gamma dist param, common to all communities and types
        :param gam: scalar, param for deg correction dirichlet dist, common to all types --
            basic block model recovered in the gam -> inf limit
        :param beta: dirichlet prior for type assignments, common for all types
        """
        self.As = As
        self.n_types = n_types
        self.ts = ts
        self.n_comms = n_comms
        self.zs = zs
        self.kap = np.float32(kap)
        self.lam = np.float32(lam)
        self.gam = np.float32(gam)
        self.alpha = np.float32(alpha)
        self.beta = np.float32(beta)
        self.n_graphs = len(As)
        self.n_vert = As[0].shape[0]
        # type_idxs[s] is the list of indices of graphs with type s
        self.type_idxs = []
        for s in range(n_types):
            self.type_idxs.append([i for i, t in enumerate(ts) if t == s])
        # type_cts[s] is the number of graphs with type s
        self.type_cts = np.array([len(members) for members in self.type_idxs])
        # sufficient stats for type assignment are just sums of all adj mats in that type
        A_sums = [sum([As[s] for s in type_s])
                  for type_s in self.type_idxs]
        # DCSBM collapsed Gibbs sampler objects storing the model of each type
        self.agg_models = [aug_dcsbm_cgs(A_sums[s], self.zs[s, :], self.n_comms[s], alpha, self.kap, self.lam,
                                         self.gam, self.type_cts[s]) for s in range(self.n_types)]
        # vectors of the form [1 0 0 0], [0 1 0 0], etc., used to call diri_multi_llhd
        self._type_indicators = [np.identity(n_types, int)[j, :] for j in range(n_types)]

    def update_model_zs(self, s):
        """
        Run the CGS to update the community identities in model s.

        :param s: integer, id of the model to be updated
        """
        # materialize the range so it can be shuffled in place (range objects
        # are immutable on Python 3, so np.random.shuffle would fail on them)
        vertex_order = list(range(self.n_vert))
        np.random.shuffle(vertex_order)
        for v in vertex_order:
            self.agg_models[s].update_z(v)

    def update_zs(self):
        """
        Update the community indicators in all of the models.
        """
        for s in range(self.n_types):
            # update the z only if there's at least one graph to get info from
            if self.type_cts[s] != 0:
                self.update_model_zs(s)

    def update_NB_params_local(self):
        # allow different models to have different kap and lam
        for m in self.agg_models:
            if m.n_graphs != 0:
                m.update_NB_params()
                m.lam_base = m.n_graphs * m.lam

    def update_gam_local(self):
        # resample the degree-correction parameter of every model
        for m in self.agg_models:
            m.update_gam()

    def type_llhd(self, g, g_self_edges):
        """
        Compute a length n_types vector q such that
        q(k) - q(l) = log(P(A[g]|z, t[g]=k)) - log(P(A[g]|z, t[g]=l)).

        :param g: int, id of the graph whose type likelihoods are computed
        :param g_self_edges: nparray, imputed self edges of graph g
        :return: nparray, length n_types
        """
        return np.asarray([mod.q(self.As[g], g_self_edges) for mod in self.agg_models])

    def update_ts(self):
        """
        Resample the type assignment of every graph, in a random order.
        """
        # materialize the range so it can be shuffled in place (Python 3 compatible)
        graph_order = list(range(self.n_graphs))
        np.random.shuffle(graph_order)
        for g in graph_order:
            # impute self edges for g to be used in llhd computations
            g_self_edges = np.random.binomial(self.agg_models[self.ts[g]].diags, 1. / self.type_cts[self.ts[g]])
            # remove graph from its current type (affects llhd of graph under model)
            self.agg_models[self.ts[g]].remove_graph(self.As[g], g_self_edges)
            self.type_cts[self.ts[g]] -= 1  # remove graph from type count (affects diri-multi prob)
            # should do nothing... but will cause an exception to be thrown if
            # this is referenced before it's reassigned
            self.ts[g] = self.n_types + 1
            '''
            Sample type of g from distribution given all other type indicators and all community indicators
            Pr(t_g = s | t_\g, z, As[g]) = Pr(t_g = s | t_\g, agg_models, As[g])
                \propto Pr(As[g] | agg_models[s]) * Pr(t_g = s | t_\g)
            '''
            # Pr(As[g] | agg_models[s]) term
            log_type_prob = self.type_llhd(g, g_self_edges)
            # Pr(t_g = s | t_\g) term
            for s in range(self.n_types):
                log_type_prob[s] = log_type_prob[s] + \
                    diri_multi_llhd(obs=self._type_indicators[s], alphas=self.beta + self.type_cts)
            # exponentiate and sample
            type_prob = softmax(log_type_prob)
            self.ts[g] = np.random.multinomial(1, type_prob).nonzero()[0][0]
            # add the graph to its new type
            self.agg_models[self.ts[g]].add_graph(self.As[g], g_self_edges)
            self.type_cts[self.ts[g]] += 1  # add graph to type count (affects diri-multi prob)
class cgsSharedComm(cgs):
    """
    Collapsed Gibbs sampling for the multi-DCSBM model where communities are
    shared across all distinct types.
    """
    def __init__(self, As, n_types, ts, n_comm, zs, alpha, kap, lam, gam, beta):
        # copy the initial community assignment per type so the cgs init can be reused
        multi_zs = np.tile(zs, (n_types, 1))
        n_comms = np.repeat(n_comm, n_types)
        cgs.__init__(self, As, n_types, ts, n_comms, multi_zs, alpha, kap, lam, gam, beta)
        # community indicators are common across all graphs in this model
        self.n_comm = n_comm
        self.zs = zs
        self.n = self.agg_models[0].n  # valid since all comm identities are common
        # vectors of the form [1 0 0 0], [0 1 0 0], etc., used to call diri_multi_llhd
        self._comm_indicators = [np.identity(n_comm, int)[j, :] for j in range(n_comm)]

    def update_z(self, v):
        """
        Run a single step of the collapsed Gibbs sampler to resample the
        (shared) community identity of vertex v.

        :param v: integer, a vertex in the graph
        """
        '''
        update diagonal estimation and remove current vertex from sufficient stats
        '''
        self_edges = []
        for m in self.agg_models:
            if m.n_graphs != 0:
                # add in the contribution from the self edges to the sufficient stats
                # self_edges[l] is number of self edges in community l
                m.sample_diags()
                m_self_edges = np.array([np.sum(m.diags[comm_idx]) for comm_idx in m.comm_idxs])
                m.edge_cts[np.diag_indices_from(m.edge_cts)] += m_self_edges
                m.degs += m.diags
                self_edges.append(m_self_edges)  # remember these so they can be removed later
            m.remove_vertex(v)
        # valid because community indicators are same across all graphs
        self.n = self.agg_models[0].n
        '''
        sample the new index conditional on all other community assignments and the type assignments
        using P(z_i = k | A, z_\i) \propto \prod_s P( A[s][i,:] \given z_i = k, z_\i, ts) * P(z_i = k | z_\i)
        '''
        log_comm_prob = np.zeros(self.n_comm)
        # TBD: computations below could be vectorized to maybe speed up the code by a factor of 2.
        # this is the bit where the common community id's come into play:
        # \prod_s P( A[s][i,:] \given z_i = k, z_\i)
        for m in self.agg_models:
            if m.n_graphs != 0:
                log_comm_prob += m.comm_llhd(v)
        # P(z_i = k | z_\i) (diri-multinom) part of the likelihood
        log_comm_prob = log_comm_prob + \
            np.array([diri_multi_llhd(obs=comm_indic, alphas=self.alpha + self.n)
                      for comm_indic in self._comm_indicators])
        # exponentiate and sample the new label
        comm_prob = softmax(log_comm_prob)
        new_comm = np.random.multinomial(1, comm_prob).nonzero()[0][0]
        '''
        add the vertex back into the sufficient stats and clean up self edges
        '''
        itr = 0
        for m in self.agg_models:
            m.add_vertex(v, new_comm)
            if m.n_graphs != 0:
                m.degs -= m.diags
                m.edge_cts[np.diag_indices_from(m.edge_cts)] -= self_edges[itr]
                itr += 1
        # valid because community indicators are same across all graphs
        self.n = self.agg_models[0].n

    def update_zs(self):
        """
        Update the shared community indicators, sweeping vertices in a random order.
        """
        # materialize the range so it can be shuffled in place (range objects
        # are immutable on Python 3, so np.random.shuffle would fail on them)
        vertex_order = list(range(self.n_vert))
        np.random.shuffle(vertex_order)
        for v in vertex_order:
            self.update_z(v)
        self.zs = self.agg_models[0].z

    def update_NB_params_joint(self):
        """
        Update the parameters of the negative binomial distribution (governing
        edge rates), sharing information between distinct graph types.
        """
        kap = np.copy(self.kap)
        lam = np.copy(self.lam)
        # the community occupancy counts and edge counts for each community pair
        t_upp = []
        e_upp = []
        n_graphs = []
        relevant = [m.n_graphs != 0 for m in self.agg_models]
        for m in compress(self.agg_models, relevant):
            tot_trials = 1. * np.outer(m.n, m.n)  # number of pairs of vertices between comm k and l
            # possible connections to comm [l,l] is smaller
            # (self.n == m.n here since community labels are shared across types)
            tot_trials[np.diag_indices_from(tot_trials)] = self.n ** 2 / 2.
            ttm = np.ma.masked_values(tot_trials, 0)
            em = np.ma.masked_array(m.edge_cts, ttm.mask)
            # count each community pair only once
            unique_pairs = np.triu_indices(self.n_comm)
            t_upp.append(ttm[unique_pairs].compressed())
            e_upp.append(em[unique_pairs].compressed())
            n_graphs.append(m.n_graphs)
        # update kappa
        # key observation is that if e_lm[t] is total number of edges between comms l and m in graph type t then
        # e_lm ~ NB(kap, 1/(1+lam/(n_lm*n_graph[t])))
        # so we can use augmented conjugate update of Zhou&Carin 2012
        ps = t_upp[0] * n_graphs[0] / (t_upp[0] * n_graphs[0] + lam)
        e_obs = e_upp[0]
        # BUG FIX: np.append returns a new array; previously its results were
        # discarded (and edge counts were appended onto ps instead of the
        # observation vector), so only type 0 ever contributed to this update.
        # Also: np.alen was removed in NumPy >= 1.23; use len().
        for t in range(1, len(t_upp)):
            ps = np.append(ps, t_upp[t] * n_graphs[t] / (t_upp[t] * n_graphs[t] + lam))
            e_obs = np.append(e_obs, e_upp[t])
        kap = samp_shape_post_step(e_obs, kap, ps, 0.1, 0.1)
        lam = mdcsbm_samp_rate_post_step(e_upp, t_upp, n_graphs, kap, lam)
        self.lam = lam
        self.kap = kap
        for m in self.agg_models:
            m.lam_base = lam
            if m.n_graphs == 0:
                m.lam = m.lam_base
            else:
                m.lam = lam / m.n_graphs
            m.kap_base = kap
            m.kap = kap
| |
# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import isspmatrix
import scipy.sparse as sp
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_allclose
import warnings
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from megaman.embedding.spectral_embedding import SpectralEmbedding, spectral_embedding, _graph_is_connected
import megaman.geometry.geometry as geom
from sklearn.metrics import normalized_mutual_info_score
from sklearn.datasets.samples_generator import make_blobs
from megaman.utils.testing import assert_raise_message
# non-centered, sparse cluster centers used to generate well-separated blobs
centers = np.array([
    [0.0, 5.0, 0.0, 0.0, 0.0],
    [0.0, 0.0, 4.0, 0.0, 0.0],
    [1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
# module-level dataset shared by all tests below (fixed seed for reproducibility)
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
                            cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
def test_spectral_embedding_two_components(seed=36):
    """Test spectral embedding with two components"""
    rng = np.random.RandomState(seed)
    n = 100
    aff = np.zeros((2 * n, 2 * n))
    # two dense diagonal blocks form the two components
    aff[:n, :n] = np.abs(rng.randn(n, n)) + 2
    aff[n:, n:] = np.abs(rng.randn(n, n)) + 2
    # a single weak connection bridges the blocks
    aff[0, n + 1] = 1
    aff[n + 1, 0] = 1
    aff.flat[::2 * n + 1] = 0          # zero the diagonal
    aff = 0.5 * (aff + aff.T)          # symmetrize
    labels = np.zeros(2 * n)
    labels[:n] = 1
    model = SpectralEmbedding(n_components=1,
                              random_state=np.random.RandomState(seed),
                              eigen_solver='arpack')
    coords = model.fit_transform(aff, input_type='affinity')
    # threshold the first embedding coordinate at zero to recover the split
    predicted = np.array(coords.ravel() < 0, dtype="float")
    assert_equal(normalized_mutual_info_score(labels, predicted), 1.0)
def test_diffusion_embedding_two_components_no_diffusion_time(seed=36):
    """Test spectral embedding with two components"""
    rng = np.random.RandomState(seed)
    n = 100
    aff = np.zeros((2 * n, 2 * n))
    # two dense diagonal blocks form the two components
    aff[:n, :n] = np.abs(rng.randn(n, n)) + 2
    aff[n:, n:] = np.abs(rng.randn(n, n)) + 2
    # a single weak connection bridges the blocks
    aff[0, n + 1] = 1
    aff[n + 1, 0] = 1
    aff.flat[::2 * n + 1] = 0          # zero the diagonal
    aff = 0.5 * (aff + aff.T)          # symmetrize
    labels = np.zeros(2 * n)
    labels[:n] = 1
    model = SpectralEmbedding(n_components=1,
                              random_state=np.random.RandomState(seed),
                              eigen_solver='arpack',
                              diffusion_maps=True,
                              geom={'laplacian_method': 'geometric'})
    coords = model.fit_transform(aff, input_type='affinity')
    # threshold the first embedding coordinate at zero to recover the split
    predicted = np.array(coords.ravel() < 0, dtype="float")
    assert_equal(normalized_mutual_info_score(labels, predicted), 1.0)
def test_diffusion_embedding_two_components_diffusion_time_one(seed=36):
    """Test spectral embedding with two components"""
    rng = np.random.RandomState(seed)
    n = 100
    aff = np.zeros((2 * n, 2 * n))
    # two dense diagonal blocks form the two components
    aff[:n, :n] = np.abs(rng.randn(n, n)) + 2
    aff[n:, n:] = np.abs(rng.randn(n, n)) + 2
    # a single weak connection bridges the blocks
    aff[0, n + 1] = 1
    aff[n + 1, 0] = 1
    aff.flat[::2 * n + 1] = 0          # zero the diagonal
    aff = 0.5 * (aff + aff.T)          # symmetrize
    labels = np.zeros(2 * n)
    labels[:n] = 1
    model = SpectralEmbedding(n_components=1,
                              random_state=np.random.RandomState(seed),
                              eigen_solver='arpack',
                              diffusion_maps=True,
                              diffusion_time=1.0,
                              geom={'laplacian_method': 'geometric'})
    coords = model.fit_transform(aff, input_type='affinity')
    # threshold the first embedding coordinate at zero to recover the split
    predicted = np.array(coords.ravel() < 0, dtype="float")
    assert_equal(normalized_mutual_info_score(labels, predicted), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36, almost_equal_decimals=5):
    """Test spectral embedding with precomputed kernel"""
    radius = 4.0
    emb_precomp = SpectralEmbedding(n_components=2,
                                    random_state=np.random.RandomState(seed))
    geom_params = {'affinity_kwds': {'radius': radius},
                   'adjacency_kwds': {'radius': radius},
                   'adjacency_method': 'brute'}
    emb_rbf = SpectralEmbedding(n_components=2,
                                random_state=np.random.RandomState(seed),
                                geom=geom_params)
    # build the affinity matrix by hand and feed it in as precomputed
    geometry = geom.Geometry(adjacency_method='brute',
                             adjacency_kwds={'radius': radius},
                             affinity_kwds={'radius': radius})
    geometry.set_data_matrix(S)
    affinity = geometry.compute_affinity_matrix()
    coords_precomp = emb_precomp.fit_transform(affinity, input_type='affinity')
    coords_rbf = emb_rbf.fit_transform(S, input_type='data')
    # both routes must produce the same affinity matrix ...
    assert_array_almost_equal(
        emb_precomp.affinity_matrix_.todense(),
        emb_rbf.affinity_matrix_.todense(),
        almost_equal_decimals)
    # ... and the same embedding up to per-column sign flips
    assert_true(_check_with_col_sign_flipping(coords_precomp, coords_rbf, 0.05))
def test_spectral_embedding_amg_solver(seed=20):
    """Test spectral embedding with amg solver vs arpack using symmetric laplacian"""
    try:
        import pyamg
    except ImportError:
        raise SkipTest("pyamg not available.")
    radius = 4.0
    geom_params = {'affinity_kwds': {'radius': radius},
                   'adjacency_kwds': {'radius': radius},
                   'adjacency_method': 'brute',
                   'laplacian_method': 'symmetricnormalized'}
    emb_amg = SpectralEmbedding(n_components=2, eigen_solver="amg",
                                random_state=np.random.RandomState(seed),
                                geom=geom_params)
    emb_arpack = SpectralEmbedding(n_components=2, eigen_solver="arpack",
                                   geom=geom_params,
                                   random_state=np.random.RandomState(seed))
    coords_amg = emb_amg.fit_transform(S)
    coords_arpack = emb_arpack.fit_transform(S)
    # the two solvers must agree up to per-column sign flips
    assert_true(_check_with_col_sign_flipping(coords_amg, coords_arpack, 0.05))
def test_spectral_embedding_symmetrzation(seed=36):
    """Test spectral embedding with amg solver vs arpack using non symmetric laplacian"""
    try:
        import pyamg
    except ImportError:
        raise SkipTest("pyamg not available.")
    radius = 4.0
    geom_params = {'affinity_kwds': {'radius': radius},
                   'adjacency_kwds': {'radius': radius},
                   'adjacency_method': 'brute',
                   'laplacian_method': 'geometric'}
    emb_amg = SpectralEmbedding(n_components=2, eigen_solver="amg",
                                random_state=np.random.RandomState(seed),
                                geom=geom_params)
    emb_arpack = SpectralEmbedding(n_components=2, eigen_solver="arpack",
                                   geom=geom_params,
                                   random_state=np.random.RandomState(seed))
    coords_amg = emb_amg.fit_transform(S)
    coords_arpack = emb_arpack.fit_transform(S)
    # the two solvers must agree up to per-column sign flips
    assert_true(_check_with_col_sign_flipping(coords_amg, coords_arpack, 0.05))
def test_spectral_embedding_unknown_eigensolver(seed=36):
    """Test that SpectralClustering fails with an unknown eigensolver"""
    embedder = SpectralEmbedding(n_components=1,
                                 random_state=np.random.RandomState(seed),
                                 eigen_solver="<unknown>")
    # fitting must reject the bogus solver name
    assert_raises(ValueError, embedder.fit, S)
def test_connectivity(seed=36):
    """Test that graph connectivity test works as expected"""
    # node 0 is isolated here, so the graph is disconnected
    disconnected = np.array([[1, 0, 0, 0, 0],
                             [0, 1, 1, 0, 0],
                             [0, 1, 1, 1, 0],
                             [0, 0, 1, 1, 1],
                             [0, 0, 0, 1, 1]])
    # a chain linking every node: connected
    connected = np.array([[1, 1, 0, 0, 0],
                          [1, 1, 1, 0, 0],
                          [0, 1, 1, 1, 0],
                          [0, 0, 1, 1, 1],
                          [0, 0, 0, 1, 1]])
    # the check must agree for dense and both sparse representations
    for graph, expected in ((disconnected, False), (connected, True)):
        assert_equal(_graph_is_connected(graph), expected)
        assert_equal(_graph_is_connected(csr_matrix(graph)), expected)
        assert_equal(_graph_is_connected(csc_matrix(graph)), expected)
def test_predict_size(seed=36):
    """Test the predict function returns appropriate size data"""
    def check_size(diffusion_maps):
        # NOTE(review): diffusion_maps is currently unused here -- presumably it
        # should be forwarded to SpectralEmbedding; confirm intended behavior.
        radius = 4.0
        geom_params = {'affinity_kwds': {'radius': radius},
                       'adjacency_kwds': {'radius': radius},
                       'adjacency_method': 'brute',
                       'laplacian_method': 'geometric'}
        embedder = SpectralEmbedding(n_components=2, eigen_solver="arpack",
                                     random_state=np.random.RandomState(seed),
                                     geom=geom_params)
        train = S[:900, :]
        test = S[-100:, :]
        coords_train = embedder.fit_transform(train)
        coords_test, coords_total = embedder.predict(test)
        # predicted coordinates match the test set size and training dimension
        assert(coords_test.shape[0] == test.shape[0])
        assert(coords_test.shape[1] == coords_train.shape[1])
        # the combined embedding covers the full data set
        assert(coords_total.shape[0] == S.shape[0])
        assert(coords_total.shape[1] == coords_train.shape[1])
    for diffusion_maps in (False, True):
        yield check_size, diffusion_maps
def test_predict_error_not_fitted(seed=36):
    """ Test predict function raises an error when .fit() has not been called"""
    radius = 4.0
    geom_params = {'affinity_kwds': {'radius': radius},
                   'adjacency_kwds': {'radius': radius},
                   'adjacency_method': 'brute',
                   'laplacian_method': 'geometric'}
    embedder = SpectralEmbedding(n_components=2, eigen_solver="arpack",
                                 random_state=np.random.RandomState(seed),
                                 geom=geom_params)
    test = S[-100:, :]
    # calling predict before fit must raise with a helpful message
    msg = 'the .fit() function must be called before the .predict() function'
    assert_raise_message(RuntimeError, msg, embedder.predict, test)
def test_predict_error_no_data(seed=36):
    """ Test predict raises an error when data X are not passed"""
    radius = 4.0
    embedder = SpectralEmbedding(n_components=2,
                                 random_state=np.random.RandomState(seed))
    geometry = geom.Geometry(adjacency_method='brute',
                             adjacency_kwds={'radius': radius},
                             affinity_kwds={'radius': radius})
    geometry.set_data_matrix(S)
    test = S[-100:, :]
    affinity = geometry.compute_affinity_matrix()
    # fit from a precomputed affinity matrix only (no raw data retained)
    embedder.fit_transform(affinity, input_type='affinity')
    # predict needs the raw data, so it must refuse to run here
    msg = 'method only implemented when X passed as data'
    assert_raise_message(NotImplementedError, msg, embedder.predict, test)
| |
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
'''
This module is in place to test the interoperability between CPython and CLR numerical
types.

TODO:
- at the moment the test cases in place are simple sanity checks to ensure the
  appropriate operator overloads have been implemented. This needs to be extended
  quite a bit (e.g., see what happens with overflow cases).
- a few special cases aren't covered yet - complex numbers, unary ops, System.Char.
'''
import unittest
from iptest import is_cli, run_test
#Test Python/CLR number interop.
# Fully-qualified names of the CLR integer types exercised below.
clr_integer_types = [ "System.Byte",
                      "System.SByte",
                      "System.Int16",
                      "System.UInt16",
                      "System.Int32",
                      "System.UInt32",
                      "System.Int64",
                      "System.UInt64"]
# CLR floating-point / decimal types.
clr_float_types = [ "System.Single",
                    "System.Double",
                    "System.Decimal",
                    ]
#TODO - char???
clr_types = clr_integer_types + clr_float_types
# Python-side numeric type names (evaluated by name in the tests below).
py_integer_types = ["int", "long"]
py_float_types = ["float"]
#TODO - special case complex???
py_types = py_integer_types + py_float_types
# Operand/operator combinations attributable to open bugs (none at present).
bug_operands = []
# "<left type><operator><right type>" strings for which evaluating the
# operation is expected to raise TypeError instead of succeeding.
unsupported_operands = [
    #System.Decimal +: System.Single, System.Double, long, float
    "System.Decimal+long",
    "long+System.Decimal",
    #System.Decimal -: System.Single, System.Double, long, float
    "System.Decimal-long",
    "long-System.Decimal",
    #System.Decimal *: System.Single, System.Double, long, float
    "System.Decimal*long",
    "long*System.Decimal",
    #System.Decimal /:System.Single, System.Double, long, float
    "System.Decimal/long",
    "long/System.Decimal",
    #System.Decimal //:System.Byte System.SByte
    "System.Decimal//System.Byte",
    "System.Decimal//System.SByte",
    "System.Decimal//System.Int16",
    "System.Decimal//System.UInt16",
    "System.Decimal//System.Int32",
    "System.Decimal//System.UInt32",
    "System.Decimal//System.Int64",
    "System.Decimal//System.UInt64",
    "System.Decimal//System.Decimal",
    "System.Decimal//int",
    "System.Decimal//long",
    "System.Byte//System.Decimal",
    "System.SByte//System.Decimal",
    "System.Int16//System.Decimal",
    "System.UInt16//System.Decimal",
    "System.Int32//System.Decimal",
    "System.UInt32//System.Decimal",
    "System.Int64//System.Decimal",
    "System.UInt64//System.Decimal",
    "System.Decimal//System.Decimal",
    "int//System.Decimal",
    "long//System.Decimal",
    #System.Decimal **: unsupported against everything
    "System.Decimal**System.Byte",
    "System.Decimal**System.SByte",
    "System.Decimal**System.Int16",
    "System.Decimal**System.UInt16",
    "System.Decimal**System.Int32",
    "System.Decimal**System.UInt32",
    "System.Decimal**System.Int64",
    "System.Decimal**System.UInt64",
    "System.Decimal**System.Decimal",
    "System.Decimal**int",
    "System.Decimal**long",
    "System.Byte**System.Decimal",
    "System.SByte**System.Decimal",
    "System.Int16**System.Decimal",
    "System.UInt16**System.Decimal",
    "System.Int32**System.Decimal",
    "System.UInt32**System.Decimal",
    "System.Int64**System.Decimal",
    "System.UInt64**System.Decimal",
    "System.Decimal**System.Decimal",
    "int**System.Decimal",
    "long**System.Decimal",
    #System.Decimal %: long
    "System.Decimal%long",
    "long%System.Decimal",
    ] + bug_operands
# "<left type><operator><right type>" strings whose assertion failures are
# expected (tracked bugs); currently empty.
known_bugs = []
# (left operand expr, operator, right operand expr, expected result) tuples
# for comparison operators.
bool_test_cases = [
    #x==x
    ("0","==","0", True),
    ("0.0","==","0", True),
    ("1","==","1", True),
    ("3","==","3", True),
    ("-1","==","-1", True),
    #! x==x
    ("10","==","0", False),
    ("10.0","==","0", False),
    ("11","==","1", False),
    ("31","==","3", False),
    ("-11","==","-1", False),
    #x!=x
    ("10","!=","0", True),
    ("10.0","!=","0", True),
    ("11","!=","1", True),
    ("31","!=","3", True),
    ("-11","!=","-1", True),
    #! x!=x
    ("0","!=","0", False),
    ("0.0","!=","0", False),
    ("1","!=","1", False),
    ("3","!=","3", False),
    ("-1","!=","-1", False),
    #x<=x
    ("0","<=","0", True),
    ("0.0","<=","0", True),
    ("1","<=","1", True),
    ("3","<=","3", True),
    ("-1","<=","-1", True),
    #! x<=x
    ("10","<=","0", False),
    ("10.0","<=","0", False),
    ("11","<=","1", False),
    ("13","<=","3", False),
    ("10","<=","-1", False),
    #x>=x
    ("0",">=","0", True),
    ("0.0",">=","0", True),
    ("1",">=","1", True),
    ("3",">=","3", True),
    ("-1",">=","-1", True),
    #! x>=x
    ("0",">=","10", False),
    ("0.0",">=","10", False),
    ("1",">=","11", False),
    ("3",">=","13", False),
    ("-1",">=","11", False),
    #x<=/<y
    ("0", "<=", "1", True),
    ("0", "<", "1", True),
    ("3.14", "<=", "19", True),
    ("3.14", "<", "19", True),
    #!x<=/<y
    ("10", "<=", "1", False),
    ("10", "<", "1", False),
    ("31.14", "<=", "19", False),
    ("31.14", "<", "19", False),
    #x>=/>y
    ("10", ">=", "1", True),
    ("10", ">", "1", True),
    ("31.14", ">=", "19", True),
    ("31.14", ">", "19", True),
    #! x>=/>y
    ("0", ">=", "1", False),
    ("0", ">", "1", False),
    ("3.14", ">=", "19", False),
    ("3.14", ">", "19", False),
    ]
# (left operand expr, operator, right operand expr, expected result) tuples
# for the arithmetic operators.
arith_test_cases = [
    #add
    ("0", "+", "0", 0),
    ("0", "+", "1", 1),
    ("1", "+", "-1", 0),
    ("2", "+", "-1", 1),
    #sub
    ("0", "-", "0", 0),
    ("0", "-", "1", -1),
    ("1", "-", "-1", 2),
    ("2", "-", "-1", 3),
    #mult
    ("0", "*", "0", 0),
    ("0", "*", "1", 0),
    ("2", "*", "1", 2),
    ("1", "*", "-1", -1),
    ("2", "*", "-1", -2),
    #div
    ("0", "/", "1", 0),
    ("4", "/", "2", 2),
    ("2", "/", "1", 2),
    ("1", "/", "-1", -1),
    ("2", "/", "-1", -2),
    #trun div
    ("0", "//", "1", 0),
    ("4", "//", "2", 2),
    ("2", "//", "1", 2),
    ("1", "//", "-1", -1),
    ("2", "//", "-1", -2),
    ("3", "//", "2", 1),
    #power
    ("0", "**", "1", 0),
    ("4", "**", "2", 16),
    ("2", "**", "1", 2),
    ("1", "**", "-1", 1),
    #mod
    ("0", "%", "1", 0),
    ("5", "%", "2", 1),
    ("2", "%", "1", 0),
    ("1", "%", "-1", 0),
    ("2", "%", "-1", 0),
    ]
# (left operand expr, operator, right operand expr, expected result) tuples
# for bitwise and shift operators (integer types only).
bitwise_test_cases = [
    #left shift
    ("0", "<<", "1", 0),
    ("3", "<<", "1", 6),
    ("-3", "<<", "1", -6),
    #right shift
    ("0", ">>", "1", 0),
    ("6", ">>", "1", 3),
    ("-3", ">>", "1", -2),
    #bitwise AND
    ("0", "&", "1", 0),
    ("1", "&", "1", 1),
    ("7", "&", "2", 2),
    ("-1", "&", "1", 1),
    #bitwise OR
    ("0", "|", "1", 1),
    ("1", "|", "1", 1),
    ("4", "|", "2", 6),
    ("-1", "|", "1", -1),
    #bitwise XOR
    ("0", "^", "1", 1),
    ("1", "^", "1", 0),
    ("7", "^", "2", 5),
    ("-1", "^", "1", -2),
    ]
@unittest.skipUnless(is_cli, 'IronPython specific test case')
class ClrNumInteropTest(unittest.TestCase):
    '''Sanity checks of arithmetic/comparison interop between Python and CLR numeric types.'''

    def num_ok_for_type(self, number, proposed_type):
        '''Return True if the number param is within the range of valid values for the proposed type.'''
        import clr
        import System
        #handle special cases first
        if proposed_type == "long":
            #arbitrary precision
            return True
        if proposed_type == "float":
            #arbitrary precision
            return True
        if number >= eval(proposed_type + ".MinValue") and number <= eval(proposed_type + ".MaxValue"):
            return True
        #must give it another shot...maybe the operator is broken
        if eval(proposed_type + ".MinValue") <= number and eval(proposed_type + ".MaxValue") >= number:
            return True
        return False

    def _assert_op_result(self, left_type, left_op, op, right_type, right_op, expected):
        '''Evaluate "<left_type>(<left_op>) <op> <right_type>(<right_op>)" and check it against
        the expected value, honoring the unsupported_operands and known_bugs lists.

        Hoisted out of _test_interop_set so it is defined once instead of being
        re-created as a closure for every test case.'''
        import clr
        import System  # needed in the local scope of the eval calls below
        expression_str = '{0}({1}) {2} {3}({4})'.format(left_type, left_op, str(op), right_type, right_op)
        #if it's supposedly unsupported...make sure
        if unsupported_operands.count(left_type + op + right_type) > 0:
            with self.assertRaises(TypeError):
                eval(expression_str)
            return
        try:
            expression = eval(expression_str)
        except TypeError:
            self.fail("TYPE BUG: %s" % expression_str)
        is_known_bug = known_bugs.count(left_type + op + right_type) > 0
        try:
            self.assertEqual(expression, expected)
        except Exception:
            if is_known_bug:
                return  # expected failure for a tracked bug
            self.fail(expression_str)
        # BUG FIX: this check used to live inside the try block above, so the
        # self.fail it raised was swallowed by the except handler and a fixed
        # known bug was never actually reported.
        if is_known_bug:
            self.fail("NO BUG FOR: %s" % expression_str)

    def _test_interop_set(self, clr_types, py_types, test_cases):
        '''Permute Python/CLR operand types onto each (left, op, right, expected) test case.'''
        import clr
        import System
        g, l = globals(), locals()  # scopes for the type-conversion eval calls below
        #each test case
        for leftop, op, rightop, expected_value in test_cases:
            #get the left operand as a Python value
            py_left = eval(leftop)
            #CLR/Python type names capable of representing the left operand
            leftop_clr_types = [x for x in clr_types if self.num_ok_for_type(py_left, x)]
            leftop_py_types = [x for x in py_types if self.num_ok_for_type(py_left, x)]
            #conversions evaluated for their side effect: a broken conversion fails loudly here
            for x in leftop_clr_types + leftop_py_types:
                eval(x + "(" + leftop + ")", g, l)
            #get the right operand as a Python value
            py_right = eval(rightop)
            rightop_clr_types = [x for x in clr_types if self.num_ok_for_type(py_right, x)]
            rightop_py_types = [x for x in py_types if self.num_ok_for_type(py_right, x)]
            for x in rightop_clr_types + rightop_py_types:
                eval(x + "(" + rightop + ")", g, l)
            #check every pairing: CLR-CLR, CLR-PY, PY-CLR, and PY-PY
            for x in leftop_clr_types + leftop_py_types:
                for y in rightop_clr_types + rightop_py_types:
                    self._assert_op_result(x, leftop, op, y, rightop, expected_value)

    def test_boolean(self):
        '''Test boolean operations involving a left and right operand'''
        self._test_interop_set(clr_types, py_types, bool_test_cases)

    def test_arithmetic(self):
        '''Test general arithmetic operations.'''
        self._test_interop_set(clr_types, py_types, arith_test_cases)

    def test_bitwiseshift(self):
        '''Test bitwise and shifting operations.'''
        self._test_interop_set(clr_integer_types, py_integer_types, bitwise_test_cases)

    def test_sanity(self):
        '''Make sure that numbers within the constraints of the numerical types are allowed.'''
        import clr
        import System
        #[type name, minimum value, maximum value]
        temp_list = [ ["System.Byte", 0, 255],
                      ["System.SByte", -128, 127],
                      ["System.Byte", 0, 255],
                      ["System.Int16", -32768, 32767],
                      ["System.UInt16", 0, 65535],
                      ["System.Int32", -2147483648, 2147483647],
                      ["System.UInt32", 0, 4294967295],
                      ["System.Int64", -9223372036854775808, 9223372036854775807],
                      ["System.UInt64", 0, 18446744073709551615],
                      ["System.Single", -3.40282e+038, 3.40282e+038],
                      ["System.Double", -1.79769313486e+308, 1.79769313486e+308],
                      ["System.Decimal", -79228162514264337593543950335, 79228162514264337593543950335],
                      ["int", -2147483648, 2147483647]
                      ]
        for num_type, small_val, large_val in temp_list:
            self.assertTrue(self.num_ok_for_type(1, num_type))
            self.assertTrue(self.num_ok_for_type(1.0, num_type))
            #Minimum value
            self.assertTrue(self.num_ok_for_type(small_val, num_type))
            self.assertTrue(self.num_ok_for_type(small_val + 1, num_type))
            self.assertTrue(self.num_ok_for_type(small_val + 2, num_type))
            #Maximum value
            self.assertTrue(self.num_ok_for_type(large_val, num_type))
            self.assertTrue(self.num_ok_for_type(large_val - 1, num_type))
            self.assertTrue(self.num_ok_for_type(large_val - 2, num_type))
            #Negative cases: out-of-range values must be rejected for exact types
            if num_type != "System.Single" and num_type != "System.Double" and num_type != "System.Decimal":
                self.assertTrue(not self.num_ok_for_type(small_val - 1, num_type))
                self.assertTrue(not self.num_ok_for_type(small_val - 2, num_type))
                self.assertTrue(not self.num_ok_for_type(large_val + 1, num_type))
                self.assertTrue(not self.num_ok_for_type(large_val + 2, num_type))
        #Special cases: arbitrary-precision Python types accept any magnitude
        self.assertTrue(self.num_ok_for_type(0, "long"))
        self.assertTrue(self.num_ok_for_type(1, "long"))
        self.assertTrue(self.num_ok_for_type(-1, "long"))
        self.assertTrue(self.num_ok_for_type(5, "long"))
        self.assertTrue(self.num_ok_for_type(-92233720368547758080000, "long"))
        self.assertTrue(self.num_ok_for_type( 18446744073709551615000, "long"))
        self.assertTrue(self.num_ok_for_type(0.0, "float"))
        self.assertTrue(self.num_ok_for_type(1.0, "float"))
        self.assertTrue(self.num_ok_for_type(-1.0, "float"))
        self.assertTrue(self.num_ok_for_type(3.14, "float"))
        self.assertTrue(self.num_ok_for_type(-92233720368547758080000.0, "float"))
        self.assertTrue(self.num_ok_for_type( 18446744073709551615000.0, "float"))
# iptest entry point: discovers and runs the unittest cases in this module
run_test(__name__)
| |
""" a panflute filter to format Span element
representations of RawInline elements
The :py:mod:`ipypublish.filters_pandoc.prepare_raw` filter should be run
first to access the functionality below:
"""
import itertools
# from textwrap import fill as textwrap
from panflute import Element, Doc, Span # noqa: F401
import panflute as pf
from ipypublish.filters_pandoc.definitions import (
CONVERTED_OTHER_CLASS,
CONVERTED_DIRECTIVE_CLASS,
IPUB_META_ROUTE,
)
def process_raw_spans(container, doc):
    # type: (Span, Doc) -> Element
    """Convert Span/Div elements tagged by the ``prepare_raw`` filter back
    into raw inline/block content appropriate for the output format.

    Returns None (element left unchanged) for anything this filter does
    not handle.
    """
    if not isinstance(container, (pf.Span, pf.Div)):
        return None
    # metadata flag: when set, converted directives are dropped entirely
    # in the html and latex branches below
    hide_raw = doc.get_metadata(IPUB_META_ROUTE + ".hide_raw", False)
    # Case 1: inline raw content wrapped in a Span
    if CONVERTED_OTHER_CLASS in container.classes and isinstance(container, pf.Span):
        if doc.format == "rst" and container.attributes["format"] == "latex":
            # latex tags that have a known rst equivalent
            if container.attributes["tag"] in ["todo"]:
                return pf.Str(
                    "\n\n.. {}:: {}\n\n".format(
                        container.attributes["tag"], container.attributes["content"]
                    )
                )
            if container.attributes["tag"] == "ensuremath":
                return pf.RawInline(
                    ":math:`{}`".format(container.attributes["content"]), format="rst"
                )
        # fall back to re-emitting the original raw text unchanged
        return pf.RawInline(
            container.attributes.get("original"), format=container.attributes["format"]
        )
    # Case 2: a block-level directive wrapped in a Div
    if CONVERTED_DIRECTIVE_CLASS in container.classes and isinstance(container, pf.Div):
        # convert the directive head, which will be e.g.
        # Para(Str(..) Space Str(toctree::) SoftBreak Str(:maxdepth:) Space Str(2) SoftBreak Str(:numbered:)) # noqa
        # we need to split on the soft breaks,
        # place them on a new line and re-indent them
        # NOTE(review): ``in ("rst")`` is a substring test against the
        # string "rst" (not a one-element tuple); it happens to work for
        # doc.format == "rst" but was probably meant to be ``("rst",)``
        # -- confirm intent before changing.
        if doc.format in ("rst"):
            # split into lines by soft breaks
            header_lines = [
                list(y)
                for x, y in itertools.groupby(
                    container.content[0].content, lambda z: isinstance(z, pf.SoftBreak)
                )
                if not x
            ]
            # wrap each line in a Para and convert block with pandoc
            head_doc = pf.Doc(*[pf.Para(*l) for l in header_lines])
            head_doc.api_version = doc.api_version
            head_str = pf.convert_text(
                head_doc, input_format="panflute", output_format=doc.format
            )
            # remove blank lines and indent
            head_str = head_str.replace("\n\n", "\n ") + "\n\n"
            head_block = pf.RawBlock(head_str, format=doc.format)
            if len(container.content) == 1:
                # directive has no body: the head is the whole output
                return head_block
            # split into lines by soft breaks, we use indicators to tell
            # us where to indent in the converted text
            body_blocks = []
            for block in container.content[1:]:
                new_elements = [pf.RawInline("%^*", format=doc.format)]
                for el in block.content:
                    if isinstance(el, pf.SoftBreak):
                        new_elements.append(pf.RawInline("?&@", format=doc.format))
                    else:
                        new_elements.append(el)
                block.content = new_elements
                body_blocks.append(block)
            # convert body content with pandoc
            body_doc = pf.Doc(*body_blocks)
            body_doc.api_version = doc.api_version
            body_str = pf.convert_text(
                body_doc, input_format="panflute", output_format=doc.format
            )
            # raise ValueError(body_blocks)
            # replace the placeholder markers with real indentation
            body_str = body_str.replace("%^*", " ").replace("?&@", "\n ")
            # ensure all lines are indented correctly
            # (doesn't occur by default?)
            body_str = (
                "\n".join(
                    [
                        " " + l.lstrip() if l.strip() else l
                        for l in body_str.splitlines()
                    ]
                )
                + "\n\n"
            )
            body_block = pf.RawBlock(body_str, format=doc.format)
            return [head_block, body_block]
        elif (
            doc.format in ("html", "html5") and container.attributes["format"] == "rst"
        ):
            if hide_raw:
                return []
            # replace soft breaks with explicit html line breaks
            head_para = pf.Para(
                *[
                    pf.RawInline("<br>" + " " * 4)
                    if isinstance(c, pf.SoftBreak)
                    else c
                    for c in container.content[0].content
                ]
            )
            head_str = pf.convert_text(
                head_para, input_format="panflute", output_format=doc.format
            )
            if len(container.content) > 1:
                body_doc = pf.Doc(*container.content[1:])
                body_doc.api_version = doc.api_version
                body_str = pf.convert_text(
                    body_doc, input_format="panflute", output_format=doc.format
                )
                body_str = (
                    '<p></p><div style="margin-left: 20px">' "{0}</div>"
                ).format(body_str)
            else:
                body_str = ""
            # render the whole directive in a highlighted div
            return pf.RawBlock(
                '<div {0} style="background-color:rgba(10, 225, 10, .2)">'
                "{1}{2}"
                "</div>".format(
                    container.attributes.get("directive", ""), head_str, body_str
                ),
                format="html",
            )
        elif doc.format in ("tex", "latex") and container.attributes["format"] == "rst":
            if hide_raw:
                return []
            directive = container.attributes.get("directive", "")
            inline = container.attributes.get("inline", "")
            # TODO handle directive with options and/or inline body
            # e.g. .. figure:: path/to/figure
            #      :centre:
            # render the directive as a framed box with its name as title
            box_open = (
                "\\begin{{mdframed}}"
                "[frametitle={{{0}}},frametitlerule=true]".format(directive)
            )
            if inline:
                box_open += "\n\\mdfsubtitle{{{0}}}".format(inline)
            box_close = "\\end{mdframed}"
            if len(container.content) == 1:
                return pf.RawBlock(box_open + box_close, format="tex")
            else:
                return (
                    [pf.RawBlock(box_open, format="tex")]
                    + list(container.content[1:])
                    + [pf.RawBlock(box_close, format="tex")]
                )
        # unknown output format: emit the stringified content as-is
        return pf.RawBlock(
            pf.stringify(pf.Doc(*container.content)),
            format=container.attributes["format"],
        )
    # Case 3: other raw block content wrapped in a Div
    if CONVERTED_OTHER_CLASS in container.classes and isinstance(container, pf.Div):
        return pf.RawBlock(
            pf.stringify(pf.Doc(*container.content)),
            format=container.attributes["format"],
        )
# now unused
# def split_soft_breaks(container,
# indent=4, fmt="rst", indent_first=False,
# pre_content="", post_content="",
# pre_chunk="", post_chunk="",
# linebreak="\n", raw_indent=None):
# """rst conversion doesn't recognise soft breaks as new lines,
# so add them manually and return a list containing the new elements
# """
# content = []
# if pre_content:
# content.append(pf.RawBlock(pre_content, fmt))
# chunks = [list(y) for x, y in itertools.groupby(
# container.content,
# lambda z: isinstance(z, pf.SoftBreak)) if not x]
# for i, chunk in enumerate(chunks):
# if i > 0 or indent_first:
# if raw_indent is not None:
# chunk = [pf.RawInline(raw_indent, fmt)] * indent + chunk
# else:
# chunk = [pf.Space()] * indent + chunk
# if pre_chunk:
# content.append(pf.RawBlock(pre_chunk, fmt))
# content.append(pf.Plain(*chunk))
# content.append(pf.RawBlock(linebreak, fmt))
# if post_chunk:
# content.append(pf.RawBlock(post_chunk, fmt))
# # if isinstance(container, pf.Para):
# # content.append(pf.RawBlock(linebreak, fmt))
# if post_content:
# content.append(pf.RawBlock(post_content, fmt))
# return content
def process_code_latex(code, doc):
    # type: (pf.CodeBlock, Doc) -> Element
    """Wrap code blocks in an mdframed box when targeting latex output.

    Other element types and output formats are left unchanged.
    """
    targeting_latex = doc.format in ("tex", "latex")
    if not (targeting_latex and isinstance(code, pf.CodeBlock)):
        return None
    # TODO line wrapping
    framed = [
        pf.RawBlock("\\begin{mdframed}", format=doc.format),
        code,
        pf.RawBlock("\\end{mdframed}", format=doc.format),
    ]
    return framed
def prepare(doc):
    # type: (Doc) -> None
    """Hook called by run_filters before traversal; nothing to set up."""
    return None
def finalize(doc):
    # type: (Doc) -> None
    """Hook called by run_filters after traversal; nothing to tear down."""
    return None
def main(doc=None):
    # type: (Doc) -> None
    """Apply the raw-span and latex code-block filters to the document."""
    doc_filters = [process_raw_spans, process_code_latex]
    return pf.run_filters(doc_filters, prepare, finalize, doc=doc)
if __name__ == "__main__":
    # entry point when invoked as a pandoc filter executable
    main()
| |
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test harness for chromium clang tools."""
from __future__ import print_function
import argparse
import difflib
import glob
import json
import os
import os.path
import shutil
import subprocess
import sys
def _RunGit(args):
  """Invokes git (git.bat on Windows) with the given argument list."""
  executable = 'git.bat' if sys.platform == 'win32' else 'git'
  subprocess.check_call([executable] + args)
def _GenerateCompileCommands(files, include_paths):
  """Returns a JSON string containing a compilation database for the input."""
  # Note: in theory, backslashes in the compile DB should work but the tools
  # that write compile DBs and the tools that read them don't agree on the
  # escaping convention: https://llvm.org/bugs/show_bug.cgi?id=19687
  normalized = [path.replace('\\', '/') for path in files]
  include_path_flags = ' '.join('-I %s' % path.replace('\\', '/')
                                for path in include_paths)
  entries = []
  for path in normalized:
    entries.append({
        'directory': os.path.dirname(path),
        'command': 'clang++ -std=c++14 -fsyntax-only %s -c %s' % (
            include_path_flags, os.path.basename(path)),
        'file': os.path.basename(path),
    })
  return json.dumps(entries, indent=2)
def _NumberOfTestsToString(tests):
  """Returns an English string describing the number of tests."""
  suffix = '' if tests == 1 else 's'
  return '%d test%s' % (tests, suffix)
def _ApplyTool(tools_clang_scripts_directory,
               tool_to_test,
               tool_path,
               tool_args,
               test_directory_for_tool,
               actual_files,
               apply_edits):
  """Runs the clang tool over |actual_files|, optionally applying its edits.

  Returns 0 on success, or the exit code of the first failing pipeline
  stage. The files are staged in the git index for the duration of the
  run and unstaged again afterwards (see the finally block).
  """
  try:
    # Stage the test files in the git index. If they aren't staged, then
    # run_tool.py will skip them when applying replacements.
    args = ['add']
    args.extend(actual_files)
    _RunGit(args)
    # Launch the following pipeline if |apply_edits| is True:
    # run_tool.py ... | extract_edits.py | apply_edits.py ...
    # Otherwise just the first step is done and the result is written to
    # actual_files[0].
    processes = []
    args = ['python',
            os.path.join(tools_clang_scripts_directory, 'run_tool.py')]
    # Optional per-test extra arguments come from a run_tool.args file.
    extra_run_tool_args_path = os.path.join(test_directory_for_tool,
                                            'run_tool.args')
    if os.path.exists(extra_run_tool_args_path):
      with open(extra_run_tool_args_path, 'r') as extra_run_tool_args_file:
        extra_run_tool_args = extra_run_tool_args_file.readlines()
        args.extend([arg.strip() for arg in extra_run_tool_args])
    args.extend(['--tool', tool_to_test, '-p', test_directory_for_tool])
    if tool_path:
      args.extend(['--tool-path', tool_path])
    if tool_args:
      for arg in tool_args:
        args.append('--tool-arg=%s' % arg)
    args.extend(actual_files)
    processes.append(subprocess.Popen(args, stdout=subprocess.PIPE))
    if apply_edits:
      args = [
          'python',
          os.path.join(tools_clang_scripts_directory, 'extract_edits.py')
      ]
      processes.append(subprocess.Popen(
          args, stdin=processes[-1].stdout, stdout=subprocess.PIPE))
      args = [
          'python',
          os.path.join(tools_clang_scripts_directory, 'apply_edits.py'), '-p',
          test_directory_for_tool
      ]
      processes.append(subprocess.Popen(
          args, stdin=processes[-1].stdout, stdout=subprocess.PIPE))
    # Wait for the pipeline to finish running + check exit codes.
    stdout, _ = processes[-1].communicate()
    for process in processes:
      process.wait()
      if process.returncode != 0:
        print('Failure while running the tool.')
        return process.returncode
    if apply_edits:
      # Reformat the resulting edits via: git cl format.
      args = ['cl', 'format']
      args.extend(actual_files)
      _RunGit(args)
    else:
      # Without --apply-edits the raw tool output is the test result.
      with open(actual_files[0], 'w') as output_file:
        output_file.write(stdout)
    return 0
  finally:
    # No matter what, unstage the git changes we made earlier to avoid polluting
    # the index.
    args = ['reset', '--quiet', 'HEAD']
    args.extend(actual_files)
    _RunGit(args)
def main(argv):
  """Parses arguments, runs the tool over its test files and diffs results.

  Returns 0 if all tests pass, otherwise 1 (or the tool's own exit code
  if the tool itself failed).
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--apply-edits',
      action='store_true',
      help='Applies the edits to the original test files and compares the '
      'reformatted new files with the expected files.')
  parser.add_argument(
      '--tool-arg', nargs='?', action='append',
      help='optional arguments passed to the tool')
  parser.add_argument(
      '--tool-path', nargs='?',
      help='optional path to the tool directory')
  parser.add_argument('tool_name',
                      nargs=1,
                      help='Clang tool to be tested.')
  parser.add_argument(
      '--test-filter', default='*', help='optional glob filter for test names')
  args = parser.parse_args(argv)
  tool_to_test = args.tool_name[0]
  print('\nTesting %s\n' % tool_to_test)
  tools_clang_scripts_directory = os.path.dirname(os.path.realpath(__file__))
  tools_clang_directory = os.path.dirname(tools_clang_scripts_directory)
  test_directory_for_tool = os.path.join(
      tools_clang_directory, tool_to_test, 'tests')
  compile_database = os.path.join(test_directory_for_tool,
                                  'compile_commands.json')
  source_files = glob.glob(
      os.path.join(test_directory_for_tool,
                   '%s-original.cc' % args.test_filter))
  # With --apply-edits the rewritten .cc is compared against *-expected.cc;
  # without it, the raw tool output is compared against *-expected.txt.
  ext = 'cc' if args.apply_edits else 'txt'
  actual_files = ['-'.join([source_file.rsplit('-', 1)[0], 'actual.cc'])
                  for source_file in source_files]
  expected_files = ['-'.join([source_file.rsplit('-', 1)[0], 'expected.' + ext])
                    for source_file in source_files]
  if not args.apply_edits and len(actual_files) != 1:
    print('Only one test file is expected for testing without apply-edits.')
    return 1
  include_paths = []
  include_paths.append(
      os.path.realpath(os.path.join(tools_clang_directory, '../..')))
  # Many gtest and gmock headers expect to have testing/gtest/include and/or
  # testing/gmock/include in the include search path.
  include_paths.append(
      os.path.realpath(os.path.join(tools_clang_directory,
                                    '../..',
                                    'testing/gtest/include')))
  include_paths.append(
      os.path.realpath(os.path.join(tools_clang_directory,
                                    '../..',
                                    'testing/gmock/include')))
  if len(actual_files) == 0:
    print('Tool "%s" does not have compatible test files.' % tool_to_test)
    return 1
  # Set up the test environment.
  for source, actual in zip(source_files, actual_files):
    shutil.copyfile(source, actual)
  # Generate a temporary compilation database to run the tool over.
  with open(compile_database, 'w') as f:
    f.write(_GenerateCompileCommands(actual_files, include_paths))
  # Run the tool.
  os.chdir(test_directory_for_tool)
  exitcode = _ApplyTool(tools_clang_scripts_directory, tool_to_test,
                        args.tool_path, args.tool_arg,
                        test_directory_for_tool, actual_files,
                        args.apply_edits)
  if (exitcode != 0):
    return exitcode
  # Compare actual-vs-expected results.
  passed = 0
  failed = 0
  for expected, actual in zip(expected_files, actual_files):
    print('[ RUN ] %s' % os.path.relpath(actual))
    expected_output = actual_output = None
    with open(expected, 'r') as f:
      expected_output = f.readlines()
    with open(actual, 'r') as f:
      actual_output = f.readlines()
    if actual_output != expected_output:
      failed += 1
      lines = difflib.unified_diff(expected_output, actual_output,
                                   fromfile=os.path.relpath(expected),
                                   tofile=os.path.relpath(actual))
      sys.stdout.writelines(lines)
      print('[ FAILED ] %s' % os.path.relpath(actual))
      # Don't clean up the file on failure, so the results can be referenced
      # more easily.
      continue
    print('[ OK ] %s' % os.path.relpath(actual))
    passed += 1
    os.remove(actual)
  # Only remove the generated compile DB when everything passed, so a
  # failing run can be re-debugged by hand.
  if failed == 0:
    os.remove(compile_database)
  print('[==========] %s ran.' % _NumberOfTestsToString(len(source_files)))
  if passed > 0:
    print('[ PASSED ] %s.' % _NumberOfTestsToString(passed))
  if failed > 0:
    print('[ FAILED ] %s.' % _NumberOfTestsToString(failed))
    return 1
if __name__ == '__main__':
  # Propagate main()'s return value as the process exit status.
  sys.exit(main(sys.argv[1:]))
| |
import numpy as np
from pyhsmm.util.general import list_split
from pyhsmm.util.stats import sample_discrete_from_log
from pyhsmm.internals.transitions import WeakLimitHDPHMMTransitions
from pyhsmm.internals.initial_state import HMMInitialState
from pyhlm.internals import hlm_states
class WeakLimitHDPHLMPython(object):
    """Weak-limit HDP hierarchical language model (pure-Python states).

    Each of the ``num_states`` word identities is a tuple of letter
    states generated by an internal letter HSMM; word-to-word dynamics
    follow a weak-limit HDP-HMM.
    """
    # States implementation used by add_data(); subclasses may override.
    _states_class = hlm_states.WeakLimitHDPHLMStatesPython
    def __init__(self, num_states, alpha, gamma, init_state_concentration, letter_hsmm, dur_distns, length_distn):
        """
        :param num_states: number of word states (truncation level).
        :param alpha: HDP-HMM transition concentration parameter.
        :param gamma: HDP-HMM top-level concentration parameter.
        :param init_state_concentration: concentration of the initial
            word-state distribution.
        :param letter_hsmm: HSMM over letters, used to generate and score
            candidate words.
        :param dur_distns: per-word-state duration distributions.
        :param length_distn: distribution over word lengths (in letters).
        """
        self._letter_hsmm = letter_hsmm
        self._length_distn = length_distn#Poisson(alpha_0=30, beta_0=10)
        self._dur_distns = dur_distns
        self._num_states = num_states
        self._letter_num_states = letter_hsmm.num_states
        self._init_state_distn = HMMInitialState(self, init_state_concentration=init_state_concentration)
        self._trans_distn = WeakLimitHDPHMMTransitions(num_states=num_states, alpha=alpha, gamma=gamma)
        self.states_list = []
        # Draw a distinct initial word (letter sequence) for each state.
        self.word_list = [None] * self.num_states
        for i in range(self.num_states):
            word = self.generate_word()
            while word in self.word_list[:i]:
                word = self.generate_word()
            self.word_list[i] = word
        self.resample_dur_distns()
    # --- simple accessors -------------------------------------------------
    @property
    def num_states(self):
        return self._num_states
    @property
    def letter_num_states(self):
        return self._letter_num_states
    @property
    def letter_obs_distns(self):
        return self.letter_hsmm.obs_distns
    @property
    def dur_distns(self):
        return self._dur_distns
    @property
    def letter_dur_distns(self):
        return self.letter_hsmm.dur_distns
    @property
    def init_state_distn(self):
        return self._init_state_distn
    @property
    def trans_distn(self):
        return self._trans_distn
    @property
    def length_distn(self):
        return self._length_distn
    @property
    def letter_hsmm(self):
        return self._letter_hsmm
    @property
    def params(self):
        # Snapshot of all model parameters, mainly for saving/inspection.
        letter_hsmm_params = self.letter_hsmm.params
        bigram_params = {**self.init_state_distn.params, "trans_matrix": self.trans_distn.trans_matrix}
        length_params = self.length_distn.params
        word_dicts = {f"word({i})": np.array(word) for i, word in enumerate(self.word_list)}
        return {"num_states": self.num_states, "word_dicts": word_dicts, "letter_hsmm": letter_hsmm_params, "word_length": length_params, "bigram": bigram_params}
    @property
    def hypparams(self):
        # Snapshot of all hyperparameters, mirroring the layout of params.
        letter_hsmm_hypparams = self.letter_hsmm.hypparams
        bigram_hypparams = self.init_state_distn.hypparams
        length_hypparams = self.length_distn.hypparams
        return {"letter_hsmm": letter_hsmm_hypparams, "word_length": length_hypparams, "bigram": bigram_hypparams}
    def log_likelihood(self):
        # Total log likelihood over all attached data sequences.
        return sum(word_state.log_likelihood() for word_state in self.states_list)
    def word_counts(self):
        # Count how often each word state is used across all state sequences.
        r = np.zeros(self.num_states, dtype=np.int32)
        for s in self.states_list:
            for i in s.stateseq_norep:
                r[i] += 1
        return r
    def generate_word(self):
        # "or 1" guards against a sampled length of zero.
        size = self.length_distn.rvs() or 1
        return self.letter_hsmm.generate_word(size)
    def add_data(self, data, **kwargs):
        # Wrap the observation sequence in a states object for inference.
        self.states_list.append(self._states_class(self, data, **kwargs))
    def add_word_data(self, data, **kwargs):
        # Pass data straight to the letter-level HSMM.
        self.letter_hsmm.add_data(data, **kwargs)
    def resample_model(self, num_procs=0):
        """One full Gibbs sweep over letters, words and model parameters."""
        self.letter_hsmm.states_list = []
        [state.add_word_datas(generate=False) for state in self.states_list]
        self.letter_hsmm.resample_states(num_procs=num_procs)
        [letter_state.reflect_letter_stateseq() for letter_state in self.letter_hsmm.states_list]
        self.resample_words(num_procs=num_procs)
        self.letter_hsmm.resample_parameters_by_sampled_words(self.word_list)
        self.resample_length_distn()
        self.resample_dur_distns()
        self.resample_trans_distn()
        self.resample_init_state_distn()
        self.resample_states(num_procs=num_procs)
        self._clear_caches()
    def resample_states(self, num_procs=0):
        # num_procs == 0 means resample serially in-process.
        if num_procs == 0:
            for state in self.states_list:
                state.resample()
        else:
            self._joblib_resample_states(self.states_list, num_procs)
    def _joblib_resample_states(self, states_list, num_procs):
        """Resample state sequences in parallel worker processes via joblib."""
        from joblib import Parallel, delayed
        from . import parallel
        # warn('joblib is segfaulting on OS X only, not sure why')
        if len(states_list) > 0:
            # Workers receive (data, kwargs) pairs through module globals on
            # the `parallel` helper module and send back sampled sequences.
            joblib_args = list_split(
                [self._get_joblib_pair(s) for s in states_list],
                num_procs)
            parallel.model = self
            parallel.args = joblib_args
            raw_stateseqs = Parallel(n_jobs=num_procs,backend='multiprocessing')\
                (delayed(parallel._get_sampled_stateseq_norep_and_durations_censored)(idx)
                for idx in range(len(joblib_args)))
            # Write the sampled sequences back onto the local states objects.
            for s, (stateseq, stateseq_norep, durations_censored, log_likelihood) in zip(
                [s for grp in list_split(states_list,num_procs) for s in grp],
                [seq for grp in raw_stateseqs for seq in grp]):
                s.stateseq, s._stateseq_norep, s._durations_censored, s._normalizer = stateseq, stateseq_norep, durations_censored, log_likelihood
    def _get_joblib_pair(self,states_obj):
        # Minimal picklable representation of a states object for workers.
        return (states_obj.data, states_obj._kwargs)
    def resample_words(self, num_procs=0):
        """Resample the letter sequence of every word state."""
        if num_procs == 0:
            self.word_list = [self._resample_a_word(
                [letter_state for letter_state in self.letter_hsmm.states_list if letter_state.word_idx == word_idx]
            ) for word_idx in range(self.num_states)]
        else:
            from joblib import Parallel, delayed
            self.word_list = Parallel(n_jobs=num_procs, backend='multiprocessing')\
            ([delayed(self._resample_a_word)(
                [letter_state for letter_state in self.letter_hsmm.states_list if letter_state.word_idx == word_idx]
            ) for word_idx in range(self.num_states)]
            )
        # Merge same letter seq which has different id: redirect the state
        # sequences to the first id and draw a fresh unique word for the
        # duplicate slot.
        for i, word in enumerate(self.word_list):
            if word in self.word_list[:i]:
                existed_id = self.word_list[:i].index(word)
                for word_state in self.states_list:
                    stateseq, stateseq_norep = word_state.stateseq, word_state.stateseq_norep
                    word_state.stateseq[stateseq == i] = existed_id
                    word_state.stateseq_norep[stateseq_norep == i] = existed_id
                word_candi = self.generate_word()
                while word_candi in self.word_list:
                    word_candi = self.generate_word()
                self.word_list[i] = word_candi
    def _resample_a_word(self, hsmm_states):
        """Sample a word from the letter sequences currently assigned to it.

        Each observed letter sequence is a candidate; candidates are scored
        by how well all other segments fit them, then one is sampled.
        """
        # hsmm_states = [letter_state for letter_state in self.letter_hsmm.states_list if letter_state.word_idx == word_idx]
        candidates = [tuple(letter_state.stateseq_norep) for letter_state in hsmm_states]
        unique_candidates = list(set(candidates))
        ref_array = np.array([unique_candidates.index(candi) for candi in candidates])
        if len(candidates) == 0:
            # word currently unused: draw a fresh one from the prior
            return self.generate_word()
        elif len(unique_candidates) == 1:
            return unique_candidates[0]
        cache_score = np.empty((len(unique_candidates), len(candidates)))
        likelihoods = np.array([letter_state.log_likelihood() for letter_state in hsmm_states])
        range_tmp = list(range(len(candidates)))
        for candi_idx, candi in enumerate(unique_candidates):
            tmp = range_tmp[:]
            # skip scoring a candidate against its own (unique) segment
            if (ref_array == candi_idx).sum() == 1:
                tmp.remove(np.where(ref_array == candi_idx)[0][0])
            for tmp_idx in tmp:
                # print(hsmm_states[tmp_idx].likelihood_block_word(candi)[-1])
                cache_score[candi_idx, tmp_idx] = hsmm_states[tmp_idx].likelihood_block_word(candi)[-1]
        cache_scores_matrix = cache_score[ref_array]
        # zero the diagonal so a segment does not score against itself
        for i in range_tmp:
            cache_scores_matrix[i, i] = 0.0
        scores = cache_scores_matrix.sum(axis=1) + likelihoods
        sampled_candi_idx = sample_discrete_from_log(scores)
        return candidates[sampled_candi_idx]
    def resample_length_distn(self):
        # Update the word-length distribution from the current words.
        self.length_distn.resample(np.array([len(word) for word in self.word_list]))
    def resample_dur_distns(self):
        # NOTE: not actually a resample -- this recomputes each word's
        # duration parameter as the sum of its letters' duration rates.
        letter_lmbdas = np.array([letter_dur_distn.lmbda for letter_dur_distn in self.letter_dur_distns])
        for word, dur_distn in zip(self.word_list, self.dur_distns):
            dur_distn.lmbda = np.sum(letter_lmbdas[list(word)])
    def resample_trans_distn(self):
        self.trans_distn.resample([word_state.stateseq_norep for word_state in self.states_list])
    def resample_init_state_distn(self):
        self.init_state_distn.resample(np.array([word_state.stateseq_norep[0] for word_state in self.states_list]))
    def _clear_caches(self):
        for word_state in self.states_list:
            word_state.clear_caches()
class WeakLimitHDPHLM(WeakLimitHDPHLMPython):
    """Variant of WeakLimitHDPHLMPython that uses
    ``hlm_states.WeakLimitHDPHLMStates`` as its states class."""
    _states_class = hlm_states.WeakLimitHDPHLMStates
| |
# Copyright 2011 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: Not platform independent -- uses VT escape codes
# Magic sequence used to introduce a command or color
MAGIC = "@@@"

import logging
import sys

# Color name used to render each log level
LEVEL_COLORS = {
    'DEBUG': 'CYAN',
    'INFO': 'GREEN',
    'WARNING': 'YELLOW',
    'ERROR': 'RED',
    'CRITICAL': 'blink@@@RED',
}

# Will get set to True once the module has been initialized via launch()
enabled = False

# Gets set to True if we should strip the special sequences from the text
# but not actually try to colorize it
_strip_only = False

# Color name -> (intensity bit, base value); intense variants added below
COLORS = {
    'black': (0, 0),
    'red': (0, 1),
    'green': (0, 2),
    'yellow': (0, 3),
    'blue': (0, 4),
    'magenta': (0, 5),
    'cyan': (0, 6),
    'gray': (0, 7),
    'darkgray': (1, 0),
    'pink': (1, 1),
    'white': (1, 7),
}

# Add an intense/bold variant (name in capitals) for each normal color
for _c in [_n for _n, _v in COLORS.items() if _v[0] == 0]:
  COLORS[_c.upper()] = (1, COLORS[_c][1])

# Numeric SGR codes for the recognized commands
COMMANDS = {
    'reset': 0,
    'bold': 1,
    'dim': 2,
    'bright': 1,
    'dull': 2,
    'bright:': 1,
    'dull:': 2,
    'blink': 5,
    'BLINK': 6,
    'invert': 7,
    'bg:': -1,   # Special -- modifies the color that follows
    'level': -2, # Special -- color of current level
    'normal': 22,
    'underline': 4,
    'nounderline': 24,
}

# Control Sequence Introducer
CSI = "\033["
def _color (color, msg):
  """Wrap msg in the escape sequence for color, followed by a reset."""
  prefix = _proc(MAGIC + color)
  suffix = _proc(MAGIC + 'reset').lower()
  return prefix + msg + suffix
def _proc (msg, level_color = "DEBUG"):
  """
  Do some replacements on the text

  Splits msg on the MAGIC separator and interprets each command/color
  chunk, emitting the matching ANSI escape sequences (or stripping the
  sequences entirely when _strip_only is set).
  """
  msg = msg.split(MAGIC)
  #print "proc:",msg
  r = ''
  i = 0
  cmd = False
  while i < len(msg):
    m = msg[i]
    #print i,m
    i += 1
    if cmd:
      best = None
      bestlen = 0
      # Longest-prefix match against the command names.
      # (.items() instead of the Python-2-only .iteritems() so this also
      # works on Python 3; on Python 2 the iteration is identical.)
      for k,v in COMMANDS.items():
        if len(k) > bestlen:
          if m.startswith(k):
            best = (k,v)
            bestlen = len(k)
      special = None
      if best is not None and best[0].endswith(':'):
        # Commands ending with ':' (e.g. "bg:") modify the color that
        # follows them, so keep matching after the command.
        special = best
        m = m[bestlen:]
        best = None
        bestlen = 0
      # Longest-prefix match against the color names.
      for k,v in COLORS.items():
        if len(k) > bestlen:
          if m.startswith(k):
            best = (k,v)
            bestlen = len(k)
      if best is not None:
        #print "COMMAND", best
        m = m[bestlen:]
        if type(best[1]) is tuple:
          # Color
          brightness,color = best[1]
          if special is not None:
            if special[1] == -1:
              # background color: shift into the 40..47 range
              brightness = None
              color += 10
          color += 30
          if not _strip_only:
            r += CSI
            if brightness is not None:
              r += str(brightness) + ";"
            r += str(color) + "m"
        elif not _strip_only:
          # Command
          if best[1] == -2:
            # 'level': recursively expand the color of the current level
            r += _proc(MAGIC + LEVEL_COLORS.get(level_color, ""), level_color)
          else:
            r += CSI + str(best[1]) + "m"
    cmd = True
    r += m
  return r
def launch (entire=False):
  """
  If --entire then the whole message is color-coded, otherwise just the
  log level.

  Also turns on interpretation of some special sequences in the log
  format string. For example, try:
  log --format="%(levelname)s: @@@bold%(message)s@@@normal" log.color
  """
  global enabled
  if enabled: return
  from pox.core import core
  log = core.getLogger()
  windows_hack = False
  # Try to work on Windows
  if sys.platform == "win32":
    try:
      from colorama import init
      windows_hack = True
      init()
    except Exception:
      # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt are
      # not swallowed; colorizing remains best-effort without colorama.
      log.info("You need colorama if you want color logging on Windows")
      global _strip_only
      _strip_only = True
  from pox.core import _default_log_handler as dlf
  if not dlf:
    log.warning("Color logging disabled -- no default logger found")
    return
  #if not hasattr(dlf, 'formatter'):
  # log.warning("Color logging disabled -- no formatter found")
  # return
  #if not hasattr(dlf.formatter, '_fmt'):
  # log.warning("Color logging disabled -- formatter unrecognized")
  # return
  # Monkeypatch in a new format function...
  old_format = dlf.format
  if entire:
    # Color the entire formatted message with the level's color.
    def new_format (record):
      msg = _proc(old_format(record), record.levelname)
      color = LEVEL_COLORS.get(record.levelname)
      if color is None:
        return msg
      return _color(color, msg)
  else:
    # Color only the level name, restoring it afterwards so other
    # handlers see the unmodified record.
    def new_format (record):
      color = LEVEL_COLORS.get(record.levelname)
      oldlevelname = record.levelname
      if color is not None:
        record.levelname = _color(color, record.levelname)
      r = _proc(old_format(record), oldlevelname)
      record.levelname = oldlevelname
      return r
  dlf.format = new_format
  if windows_hack:
    # colorama wraps sys.stderr; point the handler at the wrapper.
    if hasattr(dlf, "stream"):
      if dlf.stream is sys.__stderr__:
        dlf.stream = sys.stderr
        enabled = True
  else:
    enabled = True
| |
import asyncio
from concurrent.futures import Executor, ThreadPoolExecutor
from itertools import count
from threading import current_thread
from unittest.mock import patch
import pytest
from async_generator import yield_
from asphalt.core import (
Context, ResourceConflict, ResourceNotFound, callable_name, context_teardown, executor)
from asphalt.core.context import ResourceContainer, TeardownError
@pytest.fixture
def context():
    """Provide a fresh root Context for each test."""
    ctx = Context()
    return ctx
@pytest.fixture
async def special_executor(context):
    # Single-thread executor registered on the context as a named resource
    # ("special", type Executor); shut down when the test is done with it.
    executor = ThreadPoolExecutor(1)
    context.add_resource(executor, 'special', types=[Executor])
    yield executor
    executor.shutdown()
class TestResourceContainer:
    """Tests for asphalt.core.context.ResourceContainer."""
    @pytest.mark.parametrize('thread', [False, True], ids=['eventloop', 'worker'])
    @pytest.mark.parametrize('context_attr', [None, 'attrname'], ids=['no_attr', 'has_attr'])
    @pytest.mark.asyncio
    async def test_generate_value(self, thread, context_attr):
        """Generating a factory's value caches it on the context (and as a
        context attribute when one is configured), both when called on the
        event loop and from a worker thread."""
        container = ResourceContainer(lambda ctx: 'foo', (str,), 'default', context_attr, True)
        context = Context()
        if thread:
            value = await context.call_in_executor(container.generate_value, context)
        else:
            value = container.generate_value(context)
        assert value == 'foo'
        assert context.get_resource(str) == 'foo'
        if context_attr:
            assert getattr(context, context_attr) == 'foo'
    def test_repr(self):
        """repr() of a plain (non-factory) container shows its value."""
        container = ResourceContainer('foo', (str,), 'default', 'attrname', False)
        assert repr(container) == ("ResourceContainer(value='foo', types=[str], name='default', "
                                   "context_attr='attrname')")
    def test_repr_factory(self):
        """repr() of a factory container shows the factory callable's name."""
        container = ResourceContainer(lambda ctx: 'foo', (str,), 'default', 'attrname', True)
        assert repr(container) == (
            "ResourceContainer(factory=test_context.TestResourceContainer.test_repr_factory."
            "<locals>.<lambda>, types=[str], name='default', context_attr='attrname')")
class TestContext:
@pytest.mark.asyncio
async def test_parent(self):
"""Test that the parent property points to the parent context instance, if any."""
parent = Context()
child = Context(parent)
assert parent.parent is None
assert child.parent is parent
@pytest.mark.parametrize('exception', [None, Exception('foo')],
ids=['noexception', 'exception'])
@pytest.mark.asyncio
async def test_close(self, context, exception):
"""
Test that teardown callbacks are called in reverse order when a context is closed.
"""
def callback(exception=None):
called_functions.append((callback, exception))
async def async_callback(exception=None):
called_functions.append((async_callback, exception))
called_functions = []
context.add_teardown_callback(callback, pass_exception=True)
context.add_teardown_callback(async_callback, pass_exception=True)
await context.close(exception)
assert called_functions == [(async_callback, exception), (callback, exception)]
@pytest.mark.asyncio
async def test_teardown_callback_exception(self, context):
"""
Test that all callbacks are called even when some teardown callbacks raise exceptions,
and that a TeardownError is raised in such a case, containing the exception objects.
"""
def callback1():
items.append(1)
def callback2():
raise Exception('foo')
context.add_teardown_callback(callback1)
context.add_teardown_callback(callback2)
context.add_teardown_callback(callback1)
context.add_teardown_callback(callback2)
items = []
with pytest.raises(TeardownError) as exc:
await context.close()
assert 'foo' in str(exc.value)
assert items == [1, 1]
assert len(exc.value.exceptions) == 2
@pytest.mark.asyncio
async def test_close_closed(self, context):
"""Test that closing an already closed context raises a RuntimeError."""
assert not context.closed
await context.close()
assert context.closed
with pytest.raises(RuntimeError) as exc:
await context.close()
exc.match('this context has already been closed')
def test_contextmanager_exception(self, context, event_loop):
close_future = event_loop.create_future()
close_future.set_result(None)
exception = Exception('foo')
with patch.object(context, 'close', return_value=close_future):
with pytest.raises(Exception) as exc, pytest.deprecated_call():
with context:
raise exception
# close.assert_called_once_with(exception)
assert exc.value is exception
@pytest.mark.asyncio
async def test_async_contextmanager_exception(self, event_loop, context):
"""Test that "async with context:" calls close() with the exception raised in the block."""
close_future = event_loop.create_future()
close_future.set_result(None)
exception = Exception('foo')
with patch.object(context, 'close', return_value=close_future) as close:
with pytest.raises(Exception) as exc:
async with context:
raise exception
close.assert_called_once_with(exception)
assert exc.value is exception
@pytest.mark.parametrize('types', [int, (int,), ()], ids=['type', 'tuple', 'empty'])
@pytest.mark.asyncio
async def test_add_resource(self, context, event_loop, types):
"""Test that a resource is properly added in the context and listeners are notified."""
event_loop.call_soon(context.add_resource, 6, 'foo', 'foo.bar', types)
event = await context.resource_added.wait_event()
assert event.resource_types == (int,)
assert event.resource_name == 'foo'
assert not event.is_factory
assert context.get_resource(int, 'foo') == 6
@pytest.mark.asyncio
async def test_add_resource_name_conflict(self, context):
"""Test that adding a resource won't replace any existing resources."""
context.add_resource(5, 'foo')
with pytest.raises(ResourceConflict) as exc:
context.add_resource(4, 'foo')
exc.match("this context already contains a resource of type int using the name 'foo'")
@pytest.mark.asyncio
async def test_add_resource_none_value(self, context):
"""Test that None is not accepted as a resource value."""
exc = pytest.raises(ValueError, context.add_resource, None)
exc.match('"value" must not be None')
@pytest.mark.asyncio
async def test_add_resource_context_attr(self, context):
    """Test that when resources are added, they are also set as properties of the context."""
    context.add_resource(1, context_attr='foo')
    # The resource must be reachable via plain attribute access
    assert context.foo == 1
def test_add_resource_context_attr_conflict(self, context):
    """
    Test that the context won't allow adding a resource with an attribute name that conflicts
    with an existing attribute.
    """
    context.a = 2
    with pytest.raises(ResourceConflict) as exc:
        context.add_resource(2, context_attr='a')

    exc.match("this context already has an attribute 'a'")
    # The failed add must not leave a partially-registered resource behind
    assert context.get_resource(int) is None
@pytest.mark.asyncio
async def test_add_resource_type_conflict(self, context):
    """Test that adding a second unnamed resource of an existing type raises ResourceConflict."""
    context.add_resource(5)
    with pytest.raises(ResourceConflict) as exc:
        # add_resource() is a plain synchronous method (called without await
        # everywhere else in this module); the previous "await" here was
        # spurious and unreachable, since the call itself raises.
        context.add_resource(6)

    exc.match("this context already contains a resource of type int using the name 'default'")
@pytest.mark.parametrize('name', ['a.b', 'a:b', 'a b'], ids=['dot', 'colon', 'space'])
@pytest.mark.asyncio
async def test_add_resource_bad_name(self, context, name):
    """Resource names containing separators or whitespace are rejected."""
    with pytest.raises(ValueError) as exc:
        context.add_resource(1, name)

    exc.match('"name" must be a nonempty string consisting only of alphanumeric characters '
              'and underscores')
@pytest.mark.asyncio
async def test_add_resource_factory(self, context):
    """Test that resources factory callbacks are only called once for each context."""
    def factory(ctx):
        assert ctx is context
        return next(counter)

    # Late-bound closure: "counter" is assigned before the factory first runs
    counter = count(1)
    context.add_resource_factory(factory, int, context_attr='foo')
    # First access invokes the factory; the second must reuse the cached value
    assert context.foo == 1
    assert context.foo == 1
    # The generated value is cached directly on the instance __dict__
    assert context.__dict__['foo'] == 1
@pytest.mark.parametrize('name', ['a.b', 'a:b', 'a b'], ids=['dot', 'colon', 'space'])
@pytest.mark.asyncio
async def test_add_resource_factory_bad_name(self, context, name):
    """Factory resource names follow the same naming rules as plain resources."""
    with pytest.raises(ValueError) as exc:
        context.add_resource_factory(lambda ctx: 1, int, name)

    exc.match('"name" must be a nonempty string consisting only of alphanumeric characters '
              'and underscores')
@pytest.mark.asyncio
async def test_add_resource_factory_coroutine_callback(self, context):
    """Factory callbacks must be synchronous; coroutine functions are rejected."""
    async def factory(ctx):
        return 1

    with pytest.raises(TypeError) as exc:
        context.add_resource_factory(factory, int)

    exc.match('"factory_callback" must not be a coroutine function')
@pytest.mark.asyncio
async def test_add_resource_factory_empty_types(self, context):
    """A factory must declare at least one resource type."""
    with pytest.raises(ValueError) as exc:
        context.add_resource_factory(lambda ctx: 1, ())

    exc.match('"types" must not be empty')
@pytest.mark.asyncio
async def test_add_resource_factory_context_attr_conflict(self, context):
    """Two factories may not claim the same context attribute name."""
    context.add_resource_factory(lambda ctx: None, str, context_attr='foo')
    with pytest.raises(ResourceConflict) as exc:
        # add_resource_factory() is synchronous (called without await just
        # above); the previous "await" here was spurious and unreachable,
        # since the call itself raises.
        context.add_resource_factory(lambda ctx: None, str, context_attr='foo')

    exc.match(
        "this context already contains a resource factory for the context attribute 'foo'")
@pytest.mark.asyncio
async def test_add_resource_factory_type_conflict(self, context):
    """Two factories may not both provide the same resource type."""
    context.add_resource_factory(lambda ctx: None, (str, int))
    with pytest.raises(ResourceConflict) as exc:
        # add_resource_factory() is synchronous (called without await just
        # above); the previous "await" here was spurious and unreachable,
        # since the call itself raises.
        context.add_resource_factory(lambda ctx: None, int)

    exc.match('this context already contains a resource factory for the type int')
@pytest.mark.asyncio
async def test_add_resource_factory_no_inherit(self, context):
    """
    Test that a subcontext gets its own version of a factory-generated resource even if a
    parent context has one already.
    """
    # id() of the receiving context makes parent/child values distinguishable
    context.add_resource_factory(id, int, context_attr='foo')
    subcontext = Context(context)
    assert context.foo == id(context)
    assert subcontext.foo == id(subcontext)
@pytest.mark.asyncio
async def test_getattr_attribute_error(self, context):
    """A missing attribute raises AttributeError even when a parent context exists."""
    child_context = Context(context)
    pytest.raises(AttributeError, getattr, child_context, 'foo').\
        match('no such context variable: foo')
@pytest.mark.asyncio
async def test_getattr_parent(self, context):
    """
    Test that accessing a nonexistent attribute on a context retrieves the value from parent.
    """
    # Attribute lookup is dynamic, so the order of these two lines is irrelevant
    context.a = 2
    child = Context(context)
    assert child.a == 2
@pytest.mark.asyncio
async def test_get_resources(self, context):
    """get_resources() collects matching resources from the whole context chain."""
    context.add_resource(9, 'foo')
    context.add_resource_factory(lambda ctx: len(ctx.context_chain), int, 'bar')
    # Materialize the factory-generated resource in the parent context
    context.require_resource(int, 'bar')
    subctx = Context(context)
    # Same type+name in the child shadows the parent's 9
    subctx.add_resource(4, 'foo')
    assert subctx.get_resources(int) == {1, 4}
@pytest.mark.asyncio
async def test_require_resource(self, context):
    """Test that require_resource() returns a resource that was previously added."""
    context.add_resource(1)
    resource = context.require_resource(int)
    assert resource == 1
def test_require_resource_not_found(self, context):
    """Test that ResourceNotFound is raised when a required resource is not found."""
    exc = pytest.raises(ResourceNotFound, context.require_resource, int, 'foo')
    exc.match("no matching resource was found for type=int name='foo'")
    # The exception carries the lookup key for programmatic handling
    assert exc.value.type == int
    assert exc.value.name == 'foo'
@pytest.mark.asyncio
async def test_request_resource_parent_add(self, context, event_loop):
    """
    Test that adding a resource to the parent context will satisfy a resource request in a
    child context.
    """
    child_context = Context(context)
    # Start the request first; the resource arrives afterwards via call_soon
    task = event_loop.create_task(child_context.request_resource(int))
    event_loop.call_soon(context.add_resource, 6)
    resource = await task
    assert resource == 6
@pytest.mark.asyncio
async def test_request_resource_factory_context_attr(self, context):
    """Test that requesting a factory-generated resource also sets the context variable."""
    context.add_resource_factory(lambda ctx: 6, int, context_attr='foo')
    await context.request_resource(int)
    # The generated value must be cached on the instance, not just returned
    assert context.__dict__['foo'] == 6
@pytest.mark.asyncio
async def test_call_async_plain(self, context):
    """call_async() runs a plain function in the event loop thread and returns its result."""
    def runs_in_event_loop(worker_thread, x, y):
        # Must not execute in the worker thread that scheduled it
        assert current_thread() is not worker_thread
        return x + y

    def runs_in_worker_thread():
        worker_thread = current_thread()
        return context.call_async(runs_in_event_loop, worker_thread, 1, y=2)

    assert await context.call_in_executor(runs_in_worker_thread) == 3
@pytest.mark.asyncio
async def test_call_async_coroutine(self, context):
    """call_async() also accepts a coroutine function and awaits it in the event loop."""
    async def runs_in_event_loop(worker_thread, x, y):
        # Must not execute in the worker thread that scheduled it
        assert current_thread() is not worker_thread
        await asyncio.sleep(0.1)
        return x + y

    def runs_in_worker_thread():
        worker_thread = current_thread()
        return context.call_async(runs_in_event_loop, worker_thread, 1, y=2)

    assert await context.call_in_executor(runs_in_worker_thread) == 3
@pytest.mark.asyncio
async def test_call_async_exception(self, context):
    """An exception raised by the event-loop callable propagates to the worker caller."""
    def runs_in_event_loop():
        raise ValueError('foo')

    with pytest.raises(ValueError) as exc:
        await context.call_in_executor(context.call_async, runs_in_event_loop)

    assert exc.match('foo')
@pytest.mark.asyncio
async def test_call_in_executor(self, context):
    """Test that call_in_executor actually runs the target in a worker thread."""
    event_loop_thread = current_thread()
    worker_thread = await context.call_in_executor(current_thread)
    assert worker_thread is not event_loop_thread
@pytest.mark.parametrize('use_resource_name', [True, False], ids=['direct', 'resource'])
@pytest.mark.asyncio
async def test_call_in_executor_explicit(self, context, use_resource_name):
    """call_in_executor() accepts either an Executor instance or a resource name."""
    executor = ThreadPoolExecutor(1)
    context.add_resource(executor, types=[Executor])
    # Ensure the pool is shut down when the context closes
    context.add_teardown_callback(executor.shutdown)
    executor_arg = 'default' if use_resource_name else executor
    worker_thread = await context.call_in_executor(current_thread, executor=executor_arg)
    assert worker_thread is not current_thread()
@pytest.mark.asyncio
async def test_threadpool(self, context):
    """The threadpool() async context manager moves execution into a worker thread."""
    event_loop_thread = current_thread()
    async with context.threadpool():
        assert current_thread() is not event_loop_thread
@pytest.mark.asyncio
async def test_threadpool_named_executor(self, context, special_executor):
    """threadpool('name') runs the block in the named executor resource."""
    # Discover which thread the single-worker special executor uses
    special_executor_thread = special_executor.submit(current_thread).result()
    async with context.threadpool('special'):
        assert current_thread() is special_executor_thread
class TestExecutor:
    """Tests for the @executor decorator."""

    @pytest.mark.asyncio
    async def test_no_arguments(self, context):
        """A bare @executor runs the decorated function in the default worker thread."""
        @executor
        def runs_in_default_worker():
            # Late-bound closure: event_loop_thread is assigned before the call
            assert current_thread() is not event_loop_thread

        event_loop_thread = current_thread()
        await runs_in_default_worker()

    @pytest.mark.asyncio
    async def test_named_executor(self, context, special_executor):
        """@executor('name') runs the function in the named executor resource."""
        @executor('special')
        def runs_in_default_worker(ctx):
            assert current_thread() is special_executor_thread

        special_executor_thread = special_executor.submit(current_thread).result()
        await runs_in_default_worker(context)

    @pytest.mark.asyncio
    async def test_executor_missing_context(self, event_loop, context):
        """Calling a named-executor function without a Context argument is an error."""
        @executor('special')
        def runs_in_default_worker():
            pass

        with pytest.raises(RuntimeError) as exc:
            await runs_in_default_worker()

        exc.match(r'the first positional argument to %s\(\) has to be a Context instance' %
                  callable_name(runs_in_default_worker))
class TestContextTeardown:
    """Tests for the @context_teardown decorator."""

    @pytest.mark.parametrize('expected_exc', [
        None, Exception('foo')
    ], ids=['no_exception', 'exception'])
    @pytest.mark.asyncio
    async def test_function(self, expected_exc):
        """The generator resumes past ``yield`` with the exception passed to close()."""
        @context_teardown
        async def start(ctx: Context):
            nonlocal phase, received_exception
            phase = 'started'
            exc = yield
            phase = 'finished'
            received_exception = exc

        phase = received_exception = None
        context = Context()
        await start(context)
        assert phase == 'started'

        await context.close(expected_exc)
        assert phase == 'finished'
        assert received_exception == expected_exc

    @pytest.mark.parametrize('expected_exc', [
        None, Exception('foo')
    ], ids=['no_exception', 'exception'])
    @pytest.mark.asyncio
    async def test_method(self, expected_exc):
        """The decorator works the same way on bound methods."""
        class SomeComponent:
            @context_teardown
            async def start(self, ctx: Context):
                nonlocal phase, received_exception
                phase = 'started'
                exc = yield
                phase = 'finished'
                received_exception = exc

        phase = received_exception = None
        context = Context()
        await SomeComponent().start(context)
        assert phase == 'started'

        await context.close(expected_exc)
        assert phase == 'finished'
        assert received_exception == expected_exc

    def test_plain_function(self):
        """Decorating a non-async-generator function raises TypeError."""
        def start(ctx):
            pass

        pytest.raises(TypeError, context_teardown, start).\
            match(' must be an async generator function')

    @pytest.mark.asyncio
    async def test_bad_args(self):
        """Calling the wrapped function without a Context raises RuntimeError."""
        with pytest.deprecated_call():
            @context_teardown
            async def start(ctx):
                pass

        with pytest.raises(RuntimeError) as exc:
            await start(None)

        exc.match(r'the first positional argument to %s\(\) has to be a Context instance' %
                  callable_name(start))

    @pytest.mark.asyncio
    async def test_exception(self):
        """An exception raised before the yield propagates out of start()."""
        @context_teardown
        async def start(ctx):
            raise Exception('dummy error')
            # unreachable, but makes this an async generator function
            yield

        context = Context()
        with pytest.raises(Exception) as exc_info:
            await start(context)

        exc_info.match('dummy error')

    @pytest.mark.asyncio
    async def test_missing_yield(self):
        """A coroutine (no yield) is accepted but triggers a deprecation warning."""
        with pytest.deprecated_call():
            @context_teardown
            async def start(ctx: Context):
                pass

        await start(Context())

    @pytest.mark.asyncio
    async def test_py35_generator(self):
        """The legacy async_generator ``yield_()`` style is accepted but deprecated."""
        with pytest.deprecated_call():
            @context_teardown
            async def start(ctx: Context):
                await yield_()

        await start(Context())
class TestContextFinisher:
    # NOTE(review): this duplicates TestContextTeardown.test_function verbatim —
    # presumably retained for coverage of a deprecated alias; confirm and
    # consolidate if not.

    @pytest.mark.parametrize('expected_exc', [
        None, Exception('foo')
    ], ids=['no_exception', 'exception'])
    @pytest.mark.asyncio
    async def test_context_teardown(self, expected_exc):
        """The generator resumes past ``yield`` with the exception passed to close()."""
        @context_teardown
        async def start(ctx: Context):
            nonlocal phase, received_exception
            phase = 'started'
            exc = yield
            phase = 'finished'
            received_exception = exc

        phase = received_exception = None
        context = Context()
        await start(context)
        assert phase == 'started'

        await context.close(expected_exc)
        assert phase == 'finished'
        assert received_exception == expected_exc
| |
# Copyright (c) 2013 Dell Inc.
# Copyright 2013 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from eventlet import greenthread
import mock
from oslo_concurrency import processutils
import paramiko
import six
from cinder import context
from cinder import exception
from cinder import ssh_utils
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers import eqlx
class DellEQLSanISCSIDriverTestCase(test.TestCase):
    """Unit tests for the Dell EqualLogic (eqlx) SAN iSCSI volume driver.

    The CLI transport (_eql_execute / _ssh_execute) is mocked throughout;
    tests verify the driver logic against canned CLI output.
    """

    def setUp(self):
        # Build a mocked configuration and the canned CLI outputs shared by the tests
        super(DellEQLSanISCSIDriverTestCase, self).setUp()
        self.configuration = mock.Mock(conf.Configuration)
        self.configuration.san_is_local = False
        self.configuration.san_ip = "10.0.0.1"
        self.configuration.san_login = "foo"
        self.configuration.san_password = "bar"
        self.configuration.san_ssh_port = 16022
        self.configuration.san_thin_provision = True
        self.configuration.san_private_key = 'foo'
        self.configuration.ssh_min_pool_conn = 1
        self.configuration.ssh_max_pool_conn = 5
        self.configuration.ssh_conn_timeout = 30
        self.configuration.eqlx_pool = 'non-default'
        self.configuration.eqlx_group_name = 'group-0'
        self.configuration.eqlx_cli_timeout = 30
        self.configuration.eqlx_cli_max_retries = 5
        self.configuration.eqlx_use_chap = False
        self.configuration.use_chap_auth = True
        self.configuration.chap_username = 'admin'
        self.configuration.chap_password = 'password'
        self.configuration.max_over_subscription_ratio = 1.0
        # Canned "pool ... show" output consumed by the stats tests
        self.driver_stats_output = ['TotalCapacity: 111GB',
                                    'FreeSpace: 11GB',
                                    'VolumeReserve: 80GB']
        self.cmd = 'this is dummy command'
        self._context = context.get_admin_context()
        self.driver = eqlx.DellEQLSanISCSIDriver(
            configuration=self.configuration)
        self.volume_name = "fakevolume"
        self.volid = "fakeid"
        self.connector = {
            'ip': '10.0.0.2',
            'initiator': 'iqn.1993-08.org.debian:01:2227dab76162',
            'host': 'fakehost'}
        # Canned "access ... show" table used by terminate_connection
        self.access_record_output = [
            "ID Initiator Ipaddress AuthMethod UserName Apply-To",
            "--- --------------- ------------- ---------- ---------- --------",
            "1 iqn.1993-08.org.debian:01:222 *.*.*.* none both",
            " 7dab76162"]
        self.fake_iqn = 'iqn.2003-10.com.equallogic:group01:25366:fakev'
        self.fake_iqn_return = ['iSCSI target name is %s.' % self.fake_iqn]
        self.driver._group_ip = '10.0.1.6'
        self.properties = {
            'target_discovered': True,
            'target_portal': '%s:3260' % self.driver._group_ip,
            'target_iqn': self.fake_iqn,
            'volume_id': 1}
        # Expected model update for create/clone operations (CHAP enabled above)
        self._model_update = {
            'provider_location': "%s:3260,1 %s 0" % (self.driver._group_ip,
                                                     self.fake_iqn),
            'provider_auth': 'CHAP %s %s' % (
                self.configuration.chap_username,
                self.configuration.chap_password)
        }

    def _fake_get_iscsi_properties(self, volume):
        # Stand-in for the driver's _get_iscsi_properties()
        return self.properties

    def test_create_volume(self):
        """create_volume() issues the expected CLI and returns the model update."""
        volume = {'name': self.volume_name, 'size': 1}
        mock_attrs = {'args': ['volume', 'create', volume['name'],
                               "%sG" % (volume['size']), 'pool',
                               self.configuration.eqlx_pool,
                               'thin-provision']}
        with mock.patch.object(self.driver,
                               '_eql_execute') as mock_eql_execute:
            mock_eql_execute.configure_mock(**mock_attrs)
            mock_eql_execute.return_value = self.fake_iqn_return
            model_update = self.driver.create_volume(volume)
            self.assertEqual(self._model_update, model_update)

    def test_delete_volume(self):
        """delete_volume() shows, offlines, then deletes the volume."""
        volume = {'name': self.volume_name, 'size': 1}
        show_attrs = {'args': ['volume', 'select', volume['name'], 'show']}
        off_attrs = {'args': ['volume', 'select', volume['name'], 'offline']}
        delete_attrs = {'args': ['volume', 'delete', volume['name']]}
        with mock.patch.object(self.driver,
                               '_eql_execute') as mock_eql_execute:
            mock_eql_execute.configure_mock(**show_attrs)
            mock_eql_execute.configure_mock(**off_attrs)
            mock_eql_execute.configure_mock(**delete_attrs)
            self.driver.delete_volume(volume)

    def test_delete_absent_volume(self):
        """Deleting a volume that no longer exists must not raise."""
        volume = {'name': self.volume_name, 'size': 1, 'id': self.volid}
        mock_attrs = {'args': ['volume', 'select', volume['name'], 'show']}
        with mock.patch.object(self.driver,
                               '_eql_execute') as mock_eql_execute:
            mock_eql_execute.configure_mock(**mock_attrs)
            # CLI reports "does not exist"; the driver should swallow this
            mock_eql_execute.side_effect = processutils.ProcessExecutionError(
                stdout='% Error ..... does not exist.\n')
            self.driver.delete_volume(volume)

    def test_ensure_export(self):
        """ensure_export() only verifies the volume is visible."""
        volume = {'name': self.volume_name, 'size': 1}
        mock_attrs = {'args': ['volume', 'select', volume['name'], 'show']}
        with mock.patch.object(self.driver,
                               '_eql_execute') as mock_eql_execute:
            mock_eql_execute.configure_mock(**mock_attrs)
            self.driver.ensure_export({}, volume)

    def test_create_snapshot(self):
        """create_snapshot() parses the snapshot name out of the CLI output."""
        snapshot = {'name': 'fakesnap', 'volume_name': 'fakevolume_name'}
        snap_name = 'fake_snap_name'
        with mock.patch.object(self.driver,
                               '_eql_execute') as mock_eql_execute:
            mock_eql_execute.return_value = ['Snapshot name is %s' % snap_name]
            self.driver.create_snapshot(snapshot)

    def test_create_volume_from_snapshot(self):
        """Cloning a snapshot returns the standard model update."""
        snapshot = {'name': 'fakesnap', 'volume_name': 'fakevolume_name'}
        volume = {'name': self.volume_name}
        mock_attrs = {'args': ['volume', 'select', snapshot['volume_name'],
                               'snapshot', 'select', snapshot['name'],
                               'clone', volume['name']]}
        with mock.patch.object(self.driver,
                               '_eql_execute') as mock_eql_execute:
            mock_eql_execute.configure_mock(**mock_attrs)
            mock_eql_execute.return_value = self.fake_iqn_return
            model_update = self.driver.create_volume_from_snapshot(volume,
                                                                   snapshot)
            self.assertEqual(self._model_update, model_update)

    def test_create_cloned_volume(self):
        """Cloning a volume returns the standard model update."""
        src_vref = {'name': 'fake_uuid'}
        volume = {'name': self.volume_name}
        mock_attrs = {'args': ['volume', 'select', volume['name'],
                               'multihost-access', 'enable']}
        with mock.patch.object(self.driver,
                               '_eql_execute') as mock_eql_execute:
            mock_eql_execute.configure_mock(**mock_attrs)
            mock_eql_execute.return_value = self.fake_iqn_return
            model_update = self.driver.create_cloned_volume(volume, src_vref)
            self.assertEqual(self._model_update, model_update)

    def test_delete_snapshot(self):
        """delete_snapshot() issues the snapshot delete CLI."""
        snapshot = {'name': 'fakesnap', 'volume_name': 'fakevolume_name'}
        mock_attrs = {'args': ['volume', 'select', snapshot['volume_name'],
                               'snapshot', 'delete', snapshot['name']]}
        with mock.patch.object(self.driver,
                               '_eql_execute') as mock_eql_execute:
            mock_eql_execute.configure_mock(**mock_attrs)
            self.driver.delete_snapshot(snapshot)

    def test_extend_volume(self):
        """extend_volume() issues a size change CLI."""
        new_size = '200'
        volume = {'name': self.volume_name, 'size': 100}
        mock_attrs = {'args': ['volume', 'select', volume['name'],
                               'size', "%sG" % new_size]}
        with mock.patch.object(self.driver,
                               '_eql_execute') as mock_eql_execute:
            mock_eql_execute.configure_mock(**mock_attrs)
            self.driver.extend_volume(volume, new_size)

    def test_initialize_connection(self):
        """initialize_connection() creates a CHAP access record and returns properties."""
        volume = {'name': self.volume_name}
        mock_attrs = {'args': ['volume', 'select', volume['name'], 'access',
                               'create', 'initiator',
                               self.connector['initiator'],
                               'authmethod', 'chap',
                               'username',
                               self.configuration.chap_username]}
        with mock.patch.object(self.driver,
                               '_eql_execute') as mock_eql_execute:
            with mock.patch.object(self.driver,
                                   '_get_iscsi_properties') as mock_iscsi:
                mock_eql_execute.configure_mock(**mock_attrs)
                mock_iscsi.return_value = self.properties
                iscsi_properties = self.driver.initialize_connection(
                    volume, self.connector)
                self.assertEqual(self._fake_get_iscsi_properties(volume),
                                 iscsi_properties['data'])

    def test_terminate_connection(self):
        """terminate_connection() looks up the access record and removes it."""
        def my_side_effect(*args, **kwargs):
            # Return the canned access table only for the "show" subcommand
            if args[4] == 'show':
                return self.access_record_output
            else:
                return ''

        volume = {'name': self.volume_name}
        with mock.patch.object(self.driver,
                               '_eql_execute') as mock_eql_execute:
            mock_eql_execute.side_effect = my_side_effect
            self.driver.terminate_connection(volume, self.connector)

    def test_do_setup(self):
        """do_setup() reads the group IP address from grpparams output."""
        fake_group_ip = '10.1.2.3'

        def my_side_effect(*args, **kwargs):
            if args[0] == 'grpparams':
                return ['Group-Ipaddress: %s' % fake_group_ip]
            else:
                return ''

        with mock.patch.object(self.driver,
                               '_eql_execute') as mock_eql_execute:
            mock_eql_execute.side_effect = my_side_effect
            self.driver.do_setup(self._context)
            self.assertEqual(fake_group_ip, self.driver._group_ip)

    def test_update_volume_stats_thin(self):
        """_update_volume_stats() with thin provisioning enabled."""
        mock_attrs = {'args': ['pool', 'select',
                               self.configuration.eqlx_pool, 'show']}
        self.configuration.san_thin_provision = True
        with mock.patch.object(self.driver,
                               '_eql_execute') as mock_eql_execute:
            mock_eql_execute.configure_mock(**mock_attrs)
            mock_eql_execute.return_value = self.driver_stats_output
            self.driver._update_volume_stats()
            self.assert_volume_stats(self.driver._stats)

    def test_update_volume_stats_thick(self):
        """_update_volume_stats() with thin provisioning disabled."""
        mock_attrs = {'args': ['pool', 'select',
                               self.configuration.eqlx_pool, 'show']}
        self.configuration.san_thin_provision = False
        with mock.patch.object(self.driver,
                               '_eql_execute') as mock_eql_execute:
            mock_eql_execute.configure_mock(**mock_attrs)
            mock_eql_execute.return_value = self.driver_stats_output
            self.driver._update_volume_stats()
            self.assert_volume_stats(self.driver._stats)

    def test_get_volume_stats_thin(self):
        """get_volume_stats(refresh=True) with thin provisioning enabled."""
        mock_attrs = {'args': ['pool', 'select',
                               self.configuration.eqlx_pool, 'show']}
        self.configuration.san_thin_provision = True
        with mock.patch.object(self.driver,
                               '_eql_execute') as mock_eql_execute:
            mock_eql_execute.configure_mock(**mock_attrs)
            mock_eql_execute.return_value = self.driver_stats_output
            stats = self.driver.get_volume_stats(refresh=True)
            self.assert_volume_stats(stats)

    def test_get_volume_stats_thick(self):
        """get_volume_stats(refresh=True) with thin provisioning disabled."""
        mock_attrs = {'args': ['pool', 'select',
                               self.configuration.eqlx_pool, 'show']}
        self.configuration.san_thin_provision = False
        with mock.patch.object(self.driver,
                               '_eql_execute') as mock_eql_execute:
            mock_eql_execute.configure_mock(**mock_attrs)
            mock_eql_execute.return_value = self.driver_stats_output
            stats = self.driver.get_volume_stats(refresh=True)
            self.assert_volume_stats(stats)

    def assert_volume_stats(self, stats):
        # Shared assertions for the canned driver_stats_output defined in setUp()
        thin_enabled = self.configuration.san_thin_provision
        self.assertEqual(float('111.0'), stats['total_capacity_gb'])
        self.assertEqual(float('11.0'), stats['free_capacity_gb'])

        if thin_enabled:
            # Thin: provisioned capacity comes from VolumeReserve (80GB)
            self.assertEqual(80.0, stats['provisioned_capacity_gb'])
        else:
            # Thick: provisioned capacity is total minus free
            space = stats['total_capacity_gb'] - stats['free_capacity_gb']
            self.assertEqual(space, stats['provisioned_capacity_gb'])

        self.assertEqual(thin_enabled, stats['thin_provisioning_support'])
        self.assertEqual(not thin_enabled,
                         stats['thick_provisioning_support'])
        self.assertEqual('Dell', stats['vendor_name'])

    def test_get_space_in_gb(self):
        """_get_space_in_gb() converts GB/TB/MB strings to float gigabytes."""
        self.assertEqual(123.0, self.driver._get_space_in_gb('123.0GB'))
        self.assertEqual(123.0 * 1024, self.driver._get_space_in_gb('123.0TB'))
        self.assertEqual(1.0, self.driver._get_space_in_gb('1024.0MB'))

    def test_get_output(self):
        """_get_output() reads from the channel until the CLI prompt appears."""
        def _fake_recv(ignore_arg):
            return '%s> ' % self.configuration.eqlx_group_name

        chan = mock.Mock(paramiko.Channel)
        mock_recv = self.mock_object(chan, 'recv')
        mock_recv.return_value = '%s> ' % self.configuration.eqlx_group_name
        self.assertEqual([_fake_recv(None)], self.driver._get_output(chan))

    def test_get_prefixed_value(self):
        """_get_prefixed_value() returns the text after a matching prefix, else None."""
        lines = ['Line1 passed', 'Line1 failed']
        prefix = ['Line1', 'Line2']
        expected_output = [' passed', None]
        self.assertEqual(expected_output[0],
                         self.driver._get_prefixed_value(lines, prefix[0]))
        self.assertEqual(expected_output[1],
                         self.driver._get_prefixed_value(lines, prefix[1]))

    def test_ssh_execute(self):
        """_ssh_execute() returns the parsed output when the CLI reports no error."""
        ssh = mock.Mock(paramiko.SSHClient)
        chan = mock.Mock(paramiko.Channel)
        transport = mock.Mock(paramiko.Transport)

        mock_get_output = self.mock_object(self.driver, '_get_output')
        self.mock_object(chan, 'invoke_shell')
        expected_output = ['NoError: test run']
        mock_get_output.return_value = expected_output

        ssh.get_transport.return_value = transport
        transport.open_session.return_value = chan

        # These calls only prime the mocks; the assertions are on the result below
        chan.invoke_shell()
        chan.send('stty columns 255' + '\r')
        chan.send(self.cmd + '\r')
        chan.close()

        self.assertEqual(expected_output,
                         self.driver._ssh_execute(ssh, self.cmd))

    def test_ssh_execute_error(self):
        """_ssh_execute() raises when the CLI output contains an error marker."""
        self.mock_object(self.driver, '_ssh_execute',
                         mock.Mock(side_effect=
                                   processutils.ProcessExecutionError))
        ssh = mock.Mock(paramiko.SSHClient)
        chan = mock.Mock(paramiko.Channel)
        transport = mock.Mock(paramiko.Transport)

        mock_get_output = self.mock_object(self.driver, '_get_output')
        self.mock_object(ssh, 'get_transport')
        self.mock_object(chan, 'invoke_shell')
        expected_output = ['Error: test run', '% Error']
        mock_get_output.return_value = expected_output

        # NOTE(review): this calls get_transport() and sets return_value on the
        # *result*, unlike test_ssh_execute which sets ssh.get_transport.return_value;
        # probably a typo, harmless here because _ssh_execute itself is mocked.
        ssh.get_transport().return_value = transport
        transport.open_session.return_value = chan

        chan.invoke_shell()
        chan.send('stty columns 255' + '\r')
        chan.send(self.cmd + '\r')
        chan.close()

        self.assertRaises(processutils.ProcessExecutionError,
                          self.driver._ssh_execute, ssh, self.cmd)

    @mock.patch.object(greenthread, 'sleep')
    def test_ensure_retries(self, _gt_sleep):
        """_eql_execute() retries eqlx_cli_max_retries times on backend errors."""
        num_attempts = 3
        self.driver.configuration.eqlx_cli_max_retries = num_attempts

        self.mock_object(self.driver, '_ssh_execute',
                         mock.Mock(side_effect=exception.
                                   VolumeBackendAPIException("some error")))
        # mocks for calls in _run_ssh
        self.mock_object(utils, 'check_ssh_injection')
        self.mock_object(ssh_utils, 'SSHPool')

        sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
                                    "test",
                                    password="test",
                                    min_size=1,
                                    max_size=1)
        self.driver.sshpool = mock.Mock(return_value=sshpool)
        ssh = mock.Mock(paramiko.SSHClient)
        self.driver.sshpool.item().__enter__ = mock.Mock(return_value=ssh)
        self.driver.sshpool.item().__exit__ = mock.Mock(return_value=False)
        # now call the execute
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._eql_execute, "fake command")
        # initial attempt + num_attempts retries
        self.assertEqual(num_attempts + 1,
                         self.driver._ssh_execute.call_count)

    @mock.patch.object(greenthread, 'sleep')
    def test_ensure_connection_retries(self, _gt_sleep):
        """_eql_execute() also retries when the CLI itself reports an error."""
        num_attempts = 3
        self.driver.configuration.eqlx_cli_max_retries = num_attempts

        self.mock_object(self.driver, '_ssh_execute',
                         mock.Mock(side_effect=
                                   processutils.ProcessExecutionError
                                   (stdout='% Error ... some error.\n')))
        # mocks for calls in _run_ssh
        self.mock_object(utils, 'check_ssh_injection')
        self.mock_object(ssh_utils, 'SSHPool')

        sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
                                    "test",
                                    password="test",
                                    min_size=1,
                                    max_size=1)
        self.driver.sshpool = mock.Mock(return_value=sshpool)
        ssh = mock.Mock(paramiko.SSHClient)
        self.driver.sshpool.item().__enter__ = mock.Mock(return_value=ssh)
        self.driver.sshpool.item().__exit__ = mock.Mock(return_value=False)
        # now call the execute
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._eql_execute, "fake command")
        self.assertEqual(num_attempts + 1,
                         self.driver._ssh_execute.call_count)

    @mock.patch.object(greenthread, 'sleep')
    def test_ensure_retries_on_channel_timeout(self, _gt_sleep):
        """_eql_execute() retries when reading the channel output fails."""
        num_attempts = 3
        self.driver.configuration.eqlx_cli_max_retries = num_attempts

        # mocks for calls and objects in _run_ssh
        self.mock_object(utils, 'check_ssh_injection')
        self.mock_object(ssh_utils, 'SSHPool')

        sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
                                    "test",
                                    password="test",
                                    min_size=1,
                                    max_size=1)
        self.driver.sshpool = mock.Mock(return_value=sshpool)
        ssh = mock.Mock(paramiko.SSHClient)
        self.driver.sshpool.item().__enter__ = mock.Mock(return_value=ssh)
        self.driver.sshpool.item().__exit__ = mock.Mock(return_value=False)

        # mocks for _ssh_execute and _get_output
        self.mock_object(self.driver, '_get_output',
                         mock.Mock(side_effect=exception.
                                   VolumeBackendAPIException("some error")))
        # now call the execute; suppress the traceback the driver prints
        with mock.patch('sys.stderr', new=six.StringIO()):
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.driver._eql_execute, "fake command")

        self.assertEqual(num_attempts + 1, self.driver._get_output.call_count)

    def test_with_timeout(self):
        """The with_timeout decorator aborts calls that exceed the given timeout."""
        @eqlx.with_timeout
        def no_timeout(cmd, *args, **kwargs):
            return 'no timeout'

        @eqlx.with_timeout
        def w_timeout(cmd, *args, **kwargs):
            time.sleep(1)

        self.assertEqual('no timeout', no_timeout('fake cmd'))
        self.assertRaises(exception.VolumeBackendAPIException,
                          w_timeout, 'fake cmd', timeout=0.1)

    def test_local_path(self):
        """local_path() is not supported by this driver."""
        self.assertRaises(NotImplementedError, self.driver.local_path, '')
| |
import json
from django.urls import reverse
from rest_framework.test import APIRequestFactory
from tests.support.decorators import mark_urls
from tests.support.views import ArtistViewSet, AlbumViewSet, TrackViewSet
@mark_urls
def test_detail_attributes(factory: APIRequestFactory) -> None:
    """You can render primary data object attributes."""
    request = factory.get(reverse("artist-detail", kwargs={"pk": 1}))
    view_detail = ArtistViewSet.as_view({"get": "retrieve"})
    response = view_detail(request, pk=1)
    response.render()

    # JSON:API media type, and camelCased attribute keys in the payload
    assert response["Content-Type"] == "application/vnd.api+json"
    assert json.loads(response.content) == {
        "data": {
            "id": "1",
            "type": "artist",
            "attributes": {"firstName": "John", "lastName": "Coltrane"},
        }
    }
@mark_urls
def test_list_attributes(factory: APIRequestFactory) -> None:
    """You can render primary data list attributes."""
    request = factory.get(reverse("artist-list"))
    view_list = ArtistViewSet.as_view({"get": "list"})
    response = view_list(request)
    response.render()

    assert response["Content-Type"] == "application/vnd.api+json"
    # Only the first three entries are pinned; the fixture list may be longer
    data = json.loads(response.content)["data"]
    assert data[:3] == [
        {
            "id": "0",
            "type": "artist",
            "attributes": {"firstName": "Miles", "lastName": "Davis"},
        },
        {
            "id": "1",
            "type": "artist",
            "attributes": {"firstName": "John", "lastName": "Coltrane"},
        },
        {
            "id": "2",
            "type": "artist",
            "attributes": {"firstName": "Charles", "lastName": "Mingus"},
        },
    ]
@mark_urls
def test_empty_list(factory: APIRequestFactory) -> None:
    """The 'data' field appears in the top-level object even if data is empty."""
    list_view = ArtistViewSet.as_view({"get": "list"})
    # A filter that matches nothing yields an empty primary data list
    no_match_request = factory.get(reverse("artist-list"), {"filter[firstName]": "Foo"})
    response = list_view(no_match_request)
    response.render()

    document = json.loads(response.content)
    assert response["Content-Type"] == "application/vnd.api+json"
    assert document == {"data": []}
@mark_urls
def test_attributes_exception(factory: APIRequestFactory) -> None:
    """The renderer handles thrown exceptions."""
    detail_view = ArtistViewSet.as_view({"get": "retrieve"})
    # pk 8 does not exist, so the view raises NotFound internally
    missing_request = factory.get(reverse("artist-detail", kwargs={"pk": 8}))
    response = detail_view(missing_request, pk=8)
    response.render()

    body = response.content.decode()
    assert json.loads(body) == {"errors": [{"detail": "Not found."}]}
@mark_urls
def test_attributes_options(factory: APIRequestFactory) -> None:
    """You can specify options in meta."""
    # OPTIONS responses carry the endpoint metadata under top-level "meta"
    request = factory.options(reverse("artist-list"))
    view_list = ArtistViewSet.as_view({"get": "list"})
    response = view_list(request)
    response.render()

    assert response["Content-Type"] == "application/vnd.api+json"
    assert json.loads(response.content) == {
        "meta": {
            "data": {
                "description": "A simple ViewSet for listing or retrieving artists.",
                "name": "Artist",
                "parses": ["application/vnd.api+json"],
                "renders": ["application/vnd.api+json"],
            }
        }
    }
@mark_urls
def test_attributes_fields(factory: APIRequestFactory) -> None:
    """The fields attribute returns specific fields."""
    detail_view = ArtistViewSet.as_view({"get": "retrieve"})
    # Sparse fieldset: only firstName should appear in the attributes
    sparse_request = factory.get(
        reverse("artist-detail", kwargs={"pk": 1}), {"fields[artist]": "firstName"}
    )
    response = detail_view(sparse_request, pk=1)
    response.render()

    assert response["Content-Type"] == "application/vnd.api+json"
    expected = {
        "data": {"id": "1", "type": "artist", "attributes": {"firstName": "John"}}
    }
    assert json.loads(response.content) == expected
@mark_urls
def test_relationships_empty(factory: APIRequestFactory) -> None:
    """You can render empty relationships."""
    request = factory.get(reverse("album-detail", kwargs={"pk": 3}))
    view_detail = AlbumViewSet.as_view({"get": "retrieve"})
    response = view_detail(request, pk=3)
    response.render()

    assert response["Content-Type"] == "application/vnd.api+json"
    # Empty to-one linkage is null; empty to-many linkage is an empty list
    assert json.loads(response.content) == {
        "data": {
            "id": "3",
            "type": "album",
            "attributes": {"albumName": "Unknown Artist"},
            "relationships": {"artist": {"data": None}, "tracks": {"data": []}},
        }
    }
@mark_urls
def test_to_one_non_empty(factory: APIRequestFactory) -> None:
    """You can render a non-empty to-one relationship."""
    request = factory.get(reverse("album-detail", kwargs={"pk": 0}))
    view_detail = AlbumViewSet.as_view({"get": "retrieve"})
    response = view_detail(request, pk=0)
    response.render()

    assert response["Content-Type"] == "application/vnd.api+json"
    # The to-one linkage is a resource identifier object (id + type only)
    assert json.loads(response.content) == {
        "data": {
            "id": "0",
            "type": "album",
            "attributes": {"albumName": "A Love Supreme"},
            "relationships": {
                "artist": {"data": {"id": "1", "type": "artist"}},
                "tracks": {"data": []},
            },
        }
    }
@mark_urls
def test_to_many_non_empty(factory: APIRequestFactory) -> None:
    """You can render a non-empty to-many relationship."""
    # Album 1 links to artist 0 and four tracks (ids 0..3, in order).
    view = AlbumViewSet.as_view({"get": "retrieve"})
    resp = view(factory.get(reverse("album-detail", kwargs={"pk": 1})), pk=1)
    resp.render()
    expected = {
        "data": {
            "id": "1",
            "type": "album",
            "attributes": {"albumName": "Birth of the Cool"},
            "relationships": {
                "artist": {"data": {"id": "0", "type": "artist"}},
                "tracks": {
                    "data": [
                        {"id": "0", "type": "track"},
                        {"id": "1", "type": "track"},
                        {"id": "2", "type": "track"},
                        {"id": "3", "type": "track"},
                    ]
                },
            },
        }
    }
    assert resp["Content-Type"] == "application/vnd.api+json"
    assert json.loads(resp.content) == expected
@mark_urls
def test_include_to_one(factory: APIRequestFactory) -> None:
    """You can include a to-one relationship as a compound document."""
    # ?include=artist should add the related artist to `included`.
    view = AlbumViewSet.as_view({"get": "retrieve"})
    req = factory.get(
        reverse("album-detail", kwargs={"pk": 0}), {"include": "artist"}
    )
    resp = view(req, pk=0)
    resp.render()
    expected = {
        "data": {
            "id": "0",
            "type": "album",
            "attributes": {"albumName": "A Love Supreme"},
            "relationships": {
                "artist": {"data": {"id": "1", "type": "artist"}},
                "tracks": {"data": []},
            },
        },
        "included": [
            {
                "id": "1",
                "type": "artist",
                "attributes": {"firstName": "John", "lastName": "Coltrane"},
            }
        ],
    }
    assert resp["Content-Type"] == "application/vnd.api+json"
    assert json.loads(resp.content) == expected
@mark_urls
def test_include_to_many_and_paths(factory: APIRequestFactory) -> None:
    """You can include a to-many relationship as a compound document."""
    track_detail = TrackViewSet.as_view({"get": "retrieve"})
    # Dotted include paths per the JSON:API spec: the track's album, plus
    # that album's artist and its full track list.
    request = factory.get(
        reverse("track-detail", kwargs={"pk": 0}),
        {"include": "album,album.artist,album.tracks"},
    )
    response = track_detail(request, pk=0)
    response.render()
    assert response["Content-Type"] == "application/vnd.api+json"
    # NOTE: track "0" (the primary resource) also appears in `included`
    # because it is reachable via the album.tracks include path.
    assert json.loads(response.content) == {
        "data": {
            "id": "0",
            "type": "track",
            "attributes": {"name": "Jeru", "trackNum": 1},
            "relationships": {"album": {"data": {"id": "1", "type": "album"}}},
        },
        "included": [
            {
                "id": "1",
                "type": "album",
                "attributes": {"albumName": "Birth of the Cool"},
                "relationships": {
                    "artist": {"data": {"id": "0", "type": "artist"}},
                    "tracks": {
                        "data": [
                            {"id": "0", "type": "track"},
                            {"id": "1", "type": "track"},
                            {"id": "2", "type": "track"},
                            {"id": "3", "type": "track"},
                        ]
                    },
                },
            },
            {
                "id": "0",
                "type": "artist",
                "attributes": {"firstName": "Miles", "lastName": "Davis"},
            },
            {
                "id": "0",
                "type": "track",
                "attributes": {"name": "Jeru", "trackNum": 1},
                "relationships": {"album": {"data": {"id": "1", "type": "album"}}},
            },
            {
                "id": "1",
                "type": "track",
                "attributes": {"name": "Moon Dreams", "trackNum": 2},
                "relationships": {"album": {"data": {"id": "1", "type": "album"}}},
            },
            {
                "id": "2",
                "type": "track",
                "attributes": {"name": "Venus de Milo", "trackNum": 3},
                "relationships": {"album": {"data": {"id": "1", "type": "album"}}},
            },
            {
                "id": "3",
                "type": "track",
                "attributes": {"name": "Deception", "trackNum": 4},
                "relationships": {"album": {"data": {"id": "1", "type": "album"}}},
            },
        ],
    }
@mark_urls
def test_relationships_fields(factory: APIRequestFactory) -> None:
    """You can use fields to specify specific relationships fields."""
    # Sparse fieldsets applied to both the primary resource (album keeps
    # only its artist relationship) and the included artist (firstName only).
    view = AlbumViewSet.as_view({"get": "retrieve"})
    req = factory.get(
        reverse("album-detail", kwargs={"pk": 0}),
        {"fields[album]": "artist", "fields[artist]": "firstName", "include": "artist"},
    )
    resp = view(req, pk=0)
    resp.render()
    expected = {
        "data": {
            "id": "0",
            "type": "album",
            "relationships": {"artist": {"data": {"id": "1", "type": "artist"}}},
        },
        "included": [
            {"id": "1", "type": "artist", "attributes": {"firstName": "John"}}
        ],
    }
    assert resp["Content-Type"] == "application/vnd.api+json"
    assert json.loads(resp.content) == expected
| |
"""Orchestrator for building wheels from InstallRequirements.
"""
import logging
import os.path
import re
import shutil
from pip._internal.models.link import Link
from pip._internal.operations.build.wheel import build_wheel_pep517
from pip._internal.operations.build.wheel_legacy import build_wheel_legacy
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import ensure_dir, hash_file, is_wheel_installed
from pip._internal.utils.setuptools_build import make_setuptools_clean_args
from pip._internal.utils.subprocess import call_subprocess
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import path_to_url
from pip._internal.vcs import vcs
if MYPY_CHECK_RUNNING:
from typing import Any, Callable, Iterable, List, Optional, Tuple
from pip._internal.cache import WheelCache
from pip._internal.req.req_install import InstallRequirement
BinaryAllowedPredicate = Callable[[InstallRequirement], bool]
BuildResult = Tuple[List[InstallRequirement], List[InstallRequirement]]
logger = logging.getLogger(__name__)
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.IGNORECASE)
def _contains_egg_info(s):
    # type: (str) -> bool
    """Determine whether the string looks like an egg_info.
    :param s: The string to parse. E.g. foo-2.1
    """
    match = _egg_info_re.search(s)
    return match is not None
def _should_build(
    req,  # type: InstallRequirement
    need_wheel,  # type: bool
    check_binary_allowed,  # type: BinaryAllowedPredicate
):
    # type: (...) -> bool
    """Return whether an InstallRequirement should be built into a wheel.
    :param req: the requirement under consideration
    :param need_wheel: True for the ``pip wheel`` command (a wheel is the
        requested artifact), False for ``pip install`` (building is an
        optional optimisation)
    :param check_binary_allowed: predicate deciding whether binaries are
        permitted for this requirement
    """
    if req.constraint:
        # never build requirements that are merely constraints
        return False
    if req.is_wheel:
        if need_wheel:
            # only worth logging for 'pip wheel'; for install the skip
            # is an implementation detail
            logger.info(
                'Skipping %s, due to already being wheel.', req.name,
            )
        return False
    if need_wheel:
        # i.e. pip wheel, not pip install
        return True
    # From this point, this concerns the pip install command only
    # (need_wheel=False).
    if req.editable or not req.source_dir:
        # editable installs (and reqs with no source tree) are installed
        # directly, never via an intermediate wheel
        return False
    if not check_binary_allowed(req):
        logger.info(
            "Skipping wheel build for %s, due to binaries "
            "being disabled for it.", req.name,
        )
        return False
    if not req.use_pep517 and not is_wheel_installed():
        # we don't build legacy requirements if wheel is not installed
        logger.info(
            "Using legacy 'setup.py install' for %s, "
            "since package 'wheel' is not installed.", req.name,
        )
        return False
    return True
def should_build_for_wheel_command(
    req,  # type: InstallRequirement
):
    # type: (...) -> bool
    """Return True when the ``pip wheel`` command should build *req*."""
    # 'pip wheel' always allows binaries, hence the constant-true predicate.
    return _should_build(req, need_wheel=True, check_binary_allowed=_always_true)
def should_build_for_install_command(
    req,  # type: InstallRequirement
    check_binary_allowed,  # type: BinaryAllowedPredicate
):
    # type: (...) -> bool
    """Return True when ``pip install`` should build a wheel for *req*."""
    return _should_build(
        req,
        need_wheel=False,
        check_binary_allowed=check_binary_allowed,
    )
def _should_cache(
    req,  # type: InstallRequirement
):
    # type: (...) -> Optional[bool]
    """
    Return whether a built InstallRequirement can be stored in the persistent
    wheel cache, assuming the wheel cache is available, and _should_build()
    has determined a wheel needs to be built.
    """
    if req.editable or not req.source_dir:
        # never cache editable requirements
        return False
    if req.link and req.link.is_vcs:
        # VCS checkout. Do not cache
        # unless it points to an immutable commit hash.
        assert not req.editable
        assert req.source_dir
        vcs_backend = vcs.get_backend_for_scheme(req.link.scheme)
        assert vcs_backend
        if vcs_backend.is_immutable_rev_checkout(req.link.url, req.source_dir):
            return True
        return False
    assert req.link
    base, ext = req.link.splitext()
    # A name-version filename (e.g. foo-2.1.tar.gz) identifies a
    # reproducible artifact, so the built wheel is safe to cache.
    if _contains_egg_info(base):
        return True
    # Otherwise, do not cache.
    return False
def _get_cache_dir(
    req,  # type: InstallRequirement
    wheel_cache,  # type: WheelCache
):
    # type: (...) -> str
    """Return the persistent or temporary cache directory where the built
    wheel need to be stored.
    """
    has_persistent_cache = bool(wheel_cache.cache_dir)
    assert req.link
    # Use the persistent cache only when one is configured AND the
    # requirement is eligible; otherwise fall back to an ephemeral dir.
    if has_persistent_cache and _should_cache(req):
        return wheel_cache.get_path_for_link(req.link)
    return wheel_cache.get_ephem_path_for_link(req.link)
def _always_true(_):
# type: (Any) -> bool
return True
def _build_one(
    req,  # type: InstallRequirement
    output_dir,  # type: str
    build_options,  # type: List[str]
    global_options,  # type: List[str]
):
    # type: (...) -> Optional[str]
    """Build one wheel.

    :return: The filename of the built wheel, or None if the build failed.
    """
    try:
        ensure_dir(output_dir)
    except OSError as exc:
        logger.warning(
            "Building wheel for %s failed: %s",
            req.name, exc,
        )
        return None
    # Install build deps into temporary directory (PEP 518)
    with req.build_env:
        return _build_one_inside_env(req, output_dir, build_options, global_options)
def _build_one_inside_env(
    req,  # type: InstallRequirement
    output_dir,  # type: str
    build_options,  # type: List[str]
    global_options,  # type: List[str]
):
    # type: (...) -> Optional[str]
    """Build *req*'s wheel in a temp dir, then move it into *output_dir*.
    Assumes the requirement's build environment has already been entered.
    Returns the final wheel path, or None if building or moving failed.
    """
    with TempDirectory(kind="wheel") as temp_dir:
        assert req.name
        if req.use_pep517:
            # PEP 517 build: delegate to the project's declared backend
            assert req.metadata_directory
            wheel_path = build_wheel_pep517(
                name=req.name,
                backend=req.pep517_backend,
                metadata_directory=req.metadata_directory,
                build_options=build_options,
                tempd=temp_dir.path,
            )
        else:
            # legacy build via 'setup.py bdist_wheel'
            wheel_path = build_wheel_legacy(
                name=req.name,
                setup_py_path=req.setup_py_path,
                source_dir=req.unpacked_source_directory,
                global_options=global_options,
                build_options=build_options,
                tempd=temp_dir.path,
            )
        if wheel_path is not None:
            wheel_name = os.path.basename(wheel_path)
            dest_path = os.path.join(output_dir, wheel_name)
            try:
                # hash while the wheel is still in the temp dir, then move
                wheel_hash, length = hash_file(wheel_path)
                shutil.move(wheel_path, dest_path)
                logger.info('Created wheel for %s: '
                            'filename=%s size=%d sha256=%s',
                            req.name, wheel_name, length,
                            wheel_hash.hexdigest())
                logger.info('Stored in directory: %s', output_dir)
                return dest_path
            except Exception as e:
                logger.warning(
                    "Building wheel for %s failed: %s",
                    req.name, e,
                )
        # Ignore return, we can't do anything else useful.
        if not req.use_pep517:
            _clean_one_legacy(req, global_options)
        return None
def _clean_one_legacy(req, global_options):
    # type: (InstallRequirement, List[str]) -> bool
    """Run ``setup.py clean`` for *req*; return True on success."""
    args = make_setuptools_clean_args(
        req.setup_py_path,
        global_options=global_options,
    )
    logger.info('Running setup.py clean for %s', req.name)
    try:
        call_subprocess(args, cwd=req.source_dir)
    except Exception:
        logger.error('Failed cleaning build dir for %s', req.name)
        return False
    return True
def build(
    requirements,  # type: Iterable[InstallRequirement]
    wheel_cache,  # type: WheelCache
    build_options,  # type: List[str]
    global_options,  # type: List[str]
):
    # type: (...) -> BuildResult
    """Build wheels.
    :return: The list of InstallRequirement that succeeded to build and
        the list of InstallRequirement that failed to build.
    """
    if not requirements:
        return [], []
    # Build the wheels.
    logger.info(
        'Building wheels for collected packages: %s',
        ', '.join(req.name for req in requirements),  # type: ignore
    )
    with indent_log():
        build_successes, build_failures = [], []
        for req in requirements:
            # the cache dir doubles as the output dir for the built wheel
            cache_dir = _get_cache_dir(req, wheel_cache)
            wheel_file = _build_one(
                req, cache_dir, build_options, global_options
            )
            if wheel_file:
                # Update the link for this.
                req.link = Link(path_to_url(wheel_file))
                req.local_file_path = req.link.file_path
                assert req.link.is_wheel
                build_successes.append(req)
            else:
                build_failures.append(req)
    # notify success/failure
    if build_successes:
        logger.info(
            'Successfully built %s',
            ' '.join([req.name for req in build_successes]),  # type: ignore
        )
    if build_failures:
        logger.info(
            'Failed to build %s',
            ' '.join([req.name for req in build_failures]),  # type: ignore
        )
    # Return both lists: (successes, failures).
    return build_successes, build_failures
| |
#!/usr/bin/python3
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
import time
import os
import pwd
from datetime import date,timedelta,datetime
import subprocess
from pprint import pprint
import locale
# argument check: a username is required; the start date is optional and
# defaults to today (see MAIN below).  Exits with status 1 on bad usage.
if len(sys.argv)<2:
    print ("USAGE: username [startDate=YYYY-MM-DD|today|yesterday|this-week]")
    print ("EXAMPLE: alice today")
    sys.exit(1)
# basic audit record fields
class AuditRecord:
def __init__(self):
ts = date.today()
type = ""
pid = 0
ppid = 0
euid = 0
auid = 0
# USER_CMD has response code and full command
user_res = ""
user_cmd = ""
# EXECVE has full command
execve_fullcmd = ""
# SYSCALL has success (yes|no) and exit code
# if success=no, then no EXECVE
syscall_success = ""
syscall_exit = 0
def callProcess(cmdstr):
    """Run *cmdstr* through the shell; return (stdout_bytes, returncode, stderr).

    stderr is None because it is not captured (not piped).
    NOTE(security): shell=True means *cmdstr* must never contain untrusted
    input -- shell-injection risk.  Callers in this script build the command
    from validated values only.
    """
    print ("exec: {}".format(cmdstr))
    p = subprocess.Popen(cmdstr, stdout=subprocess.PIPE, shell=True)
    (output, err) = p.communicate()
    # communicate() already waits for the process to terminate, so the
    # previous extra p.wait() was redundant; use returncode directly.
    return output, p.returncode, err
def splitAndMakeDict(str,splitBy,keyBy):
    """Split *str* on *splitBy* and build a dict from ``key<keyBy>value`` fields.

    Fields that do not contain *keyBy* are skipped; the value keeps any
    further *keyBy* occurrences (split happens once, from the left).
    NOTE: the parameter name ``str`` shadows the builtin; it is kept only
    for signature compatibility with existing callers.
    """
    # Renamed the local from ``dict`` (which shadowed the builtin) to result.
    result = {}
    fields = str.split(splitBy)
    for f in fields:
        if keyBy in f:
            (k,v) = f.split(keyBy,1)
            result[k] = v
    return result
def concatDictArgs(dict,keyPrefix,maxArgs=20):
    """Concatenate numbered values (``a0``, ``a1``, ...) from *dict*.

    Looks up keys ``keyPrefix + str(i)`` for i in [0, maxArgs), strips any
    surrounding double quotes from each value and joins them with a single
    trailing space each.  Missing indices are skipped (no early stop).
    ``maxArgs`` generalizes the previously hard-coded limit of 20 while
    keeping the old behavior as the default.
    NOTE: the parameter name ``dict`` shadows the builtin; kept only for
    signature compatibility with existing callers.
    """
    retstr = ""
    for x in range(0, maxArgs):
        lookForKey = "{}{}".format(keyPrefix,x)
        #print ("looking for key {}".format(lookForKey))
        if lookForKey in dict:
            retstr = retstr + dict[lookForKey].strip('\"') + " "
    return retstr
def parseRecordList(rawList):
    """Parse raw ``ausearch`` output bytes into a list of AuditRecord objects.
    Records are separated by '----'; each line of a record is scanned for the
    fields this script cares about (timestamp, EXECVE args, USER_CMD,
    SYSCALL, USER_AUTH).  One AuditRecord is appended per record chunk.
    """
    output = rawList
    recList = []
    # iterate through record set, decode() necessary for python3
    recordlist = output.decode().split('----')
    for record in recordlist:
        # go through each line in a record
        auditrec = AuditRecord()
        linelist = record.split('\n')
        for line in linelist:
            if line.strip()=="----" or line.strip()=="" :
                continue
            if line.startswith("time->"):
                # human-readable timestamp line, e.g. 'Mon Jan  6 10:00:00 2020'
                line = line[6:]
                datetime_object = datetime.strptime(line,'%a %b %d %H:%M:%S %Y')
                auditrec.ts = datetime_object.strftime('%Y-%m-%d %H:%M:%S')
            if line.startswith("type=EXECVE"):
                # EXECVE carries the argv as numbered fields a0, a1, ...
                dict = splitAndMakeDict(line,' ','=')
                auditrec.execve_fullcmd = concatDictArgs(dict,'a')
            if line.startswith("type=USER_CMD") or line.startswith("type=SYSCALL") or line.startswith("type=USER_AUTH"):
                #print ("LINE {}".format(line))
                dict = splitAndMakeDict(line,' ','=')
                auditrec.type = dict['type']
                if 'uid' in dict:
                    auditrec.uid = dict['uid']
                if 'pid' in dict:
                    auditrec.pid = dict['pid']
                if 'ppid' in dict:
                    auditrec.ppid = dict['ppid']
                if 'auid' in dict:
                    auditrec.auid = dict['auid']
                if 'res' in dict:
                    # USER_CMD success code
                    #print ("found 'res' {}".format(dict['res']))
                    auditrec.user_res = dict['res']
                    # chop off last char if single quote
                    if auditrec.user_res.endswith("'"):
                        auditrec.user_res = auditrec.user_res[:-1]
                if 'success' in dict:
                    # normalize SYSCALL yes/no into success/fail for display
                    auditrec.syscall_success = dict['success']
                    if auditrec.syscall_success=="yes":
                        auditrec.syscall_success = "success"
                    elif auditrec.syscall_success=="no":
                        auditrec.syscall_success = "fail"
                if 'exit' in dict:
                    auditrec.syscall_exit = dict['exit']
                if 'cmd' in dict:
                    # cmd needs to be parsed manually
                    # (the value may contain spaces, so the generic splitter
                    # cannot recover it)
                    index1 = line.index("cmd=")
                    # up to next field with =
                    index2 = line.index("=",index1+5)
                    auditrec.user_cmd = line[index1:index2]
                    # still need to chop off last field
                    lastspace = auditrec.user_cmd.rindex(' ')
                    auditrec.user_cmd = auditrec.user_cmd[:lastspace]
                    # used when not using '-i'
                    #try:
                    #  auditrec.user_cmd = dict['cmd'].decode("hex")
                    #except:
                    #  auditrec.user_cmd = dict['cmd']
            if "msg=audit(" in line:
                # timestamp needs to be parsed manually
                index1 = line.index("msg=audit(")
                index2 = line.index(")",index1+10)
                auditrec.ts = line[index1+10:index2]
            # intentionally disabled branch, kept for reference (dead code):
            if False: #line.startswith("type=SYSCALL"):
                # only SYSCALL, and no EXECVE if failure
                auditrec.type = "SYSCALL"
                dict = splitAndMakeDict(line,' ','=')
                auditrec.syscall_success = dict['success']
                if auditrec.syscall_success=="yes":
                    auditrec.syscall_success = "success"
                elif auditrec.syscall_success=="no":
                    auditrec.syscall_success = "fail"
                auditrec.syscall_exit = dict['exit']
        # add record to returning list
        recList.append(auditrec)
    return recList
############### MAIN ###############################
# simple parse for arguments
# use today startdate if not specified
username = sys.argv[1]
if len(sys.argv)<3:
    curlocale = locale.getdefaultlocale()
    locale.setlocale(locale.LC_TIME, curlocale)
    today = date.today()
    # hardcoding this date format means other locale would not work
    #startDate = today.strftime('%m/%d/%Y')
    startDate = time.strftime('%x')
    print ("Getting today date {} based on locale {}".format(startDate,curlocale))
else:
    startDate = sys.argv[2]
print ("startDate: " + startDate)
# must run as root/sudo to search audit reports
if os.geteuid()!=0:
    print ("ERROR - must run as root/sudo in order to search audit logs")
    sys.exit(2)
# get userid based on name
try:
    userid = pwd.getpwnam(username).pw_uid
except:
    print ("ERROR trying to resolve user id for {}".format(username))
    sys.exit(3)
print ("Going to trace commands for user {} with id {}".format(username,userid))
# search audit trail for userid ('-i' makes ausearch emit interpreted text)
cmdstr="sudo ausearch -ui {} -i -ts {}".format(userid,startDate)
(output, pstatus, err) = callProcess(cmdstr)
#print ("Command return code: ", pstatus)
# iterate through record list
print ("----Commands by {} starting at {}-----".format(username,startDate))
recList = parseRecordList(output)
for p in recList:
    if not hasattr(p,'type'):
        continue
    #pprint(vars(p))
    if p.type.startswith("USER_CMD"):
        print ("{:10s} {:10s} at {} by {}/{} executing {}".format(p.type,p.user_res,p.ts,p.auid,p.uid,p.user_cmd))
    elif p.type.startswith("SYSCALL"):
        print ("{:10s} {:10s} at {} by {}/{} executing {}".format(p.type,p.syscall_success,p.ts,p.auid,p.uid,p.execve_fullcmd if hasattr(p,"execve_fullcmd") else "?"))
    elif p.type.startswith("USER_AUTH"):
        print ("{:10s} {:10s} at {} by {}/{}".format(p.type,p.user_res,p.ts,p.auid,p.uid))
# NOTE(review): the exit below makes everything after it unreachable -- the
# per-record process/parent-process drill-down appears to be deliberately
# disabled debugging code.  Confirm before removing or re-enabling.
sys.exit(4)
for rec in recList:
    pprint(vars(rec))
    # search audit trail for process
    # NOTE(review): rec.user_pid / rec.user_ppid are never set by the parser,
    # so these lookups rely on the AttributeError handlers below.
    try:
        cmdstr="sudo ausearch -p {}".format(rec.user_pid)
        (output, pstatus, err) = callProcess(cmdstr)
        #print ("Command return code: ", pstatus)
        print ("PROCESS")
        pList = parseRecordList(output)
        for p in pList:
            #pprint(vars(p))
            print ("{} by {}/{} executing {}".format(p.user_res,p.ts,p.auid,p.uid,p.user_cmd))
    except AttributeError:
        pass # ok if pid does not exist for record
    # search audit trail for parent process
    try:
        cmdstr="sudo ausearch -pp {}".format(rec.user_ppid)
        (output, pstatus, err) = callProcess(cmdstr)
        #print ("Command return code: ", pstatus)
        print ("PARENT PROCESS")
        ppList = parseRecordList(output)
        for pp in ppList:
            pprint(vars(pp))
    except AttributeError:
        pass # ok if parent pid does not exist for record
    # search root audit trail for process
    try:
        cmdstr="sudo ausearch -k rootcmd -p {}".format(rec.user_ppid)
        (output, pstatus, err) = callProcess(cmdstr)
        #print ("Command return code: ", pstatus)
        print ("ROOT PROCESS")
        ppList = parseRecordList(output)
        for pp in ppList:
            pprint(vars(pp))
    except AttributeError:
        pass # ok if parent pid does not exist for record
    # search root audit trail for parent process
    try:
        cmdstr="sudo ausearch -k rootcmd -pp {}".format(rec.user_ppid)
        (output, pstatus, err) = callProcess(cmdstr)
        #print ("Command return code: ", pstatus)
        print ("ROOT PARENT PROCESS")
        ppList = parseRecordList(output)
        for pp in ppList:
            pprint(vars(pp))
    except AttributeError:
        pass # ok if parent pid does not exist for record
    print ("***REC******************************************\n\n")
| |
#####################################################################
#
# Copyright 2015 Mayur Patel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#####################################################################
try: # version-proof
import xmlrpclib as xmlrpc_lib
except ImportError :
import xmlrpc.client as xmlrpc_lib
# -------------------------------------------------------------------
from .. import localclient
from .. import fs
from .. import conf
from .. import auth
from .. import ds
# -------------------------------------------------------------------
import stat
import grp
import pwd
import os
import logging
import inspect
import functools
import copy
import threading
import random
import socket
import base64
import datetime
import codecs
# -------------------------------------------------------------------
# things that we probably don't want to expose to configuration:
NONCE_EXPIRY = 60 # seconds
NONCE_CACHE_LIMIT = 4096
DEFAULT_UID = 0
DEFAULT_GID = 0
DEFAULT_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH
# -------------------------------------------------------------------
def tuples_to_ptclist( tlist ):
    """Transcoder: convert a list of plain tuples (as received over the
    wire) into a list of ds.PathTraversalContext objects."""
    contexts = []
    for entry in tlist:
        contexts.append( ds.PathTraversalContext( *entry ) )
    return contexts
# -------------------------------------------------------------------
#
# Implements a XML-RPC client class
# with decorators to autopopulate from the
# definition of the server:
#
# Unfortunately, XMLRPC does not support optional arguments, so this complicates
# our attempts at simplifying the parameters associated with authentication.
#
class RemoteClient( localclient.LocalClient ) :
    """XML-RPC client for the dirb service.

    Keeps a pool of server URLs and round-robins calls across them,
    dropping servers from the pool on socket errors.  The _rpc_* class
    decorators auto-generate client-side proxy methods from the server's
    method definitions, splicing in authentication credentials, the
    compiled schema document and the starting path as needed.
    """
    def __init__( self, confdict, compileddoc, startingpath, notifier=None ):
        # confdict['DIRB_SERVERS'] is a comma-separated list of server URLs
        server_list = [x.strip() for x in confdict['DIRB_SERVERS'].split(',')]
        assert len(server_list) > 0
        self._server_list = server_list
        self._server_bound = len(server_list)
        assert( self._server_bound )
        # start at a random server so many clients spread their load:
        self._server_num = random.randint( 0, self._server_bound-1)
        self._notifier = notifier
        self._conf = confdict
        super(RemoteClient, self).__init__( compileddoc, startingpath )
    # ===========================================
    def _pick_one( self ):
        "Return the next server URL to use."
        # round robin across the servers
        ret = self._server_list[ self._server_num ]
        self._server_num += 1
        self._server_num = 0 if self._server_num == self._server_bound else self._server_num
        return ret
    # ===========================================
    def _get_user( self, user_index, args, kwargs ):
        """Return the username supplied positionally or as a keyword,
        falling back to the current local username."""
        if len( args ) > user_index :
            if args[ user_index ] :
                return args[ user_index ]
        else:
            if 'user' in kwargs:
                if kwargs['user']:
                    return kwargs['user']
        return auth.get_username()
    # ===========================================
    def _set_user( self, user_index, user, args, kwargs ):
        """Place *user* at the 'user' positional slot, extending args by
        one if it is the next unfilled position."""
        newargs = args
        newkw = kwargs
        if len( args ) > user_index :
            newargs = list( args )
            newargs[ user_index ] = user
        elif len( args ) == user_index :
            newargs = list( args ) + [ user ]
        else:
            # xmlrpc does not support kwargs, so we cannot use them
            raise TypeError( 'Unable to attach authentication argument' )
        return newargs, newkw
    # ===========================================
    def _set_compileddoc( self, doc_index, doc, args, kwargs ):
        """Place *doc* at the 'compileddoc' positional slot, padding any
        intermediate positions with None."""
        newargs = args
        newkw = kwargs
        if doc_index is not None:
            if len( args ) > doc_index :
                newargs = list( args )
                newargs[ doc_index ] = doc
            elif len( args ) == doc_index :
                newargs = list( args ) + [ doc ]
            elif len( args) < doc_index :
                delta = doc_index - len( args )
                newargs = list( args ) + [ None ] * delta + [ doc ]
        else:
            # xmlrpc does not support kwargs, so we cannot use them
            raise TypeError( 'Unable to attach compiled schema document' )
        return newargs, newkw
    # ===========================================
    def _set_startingpath( self, path_index, path, args, kwargs ):
        """Place *path* at the 'startingpath' positional slot, padding any
        intermediate positions with None."""
        newargs = args
        newkw = kwargs
        if path_index is not None:
            if len( args ) > path_index :
                newargs = list( args )
                newargs[ path_index ] = path
            elif len( args ) == path_index :
                newargs = list( args ) + [ path ]
            elif len( args) < path_index :
                delta = path_index - len( args )
                newargs = list( args ) + [ None ] * delta + [ path ]
        else:
            # xmlrpc does not support kwargs, so we cannot use them
            raise TypeError( 'Unable to attach starting path' )
        return newargs, newkw
    # ===========================================
    def _replace_args( self, server, method, args, kwargs ):
        "as a convenience, we can automagically fill in some args that the server method requires"
        # NOTE(review): inspect.getargspec() was removed in Python 3.11;
        # this needs getfullargspec (or a version-proof fallback) to run
        # on modern interpreters -- confirm the supported Python range.
        argspec = inspect.getargspec(method)
        # indexes are -1 because 'self' is not part of the wire call:
        user_index = argspec.args.index( 'user' ) - 1
        doc_index = argspec.args.index( 'compileddoc' ) - 1 if 'compileddoc' in argspec.args else None
        path_index = argspec.args.index( 'startingpath' ) - 1 if 'startingpath' in argspec.args else None
        # security protocol replaces username with a full user-credential object:
        servernonce = server.get_nonce()
        username = self._get_user( user_index, args, kwargs )
        user = tuple( auth.get_user_credentials( username, self._conf, servernonce ))
        newargs, newkw = self._set_user( user_index, user, args, kwargs )
        # attach the compile document to the call, when appropriate:
        # attach the starting path to the call, will very frequently pair with the compiled document
        if doc_index is not None:
            newargs, newkw = self._set_compileddoc( doc_index, self._doc, newargs, newkw )
        if path_index is not None:
            newargs, newkw = self._set_startingpath( path_index, self._root, newargs, newkw )
        return newargs, newkw
    # ===========================================
    # if a server fails, then send email and remove it from the list!
    def _call_one( self, method, *args, **kwargs ):
        """Invoke *method* on one server (round robin).  On socket error the
        server is dropped from the pool and the call retries recursively on
        the remaining servers; the error propagates once the pool is empty."""
        server = self._pick_one()
        ret = None
        try:
            try:
                # use_builtin_types is unavailable on older Pythons:
                p = xmlrpc_lib.ServerProxy(server, allow_none=True, use_builtin_types=True )
            except TypeError :
                p = xmlrpc_lib.ServerProxy(server, allow_none=True )
            name = method.__name__
            # replace any arguments that we should:
            newargs, newkw = self._replace_args( p, method, args, kwargs )
            # Be careful here, if you mess this call up, you'll be calling the
            # local definition of the server method, not the method on the remote server!
            ret = p.__getattr__(name)(*newargs, **newkw) # do the function call
        except socket.error :
            if self._notifier :
                self._notifier( server ) # flash the red lights
            self._server_list.remove( server )
            self._server_bound = len( self._server_list )
            self._server_num = 0
            if not self._server_list:
                raise # pass the socket.error on up
            return self._call_one( method, *args, **kwargs )
        return ret
    # ===========================================
    def _call_all( self, method, *args, **kwargs ):
        """Invoke *method* on every server in the pool; return a dict of
        server URL -> result.  A socket error drops the failing server and
        then propagates (unlike _call_one, no retry of the remainder)."""
        ret = {}
        # would be lovely to execute in parallel:
        for server in self._server_list :
            try:
                try:
                    p = xmlrpc_lib.ServerProxy(server, allow_none=True, use_builtin_types=True )
                except TypeError :
                    p = xmlrpc_lib.ServerProxy(server, allow_none=True )
                name = method.__name__
                # replace any arguments that we should:
                newargs, newkw = self._replace_args( p, method, args, kwargs )
                # Be careful here, if you mess this call up, you'll be calling the
                # local definition of the server method, not the method on the remote server!
                ret[ server ] = p.__getattr__(name)( *newargs, **newkw) # do the function call
            except socket.error :
                if self._notifier :
                    self._notifier( server ) # flash the red lights
                self._server_list.remove( server )
                self._server_bound = len( self._server_list )
                self._server_num = 0
                raise # pass the socket.error on up
        return ret
    # ===========================================
    @classmethod
    def _rpc_one( cls, transcoder=None ):
        """Decorator factory: register a server method as a client proxy
        that calls exactly one server, optionally transcoding the result."""
        def fn_d ( fn ):
            def api( client, *args, **kwargs ):
                ret = client._call_one( fn, *args, **kwargs )
                return transcoder( ret ) if transcoder else ret
            api.__doc__ = fn.__doc__
            setattr( cls, fn.__name__, api )
            return fn
        return fn_d
    # ===========================================
    @classmethod
    def _rpc_all( cls, transcoder=None ):
        """Decorator factory: register a server method as a client proxy
        that fans the call out to every server in the pool."""
        def fn_d( fn ):
            def api( client, *args, **kwargs ):
                ret = client._call_all( fn, *args, **kwargs )
                return transcoder( ret ) if transcoder else ret
            api.__doc__ = fn.__doc__
            setattr( cls, fn.__name__, api )
            return fn
        return fn_d
    # ===========================================
    @classmethod
    def _rpc_specific( cls, transcoder=None ):
        """Decorator factory: register a server method as a client proxy
        that targets the server named by the method's 'server' argument."""
        def fn_d( fn ):
            # NOTE(review): getargspec removed in Python 3.11 -- see
            # _replace_args above.
            argspec = inspect.getargspec(fn)
            server_index = argspec.args.index( 'server' ) - 1
            def api( client, *args, **kwargs ):
                server = args[server_index]
                try:
                    p = xmlrpc_lib.ServerProxy(server, allow_none=True, use_builtin_types=True )
                except TypeError :
                    p = xmlrpc_lib.ServerProxy(server, allow_none=True )
                method = p.__getattr__(fn.__name__)
                # replace any arguments that we should:
                newargs, newkw = client._replace_args( p, method, args, kwargs )
                # Be careful here, if you mess this call up, you'll be calling the
                # local definition of the server method, not the method on the remote server!
                ret = method(*newargs, **newkw) # do the function call
                return transcoder( ret ) if transcoder else ret
            api.__doc__ = fn.__doc__
            setattr( cls, fn.__name__, api )
            return fn
        return fn_d
# -------------------------------------------------------------------
#
# decorator for methods:
# requires that the class of the method has a method _auth_user_method
# to authorize access to the method
# requires that the method has a parameter 'user' which is a UserCredentials object
#
def _authorized(fn):
argspec = inspect.getargspec(fn)
user_index = argspec.args.index( 'user' ) - 1
def wrapper(server, *args, **kwargs):
user = args[user_index]
if server._auth_user_method( user, fn.__name__ ):
return fn(server, *args, **kwargs) # do the original function call
return None
return functools.wraps(fn)(wrapper)
# -------------------------------------------------------------------
#
# XMLRPC App
#
class ServerApp :
    """XML-RPC application object for the dirb server.

    Holds the merged server configuration, the nonce cache used by the
    challenge/response authentication protocol, and the RPC methods that
    are mirrored onto RemoteClient via the _rpc_* decorators.
    """
    def __init__(self, url, config, logginglevel = logging.DEBUG):
        "config is the parameter dictionary; contains server configuration vars"
        # get a logger:
        self._logger = logging.getLogger('dirbserver')
        handler = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s %(levelname)s | %(message)s')
        handler.setFormatter(formatter)
        self._logger.addHandler(handler)
        self._logger.setLevel(logginglevel)
        self._logger.info( "Initializing server app" )
        self._url = url
        # ---------------------------------------
        # no default shutdown callable -- needs to come from server framework:
        # server really does need to be shutdown cleanly,
        # to terminate sidecar processes
        # associated with the sandbox.
        self._shutdown_callable = None # needed to shutdown the server above us
        # ---------------------------------------
        #
        # get configuration settings (defaults overlaid with caller's config):
        #
        self._config = conf.get_default_server_config()
        for k in config :
            self._config[k] = copy.deepcopy(config[k])
        # ---------------------------------------
        self._mutex = threading.Lock() # TODO: should this be a multi-lock?
        # ---------------------------------------
        # nonce -> issue-time; entries expire after NONCE_EXPIRY seconds
        self._noncecache = {}
        # ---------------------------------------
        self._auth = auth.MethodPermissions( self._config, self._logger )
        return
    # ===========================================
    def _lock(self):
        "Acquire the app-wide mutex (blocking)."
        self._mutex.acquire(True)
        return
    def _unlock(self):
        "Release the app-wide mutex."
        self._mutex.release()
        return
    # ===========================================
    def get_nonce( self ):
        "Issue a fresh base64 server nonce and record when it was issued."
        nonce = codecs.decode( base64.b64encode(auth.get_nonce()), "utf-8" )
        now = datetime.datetime.now()
        self._lock()
        self._noncecache[ nonce ] = now
        self._unlock()
        return nonce
    # -------------------------------------------
    def _auth_user( self, cred ):
        """Verify *cred*'s server nonce and credentials.

        The nonce must be one we issued within the last NONCE_EXPIRY
        seconds; it is consumed (deleted) on use.  Raises
        SystemError("Permission Denied") when verification fails.
        """
        # prune if past a limit:
        if len( self._noncecache ) > NONCE_CACHE_LIMIT :
            # BUGFIX: 'now' was previously referenced here without ever
            # being defined, so pruning raised NameError as soon as the
            # cache filled up.  Also iterate over a snapshot of the keys:
            # deleting from a dict while iterating its live keys view is a
            # RuntimeError on Python 3.
            now = datetime.datetime.now()
            try:
                self._lock()
                for k in list( self._noncecache.keys() ):
                    if now - self._noncecache[k] > datetime.timedelta( seconds=NONCE_EXPIRY ) :
                        del self._noncecache[k]
            finally:
                self._unlock()
        # need to verify the server nonce before we call auth module to verify the credentials:
        ret = False
        try:
            self._lock()
            # unknown nonce -> KeyError propagates (after unlock), which is
            # an acceptable hard failure for a forged/expired nonce
            timestamp = self._noncecache[ cred.servernonce ]
            if datetime.datetime.now() - timestamp < datetime.timedelta( seconds=NONCE_EXPIRY ):
                del self._noncecache[ cred.servernonce ]
                ret = auth.verify_user_credentials( cred, self._config )
        finally:
            self._unlock()
        if not ret:
            raise SystemError( "Permission Denied" )
        return ret
    # -------------------------------------------
    def _auth_user_method( self, user, methodname ):
        # authenticate the user identity first, then whether they can run the method or not
        # encode from generic tuple from the wire to the named tuple we require
        cred = auth.UserCredentials( *user )
        if self._auth_user( cred ):
            if self._auth.verify( cred.username, methodname ):
                return True
            raise SystemError( "Permission Denied" )
        return False
    #############################################
    # ===========================================
    # not to be served to clients:
    def set_shutdown_callable(self, fn):
        "Different servers hosting this app need different methods for shutting down"
        self._shutdown_callable = fn
    # ===========================================
    @_authorized
    @RemoteClient._rpc_all()
    def shutdown_server(self, user):
        "friendly shutdown of the cluster"
        cred = auth.UserCredentials( *user )
        self._logger.warning( "Shutdown call received from %s" % cred.username )
        self._lock() # will not release !  (blocks further RPC work on purpose)
        return self._shutdown_callable() # execute the callable
    # ===========================================
    @_authorized
    @RemoteClient._rpc_one( transcoder=tuples_to_ptclist )
    def create_paths(self, createexpr, user, compileddoc, startingpath ):
        "Returns a list of PathTraversalContexts that were created from the given creation expression."
        cl = localclient.LocalClient( compileddoc, startingpath )
        cred = auth.UserCredentials( *user )
        created = []
        # get target paths to create
        target_paths = cl.depict_paths( createexpr )
        # sort target paths soas to create shallow directories first
        target_paths = ( (fs.split_path(x.path), x) for x in target_paths )
        target_paths = [ (len(x[0]), x[-1].path, x[-1]) for x in target_paths ]
        target_paths = [ x[-1] for x in sorted( target_paths ) ]
        for target in target_paths:
            # acquire permissions, uid, gid
            # TODO: would be nice to cache these credentials to reduce overhead:
            uid = pwd.getpwnam( target.user ).pw_uid if target.user else DEFAULT_UID
            gid = grp.getgrnam( target.group ).gr_gid if target.group else DEFAULT_GID
            permissions = target.permissions if target.permissions else DEFAULT_PERMISSIONS
            # create directory
            if not os.path.isdir( target.path ) :
                os.mkdir(target.path)
            # set permissions on this directory
            # NOTE(review): despite the original comment ("will not overwrite
            # existing permissions on existing dirs!"), chown/chmod run for
            # pre-existing directories too -- confirm intended behavior.
            os.chown(target.path, uid, gid)
            os.chmod(target.path, permissions )
            self._logger.debug( "%s created %s" % (cred.username, target.path))
            created.append( tuple(target) ) # use transcoder on client side to get back namedtuple objects.
        return created
| |
"""Parts of a process.
Here you may:
* Define list of part names that a module may make, even recursively.
* Refer to parts by name.
* Register maker of parts.
* Assemble parts.
The `parts` module will replace the `components` module as the way to
assemble pieces of a process.
What about the name "parts"? It is said that a component is usually
self-contained but a part might be not; so these pieces are called parts
instead of components, as they might not be self-contained (also it is
easier to type "part" than "component").
"""
__all__ = [
'AUTO',
'Parts',
'assemble',
'define_maker',
'define_makers',
'define_part',
]
import functools
import inspect
from collections import OrderedDict
from collections import defaultdict
from collections import namedtuple
from garage.assertions import ASSERT
from startup import Startup
# Sentinel: assign to a Parts attribute to name the part after the attribute.
AUTO = object()
def _assert_name(name):
    """Check that `name` is a public-looking str; return it unchanged."""
    ASSERT.type_of(name, str)
    is_public = not name.startswith('_')
    ASSERT(is_public, 'expect name not start with underscore: %s', name)
    return name
# Make it a subclass of str so that `startup` accepts it.
class PartName(str):
    """A fully resolved part name; subclasses str so `startup` accepts it."""
    pass
class Parts:
    """List of part names.
    To create a part list, you (optionally) provide the module name, and
    then assign each part as follows:
        # Assume __name__ == 'some.module'
        part_list = Parts(__name__)
        part_list.name_1 = AUTO
        part_list.name_2 = 'some_part_name'
        part_list.sub_list = another_part_list
    Then you may refer to these part names like this:
        assert part_list.name_1 == 'some.module:name_1'
        assert part_list.name_2 == 'some.module:some_part_name'
    A part list is either orphaned or adopted. If it is an orphan, you
    cannot read its part names. It is adopted if it is created with a
    module name, or is assigned to a non-orphan part list. A part list
    can only be adopted once.
    """
    def __init__(self, module_name=None):
        # Bypass our own __setattr__ (which reserves assignment for
        # declaring parts) when setting internal state.
        # _parent: module-name str, (parent Parts, edge name) pair, or None.
        super().__setattr__('_parent', module_name)
        # _part_names: attribute name -> raw str, or PartName once resolved.
        super().__setattr__('_part_names', OrderedDict())
        # _resolved: lazily computed "module:edge1.edge2." prefix cache.
        super().__setattr__('_resolved', None)
    def __repr__(self):
        return '<%s.%s 0x%x %s>' % (
            self.__module__, self.__class__.__qualname__, id(self),
            ' '.join(
                '%s=%s' % (attr_name, part_name)
                for attr_name, part_name in self._part_names.items()
            ),
        )
    def __getattr__(self, attr_name):
        # Only invoked when normal attribute lookup fails, i.e. for
        # declared part names.
        _assert_name(attr_name)
        try:
            part_name = self._part_names[attr_name]
        except KeyError:
            msg = '%r has no part %r' % (self.__class__.__name__, attr_name)
            raise AttributeError(msg) from None
        if part_name.__class__ is str:
            # Exact-class check distinguishes a raw str (unresolved) from a
            # PartName (already resolved); resolve once and cache.
            self._part_names[attr_name] = part_name = self._resolve(part_name)
        return part_name
    def __setattr__(self, attr_name, part_name):
        # Real instance attributes (created in __init__) keep normal
        # assignment semantics; everything else declares a part.
        if attr_name in self.__dict__:
            return super().__setattr__(attr_name, part_name)
        _assert_name(attr_name)
        if part_name is AUTO:
            # AUTO: the part is named after the attribute itself.
            part_name = attr_name
        if isinstance(part_name, Parts):
            # Assigning a sub-list adopts it under this list.
            part_name._adopt_by(self, attr_name)
        else:
            ASSERT.is_(part_name.__class__, str)
        self._part_names[attr_name] = part_name
    def _adopt_by(self, parent, edge):
        # A list may be adopted exactly once: _parent flips from None to a
        # (parent, edge) pair.
        ASSERT(not self._parent, 'expect orphan: %r', self)
        self._parent = (parent, edge)
    def _resolve(self, part_name):
        # Build (once) the "module:edge1.edge2." prefix by walking the
        # adoption chain up to the root module name.
        if not self._resolved:
            module_name = None
            pieces = []
            obj = self
            while True:
                parent = obj._parent
                ASSERT(parent, 'expect non-orphan: %r, %r', self, obj)
                if isinstance(parent, str):
                    module_name = parent
                    break
                else:
                    obj, edge = parent
                    pieces.append(edge)
            if pieces:
                # Edges were collected child-to-root; reverse for display order.
                pieces.reverse()
                self._resolved = '%s:%s.' % (module_name, '.'.join(pieces))
            else:
                self._resolved = module_name + ':'
        return PartName(self._resolved + part_name)
# Specification of a maker function, parsed from its signature by
# parse_maker_spec.
MakerSpec = namedtuple('MakerSpec', [
    'input_specs',   # List of InputSpec.
    'output_specs',  # List of part names that this function makes.
])
# One annotated parameter of a maker function.
InputSpec = namedtuple('InputSpec', [
    'parameter',  # Name of function's parameter.
    'part_name',  # Part name annotated to this parameter.
    'is_all'      # True if it is `[x]`-annotated (wants all makers' outputs).
])
def parse_maker_spec(maker):
    """Inspect `maker`'s signature and return its MakerSpec."""
    sig = inspect.signature(maker)
    inputs = []
    for param in sig.parameters.values():
        annotation = param.annotation
        if annotation is sig.empty:
            # Unannotated parameters are skipped rather than rejected: the
            # maker could be a wrapper whose extra parameters are bound to
            # defaults (if not, an error surfaces when it is called).
            continue
        wants_all = isinstance(annotation, list)
        if wants_all:
            ASSERT(
                len(annotation) == 1,
                'expect `[x]`-form annotation, not: %r', annotation,
            )
            name = annotation[0]
        else:
            name = annotation
        inputs.append(InputSpec(
            parameter=param.name,
            part_name=ASSERT.type_of(name, str),
            is_all=wants_all,
        ))
    ret = sig.return_annotation
    if ret is sig.empty:
        # A maker should usually annotate its return value, but whether a
        # missing annotation is an error is left to the caller.
        outputs = ()
    elif isinstance(ret, tuple):
        outputs = tuple(ASSERT.type_of(n, str) for n in ret)
    else:
        outputs = (ASSERT.type_of(ret, str),)
    return MakerSpec(input_specs=tuple(inputs), output_specs=outputs)
# Table of output part name -> maker -> list of InputSpec.
# Populated by define_maker/define_part; consumed by assemble.
_MAKER_TABLE = defaultdict(dict)
def define_makers(makers):
    """Register each maker function in the `makers` collection."""
    for fn in makers:
        define_maker(fn)
def define_maker(maker):
    """Register `maker` in the global maker table and return it."""
    return _define_maker(_MAKER_TABLE, maker)
def _define_maker(maker_table, maker):
    """Insert `maker` into `maker_table` under each part name it outputs."""
    spec = parse_maker_spec(maker)
    ASSERT(
        spec.output_specs,
        'expect maker be annotated on its return value: %r', maker,
    )
    for out_name in spec.output_specs:
        maker_table[out_name][maker] = spec.input_specs
    return maker
def define_part(part_name, *part):
    """Register a constant part under `part_name`.

    Usable as a decorator:

        @define_part(part_name)
        def part():
            pass

    or as a plain "define" expression:

        part = define_part(part_name, part)

    NOTE: This is just a wrapper of define_maker because `Startup.set`
    does not support setting values multiple times at the moment.
    """
    if not part:
        # Called with only a name: act as a decorator factory.
        return functools.partial(define_part, part_name)
    ASSERT.equal(1, len(part))
    value = part[0]
    def make_part() -> part_name:
        return value
    define_maker(make_part)
    return value
def assemble(part_names, *, input_parts=None, selected_makers=None):
    """Assemble parts.

    For each part name, registered part makers are looked up recursively
    and the (sub-)parts are assembled.

    Parts that no maker is registered for may be supplied through the
    `input_parts` argument.

    When multiple makers are registered for one part, exactly one of the
    following happens:
    * Some makers are selected by the caller.
    * All makers are selected by the caller.
    * An error is raised.

    The `selected_makers` argument looks like this:
        selected_makers = {
            part_name: [maker, ...],  # Some makers are selected.
            another_part_name: all,   # All makers are selected.
        }

    `assemble` errs when there are multiple sources for one part unless
    the caller explicitly allows it, or a maker is annotated with `[x]`
    on the part (a source is either a maker providing the part or an
    input part provided by the caller).  This restriction protects
    against accidentally producing multiple parts while only one of
    them is consumed.
    """
    return _assemble(
        _MAKER_TABLE,
        part_names,
        input_parts if input_parts is not None else {},
        selected_makers if selected_makers is not None else {},
    )
def _assemble(maker_table, part_names, input_parts, selected_makers):
    """Wire all discovered sources into a Startup graph and run it."""
    startup = Startup()
    found = find_sources(part_names, input_parts, maker_table, selected_makers)
    for maker, pair in found:
        if maker:
            startup(maker)
        else:
            # Caller-provided input part: (part name, value).
            name, value = pair
            startup.set(name, value)
    return startup.call()
def find_sources(part_names, input_parts, maker_table, selected_makers):
    """For each part name, find its source recursively.
    A source is either a maker, or a part provided by the caller.
    Yields (maker, None) for makers and (None, (name, value)) for
    caller-provided input parts.
    """
    # Normalize the requested names into (part_name, is_all) work items.
    queue = []
    for part_name in part_names:
        if isinstance(part_name, InputSpec):
            is_all = part_name.is_all
            part_name = part_name.part_name
        elif isinstance(part_name, list):
            # `[x]`-form: the caller wants every registered maker of x.
            ASSERT.equal(1, len(part_name))
            part_name = part_name[0]
            is_all = True
        else:
            is_all = False
        queue.append((ASSERT.type_of(part_name, str), is_all))
    seen_part_names = set()
    yielded_makers = set()
    def maybe_yield_maker(maker, input_specs):
        # Yield each maker at most once, enqueueing its own inputs so they
        # get resolved too (breadth-first).
        if maker not in yielded_makers:
            for input_spec in input_specs:
                queue.append((input_spec.part_name, input_spec.is_all))
            yielded_makers.add(maker)
            yield maker, None
    # Breadth-first walk over the dependency graph; the queue grows as
    # makers' inputs are discovered.
    while queue:
        part_name, is_all = queue.pop(0)
        if part_name in seen_part_names:
            continue
        seen_part_names.add(part_name)
        maker_to_input_specs = maker_table.get(part_name)
        if maker_to_input_specs is None:
            # No maker is registered for this part; try input parts.
            ASSERT(
                part_name in input_parts,
                'expect part %s from caller', part_name,
            )
            yield None, (part_name, input_parts[part_name])
            continue
        # Okay, some makers are registered for this part; let's check
        # whether we want them all or not.
        selected = selected_makers.get(part_name)
        if selected is all:
            is_all = True
        if is_all:
            # We want them all; let's check input part before we check
            # registered makers.
            if part_name in input_parts:
                yield None, (part_name, input_parts[part_name])
        else:
            # Assert that there is only one source - the maker.
            ASSERT(
                part_name not in input_parts,
                'expect part %s by maker, not from caller', part_name,
            )
        if is_all or len(maker_to_input_specs) == 1:
            for maker, input_specs in maker_to_input_specs.items():
                yield from maybe_yield_maker(maker, input_specs)
            continue
        # It is getting complex as multiple makers are registered for
        # this part, and we don't want them all.
        ASSERT(
            selected is not None,
            'expect caller to select maker(s) for %s', part_name,
        )
        for maker in selected:
            input_specs = maker_to_input_specs.get(maker)
            ASSERT(
                input_specs is not None,
                'expect maker to be registered: %r', maker,
            )
            yield from maybe_yield_maker(maker, input_specs)
| |
from datetime import datetime
import json
from urllib import urlencode
from dimagi.utils.couch.resource_conflict import retry_resource
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.template.loader import render_to_string
from django.shortcuts import render
from django.contrib import messages
from django.utils.translation import ugettext as _, ugettext_lazy
from corehq.apps.app_manager.views import _clear_app_cache
from corehq.apps.appstore.forms import AddReviewForm
from corehq.apps.appstore.models import Review
from corehq.apps.domain.decorators import require_superuser
from corehq.elastic import es_query, parse_args_for_es, fill_mapping_with_facets
from corehq.apps.domain.models import Domain
from dimagi.utils.couch.database import apply_update
# Facet fields requested from ES for the appstore and deployments views.
SNAPSHOT_FACETS = ['project_type', 'license', 'author.exact']
DEPLOYMENT_FACETS = ['deployment.region']
# Sidebar facet layout: (section title, expanded?, facet descriptors).
SNAPSHOT_MAPPING = [
    ("", True, [
        {"facet": "project_type", "name": ugettext_lazy("Category"), "expanded": True },
        {
            "facet": "license",
            "name": ugettext_lazy("License"),
            "expanded": True,
            # Maps stored license codes to human-readable labels.
            "mapping": {
                'cc': 'CC BY',
                'cc-sa': 'CC BY-SA',
                'cc-nd': 'CC BY-ND',
                'cc-nc': 'CC BY-NC',
                'cc-nc-sa': 'CC BY-NC-SA',
                'cc-nc-nd': 'CC BY-NC-ND',
            }
        },
        {"facet": "author.exact", "name": ugettext_lazy("Author"), "expanded": True },
    ]),
]
DEPLOYMENT_MAPPING = [
    ("", True, [
        {"facet": "deployment.region", "name": "Region", "expanded": True },
    ]),
]
def rewrite_url(request, path):
    """Redirect a legacy URL onto /exchange, preserving the query string."""
    target = '/exchange%s?%s' % (path, request.META['QUERY_STRING'])
    return HttpResponseRedirect(target)
def inverse_dict(d):
    """Return a new dict mapping d's values back to its keys.

    Assumes values are hashable; if values repeat, the last key iterated
    wins. Uses .items() instead of the Python-2-only .iteritems() so the
    helper works on both Python 2 and Python 3.
    """
    return dict((v, k) for k, v in d.items())
def can_view_app(req, dom):
    """True when `dom` is a snapshot the requester is allowed to see.

    Approved snapshots are public; unapproved ones are visible only to
    admins of the source project.
    """
    if not dom or not dom.is_snapshot:
        return False
    if dom.is_approved:
        return True
    user = getattr(req, "couch_user", "")
    return bool(user and user.is_domain_admin(dom.copied_from.name))
def project_info(request, domain, template="appstore/project_info.html"):
    """Snapshot detail page; on POST, create or update the user's review.

    Only users who do not belong to the snapshot's source project may
    post a review.
    """
    dom = Domain.get(domain)
    if not can_view_app(request, dom):
        raise Http404()
    if request.method == "POST" and dom.copied_from.name not in request.couch_user.get_domains():
        form = AddReviewForm(request.POST)
        if form.is_valid():
            title = form.cleaned_data['review_title']
            # Clamp the star rating into [1, 5].
            # NOTE(review): int(None) raises if 'rating' is missing from
            # the POST data - presumably the UI always submits it; verify.
            rating = int(request.POST.get('rating'))
            if rating < 1:
                rating = 1
            if rating > 5:
                rating = 5
            info = form.cleaned_data['review_info']
            date_published = datetime.now()
            user = request.user.username
            # One review per user per snapshot version: update in place if
            # the user already reviewed this version.
            old_review = Review.get_by_version_and_user(domain, user)
            if len(old_review) > 0: # replace old review
                review = old_review[0]
                review.title = title
                review.rating = rating
                review.info = info
                review.date_published = date_published
            else:
                review = Review(title=title, rating=rating, user=user, info=info, date_published = date_published, domain=domain, project_id=dom.copied_from._id)
            review.save()
        else:
            form = AddReviewForm()
    else:
        form = AddReviewForm()
    copies = dom.copies_of_parent()
    reviews = Review.get_by_app(dom.copied_from._id)
    average_rating = Review.get_average_rating_by_app(dom.copied_from._id)
    num_ratings = Review.get_num_ratings_by_app(dom.copied_from._id)
    if average_rating:
        average_rating = round(average_rating, 1)
    # Currently always-empty placeholders passed to the template.
    images = set()
    audio = set()
    return render(request, template, {
        "project": dom,
        "applications": dom.full_applications(include_builds=False),
        "form": form,
        "copies": copies,
        "reviews": reviews,
        "average_rating": average_rating,
        "num_ratings": num_ratings,
        "images": images,
        "audio": audio,
        "url_base": reverse('appstore'),
        # Import is offered only to logged-in users with at least one project.
        'display_import': True if getattr(request, "couch_user", "") and request.couch_user.get_domains() else False
    })
def deduplicate(hits):
    """Drop ES hits whose _source.name was already seen, keeping order."""
    seen = set()
    unique = []
    for hit in hits:
        name = hit['_source']['name']
        if name not in seen:
            seen.add(name)
            unique.append(hit)
    return unique
def appstore(request, template="appstore/appstore_base.html"):
    """Paginated, facet-filtered listing of published app snapshots."""
    page_length = 10
    # Only superusers may list unapproved snapshots.
    include_unapproved = True if request.GET.get('is_approved', "") == "false" else False
    if include_unapproved and not request.user.is_superuser:
        raise Http404()
    # NOTE: `_` here shadows the ugettext alias imported at module level;
    # harmless, since this function performs no translation afterwards.
    params, _ = parse_args_for_es(request)
    # parse_args_for_es may return values as lists; normalize the page number.
    page = params.pop('page', 1)
    page = int(page[0] if isinstance(page, list) else page)
    results = es_snapshot_query(params, SNAPSHOT_FACETS)
    hits = results.get('hits', {}).get('hits', [])
    hits = deduplicate(hits)
    d_results = [Domain.wrap(res['_source']) for res in hits]
    sort_by = request.GET.get('sort_by', None)
    if sort_by == 'best':
        d_results = Domain.popular_sort(d_results)
    elif sort_by == 'newest':
        # ES already returns newest-first (see es_snapshot_query's sort).
        pass
    else:
        d_results = Domain.hit_sort(d_results)
    # Query parameters carried across pagination links.
    persistent_params = {}
    if sort_by:
        persistent_params["sort_by"] = sort_by
    if include_unapproved:
        persistent_params["is_approved"] = "false"
    persistent_params = urlencode(persistent_params) # json.dumps(persistent_params)
    average_ratings = list()
    for result in d_results:
        average_ratings.append([result.name, Review.get_average_rating_by_app(result.copied_from._id)])
    more_pages = False if len(d_results) <= page*page_length else True
    facet_map = fill_mapping_with_facets(SNAPSHOT_MAPPING, results, params)
    vals = dict(
        apps=d_results[(page-1)*page_length:page*page_length],
        page=page,
        prev_page=(page-1),
        next_page=(page+1),
        more_pages=more_pages,
        sort_by=sort_by,
        average_ratings=average_ratings,
        include_unapproved=include_unapproved,
        facet_map=facet_map,
        facets=results.get("facets", []),
        query_str=request.META['QUERY_STRING'],
        search_query=params.get('search', [""])[0],
        persistent_params=persistent_params,
    )
    return render(request, template, vals)
def appstore_api(request):
    """JSON endpoint exposing raw snapshot ES query results."""
    params, facets = parse_args_for_es(request)
    payload = json.dumps(es_snapshot_query(params, facets))
    return HttpResponse(payload, mimetype="application/json")
def es_snapshot_query(params, facets=None, terms=None, sort_by="snapshot_time"):
    """Build and run the ES query for published snapshot Domain docs."""
    terms = ['is_approved', 'sort_by', 'search'] if terms is None else terms
    facets = [] if facets is None else facets
    q = {
        "sort": {sort_by: {"order" : "desc"} },
        "query": {"bool": {"must": [
            {"match": {'doc_type': "Domain"}},
            {"term": {"published": True}},
            {"term": {"is_snapshot": True}},
        ]}},
        # Default to approved-only when the caller does not say otherwise.
        "filter": {"and": [
            {"term": {"is_approved": params.get('is_approved', None) or True}},
        ]},
    }
    search_query = params.get('search', "")
    if search_query:
        # Free-text search across all fields, requiring every term.
        q['query']['bool']['must'].append(
            {"match" : {"_all" : {"query" : search_query, "operator" : "and"}}})
    return es_query(params, facets, terms, q)
def appstore_default(request):
    """Send visitors to the 'advanced' appstore report by default."""
    from corehq.apps.appstore.dispatcher import AppstoreDispatcher
    url = reverse(AppstoreDispatcher.name(), args=['advanced'])
    return HttpResponseRedirect(url)
@require_superuser
def approve_app(request, domain):
    """Toggle a snapshot's approval flag, then bounce back to the referrer."""
    dom = Domain.get(domain)
    flag = request.GET.get('approve')
    if flag == 'true':
        dom.is_approved = True
        dom.save()
    elif flag == 'false':
        dom.is_approved = False
        dom.save()
    # Any other value leaves the domain untouched.
    return HttpResponseRedirect(request.META.get('HTTP_REFERER') or reverse('appstore'))
@login_required
@retry_resource(3)
def import_app(request, domain):
    """Copy all applications of a published snapshot into one of the
    requesting user's existing projects.

    On success redirects to the last imported app; on any failure shows
    an error message on the project info page.
    """
    user = request.couch_user
    if not user.is_eula_signed():
        messages.error(request, 'You must agree to our eula to download an app')
        return project_info(request, domain)
    from_project = Domain.get(domain)
    if request.method == 'POST' and from_project.is_snapshot:
        if not from_project.published:
            messages.error(request, "This project is not published and can't be downloaded")
            return project_info(request, domain)
        to_project_name = request.POST['project']
        if not user.is_member_of(to_project_name):
            messages.error(request, _("You don't belong to that project"))
            return project_info(request, domain)
        new_doc = None
        for app in from_project.full_applications(include_builds=False):
            new_doc = from_project.copy_component(app['doc_type'], app.get_id, to_project_name, user)
        # BUG FIX: the original raised NameError on `new_doc` below when the
        # snapshot contained no applications; fail gracefully instead.
        if new_doc is None:
            messages.error(request, _("This project has no applications to import"))
            return project_info(request, domain)
        _clear_app_cache(request, to_project_name)
        from_project.downloads += 1
        from_project.save()
        messages.success(request, render_to_string("appstore/partials/view_wiki.html", {"pre": _("Application successfully imported!")}), extra_tags="html")
        # Redirect to the last application copied.
        return HttpResponseRedirect(reverse('view_app', args=[to_project_name, new_doc.id]))
    else:
        return HttpResponseRedirect(reverse('project_info', args=[domain]))
@login_required
def copy_snapshot(request, domain):
    """Copy a published snapshot into a brand-new project owned by the user."""
    user = request.couch_user
    if not user.is_eula_signed():
        messages.error(request, 'You must agree to our eula to download an app')
        return project_info(request, domain)
    dom = Domain.get(domain)
    if request.method == "POST" and dom.is_snapshot:
        from corehq.apps.registration.forms import DomainRegistrationForm
        args = {'domain_name': request.POST['new_project_name'], 'eula_confirmed': True}
        form = DomainRegistrationForm(args)
        if request.POST.get('new_project_name', ""):
            if not dom.published:
                messages.error(request, "This project is not published and can't be downloaded")
                return project_info(request, domain)
            # Validate the requested project name before copying anything.
            if form.is_valid():
                new_domain = dom.save_copy(form.cleaned_data['domain_name'], user=user)
            else:
                messages.error(request, form.errors)
                return project_info(request, domain)
            # save_copy returns None when the name is already taken.
            if new_domain is None:
                messages.error(request, _("A project by that name already exists"))
                return project_info(request, domain)
            # Bump the download counter via a conflict-safe couch update.
            def inc_downloads(d):
                d.downloads += 1
            apply_update(dom, inc_downloads)
            messages.success(request, render_to_string("appstore/partials/view_wiki.html", {"pre": _("Project copied successfully!")}), extra_tags="html")
            return HttpResponseRedirect(reverse('view_app',
                args=[new_domain.name, new_domain.full_applications()[0].get_id]))
        else:
            messages.error(request, _("You must specify a name for the new project"))
            return project_info(request, domain)
    else:
        return HttpResponseRedirect(reverse('project_info', args=[domain]))
def project_image(request, domain):
    """Serve the project's stored image attachment, or 404 if it has none."""
    project = Domain.get(domain)
    if not project.image_path:
        raise Http404()
    image = project.fetch_attachment(project.image_path)
    return HttpResponse(image, content_type=project.image_type)
@login_required
def deployment_info(request, domain, template="appstore/deployment_info.html"):
    """Detail page for a publicly deployed project."""
    dom = Domain.get_by_name(domain)
    if not dom or not dom.deployment.public:
        raise Http404()
    # Facets come from an unfiltered deployments query.
    results = es_deployments_query({}, DEPLOYMENT_FACETS)
    facet_map = fill_mapping_with_facets(DEPLOYMENT_MAPPING, results, {})
    search_url = reverse('deployments')
    return render(request, template, {
        'domain': dom,
        'search_url': search_url,
        'url_base': search_url,
        'facet_map': facet_map,
    })
@login_required
def deployments(request, template="appstore/deployments.html"):
    """Paginated listing of publicly deployed projects."""
    params, _ = parse_args_for_es(request)
    # NOTE(review): DEPLOYMENT_MAPPING is a list, which has no .get();
    # this line raises AttributeError whenever params is non-empty.
    # Presumably a dict of query-param aliases was intended - verify.
    params = dict([(DEPLOYMENT_MAPPING.get(p, p), params[p]) for p in params])
    page = int(params.pop('page', 1))
    results = es_deployments_query(params, DEPLOYMENT_FACETS)
    d_results = [Domain.wrap(res['_source']) for res in results['hits']['hits']]
    # Fixed page size of 10 deployments per page.
    more_pages = False if len(d_results) <= page*10 else True
    facet_map = fill_mapping_with_facets(DEPLOYMENT_MAPPING, results, params)
    include_unapproved = True if request.GET.get('is_approved', "") == "false" else False
    vals = { 'deployments': d_results[(page-1)*10:page*10],
             'page': page,
             'prev_page': page-1,
             'next_page': (page+1),
             'more_pages': more_pages,
             'include_unapproved': include_unapproved,
             'facet_map': facet_map,
             'query_str': request.META['QUERY_STRING'],
             'search_url': reverse('deployments'),
             'search_query': params.get('search', [""])[0]}
    return render(request, template, vals)
def deployments_api(request):
    """JSON endpoint exposing raw deployments ES query results."""
    params, facets = parse_args_for_es(request)
    # NOTE(review): same latent bug as deployments() above -
    # DEPLOYMENT_MAPPING is a list and has no .get(); verify intent.
    params = dict([(DEPLOYMENT_MAPPING.get(p, p), params[p]) for p in params])
    results = es_deployments_query(params, facets)
    return HttpResponse(json.dumps(results), mimetype="application/json")
def es_deployments_query(params, facets=None, terms=None, sort_by="snapshot_time"):
    """Build and run the ES query for publicly deployed Domain docs.

    NOTE(review): `sort_by` is accepted but never used - the query has no
    "sort" clause; confirm whether that is intentional.
    """
    if terms is None:
        terms = ['is_approved', 'sort_by', 'search']
    if facets is None:
        facets = []
    q = {"query": {"bool": {"must": [
        {"match": {'doc_type': "Domain"}},
        {"term": {"deployment.public": True}},
    ]}}}
    search_query = params.get('search', "")
    if search_query:
        # Free-text search across all fields, requiring every term.
        q['query']['bool']['must'].append(
            {"match" : {"_all" : {"query" : search_query, "operator" : "and"}}})
    return es_query(params, facets, terms, q)
def media_files(request, domain, template="appstore/media_files.html"):
    """Multimedia listing for a snapshot the requester is allowed to view."""
    dom = Domain.get(domain)
    if not can_view_app(request, dom):
        raise Http404()
    context = {
        "project": dom,
        "url_base": reverse('appstore'),
    }
    return render(request, template, context)
| |
# Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
"""Auth management UI handlers."""
import functools
import json
import os
import webapp2
from google.appengine.api import users
from components import template
from components import utils
from .. import api
from .. import handler
from .. import model
from .. import replication
# Absolute path to the bundled templates/ directory.
TEMPLATES_DIR = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), 'templates')
# Global static configuration set in 'configure_ui'.
# Service name shown in page headers and titles.
_ui_app_name = 'Unknown'
# Tuple of UINavbarTabHandler subclasses rendered in the navbar.
_ui_navbar_tabs = ()
def configure_ui(app_name, ui_tabs=None):
  """Modifies global configuration of Auth UI.

  Args:
    app_name: name of the service (visible in page headers, titles, etc.)
    ui_tabs: list of UINavbarTabHandler subclasses that define tabs to show,
        or None to show the standard set of tabs.
  """
  global _ui_app_name
  global _ui_navbar_tabs
  _ui_app_name = app_name
  if ui_tabs is not None:
    for cls in ui_tabs:
      assert issubclass(cls, UINavbarTabHandler)
    _ui_navbar_tabs = tuple(ui_tabs)
  template.bootstrap({'auth': TEMPLATES_DIR})
def get_ui_routes():
  """Returns a list of routes with auth UI handlers."""
  # One route per registered navbar tab, then the fixed pages.
  routes = []
  for cls in _ui_navbar_tabs:
    routes.append(webapp2.Route(cls.navbar_tab_url, cls))
  routes.append(webapp2.Route(r'/auth', MainHandler))
  routes.append(webapp2.Route(r'/auth/bootstrap', BootstrapHandler, name='bootstrap'))
  routes.append(webapp2.Route(r'/auth/link', LinkToPrimaryHandler))
  routes.append(webapp2.Route(r'/auth/groups/log', ChangeLogHandler))
  return routes
def forbid_ui_on_replica(method):
  """Decorator for methods that are not allowed to be called on Replica.

  If such method is called on a service in Replica mode, it returns
  HTTP 405 "Method Not Allowed" pointing the caller at the Primary.
  """
  @functools.wraps(method)
  def wrapper(self, *args, **kwargs):
    assert isinstance(self, webapp2.RequestHandler)
    if model.is_replica():
      primary_url = model.get_replication_state().primary_url
      # BUG FIX: the message previously read "Now allowed"; it means
      # "Not allowed".
      self.abort(
          405,
          detail='Not allowed on a replica, see primary at %s' % primary_url)
    return method(self, *args, **kwargs)
  return wrapper
def redirect_ui_on_replica(method):
  """Decorator for GET handlers that must redirect to Primary on a replica.

  On a Replica-mode service the wrapped handler responds with an HTTP 302
  to the corresponding URL on the Primary.
  """
  @functools.wraps(method)
  def wrapper(self, *args, **kwargs):
    assert isinstance(self, webapp2.RequestHandler)
    assert self.request.method == 'GET'
    if model.is_replica():
      primary_url = model.get_replication_state().primary_url
      scheme = 'http://' if utils.is_local_dev_server() else 'https://'
      assert primary_url and primary_url.startswith(scheme), primary_url
      assert self.request.path_qs.startswith('/'), self.request.path_qs
      # Preserve the path and query string on the Primary.
      target = primary_url.rstrip('/') + self.request.path_qs
      self.redirect(target, abort=True)
    return method(self, *args, **kwargs)
  return wrapper
class UIHandler(handler.AuthenticatingHandler):
  """Renders Jinja templates extending base.html or base_minimal.html."""
  def reply(self, path, env=None, status=200):
    """Render template |path| to response using given environment.
    Optional keys from |env| that base.html uses:
      css_file: URL to a file with page specific styles, relative to site root.
      js_file: URL to a file with page specific Javascript code, relative to
          site root. File should define global object named same as a filename,
          i.e. '/auth/static/js/api.js' should define global object 'api' that
          incapsulates functionality implemented in the module.
      navbar_tab_id: id of a navbar tab to highlight.
      page_title: title of an HTML page.
    Args:
      path: path to a template, relative to templates/.
      env: additional environment dict to use when rendering the template.
      status: HTTP status code to return.
    """
    env = (env or {}).copy()
    env.setdefault('css_file', None)
    env.setdefault('js_file', None)
    env.setdefault('navbar_tab_id', None)
    env.setdefault('page_title', 'Untitled')
    # This goes to both Jinja2 env and Javascript config object.
    common = {
      'login_url': users.create_login_url(self.request.path),
      'logout_url': users.create_logout_url('/'),
      'xsrf_token': self.generate_xsrf_token(),
    }
    # Name of Javascript module with page code, derived from js_file
    # ('/auth/static/js/api.js' -> 'api').
    js_module_name = None
    if env['js_file']:
      assert env['js_file'].endswith('.js')
      js_module_name = os.path.basename(env['js_file'])[:-3]
    # This will be accessible from Javascript as global 'config' variable.
    js_config = {
      'identity': api.get_current_identity().to_bytes(),
    }
    js_config.update(common)
    # Jinja2 environment to use to render a template.
    full_env = {
      'app_name': _ui_app_name,
      'app_revision_url': utils.get_app_revision_url(),
      'app_version': utils.get_app_version(),
      'config': json.dumps(js_config),
      'identity': api.get_current_identity(),
      'js_module_name': js_module_name,
      'navbar': [
        (cls.navbar_tab_id, cls.navbar_tab_title, cls.navbar_tab_url)
        for cls in _ui_navbar_tabs
      ],
    }
    full_env.update(common)
    full_env.update(env)
    # Render it.
    self.response.set_status(status)
    self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
    self.response.write(template.render(path, full_env))
  def authentication_error(self, error):
    """Shows 'Access denied' page."""
    env = {
      'page_title': 'Access Denied',
      'error': error,
    }
    self.reply('auth/access_denied.html', env=env, status=401)
  def authorization_error(self, error):
    """Redirects to login or shows 'Access Denied' page."""
    # Not authenticated or used IP whitelist for auth -> redirect to login.
    # Bots don't use UI, and users should always use real accounts.
    ident = api.get_current_identity()
    if ident.is_anonymous or ident.is_bot:
      self.redirect(users.create_login_url(self.request.path))
      return
    # Admin group is empty -> redirect to bootstrap procedure to create it.
    if model.is_empty_group(model.ADMIN_GROUP):
      self.redirect_to('bootstrap')
      return
    # No access.
    env = {
      'page_title': 'Access Denied',
      'error': error,
    }
    self.reply('auth/access_denied.html', env=env, status=403)
class MainHandler(UIHandler):
  """Redirects to first navbar tab."""
  @redirect_ui_on_replica
  @api.require(api.is_admin)
  def get(self):
    # configure_ui must have registered at least one tab.
    assert _ui_navbar_tabs
    self.redirect(_ui_navbar_tabs[0].navbar_tab_url)
class BootstrapHandler(UIHandler):
  """Creates Administrators group (if necessary) and adds current caller to it.
  Requires Appengine level Admin access for its handlers, since Administrators
  group may not exist yet. Used to bootstrap a new service instance.
  """
  @forbid_ui_on_replica
  @api.require(users.is_current_user_admin)
  def get(self):
    # Confirmation page; the actual group mutation happens in post().
    env = {
      'page_title': 'Bootstrap',
      'admin_group': model.ADMIN_GROUP,
    }
    self.reply('auth/bootstrap.html', env)
  @forbid_ui_on_replica
  @api.require(users.is_current_user_admin)
  def post(self):
    # bootstrap_group reports whether the caller was newly added.
    added = model.bootstrap_group(
        model.ADMIN_GROUP, [api.get_current_identity()],
        'Users that can manage groups')
    env = {
      'page_title': 'Bootstrap',
      'admin_group': model.ADMIN_GROUP,
      'added': added,
    }
    self.reply('auth/bootstrap_done.html', env)
class LinkToPrimaryHandler(UIHandler):
  """A page with confirmation of Primary <-> Replica linking request.
  URL to that page is generated by a Primary service.
  """
  def decode_link_ticket(self):
    """Extracts ServiceLinkTicket from 't' GET parameter."""
    try:
      return replication.decode_link_ticket(
          self.request.get('t').encode('ascii'))
    except (KeyError, ValueError):
      # Missing or malformed ticket -> client error.
      self.abort(400)
      return
  @forbid_ui_on_replica
  @api.require(users.is_current_user_admin)
  def get(self):
    # Show a confirmation page describing the linking request.
    ticket = self.decode_link_ticket()
    env = {
      'generated_by': ticket.generated_by,
      'page_title': 'Switch',
      'primary_id': ticket.primary_id,
      'primary_url': ticket.primary_url,
    }
    self.reply('auth/linking.html', env)
  @forbid_ui_on_replica
  @api.require(users.is_current_user_admin)
  def post(self):
    # Actually switch this service into Replica mode.
    ticket = self.decode_link_ticket()
    success = True
    error_msg = None
    try:
      replication.become_replica(ticket, api.get_current_identity())
    except replication.ProtocolError as exc:
      success = False
      # NOTE: exc.message is Python-2-only; fine for this GAE runtime.
      error_msg = exc.message
    env = {
      'error_msg': error_msg,
      'page_title': 'Switch',
      'primary_id': ticket.primary_id,
      'primary_url': ticket.primary_url,
      'success': success,
    }
    self.reply('auth/linking_done.html', env)
class ChangeLogHandler(UIHandler):
  """Page with a log of changes to some groups."""

  @redirect_ui_on_replica
  @api.require(api.is_admin)
  def get(self):
    env = {
        'js_file': '/auth/static/js/change_log.js',
        'navbar_tab_id': 'groups',
        # Fixed user-visible typo: was 'Chanage Log'.
        'page_title': 'Change Log',
    }
    self.reply('auth/change_log.html', env)
class UINavbarTabHandler(UIHandler):
  """Handler for a navbar tab page.

  Subclasses override the class attributes below and get registered in
  _ui_navbar_tabs to show up in the navigation bar.
  """
  # URL to the tab (relative to site root).
  # BUG FIX: this attribute was misspelled 'nvabar_tab_url', so the default
  # for 'navbar_tab_url' (the name subclasses override and callers read)
  # did not exist on the base class.
  navbar_tab_url = None
  # Deprecated misspelled alias, kept for backward compatibility with any
  # existing external references.
  nvabar_tab_url = None
  # ID of the tab, will be used in DOM.
  navbar_tab_id = None
  # Title of the tab, will be used in tab title and page title.
  navbar_tab_title = None
  # Relative URL to CSS file with tab's styles.
  css_file = None
  # Relative URL to javascript file with tab's logic.
  js_file_url = None
  # Path to a Jinja2 template with tab's markup.
  template_file = None

  @redirect_ui_on_replica
  @api.require(api.is_admin)
  def get(self):
    """Renders page HTML to HTTP response stream."""
    env = {
        'css_file': self.css_file,
        'js_file': self.js_file_url,
        'navbar_tab_id': self.navbar_tab_id,
        'page_title': self.navbar_tab_title,
    }
    self.reply(self.template_file, env)
################################################################################
## Default tabs.
class GroupsHandler(UINavbarTabHandler):
  """Page with Groups management."""
  # Served at /auth/groups; markup in auth/groups.html, logic in groups.js.
  navbar_tab_url = '/auth/groups'
  navbar_tab_id = 'groups'
  navbar_tab_title = 'Groups'
  css_file = '/auth/static/css/groups.css'
  js_file_url = '/auth/static/js/groups.js'
  template_file = 'auth/groups.html'
class OAuthConfigHandler(UINavbarTabHandler):
  """Page with OAuth configuration."""
  # Served at /auth/oauth_config; uses the default tab styling (no css_file).
  navbar_tab_url = '/auth/oauth_config'
  navbar_tab_id = 'oauth_config'
  navbar_tab_title = 'OAuth'
  js_file_url = '/auth/static/js/oauth_config.js'
  template_file = 'auth/oauth_config.html'
class IPWhitelistsHandler(UINavbarTabHandler):
  """Page with IP whitelists configuration."""
  # Served at /auth/ip_whitelists; uses the default tab styling (no css_file).
  navbar_tab_url = '/auth/ip_whitelists'
  navbar_tab_id = 'ip_whitelists'
  navbar_tab_title = 'IP Whitelists'
  js_file_url = '/auth/static/js/ip_whitelists.js'
  template_file = 'auth/ip_whitelists.html'
# Register them as default tabs.
# Tuple order defines tab order in the navigation bar; the first entry is the
# default landing page.
_ui_navbar_tabs = (GroupsHandler, OAuthConfigHandler, IPWhitelistsHandler)
| |
#!/usr/bin/env python3
#
# This file is part of the MicroPython project, http://micropython.org/
#
# The MIT License (MIT)
#
# Copyright (c) 2021 Philipp Ebensberger
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Evaluate FlexRAM configuration and generate startup code."""
import re
import argparse
# Regex for linker script configuration
ocram_regex = r"^\s*ocrm_size\s*=\s*(?P<size>.*);"
dtcm_regex = r"^\s*dtcm_size\s*=\s*(?P<size>.*);"
itcm_regex = r"^\s*itcm_size\s*=\s*(?P<size>.*);"
# Regex for GPR register base define in NXP HAL
gpr_base_regex = r"^.*IOMUXC_GPR_BASE\s*\((?P<base_addr>\w*)u\)"
# Regex for FlexRAM parameters in NXP HAL
fsl_ram_bank_size_regex = r"^.*FSL_FEATURE_FLEXRAM_INTERNAL_RAM_BANK_SIZE\s*\((?P<size>\w*)\)"
fsl_bank_nbr_regex = (
    r"^.*FSL_FEATURE_FLEXRAM_INTERNAL_RAM_TOTAL_BANK_NUMBERS\s*\((?P<number>\w*)\)"
)

# According to AN12077:
#     The minimum configuration of OCRAM is 64 KB. This is required
#     due to ROM code requires at least 64 KB of RAM for its execution.
#     2.1.1.1. Static configuration - Page 4
ocram_min_size = 0x00010000  # 64 KB


def _search_or_raise(regex, text, description):
    """Return the match of regex in text, or raise ValueError naming what is missing."""
    match = re.search(regex, text, re.MULTILINE)
    if match is None:
        # Previously a missing definition crashed with AttributeError on
        # None.group(); raise a descriptive error instead.
        raise ValueError("Could not find {} definition in input files!".format(description))
    return match


# Value parser
def mimxrt_default_parser(defines_file, features_file, ld_script):
    """Extract the FlexRAM configuration from linker script and NXP HAL headers.

    Args:
        defines_file: path to the MCU defines header (provides IOMUXC_GPR_BASE).
        features_file: path to the MCU features header (provides FlexRAM bank
            size and bank count).
        ld_script: path to the aggregated linker script (provides the OCRAM,
            DTCM and ITCM section sizes).

    Returns:
        dict with keys ocram_size, dtcm_size, itcm_size, gpr_base_addr,
        fsl_ram_bank_size, fsl_bank_nbr.

    Raises:
        ValueError: if a definition is missing or the configuration is invalid.
    """
    with open(ld_script, "r") as input_file:
        ld_str = input_file.read()
    ocram_match = _search_or_raise(ocram_regex, ld_str, "ocrm_size")
    dtcm_match = _search_or_raise(dtcm_regex, ld_str, "dtcm_size")
    itcm_match = _search_or_raise(itcm_regex, ld_str, "itcm_size")

    with open(defines_file, "r") as input_file:
        defines_str = input_file.read()
    gpr_base_match = _search_or_raise(gpr_base_regex, defines_str, "IOMUXC_GPR_BASE")

    with open(features_file, "r") as input_file:
        features_str = input_file.read()
    bank_size_match = _search_or_raise(
        fsl_ram_bank_size_regex, features_str, "FlexRAM bank size"
    )
    bank_nbr_match = _search_or_raise(
        fsl_bank_nbr_regex, features_str, "FlexRAM bank number"
    )

    extract = {
        # Linker script sizes are hex literals; HAL feature values are decimal.
        "ocram_size": int(ocram_match.group("size"), 16),
        "dtcm_size": int(dtcm_match.group("size"), 16),
        "itcm_size": int(itcm_match.group("size"), 16),
        "gpr_base_addr": int(gpr_base_match.group("base_addr"), 16),
        "fsl_ram_bank_size": int(bank_size_match.group("size")),
        "fsl_bank_nbr": int(bank_nbr_match.group("number")),
    }

    # Evaluate configuration
    if extract["ocram_size"] < ocram_min_size:
        raise ValueError("OCRAM size must be at least {:08X}!".format(ocram_min_size))
    if (extract["ocram_size"] % extract["fsl_ram_bank_size"]) != 0:
        raise ValueError("Configuration invalid!")
    # DTCM/ITCM must be a multiple of the bank size (32k) or one of 4k/8k/16k.
    if extract["dtcm_size"] != 0x0:
        if extract["dtcm_size"] % extract["fsl_ram_bank_size"] != 0:
            if extract["dtcm_size"] not in (0x00000000, 0x00001000, 0x00002000, 0x00004000):
                raise ValueError("Configuration invalid!")
    if extract["itcm_size"] != 0x0:
        if extract["itcm_size"] % extract["fsl_ram_bank_size"] != 0:
            if extract["itcm_size"] not in (0x00000000, 0x00001000, 0x00002000, 0x00004000):
                raise ValueError("Configuration invalid!")
    #
    return extract
# Code generators
def mimxrt_default_gen_code(extract_dict):
    """Emit assembler .equ defines for the default FlexRAM/GPR configuration.

    Prints the IOMUXC GPR14/16/17 register addresses and the GPR17 value whose
    2-bit fields select each FlexRAM bank's use (01=OCRAM, 10=DTCM, 11=ITCM).

    Raises:
        ValueError: if the requested sizes exceed the available FlexRAM.
    """
    bank_size = extract_dict["fsl_ram_bank_size"]
    avail_flexram = bank_size * extract_dict["fsl_bank_nbr"]
    requested = (
        extract_dict["ocram_size"] + extract_dict["dtcm_size"] + extract_dict["itcm_size"]
    )
    if requested > avail_flexram:
        raise ValueError("Configuration exceeds available FlexRAM!")
    # One 2-bit selector per (partial) bank; sizes below a full bank still
    # consume a whole bank, hence the ceiling division.
    flexram_bank_cfg = "0b" + "".join(
        code * ((size + bank_size - 1) // bank_size)
        for size, code in (
            (extract_dict["ocram_size"], "01"),
            (extract_dict["dtcm_size"], "10"),
            (extract_dict["itcm_size"], "11"),
        )
    )
    # Generate GPR Register config
    print(".equ __iomux_gpr14_adr, 0x{:08X}".format(extract_dict["gpr_base_addr"] + 0x38))
    print(".equ __iomux_gpr16_adr, 0x{:08X}".format(extract_dict["gpr_base_addr"] + 0x40))
    print(".equ __iomux_gpr17_adr, 0x{:08X}".format(extract_dict["gpr_base_addr"] + 0x44))
    print(
        ".equ __iomux_gpr17_value, 0x{:08X} /* {}k OCRAM, {}k DTCM, {}k ITCM */".format(
            int(flexram_bank_cfg, 2),
            extract_dict["ocram_size"] // 1024,
            extract_dict["dtcm_size"] // 1024,
            extract_dict["itcm_size"] // 1024,
        )
    )
def mimxrt_106x_gen_code(extract_dict):
    """Emit assembler .equ defines for the MIMXRT106x FlexRAM/GPR configuration.

    On 106x parts the first 512kB of OCRAM are dedicated (not FlexRAM-backed),
    so only ocram_size modulo 512kB competes with DTCM/ITCM for FlexRAM banks.

    Raises:
        ValueError: if the requested sizes exceed the available FlexRAM.
    """
    bank_size = extract_dict["fsl_ram_bank_size"]
    avail_flexram = bank_size * extract_dict["fsl_bank_nbr"]
    # 512kB OCRAM are not part of FlexRAM configurable memory
    flexram_configurable_ocram = extract_dict["ocram_size"] % 524288
    requested = (
        flexram_configurable_ocram + extract_dict["dtcm_size"] + extract_dict["itcm_size"]
    )
    if requested > avail_flexram:
        raise ValueError("Configuration exceeds available FlexRAM!")
    # One 2-bit selector per (partial) bank: 01=OCRAM, 10=DTCM, 11=ITCM.
    flexram_bank_cfg = "0b" + "".join(
        code * ((size + bank_size - 1) // bank_size)
        for size, code in (
            (flexram_configurable_ocram, "01"),
            (extract_dict["dtcm_size"], "10"),
            (extract_dict["itcm_size"], "11"),
        )
    )
    # Generate GPR Register config
    print(".equ __iomux_gpr14_adr, 0x{:08X}".format(extract_dict["gpr_base_addr"] + 0x38))
    print(".equ __iomux_gpr16_adr, 0x{:08X}".format(extract_dict["gpr_base_addr"] + 0x40))
    print(".equ __iomux_gpr17_adr, 0x{:08X}".format(extract_dict["gpr_base_addr"] + 0x44))
    print(
        ".equ __iomux_gpr17_value, 0x{:08X} /* {}k OCRAM (512k OCRAM, {}k from FlexRAM), {}k DTCM, {}k ITCM */".format(
            int(flexram_bank_cfg, 2),
            extract_dict["ocram_size"] // 1024,
            flexram_configurable_ocram // 1024,
            extract_dict["dtcm_size"] // 1024,
            extract_dict["itcm_size"] // 1024,
        )
    )
def main(defines_file, features_file, ld_script, controller):
    """Run the parser/generator pair registered for *controller*.

    Raises KeyError if the controller has no registered handler pair.
    """
    handlers = {
        "MIMXRT1011": (mimxrt_default_parser, mimxrt_default_gen_code),
        "MIMXRT1021": (mimxrt_default_parser, mimxrt_default_gen_code),
        "MIMXRT1052": (mimxrt_default_parser, mimxrt_default_gen_code),
        "MIMXRT1062": (mimxrt_default_parser, mimxrt_106x_gen_code),
        "MIMXRT1064": (mimxrt_default_parser, mimxrt_106x_gen_code),
    }
    parse, generate = handlers[controller]
    generate(parse(defines_file, features_file, ld_script))
if __name__ == "__main__":
    # Command line interface; all defaults point at the MIMXRT1021 board files.
    parser = argparse.ArgumentParser(
        prog="make-flexram-ld.py",
        usage="%(prog)s [options] [command]",
        description="Evaluate FlexRAM configuration and generate startup code.",
    )
    parser.add_argument(
        "-d",
        "--defines_file",
        dest="defines_file",
        help="Path to MCU defines file",
        default="../../../lib/nxp_driver/sdk/devices/MIMXRT1021/MIMXRT1021.h",
    )
    parser.add_argument(
        "-f",
        "--features_file",
        dest="features_file",
        help="Path to MCU features file",
        default="../../../lib/nxp_driver/sdk/devices/MIMXRT1021/MIMXRT1021_features.h",
    )
    parser.add_argument(
        "-l",
        "--ld_file",
        dest="linker_file",
        help="Path to the aggregated linker-script",
        default="MIMXRT1021.ld",
    )
    parser.add_argument(
        "-c", "--controller", dest="controller", help="Controller name", default="MIMXRT1021"
    )
    #
    args = parser.parse_args()
    main(args.defines_file, args.features_file, args.linker_file, args.controller)
| |
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import pprint
import re
import sys
import unittest
import mock
from qj import qj
from qj.tests import qj_test_helper
# When True, every RegExp comparison prints a pass/FAIL line for debugging.
DEBUG_TESTS = False

# pylint: disable=line-too-long


class RegExp(object):
  """Argument matcher: compares equal to any string the pattern is found in.

  Intended for use with mock assertion helpers, e.g.
  mock_fn.assert_called_once_with(RegExp(r'pattern')).
  """

  def __init__(self, pattern, flags=0):
    self._pattern = pattern
    self._flags = flags

  def __eq__(self, other):
    found = bool(re.search(self._pattern, other, self._flags))
    if DEBUG_TESTS:
      print('%s: %s: \'%s\'' % ('pass' if found else 'FAIL', str(self), str(other)))
    return found

  def __ne__(self, other):
    return not self.__eq__(other)

  def __repr__(self):
    return '<RegExp:(%s)>' % self._pattern
class QjTest(unittest.TestCase):
  """Unit tests for qj's logging flags, keyword parameters, and return magic."""
  def setUp(self):
    """Resets qj's global configuration to known defaults before each test."""
    qj.LOG = True
    qj.LOG_FN = logging.info
    qj.MAX_FRAME_LOGS = 100
    qj.PREFIX = 'qj: '
    qj.COLOR = False
    qj._DEBUG_QJ = False
  def test_logs(self):
    """qj logs its argument once with the standard prefix and call context."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj('some log')
      mock_log_fn.assert_called_once_with(RegExp(
          r"qj: <qj_test> test_logs: 'some log' <\d+>: some log"))
  def test_logs_and_returns_arg(self):
    """qj returns its argument unchanged (same object identity) after logging."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      s_in = 'some log'
      s_out = qj(s_in)
      mock_log_fn.assert_called_once_with(RegExp(
          r'qj: <qj_test> test_logs_and_returns_arg: s_in <\d+>: some log'))
      self.assertIs(s_in, s_out)
  def test_no_logs(self):
    """Setting qj.LOG = False suppresses all logging."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG = False
      qj.LOG_FN = mock_log_fn
      qj('some log')
      mock_log_fn.assert_not_called()
  def test_no_logs_and_returns_arg(self):
    """Even with logging disabled, qj still returns its argument unchanged."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG = False
      qj.LOG_FN = mock_log_fn
      s_in = 'some log'
      s_out = qj(s_in)
      mock_log_fn.assert_not_called()
      self.assertIs(s_in, s_out)
  def test_logs_with_prefix(self):
    """qj.PREFIX replaces the default 'qj: ' prefix in log output."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.PREFIX = 'QQ: '
      qj.LOG_FN = mock_log_fn
      qj('some log')
      mock_log_fn.assert_called_once_with(RegExp(
          r"QQ: <qj_test> test_logs_with_prefix: 'some log' <\d+>: some log"))
  def test_logs_max_times(self):
    """Logging stops with a warning after MAX_FRAME_LOGS logs from one frame."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.MAX_FRAME_LOGS = 1
      qj.LOG_FN = mock_log_fn
      for _ in range(2):
        qj('some log')
      mock_log_fn.assert_has_calls([
          mock.call(RegExp(
              r"qj: <qj_test> test_logs_max_times: 'some log' <\d+>: some log")),
          mock.call(RegExp(
              r'qj: <qj_test> test_logs_max_times: Maximum per-frame logging hit \(1\).')),
      ], any_order=False)
      self.assertEqual(mock_log_fn.call_count, 2)
      # mock_log_fn.reset_mock()
      # qj('some log')
      # mock_log_fn.assert_not_called()
  def test_logs_with_pprint_str_fn(self):
    """qj.STR_FN can be swapped (e.g. to pprint.pformat) for multiline output."""
    str_fn = qj.STR_FN
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      try:
        qj.STR_FN = pprint.pformat
        foos = [dict(foo=x, bar=x % 2, baz=x % 3) for x in range(10)]
        qj(foos, 'foos', l=lambda x: x, r=foos)
        mock_log_fn.assert_has_calls(
            [
                mock.call(
                    RegExp(r"qj: <qj_test> test_logs_with_pprint_str_fn: foos <\d+>: "
                           r'\(multiline log follows\)\n'
                           r"\[\{'bar': 0, 'baz': 0, 'foo': 0\},\n \{'bar': 1, 'baz': 1, 'foo': 1\}")),
                mock.call(
                    RegExp(r'qj:\s+\(multiline log follows\)\n'
                           r"\[\{'bar': 0, 'baz': 0, 'foo': 0\},\n \{'bar': 1, 'baz': 1, 'foo': 1\}")),
                mock.call(
                    RegExp(r'qj:\s+Overridden return value: \(multiline log follows\)\n'
                           r"\[\{'bar': 0, 'baz': 0, 'foo': 0\},\n \{'bar': 1, 'baz': 1, 'foo': 1\}")),
            ],
            any_order=False)
        self.assertEqual(mock_log_fn.call_count, 3)
      finally:
        # Restore the original STR_FN so later tests are unaffected.
        qj.STR_FN = str_fn
  def test_logs_with_x(self):
    """The x= keyword supplies the value to log explicitly."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj(x='some log')
      mock_log_fn.assert_called_once_with(RegExp(
          r"qj: <qj_test> test_logs_with_x: x='some log' <\d+>: some log"))
  def test_logs_with_s(self):
    """The s= keyword replaces the source-derived description in the log line."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj(s='some prefix', x='some log')
      mock_log_fn.assert_called_once_with(RegExp(
          r'qj: <qj_test> test_logs_with_s: some prefix <\d+>: some log'))
  def test_no_logs_with_s(self):
    """s= does not bypass the global qj.LOG = False switch."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG = False
      qj.LOG_FN = mock_log_fn
      qj(s='some prefix', x='some log')
      mock_log_fn.assert_not_called()
  def test_logs_with_s_not_a_string(self):
    """A non-string s= value is stringified into the log description."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj(s=42, x='some log')
      mock_log_fn.assert_called_once_with(RegExp(
          r'qj: <qj_test> test_logs_with_s_not_a_string: 42 <\d+>: some log'))
  def test_logs_with_b(self):
    """The b= keyword gates logging per call: True logs, False is silent."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj(b=True, x='some log')
      mock_log_fn.assert_called_once_with(RegExp(
          r"qj: <qj_test> test_logs_with_b: b=True, x='some log' <\d+>: some log"))
      mock_log_fn.reset_mock()
      qj(b=False, x='some log')
      mock_log_fn.assert_not_called()
  def test_no_logs_with_b(self):
    """b=True does not override the global qj.LOG = False switch."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG = False
      qj.LOG_FN = mock_log_fn
      qj(b=True, x='some log')
      mock_log_fn.assert_not_called()
  def test_logs_with_l(self):
    """The l= lambda's result is logged on a second, indented line."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj(l=lambda _: 'some extra info', x='some log')
      mock_log_fn.assert_has_calls([
          mock.call(RegExp(
              r"qj: <qj_test> test_logs_with_l: l=lambda _: 'some extra info', x='some log' <\d+>: some log")),
          mock.call(RegExp(
              r'qj:\s+some extra info')),
      ], any_order=False)
      self.assertEqual(mock_log_fn.call_count, 2)
  def test_logs_with_l_passes_x(self):
    """The l= lambda receives the logged value x as its argument."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      s = 'some log'
      qj(l=lambda x: self.assertIs(x, s), x=s)
      mock_log_fn.assert_has_calls([
          mock.call(RegExp(
              r'qj: <qj_test> test_logs_with_l_passes_x: l=lambda x: self.assertIs\(x, s\), x=s <\d+>: some log')),
          mock.call(RegExp(
              r'qj:\s+None')),
      ], any_order=False)
      self.assertEqual(mock_log_fn.call_count, 2)
  def test_no_logs_with_l(self):
    """The l= lambda is suppressed by both qj.LOG = False and b=False."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG = False
      qj.LOG_FN = mock_log_fn
      qj(l=lambda _: 'some extra info', x='some log')
      mock_log_fn.assert_not_called()
      qj.LOG = True
      qj(b=False, l=lambda _: 'some extra info', x='some log')
      mock_log_fn.assert_not_called()
  def test_logs_with_d(self):
    """d=True logs and then drops into the debugger via qj.DEBUG_FN."""
    with mock.patch('logging.info') as mock_log_fn:
      with mock.patch('ipdb.set_trace') as mock_debug_fn:
        qj.LOG_FN = mock_log_fn
        qj.DEBUG_FN = mock_debug_fn
        qj(d=True, x='some log')
        mock_log_fn.assert_called_once_with(RegExp(
            r"qj: <qj_test> test_logs_with_d: d=True, x='some log' <\d+>: some log"))
        mock_debug_fn.assert_called_once()
  def test_no_logs_with_d(self):
    """The debugger hook is suppressed by qj.LOG = False and by b=False."""
    with mock.patch('logging.info') as mock_log_fn:
      with mock.patch('ipdb.set_trace') as mock_debug_fn:
        qj.LOG = False
        qj.LOG_FN = mock_log_fn
        qj(d=True, x='some log')
        mock_log_fn.assert_not_called()
        mock_debug_fn.assert_not_called()
        qj.LOG = True
        qj(b=False, d=True, x='some log')
        mock_log_fn.assert_not_called()
        mock_debug_fn.assert_not_called()
  def test_logs_with_p(self):
    """p=True logs a second line listing the value's public properties."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj(p=True, x='some log')
      mock_log_fn.assert_has_calls([
          mock.call(RegExp(
              r"qj: <qj_test> test_logs_with_p: p=True, x='some log' <\d+>: some log")),
          mock.call(RegExp(
              r'qj:\s+Public properties:\n'
              r'\s+__init__\n'
              r'\s+capitalize\n')),
      ], any_order=False)
      self.assertEqual(mock_log_fn.call_count, 2)
  def test_no_logs_with_p(self):
    """The p= property listing is suppressed when qj.LOG is False."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG = False
      qj.LOG_FN = mock_log_fn
      qj(p=True, x='some log')
      mock_log_fn.assert_not_called()
  def test_logs_with_p_arg_spec(self):
    """The p= property listing includes method argument specs with defaults."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn

      class TestClass(object):
        def function_with_args(self, a, b=None, c=True, d='default value'):
          pass

        def __str__(self):
          return 'TestClass object'

      qj(p=True, x=TestClass())
      mock_log_fn.assert_has_calls([
          mock.call(RegExp(
              r'qj: <qj_test> test_logs_with_p_arg_spec: p=True, x=TestClass\(\) <\d+>: '
              r'TestClass object')),
          mock.call(RegExp(
              r'qj:\s+Public properties:\n'
              r'\s+__init__\n'
              r"\s+function_with_args\((self, )?a, b=None, c=True, d='default value'\)"
          )),
      ], any_order=False)
      self.assertEqual(mock_log_fn.call_count, 2)
  def test_logs_with_t(self):
    """t=True wraps a tensor-like value in a tf.Print operation.

    A fake 'tensorflow' module is injected into sys.modules so the test runs
    without TensorFlow installed.
    """
    with mock.patch('logging.info') as mock_log_fn:
      class TestTensorClass(object):
        # Looks like a tensorflow object to qj's module-name sniffing.
        @property
        def __module__(self):
          return 'tensorflow_test_module'
        name = 'foo'

      if 'tensorflow' not in sys.modules:
        sys.modules['tensorflow'] = TestTensorClass()
        sys.modules['tensorflow'].Print = lambda s: s
        sys.modules['tensorflow'].shape = lambda _: tuple()

      with mock.patch('tensorflow.Print') as tf_print_fn:
        x = TestTensorClass()
        qj.LOG_FN = mock_log_fn
        qj(t=True, x=x)
        mock_log_fn.assert_has_calls([
            mock.call(RegExp(
                r'qj: <qj_test> test_logs_with_t: t=True, x=x <\d+>: '
                r'<TestTensorClass object at ')),
            mock.call(RegExp(
                r'qj:\s+Wrapping return value in tf.Print operation.')),
        ], any_order=False)
        self.assertEqual(mock_log_fn.call_count, 2)
        tf_print_fn.assert_called_once_with(
            x, [tuple(), x],
            summarize=qj.MAX_FRAME_LOGS,
            first_n=qj.MAX_FRAME_LOGS,
            name=RegExp(r'qj_print_test_logs_with_t_\d+'),
            message=RegExp(
                r'qj: <qj_test> test_logs_with_t: t=True, x=x <\d+>'))
        self.assertEqual(tf_print_fn.call_count, 1)
  def test_logs_with_t_x_not_a_tensor(self):
    """t=True on a non-tensor value logs normally without tf.Print wrapping."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj(t=True, x='not a tensor')
      mock_log_fn.assert_called_once_with(
          RegExp(
              r"qj: <qj_test> test_logs_with_t_x_not_a_tensor: t=True, x='not a tensor' <\d+>: not a tensor"
          ))
  def test_logs_with_r(self):
    """r= overrides qj's return value and logs the override."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      alternative_return_value = 'some other return value'
      out = qj(r=alternative_return_value, x='some log')
      mock_log_fn.assert_has_calls([
          mock.call(RegExp(
              r"qj: <qj_test> test_logs_with_r: r=alternative_return_value, x='some log' <\d+>: some log")),
          mock.call(RegExp(
              r'qj:\s+Overridden return value: some other return value')),
      ], any_order=False)
      self.assertEqual(mock_log_fn.call_count, 2)
      self.assertIs(out, alternative_return_value)
  def test_logs_with_r_when_r_is_none(self):
    """r=None is honored as an override (None is a valid replacement value)."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      alternative_return_value = None
      out = qj(r=alternative_return_value, x='some log')
      mock_log_fn.assert_has_calls([
          mock.call(RegExp(
              r"qj: <qj_test> test_logs_with_r_when_r_is_none: r=alternative_return_value, x='some log' <\d+>: some log")),
          mock.call(RegExp(
              r'qj:\s+Overridden return value: None')),
      ], any_order=False)
      self.assertEqual(mock_log_fn.call_count, 2)
      self.assertIs(out, alternative_return_value)
  def test_no_logs_with_r(self):
    """With logging disabled, r= is ignored and x is returned unchanged."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG = False
      qj.LOG_FN = mock_log_fn
      alternative_return_value = 'some other return value'
      input_value = 'some log'
      out = qj(r=alternative_return_value, x=input_value)
      mock_log_fn.assert_not_called()
      self.assertIs(out, input_value)
  def test_logs_with_indentation(self):
    """Repeated calls from the same source line log consistently across loops."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      for i in range(2):
        qj('some log %d' % i)
        qj('some log %d' % i)
        qj('some log %d' % i)
      mock_log_fn.assert_has_calls([
          mock.call(RegExp(
              r"qj: <qj_test> test_logs_with_indentation: 'some log %d' % i <\d+>: "
              "some log 0")),
          mock.call(RegExp(
              r"qj: <qj_test> test_logs_with_indentation: 'some log %d' % i <\d+>: "
              "some log 0")),
          mock.call(RegExp(
              r"qj: <qj_test> test_logs_with_indentation: 'some log %d' % i <\d+>: "
              "some log 0")),
          mock.call(RegExp(
              r"qj: <qj_test> test_logs_with_indentation: 'some log %d' % i <\d+>: "
              "some log 1")),
          mock.call(RegExp(
              r"qj: <qj_test> test_logs_with_indentation: 'some log %d' % i <\d+>: "
              "some log 1")),
          mock.call(RegExp(
              r"qj: <qj_test> test_logs_with_indentation: 'some log %d' % i <\d+>: "
              "some log 1")),
      ], any_order=False)
      self.assertEqual(mock_log_fn.call_count, 6)
  def test_logs_in_list_comp(self):
    """qj logs correctly when invoked from inside a list comprehension."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      _ = [qj('some log') for _ in range(2)]
      mock_log_fn.assert_has_calls([
          mock.call(RegExp(
              r"qj: <qj_test> test_logs_in_list_comp: 'some log' <\d+>: some log")),
          mock.call(RegExp(
              r"qj: <qj_test> test_logs_in_list_comp: 'some log' <\d+>: some log")),
      ], any_order=False)
      self.assertEqual(mock_log_fn.call_count, 2)
  def test_logs_in_dict_comp(self):
    """qj logs correctly when invoked from inside a dict comprehension."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      _ = {i: qj('some log') for i in range(2)}
      mock_log_fn.assert_has_calls([
          mock.call(RegExp(
              r"qj: <qj_test> test_logs_in_dict_comp: 'some log' <\d+>: some log")),
          mock.call(RegExp(
              r"qj: <qj_test> test_logs_in_dict_comp: 'some log' <\d+>: some log")),
      ], any_order=False)
      self.assertEqual(mock_log_fn.call_count, 2)
  def test_expected_locals_mods(self):
    """qj adds only its magic sentinel to the caller's locals, mutating nothing."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      k = None
      local_var_names = None
      # Can't store the dictionary in itself, it turns out...
      local_vars = {k: v for k, v in locals().items() if k != 'local_vars'}
      qj('some log')
      mock_log_fn.assert_called_once_with(RegExp(
          r"qj: <qj_test> test_expected_locals_mods: 'some log' <\d+>: some log"))
      # Make sure that none of the existing variables got modified.
      self.assertEqual(local_vars, {k: v for k, v in locals().items()
                                    if (k != '__qj_magic_wocha_doin__' and
                                        k != 'local_vars')})
      # Make sure that only the new variable name is added.
      local_var_names = set([k for k in local_vars.keys()])
      local_var_names.add('__qj_magic_wocha_doin__')
      local_var_names.add('local_vars')
      self.assertEqual(local_var_names, set([k for k in locals().keys()]))
  def test_make_global(self):
    """qj.make_global() installs qj into __builtins__ under both run modes.

    __builtins__ is a module when the file runs as a script and a dict under
    test runners, hence the two access paths below.
    """
    if hasattr(__builtins__, 'qj'):
      delattr(__builtins__, 'qj')
      self.assertRaises(AttributeError, lambda: getattr(__builtins__, 'qj'))
    qj.make_global()
    if __name__ == '__main__':
      # Running with `$ python qj/tests/qj_tests.py` goes down this path.
      self.assertEqual(qj, __builtins__.__dict__['qj'])
    else:
      # Running with `$ nosetests` goes down this path.
      self.assertEqual(qj, __builtins__['qj'])
  def test_multiline(self):
    """Values containing newlines are logged with a 'multiline log follows' marker."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj('some\nlog')
      mock_log_fn.assert_called_once_with(RegExp(
          r"qj: <qj_test> test_multiline: 'some\\nlog' <\d+>: \(multiline log follows\)\n"
          "some\nlog"))
  def test_multiline_with_l(self):
    """Multiline output from the l= lambda also gets the multiline marker."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj(l=lambda _: 'some\nextra\ninfo', x='some\nlog')
      mock_log_fn.assert_has_calls(
          [
              mock.call(
                  RegExp(r"qj: <qj_test> test_multiline_with_l: l=lambda _: 'some\\nextra\\ninfo', x='some\\nlog' <\d+>: "
                         r'\(multiline log follows\)\nsome\nlog')),
              mock.call(
                  RegExp(r'qj:\s+\(multiline log follows\)\nsome\nextra\ninfo')),
          ],
          any_order=False)
      self.assertEqual(mock_log_fn.call_count, 2)
  def test_multiline_with_r(self):
    """A multiline r= override value is logged with the multiline marker."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      alternative_return_value = 'some other\nreturn value'
      out = qj(r=alternative_return_value, x='some\nlog')
      mock_log_fn.assert_has_calls([
          mock.call(RegExp(
              r"qj: <qj_test> test_multiline_with_r: r=alternative_return_value, x='some\\nlog' <\d+>: "
              r'\(multiline log follows\)\nsome\nlog')),
          mock.call(RegExp(
              r'qj:\s+Overridden return value: \(multiline log follows\)\n'
              r'some other\nreturn value')),
      ], any_order=False)
      self.assertEqual(mock_log_fn.call_count, 2)
      self.assertIs(out, alternative_return_value)
  def test_r_magic_works_across_modules(self):
    """The r= override works when qj is called from another module, nested or not."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj.make_global()
      input_value = 'some log'
      out = qj_test_helper.LogToQJ(x=input_value)
      mock_log_fn.assert_called_once_with(RegExp(
          r'qj: <qj_test_helper> LogToQJ: \*\*kwargs <\d+>: some log'))
      self.assertIs(out, input_value)
      mock_log_fn.reset_mock()
      out = qj_test_helper.LogToQJ(x=input_value, r=None)
      mock_log_fn.assert_has_calls([
          mock.call(RegExp(
              r'qj: <qj_test_helper> LogToQJ: \*\*kwargs <\d+>: some log')),
          mock.call(RegExp(
              r'qj:\s+Overridden return value: None')),
      ], any_order=False)
      self.assertEqual(mock_log_fn.call_count, 2)
      self.assertIsNone(out)
      mock_log_fn.reset_mock()
      # Same checks through the doubly-nested helper.
      out = qj_test_helper.LogToQJQJ(x=input_value)
      mock_log_fn.assert_called_once_with(RegExp(
          r'qj: <qj_test_helper> LogToQJQJ: \*\*kwargs <\d+>: some log'))
      self.assertIs(out, input_value)
      mock_log_fn.reset_mock()
      out = qj_test_helper.LogToQJQJ(x=input_value, r=None)
      mock_log_fn.assert_has_calls([
          mock.call(RegExp(
              r'qj: <qj_test_helper> LogToQJQJ: \*\*kwargs <\d+>: some log')),
          mock.call(RegExp(
              r'qj:\s+Overridden return value: None')),
      ], any_order=False)
      self.assertEqual(mock_log_fn.call_count, 2)
      self.assertIsNone(out)
  def test_logs_with_positional_args(self):
    """All qj parameters can be passed positionally (x, s, l, d, p, b, ..., r)."""
    with mock.patch('logging.info') as mock_log_fn:
      with mock.patch('ipdb.set_trace') as mock_debug_fn:
        qj.LOG_FN = mock_log_fn
        qj.DEBUG_FN = mock_debug_fn
        alternative_return_value = 'some other return value'
        out = qj('some log', 'some prefix', lambda _: 'some extra info', True,
                 True, False, False, alternative_return_value, False, True,
                 False, False, False, False, False, False, False)
        mock_log_fn.assert_has_calls(
            [
                mock.call(
                    RegExp(r'qj: <qj_test> test_logs_with_positional_args: some prefix '
                           r'<\d+>: some log')),
                mock.call(
                    RegExp(r'qj:\s+some extra info')),
                mock.call(
                    RegExp(r'qj:\s+Public properties:\n')),
                mock.call(
                    RegExp(r'qj:\s+Overridden return value: some other return value')),
            ],
            any_order=False)
        self.assertEqual(mock_log_fn.call_count, 4)
        mock_debug_fn.assert_called_once()
        self.assertIs(out, alternative_return_value)
  def test_no_logs_with_positional_args(self):
    """Positional b=False disables logging, debugging, and the r= override."""
    with mock.patch('logging.info') as mock_log_fn:
      with mock.patch('ipdb.set_trace') as mock_debug_fn:
        qj.LOG_FN = mock_log_fn
        input_value = 'some log'
        alternative_return_value = 'some other return value'
        out = qj('some log', 'some prefix', lambda _: 'some extra info', True,
                 True, False, False, alternative_return_value, False, False,
                 False, False, False, False, False, False, False)
        mock_log_fn.assert_not_called()
        mock_debug_fn.assert_not_called()
        self.assertIs(out, input_value)
  def test_logs_max_times_ends_with_warning(self):
    """After the per-frame limit, extras (l/p/d/r) stop firing and a warning logs."""
    with mock.patch('logging.info') as mock_log_fn:
      with mock.patch('ipdb.set_trace') as mock_debug_fn:
        qj.LOG_FN = mock_log_fn
        qj.MAX_FRAME_LOGS = 1
        original_return_value = 'some log'
        alternative_return_value = 'some other return value'
        out = []
        for _ in range(2):
          out.append(qj(original_return_value, 'some prefix', lambda _: 'some extra info', d=True,
                        p=True, r=alternative_return_value, b=True))
        qj('other log', 'other prefix')
        mock_log_fn.assert_has_calls(
            [
                mock.call(
                    RegExp(r'qj: <qj_test> test_logs_max_times_ends_with_warning:'
                           r' some prefix <\d+>: some log')),
                mock.call(
                    RegExp(r'qj:\s+some extra info')),
                mock.call(
                    RegExp(r'qj:\s+Public properties:\n')),
                mock.call(
                    RegExp(r'qj:\s+Overridden return value: some other return value')),
                mock.call(
                    RegExp(r'qj: <qj_test> test_logs_max_times_ends_with_warning:'
                           r' Maximum per-frame logging hit \(1\)\.')),
                mock.call(
                    RegExp(r'qj: <qj_test> test_logs_max_times_ends_with_warning:'
                           r' other prefix <\d+>: other log')),
                mock.call(
                    RegExp(r'qj: <qj_test> test_logs_max_times_ends_with_warning:'
                           r' Maximum per-frame logging hit \(1\)\.')),
            ],
            any_order=False)
        self.assertEqual(mock_log_fn.call_count, 7)
        mock_debug_fn.assert_called_once()
        # Once the limit is hit, the r= override no longer applies.
        self.assertIs(out[0], alternative_return_value)
        self.assertIs(out[1], original_return_value)
  def test_logs_with_pad(self):
    """pad= brackets the log: a char repeats a separator, an int adds blank lines."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj('some log', pad='#')
      qj('some other log', pad=3)
      mock_log_fn.assert_has_calls(
          [
              mock.call(
                  RegExp(r'#+')),
              mock.call(
                  RegExp(r"qj: <qj_test> test_logs_with_pad: 'some log', pad='#' <\d+>: some log")),
              mock.call(
                  RegExp(r'#+')),
              mock.call(
                  RegExp(r'\n\n')),
              mock.call(
                  RegExp(r"qj: <qj_test> test_logs_with_pad: 'some other log', pad=3 <\d+>: some other log")),
              mock.call(
                  RegExp(r'\n\n')),
          ],
          any_order=False)
      self.assertEqual(mock_log_fn.call_count, 6)
  def test_logs_with_tictoc(self):
    """tic=/toc= log a timing pair, with toc reporting elapsed seconds."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj._tics = []  # Ensure an empty tic stack.
      qj('tic log', tic=1)
      qj('toc log', toc=1)
      mock_log_fn.assert_has_calls(
          [
              mock.call(
                  RegExp(r"qj: <qj_test> test_logs_with_tictoc: 'tic log', tic=1 <\d+>: tic log")),
              mock.call(
                  RegExp(r'qj:\s+Added tic\.')),
              mock.call(
                  RegExp(r"qj: <qj_test> test_logs_with_tictoc: 'toc log', toc=1 <\d+>: toc log")),
              mock.call(
                  RegExp(r"qj:\s+\d\.\d\d\d\d seconds since 'tic log', tic=1\.")),
          ],
          any_order=False)
      self.assertEqual(mock_log_fn.call_count, 4)
  def test_logs_with_tictoc_no_x(self):
    """tic=/toc= without a value to log still produce timing log lines."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj._tics = []  # Ensure an empty tic stack.
      qj(tic=1)
      qj(toc=1)
      mock_log_fn.assert_has_calls(
          [
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_tictoc_no_x: tic=1 <\d+>: Adding tic\.')),
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_tictoc_no_x: toc=1 <\d+>: Computing toc\.')),
              mock.call(
                  RegExp(r'qj:\s+\d\.\d\d\d\d seconds since tic=1\.')),
          ],
          any_order=False)
      self.assertEqual(mock_log_fn.call_count, 3)
  def test_logs_with_tictoc_list_comp(self):
    """tic/toc pairs work per-iteration inside a list comprehension."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj._tics = []  # Ensure an empty tic stack.
      _ = [qj(x, tic=1, toc=1) for x in range(2)]
      qj(toc=1)
      mock_log_fn.assert_has_calls(
          [
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_tictoc_list_comp: x, tic=1, toc=1 <\d+>: 0')),
              mock.call(
                  RegExp(r'qj:\s+Added tic\.')),
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_tictoc_list_comp: x, tic=1, toc=1 <\d+>: 1')),
              mock.call(
                  RegExp(r'qj:\s+\d\.\d\d\d\d seconds since x, tic=1, toc=1\.')),
              mock.call(
                  RegExp(r'qj:\s+Added tic\.')),
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_tictoc_list_comp: \s?toc=1 <\d+>: Computing toc\.')),
              mock.call(
                  RegExp(r'qj:\s+\d\.\d\d\d\d seconds since x, tic=1, toc=1\.')),
          ],
          any_order=False)
      self.assertEqual(mock_log_fn.call_count, 7)
def test_logs_with_tictoc_nested(self):
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
qj._tics = [] # Ensure an empty tic stack.
qj(tic=1)
qj(tic=2)
qj(toc=1)
qj(toc=1)
mock_log_fn.assert_has_calls(
[
mock.call(
RegExp(r'qj: <qj_test> test_logs_with_tictoc_nested: tic=1 <\d+>: Adding tic\.')),
mock.call(
RegExp(r'qj: <qj_test> test_logs_with_tictoc_nested: tic=2 <\d+>: Adding tic\.')),
mock.call(
RegExp(r'qj: <qj_test> test_logs_with_tictoc_nested: toc=1 <\d+>: Computing toc\.')),
mock.call(
RegExp(r'qj:\s+\d\.\d\d\d\d seconds since tic=2\.')),
mock.call(
RegExp(r'qj: <qj_test> test_logs_with_tictoc_nested: toc=1 <\d+>: Computing toc\.')),
mock.call(
RegExp(r'qj:\s+\d\.\d\d\d\d seconds since tic=1\.')),
],
any_order=False)
self.assertEqual(mock_log_fn.call_count, 6)
def test_logs_with_tictoc_negative_toc(self):
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
qj._tics = [] # Ensure an empty tic stack.
qj(tic=1)
qj(tic=2)
qj(toc=-1)
mock_log_fn.assert_has_calls(
[
mock.call(
RegExp(r'qj: <qj_test> test_logs_with_tictoc_negative_toc: tic=1 <\d+>: Adding tic\.')),
mock.call(
RegExp(r'qj: <qj_test> test_logs_with_tictoc_negative_toc: tic=2 <\d+>: Adding tic\.')),
mock.call(
RegExp(r'qj: <qj_test> test_logs_with_tictoc_negative_toc: toc=-1 <\d+>: Computing toc\.')),
mock.call(
RegExp(r'qj:\s+\d\.\d\d\d\d seconds since tic=2\.')),
mock.call(
RegExp(r'qj:\s+\d\.\d\d\d\d seconds since tic=1\.')),
],
any_order=False)
self.assertEqual(mock_log_fn.call_count, 5)
self.assertEqual(len(qj._tics), 0)
def test_logs_with_tictoc_across_fn_calls(self):
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
qj._tics = [] # Ensure an empty tic stack.
def tictoc_across_fn_calls():
qj(tic=2)
qj(tic=1)
tictoc_across_fn_calls()
qj(toc=-1)
mock_log_fn.assert_has_calls(
[
mock.call(
RegExp(r'qj: <qj_test> test_logs_with_tictoc_across_fn_calls: tic=1 <\d+>: Adding tic\.')),
mock.call(
RegExp(r'qj: <qj_test> tictoc_across_fn_calls: tic=2 <\d+>: Adding tic\.')),
mock.call(
RegExp(r'qj: <qj_test> test_logs_with_tictoc_across_fn_calls: toc=-1 <\d+>: Computing toc\.')),
mock.call(
RegExp(r'qj:\s+\d\.\d\d\d\d seconds since tic=2\.')),
mock.call(
RegExp(r'qj:\s+\d\.\d\d\d\d seconds since tic=1\.')),
],
any_order=False)
self.assertEqual(mock_log_fn.call_count, 5)
self.assertEqual(len(qj._tics), 0)
  def test_logs_with_tictoc_no_unmatched_tic(self):
    """A toc without any pending tic logs a single 'no unmatched tic' message."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj._tics = []  # Ensure an empty tic stack.
      qj(toc=1)
      # Exactly one log line is expected, and the tic stack must stay empty.
      mock_log_fn.assert_called_once_with(
          RegExp(r'qj: <qj_test> test_logs_with_tictoc_no_unmatched_tic: toc=1 <\d+>: Unable to compute toc -- no unmatched tic\.'))
      self.assertEqual(len(qj._tics), 0)
def test_logs_with_time(self):
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
def foo():
pass
qj(foo, time=1)()
mock_log_fn.assert_has_calls(
[
mock.call(
RegExp(r'qj: <qj_test> test_logs_with_time: foo, time=1 <\d+>: <function .*foo at 0x.*>')),
mock.call(
RegExp(r'qj:\s+Wrapping return value in timing function\.')),
mock.call(
RegExp(r'qj: <qj_test> test_logs_with_time: Average timing for <function .*foo at 0x.*> across 1 call <\d+>: \d\.\d\d\d\d seconds')),
],
any_order=False)
self.assertEqual(mock_log_fn.call_count, 3)
def test_logs_with_time_decorator(self):
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
@qj(time=1)
def foo():
pass
foo()
mock_log_fn.assert_has_calls(
[
mock.call(
RegExp(r'qj: <qj_test> test_logs_with_time_decorator: time=1 <\d+>: Preparing decorator to measure timing\.\.\.')),
mock.call(
RegExp(r'qj:\s+Decorating <function .*foo at 0x.*> with timing function\.')),
mock.call().__nonzero__() if sys.version_info[0] < 3 else mock.call().__bool__(), # TODO(iansf): it's unclear why this is necessary in this case.
mock.call(
RegExp(r'qj: <qj_test> test_logs_with_time_decorator: Average timing for <function .*foo at 0x.*> across 1 call <\d+>: \d\.\d\d\d\d seconds')),
],
any_order=False)
self.assertEqual(mock_log_fn.call_count, 3)
def test_logs_with_catch(self):
with mock.patch('logging.info') as mock_log_fn:
with mock.patch('ipdb.set_trace') as mock_debug_fn:
qj.LOG_FN = mock_log_fn
qj.DEBUG_FN = mock_debug_fn
def foo():
raise Exception('FOO')
qj(foo, catch=1)()
mock_log_fn.assert_has_calls(
[
mock.call(
RegExp(r'qj: <qj_test> test_logs_with_catch: foo, catch=1 <\d+>: <function .*foo at 0x.*>')),
mock.call(
RegExp(r'qj:\s+Wrapping return value in exception function\.')),
mock.call(
RegExp(r'qj: <qj_test> test_logs_with_catch: Caught an exception in <function .*foo at 0x.*> <\d+>: FOO')),
],
any_order=False)
self.assertEqual(mock_log_fn.call_count, 3)
self.assertEqual(mock_debug_fn.call_count, 1)
def test_logs_with_catch_decorator(self):
with mock.patch('logging.info') as mock_log_fn:
with mock.patch('ipdb.set_trace') as mock_debug_fn:
qj.LOG_FN = mock_log_fn
qj.DEBUG_FN = mock_debug_fn
@qj(catch=1)
def foo():
raise Exception('FOO')
foo()
mock_log_fn.assert_has_calls(
[
mock.call(
RegExp(r'qj: <qj_test> test_logs_with_catch_decorator: catch=1 <\d+>: Preparing decorator to catch exceptions\.\.\.')),
mock.call(
RegExp(r'qj:\s+Decorating <function .*foo at 0x.*> with exception function\.')),
mock.call().__nonzero__() if sys.version_info[0] < 3 else mock.call().__bool__(), # TODO(iansf): it's unclear why this is necessary in this case.
mock.call(
RegExp(r'qj: <qj_test> test_logs_with_catch_decorator: Caught an exception in <function .*foo at 0x.*> <\d+>: FOO')),
],
any_order=False)
self.assertEqual(mock_log_fn.call_count, 3)
self.assertEqual(mock_debug_fn.call_count, 1)
def test_logs_with_log_all_calls(self):
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
s = qj('abc', log_all_calls=1)
s.replace('a', 'b')
mock_log_fn.assert_has_calls(
[
mock.call(
RegExp(r"qj: <qj_test> test_logs_with_log_all_calls: 'abc', log_all_calls=1 <\d+>: abc")),
mock.call(
RegExp(r'qj:\s+Wrapping all public method calls for object\.')),
mock.call(
RegExp(r"qj: <qj_test> test_logs_with_log_all_calls: calling replace <\d+>: replace\('a', 'b'\)")),
mock.call(
RegExp(r'qj: <qj_test> test_logs_with_log_all_calls: returning from replace <\d+>: bbc')),
],
any_order=False)
self.assertEqual(mock_log_fn.call_count, 4)
  def test_logs_no_s_empty(self):
    """Calling qj() with no arguments logs an '<empty log>' placeholder."""
    def empty():
      qj()
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      empty()
      # The log prefix names the calling function ('empty'), not the test.
      mock_log_fn.assert_called_once_with(RegExp(
          r'qj: <qj_test> empty: <empty log> <\d+>:'))
  def test_logs_no_s_basic(self):
    """Without an explicit string, qj logs the argument's source text ('x')."""
    def basic():
      x = 2
      qj(x)
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      basic()
      mock_log_fn.assert_called_once_with(RegExp(
          r'qj: <qj_test> basic: x <\d+>: 2'))
def test_logs_no_s_basic_add(self):
def basic_add():
x = 2
qj(1 + x)
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
basic_add()
mock_log_fn.assert_called_once_with(RegExp(
r'qj: <qj_test> basic_add: 1 \+ x <\d+>: 3'))
  def test_logs_no_s_basic_mul(self):
    """The source text of a multiplication expression ('1 * x') is logged verbatim."""
    def basic_mul():
      x = 2
      qj(1 * x)
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      basic_mul()
      mock_log_fn.assert_called_once_with(RegExp(
          r'qj: <qj_test> basic_mul: 1 \* x <\d+>: 2'))
  def test_logs_no_s_basic_floordiv(self):
    """A floor-division expression ('3 // x') is logged with its value (1)."""
    def basic_floordiv():
      x = 2
      qj(3 // x)
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      basic_floordiv()
      mock_log_fn.assert_called_once_with(RegExp(
          r'qj: <qj_test> basic_floordiv: 3 // x <\d+>: 1'))
def test_logs_no_s_order_of_operations(self):
def order_of_operations():
x = 2
qj(1 + 2 * x)
qj(2 * x + 1)
qj(2 * (x + 1))
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
order_of_operations()
mock_log_fn.assert_has_calls([
mock.call(RegExp(
r'qj: <qj_test> order_of_operations: 1 \+ 2 \* x <\d+>: 5')),
mock.call(RegExp(
r'qj: <qj_test> order_of_operations: 2 \* x \+ 1 <\d+>: 5')),
mock.call(RegExp(
r'qj: <qj_test> order_of_operations: 2 \* \(x \+ 1\) <\d+>: 6')),
], any_order=False)
self.assertEqual(mock_log_fn.call_count, 3)
  def test_logs_no_s_basic_funcall(self):
    """A builtin call used as the argument is logged as its source, 'abs(-x)'."""
    def basic_funcall():
      x = 2
      qj(abs(-x))
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      basic_funcall()
      mock_log_fn.assert_called_once_with(RegExp(
          r'qj: <qj_test> basic_funcall: abs\(-x\) <\d+>: 2'))
def test_logs_no_s_basic_obj_funcall(self):
def basic_obj_funcall():
x = '2'
qj(x.strip())
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
basic_obj_funcall()
mock_log_fn.assert_called_once_with(RegExp(
r'qj: <qj_test> basic_obj_funcall: x.strip\(\) <\d+>: 2'))
  def test_logs_no_s_basic_list(self):
    """A list argument logs its variable name and its repr ('[1, 2]')."""
    def basic_list():
      x = [1, 2]
      qj(x)
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      basic_list()
      mock_log_fn.assert_called_once_with(RegExp(
          r'qj: <qj_test> basic_list: x <\d+>: \[1, 2\]'))
def test_logs_no_s_basic_tuple(self):
def basic_tuple():
x = (1, 2)
qj(x)
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
basic_tuple()
mock_log_fn.assert_called_once_with(RegExp(
r'qj: <qj_test> basic_tuple: x <\d+>: \(1, 2\)'))
def test_logs_no_s_basic_dict(self):
def basic_dict():
x = {'a': 1, 'b': 2}
qj(x)
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
basic_dict()
mock_log_fn.assert_called_once_with(RegExp(
r"qj: <qj_test> basic_dict: x <\d+>: \{('a': 1, 'b': 2|'b': 2, 'a': 1)\}"))
def test_logs_no_s_list_comp(self):
def list_comp():
x = [1, 2]
return [qj(a) for a in x]
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
list_comp()
mock_log_fn.assert_has_calls([
mock.call(RegExp(
r'qj: <qj_test> list_comp: a <\d+>: 1')),
mock.call(RegExp(
r'qj: <qj_test> list_comp: a <\d+>: 2'))
], any_order=False)
self.assertEqual(mock_log_fn.call_count, 2)
def test_logs_no_s_basic_gen(self):
def basic_gen():
x = [1, 2]
g = (qj(a) for a in x)
for a in g:
pass
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
basic_gen()
mock_log_fn.assert_has_calls([
mock.call(RegExp(
r'qj: <qj_test> basic_gen: a <\d+>: 1')),
mock.call(RegExp(
r'qj: <qj_test> basic_gen: a <\d+>: 2'))
], any_order=False)
self.assertEqual(mock_log_fn.call_count, 2)
def test_logs_no_s_dict_comp(self):
def dict_comp():
x = {'a': 1, 'b': 2}
return {qj(k): qj(v) for k, v in x.items()}
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
dict_comp()
mock_log_fn.assert_has_calls([
mock.call(RegExp(
r'qj: <qj_test> dict_comp: *k <\d+>: a')),
mock.call(RegExp(
r'qj: <qj_test> dict_comp: *v <\d+>: 1')),
mock.call(RegExp(
r'qj: <qj_test> dict_comp: *k <\d+>: b')),
mock.call(RegExp(
r'qj: <qj_test> dict_comp: *v <\d+>: 2')),
], any_order=True)
self.assertEqual(mock_log_fn.call_count, 4)
def test_logs_no_s_basic_lambda(self):
def basic_lambda():
x = 2
(lambda: qj(x))()
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
basic_lambda()
mock_log_fn.assert_called_once_with(RegExp(
r'qj: <qj_test> basic_lambda.lambda: x <\d+>: 2'))
def test_logs_no_s_embedded_lambda(self):
def embedded_lambda():
a = 1
b = 2
qj(a, l=lambda x: qj(b))
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
embedded_lambda()
mock_log_fn.assert_has_calls([
mock.call(RegExp(
r'qj: <qj_test> embedded_lambda: a, l=lambda x: qj\(b\) <\d+>: 1')),
mock.call(RegExp(
r'qj: <qj_test> qj.lambda: b <\d+>: 2')),
mock.call(RegExp(
r'qj:\s+2')),
], any_order=False)
self.assertEqual(mock_log_fn.call_count, 3)
def test_logs_no_s_contains_list(self):
def contains_list():
x = 1
qj([x, x + 1, x + 2])
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
contains_list()
mock_log_fn.assert_called_once_with(RegExp(
r'qj: <qj_test> contains_list: \[x, x \+ 1, x \+ 2\] <\d+>: \[1, 2, 3\]'))
def test_logs_no_s_contains_list_comp_basic(self):
def contains_list_comp_basic():
l = [1, 2, 3]
qj([x for x in l])
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
contains_list_comp_basic()
mock_log_fn.assert_called_once_with(RegExp(
r'qj: <qj_test> contains_list_comp_basic: \[x for x in l\] <\d+>: \[1, 2, 3\]'))
def test_logs_no_s_contains_list_comp_with_list_comp(self):
def contains_list_comp_with_list_comp():
l = [1, 2, 3]
qj([[x + y for y in [2, 3, 4]] for x in l])
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
contains_list_comp_with_list_comp()
mock_log_fn.assert_called_once_with(RegExp(
r'qj: <qj_test> contains_list_comp_with_list_comp: \[\[x \+ y for y in \[2, 3, 4\]\] for x in l\] <\d+>: \[\[3, 4, 5\],'))
def test_logs_no_s_contains_dict_comp_basic(self):
def contains_dict_comp_basic():
l = [1, 2, 3]
qj({x: x + 1 for x in l})
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
contains_dict_comp_basic()
mock_log_fn.assert_called_once_with(RegExp(
r'qj: <qj_test> contains_dict_comp_basic: \{x: x \+ 1 for x in l\} <\d+>: '
r'\{[1-3]: [2-4], [1-3]: [2-4], [1-3]: [2-4]\}'))
def test_logs_no_s_contains_dict_comp_multiarg(self):
def contains_dict_comp_multiarg():
l = [1, 2, 3]
s = 'abc'
qj({k: x + 1 for k, x in zip(list(s), l)})
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
contains_dict_comp_multiarg()
mock_log_fn.assert_called_once_with(RegExp(
r'qj: <qj_test> contains_dict_comp_multiarg: \{k: x \+ 1 for k, x in zip\(list\(s\), l\)\} <\d+>: '
r"\{'[a-c]': [2-4], '[a-c]': [2-4], '[a-c]': [2-4]\}"))
def test_logs_no_s_contains_dict_comp_closure(self):
def contains_dict_comp_closure():
l = [1, 2, 3]
s = 'abc'
t = 'def'
qj({k + s + t: x + 1 for k, x in zip(list(s), l)})
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
contains_dict_comp_closure()
mock_log_fn.assert_called_once_with(RegExp(
r'qj: <qj_test> contains_dict_comp_closure: \{k \+ s \+ t: x \+ 1 for k, x in zip\(list\(s\), l\)\} <\d+>: '
r"\{'(aabcdef|babcdef|cabcdef)': [2-4], '(aabcdef|babcdef|cabcdef)': [2-4], '(aabcdef|babcdef|cabcdef)': [2-4]\}"))
def test_logs_no_s_multiline_basic(self):
def multiline_basic():
x = 2
qj(
x
)
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
multiline_basic()
mock_log_fn.assert_called_once_with(RegExp(
r'qj: <qj_test> multiline_basic: x <\d+>: 2'))
def test_logs_no_s_multiline_many_arg(self):
def multiline_many_arg():
x = 2
qj(x,
s='',
l=None,
d=False,
p=0,
t=0,
n=0,
z=0,
b=1)
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
multiline_many_arg()
mock_log_fn.assert_called_once_with(RegExp(
r"qj: <qj_test> multiline_many_arg: x, s='', l=None, d=False, p=0, t=0, n=0, z=0, b=1 <\d+>: 2"))
def test_logs_no_s_multiline_list_comp(self):
def multiline_list_comp():
l = [1, 2, 3]
qj(
[x
for x in l]
)
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
multiline_list_comp()
mock_log_fn.assert_called_once_with(RegExp(
r'qj: <qj_test> multiline_list_comp: \[x for x in l\] <\d+>: \[1, 2, 3\]'))
def test_logs_no_s_multiline_set_comp(self):
def multiline_set_comp():
a = dict(x=1, y=2)
b = dict(x=3, y=4)
qj({
(
qj('%d_%d' % (v['x'], v['y'])),
qj(tuple(sorted(v.keys()))),
)
for v in [a, b]
})
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
multiline_set_comp()
mock_log_fn.assert_has_calls([
mock.call(RegExp(
r"qj: <qj_test> multiline_set_comp: '%d_%d' % \(v\['x'\], v\['y'\]\) <\d+>: 1_2")),
mock.call(RegExp(
r"qj: <qj_test> multiline_set_comp: tuple\(sorted\(v\.keys\(\)\)\) <\d+>: \('x', 'y'\)")),
mock.call(RegExp(
r"qj: <qj_test> multiline_set_comp: '%d_%d' % \(v\['x'\], v\['y'\]\) <\d+>: 3_4")),
mock.call(RegExp(
r"qj: <qj_test> multiline_set_comp: tuple\(sorted\(v\.keys\(\)\)\) <\d+>: \('x', 'y'\)")),
mock.call(RegExp(
r"qj: <qj_test> multiline_set_comp: "
r"\{ \( qj\('%d_%d' % \(v\['x'\], v\['y'\]\)\), qj\(tuple\(sorted\(v.keys\(\)\)\)\), \) "
r"for v in \[a, b\] \} <\d+>: "
r".*\('(1_2|3_4)', \('x', 'y'\)\), \('(1_2|3_4)', \('x', 'y'\)\)")), # Python 2.7 and 3.6 represent sets differently
], any_order=True)
self.assertEqual(mock_log_fn.call_count, 5)
def test_logs_no_s_no_whitespace(self):
def no_whitespace():
x = 1
qj([x,x+1,x+2]) # pylint: disable=bad-whitespace
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
no_whitespace()
mock_log_fn.assert_called_once_with(RegExp(
r'qj: <qj_test> no_whitespace: \[x,x\+1,x\+2\] <\d+>: \[1, 2, 3\]'))
def test_logs_no_s_substring_conflicts(self):
def substring_conflicts():
x = 1
y = 2
xx = 1
yy = 4
qj([x, y]); qj([xx, yy]) # pylint: disable=multiple-statements
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
substring_conflicts()
mock_log_fn.assert_has_calls([
mock.call(RegExp(
r'qj: <qj_test> substring_conflicts: \[x, y\] <\d+>: \[1, 2\]')),
mock.call(RegExp(
r'qj: <qj_test> substring_conflicts: \[xx, yy\] <\d+>: \[1, 4\]')),
], any_order=False)
self.assertEqual(mock_log_fn.call_count, 2)
def test_logs_no_s_with_splat_basic(self):
def with_splat_basic():
a = [2, '', None]
qj(*a)
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
with_splat_basic()
mock_log_fn.assert_called_once_with(RegExp(
r'qj: <qj_test> with_splat_basic: \*a <\d+>: 2'))
def test_logs_no_s_with_splat_as_well(self):
def with_splat_as_well():
x = 2
a = [None, False]
qj(x, '', *a)
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
with_splat_as_well()
mock_log_fn.assert_called_once_with(RegExp(
r"qj: <qj_test> with_splat_as_well: x, '', \*a <\d+>: 2"))
def test_logs_no_s_with_splat_and_kw(self):
def with_splat_and_kw():
x = 2
a = ['', None]
d = {'d': 0}
qj(x, *a, **d)
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
with_splat_and_kw()
mock_log_fn.assert_called_once_with(RegExp(
r'qj: <qj_test> with_splat_and_kw: x, \*a, \*\*d <\d+>: 2'))
def test_logs_no_s_nested(self):
def nested():
x = 2
qj(qj(x))
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
nested()
mock_log_fn.assert_has_calls([
mock.call(RegExp(
r'qj: <qj_test> nested: x <\d+>: 2')),
mock.call(RegExp(
r'qj: <qj_test> nested: qj\(x\) <\d+>: 2')),
], any_order=False)
self.assertEqual(mock_log_fn.call_count, 2)
def test_logs_no_s_subscript(self):
def subscript():
x = [1, 2, 3]
qj(x[0])
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
subscript()
mock_log_fn.assert_called_once_with(RegExp(
r'qj: <qj_test> subscript: x\[0\] <\d+>: 1'))
def test_logs_no_s_subscript_with_args(self):
def subscript_with_args():
x = [1, 2, 3]
qj(x[0], b=1)
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
subscript_with_args()
mock_log_fn.assert_called_once_with(RegExp(
r'qj: <qj_test> subscript_with_args: x\[0\], b=1 <\d+>: 1'))
def test_logs_no_s_subscript_dict(self):
def subscript():
d = dict(x=2)
qj(d['x'])
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
subscript()
mock_log_fn.assert_called_once_with(RegExp(
r"qj: <qj_test> subscript: d\['x'\] <\d+>: 2"))
  def test_logs_no_s_slice_0(self):
    """A full slice ('s[:]') is captured as source text and logs the whole string."""
    def slice_0():
      s = 'abcdef'
      qj(s[:])
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      slice_0()
      mock_log_fn.assert_called_once_with(RegExp(
          r'qj: <qj_test> slice_0: s\[:\] <\d+>: abcdef'))
def test_logs_no_s_slice_1(self):
def slice_1():
s = 'abcdef'
x = 2
qj(s[x:])
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
slice_1()
mock_log_fn.assert_called_once_with(RegExp(
r'qj: <qj_test> slice_1: s\[x:\] <\d+>: cdef'))
def test_logs_no_s_slice_2(self):
def slice_2():
s = 'abcdef'
x = 2
qj(s[x:-1])
qj(s[:-1])
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
slice_2()
mock_log_fn.assert_has_calls([
mock.call(RegExp(
r'qj: <qj_test> slice_2: s\[x:-1\] <\d+>: cde')),
mock.call(RegExp(
r'qj: <qj_test> slice_2: s\[:-1\] <\d+>: abcde')),
], any_order=False)
self.assertEqual(mock_log_fn.call_count, 2)
def test_logs_no_s_slice_3(self):
def slice_3():
s = 'abcdef'
x = 2
qj(s[-1:x:-1])
qj(s[-1::-1])
qj(s[::])
with mock.patch('logging.info') as mock_log_fn:
qj.LOG_FN = mock_log_fn
slice_3()
mock_log_fn.assert_has_calls([
mock.call(RegExp(
r'qj: <qj_test> slice_3: s\[-1:x:-1\] <\d+>: fed')),
mock.call(RegExp(
r'qj: <qj_test> slice_3: s\[-1::-1\] <\d+>: fedcba')),
mock.call(RegExp(
r'qj: <qj_test> slice_3: s\[::\] <\d+>: abcdef')),
], any_order=False)
self.assertEqual(mock_log_fn.call_count, 3)
  def test_logs_no_s_calls_len(self):
    """'len(l)' used as the argument is logged as source text with its value."""
    def calls_len():
      l = [1, 2, 3]
      qj(len(l))
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      calls_len()
      mock_log_fn.assert_called_once_with(RegExp(
          r'qj: <qj_test> calls_len: len\(l\) <\d+>: 3'))
# pylint: enable=line-too-long
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  unittest.main()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that creates, patches and deletes a Cloud SQL instance, and also
creates, patches and deletes a database inside the instance, in Google Cloud.
This DAG relies on the following OS environment variables (see
https://airflow.apache.org/concepts.html#variables for how Airflow handles variables):
* GCP_PROJECT_ID - Google Cloud project for the Cloud SQL instance.
* INSTANCE_NAME - Name of the Cloud SQL instance.
* DB_NAME - Name of the database inside a Cloud SQL instance.
"""
import os
from datetime import datetime
from urllib.parse import urlsplit
from airflow import models
from airflow.providers.google.cloud.operators.cloud_sql import (
CloudSQLCreateInstanceDatabaseOperator,
CloudSQLCreateInstanceOperator,
CloudSQLDeleteInstanceDatabaseOperator,
CloudSQLDeleteInstanceOperator,
CloudSQLExportInstanceOperator,
CloudSQLImportInstanceOperator,
CloudSQLInstancePatchOperator,
CloudSQLPatchInstanceDatabaseOperator,
)
from airflow.providers.google.cloud.operators.gcs import (
GCSBucketCreateAclEntryOperator,
GCSObjectCreateAclEntryOperator,
)
GCP_PROJECT_ID = os.environ.get('GCP_PROJECT_ID', 'example-project')
INSTANCE_NAME = os.environ.get('GCSQL_MYSQL_INSTANCE_NAME', 'test-mysql')
INSTANCE_NAME2 = os.environ.get('GCSQL_MYSQL_INSTANCE_NAME2', 'test-mysql2')
DB_NAME = os.environ.get('GCSQL_MYSQL_DATABASE_NAME', 'testdb')
EXPORT_URI = os.environ.get('GCSQL_MYSQL_EXPORT_URI', 'gs://INVALID BUCKET NAME/fileName')
IMPORT_URI = os.environ.get('GCSQL_MYSQL_IMPORT_URI', 'gs://INVALID BUCKET NAME/fileName')
# Bodies below represent Cloud SQL instance resources:
# https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances
FAILOVER_REPLICA_NAME = INSTANCE_NAME + "-failover-replica"
READ_REPLICA_NAME = INSTANCE_NAME + "-read-replica"
# [START howto_operator_cloudsql_create_body]
body = {
"name": INSTANCE_NAME,
"settings": {
"tier": "db-n1-standard-1",
"backupConfiguration": {"binaryLogEnabled": True, "enabled": True, "startTime": "05:00"},
"activationPolicy": "ALWAYS",
"dataDiskSizeGb": 30,
"dataDiskType": "PD_SSD",
"databaseFlags": [],
"ipConfiguration": {
"ipv4Enabled": True,
"requireSsl": True,
},
"locationPreference": {"zone": "europe-west4-a"},
"maintenanceWindow": {"hour": 5, "day": 7, "updateTrack": "canary"},
"pricingPlan": "PER_USE",
"replicationType": "ASYNCHRONOUS",
"storageAutoResize": True,
"storageAutoResizeLimit": 0,
"userLabels": {"my-key": "my-value"},
},
"failoverReplica": {"name": FAILOVER_REPLICA_NAME},
"databaseVersion": "MYSQL_5_7",
"region": "europe-west4",
}
# [END howto_operator_cloudsql_create_body]
body2 = {
"name": INSTANCE_NAME2,
"settings": {
"tier": "db-n1-standard-1",
},
"databaseVersion": "MYSQL_5_7",
"region": "europe-west4",
}
# [START howto_operator_cloudsql_create_replica]
read_replica_body = {
"name": READ_REPLICA_NAME,
"settings": {
"tier": "db-n1-standard-1",
},
"databaseVersion": "MYSQL_5_7",
"region": "europe-west4",
"masterInstanceName": INSTANCE_NAME,
}
# [END howto_operator_cloudsql_create_replica]
# [START howto_operator_cloudsql_patch_body]
patch_body = {
"name": INSTANCE_NAME,
"settings": {
"dataDiskSizeGb": 35,
"maintenanceWindow": {"hour": 3, "day": 6, "updateTrack": "canary"},
"userLabels": {"my-key-patch": "my-value-patch"},
},
}
# [END howto_operator_cloudsql_patch_body]
# [START howto_operator_cloudsql_export_body]
export_body = {
"exportContext": {"fileType": "sql", "uri": EXPORT_URI, "sqlExportOptions": {"schemaOnly": False}}
}
# [END howto_operator_cloudsql_export_body]
# [START howto_operator_cloudsql_import_body]
import_body = {"importContext": {"fileType": "sql", "uri": IMPORT_URI}}
# [END howto_operator_cloudsql_import_body]
# [START howto_operator_cloudsql_db_create_body]
db_create_body = {"instance": INSTANCE_NAME, "name": DB_NAME, "project": GCP_PROJECT_ID}
# [END howto_operator_cloudsql_db_create_body]
# [START howto_operator_cloudsql_db_patch_body]
db_patch_body = {"charset": "utf16", "collation": "utf16_general_ci"}
# [END howto_operator_cloudsql_db_patch_body]
with models.DAG(
'example_gcp_sql',
schedule_interval='@once', # Override to match your needs
start_date=datetime(2021, 1, 1),
catchup=False,
tags=['example'],
) as dag:
# ############################################## #
# ### INSTANCES SET UP ######################### #
# ############################################## #
# [START howto_operator_cloudsql_create]
sql_instance_create_task = CloudSQLCreateInstanceOperator(
project_id=GCP_PROJECT_ID, body=body, instance=INSTANCE_NAME, task_id='sql_instance_create_task'
)
# [END howto_operator_cloudsql_create]
sql_instance_create_2_task = CloudSQLCreateInstanceOperator(
project_id=GCP_PROJECT_ID, body=body2, instance=INSTANCE_NAME2, task_id='sql_instance_create_task2'
)
    # NOTE: a stray duplicate "[END howto_operator_cloudsql_create]" marker was
    # removed here -- that doc snippet is already closed after the first create task.
sql_instance_read_replica_create = CloudSQLCreateInstanceOperator(
project_id=GCP_PROJECT_ID,
body=read_replica_body,
instance=READ_REPLICA_NAME,
task_id='sql_instance_read_replica_create',
)
# ############################################## #
# ### MODIFYING INSTANCE AND ITS DATABASE ###### #
# ############################################## #
# [START howto_operator_cloudsql_patch]
sql_instance_patch_task = CloudSQLInstancePatchOperator(
project_id=GCP_PROJECT_ID, body=patch_body, instance=INSTANCE_NAME, task_id='sql_instance_patch_task'
)
# [END howto_operator_cloudsql_patch]
sql_instance_patch_task2 = CloudSQLInstancePatchOperator(
project_id=GCP_PROJECT_ID, body=patch_body, instance=INSTANCE_NAME, task_id='sql_instance_patch_task2'
)
# [START howto_operator_cloudsql_db_create]
sql_db_create_task = CloudSQLCreateInstanceDatabaseOperator(
project_id=GCP_PROJECT_ID, body=db_create_body, instance=INSTANCE_NAME, task_id='sql_db_create_task'
)
sql_db_create_task2 = CloudSQLCreateInstanceDatabaseOperator(
body=db_create_body, instance=INSTANCE_NAME, task_id='sql_db_create_task2'
)
# [END howto_operator_cloudsql_db_create]
# [START howto_operator_cloudsql_db_patch]
sql_db_patch_task = CloudSQLPatchInstanceDatabaseOperator(
project_id=GCP_PROJECT_ID,
body=db_patch_body,
instance=INSTANCE_NAME,
database=DB_NAME,
task_id='sql_db_patch_task',
)
sql_db_patch_task2 = CloudSQLPatchInstanceDatabaseOperator(
body=db_patch_body, instance=INSTANCE_NAME, database=DB_NAME, task_id='sql_db_patch_task2'
)
# [END howto_operator_cloudsql_db_patch]
# ############################################## #
# ### EXPORTING SQL FROM INSTANCE 1 ############ #
# ############################################## #
export_url_split = urlsplit(EXPORT_URI)
# For export to work we need to add the Cloud SQL instance's Service Account
# write access to the destination GCS bucket.
# [START howto_operator_cloudsql_export_gcs_permissions]
sql_gcp_add_bucket_permission_task = GCSBucketCreateAclEntryOperator(
entity=f"user-{sql_instance_create_task.output['service_account_email']}",
role="WRITER",
bucket=export_url_split[1], # netloc (bucket)
task_id='sql_gcp_add_bucket_permission_task',
)
# [END howto_operator_cloudsql_export_gcs_permissions]
# [START howto_operator_cloudsql_export]
sql_export_task = CloudSQLExportInstanceOperator(
project_id=GCP_PROJECT_ID, body=export_body, instance=INSTANCE_NAME, task_id='sql_export_task'
)
sql_export_task2 = CloudSQLExportInstanceOperator(
body=export_body, instance=INSTANCE_NAME, task_id='sql_export_task2'
)
# [END howto_operator_cloudsql_export]
# ############################################## #
# ### IMPORTING SQL TO INSTANCE 2 ############## #
# ############################################## #
import_url_split = urlsplit(IMPORT_URI)
# For import to work we need to add the Cloud SQL instance's Service Account
# read access to the target GCS object.
# [START howto_operator_cloudsql_import_gcs_permissions]
sql_gcp_add_object_permission_task = GCSObjectCreateAclEntryOperator(
entity=f"user-{sql_instance_create_2_task.output['service_account_email']}",
role="READER",
bucket=import_url_split[1], # netloc (bucket)
object_name=import_url_split[2][1:], # path (strip first '/')
task_id='sql_gcp_add_object_permission_task',
)
    # For import to work we also need to add the Cloud SQL instance's Service Account
    # write access to the whole bucket!
sql_gcp_add_bucket_permission_2_task = GCSBucketCreateAclEntryOperator(
entity=f"user-{sql_instance_create_2_task.output['service_account_email']}",
role="WRITER",
bucket=import_url_split[1], # netloc
task_id='sql_gcp_add_bucket_permission_2_task',
)
# [END howto_operator_cloudsql_import_gcs_permissions]
# [START howto_operator_cloudsql_import]
sql_import_task = CloudSQLImportInstanceOperator(
project_id=GCP_PROJECT_ID, body=import_body, instance=INSTANCE_NAME2, task_id='sql_import_task'
)
sql_import_task2 = CloudSQLImportInstanceOperator(
body=import_body, instance=INSTANCE_NAME2, task_id='sql_import_task2'
)
# [END howto_operator_cloudsql_import]
# ############################################## #
# ### DELETING A DATABASE FROM AN INSTANCE ##### #
# ############################################## #
# [START howto_operator_cloudsql_db_delete]
sql_db_delete_task = CloudSQLDeleteInstanceDatabaseOperator(
project_id=GCP_PROJECT_ID, instance=INSTANCE_NAME, database=DB_NAME, task_id='sql_db_delete_task'
)
sql_db_delete_task2 = CloudSQLDeleteInstanceDatabaseOperator(
instance=INSTANCE_NAME, database=DB_NAME, task_id='sql_db_delete_task2'
)
# [END howto_operator_cloudsql_db_delete]
# ############################################## #
# ### INSTANCES TEAR DOWN ###################### #
# ############################################## #
# [START howto_operator_cloudsql_replicas_delete]
sql_instance_failover_replica_delete_task = CloudSQLDeleteInstanceOperator(
project_id=GCP_PROJECT_ID,
instance=FAILOVER_REPLICA_NAME,
task_id='sql_instance_failover_replica_delete_task',
)
sql_instance_read_replica_delete_task = CloudSQLDeleteInstanceOperator(
project_id=GCP_PROJECT_ID, instance=READ_REPLICA_NAME, task_id='sql_instance_read_replica_delete_task'
)
# [END howto_operator_cloudsql_replicas_delete]
# [START howto_operator_cloudsql_delete]
sql_instance_delete_task = CloudSQLDeleteInstanceOperator(
project_id=GCP_PROJECT_ID, instance=INSTANCE_NAME, task_id='sql_instance_delete_task'
)
sql_instance_delete_task2 = CloudSQLDeleteInstanceOperator(
project_id=GCP_PROJECT_ID, instance=INSTANCE_NAME2, task_id='sql_instance_delete_task2'
)
# [END howto_operator_cloudsql_delete]
(
sql_instance_create_task
>> sql_instance_create_2_task
>> sql_instance_read_replica_create
>> sql_instance_patch_task
>> sql_instance_patch_task2
>> sql_db_create_task
>> sql_db_create_task2
>> sql_db_patch_task
>> sql_db_patch_task2
>> sql_gcp_add_bucket_permission_task
>> sql_export_task
>> sql_export_task2
>> sql_gcp_add_object_permission_task
>> sql_gcp_add_bucket_permission_2_task
>> sql_import_task
>> sql_import_task2
>> sql_db_delete_task
>> sql_db_delete_task2
>> sql_instance_failover_replica_delete_task
>> sql_instance_read_replica_delete_task
>> sql_instance_delete_task
>> sql_instance_delete_task2
)
# Task dependencies created via `XComArgs`:
# sql_instance_create_task >> sql_gcp_add_bucket_permission_task
# sql_instance_create_2_task >> sql_gcp_add_object_permission_task
# sql_instance_create_2_task >> sql_gcp_add_bucket_permission_2_task
| |
#!/usr/bin/env python
"""
------------------------------------------------------------------------
Routine to translate X and Y pixel coordinates to RA, DEC values in
serial mode.
Usage: python pix2sky_serial.py [options] image pixfile
Input:
image: Input image with basic header keywords
pixfile: Input file with (x, y) pixel value in column 1 and 2
[Options]:
--help: help
--version: program version
--verbose: show result messages
--quiet: don't show result messages
--filename: output file name (default is pix2sky.dat)
--degree: (ra, dec) in degrees? (default is yes)
Author:
Navtej Singh
Organization:
Centre for Astronomy, National University of Ireland, Galway, Ireland
Version:
15 December 2011 1.0 Original version
------------------------------------------------------------------------
"""
# Load python modules to be used in the routine
import sys, math
from os.path import join, exists
from StringIO import StringIO
from optparse import OptionParser
# Get header keywords and save in class variables
# ===============================================
def getHeader(image):
    """
    Read the WCS keywords (CRVAL, CRPIX and the CD matrix) from a FITS
    image header.

    image may be 'name.fits', a multi-extension file (first extension
    header is used) or 'name.fits[N]' to select extension N explicitly.

    Returns (ra0, dec0, crpix1, crpix2, cd11, cd12, cd21, cd22).
    Exits the process with -1 if the file cannot be opened as FITS.
    """
    print >> sys.stdout, '\n Getting image header keywords...'
    # Input can be a single FITS image, multi-extension image or
    # multi-extension image with particular extension
    if len(image.split('[', 1)) > 1:
        ext = image.split('[', 1)[1].replace(']', '')
        image = image.split('[', 1)[0]
    else:
        ext = ''
    # Open Header Unit List (HDU) to read header keywords
    try:
        hdulist = pyfits.open(image)
    except:
        print >> sys.stderr, 'Error: Not able to read FITS header. Exiting.'
        sys.exit(-1)
    # Get header parameters - checking number of extensions and using 1st extension
    # in case of multi extension FITS image
    if len(hdulist) > 1:
        if ext == '':
            hdrdata = hdulist[1].header
        else:
            hdrdata = hdulist[int(ext)].header
    else:
        hdrdata = hdulist[0].header
    # Get CRPIX (reference pixel) keyword values
    crpix1 = hdrdata['CRPIX1']
    crpix2 = hdrdata['CRPIX2']
    # Get CRVAL (reference sky coordinate) keyword values
    ra0 = hdrdata['CRVAL1']
    dec0 = hdrdata['CRVAL2']
    # Get CD (pixel-to-sky linear transformation matrix) keyword values
    cd11 = hdrdata['CD1_1']
    cd12 = hdrdata['CD1_2']
    cd21 = hdrdata['CD2_1']
    cd22 = hdrdata['CD2_2']
    # Close the HDU list only AFTER all keywords have been read: pyfits
    # loads extension headers lazily, so closing the file first (as the
    # previous revision did) can make the header inaccessible.
    hdulist.close()
    # Return image header keywords
    return ra0, dec0, crpix1, crpix2, cd11, cd12, cd21, cd22
# Convert RA from degree to hour:min:sec
# ======================================
def degree2hours(degrees):
    """Format a decimal value as a sexagesimal 'hh:mm:ss.ssss' string."""
    whole = int(degrees)
    frac_minutes = (degrees - whole) * 60
    minutes = int(frac_minutes)
    seconds = (frac_minutes - minutes) * 60
    return '%2d%s%02d%s%2.4f' % (whole, ':', minutes, ':', seconds)
# Translate X,Y image pixel coordinates to sky coordinates RA,DEC
# ===============================================================
def translate(x, y, ra0, dec0, crpix1, crpix2, cd11, cd12, cd21, cd22, degree):
    """
    Convert one (x, y) pixel position to sky coordinates (RA, DEC).

    ra0, dec0       : reference sky coordinates (CRVAL1/CRVAL2), degrees
    crpix1, crpix2  : reference pixel (CRPIX1/CRPIX2)
    cd11..cd22      : CD matrix terms (degrees per pixel)
    degree          : 'yes' -> return RA/DEC as decimal degrees;
                      anything else -> sexagesimal strings (RA in hours)

    Prints the result to stdout and returns the tuple (x, y, ra, dec).
    """
    # Formulas based on IRAF implementation of xy2rd task
    # Linear part: offset from the reference pixel through the CD matrix
    # gives the intermediate (tangent-plane) coordinates in degrees.
    xi = cd11 * (x - crpix1) + cd12 * (y - crpix2)
    eta = cd21 * (x - crpix1) + cd22 * (y - crpix2)
    xi = math.radians(xi)
    eta = math.radians(eta)
    ra0 = math.radians(ra0)
    dec0 = math.radians(dec0)
    # De-projection from the tangent plane back to the celestial sphere.
    ra = math.atan2(xi, math.cos(dec0) - eta * math.sin(dec0)) + ra0
    dec = math.atan2(eta * math.cos(dec0) + math.sin(dec0), math.sqrt((math.cos(dec0) - eta * math.sin(dec0))**2 + xi**2))
    ra = math.degrees(ra)
    dec = math.degrees(dec)
    # Normalize RA into [0, 360) degrees.
    ra = ra % 360.0
    if ra < 0.0:
        ra = ra + 360.0
    if degree == 'no':
        # Convert RA degrees to hours (15 deg per hour), then format both
        # coordinates as sexagesimal strings.
        ra = ra / 15.0
        ra = degree2hours(ra)
        dec = degree2hours(dec)
        print >> sys.stdout, 'X = %6.3f%s' %(x, '\t'), ' Y = %6.3f' %y, ' RA = %s' %ra, ' DEC = %s' %dec
    else:
        print >> sys.stdout, 'X = %6.3f%s' %(x, '\t'), ' Y = %6.3f' %y, ' RA = %3.9f' %ra, ' DEC = %3.9f' %dec
    return (x, y, ra, dec)
# pix2sky routine to process x,y pixel pairs
# ===========================================
def pix2sky(image, infile, degree = 'yes', outfile = None):
    """
    Translate every (x, y) pair in infile to (RA, DEC) using the WCS
    keywords of image, echoing each result to stdout and writing all of
    them to outfile.

    degree  : 'yes' -> RA/DEC as decimal degrees, 'no' -> sexagesimal
    outfile : output path; default is 'pix2sky.out' in the input file's
              directory.
    Exits the process with -1 if a file cannot be opened.
    """
    # Read image header keywords
    ra0, dec0, crpix1, crpix2, cd11, cd12, cd21, cd22 = getHeader(image)
    # Open input file with x and y pixel values
    try:
        ifile = open(infile, 'r')
    except:
        print >> sys.stderr, 'Error: Not able to open the input file ', infile, '. Exiting.'
        sys.exit(-1)
    # Default output name lives next to the input file.  rsplit with
    # maxsplit=1 keeps the whole directory path; the previous revision used
    # rsplit('/')[0], which kept only the FIRST path component (e.g. 'a'
    # instead of 'a/b' for 'a/b/file.txt').
    if not outfile:
        if len(infile.rsplit('/', 1)) > 1:
            outfile = join(infile.rsplit('/', 1)[0], 'pix2sky.out')
        else:
            outfile = 'pix2sky.out'
    # Open the output file
    try:
        ofile = open(outfile, 'w')
    except:
        print >> sys.stderr, 'Error: Not able to open the output file ', outfile, ' for writing. Exiting.'
        sys.exit(-1)
    # Write data headers to the output file
    ofile.write('# ---------------------------------------------------------\n')
    ofile.write('# X Y RA DEC \n')
    ofile.write('# ---------------------------------------------------------\n')
    # Translate every non-comment input line and write the result
    while 1:
        line = ifile.readline()
        if not line:
            break
        if line[0] != '#':
            res = translate(float(line.split()[0]), float(line.split()[1]), ra0, dec0, crpix1, crpix2, cd11, cd12, cd21, cd22, degree)
            ofile.write('%10s%10s%18s%18s%s' %(str(res[0]), str(res[1]), str(res[2]), str(res[3]), '\n'))
    # Close input and output files
    try:
        ifile.close()
        ofile.close()
    except:
        print >> sys.stderr, 'Warning: Not able to close the input and the output files.'
    print >> sys.stdout, '\n Results written to - ', outfile
# Main function - doing some data validation before calling pix2sky method
# ========================================================================
def main(image, pixelfile, degree = 'yes', outfile = None):
    """
    Validate that the image and the pixel list exist on disk, then run
    pix2sky.  Exits the process with -1 on a missing input.
    """
    # Strip any '[ext]' extension suffix before checking the image on disk
    if not exists(image.split( '[', 1 )[0]):
        print >> sys.stderr, 'Error: Image ', image, ' does not exist. Exiting.'
        sys.exit(-1)
    if not exists(pixelfile):
        print >> sys.stderr, 'Error: Pixel file ', pixelfile, ' does not exist. Exiting.'
        sys.exit(-1)
    pix2sky(image, pixelfile, degree, outfile)
# Entry point for PIX2SKY_SERIAL utility
# ======================================
# Entry point: parse the command line, validate it, then delegate to main().
if __name__ == '__main__':
    usage = "Usage: %prog [options] image pixfile"
    description = "Description. Utility to convert X/Y pixel image coordinates to RA/DEC sky coordinates in serial mode."
    parser = OptionParser(usage = usage, version = "%prog 1.0", description = description)
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default = False,
                      help = "print result messages to stdout"
                      )
    parser.add_option("-q", "--quiet",
                      action="store_false", dest="verbose", default = True,
                      help = "don't print result messages to stdout"
                      )
    parser.add_option("-d", "--degree", dest = "degree", metavar="DEGREE",
                      action="store", help = "ra/dec in degree? [default is yes]",
                      choices=['yes', 'no'], default = 'yes'
                      )
    parser.add_option("-f", "--filename", dest = "filename",
                      action='store', metavar="FILE", help = "output file name [default is pix2sky.out]"
                      )
    (options, args) = parser.parse_args()
    # Check for number of input arguments (image and pixfile are required)
    if len(args) != 2:
        parser.error("Incorrect number of arguments")
    print >> sys.stdout, '\n Starting processing...'
    # Check verbosity: in quiet mode redirect stdout to an in-memory buffer
    # so the per-point result lines are suppressed.
    if not options.verbose:
        output = StringIO()
        old_stdout = sys.stdout
        sys.stdout = output
    # Check that the pyfits module is available before doing any work
    try:
        import pyfits
    except:
        print >> sys.stderr, 'Error: Python module pyfits not found. Exiting.'
        sys.exit(-1)
    main(args[0], args[1], options.degree, options.filename)
    # Reset verbosity: restore the real stdout captured above
    if not options.verbose:
        sys.stdout = old_stdout
    print >> sys.stdout, '\n Process completed successfully.'
| |
"""
Parser and utilities for the smart 'if' tag
"""
import operator
# Using a simple top down parser, as described here:
# http://effbot.org/zone/simple-top-down-parsing.htm.
# 'led' = left denotation
# 'nud' = null denotation
# 'bp' = binding power (left = lbp, right = rbp)
class TokenBase(object):
    """
    Common base for every node of the parsed expression tree (operators,
    literals and the end sentinel).  Exists mainly for debugging output and
    for raising syntax errors when a token appears in an illegal position.
    """
    id = None              # node/token type name
    value = None           # used by literals
    first = second = None  # used by tree nodes

    def nud(self, parser):
        # Null denotation - called in prefix context
        raise parser.error_class(
            "Not expecting '%s' in this position in if tag." % self.id
        )

    def led(self, left, parser):
        # Left denotation - called in infix context
        raise parser.error_class(
            "Not expecting '%s' as infix operator in if tag." % self.id
        )

    def display(self):
        """Return the human-readable form used in error messages."""
        return self.id

    def __repr__(self):
        parts = (self.id, self.first, self.second)
        shown = [str(p) for p in parts if p is not None]
        return "(" + " ".join(shown) + ")"
def infix(bp, func):
    """
    Build an infix-operator token class with left binding power ``bp``.

    ``func(context, left, right)`` evaluates the node.  Any exception it
    raises is swallowed and the node evaluates to False, because template
    rendering must never propagate errors (e.g. ``{% if foo in bar %}``
    where ``bar`` does not support ``in``).
    """
    class Operator(TokenBase):
        lbp = bp

        def led(self, left, parser):
            # Infix position: capture the already-parsed left operand,
            # then parse the right operand at our own binding power.
            self.first, self.second = left, parser.expression(bp)
            return self

        def eval(self, context):
            try:
                result = func(context, self.first, self.second)
            except Exception:
                # Fail closed rather than raising during rendering.
                return False
            return result

    return Operator
def prefix(bp, func):
    """
    Build a prefix-operator token class (e.g. 'not') with binding power
    ``bp``.  ``func(context, operand)`` evaluates the node; exceptions are
    swallowed so the node simply evaluates to False.
    """
    class Operator(TokenBase):
        lbp = bp

        def nud(self, parser):
            # Prefix position: the single operand follows the operator.
            self.first = parser.expression(bp)
            self.second = None
            return self

        def eval(self, context):
            try:
                result = func(context, self.first)
            except Exception:
                return False
            return result

    return Operator
# Operator precedence follows Python.
# NB - we can get slightly more accurate syntax error messages by not using the
# same object for '==' and '='.
# We defer variable evaluation to the lambda to ensure that terms are
# lazily evaluated using Python's boolean parsing logic.
# Maps token text -> token *class* built by infix()/prefix().  The first
# argument is the binding power: or < and < not < in/not in < comparisons.
OPERATORS = {
    'or': infix(6, lambda context, x, y: x.eval(context) or y.eval(context)),
    'and': infix(7, lambda context, x, y: x.eval(context) and y.eval(context)),
    'not': prefix(8, lambda context, x: not x.eval(context)),
    'in': infix(9, lambda context, x, y: x.eval(context) in y.eval(context)),
    'not in': infix(9, lambda context, x, y: x.eval(context) not in y.eval(context)),
    '=': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)),
    '==': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)),
    '!=': infix(10, lambda context, x, y: x.eval(context) != y.eval(context)),
    '>': infix(10, lambda context, x, y: x.eval(context) > y.eval(context)),
    '>=': infix(10, lambda context, x, y: x.eval(context) >= y.eval(context)),
    '<': infix(10, lambda context, x, y: x.eval(context) < y.eval(context)),
    '<=': infix(10, lambda context, x, y: x.eval(context) <= y.eval(context)),
}
# Assign 'id' to each operator class so error messages and repr() can show
# the original token text:
for key, op in OPERATORS.items():
    op.id = key
class Literal(TokenBase):
    """
    A self-resolving terminal node: eval() simply hands back the wrapped
    value.  IfParser wraps unrecognised tokens in Literal via create_var();
    TemplateIfParser overrides create_var() so that a proper implementation
    that actually resolves variables, filters etc is used instead.
    """
    id = "literal"
    lbp = 0

    def __init__(self, value):
        self.value = value

    def nud(self, parser):
        # A literal is complete on its own in prefix position.
        return self

    def eval(self, context):
        return self.value

    def display(self):
        return repr(self.value)

    def __repr__(self):
        return "({0} {1!r})".format(self.id, self.value)
class EndToken(TokenBase):
    # Sentinel returned by IfParser.next() once the token stream is
    # exhausted; lbp = 0 makes every expression() loop stop on it.
    lbp = 0
    def nud(self, parser):
        raise parser.error_class("Unexpected end of expression in if tag.")
# Replace the class with its (stateless) singleton instance.
EndToken = EndToken()
class IfParser(object):
    """
    Top-down operator-precedence (Pratt) parser over a list of string
    tokens.  Unknown tokens become Literal nodes via create_var(); parse()
    returns the root node of the expression tree, whose eval(context)
    computes the truth value.
    """
    # Exception type raised for syntax errors; subclasses may override.
    error_class = ValueError
    def __init__(self, tokens):
        # pre-pass necessary to turn 'not','in' into single token
        l = len(tokens)
        mapped_tokens = []
        i = 0
        while i < l:
            token = tokens[i]
            if token == "not" and i + 1 < l and tokens[i+1] == "in":
                token = "not in"
                i += 1  # skip 'in'
            mapped_tokens.append(self.translate_token(token))
            i += 1
        self.tokens = mapped_tokens
        self.pos = 0
        # Prime the stream: current_token always holds the lookahead.
        self.current_token = self.next()
    def translate_token(self, token):
        # Map token text to a fresh operator instance, or to a
        # variable/literal node when it is not a known operator.
        try:
            op = OPERATORS[token]
        except (KeyError, TypeError):
            # TypeError covers unhashable tokens; either way it is a value.
            return self.create_var(token)
        else:
            return op()
    def next(self):
        # Return the next token, or the EndToken sentinel when exhausted.
        if self.pos >= len(self.tokens):
            return EndToken
        else:
            retval = self.tokens[self.pos]
            self.pos += 1
            return retval
    def parse(self):
        # Parse the whole stream and return the root expression node.
        retval = self.expression()
        # Check that we have exhausted all the tokens
        if self.current_token is not EndToken:
            raise self.error_class("Unused '%s' at end of if expression." %
                self.current_token.display())
        return retval
    def expression(self, rbp=0):
        # Core Pratt loop: parse a prefix expression (nud), then keep
        # extending it with infix operators (led) while their left binding
        # power exceeds rbp.
        t = self.current_token
        self.current_token = self.next()
        left = t.nud(self)
        while rbp < self.current_token.lbp:
            t = self.current_token
            self.current_token = self.next()
            left = t.led(left, self)
        return left
    def create_var(self, value):
        # Hook point: TemplateIfParser overrides this to resolve real
        # template variables instead of plain literals.
        return Literal(value)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.