gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from threading import Thread
import publisher
from entity import *
from ..artifactmgt.git.agentgithandler import *
from ..artifactmgt.repository import Repository
from ..util import cartridgeagentutils
from ..util.log import LogFactory
# Tenant ID reserved for the Stratos super tenant.
SUPER_TENANT_ID = "-1234"
# Default repo sub-path for super-tenant artifact deployment.
SUPER_TENANT_REPO_PATH = "/repository/deployment/server/"
# Default repo sub-path prefix for normal tenants (tenant id is appended).
TENANT_REPO_PATH = "/repository/tenants/"
# Module-level logger shared by all event handlers below.
log = LogFactory().get_log(__name__)
"""
Event execution related logic
"""
def on_instance_started_event():
    """Run the extensions and plugins registered for the instance started event."""
    log.debug("Processing instance started event...")
    # TODO: copy artifacts extension
    event_name = constants.INSTANCE_STARTED_EVENT
    execute_event_extendables(event_name, {})
def create_dummy_interface():
    """Run the LVS dummy interface creation extendables.

    Reads the configured virtual IP spec, which carries the virtual IP and
    subnet mask separated by a "|".
    """
    log.debug("Processing lvs dummy interface creation...")
    parts = Config.lvs_virtual_ip.split("|")
    log.debug("LVS dummy interface creation values %s %s " % (parts[0], parts[1]))
    values = {
        "EVENT": constants.CREATE_LVS_DUMMY_INTERFACE,
        "LVS_DUMMY_VIRTUAL_IP": parts[0],
        "LVS_SUBNET_MASK": parts[1],
    }
    execute_event_extendables(constants.CREATE_LVS_DUMMY_INTERFACE, values)
def on_instance_activated_event():
    """Run the extensions and plugins registered for the instance activated event."""
    log.debug("Processing instance activated event...")
    event_name = constants.INSTANCE_ACTIVATED_EVENT
    execute_event_extendables(event_name, {})
def on_artifact_updated_event(artifacts_updated_event):
    # Handles an artifact updated event: validates it against this member's
    # payload, checks out the artifacts, runs the extendables, publishes
    # activation if needed, and optionally schedules the periodic update task.
    log.debug(
        "Processing artifact updated event for [tenant] %s [cluster] %s [status] %s"
        % (str(artifacts_updated_event.tenant_id), artifacts_updated_event.cluster_id, artifacts_updated_event.status))
    cluster_id_event = str(artifacts_updated_event.cluster_id).strip()
    cluster_id_payload = Config.cluster_id
    repo_url = str(artifacts_updated_event.repo_url).strip()
    # Ignore malformed events or events meant for a different cluster.
    if repo_url == "":
        log.error("Repository URL is empty. Failed to process artifact updated event.")
        return
    if cluster_id_payload is None or cluster_id_payload == "":
        log.error("Cluster ID in payload is empty. Failed to process artifact updated event.")
        return
    if cluster_id_payload != cluster_id_event:
        log.debug("Cluster ID in artifact updated event does not match. Skipping event handler.")
        return
    # The repo password arrives encrypted with the cartridge key.
    repo_password = None
    if artifacts_updated_event.repo_password is not None:
        secret = Config.cartridge_key
        repo_password = cartridgeagentutils.decrypt_password(artifacts_updated_event.repo_password, secret)
    if Config.app_path is None:
        log.error("Repository path is empty. Failed to process artifact updated event.")
        return
    if not validate_repo_path(Config.app_path):
        log.error(
            "Repository path cannot be accessed, or is invalid. Failed to process artifact updated event. [App Path] %s"
            % Config.app_path)
        return
    repo_username = artifacts_updated_event.repo_username
    tenant_id = artifacts_updated_event.tenant_id
    is_multitenant = Config.is_multiTenant
    commit_enabled = artifacts_updated_event.commit_enabled
    # create repo object
    local_repo_path = get_repo_path_for_tenant(str(tenant_id), Config.app_path, is_multitenant)
    repo_info = Repository(repo_url, repo_username, repo_password, local_repo_path, tenant_id, commit_enabled)
    log.info("Executing checkout job on artifact updated event...")
    try:
        Config.artifact_checkout_plugin.plugin_object.checkout(repo_info)
    except Exception as e:
        # Checkout failure is logged but does not abort the handler.
        log.exception(
            "Checkout job on artifact updated event failed for tenant: %s %s" % (repo_info.tenant_id, e))
    # execute artifact updated extension
    plugin_values = {"ARTIFACT_UPDATED_CLUSTER_ID": artifacts_updated_event.cluster_id,
                     "ARTIFACT_UPDATED_TENANT_ID": artifacts_updated_event.tenant_id,
                     "ARTIFACT_UPDATED_REPO_URL": artifacts_updated_event.repo_url,
                     "ARTIFACT_UPDATED_REPO_PASSWORD": artifacts_updated_event.repo_password,
                     "ARTIFACT_UPDATED_REPO_USERNAME": artifacts_updated_event.repo_username,
                     "ARTIFACT_UPDATED_STATUS": artifacts_updated_event.status}
    try:
        execute_event_extendables(constants.ARTIFACT_UPDATED_EVENT, plugin_values)
    except Exception as e:
        log.exception("Could not execute plugins for artifact updated event: %s" % e)
    if not Config.activated:
        # publish instance activated event if not yet activated
        publisher.publish_instance_activated_event()
        on_instance_activated_event()
    update_artifacts = Config.read_property(constants.ENABLE_ARTIFACT_UPDATE, True)
    auto_commit = Config.is_commits_enabled
    auto_checkout = Config.is_checkout_enabled
    log.info("ADC configuration: [update_artifacts] %s, [auto-commit] %s, [auto-checkout] %s"
             % (update_artifacts, auto_commit, auto_checkout))
    if update_artifacts:
        try:
            update_interval = int(Config.artifact_update_interval)
        except ValueError:
            # NOTE(review): this logs the ValueError *class*, not the caught
            # instance — presumably a latent logging bug; falls back to 10s.
            log.exception("Invalid artifact sync interval specified: %s" % ValueError)
            update_interval = 10
        log.info("Artifact updating task enabled, update interval: %s seconds" % update_interval)
        log.info("Auto Commit is turned %s " % ("on" if auto_commit else "off"))
        log.info("Auto Checkout is turned %s " % ("on" if auto_checkout else "off"))
        AgentGitHandler.schedule_artifact_update_task(
            repo_info,
            auto_checkout,
            auto_commit,
            update_interval)
def on_instance_cleanup_cluster_event():
    """Trigger cleanup when the entire cluster is being cleaned up."""
    log.debug("Processing instance cleanup cluster event...")
    cleanup(constants.INSTANCE_CLEANUP_CLUSTER_EVENT)
def on_instance_cleanup_member_event():
    """Trigger cleanup when this member is being cleaned up."""
    log.debug("Processing instance cleanup member event...")
    cleanup(constants.INSTANCE_CLEANUP_MEMBER_EVENT)
def on_member_activated_event(member_activated_event):
    """Run member activated extendables once the member is initialized in the topology."""
    log.debug(
        "Processing Member activated event: [service] %r [cluster] %r [member] %r"
        % (member_activated_event.service_name,
           member_activated_event.cluster_id,
           member_activated_event.member_id))
    if not is_member_initialized_in_topology(
            member_activated_event.service_name,
            member_activated_event.cluster_id,
            member_activated_event.member_id):
        log.error("Member has not initialized, failed to execute member activated event")
        return
    execute_event_extendables(constants.MEMBER_ACTIVATED_EVENT, {})
def on_complete_topology_event(complete_topology_event):
    """Mark the agent initialized when possible and run complete topology extendables."""
    log.debug("Processing Complete topology event...")
    service_name_in_payload = Config.service_name
    cluster_id_in_payload = Config.cluster_id
    member_id_in_payload = Config.member_id

    # Only check the topology while the agent is still uninitialized.
    if not Config.initialized and is_member_initialized_in_topology(
            service_name_in_payload, cluster_id_in_payload, member_id_in_payload):
        # Set cartridge agent as initialized since member is available and it is in initialized state
        Config.initialized = True
        log.info(
            "Member initialized [member id] %s, [cluster-id] %s, [service] %s"
            % (member_id_in_payload, cluster_id_in_payload, service_name_in_payload))

    topology = complete_topology_event.get_topology()
    service = topology.get_service(service_name_in_payload)
    if service is None:
        raise Exception("Service not found in topology [service] %s" % service_name_in_payload)
    cluster = service.get_cluster(cluster_id_in_payload)
    if cluster is None:
        raise Exception("Cluster id not found in topology [cluster] %s" % cluster_id_in_payload)
    plugin_values = {
        "TOPOLOGY_JSON": json.dumps(topology.json_str),
        "MEMBER_LIST_JSON": json.dumps(cluster.member_list_json),
    }
    execute_event_extendables(constants.COMPLETE_TOPOLOGY_EVENT, plugin_values)
def on_member_initialized_event(member_initialized_event):
    """
    Member initialized event is sent by cloud controller once volume attachment and
    ip address allocation is completed successfully
    :param member_initialized_event:
    :return:
    """
    log.debug("Processing Member initialized event...")
    service_name_in_payload = Config.service_name
    cluster_id_in_payload = Config.cluster_id
    member_id_in_payload = Config.member_id

    # Only react when this agent's own member is the one that got initialized.
    if not Config.initialized and member_id_in_payload == member_initialized_event.member_id:
        member_exists = member_exists_in_topology(
            service_name_in_payload,
            cluster_id_in_payload,
            member_id_in_payload)
        log.debug("Member exists: %s" % member_exists)
        if not member_exists:
            raise Exception("Member [member-id] %s not found in topology while processing member initialized "
                            "event. [Topology] %s" % (member_id_in_payload, TopologyContext.get_topology()))
        Config.initialized = True
        mark_member_as_initialized(service_name_in_payload, cluster_id_in_payload, member_id_in_payload)
        log.info("Instance marked as initialized on member initialized event")

    execute_event_extendables(constants.MEMBER_INITIALIZED_EVENT, {})
def on_complete_tenant_event(complete_tenant_event):
    """Run complete tenant extendables with the tenant list carried by the event."""
    log.debug("Processing Complete tenant event...")
    tenant_list_json = complete_tenant_event.tenant_list_json
    log.debug("Complete tenants:" + json.dumps(tenant_list_json))
    execute_event_extendables(
        constants.COMPLETE_TENANT_EVENT,
        {"TENANT_LIST_JSON": json.dumps(tenant_list_json)})
def on_member_terminated_event(member_terminated_event):
    """Run member terminated extendables once the member is initialized in the topology."""
    log.debug(
        "Processing Member terminated event: [service] %s [cluster] %s [member] %s"
        % (member_terminated_event.service_name, member_terminated_event.cluster_id, member_terminated_event.member_id))
    if not is_member_initialized_in_topology(
            member_terminated_event.service_name,
            member_terminated_event.cluster_id,
            member_terminated_event.member_id):
        log.error("Member has not initialized, failed to execute member terminated event")
        return
    execute_event_extendables(constants.MEMBER_TERMINATED_EVENT, {})
def on_member_suspended_event(member_suspended_event):
    """Run member suspended extendables once the member is initialized in the topology."""
    log.debug(
        "Processing Member suspended event: [service] %s [cluster] %s [member] %s"
        % (member_suspended_event.service_name, member_suspended_event.cluster_id, member_suspended_event.member_id))
    if not is_member_initialized_in_topology(
            member_suspended_event.service_name,
            member_suspended_event.cluster_id,
            member_suspended_event.member_id):
        log.error("Member has not initialized, failed to execute member suspended event")
        return
    execute_event_extendables(constants.MEMBER_SUSPENDED_EVENT, {})
def on_member_started_event(member_started_event):
    """Run member started extendables once the member is initialized in the topology."""
    log.debug(
        "Processing Member started event: [service] %s [cluster] %s [member] %s"
        % (member_started_event.service_name, member_started_event.cluster_id, member_started_event.member_id))
    if not is_member_initialized_in_topology(
            member_started_event.service_name,
            member_started_event.cluster_id,
            member_started_event.member_id):
        log.error("Member has not initialized, failed to execute member started event")
        return
    execute_event_extendables(constants.MEMBER_STARTED_EVENT, {})
def start_server_extension():
    """Run the StartServers extension once this member is initialized in the topology."""
    log.debug("Processing start server extension...")
    if not is_member_initialized_in_topology(
            Config.service_name, Config.cluster_id, Config.member_id):
        log.error("Member has not initialized, failed to execute start server event")
        return
    execute_event_extendables("StartServers", {})
def volume_mount_extension(persistence_mappings_payload):
    """Run the VolumeMount extension, passing through the persistence mappings payload."""
    log.debug("Processing volume mount extension...")
    execute_event_extendables("VolumeMount", persistence_mappings_payload)
def on_domain_mapping_added_event(domain_mapping_added_event):
    """Run domain mapping added extendables with the subscription details."""
    ev = domain_mapping_added_event
    tenant_domain = find_tenant_domain(ev.tenant_id)
    log.debug(
        "Processing Domain mapping added event: [tenant-id] " + str(ev.tenant_id) +
        " [tenant-domain] " + tenant_domain + " [domain-name] " + ev.domain_name +
        " [application-context] " + ev.application_context
    )
    plugin_values = {
        "SUBSCRIPTION_APPLICATION_ID": ev.application_id,
        "SUBSCRIPTION_SERVICE_NAME": ev.service_name,
        "SUBSCRIPTION_DOMAIN_NAME": ev.domain_name,
        "SUBSCRIPTION_CLUSTER_ID": ev.cluster_id,
        "SUBSCRIPTION_TENANT_ID": int(ev.tenant_id),
        "SUBSCRIPTION_TENANT_DOMAIN": tenant_domain,
        "SUBSCRIPTION_CONTEXT_PATH": ev.context_path,
    }
    execute_event_extendables(constants.DOMAIN_MAPPING_ADDED_EVENT, plugin_values)
def on_domain_mapping_removed_event(domain_mapping_removed_event):
    """Run domain mapping removed extendables with the subscription details."""
    ev = domain_mapping_removed_event
    tenant_domain = find_tenant_domain(ev.tenant_id)
    log.info(
        "Domain mapping removed event received: [tenant-id] " + str(ev.tenant_id) +
        " [tenant-domain] " + tenant_domain + " [domain-name] " + ev.domain_name
    )
    plugin_values = {
        "SUBSCRIPTION_APPLICATION_ID": ev.application_id,
        "SUBSCRIPTION_SERVICE_NAME": ev.service_name,
        "SUBSCRIPTION_DOMAIN_NAME": ev.domain_name,
        "SUBSCRIPTION_CLUSTER_ID": ev.cluster_id,
        "SUBSCRIPTION_TENANT_ID": int(ev.tenant_id),
        "SUBSCRIPTION_TENANT_DOMAIN": tenant_domain,
    }
    execute_event_extendables(constants.DOMAIN_MAPPING_REMOVED_EVENT, plugin_values)
def on_copy_artifacts_extension(src, dest):
    """Run the CopyArtifacts extension for the given source and destination paths."""
    log.debug("Processing Copy artifacts extension...")
    execute_event_extendables("CopyArtifacts", {"SOURCE": src, "DEST": dest})
def on_tenant_subscribed_event(tenant_subscribed_event):
    """Run tenant subscribed extendables."""
    ev = tenant_subscribed_event
    log.debug(
        "Processing Tenant subscribed event: [tenant] " + str(ev.tenant_id) +
        " [service] " + ev.service_name + " [cluster] " + ev.cluster_ids
    )
    execute_event_extendables(constants.TENANT_SUBSCRIBED_EVENT, {})
def on_application_signup_removed_event(application_signup_removal_event):
    """Remove the tenant's local git repo when the removed signup matches this application."""
    ev = application_signup_removal_event
    log.debug(
        "Processing Tenant unsubscribed event: [tenant] " + str(ev.tenantId) +
        " [application ID] " + str(ev.applicationId)
    )
    if Config.application_id == ev.applicationId:
        AgentGitHandler.remove_repo(ev.tenantId)
    execute_event_extendables(constants.APPLICATION_SIGNUP_REMOVAL_EVENT, {})
def cleanup(event):
    """Announce maintenance mode, run the clean extension, then signal ready to shut down."""
    log.debug("Executing cleanup extension for event %s..." % event)
    # Enter maintenance mode before cleaning up.
    publisher.publish_maintenance_mode_event()
    execute_event_extendables("clean", {})
    # Cleanup done — the instance may now be shut down.
    publisher.publish_instance_ready_to_shutdown_event()
def execute_event_extendables(event, input_values):
    """Run both the extension and the plugins registered for the given event.

    :param event: The event name string
    :param input_values: the values to be passed to the plugin
    """
    try:
        input_values = add_common_input_values(input_values)
    except Exception as err:
        log.error("Error while adding common input values for event extendables: %s" % err)
    input_values["EVENT"] = event
    log.debug("Executing extensions for [event] %s with [input values] %s" % (event, input_values))
    # Extension first, then the plugins.
    execute_extension_for_event(event, input_values)
    execute_plugins_for_event(event, input_values)
def execute_plugins_for_event(event, input_values):
    """Run every plugin registered for the event, one at a time.

    :param str event: The event name string
    :param dict input_values: the values to be passed to the plugin
    """
    try:
        registered = Config.plugins.get(event)
        if registered is None:
            log.debug("No plugins registered for event %s" % event)
            return
        for plugin_info in registered:
            log.debug("Executing plugin %s for event %s" % (plugin_info.name, event))
            runner = PluginExecutor(plugin_info, input_values)
            runner.start()
            # block till plugin run completes.
            runner.join()
    except Exception as err:
        log.exception("Error while executing plugin for event %s: %s" % (event, err))
def execute_extension_for_event(event, extension_values):
    """Run the configured extension executor for the event, if one exists.

    :param event: The event name string
    :param extension_values: the values to be passed to the plugin
    """
    try:
        if Config.extension_executor is None:
            log.debug("No extensions registered for event %s" % event)
            return
        log.debug("Executing extension for event [%s]" % event)
        runner = PluginExecutor(Config.extension_executor, extension_values)
        runner.start()
        # block till plugin run completes.
        runner.join()
    except OSError as err:
        log.warn("No extension was found for event %s: %s" % (event, err))
    except Exception as err:
        log.exception("Error while executing extension for event %s: %s" % (event, err))
def get_repo_path_for_tenant(tenant_id, git_local_repo_path, is_multitenant):
    """ Finds the repository path for tenant to clone from the remote repository

    :param tenant_id:
    :param git_local_repo_path:
    :param is_multitenant:
    :return:
    """
    def _slashed(path):
        # Ensure exactly one leading and one trailing "/" on a configured sub path.
        if not path.startswith("/"):
            path = "/" + path
        if not path.endswith("/"):
            path += "/"
        return path

    if not is_multitenant:
        # not multi tenant: the application path is used as-is
        repo_path = git_local_repo_path
    elif tenant_id == SUPER_TENANT_ID:
        # super tenant: "app_path/repository/deploy/server/" (or configured override)
        configured = Config.super_tenant_repository_path
        if configured is not None and configured != "":
            repo_path = git_local_repo_path + _slashed(configured)
        else:
            repo_path = git_local_repo_path + SUPER_TENANT_REPO_PATH
    else:
        # normal tenant: "app_path/repository/tenants/<tenant_id>" (or configured override)
        configured = Config.tenant_repository_path
        if configured is not None and configured != "":
            repo_path = git_local_repo_path + _slashed(configured) + tenant_id
        else:
            repo_path = git_local_repo_path + TENANT_REPO_PATH + tenant_id

    log.debug("Repo path returned : %r" % repo_path)
    return repo_path
def is_member_initialized_in_topology(service_name, cluster_id, member_id):
    """Return True when the member exists in the topology and its status is Initialized."""
    if not member_exists_in_topology(service_name, cluster_id, member_id):
        return False
    topology = TopologyContext.get_topology()
    service = topology.get_service(service_name)
    if service is None:
        raise Exception("Service not found in topology [service] %s" % service_name)
    cluster = service.get_cluster(cluster_id)
    if cluster is None:
        raise Exception("Cluster id not found in topology [cluster] %s" % cluster_id)
    member = cluster.get_member(member_id)
    if member is None:
        raise Exception("Member id not found in topology [member] %s" % member_id)
    log.info("Found member: " + member.to_json())
    return member.status == MemberStatus.Initialized
def member_exists_in_topology(service_name, cluster_id, member_id):
    """Return True when the member is present in the topology.

    Raises when the service or cluster itself cannot be found.
    """
    topology = TopologyContext.get_topology()
    service = topology.get_service(service_name)
    if service is None:
        raise Exception("Service not found in topology [service] %s" % service_name)
    cluster = service.get_cluster(cluster_id)
    if cluster is None:
        raise Exception("Cluster id not found in topology [cluster] %s" % cluster_id)
    if cluster.get_member(member_id) is not None:
        return True
    log.error("Member id not found in topology [member] %s" % member_id)
    return False
def mark_member_as_initialized(service_name, cluster_id, member_id):
    """Set the member's status to Initialized in the in-memory topology."""
    def _require(value, message):
        # Fail loudly when a topology lookup comes back empty.
        if value is None:
            raise Exception(message)
        return value

    topology = TopologyContext.get_topology()
    service = _require(topology.get_service(service_name),
                       "Service not found in topology [service] %s" % service_name)
    cluster = _require(service.get_cluster(cluster_id),
                       "Cluster id not found in topology [cluster] %s" % cluster_id)
    member = _require(cluster.get_member(member_id),
                      "Member id not found in topology [member] %s" % member_id)
    member.status = MemberStatus.Initialized
def add_common_input_values(plugin_values):
    """
    Adds the common parameters to be used by the extension scripts
    :param dict[str, str] plugin_values: Dictionary to be added
    :return: Dictionary with updated parameters
    :rtype: dict[str, str]
    """
    # Normalise the input into a dict; non-dict scalars are wrapped under VALUE1.
    if plugin_values is None:
        plugin_values = {}
    elif type(plugin_values) != dict:
        plugin_values = {"VALUE1": str(plugin_values)}
    plugin_values["APPLICATION_PATH"] = Config.app_path
    plugin_values["PARAM_FILE_PATH"] = Config.read_property(constants.PARAM_FILE_PATH, False)
    plugin_values["PERSISTENCE_MAPPINGS"] = Config.persistence_mappings
    # Prefer LB IPs found in the topology; fall back to the payload values.
    lb_cluster_id_in_payload = Config.lb_cluster_id
    lb_private_ip, lb_public_ip = get_lb_member_ip(lb_cluster_id_in_payload)
    plugin_values["LB_IP"] = lb_private_ip if lb_private_ip is not None else Config.lb_private_ip
    plugin_values["LB_PUBLIC_IP"] = lb_public_ip if lb_public_ip is not None else Config.lb_public_ip
    topology = TopologyContext.get_topology()
    if topology.initialized:
        service = topology.get_service(Config.service_name)
        if service is None:
            raise Exception("Service not found in topology [service] %s" % Config.service_name)
        cluster = service.get_cluster(Config.cluster_id)
        if cluster is None:
            raise Exception("Cluster id not found in topology [cluster] %s" % Config.cluster_id)
        member = cluster.get_member(Config.member_id)
        if member is None:
            raise Exception("Member id not found in topology [member] %s" % Config.member_id)
        # Expose service/cluster/member properties with a distinguishing prefix each.
        add_properties(service.properties, plugin_values, "SERVICE_PROPERTY")
        add_properties(cluster.properties, plugin_values, "CLUSTER_PROPERTY")
        add_properties(member.properties, plugin_values, "MEMBER_PROPERTY")
    plugin_values.update(Config.get_payload_params())
    # Drop None-valued entries before handing over to the extension scripts.
    return clean_process_parameters(plugin_values)
def add_properties(properties, params, prefix):
    """
    Adds the given property list to the parameters list with given prefix in the parameter name
    :param dict[str, str] properties: service properties
    :param dict[str, str] params:
    :param str prefix:
    :return: dict[str, str]
    """
    if properties is None or properties.items() is None:
        return
    for key, value in properties.items():
        params[prefix + "_" + key] = str(value)
def get_lb_member_ip(lb_cluster_id):
    """Search the topology for a member of the LB cluster and return its
    (private_ip, public_ip) pair, or (None, None) if no such member exists."""
    topology = TopologyContext.get_topology()
    for service in topology.get_services():
        for cluster in service.get_clusters():
            for member in cluster.get_members():
                if member.cluster_id == lb_cluster_id:
                    return member.member_default_private_ip, member.member_default_public_ip
    return None, None
def clean_process_parameters(params):
    """
    Removes any null valued parameters before passing them to the extension scripts

    The keys to drop are collected first: deleting entries while iterating
    the live ``items()`` view raises RuntimeError on Python 3.

    :param dict params:
    :return: cleaned parameters (the same dict, mutated in place)
    :rtype: dict
    """
    for key in [k for k, v in params.items() if v is None]:
        del params[key]
    return params
def find_tenant_domain(tenant_id):
    """Resolve the tenant's domain from the tenant context; raise if unknown."""
    tenant = TenantContext.get_tenant(tenant_id)
    if tenant is not None:
        return tenant.tenant_domain
    raise RuntimeError("Tenant could not be found: [tenant-id] %s" % str(tenant_id))
def validate_repo_path(app_path):
    """Return True when app_path is an absolute path (e.g. /var/www, /opt/server/data)."""
    is_absolute = os.path.isabs(app_path)
    return is_absolute
class PluginExecutor(Thread):
    """ Runs a single plugin on its own thread, handing the supplied values
    dictionary to the plugin's entry method.
    """

    def __init__(self, plugin_info, values):
        Thread.__init__(self)
        self.__plugin_info = plugin_info
        self.__values = values
        self.__log = LogFactory().get_log(__name__)

    def run(self):
        # Any failure inside the plugin is logged, never propagated to the caller.
        try:
            self.__plugin_info.plugin_object.run_plugin(self.__values)
        except Exception as err:
            self.__log.exception("Error while executing plugin %s: %s" % (self.__plugin_info.name, err))
| |
# Copyright 2017 Bracket Computing, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# https://github.com/brkt/brkt-cli/blob/master/LICENSE
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and
# limitations under the License.
import abc
import base64
import json
import logging
import re
import time
import uuid
from datetime import datetime, timedelta
import iso8601
from brkt_cli.validation import ValidationError
# When False, sleep() below becomes a no-op.
SLEEP_ENABLED = True
MAX_BACKOFF_SECS = 10
# Supported crypto options for the disks
CRYPTO_GCM = 'gcm'
CRYPTO_XTS = 'xts'
# Size of the metavisor disk (in GBs)
METAVISOR_DISK_SIZE = 6
# Module-level logger.
log = logging.getLogger(__name__)
class BracketError(Exception):
    """Module-specific exception type."""
    pass
class Deadline(object):
    """Convenience class for bounding how long execution takes."""

    def __init__(self, secs_from_now, clock=time):
        # The clock parameter lets callers substitute an alternate time source.
        self.clock = clock
        self.deadline = self.clock.time() + secs_from_now

    def is_expired(self):
        """Return whether or not the deadline has passed.

        Returns:
            True if the deadline has passed. False otherwise.
        """
        return self.clock.time() >= self.deadline
class RetryExceptionChecker(object):
    """ Abstract class, implemented by callsites that need custom
    exception checking for the retry() function.
    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def is_expected(self, exception):
        # Return True when the given exception should trigger a retry.
        pass
def sleep(seconds):
    """Sleep for the given seconds unless sleeping is globally disabled."""
    if not SLEEP_ENABLED:
        return
    time.sleep(seconds)
def retry(function, on=None, exception_checker=None, timeout=15.0,
          initial_sleep_seconds=0.25):
    """ Retry the given function until it completes successfully. Before
    retrying, sleep for initial_sleep_seconds. The sleep time grows
    linearly with the attempt number. If the timeout is exceeded or an
    unexpected exception is raised, raise the underlying exception.

    :param function the function that will be retried
    :param on a list of expected Exception classes
    :param exception_checker an instance of RetryExceptionChecker that is
        used to determine if the exception is expected
    :param timeout stop retrying if this number of seconds have lapsed
    :param initial_sleep_seconds
    """
    start_time = time.time()

    def _wrapped(*args, **kwargs):
        # range() instead of xrange() so this also runs on Python 3.
        for attempt in range(1, 1000):
            try:
                return function(*args, **kwargs)
            except Exception as e:
                now = time.time()
                # An exception is retryable when the custom checker accepts
                # it or its class appears in the `on` list.
                expected = False
                if exception_checker and exception_checker.is_expected(e):
                    expected = True
                if on and e.__class__ in on:
                    expected = True
                if not expected:
                    raise
                if now - start_time > timeout:
                    log.error(
                        'Exceeded timeout of %s seconds for %s',
                        timeout,
                        function.__name__)
                    raise
                else:
                    # Back off linearly with the attempt number.
                    sleep(initial_sleep_seconds * float(attempt))
    return _wrapped
def get_domain_from_brkt_env(brkt_env):
    """Return the domain string from the api_host in the brkt_env. """
    api_host = brkt_env.api_host
    if not api_host:
        raise ValidationError('api_host endpoint not in brkt_env: %s' %
                              brkt_env)
    # Consider the domain to be everything after the first '.' in
    # the api_host.
    prefix_and_domain = api_host.split('.', 1)
    return prefix_and_domain[1]
def make_nonce():
    """Returns a 32bit nonce in hex encoding"""
    # The first dash-separated group of a UUID4 string is its first
    # 8 hex digits, i.e. 32 random bits.
    return uuid.uuid4().hex[:8]
def validate_ip_address(ip_addr):
    """Return True when ip_addr is a dotted-quad IPv4 address, else False.

    Any error while inspecting the value (e.g. a non-string argument)
    also yields False.
    """
    try:
        octets = ip_addr.split('.')
        if len(octets) != 4:
            return False
        for octet in octets:
            # isdigit() rejects signs and empty strings; the range check
            # rejects values outside 0-255.
            if not octet.isdigit():
                return False
            if int(octet) > 255 or int(octet) < 0:
                return False
        return True
    except (AttributeError, TypeError, ValueError):
        # Narrowed from a bare `except:`, which would also have swallowed
        # KeyboardInterrupt and SystemExit.
        return False
def validate_dns_name_ip_address(hostname):
    """ Verifies that the input hostname is indeed a valid
    host name or ip address

    :return True if valid, returns False otherwise
    """
    # Reject the empty string up front; hostname[-1] below would
    # otherwise raise IndexError.
    if not hostname:
        return False
    # ensure length does not exceed 255 characters
    if len(hostname) > 255:
        return False
    # remove the last dot from the end
    if hostname[-1] == ".":
        hostname = hostname[:-1]
    # Each label: 1-63 letters/digits/hyphens, not starting or ending with a
    # hyphen. Raw string avoids the invalid "\d" escape in a plain literal.
    valid = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
    return all(valid.match(x) for x in hostname.split("."))
def append_suffix(name, suffix, max_length=None):
    """ Append the suffix to the given name. If the appended length exceeds
    max_length, truncate the name to make room for the suffix.

    :return: The possibly truncated name with the suffix appended
    """
    if not suffix:
        return name
    if max_length:
        # Leave exactly enough room for the suffix.
        name = name[:max_length - len(suffix)]
    return name + suffix
def urlsafe_b64encode(content):
    """ Encode the given content as URL-safe base64 and remove padding. """
    encoded = base64.urlsafe_b64encode(content)
    return encoded.replace(b'=', b'')
def urlsafe_b64decode(base64_string):
    """ Decode the given base64 string, generated by urlsafe_b64encode().
    """
    # Reinstate removed padding: base64 input length must be a multiple of 4.
    remainder = len(base64_string) % 4
    if remainder:
        base64_string = base64_string + b'=' * (4 - remainder)
    return base64.urlsafe_b64decode(base64_string)
def parse_name_value(name_value):
    """ Parse a string in NAME=VALUE format.

    :return: a tuple of name, value
    :raise: ValidationError if name_value is malformed
    """
    match = re.match(r'([^=]+)=(.+)', name_value)
    if match is None:
        raise ValidationError(
            '%s is not in the format NAME=VALUE' % name_value)
    name, value = match.groups()
    return name, value
def render_table_rows(rows, row_prefix=''):
    """ Render the supplied rows as a table: compute the maximum width of
    each column and left-justify every value to that width. Each value must
    be formattable as a string.

    :param rows a list of lists that represent the rows that are to be
        rendered.
    :param row_prefix an optional string that will be prepended to each
        row after it has been rendered.
    :return the rows rendered as a string.
    """
    if not rows:
        return ''
    # Column width = widest cell seen in that column.
    widths = [0] * len(rows[0])
    for row in rows:
        for idx, cell in enumerate(row):
            if len(cell) > widths[idx]:
                widths[idx] = len(cell)
    fmt = " ".join('{:' + str(w) + '}' for w in widths)
    lines = [row_prefix + fmt.format(*row) for row in rows]
    return "\n".join(lines)
def parse_endpoint(endpoint):
    """Parse a <host>[:<port>] string into its constituent parts.

    :param endpoint a string of the form <host>[:<port>]
    :return a tuple of (host, port). port is None if not specified.
    :raises ValidationError if an invalid string is supplied.
    """
    parts = endpoint.split(':')
    if len(parts) > 2:
        raise ValidationError(endpoint + ' must be in the form host[:port]')
    host = parts[0]
    port = None
    if len(parts) == 2:
        try:
            port = int(parts[1])
        except ValueError:
            raise ValidationError('Invalid port: %s' % parts[1])
    if not validate_dns_name_ip_address(host):
        raise ValidationError('Invalid hostname: ' + host)
    return host, port
def write_to_file_or_stdout(content, path=None):
    """ Write content to either the given path, or stdout if path is None.

    :raise ValidationError if the file can't be written
    """
    if not path:
        # print() works as a function on both Python 2 and 3 for a single
        # argument; the original `print content` statement is a syntax
        # error on Python 3.
        print(content)
        return
    try:
        with open(path, 'w') as f:
            f.write(content)
    except IOError as e:
        raise ValidationError('Unable to write to %s: %s' % (path, e))
def pretty_print_json(d, indent=4):
    """ Format the given dictionary as a JSON string.
    """
    separators = (',', ': ')
    return json.dumps(d, sort_keys=True, indent=indent, separators=separators)
def timestamp_to_datetime(ts):
    """ Convert a Unix timestamp to a datetime with timezone set to UTC. """
    utc = iso8601.UTC
    return datetime.fromtimestamp(ts, tz=utc)
def datetime_to_timestamp(dt):
    """ Convert a datetime to a Unix timestamp in seconds. """
    epoch = timestamp_to_datetime(0)
    delta = dt - epoch
    return delta.total_seconds()
def parse_duration(duration_string):
    """ Return a timedelta that represents the given string.
    :param duration_string a string in the format N[dhms].
    :return a timedelta object
    :raise ValidationError if the string is malformed
    """
    # Raw string: the original '\d' in a plain string is an invalid
    # escape sequence (DeprecationWarning, SyntaxWarning on 3.12+).
    if re.match(r'\d+[smhd]$', duration_string):
        dur = int(duration_string[:-1])
        unit = duration_string[-1]
        # Map each unit suffix to the timedelta keyword it scales.
        kwarg = {'s': 'seconds', 'm': 'minutes',
                 'h': 'hours', 'd': 'days'}[unit]
        return timedelta(**{kwarg: dur})
    raise ValidationError(
        'Duration must be formatted as N[dhms]: ' + duration_string)
def parse_timestamp(ts_string):
    """ Return a datetime that represents the given timestamp
    string. The string can be a Unix timestamp in seconds, an ISO 8601
    timestamp, or a time duration formatted as N[dhms].
    Durations and timestamps are relative to / compared with the current
    time; values in the past are rejected.
    :return a datetime object
    :raise ValidationError if ts_string is malformed
    """
    now = int(time.time())
    dt_now = timestamp_to_datetime(now)
    # Parse duration.
    try:
        return dt_now + parse_duration(ts_string)
    except ValidationError:
        pass
    # Parse integer timestamp. Raw string avoids the invalid escape
    # sequences ('\d', '\.') the original non-raw pattern contained.
    m = re.match(r'\d+(\.\d+)?$', ts_string)
    if m:
        t = float(ts_string)
        if t < now:
            raise ValidationError(
                '%s is earlier than the current timestamp (%s).' % (
                    ts_string, now))
        return timestamp_to_datetime(t)
    # Parse ISO 8601 timestamp.
    try:
        dt = iso8601.parse_date(ts_string)
    except iso8601.ParseError:
        raise ValidationError(
            'Timestamp "%s" must be a duration '
            '(e.g. 45s, 30m, 6h, 5d).' % ts_string
        )
    if dt < dt_now:
        raise ValidationError(
            '%s is earlier than the current timestamp (%s).' % (
                ts_string, dt_now))
    return dt
| |
import logging
from share.normalize import ctx, tools
from share.normalize.parsers import Parser
logger = logging.getLogger(__name__)
# Datacite contributorType values that always describe an individual person.
PEOPLE_TYPES = (
    'ContactPerson',
    'DataCurator',
    'Editor',
    'ProjectLeader',
    'ProjectManager',
    'ProjectMember',
    'RelatedPerson',
    'Researcher',
    'Supervisor',
    'WorkPackageLeader'
)
# contributorType values that never describe a person (organizations only).
NOT_PEOPLE_TYPES = (
    'Distributor',
    'HostingInstitution',
    'RegistrationAgency',
    'RegistrationAuthority',
    'ResearchGroup'
)
# Other ambiguous types
# 'DataCollector',
# 'DataManager',
# 'Producer',
# 'RightsHolder',
# 'Sponsor',
# 'Other'
def try_contributor_type(value, target_list_types):
    """Return *value* when its '@contributorType' is one of the given
    types; return None when the key is absent or the type doesn't match.
    """
    try:
        contributor_type = value['@contributorType']
    except KeyError:
        return None
    if contributor_type in target_list_types:
        return value
    return None
def get_contributors(options, contrib_type):
    """
    Return the entries in *options* whose contributor type is in *contrib_type*.
    """
    checked = (try_contributor_type(option, contrib_type) for option in options)
    return [match for match in checked if match]
def force_text(data):
    """Coerce *data* to a plain string.

    Accepts a string, None (-> ''), a dict carrying a '#text' key, or a
    list whose first text-bearing element wins; raises Exception otherwise.
    """
    if data is None:
        return ''
    if isinstance(data, str):
        return data
    if isinstance(data, dict):
        try:
            return data['#text']
        except KeyError:
            raise Exception('#text is not in {}'.format(data))
    if isinstance(data, list):
        for element in data:
            try:
                return force_text(element)
            except Exception:
                continue
        raise Exception('No value in list {} is a string.'.format(data))
    raise Exception('{} is not a string or a dictionary.'.format(data))
def get_agent_type(agent, person=False):
    """
    Returns agent type based on contributor type.
    """
    # NOTE(review): agent appears to be a chainable context object whose
    # attribute access raises KeyError when the key is missing — that is
    # why plain attribute reads are wrapped in try/except below.
    is_not_person = try_contributor_type(agent, NOT_PEOPLE_TYPES)
    is_person = try_contributor_type(agent, PEOPLE_TYPES)
    try:
        agent_name = agent.creatorName
    except KeyError:
        agent_name = agent.contributorName
    if person and is_person:
        return agent_name
    elif not person and is_not_person:
        return agent_name
    # break OneOf option
    raise KeyError()
# Maps each Datacite relationType onto one normalized relation name;
# inverse pairs collapse onto a single name (e.g. 'IsCitedBy' -> 'Cites').
# An empty string means there is no specific normalized relation.
RELATION_MAP = {
    'IsCitedBy': 'Cites',
    'Cites': 'Cites',
    'IsSupplementedBy': 'IsSupplementTo',
    'IsSupplementTo': 'IsSupplementTo',
    'IsContinuedBy': 'Extends',
    'Continues': 'Extends',
    'IsNewVersionOf': '',
    'IsPreviousVersionOf': '',
    'References': 'References',
    'IsReferencedBy': 'References',
    'IsPartOf': 'IsPartOf',
    'HasPart': 'IsPartOf',
    'IsDocumentedBy': 'Documents',
    'Documents': 'Documents',
    'IsCompiledBy': 'Compiles',
    'Compiles': 'Compiles',
    'IsVariantFormOf': '',
    'IsOriginalFormOf': '',
    'IsIdenticalTo': 'IsIdenticalTo',
    'IsReviewedBy': 'Reviews',
    'Reviews': 'Reviews',
    'IsDerivedFrom': 'IsDerivedFrom',
    'IsSourceOf': 'IsDerivedFrom',
    'IsMetadataFor': '',
    'HasMetadata': '',
}
# relationTypes stated from the related work toward this one.
INVERSE_RELATIONS = (
    'IsCitedBy',
    'IsSupplementedBy',
    'IsContinuedBy',
    'IsNewVersionOf',
    'IsReferencedBy',
    'IsPartOf',
    'IsDocumentedBy',
    'IsCompiledBy',
    'IsVariantFormOf',
    'IsReviewedBy',
    'IsDerivedFrom',
    'IsMetadataFor'
)
# relationTypes stated from this work toward the related one.
RELATIONS = (
    'Cites',
    'IsSupplementTo',
    'Continues',
    'References',
    'IsPreviousVersionOf',
    'HasPart',
    'Documents',
    'Compiles',
    'IsOriginalFormOf',
    'Reviews',
    'IsSourceOf',
    'HasMetadata',
    'IsIdenticalTo'
)
def get_related_works(options, inverse):
    """Filter related-identifier entries by relation direction.

    Entries with no usable '#text' (missing or the literal 'null') are
    dropped. On first sight of an entry its text is expanded in place
    (PMIDs become full PubMed URLs) and marked '@preprocessed'.
    """
    results = []
    for option in options:
        text = option.get('#text')
        if not text or text.lower() == 'null':
            continue
        if not option.get('@preprocessed'):
            option['@preprocessed'] = True
            template = {
                'PMID': 'http://www.ncbi.nlm.nih.gov/pubmed/{}'
            }.get(option.get('@relatedIdentifierType'), '{}')
            option['#text'] = template.format(text)
        relation = option['@relationType']
        wanted = INVERSE_RELATIONS if inverse else RELATIONS
        if relation in wanted:
            results.append(option)
    return results
def get_relation_type(relation_type):
    """Map a Datacite relation type to its normalized name, falling back
    to the generic 'WorkRelation' when no specific mapping exists.
    """
    return RELATION_MAP[relation_type] or 'WorkRelation'
class AgentIdentifier(Parser):
    # The context value itself is the identifier URI.
    uri = ctx
class AffiliatedAgent(Parser):
    # Affiliations are bare names; guess the type, defaulting to organization.
    schema = tools.GuessAgentType(ctx, default='organization')
    name = ctx
class IsAffiliatedWith(Parser):
    related = tools.Delegate(AffiliatedAgent, ctx)
class ContributorAgent(Parser):
    # First try to classify from the contributorType (organizations only;
    # get_agent_type raises KeyError for person types, breaking the OneOf);
    # otherwise guess from the creator/contributor name itself.
    schema = tools.OneOf(
        tools.GuessAgentType(
            tools.RunPython(
                get_agent_type,
                ctx,
                person=False
            ),
            default='organization'
        ),
        tools.GuessAgentType(
            tools.OneOf(
                ctx.creatorName,
                ctx.contributorName
            )
        )
    )
    name = tools.OneOf(ctx.creatorName, ctx.contributorName)
    identifiers = tools.Map(
        tools.Delegate(AgentIdentifier),
        tools.Try(
            tools.IRI(
                tools.RunPython(
                    force_text,
                    ctx.nameIdentifier
                )
            ),
            exceptions=(ValueError,)
        )
    )
    related_agents = tools.Map(tools.Delegate(IsAffiliatedWith), tools.Concat(tools.Try(
        tools.Filter(lambda x: bool(x), tools.RunPython(force_text, ctx.affiliation))
    )))
    class Extra:
        name_identifier = tools.Try(ctx.nameIdentifier)
        name_identifier_scheme = tools.Try(ctx.nameIdentifier['@nameIdentifierScheme'])
        name_identifier_scheme_uri = tools.Try(ctx.nameIdentifier['@schemeURI'])
        contributor_type = tools.Try(ctx.contributorType)
        # v.4 new givenName and familyName properties
        given_name = tools.OneOf(
            ctx.creatorName['@givenName'],
            ctx.contributorName['@givenName'],
            tools.Static(None)
        )
        family_name = tools.OneOf(
            ctx.creatorName['@familyName'],
            ctx.contributorName['@familyName'],
            tools.Static(None)
        )
class FunderAgent(Parser):
    # Funders come either from v.4 fundingReference (funderName) or from
    # v.3 contributor entries with type 'Funder' (contributorName).
    schema = tools.GuessAgentType(
        tools.OneOf(ctx.funderName, ctx.contributorName),
        default='organization'
    )
    name = tools.OneOf(ctx.funderName, ctx.contributorName)
    identifiers = tools.Map(
        tools.Delegate(AgentIdentifier),
        tools.Try(
            tools.IRI(
                tools.OneOf(
                    ctx.funderIdentifier,
                    tools.RunPython(
                        force_text,
                        ctx.nameIdentifier
                    ),
                    tools.Static(None)
                )
            ),
            exceptions=(ValueError,)
        )
    )
    class Extra:
        name_identifier = tools.Try(ctx.nameIdentifier)
        name_identifier_scheme = tools.Try(ctx.nameIdentifier['@nameIdentifierScheme'])
        name_identifier_scheme_uri = tools.Try(ctx.nameIdentifier['@schemeURI'])
        funder_identifier = tools.Try(ctx.funderIdentifier)
        funder_identifier_type = tools.Try(ctx.funderIdentifierType)
        contributor_type = tools.Try(ctx.contributorType)
class HostAgent(Parser):
    # Built from contributor entries with type 'HostingInstitution'.
    schema = tools.GuessAgentType(ctx.contributorName, default='organization')
    name = tools.Try(ctx.contributorName)
    identifiers = tools.Map(
        tools.Delegate(AgentIdentifier),
        tools.Try(
            tools.IRI(
                tools.RunPython(
                    force_text,
                    ctx.nameIdentifier
                )
            ),
            exceptions=(ValueError,)
        )
    )
    class Extra:
        name_identifier = tools.Try(ctx.nameIdentifier)
        name_identifier_scheme = tools.Try(ctx.nameIdentifier['@nameIdentifierScheme'])
        name_identifier_scheme_uri = tools.Try(ctx.nameIdentifier['@schemeURI'])
        contributor_type = tools.Try(ctx.contributorType)
class PublisherAgent(Parser):
    schema = tools.GuessAgentType(ctx, default='organization')
    name = ctx
class ContributorRelation(Parser):
    schema = 'Contributor'
    agent = tools.Delegate(ContributorAgent, ctx)
    cited_as = tools.OneOf(ctx.creatorName, ctx.contributorName)
class CreatorRelation(ContributorRelation):
    # Creators are ordered contributors; 'index' preserves list position.
    schema = 'Creator'
    order_cited = ctx('index')
class HostRelation(Parser):
    schema = 'Host'
    agent = tools.Delegate(HostAgent, ctx)
class PublisherRelation(Parser):
    schema = 'Publisher'
    agent = tools.Delegate(PublisherAgent, ctx)
class Award(Parser):
    name = tools.Try(ctx.awardTitle)
    description = tools.Try(ctx.awardNumber)
    uri = tools.Try(ctx.awardURI)
class ThroughAwards(Parser):
    award = tools.Delegate(Award, ctx)
class FunderRelation(Parser):
    schema = 'Funder'
    agent = tools.Delegate(FunderAgent, ctx)
    awards = tools.Map(tools.Delegate(ThroughAwards), tools.Try(tools.RunPython('get_award', ctx)))
    def get_award(self, obj):
        # Bare lookup raises KeyError when 'awardURI' is missing, which
        # breaks the surrounding tools.Try — i.e. "no award" on purpose.
        obj['awardURI']
        return obj
class Tag(Parser):
    name = ctx
class ThroughTags(Parser):
    tag = tools.Delegate(Tag, ctx)
class WorkIdentifier(Parser):
    # The primary identifier is a DOI.
    uri = tools.DOI(tools.RunPython(
        force_text,
        ctx
    ))
    class Extra:
        identifier_type = tools.Try(ctx['@identifierType'])
class RelatedWorkIdentifier(Parser):
    # Related identifiers may be any IRI, not just DOIs.
    schema = 'WorkIdentifier'
    uri = tools.IRI(tools.RunPython(
        force_text,
        ctx
    ))
    class Extra:
        related_identifier_type = ctx['@relatedIdentifierType']
        relation_type = tools.Try(ctx['@relationType'])
        related_metadata_scheme = tools.Try(ctx['@relatedMetadataScheme'])
        scheme_URI = tools.Try(ctx['@schemeURI'])
        scheme_type = tools.Try(ctx['@schemeType'])
class RelatedWork(Parser):
    schema = 'CreativeWork'
    identifiers = tools.Map(tools.Delegate(RelatedWorkIdentifier), ctx)
class WorkRelation(Parser):
    # Outgoing relation: this work -> related work.
    schema = tools.RunPython(get_relation_type, ctx['@relationType'])
    related = tools.Delegate(RelatedWork, ctx)
class InverseWorkRelation(Parser):
    # Incoming relation: related work -> this work.
    schema = tools.RunPython(get_relation_type, ctx['@relationType'])
    subject = tools.Delegate(RelatedWork, ctx)
class Subject(Parser):
    name = ctx
class ThroughSubjects(Parser):
    subject = tools.Delegate(Subject, ctx)
class CreativeWork(Parser):
    '''
    Documentation for Datacite's metadata:
    https://schema.labs.datacite.org/meta/kernel-4.0/doc/DataCite-MetadataKernel_v4.0.pdf
    '''
    def get_schema(self, type):
        # Map a Datacite resourceTypeGeneral (lower-cased) onto a SHARE
        # work type; anything unmapped falls back to 'CreativeWork'.
        return {
            'dataset': 'DataSet',
            'software': 'Software',
            'text/book': 'Book',
            'text/book chapter': 'Book',
            'text/book prospectus': 'Book',
            'text/book series': 'Book',
            'text/conference abstract': 'ConferencePaper',
            'text/conference paper': 'ConferencePaper',
            'text/conference poster': 'Poster',
            'text/dissertation': 'Dissertation',
            'text/edited book': 'Book',
            'text/journal article': 'Article',
            'text/journal issue': 'Article',
            'text/patent': 'Patent',
            'text/report': 'Report',
            'text/supervised student publication': 'Thesis',
            'text/working paper': 'WorkingPaper'
            # 'audiovisual': '',
            # 'collection': '',
            # 'event': '',
            # 'image': '',
            # 'interactiveresource': '',
            # 'model': '',
            # 'physicalobject': '',
            # 'service': '',
            # 'sound': '',
            # 'text15': '',
            # 'workflow': '',
            # 'text/book review': '',
            # 'text/conference program': '',
            # 'text/dictionary entry': '',
            # 'text/disclosure': '',
            # 'text/encyclopedia entry': '',
            # 'text/Funding submission': '',
            # 'text/license': '',
            # 'text/magazine article': '',
            # 'text/manual': '',
            # 'text/newsletter article': '',
            # 'text/newspaper article': '',
            # 'text/online resource': '',
            # 'text/registered copyright': '',
            # 'text/research tool': '',
            # 'text/tenure-promotion': '',
            # 'text/test': '',
            # 'text/trademark': '',
            # 'text/translation': '',
            # 'text/university academic unit': '',
            # 'text/website': '',
        }.get(type.lower()) or 'CreativeWork'
    schema = tools.RunPython(
        'get_schema', tools.Try(
            ctx.record.metadata['oai_datacite'].payload.resource.resourceType['@resourceTypeGeneral'],
            default='CreativeWork'
        )
    )
    title = tools.RunPython(
        force_text,
        tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.titles.title)
    )
    # Only the first description entry is used.
    description = tools.RunPython(
        force_text,
        tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.descriptions.description[0])
    )
    rights = tools.Try(
        tools.Join(
            tools.RunPython(
                'text_list',
                tools.Concat(ctx.record.metadata['oai_datacite'].payload.resource.rightsList.rights)
            )
        )
    )
    language = tools.ParseLanguage(tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.language))
    related_agents = tools.Concat(
        tools.Map(
            tools.Delegate(CreatorRelation),
            tools.Concat(tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.creators.creator))
        ),
        tools.Map(
            tools.Delegate(ContributorRelation),
            tools.Concat(tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.contributors.contributor))
        ),
        tools.Map(tools.Delegate(
            PublisherRelation),
            tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.publisher)
        ),
        tools.Map(tools.Delegate(HostRelation), tools.RunPython(
            get_contributors,
            tools.Concat(tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.contributors.contributor)),
            ['HostingInstitution']
        )),
        # v.3 Funder is a contributor type
        # v.4 FundingReference replaces funder contributor type
        tools.Map(tools.Delegate(FunderRelation), tools.RunPython(
            get_contributors,
            tools.Concat(tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.contributors.contributor)),
            ['Funder']
        )),
        tools.Map(
            tools.Delegate(FunderRelation),
            tools.Concat(tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.fundingReference))
        )
    )
    # v.4 New, free text, 'subjectScheme' attribute on subject
    subjects = tools.Map(
        tools.Delegate(ThroughSubjects),
        tools.Subjects(
            tools.RunPython(
                'text_list',
                tools.Concat(
                    tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.subjects.subject),
                )
            )
        )
    )
    tags = tools.Map(
        tools.Delegate(ThroughTags),
        tools.RunPython(
            force_text,
            tools.Concat(
                tools.Maybe(tools.Maybe(ctx.record, 'metadata')['oai_datacite'], 'type'),
                tools.RunPython(
                    'text_list',
                    (tools.Concat(tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.subjects.subject)))
                ),
                tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.formats.format),
                tools.Try(ctx.record.metadata['oai_datacite'].datacentreSymbol),
                tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.resourceType['#text']),
                tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.resourceType['@resourceTypeGeneral']),
                tools.Maybe(ctx.record.header, 'setSpec'),
                tools.Maybe(ctx.record.header, '@status')
            )
        )
    )
    identifiers = tools.Concat(
        tools.Map(
            tools.Delegate(WorkIdentifier),
            tools.Concat(
                tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.identifier)
            )
        ),
        tools.Map(
            tools.Delegate(WorkIdentifier),
            tools.Concat(
                tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.alternateIdentifiers.alternateidentifier)
            )
        )
    )
    # The same relatedIdentifier list is split into outgoing and incoming
    # relations by the *inverse* flag.
    related_works = tools.Concat(
        tools.Map(
            tools.Delegate(WorkRelation),
            tools.RunPython(
                get_related_works,
                tools.Concat(
                    tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.relatedIdentifiers.relatedIdentifier)
                ),
                False
            )
        ),
        tools.Map(
            tools.Delegate(InverseWorkRelation),
            tools.RunPython(
                get_related_works,
                tools.Concat(
                    tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.relatedIdentifiers.relatedIdentifier)
                ),
                True
            )
        )
    )
    date_updated = tools.ParseDate(tools.Try(ctx.record.header.datestamp))
    date_published = tools.ParseDate(tools.Try(tools.RunPython('get_date_type', tools.Concat(ctx.record.metadata['oai_datacite'].payload.resource.dates.date), 'Issued')))
    free_to_read_type = tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.rightsList.rights['@rightsURI'])
    free_to_read_date = tools.ParseDate(tools.Try(tools.RunPython('get_date_type', tools.Concat(ctx.record.metadata['oai_datacite'].payload.resource.dates.date), 'Available')))
    is_deleted = tools.RunPython('check_status', tools.Try(ctx.record.header['@status']))
    class Extra:
        """
        Fields that are combined in the base parser are relisted as singular elements that match
        their original entry to preserve raw data structure.
        """
        status = tools.Try(ctx.record.header['@status'])
        datestamp = tools.ParseDate(ctx.record.header.datestamp)
        set_spec = tools.Try(ctx.record.header.setSpec)
        is_reference_quality = tools.Try(ctx.record.metadata['oai_datacite'].isReferenceQuality)
        schema_version = tools.Try(ctx.record.metadata['oai_datacite'].schemaVersion)
        datacentre_symbol = tools.Try(ctx.record.metadata['oai_datacite'].datacentreSymbol)
        identifiers = tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.identifier)
        alternate_identifiers = tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.alternateIdentifiers.alternateidentifier)
        titles = tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.titles.title)
        publisher = tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.publisher)
        publication_year = tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.publicationYear)
        subject = tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.subjects.subject)
        resourceType = tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.resourceType)
        sizes = tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.size)
        format_type = tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.formats.format)
        version = tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.version)
        rights = tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.rights)
        rightsList = tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.rightsList)
        related_identifiers = tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.relatedIdentifiers.relatedIdentifier)
        description = tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.descriptions)
        dates = tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.dates.date)
        contributors = tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.contributors.contributor)
        creators = tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.creators)
        # v.4 new property geoLocationPolygon, in addition to geoLocationPoint and geoLocationBox
        geolocations = tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.geoLocations)
        funding_reference = tools.Try(ctx.record.metadata['oai_datacite'].payload.resource.fundingReference)
    def check_status(self, status):
        # OAI-PMH records with header status 'deleted' are tombstones.
        if status == 'deleted':
            return True
        return False
    def get_date_type(self, date_obj, date_type):
        # Return the '#text' of the last entry whose @dateType matches;
        # '0000' is treated as no date.
        date = None
        for obj in date_obj:
            if obj['@dateType'] == date_type:
                date = obj['#text']
        if date and date != '0000':
            return date
        # raise KeyError to break TryLink
        raise KeyError()
    def text_list(self, data):
        # Flatten a list of strings / {'#text': ...} dicts into a list of
        # strings, logging (not raising) on unusable entries.
        text_list = []
        if isinstance(data, list):
            for item in data:
                if isinstance(item, dict):
                    if '#text' in item:
                        text_list.append(item['#text'])
                        continue
                elif isinstance(item, str):
                    text_list.append(item)
                    continue
                logger.warning('#text is not in {} and it is not a string'.format(item))
            return text_list
        else:
            raise Exception('{} is not a list.'.format(data))
| |
import numpy as np
import math
from pylab import *
from palettable.wesanderson import Zissou_5 as wsZ
import matplotlib.ticker as mtick
from scipy.interpolate import interp1d
from scipy.interpolate import griddata
#read JP and TH files
def read_JP_files(fname):
    """Read a space-separated JP data file (``#`` comments ignored) and
    return its first six columns as separate arrays."""
    table = np.genfromtxt(fname, delimiter=" ", comments='#')
    return tuple(table[:, col] for col in range(6))
#Read JN files
def read_JN_files(fname):
    """Read a comma-separated JN data file and return its nine columns
    as separate arrays."""
    table = np.genfromtxt(fname, delimiter=",")
    return tuple(table[:, col] for col in range(9))
#Read csvs from webdigitized
def read_csv_files(fname):
    """Read a two-column CSV (e.g. web-digitized data); return (x, y)."""
    table = np.genfromtxt(fname, delimiter=",")
    return table[:, 0], table[:, 1]
## Plot
# Figure layout: one row of (pulse-profile + residual) panel pairs per input
# file pair (the i30 / i60 / i90 cases selected by j below).
fig = figure(figsize=(9,10), dpi=80)
rc('font', family='serif')
rc('xtick', labelsize='xx-small')
rc('ytick', labelsize='xx-small')
gs = GridSpec(400, 4)
gs.update(wspace = 0.34)
#gs.update(hspace = 0.4)
lsize = 7.0
#phase limits
xmin = -0.04
xmax = 1.04
#error window limits
eymin = -5.0
eymax = 5.0
#figure shape parameters
panelh = 45
epanelh = 25
skiph = 30
mfiglim = 0
#path to files
path_JP = ""
#labels size
tsize = 10.0
nu = '600'
#nu = '400'
#fig.text(0.5, 0.92, 'Sphere', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.72, 'Obl 1Hz', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.52, 'Obl+Dopp 600Hz $\Delta t=0$', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.32, 'Obl+Dopp 600Hz', ha='center', va='center', size=tsize)
#fig.text(0.5, 0.12, 'Phase',ha='center', va='center', size=lsize)
for j in range(3):
    #for j in [2]:
    if j == 0:
        fname = path_JP + 'HT_30.csv'
        fname2 = path_JP + 'polar_f600_bb_r10_m1.8_d40_i30_x10.csv'
    if j == 1:
        fname = path_JP + 'HT_60.csv'
        fname2 = path_JP + 'polar_f600_bb_r10_m1.8_d40_i60_x10.csv'
    if j == 2:
        fname = path_JP + 'HT_90.csv'
        fname2 = path_JP + 'polar_f600_bb_r15_m1.8_d40_i90_x10.csv'
    #read JP data
    phase, N6kev = read_csv_files(fname)
    #phase, N2kev, N6kev, N12kev, Nbol, Fbol, F2kev, F6kev, F12kev = read_JN_files(fname)
    #read JN data
    phase2, N2kev2, N6kev2, N12kev2, Nbol2, Fbol2, F2kev2, F6kev2, F12kev2 = read_JN_files(fname2)
    phasetmp = phase2
    # NOTE(review): only i == 1 (the 6 keV column) is plotted here; the
    # other i branches reference arrays (N2kev, N12kev, Nbol) that are not
    # read in this version of the script — confirm before widening the loop.
    for i in [1]:
        #for i in range(4):
        #frame for the main pulse profile fig
        ax1 = subplot(gs[mfiglim:mfiglim+panelh, i])
        ax1.minorticks_on()
        ax1.set_xticklabels([])
        ax1.set_xlim(xmin, xmax)
        #ax1.yaxis.major.formatter.set_powerlimits((0,0))
        formatter = ScalarFormatter(useMathText=True)
        formatter.set_scientific(True)
        formatter.set_powerlimits((0,0))
        ax1.yaxis.set_major_formatter(formatter)
        #ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
        #xmft = ScalarFormatter()
        #xmft.set_powerlimits((-2,2))
        #ax1.xaxis.set_major_formatter(xmft)
        if i == 0:
            ax1.set_ylabel('$N$ (2 keV)\n[ph cm$^{-2}$ s$^{-1}$ keV$^{-1}$]',size=lsize)
            flux = N2kev
            flux2 = N2kev2
        elif i == 1:
            ax1.set_ylabel('$N$ (6 keV)',size=lsize)
            flux = N6kev
            flux2 = N6kev2
        elif i == 2:
            ax1.set_ylabel('$N$ (12 keV)',size=lsize)
            flux = N12kev
            flux2 = N12kev2
        elif i == 3:
            ax1.set_ylabel('Bolometric [ph cm$^{-2}$ s$^{-1}$]',size=lsize)
            flux = Nbol
            flux2 = Nbol2
            #flux = Fbol
            #flux2 = Fbol2
        #flux /= np.max(flux)
        #flux2 /= np.max(flux2)
        print np.max(flux2)
        #flux2 *= 1000.0
        # ad-hoc normalization factor to bring flux2 onto the JP scale
        flux2 *= 6.0e11
        # drop NaN samples from the JN curve before interpolation
        indxs = []
        for q in range(len(flux2)):
            if not (np.isnan(flux2[q])):
            #if not (flux2[q] == flux2[q]):
                indxs.append(q)
        phase2 = phasetmp[indxs]
        flux2 = flux2[indxs]
        #JP data
        ax1.plot(phase, flux, 'k-')
        if i == 1:
            # brute-force search for the phase shift minimizing the summed
            # absolute relative error between the two curves
            pshft = 0.0
            merr = 1.0e6
            for pshift in np.linspace(-0.1, 0.1, 100):
                fluxi2 = griddata(phase2 + pshift, flux2, (phase), method='cubic', fill_value=0.0)
                err = (fluxi2/flux - 1)*100
                serr = 0.0
                for ijk in range(len(err)):
                    if fluxi2[ijk] != 0:
                        serr += np.abs(err[ijk])
                if serr < merr:
                    merr = serr
                    pshft = pshift
            print "min shift:", pshft
        #arbitrary phase shifts
        #flux2 = flux2 * 0.99
        phase2 = phase2 + pshft
        # NOTE(review): the j == 3 branch is unreachable (j runs 0..2)
        if j == 0:
            phase2 = phase2 + 0.00 #- pshft
        elif j == 1:
            phase2 = phase2 + 0.00 #- pshft
        elif j == 2:
            phase2 = phase2 + 0.035 - pshft
        elif j == 3:
            phase2 = phase2 + 0.05 - pshft
        #phase = phase - 0.01
        #JN data
        #ax1.plot(phase2, flux2, 'r:')
        ax1.plot(phase2, flux2, 'r--')
        #ax1.plot(phase2, flux2, 'r-', linewidth=0.3)
        #frame for the error panel
        ax2 = subplot(gs[(mfiglim+panelh):(mfiglim+panelh+epanelh), i])
        ax2.minorticks_on()
        ax2.set_xlim(xmin, xmax)
        ax2.set_ylim(eymin, eymax)
        if i == 0:
            ax2.set_ylabel('$\Delta$ %',size=lsize)
        #if j != 3:
        #    ax2.set_xticklabels([])
        if j == 2:
            ax2.set_xlabel('Phase', size=lsize)
        ax2.plot([xmin, xmax], [0.0, 0.0], 'r--', linewidth=0.3)
        #interpolate error
        #fluxi = interp1d(phase, flux, kind='linear')
        #fluxi2 = griddata(phase2, flux2, (phase), method='cubic')
        fluxi2 = griddata(phase2, flux2, (phase), method='linear')
        #fluxi = interp1d(phase, flux, kind='cubic')
        err = (flux/fluxi2 - 1)*100
        #flux2i = interp1d(phase2, flux2, kind='cubic', fill_value='extrapolate')
        #err = (flux/flux2i(phase) - 1)*100
        #for q in range(len(phase)):
        #    print phase[q], err[q], fluxi2[q], flux[q]
        ax2.plot(phase, err, 'k-', linewidth = 0.4)
        #optional errors for range of phase shifts
        for pshift in np.linspace(-0.01, 0.01, 10):
            fluxi2 = griddata(phase2+pshift, flux2, (phase), method='cubic')
            err = (flux/fluxi2 - 1)*100
            ax2.plot(phase, err, 'b-', linewidth = 0.4)
    mfiglim += panelh+epanelh+skiph
savefig('fig4.pdf', bbox_inches='tight')
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import time
import datetime
from twisted.python import log
from twisted.internet import defer
from .interface import IRouterContainer
from txtorcon.util import find_keywords
# look like "2014-01-25T02:12:14.593772"
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
class Circuit(object):
    """
    Used by :class:`txtorcon.TorState` to represent one of Tor's circuits.
    This is kept up-to-date by the :class`txtorcon.TorState` that owns it, and
    individual circuits can be listened to for updates (or listen to
    every one using :meth:`txtorcon.TorState.add_circuit_listener`)
    :ivar path:
        contains a list of :class:`txtorcon.Router` objects
        representing the path this Circuit takes. Mostly this will be
        3 or 4 routers long. Note that internally Tor uses single-hop
        paths for some things. See also the *purpose*
        instance-variable.
    :ivar streams:
        contains a list of Stream objects representing all streams
        currently attached to this circuit.
    :ivar state:
        contains a string from Tor describing the current state of the
        stream. From control-spec.txt section 4.1.2, these are:
        - NEW: New request to connect
        - NEWRESOLVE: New request to resolve an address
        - REMAP: Address re-mapped to another
        - SENTCONNECT: Sent a connect cell along a circuit
        - SENTRESOLVE: Sent a resolve cell along a circuit
        - SUCCEEDED: Received a reply; stream established
        - FAILED: Stream failed and not retriable
        - CLOSED: Stream closed
        - DETACHED: Detached from circuit; still retriable
    :ivar purpose:
        The reason this circuit was built. Values can currently be one
        of (but see control-spec.txt 4.1.1):
        - GENERAL
        - HS_CLIENT_INTRO
        - HS_CLIENT_REND
        - HS_SERVICE_INTRO
        - HS_SERVICE_REND
        - TESTING
        - CONTROLLER
        For most purposes, you'll want to look at GENERAL circuits only.
    :ivar id:
        The ID of this circuit, a number (or None if unset).
    :ivar is_built:
        A Deferred that will callback() when this Circuit hits BUILT state.
    """
    def __init__(self, routercontainer):
        """
        :param routercontainer: should implement
            :class:`txtorcon.interface.IRouterContainer`.
        """
        self.listeners = []
        self.router_container = IRouterContainer(routercontainer)
        self.torstate = routercontainer
        self.path = []
        self.streams = []
        self.purpose = None
        self.id = None
        self.state = 'UNKNOWN'
        self.build_flags = []
        self.flags = {}
        #: callback()d when this circuit hits BUILT
        self.is_built = defer.Deferred()
        # this is used to hold a Deferred that will callback() when
        # this circuit is being CLOSED or FAILED.
        self._closing_deferred = None
        # caches parsed value for time_created()
        self._time_created = None
    @property
    def time_created(self):
        """
        The datetime this circuit was created, parsed lazily from the
        TIME_CREATED flag (or None if Tor never reported one).
        """
        if self._time_created is not None:
            return self._time_created
        if 'TIME_CREATED' in self.flags:
            # strip off milliseconds
            t = self.flags['TIME_CREATED'].split('.')[0]
            tstruct = time.strptime(t, TIME_FORMAT)
            # Only the first six struct_time fields (year through second)
            # are datetime args; tstruct[6] is tm_wday, which the previous
            # [:7] slice wrongly passed as the microsecond argument.
            self._time_created = datetime.datetime(*tstruct[:6])
        return self._time_created
    def listen(self, listener):
        """Add *listener* for circuit events (no-op when already added)."""
        if listener not in self.listeners:
            self.listeners.append(listener)
    def unlisten(self, listener):
        """Remove a previously-added listener."""
        self.listeners.remove(listener)
    def close(self, **kw):
        """
        This asks Tor to close the underlying circuit object. See
        :meth:`txtorcon.torstate.TorState.close_circuit`
        for details.
        You may pass keyword arguments to take care of any Flags Tor
        accepts for the CLOSECIRCUIT command. Currently, this is only
        "IfUnused". So for example: circ.close(IfUnused=True)
        :return: Deferred which callbacks with this Circuit instance
        ONLY after Tor has confirmed it is gone (not simply that the
        CLOSECIRCUIT command has been queued). This could be a while
        if you included IfUnused.
        """
        self._closing_deferred = defer.Deferred()
        def close_command_is_queued(*args):
            # ignore the command's own result; we resolve only once the
            # circuit is really gone (see maybe_call_closing_deferred).
            return self._closing_deferred
        d = self.torstate.close_circuit(self.id, **kw)
        d.addCallback(close_command_is_queued)
        return self._closing_deferred
    def age(self, now=None):
        """
        Returns an integer which is the difference in seconds from
        'now' to when this circuit was created.
        Returns None if there is no created-time.
        :param now: optional datetime to compare against; defaults to the
            current UTC time. (The previous ``now=datetime.utcnow()``
            default was evaluated once at class-definition time and then
            grew stale for the life of the process.)
        """
        if now is None:
            now = datetime.datetime.utcnow()
        if not self.time_created:
            return None
        return (now - self.time_created).seconds
    def _create_flags(self, kw):
        """
        this clones the kw dict, adding a lower-case version of every
        key (duplicated in stream.py; put in util?)
        """
        flags = {}
        for k in kw.keys():
            flags[k] = kw[k]
            flags[k.lower()] = kw[k]
        return flags
    def update(self, args):
        """
        Process one CIRC event: args is [circuit_id, state, path?, kw...].
        Notifies listeners of new/launched/built/closed/failed transitions.
        """
        if self.id is None:
            self.id = int(args[0])
            [x.circuit_new(self) for x in self.listeners]
        else:
            if int(args[0]) != self.id:
                raise RuntimeError("Update for wrong circuit.")
        self.state = args[1]
        kw = find_keywords(args)
        self.flags = kw
        if 'PURPOSE' in kw:
            self.purpose = kw['PURPOSE']
        if 'BUILD_FLAGS' in kw:
            self.build_flags = kw['BUILD_FLAGS'].split(',')
        if self.state == 'LAUNCHED':
            self.path = []
            [x.circuit_launched(self) for x in self.listeners]
        else:
            if self.state != 'FAILED' and self.state != 'CLOSED':
                if len(args) > 2:
                    self.update_path(args[2].split(','))
        if self.state == 'BUILT':
            [x.circuit_built(self) for x in self.listeners]
            if not self.is_built.called:
                self.is_built.callback(self)
        elif self.state == 'CLOSED':
            if len(self.streams) > 0:
                # FIXME it seems this can/does happen if a remote
                # router crashes or otherwise shuts down a circuit
                # with streams on it still
                log.err(RuntimeError("Circuit is %s but still has %d streams" %
                                     (self.state, len(self.streams))))
            flags = self._create_flags(kw)
            self.maybe_call_closing_deferred()
            [x.circuit_closed(self, **flags) for x in self.listeners]
        elif self.state == 'FAILED':
            if len(self.streams) > 0:
                log.err(RuntimeError("Circuit is %s but still has %d streams" %
                                     (self.state, len(self.streams))))
            flags = self._create_flags(kw)
            self.maybe_call_closing_deferred()
            [x.circuit_failed(self, **flags) for x in self.listeners]
    def maybe_call_closing_deferred(self):
        """
        Used internally to callback on the _closing_deferred if it
        exists.
        """
        if self._closing_deferred:
            self._closing_deferred.callback(self)
            self._closing_deferred = None
    def update_path(self, path):
        """
        There are EXTENDED messages which don't include any routers at
        all, and any of the EXTENDED messages may have some arbitrary
        flags in them. So far, they're all upper-case and none start
        with $ luckily. The routers in the path should all be
        LongName-style router names (this depends on them starting
        with $).
        For further complication, it's possible to extend a circuit to
        a router which isn't in the consensus. nickm via #tor thought
        this might happen in the case of hidden services choosing a
        rendevouz point not in the current consensus.
        """
        oldpath = self.path
        self.path = []
        for p in path:
            if p[0] != '$':
                break
            # this will create a Router if we give it a router
            # LongName that doesn't yet exist
            router = self.router_container.router_from_id(p)
            self.path.append(router)
            if len(self.path) > len(oldpath):
                [x.circuit_extend(self, router) for x in self.listeners]
                oldpath = self.path
    def __str__(self):
        path = ' '.join([x.ip for x in self.path])
        return "<Circuit %d %s [%s] for %s>" % (self.id, self.state, path,
                                                self.purpose)
| |
#Fits an emission line with a Gaussian and returns the amplitude, standard deviation, and continuum line
#Usage: run FitEmission.py 'a6' 6563 to fit the line at rest wavelength 6563 (Ha) for the a6 mask.
#Typing run FitEmission.py 'a6' 'HaNII' will fit all three lines around Ha simultaneously
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
import sys, os, string
import pandas as pd
from astropy.io import fits
from astropy.convolution import convolve, Box1DKernel
from scipy.interpolate import splrep, splev
from scipy.signal import medfilt
from scipy.optimize import curve_fit,nnls
#Location of output data file
dataout = '/Users/blorenz/COSMOS/COSMOSData/lineflux.txt'
viewdataout = '/Users/blorenz/COSMOS/COSMOSData/lineflux_view.txt'
#Folder to save the figures
figout = '/Users/blorenz/COSMOS/COSMOSData/fitEmissionOut/'
#The location with the file for all of our data
ourdatapath = '/Users/blorenz/COSMOS/COSMOSData/all_c_hasinger.txt'
#Where the calibrated spectra are stored
caldatapath = '/Users/blorenz/COSMOS/COSMOSData/flxFitsFileOut/'
#File for all of the emission/absorption features of the galaxy (to mask out other features when fitting)
linedata = '/Users/blorenz/COSMOS/COSMOSData/corFitsFileOut/galaxylines.dat'
#File for the MAD of the difference in flux of duplicates in each line (to flag low S/N lines)
maddatapath = '/Users/blorenz/COSMOS/COSMOSData/linemad.txt'
#Read in the spectral lines for masking
gallines = ascii.read(linedata).to_pandas()
#Remove all absorption lines (keep only rows flagged col2 == 1)
gallines = gallines[gallines.col2==1]
gallines = gallines.reset_index()
#Read in the mad of the lines
maddata = ascii.read(maddatapath).to_pandas()
#Read the datafile (if there is one), then create a blank one to write to:
if os.path.exists(dataout):
    outarr = ascii.read(dataout).to_pandas()
else: outarr = pd.DataFrame()
#Safe element-wise division: X/Y where Y is nonzero, 0 elsewhere
def divz(X,Y):
    """Divide X by Y element-wise, returning 0 wherever Y == 0."""
    safe_denominator = np.where(Y, Y, Y + 1)
    nonzero_mask = np.not_equal(Y, 0)
    return (X / safe_denominator) * nonzero_mask
#Fontsizes for plotting
axisfont = 18
ticksize = 16
titlefont = 24
legendfont = 16
textfont = 16
#Set the letnum (first CLI arg, e.g. 'a6': mask letter + night digit)
letnum = sys.argv[1]
#Read in all of our data
ourdata = ascii.read(ourdatapath).to_pandas()
#Keep only rows from the requested night/mask, e.g. 'feb16_a' for letnum 'a6'
ourdata = ourdata[ourdata.ImageName.str.contains('feb1' + letnum[1] + '_' + letnum[0]) == True]
#Drop objects flagged as unreliable
ourdata = ourdata[ourdata.Unsure == 0]
ourdata = ourdata[ourdata.Bad == 0]
ourdata = ourdata[ourdata.Flag3 == 0]
#ourdata = ourdata[ourdata.Flag1 == 0]
ourdata = ourdata[ourdata.Star == 0]
#Function to make the mask before the gaussian
def getMask(modelspec,sigspec,spectrum):
    """Return the continuum model and noise-based fit weights.

    Pixels where the spectrum is zero or negative get zero weight so
    they cannot influence the fit.
    """
    positive_flux = spectrum > 0
    weights = divz(1, sigspec) * positive_flux
    return modelspec, weights
#Find the objid of every object, and it's corresponding letter number combination
#parsed out of fixed character positions in ImageName -- assumes the
#'flx_OBJID_feb1N_Lbig.fits' naming convention, TODO confirm
#objs[0] - objid
#objs[1] - letter
#objs[2] - number
objs = [(i[4:10],i[17],i[15]) for i in ourdata.ImageName]
#Start two counters to run along the plot (separate pairs for good and flagged fits)
plt1 = 0
plt10 = 0
plt1b = 0
plt10b = 0
#Set the gridsize, so 12 means a 12x12 grid
gridsize = 12
#Start the plot before the loop: fig for good fits, figb for flagged ones
fig,axarr = plt.subplots(gridsize,gridsize,figsize = (150,80))
figb,axarrb = plt.subplots(gridsize,gridsize,figsize = (150,80))
#Loop the fitting over all objects
#for i in range(16,20):
for i in range(len(objs)):
    #Mark the data as good
    fitflag = 0 #Good data
    #Set that we are not looking at the lines around Ha
    HaNII = False
    #Get the redshift
    zcc = ourdata.iloc[i].z_cc
    #Set the location of the data file
    flxfits = caldatapath + 'flx_' + objs[i][0] + '_feb1' + objs[i][2] + '_' + objs[i][1] + 'big.fits'
    #Read in its datafile if it exists
    if os.path.exists(flxfits):
        flxdata = fits.open(flxfits)[0].data
        flxhead = fits.open(flxfits)[0].header
        #Read in the spectrum and model
        spec = flxdata[0]
        noise = flxdata[1] #?
        model = flxdata[3]
        #Calculate the wavelength range for the data
        crval1 = flxhead["crval1"]
        crpix1 = flxhead["crpix1"]
        cdelt1 = flxhead["cdelt1"]
        naxis1 = flxhead["naxis1"]
        dcflag = flxhead["dc-flag"]  # NOTE(review): read but never used below
        exptime = flxhead['exptime']  # NOTE(review): read but never used below
        wavelength = (1.0+np.arange(naxis1)-crpix1)*cdelt1 + crval1
        #Loop over all of the emission lines to fit:
        #for j in range(1, len(sys.argv)):
        #Changed to only fitting one line at a time, don't want to unindent everything
        if 1==1:
            #line = int(sys.argv[j])
            line = sys.argv[2]
            #Check if we are fitting the Ha and NII lines together:
            if line == 'HaNII':
                line = 6563
                #Variable to know that we are fitting three lines
                HaNII = True
                #Dataframe that we will store everything in
                HaNIIdat = pd.DataFrame()
                #Set up the rest wavelengths for the lines
                HaNIIdat.at[0,'restwave'] = 6548.1
                HaNIIdat.at[1,'restwave'] = 6562.8
                HaNIIdat.at[2,'restwave'] = 6583.0
            else: line = int(line)
            #Compute the wavelength of the line redshifted to the galaxy
            zline = (1+zcc)*line
            #Set the range over which to look for the line (in angstroms, each pixel is 2A)
            srange = 50
            #Set the short range to try to find the peak
            shrange = 6
            #Find the indices to crop the spectra around the line
            idx = np.logical_and(wavelength > zline-srange, wavelength < zline+srange)
            idx2 = np.logical_and(wavelength > zline-shrange, wavelength < zline+shrange)
            #Special case for OII doublet if it isn't redshifted into view:
            if zline < 4910:
                idx = np.arange(0,srange)
                idx2 = np.arange(0,shrange)
                fitflag = 5 #Flagged for not in view
            #Crop the spectrum to the proper range
            waveline = wavelength[idx]
            specline = spec[idx]
            shspecline = spec[idx2]
            modelline = model[idx]
            noiseline = noise[idx]
            shnoiseline = noise[idx2]
            #Redshift the lines to the current galaxy
            zgallines = gallines.col1*(1+zcc)
            #Mask out the spectral lines with this function
            #data - the data to mask out
            #line - the line to keep (others are masked)
            def droplines(wavedrop=waveline,specdrop=specline,modeldrop=modelline,noisedrop = noiseline,zline=zline,peakwave=0,zcc=zcc,HaNII = HaNII):
                """Mask every catalogued emission line except the one being fit.

                Returns the masked wave/spec/model/noise arrays plus
                bookkeeping: dropped indices, line name, rest wavelength,
                and the plot-mask info (pdropidx, pdrop).
                """
                #Mark that we plot the dropped region
                pdrop = 1
                #We first find the line that you are fitting so we don't mask it
                #Compute the difference between the current line and every line in the data
                linediff = zgallines - zline
                #Find the index of the closest value to 0. There may be negatives
                closelineidx = np.abs(linediff).idxmin()
                #Save the name of the line for later
                linename = gallines.iloc[closelineidx].col3
                restwave = gallines.iloc[closelineidx].col1
                #Drop the closest line from the table so that we mask the others
                otherlines = zgallines.drop(closelineidx)
                #Special case for OII doublet, since it should find 3726.2, then also drop 3728.9
                if linename == '[OII]':
                    otherlines = otherlines.drop(closelineidx+1)
                    restwave = 3727
                #Special case for Ha three lines, since it should find Ha, then also drop NII on either side of it
                if HaNII:
                    otherlines = otherlines.drop(closelineidx-1)
                    otherlines = otherlines.drop(closelineidx+1)
                #Find the other lines that are around the current line, as integers
                rounded = [np.round(i) for i in otherlines if (i > zline-srange and i < zline+srange)]
                #Make them even if they are odd to match up with wavelengths
                centers = [int(i)+(int(i)&1) for i in rounded]
                #Find offset from expected
                lineval = gallines.iloc[closelineidx].col1
                zlineval = lineval*(1+zcc)
                if peakwave:
                    waveoffset = peakwave-zline
                    #Round it and make it even
                    waveoffset = np.floor(waveoffset)
                    waveoffset = int(waveoffset)+(int(waveoffset)&1)
                    centers = [i+waveoffset for i in centers]
                #Arrays for the pixels on either side of each center
                centerrange = [np.arange(i-shrange,i+shrange+2,2) for i in centers]
                #Find the indices where the arrays match (we will drop these)
                dropidx = [np.nonzero(np.in1d(wavedrop,i))[0] for i in centerrange]
                #Save this version for plotting
                pdropidx = dropidx
                #Drop the values at those indices from both wavelength and spectrum
                #Fixes a bug when they are not the same length -happens if line is on an edge
                if len(dropidx) == 2:
                    dropidx = np.append(dropidx[0],dropidx[1])
                elif not dropidx:
                    #Variable to say whether or not to plot the dropidx
                    pdrop = 0
                #Drop the lines
                newwave = np.delete(wavedrop,dropidx)
                newspec = np.delete(specdrop,dropidx)
                newmodel = np.delete(modeldrop,dropidx)
                newnoise = np.delete(noisedrop,dropidx)
                return newwave,newspec,newmodel,newnoise,dropidx,linename,restwave,pdropidx,pdrop
            #Mask the other emission lines
            dropwaveline,dropspecline,dropmodelline,dropnoiseline,dropidx,linename,restwave,pdropidx,pdrop = droplines()
            m,w = getMask(dropmodelline, dropnoiseline, dropspecline)
            #Model continuum
            #m = dropmodelline
            #Get the weights so we can downweight by noise
            #w = divz(1,dropnoiseline)
            #Set up Gaussian Function
            #mu - mean value of the gaussian
            #sigma - log of the standard deviation (exponentiated inside)
            def gauss3(x, mu, sigma):
                """Single normalized Gaussian plus scaled continuum m."""
                A,B = amp3(x,mu,sigma)
                g = np.exp(-0.5*(x-mu)**2/(np.e**sigma)**2)/np.sqrt(2*np.pi*(np.e**sigma)**2) #NORMALIZED GAUSSIAN
                s = A*g + B*m
                return s
            #A is area under Gauss curve, B is the scale factor of the continuum
            def amp3(x, mu, sigma):
                """Solve non-negative least squares for line area A and continuum scale B."""
                g = np.exp(-0.5*(x-mu)**2/(np.e**sigma)**2)/np.sqrt(2*np.pi*(np.e**sigma)**2) #NORMALIZED GAUSSIAN
                A,B = nnls(np.transpose([g,m])*w[::,np.newaxis],dropspecline*w)[0]
                return A,B
            def gaussHa(x, z, sigma48, sigma63, sigma83):
                """Triple Gaussian (NII 6548, Ha 6563, NII 6583) plus scaled continuum."""
                A48,A63,A83,B = ampHa(x, z, sigma48, sigma63, sigma83)
                g48 = np.exp(-0.5*(x-(6548.1*(1+z)))**2/(np.e**sigma48)**2)/np.sqrt(2*np.pi*(np.e**sigma48)**2)
                g63 = np.exp(-0.5*(x-(6562.8*(1+z)))**2/(np.e**sigma63)**2)/np.sqrt(2*np.pi*(np.e**sigma63)**2)
                g83 = np.exp(-0.5*(x-(6583.0*(1+z)))**2/(np.e**sigma83)**2)/np.sqrt(2*np.pi*(np.e**sigma83)**2)
                s = A48*g48 + A63*g63 + A83*g83 + B*m
                return s
            #A is area under Gauss curve, B is the scale factor of the continuum
            def ampHa(x, z, sigma48, sigma63, sigma83):
                """NNLS solve for the three line areas and continuum scale."""
                g48 = np.exp(-0.5*(x-(6548.1*(1+z)))**2/(np.e**sigma48)**2)/np.sqrt(2*np.pi*(np.e**sigma48)**2)
                g63 = np.exp(-0.5*(x-(6562.8*(1+z)))**2/(np.e**sigma63)**2)/np.sqrt(2*np.pi*(np.e**sigma63)**2)
                g83 = np.exp(-0.5*(x-(6583.0*(1+z)))**2/(np.e**sigma83)**2)/np.sqrt(2*np.pi*(np.e**sigma83)**2)
                A48,A63,A83,B = nnls(np.transpose([g48,g63,g83,m])*w[::,np.newaxis],dropspecline*w)[0]
                return A48,A63,A83,B
            ###Set initial guess parameters
            #find the highest peak, get the wavelength value of it
            #Index of highest peak
            # NOTE(review): under Python 3, srange/2 is a float so pkidx is a
            # float and waveline[pkidx] would raise IndexError/TypeError;
            # presumably written for Python 2 integer division -- confirm.
            pkidx = np.argmax(shspecline)+srange/2-shrange/2
            #Wavelength of peak
            peakwave = waveline[pkidx]
            guess3 = (peakwave,np.log(2))
            guesscurve3 = gauss3(dropwaveline,guess3[0],guess3[1])
            #Set the bounds, from expected position of the line +- 4 pixels, and sigma from 2 to 10
            bounds3 = ([restwave*(1+zcc)-8,np.log(2)],[restwave*(1+zcc)+8,np.log(10)])
            #Special case for OII doublet
            # NOTE(review): droplines() compares linename to '[OII]' but this
            # checks 'O[II]' -- one of the two is likely a typo, so this branch
            # may never trigger. Confirm against galaxylines.dat naming.
            if linename == 'O[II]':
                guess3 = (peakwave,np.log(4))
                guesscurve3 = gauss3(dropwaveline,guess3[0],guess3[1])
                #Set the bounds
                bounds3 = ([restwave*(1+zcc)-8,np.log(2)],[restwave*(1+zcc)+8,np.log(15)])
            #Special case for Ha lines, need to set for all three gaussians
            if HaNII:
                guessHa = (zcc,np.log(2),np.log(2),np.log(2))
                guesscurveHa = gaussHa(dropwaveline,guessHa[0],guessHa[1],guessHa[2],guessHa[3])
                boundsHa = ([zcc-0.0012,np.log(2),np.log(2),np.log(2)],[zcc+0.0012,np.log(10),np.log(10),np.log(10)])
            #Check if there is a lot of bad data
            if np.count_nonzero(~np.isnan(specline)):
                try:
                    #Fit the Gaussian
                    #coeff3, var_matrix3 = curve_fit(gauss3, waveline, specline, p0=guess3, bounds=bounds3)
                    if not HaNII:
                        coeff3, var_matrix3 = curve_fit(gauss3, dropwaveline, dropspecline, p0=guess3, bounds=bounds3)
                    else:
                        coeffHa, var_matrixHa = curve_fit(gaussHa, dropwaveline, dropspecline, p0=guessHa, bounds=boundsHa)
                    #Fit again with a proper mask
                    #Mask the other emission lines
                    if not HaNII:
                        peakwave = coeff3[0]
                        dropwaveline,dropspecline,dropmodelline,dropnoiseline,dropidx,linename,restwave,pdropidx,pdrop = droplines(peakwave=peakwave)
                        guess3 = (peakwave,coeff3[1])
                    #Redefine the gauss functions since now the model and noise have changed
                    m,w = getMask(dropmodelline, dropnoiseline, dropspecline)
                    #Model continuum
                    #m = dropmodelline
                    #Get the weights so we can downweight by noise
                    #w = divz(1,dropnoiseline)
                    #Set up Gaussian Function
                    #mu - mean value of the gaussian
                    #sigma - log(standard deviation)
                    def gauss3(x, mu, sigma):
                        """Single-Gaussian model rebuilt against the re-masked m and w."""
                        A,B = amp3(x,mu,sigma)
                        g = np.exp(-0.5*(x-mu)**2/(np.e**sigma)**2)/np.sqrt(2*np.pi*(np.e**sigma)**2) #NORMALIZED GAUSSIAN
                        s = A*g + B*m
                        return s
                    #A is area under Gauss curve, B is the scale factor of the continuum
                    def amp3(x, mu, sigma):
                        """NNLS amplitudes against the re-masked continuum and weights."""
                        g = np.exp(-0.5*(x-mu)**2/(np.e**sigma)**2)/np.sqrt(2*np.pi*(np.e**sigma)**2) #NORMALIZED GAUSSIAN
                        A,B = nnls(np.transpose([g,m])*w[::,np.newaxis],dropspecline*w)[0]
                        return A,B
                    #Only fit if you're not doing HaNII, otherwise nothing is masked so we don't need to fit again
                    if not HaNII:
                        coeff3, var_matrix3 = curve_fit(gauss3, dropwaveline, dropspecline, p0=guess3, bounds=bounds3)
                    #Compute the values of the fit
                    if not HaNII:
                        gausscurve3 = gauss3(dropwaveline,coeff3[0],coeff3[1]) #
                        # NOTE(review): rebinds the name amp3 from the helper
                        # function to its result tuple; harmless only because
                        # amp3() is never called again afterwards.
                        amp3 = amp3(dropwaveline,coeff3[0],coeff3[1]) #
                        mu3 = coeff3[0]
                        stddev3 = np.e**np.abs(coeff3[1])
                        flux3 = amp3[0]
                        scale3 = amp3[1]
                    else:
                        gausscurveHa = gaussHa(dropwaveline,coeffHa[0],coeffHa[1],coeffHa[2],coeffHa[3])
                        #Same function-name rebinding pattern as amp3 above
                        ampHa = ampHa(dropwaveline,coeffHa[0],coeffHa[1],coeffHa[2],coeffHa[3])
                        #Fit redshift
                        zgauss = coeffHa[0]
                        #Mean of each line
                        for num in np.arange(0,3):
                            HaNIIdat.at[num,'mu'] = HaNIIdat.iloc[num]['restwave']*(1+zgauss)
                            HaNIIdat.at[num,'sig'] = np.e**np.abs(coeffHa[num+1])
                            HaNIIdat.at[num,'flux'] = ampHa[num]
                            HaNIIdat.at[num,'scale'] = ampHa[3]
                        mu3 = HaNIIdat.iloc[1]['mu']
                        stddev3 = HaNIIdat.iloc[1]['sig']
                        flux3 = HaNIIdat.iloc[1]['flux']
                        scale3 = HaNIIdat.iloc[1]['scale']
                    #Compute chi^2 statistics in the range of the line
                    if not HaNII:
                        #Degrees of freedom: mu, sigma, area, scale
                        dof = 4
                        #Set the lower and upper bounds for the region to find chi2
                        chilb = mu3-2*stddev3
                        chiub = mu3+2*stddev3
                        #Get only the indices in that region
                        cidx = np.logical_and(dropwaveline > chilb-2, dropwaveline < chiub+2)
                        arrchi2 = divz((dropspecline[cidx]-gausscurve3[cidx]),dropnoiseline[cidx])**2
                        chi2 = np.add.reduce(arrchi2)
                        rchi2 = divz(chi2,len(dropwaveline[cidx])-dof)
                        #Compute the sum of the fluxes in the line in the same region
                        sumflux = 2*np.add.reduce(dropspecline[cidx]-dropmodelline[cidx])
                    else:
                        #Degrees of freedom: z, scale, sigma (x3, for each line), area (x3, for each line)
                        dof = 8
                        cidxarr = []
                        #Set the lower and upper bounds for the region to find chi2
                        for num in np.arange(0,3):
                            HaNIIdat.at[num,'chilb'] = (1+zgauss)*HaNIIdat.iloc[num]['restwave']-2*HaNIIdat.iloc[num]['sig']
                            HaNIIdat.at[num,'chiub'] = (1+zgauss)*HaNIIdat.iloc[num]['restwave']+2*HaNIIdat.iloc[num]['sig']
                            cidxarr.append(np.logical_and(dropwaveline > HaNIIdat.iloc[num]['chilb']-2, dropwaveline < HaNIIdat.iloc[num]['chiub']+2))
                            #Chi2 just in this line
                            arrchi2 = divz((dropspecline[cidxarr[num]]-gausscurveHa[cidxarr[num]]),dropnoiseline[cidxarr[num]])**2
                            HaNIIdat.at[num,'chi2'] = np.add.reduce(arrchi2)
                            HaNIIdat.at[num,'rchi2'] = divz(HaNIIdat.iloc[num]['chi2'],len(dropwaveline[cidxarr[num]])-4)
                            #Compute the sum of the fluxes in the line in the same region
                            HaNIIdat.at[num,'sumflux'] = 2*np.add.reduce(dropspecline[cidxarr[num]]-dropmodelline[cidxarr[num]])
                            zrestline = HaNIIdat.iloc[num]['restwave']*(1+zcc)
                            idx3 = np.logical_and(waveline > zrestline-shrange, waveline < zrestline+shrange)
                            HaNIIdat.at[num,'usig'] = np.sqrt(np.add.reduce(noiseline[idx3]**2))
                        #wsig for each line
                        #Masks out the other two lines, %3 is mod3
                        for num in np.arange(0,3):
                            wsigidx = np.logical_not(np.logical_or(cidxarr[(num+1)%3],cidxarr[(num+2)%3]))
                            g = np.exp(-0.5*(dropwaveline[wsigidx]-HaNIIdat.iloc[num]['mu'])**2/HaNIIdat.iloc[num]['sig']**2)/np.sqrt(2*np.pi*HaNIIdat.iloc[num]['sig']**2)
                            HaNIIdat.at[num,'wsig'] = np.sqrt(np.sum(g*(dropnoiseline[wsigidx]**2))*np.sqrt(2*np.pi*(HaNIIdat.iloc[num]['sig']**2)))
                        #Chi2 over the whole region
                        cidxtot = np.logical_or(np.logical_or(cidxarr[0],cidxarr[1]),cidxarr[2])
                        arrchi2tot = divz((dropspecline[cidxtot]-gausscurveHa[cidxtot]),dropnoiseline[cidxtot])**2
                        chi2tot = np.add.reduce(arrchi2tot)
                        rchi2tot = divz(chi2tot,len(dropwaveline[cidxtot])-dof)
                    #Now compute the weighted error
                    #Gaussian curve with area=1
                    if not HaNII:
                        g = np.exp(-0.5*(dropwaveline-mu3)**2/stddev3**2)/np.sqrt(2*np.pi*stddev3**2) #NORMALIZED GAUSSIAN
                        wsig = np.sqrt(np.sum(g*(dropnoiseline**2))*np.sqrt(2*np.pi*(stddev3**2)))
                        usig = np.sqrt(np.add.reduce(shnoiseline**2))
                        #Get the string of the nearest wavelength to the line. Used for saving everything
                        linestr = (str(int(np.round(restwave))))
                    else:
                        wsig = HaNIIdat.iloc[1]['wsig']
                        usig = HaNIIdat.iloc[1]['usig']
                        linestr = 'HaNII'
                    ###Set flags
                    #Make sure the flag isn't 5 (out of view). if it is, don't flag it otherwise
                    if fitflag ==5:
                        pass
                    #Check if more than a third of the spectrum is masked - if so, throw it out
                    elif (len(np.where(w<=0)[0])>(len(dropwaveline)/3)):
                        fitflag = 1 #Marks bad data
                    #Check if the width of the line hit the bounds
                    elif (stddev3 > 7.0):
                        fitflag = 2 #Marks bad sigma
                    #Check if the scale got significantly shifted, like means bad data
                    elif ((scale3 < 0.7) or (scale3 > 1.3)):
                        fitflag = 4 #Marks strange scaling
                    #Check the flag for each line when fitting HaNII
                    if HaNII:
                        for num in np.arange(0,3):
                            if fitflag == 1: HaNIIdat.at[num,'flag'] = 1
                            elif (HaNIIdat.iloc[num]['sig'] > 7.0):
                                HaNIIdat.at[num,'flag'] = 2
                            elif ((HaNIIdat.iloc[num]['scale'] < 0.7) or (HaNIIdat.iloc[num]['scale'] > 1.3)):
                                HaNIIdat.at[num,'flag'] = 4
                            else:
                                HaNIIdat.at[num,'flag'] = 0
                    def mkplot(plt10,plt1,plt10b,plt1b,gridsize):
                        """Pick the next grid cell (flagged fits go to figb),
                        plot spectrum/model/noise/masks, and return the axes
                        plus the updated counters."""
                        #Create the plot
                        #fig,ax0 = plt.subplots(figsize = (13,7))
                        #Set the axis to the correct number - check if it is flagged or not
                        if fitflag:
                            ax0 = axarrb[plt10b,plt1b]
                            #Increment the counters for next time
                            plt1b = plt1b + 1
                            if plt1b == gridsize:
                                plt1b = 0
                                plt10b = plt10b + 1
                        else:
                            ax0 = axarr[plt10,plt1]
                            #Increment the counters for next time
                            plt1 = plt1 + 1
                            if plt1 == gridsize:
                                plt1 = 0
                                plt10 = plt10 + 1
                        #Plotting
                        ax0.plot(waveline,specline,color='cornflowerblue',label='Spectrum')
                        #ax0.plot(dropwaveline,dropspecline,color='darkblue',label='Masked Spectrum')
                        #This will break if one of the lines has an empty array, the except statement fixes it. This is only for plotting
                        if pdrop:
                            if dropidx[0].size > 0:
                                try: [ax0.axvspan(np.min(waveline[j]),np.max(waveline[j]), color='indianred', alpha=0.1) for j in pdropidx]
                                except: [ax0.axvspan(np.min(waveline[j]),np.max(waveline[j]), color='indianred', alpha=0.1) for j in dropidx]
                        #Check if any weights were set to 0 - if so, plot the mask for those
                        if np.where(w<=0)[0].any():
                            [ax0.plot(dropwaveline[j],dropspecline[j], marker='o', color='red', alpha=0.7) for j in np.where(w<=0)[0]]
                        #Plot the region over which we fit chi2
                        if not HaNII:
                            ax0.axvspan(np.min(dropwaveline[cidx]),np.max(dropwaveline[cidx]), color='grey', alpha=0.2, label='chi2 region')
                        else:
                            #[ax0.axvspan(np.min(dropwaveline[cidxarr[num]]),np.max(dropwaveline[cidxarr[num]]), color='grey', alpha=0.2, label='chi2 region') for num in np.arange(0,3)]
                            pass
                        ax0.plot(waveline,modelline,color='red',label='Model')
                        #ax0.plot(dropwaveline,guesscurve3,color='orange',label='Initial Guess')
                        ax0.plot(dropwaveline,dropnoiseline,color='orange',label='Noise')
                        #Titles, axes, legends
                        ax0.set_title('H$\\alpha$ and N[II], Rest $\lambda$ ' + str(int(np.round(6563))) + ', z=' + str(np.around(zcc,4)) + ', OBJID ' + objs[i][0] + objs[i][1] + objs[i][2],fontsize = titlefont)
                        ax0.legend(fontsize = legendfont,loc=1)
                        ax0.set_xlabel('Wavelength ($\AA$)',fontsize = axisfont)
                        ax0.set_ylabel('Flux ($10^{-17}$ erg/s/${cm}^2/\AA$)',fontsize = axisfont)
                        ax0.tick_params(labelsize = ticksize)
                        return ax0, plt10, plt1, plt10b, plt1b
                    ax0,plt10,plt1,plt10b,plt1b = mkplot(plt10,plt1,plt10b,plt1b,gridsize)
                    if not HaNII:
                        ax0.text(0.02,0.95,'Mean: ' + str(round(mu3,2)),fontsize = textfont, transform=ax0.transAxes)
                        ax0.text(0.02,0.90,'Std Dev: ' + str(round(stddev3,2)),fontsize = textfont, transform=ax0.transAxes)
                        ax0.text(0.02,0.85,'Scale: ' + str(round(amp3[1],2)),fontsize = textfont, transform=ax0.transAxes)
                        ax0.text(0.02,0.80,'Flux: ' + str(round(amp3[0],2)),fontsize = textfont, transform=ax0.transAxes)
                        #ax0.text(0.02,0.75,'Sumflux: ' + str(round(sumflux,2)),fontsize = textfont, transform=ax0.transAxes)
                        ax0.text(0.02,0.70,'Chi2: ' + str(round(chi2,2)),fontsize = textfont, transform=ax0.transAxes)
                        ax0.text(0.02,0.65,'rChi2: ' + str(round(rchi2,2)),fontsize = textfont, transform=ax0.transAxes)
                        ax0.text(0.02,0.60,'wsig: ' + str(round(wsig,3)),fontsize = textfont, transform=ax0.transAxes)
                        ax0.text(0.02,0.55,'usig: ' + str(round(usig,3)),fontsize = textfont, transform=ax0.transAxes)
                    else:
                        ax0.text(0.02,0.95,'Mean: ' + str(round(HaNIIdat.iloc[0]['mu'],2)),fontsize = textfont, transform=ax0.transAxes)
                        ax0.text(0.24,0.95, str(round(HaNIIdat.iloc[1]['mu'],2)),fontsize = textfont, transform=ax0.transAxes)
                        ax0.text(0.40,0.95, str(round(HaNIIdat.iloc[2]['mu'],2)),fontsize = textfont, transform=ax0.transAxes)
                        ax0.text(0.02,0.90,'Sigma: ' + str(round(HaNIIdat.iloc[0]['sig'],2)),fontsize = textfont, transform=ax0.transAxes)
                        ax0.text(0.24,0.90, str(round(HaNIIdat.iloc[1]['sig'],2)),fontsize = textfont, transform=ax0.transAxes)
                        ax0.text(0.40,0.90, str(round(HaNIIdat.iloc[2]['sig'],2)),fontsize = textfont, transform=ax0.transAxes)
                        ax0.text(0.02,0.85,'Flux: ' + str(round(HaNIIdat.iloc[0]['flux'],2)),fontsize = textfont, transform=ax0.transAxes)
                        ax0.text(0.24,0.85, str(round(HaNIIdat.iloc[1]['flux'],2)),fontsize = textfont, transform=ax0.transAxes)
                        ax0.text(0.40,0.85, str(round(HaNIIdat.iloc[2]['flux'],2)),fontsize = textfont, transform=ax0.transAxes)
                        #ax0.text(0.02,0.80,'Flag: ' + str(int(HaNIIdat.iloc[0]['flag'])),fontsize = textfont, transform=ax0.transAxes)
                        #ax0.text(0.15,0.80, str(int(HaNIIdat.iloc[1]['flag'])),fontsize = textfont, transform=ax0.transAxes)
                        #ax0.text(0.28,0.80, str(int(HaNIIdat.iloc[2]['flag'])),fontsize = textfont, transform=ax0.transAxes)
                        ax0.text(0.02,0.80, 'Scale: ' + str(round(HaNIIdat.iloc[2]['scale'],2)),fontsize = textfont, transform=ax0.transAxes)
                        #ax0.text(0.02,0.75, 'zfit: ' + str(round(zgauss,4)),fontsize = textfont, transform=ax0.transAxes)
                        #ax0.text(0.02,0.65, 'chi2tot: ' + str(round(chi2tot,2)),fontsize = textfont, transform=ax0.transAxes)
                        #ax0.text(0.02,0.60, 'rchi2tot: ' + str(round(rchi2tot,2)),fontsize = textfont, transform=ax0.transAxes)
                    if fitflag:
                        ax0.text(0.02,0.50,'flag: ' + str(fitflag),fontsize = textfont, transform=ax0.transAxes)
                    #fig.text(0.14,0.60,'Redshift: ' + str(round(zcc,4)),fontsize = textfont)
                    #fig.text(0.14,0.60,'Luminosity (erg/s): ' + str(round(lumin,2)),fontsize = textfont)
                    if not HaNII:
                        ax0.plot(dropwaveline,gausscurve3,color='black',label='Gaussian fit')
                    else:
                        ax0.plot(dropwaveline,gausscurveHa,color='black',label='Gaussian fit')
                    ax0.legend(fontsize = legendfont,loc=1)
                    #plt.show()
                    #Store the results to the output array:
                    #First we find the index with a matching objid
                    #midx = np.where((outarr.OBJID.astype(float)-float(objs[i][0])==0) and (outarr.Mask == (objs[i][1]+objs[i][2])))[0]
                    #Get the array of trues and falses where the OBJID and mask both match
                    tfarr = (outarr.OBJID.astype(float)-float(objs[i][0])==0) & (outarr.Mask == (objs[i][1]+objs[i][2]))
                    #Get the index of the matching element
                    midx = outarr.index[tfarr]
                    #We make sure outarr has correct column types
                    if os.path.exists(dataout):
                        #outarr.OBJID = outarr.OBJID.astype(str)
                        outarr.Mask = outarr.Mask.astype(str)
                        outarr.fluxfile = outarr.fluxfile.astype(str)
                    #We check to make sure there is only one.
                    #If there are none, we append a new row onto outarr
                    if len(midx)>1:
                        print('Error, check text document for duplicates')
                    elif len(midx)==0:
                        #Makes the index the length of the array, which will add a new row at the bottom
                        midx = len(outarr)
                    #Store the info that doesn't change
                    outarr.at[midx,'OBJID'] = objs[i][0]
                    outarr.at[midx,'Mask'] = objs[i][1]+objs[i][2]
                    outarr.at[midx,'fluxfile'] = 'flx_' + objs[i][0] + '_feb1' + objs[i][2] + '_' + objs[i][1] + 'big.fits'
                    outarr.at[midx,'zcc'] = zcc
                    #Write in the new info from the fit. outarr.at auto generates new columns if needed
                    if not HaNII:
                        outarr.at[midx,linestr + '_mean'] = mu3
                        outarr.at[midx,linestr + '_stddev'] = stddev3
                        outarr.at[midx,linestr + '_flux'] = flux3
                        outarr.at[midx,linestr + '_scale'] = scale3
                        outarr.at[midx,linestr + '_chi2'] = chi2
                        outarr.at[midx,linestr + '_rchi2'] = rchi2
                        outarr.at[midx,linestr + '_sumflux'] = sumflux
                        outarr.at[midx,linestr + '_wsig'] = wsig
                        outarr.at[midx,linestr + '_usig'] = usig
                        outarr.at[midx,linestr + '_flag'] = fitflag
                    else:
                        linearr = ['6548','6563','6583']
                        counter = 0
                        for linestr in linearr:
                            outarr.at[midx,linestr + '_mean'] = HaNIIdat.iloc[counter]['mu']
                            outarr.at[midx,linestr + '_stddev'] = HaNIIdat.iloc[counter]['sig']
                            outarr.at[midx,linestr + '_flux'] = HaNIIdat.iloc[counter]['flux']
                            outarr.at[midx,linestr + '_scale'] = HaNIIdat.iloc[counter]['scale']
                            outarr.at[midx,linestr + '_chi2'] = HaNIIdat.iloc[counter]['chi2']
                            outarr.at[midx,linestr + '_rchi2'] = HaNIIdat.iloc[counter]['rchi2']
                            outarr.at[midx,linestr + '_sumflux'] = HaNIIdat.iloc[counter]['sumflux']
                            outarr.at[midx,linestr + '_wsig'] = HaNIIdat.iloc[counter]['wsig']
                            outarr.at[midx,linestr + '_usig'] = HaNIIdat.iloc[counter]['usig']
                            outarr.at[midx,linestr + '_flag'] = HaNIIdat.iloc[counter]['flag']
                            counter = counter + 1
                        outarr.at[midx,'6563_chi2tot'] = chi2tot
                        outarr.at[midx,'6563_rchi2tot'] = rchi2tot
                        outarr.at[midx,'6563_zgauss'] = zgauss
                    # NOTE(review): the note below says sigma >8 but the flag
                    # checks above use stddev3 > 7.0 -- reconcile.
                    '''
                    Flag values:
                    1 - too many zeros, we threw out the fit
                    2 - sigma >8, so it hit the bounds.
                    4 - scale >1.3 or <0.7, probably something wrong with spectrum in the region
                    5 - the line is not redshifted enough to be in view (e.g. 3727 OII)
                    '''
                except (RuntimeError):
                    # NOTE(review): ax0 is only bound if mkplot ran before the
                    # failure; a RuntimeError from the first curve_fit call
                    # would raise NameError here -- confirm.
                    ax0.text(0.14,0.84,'Fitting Failed',fontsize = textfont, transform=ax0.transAxes)
                #plt.show()
            else: print('Bad data at ' + str(line) + ', too many NaN. ' + 'flx_' + objs[i][0] + '_feb1' + objs[i][2] + '_' + objs[i][1] + 'big.fits' )
    #If not, give an error but continue
    else: print('Could not read file ' + flxfits)
###Editing the datafile
#Sort by OBJID
outarr = outarr.sort_values('OBJID')
#Sort the columns so the lines are next to each other
outarr = outarr.reindex(sorted(outarr.columns), axis=1)
#Remove all NaN and replace them with -99
outarr = outarr.fillna(value = -99.999999999999)
#Remove columns with this, then take it back out
#outarr = outarr.drop('Ha_chi2',axis=1)
#Write the file
# NOTE(review): the to_csv call is commented out, so fitted values are never
# persisted to dataout on this run -- confirm that is intentional.
#outarr.to_csv(dataout,index=False)
#Save the figure
#plt.show()
fig.tight_layout()
figb.tight_layout()
# NOTE(review): HaNII/linename are only bound inside the loop; if objs is
# empty this line raises NameError -- TODO confirm.
if HaNII: linename = 'HaNII'
fig.savefig(figout + 'HaNIIforpres.pdf')
figb.savefig(figout + 'HaNIIforpres_flagged.pdf')
plt.close(fig)
plt.close(figb)
'''
Make a bpt diagram, look at spectra of possible AGN
'''
| |
# Generated from tools/antlr/Fcl.g4 by ANTLR 4.5.3
from antlr4 import *
if __name__ is not None and "." in __name__:
from .FclParser import FclParser
else:
from FclParser import FclParser
# This class defines a complete listener for a parse tree produced by FclParser.
class FclListener(ParseTreeListener):
# Enter a parse tree produced by FclParser#main.
def enterMain(self, ctx:FclParser.MainContext):
pass
# Exit a parse tree produced by FclParser#main.
def exitMain(self, ctx:FclParser.MainContext):
pass
# Enter a parse tree produced by FclParser#fcl.
def enterFcl(self, ctx:FclParser.FclContext):
pass
# Exit a parse tree produced by FclParser#fcl.
def exitFcl(self, ctx:FclParser.FclContext):
pass
# Enter a parse tree produced by FclParser#function_block.
def enterFunction_block(self, ctx:FclParser.Function_blockContext):
pass
# Exit a parse tree produced by FclParser#function_block.
def exitFunction_block(self, ctx:FclParser.Function_blockContext):
pass
# Enter a parse tree produced by FclParser#declaration.
def enterDeclaration(self, ctx:FclParser.DeclarationContext):
pass
# Exit a parse tree produced by FclParser#declaration.
def exitDeclaration(self, ctx:FclParser.DeclarationContext):
pass
# Enter a parse tree produced by FclParser#var_input.
def enterVar_input(self, ctx:FclParser.Var_inputContext):
pass
# Exit a parse tree produced by FclParser#var_input.
def exitVar_input(self, ctx:FclParser.Var_inputContext):
pass
# Enter a parse tree produced by FclParser#var_def.
def enterVar_def(self, ctx:FclParser.Var_defContext):
pass
# Exit a parse tree produced by FclParser#var_def.
def exitVar_def(self, ctx:FclParser.Var_defContext):
pass
# Enter a parse tree produced by FclParser#var_range.
def enterVar_range(self, ctx:FclParser.Var_rangeContext):
pass
# Exit a parse tree produced by FclParser#var_range.
def exitVar_range(self, ctx:FclParser.Var_rangeContext):
pass
# Enter a parse tree produced by FclParser#var_output.
def enterVar_output(self, ctx:FclParser.Var_outputContext):
pass
# Exit a parse tree produced by FclParser#var_output.
def exitVar_output(self, ctx:FclParser.Var_outputContext):
pass
# Enter a parse tree produced by FclParser#fuzzify_block.
def enterFuzzify_block(self, ctx:FclParser.Fuzzify_blockContext):
pass
# Exit a parse tree produced by FclParser#fuzzify_block.
def exitFuzzify_block(self, ctx:FclParser.Fuzzify_blockContext):
pass
# Enter a parse tree produced by FclParser#linguistic_term.
def enterLinguistic_term(self, ctx:FclParser.Linguistic_termContext):
pass
# Exit a parse tree produced by FclParser#linguistic_term.
def exitLinguistic_term(self, ctx:FclParser.Linguistic_termContext):
pass
# Enter a parse tree produced by FclParser#membership_function.
def enterMembership_function(self, ctx:FclParser.Membership_functionContext):
pass
# Exit a parse tree produced by FclParser#membership_function.
def exitMembership_function(self, ctx:FclParser.Membership_functionContext):
pass
# Enter a parse tree produced by FclParser#cosine.
def enterCosine(self, ctx:FclParser.CosineContext):
pass
# Exit a parse tree produced by FclParser#cosine.
def exitCosine(self, ctx:FclParser.CosineContext):
pass
# Enter a parse tree produced by FclParser#dsigm.
def enterDsigm(self, ctx:FclParser.DsigmContext):
pass
# Exit a parse tree produced by FclParser#dsigm.
def exitDsigm(self, ctx:FclParser.DsigmContext):
pass
# Enter a parse tree produced by FclParser#gauss.
def enterGauss(self, ctx:FclParser.GaussContext):
pass
# Exit a parse tree produced by FclParser#gauss.
def exitGauss(self, ctx:FclParser.GaussContext):
pass
# Enter a parse tree produced by FclParser#gauss2.
def enterGauss2(self, ctx:FclParser.Gauss2Context):
pass
# Exit a parse tree produced by FclParser#gauss2.
def exitGauss2(self, ctx:FclParser.Gauss2Context):
pass
# Enter a parse tree produced by FclParser#gbell.
def enterGbell(self, ctx:FclParser.GbellContext):
pass
# Exit a parse tree produced by FclParser#gbell.
def exitGbell(self, ctx:FclParser.GbellContext):
pass
# Enter a parse tree produced by FclParser#sigm.
def enterSigm(self, ctx:FclParser.SigmContext):
pass
# Exit a parse tree produced by FclParser#sigm.
def exitSigm(self, ctx:FclParser.SigmContext):
pass
# Enter a parse tree produced by FclParser#piece_wise_linear.
def enterPiece_wise_linear(self, ctx:FclParser.Piece_wise_linearContext):
pass
# NOTE: auto-generated ANTLR listener stubs for the FCL grammar (FclParser).
# Every enter*/exit* method below is an intentionally empty hook (pass);
# concrete listeners subclass this class and override only the callbacks
# they need. Do not edit by hand -- regenerate from the grammar instead.

# Exit a parse tree produced by FclParser#piece_wise_linear.
def exitPiece_wise_linear(self, ctx:FclParser.Piece_wise_linearContext):
    pass

# Enter a parse tree produced by FclParser#singleton.
def enterSingleton(self, ctx:FclParser.SingletonContext):
    pass

# Exit a parse tree produced by FclParser#singleton.
def exitSingleton(self, ctx:FclParser.SingletonContext):
    pass

# Enter a parse tree produced by FclParser#singletons.
def enterSingletons(self, ctx:FclParser.SingletonsContext):
    pass

# Exit a parse tree produced by FclParser#singletons.
def exitSingletons(self, ctx:FclParser.SingletonsContext):
    pass

# Enter a parse tree produced by FclParser#trape.
def enterTrape(self, ctx:FclParser.TrapeContext):
    pass

# Exit a parse tree produced by FclParser#trape.
def exitTrape(self, ctx:FclParser.TrapeContext):
    pass

# Enter a parse tree produced by FclParser#trian.
def enterTrian(self, ctx:FclParser.TrianContext):
    pass

# Exit a parse tree produced by FclParser#trian.
def exitTrian(self, ctx:FclParser.TrianContext):
    pass

# Enter a parse tree produced by FclParser#points.
def enterPoints(self, ctx:FclParser.PointsContext):
    pass

# Exit a parse tree produced by FclParser#points.
def exitPoints(self, ctx:FclParser.PointsContext):
    pass

# Enter a parse tree produced by FclParser#atom.
def enterAtom(self, ctx:FclParser.AtomContext):
    pass

# Exit a parse tree produced by FclParser#atom.
def exitAtom(self, ctx:FclParser.AtomContext):
    pass

# Enter a parse tree produced by FclParser#defuzzify_block.
def enterDefuzzify_block(self, ctx:FclParser.Defuzzify_blockContext):
    pass

# Exit a parse tree produced by FclParser#defuzzify_block.
def exitDefuzzify_block(self, ctx:FclParser.Defuzzify_blockContext):
    pass

# Enter a parse tree produced by FclParser#defuzzify_item.
def enterDefuzzify_item(self, ctx:FclParser.Defuzzify_itemContext):
    pass

# Exit a parse tree produced by FclParser#defuzzify_item.
def exitDefuzzify_item(self, ctx:FclParser.Defuzzify_itemContext):
    pass

# Enter a parse tree produced by FclParser#defuzzify_range.
def enterDefuzzify_range(self, ctx:FclParser.Defuzzify_rangeContext):
    pass

# Exit a parse tree produced by FclParser#defuzzify_range.
def exitDefuzzify_range(self, ctx:FclParser.Defuzzify_rangeContext):
    pass

# Enter a parse tree produced by FclParser#default_value.
def enterDefault_value(self, ctx:FclParser.Default_valueContext):
    pass

# Exit a parse tree produced by FclParser#default_value.
def exitDefault_value(self, ctx:FclParser.Default_valueContext):
    pass

# Enter a parse tree produced by FclParser#defuzzification_method.
def enterDefuzzification_method(self, ctx:FclParser.Defuzzification_methodContext):
    pass

# Exit a parse tree produced by FclParser#defuzzification_method.
def exitDefuzzification_method(self, ctx:FclParser.Defuzzification_methodContext):
    pass

# Enter a parse tree produced by FclParser#rule_block.
def enterRule_block(self, ctx:FclParser.Rule_blockContext):
    pass

# Exit a parse tree produced by FclParser#rule_block.
def exitRule_block(self, ctx:FclParser.Rule_blockContext):
    pass

# Enter a parse tree produced by FclParser#rule_item.
def enterRule_item(self, ctx:FclParser.Rule_itemContext):
    pass

# Exit a parse tree produced by FclParser#rule_item.
def exitRule_item(self, ctx:FclParser.Rule_itemContext):
    pass

# Enter a parse tree produced by FclParser#operator_definition.
def enterOperator_definition(self, ctx:FclParser.Operator_definitionContext):
    pass

# Exit a parse tree produced by FclParser#operator_definition.
def exitOperator_definition(self, ctx:FclParser.Operator_definitionContext):
    pass

# Enter a parse tree produced by FclParser#operator_definition_or.
def enterOperator_definition_or(self, ctx:FclParser.Operator_definition_orContext):
    pass

# Exit a parse tree produced by FclParser#operator_definition_or.
def exitOperator_definition_or(self, ctx:FclParser.Operator_definition_orContext):
    pass

# Enter a parse tree produced by FclParser#operator_definition_and.
def enterOperator_definition_and(self, ctx:FclParser.Operator_definition_andContext):
    pass

# Exit a parse tree produced by FclParser#operator_definition_and.
def exitOperator_definition_and(self, ctx:FclParser.Operator_definition_andContext):
    pass

# Enter a parse tree produced by FclParser#activation_method.
def enterActivation_method(self, ctx:FclParser.Activation_methodContext):
    pass

# Exit a parse tree produced by FclParser#activation_method.
def exitActivation_method(self, ctx:FclParser.Activation_methodContext):
    pass

# Enter a parse tree produced by FclParser#accumulation_method.
def enterAccumulation_method(self, ctx:FclParser.Accumulation_methodContext):
    pass

# Exit a parse tree produced by FclParser#accumulation_method.
def exitAccumulation_method(self, ctx:FclParser.Accumulation_methodContext):
    pass

# Enter a parse tree produced by FclParser#rule_def.
def enterRule_def(self, ctx:FclParser.Rule_defContext):
    pass

# Exit a parse tree produced by FclParser#rule_def.
def exitRule_def(self, ctx:FclParser.Rule_defContext):
    pass

# Enter a parse tree produced by FclParser#rule_name.
def enterRule_name(self, ctx:FclParser.Rule_nameContext):
    pass

# Exit a parse tree produced by FclParser#rule_name.
def exitRule_name(self, ctx:FclParser.Rule_nameContext):
    pass

# Enter a parse tree produced by FclParser#if_clause.
def enterIf_clause(self, ctx:FclParser.If_clauseContext):
    pass

# Exit a parse tree produced by FclParser#if_clause.
def exitIf_clause(self, ctx:FclParser.If_clauseContext):
    pass

# Enter a parse tree produced by FclParser#condition.
def enterCondition(self, ctx:FclParser.ConditionContext):
    pass

# Exit a parse tree produced by FclParser#condition.
def exitCondition(self, ctx:FclParser.ConditionContext):
    pass

# Enter a parse tree produced by FclParser#subcondition.
def enterSubcondition(self, ctx:FclParser.SubconditionContext):
    pass

# Exit a parse tree produced by FclParser#subcondition.
def exitSubcondition(self, ctx:FclParser.SubconditionContext):
    pass

# Enter a parse tree produced by FclParser#subcondition_bare.
def enterSubcondition_bare(self, ctx:FclParser.Subcondition_bareContext):
    pass

# Exit a parse tree produced by FclParser#subcondition_bare.
def exitSubcondition_bare(self, ctx:FclParser.Subcondition_bareContext):
    pass

# Enter a parse tree produced by FclParser#subcondition_paren.
def enterSubcondition_paren(self, ctx:FclParser.Subcondition_parenContext):
    pass

# Exit a parse tree produced by FclParser#subcondition_paren.
def exitSubcondition_paren(self, ctx:FclParser.Subcondition_parenContext):
    pass

# Enter a parse tree produced by FclParser#then_clause.
def enterThen_clause(self, ctx:FclParser.Then_clauseContext):
    pass

# Exit a parse tree produced by FclParser#then_clause.
def exitThen_clause(self, ctx:FclParser.Then_clauseContext):
    pass

# Enter a parse tree produced by FclParser#conclusion.
def enterConclusion(self, ctx:FclParser.ConclusionContext):
    pass

# Exit a parse tree produced by FclParser#conclusion.
def exitConclusion(self, ctx:FclParser.ConclusionContext):
    pass

# Enter a parse tree produced by FclParser#sub_conclusion.
def enterSub_conclusion(self, ctx:FclParser.Sub_conclusionContext):
    pass

# Exit a parse tree produced by FclParser#sub_conclusion.
def exitSub_conclusion(self, ctx:FclParser.Sub_conclusionContext):
    pass

# Enter a parse tree produced by FclParser#with_x.
def enterWith_x(self, ctx:FclParser.With_xContext):
    pass

# Exit a parse tree produced by FclParser#with_x.
def exitWith_x(self, ctx:FclParser.With_xContext):
    pass

# Enter a parse tree produced by FclParser#data_type.
def enterData_type(self, ctx:FclParser.Data_typeContext):
    pass

# Exit a parse tree produced by FclParser#data_type.
def exitData_type(self, ctx:FclParser.Data_typeContext):
    pass
| |
"""Certbot command line util function"""
import argparse
import copy
import inspect
from typing import Any
from typing import Iterable
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import TYPE_CHECKING
from typing import Union
from acme import challenges
from certbot import configuration
from certbot import errors
from certbot import util
from certbot._internal import constants
from certbot.compat import os
if TYPE_CHECKING:
from certbot._internal.cli import helpful
class _Default:
"""A class to use as a default to detect if a value is set by a user"""
def __bool__(self) -> bool:
return False
def __eq__(self, other: Any) -> bool:
return isinstance(other, _Default)
def __hash__(self) -> int:
return id(_Default)
def __nonzero__(self) -> bool:
return self.__bool__()
def read_file(filename: str, mode: str = "rb") -> Tuple[str, Any]:
    """Returns the given file's contents.

    :param str filename: path to file
    :param str mode: open mode (see `open`)

    :returns: absolute path of filename and its contents
    :rtype: tuple

    :raises argparse.ArgumentTypeError: File does not exist or is not readable.

    """
    try:
        filename = os.path.abspath(filename)
        with open(filename, mode) as the_file:
            contents = the_file.read()
        return filename, contents
    except IOError as exc:
        # Chain the original OSError so tracebacks show the root cause
        # instead of an unrelated "during handling ..." context.
        raise argparse.ArgumentTypeError(exc.strerror) from exc
def flag_default(name: str) -> Any:
    """Default value for CLI flag.

    A deep copy is returned so callers can mutate the value freely
    without corrupting the shared defaults table.
    """
    # XXX: this is an internal housekeeping notion of defaults before
    # argparse has been set up; it is not accurate for all flags. Call it
    # with caution. Plugin defaults are missing, and some things are using
    # defaults defined in this file, not in constants.py :(
    default = constants.CLI_DEFAULTS[name]
    return copy.deepcopy(default)
def config_help(name: str, hidden: bool = False) -> Optional[str]:
    """Extract the help message for a `configuration.NamespaceConfig` property docstring."""
    if hidden:
        # argparse.SUPPRESS hides the option from --help output entirely.
        return argparse.SUPPRESS
    prop = getattr(configuration.NamespaceConfig, name)
    return inspect.getdoc(prop)
class HelpfulArgumentGroup:
    """Emulates an argparse group for use with HelpfulArgumentParser.

    This class is used in the add_group method of HelpfulArgumentParser.
    Command line arguments can be added to the group, but help
    suppression and default detection is applied by
    HelpfulArgumentParser when necessary.
    """

    def __init__(self, helpful_arg_parser: "helpful.HelpfulArgumentParser", topic: str) -> None:
        # Remember the owning parser and the topic this group belongs to.
        self._parser = helpful_arg_parser
        self._topic = topic

    def add_argument(self, *args: Any, **kwargs: Any) -> None:
        """Add a new command line argument to the argument group."""
        # Delegate to the helpful parser, tagging the argument with our topic.
        self._parser.add(self._topic, *args, **kwargs)
class CustomHelpFormatter(argparse.HelpFormatter):
    """This is a clone of ArgumentDefaultsHelpFormatter, with bugfixes.

    In particular we fix https://bugs.python.org/issue28742
    """

    def _get_help_string(self, action: argparse.Action) -> Optional[str]:
        helpstr = action.help
        # The default clause is only appended when the author has not
        # already mentioned the default in the help text.
        already_documented = (
            not action.help
            or '%(default)' in action.help
            or '(default:' in action.help
        )
        if not already_documented and action.default != argparse.SUPPRESS:
            defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
            if helpstr and (action.option_strings or action.nargs in defaulting_nargs):
                helpstr += ' (default: %(default)s)'
        return helpstr
class _DomainsAction(argparse.Action):
"""Action class for parsing domains."""
def __call__(self, parser: argparse.ArgumentParser, namespace: argparse.Namespace,
domain: Union[str, Sequence[Any], None],
option_string: Optional[str] = None) -> None:
"""Just wrap add_domains in argparseese."""
add_domains(namespace, str(domain) if domain is not None else None)
def add_domains(args_or_config: Union[argparse.Namespace, configuration.NamespaceConfig],
                domains: Optional[str]) -> List[str]:
    """Registers new domains to be used during the current client run.

    Domains are not added to the list of requested domains if they have
    already been registered.

    :param args_or_config: parsed command line arguments
    :type args_or_config: argparse.Namespace or
        configuration.NamespaceConfig
    :param str domains: one or more comma separated domains

    :returns: domains after they have been normalized and validated
    :rtype: `list` of `str`

    """
    validated_domains: List[str] = []
    if not domains:
        return validated_domains

    for raw in domains.split(","):
        cleaned = util.enforce_domain_sanity(raw.strip())
        validated_domains.append(cleaned)
        # Skip duplicates so the shared requested-domain list stays unique.
        if cleaned not in args_or_config.domains:
            args_or_config.domains.append(cleaned)
    return validated_domains
class CaseInsensitiveList(list):
    """A list that will ignore case when searching.

    This class is passed to the `choices` argument of `argparse.add_arguments`
    through the `helpful` wrapper. It is necessary due to special handling of
    command line arguments by `set_by_cli` in which the `type_func` is not applied."""

    def __contains__(self, element: object) -> bool:
        # Non-strings can never match; strings are lowercased before
        # comparing against the stored (assumed lowercase) choices.
        return isinstance(element, str) and super().__contains__(element.lower())
def _user_agent_comment_type(value: str) -> str:
if "(" in value or ")" in value:
raise argparse.ArgumentTypeError("may not contain parentheses")
return value
class _EncodeReasonAction(argparse.Action):
"""Action class for parsing revocation reason."""
def __call__(self, parser: argparse.ArgumentParser, namespace: argparse.Namespace,
reason: Union[str, Sequence[Any], None],
option_string: Optional[str] = None) -> None:
"""Encodes the reason for certificate revocation."""
if reason is None:
raise ValueError("Unexpected null reason.")
code = constants.REVOCATION_REASONS[str(reason).lower()]
setattr(namespace, self.dest, code)
def parse_preferred_challenges(pref_challs: Iterable[str]) -> List[str]:
    """Translate and validate preferred challenges.

    :param pref_challs: list of preferred challenge types
    :type pref_challs: `list` of `str`

    :returns: validated list of preferred challenge types
    :rtype: `list` of `str`

    :raises errors.Error: if pref_challs is invalid

    """
    # Accept short spellings for the two common challenge types.
    aliases = {"dns": "dns-01", "http": "http-01"}
    stripped = [chall.strip() for chall in pref_challs]
    challs = [aliases.get(chall, chall) for chall in stripped]

    unrecognized = ", ".join(name for name in challs
                             if name not in challenges.Challenge.TYPES)
    if unrecognized:
        raise errors.Error(
            "Unrecognized challenges: {0}".format(unrecognized))
    return challs
class _PrefChallAction(argparse.Action):
"""Action class for parsing preferred challenges."""
def __call__(self, parser: argparse.ArgumentParser, namespace: argparse.Namespace,
pref_challs: Union[str, Sequence[Any], None],
option_string: Optional[str] = None) -> None:
if pref_challs is None:
raise ValueError("Unexpected null pref_challs.")
try:
challs = parse_preferred_challenges(str(pref_challs).split(","))
except errors.Error as error:
raise argparse.ArgumentError(self, str(error))
namespace.pref_challs.extend(challs)
class _DeployHookAction(argparse.Action):
"""Action class for parsing deploy hooks."""
def __call__(self, parser: argparse.ArgumentParser, namespace: argparse.Namespace,
values: Union[str, Sequence[Any], None],
option_string: Optional[str] = None) -> None:
renew_hook_set = namespace.deploy_hook != namespace.renew_hook
if renew_hook_set and namespace.renew_hook != values:
raise argparse.ArgumentError(
self, "conflicts with --renew-hook value")
namespace.deploy_hook = namespace.renew_hook = values
class _RenewHookAction(argparse.Action):
"""Action class for parsing renew hooks."""
def __call__(self, parser: argparse.ArgumentParser, namespace: argparse.Namespace,
values: Union[str, Sequence[Any], None],
option_string: Optional[str] = None) -> None:
deploy_hook_set = namespace.deploy_hook is not None
if deploy_hook_set and namespace.deploy_hook != values:
raise argparse.ArgumentError(
self, "conflicts with --deploy-hook value")
namespace.renew_hook = values
def nonnegative_int(value: str) -> int:
    """Converts value to an int and checks that it is not negative.

    This function should used as the type parameter for argparse
    arguments.

    :param str value: value provided on the command line

    :returns: integer representation of value
    :rtype: int

    :raises argparse.ArgumentTypeError: if value isn't a non-negative integer

    """
    try:
        int_value = int(value)
    except ValueError:
        # "from None": the int() traceback adds nothing for a CLI-facing
        # validation error, so suppress the implicit exception context.
        raise argparse.ArgumentTypeError("value must be an integer") from None

    if int_value < 0:
        raise argparse.ArgumentTypeError("value must be non-negative")
    return int_value
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Gaussian Mixture Model Class:
contains the basic fields and methods of GMMs
The class GMM _old uses C bindings which are
computationally and memory efficient.
Author : Bertrand Thirion, 2006-2009
"""
import numpy as np
from scipy.linalg import eigvalsh
class GridDescriptor(object):
    """
    A tiny class to handle cartesian grids
    """

    def __init__(self, dim=1, lim=None, n_bins=None):
        """
        Parameters
        ----------
        dim: int, optional,
            the dimension of the grid
        lim: list of len(2*self.dim), optional
            the limits of the grid as (xmin, xmax, ymin, ymax, ...)
        n_bins: list of len(self.dim), optional
            the number of bins in each direction
        """
        self.dim = dim
        if lim is not None:
            # Fall back to set()'s default (10 bins) instead of forwarding
            # None: previously, None with dim == 1 produced a bogus
            # n_bins of array([None]) that broke make_grid().
            self.set(lim, n_bins if n_bins is not None else 10)
        elif n_bins is not None and np.size(n_bins) == self.dim:
            self.n_bins = np.ravel(np.array(n_bins))

    def set(self, lim, n_bins=10):
        """ set the limits of the grid and the number of bins

        Parameters
        ----------
        lim: list of len(2*self.dim),
            the limits of the grid as (xmin, xmax, ymin, ymax, ...)
        n_bins: list of len(self.dim), optional
            the number of bins in each direction

        Raises
        ------
        ValueError: if lim or n_bins are inconsistent with self.dim
        """
        if len(lim) == 2 * self.dim:
            self.lim = lim
        else:
            raise ValueError("Wrong dimension for grid definition")
        if np.size(n_bins) == self.dim:
            self.n_bins = np.ravel(np.array(n_bins))
        else:
            raise ValueError("Wrong dimension for grid definition")

    def make_grid(self):
        """ Compute the grid points

        Returns
        -------
        grid: array of shape (nb_nodes, self.dim)
            where nb_nodes is the prod of self.n_bins
        """
        size = np.prod(self.n_bins)
        grid = np.zeros((size, self.dim))
        grange = []
        # per-axis coordinates: xb linearly spaced points from xm to xM
        for j in range(self.dim):
            xm = self.lim[2 * j]
            xM = self.lim[2 * j + 1]
            if np.isscalar(self.n_bins):
                xb = self.n_bins
            else:
                xb = self.n_bins[j]
            gr = xm + float(xM - xm) / (xb - 1) * np.arange(xb).astype('f')
            grange.append(gr)

        if self.dim == 1:
            grid = np.array([[grange[0][i]] for i in range(xb)])
        if self.dim == 2:
            for i in range(self.n_bins[0]):
                for j in range(self.n_bins[1]):
                    grid[i * self.n_bins[1] + j] = np.array(
                        [grange[0][i], grange[1][j]])
        if self.dim == 3:
            for i in range(self.n_bins[0]):
                for j in range(self.n_bins[1]):
                    for k in range(self.n_bins[2]):
                        q = (i * self.n_bins[1] + j) * self.n_bins[2] + k
                        grid[q] = np.array([grange[0][i], grange[1][j],
                                            grange[2][k]])
        if self.dim > 3:
            raise NotImplementedError(
                'only dimensions <4 are currently handled')
        return grid
def best_fitting_GMM(x, krange, prec_type='full', niter=100, delta=1.e-4,
                     ninit=1, verbose=0):
    """
    Given a certain dataset x, find the best-fitting GMM
    with a number k of classes in a certain range defined by krange

    Parameters
    ----------
    x: array of shape (n_samples,dim)
        the data from which the model is estimated
    krange: list of floats,
        the range of values to test for k
    prec_type: string (to be chosen within 'full','diag'), optional,
        the covariance parameterization
    niter: int, optional,
        maximal number of iterations in the estimation process
    delta: float, optional,
        increment of data likelihood at which convergence is declared
    ninit: int
        number of initialization performed
    verbose=0: verbosity mode

    Returns
    -------
    mg : the best-fitting GMM instance

    Raises
    ------
    ValueError: if krange is empty (previously an obscure NameError)
    """
    # promote a 1D sample vector to an (n, 1) array
    if np.size(x) == x.shape[0]:
        x = np.reshape(x, (np.size(x), 1))
    dim = x.shape[1]

    bestbic = - np.inf
    bgmm = None
    for k in krange:
        lgmm = GMM(k, dim, prec_type)
        gmmk = lgmm.initialize_and_estimate(x, None, niter, delta, ninit,
                                            verbose)
        # model selection by BIC: keep the candidate with highest evidence
        bic = gmmk.evidence(x)
        if bic > bestbic:
            bestbic = bic
            bgmm = gmmk
        if verbose:
            # print() function (the Python 2 print statement is a
            # syntax error under Python 3)
            print('k', k, 'bic', bic)
    if bgmm is None:
        raise ValueError('krange is empty: no model could be fitted')
    return bgmm
def plot2D(x, my_gmm, z=None, with_dots=True, log_scale=False, mpaxes=None,
           verbose=0):
    """
    Given a set of points in a plane and a GMM, plot them

    Parameters
    ----------
    x: array of shape (npoints, dim=2),
        sample points
    my_gmm: GMM instance,
        whose density has to be plotted
    z: array of shape (npoints), optional
        that gives a labelling of the points in x
        by default, it is not taken into account
    with_dots, bool, optional
        whether to plot the dots or not
    log_scale: bool, optional
        whether to plot the likelihood in log scale or not
    mpaxes=None, int, optional
        if not None, axes handle for plotting
    verbose: verbosity mode, optional

    Returns
    -------
    gd, GridDescriptor instance,
        that represents the grid used in the function
    ax, handle to the figure axes

    Notes
    -----
    ``my_gmm`` is assumed to have a 'mixture_likelihood' method that takes
    an array of points of shape (np, dim) and returns an array of shape
    (np,my_gmm.k) that represents the likelihood component-wise
    """
    import matplotlib.pyplot as plt

    if x.shape[1] != my_gmm.dim:
        raise ValueError('Incompatible dimension between data and model')
    if x.shape[1] != 2:
        raise ValueError('this works only for 2D cases')

    gd1 = GridDescriptor(2)
    xmin, xmax = x.min(0), x.max(0)
    # pad the bounding box of the data by 10% on each side
    xm = 1.1 * xmin[0] - 0.1 * xmax[0]
    xs = 1.1 * xmax[0] - 0.1 * xmin[0]
    ym = 1.1 * xmin[1] - 0.1 * xmax[1]
    ys = 1.1 * xmax[1] - 0.1 * xmin[1]
    gd1.set([xm, xs, ym, ys], [51, 51])

    grid = gd1.make_grid()
    L = my_gmm.mixture_likelihood(grid)
    if verbose:
        intl = L.sum() * (xs - xm) * (ys - ym) / 2500
        # print() function (Python 2 print statement is a Py3 syntax error)
        print('integral of the density on the domain ', intl)

    # "is None" (not "== None"): mpaxes/z may be ndarrays, for which
    # == broadcasts elementwise and has an ambiguous truth value
    if mpaxes is None:
        plt.figure()
        ax = plt.subplot(1, 1, 1)
    else:
        ax = mpaxes

    gdx = gd1.n_bins[0]
    # floor division: under Python 3, "/" would yield a float and break reshape
    Pdens = np.reshape(L, (gdx, np.size(L) // gdx))
    extent = [xm, xs, ym, ys]
    if log_scale:
        plt.imshow(np.log(Pdens.T), alpha=2.0, origin='lower',
                   extent=extent)
    else:
        plt.imshow(Pdens.T, alpha=2.0, origin='lower', extent=extent)

    if with_dots:
        if z is None:
            plt.plot(x[:, 0], x[:, 1], 'o')
        else:
            # one color per label, sampled from the hsv colormap
            hsv = plt.cm.hsv(range(256))
            col = hsv[range(0, 256, 256 // int(z.max() + 1))]
            for k in range(z.max() + 1):
                plt.plot(x[z == k, 0], x[z == k, 1], 'o', color=col[k])

    plt.axis(extent)
    plt.colorbar()
    return gd1, ax
class GMM(object):
    """Standard GMM.

    this class contains the following members
    k (int): the number of components in the mixture
    dim (int): is the dimension of the data
    prec_type = 'full' (string) is the parameterization
        of the precisions/covariance matrices:
        either 'full' or 'diagonal'.
    means: array of shape (k,dim):
        all the means (mean parameters) of the components
    precisions: array of shape (k,dim,dim) for 'full' precisions,
        or (k,dim) for 'diag' precisions:
        the precisions (inverse covariance matrix) of the components
    weights: array of shape(k): weights of the mixture

    fixme
    -----
    no copy method
    """
def __init__(self, k=1, dim=1, prec_type='full', means=None,
             precisions=None, weights=None):
    """
    Initialize the structure, at least with the dimensions of the problem

    Parameters
    ----------
    k (int) the number of classes of the model
    dim (int) the dimension of the problem
    prec_type = 'full' : covariance:precision parameterization
        (diagonal 'diag' or full 'full').
    means = None: array of shape (self.k,self.dim)
    precisions = None: array of shape (self.k,self.dim,self.dim)
        or (self.k, self.dim)
    weights=None: array of shape (self.k)

    By default, means, precision and weights are set as
    zeros()
    eye()
    1/k ones()
    with the correct dimensions
    """
    self.k = k
    self.dim = dim
    self.prec_type = prec_type
    self.means = means
    self.precisions = precisions
    self.weights = weights

    # "is None" (not "== None"): the arguments may be ndarrays, and
    # ndarray.__eq__ broadcasts elementwise instead of testing identity,
    # which makes the original "== None" checks crash on array input.
    if self.means is None:
        self.means = np.zeros((self.k, self.dim))
    if self.precisions is None:
        if prec_type == 'full':
            # one identity precision matrix per component
            prec = np.reshape(np.eye(self.dim), (1, self.dim, self.dim))
            self.precisions = np.repeat(prec, self.k, 0)
        else:
            self.precisions = np.ones((self.k, self.dim))
    if self.weights is None:
        # uniform mixture weights by default
        self.weights = np.ones(self.k) * 1.0 / self.k
def plugin(self, means, precisions, weights):
    """
    Set manually the weights, means and precision of the model

    Parameters
    ----------
    means: array of shape (self.k,self.dim)
    precisions: array of shape (self.k,self.dim,self.dim)
        or (self.k, self.dim)
    weights: array of shape (self.k)

    Raises ValueError (via self.check) if the shapes are inconsistent
    with self.k / self.dim / self.prec_type.
    """
    self.means = means
    self.precisions = precisions
    self.weights = weights
    # validate the shapes of the freshly installed parameters
    self.check()
def check(self):
    """
    Checking the shape of different matrices involved in the model

    Raises ValueError if means, weights or precisions are inconsistent
    with (self.k, self.dim, self.prec_type).
    """
    # means must be (k, dim)
    if self.means.shape[0] != self.k:
        raise ValueError("self.means does not have correct dimensions")

    if self.means.shape[1] != self.dim:
        raise ValueError("self.means does not have correct dimensions")

    # weights must hold exactly one value per component
    if self.weights.size != self.k:
        raise ValueError("self.weights does not have correct dimensions")

    if self.dim != self.precisions.shape[1]:
        raise ValueError(
            "self.precisions does not have correct dimensions")

    # 'full' precisions are (k, dim, dim)
    if self.prec_type == 'full':
        if self.dim != self.precisions.shape[2]:
            raise ValueError(
                "self.precisions does not have correct dimensions")

    # 'diag' precisions are (k, dim), i.e. the same shape as the means
    if self.prec_type == 'diag':
        if np.shape(self.precisions) != np.shape(self.means):
            raise ValueError(
                "self.precisions does not have correct dimensions")

    if self.precisions.shape[0] != self.k:
        raise ValueError(
            "self.precisions does not have correct dimensions")

    if self.prec_type not in ['full', 'diag']:
        raise ValueError('unknown precisions type')
def check_x(self, x):
    """
    essentially check that x.shape[1]==self.dim
    x is returned with possibly reshaping
    """
    # a flat (n,) vector is promoted to an (n, 1) column
    if np.size(x) == x.shape[0]:
        x = np.reshape(x, (-1, 1))
    if x.shape[1] != self.dim:
        raise ValueError('incorrect size for x')
    return x
def initialize(self, x):
    """Initializes self according to a certain dataset x:
    1. sets the regularizing hyper-parameters
    2. initializes z using a k-means algorithm, then
    3. update the parameters

    Parameters
    ----------
    x, array of shape (n_samples,self.dim)
        the data used in the estimation process
    """
    from .utils import kmeans
    n = x.shape[0]

    # 1. set the priors
    self.guess_regularizing(x, bcheck=1)

    # 2. initialize the memberships
    if self.k > 1:
        _, z, _ = kmeans(x, self.k)
    else:
        # np.int was removed from NumPy (deprecated 1.20); the builtin
        # int is the documented equivalent
        z = np.zeros(n).astype(int)

    # hard one-hot assignment matrix from the k-means labels
    l = np.zeros((n, self.k))
    l[np.arange(n), z] = 1

    # 3. update the parameters from this hard assignment
    self.update(x, l)
def pop(self, like, tiny=1.e-15):
    """compute the population, i.e. the statistics of allocation

    Parameters
    ----------
    like: array of shape (n_samples,self.k):
        the likelihood of each item being in each class
    tiny: float, optional
        floor applied to the row sums to avoid division by zero

    Returns
    -------
    array of shape (self.k,): expected number of items per class
    """
    # normalize each row to sum to one (guarding all-zero rows), then
    # accumulate the per-class responsibilities
    row_sums = np.maximum(tiny, like.sum(1))
    responsibilities = (like.T / row_sums).T
    return responsibilities.sum(0)
def update(self, x, l):
    """ Identical to self._Mstep(x,l)

    Parameters
    ----------
    x: array of shape (n_samples, self.dim), the data
    l: array of shape (n_samples, self.k), per-class likelihood/weights
    """
    # thin alias kept for API compatibility
    self._Mstep(x, l)
def likelihood(self, x):
    """
    return the likelihood of the model for the data x
    the values are weighted by the components weights

    Parameters
    ----------
    x array of shape (n_samples,self.dim)
        the data used in the estimation process

    Returns
    -------
    like, array of shape(n_samples,self.k)
        component-wise likelihood
    """
    # scale each component's unweighted likelihood by its mixture weight
    return self.unweighted_likelihood(x) * self.weights
def unweighted_likelihood_(self, x):
    """
    return the likelihood of each data for each component
    the values are not weighted by the component weights

    Parameters
    ----------
    x: array of shape (n_samples,self.dim)
        the data used in the estimation process

    Returns
    -------
    like, array of shape(n_samples,self.k)
        unweighted component-wise likelihood

    Notes
    -----
    Reference (slower) implementation; see unweighted_likelihood for
    the vectorized variant.
    """
    n = x.shape[0]
    like = np.zeros((n, self.k))

    for k in range(self.k):
        # compute the data-independent factor first
        w = - np.log(2 * np.pi) * self.dim
        m = np.reshape(self.means[k], (1, self.dim))
        b = self.precisions[k]
        if self.prec_type == 'full':
            # log-determinant of the precision via its eigenvalues
            w += np.log(eigvalsh(b)).sum()
            dx = m - x
            # squared Mahalanobis distance of each sample to the mean
            q = np.sum(np.dot(dx, b) * dx, 1)
        else:
            w += np.sum(np.log(b))
            q = np.dot((m - x) ** 2, b)
        # turn the log-density into a density
        w -= q
        w /= 2
        like[:, k] = np.exp(w)
    return like
def unweighted_likelihood(self, x):
    """
    return the likelihood of each data for each component
    the values are not weighted by the component weights

    Parameters
    ----------
    x: array of shape (n_samples,self.dim)
        the data used in the estimation process

    Returns
    -------
    like, array of shape(n_samples,self.k)
        unweighted component-wise likelihood

    Notes
    -----
    Hopefully faster
    """
    # work on the transposed data so each precision product is a
    # single (dim, dim) x (dim, n) matrix multiply
    xt = x.T.copy()
    n = x.shape[0]
    like = np.zeros((n, self.k))

    for k in range(self.k):
        # compute the data-independent factor first
        w = - np.log(2 * np.pi) * self.dim
        m = np.reshape(self.means[k], (self.dim, 1))
        b = self.precisions[k]
        if self.prec_type == 'full':
            # log-determinant of the precision via its eigenvalues
            w += np.log(eigvalsh(b)).sum()
            dx = xt - m
            sqx = dx * np.dot(b, dx)
            # accumulate the squared Mahalanobis distance per dimension
            q = np.zeros(n)
            for d in range(self.dim):
                q += sqx[d]
        else:
            w += np.sum(np.log(b))
            q = np.dot(b, (m - xt) ** 2)
        # turn the log-density into a density
        w -= q
        w /= 2
        like[:, k] = np.exp(w)
    return like
def mixture_likelihood(self, x):
    """Returns the likelihood of the mixture for x

    Parameters
    ----------
    x: array of shape (n_samples,self.dim)
        the data used in the estimation process

    Returns
    -------
    array of shape (n_samples,): total likelihood of each sample
    """
    checked = self.check_x(x)
    # total likelihood = sum of the weighted component likelihoods
    return self.likelihood(checked).sum(1)
def average_log_like(self, x, tiny=1.e-15):
    """returns the averaged log-likelihood of the mode for the dataset x

    Parameters
    ----------
    x: array of shape (n_samples,self.dim)
        the data used in the estimation process
    tiny = 1.e-15: a small constant to avoid numerical singularities
    """
    x = self.check_x(x)
    # floor the per-sample likelihoods before the log to avoid -inf
    total = np.maximum(self.likelihood(x).sum(1), tiny)
    return np.mean(np.log(total))
def evidence(self, x):
    """Computation of bic approximation of evidence

    Parameters
    ----------
    x array of shape (n_samples,dim)
        the data from which bic is computed

    Returns
    -------
    the bic value
    """
    data = self.check_x(x)
    # delegate to bic() with the standard numerical floor
    return self.bic(self.likelihood(data), 1.e-15)
def bic(self, like, tiny=1.e-15):
    """Computation of bic approximation of evidence

    Parameters
    ----------
    like, array of shape (n_samples, self.k)
        component-wise likelihood
    tiny=1.e-15, a small constant to avoid numerical singularities

    Returns
    -------
    the bic value, float
    """
    sl = np.sum(like, 1)
    sl = np.maximum(sl, tiny)
    bicc = np.sum(np.log(sl))

    # number of parameters; floor division keeps eta an integer, which
    # matches the original Python-2 semantics of "/" here (under
    # Python 3, true division would silently change the penalty)
    n = like.shape[0]
    if self.prec_type == 'full':
        eta = self.k * (1 + self.dim + (self.dim * self.dim + 1) // 2) - 1
    else:
        eta = self.k * (1 + 2 * self.dim) - 1
    bicc = bicc - np.log(n) * eta
    return bicc
def _Estep(self, x):
    """
    E step of the EM algo

    returns the likelihood per class of each data item

    Parameters
    ----------
    x array of shape (n_samples,dim)
        the data used in the estimation process

    Returns
    -------
    likelihood array of shape(n_samples,self.k)
        component-wise likelihood
    """
    # the E step reduces to evaluating the weighted likelihood
    return self.likelihood(x)
def guess_regularizing(self, x, bcheck=1):
    """
    Set the regularizing priors as weakly informative
    according to Fraley and raftery;
    Journal of Classification 24:155-181 (2007)

    Parameters
    ----------
    x array of shape (n_samples,dim)
        the data used in the estimation process
    bcheck: int/bool, optional
        if true, run self.check() after setting the priors

    Sets prior_means, prior_weights, prior_scale, prior_dof and
    prior_shrinkage (required by _Mstep), and resets self.weights
    to uniform.
    """
    small = 0.01
    # the mean of the data
    mx = np.reshape(x.mean(0), (1, self.dim))
    dx = x - mx
    # empirical covariance of the data
    vx = np.dot(dx.T, dx) / x.shape[0]
    # prior scale: inverse of the per-axis variances
    if self.prec_type == 'full':
        px = np.reshape(np.diag(1.0 / np.diag(vx)),
                        (1, self.dim, self.dim))
    else:
        px = np.reshape(1.0 / np.diag(vx), (1, self.dim))
    # scale by k**(2/dim), written as exp((2/dim) * log k)
    px *= np.exp(2.0 / self.dim * np.log(self.k))
    self.prior_means = np.repeat(mx, self.k, 0)
    self.prior_weights = np.ones(self.k) / self.k
    self.prior_scale = np.repeat(px, self.k, 0)
    self.prior_dof = self.dim + 2
    self.prior_shrinkage = small
    # reset mixture weights to uniform
    self.weights = np.ones(self.k) * 1.0 / self.k
    if bcheck:
        self.check()
def _Mstep(self, x, like):
    """
    M step regularized according to the procedure of
    Fraley et al. 2007

    Parameters
    ----------
    x: array of shape(n_samples,self.dim)
        the data from which the model is estimated
    like: array of shape(n_samples,self.k)
        the likelihood of the data under each class

    Requires the prior_* attributes set by guess_regularizing.
    Updates self.weights, self.means and self.precisions in place.
    """
    from numpy.linalg import pinv
    tiny = 1.e-15
    pop = self.pop(like)
    # turn the likelihoods into per-sample responsibilities
    sl = np.maximum(tiny, np.sum(like, 1))
    like = (like.T / sl).T

    # shrinkage,weights,dof
    self.weights = self.prior_weights + pop
    self.weights = self.weights / self.weights.sum()

    # reshape
    pop = np.reshape(pop, (self.k, 1))
    prior_shrinkage = self.prior_shrinkage
    shrinkage = pop + prior_shrinkage

    # means: responsibility-weighted data mean, shrunk toward the prior
    means = np.dot(like.T, x) + self.prior_means * prior_shrinkage
    self.means = means / shrinkage

    #precisions
    empmeans = np.dot(like.T, x) / np.maximum(pop, tiny)
    empcov = np.zeros(np.shape(self.precisions))
    if self.prec_type == 'full':
        # empirical scatter matrices, one per component
        for k in range(self.k):
            dx = x - empmeans[k]
            empcov[k] = np.dot(dx.T, like[:, k:k + 1] * dx)
        #covariance: prior scale (inverted) plus empirical scatter
        covariance = np.array([pinv(self.prior_scale[k])
                               for k in range(self.k)])
        covariance += empcov

        # correction term for the distance of the empirical means
        # from the prior means
        dx = np.reshape(empmeans - self.prior_means, (self.k, self.dim, 1))
        addcov = np.array([np.dot(dx[k], dx[k].T) for k in range(self.k)])
        apms = np.reshape(prior_shrinkage * pop / shrinkage,
                          (self.k, 1, 1))
        covariance += (addcov * apms)
        # normalize by the posterior degrees of freedom
        dof = self.prior_dof + pop + self.dim + 2
        covariance /= np.reshape(dof, (self.k, 1, 1))

        # precision
        self.precisions = np.array([pinv(covariance[k]) \
                                    for k in range(self.k)])
    else:
        # diagonal case: per-axis weighted variances
        for k in range(self.k):
            dx = x - empmeans[k]
            empcov[k] = np.sum(dx ** 2 * like[:, k:k + 1], 0)
        # covariance
        covariance = np.array([1.0 / self.prior_scale[k]
                               for k in range(self.k)])
        covariance += empcov

        # correction term for the distance of the empirical means
        # from the prior means
        dx = np.reshape(empmeans - self.prior_means, (self.k, self.dim, 1))
        addcov = np.array([np.sum(dx[k] ** 2, 0) for k in range(self.k)])
        apms = np.reshape(prior_shrinkage * pop / shrinkage, (self.k, 1))
        covariance += addcov * apms
        # normalize by the posterior degrees of freedom
        dof = self.prior_dof + pop + self.dim + 2
        covariance /= np.reshape(dof, (self.k, 1))

        # precision
        self.precisions = np.array([1.0 / covariance[k] \
                                    for k in range(self.k)])
def map_label(self, x, like=None):
    """return the MAP labelling of x

    Parameters
    ----------
    x array of shape (n_samples,dim)
        the data under study
    like=None array of shape(n_samples,self.k)
        component-wise likelihood
        if like is None, it is recomputed

    Returns
    -------
    z: array of shape(n_samples): the resulting MAP labelling
        of the rows of x
    """
    # "is None" rather than "== None": when a likelihood ndarray is
    # passed, ndarray.__eq__ compares elementwise and its truth value
    # is ambiguous, so the original check crashed on valid input.
    if like is None:
        like = self.likelihood(x)
    # MAP label = index of the most likely component per sample
    z = np.argmax(like, 1)
    return z
def estimate(self, x, niter=100, delta=1.e-4, verbose=0):
""" Estimation of the model given a dataset x
Parameters
----------
x array of shape (n_samples,dim)
the data from which the model is estimated
niter=100: maximal number of iterations in the estimation process
delta = 1.e-4: increment of data likelihood at which
convergence is declared
verbose=0: verbosity mode
Returns
-------
bic : an asymptotic approximation of model evidence
"""
# check that the data is OK
x = self.check_x(x)
# alternation of E/M step until convergence
tiny = 1.e-15
av_ll_old = - np.inf
for i in range(niter):
l = self._Estep(x)
av_ll = np.mean(np.log(np.maximum(np.sum(l, 1), tiny)))
if av_ll < av_ll_old + delta:
if verbose:
print 'iteration:', i, 'log-likelihood:', av_ll,\
'old value:', av_ll_old
break
else:
av_ll_old = av_ll
if verbose:
print i, av_ll, self.bic(l)
self._Mstep(x, l)
return self.bic(l)
def initialize_and_estimate(self, x, z=None, niter=100, delta=1.e-4,\
ninit=1, verbose=0):
"""Estimation of self given x
Parameters
----------
x array of shape (n_samples,dim)
the data from which the model is estimated
z = None: array of shape (n_samples)
a prior labelling of the data to initialize the computation
niter=100: maximal number of iterations in the estimation process
delta = 1.e-4: increment of data likelihood at which
convergence is declared
ninit=1: number of initialization performed
to reach a good solution
verbose=0: verbosity mode
Returns
-------
the best model is returned
"""
bestbic = - np.inf
bestgmm = GMM(self.k, self.dim, self.prec_type)
bestgmm.initialize(x)
for i in range(ninit):
# initialization -> Kmeans
self.initialize(x)
# alternation of E/M step until convergence
bic = self.estimate(x, niter=niter, delta=delta, verbose=0)
if bic > bestbic:
bestbic = bic
bestgmm.plugin(self.means, self.precisions, self.weights)
return bestgmm
def train(self, x, z=None, niter=100, delta=1.e-4, ninit=1, verbose=0):
"""Idem initialize_and_estimate
"""
return self.initialize_and_estimate(x, z, niter, delta, ninit, verbose)
def test(self, x, tiny=1.e-15):
"""Returns the log-likelihood of the mixture for x
Parameters
----------
x array of shape (n_samples,self.dim)
the data used in the estimation process
Returns
-------
ll: array of shape(n_samples)
the log-likelihood of the rows of x
"""
return np.log(np.maximum(self.mixture_likelihood(x), tiny))
def show_components(self, x, gd, density=None, mpaxes=None):
"""Function to plot a GMM -- Currently, works only in 1D
Parameters
----------
x: array of shape(n_samples, dim)
the data under study
gd: GridDescriptor instance
density: array os shape(prod(gd.n_bins))
density of the model one the discrete grid implied by gd
by default, this is recomputed
mpaxes: axes handle to make the figure, optional,
if None, a new figure is created
"""
import matplotlib.pyplot as plt
if density is None:
density = self.mixture_likelihood(gd.make_grid())
if gd.dim > 1:
raise NotImplementedError("only implemented in 1D")
step = 3.5 * np.std(x) / np.exp(np.log(np.size(x)) / 3)
bins = max(10, int((x.max() - x.min()) / step))
xmin = 1.1 * x.min() - 0.1 * x.max()
xmax = 1.1 * x.max() - 0.1 * x.min()
h, c = np.histogram(x, bins, [xmin, xmax], normed=True)
# Make code robust to new and old behavior of np.histogram
c = c[:len(h)]
offset = (xmax - xmin) / (2 * bins)
c += offset / 2
grid = gd.make_grid()
if mpaxes == None:
plt.figure()
ax = plt.axes()
else:
ax = mpaxes
ax.plot(c + offset, h, linewidth=2)
for k in range(self.k):
ax.plot(grid, density[:, k], linewidth=2)
ax.set_title('Fit of the density with a mixture of Gaussians',
fontsize=12)
legend = ['data']
for k in range(self.k):
legend.append('component %d' % (k + 1))
l = ax.legend(tuple(legend))
for t in l.get_texts():
t.set_fontsize(12)
ax.set_xticklabels(ax.get_xticks(), fontsize=12)
ax.set_yticklabels(ax.get_yticks(), fontsize=12)
def show(self, x, gd, density=None, axes=None):
"""
Function to plot a GMM, still in progress
Currently, works only in 1D and 2D
Parameters
----------
x: array of shape(n_samples, dim)
the data under study
gd: GridDescriptor instance
density: array os shape(prod(gd.n_bins))
density of the model one the discrete grid implied by gd
by default, this is recomputed
"""
import matplotlib.pyplot as plt
# recompute the density if necessary
if density is None:
density = self.mixture_likelihood(gd, x)
if axes is None:
axes = plt.figure()
if gd.dim == 1:
from ..statistics.empirical_pvalue import \
smoothed_histogram_from_samples
h, c = smoothed_histogram_from_samples(x, normalized=True)
offset = (c.max() - c.min()) / (2 * c.size)
grid = gd.make_grid()
h /= h.sum()
h /= (2 * offset)
plt.plot(c[: -1] + offset, h)
plt.plot(grid, density)
if gd.dim == 2:
plt.figure()
xm, xM, ym, yM = gd.lim[0:3]
gd0 = gd.n_bins[0]
Pdens = np.reshape(density, (gd0, np.size(density) / gd0))
axes.imshow(Pdens.T, None, None, None, 'nearest',
1.0, None, None, 'lower', [xm, xM, ym, yM])
axes.plot(x[:, 0], x[:, 1], '.k')
axes.axis([xm, xM, ym, yM])
return axes
| |
# coding: utf-8
"""
Our City App
Our City App internal apis # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import io
import json
import logging
import re
import ssl
import certifi
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import urlencode
import urllib3
from oca.exceptions import ApiException, ApiValueError
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
    """File-like wrapper around a urllib3 HTTP response."""

    def __init__(self, resp):
        """Keep the raw response and mirror its commonly used fields."""
        self.urllib3_response = resp
        # Mirror the frequently inspected attributes for direct access.
        for attr in ('status', 'reason', 'data'):
            setattr(self, attr, getattr(resp, attr))

    def getheaders(self):
        """Returns a dictionary of the response headers."""
        return self.urllib3_response.getheaders()

    def getheader(self, name, default=None):
        """Returns a given response header."""
        return self.urllib3_response.getheader(name, default)
class RESTClientObject(object):
    """urllib3-backed HTTP client used by the generated API classes.

    Translates the client configuration (SSL verification, CA bundle,
    client certs, proxy, retries, pool sizing) into a urllib3
    PoolManager/ProxyManager and exposes verb-specific helpers that all
    funnel through :meth:`request`.
    """

    def __init__(self, configuration, pools_size=4, maxsize=None):
        """Build the connection pool manager from *configuration*.

        :param configuration: configuration object carrying SSL, proxy,
            retry and pool-size settings
        :param pools_size: number of connection pools to cache
        :param maxsize: max parallel connections per host; falls back to
            configuration.connection_pool_maxsize, then 4
        """
        # urllib3.PoolManager will pass all kw parameters to connectionpool
        # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
        # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
        # maxsize is the number of requests to host that are allowed in parallel # noqa: E501
        # Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501

        # cert_reqs: enforce certificate validation unless explicitly disabled
        if configuration.verify_ssl:
            cert_reqs = ssl.CERT_REQUIRED
        else:
            cert_reqs = ssl.CERT_NONE

        # ca_certs
        if configuration.ssl_ca_cert:
            ca_certs = configuration.ssl_ca_cert
        else:
            # if not set certificate file, use Mozilla's root certificates.
            ca_certs = certifi.where()

        # optional pool arguments only forwarded when actually configured
        addition_pool_args = {}
        if configuration.assert_hostname is not None:
            addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501

        if configuration.retries is not None:
            addition_pool_args['retries'] = configuration.retries

        if maxsize is None:
            if configuration.connection_pool_maxsize is not None:
                maxsize = configuration.connection_pool_maxsize
            else:
                maxsize = 4

        # https pool manager: route through a proxy when one is configured
        if configuration.proxy:
            self.pool_manager = urllib3.ProxyManager(
                num_pools=pools_size,
                maxsize=maxsize,
                cert_reqs=cert_reqs,
                ca_certs=ca_certs,
                cert_file=configuration.cert_file,
                key_file=configuration.key_file,
                proxy_url=configuration.proxy,
                proxy_headers=configuration.proxy_headers,
                **addition_pool_args
            )
        else:
            self.pool_manager = urllib3.PoolManager(
                num_pools=pools_size,
                maxsize=maxsize,
                cert_reqs=cert_reqs,
                ca_certs=ca_certs,
                cert_file=configuration.cert_file,
                key_file=configuration.key_file,
                **addition_pool_args
            )

    def request(self, method, url, query_params=None, headers=None,
                body=None, post_params=None, _preload_content=True,
                _request_timeout=None):
        """Perform requests.

        :param method: http request method
        :param url: http request url
        :param query_params: query parameters in the url
        :param headers: http request headers
        :param body: request json body, for `application/json`
        :param post_params: request post parameters,
                            `application/x-www-form-urlencoded`
                            and `multipart/form-data`
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        """
        method = method.upper()
        assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
                          'PATCH', 'OPTIONS']

        # body and post_params are mutually exclusive encodings of the payload
        if post_params and body:
            raise ApiValueError(
                "body parameter cannot be used with post_params parameter."
            )

        post_params = post_params or {}
        headers = headers or {}

        # translate the user-facing timeout spec into a urllib3.Timeout
        timeout = None
        if _request_timeout:
            if isinstance(_request_timeout, (int, ) if six.PY3 else (int, long)):  # noqa: E501,F821
                timeout = urllib3.Timeout(total=_request_timeout)
            elif (isinstance(_request_timeout, tuple) and
                  len(_request_timeout) == 2):
                timeout = urllib3.Timeout(
                    connect=_request_timeout[0], read=_request_timeout[1])

        if 'Content-Type' not in headers:
            headers['Content-Type'] = 'application/json'

        try:
            # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
            if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
                if query_params:
                    url += '?' + urlencode(query_params)
                # dispatch on Content-Type to pick the payload encoding
                if re.search('json', headers['Content-Type'], re.IGNORECASE):
                    request_body = None
                    if body is not None:
                        request_body = json.dumps(body)
                    r = self.pool_manager.request(
                        method, url,
                        body=request_body,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                elif headers['Content-Type'] == 'application/x-www-form-urlencoded':  # noqa: E501
                    r = self.pool_manager.request(
                        method, url,
                        fields=post_params,
                        encode_multipart=False,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                elif headers['Content-Type'] == 'multipart/form-data':
                    # must del headers['Content-Type'], or the correct
                    # Content-Type which generated by urllib3 will be
                    # overwritten.
                    del headers['Content-Type']
                    r = self.pool_manager.request(
                        method, url,
                        fields=post_params,
                        encode_multipart=True,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                # Pass a `string` parameter directly in the body to support
                # other content types than Json when `body` argument is
                # provided in serialized form
                elif isinstance(body, str) or isinstance(body, bytes):
                    request_body = body
                    r = self.pool_manager.request(
                        method, url,
                        body=request_body,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                else:
                    # Cannot generate the request from given parameters
                    msg = """Cannot prepare a request message for provided
                             arguments. Please check that your arguments match
                             declared content type."""
                    raise ApiException(status=0, reason=msg)
            # For `GET`, `HEAD`
            else:
                r = self.pool_manager.request(method, url,
                                              fields=query_params,
                                              preload_content=_preload_content,
                                              timeout=timeout,
                                              headers=headers)
        except urllib3.exceptions.SSLError as e:
            msg = "{0}\n{1}".format(type(e).__name__, str(e))
            raise ApiException(status=0, reason=msg)

        if _preload_content:
            r = RESTResponse(r)

            # log response body
            logger.debug("response body: %s", r.data)

        # any non-2xx status is surfaced as an ApiException
        if not 200 <= r.status <= 299:
            raise ApiException(http_resp=r)

        return r

    def GET(self, url, headers=None, query_params=None, _preload_content=True,
            _request_timeout=None):
        """Perform a GET request; see :meth:`request` for parameters."""
        return self.request("GET", url,
                            headers=headers,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            query_params=query_params)

    def HEAD(self, url, headers=None, query_params=None, _preload_content=True,
             _request_timeout=None):
        """Perform a HEAD request; see :meth:`request` for parameters."""
        return self.request("HEAD", url,
                            headers=headers,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            query_params=query_params)

    def OPTIONS(self, url, headers=None, query_params=None, post_params=None,
                body=None, _preload_content=True, _request_timeout=None):
        """Perform an OPTIONS request; see :meth:`request` for parameters."""
        return self.request("OPTIONS", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)

    def DELETE(self, url, headers=None, query_params=None, body=None,
               _preload_content=True, _request_timeout=None):
        """Perform a DELETE request; see :meth:`request` for parameters."""
        return self.request("DELETE", url,
                            headers=headers,
                            query_params=query_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)

    def POST(self, url, headers=None, query_params=None, post_params=None,
             body=None, _preload_content=True, _request_timeout=None):
        """Perform a POST request; see :meth:`request` for parameters."""
        return self.request("POST", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)

    def PUT(self, url, headers=None, query_params=None, post_params=None,
            body=None, _preload_content=True, _request_timeout=None):
        """Perform a PUT request; see :meth:`request` for parameters."""
        return self.request("PUT", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)

    def PATCH(self, url, headers=None, query_params=None, post_params=None,
              body=None, _preload_content=True, _request_timeout=None):
        """Perform a PATCH request; see :meth:`request` for parameters."""
        return self.request("PATCH", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)
| |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pub/sub mechanism for status messages."""
from threading import Lock
from collections import deque
from logging import DEBUG
from twisted.internet import reactor
from aquilon.python_patches import load_uuid_quickly
uuid = load_uuid_quickly() # pylint: disable=C0103
# Some requests can generate many debug messages. After this limit is
# passed older records will be replaced with None.
# This is somewhat arbitrary as a number. If each log message was a
# string of 100 bytes then 10000 would be about 1 MB. However the log
# record object has quite a few more fields (source file, line, function
# name, and potentially exception info) which can bloat this quite a bit.
MAX_DEBUG_MESSAGES_PER_REQUEST = 10000
class RequestStatus(object):
    """Store status information for each incoming request.

    Each request will get one of these objects to write status
    information into.  Other commands (currently only show_request)
    can attach StatusSubscriber instances that will be called whenever
    new status info is available.  The info is stored as LogRecord
    objects from the python logging module.

    The initial implementation of this class used Condition objects to
    send notifications about available messages.  This caused two
    problems.  First and most important, deadlocks.  This code needs
    to work from threads that are themselves using lock mechanisms.
    Second, speed.  Anything beyond the simple Lock mechanism is
    implemented in python (that is, not natively).  Performance is
    horrible.

    The current implementation uses callbacks to reduce the number
    of threads touching this object.  Since there could be multiple
    threads we still need to lock operations that change internal
    state.  The lock in use is the primitive Lock provided by python.
    It is the only lock construct implemented natively in cPython
    (as of 2.5) - the rest are built on top of it.
    """

    def __init__(self, auditid, requestid=None):
        """Should only be created by the StatusCatalog."""
        self.auditid = str(auditid) if auditid is not None else None
        self.requestid = requestid
        self.user = ""
        self.command = ""
        self.args = []
        self.description = ""
        # Descriptions of subscriber commands (e.g. show_request) watching
        # this request, as opposed to the request's own description.
        self.subscriber_descriptions = []
        # LogRecord entries; debug entries may later be replaced by None
        # to cap memory usage (see publish()).
        self.records = []
        # Indices into self.records of debug-level entries, oldest first.
        self.debug_fifo = deque()
        self.is_finished = False
        # Dict of subscribers to the length of the records list the last
        # time it was processed by the subscriber.
        self.subscribers = {}
        # This lock should be acquired for any access to records, is_finished,
        # or subscribers.
        # Need to be careful to be as fine-grained as possible.
        self.lock = Lock()

    def create_description(self, user, command, id, kwargs):
        """Build and store a human-readable 'aq ...' command line.

        If *id* is not this request's auditid, the description belongs to
        a subscriber command and is stored in subscriber_descriptions;
        otherwise it becomes this request's own description.
        """
        massaged = []
        for (k, v) in kwargs.items():
            # 'format=raw' is the default presentation; omit it
            if k == 'format' and v == 'raw':
                continue
            if v == 'True':
                massaged.append(" --%s" % k)
            elif v is None:
                pass
            else:
                massaged.append(" --%s=%s" % (k, v))
        description = '[%s] %s: aq %s%s' % (id, user, command,
                                            "".join(massaged))
        if str(id) != self.auditid:
            self.subscriber_descriptions.append(description)
            return
        self.user = user
        self.command = command
        self.kwargs = kwargs
        self.description = description

    def publish(self, record):
        """Add a new message into the status log and notify watchers."""
        with self.lock:
            self.records.append(record)
            # Constrain the number of debug messages kept to keep memory
            # usage in check.
            if record.levelno <= DEBUG:
                self.debug_fifo.append(len(self.records) - 1)
                if len(self.debug_fifo) > MAX_DEBUG_MESSAGES_PER_REQUEST:
                    remove_index = self.debug_fifo.popleft()
                    self.records[remove_index] = None
            for (subscriber, processed) in self.subscribers.items():
                self._notify_subscriber(subscriber, processed)

    def add_subscriber(self, subscriber):
        """The subscriber should subclass/implement StatusSubscriber."""
        # Only need to lock the operations if not finished (more incoming
        # records expected).
        with self.lock:
            if not self.is_finished:
                self.subscribers[subscriber] = 0
                self._notify_subscriber(subscriber, processed=0)
                return
        # Request already finished: replay everything and close out the
        # subscriber immediately, outside the lock.
        self._notify_subscriber(subscriber, processed=0)
        subscriber.finish()

    def _notify_subscriber(self, subscriber, processed):
        """Feed the subscriber every record it has not yet seen."""
        # No lock in this method... it may not be necessary if the
        # messages are finished.  Lock is taken at a higher level when needed.
        known = len(self.records)
        for record_index in range(processed, known):
            record = self.records[record_index]
            # The record may have been replaced by None if too many
            # debug messages were seen.
            if record is not None:
                subscriber.process(record)
        if subscriber in self.subscribers:
            self.subscribers[subscriber] = known

    def finish(self):
        """Indicate that no more messages will be published."""
        with self.lock:
            self.is_finished = True
            while self.subscribers:
                (subscriber, processed) = self.subscribers.popitem()
                if processed < len(self.records):
                    self._notify_subscriber(subscriber, processed)
                subscriber.finish()
class StatusCatalog(object):
    """Global store for all StatusRequest objects."""

    # Borg pattern: every instance shares this state.
    __shared_state = {}
    status_by_auditid = {}
    status_by_requestid = {}

    def __init__(self):
        """Borg object."""
        self.__dict__ = self.__shared_state

    def get_request_status(self, auditid=None, requestid=None):
        """Retrieve a previously created RequestStatus."""
        found = None
        if auditid is not None:
            found = self.status_by_auditid.get(str(auditid))
        # Fall back to the requestid index when the auditid lookup missed.
        if found is None:
            found = self.status_by_requestid.get(requestid)
        return found

    def create_request_status(self, auditid, requestid=None):
        """Create a new RequestStatus and store it."""
        if auditid is None:
            return None
        auditid = str(auditid)
        requestid = requestid or uuid.uuid4()
        status = RequestStatus(auditid, requestid)
        self.status_by_requestid[requestid] = status
        self.status_by_auditid[auditid] = status
        return status

    def remove_by_auditid(self, status):
        """Mark the RequestStatus as finished and remove references to it."""
        status.finish()
        self.status_by_auditid.pop(status.auditid, None)
        # Keep the requestid entry around briefly so late clients can still
        # fetch the finished request, then reap it after one minute.
        reactor.callLater(60, self.remove_by_requestid, status)

    def remove_by_requestid(self, status):
        """Mark the RequestStatus as no longer needed by the client."""
        if status.requestid:
            self.status_by_requestid.pop(status.requestid, None)
class StatusSubscriber(object):
    """Objects that want to be notified of new records should subclass this."""

    def __init__(self):
        pass

    def process(self, record):  # pragma: no cover
        """Each record will be passed to process() as it comes in."""
        pass

    def finish(self):  # pragma: no cover
        """Called after all records have been processed."""
        pass
| |
import pandas as pd
from .context import gtfstk, sample
from gtfstk import *
def test_valid_str():
    """Non-blank strings validate; NaN and whitespace-only do not."""
    assert valid_str("hello3")
    for bad in (np.nan, " "):
        assert not valid_str(bad)
def test_valid_time():
    """Times with up to two leading hour digits validate; three do not."""
    for ok in ("2:43:00", "22:43:00", "42:43:00"):
        assert valid_time(ok)
    assert not valid_time("142:43:00")
def test_valid_date():
    """Dates must be 8-digit YYYYMMDD strings."""
    for value, expected in (("20140310", True), ("2014031", False)):
        assert bool(valid_date(value)) is expected
def test_valid_timezone():
    """Only recognized tz database names validate."""
    for value, expected in (("Africa/Abidjan", True), ("zoom", False)):
        assert bool(valid_timezone(value)) is expected
def test_valid_lang():
    """Two-letter language codes validate; three-letter ones do not."""
    for value, expected in (("aa", True), ("aaa", False)):
        assert bool(valid_lang(value)) is expected
def test_valid_currency():
    """Currency codes must be uppercase ISO 4217 strings."""
    for value, expected in (("AED", True), ("aed", False)):
        assert bool(valid_currency(value)) is expected
def test_valid_url():
    """URLs require an explicit scheme."""
    for value, expected in (("http://www.example.com", True),
                            ("www.example.com", False)):
        assert bool(valid_url(value)) is expected
def test_valid_email():
    """Emails must contain exactly one @ separator."""
    for value, expected in (("a@b.c.com", True), ("a@b@c.com", False)):
        assert bool(valid_email(value)) is expected
def test_valid_color():
    """Colors must be 6-character hex strings."""
    assert valid_color("00FFFF")
    for bad in ("0FF", "GGFFFF"):
        assert not valid_color(bad)
def test_check_for_required_columns():
    """A table missing one of its required columns is flagged."""
    assert not check_for_required_columns([], "routes", sample.routes)
    feed = sample.copy()
    del feed.routes["route_type"]
    assert check_for_required_columns([], "routes", feed.routes)
def test_check_for_invalid_columns():
    """A table with an unrecognized column is flagged."""
    assert not check_for_invalid_columns([], "routes", sample.routes)
    feed = sample.copy()
    feed.routes["bingo"] = "snoop"
    assert check_for_invalid_columns([], "routes", feed.routes)
def test_check_table():
    """check_table reports problems only for rows matching the condition."""
    feed = sample.copy()
    cond = feed.routes["route_id"].isnull()
    # no route_id is null, so no problem is reported
    assert not check_table([], "routes", feed.routes, cond, "Bingo")
    # the inverted condition matches every row, so a problem is reported
    assert check_table([], "routes", feed.routes, ~cond, "Bongo")
def test_check_column():
    """check_column flags values rejected by the supplied validator."""
    feed = sample.copy()
    assert not check_column([], "agency", feed.agency, "agency_url", valid_url)
    feed.agency["agency_url"].iat[0] = "example.com"
    assert check_column([], "agency", feed.agency, "agency_url", valid_url)
def test_check_column_id():
    """check_column_id flags null values in an ID column."""
    feed = sample.copy()
    assert not check_column_id([], "routes", feed.routes, "route_id")
    feed.routes["route_id"].iat[0] = np.nan
    assert check_column_id([], "routes", feed.routes, "route_id")
def test_check_column_linked_id():
    """check_column_linked_id flags IDs missing from the linked table."""
    feed = sample.copy()
    assert not check_column_linked_id(
        [], "trips", feed.trips, "route_id", feed.routes
    )
    feed.trips["route_id"].iat[0] = "Hummus!"
    assert check_column_linked_id(
        [], "trips", feed.trips, "route_id", feed.routes
    )
def test_format_problems():
    """format_problems passes lists through and builds the expected DataFrame."""
    problems = [("ba", "da", "boom", "boom")]
    assert problems == format_problems(problems, as_df=False)

    e = format_problems(problems, as_df=True)
    assert isinstance(e, pd.DataFrame)
    assert e.columns.tolist() == ["type", "message", "table", "rows"]
def test_check_agency():
    """check_agency flags missing table/columns, duplicates, and blank fields."""
    assert not check_agency(sample)

    feed = sample.copy()
    feed.agency = None
    assert check_agency(feed)

    feed = sample.copy()
    del feed.agency["agency_name"]
    assert check_agency(feed)

    # unknown column is only a warning
    feed = sample.copy()
    feed.agency["b"] = 3
    assert check_agency(feed, include_warnings=True)

    # duplicated agency row
    feed = sample.copy()
    feed.agency = feed.agency.append(feed.agency.iloc[0])
    assert check_agency(feed)

    feed = sample.copy()
    feed.agency["agency_name"] = ""
    assert check_agency(feed)

    # each optional field must still hold a valid (non-blank) value
    for col in [
        "agency_timezone",
        "agency_url",
        "agency_fare_url",
        "agency_lang",
        "agency_phone",
        "agency_email",
    ]:
        feed = sample.copy()
        feed.agency[col] = ""
        assert check_agency(feed)
def test_check_calendar():
    """check_calendar flags schema problems, duplicates, and bad day/date values."""
    assert not check_calendar(sample)
    assert check_calendar(sample, include_warnings=True)  # feed has expired

    # calendar is optional
    feed = sample.copy()
    feed.calendar = None
    assert not check_calendar(feed)

    feed = sample.copy()
    del feed.calendar["service_id"]
    assert check_calendar(feed)

    # unknown column is only a warning
    feed = sample.copy()
    feed.calendar["yo"] = 3
    assert not check_calendar(feed)
    assert check_calendar(feed, include_warnings=True)

    # duplicated service_id
    feed = sample.copy()
    feed.calendar["service_id"].iat[0] = feed.calendar["service_id"].iat[1]
    assert check_calendar(feed)

    # each day flag and date column must hold a valid value
    for col in [
        "monday",
        "tuesday",
        "wednesday",
        "thursday",
        "friday",
        "saturday",
        "sunday",
        "start_date",
        "end_date",
    ]:
        feed = sample.copy()
        feed.calendar[col].iat[0] = "5"
        assert check_calendar(feed)
def test_check_calendar_dates():
    """check_calendar_dates flags schema problems, duplicates, and bad values."""
    assert not check_calendar_dates(sample)

    # calendar_dates is optional
    feed = sample.copy()
    feed.calendar_dates = None
    assert not check_calendar_dates(feed)

    feed = sample.copy()
    del feed.calendar_dates["service_id"]
    assert check_calendar_dates(feed)

    # unknown column is only a warning
    feed = sample.copy()
    feed.calendar_dates["yo"] = 3
    assert not check_calendar_dates(feed)
    assert check_calendar_dates(feed, include_warnings=True)

    # duplicated row
    feed = sample.copy()
    feed.calendar_dates = feed.calendar_dates.append(
        feed.calendar_dates.iloc[0]
    )
    assert check_calendar_dates(feed)

    for col in ["date", "exception_type"]:
        feed = sample.copy()
        feed.calendar_dates[col].iat[0] = "5"
        assert check_calendar_dates(feed)
def test_check_fare_attributes():
    """check_fare_attributes flags schema problems, duplicates, and bad values."""
    assert not check_fare_attributes(sample)

    # fare_attributes is optional
    feed = sample.copy()
    feed.fare_attributes = None
    assert not check_fare_attributes(feed)

    feed = sample.copy()
    del feed.fare_attributes["fare_id"]
    assert check_fare_attributes(feed)

    # unknown column is only a warning
    feed = sample.copy()
    feed.fare_attributes["yo"] = 3
    assert not check_fare_attributes(feed)
    assert check_fare_attributes(feed, include_warnings=True)

    # duplicated row
    feed = sample.copy()
    feed.fare_attributes = feed.fare_attributes.append(
        feed.fare_attributes.iloc[0]
    )
    assert check_fare_attributes(feed)

    # invalid currency
    feed = sample.copy()
    feed.fare_attributes["currency_type"] = "jubjub"
    assert check_fare_attributes(feed)

    # negative values are invalid for these columns
    for col in ["payment_method", "transfers", "transfer_duration"]:
        feed = sample.copy()
        feed.fare_attributes[col] = -7
        assert check_fare_attributes(feed)
def test_check_fare_rules():
    """check_fare_rules flags schema problems and IDs with no referent."""
    assert not check_fare_rules(sample)

    # fare_rules is optional
    feed = sample.copy()
    feed.fare_rules = None
    assert not check_fare_rules(feed)

    feed = sample.copy()
    del feed.fare_rules["fare_id"]
    assert check_fare_rules(feed)

    # unknown column is only a warning
    feed = sample.copy()
    feed.fare_rules["yo"] = 3
    assert not check_fare_rules(feed)
    assert check_fare_rules(feed, include_warnings=True)

    # every linked-ID column must reference an existing entity
    for col in [
        "fare_id",
        "route_id",
        "origin_id",
        "destination_id",
        "contains_id",
    ]:
        feed = sample.copy()
        feed.fare_rules[col] = "tuberosity"
        print(col)
        print(feed.fare_rules)
        assert check_fare_rules(feed)
def test_check_feed_info():
    """check_feed_info flags missing columns, unknown columns, blank values."""
    # Create feed_info table
    feed = sample.copy()
    columns = [
        "feed_publisher_name",
        "feed_publisher_url",
        "feed_lang",
        "feed_start_date",
        "feed_end_date",
        "feed_version",
    ]
    rows = [["slurp", "http://slurp.burp", "aa", "21110101", "21110102", "69"]]
    feed.feed_info = pd.DataFrame(rows, columns=columns)
    assert not check_feed_info(feed)

    # feed_info is optional
    feed1 = feed.copy()
    feed1.feed_info = None
    assert not check_feed_info(feed1)

    feed1 = feed.copy()
    del feed1.feed_info["feed_lang"]
    assert check_feed_info(feed1)

    # unknown column is only a warning
    feed1 = feed.copy()
    feed1.feed_info["yo"] = 3
    assert not check_feed_info(feed1)
    assert check_feed_info(feed1, include_warnings=True)

    # every column must hold a valid (non-blank) value
    for col in columns:
        feed1 = feed.copy()
        feed1.feed_info[col] = ""
        assert check_feed_info(feed1)
def test_check_frequencies():
    """check_frequencies flags schema problems, bad IDs/times, duplicates."""
    assert not check_frequencies(sample)

    # frequencies is optional
    feed = sample.copy()
    feed.frequencies = None
    assert not check_frequencies(feed)

    feed = sample.copy()
    del feed.frequencies["trip_id"]
    assert check_frequencies(feed)

    # unknown column is only a warning
    feed = sample.copy()
    feed.frequencies["yo"] = 3
    assert not check_frequencies(feed)
    assert check_frequencies(feed, include_warnings=True)

    # trip_id must reference an existing trip
    feed = sample.copy()
    feed.frequencies["trip_id"].iat[0] = "ratatat"
    assert check_frequencies(feed)

    # identical start and end times across rows create overlaps
    for col in ["start_time", "end_time"]:
        feed = sample.copy()
        feed.frequencies[col] = "07:00:00"
        assert check_frequencies(feed)

    # duplicated row
    feed = sample.copy()
    feed.frequencies = feed.frequencies.append(feed.frequencies.iloc[0])
    assert check_frequencies(feed)

    # negative values are invalid for these columns
    for col in ["headway_secs", "exact_times"]:
        feed = sample.copy()
        feed.frequencies[col] = -7
        assert check_frequencies(feed)
def test_check_routes():
    """check_routes flags missing table/columns, bad values, and duplicates."""
    assert not check_routes(sample)

    feed = sample.copy()
    feed.routes = None
    assert check_routes(feed)

    feed = sample.copy()
    del feed.routes["route_id"]
    assert check_routes(feed)

    # unknown column is a warning
    feed = sample.copy()
    feed.routes["bingo"] = 3
    assert check_routes(feed, include_warnings=True)

    # duplicated route_id
    feed = sample.copy()
    feed.routes["route_id"].iat[0] = feed.routes["route_id"].iat[1]
    assert check_routes(feed)

    # agency_id with no matching agency
    feed = sample.copy()
    feed.routes["agency_id"] = "Hubba hubba"
    assert check_routes(feed)

    feed = sample.copy()
    feed.routes["route_short_name"].iat[0] = ""
    assert check_routes(feed)

    # both names blank is invalid
    feed = sample.copy()
    feed.routes["route_short_name"].iat[0] = ""
    feed.routes["route_long_name"].iat[0] = ""
    assert check_routes(feed)

    feed = sample.copy()
    feed.routes["route_type"].iat[0] = 8
    assert check_routes(feed)

    feed = sample.copy()
    feed.routes["route_color"].iat[0] = "FFF"
    assert check_routes(feed)

    feed = sample.copy()
    feed.routes["route_text_color"].iat[0] = "FFF"
    assert check_routes(feed)

    # duplicated name pair is only a warning
    feed = sample.copy()
    feed.routes["route_short_name"].iat[1] = feed.routes[
        "route_short_name"
    ].iat[0]
    feed.routes["route_long_name"].iat[1] = feed.routes["route_long_name"].iat[
        0
    ]
    assert not check_routes(feed)
    assert check_routes(feed, include_warnings=True)

    # a route with no trips is only a warning
    feed = sample.copy()
    feed.routes["route_id"].iat[0] = "Shwing"
    assert not check_routes(feed)
    assert check_routes(feed, include_warnings=True)

    feed = sample.copy()
    feed.agency = None
    assert check_routes(feed)
def test_check_shapes():
    """check_shapes flags schema problems, bad coordinates, and bad sequences."""
    assert not check_shapes(sample)

    # Make a nonempty shapes table to check
    feed = sample.copy()
    rows = [
        ["1100015", -16.743_632, 145.668_255, 10001, 1.2],
        ["1100015", -16.743_522, 145.668_394, 10002, 1.3],
    ]
    columns = [
        "shape_id",
        "shape_pt_lat",
        "shape_pt_lon",
        "shape_pt_sequence",
        "shape_dist_traveled",
    ]
    feed.shapes = pd.DataFrame(rows, columns=columns)
    assert not check_shapes(feed)

    feed1 = feed.copy()
    del feed1.shapes["shape_id"]
    assert check_shapes(feed1)

    # unknown column is only a warning
    feed1 = feed.copy()
    feed1.shapes["yo"] = 3
    assert not check_shapes(feed1)
    assert check_shapes(feed1, include_warnings=True)

    feed1 = feed.copy()
    feed1.shapes["shape_id"].iat[0] = ""
    assert check_shapes(feed1)

    # out-of-range coordinates
    for column in ["shape_pt_lon", "shape_pt_lat"]:
        feed1 = feed.copy()
        feed1.shapes[column] = 185
        assert check_shapes(feed1)

    # duplicated sequence number within one shape
    feed1 = feed.copy()
    feed1.shapes["shape_pt_sequence"].iat[1] = feed1.shapes[
        "shape_pt_sequence"
    ].iat[0]
    assert check_shapes(feed1)

    # shape_dist_traveled must be nondecreasing along the sequence
    feed1 = feed.copy()
    feed1.shapes["shape_dist_traveled"].iat[1] = 0
    assert check_shapes(feed1)

    # swapping whole rows keeps sequence/distance pairs consistent
    feed1 = feed.copy()
    t1 = feed1.shapes.iloc[0].copy()
    t2 = feed1.shapes.iloc[1].copy()
    feed1.shapes.iloc[0] = t2
    feed1.shapes.iloc[1] = t1
    assert not check_shapes(feed1)
def test_check_stops():
    """check_stops flags schema problems, bad values, and location-type rules."""
    assert not check_stops(sample)

    feed = sample.copy()
    feed.stops = None
    assert check_stops(feed)

    feed = sample.copy()
    del feed.stops["stop_id"]
    assert check_stops(feed)

    # unknown column is a warning
    feed = sample.copy()
    feed.stops["b"] = 3
    assert check_stops(feed, include_warnings=True)

    # duplicated stop_id
    feed = sample.copy()
    feed.stops["stop_id"].iat[0] = feed.stops["stop_id"].iat[1]
    assert check_stops(feed)

    # blank values are invalid for these optional string columns
    for column in ["stop_code", "stop_desc", "zone_id", "parent_station"]:
        feed = sample.copy()
        feed.stops[column] = ""
        assert check_stops(feed)

    for column in ["stop_url", "stop_timezone"]:
        feed = sample.copy()
        feed.stops[column] = "Wa wa"
        assert check_stops(feed)

    # out-of-range numeric values
    for column in [
        "stop_lon",
        "stop_lat",
        "location_type",
        "wheelchair_boarding",
    ]:
        feed = sample.copy()
        feed.stops[column] = 185
        assert check_stops(feed)

    # parent_station must reference an existing stop
    feed = sample.copy()
    feed.stops["parent_station"] = "bingo"
    assert check_stops(feed)

    # a station (location_type 1) cannot itself have a parent
    feed = sample.copy()
    feed.stops["location_type"] = 1
    feed.stops["parent_station"] = "bingo"
    assert check_stops(feed)

    # a stop's parent must be a station, not another plain stop
    feed = sample.copy()
    feed.stops["location_type"] = 0
    feed.stops["parent_station"] = feed.stops["stop_id"].iat[1]
    assert check_stops(feed)

    feed = sample.copy()
    # valid location type
    feed.stops["location_type"] = 2
    assert not check_stops(feed)
    # requires a location
    feed.stops["stop_lat"] = np.NaN
    assert check_stops(feed)
    # valid location_type, does not require location
    feed.stops["location_type"] = 3
    assert not check_stops(feed)
    # valid location_type, does not require location
    feed.stops["location_type"] = 4
    assert not check_stops(feed)
    # location type 4 requires a parent station
    feed.stops["parent_station"] = np.NaN
    assert check_stops(feed)
    # valid parent station for location type 4
    feed.stops["stop_lat"] = 0.0
    feed.stops["parent_station"] = feed.stops["stop_id"].iat[1]
    feed.stops["parent_station"].iat[1] = np.NaN
    feed.stops["location_type"].iat[1] = 1
    assert not check_stops(feed)

    # a stop with no stop times is only a warning
    feed = sample.copy()
    feed.stops["stop_id"].iat[0] = "Flippity flew"
    assert not check_stops(feed)
    assert check_stops(feed, include_warnings=True)

    feed = sample.copy()
    feed.stop_times = None
    assert not check_stops(feed)
    assert check_stops(feed, include_warnings=True)
def test_check_stop_times():
    """check_stop_times should accept the valid sample feed and flag
    missing tables/columns, bad values, and time-ordering problems."""
    assert not check_stop_times(sample)
    feed = sample.copy()
    feed.stop_times = None
    assert check_stop_times(feed)
    feed = sample.copy()
    del feed.stop_times["stop_id"]
    assert check_stop_times(feed)
    # An unrecognized column is only a warning, not an error.
    feed = sample.copy()
    feed.stop_times["b"] = 3
    assert check_stop_times(feed, include_warnings=True)
    # Dangling trip_id foreign key.
    feed = sample.copy()
    feed.stop_times["trip_id"].iat[0] = "bingo"
    assert check_stop_times(feed)
    # Malformed HH:MM:SS time strings.
    for col in ["arrival_time", "departure_time"]:
        feed = sample.copy()
        feed.stop_times[col].iat[0] = "1:0:00"
        assert check_stop_times(feed)
    # The last stop of a trip must have an arrival time.
    feed = sample.copy()
    feed.stop_times["arrival_time"].iat[-1] = np.nan
    assert check_stop_times(feed)
    # Dangling stop_id foreign key.
    feed = sample.copy()
    feed.stop_times["stop_id"].iat[0] = "bingo"
    assert check_stop_times(feed)
    feed = sample.copy()
    feed.stop_times["stop_headsign"].iat[0] = ""
    assert check_stop_times(feed)
    # Duplicated stop_sequence within a trip.
    feed = sample.copy()
    feed.stop_times["stop_sequence"].iat[1] = feed.stop_times[
        "stop_sequence"
    ].iat[0]
    assert check_stop_times(feed)
    for col in ["pickup_type", "drop_off_type"]:
        feed = sample.copy()
        feed.stop_times[col] = "bongo"
        assert check_stop_times(feed)
    # shape_dist_traveled must be nondecreasing along a trip.
    feed = sample.copy()
    feed.stop_times["shape_dist_traveled"] = 1
    feed.stop_times["shape_dist_traveled"].iat[1] = 0.9
    assert check_stop_times(feed)
    feed = sample.copy()
    feed.stop_times["timepoint"] = 3
    assert check_stop_times(feed)
    # A duplicated departure time is only a warning.
    feed = sample.copy()
    feed.stop_times["departure_time"].iat[1] = feed.stop_times[
        "departure_time"
    ].iat[0]
    assert not check_stop_times(feed)
    assert check_stop_times(feed, include_warnings=True)
    # Return the correct index of the missing time
    feed = sample.copy()
    # Index 2 is the first stop of the second trip
    # Index 6 is the last stop of the second trip
    feed.stop_times["arrival_time"].iat[6] = np.nan
    t1, t2 = feed.stop_times.iloc[2].copy(), feed.stop_times.iloc[6].copy()
    feed.stop_times.iloc[2], feed.stop_times.iloc[6] = t2, t1
    assert check_stop_times(feed)[0][3][0] == 2
    # Check for last stop of last trip
    # Trips are ordered by trip_id so the STBA trip_id from the sample feed
    # is last and its last stop (index 1) is the last row
    feed = sample.copy()
    feed.stop_times["arrival_time"].iat[1] = np.nan
    assert check_stop_times(feed)
def test_check_transfers():
    """check_transfers should accept a valid transfers table and flag
    missing/blank/out-of-range values; unknown columns warn only."""
    assert not check_transfers(sample)
    # Create transfers table
    base = sample.copy()
    cols = [
        "from_stop_id",
        "to_stop_id",
        "transfer_type",
        "min_transfer_time",
    ]
    stop_a = base.stops["stop_id"].iat[0]
    stop_b = base.stops["stop_id"].iat[1]
    base.transfers = pd.DataFrame([[stop_a, stop_b, 2, 3600]], columns=cols)
    assert not check_transfers(base)
    # Dropping a required column is an error.
    broken = base.copy()
    del broken.transfers["from_stop_id"]
    assert check_transfers(broken)
    # An unrecognized column is only a warning.
    broken = base.copy()
    broken.transfers["yo"] = 3
    assert not check_transfers(broken)
    assert check_transfers(broken, include_warnings=True)
    # Blank stop references are errors.
    for col in set(cols) - {"transfer_type", "min_transfer_time"}:
        broken = base.copy()
        broken.transfers[col].iat[0] = ""
        assert check_transfers(broken)
    # Negative numeric fields are errors.
    for col in ["transfer_type", "min_transfer_time"]:
        broken = base.copy()
        broken.transfers[col] = -7
        assert check_transfers(broken)
def test_check_trips():
    """check_trips should accept the valid sample feed, flag structural
    errors, and emit warnings for trips without stop times."""
    assert not check_trips(sample)
    # A None trips table is an error.
    f = sample.copy()
    f.trips = None
    assert check_trips(f)
    # A missing required column is an error.
    f = sample.copy()
    del f.trips["trip_id"]
    assert check_trips(f)
    # An unrecognized column is only a warning.
    f = sample.copy()
    f.trips["b"] = 3
    assert check_trips(f, include_warnings=True)
    # Duplicated trip IDs are errors.
    f = sample.copy()
    f.trips["trip_id"].iat[0] = f.trips["trip_id"].iat[1]
    assert check_trips(f)
    # Dangling foreign keys are errors.
    for col, bad in [("route_id", "Hubba hubba"), ("service_id", "Boom boom")]:
        f = sample.copy()
        f.trips[col] = bad
        assert check_trips(f)
    # Malformed optional fields are errors.
    f = sample.copy()
    f.trips["direction_id"].iat[0] = 7
    assert check_trips(f)
    f = sample.copy()
    f.trips["block_id"].iat[0] = ""
    assert check_trips(f)
    f = sample.copy()
    f.trips["shape_id"].iat[0] = "Hello"
    assert check_trips(f)
    f = sample.copy()
    f.trips["wheelchair_accessible"] = ""
    assert check_trips(f)
    # A trip with no stop times warns but does not error.
    f = sample.copy()
    first_trip = f.trips["trip_id"].iat[0]
    f.stop_times = f.stop_times[f.stop_times["trip_id"] != first_trip].copy()
    assert not check_trips(f)
    assert check_trips(f, include_warnings=True)
    f = sample.copy()
    f.stop_times = None
    assert not check_trips(f)
    assert check_trips(f, include_warnings=True)
def test_validate():
    """validate should report no problems for the valid sample feed."""
    assert not validate(sample, as_df=False, include_warnings=False)
| |
import logging
import gevent
import requests
from ouimeaux.config import get_cache, WemoConfiguration
from ouimeaux.device import DeviceUnreachable
from ouimeaux.device.switch import Switch
from ouimeaux.device.insight import Insight
from ouimeaux.device.maker import Maker
from ouimeaux.device.lightswitch import LightSwitch
from ouimeaux.device.motion import Motion
from ouimeaux.device.bridge import Bridge
from ouimeaux.discovery import UPnP
from ouimeaux.signals import discovered, devicefound
from ouimeaux.subscribe import SubscriptionRegistry
from ouimeaux.utils import matcher
# Do-nothing placeholder used when the caller supplies no callback.
_NOOP = lambda *x: None
log = logging.getLogger(__name__)
# Silence the requests library's logger; it is noisy at default levels.
reqlog = logging.getLogger("requests")
reqlog.disabled = True
class StopBroadcasting(Exception):
    """Raised internally to end the UPnP broadcast loop in Environment.discover."""
    pass
class UnknownDevice(Exception):
    """Raised when a requested device name cannot be resolved."""
    pass
class Environment(object):
    """Tracks WeMo devices on the local network.

    Handles UPnP discovery, event subscriptions and an optional on-disk
    device cache, keeping discovered devices indexed both by type and by
    name.
    """

    def __init__(self, switch_callback=_NOOP, motion_callback=_NOOP, bridge_callback=_NOOP,
                 maker_callback=_NOOP, with_discovery=True, with_subscribers=True, with_cache=None,
                 bind=None, config_filename=None):
        """
        Create a WeMo environment.

        @param switch_callback: A function to be called when a new switch is
                                discovered.
        @type switch_callback: function
        @param motion_callback: A function to be called when a new motion is
                                discovered.
        @type motion_callback: function
        @param bridge_callback: A function to be called when a new bridge is
                                discovered.
        @type bridge_callback: function
        @param maker_callback: A function to be called when a new maker is
                               discovered.
        @type maker_callback: function
        @param with_discovery: Whether to start the UPnP discovery server.
        @type with_discovery: bool
        @param with_subscribers: Whether to register for events with discovered
                                 devices.
        @type with_subscribers: bool
        @param with_cache: Whether to use the on-disk device cache; None means
                           defer to the config file (default True).
        @param bind: ip:port to which to bind the response server.
        @type bind: str
        @param config_filename: Alternate config file path, if any.
        """
        self._config = WemoConfiguration(filename=config_filename)
        self.upnp = UPnP(bind=bind or self._config.bind)
        # Route UPnP discovery responses into _found_device.
        discovered.connect(self._found_device, self.upnp)
        self.registry = SubscriptionRegistry()
        if with_cache is None:
            # Fall back to the config-file setting; default to enabled.
            with_cache = (self._config.cache if self._config.cache is not None else True)
        self._with_cache = with_cache
        self._with_discovery = with_discovery
        self._with_subscribers = with_subscribers
        self._switch_callback = switch_callback
        self._motion_callback = motion_callback
        self._bridge_callback = bridge_callback
        self._maker_callback = maker_callback
        # Per-type registries plus a flat name -> device mapping.
        self._switches = {}
        self._motions = {}
        self._bridges = {}
        self._makers = {}
        self.devices = {}

    def __iter__(self):
        # NOTE(review): Python 2 idiom — dict.itervalues() does not exist
        # on Python 3; confirm target interpreter before porting.
        return self.devices.itervalues()

    def start(self):
        """
        Start the server(s) necessary to receive information from devices.
        """
        if self._with_cache:
            # Seed the environment with previously seen devices; pass
            # cache=False so they are not redundantly written back.
            with get_cache() as c:
                for dev in c.devices:
                    self._process_device(dev, cache=False)
        if self._with_discovery:
            # Start the server to listen to new devices
            self.upnp.server.set_spawn(2)
            self.upnp.server.start()
        if self._with_subscribers:
            # Start the server to listen to events
            self.registry.server.set_spawn(2)
            self.registry.server.start()

    def wait(self, timeout=None):
        """
        Wait for events.

        Blocks for ``timeout`` seconds, or forever when no timeout is given.
        """
        try:
            if timeout:
                gevent.sleep(timeout)
            else:
                while True:
                    gevent.sleep(1000)
        except (KeyboardInterrupt, SystemExit, Exception):
            # NOTE(review): this swallows *every* exception, including real
            # errors; presumably intended to let Ctrl-C exit quietly.
            pass

    def discover(self, seconds=2):
        """
        Discover devices in the environment.

        @param seconds: Number of seconds to broadcast requests.
        @type seconds: int
        """
        log.info("Discovering devices")
        # The gevent timeout raises StopBroadcasting to end the loop below.
        with gevent.Timeout(seconds, StopBroadcasting) as timeout:
            try:
                try:
                    while True:
                        self.upnp.broadcast()
                        gevent.sleep(1)
                except Exception as e:
                    raise StopBroadcasting(e)
            except StopBroadcasting:
                return

    def _found_device(self, sender, **kwargs):
        """Signal handler: build the right device class from a UPnP response."""
        address = kwargs['address']
        headers = kwargs['headers']
        usn = headers['usn']
        # The USN prefix encodes the device type.
        if usn.startswith('uuid:Socket'):
            klass = Switch
        elif usn.startswith('uuid:Lightswitch'):
            klass = LightSwitch
        elif usn.startswith('uuid:Insight'):
            klass = Insight
        elif usn.startswith('uuid:Sensor'):
            klass = Motion
        elif usn.startswith('uuid:Bridge'):
            klass = Bridge
        elif usn.startswith('uuid:Maker'):
            klass = Maker
        else:
            log.info("Unrecognized device type. USN={0}".format(usn))
            return
        device = klass(headers['location'])
        log.info("Found device %r at %s" % (device, address))
        self._process_device(device)

    def _process_device(self, device, cache=None):
        """Register a device, subscribe to its events, cache it, and fire
        the matching type callback.

        @param cache: Overrides the environment-wide cache setting when
                      not None.
        """
        if isinstance(device, Switch):
            callback = self._switch_callback
            registry = self._switches
        elif isinstance(device, Motion):
            callback = self._motion_callback
            registry = self._motions
        elif isinstance(device, Bridge):
            callback = self._bridge_callback
            registry = self._bridges
            for light in device.Lights:
                log.info("Found light \"%s\" connected to \"%s\"" % (light, device.name))
        elif isinstance(device, Maker):
            callback = self._maker_callback
            registry = self._makers
        else:
            # Unknown device type; ignore it.
            return
        self.devices[device.name] = device
        registry[device.name] = device
        if self._with_subscribers:
            self.registry.register(device)
            self.registry.on(device, 'BinaryState',
                             device._update_state)
        try:
            if isinstance(device, Bridge):
                # Bridges are not pinged here; skip the reachability check.
                pass
            else:
                device.ping()
        except DeviceUnreachable:
            return
        else:
            # Only cache reachable devices; the per-call override wins.
            if cache if cache is not None else self._with_cache:
                with get_cache() as c:
                    c.add_device(device)
            devicefound.send(device)
            callback(device)

    def list_switches(self):
        """
        List switches discovered in the environment.
        """
        return self._switches.keys()

    def list_motions(self):
        """
        List motions discovered in the environment.
        """
        return self._motions.keys()

    def list_makers(self):
        """
        List makers discovered in the environment.
        """
        return self._makers.keys()

    def list_bridges(self):
        """
        List bridges discovered in the environment.
        """
        return self._bridges.keys()

    def get(self, name):
        """Look up a device by configured alias, or by fuzzy name match."""
        alias = self._config.aliases.get(name)
        if alias:
            matches = lambda x: x == alias
        elif name:
            matches = matcher(name)
        else:
            # _NOOP always returns None (falsy), so nothing will match.
            matches = _NOOP
        for k in self.devices:
            if matches(k):
                return self.devices[k]
        else:
            # for/else: only reached when the loop finds no match.
            raise UnknownDevice(name)

    def get_switch(self, name):
        """
        Get a switch by name.
        """
        try:
            return self._switches[name]
        except KeyError:
            raise UnknownDevice(name)

    def get_motion(self, name):
        """
        Get a motion by name.
        """
        try:
            return self._motions[name]
        except KeyError:
            raise UnknownDevice(name)

    def get_bridge(self, name):
        """
        Get a bridge by name.
        """
        try:
            return self._bridges[name]
        except KeyError:
            raise UnknownDevice(name)

    def get_maker(self, name):
        """
        Get a maker by name.
        """
        try:
            return self._makers[name]
        except KeyError:
            raise UnknownDevice(name)
if __name__ == "__main__":
    # Use with python -i: leaves an Environment instance in scope for
    # interactive exploration.
    environment = Environment()
| |
"""Simplex method for linear programming
The *simplex* method uses a traditional, full-tableau implementation of
Dantzig's simplex algorithm [1]_, [2]_ (*not* the Nelder-Mead simplex).
This algorithm is included for backwards compatibility and educational
purposes.
.. versionadded:: 0.15.0
Warnings
--------
The simplex method may encounter numerical difficulties when pivot
values are close to the specified tolerance. If encountered, try
removing any redundant constraints, changing the pivot strategy to Bland's
rule, or increasing the tolerance value.
Alternatively, more robust methods may be used. See
:ref:`'interior-point' <optimize.linprog-interior-point>` and
:ref:`'revised simplex' <optimize.linprog-revised_simplex>`.
References
----------
.. [1] Dantzig, George B., Linear programming and extensions. Rand
Corporation Research Study Princeton Univ. Press, Princeton, NJ,
1963
.. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
Mathematical Programming", McGraw-Hill, Chapter 4.
"""
import numpy as np
from warnings import warn
from .optimize import OptimizeResult, OptimizeWarning, _check_unknown_options
from ._linprog_util import _postsolve
def _pivot_col(T, tol=1e-9, bland=False):
"""
Given a linear programming simplex tableau, determine the column
of the variable to enter the basis.
Parameters
----------
T : 2-D array
A 2-D array representing the simplex tableau, T, corresponding to the
linear programming problem. It should have the form:
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
.
.
.
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
[c[0], c[1], ..., c[n_total], 0]]
for a Phase 2 problem, or the form:
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
.
.
.
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
[c[0], c[1], ..., c[n_total], 0],
[c'[0], c'[1], ..., c'[n_total], 0]]
for a Phase 1 problem (a problem in which a basic feasible solution is
sought prior to maximizing the actual objective. ``T`` is modified in
place by ``_solve_simplex``.
tol : float
Elements in the objective row larger than -tol will not be considered
for pivoting. Nominally this value is zero, but numerical issues
cause a tolerance about zero to be necessary.
bland : bool
If True, use Bland's rule for selection of the column (select the
first column with a negative coefficient in the objective row,
regardless of magnitude).
Returns
-------
status: bool
True if a suitable pivot column was found, otherwise False.
A return of False indicates that the linear programming simplex
algorithm is complete.
col: int
The index of the column of the pivot element.
If status is False, col will be returned as nan.
"""
ma = np.ma.masked_where(T[-1, :-1] >= -tol, T[-1, :-1], copy=False)
if ma.count() == 0:
return False, np.nan
if bland:
# ma.mask is sometimes 0d
return True, np.nonzero(np.logical_not(np.atleast_1d(ma.mask)))[0][0]
return True, np.ma.nonzero(ma == ma.min())[0][0]
def _pivot_row(T, basis, pivcol, phase, tol=1e-9, bland=False):
"""
Given a linear programming simplex tableau, determine the row for the
pivot operation.
Parameters
----------
T : 2-D array
A 2-D array representing the simplex tableau, T, corresponding to the
linear programming problem. It should have the form:
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
.
.
.
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
[c[0], c[1], ..., c[n_total], 0]]
for a Phase 2 problem, or the form:
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
.
.
.
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
[c[0], c[1], ..., c[n_total], 0],
[c'[0], c'[1], ..., c'[n_total], 0]]
for a Phase 1 problem (a Problem in which a basic feasible solution is
sought prior to maximizing the actual objective. ``T`` is modified in
place by ``_solve_simplex``.
basis : array
A list of the current basic variables.
pivcol : int
The index of the pivot column.
phase : int
The phase of the simplex algorithm (1 or 2).
tol : float
Elements in the pivot column smaller than tol will not be considered
for pivoting. Nominally this value is zero, but numerical issues
cause a tolerance about zero to be necessary.
bland : bool
If True, use Bland's rule for selection of the row (if more than one
row can be used, choose the one with the lowest variable index).
Returns
-------
status: bool
True if a suitable pivot row was found, otherwise False. A return
of False indicates that the linear programming problem is unbounded.
row: int
The index of the row of the pivot element. If status is False, row
will be returned as nan.
"""
if phase == 1:
k = 2
else:
k = 1
ma = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, pivcol], copy=False)
if ma.count() == 0:
return False, np.nan
mb = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, -1], copy=False)
q = mb / ma
min_rows = np.ma.nonzero(q == q.min())[0]
if bland:
return True, min_rows[np.argmin(np.take(basis, min_rows))]
return True, min_rows[0]
def _apply_pivot(T, basis, pivrow, pivcol, tol=1e-9):
"""
Pivot the simplex tableau inplace on the element given by (pivrow, pivol).
The entering variable corresponds to the column given by pivcol forcing
the variable basis[pivrow] to leave the basis.
Parameters
----------
T : 2-D array
A 2-D array representing the simplex tableau, T, corresponding to the
linear programming problem. It should have the form:
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
.
.
.
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
[c[0], c[1], ..., c[n_total], 0]]
for a Phase 2 problem, or the form:
[[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],
[A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],
.
.
.
[A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],
[c[0], c[1], ..., c[n_total], 0],
[c'[0], c'[1], ..., c'[n_total], 0]]
for a Phase 1 problem (a problem in which a basic feasible solution is
sought prior to maximizing the actual objective. ``T`` is modified in
place by ``_solve_simplex``.
basis : 1-D array
An array of the indices of the basic variables, such that basis[i]
contains the column corresponding to the basic variable for row i.
Basis is modified in place by _apply_pivot.
pivrow : int
Row index of the pivot.
pivcol : int
Column index of the pivot.
"""
basis[pivrow] = pivcol
pivval = T[pivrow, pivcol]
T[pivrow] = T[pivrow] / pivval
for irow in range(T.shape[0]):
if irow != pivrow:
T[irow] = T[irow] - T[pivrow] * T[irow, pivcol]
# The selected pivot should never lead to a pivot value less than the tol.
if np.isclose(pivval, tol, atol=0, rtol=1e4):
message = (
"The pivot operation produces a pivot value of:{0: .1e}, "
"which is only slightly greater than the specified "
"tolerance{1: .1e}. This may lead to issues regarding the "
"numerical stability of the simplex method. "
"Removing redundant constraints, changing the pivot strategy "
"via Bland's rule or increasing the tolerance may "
"help reduce the issue.".format(pivval, tol))
warn(message, OptimizeWarning, stacklevel=5)
def _solve_simplex(T, n, basis, callback, postsolve_args,
                   maxiter=1000, tol=1e-9, phase=2, bland=False, nit0=0,
                   ):
    """
    Run one phase of the simplex method on the tableau ``T``.

    ``T`` and ``basis`` are modified in place.  The tableau consists of
    one row per constraint ``[A | b]``, followed by the objective row
    ``[c | 0]`` and, for a phase 1 problem, an additional pseudo-objective
    row ``[c' | 0]``; the last column always holds the right-hand side.

    Parameters
    ----------
    T : 2-D array
        Simplex tableau as described above.  Modified in place.
    n : int
        The number of true variables in the problem.
    basis : 1-D array
        An array of the indices of the basic variables, such that basis[i]
        contains the column corresponding to the basic variable for row i.
        Modified in place.
    callback : callable, optional
        If provided, called once per iteration with a
        `scipy.optimize.OptimizeResult` describing the current solution
        (fields: x, fun, slack, con, phase, status, nit, message,
        success, complete).
    postsolve_args : tuple
        Data needed by _postsolve to convert the solution to the
        standard-form problem into the solution to the original problem.
    maxiter : int
        Maximum number of iterations before aborting with status 1.
    tol : float
        The tolerance which determines when a solution is "close enough"
        to zero in Phase 1 to be considered a basic feasible solution or
        close enough to positive to serve as an optimal solution.
    phase : int
        The phase of the optimization being executed (1 or 2).
    bland : bool
        If True, choose pivots using Bland's anti-cycling rule, trading
        speed for guaranteed convergence.
    nit0 : int
        Initial iteration number, to keep an accurate total across the
        two phases.

    Returns
    -------
    nit : int
        Cumulative number of iterations performed.
    status : int
        An integer representing the exit status of the optimization::
            0 : Optimization terminated successfully
            1 : Iteration limit reached
            2 : Problem appears to be infeasible
            3 : Problem appears to be unbounded
            4 : Serious numerical difficulties encountered
    """
    nit = nit0
    status = 0
    message = ''
    complete = False
    # Phase 1 tableaus carry one extra trailing objective row, which
    # shifts the column count used when reading off the solution.
    if phase == 1:
        m = T.shape[1]-2
    elif phase == 2:
        m = T.shape[1]-1
    else:
        raise ValueError("Argument 'phase' to _solve_simplex must be 1 or 2")
    if phase == 2:
        # Check if any artificial variables are still in the basis.
        # If yes, check if any coefficients from this row and a column
        # corresponding to one of the non-artificial variable is non-zero.
        # If found, pivot at this term. If not, start phase 2.
        # Do this for all artificial variables in the basis.
        # Ref: "An Introduction to Linear Programming and Game Theory"
        # by Paul R. Thie, Gerard E. Keough, 3rd Ed,
        # Chapter 3.7 Redundant Systems (pag 102)
        for pivrow in [row for row in range(basis.size)
                       if basis[row] > T.shape[1] - 2]:
            non_zero_row = [col for col in range(T.shape[1] - 1)
                            if abs(T[pivrow, col]) > tol]
            if len(non_zero_row) > 0:
                pivcol = non_zero_row[0]
                _apply_pivot(T, basis, pivrow, pivcol, tol)
                nit += 1
    # Scratch buffer reused by the callback to assemble the solution vector.
    if len(basis[:m]) == 0:
        solution = np.empty(T.shape[1] - 1, dtype=np.float64)
    else:
        solution = np.empty(max(T.shape[1] - 1, max(basis[:m]) + 1),
                            dtype=np.float64)
    while not complete:
        # Find the pivot column
        pivcol_found, pivcol = _pivot_col(T, tol, bland)
        if not pivcol_found:
            # No improving column: the current basis is optimal.
            pivcol = np.nan
            pivrow = np.nan
            status = 0
            complete = True
        else:
            # Find the pivot row
            pivrow_found, pivrow = _pivot_row(T, basis, pivcol, phase, tol, bland)
            if not pivrow_found:
                # No finite ratio: the problem is unbounded.
                status = 3
                complete = True
        if callback is not None:
            # Report the current (possibly intermediate) solution.
            solution[:] = 0
            solution[basis[:n]] = T[:n, -1]
            x = solution[:m]
            x, fun, slack, con = _postsolve(
                x, postsolve_args
            )
            res = OptimizeResult({
                'x': x,
                'fun': fun,
                'slack': slack,
                'con': con,
                'status': status,
                'message': message,
                'nit': nit,
                'success': status == 0 and complete,
                'phase': phase,
                'complete': complete,
            })
            callback(res)
        if not complete:
            if nit >= maxiter:
                # Iteration limit exceeded
                status = 1
                complete = True
            else:
                _apply_pivot(T, basis, pivrow, pivcol, tol)
                nit += 1
    return nit, status
def _linprog_simplex(c, c0, A, b, callback, postsolve_args,
                     maxiter=1000, tol=1e-9, disp=False, bland=False,
                     **unknown_options):
    """
    Minimize a linear objective function subject to linear equality and
    non-negativity constraints using the two phase simplex method.

    Solves problems in standard form::

        minimize    c @ x
        subject to  A @ x == b
                    x >= 0

    (The top level ``linprog`` module converts problems with inequality
    and bound constraints into this form by adding slack variables and
    splitting unbounded variables.)  User-facing documentation is in
    _linprog_doc.py.

    Parameters
    ----------
    c : 1-D array
        Coefficients of the linear objective function to be minimized.
    c0 : float
        Constant term in objective function due to fixed (and eliminated)
        variables.  (Purely for display.)
    A : 2-D array
        2-D array such that ``A @ x`` gives the values of the equality
        constraints at ``x``.  Modified in place to make ``b >= 0``.
    b : 1-D array
        Right-hand side of each equality constraint (row) in ``A``.
        Modified in place to make ``b >= 0``.
    callback : callable, optional
        If provided, called once per iteration with a
        `scipy.optimize.OptimizeResult` describing the current solution
        (see ``_solve_simplex``).
    postsolve_args : tuple
        Data needed by _postsolve to convert the solution to the
        standard-form problem into the solution to the original problem.

    Options
    -------
    maxiter : int
        The maximum number of iterations to perform.
    disp : bool
        If True, print exit status message to sys.stdout.
    tol : float
        The tolerance which determines when a solution is "close enough"
        to zero in Phase 1 to be considered a basic feasible solution or
        close enough to positive to serve as an optimal solution.
    bland : bool
        If True, use Bland's anti-cycling rule to choose pivots to
        prevent cycling, at the cost of slower convergence.
    unknown_options : dict
        Optional arguments not used by this particular solver.  If
        non-empty a warning is issued listing all unused options.

    Returns
    -------
    x : 1-D array
        Solution vector.
    status : int
        An integer representing the exit status of the optimization::
            0 : Optimization terminated successfully
            1 : Iteration limit reached
            2 : Problem appears to be infeasible
            3 : Problem appears to be unbounded
            4 : Serious numerical difficulties encountered
    message : str
        A string descriptor of the exit status of the optimization.
    iteration : int
        The number of iterations taken to solve the problem.
    """
    _check_unknown_options(unknown_options)
    status = 0
    messages = {0: "Optimization terminated successfully.",
                1: "Iteration limit reached.",
                2: "Optimization failed. Unable to find a feasible"
                   " starting point.",
                3: "Optimization failed. The problem appears to be unbounded.",
                4: "Optimization failed. Singular matrix encountered."}
    n, m = A.shape
    # All constraints must have b >= 0.
    is_negative_constraint = np.less(b, 0)
    A[is_negative_constraint] *= -1
    b[is_negative_constraint] *= -1
    # As all constraints are equality constraints the artificial variables
    # will also be basic variables.
    av = np.arange(n) + m
    basis = av.copy()
    # Format the phase one tableau by adding artificial variables and stacking
    # the constraints, the objective row and pseudo-objective row.
    row_constraints = np.hstack((A, np.eye(n), b[:, np.newaxis]))
    row_objective = np.hstack((c, np.zeros(n), c0))
    row_pseudo_objective = -row_constraints.sum(axis=0)
    row_pseudo_objective[av] = 0
    T = np.vstack((row_constraints, row_objective, row_pseudo_objective))
    nit1, status = _solve_simplex(T, n, basis, callback=callback,
                                  postsolve_args=postsolve_args,
                                  maxiter=maxiter, tol=tol, phase=1,
                                  bland=bland
                                  )
    # if pseudo objective is zero, remove the last row from the tableau and
    # proceed to phase 2
    nit2 = nit1
    if abs(T[-1, -1]) < tol:
        # Remove the pseudo-objective row from the tableau
        T = T[:-1, :]
        # Remove the artificial variable columns from the tableau
        T = np.delete(T, av, 1)
    else:
        # Failure to find a feasible starting point
        status = 2
        messages[status] = (
            "Phase 1 of the simplex method failed to find a feasible "
            "solution. The pseudo-objective function evaluates to {0:.1e} "
            "which exceeds the required tolerance of {1} for a solution to be "
            "considered 'close enough' to zero to be a basic solution. "
            "Consider increasing the tolerance to be greater than {0:.1e}. "
            "If this tolerance is unacceptably large the problem may be "
            "infeasible.".format(abs(T[-1, -1]), tol)
        )
    if status == 0:
        # Phase 2
        nit2, status = _solve_simplex(T, n, basis, callback=callback,
                                      postsolve_args=postsolve_args,
                                      maxiter=maxiter, tol=tol, phase=2,
                                      bland=bland, nit0=nit1
                                      )
    # Read the solution off the final tableau: basic variables take the
    # right-hand-side values; all other variables are zero.
    solution = np.zeros(n + m)
    solution[basis[:n]] = T[:n, -1]
    x = solution[:m]
    return x, status, messages[status], int(nit2)
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import sys
import traceback
from oslo.config import cfg
import six
from oslo.log.openstack.common.gettextutils import _, _LE
from oslo.log.openstack.common import importutils
from oslo.log.openstack.common import jsonutils
from oslo.log.openstack.common import local
from oslo.log.openstack.common import log as logging
from oslo.log.openstack.common import versionutils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_RPC_ENVELOPE_VERSION = '2.0'
'''RPC Envelope Version.
This version number applies to the top level structure of messages sent out.
It does *not* apply to the message payload, which must be versioned
independently. For example, when using rpc APIs, a version number is applied
for changes to the API being exposed over rpc. This version number is handled
in the rpc proxy and dispatcher modules.
This version number applies to the message envelope that is used in the
serialization done inside the rpc layer. See serialize_msg() and
deserialize_msg().
The current message format (version 2.0) is very simple. It is::
{
'oslo.version': <RPC Envelope Version as a String>,
'oslo.message': <Application Message Payload, JSON encoded>
}
Message format version '1.0' is just considered to be the messages we sent
without a message envelope.
So, the current message envelope just includes the envelope version. It may
eventually contain additional information, such as a signature for the message
payload.
We will JSON encode the application message payload. The message envelope,
which includes the JSON encoded application message body, will be passed down
to the messaging libraries as a dict.
'''
_VERSION_KEY = 'oslo.version'
_MESSAGE_KEY = 'oslo.message'
_REMOTE_POSTFIX = '_Remote'
class RPCException(Exception):
    """Base class for exceptions raised by the rpc layer.

    Subclasses define ``msg_fmt`` as a %-style template; keyword
    arguments given to the constructor are interpolated into it whenever
    an explicit message is not supplied.
    """

    msg_fmt = _("An unknown RPC related exception occurred.")

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs
        final_message = message or self._format_message(kwargs)
        super(RPCException, self).__init__(final_message)

    def _format_message(self, kwargs):
        # Interpolate kwargs into the class template. When they do not
        # match the template's placeholders, log what we were given so the
        # information is not lost, then fall back to the raw template.
        try:
            return self.msg_fmt % kwargs
        except Exception:
            LOG.exception(_LE('Exception in string format operation'))
            for name, value in six.iteritems(kwargs):
                LOG.error("%s: %s" % (name, value))
            return self.msg_fmt
class RemoteError(RPCException):
    """Signifies that a remote class has raised an exception.

    Carries string representations of the remote exception's type, value
    and traceback, joined into the message so that printing this error
    shows the full remote context.
    """

    msg_fmt = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")

    def __init__(self, exc_type=None, value=None, traceback=None):
        self.exc_type = exc_type
        self.value = value
        self.traceback = traceback
        details = dict(exc_type=exc_type, value=value, traceback=traceback)
        super(RemoteError, self).__init__(**details)
class Timeout(RPCException):
    """Raised when rpc_response_timeout elapses while awaiting a reply."""

    msg_fmt = _('Timeout while waiting on RPC response - '
                'topic: "%(topic)s", RPC method: "%(method)s" '
                'info: "%(info)s"')

    def __init__(self, info=None, topic=None, method=None):
        """Initiates Timeout object.

        :param info: extra info to convey to the user
        :param topic: the topic that the rpc call was sent to
        :param method: the name of the rpc method being called
        """
        self.info = info
        self.topic = topic
        self.method = method
        unknown = _('<unknown>')
        super(Timeout, self).__init__(
            None,
            info=info or unknown,
            topic=topic or unknown,
            method=method or unknown)
class DuplicateMessageError(RPCException):
    """Raised when a message with an already-processed ID arrives."""
    msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.")


class InvalidRPCConnectionReuse(RPCException):
    """Raised when an RPC connection is reused incorrectly."""
    msg_fmt = _("Invalid reuse of an RPC connection.")


class UnsupportedRpcVersion(RPCException):
    """Raised when a message requests an API version we do not speak."""
    msg_fmt = _("Specified RPC version, %(version)s, not supported by "
                "this endpoint.")


class UnsupportedRpcEnvelopeVersion(RPCException):
    """Raised when a message envelope version is newer than ours."""
    msg_fmt = _("Specified RPC envelope version, %(version)s, "
                "not supported by this endpoint.")


class RpcVersionCapError(RPCException):
    """Raised when a configured version cap is below the requested version."""
    msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low")
class Connection(object):
    """A connection to the message bus, as returned by rpc.create_connection().

    Users of the rpc API must obtain instances via rpc.create_connection()
    rather than constructing this class directly; concrete subclasses
    supply the actual transport behavior.
    """

    def close(self):
        """Close the connection and release any resources tied to it.

        Must be called once the connection will no longer be used, so that
        resources such as the underlying network connection are cleaned up.
        """
        raise NotImplementedError()

    def create_consumer(self, topic, proxy, fanout=False):
        """Attach a consumer that dispatches queue messages to ``proxy``.

        The content of each message pulled off the queue determines which
        method is invoked on the proxy object.

        :param topic: name to consume from. With fanout=False, multiple
            consumers of the same topic (e.g. every nova-compute reading
            "compute") share messages round-robin; with fanout=True every
            consumer of the topic receives a copy of every message.
        :param proxy: object that handles all incoming messages.
        :param fanout: whether this is a fanout topic (see ``topic``).
        """
        raise NotImplementedError()

    def create_worker(self, topic, proxy, pool_name):
        """Attach a pooled worker for ``topic``.

        A worker is a consumer that belongs to a named pool which may run
        in parallel. Every pool receives each message, but only one worker
        within a pool is asked to process it; load is spread round-robin
        across the pool's members.

        :param topic: name to consume from; multiple service instances may
            consume from the same topic.
        :param proxy: object that handles all incoming messages.
        :param pool_name: name of the pool of workers to join.
        """
        raise NotImplementedError()

    def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
        """Register as a member of a group of consumers.

        Uses the given topic from the specified exchange. Exactly one
        member of a given pool receives each message; a message is
        delivered to every pool that exists.

        :param callback: callable invoked with each message (one argument).
        :param pool_name: name of the consumer pool.
        :param topic: routing topic for desired messages.
        :param exchange_name: message exchange to attach to; defaults to
            the configured exchange.
        """
        raise NotImplementedError()

    def consume_in_thread(self):
        """Spawn a thread that handles all incoming messages.

        The spawned thread services every consumer set up on this
        connection; dispatching inside it is expected to be non-blocking
        (e.g. handing messages off to a worker pool).
        """
        raise NotImplementedError()
def _safe_log(log_func, msg, msg_data):
"""Sanitizes the msg_data field before logging."""
SANITIZE = ['_context_auth_token', 'auth_token', 'new_pass']
def _fix_passwords(d):
"""Sanitizes the password fields in the dictionary."""
for k in six.iterkeys(d):
if k.lower().find('password') != -1:
d[k] = '<SANITIZED>'
elif k.lower() in SANITIZE:
d[k] = '<SANITIZED>'
elif isinstance(d[k], list):
for e in d[k]:
if isinstance(e, dict):
_fix_passwords(e)
elif isinstance(d[k], dict):
_fix_passwords(d[k])
return d
return log_func(msg, _fix_passwords(copy.deepcopy(msg_data)))
def serialize_remote_exception(failure_info, log_failure=True):
    """Prepares exception data to be sent over rpc.

    Failure_info should be a sys.exc_info() tuple.

    :param failure_info: (type, value, traceback) tuple from sys.exc_info()
    :param log_failure: when True, log the exception and traceback at
        error level before serializing
    :returns: JSON string describing the failure (class, module, message,
        traceback lines, args and kwargs)
    """
    tb = traceback.format_exception(*failure_info)
    failure = failure_info[1]
    if log_failure:
        LOG.error(_LE("Returning exception %s to caller"),
                  six.text_type(failure))
        LOG.error(tb)

    # Include the exception's kwargs (set by RPCException subclasses) so the
    # receiving side can reconstruct the instance faithfully.
    kwargs = {}
    if hasattr(failure, 'kwargs'):
        kwargs = failure.kwargs

    # NOTE(matiu): With cells, it's possible to re-raise remote, remote
    # exceptions. Lets turn it back into the original exception type.
    cls_name = str(failure.__class__.__name__)
    mod_name = str(failure.__class__.__module__)
    if (cls_name.endswith(_REMOTE_POSTFIX) and
            mod_name.endswith(_REMOTE_POSTFIX)):
        # Strip the _Remote suffix added by deserialize_remote_exception()
        # so the peer recreates the original type, not a _Remote_Remote one.
        cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
        mod_name = mod_name[:-len(_REMOTE_POSTFIX)]

    data = {
        'class': cls_name,
        'module': mod_name,
        'message': six.text_type(failure),
        'tb': tb,
        'args': failure.args,
        'kwargs': kwargs
    }

    json_data = jsonutils.dumps(data)

    return json_data
def deserialize_remote_exception(conf, data):
    """Rebuild an exception from serialize_remote_exception() output.

    :param conf: config object providing allowed_rpc_exception_modules,
        the whitelist of modules we are willing to import
    :param data: JSON string describing the remote failure
    :returns: an instance of the original exception type (renamed with a
        ``_Remote`` suffix) when it can be safely recreated, otherwise a
        RemoteError wrapper
    """
    failure = jsonutils.loads(str(data))

    trace = failure.get('tb', [])
    # Append the remote traceback to the message so str() shows it.
    message = failure.get('message', "") + "\n" + "\n".join(trace)
    name = failure.get('class')
    module = failure.get('module')

    # NOTE(ameade): We DO NOT want to allow just any module to be imported, in
    # order to prevent arbitrary code execution.
    if module not in conf.allowed_rpc_exception_modules:
        return RemoteError(name, failure.get('message'), trace)

    try:
        mod = importutils.import_module(module)
        klass = getattr(mod, name)
        if not issubclass(klass, Exception):
            raise TypeError("Can only deserialize Exceptions")

        failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
    except (AttributeError, TypeError, ImportError):
        # Anything unexpected about the whitelisted type falls back to a
        # generic wrapper rather than failing the caller.
        return RemoteError(name, failure.get('message'), trace)

    ex_type = type(failure)
    str_override = lambda self: message
    new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
                       {'__str__': str_override, '__unicode__': str_override})
    new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
    try:
        # NOTE(ameade): Dynamically create a new exception type and swap it in
        # as the new type for the exception. This only works on user defined
        # Exceptions and not core python exceptions. This is important because
        # we cannot necessarily change an exception message so we must override
        # the __str__ method.
        failure.__class__ = new_ex_type
    except TypeError:
        # NOTE(ameade): If a core exception then just add the traceback to the
        # first exception argument.
        failure.args = (message,) + failure.args[1:]
    return failure
class CommonRpcContext(object):
    """Dict-backed request context passed across rpc calls.

    Arbitrary keyword values are kept in ``self.values`` and exposed as
    read-only attributes for convenience.
    """

    def __init__(self, **kwargs):
        self.values = kwargs

    def __getattr__(self, key):
        try:
            return self.values[key]
        except KeyError:
            raise AttributeError(key)

    def to_dict(self):
        """Return a deep copy of the stored values."""
        return copy.deepcopy(self.values)

    @classmethod
    def from_dict(cls, values):
        """Alternate constructor from a plain dict of values."""
        return cls(**values)

    def deepcopy(self):
        """Return an independent copy of this context."""
        return self.from_dict(self.to_dict())

    def update_store(self):
        # Publish this context as the thread-local "current" context.
        local.store.context = self

    def elevated(self, read_deleted=None, overwrite=False):
        """Return a version of this context with admin flag set."""
        # TODO(russellb) This method is a bit of a nova-ism. It makes
        # some assumptions about the data in the request context sent
        # across rpc, while the rest of this class does not. We could get
        # rid of this if we changed the nova code that uses this to
        # convert the RpcContext back to its native RequestContext doing
        # something like nova.context.RequestContext.from_dict(ctxt.to_dict())
        admin_ctx = self.deepcopy()
        admin_ctx.values['is_admin'] = True
        roles = admin_ctx.values.setdefault('roles', [])
        if 'admin' not in roles:
            roles.append('admin')
        if read_deleted is not None:
            admin_ctx.values['read_deleted'] = read_deleted
        return admin_ctx
class ClientException(Exception):
    """Wraps an exception the RPC proxy object is expected to raise.

    Merely instantiating it captures sys.exc_info(), so the exception
    currently being handled can be forwarded to the RPC client without
    being logged as a real error.
    """

    def __init__(self):
        self._exc_info = sys.exc_info()
def catch_client_exception(exceptions, func, *args, **kwargs):
    """Invoke ``func``, wrapping listed exception types in ClientException.

    Exceptions whose exact type appears in ``exceptions`` are re-raised
    as ClientException (capturing the active exc_info); anything else
    propagates unchanged.
    """
    try:
        return func(*args, **kwargs)
    except Exception as exc:
        if type(exc) not in exceptions:
            raise
        raise ClientException()
def client_exceptions(*exceptions):
    """Decorator for manager methods that raise expected exceptions.

    Marking a Manager method with this decorator declares exceptions the
    RPC layer should not consider fatal and should not log as real
    errors; listed exceptions get wrapped in ClientException, which the
    RPC layer uses internally.
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            return catch_client_exception(exceptions, func, *args, **kwargs)
        return wrapper
    return decorator
# TODO(sirp): we should deprecate this in favor of
# using `versionutils.is_compatible` directly
def version_is_compatible(imp_version, version):
    """Determine whether versions are compatible.

    :param imp_version: The version implemented
    :param version: The version requested by an incoming message.
    """
    # Thin shim: note the argument order is swapped relative to
    # versionutils.is_compatible(requested, current).
    compatible = versionutils.is_compatible(version, imp_version)
    return compatible
def serialize_msg(raw_msg):
    """Wrap ``raw_msg`` in the versioned rpc envelope.

    See the docstring of _RPC_ENVELOPE_VERSION for the envelope format.
    """
    envelope = {
        _VERSION_KEY: _RPC_ENVELOPE_VERSION,
        _MESSAGE_KEY: jsonutils.dumps(raw_msg),
    }
    return envelope
def deserialize_msg(msg):
    """Unwrap a message that may or may not carry the rpc envelope.

    Robustness Principle: "Be strict in what you send, liberal in what
    you accept." We have to guess what we just received:

    1) A dict that looks like a standard message envelope -> unwrap it.
    2) A dict without the envelope keys -> a notification or a
       pre-envelope (version 1.0) message; return it as-is.
    3) Any non-dict value -> return values from rpc.call() predate
       envelopes (calls were always dicts); return it and hope for the
       best.

    :raises UnsupportedRpcEnvelopeVersion: when the envelope version is
        one we cannot speak.
    """
    if not isinstance(msg, dict):
        # Case 3 above.
        return msg

    if not all(key in msg for key in (_VERSION_KEY, _MESSAGE_KEY)):
        # Case 2 above.
        return msg

    # Looks like a genuine envelope (case 1); refuse versions newer than
    # the one we implement.
    if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]):
        raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])

    return jsonutils.loads(msg[_MESSAGE_KEY])
| |
from copy import deepcopy
from datetime import datetime
from django.utils.translation import ugettext as _
from openbudgets.apps.sheets.models import Template, Sheet, SheetItem
from openbudgets.apps.entities.models import Entity
from openbudgets.apps.international.utilities import translated_fields
from openbudgets.apps.transport.incoming.parsers import register, ParsingError
from openbudgets.apps.transport.incoming.parsers.template import TemplateParser
from openbudgets.apps.transport.incoming.errors import MetaParsingError, NodeNotFoundError
def _rows_filter(obj, row_num=None):
if obj['has_children']:
return True
else:
if 'budget' in obj:
if obj['budget'] is not None:
return True
else:
return 'actual' in obj and obj['actual'] is not None
elif 'actual' in obj:
return obj['actual'] is not None
else:
raise ParsingError(_('Neither actual nor budget columns found.'))
class SheetParser(TemplateParser):
    """Parses incoming sheet (budget/actual) data for an entity/period.

    Extends TemplateParser: rows are first validated/saved as template
    nodes by a nested TemplateParser instance, then stored as SheetItems
    referencing the resulting nodes. Written for Python 2 (iteritems,
    reduce, list-returning dict.keys()).
    """

    container_model = Sheet
    item_model = SheetItem
    # Item fields persisted for each row, plus translated variants.
    ITEM_ATTRIBUTES = ['budget', 'actual', 'node', 'description', 'sheet']\
        + translated_fields(SheetItem)
    CONTAINER_ATTRIBUTES = ['entity', 'period_start', 'period_end']
    # Keys stripped from item dicts before model-level cleaning.
    ITEM_CLEANING_EXCLUDE = ['node', 'sheet', 'lookup']

    def __init__(self, container_object_dict):
        super(SheetParser, self).__init__(container_object_dict)
        # Nested parser that resolves the template this sheet derives from.
        self.template_parser = self._init_template_parser()

    @classmethod
    def resolve(cls, deferred):
        """Rebuild a parser instance from a deferred() snapshot."""
        container_dict = deferred['container']
        if not container_dict:
            raise Exception('Deferred object missing container dict: %s' % container_dict)
        instance = cls(container_dict)
        instance.objects_lookup = deferred['items']
        instance.template_parser.objects_lookup = deferred['template_parser']['items']
        return instance

    def clean(self, data):
        """Normalize the amount columns, then run the template clean."""
        for row_num, obj in enumerate(data):
            self._clean_amount(obj, 'actual')
            self._clean_amount(obj, 'budget')
        return super(SheetParser, self).clean(data=data)

    def _clean_amount(self, obj, attr):
        # Missing/empty amounts become None; non-numeric values too.
        missing = '__missing__'
        amount = obj.get(attr, missing)
        if amount == missing or amount == '':
            obj[attr] = None
        else:
            try:
                obj[attr] = float(obj[attr])
            except (ValueError, TypeError):
                obj[attr] = None

    def validate(self, data, keep_cache=False):
        """Validate rows against the template, then dry-save the sheet.

        :returns: (valid, errors) combining sheet and template results
        """
        # do the sheet clean first
        data = self.clean(data)
        if self.template_parser:
            # Validate a deep copy so template cleaning can't mutate our rows.
            template_valid, template_errors = self.template_parser.validate(data=deepcopy(data), keep_cache=True)
            self.skipped_rows = self.template_parser.skipped_rows
        else:
            #TODO: add support for parsing sheets without a parent template to inherit from
            template_valid = False
            template_errors = []
        # here we continue with the rest the `super` logic for `validate()`
        # generate a lookup table with each item uniquely identified
        self._generate_lookup(data)
        self.keep_cache = keep_cache
        # run a dry save of the data
        self.save(dry=True)
        if self.template_parser:
            self.template_parser._clear_cache()
        return template_valid and self.valid, self.errors + template_errors

    def save(self, dry=False):
        """Save (or dry-run) the template, the sheet container and items.

        :param dry: when True nothing is persisted; items are exercised
            for validation only
        :returns: True when saving proceeded, False if the template save
            failed
        """
        template_saved = True
        if not dry:
            template_saved = self.template_parser.save()
        if template_saved:
            self.dry = dry
            # create an instance of the container
            self._create_container()
            if dry:
                # loop the lookup table and save every item
                for key, obj in self.objects_lookup.iteritems():
                    self._save_item(obj, key)
                if not self.keep_cache:
                    self._clear_cache()
            else:
                template_cache = self.template_parser.saved_cache
                # loop the template's saved nodes cache and save item for every node that's not in the sourcefile
                for key, obj in template_cache.iteritems():
                    if key not in self.objects_lookup:
                        self._save_item(obj, key, is_node=True)
                # loop the lookup table and save item for every row
                for key, obj in self.objects_lookup.iteritems():
                    self._save_item(obj, key)
                # post save: aggregate amounts up the node tree, then total
                # the sheet itself.
                self._save_amounts()
                self._save_sheet_amounts()
                self.dry = False
                self.template_parser.cleanup()
                self.cleanup()
            return True
        return False

    def deferred(self):
        """Snapshot this parser (and its template parser) for resolve()."""
        deferred = super(SheetParser, self).deferred()
        deferred['template_parser'] = self.template_parser.deferred()
        return deferred

    def _save_item(self, obj, key, is_node=False):
        node = None
        # check if we already saved this object and have it in cache
        if key in self.saved_cache:
            return self.saved_cache[key]
        if is_node:
            # Saving a template node with no matching source row: create an
            # empty item dict attached to that node.
            node = obj
            obj = {}
        self._add_to_container(obj, key)
        item = self._create_item(obj, key, node)
        # cache the saved object
        self.saved_cache[key] = item

    def _create_container(self, container_dict=None, exclude=None):
        """Create the Sheet container, wiring in the saved template."""
        data = container_dict or self.container_object_dict
        data['template'] = self.template_parser.container_object
        fields_to_exclude = ['template']
        if exclude:
            fields_to_exclude += exclude
        # Skip TemplateParser's override and use the base implementation.
        super(TemplateParser, self)._create_container(container_dict=data, exclude=fields_to_exclude)

    def _generate_lookup(self, data):
        # Reuse the template parser's lookups so sheet rows and template
        # nodes share keys.
        self.rows_objects_lookup = self.template_parser.rows_objects_lookup
        self.objects_lookup = deepcopy(self.template_parser.objects_lookup)

    def _create_item(self, obj, key, node=None):
        """Attach the matching template node to the item, then create it.

        :raises ParsingError: on a non-dry run when no node exists for the
            row
        """
        if node:
            obj['node'] = node
        elif key in self.template_parser.saved_cache:
            obj['node'] = self.template_parser.saved_cache[key]
        elif self.dry:
            # prepare data for the error
            columns = ['code', 'parent', 'parentscope']
            values = []
            # NOTE(review): removing from `columns` while iterating it skips
            # the element after each removal; only affects the metadata
            # reported in the error, not parsing itself — confirm intent.
            for col in columns:
                if col not in obj:
                    columns.remove(col)
                else:
                    values.append(obj[col])
            self.throw(
                NodeNotFoundError(
                    row=self.rows_objects_lookup[key],
                    columns=columns,
                    values=values
                )
            )
        else:
            raise ParsingError(_('Did not find a node for the item in row: %s') % self.rows_objects_lookup[key])
        # Skip TemplateParser's override and use the base implementation.
        return super(TemplateParser, self)._create_item(obj, key)

    def _clean_object(self, obj, key):
        super(SheetParser, self)._clean_object(obj, key)
        # Empty amounts are stored as NULL, not empty strings.
        if 'budget' not in obj or obj['budget'] == '':
            obj['budget'] = None
        if 'actual' not in obj or obj['actual'] == '':
            obj['actual'] = None

    def _add_to_container(self, obj, key):
        if not self.dry:
            obj['sheet'] = self.container_object

    def _init_template_parser(self):
        """Build the nested TemplateParser from a cleaned container dict.

        :returns: a TemplateParser extending the resolved parent template,
            or False when no entity/parent could be determined
        """
        container_dict_copy = deepcopy(self.container_object_dict)
        #TODO: refactor this into a proper clean method
        if 'template' in container_dict_copy:
            del container_dict_copy['template']
        parent_template, blueprint = self._get_parent_template(container_dict_copy)
        # period_end belongs to the sheet, not the template.
        if 'period_end' in container_dict_copy:
            del container_dict_copy['period_end']
        if parent_template:
            return TemplateParser(container_dict_copy, extends=parent_template, blueprint=blueprint, rows_filters=(_rows_filter,))
        return False

    def _get_parent_template(self, container_dict):
        """Find the template (and blueprint) this sheet should extend.

        Searches, in order: a sheet covering the same period, the latest
        earlier sheet, the earliest later sheet, then the division's
        standard template.

        :raises ParsingError: when no blueprint or parent can be found
        """
        parent = None
        blueprint = None
        entity = self._set_entity()
        # set the entity also on the template container object
        # it will be used for generating a name and cleaned later
        container_dict['entity'] = entity
        if entity:
            date = datetime.strptime(container_dict['period_start'], '%Y-%m-%d')
            try:
                blueprint = entity.division.templates.filter(period_start__lte=date).order_by('-period_start')[0]
                container_dict['blueprint'] = blueprint
            except IndexError:
                raise ParsingError(_(u'Could not find a '
                                     u'blueprint template for entity {entity}'
                                     u' and start date '
                                     u'{period_start}').format(entity=entity.name,
                                                               period_start=container_dict['period_start']))
        if entity:
            # try getting the container model for same period or containing it
            qs = self.container_model.objects.filter(
                entity=entity,
                period_start__lte=container_dict['period_start'],
                period_end__gte=container_dict['period_end']
            ).order_by('-period_end')[:1]
            if qs.count():
                parent = qs[0].template
            else:
                # try getting the latest container model prior to this one
                qs = self.container_model.objects.filter(
                    entity=entity,
                    period_end__lte=container_dict['period_start']
                ).order_by('-period_end')[:1]
                if qs.count():
                    parent = qs[0].template
                else:
                    # try getting the earliest container model later then this one
                    qs = self.container_model.objects.filter(
                        entity=entity,
                        period_start__gte=container_dict['period_end']
                    ).order_by('period_start')[:1]
                    if qs.count():
                        parent = qs[0].template
                    else:
                        # try getting the standard template for this entity's division
                        qs = Template.objects.filter(
                            divisions=entity.division,
                            period_start__lte=container_dict['period_start']
                        ).order_by('-period_start')[:1]
                        if qs.count():
                            parent = qs[0]
                        else:
                            #TODO: handle this case of no previous template found
                            raise ParsingError(_('Could not find a parent template for input: %s') % container_dict)
        return parent, blueprint

    def _set_entity(self):
        """Resolve the container's entity value to an Entity instance.

        On a dry run a missing entity is recorded as a MetaParsingError;
        otherwise the DoesNotExist exception propagates.
        """
        container_dict = self.container_object_dict
        try:
            if not isinstance(container_dict['entity'], Entity):
                entity = Entity.objects.get(pk=container_dict['entity'])
                return entity
            else:
                return container_dict['entity']
        except Entity.DoesNotExist as e:
            if self.dry:
                self.throw(
                    MetaParsingError(
                        reason='Could not find Entity with key: %s' % container_dict['entity']
                    )
                )
            else:
                raise e

    def _save_sheet_amounts(self):
        """Total top-level EXPENDITURE items into the sheet's own amounts."""
        summable_items = self.item_model.objects.filter(sheet=self.container_object,
                                                        node__parent__isnull=True,
                                                        node__direction='EXPENDITURE')
        sheet_budget = sum([item.budget or 0 for item in summable_items])
        sheet_actual = sum([item.actual or 0 for item in summable_items])
        self.container_object.budget = sheet_budget
        self.container_object.actual = sheet_actual
        self.container_object.save()

    def _save_amounts(self):
        """Aggregate child amounts into parents, deepest level first.

        Builds a parent-path -> children map, then walks levels bottom-up
        so each parent's budget/actual is the sum of its (already final)
        children; None children are skipped, all-None sums stay None.
        """
        children_lookup = {}
        keys_by_level = {1: []}

        def _make_adder(attr):
            # Fold function for reduce(): None acts as "no value yet".
            def _add(a, b):
                b_value = getattr(b, attr)
                if a is None:
                    if b_value is None:
                        return None
                    else:
                        return float(b_value)
                elif b_value is None:
                    return a
                else:
                    return a + float(b_value)
            return _add

        for key, item in self.saved_cache.iteritems():
            node = item.node
            parent = node.parent
            if parent:
                parent_key = parent.path
                # Depth of the parent in the node tree.
                level = len(parent_key.split(self.PATH_DELIMITER))
                if parent_key not in children_lookup:
                    children_lookup[parent_key] = []
                children_lookup[parent_key].append(item)
                if not level in keys_by_level:
                    keys_by_level[level] = []
                keys_by_level[level].append(parent_key)
            else:
                parent_key = node.path
                if parent_key not in children_lookup:
                    children_lookup[parent_key] = []
                keys_by_level[1].append(parent_key)

        # Python 2: dict.keys() returns a list we can sort in place.
        levels = keys_by_level.keys()
        levels.sort(reverse=True)
        for level in levels:
            keys = keys_by_level[level]
            for key in keys:
                children = children_lookup[key]
                item = self.saved_cache[key]
                for child in children:
                    # set the item's parent for all children
                    child.parent = item
                    child.save()
                item.budget = reduce(_make_adder('budget'), children, None)
                item.actual = reduce(_make_adder('actual'), children, None)
                item.save()


# Make this parser discoverable under the 'sheet' key.
register('sheet', SheetParser)
| |
#
# libmsym.py
# libmsym
#
# Created by Marcus Johansson on 07/10/15.
# Copyright (c) 2015 Marcus Johansson.
#
# Distributed under the MIT License ( See LICENSE file or copy at http://opensource.org/licenses/MIT )
#
from ctypes import *
from ctypes.util import find_library
from enum import Enum
from copy import copy
from . import _libmsym_install_location, export
# Handle to the loaded libmsym shared library; populated by init().
_lib = None
@export
class Error(Exception):
    """libmsym error carrying a value plus optional detail text."""

    def __init__(self, value, details=""):
        super().__init__(value)
        self.value = value
        self.details = details

    def __str__(self):
        return "{0!r}: {1!r}".format(self.value, self.details)

    def __repr__(self):
        return self.__str__()
try:
import numpy as np
except ImportError:
np = None
@export
class SymmetryOperation(Structure):
    """ctypes mirror of a libmsym symmetry operation.

    ``type`` holds one of the operation-type constants below, and
    ``orientation`` refines reflections / order-2 proper rotations.
    """

    # Orientation classes.
    # BUG FIX: NONE was "0," (a 1-tuple), so "x == NONE" never matched.
    NONE = 0
    HORIZONTAL = 1
    VERTICAL = 2
    DIHEDRAL = 3

    # Operation types.
    # BUG FIX: IDENTITY was "0," (a 1-tuple), so "op.type == IDENTITY"
    # always compared int to tuple and was False.
    IDENTITY = 0
    PROPER_ROTATION = 1
    IMPROPER_ROTATION = 2
    REFLECTION = 3
    INVERSION = 4

    _names = ["E", "C", "S", "\u03C3", "i"]
    _proper_rotation_type_names = ["", "", "'", "''"]
    _reflection_type_names = ["", "h", "v", "d"]

    # Field order/types must match the C struct exactly.
    _fields_ = [("type", c_int),
                ("order", c_int),
                ("power", c_int),
                ("orientation", c_int),
                ("_v", (c_double*3)),
                ("conjugacy_class", c_int)]

    @property
    def vector(self):
        """Rotation axis / reflection normal as a 3-element float list."""
        return self._v[0:3]

    def __str__(self):
        orientation = ""
        order = ""
        power = ""
        axis = ""
        if self.type == self.PROPER_ROTATION and self.order == 2:
            orientation = self._proper_rotation_type_names[self.orientation]
        elif self.type == self.REFLECTION:
            orientation = self._reflection_type_names[self.orientation]
            axis = " with normal vector " + repr(self.vector)
        if self.type in [self.PROPER_ROTATION, self.IMPROPER_ROTATION]:
            order = str(self.order)
            power = "^" + str(self.power)
            axis = " around " + repr(self.vector)
        return __name__ + "." + self.__class__.__name__ + "( " + self._names[self.type] + order + orientation + power + axis + ", conjugacy class: " + str(self.conjugacy_class) + " )"

    def __repr__(self):
        return self.__str__()
@export
class Element(Structure):
    """ctypes mirror of a libmsym element (an atom with position, mass,
    charge and a short name)."""

    # Field order/types must match the C struct exactly.
    _fields_ = [("_id", c_void_p),
                ("mass", c_double),
                ("_v", c_double*3),
                ("charge", c_int),
                ("_name", c_char*4)]

    @property
    def coordinates(self):
        """Cartesian coordinates as a 3-element float list."""
        return self._v[0:3]

    @coordinates.setter
    def coordinates(self, coordinates):
        self._v = (c_double*3)(*coordinates)

    @property
    def name(self):
        """Element name decoded from the fixed 4-byte C buffer."""
        return self._name.decode()

    @name.setter
    def name(self, name):
        # Must be ASCII and fit the c_char*4 buffer.
        self._name = name.encode('ascii')
class _RealSphericalHarmonic(Structure):
    # Quantum numbers (n, l, m) identifying a real spherical harmonic.
    _fields_ = [("n", c_int),
                ("l", c_int),
                ("m", c_int)]


class _BasisFunctionUnion(Union):
    # Mirrors the C union of basis-function payloads; only the real
    # spherical harmonic member is exposed here.
    _fields_ = [("_rsh", _RealSphericalHarmonic)]
@export
class BasisFunction(Structure):
    """Base ctypes mirror of a libmsym basis function.

    Wraps an Element plus a typed payload union; concrete subclasses
    (e.g. RealSphericalHarmonic) fill in ``_type`` and ``_f``.
    """

    # Field order/types must match the C struct exactly.
    _fields_ = [("_id", c_void_p),
                ("_type", c_int),
                ("_element", POINTER(Element)),
                ("_f", _BasisFunctionUnion),
                ("_name", c_char*8)]

    def __init__(self, element=None):
        # BUG FIX: was "element == None"; identity comparison avoids
        # invoking an arbitrary __eq__ on the element.
        if element is None:
            raise Error("Basis function requires an element")
        super().__init__()
        self.element = element

    def _set_element_pointer(self, element):
        # Filled in right before handing the struct to C so the pointer
        # refers to the ctypes Element actually passed across.
        self._element = pointer(element)

    @property
    def name(self):
        """Basis function name decoded from the fixed 8-byte C buffer."""
        return self._name.decode()

    @name.setter
    def name(self, name):
        # Must be ASCII and fit the c_char*8 buffer.
        self._name = name.encode('ascii')
@export
class RealSphericalHarmonic(BasisFunction):
    """Real spherical harmonic basis function identified by (n, l, m)."""

    def __init__(self, element=None, n=0, l=0, m=0, name=""):
        super().__init__(element=element)
        self._type = 0
        self._f._rsh.n = n
        self._f._rsh.l = l
        self._f._rsh.m = m
        self.name = name

    @property
    def n(self):
        return self._f._rsh.n

    @n.setter
    def n(self, n):
        self._f._rsh.n = n

    @property
    def l(self):
        return self._f._rsh.l

    @l.setter
    def l(self, l):
        # BUG FIX: previously wrote "self._f._rsh.n = l" with the
        # parameter named "n" — wrong field, and the name "l" was
        # unresolved at call time (NameError).
        self._f._rsh.l = l

    @property
    def m(self):
        return self._f._rsh.m

    @m.setter
    def m(self, m):
        # BUG FIX: previously wrote "self._f._rsh.n = m" with the
        # parameter named "n" — wrong field, and the name "m" was
        # unresolved at call time (NameError).
        self._f._rsh.m = m
class SALC(Structure):
    """Symmetry-adapted linear combination of basis functions."""

    # Field order/types must match the C struct exactly.
    # _d: number of partner functions; _fl: number of basis functions.
    _fields_ = [("_d", c_int),
                ("_fl", c_int),
                ("_pf", POINTER(c_double)),
                ("_f", POINTER(POINTER(BasisFunction)))]

    _pf_array = None
    # NOTE(review): class-level mutable default, shared until
    # _update_basis_functions() rebinds it per instance — confirm no
    # instance reads it before that call.
    basis_functions = []

    def _update_basis_functions(self, basis_function_addresses, basis):
        # Map each C pointer back to the caller's Python BasisFunction by
        # matching addresses.
        self.basis_functions = [basis[basis_function_addresses.index(addressof(p.contents))] for p in self._f[0:self._fl]]

    #@property
    #def partner_functions(self):
    #    if self._pf_array is None:
    #        pf = cast(self._pf,POINTER(c_double*self._fl))
    #        self._pf_array = [f[0:self._fl] for f in pf[0:self._d]]
    #
    #    return self._pf_array

    @property
    def partner_functions(self):
        """Coefficient matrix as a (d, fl) numpy view over the C data."""
        if np is None:
            raise ImportError("numpy is not available.")
        if self._pf_array is None:
            # Zero-copy view; cached so repeated access reuses it.
            self._pf_array = np.ctypeslib.as_array(self._pf, shape = (self._d,self._fl))
        return self._pf_array
@export
class SubrepresentationSpace(Structure):
    """Collection of SALCs belonging to one symmetry species."""

    # Field order/types must match the C struct exactly.
    _fields_ = [("symmetry_species", c_int),
                ("_salc_length", c_int),
                ("_salcs", POINTER(SALC))]

    _salcarray = None

    @property
    def salcs(self):
        """The SALC structs as a Python list (built lazily, then cached)."""
        if self._salcarray is None:
            self._salcarray = self._salcs[0:self._salc_length]
        return self._salcarray
@export
class PartnerFunction(Structure):
    """ctypes mirror of a libmsym partner-function descriptor."""
    # Field order/types must match the C struct exactly.
    _fields_ = [("index", c_int),
                ("dim", c_int)]


@export
class SymmetrySpecies(Structure):
    """ctypes mirror of a libmsym symmetry species (irrep descriptor)."""

    # Field order/types must match the C struct exactly.
    _fields_ = [("_d", c_int),
                ("_r", c_int),
                ("_name", c_char*8)]

    @property
    def dim(self):
        """Dimension of the species."""
        return self._d

    @property
    def reducible(self):
        """True when the species is reducible (_r > 1)."""
        return self._r > 1

    @property
    def name(self):
        """Species label decoded from the fixed 8-byte C buffer."""
        return self._name.decode()
class _Thresholds(Structure):
    # Numerical tolerances used by libmsym's symmetry detection.
    # Field order/types must match the C struct exactly.
    _fields_ = [("zero", c_double),
                ("geometry", c_double),
                ("angle", c_double),
                ("equivalence", c_double),
                ("eigfact", c_double),
                ("permutation", c_double),
                ("orthogonalization", c_double)]
@export
class CharacterTable(Structure):
    """Character table of the detected point group."""

    # Field order/types must match the C struct exactly.
    # _d: table dimension; _classc: per-class operation counts;
    # _sops: representative operations; _s: species; _table: characters.
    _fields_ = [("_d", c_int),
                ("_classc", POINTER(c_int)),
                ("_sops", POINTER(POINTER(SymmetryOperation))),
                ("_s", POINTER(SymmetrySpecies)),
                ("_table", POINTER(c_double))]

    _table_array = None
    _class_count_array = None
    _symmetry_species = None

    @property
    def table(self):
        """Character matrix as a (d, d) numpy view over the C data."""
        if np is None:
            raise ImportError("numpy is not available.")
        if self._table_array is None:
            # Zero-copy view; cached so repeated access reuses it.
            self._table_array = np.ctypeslib.as_array(self._table, shape = (self._d,self._d))
        return self._table_array

    @property
    def class_count(self):
        """Operation count per conjugacy class (lazily built list)."""
        if self._class_count_array is None:
            self._class_count_array = self._classc[0:self._d]
        return self._class_count_array

    def _update_symmetry_operations(self, symmetry_operations):
        # Map each C pointer back to the caller's SymmetryOperation objects
        # by matching addresses.
        addresses = [addressof(sop) for sop in symmetry_operations]
        self.symmetry_operations = [symmetry_operations[addresses.index(addressof(sop.contents))] for sop in self._sops[0:self._d]]

    @property
    def symmetry_species(self):
        """The SymmetrySpecies structs as a list (lazily built, cached)."""
        if self._symmetry_species is None:
            self._symmetry_species = self._s[0:self._d]
        return self._symmetry_species
class _ReturnCode(c_int):
    """Return-code type for libmsym C calls; values mirror the C enum."""

    SUCCESS = 0
    INVALID_INPUT = -1
    INVALID_CONTEXT = -2
    INVALID_THRESHOLD = -3
    INVALID_ELEMENTS = -4
    INVALID_BASIS = -5
    INVALID_POINT_GROUP = -6
    INVALID_EQUIVALENCE_SET = -7
    INVALID_PERMUTATION = -8
    INVALID_GEOMETRY = -9
    INVALID_CHARACTER_TABLE = -10
    INVALID_SUBSPACE = -11
    INVALID_SUBGROUPS = -12
    INVALID_AXES = -13
    SYMMETRY_ERROR = -14
    PERMUTATION_ERROR = -15
    POINT_GROUP_ERROR = -16
    SYMMETRIZATION_ERROR = -17
    SUBSPACE_ERROR = -18

    def __str__(self):
        # __init__ is not called on ctypes return types, so we cannot
        # construct data on creation; don't decode details here either,
        # it may be too late by the time this runs.
        error_string = _lib.msymErrorString(self.value).decode()
        return repr(error_string)

    def __repr__(self):
        return self.__str__()
def init(library_location=None):
    """Load the libmsym shared library and declare its C call signatures.

    Populates the module-global ``_lib`` handle; must run before any
    Context is created.

    :param library_location: filesystem path of the libmsym library
    :raises Error: when no location is supplied
    """
    if(library_location is None):
        raise Error("Cannot find libmsym shared library")

    global _lib
    _lib = CDLL(library_location)

    # Opaque handle type standing in for the C msym_context.
    _Context = POINTER(type('msym_context', (Structure,), {}))

    _lib.msymErrorString.argtypes = [c_int]
    _lib.msymErrorString.restype = c_char_p

    _lib.msymCreateContext.restype = _Context
    _lib.msymCreateContext.argtypes = []

    _lib.msymGetDefaultThresholds.restype = POINTER(_Thresholds)
    _lib.msymGetDefaultThresholds.argtypes = []

    _lib.msymSetThresholds.restype = _ReturnCode
    _lib.msymSetThresholds.argtypes = [_Context, POINTER(_Thresholds)]

    _lib.msymReleaseContext.restype = _ReturnCode
    _lib.msymReleaseContext.argtypes = [_Context]

    _lib.msymGetErrorDetails.restype = c_char_p
    _lib.msymGetErrorDetails.argtypes = []

    _lib.msymFindSymmetry.restype = _ReturnCode
    _lib.msymFindSymmetry.argtypes = [_Context]

    _lib.msymSetPointGroupByName.restype = _ReturnCode
    _lib.msymSetPointGroupByName.argtypes = [_Context, c_char_p]

    _lib.msymGetPointGroupName.restype = _ReturnCode
    _lib.msymGetPointGroupName.argtypes = [_Context, c_int, c_char_p]

    _lib.msymSetElements.restype = _ReturnCode
    _lib.msymSetElements.argtypes = [_Context, c_int, POINTER(Element)]

    _lib.msymGenerateElements.restype = _ReturnCode
    _lib.msymGenerateElements.argtypes = [_Context, c_int, POINTER(Element)]

    _lib.msymGetElements.restype = _ReturnCode
    _lib.msymGetElements.argtypes = [_Context, POINTER(c_int), POINTER(POINTER(Element))]

    _lib.msymGetSymmetryOperations.restype = _ReturnCode
    _lib.msymGetSymmetryOperations.argtypes = [_Context, POINTER(c_int), POINTER(POINTER(SymmetryOperation))]

    _lib.msymSymmetrizeElements.restype = _ReturnCode
    _lib.msymSymmetrizeElements.argtypes = [_Context]

    _lib.msymSetBasisFunctions.restype = _ReturnCode
    _lib.msymSetBasisFunctions.argtypes = [_Context, c_int, POINTER(BasisFunction)]

    _lib.msymGetBasisFunctions.restype = _ReturnCode
    _lib.msymGetBasisFunctions.argtypes = [_Context, POINTER(c_int), POINTER(POINTER(BasisFunction))]

    _lib.msymGetSubrepresentationSpaces.restype = _ReturnCode
    _lib.msymGetSubrepresentationSpaces.argtypes = [_Context, POINTER(c_int), POINTER(POINTER(SubrepresentationSpace))]

    _lib.msymGetCharacterTable.restype = _ReturnCode
    _lib.msymGetCharacterTable.argtypes = [_Context, POINTER(POINTER(CharacterTable))]

    if np is None:
        # Without numpy, fall back to raw C pointers for matrix arguments.
        _SALCsMatrix = c_void_p
        _SALCsSpecies = POINTER(c_int)
        _NPDArray = POINTER(c_double)
    else:
        # With numpy, accept C-contiguous ndarrays directly.
        _SALCsMatrix = np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS')
        _SALCsSpecies = np.ctypeslib.ndpointer(dtype=np.int32, ndim=1, flags='C_CONTIGUOUS')
        _NPDArray = np.ctypeslib.ndpointer(dtype=np.float64, ndim=1, flags='C_CONTIGUOUS')

    _lib.msymSymmetrySpeciesComponents.restype = _ReturnCode
    _lib.msymSymmetrySpeciesComponents.argtypes = [_Context, c_int, _NPDArray, c_int, _NPDArray]

    _lib.msymGetSALCs.restype = _ReturnCode
    _lib.msymGetSALCs.argtypes = [_Context, c_int, _SALCsMatrix, _SALCsSpecies, POINTER(PartnerFunction)]

    _lib.msymSymmetrizeWavefunctions.restype = _ReturnCode
    _lib.msymSymmetrizeWavefunctions.argtypes = [_Context, c_int, _SALCsMatrix, _SALCsSpecies, POINTER(PartnerFunction)]
# Locate the shared library: prefer one on the system search path, fall
# back to the location recorded at package install time, and only load
# when something was found.
_libmsym_location = find_library('msym')
if _libmsym_location is None:
    _libmsym_location = _libmsym_install_location
if not (_libmsym_location is None):
    init(_libmsym_location)
@export
class Context(object):
    """Pythonic wrapper around a libmsym symmetry context.

    Owns an opaque msym context handle created by the shared library and
    exposes elements, basis functions, point group, symmetry operations,
    SALCs and the character table as attributes/properties. Usable as a
    context manager so the C-side context is always released.
    """
    # Class-level default keeps _destruct()/__del__ safe even when __init__
    # raises before msymCreateContext succeeds.
    _ctx = None

    def __init__(self, elements=[], basis_functions=[], point_group=""):
        """Create a context, optionally seeding elements, basis functions
        and a point group name; when a point group is given, symmetry is
        determined immediately.

        NOTE(review): the mutable [] defaults are shared between calls;
        harmless today because they are only read, never mutated.
        """
        if(_lib is None):
            raise Error("Shared library not loaded")
        self._elements = []
        self._basis_functions = []
        self._point_group = None
        self._subrepresentation_spaces = None
        self._character_table = None
        self._ctx = _lib.msymCreateContext()
        if not self._ctx:
            raise RuntimeError('Failed to create libmsym context')
        # Copy the library defaults so thresholds can be tuned per instance
        # via set_thresholds() without touching shared state.
        pthresholds = _lib.msymGetDefaultThresholds()
        default_thresholds = pthresholds.contents
        if default_thresholds is None:
            raise RuntimeError('Failed get libmsym default thresholds')
        self._thresholds = copy(default_thresholds)
        if len(elements) > 0:
            self._set_elements(elements)
        if len(basis_functions) > 0:
            self._set_basis_functions(basis_functions)
        if len(point_group) > 0:
            self._set_point_group(point_group)
            self.find_symmetry()

    def __del__(self):
        self._destruct()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._destruct()

    def _destruct(self):
        # Idempotent release of the underlying C context.
        if self._ctx:
            _lib.msymReleaseContext(self._ctx)
        self._ctx = None

    @staticmethod
    def _assert_success(error):
        # Translate any non-SUCCESS return code into an Error carrying the
        # library's detailed error string.
        if not error.value == _ReturnCode.SUCCESS:
            raise Error(error, details = _lib.msymGetErrorDetails().decode())

    def _get_basis_function_addresses(self):
        # Addresses of the library's internal BasisFunction array; used to
        # map SALC indices back onto the Python-side basis function objects.
        if not self._ctx:
            raise RuntimeError
        cbfs = POINTER(BasisFunction)()
        csize = c_int(0)
        self._assert_success(_lib.msymGetBasisFunctions(self._ctx,byref(csize),byref(cbfs)))
        return [addressof(bf) for bf in cbfs[0:csize.value]]

    def _set_elements(self, elements):
        # Pushing new elements invalidates all derived caches.
        if not self._ctx:
            raise RuntimeError
        self._subrepresentation_spaces = None
        self._character_table = None
        self._salcs = None
        self._basis_functions = []
        size = len(elements)
        element_array = (Element*size)(*elements)
        self._assert_success(_lib.msymSetElements(self._ctx, size, element_array))
        # Keep a reference: basis functions later point into this array.
        self._element_array = element_array
        self._elements = elements

    def _set_point_group(self, point_group):
        # Selecting a point group by name invalidates derived caches and
        # refreshes the symmetry operation list.
        if not self._ctx:
            raise RuntimeError
        self._subrepresentation_spaces = None
        self._character_table = None
        self._salcs = None
        cname = c_char_p(point_group.encode('ascii'))
        self._assert_success(_lib.msymSetPointGroupByName(self._ctx, cname))
        self._update_symmetry_operations()

    def _set_basis_functions(self, basis_functions):
        # Each basis function must reference the ctypes Element it belongs
        # to, so rebind the pointers into self._element_array first.
        if not self._ctx:
            raise RuntimeError
        self._subrepresentation_spaces = None
        self._salcs = None
        size = len(basis_functions)
        for bf in basis_functions:
            bf._set_element_pointer(self._element_array[self._elements.index(bf.element)])
        self._assert_success(_lib.msymSetBasisFunctions(self._ctx, size, (BasisFunction*size)(*basis_functions)))
        self._basis_functions = basis_functions
        if not self._point_group is None:
            self._update_symmetry_operations()
            self._update_character_table()

    def _update_elements(self):
        # Pull the (possibly symmetrized/generated) element list back from
        # the library.
        # NOTE(review): this stores `_elements_array` while the rest of the
        # class reads `_element_array` (no trailing s) — after
        # generate_elements(), _set_basis_functions would index None.
        # Confirm whether the attribute name is a typo.
        if not self._ctx:
            raise RuntimeError
        celements = POINTER(Element)()
        csize = c_int(0)
        self._assert_success(_lib.msymGetElements(self._ctx,byref(csize),byref(celements)))
        self._elements_array = celements
        self._elements = celements[0:csize.value]

    def _update_symmetry_operations(self):
        if not self._ctx:
            raise RuntimeError
        csops = POINTER(SymmetryOperation)()
        csize = c_int(0)
        self._assert_success(_lib.msymGetSymmetryOperations(self._ctx,byref(csize),byref(csops)))
        self._symmetry_operations = csops[0:csize.value]

    def _update_point_group(self):
        # Point group names fit in 8 bytes (e.g. "D6h", "C2v").
        if not self._ctx:
            raise RuntimeError
        cname = (c_char*8)()
        self._assert_success(_lib.msymGetPointGroupName(self._ctx,sizeof(cname),cname))
        self._point_group = cname.value.decode()

    def _update_subrepresentation_spaces(self):
        # Fetch SALC subspaces and rewire their C basis-function pointers
        # back to the Python BasisFunction objects.
        if not self._ctx:
            raise RuntimeError
        basis_function_addresses = self._get_basis_function_addresses()
        csrs = POINTER(SubrepresentationSpace)()
        csize = c_int(0)
        self._assert_success(_lib.msymGetSubrepresentationSpaces(self._ctx,byref(csize),byref(csrs)))
        srs = csrs[0:csize.value]
        for s in srs:
            for salc in s.salcs:
                salc._update_basis_functions(basis_function_addresses, self._basis_functions)
        self._subrepresentation_spaces = srs

    def _update_character_table(self):
        if not self._ctx:
            raise RuntimeError
        cct = POINTER(CharacterTable)()
        self._assert_success(_lib.msymGetCharacterTable(self._ctx,byref(cct)))
        self._character_table = cct.contents
        self._character_table._update_symmetry_operations(self._symmetry_operations)

    def _update_salcs(self):
        # Requires numpy: the SALC coefficients come back as an NxN matrix.
        if not self._ctx:
            raise RuntimeError
        if np is None:
            raise ImportError("numpy is not available.")
        csize = len(self._basis_functions)
        partner_functions = (PartnerFunction*csize)()
        salcs = np.zeros((csize,csize),dtype=np.float64)
        species = np.zeros((csize),dtype=np.int32)
        self._assert_success(_lib.msymGetSALCs(self._ctx,csize,salcs,species,partner_functions))
        self._salcs = (salcs, species, partner_functions[0:csize])

    def set_thresholds(self, **kwargs):
        """Override one or more numeric tolerance thresholds by keyword
        (zero, geometry, angle, equivalence, eigfact, permutation,
        orthogonalization) and push them to the library."""
        for key in kwargs.keys():
            if not key in ['zero','geometry','angle','equivalence',
                           'eigfact','permutation','orthogonalization']:
                raise Error('Unrecognized threshold argument')
            setattr(self._thresholds, key, kwargs[key])
        self._assert_success(_lib.msymSetThresholds(self._ctx, pointer(self._thresholds)))

    @property
    def elements(self):
        """List of Element objects currently held by the context."""
        return self._elements

    @elements.setter
    def elements(self, elements):
        self._set_elements(elements)

    @property
    def basis_functions(self):
        """List of BasisFunction objects currently held by the context."""
        return self._basis_functions

    @basis_functions.setter
    def basis_functions(self, basis_functions):
        self._set_basis_functions(basis_functions)

    @property
    def point_group(self):
        """Point group name (e.g. 'C2v'); set to select one by name."""
        return self._point_group

    @point_group.setter
    def point_group(self, point_group):
        self._set_point_group(point_group)

    @property
    def symmetry_operations(self):
        """Symmetry operations of the current point group."""
        return self._symmetry_operations

    def find_symmetry(self):
        """Detect the point group of the current elements and return its
        name; also refreshes the symmetry operation list."""
        if not self._ctx:
            raise RuntimeError
        self._assert_success(_lib.msymFindSymmetry(self._ctx))
        self._update_point_group()
        self._update_symmetry_operations()
        return self._point_group

    def symmetrize_elements(self):
        """Project elements onto the current point group and return the
        updated element list.

        NOTE(review): the module-level argtypes for msymSymmetrizeElements
        declare only the context parameter, yet an error accumulator is
        passed here — confirm the binding accepts the second argument.
        """
        if not self._ctx:
            raise RuntimeError
        cerror = c_double(0)
        self._assert_success(_lib.msymSymmetrizeElements(self._ctx, byref(cerror)))
        self._update_elements()
        return self._elements

    @property
    def subrepresentation_spaces(self):
        """SALC subspaces, computed lazily and cached."""
        if self._subrepresentation_spaces is None:
            self._update_subrepresentation_spaces()
        return self._subrepresentation_spaces

    @property
    def character_table(self):
        """Character table of the current point group, computed lazily."""
        if self._character_table is None:
            self._update_character_table()
        return self._character_table

    @property
    def salcs(self):
        """(coefficients, species, partner_functions) tuple, lazily built."""
        if self._salcs is None:
            self._update_salcs()
        return self._salcs

    def symmetrize_wavefunctions(self,m):
        """Symmetrize a square wavefunction coefficient matrix m (NxN where
        N = number of basis functions); returns (wf, species, partners).
        Requires numpy."""
        if not self._ctx:
            raise RuntimeError
        if np is None:
            raise ImportError("numpy is not available.")
        csize = len(self._basis_functions)
        (d1,d2) = m.shape
        if not (d1 == csize and d2 == csize):
            # NOTE(review): message looks truncated — presumably meant
            # "...x<N> matrix".
            raise ValueError("Must provide a " + str(csize) + "x" + str(csize))
        wf = np.ascontiguousarray(m, dtype=np.float64)
        partner_functions = (PartnerFunction*csize)()
        species = np.zeros((csize),dtype=np.int32)
        self._assert_success(_lib.msymSymmetrizeWavefunctions(self._ctx,csize,wf,species,partner_functions))
        return (wf, species, partner_functions[0:csize])

    def generate_elements(self, elements):
        """Generate the full element set from a symmetry-unique subset
        under the current point group; returns the generated elements.
        Resets elements, basis functions and all derived caches."""
        if not self._ctx:
            raise RuntimeError
        self._subrepresentation_spaces = None
        self._character_table = None
        self._salcs = None
        self._element_array = None
        self._basis_functions = []
        self._elements = []
        size = len(elements)
        element_array = (Element*size)(*elements)
        self._assert_success(_lib.msymGenerateElements(self._ctx, size, element_array))
        self._update_elements()
        return self._elements

    def symmetry_species_components(self, wf):
        """Decompose the wavefunction vector wf (length = number of basis
        functions) into per-symmetry-species component weights.
        Requires numpy (np is used unguarded here)."""
        wf_size = len(wf)
        if not wf_size == len(self.basis_functions):
            raise ValueError("Must provide an array of length " + str(len(self.basis_functions)))
        species_size = self.character_table._d
        species = np.zeros((species_size),dtype=np.float64)
        wf = np.ascontiguousarray(wf, dtype=np.float64)
        self._assert_success(_lib.msymSymmetrySpeciesComponents(self._ctx, wf_size, wf, species_size, species))
        return species
| |
import json #added to support ajax calls
import re
#custom objects defined in __init__.py
from . import Individual, Stickers, Sticker, Permit, Makes, Models, Vehicle
from datetime import date, datetime, timedelta
from django.conf import settings
from django.core import serializers #added to support ajax calls
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext, loader, Context
#from djtools.utils.mail import send_mail
from django.core.mail import send_mail
#Creates connection to informix
from djzbar.utils.informix import do_sql
import simplejson
if settings.DEBUG:
TO_LIST = ["mkishline@carthage.edu",]
else:
TO_LIST = ["mkishline@carthage.edu",]
BCC = settings.MANAGERS
def search(request, redir_acad_yr = None, redir_txt = '', redir_id = 0):
    """Render the permit-manager search page.

    Accepts either a POST from the search form or redirect parameters
    (redir_*) from the create/update views. When a valid person ID is
    present, loads that Individual plus the lots they may park in.
    """
    # The academic year rolls over in April: before April, the current
    # calendar year still belongs to the previous academic year.
    thisYear = date.today().year - 1
    if date.today().month >= 4:
        thisYear = thisYear + 1
    acadYearRange = range(2012, thisYear + 1)
    acadYear = '%s%s' % (thisYear % 100, (thisYear % 100) + 1)
    # Build [value, label] pairs like ['1314', '2013-2014'] for the drop-down.
    acadYearList = []
    for yr in acadYearRange:
        val = '%s%s' % (yr % 100, (yr % 100) + 1)
        txt = '%s-%s' % (yr, yr + 1)
        acadYearList.append([val, txt])
    search = {'year':thisYear,'acadYear':acadYear,'ID':0,'text':''}
    carYears = getCarYears()
    states = getStates()
    lots = []
    individual = None
    debugvar = ''
    if request.method == 'POST':
        search['acadYear'] = request.POST.get('academicYear')
        search['text'] = request.POST.get('searchText')
        search['ID'] = request.POST.get('searchID')
    elif redir_acad_yr != None:
        search['acadYear'] = redir_acad_yr
        search['text'] = redir_txt
        search['ID'] = redir_id
    # BUGFIX: derive the enrollment calendar year from the acadYear that was
    # actually selected (previously computed before the POST override, so
    # searches in past years used the current year). Spring terms fall in
    # the second half of the academic year.
    year = '20' + search['acadYear'][0:2]
    if date.today().month <= 5:
        year = '20' + search['acadYear'][2:4]
    isSearched = False
    # BUGFIX: POST/URL values arrive as strings; coerce before the numeric
    # comparison (a string compared against 0 is unreliable and made empty
    # searches look like valid IDs under Python 2).
    try:
        searchID = int(search['ID'])
    except (TypeError, ValueError):
        searchID = 0
    if searchID > 0:
        isSearched = True
        individual = Individual(search['ID'], year, search['acadYear'])
        # Off-campus and commuter residents get commuter-only lot choices.
        lots = getLots(individual.bldg != 'CMTR' and individual.bldg != 'OFF', individual.bldg == 'APT')
    summary = getLotSummary(search['acadYear'])
    return render_to_response(
        "manager/search.html", {
            'years':acadYearList,'search':search,'currentAcadYear':acadYear,
            'person':individual,'isSearched':isSearched,
            'lots':lots,'states':states,'summary':summary,'carYears':carYears,
            'present':date.today(),'debug':'%s' % (debugvar)
        },
        context_instance=RequestContext(request)
    )
def create(request):
    """Create a vehicle record for a person and optionally issue a sticker.

    Expects POST data from the manager search page; afterwards redirects
    back to the search view for the same person and academic year.
    """
    #Create a vehicle record and associate it with a user
    vehicleInsert = addVehicle(
        request.POST.get('studentId'),
        request.POST.get('license'),
        request.POST.get('st_plate'),
        request.POST.get('carMake'),
        request.POST.get('carModel'),
        request.POST.get('carYear'),
        request.POST.get('acadYear')
    )
    # If a sticker was specified and the status was changed,
    # create the permit record
    if request.POST.get('sticker') != '' and request.POST.get('sticker') != None:
        assignStickerToVehicle(
            request.POST.get('sticker'),
            vehicleInsert,
            request.POST.get('active_date'),
            request.POST.get('inactive_date'),
            request.POST.get('permitComment')
        )
    # Placeholder makes are flagged so staff can follow up; the notification
    # e-mail itself is currently disabled (dead string block below).
    if request.POST.get('carMake').lower() == 'zzgenericmake':
        email_data = {
            'actionType':'created','comment':request.POST.get('permitComment')
        }
        """
        send_mail(
        None, ['mkishline@carthage.edu'], 'Generic vehicle in Parking Admin', 'confirmation@carthage.edu',
        'manager/email_genericvehicle.html', email_data
        )
        """
    # Disabled debug rendering of the raw insert result.
    """
    return render_to_response(
    "manager/success.html",
    {'debug':vehicleInsert },
    context_instance=RequestContext(request)
    )
    """
    # Redirect back to the search page, preserving the person/year context
    # when available; fall back to a blank search on reverse() failure.
    try:
        redirect = reverse(
            'manager_search_redirect',
            kwargs={
                'redir_id':request.POST.get('searchID'),
                'redir_acad_yr':request.POST.get('academicYear')
            }
        )
    except:
        redirect = reverse('manager_search')
    return HttpResponseRedirect(redirect)
def update(request):
    """Update a vehicle record and, if requested, (re)assign its sticker.

    Dispatches on POST 'takeAction': "update" edits the vehicle and permit,
    "delete" is currently a no-op stub. Always redirects back to search.
    """
    if request.POST.get('takeAction') == "update":
        #Update the vehicle record
        vehicleUpdate = updateVehicle(
            request.POST.get('veh_no'),
            request.POST.get('license'),
            request.POST.get('st_plate'),
            request.POST.get('carMake'),
            request.POST.get('carModel'),
            request.POST.get('carYear')
        )
        veh = Vehicle().loadByID(int(vehicleUpdate))
        #If a sticker was specified, create the permit record
        if request.POST.get('sticker') != '' and request.POST.get('sticker') != None:
            # No status change requested: refresh dates/comment on the
            # existing permit and re-point the sticker assignment.
            if request.POST.get('permitstatus') == '' or request.POST.get('permitstatus') == None:
                vehicleUpdate = permitUpdate(
                    request.POST.get('permit_no'),
                    request.POST.get('active_date'),
                    request.POST.get('inactive_date'),
                    request.POST.get('permitComment')
                )
                #assignSticker(sticker_txt, veh_no, active_date, permit_no = None, inactive_date = None, permit_comment = ''):
                """
                assignStickerNoInsert(
                request.POST.get('sticker'),
                request.POST.get('veh_no'),
                request.POST.get('permit_no'),
                request.POST.get('active_date')
                )
                """
                assignSticker(
                    request.POST.get('sticker'),
                    request.POST.get('veh_no'),
                    request.POST.get('active_date'),
                    request.POST.get('permit_no')
                )
            else:
                # Status change (e.g. lost/surrendered): retire the old
                # sticker and permit before assigning the new sticker.
                #If a sticker already exists for the vehicle
                if veh.permitid != None and veh.permitid > 0:
                    #Get the existing sticker attached to the record
                    sticker = Sticker(veh.permit_code, veh.acad_yr).updateStatus(request.POST.get('permitstatus'))
                    #Change the sticker's status
                    #sticker.updateStatus(request.POST.get('permitstatus'))
                    #Inactivate the parking permit record (also clears a lotloctn space)
                    old_permit = Permit(veh.permitid).inactivate()
                    #Assign the sticker to the vehicle (creates the permit record)
                    """
                    assignStickerToVehicle(
                    request.POST.get('sticker'),
                    request.POST.get('veh_no'),
                    request.POST.get('active_date'),
                    request.POST.get('inactive_date'),
                    request.POST.get('permitComment')
                    )
                    """
                    assignSticker(
                        request.POST.get('sticker'),
                        request.POST.get('veh_no'),
                        request.POST.get('active_date'),
                        request.POST.get('permit_no'),
                        request.POST.get('inactive_date'),
                        request.POST.get('permitComment')
                    )
    elif request.POST.get('takeAction') == "delete":
        # Deletion is intentionally disabled; kept as a stub.
        #vehicleUpdate = expireVehicle(request.POST.get('veh_no'))
        vehicleUpdate = "Delete"
    else:
        vehicleUpdate = ("Action '%s' did not match") % (request.POST.get('takeAction'))
    # Redirect back to the search page, preserving context when possible.
    try:
        redirect = reverse(
            'manager_search_redirect',
            kwargs={
                'redir_id':request.POST.get('searchID'),
                'redir_acad_yr':request.POST.get('academicYear')
            }
        )
    except:
        redirect = reverse('manager_search')
    return HttpResponseRedirect(redirect)
#Get the collection of lots that are available to an individual given their residency information
def getLots(isResident = None, isInApt = None, isMotorcycle = None, includeFull = False):
    """Return lot rows (lotcode, txt, spots) available for a residency profile.

    Arguments may be booleans or the strings 'true'/'false' (AJAX callers
    pass raw GET values straight through).

    NOTE(review): if isResident is None the non-motorcycle SQL ends with an
    empty parenthesized condition and will not parse — callers appear to
    always supply a value. On query failure the raw SQL string is returned
    instead of rows (ajaxLots relies on this for debugging output).
    """
    if isMotorcycle == True or isMotorcycle == 'true':
        # Motorcycles always map to the virtual MCYC lot; no spot counting.
        lotSQL = (
            ' SELECT TRIM(lot.lotcode) AS lotcode, TRIM(lot.txt) AS txt, 0 AS spots'
            ' FROM lot_table lot'
            ' WHERE TODAY BETWEEN lot.active_date AND NVL(lot.inactive_date, TODAY)'
            ' AND lot.lotcode = "MCYC"'
        )
    else:
        lotSQL = (' SELECT TRIM(spaces.lotcode) AS lotcode, TRIM(lot.txt) AS txt, COUNT(spaces.lotloctn) AS spots'
            ' FROM lot_table lot INNER JOIN prkglot_rec spaces ON lot.lotcode = spaces.lotcode'
            ' WHERE TODAY BETWEEN lot.active_date AND NVL(lot.inactive_date, TODAY)'
            #' AND spaces.lot_stat = ""'
            ' AND'
            ' ('
        )
        #Check if arguments were passed into the function
        if isResident != None:
            #If the individual is a campus resident they have access to
            #all lots besides Commuter, Lot S, and the virtual Motorcycle lot
            if isResident == 'true' or isResident == True:
                lotSQL += ' lot.lotcode NOT IN ("CMTR","LOTS","MCYC")'
                #Individuals who live in the apartments are allowed to buy a permit for Lot S
                if isInApt == True or isInApt == 'true':
                    lotSQL += ' OR lot.lotcode = "LOTS"'
            else:
                lotSQL += ' lot.lotcode = "CMTR"'
        lotSQL += ') GROUP BY spaces.lotcode, lot.txt'
        # Full-lot filtering is intentionally disabled.
        #if not includeFull or includeFull == 'false':
        #    lotSQL += ' HAVING COUNT(spaces.lotloctn) > 0'
    lotSQL += ' ORDER BY TRIM(lot.txt)'
    try:
        lot_results = do_sql(lotSQL).fetchall()
    except:
        # Best-effort: surface the SQL itself so callers can show/debug it.
        lot_results = lotSQL
    return lot_results
def getCarYears(minYear = 1947):
    """Return the selectable car model years, minYear through next year.

    BUGFIX: the previous implementation ignored its minYear parameter and
    always started at a hard-coded 1947. The default is now 1947 so no-arg
    callers see identical output, while explicit callers can narrow the
    range.
    """
    return range(minYear, date.today().year + 2)
#Get the list of states to be used in the drop down lists
def getStates():
    """Return (st, txt) rows for the state drop-down lists.

    Only states with valid zip-zone ranges are included.
    """
    sql = (
        ' SELECT TRIM(st) AS st, TRIM(txt) AS txt'
        ' FROM st_table'
        ' WHERE low_zone > 0'
        ' AND high_zone > 0'
        ' ORDER BY txt'
    )
    return do_sql(sql).fetchall()
def getLotSummary(acadYear):
    """Return per-lot, per-status space counts for an academic year.

    Each row carries the lot label, a human-readable space status, the
    number of spaces in that status, and whether the lot as a whole still
    has open spaces ('Available') or is 'Full'. Reserved/fleet spaces
    ('R','F') are excluded from the counts.
    """
    summarySQL = (
        " SELECT"
        " TRIM(lot_table.txt) lot_txt,"
        " CASE TRIM(lot_rec.lot_stat)"
        " WHEN 'A' THEN 'Allocated/Sold'"
        " WHEN '' THEN 'Available'"
        " WHEN 'S' THEN 'Held in Reserve'"
        " WHEN 'R' THEN 'Reserved (dumpsters/construction)'"
        #" WHEN 'R' THEN 'Dumpster'"
        " WHEN 'H' THEN 'Handicap'"
        " WHEN 'W' THEN 'Waitlist'"
        #" WHEN 'F' THEN 'Fleet Vehicle'"
        " END AS status,"
        " COUNT(*) AS spaces,"
        " CASE"
        " WHEN NVL(lotSpaces.hasSpaces,0) = 0 THEN 'Full'"
        " ELSE 'Available'"
        " END AS lot_status"
        " FROM prkglot_rec lot_rec INNER JOIN lot_table ON lot_rec.lotcode = lot_table.lotcode"
        #" AND lot_rec.lot_acadyr = lot_table.acadyr"
        # Subquery: how many unallocated (lot_stat = '') spaces each lot has.
        " LEFT JOIN ("
        " SELECT lotcode, COUNT(*) hasSpaces"
        " FROM prkglot_rec"
        " WHERE lot_stat = ''"
        " AND lot_acadyr = '%s'"
        " GROUP BY lotcode"
        " ) lotSpaces ON lot_rec.lotcode = lotSpaces.lotcode"
        " WHERE TODAY BETWEEN lot_rec.active_date AND NVL(lot_rec.inactive_date, TODAY)"
        " AND TODAY BETWEEN lot_table.active_date AND NVL(lot_table.inactive_date, TODAY)"
        #" AND acadyr = '%s'"
        " AND lot_acadyr = '%s'"
        " AND lot_rec.lot_stat NOT IN ('R','F')"
        " GROUP BY lot_txt, lot_rec.lot_stat, lot_status"
        " ORDER BY lot_txt, status"
    ) % (acadYear, acadYear)
    return do_sql(summarySQL).fetchall()
#SQL commented out to prevent accidental inserts
def addVehicle(id, license, st_plate, make, model, model_yr, acadyr):
    """Insert a veh_rec row and return the new veh_no.

    The row is re-selected by (id, license, acadyr) to recover the
    generated key, since dbinfo-based retrieval is disabled (see comment).

    NOTE(review): values are interpolated directly into the SQL string —
    injection risk inherited from this module's do_sql style; parameterize
    if the driver supports it.
    """
    insertVehicleSQL = (
        " INSERT INTO veh_rec (id, license, st_plate, make, model, model_yr, acadyr, issued_date)"
        " VALUES (%s, '%s', '%s', '%s', '%s', %s, '%s', TODAY)"
    ) % (id, license, st_plate, make, model, model_yr, acadyr)
    do_sql(insertVehicleSQL)
    #getVehicleSQL = ("SELECT DISTINCT dbinfo('sqlca.sqlerrd1') AS veh_no FROM veh_rec")
    getVehicleSQL = ("SELECT veh_no FROM veh_rec WHERE id = %s AND license = '%s' AND acadyr = '%s'") % (id, license, acadyr)
    veh_results = do_sql(getVehicleSQL).fetchone()
    return veh_results.veh_no
    #return insertVehicleSQL
def updateVehicle(veh_no, license, st_plate, make, model, model_yr):
    """Update license/plate/make/model/year on an existing veh_rec row.

    Returns veh_no so callers can reload the vehicle afterwards.
    """
    sql = (
        " UPDATE veh_rec"
        " SET license = '%s',"
        " st_plate = '%s',"
        " make = '%s',"
        " model = '%s',"
        " model_yr = %s"
        " WHERE"
        " veh_no = %s"
    ) % (license, st_plate, make, model, model_yr, veh_no)
    do_sql(sql)
    return veh_no
def reserveSpot(acadYear, lot):
    """Allocate the lowest-numbered free space in a lot for an academic year.

    Finds the minimum free lotloctn (lot_stat = ''), flags it as allocated
    ('A'), and returns its lot_no.

    NOTE(review): raises AttributeError if the lot has no free spaces
    (fetchone() returns None) — confirm callers guard against full lots.
    """
    getNextSQL = (
        " SELECT"
        " lotcode, MIN(lot_no) AS lot_no, MIN(lotloctn) AS lotloctn"
        " FROM"
        " prkglot_rec"
        " WHERE"
        " TODAY BETWEEN active_date AND NVL(inactive_date, TODAY)"
        " AND"
        " TRIM(lot_stat) = ''"
        " AND"
        " lot_acadyr = '%s'"
        " AND"
        " lotcode = '%s'"
        " GROUP BY lotcode"
    ) % (acadYear, lot)
    getNext = do_sql(getNextSQL).fetchone()
    reserveSpotSQL = (
        " UPDATE prkglot_rec"
        " SET lot_stat = 'A'"
        " WHERE"
        " lot_no = %s"
    ) % (getNext.lot_no)
    do_sql(reserveSpotSQL)
    return getNext.lot_no
    #return ("%s<br /><br />%s") % (getNextSQL, reserveSpotSQL)
def assignPermit(id, veh_no, nextSpot, acadYear):
    """Insert a prkgpermt_rec row tying veh_no to the lot spot nextSpot.

    BUGFIX: the previous version interpolated the undefined Python name
    ``TODAY`` into the values tuple, raising NameError on every call.
    The Informix TODAY keyword is now embedded directly (unquoted) in the
    SQL for the active_date column instead.
    """
    insertPermitSQL = (
        " INSERT INTO prkgpermt_rec"
        " (lotcode, lotloctn, permit_code, acadyr, permt_id, veh_no, permt_stat, active_date, permtcmmnt)"
        " VALUES ('%s', '%s', '%s', '%s', %s, %s, '%s', TODAY, '%s')"
    ) % (nextSpot.lotcode, nextSpot.lotloctn, '', acadYear, id, veh_no, '', '')
    do_sql(insertPermitSQL)
def permitUpdate(permit_no, active_date, inactive_date, comment):
    """Refresh the dates and comment on an existing permit record.

    Returns the executed SQL string (callers display it for debugging).
    """
    sql = (
        " UPDATE prkgpermt_rec"
        " SET active_date = '%s',"
        " inactive_date = '%s',"
        " permtcmmnt = '%s'"
        " WHERE permt_no = %s"
    ) % (active_date, inactive_date, comment, permit_no)
    do_sql(sql)
    return sql
def assignStickerToVehicle(sticker_txt, veh_no, active_date, inactive_date = None, permit_comment = ''):
    """Issue an unassigned sticker to a vehicle, creating the permit record.

    Marks the sticker sold, consumes the lowest free space in the sticker's
    associated lot (tagging it "<person id>,<veh_no>" so it can be found
    again), then inserts the prkgpermt_rec row. Returns -1 if no unissued
    sticker matches sticker_txt; None otherwise.
    """
    #Get vehicle record
    vehicle = Vehicle().loadByID(veh_no)
    #Get sticker record
    selectStickerSQL = (
        " SELECT sticker.*"
        " FROM prkgstckr_rec sticker"
        " WHERE sticker.permit_txt = '%s'"
        " AND sticker.permt_stat = ''"
    ) % (sticker_txt)
    sticker_results = do_sql(selectStickerSQL)
    sticker = sticker_results.fetchone()
    if sticker != None:
        #Update prkgstckr_rec (flag sticker as sold)
        updateStickerSQL = (
            " UPDATE prkgstckr_rec"
            " SET permt_stat = 'A'"
            " , issue_date = '%s'"
            " WHERE"
            " permit_stckrcd = '%s'"
        ) % (active_date, sticker.permit_stckrcd)
        do_sql(updateStickerSQL)
        #Get the next available spot in the lot
        # The lotcmmnt tag lets us re-select the exact space just consumed.
        consumeLotLocationSQL = (
            " UPDATE prkglot_rec"
            " SET lot_stat = 'A'"
            " , lotcmmnt = '%s,%s'"
            " WHERE"
            " lotcode = '%s'"
            " AND"
            " lotloctn = ("
            " SELECT MIN(lotloctn)"
            " FROM prkglot_rec"
            " WHERE lotcode = '%s'"
            " AND lot_stat = ''"
            " AND lot_acadyr = '%s'"
            " )"
        ) % (vehicle.id, veh_no, sticker.permit_assocdlot, sticker.permit_assocdlot, vehicle.acad_yr)
        do_sql(consumeLotLocationSQL)
        #Get lot location detail
        getLotLocationSQL = (
            " SELECT prkglot_rec.*"
            " FROM prkglot_rec"
            " WHERE lotcmmnt = '%s,%s'"
        ) % (vehicle.id, veh_no)
        lot_loc = do_sql(getLotLocationSQL).fetchone()
        #Create prkgpermt_rec record
        insertPermitSQL = (
            " INSERT INTO prkgpermt_rec (lotcode, lotloctn, permit_code, acadyr, permt_id, veh_no, permt_stat, active_date, inactive_date, permtcmmnt)"
            " VALUES ('%s', '%s', '%s', '%s', %s, %s, '%s', '%s', '%s', '%s')"
        ) % (sticker.permit_assocdlot, lot_loc.lotloctn, sticker.permit_stckrcd, vehicle.acad_yr, vehicle.id, veh_no, 'A', active_date, inactive_date, permit_comment)
        do_sql(insertPermitSQL)
    else:
        return -1
    return None
def assignStickerNoInsert(sticker_txt, veh_no, permit_no, active_date):
    """Issue a sticker against an EXISTING permit record (UPDATE, no INSERT).

    Mirrors assignStickerToVehicle but updates permit permit_no in place.
    NOTE(review): the None-check below tests the do_sql result object, not
    the fetched row — an unmatched sticker would surface as an
    AttributeError on sticker_current, presumably never hit in practice.
    """
    vehicle = Vehicle().loadByID(veh_no)
    # Only unissued stickers (permt_stat = '') are eligible.
    selectStickerSQL = (
        " SELECT stckr.*"
        " FROM prkgstckr_rec stckr"
        " WHERE stckr.permit_txt = '%s'"
        " AND stckr.permt_stat = ''"
    ) % (sticker_txt)
    sticker_results = do_sql(selectStickerSQL)
    if sticker_results != None:
        sticker_current = sticker_results.fetchone()
        # Flag the sticker as sold.
        updateStickerSQL = (
            " UPDATE prkgstckr_rec"
            " SET permt_stat = 'A'"
            " , issue_date = '%s'"
            " WHERE"
            " permit_stckrcd = '%s'"
        ) % (active_date, sticker_current.permit_stckrcd)
        do_sql(updateStickerSQL)
        # Consume the lowest free space in the sticker's lot, tagging it
        # "<person id>,<veh_no>" so it can be re-selected below.
        consumeLotLocationSQL = (
            " UPDATE prkglot_rec"
            " SET lot_stat = 'A'"
            " , lotcmmnt = '%s,%s'"
            " WHERE"
            " lotcode = '%s'"
            " AND"
            " lotloctn = ("
            " SELECT MIN(lotloctn)"
            " FROM prkglot_rec"
            " WHERE lotcode = '%s'"
            " AND lot_stat = ''"
            " )"
        ) % (vehicle.id, veh_no, sticker_current.permit_assocdlot, sticker_current.permit_assocdlot)
        do_sql(consumeLotLocationSQL)
        getLotLocationSQL = (
            " SELECT prkglot_rec.*"
            " FROM prkglot_rec"
            " WHERE lotcmmnt = '%s,%s'"
        ) % (vehicle.id, veh_no)
        lot_loc = do_sql(getLotLocationSQL).fetchone()
        # Point the existing permit at the new sticker and location.
        updatePermitSQL = (
            " UPDATE prkgpermt_rec"
            " SET lotcode = '%s'"
            " , lotloctn = '%s'"
            " , permit_code = '%s'"
            " , permt_stat = 'A'"
            " WHERE permt_no = %s"
        ) % (sticker_current.permit_assocdlot, lot_loc.lotloctn, sticker_current.permit_stckrcd, permit_no)
        do_sql(updatePermitSQL)
def assignSticker(sticker_txt, veh_no, active_date, permit_no = 0, inactive_date = None, permit_comment = ''):
    """Assign sticker sticker_txt to vehicle veh_no.

    Marks the sticker as issued for the vehicle's academic year, picks a
    lot location (preferring the vehicle's current one for an existing
    permit, else the lowest free space), flags that location allocated,
    then either inserts a new prkgpermt_rec (permit_no == 0) or updates
    the existing permit row.

    NOTE(review): callers pass permit_no from POST data as a *string*, so
    ``permit_no == 0`` only matches the integer default; a posted '0'
    takes the UPDATE branch. Confirm intent before changing.
    """
    vehicle = Vehicle().loadByID(veh_no)
    # Only unissued stickers (permt_stat = '') are eligible.
    selectStickerSQL = (
        " SELECT stckr.*"
        " FROM prkgstckr_rec stckr"
        " WHERE stckr.permit_txt = '%s'"
        " AND stckr.permt_stat = ''"
    ) % (sticker_txt)
    sticker_results = do_sql(selectStickerSQL)
    sticker_current = sticker_results.fetchone()
    if sticker_current:
        # Flag the sticker as allocated/sold for this academic year.
        updateStickerSQL = (
            " UPDATE prkgstckr_rec"
            " SET permt_stat = 'A'"
            " , issue_date = '%s'"
            " WHERE"
            " permit_stckrcd = '%s'"
            " AND permit_acadyr = '%s'"
        ) % (active_date, sticker_current.permit_stckrcd, vehicle.acad_yr)
        do_sql(updateStickerSQL)
        # Choose a location: a space already tied to this permit sorts
        # first (priority 0), otherwise the lowest-numbered free space.
        getLotLocationSQL = (
            " SELECT"
            " FIRST 1 lotloctn"
            " FROM"
            " ("
            " SELECT"
            " lot_rec.lotloctn,"
            " CASE"
            " WHEN lot_rec.lot_stat = 'A' THEN 0"
            " ELSE lot_rec.lotloctn::integer"
            " END AS priority"
            " FROM"
            " prkglot_rec lot_rec LEFT JOIN prkgpermt_rec permit_rec ON lot_rec.lotcode = permit_rec.lotcode"
            " AND lot_rec.lot_acadyr = permit_rec.acadyr"
            " AND lot_rec.lotloctn = permit_rec.lotloctn"
            " WHERE"
            " lot_rec.lotcode = '%s'"
            " AND"
            " lot_rec.lot_acadyr = '%s'"
            " AND"
            " ("
            " lot_rec.lot_stat = ''"
            " OR"
            " ("
            " lot_rec.lot_stat = 'A'"
            " AND"
            " permit_rec.permt_no IS NOT NULL"
            " AND"
            " permit_rec.permt_no = %s"
            " )"
            " )"
            " ORDER BY lot_stat DESC, lot_rec.lotloctn"
            " )"
        ) % (sticker_current.permit_assocdlot, vehicle.acad_yr, permit_no)
        lot_loc = do_sql(getLotLocationSQL).first()
        # Mark the chosen location taken, tagging it "<person id>,<veh_no>".
        consumeLotLocationSQL = (
            " UPDATE prkglot_rec"
            " SET lot_stat = 'A'"
            " , lotcmmnt = '%s,%s'"
            " WHERE"
            " lotcode = '%s'"
            " AND"
            " lot_acadyr = '%s'"
            " AND"
            " lotloctn = '%s'"
        ) % (vehicle.id, veh_no, sticker_current.permit_assocdlot, vehicle.acad_yr, lot_loc.lotloctn)
        do_sql(consumeLotLocationSQL)
        # Targeted debug notification for one problem account.
        if vehicle.id == 1319170:
            send_mail("Debug parking",
                "Student: %s\n permit_no: %s\n lotloctn: %s\n int permit number is 0: %s" % (vehicle.id, permit_no, lot_loc.lotloctn, int(permit_no) == 0),
                "confirmation@carthage.edu",['mkishline@carthage.edu'],
                fail_silently=True
            )
        if permit_no == 0:
            # BUGFIX: this branch previously referenced the undefined name
            # ``sticker`` (NameError); the fetched row is sticker_current.
            permitSQL = (
                " INSERT INTO prkgpermt_rec (lotcode, lotloctn, permit_code, acadyr, permt_id, veh_no, permt_stat, active_date, inactive_date, permtcmmnt)"
                " VALUES ('%s', '%s', '%s', '%s', %s, %s, '%s', '%s', '%s', '%s')"
            ) % (sticker_current.permit_assocdlot, lot_loc.lotloctn, sticker_current.permit_stckrcd, vehicle.acad_yr, vehicle.id, veh_no, 'A', active_date, inactive_date, permit_comment)
        else:
            permitSQL = (
                " UPDATE prkgpermt_rec"
                " SET lotcode = '%s'"
                " , lotloctn = '%s'"
                " , permit_code = '%s'"
                " , permt_stat = 'A'"
                " WHERE permt_no = %s"
            ) % (sticker_current.permit_assocdlot, lot_loc.lotloctn, sticker_current.permit_stckrcd, permit_no)
        if vehicle.id == 1319170:
            send_mail("Debug parking SQL","%s" % (permitSQL), 'confirmation@carthage.edu', ['mkishline@carthage.edu'], fail_silently=True)
        do_sql(permitSQL)
def removeStickerFromVehicle(permit_status, veh_no):
    """Retire a vehicle's sticker and inactivate its permit.

    permit_status is the sticker disposition code: S=Surrender, L=Lost,
    D=Damaged/Destroyed. The inline SQL versions below are retired in
    favor of the Sticker/Permit model helpers.
    """
    #Update prkgpermt_rec (set inactive date)
    #Update prkgstckr_rec (flag sticker as S=Surrender, L=Lost, D=Damaged/Destroyed)
    #Get vehicle information (to be used in sticker query)
    vehicle = Vehicle().loadByID(veh_no)
    #Update sticker status
    """
    updateStickerSQL = (
    " UPDATE prkgstckr_rec"
    " SET permt_stat = '%s'"
    " , inactive_date = TODAY"
    " WHERE permit_stckrcd = '%s'"
    " AND permit_acadyr = '%s'"
    ) % (permit_status, vehicle.permit_code, vehicle.acad_yr)
    do_sql(updateStickerSQL)
    """
    Sticker(vehicle.permit_code, vehicle.acad_yr).updateStatus(permit_status)
    """
    updatePermitSQL = (
    " UPDATE prkgpermt_rec"
    " SET inactive_date = TODAY"
    " WHERE veh_no = %s"
    ) % (veh_no)
    """
    # Inactivating the permit also frees its lot location.
    Permit(vehicle.permitid).inactivate()
    return None
def expireVehicle(veh_no):
    """Soft-delete a vehicle by stamping its inactive_date with TODAY.

    Returns veh_no for caller convenience.
    """
    sql = (
        " UPDATE veh_rec"
        " SET inactive_date = TODAY"
        " WHERE veh_no = %s"
    ) % (veh_no)
    do_sql(sql)
    return veh_no
def ajaxLots(request):
    """AJAX endpoint: the lots available for a residency profile, as JSON."""
    rows = getLots(
        request.GET.get('isResident'),
        request.GET.get('isInApt'),
        request.GET.get('isMotorcycle')
    )
    payload = []
    try:
        for row in rows:
            payload.append({'lotcode': row.lotcode, 'txt': row.txt})
    except Exception:
        # On query failure getLots returns the raw SQL string instead of
        # rows; surface it so the front end shows something for debugging.
        payload.append({'lotcode': 'none', 'txt': rows})
    return HttpResponse(simplejson.dumps(payload), content_type="application/json")
def ajaxCarMakes(request):
    """AJAX endpoint: comma-delimited make codes for a given model year."""
    #Retrieve the list of car makes for a specific year
    makes = Makes().getByYear(request.GET.get('year'))
    #Create string of makes delimited by a ','
    tmp = ','.join([make.make_code for make in makes])
    # BUGFIX: content type was misspelled "applicaton/json".
    return HttpResponse(simplejson.dumps(tmp), content_type="application/json")
def ajaxCarModels(request):
    """AJAX endpoint: comma-delimited model codes for a year/make pair.

    Returns an empty string when no models match.
    """
    car_year = request.GET.get('year')
    car_make = request.GET.get('make')
    models = Models().getByYearMake(car_year, car_make)
    codes = ','.join([m.model_code for m in models]) if len(models) else ''
    return HttpResponse(simplejson.dumps(codes), content_type="application/json")
def ajaxStickers(request):
    """AJAX endpoint: comma-delimited sticker labels available for a lot."""
    stickers = Stickers().forLot(
        request.GET.get('lotcode'),
        request.GET.get('acadYear'),
        request.GET.get('originalSticker')
    )
    labels = ','.join([s.permit_txt for s in stickers])
    return HttpResponse(simplejson.dumps(labels), content_type="application/json")
#def ajaxSearch(request, acadYear):
def ajaxSearch(request):
    """AJAX endpoint backing the jQueryUI autocomplete on the search page.

    Matches the term as an ID, a permit/sticker label, or a last-name
    prefix, and returns [{'id':..., 'value':...}, ...] as JSON. On query
    failure the raw SQL is returned for debugging.

    NOTE(review): the term is interpolated directly into the SQL —
    injection risk inherited from this module's do_sql style.
    """
    acadYear = request.GET.get('acadYear')
    #The jQueryUI autocomplete widget passes its value as a GET variable called "term"
    searchTerm = request.GET.get('term')
    #Academic year is passed as part of the URL. Since academic years are a concatenation of the last two digits of the years (ie 1314 = 2013-2014)
    #we simply take the first two characters and prepend "20" resulting in "2013"
    thisYear = '20' + acadYear[0:2]
    if date.today().month <= 5:
        thisYear = '20' + acadYear[2:4]
    searchSQL = (
        ' SELECT'
        ' TRIM(IDrec.lastname) || ", " || TRIM(IDrec.firstname) || " (" || IDrec.id || ")" AS value, IDrec.id, IDrec.firstname, IDrec.lastname'
        ' FROM'
        ' id_rec IDrec INNER JOIN stu_serv_rec SRVrec ON IDrec.id = SRVrec.id'
        ' AND SRVrec.yr = %s'
    ) % (thisYear)
    #Does the search term match the format of an ID? (numeric value of 4 characters or more)
    if re.match(r'\d{4,}', searchTerm) != None:
        searchSQL += ' WHERE IDrec.id = %s' % (searchTerm)
    #Does the search term match the format for a permit? (alpha character followed by an optional "-" and then 1 or more digits)
    elif re.match(r'[a-z]{1,2}\-?\d{1,}', searchTerm, re.I) != None:
        # Join through vehicle -> permit -> sticker; dashes are stripped
        # because sticker labels are stored without them.
        searchSQL += (
            ' LEFT JOIN veh_rec VEHrec ON IDrec.id = VEHrec.id'
            ' AND VEHrec.acadyr = "%s"'
            ' LEFT JOIN prkgpermt_rec PRKrec ON VEHrec.veh_no = PRKrec.veh_no'
            ' LEFT JOIN prkgstckr_rec STKrec ON PRKrec.permit_code = STKrec.permit_stckrcd'
            ' AND VEHrec.acadyr = STKrec.permit_acadyr'
            ' WHERE LOWER(STKrec.permit_txt) LIKE LOWER("%s%%")'
        ) % (acadYear, re.sub(r'\-', '', searchTerm))
    #If neither of the above conditions are met, we assume the search term represents the last name
    else:
        searchSQL += ' WHERE LOWER(IDrec.lastname) LIKE LOWER("%s%%")' % (searchTerm)
    #Add grouping to filter out duplicates and sorting so multiple results are more easily viewed by user
    searchSQL += (
        ' GROUP BY value, id, IDrec.lastname, IDrec.firstname'
        ' ORDER BY IDrec.lastname, IDrec.firstname'
    )
    #Create a serializable entity with the results from the query
    jsonDump = []
    try:
        search_results = do_sql(searchSQL).fetchall()
        for result in search_results:
            tmpdict = {'id':result.id,'value':result.value}
            jsonDump.append(tmpdict)
    except:
        # Best-effort debug aid: return the SQL itself on failure.
        jsonDump = searchSQL
    return HttpResponse(simplejson.dumps(jsonDump), content_type="application/json")
| |
""" Grid splitting model with connection database back references.
The Grid object provides methods to manipulate a 2D grid of Tile objects, that
contain zero or more Site objects. Site objects are considered immutable.
To construct the Grid object, the initial grid and an empty_tile_type_pkey must
be provided. The initial grid should be provided as a map of 2 element int
tuples to Tile objects. Tile objects should already contain their initial
sites prior to construction of the Grid object.
"""
from enum import Enum
from collections import namedtuple
class Direction(Enum):
    """ Grid directions.

    Values are arbitrary but stable: their reprs (e.g. <Direction.SOUTH: 2>)
    appear in doctests elsewhere in this module.
    """
    NORTH = 1
    SOUTH = 2
    EAST = 3
    WEST = 4
# Module-level aliases so callers can write NORTH instead of Direction.NORTH.
NORTH = Direction.NORTH
SOUTH = Direction.SOUTH
EAST = Direction.EAST
WEST = Direction.WEST

# Maps each direction to its 180-degree opposite.
OPPOSITE_DIRECTIONS = {
    NORTH: SOUTH,
    SOUTH: NORTH,
    EAST: WEST,
    WEST: EAST,
}

# Zipper direction when splitting in a direction
SPLIT_NEXT_DIRECTIONS = {
    NORTH: EAST,
    SOUTH: EAST,
    EAST: SOUTH,
    WEST: SOUTH,
}
def opposite_direction(direction):
    """ Map a Direction onto its 180-degree opposite.

    >>> opposite_direction(NORTH)
    <Direction.SOUTH: 2>
    >>> opposite_direction(SOUTH)
    <Direction.NORTH: 1>
    >>> opposite_direction(EAST)
    <Direction.WEST: 4>
    >>> opposite_direction(WEST)
    <Direction.EAST: 3>
    """
    return OPPOSITE_DIRECTIONS[direction]
# Right handed coordinate system, N/S in y, E/W in x, E is x-positive,
# S is y-positive.
DIRECTION_OFFSET = {
NORTH: [0, -1],
SOUTH: [0, 1],
EAST: [1, 0],
WEST: [-1, 0],
}
def coordinate_in_direction(coord, direction):
""" Given a coordinate, returns a new coordinate 1 step in direction.
Coordinate system is right handed, N/S in y, E/W in x. E is x-positive.
S is y-positive.
Parameters
----------
coord : Tuple of 2 ints
Starting coordinate
direction : Direction
Direction to add unit vector.
Returns
-------
Tuple of 2 ints
Coordinate 1 unit step in specified direction. Will return none if
x or y coordinate is negative.
Examples
--------
>>> coordinate_in_direction((0, 0), SOUTH)
(0, 1)
>>> coordinate_in_direction((0, 0), EAST)
(1, 0)
>>> coordinate_in_direction((1, 1), SOUTH)
(1, 2)
>>> coordinate_in_direction((1, 1), NORTH)
(1, 0)
>>> coordinate_in_direction((1, 1), EAST)
(2, 1)
>>> coordinate_in_direction((1, 1), WEST)
(0, 1)
# Returns None for negative coordinates.
>>> coordinate_in_direction((0, 0), NORTH)
>>> coordinate_in_direction((1, 0), NORTH)
>>> coordinate_in_direction((0, 0), WEST)
>>> coordinate_in_direction((0, 1), WEST)
"""
x, y = coord
dx, dy = DIRECTION_OFFSET[direction]
x += dx
y += dy
if x < 0 or y < 0:
return None
else:
return x, y
class Site(namedtuple('Site', ('name', 'phy_tile_pkey', 'tile_type_pkey',
                               'site_type_pkey', 'site_pkey', 'x', 'y'))):
    """ Immutable back-reference record describing one site in a tile. """
class Tile(object):
    """ A single tile in the doubly-linked 2D grid.

    Attributes
    ----------
    root_phy_tile_pkeys : list of ints
        phy_tile_pkeys this tile is the canonical owner of.  A freshly
        created tile owns exactly the phy_tile it represents.  On merge the
        owner lists are combined; on split one output tile takes the whole
        list and the others receive none.
        Invariant: every phy_tile_pkey is owned by exactly one Tile, so this
        list provides a default assignment for that phy_tile's children
        (wires, pips, sites, etc).
    phy_tile_pkeys : list of ints
        Every phy_tile_pkey that contributed to this tile through merges or
        splits.  On a split all output tiles receive a copy of the original
        list, recording which phy_tiles were used to make this tile.
    sites : list of Site objects
        Sites contained in this tile; initially the sites of the original
        phy_tile.
        Invariant: each Site lives in exactly one Tile.
    split_sites : boolean
        True once this tile has been produced by a split.
        Invariant: a split tile holds exactly one Site.
        Invariant: two split tiles may never be merged, as the result could
        hold sites from different phy_tile_pkey, which cannot be presented
        using FASM prefixes.
    neighboors : Map of Direction to Tile object
        Doubly linked pointers to adjacent tiles.
        Invariant: the underlying mesh must stay rectangular after each
        grid-level operation.  A single Tile operation may leave the grid
        temporarily inconsistent; Grid enforces the constraint.
        Invariant: the underlying mesh must never be circular.

    """

    def __init__(
            self, root_phy_tile_pkeys, phy_tile_pkeys, tile_type_pkey, sites
    ):
        self.neighboors = {}
        self.split_sites = False
        self.sites = sites
        self.tile_type_pkey = tile_type_pkey
        self.phy_tile_pkeys = phy_tile_pkeys
        self.root_phy_tile_pkeys = root_phy_tile_pkeys

    def link_neighboor_in_direction(self, other_tile, direction_to_other_tile):
        """ Doubly link this tile to other_tile in the given direction.

        Re-establishing an identical existing connection is allowed;
        replacing a different existing connection is an error.

        Parameters
        ----------
        other_tile : Tile object
            Other Tile object to connect in specified direction.
        direction_to_other_tile : Direction
            Direction to connect other tile.

        """
        forward = direction_to_other_tile
        backward = opposite_direction(forward)

        existing = self.neighboors.get(forward)
        if existing is not None:
            assert existing is other_tile, (self.neighboors, forward)
        self.neighboors[forward] = other_tile

        existing_back = other_tile.neighboors.get(backward)
        if existing_back is not None:
            assert existing_back is self
        other_tile.neighboors[backward] = self

    def insert_in_direction(self, other_tile, direction_to_other_tile):
        """ Splice other_tile between this tile and its current neighboor.

        Parameters
        ----------
        other_tile : Tile object
            Other Tile object to insert in specified direction.
        direction_to_other_tile : Direction
            Direction to insert other tile.

        """
        forward = direction_to_other_tile
        backward = opposite_direction(forward)

        displaced = self.neighboors.get(forward)

        self.neighboors[forward] = other_tile
        other_tile.neighboors[backward] = self

        # Re-attach the displaced neighboor (if any) on the far side of the
        # inserted tile.
        if displaced is not None:
            other_tile.neighboors[forward] = displaced
            displaced.neighboors[backward] = other_tile

    def walk_in_direction(self, direction):
        """ Walk in specified direction from this Tile node.

        Parameters
        ----------
        direction : Direction
            Direction to walk in.

        Yields
        ------
        tile : Tile
            The tile whose walk_in_direction was invoked, then each
            successive tile in the given direction until the edge of the
            grid is reached.

        """
        cursor = self
        yield cursor

        while direction in cursor.neighboors:
            cursor = cursor.neighboors[direction]
            yield cursor
def check_grid_loc(grid_loc_map):
    """ Verifies input grid makes sense.

    Internal grid consistency is defined as:
    - Has an origin location @ (0, 0)
    - Is rectangular
    - Has no gaps.

    Parameters
    ----------
    grid_loc_map : Dict of 2 int tuple to Tile objects
        Grid being checked.

    Raises
    ------
    AssertionError
        If provided grid does not conform to assumptions about grid.

    """
    all_x, all_y = zip(*grid_loc_map.keys())
    width = max(all_x) + 1
    height = max(all_y) + 1

    # Every coordinate of the bounding rectangle must be present; this also
    # forces the origin (0, 0) to exist.
    for col in range(width):
        for row in range(height):
            assert (col, row) in grid_loc_map, (col, row)
def build_mesh(current, visited, loc, grid_loc_map):
    """ Stitch grid_loc_map into a double-linked list 2D mesh.

    Modifies Tile object neighboors attributes to form a doubly linked list
    2D mesh.

    It is strongly recommended that grid_loc_map be passed to check_grid_loc
    prior to calling build_mesh to verify grid invariants.

    Parameters
    ----------
    current : Tile object
        Starting tile, normally the tile at the origin (0, 0).
    visited : set of python object id's
        Should be empty on root invocation.
    loc : Location of current Tile object argument
    grid_loc_map : Dict of 2 int tuple to Tile objects
        Grid being converted to linked list form.

    """
    # The original implementation recursed once per tile, which can exceed
    # Python's default recursion limit (~1000 frames) on large grids.  An
    # explicit worklist performs the same traversal iteratively.
    stack = [(current, loc)]

    while stack:
        tile, tile_loc = stack.pop()

        # Linking only SOUTH and EAST is sufficient:
        # link_neighboor_in_direction also writes the reverse (NORTH/WEST)
        # pointer on the neighboor.
        for direction in (SOUTH, EAST):
            new_loc = coordinate_in_direction(tile_loc, direction)
            if new_loc in grid_loc_map:
                neighboor = grid_loc_map[new_loc]
                tile.link_neighboor_in_direction(neighboor, direction)

                if id(neighboor) not in visited:
                    visited.add(id(neighboor))
                    stack.append((neighboor, new_loc))
class Grid(object):
    """ Object for manipulating a 2D grid of Tile objects.

    Parameters
    ----------
    grid_loc_map : Dict of 2 int tuple to Tile objects
        Initial grid of Tile objects.
    empty_tile_type_pkey : int
        tile_type_pkey to use when creating new empty tiles during tile splits.

    """

    def __init__(self, grid_loc_map, empty_tile_type_pkey):
        # Make sure initial grid is sane.
        check_grid_loc(grid_loc_map)

        # Keep root object of grid.
        self.origin = grid_loc_map[(0, 0)]

        # Convert grid to doubly-linked list form.
        build_mesh(self.origin, set(), (0, 0), grid_loc_map)

        # Keep a stable list of all Tile objects for convenience (a list
        # rather than a dict view, so it is a plain re-iterable snapshot).
        self.items = list(grid_loc_map.values())

        self.empty_tile_type_pkey = empty_tile_type_pkey

    def column(self, x):
        """ Return Tile object at top of column.

        Parameters
        ----------
        x : int
            0 based column to retrieve.

        Returns
        -------
        top_of_column : Tile
            Tile object at top of column

        """
        top_of_column = self.origin
        for _ in range(x):
            top_of_column = top_of_column.neighboors[EAST]

        return top_of_column

    def row(self, y):
        """ Return Tile object at the start (x = 0) of row y.

        Parameters
        ----------
        y : int
            0 based row to retrieve.

        Returns
        -------
        right_of_row : Tile
            Tile object at the start of the row.

        """
        right_of_row = self.origin
        for _ in range(y):
            right_of_row = right_of_row.neighboors[SOUTH]

        # BUG FIX: the original implementation walked to the target tile but
        # never returned it (implicitly returning None).
        return right_of_row

    def split_tile(self, tile, tile_type_pkeys, split_direction, split_map):
        """ Split tile in specified direction.

        This method requires that the tiles required to perform the split
        (e.g. len(tile_type_pkeys)-1 tiles in split_direction from tile) have
        tile_type_pkey == empty_tile_type_pkey, e.g. they are empty tiles.

        If empty tiles must be inserted into the grid to accommodate the
        split, this must be done prior to calling this method.

        Parameters
        ----------
        tile : Tile object
            Tile being split
        tile_type_pkeys : List of int
            List of new tile_type_pkeys to be used after the tile split.
            The tile being split will become tile_type_pkeys[0], the next
            tile in split_direction will become tile_type_pkeys[1], etc.
        split_direction : Direction
            Which direction from tile should the split occur.
        split_map : Dict of (int, int) to int
            Mapping of site location (x, y) to tile_type_pkey indicies.
            This enables control over which sites go to which tiles based on
            their coordinate.

            min(split_map.values()) >= 0
            max(split_map.values()) < len(tile_type_pkeys)

        """
        sites = tile.sites

        # The tile being split temporarily becomes empty; its sites are
        # redistributed below.
        tile.tile_type_pkey = self.empty_tile_type_pkey
        phy_tile_pkeys = set(tile.phy_tile_pkeys)

        # Collect the len(tile_type_pkeys) tiles receiving the split,
        # starting with the tile being split itself.  All must be empty.
        new_tiles = []
        for idx, split_member in enumerate(
                tile.walk_in_direction(split_direction)):
            assert split_member.tile_type_pkey == self.empty_tile_type_pkey, (
                split_member.tile_type_pkey
            )
            split_member.phy_tile_pkeys = []
            new_tiles.append(split_member)

            if idx + 1 >= len(tile_type_pkeys):
                break

        for new_tile, new_tile_type_pkey in zip(new_tiles, tile_type_pkeys):
            assert new_tile.tile_type_pkey == self.empty_tile_type_pkey
            new_tile.tile_type_pkey = new_tile_type_pkey
            # All output tiles record every phy_tile involved in the split.
            new_tile.phy_tile_pkeys = list(
                set(new_tile.phy_tile_pkeys) | phy_tile_pkeys
            )
            new_tile.sites = []
            new_tile.split_sites = True

        # Distribute the original sites per split_map.
        for site in sites:
            site_idx = split_map[site.x, site.y]
            assert site_idx < len(tile_type_pkeys), (
                site, site_idx, tile_type_pkeys
            )
            new_tiles[site_idx].sites.append(site)

    def insert_empty(self, top, insert_in_direction):
        """ Insert empty row/column.

        Insert a row/column of empty tiles next to the row/column specified
        by top.  The new empty tiles will have tile_type_pkey set to
        empty_tile_type_pkey, and have phy_tile_pkeys of the tile they were
        inserted from.

        Parameters
        ----------
        top : Tile object
            Tile at top of the row/column adjacent to where the new
            row/column should be inserted.
        insert_in_direction : Direction
            Direction to insert empty tiles, from perspective of the
            row/column specified by top.

        """
        # Walk along the perpendicular ("zipper") direction.
        next_dir = SPLIT_NEXT_DIRECTIONS[insert_in_direction]

        # Verify that top is in fact the top of the zipper.
        assert OPPOSITE_DIRECTIONS[next_dir] not in top.neighboors

        empty_tiles = []

        for tile in top.walk_in_direction(next_dir):
            empty_tile = Tile(
                root_phy_tile_pkeys=[],
                phy_tile_pkeys=list(tile.phy_tile_pkeys),
                tile_type_pkey=self.empty_tile_type_pkey,
                sites=[]
            )
            empty_tiles.append(empty_tile)
            tile.insert_in_direction(empty_tile, insert_in_direction)

        # Stitch the new empty tiles together along the zipper direction.
        for a, b in zip(empty_tiles, empty_tiles[1:]):
            a.link_neighboor_in_direction(b, next_dir)

        self.check_grid()

    def split_in_dir(
            self,
            top,
            tile_type_pkey,
            tile_type_pkeys,
            split_direction,
            split_map,
    ):
        """ Split row/column of tiles.

        Splits specified tile types into new row/column by first inserting
        any required empty row/column in the split direction, and then
        performing the split.

        Parameters
        ----------
        top : Tile object
            Tile at top of row/column where split should be performed.
        tile_type_pkey : Tile type to split.
        tile_type_pkeys : Refer to split_tile documentation.
        split_direction : Direction
            Direction to perform the split.  A new row/column will be
            inserted in that direction to accommodate the tile split.
        split_map : Dict of (int, int) to int
            Mapping of site location (x, y) to tile_type_pkey indicies.
            This enables control over which sites go to which tiles based on
            their coordinate.

        """
        next_dir = SPLIT_NEXT_DIRECTIONS[split_direction]

        # Find how many empty rows/columns must be inserted to support the
        # split.
        num_to_insert = 0
        for tile in top.walk_in_direction(next_dir):
            if tile.tile_type_pkey != tile_type_pkey:
                continue

            for idx, tile_in_split in enumerate(
                    tile.walk_in_direction(split_direction)):
                if idx == 0:
                    continue

                if tile_in_split.tile_type_pkey != self.empty_tile_type_pkey:
                    num_to_insert = max(num_to_insert, idx)

                if idx + 1 >= len(tile_type_pkeys):
                    break

        for _ in range(num_to_insert):
            self.insert_empty(top, split_direction)

        # Now that enough empty tiles exist, perform the splits.
        for tile in top.walk_in_direction(next_dir):
            if tile.tile_type_pkey != tile_type_pkey:
                continue

            self.split_tile(tile, tile_type_pkeys, split_direction, split_map)

    def split_tile_type(
            self, tile_type_pkey, tile_type_pkeys, split_direction, split_map
    ):
        """ Split a specified tile type within grid.

        Splits specified tile types by finding each row/column that contains
        the relevant tile type, and splitting each one.

        Parameters
        ----------
        tile_type_pkey : Tile type to split.
        tile_type_pkeys : Refer to split_tile documentation.
        split_direction : Direction
            Direction to perform the split.
        split_map : Dict of (int, int) to int
            Mapping of site location (x, y) to tile_type_pkey indicies.
            This enables control over which sites go to which tiles based on
            their coordinate.

        """
        tiles_seen = set()
        tiles = []
        tops_to_split = []

        next_dir = SPLIT_NEXT_DIRECTIONS[split_direction]

        for tile in self.items:
            if id(tile) in tiles_seen:
                continue

            if tile.tile_type_pkey == tile_type_pkey:
                # Found a row/column that needs to be split; walk to the
                # bottom of the row/column, then back to the top, marking
                # every tile on the way as seen.
                for tile in tile.walk_in_direction(next_dir):
                    pass

                for tile in tile.walk_in_direction(
                        OPPOSITE_DIRECTIONS[next_dir]):
                    if id(tile) not in tiles_seen:
                        tiles_seen.add(id(tile))

                        if tile.tile_type_pkey == tile_type_pkey:
                            tiles.append(tile)

                # After the walk back, tile is the top of the row/column.
                tops_to_split.append(tile)

        for top in tops_to_split:
            self.split_in_dir(
                top, tile_type_pkey, tile_type_pkeys, split_direction,
                split_map
            )

    def merge_in_dir(self, tile, merge_direction):
        """ Merge tile in specified direction.

        Merging a tile causes the contents of that tile to be merged into
        the tile in the merge direction. The tile that was merged will
        become empty.

        The original tile root_phy_tile_pkeys and phy_tile_pkeys will appear
        first.

        Parameters
        ----------
        tile : Tile
            Tile to merge
        merge_direction : Direction
            Direction to merge tiles.

        """
        assert merge_direction in tile.neighboors, (tile, merge_direction)

        merge_into = tile.neighboors[merge_direction]

        merge_into.root_phy_tile_pkeys.extend(tile.root_phy_tile_pkeys)
        merge_into.phy_tile_pkeys.extend(tile.phy_tile_pkeys)
        merge_into.sites.extend(tile.sites)

        # The merged tile is left behind as an empty tile.
        tile.sites = list()
        tile.tile_type_pkey = self.empty_tile_type_pkey
        tile.root_phy_tile_pkeys = list()
        tile.phy_tile_pkeys = list()

    def merge_tile_type(self, tile_type_pkey, merge_direction):
        """ Merge tile types in specified direction.

        Parameters
        ----------
        tile_type_pkey : Tile type to merge.
        merge_direction : Direction
            Direction to merge tiles.

        """
        for tile in self.items:
            if tile.tile_type_pkey == tile_type_pkey:
                self.merge_in_dir(tile, merge_direction)

    def output_grid(self):
        """ Convert grid back to coordinate lookup form.

        Returns
        -------
        grid_loc_map : Dict of 2 int tuple to Tile objects
            Output grid of Tile objects.

        """
        grid_loc_map = {}

        for x, column_top in enumerate(self.origin.walk_in_direction(EAST)):
            for y, tile in enumerate(column_top.walk_in_direction(SOUTH)):
                grid_loc_map[(x, y)] = tile

        # Ensure the linked list still forms a gap-free rectangle.
        check_grid_loc(grid_loc_map)

        return grid_loc_map

    def check_grid(self):
        """ Verifies that grid linked list model still represents valid grid.
        """
        self.output_grid()
| |
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import webob.exc
from nova.api.openstack.compute.contrib import hosts as os_hosts
from nova.compute import power_state
from nova.compute import vm_states
from nova import context
from nova import db
from nova.openstack.common import log as logging
from nova import test
LOG = logging.getLogger(__name__)
# Canned host list as returned by the os-hosts index API: all services in
# both the "nova" and "nonova" zones.
HOST_LIST = {"hosts": [
        {"host_name": "host_c1", "service": "compute", "zone": "nova"},
        {"host_name": "host_c2", "service": "compute", "zone": "nonova"},
        {"host_name": "host_v1", "service": "volume", "zone": "nova"},
        {"host_name": "host_v2", "service": "volume", "zone": "nonova"}]
}
# Subset of HOST_LIST expected when filtering on zone="nova".
HOST_LIST_NOVA_ZONE = [
        {"host_name": "host_c1", "service": "compute", "zone": "nova"},
        {"host_name": "host_v1", "service": "volume", "zone": "nova"}]
# Raw service rows, as stub_service_get_all returns them in place of
# db.service_get_all.
SERVICES_LIST = [
        {"host": "host_c1", "topic": "compute", "availability_zone": "nova"},
        {"host": "host_c2", "topic": "compute", "availability_zone": "nonova"},
        {"host": "host_v1", "topic": "volume", "availability_zone": "nova"},
        {"host": "host_v2", "topic": "volume", "availability_zone": "nonova"}]
def stub_service_get_all(self, req):
    """Fake db.service_get_all that returns the canned SERVICES_LIST."""
    return SERVICES_LIST
def stub_set_host_enabled(context, host, enabled):
    """Fake set_host_enabled: 'host_c1' always succeeds, 'host_c2' fails."""
    if host == "notimplemented":
        raise NotImplementedError()
    # 'host_c2' simulates a backend that applies the opposite of the
    # requested state; every other host applies it correctly.
    fails = (host == "host_c2")
    if enabled != fails:
        return "enabled"
    return "disabled"
def stub_set_host_maintenance(context, host, mode):
    """Fake set_host_maintenance: 'host_c1' succeeds, 'host_c2' fails."""
    if host == "notimplemented":
        raise NotImplementedError()
    # 'host_c2' simulates a backend that applies the opposite of the
    # requested maintenance mode; every other host applies it correctly.
    fails = (host == "host_c2")
    if mode != fails:
        return "on_maintenance"
    return "off_maintenance"
def stub_host_power_action(context, host, action):
    """Fake host_power_action that simply echoes the requested action."""
    if host != "notimplemented":
        return action
    raise NotImplementedError()
def _create_instance(**kwargs):
    """Create a test instance"""
    admin_ctxt = context.get_admin_context()
    return db.instance_create(admin_ctxt, _create_instance_dict(**kwargs))
def _create_instance_dict(**kwargs):
    """Create a dictionary for a test instance.

    Any field may be overridden via keyword arguments; 'host' is only
    included when explicitly supplied.
    """
    values = {
        'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
        'reservation_id': 'r-fakeres',
        'user_id': kwargs.get('user_id', 'admin'),
        'project_id': kwargs.get('project_id', 'fake'),
        'instance_type_id': '1',
    }
    if 'host' in kwargs:
        values['host'] = kwargs.get('host')
    values['vcpus'] = kwargs.get('vcpus', 1)
    values['memory_mb'] = kwargs.get('memory_mb', 20)
    values['root_gb'] = kwargs.get('root_gb', 30)
    values['ephemeral_gb'] = kwargs.get('ephemeral_gb', 30)
    values['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
    values['power_state'] = kwargs.get('power_state', power_state.RUNNING)
    values['task_state'] = kwargs.get('task_state', None)
    values['availability_zone'] = kwargs.get('availability_zone', None)
    values['ami_launch_index'] = 0
    values['launched_on'] = kwargs.get('launched_on', 'dummy')
    return values
class FakeRequest(object):
    # Minimal stand-in for a request object as seen by the hosts controller.
    # NOTE: the admin context is created once, at class-definition time, and
    # is shared by every test using this request.
    environ = {"nova.context": context.get_admin_context()}
    # No query-string parameters.
    GET = {}
class FakeRequestWithNovaZone(object):
    # Same as FakeRequest, but carrying a zone="nova" query parameter so the
    # controller filters hosts down to the "nova" availability zone.
    environ = {"nova.context": context.get_admin_context()}
    GET = {"zone": "nova"}
class HostTestCase(test.TestCase):
    """Test Case for hosts."""
    def setUp(self):
        super(HostTestCase, self).setUp()
        self.controller = os_hosts.HostController()
        self.req = FakeRequest()
        # Replace the DB service listing and the compute API host operations
        # with the canned stubs defined at module level.
        self.stubs.Set(db, 'service_get_all',
                       stub_service_get_all)
        self.stubs.Set(self.controller.api, 'set_host_enabled',
                       stub_set_host_enabled)
        self.stubs.Set(self.controller.api, 'set_host_maintenance',
                       stub_set_host_maintenance)
        self.stubs.Set(self.controller.api, 'host_power_action',
                       stub_host_power_action)
    def _test_host_update(self, host, key, val, expected_value):
        # Helper: PUT {key: val} to the given host and check the echoed
        # value in the response body.
        body = {key: val}
        result = self.controller.update(self.req, host, body)
        self.assertEqual(result[key], expected_value)
    def test_list_hosts(self):
        """Verify that the compute hosts are returned."""
        hosts = os_hosts._list_hosts(self.req)
        self.assertEqual(hosts, HOST_LIST['hosts'])
    def test_list_hosts_with_zone(self):
        # The zone="nova" query parameter filters the list to that zone.
        req = FakeRequestWithNovaZone()
        hosts = os_hosts._list_hosts(req)
        self.assertEqual(hosts, HOST_LIST_NOVA_ZONE)
    def test_disable_host(self):
        # Per the stubs, host_c1 applies the request and host_c2 fails
        # (ends up in the opposite state).
        self._test_host_update('host_c1', 'status', 'disable', 'disabled')
        self._test_host_update('host_c2', 'status', 'disable', 'enabled')
    def test_enable_host(self):
        self._test_host_update('host_c1', 'status', 'enable', 'enabled')
        self._test_host_update('host_c2', 'status', 'enable', 'disabled')
    def test_enable_maintenance(self):
        self._test_host_update('host_c1', 'maintenance_mode',
                               'enable', 'on_maintenance')
    def test_disable_maintenance(self):
        self._test_host_update('host_c1', 'maintenance_mode',
                               'disable', 'off_maintenance')
    def _test_host_update_notimpl(self, key, val):
        # Helper: the stubs raise NotImplementedError for the host named
        # "notimplemented"; the controller must map that to HTTP 501.
        def stub_service_get_all_notimpl(self, req):
            return [{'host': 'notimplemented', 'topic': None,
                     'availability_zone': None}]
        self.stubs.Set(db, 'service_get_all',
                       stub_service_get_all_notimpl)
        body = {key: val}
        self.assertRaises(webob.exc.HTTPNotImplemented,
                          self.controller.update,
                          self.req, 'notimplemented', body=body)
    def test_disable_host_notimpl(self):
        self._test_host_update_notimpl('status', 'disable')
    def test_enable_maintenance_notimpl(self):
        self._test_host_update_notimpl('maintenance_mode', 'enable')
    def test_host_startup(self):
        result = self.controller.startup(self.req, "host_c1")
        self.assertEqual(result["power_action"], "startup")
    def test_host_shutdown(self):
        result = self.controller.shutdown(self.req, "host_c1")
        self.assertEqual(result["power_action"], "shutdown")
    def test_host_reboot(self):
        result = self.controller.reboot(self.req, "host_c1")
        self.assertEqual(result["power_action"], "reboot")
    def _test_host_power_action_notimpl(self, method):
        # Helper: power actions on "notimplemented" must yield HTTP 501.
        self.assertRaises(webob.exc.HTTPNotImplemented,
                          method, self.req, "notimplemented")
    def test_host_startup_notimpl(self):
        self._test_host_power_action_notimpl(self.controller.startup)
    def test_host_shutdown_notimpl(self):
        self._test_host_power_action_notimpl(self.controller.shutdown)
    def test_host_reboot_notimpl(self):
        self._test_host_power_action_notimpl(self.controller.reboot)
    def test_bad_status_value(self):
        # Only "enable"/"disable" are valid status values.
        bad_body = {"status": "bad"}
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          self.req, "host_c1", bad_body)
        bad_body2 = {"status": "disablabc"}
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          self.req, "host_c1", bad_body2)
    def test_bad_update_key(self):
        bad_body = {"crazy": "bad"}
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          self.req, "host_c1", bad_body)
    def test_bad_update_key_and_correct_udpate_key(self):
        # A single unknown key rejects the whole update, even when other
        # keys are valid.  (NOTE: "udpate" typo is in the original name.)
        bad_body = {"status": "disable", "crazy": "bad"}
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          self.req, "host_c1", bad_body)
    def test_good_udpate_keys(self):
        # Multiple valid keys may be updated in one request.
        body = {"status": "disable", "maintenance_mode": "enable"}
        result = self.controller.update(self.req, 'host_c1', body)
        self.assertEqual(result["host"], "host_c1")
        self.assertEqual(result["status"], "disabled")
        self.assertEqual(result["maintenance_mode"], "on_maintenance")
    def test_bad_host(self):
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
                          self.req, "bogus_host_name", {"status": "disable"})
    def test_show_forbidden(self):
        # Non-admin contexts may not view host details.
        self.req.environ["nova.context"].is_admin = False
        dest = 'dummydest'
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.show,
                          self.req, dest)
        # Restore admin flag for subsequent tests (context is shared).
        self.req.environ["nova.context"].is_admin = True
    def test_show_host_not_exist(self):
        """The host given as an argument does not exist."""
        self.req.environ["nova.context"].is_admin = True
        dest = 'dummydest'
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show,
                          self.req, dest)
    def _create_compute_service(self):
        """Create compute-manager(ComputeNode and Service record)."""
        ctxt = context.get_admin_context()
        dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute',
               'report_count': 0, 'availability_zone': 'dummyzone'}
        s_ref = db.service_create(ctxt, dic)
        dic = {'service_id': s_ref['id'],
               'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
               'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10,
               'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
               'cpu_info': '', 'stats': {}}
        db.compute_node_create(ctxt, dic)
        return db.service_get(ctxt, s_ref['id'])
    def test_show_no_project(self):
        """No instances are running on the given host."""
        ctxt = context.get_admin_context()
        s_ref = self._create_compute_service()
        result = self.controller.show(self.req, s_ref['host'])
        # With no instances, only the three summary rows are expected.
        proj = ['(total)', '(used_now)', '(used_max)']
        column = ['host', 'project', 'cpu', 'memory_mb', 'disk_gb']
        self.assertEqual(len(result['host']), 3)
        for resource in result['host']:
            self.assertTrue(resource['resource']['project'] in proj)
            self.assertEqual(len(resource['resource']), 5)
            self.assertTrue(set(resource['resource'].keys()) == set(column))
        db.service_destroy(ctxt, s_ref['id'])
    def test_show_works_correctly(self):
        """show() works correctly as expected."""
        ctxt = context.get_admin_context()
        s_ref = self._create_compute_service()
        i_ref1 = _create_instance(project_id='p-01', host=s_ref['host'])
        i_ref2 = _create_instance(project_id='p-02', vcpus=3,
                                  host=s_ref['host'])
        result = self.controller.show(self.req, s_ref['host'])
        # Three summary rows plus one row per project with instances.
        proj = ['(total)', '(used_now)', '(used_max)', 'p-01', 'p-02']
        column = ['host', 'project', 'cpu', 'memory_mb', 'disk_gb']
        self.assertEqual(len(result['host']), 5)
        for resource in result['host']:
            self.assertTrue(resource['resource']['project'] in proj)
            self.assertEqual(len(resource['resource']), 5)
            self.assertTrue(set(resource['resource'].keys()) == set(column))
        db.service_destroy(ctxt, s_ref['id'])
        db.instance_destroy(ctxt, i_ref1['uuid'])
        db.instance_destroy(ctxt, i_ref2['uuid'])
class HostSerializerTest(test.TestCase):
    """Tests for the XML (de)serializers of the os-hosts extension."""
    def setUp(self):
        super(HostSerializerTest, self).setUp()
        self.deserializer = os_hosts.HostUpdateDeserializer()
    def test_index_serializer(self):
        serializer = os_hosts.HostIndexTemplate()
        text = serializer.serialize(HOST_LIST)
        tree = etree.fromstring(text)
        self.assertEqual('hosts', tree.tag)
        self.assertEqual(len(HOST_LIST['hosts']), len(tree))
        # BUG FIX: iterate over every serialized host.  The original used
        # range(len(HOST_LIST)) -- the dict has a single key, so only the
        # first host element was ever checked.
        for i in range(len(HOST_LIST['hosts'])):
            self.assertEqual('host', tree[i].tag)
            self.assertEqual(HOST_LIST['hosts'][i]['host_name'],
                             tree[i].get('host_name'))
            self.assertEqual(HOST_LIST['hosts'][i]['service'],
                             tree[i].get('service'))
    def test_update_serializer_with_status(self):
        exemplar = dict(host='host_c1', status='enabled')
        serializer = os_hosts.HostUpdateTemplate()
        text = serializer.serialize(exemplar)
        tree = etree.fromstring(text)
        self.assertEqual('host', tree.tag)
        for key, value in exemplar.items():
            self.assertEqual(value, tree.get(key))
    def test_update_serializer_with_maintainance_mode(self):
        exemplar = dict(host='host_c1', maintenance_mode='enabled')
        serializer = os_hosts.HostUpdateTemplate()
        text = serializer.serialize(exemplar)
        tree = etree.fromstring(text)
        self.assertEqual('host', tree.tag)
        for key, value in exemplar.items():
            self.assertEqual(value, tree.get(key))
    def test_update_serializer_with_maintainance_mode_and_status(self):
        exemplar = dict(host='host_c1',
                        maintenance_mode='enabled',
                        status='enabled')
        serializer = os_hosts.HostUpdateTemplate()
        text = serializer.serialize(exemplar)
        tree = etree.fromstring(text)
        self.assertEqual('host', tree.tag)
        for key, value in exemplar.items():
            self.assertEqual(value, tree.get(key))
    def test_action_serializer(self):
        exemplar = dict(host='host_c1', power_action='reboot')
        serializer = os_hosts.HostActionTemplate()
        text = serializer.serialize(exemplar)
        tree = etree.fromstring(text)
        self.assertEqual('host', tree.tag)
        for key, value in exemplar.items():
            self.assertEqual(value, tree.get(key))
    def test_update_deserializer(self):
        exemplar = dict(status='enabled', maintenance_mode='disable')
        intext = """<?xml version='1.0' encoding='UTF-8'?>
                    <updates>
                        <status>enabled</status>
                        <maintenance_mode>disable</maintenance_mode>
                    </updates>"""
        result = self.deserializer.deserialize(intext)
        self.assertEqual(dict(body=exemplar), result)
| |
#!/usr/bin/python
#this is a conglomerate of fasta-fixing scripts, now called FISH (FASTA ID SWAPPING HELPER) because lol i can acronym.
##
#last edit abigailc@Actaeon Sept 7 2016
#things this doesn't do: play super nice with accession numbers instead of GI numbers. probably easy to convert, (see that one script that one time), but meh
#do it later.
#todo
#import functions from FEAST - append taxonomy, extract, split, etc
#when you create a Fasta object, initialize it with sequence ID and Data by using either gen_original_lists or blast2fasta
class Fasta:
def __init__(self, name):
#all ids and seqs should be stripped of leading and trailing whitespace and have ">" removed for reasons.
#this is the name of the fasta, it can be anything, i'm not really using it right now.
self.name = name
#this is the to-be-modified version of sequence IDs and sequence-Data
# ALWAYS keep IDS and SEQS the same length. id[1] should ALWAYS correspond to seq[1].
self.ids = []
self.seqs = []
# these are the original SEQids and Sequences. They should never be modified after generation in gen_original_lists or blast_to_fasta
self.original_ids = []
self.original_seqs = []
self.species_names = []
self.numbers = []
self.taxid = []
self.taxonomy = []
def ret_name(self):
return self.name
def gen_original_lists(self, fastaname):
with open(fastaname) as fastafile:
for line in fastafile:
if "\n" == line:
pass
if ">" in line:
#write the previous AA seq
try:
AAseq=AAseq.strip()
self.seqs.append(AAseq)
self.original_seqs.append(AAseq)
except:
pass
#initialize a new AAseq
AAseq = ""
#format the seqID
newline = line.strip()
newline = line.strip(">")
#write the seqID
self.ids.append(newline.strip())
self.original_ids.append(newline.strip())
else:
AAseq = AAseq+line
AAseq=AAseq.strip()
#catch the last AAseq pass
self.seqs.append(AAseq)
self.original_seqs.append(AAseq)
print("Initial sequence and ID lists created. Contains "+str(len(self.ids))+" sequences")
def manual_shorten(self, shorts):
#the list of shorts will be provided like "Bacteria,Bac Eukarya,Euk"
changes = shorts.split()
for item in self.ids:
newline = item
index = self.ids.index(item)
for change in changes:
old, new = change.split(",")
newline = newline.replace(old,new)
self.ids[index] = newline
#done
print("Manual shorten complete")
def gen_numbers(self):
for item in self.ids:
number = re.sub("(.*)(\|)(.*)","\\3", item)
self.numbers.append(number)
def gen_species_lists(self):
self.species_names = []
speclist = []
for item in self.ids:
# item will be "Nostoc_punctiforme_PCC_73102|gi#|186468349" or "Blah|Rank|Nostoc_punctiforme_PCC_73102|gi#|186468349"
# for now, ignores anything that isn't Genus_species.
# for example, ignores strain, ., things with an extra
# word, etc.
taxon = re.sub("([^_]*)([A-Z][a-z]*_[a-z]*)(.*)", "\\2", item)
if "#" in taxon:
print ("TAXON error in gen_species_lists():" + taxon)
speclist.append(taxon)
self.species_names.append(taxon)
return speclist
def common_shorten(self, verbose = False):
#TODO: allow input of manual shorten-pairs, possibly in new function
#put your conversions of common strings to shorten here
inte = 0
for item in self.ids:
newline = item
index = self.ids.index(item)
newline = re.sub("bacteria\|", "bac|", newline)
newline = re.sub("bacteriales\|", "bacl|", newline)
newline = re.sub("bacteriaceae\|", "bacc|", newline)
newline = re.sub("Bacteria\|", "Bac|", newline)
newline = re.sub("Archaea\|", "Arc|", newline)
newline = re.sub("Eukaryota\|", "Euk|", newline)
newline = re.sub("Fungi\|", "Fun|", newline)
newline = re.sub("Viridiplantae\|", "Vir|", newline)
newline = re.sub("Metazoa\|", "Met|", newline)
newline = re.sub("mycetes\|", "myc|", newline)
newline = re.sub("mycetales\|", "mycl|", newline)
newline = re.sub("mycetaceae\|", "mycc|", newline)
newline = re.sub("Methanomassiliicoccaceae\|", "Methmasscoc|", newline)
#newline = re.sub("bacteriales\|", "bacles|", newline)
#newline = re.sub("bacteriales\|", "bacles|", newline)
#newline = re.sub("[+=\.]", "", newline)
newline = re.sub("_enterica_subsp_enterica_serovar", "", newline)
if newline == item:
pass
else:
if verbose is True:
print(item)
print(newline)
inte +=1
self.ids[index] = newline
print("Common shorten complete")
print("Fixed "+str(inte)+" lines")
#this should have successfully modified the self.ids list to contain shortened sequence ids.
def length_check(self, length, verbose):
#needs to pass in a number... charnum
toolong = 0
length = int(length)
print("trying to shorten to length "+str(length))
for item in self.ids:
index = self.ids.index(item)
linelength = len(item)
newline= item
if int(linelength) > int(length):
toolong +=1
#change all 12 to 14 if include \n at end of seqids... for now, we are not considering them.
gi = newline[-12:]
rest = re.sub("([^#]*)(#)(.*)", "\\1", newline)
nogi = rest[:-3]
newl = length-13
#12 instead of 12 to leave space for adding a bar.
newnogi = nogi[:newl]
if newnogi[-1:] == "|":
pass
else:
newnodi = newnogi[:-1]
newline = newnogi+"|"+gi
if verbose == True:
print ("LENGTHERROR: "+item[:length]+" || "+item[length:])
print("Tried to fix: "+newline)
self.ids[index] = newline
#end
print("Length-check complete, "+str(toolong)+" sequences were fixed")
def weird_AA_check(self, verbose = False):
lerr = 0
errletters = []
for item in self.seqs:
#if you want to not remove "-" just add it to list of letters.
listofletters = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'Y']
newseq = ""
#open sequences list
index = self.seqs.index(item)
anerror = "no"
for letter in item:
if letter in listofletters:
pass
elif letter == "\n":
pass
else:
if verbose == True:
if letter == "-":
pass
else:
print("LETTERERROR: "+letter)
anerror = "yes"
errletters.append(letter)
letter = ""
lerr +=1
newseq = newseq+letter
if verb == True:
if anerror == "yes":
print(item)
self.seqs[index] = newseq
if verbose == True:
from collections import Counter
counta = Counter(errletters).most_common()
print("There were "+str(lerr)+" letter errors as so:")
print(type(counta))
for thing in counta:
print(thing)
#end
print("weird aa check done")
def weird_ID_check(self, verb = False):
errors = 0
for item in self.ids:
index = self.ids.index(item)
newitem = re.sub("[\[\]]", "", item)
newitem = re.sub("[:;=,/\+'\.\(\)]", "_", newitem)
newitem = re.sub(" ", "_", newitem)
newitem = re.sub("__", "_", newitem)
if item == newitem:
pass
else:
errors += 1
if verb == True:
print("Replacing:\n"+item+"\n with:\n"+newitem)
self.ids[index] = newitem
if verb == True:
print("there were "+str(errors)+" weird_ID errors")
print("weird id check done")
def duplicates_check(self, verb = False):
listoflines = []
rep = 0
num = 0
for line in self.ids:
index = self.ids.index(line)
if line in listoflines:
num+=1
rep = line+"v"+str(num)
listoflines.append(line)
self.ids[index] = rep
if verb == True:
print ("there were "+str(num)+" duplicate sequences that were numbered")
#done
print("duplicate check done")
    def index_shorted(self, replace):
        """Rebuild every seqID from selected '|'-separated fields and/or the gi number.

        replace -- "gi" for the gi number alone, an int for the Nth field
        (1-based), or a space-separated string combining both, e.g. "2 3 gi".
        NOTE(review): when replace == "gi" it is ALSO a str, so the str
        branch runs afterwards and overwrites CTdict with "|<gi>" -- confirm
        this is intended.
        NOTE(review): in the no-"|gi#" branch, replace == "gi" stores
        nothing for that line, so the final lookup loop will raise KeyError.
        """
        #currently does NOT work w/ accession numbers
        #here replace is depth and/or gi num eg "2 3 gi"
        # maps original id -> rebuilt id; applied in a second pass below
        CTdict = {}
        for line in self.ids:
            if "|gi#" in line:
                # split "taxfields|gi#|number..." into "taxfields~number"
                taxgi = re.sub("([^#]*)(\|gi#\|?)([0-9]*)(.*)", "\\1~\\3", line)
                tax, gi = taxgi.split("~")
                taxlist = tax.split("|")
                if replace == "gi":
                    CTdict[line] = gi
                if type(replace) is int:
                    CTdict[line] = taxlist[replace-1]
                if type(replace) is str:
                    listreplace = replace.split()
                    newid = ""
                    for item in listreplace:
                        if item == "gi":
                            newid = newid+"|"+gi
                        else:
                            # 1-based field index into the tax fields
                            newid = str(newid)+"|"+str(taxlist[int(item)-1])
                        newid = newid
                    CTdict[line] = newid
                    print(newid)
            else:
                # no gi block: keep only the tax fields, gi becomes "NA"
                tax = re.sub("([^#]*)(\|gi#\|?)([0-9]*)(.*)", "\\1", line)
                taxlist = tax.split("|")
                if replace == "gi":
                    pass
                if type(replace) is int:
                    CTdict[line] = taxlist[replace-1]
                if type(replace) is str:
                    listreplace = replace.split()
                    newid = ""
                    f = 1
                    for item in listreplace:
                        f += 1
                        if item == "gi":
                            newid = newid+"|NA"
                        else:
                            newid = str(newid)+"|"+str(taxlist[int(item)-1])
# #SPECIFICALLY FOR CURRENT USE_CASE, REMOVE LATER
#                        if f == 2:
#                            newid = str(newid)+"|"+str(taxlist[int(item)-1])
#                        if f == 3:
#                            newid = str(newid)+"|"+str(taxlist[int(item)])
                    newid = newid
                    CTdict[line] = newid
                    print(newid)
        # second pass: swap each id for its rebuilt form
        for line in self.ids:
            index = self.ids.index(line)
            newestid = CTdict[line]
            self.ids[index] = newestid
        print("index check done")
def ten_char(self):
#something
#this one should be done in a seperate loop
CTdict = {}
iteration = 0
for line in self.ids:
iteration +=1
line = line.strip()
## #i have something like
## >Methanococcoides_burtonii|gi|909890
## #i want
## MethBurt00
GenusSpecies = re.sub("([A-Z][a-z]*)(_)([A-Z]*[a-z]*)(.*)", "\\1~\\3", line)
try:
Genus, Species = GenusSpecies.split("~")
g4 = Genus[:4]
try:
s4 = Species[:4]
s3 = Species[:3]
except:
s4 = Species[:2]
s3 = Species[:2]
if iteration < 10:
newid = g4+s4.capitalize()+"0"+str(iteration)
elif iteration > 99:
newid = g4+s3.capitalize()+str(iteration)
else:
newid = g4+s4.capitalize()+str(iteration)
except:
## print(GenusSpecies)
gs8 = GenusSpecies[1:9]
if iteration < 10:
newid = gs8+"0"+str(iteration)
elif iteration > 99:
newid = gs8[:-1]+str(iteration)
else:
newid = gs8+str(iteration)
## print(newid)
CTdict[line] = newid
for line in self.ids:
index = self.ids(line)
newestid = CTdict[line]
self.ids[index] = newestid
print("ten char done")
def mb_version(self):
#shorten seqids to 94 if not already done.
self.length_check(94)
#deal with any duplicates that may have caused
self.duplicates_check()
#remove the # and | characters that MrBayes Hates
for line in self.ids:
if "#" in nline:
nline = re.sub("[#]", "", nline)
if "|" in nline:
nline = re.sub("\|", "_", nline)
#tell you what to do
print("MB version ids created")
print("You should print this too .fasta format, and then convert to nexus however you want")
def load_info_swap(self, info_file_in):
#reads a file of form
# originalID
# changedID
#and generates self.ids from that file.
kid = "no"
vid = "no"
CTdict = {}
with open (info_file_in) as old:
for line in old:
#first pass: gets key (original ID)
#second pass: gets value (new ID)
#if we have no info, get key
if kid == "no":
key = line.strip()
kid = "yes"
continue
elif kid == "yes":
#if we have key and value, record.
if vid == "yes":
CTdict[key]=value
vid = "no"
kid = "no"
continue
#if we have key but no value, get value.
if vid == "no":
value = line.strip()
vid = "yes"
#catch the final pass
CTdict[key]=value
if self.original_ids == []:
for thing in CTdict:
self.ids.append(thing)
self.original_ids.append(CTdict[thing])
else:
for item in self.original_ids:
index = self.original_ids.index(item)
newid = CTdict[item]
self.ids[index] = newid
print("original ids:")
print(self.original_ids)
print("new ids:")
print(self.ids)
#done
#troubleshooting: do not preform this operation after any that change self.ids. this op must be done first, or in a seperate command.
def gen_new_fasta(self, new_fasta_name):
#this should print the changed seqids and changed AA sequences to file.
newfasta = new_fasta_name
# print(len(self.original_ids))
# print(len(self.ids))
# print(len(self.original_seqs))
# print(len(self.seqs))
with open (newfasta, "w") as new:
for i in range(len(self.original_ids)):
new.write(">"+self.ids[i].strip()+"\n")
# print(i) #
#unclear if this needs a "\n" after it... check.#TODO
#print(self.seqs)
#print(type(self.seqs[i]))
new.write(self.seqs[i]+"\n")
print("Finished, your new fasta file is located at "+newfasta)
#done
def extract(self, list_of_keeps):
keep_ids = []
keep_seq = []
success = 0
suc_num = len(list_of_keeps)
for item in list_of_keeps:
item = item.strip()
for thing in self.original_ids:
if thing == item:
keep_ids.append(thing)
index = self.original_ids.index(item)
seq = self.original_seqs[index]
keep_seq.append(seq)
success += 1
if suc_num == success:
print("100% complete extract")
else:
print(str(success)+"out of "+str(suc_num)+" sequences extracted")
self.ids = keep_ids
self.seqs = keep_seq
def swap_in_newick(self, old_newick_name, new_file_name):
#this replaces the tip names in a newick file. sometimes works on nexus files too, but I havent extensively tested it.
newick = old_newick_name
newnewick = new_file_name
with open (newick) as old:
with open (newnewick, "w") as new:
for line in old:
for item in self.original_ids:
index = self.original_ids.index(item)
line = line.replace(item, self.ids[index])
new.write(line)
print("finished, tip-replaced-newick file at: "+newnewick)
#done
def swap_in_nexus(self):
print ("You didn't implement this yet. try using newick replace, it might work")
pass
#something
#to-do, try nexus replace in the meantime, it should work
def gen_info(self, info_file_name):
#writes a file of form
# originalID
# changedID
with open(info_file_name, "w") as inf:
listlength = len(self.original_ids)
if listlength != len(self.ids):
print ("List lengths do not match! FATAL ERROR")
print (self.original_ids)
print (self.ids)
raiseSystemExit
for i in range(listlength):
inf.write(self.original_ids[i])
inf.write(self.ids[i]+"\n")
print("Info file was generated. Named "+info_file_name)
#done
def write_one_seq_per_file(self):
geneflist = []
genenames = []
for i in range(len(self.ids)):
with open("Seq" + str(i), "w") as new:
new.write(">" + self.ids[i]+"\n")
new.write(self.seqs[i]+"\n")
name = re.sub("([^\|]*)(\|)(.*)", "\\1", self.ids[i])
geneflist.append("Seq" + str(i))
genenames.append(name)
return geneflist, genenames
print("one per file generated")
def number_of_sites(self):
return len(self.original_seqs[0])
    def shorten(self):
        """Convert NCBI-style blast/download headers to "Species_name<...>|<number>" ids.

        Handles two header shapes: "gi|<num>|<db>|<acc>| <desc> [Genus species]"
        and "<acc> <desc> [Genus species]". Ids whose species bracket cannot
        be found are reported and left unchanged.
        NOTE(review): list.index() returns the first occurrence, so
        duplicated ids would all rewrite the first slot -- preserved as-is.
        """
        print("shortening ids...")
        unk = "no"
        normal = 0
        ucount = 0
        for line in self.ids:
            index = self.ids.index(line)
            # this removes words in brackets that aren't Species_name
            # and then changes NCBI's default naming scheme to be
            #>Species_name|#########
            # and makes a list of all gi nums and all
            # duplicates
            # AAH91460.1 Ribosomal protein L3 [Danio rerio]
            if "gi|" in line:
                # gi-style header: pull the gi number ...
                number = re.sub("(gi)(\|)([0-9]*)(\|)([A-Za-z]*)(\|)(.*)(\[\'?[A-Z]?[a-z]* ?.*\])(.*)", "\\3", line)
                num = number.strip()
                # ... and rebuild as "[Species]|gi#|" (bracket stripped below)
                edit1 = re.sub("(gi)(\|)([0-9]*)(\|)([A-Za-z]*)(\|)(.*)(\[\'?[A-Z]?[a-z]* ?.*\])(.*)", "\\8\\2\\1#|", line)
                #get acc number
            else:
                # accession-style header: first token is the accession
                number = re.sub("([^ ]*)(.*)(\[\'?[A-Z]?[a-z]* ?.*\])(.*)", "\\1", line)
                num = number.strip()
                #get edit | AAH91460.1 Ribosomal protein L3 [Danio rerio]
                edit1 = re.sub("([^ ]*)(.*)(\[\'?[A-Z]?[a-z]* ?.*\])(.*)", "\\3|", line)
            if "[" in edit1:
                # a bracketed species survived the rewrite: recognised header
                unk = "no"
                normal += 1
            else:
                unk = "yes"
            edit2 = re.sub("[\[\]]", "", edit1)
            #for now, leave periods in name due to their necessity in acc numbers (????)
            edit3 = re.sub("[:;\.=,/\+'\(\)]", "_", edit2)
            edit4 = re.sub(" ", "_", edit3)
            edit4 = re.sub("__", "_", edit4)
            edit4 = edit4+num
            if unk == "no":
                self.ids[index] = edit4
            else:
                print("Unknown Species in ID:" + line)
        print("shortened: "+str(normal)+" sequence")
def blast2fasta(self, blastlist, ENTREZ=False, num=False):
# entrez is used to ensure that sequence saved uses correct TAXON, esp. if sequence is a MULTISPECIES entry.
# entrex should be somethin like "Mycobacterium triplex"
#take from MakeSPeciesTree.py version if you want a new sequence for each multispecies thing(!)
# num is how many sequences to write. for species trees, we almost certainly only want one.
# for converting full downloaded .fastas, we will want all of them (default = False means to do all of them)
# Converts blast outfmt "6 sseqid stitle sseq" to original lists if
# entrez = false
#... now converting outfmt "6 sallseqid salltitles sseq" to sh fasta with selection of proper gi/acc/taxon
# this should take format " " blast names and replace them with the proper
# fasta shit
ernum = 0
# we open each file in a unique call to blast2fasta. files should be
# deleted afterwards.
bf = open(blastlist, 'r')
error = 0
end = "no"
for line in bf:
if end == "yes":
break
# gi|738518257|ref|WP_036466735.1|;gi|620038207|emb|CDO87046.1| 50S
# ribosomal protein L15 [Mycobacterium triplex]<>50S ribosomal protein L15
# [Mycobacterium triplex]
gis = re.sub("(.*)(\t)(.*])(\t)([A-Z-]*)", "\\1", line)
names = re.sub("(.*)(\t)(.*])(\t)([A-Z-]*)", "\\3", line)
seq = re.sub("(.*)(\t)(.*])(\t)([A-Z-]*)", "\\5", line)
# this removes sequences with no Species_name given, so as to avoid errors
# downstream
if "\t" in gis:
error += 1
print("ERROR in blast parsing: " + line)
continue
else:
gilist = gis.split(";")
namelist = names.split("<>")
if ENTREZ is False:
index = 0
else:
ENTREZ = ENTREZ.strip("\"")
for item in namelist:
if ENTREZ in item:
index = namelist.index(item)
try:
seqi = gilist[index].strip() + namelist[index].strip()
#end = "yes"
except UnboundLocalError:
error += 1
print("Name error... might fix")
if error == 5:
print("Serious ENTREZ error:")
print(ENTREZ)
print(namelist)
print("This gene wasn't found in this taxon, skipping")
break
continue
# goes to next line, abandoning this one
seqid = re.sub("[ ]", "_", seqi)
# strips for .fasta format
seqid = seqid.strip()
seqid = seqid.strip(">")
# add the new sequence id to the list.
self.ids.append(seqid)
self.original_ids.append(seqid)
# the new sequence
slist = []
count = 0
newseq = ""
for letter in seq:
if count > 79:
count = 0
newseq = newseq + ("\n")
newseq = newseq + letter
count += 1
self.seqs.append(newseq.strip())
self.original_seqs.append(newseq.strip())
print("Blasttofasta id/seq loading complete!")
def SetTaxID(self):
self.taxid = []
for item in self.numbers:
GItoTAXID = "xmllint --xpath '/GBSet/GBSeq/GBSeq_feature-table/GBFeature/GBFeature_quals/GBQualifier[GBQualifier_name=\"db_xref\"]/GBQualifier_value/text()' \"http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=protein&id="+item+"&retmode=xml\""
futuretaxid = subprocess.check_output(GItoTAXID, shell=True)
taxid = re.sub("(taxon:)([0-9]*)(.*)", "\\2", futuretaxid)
self.taxid.append(taxid)
def GetTaxonomy(self):
self.taxonomy = []
if self.taxid = []:
print("You need to generate taxids first.. lets try")
self.SetTaxID()
for item in self.taxid:
taxid = number
ranklist = "superkingdom kingdom phylum class order family"
ranklist = ranklist.split()
for r in ranklist:
TAXIDtoRANKNAME = "xmllint --xpath '/TaxaSet/Taxon/LineageEx/Taxon[Rank=\"" + r + \
"\"]/ScientificName/text()' \"http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=taxonomy&id=" + taxid + "\""
try:
rankname = subprocess.check_output(TAXIDtoRANKNAME, shell=True)
except:
rankname = "NA"
rankname = re.sub(" ", "_", rankname)
taxdict = {}
taxdict[r]=rankname
self.taxonomy.append(taxdict)
def AppendTaxonomy(self):
for item in self.ids:
index = self.ids.index(item)
rankdict = self.taxonomy[index]
newitem = rankdict["superkingdom"]+"|"+rankdict["kingdom"]+"|"+rankdict["phylum"]+"|"+rankdict["class"]+"|"+rankdict["order"]+"|"+rankdict["family"]+"|"+item
self.ids[index] = newitem
#TODO:
#add get taxonomy to parser..
#this hasn't been implemented in class fasta, so I am leaving it commented out.. subtrees file might be easily replaced using replace.newick but it might take literally ages... unclear.
# def replace2(replace_file, dict_old_new, charnum, verb):
# print("Recognized subtrees file, using subtrees varient")
# outputlist = []
# rep = 0
# replist = []
# newfilename = replace_file.split(".")
# newfilename = newfilename[0]+str(charnum)+"limit."+newfilename[1]
# with open(replace_file) as old:
# if verb == True:
# print("Opening "+replace_file)
# with open (newfilename, "w") as new:
# for line in old:
# line = line.strip()
# for item in dict_old_new:
# if item[:127] in line:
# if item[:127] in replist:
# pass
# else:
# replist.append(item[:127])
# rep+=1
# ## print(line)
# oldline = line
# line = line.replace(item[:127], dict_old_new[item])
# ## if verb == True:
# ## if len(line) <200:
# ## print oldline
# ## print item
# ## print dict_old_new[item]
# ## print(line)
# ## print("\n")
# ## print("\n")
# new.write(line+"\n")
# print("finished with "+newfilename+"made "+str(rep)+" replacements of "+str(len(replist))+" differnt patterns")
# ## print(replist)
# return newfilename
# def gen_original_lists(self, fastaname):
# def load_info_swap(info_file_in):
# def duplicates_check(verb = False):
# def weird_ID_check(verb = False):
# def weird_AA_check(verbose = False):
# def length_check(length, verbose=False):
# def manual_shorten():
# def common_shorten():
# def mb_version():
# def index_shorted(replace):
# def ten_char():
# #write stuff
# def gen_new_fasta(new_fasta_name):
# def swap_in_nexus():
# def swap_in_newick(old_newick_name, new_file_name):
# def gen_info(info_file_name):
if __name__ == "__main__":
    # Command-line driver: load a fasta (or blast output / info file), apply
    # the requested checks and shorteners in a fixed order, then write the
    # results (fasta / tip-replaced newick / reversible info file).
    print("Running in terminal")
    import sys
    import argparse
    import os
    import re
    parser = argparse.ArgumentParser(description="All")
    #necessary bits
    parser.add_argument("directory", nargs='?', default=os.getcwd(), type=str, help="type name of directory to run in where fasta resides, if not pwd")
    parser.add_argument("-fas", "--fasta", action = "store", default = False, help="type the name of your .fasta file")
    #options to load changes from another file
    parser.add_argument("-i", "--infofile", action = "store", default = False, help="Provide an Info File (as generated by this script previously) to pull original and new sequences from")
    #options to check/fix/edit the seqs or seqids:
    # -l length, -d duplicates, -faa weird AA letters, -fid weird ID chars
    parser.add_argument("-sh", "--shorten", action = "store_true", default=False, help="shortens blast (from online) seqIDs")
    parser.add_argument("-b2f", "--blast2fasta", action = "store_true", default=False, help="Blast+ output -> fasta download format BUGGY")
    parser.add_argument("-l", "--length", action = "store", default=False, help="Provide a max length for your sequenceIDs")
    parser.add_argument("-d", "--duplicates", action = "store_true", help="Flag causes identical seqIDs to be numbered 1 2 3 etc to prevent program confusion")
    parser.add_argument("-fid", "--fixID", action = "store_true", help="Flag scans SeqIDs and removes weird characters like += etc")
    parser.add_argument("-faa", "--fixAA", action = "store_true", help="Flag scans Sequences and removes non-standard AA characters like X B &")
    #options to shorten specific words:
    # -m manual_shorten (user pairs), -c common_shorten (built-in table)
    parser.add_argument("-c", "--common", action = "store_true", help="Flag causes seqIDs to be shortened in a predefined manner, eg bacteriales->bacles ")
    parser.add_argument("-m", "--manual", default = False, action = "store", help="Provide a list of \"original,new\" things to shorten. eg \"Bacteria,Bac Eukaryota,Euk\"")
    #special shortening methods (run alone -- see workflow note below)
    parser.add_argument("-t", "--tenchars", action = "store_true", help="Flag turns sequence IDs into ten character strings")
    parser.add_argument("-ba", "--bayes", action = "store_true", help="Flag turns sequences into form that will work as MrBayes input")
    parser.add_argument("-p", "--piece", default = False, action = "store", help="Provide taxonomy-depth, gi, or combo for shortening eg \"1 3 gi\"")
    #writing methods
    parser.add_argument("-wf", "--writefasta", action = "store", default=False, help="Provide name for new fasta file")
    parser.add_argument("-wn", "--writenewick", action = "store",default=False, help="Provide name of newick, name of newfile eg \"example.newick replaced.newick\"")
    parser.add_argument("-wi", "--writeinformation", action = "store", default=False, help="Provide name for this info_file")
    # -fasta
    # -newick replace
    # -info gen (should this always happen?)
    parser.add_argument("-v", "--verbose", action = "store_true", help="prints more information - for debugging mostly. might not be implemented yet")
    args = parser.parse_args()
    #workflow: do all the things you want to do to change seqID/seq in one step, save the information and .fasta file.
    #then, if desired, use that fasta as base to make ten-char shortened, MBversion, or depth-shortened files, also saving info file so they are reversable.
    #actual work flow
    #change dir if desired
    try:
        os.chdir(args.directory)
        if args.verbose == True:
            print("moved to dir: "+args.directory)
    except:
        print ("didn't change dir")
    if args.verbose:
        verb = True
    else:
        verb = False
    #originate the fasta class instance
    MyFasta = Fasta("MyFastaName")
    # NOTE(review): --blast2fasta is a flag but the file path is still taken
    # from --fasta; confirm that pairing is intended.
    if args.blast2fasta != False:
        MyFasta.blast2fasta(args.fasta)
    else:
        if args.fasta != False:
            MyFasta.gen_original_lists(args.fasta)
    #this should be done in conjunction w / write fasta or replace newick.
    if args.infofile != False:
        MyFasta.load_info_swap(args.infofile)
    #here are the error-fixing calls
    if args.duplicates == True:
        MyFasta.duplicates_check(verb)
    if args.fixID == True:
        MyFasta.weird_ID_check(verb)
    if args.fixAA == True:
        MyFasta.weird_AA_check(verb)
    #shortening calls
    if args.shorten == True:
        MyFasta.shorten()
    if args.common == True:
        MyFasta.common_shorten(verb)
    if args.manual != False:
        MyFasta.manual_shorten(args.manual)
    if args.piece != False:
        MyFasta.index_shorted(args.piece)
    if args.length != False:
        MyFasta.length_check(args.length, verb)
    #these should only be done on their own, not combined w the above. for mrbayes, anything that requires 10 characters.
    if args.bayes == True:
        MyFasta.mb_version()
    if args.tenchars == True:
        MyFasta.ten_char()
    #write stuff
    if args.writefasta != False:
        MyFasta.gen_new_fasta(args.writefasta)
    if args.writenewick != False:
        old, new = args.writenewick.split()
        MyFasta.swap_in_newick(old, new)
    if args.writeinformation != False:
        MyFasta.gen_info(args.writeinformation)
    print("All things finished, exiting...")
# FISH FASTA ID SWAPPING HELPER
#### this is becoming a dedicated tip-name-editing package.
#requires that tips are in format given by FEAST's shorten or shorten-keep-info
#things it can do:
# 1. shorten too long seqids using common shortening phrases, or by removing info from the species-name (usually catches strain info)
# 2. remove weird characters from seqIDS
# 3. remove weird characters from AA sequences
# do this on 1. fasta files 2. nexus files (maybe? unclear) 3. newick files (maybe? unclear)
# ---- end of fasta seq-ID helper script ----
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import json
import os
import copy
import unittest
from collections import defaultdict
import pytest
from monty.json import MontyDecoder
from pymatgen.analysis.phase_diagram import PhaseDiagram
from pymatgen.entries.computed_entries import (
CompositionEnergyAdjustment,
ComputedEntry,
ComputedStructureEntry,
ConstantEnergyAdjustment,
EnergyAdjustment,
GibbsComputedStructureEntry,
ManualEnergyAdjustment,
TemperatureEnergyAdjustment,
)
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.util.testing import PymatgenTest
# Module-level fixture: parse the reference vasprun.xml once at import time;
# the entry tests below reuse its final structure, energy and INCAR.
filepath = os.path.join(PymatgenTest.TEST_FILES_DIR, "vasprun.xml")
vasprun = Vasprun(filepath)
def test_energyadjustment():
    """Round-trip a bare EnergyAdjustment through its dict serialization."""
    adjustment = EnergyAdjustment(10)
    assert adjustment.name == "Manual adjustment"
    assert adjustment.cls == {}
    serialized = adjustment.as_dict()
    rebuilt = EnergyAdjustment.from_dict(serialized)
    assert str(serialized) == str(rebuilt.as_dict())
def test_manual_energy_adjustment():
    """Check ManualEnergyAdjustment's metadata and dict round-trip."""
    adjustment = ManualEnergyAdjustment(10)
    assert adjustment.name == "Manual energy adjustment"
    assert adjustment.value == 10
    assert adjustment.explain == "Manual energy adjustment (10.000 eV)"
    serialized = adjustment.as_dict()
    rebuilt = ManualEnergyAdjustment.from_dict(serialized)
    assert str(serialized) == str(rebuilt.as_dict())
def test_constant_energy_adjustment():
    """Check ConstantEnergyAdjustment's metadata and dict round-trip."""
    adjustment = ConstantEnergyAdjustment(8)
    assert adjustment.name == "Constant energy adjustment"
    assert adjustment.value == 8
    assert adjustment.explain == "Constant energy adjustment (8.000 eV)"
    serialized = adjustment.as_dict()
    rebuilt = ConstantEnergyAdjustment.from_dict(serialized)
    assert str(serialized) == str(rebuilt.as_dict())
def test_composition_energy_adjustment():
    """CompositionEnergyAdjustment: value = eV/atom x n_atoms; dict round-trip."""
    adjustment = CompositionEnergyAdjustment(2, 2, uncertainty_per_atom=0, name="H")
    assert adjustment.name == "H"
    assert adjustment.value == 4
    assert adjustment.explain == "Composition-based energy adjustment (2.000 eV/atom x 2 atoms)"
    serialized = adjustment.as_dict()
    rebuilt = CompositionEnergyAdjustment.from_dict(serialized)
    assert str(serialized) == str(rebuilt.as_dict())
def test_temp_energy_adjustment():
    """TemperatureEnergyAdjustment: value = eV/K/atom x T x n_atoms; dict round-trip."""
    adjustment = TemperatureEnergyAdjustment(-0.1, 298, 5, uncertainty_per_deg=0, name="entropy")
    assert adjustment.name == "entropy"
    assert adjustment.value == -0.1 * 298 * 5
    assert adjustment.n_atoms == 5
    assert adjustment.temp == 298
    assert adjustment.explain == "Temperature-based energy adjustment (-0.1000 eV/K/atom x 298 K x 5 atoms)"
    serialized = adjustment.as_dict()
    rebuilt = TemperatureEnergyAdjustment.from_dict(serialized)
    assert str(serialized) == str(rebuilt.as_dict())
class ComputedEntryTest(unittest.TestCase):
    """Tests for ComputedEntry: energies, corrections/adjustments,
    normalization and (de)serialization. Reuses the module-level vasprun
    fixture for a realistic entry."""

    def setUp(self):
        # entry: realistic entry from the parsed vasprun fixture
        self.entry = ComputedEntry(
            vasprun.final_structure.composition,
            vasprun.final_energy,
            parameters=vasprun.incar,
        )
        # entries 2/3: same reduced formula via dict vs formula string
        self.entry2 = ComputedEntry({"Fe": 2, "O": 3}, 2.3)
        self.entry3 = ComputedEntry("Fe2O3", 2.3)
        self.entry4 = ComputedEntry("Fe2O3", 2.3, entry_id=1)
        self.entry5 = ComputedEntry("Fe6O9", 6.9)
        ea = ConstantEnergyAdjustment(-5, name="Dummy adjustment")
        # entry6 vs entry7: legacy `correction` kwarg vs explicit adjustment list
        self.entry6 = ComputedEntry("Fe6O9", 6.9, correction=-10)
        self.entry7 = ComputedEntry("Fe6O9", 6.9, energy_adjustments=[ea])

    def test_energy(self):
        """Setting .correction shifts .energy by exactly that amount."""
        self.assertAlmostEqual(self.entry.energy, -269.38319884)
        self.entry.correction = 1.0
        self.assertAlmostEqual(self.entry.energy, -268.38319884)
        self.assertAlmostEqual(self.entry3.energy_per_atom, 2.3 / 5)

    def test_composition(self):
        """Reduced formulas agree regardless of how the composition was given."""
        self.assertEqual(self.entry.composition.reduced_formula, "LiFe4(PO4)4")
        self.assertEqual(self.entry2.composition.reduced_formula, "Fe2O3")
        self.assertEqual(self.entry5.composition.reduced_formula, "Fe2O3")
        self.assertEqual(self.entry5.composition.get_reduced_formula_and_factor()[1], 3)

    def test_per_atom_props(self):
        """All *_per_atom properties divide by the 15-atom cell consistently."""
        entry = ComputedEntry("Fe6O9", 6.9)
        entry.energy_adjustments.append(CompositionEnergyAdjustment(-0.5, 9, uncertainty_per_atom=0.1, name="O"))
        self.assertAlmostEqual(entry.energy, 2.4)
        self.assertAlmostEqual(entry.energy_per_atom, 2.4 / 15)
        self.assertAlmostEqual(entry.uncorrected_energy, 6.9)
        self.assertAlmostEqual(entry.uncorrected_energy_per_atom, 6.9 / 15)
        self.assertAlmostEqual(entry.correction, -4.5)
        self.assertAlmostEqual(entry.correction_per_atom, -4.5 / 15)
        self.assertAlmostEqual(entry.correction_uncertainty, 0.9)
        self.assertAlmostEqual(entry.correction_uncertainty_per_atom, 0.9 / 15)

    def test_normalize(self):
        """normalize() scales energy, correction and adjustments together."""
        entry = ComputedEntry("Fe6O9", 6.9, correction=1)
        entry.normalize()
        self.assertEqual(entry.composition.formula, "Fe2 O3")
        self.assertAlmostEqual(entry.uncorrected_energy, 6.9 / 3)
        self.assertAlmostEqual(entry.correction, 1 / 3)
        self.assertAlmostEqual(entry.energy * 3, 6.9 + 1)
        self.assertAlmostEqual(entry.energy_adjustments[0].value, 1 / 3)
        entry.normalize("atom")
        self.assertEqual(entry.composition.formula, "Fe0.4 O0.6")
        self.assertAlmostEqual(entry.uncorrected_energy, 6.9 / 15)
        self.assertAlmostEqual(entry.correction, 1 / 15)
        self.assertAlmostEqual(entry.energy * 15, 6.9 + 1)
        self.assertAlmostEqual(entry.energy_adjustments[0].value, 1 / 15)

    def test_normalize_energy_adjustments(self):
        """Every kind of adjustment is divided by the formula-unit factor (5)."""
        ealist = [
            ManualEnergyAdjustment(5),
            ConstantEnergyAdjustment(5),
            CompositionEnergyAdjustment(1, 5, uncertainty_per_atom=0, name="Na"),
            TemperatureEnergyAdjustment(0.005, 100, 10, uncertainty_per_deg=0),
        ]
        entry = ComputedEntry("Na5Cl5", 6.9, energy_adjustments=ealist)
        assert entry.correction == 20
        entry.normalize()
        assert entry.correction == 4
        for ea in entry.energy_adjustments:
            assert ea.value == 1

    def test_normalize_not_in_place(self):
        """normalize(inplace=False) returns the same entry a later in-place call yields."""
        ealist = [
            ManualEnergyAdjustment(5),
            ConstantEnergyAdjustment(5),
            CompositionEnergyAdjustment(1, 5, uncertainty_per_atom=0, name="Na"),
            TemperatureEnergyAdjustment(0.005, 100, 10, uncertainty_per_deg=0),
        ]
        entry = ComputedEntry("Na5Cl5", 6.9, energy_adjustments=ealist)
        normed_entry = entry.normalize(inplace=False)
        entry.normalize()
        self.assertEqual(normed_entry, entry)

    def test_to_from_dict(self):
        """as_dict/from_dict round-trip preserves equality and energy."""
        d = self.entry.as_dict()
        e = ComputedEntry.from_dict(d)
        self.assertEqual(self.entry, e)
        self.assertAlmostEqual(e.energy, -269.38319884)

    def test_to_from_dict_with_adjustment(self):
        """
        Legacy case where adjustment was provided manually
        """
        d = self.entry6.as_dict()
        e = ComputedEntry.from_dict(d)
        self.assertAlmostEqual(e.uncorrected_energy, 6.9)
        self.assertEqual(e.energy_adjustments[0].value, self.entry6.energy_adjustments[0].value)

    def test_to_from_dict_with_adjustment_2(self):
        """
        Modern case where correction was provided manually
        """
        d = self.entry7.as_dict()
        e = ComputedEntry.from_dict(d)
        self.assertAlmostEqual(e.uncorrected_energy, 6.9)
        self.assertEqual(e.energy_adjustments[0].value, self.entry7.energy_adjustments[0].value)

    def test_to_from_dict_with_adjustment_3(self):
        """
        Legacy case where the entry was serialized before the energy_adjustment
        attribute was part of ComputedEntry
        """
        # same as entry6
        d = {
            "@module": "pymatgen.entries.computed_entries",
            "@class": "ComputedEntry",
            "energy": 6.9,
            "composition": defaultdict(float, {"Fe": 6.0, "O": 9.0}),
            "parameters": {},
            "data": {},
            "entry_id": None,
            "correction": -10,
        }
        e = ComputedEntry.from_dict(d)
        self.assertAlmostEqual(e.uncorrected_energy, 6.9)
        self.assertAlmostEqual(e.correction, -10)
        assert len(e.energy_adjustments) == 1

    def test_conflicting_correction_adjustment(self):
        """
        Should raise a ValueError if a user tries to manually set both the correction
        and energy_adjustment, even if the values match.
        """
        ea = ConstantEnergyAdjustment(-10, name="Dummy adjustment")
        with pytest.raises(ValueError, match="Argument conflict!"):
            ComputedEntry("Fe6O9", 6.9, correction=-10, energy_adjustments=[ea])

    def test_entry_id(self):
        """entry_id is stored when given and defaults to None."""
        self.assertEqual(self.entry4.entry_id, 1)
        self.assertEqual(self.entry2.entry_id, None)

    def test_str(self):
        """str() of an entry is defined (smoke test)."""
        self.assertIsNotNone(str(self.entry))

    def test_sulfide_energy(self):
        """Correction shifts a simple BaS entry's energy as expected."""
        self.entry = ComputedEntry("BaS", -10.21249155)
        self.assertAlmostEqual(self.entry.energy, -10.21249155)
        self.assertAlmostEqual(self.entry.energy_per_atom, -10.21249155 / 2)
        self.entry.correction = 1.0
        self.assertAlmostEqual(self.entry.energy, -9.21249155)

    def test_is_element(self):
        """A single-element composition reports is_element == True."""
        entry = ComputedEntry("Fe3", 2.3)
        self.assertTrue(entry.is_element)
class ComputedStructureEntryTest(unittest.TestCase):
    """Tests for ComputedStructureEntry built from the module-level vasprun fixture."""

    def setUp(self):
        # Entry carries the final structure/energy of the shared vasprun
        # fixture (a LiFePO4 run, judging by the assertions below).
        self.entry = ComputedStructureEntry(vasprun.final_structure, vasprun.final_energy, parameters=vasprun.incar)

    def test_energy(self):
        """Uncorrected energy, then the effect of setting a correction."""
        self.assertAlmostEqual(self.entry.energy, -269.38319884)
        self.entry.correction = 1.0
        self.assertAlmostEqual(self.entry.energy, -268.38319884)

    def test_composition(self):
        """Reduced formula should match the fixture's composition."""
        self.assertEqual(self.entry.composition.reduced_formula, "LiFe4(PO4)4")

    def test_to_from_dict(self):
        """Round-trip through as_dict/from_dict preserves equality and energy."""
        d = self.entry.as_dict()
        e = ComputedStructureEntry.from_dict(d)
        self.assertEqual(self.entry, e)
        self.assertAlmostEqual(e.energy, -269.38319884)

    def test_str(self):
        """str() of the entry should produce a usable representation."""
        self.assertIsNotNone(str(self.entry))

    def test_to_from_dict_structure_with_adjustment_3(self):
        """
        Legacy case where the structure entry was serialized before the energy_adjustment
        attribute was part of ComputedEntry
        """
        # ComputedStructureEntry for Oxygen, mp-12957, as of April 2020
        # with an arbitrary 1 eV correction added
        d = {
            "@module": "pymatgen.entries.computed_entries",
            "@class": "ComputedStructureEntry",
            "energy": -39.42116819,
            "composition": defaultdict(float, {"O": 8.0}),
            "parameters": {
                "run_type": "GGA",
                "is_hubbard": False,
                "pseudo_potential": {
                    "functional": "PBE",
                    "labels": ["O"],
                    "pot_type": "paw",
                },
                "hubbards": {},
                "potcar_symbols": ["PBE O"],
                "oxide_type": "None",
            },
            "data": {"oxide_type": "None"},
            "entry_id": "mp-12957",
            "correction": 1,
            "structure": {
                "@module": "pymatgen.core.structure",
                "@class": "Structure",
                "charge": None,
                "lattice": {
                    "matrix": [
                        [-1.7795583, 0.0, 3.86158265],
                        [4.17564656, -3.03266995, -0.01184798],
                        [4.17564656, 3.03266995, -0.01184798],
                    ],
                    "a": 4.251899376264673,
                    "b": 5.160741380296335,
                    "c": 5.160741380296335,
                    "alpha": 71.97975354157973,
                    "beta": 109.9211782454931,
                    "gamma": 109.9211782454931,
                    "volume": 97.67332322031668,
                },
                "sites": [
                    {
                        "species": [{"element": "O", "occu": 1}],
                        "abc": [0.8531272, 0.15466029, 0.15466029],
                        "xyz": [
                            -0.22657617390155504,
                            -1.750215367360042e-17,
                            3.2907563697176516,
                        ],
                        "label": "O",
                        "properties": {"magmom": 0.002},
                    },
                    {
                        "species": [{"element": "O", "occu": 1}],
                        "abc": [0.84038763, 0.71790132, 0.21754949],
                        "xyz": [
                            2.410593174641884,
                            -1.5174019592685084,
                            3.234143088794756,
                        ],
                        "label": "O",
                        "properties": {"magmom": -0.002},
                    },
                    {
                        "species": [{"element": "O", "occu": 1}],
                        "abc": [0.17255465, 0.21942628, 0.21942628],
                        "xyz": [
                            1.5254221229000986,
                            -2.121360826524921e-18,
                            0.6611345262629937,
                        ],
                        "label": "O",
                        "properties": {"magmom": 0.002},
                    },
                    {
                        "species": [{"element": "O", "occu": 1}],
                        "abc": [0.15961237, 0.78245051, 0.28209968],
                        "xyz": [
                            4.161145821004675,
                            -1.5173989265985586,
                            0.6037435893572642,
                        ],
                        "label": "O",
                        "properties": {"magmom": -0.002},
                    },
                    {
                        "species": [{"element": "O", "occu": 1}],
                        "abc": [0.84038763, 0.21754949, 0.71790132],
                        "xyz": [
                            2.410593174641884,
                            1.5174019592685082,
                            3.234143088794756,
                        ],
                        "label": "O",
                        "properties": {"magmom": -0.002},
                    },
                    {
                        "species": [{"element": "O", "occu": 1}],
                        "abc": [0.82744535, 0.78057372, 0.78057372],
                        "xyz": [
                            5.046312697099901,
                            -1.3574974398403584e-16,
                            3.176752163737006,
                        ],
                        "label": "O",
                        "properties": {"magmom": 0.002},
                    },
                    {
                        "species": [{"element": "O", "occu": 1}],
                        "abc": [0.15961237, 0.28209968, 0.78245051],
                        "xyz": [
                            4.161145821004675,
                            1.5173989265985584,
                            0.6037435893572642,
                        ],
                        "label": "O",
                        "properties": {"magmom": -0.002},
                    },
                    {
                        "species": [{"element": "O", "occu": 1}],
                        "abc": [0.1468728, 0.84533971, 0.84533971],
                        "xyz": [
                            6.798310993901555,
                            -1.7769364890338579e-16,
                            0.5471303202823484,
                        ],
                        "label": "O",
                        "properties": {"magmom": 0.002},
                    },
                ],
            },
        }
        e = ComputedEntry.from_dict(d)
        # Deserialization must convert the legacy scalar "correction" field
        # into exactly one energy adjustment on the entry.
        self.assertAlmostEqual(e.uncorrected_energy, -39.42116819)
        self.assertAlmostEqual(e.energy, -38.42116819)
        self.assertAlmostEqual(e.correction, 1)
        assert len(e.energy_adjustments) == 1
class GibbsComputedStructureEntryTest(unittest.TestCase):
    """Tests for GibbsComputedStructureEntry (temperature-dependent Gibbs energies)."""

    def setUp(self):
        # Construction currently emits a FutureWarning about
        # MaterialsProjectCompatibility; the whole fixture setup runs inside
        # pytest.warns so the warning is both asserted and silenced.
        with pytest.warns(FutureWarning, match="MaterialsProjectCompatibility will be updated"):
            self.temps = [300, 600, 900, 1200, 1500, 1800]
            self.struct = vasprun.final_structure
            self.num_atoms = self.struct.composition.num_atoms
            # One entry per temperature, all sharing the same formation
            # energy of -2.436 eV/atom.
            self.temp_entries = {
                temp: GibbsComputedStructureEntry(
                    self.struct,
                    -2.436 * self.num_atoms,
                    temp=temp,
                    parameters=vasprun.incar,
                    entry_id="test",
                )
                for temp in self.temps
            }
            with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "Mn-O_entries.json"), "r") as f:
                data = json.load(f)
            self.mp_entries = [MontyDecoder().process_decoded(d) for d in data]

    def test_gf_sisso(self):
        """Gibbs energies at the reference temperatures match the SISSO model."""
        energies = {
            300: -56.21273010866969,
            600: -51.52997063074788,
            900: -47.29888391585979,
            1200: -42.942338738866304,
            1500: -37.793417248809774,
            1800: -32.32513382051749,
        }
        for t in self.temps:
            self.assertAlmostEqual(self.temp_entries[t].energy, energies[t])

    def test_interpolation(self):
        """A temperature between reference points is interpolated."""
        temp = 450
        e = GibbsComputedStructureEntry(self.struct, -2.436 * self.num_atoms, temp=temp)
        self.assertAlmostEqual(e.energy, -53.7243542548528)

    def test_from_entries(self):
        """Conversion from plain MP entries should succeed."""
        gibbs_entries = GibbsComputedStructureEntry.from_entries(self.mp_entries)
        self.assertIsNotNone(gibbs_entries)

    def test_from_pd(self):
        """Conversion from a PhaseDiagram should succeed."""
        pd = PhaseDiagram(self.mp_entries)
        gibbs_entries = GibbsComputedStructureEntry.from_pd(pd)
        self.assertIsNotNone(gibbs_entries)

    def test_to_from_dict(self):
        """Round-trip through as_dict/from_dict preserves equality and energy."""
        test_entry = self.temp_entries[300]
        d = test_entry.as_dict()
        e = GibbsComputedStructureEntry.from_dict(d)
        self.assertEqual(test_entry, e)
        self.assertAlmostEqual(e.energy, test_entry.energy)

    def test_str(self):
        """str() of an entry should produce a usable representation."""
        self.assertIsNotNone(str(self.temp_entries[300]))

    def test_normalize(self):
        """Per-atom normalization scales the Gibbs correction accordingly."""
        for e in self.temp_entries.values():
            entry = copy.deepcopy(e)
            test = entry.normalize(mode="atom", inplace=False)
            # Factor 25 — presumably the fixture structure contains 25 atoms,
            # so the per-atom correction is 1/25 of the total; TODO confirm.
            self.assertAlmostEqual(entry.gibbs_correction, test.gibbs_correction * 25, 11)
            entry.normalize(mode="atom")
            self.assertEqual(entry.gibbs_correction, test.gibbs_correction)
if __name__ == "__main__":
    # Allow running this test module directly; removed stale commented-out
    # argv-override snippet that served no purpose.
    unittest.main()
| |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import testtools
import mock
from oslo_utils import timeutils
from neutron.agent import l2population_rpc
from neutron.common import constants
from neutron.common import topics
from neutron import context
from neutron.db import agents_db
from neutron.extensions import portbindings
from neutron.extensions import providernet as pnet
from neutron import manager
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.plugins.ml2 import driver_context
from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db
from neutron.plugins.ml2.drivers.l2pop import mech_driver as l2pop_mech_driver
from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc
from neutron.plugins.ml2 import managers
from neutron.plugins.ml2 import rpc
from neutron.tests import base
from neutron.tests.unit.plugins.ml2 import test_plugin
# Hostname shared by all fake agent fixtures below.
HOST = 'my_l2_host'

# OVS agent with vxlan tunneling enabled (tunnel endpoint 20.0.0.1).
L2_AGENT = {
    'binary': 'neutron-openvswitch-agent',
    'host': HOST,
    'topic': constants.L2_AGENT_TOPIC,
    'configurations': {'tunneling_ip': '20.0.0.1',
                       'tunnel_types': ['vxlan']},
    'agent_type': constants.AGENT_TYPE_OVS,
    'tunnel_type': [],
    'start_flag': True
}

# Second vxlan-capable OVS agent on a different host/endpoint.
L2_AGENT_2 = {
    'binary': 'neutron-openvswitch-agent',
    'host': HOST + '_2',
    'topic': constants.L2_AGENT_TOPIC,
    'configurations': {'tunneling_ip': '20.0.0.2',
                       'tunnel_types': ['vxlan']},
    'agent_type': constants.AGENT_TYPE_OVS,
    'tunnel_type': [],
    'start_flag': True
}

# OVS agent with NO tunnel types — used for the "local network type" cases.
L2_AGENT_3 = {
    'binary': 'neutron-openvswitch-agent',
    'host': HOST + '_3',
    'topic': constants.L2_AGENT_TOPIC,
    'configurations': {'tunneling_ip': '20.0.0.3',
                       'tunnel_types': []},
    'agent_type': constants.AGENT_TYPE_OVS,
    'tunnel_type': [],
    'start_flag': True
}

# Third vxlan-capable OVS agent, used by the host-migration tests.
L2_AGENT_4 = {
    'binary': 'neutron-openvswitch-agent',
    'host': HOST + '_4',
    'topic': constants.L2_AGENT_TOPIC,
    'configurations': {'tunneling_ip': '20.0.0.4',
                       'tunnel_types': ['vxlan']},
    'agent_type': constants.AGENT_TYPE_OVS,
    'tunnel_type': [],
    'start_flag': True
}

# Fake (non-OVS) agent advertising l2pop support for vlan networks via
# interface mappings rather than tunnels.
L2_AGENT_5 = {
    'binary': 'neutron-fake-agent',
    'host': HOST + '_5',
    'topic': constants.L2_AGENT_TOPIC,
    'configurations': {'tunneling_ip': '20.0.0.5',
                       'tunnel_types': [],
                       'interface_mappings': {'physnet1': 'eth9'},
                       'l2pop_network_types': ['vlan']},
    # NOTE(yamamoto): mech_fake_agent has a comment to explain why
    # OFA is used here.
    'agent_type': constants.AGENT_TYPE_OFA,
    'tunnel_type': [],
    'start_flag': True
}

# Dotted path of the agent notifier API, patched out in setUp.
NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi'
DEVICE_OWNER_COMPUTE = 'compute:None'
class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase):
    """RPC-level tests for the l2population ML2 mechanism driver.

    The fanout/host notification methods of L2populationAgentNotifyAPI are
    mocked out so each test can assert exactly which FDB add/remove/update
    messages would have been sent to the agents.
    """

    _mechanism_drivers = ['openvswitch', 'fake_agent', 'l2population']

    def setUp(self):
        """Create the test networks and stub out the l2pop notification RPC."""
        super(TestL2PopulationRpcTestCase, self).setUp()
        self.adminContext = context.get_admin_context()
        self.type_manager = managers.TypeManager()
        self.notifier = rpc.AgentNotifierApi(topics.AGENT)
        self.callbacks = rpc.RpcCallbacks(self.notifier, self.type_manager)

        # net1: vxlan network served by the tunneling agents.
        net_arg = {pnet.NETWORK_TYPE: 'vxlan',
                   pnet.SEGMENTATION_ID: '1'}
        self._network = self._make_network(self.fmt, 'net1', True,
                                           arg_list=(pnet.NETWORK_TYPE,
                                                     pnet.SEGMENTATION_ID,),
                                           **net_arg)

        # net2: vlan network matched by agent 5's l2pop_network_types.
        net_arg = {pnet.NETWORK_TYPE: 'vlan',
                   pnet.PHYSICAL_NETWORK: 'physnet1',
                   pnet.SEGMENTATION_ID: '2'}
        self._network2 = self._make_network(self.fmt, 'net2', True,
                                            arg_list=(pnet.NETWORK_TYPE,
                                                      pnet.PHYSICAL_NETWORK,
                                                      pnet.SEGMENTATION_ID,),
                                            **net_arg)

        # net3: flat network on a physnet with no agent attached.
        net_arg = {pnet.NETWORK_TYPE: 'flat',
                   pnet.PHYSICAL_NETWORK: 'noagent'}
        self._network3 = self._make_network(self.fmt, 'net3', True,
                                            arg_list=(pnet.NETWORK_TYPE,
                                                      pnet.PHYSICAL_NETWORK,),
                                            **net_arg)

        notifier_patch = mock.patch(NOTIFIER)
        notifier_patch.start()

        self.fanout_topic = topics.get_topic_name(topics.AGENT,
                                                  topics.L2POPULATION,
                                                  topics.UPDATE)

        # Capture fanout (all agents) and host-directed l2pop notifications.
        fanout = ('neutron.plugins.ml2.drivers.l2pop.rpc.'
                  'L2populationAgentNotifyAPI._notification_fanout')
        fanout_patch = mock.patch(fanout)
        self.mock_fanout = fanout_patch.start()

        cast = ('neutron.plugins.ml2.drivers.l2pop.rpc.'
                'L2populationAgentNotifyAPI._notification_host')
        cast_patch = mock.patch(cast)
        self.mock_cast = cast_patch.start()

        # Fixed uptime so agent-restart detection behaves deterministically.
        uptime = ('neutron.plugins.ml2.drivers.l2pop.db.L2populationDbMixin.'
                  'get_agent_uptime')
        uptime_patch = mock.patch(uptime, return_value=190)
        uptime_patch.start()

    def _register_ml2_agents(self):
        """Report state for the five fake agents so they exist in the DB."""
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': L2_AGENT},
                              time=timeutils.strtime())
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': L2_AGENT_2},
                              time=timeutils.strtime())
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': L2_AGENT_3},
                              time=timeutils.strtime())
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': L2_AGENT_4},
                              time=timeutils.strtime())
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': L2_AGENT_5},
                              time=timeutils.strtime())

    def test_port_info_compare(self):
        # An assumption the code makes is that PortInfo compares equal to
        # equivalent regular tuples.
        self.assertEqual(("mac", "ip"), l2pop_rpc.PortInfo("mac", "ip"))

        flooding_entry = l2pop_rpc.PortInfo(*constants.FLOODING_ENTRY)
        self.assertEqual(constants.FLOODING_ENTRY, flooding_entry)

    def test__unmarshall_fdb_entries(self):
        """Lists received over RPC are converted back into PortInfo tuples."""
        entries = {'foouuid': {
            'segment_id': 1001,
            'ports': {'192.168.0.10': [['00:00:00:00:00:00', '0.0.0.0'],
                                       ['fa:16:3e:ff:8c:0f', '10.0.0.6']]},
            'network_type': 'vxlan'}}

        mixin = l2population_rpc.L2populationRpcCallBackMixin
        entries = mixin._unmarshall_fdb_entries(entries)

        port_info_list = entries['foouuid']['ports']['192.168.0.10']
        # Check that the lists have been properly converted to PortInfo
        self.assertIsInstance(port_info_list[0], l2pop_rpc.PortInfo)
        self.assertIsInstance(port_info_list[1], l2pop_rpc.PortInfo)
        self.assertEqual(('00:00:00:00:00:00', '0.0.0.0'), port_info_list[0])
        self.assertEqual(('fa:16:3e:ff:8c:0f', '10.0.0.6'), port_info_list[1])

    def test__marshall_fdb_entries(self):
        """PortInfo tuples are converted to plain lists before being sent."""
        entries = {'foouuid': {
            'segment_id': 1001,
            'ports': {'192.168.0.10': [('00:00:00:00:00:00', '0.0.0.0'),
                                       ('fa:16:3e:ff:8c:0f', '10.0.0.6')]},
            'network_type': 'vxlan'}}

        entries = l2pop_rpc.L2populationAgentNotifyAPI._marshall_fdb_entries(
            entries)

        port_info_list = entries['foouuid']['ports']['192.168.0.10']
        # Check that the PortInfo tuples have been converted to list
        self.assertIsInstance(port_info_list[0], list)
        self.assertIsInstance(port_info_list[1], list)
        self.assertEqual(['00:00:00:00:00:00', '0.0.0.0'], port_info_list[0])
        self.assertEqual(['fa:16:3e:ff:8c:0f', '10.0.0.6'], port_info_list[1])

    def test_fdb_add_called(self):
        """Bringing up a port on a vxlan network fans out an add_fdb_entries."""
        self._register_ml2_agents()

        with self.subnet(network=self._network) as subnet:
            host_arg = {portbindings.HOST_ID: HOST}
            with self.port(subnet=subnet,
                           device_owner=DEVICE_OWNER_COMPUTE,
                           arg_list=(portbindings.HOST_ID,),
                           **host_arg) as port1:
                with self.port(subnet=subnet,
                               arg_list=(portbindings.HOST_ID,),
                               **host_arg):
                    p1 = port1['port']

                    device = 'tap' + p1['id']

                    self.mock_fanout.reset_mock()
                    self.callbacks.update_device_up(self.adminContext,
                                                    agent_id=HOST,
                                                    device=device)

                    p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
                    expected = {p1['network_id']:
                                {'ports':
                                 {'20.0.0.1': [constants.FLOODING_ENTRY,
                                               l2pop_rpc.PortInfo(
                                                   p1['mac_address'],
                                                   p1_ips[0])]},
                                 'network_type': 'vxlan',
                                 'segment_id': 1}}

                    self.mock_fanout.assert_called_with(
                        mock.ANY, 'add_fdb_entries', expected)

    def test_fdb_add_not_called_type_local(self):
        """No FDB fanout for an agent without tunnel types (local traffic)."""
        self._register_ml2_agents()

        with self.subnet(network=self._network) as subnet:
            host_arg = {portbindings.HOST_ID: HOST + '_3'}
            with self.port(subnet=subnet,
                           arg_list=(portbindings.HOST_ID,),
                           **host_arg) as port1:
                with self.port(subnet=subnet,
                               arg_list=(portbindings.HOST_ID,),
                               **host_arg):
                    p1 = port1['port']

                    device = 'tap' + p1['id']

                    self.mock_fanout.reset_mock()
                    self.callbacks.update_device_up(self.adminContext,
                                                    agent_id=HOST,
                                                    device=device)

                    self.assertFalse(self.mock_fanout.called)

    def test_fdb_add_called_for_l2pop_network_types(self):
        """vlan networks listed in l2pop_network_types also trigger FDB adds."""
        self._register_ml2_agents()

        host = HOST + '_5'
        with self.subnet(network=self._network2) as subnet:
            host_arg = {portbindings.HOST_ID: host}
            with self.port(subnet=subnet,
                           device_owner=DEVICE_OWNER_COMPUTE,
                           arg_list=(portbindings.HOST_ID,),
                           **host_arg) as port1:
                with self.port(subnet=subnet,
                               arg_list=(portbindings.HOST_ID,),
                               **host_arg):
                    p1 = port1['port']

                    device = 'tap' + p1['id']

                    self.mock_fanout.reset_mock()
                    self.callbacks.update_device_up(self.adminContext,
                                                    agent_id=host,
                                                    device=device)

                    p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
                    expected = {p1['network_id']:
                                {'ports':
                                 {'20.0.0.5': [constants.FLOODING_ENTRY,
                                               l2pop_rpc.PortInfo(
                                                   p1['mac_address'],
                                                   p1_ips[0])]},
                                 'network_type': 'vlan',
                                 'segment_id': 2}}

                    self.mock_fanout.assert_called_with(
                        mock.ANY, 'add_fdb_entries', expected)

    def test_fdb_called_for_active_ports(self):
        """Only ACTIVE ports generate notifications; no host-directed cast."""
        self._register_ml2_agents()

        with self.subnet(network=self._network) as subnet:
            host_arg = {portbindings.HOST_ID: HOST}
            with self.port(subnet=subnet,
                           device_owner=DEVICE_OWNER_COMPUTE,
                           arg_list=(portbindings.HOST_ID,),
                           **host_arg) as port1:
                host_arg = {portbindings.HOST_ID: HOST + '_2'}
                with self.port(subnet=subnet,
                               device_owner=DEVICE_OWNER_COMPUTE,
                               arg_list=(portbindings.HOST_ID,),
                               **host_arg):
                    p1 = port1['port']

                    device1 = 'tap' + p1['id']

                    self.mock_cast.reset_mock()
                    self.mock_fanout.reset_mock()
                    self.callbacks.update_device_up(self.adminContext,
                                                    agent_id=HOST,
                                                    device=device1)

                    p1_ips = [p['ip_address'] for p in p1['fixed_ips']]

                    self.assertFalse(self.mock_cast.called)

                    expected2 = {p1['network_id']:
                                 {'ports':
                                  {'20.0.0.1': [constants.FLOODING_ENTRY,
                                                l2pop_rpc.PortInfo(
                                                    p1['mac_address'],
                                                    p1_ips[0])]},
                                  'network_type': 'vxlan',
                                  'segment_id': 1}}

                    self.mock_fanout.assert_called_with(
                        mock.ANY, 'add_fdb_entries', expected2)

    def test_fdb_add_two_agents(self):
        """Second port up: existing entries cast to the new host, new entry fanned out."""
        self._register_ml2_agents()

        with self.subnet(network=self._network) as subnet:
            host_arg = {portbindings.HOST_ID: HOST,
                        'admin_state_up': True}
            with self.port(subnet=subnet,
                           device_owner=DEVICE_OWNER_COMPUTE,
                           arg_list=(portbindings.HOST_ID, 'admin_state_up',),
                           **host_arg) as port1:
                host_arg = {portbindings.HOST_ID: HOST + '_2',
                            'admin_state_up': True}
                with self.port(subnet=subnet,
                               device_owner=DEVICE_OWNER_COMPUTE,
                               arg_list=(portbindings.HOST_ID,
                                         'admin_state_up',),
                               **host_arg) as port2:
                    p1 = port1['port']
                    p2 = port2['port']

                    device1 = 'tap' + p1['id']
                    device2 = 'tap' + p2['id']

                    self.mock_cast.reset_mock()
                    self.mock_fanout.reset_mock()
                    self.callbacks.update_device_up(self.adminContext,
                                                    agent_id=HOST + '_2',
                                                    device=device2)
                    self.callbacks.update_device_up(self.adminContext,
                                                    agent_id=HOST,
                                                    device=device1)

                    p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
                    p2_ips = [p['ip_address'] for p in p2['fixed_ips']]

                    expected1 = {p1['network_id']:
                                 {'ports':
                                  {'20.0.0.2': [constants.FLOODING_ENTRY,
                                                l2pop_rpc.PortInfo(
                                                    p2['mac_address'],
                                                    p2_ips[0])]},
                                  'network_type': 'vxlan',
                                  'segment_id': 1}}

                    self.mock_cast.assert_called_with(mock.ANY,
                                                      'add_fdb_entries',
                                                      expected1, HOST)

                    expected2 = {p1['network_id']:
                                 {'ports':
                                  {'20.0.0.1': [constants.FLOODING_ENTRY,
                                                l2pop_rpc.PortInfo(
                                                    p1['mac_address'],
                                                    p1_ips[0])]},
                                  'network_type': 'vxlan',
                                  'segment_id': 1}}

                    self.mock_fanout.assert_called_with(
                        mock.ANY, 'add_fdb_entries', expected2)

    def test_fdb_add_called_two_networks(self):
        """FDB updates are scoped per network even with ports on several."""
        self._register_ml2_agents()

        with self.subnet(network=self._network) as subnet:
            host_arg = {portbindings.HOST_ID: HOST + '_2'}
            with self.port(subnet=subnet,
                           device_owner=DEVICE_OWNER_COMPUTE,
                           arg_list=(portbindings.HOST_ID,),
                           **host_arg) as port1:
                with self.subnet(cidr='10.1.0.0/24') as subnet2:
                    with self.port(subnet=subnet2,
                                   device_owner=DEVICE_OWNER_COMPUTE,
                                   arg_list=(portbindings.HOST_ID,),
                                   **host_arg):
                        host_arg = {portbindings.HOST_ID: HOST}
                        with self.port(subnet=subnet,
                                       device_owner=DEVICE_OWNER_COMPUTE,
                                       arg_list=(portbindings.HOST_ID,),
                                       **host_arg) as port3:
                            p1 = port1['port']
                            p3 = port3['port']

                            device1 = 'tap' + p1['id']
                            device3 = 'tap' + p3['id']

                            self.mock_cast.reset_mock()
                            self.mock_fanout.reset_mock()
                            self.callbacks.update_device_up(
                                self.adminContext, agent_id=HOST + '_2',
                                device=device1)
                            self.callbacks.update_device_up(
                                self.adminContext, agent_id=HOST,
                                device=device3)

                            p1_ips = [p['ip_address']
                                      for p in p1['fixed_ips']]
                            expected1 = {p1['network_id']:
                                         {'ports':
                                          {'20.0.0.2':
                                           [constants.FLOODING_ENTRY,
                                            l2pop_rpc.PortInfo(
                                                p1['mac_address'],
                                                p1_ips[0])]},
                                          'network_type': 'vxlan',
                                          'segment_id': 1}}

                            self.mock_cast.assert_called_with(
                                mock.ANY, 'add_fdb_entries', expected1,
                                HOST)

                            p3_ips = [p['ip_address']
                                      for p in p3['fixed_ips']]
                            expected2 = {p1['network_id']:
                                         {'ports':
                                          {'20.0.0.1':
                                           [constants.FLOODING_ENTRY,
                                            l2pop_rpc.PortInfo(
                                                p3['mac_address'],
                                                p3_ips[0])]},
                                          'network_type': 'vxlan',
                                          'segment_id': 1}}

                            self.mock_fanout.assert_called_with(
                                mock.ANY, 'add_fdb_entries', expected2)

    def test_update_port_down(self):
        """Port down with siblings still up removes only that port's entry."""
        self._register_ml2_agents()

        with self.subnet(network=self._network) as subnet:
            host_arg = {portbindings.HOST_ID: HOST}
            with self.port(subnet=subnet,
                           device_owner=DEVICE_OWNER_COMPUTE,
                           arg_list=(portbindings.HOST_ID,),
                           **host_arg) as port1:
                with self.port(subnet=subnet,
                               device_owner=DEVICE_OWNER_COMPUTE,
                               arg_list=(portbindings.HOST_ID,),
                               **host_arg) as port2:
                    p2 = port2['port']
                    device2 = 'tap' + p2['id']

                    self.mock_fanout.reset_mock()
                    self.callbacks.update_device_up(self.adminContext,
                                                    agent_id=HOST,
                                                    device=device2)

                    p1 = port1['port']
                    device1 = 'tap' + p1['id']

                    self.callbacks.update_device_up(self.adminContext,
                                                    agent_id=HOST,
                                                    device=device1)
                    self.mock_fanout.reset_mock()
                    self.callbacks.update_device_down(self.adminContext,
                                                      agent_id=HOST,
                                                      device=device2)

                    p2_ips = [p['ip_address'] for p in p2['fixed_ips']]
                    expected = {p2['network_id']:
                                {'ports':
                                 {'20.0.0.1': [l2pop_rpc.PortInfo(
                                               p2['mac_address'],
                                               p2_ips[0])]},
                                 'network_type': 'vxlan',
                                 'segment_id': 1}}

                    self.mock_fanout.assert_called_with(
                        mock.ANY, 'remove_fdb_entries', expected)

    def test_update_port_down_last_port_up(self):
        """Last active port going down also removes the flooding entry."""
        self._register_ml2_agents()

        with self.subnet(network=self._network) as subnet:
            host_arg = {portbindings.HOST_ID: HOST}
            with self.port(subnet=subnet,
                           device_owner=DEVICE_OWNER_COMPUTE,
                           arg_list=(portbindings.HOST_ID,),
                           **host_arg):
                with self.port(subnet=subnet,
                               device_owner=DEVICE_OWNER_COMPUTE,
                               arg_list=(portbindings.HOST_ID,),
                               **host_arg) as port2:
                    p2 = port2['port']
                    device2 = 'tap' + p2['id']

                    self.mock_fanout.reset_mock()
                    self.callbacks.update_device_up(self.adminContext,
                                                    agent_id=HOST,
                                                    device=device2)
                    self.callbacks.update_device_down(self.adminContext,
                                                      agent_id=HOST,
                                                      device=device2)

                    p2_ips = [p['ip_address'] for p in p2['fixed_ips']]
                    expected = {p2['network_id']:
                                {'ports':
                                 {'20.0.0.1': [constants.FLOODING_ENTRY,
                                               l2pop_rpc.PortInfo(
                                                   p2['mac_address'],
                                                   p2_ips[0])]},
                                 'network_type': 'vxlan',
                                 'segment_id': 1}}

                    self.mock_fanout.assert_called_with(
                        mock.ANY, 'remove_fdb_entries', expected)

    def test_delete_port(self):
        """Deleting an active port removes its FDB entry."""
        self._register_ml2_agents()

        with self.subnet(network=self._network) as subnet:
            host_arg = {portbindings.HOST_ID: HOST}
            with self.port(subnet=subnet,
                           device_owner=DEVICE_OWNER_COMPUTE,
                           arg_list=(portbindings.HOST_ID,),
                           **host_arg) as port:
                p1 = port['port']
                device = 'tap' + p1['id']

                self.mock_fanout.reset_mock()
                self.callbacks.update_device_up(self.adminContext,
                                                agent_id=HOST,
                                                device=device)

                with self.port(subnet=subnet,
                               device_owner=DEVICE_OWNER_COMPUTE,
                               arg_list=(portbindings.HOST_ID,),
                               **host_arg) as port2:
                    p2 = port2['port']
                    device1 = 'tap' + p2['id']

                    self.mock_fanout.reset_mock()
                    self.callbacks.update_device_up(self.adminContext,
                                                    agent_id=HOST,
                                                    device=device1)
                self._delete('ports', port2['port']['id'])
                p2_ips = [p['ip_address'] for p in p2['fixed_ips']]
                expected = {p2['network_id']:
                            {'ports':
                             {'20.0.0.1': [l2pop_rpc.PortInfo(
                                           p2['mac_address'],
                                           p2_ips[0])]},
                             'network_type': 'vxlan',
                             'segment_id': 1}}

                self.mock_fanout.assert_any_call(
                    mock.ANY, 'remove_fdb_entries', expected)

    def test_delete_port_last_port_up(self):
        """Deleting the last active port also removes the flooding entry."""
        self._register_ml2_agents()

        with self.subnet(network=self._network) as subnet:
            host_arg = {portbindings.HOST_ID: HOST}
            with self.port(subnet=subnet,
                           device_owner=DEVICE_OWNER_COMPUTE,
                           arg_list=(portbindings.HOST_ID,),
                           **host_arg):
                with self.port(subnet=subnet,
                               device_owner=DEVICE_OWNER_COMPUTE,
                               arg_list=(portbindings.HOST_ID,),
                               **host_arg) as port:
                    p1 = port['port']

                    device = 'tap' + p1['id']

                    self.callbacks.update_device_up(self.adminContext,
                                                    agent_id=HOST,
                                                    device=device)
                self._delete('ports', port['port']['id'])
                p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
                expected = {p1['network_id']:
                            {'ports':
                             {'20.0.0.1': [constants.FLOODING_ENTRY,
                                           l2pop_rpc.PortInfo(
                                               p1['mac_address'],
                                               p1_ips[0])]},
                             'network_type': 'vxlan',
                             'segment_id': 1}}

                self.mock_fanout.assert_any_call(
                    mock.ANY, 'remove_fdb_entries', expected)

    def test_mac_addr_changed(self):
        """A MAC change before binding defers FDB updates until device_up."""
        self._register_ml2_agents()

        with self.subnet(network=self._network) as subnet:
            host_arg = {portbindings.HOST_ID: HOST + '_5'}
            with self.port(subnet=subnet,
                           device_owner=DEVICE_OWNER_COMPUTE,
                           arg_list=(portbindings.HOST_ID,),
                           **host_arg) as port1:
                p1 = port1['port']

                self.mock_fanout.reset_mock()
                device = 'tap' + p1['id']

                old_mac = p1['mac_address']
                mac = old_mac.split(':')
                mac[5] = '01' if mac[5] != '01' else '00'
                new_mac = ':'.join(mac)
                data = {'port': {'mac_address': new_mac,
                                 portbindings.HOST_ID: HOST}}
                req = self.new_update_request('ports', data, p1['id'])
                res = self.deserialize(self.fmt, req.get_response(self.api))
                self.assertIn('port', res)
                self.assertEqual(new_mac, res['port']['mac_address'])

                # port was not bound before, so no fdb call expected yet
                self.assertFalse(self.mock_fanout.called)

                self.callbacks.update_device_up(self.adminContext,
                                                agent_id=HOST,
                                                device=device)

                self.assertEqual(1, self.mock_fanout.call_count)
                add_expected = {
                    p1['network_id']: {
                        'segment_id': 1,
                        'network_type': 'vxlan',
                        'ports': {
                            '20.0.0.1': [
                                l2pop_rpc.PortInfo('00:00:00:00:00:00',
                                                   '0.0.0.0'),
                                l2pop_rpc.PortInfo(new_mac, '10.0.0.2')
                            ]
                        }
                    }
                }
                self.mock_fanout.assert_called_with(
                    mock.ANY, 'add_fdb_entries', add_expected)

    def test_fixed_ips_changed(self):
        """IP add/replace/remove on a bound port sends chg_ip update_fdb_entries."""
        self._register_ml2_agents()

        with self.subnet(network=self._network) as subnet:
            host_arg = {portbindings.HOST_ID: HOST}
            with self.port(subnet=subnet, cidr='10.0.0.0/24',
                           device_owner=DEVICE_OWNER_COMPUTE,
                           arg_list=(portbindings.HOST_ID,),
                           **host_arg) as port1:
                p1 = port1['port']

                device = 'tap' + p1['id']

                self.callbacks.update_device_up(self.adminContext,
                                                agent_id=HOST,
                                                device=device)

                self.mock_fanout.reset_mock()

                data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'},
                                               {'ip_address': '10.0.0.10'}]}}
                req = self.new_update_request('ports', data, p1['id'])
                res = self.deserialize(self.fmt, req.get_response(self.api))
                ips = res['port']['fixed_ips']
                self.assertEqual(len(ips), 2)

                add_expected = {'chg_ip':
                                {p1['network_id']:
                                 {'20.0.0.1':
                                  {'after': [(p1['mac_address'],
                                              '10.0.0.10')]}}}}

                self.mock_fanout.assert_any_call(
                    mock.ANY, 'update_fdb_entries', add_expected)

                self.mock_fanout.reset_mock()

                data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'},
                                               {'ip_address': '10.0.0.16'}]}}
                req = self.new_update_request('ports', data, p1['id'])
                res = self.deserialize(self.fmt, req.get_response(self.api))
                ips = res['port']['fixed_ips']
                self.assertEqual(len(ips), 2)

                upd_expected = {'chg_ip':
                                {p1['network_id']:
                                 {'20.0.0.1':
                                  {'before': [(p1['mac_address'],
                                               '10.0.0.10')],
                                   'after': [(p1['mac_address'],
                                              '10.0.0.16')]}}}}

                self.mock_fanout.assert_any_call(
                    mock.ANY, 'update_fdb_entries', upd_expected)

                self.mock_fanout.reset_mock()

                data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.16'}]}}
                req = self.new_update_request('ports', data, p1['id'])
                res = self.deserialize(self.fmt, req.get_response(self.api))
                ips = res['port']['fixed_ips']
                self.assertEqual(len(ips), 1)

                del_expected = {'chg_ip':
                                {p1['network_id']:
                                 {'20.0.0.1':
                                  {'before': [(p1['mac_address'],
                                               '10.0.0.2')]}}}}

                self.mock_fanout.assert_any_call(
                    mock.ANY, 'update_fdb_entries', del_expected)

    def test_no_fdb_updates_without_port_updates(self):
        """A no-op port update must not trigger any FDB notification."""
        self._register_ml2_agents()

        with self.subnet(network=self._network) as subnet:
            host_arg = {portbindings.HOST_ID: HOST}
            with self.port(subnet=subnet, cidr='10.0.0.0/24',
                           device_owner=DEVICE_OWNER_COMPUTE,
                           arg_list=(portbindings.HOST_ID,),
                           **host_arg) as port1:
                p1 = port1['port']

                device = 'tap' + p1['id']

                self.callbacks.update_device_up(self.adminContext,
                                                agent_id=HOST,
                                                device=device)
                p1['status'] = 'ACTIVE'
                self.mock_fanout.reset_mock()

                fanout = ('neutron.plugins.ml2.drivers.l2pop.rpc.'
                          'L2populationAgentNotifyAPI._notification_fanout')
                fanout_patch = mock.patch(fanout)
                mock_fanout = fanout_patch.start()

                plugin = manager.NeutronManager.get_plugin()
                plugin.update_port(self.adminContext, p1['id'], port1)

                self.assertFalse(mock_fanout.called)
                fanout_patch.stop()

    def test_get_device_details_port_id(self):
        """get_device_details resolves tap-name, uuid and MAC to the port id."""
        self._register_ml2_agents()
        host_arg = {portbindings.HOST_ID: L2_AGENT['host']}
        with self.port(arg_list=(portbindings.HOST_ID,),
                       **host_arg) as port:
            port_id = port['port']['id']
            # ensure various formats all result in correct port_id
            formats = ['tap' + port_id[0:8], port_id,
                       port['port']['mac_address']]
            for device in formats:
                details = self.callbacks.get_device_details(
                    self.adminContext, device=device,
                    agent_id=L2_AGENT_2['host'])
                self.assertEqual(port_id, details['port_id'])

    def _update_and_check_portbinding(self, port_id, host_id):
        """Rebind the port to host_id via the API and assert it took effect."""
        data = {'port': {'binding:host_id': host_id}}
        req = self.new_update_request('ports', data, port_id)
        res = self.deserialize(self.fmt,
                               req.get_response(self.api))
        self.assertEqual(host_id, res['port']['binding:host_id'])

    def _test_host_changed(self, twice):
        """Migrating a port off its host must remove its FDB entries there."""
        self._register_ml2_agents()
        with self.subnet(network=self._network) as subnet:
            host_arg = {portbindings.HOST_ID: L2_AGENT['host']}
            with self.port(subnet=subnet, cidr='10.0.0.0/24',
                           device_owner=DEVICE_OWNER_COMPUTE,
                           arg_list=(portbindings.HOST_ID,),
                           **host_arg) as port1:
                p1 = port1['port']
                device1 = 'tap' + p1['id']
                self.callbacks.update_device_up(
                    self.adminContext,
                    agent_id=L2_AGENT['host'],
                    device=device1)
                if twice:
                    self._update_and_check_portbinding(p1['id'],
                                                       L2_AGENT_4['host'])
                self._update_and_check_portbinding(p1['id'],
                                                   L2_AGENT_2['host'])
                self.mock_fanout.reset_mock()
                # NOTE(yamamoto): see bug #1441488
                self.adminContext.session.expire_all()
                self.callbacks.get_device_details(
                    self.adminContext,
                    device=device1,
                    agent_id=L2_AGENT_2['host'])
                p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
                expected = {p1['network_id']:
                            {'ports':
                             {'20.0.0.1': [constants.FLOODING_ENTRY,
                                           l2pop_rpc.PortInfo(
                                               p1['mac_address'],
                                               p1_ips[0])]},
                             'network_type': 'vxlan',
                             'segment_id': 1}}

                self.mock_fanout.assert_called_with(
                    mock.ANY, 'remove_fdb_entries', expected)

    def test_host_changed(self):
        self._test_host_changed(twice=False)

    def test_host_changed_twice(self):
        self._test_host_changed(twice=True)

    def test_delete_port_invokes_update_device_down(self):
        """delete_port_postcommit must route through _update_port_down."""
        l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver()
        l2pop_mech.L2PopulationAgentNotify = mock.Mock()
        l2pop_mech.rpc_ctx = mock.Mock()
        # NOTE(review): contextlib.nested is Python-2-only (removed in Py3);
        # on Python 3 this would need multiple context managers in one `with`.
        with contextlib.nested(
                mock.patch.object(l2pop_mech,
                                  '_update_port_down',
                                  return_value=None),
                mock.patch.object(l2pop_mech.L2PopulationAgentNotify,
                                  'remove_fdb_entries')) as (upd_port_down,
                                                             rem_fdb_entries):
            l2pop_mech.delete_port_postcommit(mock.Mock())
            self.assertTrue(upd_port_down.called)
class TestL2PopulationMechDriver(base.BaseTestCase):
def _test_get_tunnels(self, agent_ip, exclude_host=True):
mech_driver = l2pop_mech_driver.L2populationMechanismDriver()
agent = mock.Mock()
agent.host = HOST
network_ports = ((None, agent),)
with mock.patch.object(l2pop_db.L2populationDbMixin,
'get_agent_ip',
return_value=agent_ip):
excluded_host = HOST + '-EXCLUDE' if exclude_host else HOST
return mech_driver._get_tunnels(network_ports, excluded_host)
def test_get_tunnels(self):
tunnels = self._test_get_tunnels('20.0.0.1')
self.assertTrue('20.0.0.1' in tunnels)
def test_get_tunnels_no_ip(self):
tunnels = self._test_get_tunnels(None)
self.assertEqual(0, len(tunnels))
def test_get_tunnels_dont_exclude_host(self):
tunnels = self._test_get_tunnels(None, exclude_host=False)
self.assertEqual(0, len(tunnels))
def _test_create_agent_fdb(self, fdb_network_ports_query, agent_ips):
mech_driver = l2pop_mech_driver.L2populationMechanismDriver()
tunnel_network_ports_query, tunnel_agent = (
self._mock_network_ports_query(HOST + '1', None))
agent_ips[tunnel_agent] = '10.0.0.1'
def agent_ip_side_effect(agent):
return agent_ips[agent]
with contextlib.nested(
mock.patch.object(l2pop_db.L2populationDbMixin,
'get_agent_ip',
side_effect=agent_ip_side_effect),
mock.patch.object(l2pop_db.L2populationDbMixin,
'get_nondvr_active_network_ports',
new=fdb_network_ports_query),
mock.patch.object(l2pop_db.L2populationDbMixin,
'get_dvr_active_network_ports',
new=tunnel_network_ports_query)):
session = mock.Mock()
agent = mock.Mock()
agent.host = HOST
segment = {'segmentation_id': 1, 'network_type': 'vxlan'}
return mech_driver._create_agent_fdb(session,
agent,
segment,
'network_id')
def _mock_network_ports_query(self, host_name, binding):
agent = mock.Mock()
agent.host = host_name
result = [(binding, agent)]
all_mock = mock.Mock()
all_mock.all = mock.Mock(return_value=result)
mock_query = mock.Mock(return_value=all_mock)
return mock_query, agent
def test_create_agent_fdb(self):
binding = mock.Mock()
binding.port = {'mac_address': '00:00:DE:AD:BE:EF',
'fixed_ips': [{'ip_address': '1.1.1.1'}]}
fdb_network_ports_query, fdb_agent = (
self._mock_network_ports_query(HOST + '2', binding))
agent_ips = {fdb_agent: '20.0.0.1'}
agent_fdb = self._test_create_agent_fdb(fdb_network_ports_query,
agent_ips)
result = agent_fdb['network_id']
expected_result = {'segment_id': 1,
'network_type': 'vxlan',
'ports':
{'10.0.0.1':
[constants.FLOODING_ENTRY],
'20.0.0.1':
[constants.FLOODING_ENTRY,
l2pop_rpc.PortInfo(
mac_address='00:00:DE:AD:BE:EF',
ip_address='1.1.1.1')]}}
self.assertEqual(expected_result, result)
def test_create_agent_fdb_only_tunnels(self):
all_mock = mock.Mock()
all_mock.all = mock.Mock(return_value=[])
fdb_network_ports_query = mock.Mock(return_value=all_mock)
agent_fdb = self._test_create_agent_fdb(fdb_network_ports_query, {})
result = agent_fdb['network_id']
expected_result = {'segment_id': 1,
'network_type': 'vxlan',
'ports':
{'10.0.0.1':
[constants.FLOODING_ENTRY]}}
self.assertEqual(expected_result, result)
def test_update_port_postcommit_mac_address_changed_raises(self):
    """Changing an active port's MAC address must abort the ML2 update."""
    port = {'status': u'ACTIVE',
            'device_owner': u'compute:None',
            'mac_address': u'12:34:56:78:4b:0e',
            'id': u'1'}
    original_port = port.copy()
    original_port['mac_address'] = u'12:34:56:78:4b:0f'  # differs from current
    # Patch the segment lookup so PortContext can be built without a DB.
    with mock.patch.object(driver_context.db, 'get_network_segments'):
        ctx = driver_context.PortContext(mock.Mock(),
                                         mock.Mock(),
                                         port,
                                         mock.MagicMock(),
                                         mock.Mock(),
                                         None,
                                         original_port=original_port)
    mech_driver = l2pop_mech_driver.L2populationMechanismDriver()
    # l2pop cannot migrate a live port to a new MAC; the driver must raise.
    with testtools.ExpectedException(ml2_exc.MechanismDriverError):
        mech_driver.update_port_postcommit(ctx)
| |
"""
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer, search
from textwrap import dedent
from numpy.random import RandomState
import pytest
from sklearn.base import is_classifier
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz, plot_tree, export_text
from io import StringIO
from sklearn.exceptions import NotFittedError
# toy sample: two features, perfectly separable on the sign of X[0]
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]  # binary labels
y2 = [[-1, 1], [-1, 1], [-1, 1], [1, 2], [1, 2], [1, 3]]  # multi-output labels
w = [1, 1, 1, .5, .5, .5]  # sample weights
y_degraded = [1, 1, 1, 1, 1, 1]  # degenerate target: a single class
def test_graphviz_toy():
    """Check the exact DOT output of export_graphviz on the toy sample."""
    clf = DecisionTreeClassifier(max_depth=3,
                                 min_samples_split=2,
                                 criterion="gini",
                                 random_state=2)
    clf.fit(X, y)

    # Test export code
    contents1 = export_graphviz(clf, out_file=None)
    contents2 = 'digraph Tree {\n' \
                'node [shape=box] ;\n' \
                '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
                'value = [3, 3]"] ;\n' \
                '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="True"] ;\n' \
                '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="False"] ;\n' \
                '}'

    assert contents1 == contents2

    # Test with feature_names
    contents1 = export_graphviz(clf, feature_names=["feature0", "feature1"],
                                out_file=None)
    contents2 = 'digraph Tree {\n' \
                'node [shape=box] ;\n' \
                '0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
                'value = [3, 3]"] ;\n' \
                '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="True"] ;\n' \
                '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="False"] ;\n' \
                '}'

    assert contents1 == contents2

    # Test with class_names
    contents1 = export_graphviz(clf, class_names=["yes", "no"], out_file=None)
    contents2 = 'digraph Tree {\n' \
                'node [shape=box] ;\n' \
                '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
                'value = [3, 3]\\nclass = yes"] ;\n' \
                '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
                'class = yes"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="True"] ;\n' \
                '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
                'class = no"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="False"] ;\n' \
                '}'

    assert contents1 == contents2

    # Test plot_options
    contents1 = export_graphviz(clf, filled=True, impurity=False,
                                proportion=True, special_characters=True,
                                rounded=True, out_file=None)
    contents2 = 'digraph Tree {\n' \
                'node [shape=box, style="filled, rounded", color="black", ' \
                'fontname=helvetica] ;\n' \
                'edge [fontname=helvetica] ;\n' \
                '0 [label=<X<SUB>0</SUB> &le; 0.0<br/>samples = 100.0%<br/>' \
                'value = [0.5, 0.5]>, fillcolor="#ffffff"] ;\n' \
                '1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
                'fillcolor="#e58139"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="True"] ;\n' \
                '2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
                'fillcolor="#399de5"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="False"] ;\n' \
                '}'

    assert contents1 == contents2

    # Test max_depth
    contents1 = export_graphviz(clf, max_depth=0,
                                class_names=True, out_file=None)
    contents2 = 'digraph Tree {\n' \
                'node [shape=box] ;\n' \
                '0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
                'value = [3, 3]\\nclass = y[0]"] ;\n' \
                '1 [label="(...)"] ;\n' \
                '0 -> 1 ;\n' \
                '2 [label="(...)"] ;\n' \
                '0 -> 2 ;\n' \
                '}'

    assert contents1 == contents2

    # Test max_depth with plot_options
    contents1 = export_graphviz(clf, max_depth=0, filled=True,
                                out_file=None, node_ids=True)
    contents2 = 'digraph Tree {\n' \
                'node [shape=box, style="filled", color="black"] ;\n' \
                '0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
                'samples = 6\\nvalue = [3, 3]", fillcolor="#ffffff"] ;\n' \
                '1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
                '0 -> 1 ;\n' \
                '2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
                '0 -> 2 ;\n' \
                '}'

    assert contents1 == contents2

    # Test multi-output with weighted samples
    clf = DecisionTreeClassifier(max_depth=2,
                                 min_samples_split=2,
                                 criterion="gini",
                                 random_state=2)
    clf = clf.fit(X, y2, sample_weight=w)

    contents1 = export_graphviz(clf, filled=True,
                                impurity=False, out_file=None)
    contents2 = 'digraph Tree {\n' \
                'node [shape=box, style="filled", color="black"] ;\n' \
                '0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
                'value = [[3.0, 1.5, 0.0]\\n' \
                '[3.0, 1.0, 0.5]]", fillcolor="#ffffff"] ;\n' \
                '1 [label="samples = 3\\nvalue = [[3, 0, 0]\\n' \
                '[3, 0, 0]]", fillcolor="#e58139"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="True"] ;\n' \
                '2 [label="X[0] <= 1.5\\nsamples = 3\\n' \
                'value = [[0.0, 1.5, 0.0]\\n' \
                '[0.0, 1.0, 0.5]]", fillcolor="#f1bd97"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="False"] ;\n' \
                '3 [label="samples = 2\\nvalue = [[0, 1, 0]\\n' \
                '[0, 1, 0]]", fillcolor="#e58139"] ;\n' \
                '2 -> 3 ;\n' \
                '4 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
                '[0.0, 0.0, 0.5]]", fillcolor="#e58139"] ;\n' \
                '2 -> 4 ;\n' \
                '}'

    assert contents1 == contents2

    # Test regression output with plot_options
    clf = DecisionTreeRegressor(max_depth=3,
                                min_samples_split=2,
                                criterion="mse",
                                random_state=2)
    clf.fit(X, y)

    contents1 = export_graphviz(clf, filled=True, leaves_parallel=True,
                                out_file=None, rotate=True, rounded=True)
    contents2 = 'digraph Tree {\n' \
                'node [shape=box, style="filled, rounded", color="black", ' \
                'fontname=helvetica] ;\n' \
                'graph [ranksep=equally, splines=polyline] ;\n' \
                'edge [fontname=helvetica] ;\n' \
                'rankdir=LR ;\n' \
                '0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
                'value = 0.0", fillcolor="#f2c09c"] ;\n' \
                '1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
                'fillcolor="#ffffff"] ;\n' \
                '0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
                'headlabel="True"] ;\n' \
                '2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
                'fillcolor="#e58139"] ;\n' \
                '0 -> 2 [labeldistance=2.5, labelangle=45, ' \
                'headlabel="False"] ;\n' \
                '{rank=same ; 0} ;\n' \
                '{rank=same ; 1; 2} ;\n' \
                '}'

    assert contents1 == contents2

    # Test classifier with degraded learning set
    clf = DecisionTreeClassifier(max_depth=3)
    clf.fit(X, y_degraded)

    contents1 = export_graphviz(clf, filled=True, out_file=None)
    contents2 = 'digraph Tree {\n' \
                'node [shape=box, style="filled", color="black"] ;\n' \
                '0 [label="gini = 0.0\\nsamples = 6\\nvalue = 6.0", ' \
                'fillcolor="#ffffff"] ;\n' \
                '}'

    # BUG FIX: this final assertion was missing, so the degraded-set
    # export was never actually compared against the expected DOT output.
    assert contents1 == contents2
def test_graphviz_errors():
    """export_graphviz rejects unfitted trees, bad names and bad options."""
    tree_clf = DecisionTreeClassifier(max_depth=3, min_samples_split=2)

    # An unfitted estimator is refused outright.
    with pytest.raises(NotFittedError):
        export_graphviz(tree_clf, StringIO())

    tree_clf.fit(X, y)

    # feature_names length must match the number of features exactly.
    msg = ("Length of feature_names, "
           "1 does not match number of features, 2")
    with pytest.raises(ValueError, match=msg):
        export_graphviz(tree_clf, None, feature_names=["a"])

    msg = ("Length of feature_names, "
           "3 does not match number of features, 2")
    with pytest.raises(ValueError, match=msg):
        export_graphviz(tree_clf, None, feature_names=["a", "b", "c"])

    # Passing the raw tree object instead of the estimator is a TypeError.
    with pytest.raises(TypeError, match="is not an estimator instance"):
        export_graphviz(tree_clf.fit(X, y).tree_)

    # An empty class_names list underflows the class-label lookup.
    with pytest.raises(IndexError):
        export_graphviz(tree_clf, StringIO(), class_names=[])

    # precision must be a non-negative integer.
    with pytest.raises(ValueError, match="should be greater or equal"):
        export_graphviz(tree_clf, StringIO(), precision=-1)
    with pytest.raises(ValueError, match="should be an integer"):
        export_graphviz(tree_clf, StringIO(), precision="1")
def test_friedman_mse_in_graphviz():
    """friedman_mse impurity must appear in every exported node label."""
    dot_data = StringIO()

    reg = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
    reg.fit(X, y)
    export_graphviz(reg, out_file=dot_data)

    gbc = GradientBoostingClassifier(n_estimators=2, random_state=0)
    gbc.fit(X, y)
    for stage in gbc.estimators_:
        export_graphviz(stage[0], out_file=dot_data)

    # Every node label that mentions samples should carry the criterion name.
    for label in finditer(r"\[.*?samples.*?\]", dot_data.getvalue()):
        assert "friedman_mse" in label.group()
def test_precision():
    """Exported impurities and thresholds honour the ``precision`` argument."""
    rng_reg = RandomState(2)
    rng_clf = RandomState(8)
    # One regression and one classification fixture, each with its own tree.
    for X, y, clf in zip(
            (rng_reg.random_sample((5, 2)),
             rng_clf.random_sample((1000, 4))),
            (rng_reg.random_sample((5, )),
             rng_clf.randint(2, size=(1000, ))),
            (DecisionTreeRegressor(criterion="friedman_mse", random_state=0,
                                   max_depth=1),
             DecisionTreeClassifier(max_depth=1, random_state=0))):
        clf.fit(X, y)
        for precision in (4, 3):
            dot_data = export_graphviz(clf, out_file=None, precision=precision,
                                       proportion=True)
            # With the current random state, the impurity and the threshold
            # will have the number of precision set in the export_graphviz
            # function. We will check the number of precision with a strict
            # equality. The value reported will have only 2 precision and
            # therefore, only a less equal comparison will be done.

            # check value (at most ``precision`` digits after the point)
            for finding in finditer(r"value = \d+\.\d+", dot_data):
                assert (
                    len(search(r"\.\d+", finding.group()).group()) <=
                    precision + 1)
            # pick the impurity pattern matching the estimator type
            if is_classifier(clf):
                pattern = r"gini = \d+\.\d+"
            else:
                pattern = r"friedman_mse = \d+\.\d+"
            # check impurity (exactly ``precision`` digits after the point)
            for finding in finditer(pattern, dot_data):
                assert (len(search(r"\.\d+", finding.group()).group()) ==
                        precision + 1)
            # check threshold (exactly ``precision`` digits after the point)
            for finding in finditer(r"<= \d+\.\d+", dot_data):
                assert (len(search(r"\.\d+", finding.group()).group()) ==
                        precision + 1)
def test_export_text_errors():
    """export_text must reject out-of-range formatting arguments."""
    clf = DecisionTreeClassifier(max_depth=2, random_state=0)
    clf.fit(X, y)

    # NOTE: "bust" reproduces the library's own (typo'd) error message.
    bad_calls = [
        (dict(max_depth=-1), "max_depth bust be >= 0, given -1"),
        (dict(feature_names=['a']),
         "feature_names must contain 2 elements, got 1"),
        (dict(decimals=-1), "decimals must be >= 0, given -1"),
        (dict(spacing=0), "spacing must be > 0, given 0"),
    ]
    for kwargs, err_msg in bad_calls:
        with pytest.raises(ValueError, match=err_msg):
            export_text(clf, **kwargs)
def test_export_text():
    """Check the plain-text tree rendering produced by export_text."""
    clf = DecisionTreeClassifier(max_depth=2, random_state=0)
    clf.fit(X, y)

    expected_report = dedent("""
    |--- feature_1 <= 0.00
    |   |--- class: -1
    |--- feature_1 > 0.00
    |   |--- class: 1
    """).lstrip()
    assert export_text(clf) == expected_report
    # testing that leaves at level 1 are not truncated
    assert export_text(clf, max_depth=0) == expected_report
    # a max_depth larger than the tree depth changes nothing
    assert export_text(clf, max_depth=10) == expected_report

    # custom feature names replace the generic feature_<i> labels
    expected_report = dedent("""
    |--- b <= 0.00
    |   |--- class: -1
    |--- b > 0.00
    |   |--- class: 1
    """).lstrip()
    assert export_text(clf, feature_names=['a', 'b']) == expected_report

    # show_weights adds the per-class sample weights at each leaf
    expected_report = dedent("""
    |--- feature_1 <= 0.00
    |   |--- weights: [3.00, 0.00] class: -1
    |--- feature_1 > 0.00
    |   |--- weights: [0.00, 3.00] class: 1
    """).lstrip()
    assert export_text(clf, show_weights=True) == expected_report

    # spacing controls the width of the tree-drawing characters
    expected_report = dedent("""
    |- feature_1 <= 0.00
    | |- class: -1
    |- feature_1 > 0.00
    | |- class: 1
    """).lstrip()
    assert export_text(clf, spacing=1) == expected_report

    # a deeper tree truncated at depth 0 reports the cut-off branch
    X_l = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-1, 1]]
    y_l = [-1, -1, -1, 1, 1, 1, 2]
    clf = DecisionTreeClassifier(max_depth=4, random_state=0)
    clf.fit(X_l, y_l)
    expected_report = dedent("""
    |--- feature_1 <= 0.00
    |   |--- class: -1
    |--- feature_1 > 0.00
    |   |--- truncated branch of depth 2
    """).lstrip()
    assert export_text(clf, max_depth=0) == expected_report

    # multi-output regression: leaves report value vectors
    X_mo = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
    y_mo = [[-1, -1], [-1, -1], [-1, -1], [1, 1], [1, 1], [1, 1]]
    reg = DecisionTreeRegressor(max_depth=2, random_state=0)
    reg.fit(X_mo, y_mo)
    expected_report = dedent("""
    |--- feature_1 <= 0.0
    |   |--- value: [-1.0, -1.0]
    |--- feature_1 > 0.0
    |   |--- value: [1.0, 1.0]
    """).lstrip()
    assert export_text(reg, decimals=1) == expected_report
    # show_weights has no extra effect for regressors
    assert export_text(reg, decimals=1, show_weights=True) == expected_report

    # single-feature input with a custom feature name
    X_single = [[-2], [-1], [-1], [1], [1], [2]]
    reg = DecisionTreeRegressor(max_depth=2, random_state=0)
    reg.fit(X_single, y_mo)
    expected_report = dedent("""
    |--- first <= 0.0
    |   |--- value: [-1.0, -1.0]
    |--- first > 0.0
    |   |--- value: [1.0, 1.0]
    """).lstrip()
    assert export_text(reg, decimals=1,
                       feature_names=['first']) == expected_report
    assert export_text(reg, decimals=1, show_weights=True,
                       feature_names=['first']) == expected_report
def test_plot_tree_entropy(pyplot):
    """Smoke-test plot_tree with criterion='entropy' and check node labels."""
    model = DecisionTreeClassifier(max_depth=3,
                                   min_samples_split=2,
                                   criterion="entropy",
                                   random_state=2)
    model.fit(X, y)

    annotations = plot_tree(model,
                            feature_names=['first feat', 'sepal_width'])
    texts = [annotation.get_text() for annotation in annotations]
    assert texts == [
        "first feat <= 0.0\nentropy = 1.0\nsamples = 6\nvalue = [3, 3]",
        "entropy = 0.0\nsamples = 3\nvalue = [3, 0]",
        "entropy = 0.0\nsamples = 3\nvalue = [0, 3]",
    ]
def test_plot_tree_gini(pyplot):
    """Smoke-test plot_tree with criterion='gini' and check node labels."""
    model = DecisionTreeClassifier(max_depth=3,
                                   min_samples_split=2,
                                   criterion="gini",
                                   random_state=2)
    model.fit(X, y)

    annotations = plot_tree(model,
                            feature_names=['first feat', 'sepal_width'])
    texts = [annotation.get_text() for annotation in annotations]
    assert texts == [
        "first feat <= 0.0\ngini = 0.5\nsamples = 6\nvalue = [3, 3]",
        "gini = 0.0\nsamples = 3\nvalue = [3, 0]",
        "gini = 0.0\nsamples = 3\nvalue = [0, 3]",
    ]
# FIXME: to be removed in 1.0
def test_plot_tree_rotate_deprecation(pyplot):
    """Using the deprecated ``rotate`` argument emits a FutureWarning."""
    tree = DecisionTreeClassifier()
    tree.fit(X, y)
    # test that a warning is raised when rotate is used.
    match = (r"'rotate' has no effect and is deprecated in 0.23. "
             r"It will be removed in 1.0 \(renaming of 0.25\).")
    with pytest.warns(FutureWarning, match=match):
        plot_tree(tree, rotate=True)
def test_not_fitted_tree(pyplot):
    """plot_tree on an unfitted estimator must raise NotFittedError."""
    unfitted = DecisionTreeRegressor()
    with pytest.raises(NotFittedError):
        plot_tree(unfitted)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import logging
from django.conf import settings
import six
from manilaclient import client as manila_client
from manilaclient.v1.contrib import list_extensions as manila_list_extensions
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
LOG = logging.getLogger(__name__)

# Identifies this UI plugin in manila request logs.
MANILA_UI_USER_AGENT_REPR = "manila_ui_plugin_for_horizon"
MANILA_VERSION = "2.5"  # requires manilaclient 1.3.0 or newer
MANILA_SERVICE_TYPE = "sharev2"

# API static values
SHARE_STATE_AVAILABLE = "available"  # share status once it is usable
DEFAULT_QUOTA_NAME = 'default'  # quota class holding the default quotas
def manilaclient(request):
    """Build an authenticated manila client for this request.

    Returns None when no share service endpoint is found in the service
    catalog; callers must be prepared to handle that.
    """
    insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
    cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
    manila_url = ""
    try:
        manila_url = base.url_for(request, MANILA_SERVICE_TYPE)
    except exceptions.ServiceCatalogException:
        LOG.debug('no share service configured.')
        return None
    # SECURITY FIX: do not log the raw auth token -- tokens are
    # credentials and must not end up in log files. Also use lazy
    # %-style arguments so formatting only happens when DEBUG is on.
    LOG.debug('manilaclient connection created for user "%s" and url "%s"',
              request.user.username, manila_url)
    c = manila_client.Client(
        MANILA_VERSION,
        username=request.user.username,
        input_auth_token=request.user.token.id,
        project_id=request.user.tenant_id,
        service_catalog_url=manila_url,
        insecure=insecure,
        cacert=cacert,
        http_log_debug=settings.DEBUG,
        user_agent=MANILA_UI_USER_AGENT_REPR,
        api_version=MANILA_VERSION,
    )
    c.client.auth_token = request.user.token.id
    c.client.management_url = manila_url
    return c
def share_list(request, search_opts=None):
    """List shares, optionally filtered by ``search_opts``."""
    return manilaclient(request).shares.list(search_opts=search_opts)


def share_get(request, share_id):
    """Return a single share by id."""
    share_data = manilaclient(request).shares.get(share_id)
    return share_data


def share_create(request, size, name, description, proto, snapshot_id=None,
                 metadata=None, share_network=None, share_type=None,
                 is_public=None, availability_zone=None):
    """Create a share of ``size`` GiB using protocol ``proto``."""
    return manilaclient(request).shares.create(
        proto, size, name=name, description=description,
        share_network=share_network, snapshot_id=snapshot_id,
        metadata=metadata, share_type=share_type, is_public=is_public,
        availability_zone=availability_zone)


def share_delete(request, share_id):
    """Delete the share with the given id."""
    return manilaclient(request).shares.delete(share_id)


def share_update(request, share_id, name, description, is_public=''):
    """Update a share's name/description and, optionally, its visibility.

    ``is_public`` is normalized to text; the 'is_public' field is only
    sent when the value is non-empty and not the string 'none'.
    """
    share_data = {'display_name': name, 'display_description': description}
    if not isinstance(is_public, six.string_types):
        is_public = six.text_type(is_public)
    if is_public and is_public.lower() != 'none':
        share_data['is_public'] = is_public
    return manilaclient(request).shares.update(share_id, **share_data)


def share_rules_list(request, share_id):
    """List access rules defined for a share."""
    return manilaclient(request).shares.access_list(share_id)


def share_allow(request, share_id, access_type, access_to, access_level):
    """Grant ``access_to`` the given access type/level on a share."""
    return manilaclient(request).shares.allow(
        share_id, access_type, access_to, access_level)


def share_deny(request, share_id, rule_id):
    """Revoke a previously granted access rule."""
    return manilaclient(request).shares.deny(share_id, rule_id)
def share_manage(request, service_host, protocol, export_path,
                 driver_options=None, share_type=None,
                 name=None, description=None):
    """Bring an existing backend export under manila management."""
    return manilaclient(request).shares.manage(
        service_host=service_host,
        protocol=protocol,
        export_path=export_path,
        driver_options=driver_options,
        share_type=share_type,
        name=name,
        description=description,
    )


def share_unmanage(request, share):
    """Stop managing a share; backend data is left untouched."""
    # Param 'share' can be either string with ID or object with attr 'id'.
    return manilaclient(request).shares.unmanage(share)


def share_extend(request, share_id, new_size):
    """Grow a share to ``new_size`` GiB."""
    return manilaclient(request).shares.extend(share_id, new_size)
def share_snapshot_get(request, snapshot_id):
    """Return a single share snapshot by id."""
    return manilaclient(request).share_snapshots.get(snapshot_id)


def share_snapshot_update(request, snapshot_id, **kwargs):
    """Update snapshot attributes passed as keyword arguments."""
    return manilaclient(request).share_snapshots.update(snapshot_id, **kwargs)


def share_snapshot_list(request, detailed=True, search_opts=None,
                        sort_key=None, sort_dir=None):
    """List share snapshots with optional filtering and sorting."""
    # Example of 'search_opts' value:
    # {'share_id': 'id_of_existing_share'}
    return manilaclient(request).share_snapshots.list(
        detailed=detailed,
        search_opts=search_opts,
        sort_key=sort_key,
        sort_dir=sort_dir,
    )


def share_snapshot_create(request, share_id, name=None,
                          description=None, force=False):
    """Snapshot a share; ``force`` allows snapshotting a busy share."""
    return manilaclient(request).share_snapshots.create(
        share_id, force=force, name=name, description=description)


def share_snapshot_delete(request, snapshot_id):
    """Delete the snapshot with the given id."""
    return manilaclient(request).share_snapshots.delete(snapshot_id)
def share_server_list(request, search_opts=None):
    """List share servers, optionally filtered by ``search_opts``."""
    return manilaclient(request).share_servers.list(search_opts=search_opts)


def share_server_get(request, share_serv_id):
    """Return a single share server by id."""
    return manilaclient(request).share_servers.get(share_serv_id)


def share_server_delete(request, share_serv_id):
    """Delete the share server with the given id."""
    return manilaclient(request).share_servers.delete(share_serv_id)


def share_network_list(request, detailed=False, search_opts=None):
    """List share networks; ``detailed`` includes full records."""
    return manilaclient(request).share_networks.list(detailed=detailed,
                                                     search_opts=search_opts)


def share_network_create(request, neutron_net_id=None, neutron_subnet_id=None,
                         nova_net_id=None, name=None, description=None):
    """Create a share network backed by a neutron or nova network."""
    return manilaclient(request).share_networks.create(
        neutron_net_id=neutron_net_id, neutron_subnet_id=neutron_subnet_id,
        nova_net_id=nova_net_id, name=name, description=description)


def share_network_get(request, share_net_id):
    """Return a single share network by id."""
    return manilaclient(request).share_networks.get(share_net_id)


def share_network_update(request, share_net_id, name=None, description=None):
    """Update a share network's name and/or description."""
    return manilaclient(request).share_networks.update(
        share_net_id, name=name, description=description)


def share_network_delete(request, share_network_id):
    """Delete the share network with the given id."""
    return manilaclient(request).share_networks.delete(share_network_id)
def security_service_list(request, search_opts=None):
    """List security services with full details."""
    return manilaclient(request).security_services.list(
        detailed=True,
        search_opts=search_opts)


def security_service_get(request, sec_service_id, search_opts=None):
    """Return a single security service by id."""
    # NOTE(review): 'search_opts' is accepted but never used here.
    return manilaclient(request).security_services.get(sec_service_id)


def security_service_create(request, type, dns_ip=None, server=None,
                            domain=None, user=None, password=None, name=None,
                            description=None):
    """Create a security service (e.g. LDAP, Kerberos, Active Directory)."""
    return manilaclient(request).security_services.create(
        type, dns_ip=dns_ip, server=server, domain=domain, user=user,
        password=password, name=name, description=description)


def security_service_update(request, security_service_id, dns_ip=None,
                            server=None,
                            domain=None, user=None, password=None, name=None,
                            description=None):
    """Update a security service's connection and naming attributes."""
    return manilaclient(request).security_services.update(
        security_service_id, dns_ip=dns_ip, server=server, domain=domain,
        user=user, password=password, name=name, description=description,
    )


def security_service_delete(request, security_service_id):
    """Delete the security service with the given id."""
    return manilaclient(request).security_services.delete(security_service_id)


def share_network_security_service_add(request, share_network_id,
                                       security_service_id):
    """Associate a security service with a share network."""
    return manilaclient(request).share_networks.add_security_service(
        share_network_id, security_service_id)


def share_network_security_service_remove(request, share_network_id,
                                          security_service_id):
    """Detach a security service from a share network."""
    return manilaclient(request).share_networks.remove_security_service(
        share_network_id, security_service_id)


def share_network_security_service_list(request, share_network_id):
    """List security services attached to a share network."""
    return manilaclient(request).security_services.list(
        search_opts={'share_network_id': share_network_id})
def share_set_metadata(request, share_id, metadata):
    """Set key/value metadata on a share."""
    return manilaclient(request).shares.set_metadata(share_id, metadata)


def share_delete_metadata(request, share_id, keys):
    """Remove the given metadata keys from a share."""
    return manilaclient(request).shares.delete_metadata(share_id, keys)


def tenant_quota_get(request, tenant_id):
    """Return the project's share quotas wrapped in a QuotaSet."""
    return base.QuotaSet(manilaclient(request).quotas.get(tenant_id))


def tenant_quota_update(request, tenant_id, **kwargs):
    """Update the project's share quotas from keyword arguments."""
    return manilaclient(request).quotas.update(tenant_id, **kwargs)


def default_quota_get(request, tenant_id):
    """Return the default share quotas wrapped in a QuotaSet."""
    return base.QuotaSet(manilaclient(request).quotas.defaults(tenant_id))


def default_quota_update(request, **kwargs):
    """Update the default quota class with the given values."""
    manilaclient(request).quota_classes.update(DEFAULT_QUOTA_NAME, **kwargs)
def share_type_list(request):
    """List all share types."""
    return manilaclient(request).share_types.list()


def share_type_get(request, share_type_id):
    """Return a single share type by id."""
    return manilaclient(request).share_types.get(share_type_id)


def share_type_create(request, name, spec_driver_handles_share_servers,
                      spec_snapshot_support=True, is_public=True):
    """Create a share type with the mandatory DHSS extra spec."""
    return manilaclient(request).share_types.create(
        name=name,
        spec_driver_handles_share_servers=spec_driver_handles_share_servers,
        spec_snapshot_support=spec_snapshot_support,
        is_public=is_public)


def share_type_delete(request, share_type_id):
    """Delete the share type with the given id."""
    return manilaclient(request).share_types.delete(share_type_id)


def share_type_get_extra_specs(request, share_type_id):
    """Return the extra specs dict of a share type."""
    return manilaclient(request).share_types.get(share_type_id).get_keys()


def share_type_set_extra_specs(request, share_type_id, extra_specs):
    """Set (merge) extra specs on a share type."""
    return manilaclient(request).share_types.get(
        share_type_id).set_keys(extra_specs)


def share_type_unset_extra_specs(request, share_type_id, keys):
    """Remove the given extra-spec keys from a share type."""
    return manilaclient(request).share_types.get(
        share_type_id).unset_keys(keys)


def share_type_access_list(request, share_type_id):
    """List projects allowed to use a private share type."""
    return manilaclient(request).share_type_access.list(share_type_id)


def share_type_access_add(request, share_type_id, project_id):
    """Grant a project access to a private share type."""
    return manilaclient(request).share_type_access.add_project_access(
        share_type_id, project_id)


def share_type_access_remove(request, share_type_id, project_id):
    """Revoke a project's access to a private share type."""
    return manilaclient(request).share_type_access.remove_project_access(
        share_type_id, project_id)
def tenant_absolute_limits(request):
    """Return the project's absolute limits as a name -> value mapping.

    A value of -1 from the API means "unlimited" and is reported as
    ``float("inf")``.
    """
    limits = manilaclient(request).limits.get().absolute
    return {limit.name: float("inf") if limit.value == -1 else limit.value
            for limit in limits}
@memoized
def list_extensions(request):
    """Return all API extensions reported by the manila service (cached)."""
    return manila_list_extensions.ListExtManager(
        manilaclient(request)).show_all()


@memoized
def extension_supported(request, extension_name):
    """This method will determine if manila supports a given extension name.
    """
    return any(extension.name == extension_name
               for extension in list_extensions(request))
| |
from argparse import Namespace
from mock import Mock
from mock import call
from mock import patch
from vimper.config import Config
from vimper.commands import get_plugins
from vimper.commands import get_existing_plugins
from vimper.commands import LinkCommand
from vimper.commands import UpdateCommand
from vimper.utils import abspath
import os
import shutil
import tempfile
import unittest
# Static fixtures used by these tests live next to this module.
UNIT_TEST_DATA_DIR = abspath(os.path.dirname(__file__), 'unittestdata')
DUMMY_PLUGINS_CONF = abspath(UNIT_TEST_DATA_DIR, 'plugins.example.yml')
class TestUtilities(unittest.TestCase):
    """Tests for the module-level helpers in vimper.commands."""

    def setUp(self):
        self.config = Config()
        self.config.config_filename = ''  # fall back to built-in defaults
        self.config.setup()

    def test_get_plugins(self):
        """get_plugins() parses the plugins YAML into a name -> url dict."""
        self.config.plugins_config = DUMMY_PLUGINS_CONF
        self.assertEqual(get_plugins(self.config), {
            'ctrlp': 'git://github.com/kien/ctrlp.vim.git',
            'nerdtree': 'git://github.com/lukaszb/nerdtree.git',
        })

    @patch.object(os, 'listdir')
    def test_get_existing_plugins(self, listdir):
        """get_existing_plugins() simply lists the bundle directory."""
        plugins = ['ctrp', 'nerdtree', 'solarized']
        listdir.return_value = plugins
        self.config.bundle_path = '~/.vim/bundle'
        self.assertEqual(get_existing_plugins(self.config), plugins)
        listdir.assert_called_once_with('~/.vim/bundle')
class TestUpdateCommand(unittest.TestCase):
    """Tests for UpdateCommand: repo refresh, plugin updates and linking."""

    def setUp(self):
        self.command = UpdateCommand()
        # Silence command output during tests.
        self.command.stdout = open(os.devnull, 'w')

    def test_handle(self):
        """handle() runs the update steps in the documented order."""
        manager = Mock()
        self.command.update_vimper_repo = manager.update_vimper_repo
        self.command.makedirs = manager.makedirs
        self.command.update_plugins = manager.update_plugins
        self.command.handle_links = manager.handle_links
        namespace = Namespace()
        self.command.handle(namespace)
        self.assertEqual(manager.method_calls, [
            call.update_vimper_repo(),
            call.makedirs(),
            call.update_plugins(),
            call.handle_links(namespace),
        ])

    @patch('vimper.commands.update_repo')
    def test_update_vimper_repo(self, update_repo):
        """update_vimper_repo() refreshes the lair repo from config."""
        self.command.config.lair_path = 'foo'
        self.command.config.lair_url = 'bar'
        self.command.update_vimper_repo()
        update_repo.assert_called_once_with('foo', 'bar')

    @patch('vimper.commands.os')
    def test_makedirs(self, os):
        """makedirs() creates the bundle and plugins dirs with mode 0o755."""
        self.command.config.bundle_path = '/foo'
        self.command.config.plugins_path = '/bar'
        self.command.makedirs()
        calls = [call(dirname, 0o755) for dirname in ['/foo', '/bar']]
        self.assertEqual(os.makedirs.call_args_list, calls)

    def test_makedirs_if_they_exist(self):
        """makedirs() recreates missing directories without raising."""
        tempdir = tempfile.mkdtemp()
        shutil.rmtree(tempdir)
        self.command.get_dirnames_to_create = Mock(return_value=[tempdir])
        self.command.makedirs()  # should not raise OSError here
        self.assertTrue(os.path.isdir(tempdir))

    @patch('vimper.commands.update_repo')
    def test_update_plugin(self, update_repo):
        """update_plugin() updates the checkout under plugins_path (piped)."""
        self.command.update_plugin('foo', 'bar')
        repo_path = abspath(self.command.config.plugins_path, 'foo')
        update_repo.assert_called_once_with(repo_path, 'bar', piped=True)

    @patch('vimper.commands.update_repo')
    @patch('vimper.commands.shutil')
    @patch('vimper.commands.os')
    def test_update_plugin_recreate_plugins(self, os, shutil, update_repo):
        """With recreate_plugins set, an existing checkout is wiped first."""
        self.command.namespace = Mock()
        self.command.namespace.recreate_plugins = True
        repo_path = abspath(self.command.config.plugins_path, 'foo')
        os.path.exists.return_value = True
        self.command.update_plugin('foo', 'bar')
        os.path.exists.assert_called_once_with(repo_path)
        shutil.rmtree.assert_called_once_with(repo_path)
        update_repo.assert_called_once_with(repo_path, 'bar', piped=True)

    def test_update_plugin_for_info(self):
        """update_plugin_for_info() unpacks its tuple and forwards it."""
        self.command.update_plugin = Mock(return_value=123)
        value = self.command.update_plugin_for_info(('foo', 'bar'))
        self.command.update_plugin.assert_called_once_with('foo', 'bar')
        self.assertEqual(value, 123)

    @patch('vimper.commands.get_plugins')
    def test_get_plugins_to_update(self, get_plugins):
        """Plugins come back as (name, url) pairs sorted by name."""
        self.command.namespace = Namespace(only_new=False)
        get_plugins.return_value = {'solarized': 1, 'ctrlp': 2, 'nerdtree': 3}
        self.assertEqual(self.command.get_plugins_to_update(), [
            ('ctrlp', 2), ('nerdtree', 3), ('solarized', 1)])

    @patch('vimper.commands.get_plugins')
    @patch('vimper.commands.get_existing_plugins')
    def test_get_plugins_to_update_respects_only_new_flag(self,
                                                          get_existing_plugins,
                                                          get_plugins):
        """With only_new set, already-installed plugins are skipped."""
        get_plugins.return_value = {'solarized': 1, 'ctrlp': 2, 'nerdtree': 3}
        get_existing_plugins.return_value = ['nerdtree', 'solarized']
        self.command.namespace = Namespace(only_new=True)
        self.assertEqual(self.command.get_plugins_to_update(), [('ctrlp', 2)])

    @patch('vimper.commands.update_repo')
    def test_update_plugins(self, update_repo):
        """update_plugins() feeds every (name, url) pair to the worker."""
        plugins = [('adamantium', 'foo'), ('eternium', 'bar')]
        self.command.get_plugins_to_update = Mock(return_value=plugins)
        self.command.update_plugin_for_info = Mock(return_value=('foo', 'bar'))
        self.command.update_plugins()
        self.assertEqual(self.command.update_plugin_for_info.call_args_list, [
            call(('adamantium', 'foo')),
            call(('eternium', 'bar'))
        ])

    @patch('vimper.commands.LinkCommand')
    def test_handle_links(self, LinkCommand):
        """handle_links() delegates linking to a LinkCommand instance."""
        link_command = Mock()
        LinkCommand.return_value = link_command
        self.command.handle_links(Namespace())
        self.assertEqual(link_command.method_calls, [
            call.link_vimper(),
            call.link_plugins(),
        ])
class TestLinkCommand(unittest.TestCase):
    """Tests for LinkCommand: symlinking vimper files and plugins."""

    def setUp(self):
        self.command = LinkCommand()

    def test_handle(self):
        """handle() links vimper first, then the plugins."""
        namespace = Mock()
        manager = Mock()
        self.command.link_vimper = manager.link_vimper
        self.command.link_plugins = manager.link_plugins
        self.command.handle(namespace)
        self.assertEqual(manager.method_calls, [
            call.link_vimper(),
            call.link_plugins(),
        ])

    def test_link_vimper(self):
        """link_vimper() creates a link for every (src, dst) pair."""
        links = [('src1', 'dst1'), ('src2', 'dst2')]
        self.command.get_links = Mock(return_value=links)
        self.command.link = Mock()
        self.command.link_vimper()
        self.assertEqual(self.command.link.call_args_list, [
            call('src1', 'dst1'),
            call('src2', 'dst2'),
        ])

    def test_link_plugins(self):
        """link_plugins() links each plugin in the explicitly given list."""
        self.command.get_plugins = Mock(return_value=['foo', 'bar'])
        self.command.link_plugin = Mock()
        plugins = ['auto-pairs', 'ctrlp', 'nerdtree', 'powerline']
        self.command.link_plugins(plugins)
        self.assertEqual(self.command.link_plugin.call_args_list, [
            call('auto-pairs'),
            call('ctrlp'),
            call('nerdtree'),
            call('powerline'),
        ])

    def test_link_plugins_default(self):
        """Without an argument, link_plugins() uses get_plugins()."""
        plugins = ['auto-pairs', 'ctrlp', 'nerdtree', 'powerline']
        self.command.get_plugins = Mock(return_value=plugins)
        self.command.link_plugin = Mock()
        self.command.link_plugins()
        self.assertEqual(self.command.link_plugin.call_args_list, [
            call('auto-pairs'),
            call('ctrlp'),
            call('nerdtree'),
            call('powerline'),
        ])
| |
import json
import string
from urllib.parse import quote
from django.conf import settings
from django.middleware.csrf import get_token
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.crypto import get_random_string
from django.utils.html import escapejs, mark_safe
from allauth.account.models import EmailAddress
from allauth.socialaccount.app_settings import QUERY_EMAIL
from allauth.socialaccount.providers.base import (
AuthAction,
AuthProcess,
ProviderAccount,
)
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
from allauth.utils import import_callable
from .locale import get_default_locale_callable
# Graph API version; overridable via
# SOCIALACCOUNT_PROVIDERS["facebook"]["VERSION"] in the Django settings.
GRAPH_API_VERSION = (
    getattr(settings, "SOCIALACCOUNT_PROVIDERS", {})
    .get("facebook", {})
    .get("VERSION", "v7.0")
)
GRAPH_API_URL = "https://graph.facebook.com/" + GRAPH_API_VERSION

# Session key and length of the anti-replay nonce used when the JS SDK
# performs a reauthentication (see FacebookProvider.get_nonce()).
NONCE_SESSION_KEY = "allauth_facebook_nonce"
NONCE_LENGTH = 32
class FacebookAccount(ProviderAccount):
    """Wraps a Facebook social account and exposes profile/avatar helpers."""

    def get_profile_url(self):
        # Facebook returns the profile URL in the "link" field, when granted.
        return self.account.extra_data.get("link")

    def get_avatar_url(self):
        # Ask for a 600x600 pixel image. We might get smaller but the image
        # will always be the highest resolution possible and square.
        picture_path = (
            "/%s/picture?type=square&height=600&width=600"
            "&return_ssl_resources=1" % self.account.uid
        )
        return GRAPH_API_URL + picture_path

    def to_str(self):
        # Prefer the profile's display name; fall back to the generic label.
        fallback = super(FacebookAccount, self).to_str()
        return self.account.extra_data.get("name", fallback)
class FacebookProvider(OAuth2Provider):
    """django-allauth provider for "Sign in with Facebook".

    Supports two login flows, selected by the METHOD setting:
    plain OAuth2 redirects ("oauth2", the default) or the Facebook
    JavaScript SDK ("js_sdk").
    """
    id = "facebook"
    name = "Facebook"
    account_class = FacebookAccount

    def __init__(self, request):
        # Cache for the locale callable, resolved lazily in
        # get_locale_for_request().
        self._locale_callable_cache = None
        super(FacebookProvider, self).__init__(request)

    def get_method(self):
        # Login flow: "oauth2" (default) or "js_sdk".
        return self.get_settings().get("METHOD", "oauth2")

    def get_login_url(self, request, **kwargs):
        """Return the URL (or javascript: pseudo-URL) that starts login.

        For the JS SDK flow this builds a ``javascript:`` URL invoking
        ``allauth.facebook.login()``; otherwise it defers to the regular
        OAuth2 login URL.
        """
        method = kwargs.pop("method", self.get_method())
        if method == "js_sdk":
            # Each argument is JS-escaped and single-quoted for safe
            # embedding in the generated call.  NOTE: `next` shadows the
            # builtin of the same name (local to this branch only).
            next = "'%s'" % escapejs(kwargs.get("next") or "")
            process = "'%s'" % escapejs(kwargs.get("process") or AuthProcess.LOGIN)
            action = "'%s'" % escapejs(kwargs.get("action") or AuthAction.AUTHENTICATE)
            scope = "'%s'" % escapejs(kwargs.get("scope", ""))
            js = "allauth.facebook.login(%s, %s, %s, %s)" % (
                next,
                action,
                process,
                scope,
            )
            ret = "javascript:%s" % (quote(js),)
        elif method == "oauth2":
            ret = super(FacebookProvider, self).get_login_url(request, **kwargs)
        else:
            raise RuntimeError("Invalid method specified: %s" % method)
        return ret

    def _get_locale_callable(self):
        # LOCALE_FUNC may be a dotted path or a callable; otherwise use
        # the default locale resolver from .locale.
        settings = self.get_settings()
        func = settings.get("LOCALE_FUNC")
        return import_callable(func) if func else get_default_locale_callable()

    def get_locale_for_request(self, request):
        # Resolve the locale callable once and cache it on the provider.
        if not self._locale_callable_cache:
            self._locale_callable_cache = self._get_locale_callable()
        return self._locale_callable_cache(request)

    def get_default_scope(self):
        # Only request the "email" permission when configured to query email.
        scope = []
        if QUERY_EMAIL:
            scope.append("email")
        return scope

    def get_fields(self):
        # Graph API profile fields to fetch; overridable via FIELDS setting.
        settings = self.get_settings()
        default_fields = [
            "id",
            "email",
            "name",
            "first_name",
            "last_name",
            "verified",
            "locale",
            "timezone",
            "link",
            "gender",
            "updated_time",
        ]
        return settings.get("FIELDS", default_fields)

    def get_auth_params(self, request, action):
        # Map allauth auth actions onto Facebook's "auth_type" parameter.
        ret = super(FacebookProvider, self).get_auth_params(request, action)
        if action == AuthAction.REAUTHENTICATE:
            ret["auth_type"] = "reauthenticate"
        elif action == AuthAction.REREQUEST:
            ret["auth_type"] = "rerequest"
        return ret

    def get_init_params(self, request, app):
        # Parameters for FB.init(); INIT_PARAMS may override or extend them.
        init_params = {"appId": app.client_id, "version": GRAPH_API_VERSION}
        settings = self.get_settings()
        init_params.update(settings.get("INIT_PARAMS", {}))
        return init_params

    def get_fb_login_options(self, request):
        # Options passed to FB.login(); reauthentication additionally gets
        # a nonce so the response can be tied back to this session.
        ret = self.get_auth_params(request, "authenticate")
        ret["scope"] = ",".join(self.get_scope(request))
        if ret.get("auth_type") == "reauthenticate":
            ret["auth_nonce"] = self.get_nonce(request, or_create=True)
        return ret

    def get_sdk_url(self, request):
        """Return the SDK script URL, filling in a {locale} placeholder."""
        settings = self.get_settings()
        sdk_url = settings.get("SDK_URL", "//connect.facebook.net/{locale}/sdk.js")
        # Inspect the format fields actually present in the configured URL.
        field_names = [
            tup[1] for tup in string.Formatter().parse(sdk_url) if tup[1] is not None
        ]
        if "locale" in field_names:
            locale = self.get_locale_for_request(request)
            sdk_url = sdk_url.format(locale=locale)
        return sdk_url

    def media_js(self, request):
        """Render the inline JS snippet that bootstraps the Facebook SDK."""
        # NOTE: Avoid loading models at top due to registry boot...
        from allauth.socialaccount.models import SocialApp

        try:
            app = self.get_app(request)
        except SocialApp.DoesNotExist:
            # It's a problem that Facebook isn't configured; but don't raise
            # an error. Other providers don't raise errors when they're missing
            # SocialApps in media_js().
            return ""

        def abs_uri(name):
            # Absolute URL of a named route, relative to this request.
            return request.build_absolute_uri(reverse(name))

        # Everything the client-side SDK glue needs, serialized as JSON
        # into the rendered template.
        fb_data = {
            "appId": app.client_id,
            "version": GRAPH_API_VERSION,
            "sdkUrl": self.get_sdk_url(request),
            "initParams": self.get_init_params(request, app),
            "loginOptions": self.get_fb_login_options(request),
            "loginByTokenUrl": abs_uri("facebook_login_by_token"),
            "cancelUrl": abs_uri("socialaccount_login_cancelled"),
            "logoutUrl": abs_uri("account_logout"),
            "loginUrl": request.build_absolute_uri(
                self.get_login_url(request, method="oauth2")
            ),
            "errorUrl": abs_uri("socialaccount_login_error"),
            "csrfToken": get_token(request),
        }
        ctx = {"fb_data": mark_safe(json.dumps(fb_data))}
        return render_to_string("facebook/fbconnect.html", ctx, request=request)

    def get_nonce(self, request, or_create=False, pop=False):
        # Session-scoped random nonce; ``pop`` consumes it, ``or_create``
        # generates and stores one if absent.
        if pop:
            nonce = request.session.pop(NONCE_SESSION_KEY, None)
        else:
            nonce = request.session.get(NONCE_SESSION_KEY)
        if not nonce and or_create:
            nonce = get_random_string(NONCE_LENGTH)
            request.session[NONCE_SESSION_KEY] = nonce
        return nonce

    def extract_uid(self, data):
        # Facebook's user id is the stable unique identifier.
        return data["id"]

    def extract_common_fields(self, data):
        # Map Graph API profile fields onto allauth's common user fields.
        return dict(
            email=data.get("email"),
            username=data.get("username"),
            first_name=data.get("first_name"),
            last_name=data.get("last_name"),
            name=data.get("name"),
        )

    def extract_email_addresses(self, data):
        ret = []
        email = data.get("email")
        if email:
            # data['verified'] does not imply the email address is
            # verified.
            ret.append(EmailAddress(email=email, verified=False, primary=True))
        return ret
# Registry hook: providers exported by this module to allauth.
provider_classes = [FacebookProvider]
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
''' Implement astronomical algorithms for finding solar terms and moon phases.
Full VSOP87D for calculate Sun's apparent longitude;
Full LEA-406 for calculate Moon's apparent longitude;
Truncated IAU2000B from USNO NOVAS c source is used for nutation.
Reference:
VSOP87: ftp://ftp.imcce.fr/pub/ephem/planets/vsop87
LEA-406: S. M. Kudryavtsev (2007) "Long-term harmonic development of
lunar ephemeris", Astronomy and Astrophysics 471, 1069-1075
'''
__license__ = 'BSD'
__copyright__ = '2020, Chen Wei <weichen302@gmail.com>'
__version__ = '0.0.3'
import math
from math import pi, fmod
import numpy as np
import numexpr as ne
from aa import lightabbr_high
from aa import nutation
from aa import fmtdeg
from aa import g2jd, jd2g
# Julian Day of the J2000.0 epoch (2000-01-01 12:00 TT).
J2000 = 2451545.0
# Mean length of a synodic month (new moon to new moon), in days.
SYNODIC_MONTH = 29.53
# Mean angular speed of the Moon relative to the Sun, radians per day.
MOON_SPEED = 2 * pi / SYNODIC_MONTH
# Mean length of the tropical year, in days.
TROPICAL_YEAR = 365.24
# Arcseconds to radians.
ASEC2RAD = 4.848136811095359935899141e-6
# Degrees to radians.
DEG2RAD = 0.017453292519943295
# Arcseconds in a full circle (360 * 3600).
ASEC360 = 1296000.0
PI = pi
TWOPI = 2 * pi
def vsopLx(vsopterms, t):
    ''' Evaluate one VSOP87 series at time t.

    Each row of `vsopterms` holds (amplitude, phase, frequency); the series
    value is sum(amplitude * cos(phase + frequency * t)) over all rows.
    '''
    amplitude = vsopterms[:, 0]
    phase = vsopterms[:, 1]
    frequency = vsopterms[:, 2]
    return sum(amplitude * np.cos(phase + frequency * t))
# full VSOP87D tables
from aa_full_table import EAR_L0, EAR_L1, EAR_L2, EAR_L3, EAR_L4, EAR_L5
def vsop(jde, FK5=True):
    ''' Calculate the ecliptical longitude of Earth in heliocentric
    coordinates from the full VSOP87D tables (heliocentric spherical,
    referred to the mean equinox of the date).

    In A&A, Meeus said while the complete VSOP87 yields an accuracy of
    0.01", the abridged VSOP87 has an accuracy of 1" for -2000 - +6000.
    The VSOP87D table used here is a truncated version, done by the
    vsoptrunc-sph.c from Celestia.

    Arg:
        jde: time in JDTT
    Return:
        Earth longitude in radians, referred to the mean dynamical
        ecliptic and equinox of the date
    '''
    t = (jde - J2000) / 365250.0
    # Combine the six longitude series as a polynomial in t using Horner's
    # scheme: L0 + t*(L1 + t*(L2 + t*(L3 + t*(L4 + t*L5)))).
    lon = 0.0
    for terms in (EAR_L5, EAR_L4, EAR_L3, EAR_L2, EAR_L1, EAR_L0):
        lon = lon * t + vsopLx(terms, t)
    if FK5:
        # Conversion from the VSOP dynamical ecliptic to FK5.  The full
        # correction involves latitude terms, but a constant -0.09033"
        # (expressed below in radians) is accurate enough here.
        lon += -4.379321981462438e-07
    return lon
def rootbysecand(f, angle, x0, x1, precision=0.000000001):
    ''' Solve f(jd, angle) = 0 for jd by the secant method.

    Iterates from the starting guesses x0 and x1 until either |f| or the
    step size drops below `precision`; also stops early if the secant
    slope degenerates (equal successive function values).
    '''
    f_prev = f(x0, angle)
    f_cur = f(x1, angle)
    while abs(f_cur) > precision and abs(x0 - x1) > precision \
            and f_prev != f_cur:
        x_next = x1 - f_cur * (x1 - x0) / (f_cur - f_prev)
        x0, x1 = x1, x_next
        f_prev, f_cur = f_cur, f(x_next, angle)
    return x1
def normrad(r):
    ''' Normalize an angle in radians into the range [0, 2*pi). '''
    reduced = fmod(r, TWOPI)
    # fmod keeps the sign of the dividend; shift negatives up one turn.
    return reduced + TWOPI if reduced < 0 else reduced
def npitopi(r):
    ''' Normalize an angle in radians into the range (-pi, +pi]. '''
    reduced = fmod(r, TWOPI)
    if reduced > PI:
        reduced -= TWOPI
    elif reduced <= -1.0 * PI:
        reduced += TWOPI
    return reduced
def f_solarangle(jd, r_angle):
    ''' Difference between the Sun's apparent longitude at JDTT `jd` and the
    target angle `r_angle` (radians).

    The difference is normalized into (-pi, pi] so f is continuous across
    the 0/2pi wrap-around and therefore usable with the secant root finder.
    '''
    return npitopi(apparentsun(jd) - r_angle)
def f_msangle(jd, angle):
    ''' Calculate difference between target angle and current sun-moon angle

    Arg:
        jd: time in JDTT
    Return:
        angle in radians, convert to -pi to +pi range
    '''
    # Nutation is skipped for both bodies (presumably because it largely
    # cancels in the Moon - Sun difference, and skipping it is faster).
    return npitopi(apparentmoon(jd, ignorenutation=True)
                   - apparentsun(jd, ignorenutation=True)
                   - angle)
def solarterm(year, angle):
    ''' Calculate a Solar Term by the secant method.

    The Sun's moving speed on ecliptical longitude is 0.04 arcsecond /
    second, and the accuracy of nutation by IAU2000B is 0.001".

    Args:
        year: the year in integer
        angle: degree of the solar term, in integer
    Return:
        time in JDTT
    '''
    # mean error when compare apparentsun to NASA(1900-2100) is 0.05"
    # 0.000000005 radians = 0.001"
    ERROR = 0.000000005

    r = normrad(math.radians(angle))
    # negative angle means we want search backward from Vernal Equinox,
    # initialize x0 to the day which apparent Sun longitude close to the angle
    # we searching for.  Use the module-level TROPICAL_YEAR constant instead
    # of repeating the 365.24 literal.
    # NOTE(review): the ratio reads as degrees-per-day (360/365.24) rather
    # than days-per-degree; it only seeds the root finder, which still
    # converges — confirm the estimate is as intended.
    est_vejd = g2jd(year, 3, 20.5)
    x0 = est_vejd + angle * 360.0 / TROPICAL_YEAR  # estimate
    x1 = x0 + 0.5
    return rootbysecand(f_solarangle, r, x0, x1, precision=ERROR)
def newmoon(jd):
    ''' search newmoon near a given date.

    Angle between Sun-Moon has been converted to [-pi, pi] range so the
    function f_msangle is continuous in that range. Use Secant method to
    find the root.

    Test shows newmoon can be found in 5 iterations, if the start is close
    enough, it may use only 3 iterations.

    Arg:
        jd: in JDTT
    Return:
        JDTT of newmoon
    '''
    # 0.0000001 radians is about 0.02 arcsecond, mean error of apparentmoon
    # when compared to JPL Horizon is about 0.7 arcsecond
    ERROR = 0.0000001
    # initialize x0 to the day close to newmoon: the current elongation
    # divided by the Moon's mean daily motion approximates the number of
    # days since/until new moon.
    x0 = jd - f_msangle(jd, 0) / MOON_SPEED
    x1 = x0 + 0.5
    return rootbysecand(f_msangle, 0, x0, x1, precision=ERROR)
def findnewmoons(start, count=15):
    ''' Search successive new moons from a specified start time.

    Arg:
        start: the start time in JD; does not matter if it is TT or UT
        count: the number of new moons to search after the start time
    Return:
        a list of JDTT at which a new moon occurs (count + 1 entries,
        including the one nearest the start)
    '''
    found = []
    last_nm = 0
    wanted = count + 1
    while len(found) < wanted:
        candidate = newmoon(start)
        if candidate == last_nm:
            # Converged onto the same new moon again; nudge forward a day.
            start += 1
            continue
        found.append(candidate)
        last_nm = candidate
        # Jump ahead roughly one lunation to bracket the next new moon.
        start = last_nm + SYNODIC_MONTH
    return found
def apparentmoon(jde, ignorenutation=False):
    ''' calculate the apparent position of the Moon; it is an alias to the
    lea406_full function'''
    return lea406_full(jde, ignorenutation)
def apparentsun(jde, ignorenutation=False):
    ''' calculate the apparent place of the Sun.

    Arg:
        jde: time in JDTT
    Return:
        geocentric longitude in radians, 0 - 2pi
    '''
    heliolong = vsop(jde)
    # Geocentric longitude of the Sun = heliocentric longitude of the
    # Earth + 180 degrees.
    geolong = heliolong + PI

    # compensate nutation
    if not ignorenutation:
        geolong += nutation(jde)

    # correction from lightabbr_high — presumably light aberration, per
    # its name; confirm against the aa module.
    labbr = lightabbr_high(jde)
    geolong += labbr

    return normrad(geolong)
#------------------------------------------------------------------------------
# LEA-406 Moon Solution
#
# Reference:
# Long-term harmonic development of lunar ephemeris.
# Kudryavtsev S.M. <Astron. Astrophys. 471, 1069 (2007)>
#
# the tables M_AMP, M_PHASE, M_ARG are imported from aa_full_table
#------------------------------------------------------------------------------
# Polynomial coefficients (arcseconds) of the Moon's mean longitude;
# evaluated as FRM[0] + FRM[1]*t + FRM[2]*t^2 + ... in lea406_full().
FRM = [785939.924268, 1732564372.3047, -5.279, .006665, -5.522e-5]

from aa_full_table import M_ARG, M_AMP, M_PHASE

# post import process of LEA-406 tables, horizontal split the numpy array:
# argument polynomial columns, phases (converted to radians) and amplitudes.
F0_V, F1_V, F2_V, F3_V, F4_V = np.hsplit(M_ARG, 5)
CV = M_PHASE * DEG2RAD
C_V, CT_V, CTT_V = np.hsplit(CV, 3)
A_V, AT_V, ATT_V = np.hsplit(M_AMP, 3)
def lea406_full(jd, ignorenutation=False):
    ''' compute moon ecliptic longitude using lea406

    numpy / numexpr are used to evaluate all series terms at once.

    Arg:
        jd: time in JDTT
    Return:
        Moon's apparent ecliptic longitude in radians, 0 - 2pi
    '''
    t = (jd - J2000) / 36525.0
    t2 = t * t
    t3 = t2 * t
    t4 = t3 * t
    # tm: time in units of 10 Julian centuries, used by the amplitude terms.
    tm = t / 10.0
    tm2 = tm * tm

    # Mean longitude polynomial, in arcseconds.
    V = FRM[0] + (((FRM[4] * t + FRM[3]) * t + FRM[2]) * t + FRM[1]) * t
    # numpy array operation
    ARGS = ne.evaluate('''( F0_V
                        + F1_V * t
                        + F2_V * t2
                        + F3_V * t3
                        + F4_V * t4) * ASEC2RAD''')
    P = ne.evaluate('''( A_V * sin(ARGS + C_V)
                    + AT_V * sin(ARGS + CT_V) * tm
                    + ATT_V * sin(ARGS + CTT_V) * tm2)''')
    V += sum(P)
    V = V * ASEC2RAD
    if not ignorenutation:
        V += nutation(jd)
    return normrad(V)
def main():
    ''' Ad-hoc check: print the Moon's apparent longitude every 2000 days
    starting from 1900-01-01. '''
    #jd = 2444239.5
    jd = g2jd(1900, 1, 1)
    for i in range(10):
        l = normrad(lea406_full(jd))
        #d = fmtdeg(math.degrees(npitopi(e -l )))
        print(jd, l, fmtdeg(math.degrees(l)))
        jd += 2000
    #print fmtdeg(math.degrees(e) % 360.0)
    #angle = -105
    #while angle < 360:
    #    a = solarterm(2014, angle)
    #    print 'search %d %s' % (angle,jdftime(a, tz=8, ut=True))
    #    angle += 15
if __name__ == "__main__":
main()
| |
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import calendar
import eventlet
import os
import time
from oslo.config import cfg
import six
from glance.common import crypt
from glance.common import exception
from glance.common import utils
from glance import context
import glance.db as db_api
from glance import i18n
from glance.openstack.common import lockutils
import glance.openstack.common.log as logging
import glance.registry.client.v1.api as registry
# Module logger and i18n shorthands.
LOG = logging.getLogger(__name__)

_LI = i18n._LI
_LW = i18n._LW
_LE = i18n._LE

# Options shared by glance-api and glance-scrubber.
scrubber_opts = [
    cfg.StrOpt('scrubber_datadir',
               default='/var/lib/glance/scrubber',
               help=_('Directory that the scrubber will use to track '
                      'information about what to delete. '
                      'Make sure this is set in glance-api.conf and '
                      'glance-scrubber.conf.')),
    cfg.IntOpt('scrub_time', default=0,
               help=_('The amount of time in seconds to delay before '
                      'performing a delete.')),
    cfg.BoolOpt('cleanup_scrubber', default=False,
                help=_('A boolean that determines if the scrubber should '
                       'clean up the files it uses for taking data. Only '
                       'one server in your deployment should be designated '
                       'the cleanup host.')),
    cfg.BoolOpt('delayed_delete', default=False,
                help=_('Turn on/off delayed delete.')),
    cfg.IntOpt('cleanup_scrubber_time', default=86400,
               help=_('Items must have a modified time that is older than '
                      'this value in order to be candidates for cleanup.'))
]

# Options specific to the glance-scrubber command.
scrubber_cmd_opts = [
    cfg.IntOpt('wakeup_time', default=300,
               help=_('Loop time between checking for new '
                      'items to schedule for delete.'))
]

# Command-line-only options.
scrubber_cmd_cli_opts = [
    cfg.BoolOpt('daemon',
                short='D',
                default=False,
                help=_('Run as a long-running process. When not '
                       'specified (the default) run the scrub operation '
                       'once and then exits. When specified do not exit '
                       'and run scrub on wakeup_time interval as '
                       'specified in the config.'))
]

CONF = cfg.CONF
CONF.register_opts(scrubber_opts)
# metadata_encryption_key is declared by glance.common.config; import it
# so it can be read from this module's CONF.
CONF.import_opt('metadata_encryption_key', 'glance.common.config')
class ScrubQueue(object):
    """Image scrub queue base class.

    The queue contains image's location which need to delete from backend.
    """
    # NOTE(review): methods below are marked @abc.abstractmethod, but the
    # class does not use abc.ABCMeta as its metaclass, so abstractness is
    # not enforced at instantiation time — confirm whether that is intended.
    def __init__(self):
        self.scrub_time = CONF.scrub_time
        self.metadata_encryption_key = CONF.metadata_encryption_key
        # A registry client with an anonymous request context, used as the
        # default when callers do not supply their own context.
        registry.configure_registry_client()
        registry.configure_registry_admin_creds()
        self.registry = registry.get_registry_client(context.RequestContext())

    @abc.abstractmethod
    def add_location(self, image_id, location, user_context=None):
        """Adding image location to scrub queue.

        :param image_id: The opaque image identifier
        :param location: The opaque image location
        :param user_context: The user's request context

        :retval A boolean value to indicate success or not
        """
        pass

    @abc.abstractmethod
    def get_all_locations(self):
        """Returns a list of image id and location tuple from scrub queue.

        :retval a list of image id and location tuple from scrub queue
        """
        pass

    @abc.abstractmethod
    def pop_all_locations(self):
        """Pop out a list of image id and location tuple from scrub queue.

        :retval a list of image id and location tuple from scrub queue
        """
        pass

    @abc.abstractmethod
    def has_image(self, image_id):
        """Returns whether the queue contains an image or not.

        :param image_id: The opaque image identifier

        :retval a boolean value to inform including or not
        """
        pass
class ScrubFileQueue(ScrubQueue):
    """File-based image scrub queue class.

    Each queued image is tracked by a file named after the image id under
    ``scrubber_datadir``; every record in that file is three lines:
    location id, (possibly encrypted) uri, and delete timestamp.
    """
    def __init__(self):
        super(ScrubFileQueue, self).__init__()
        self.scrubber_datadir = CONF.scrubber_datadir
        utils.safe_mkdirs(self.scrubber_datadir)

    def _read_queue_file(self, file_path):
        """Reading queue file to loading deleted location and timestamp out.

        :param file_path: Queue file full path

        :retval a list of image location id, uri and timestamp tuple
        """
        loc_ids = []
        uris = []
        delete_times = []

        try:
            with open(file_path, 'r') as f:
                while True:
                    loc_id = f.readline().strip()
                    if loc_id:
                        lid = six.text_type(loc_id)
                        loc_ids.append(int(lid) if lid.isdigit() else lid)
                        # NOTE: use six.text_type rather than the Python 2
                        # only builtin unicode(), consistently with the
                        # loc_id handling above (py3 compatibility).
                        uris.append(six.text_type(f.readline().strip()))
                        delete_times.append(int(f.readline().strip()))
                    else:
                        break
            return loc_ids, uris, delete_times
        except Exception:
            # NOTE(review): returns None on failure, so callers unpacking
            # the triple will raise TypeError — confirm intended behaviour.
            LOG.error(_LE("%s file can not be read.") % file_path)

    def _update_queue_file(self, file_path, remove_record_idxs):
        """Updating queue file to remove such queue records.

        :param file_path: Queue file full path
        :param remove_record_idxs: A list of record index those want to remove
        """
        try:
            with open(file_path, 'r') as f:
                lines = f.readlines()
            # NOTE(zhiyan) we need bottom up removing to
            # keep record index be valid.
            remove_record_idxs.sort(reverse=True)
            for record_idx in remove_record_idxs:
                # Each record has three lines:
                # location id, uri and delete time.
                # Record i therefore occupies lines [3*i, 3*i + 3).
                # (The previous formula (record_idx + 1) * 3 - 1 started
                # two lines too late and deleted parts of two records.)
                line_no = record_idx * 3
                del lines[line_no:line_no + 3]
            with open(file_path, 'w') as f:
                f.write(''.join(lines))
            os.chmod(file_path, 0o600)
        except Exception:
            LOG.error(_LE("%s file can not be written.") % file_path)

    def add_location(self, image_id, location, user_context=None):
        """Adding image location to scrub queue.

        :param image_id: The opaque image identifier
        :param location: The opaque image location
        :param user_context: The user's request context

        :retval A boolean value to indicate success or not
        """
        if user_context is not None:
            registry_client = registry.get_registry_client(user_context)
        else:
            registry_client = self.registry

        with lockutils.lock("scrubber-%s" % image_id,
                            lock_file_prefix='glance-', external=True):

            # NOTE(zhiyan): make sure scrubber does not cleanup
            # 'pending_delete' images concurrently before the code
            # get lock and reach here.
            try:
                image = registry_client.get_image(image_id)
                if image['status'] == 'deleted':
                    return True
            except exception.NotFound as e:
                LOG.warn(_LW("Failed to find image to delete: %s"),
                         utils.exception_to_str(e))
                return False

            loc_id = location.get('id', '-')
            if self.metadata_encryption_key:
                uri = crypt.urlsafe_encrypt(self.metadata_encryption_key,
                                            location['url'], 64)
            else:
                uri = location['url']
            delete_time = time.time() + self.scrub_time
            file_path = os.path.join(self.scrubber_datadir, str(image_id))

            if os.path.exists(file_path):
                # Append the uri of location to the queue file
                with open(file_path, 'a') as f:
                    f.write('\n')
                    f.write('\n'.join([str(loc_id),
                                       uri,
                                       str(int(delete_time))]))
            else:
                # NOTE(zhiyan): Protect the file before we write any data.
                open(file_path, 'w').close()
                os.chmod(file_path, 0o600)
                with open(file_path, 'w') as f:
                    f.write('\n'.join([str(loc_id),
                                       uri,
                                       str(int(delete_time))]))
            # Record the scheduled delete time in the file's timestamps.
            os.utime(file_path, (delete_time, delete_time))

            return True

    def _walk_all_locations(self, remove=False):
        """Returns a list of image id and location tuple from scrub queue.

        :param remove: Whether remove location from queue or not after walk

        :retval a list of image id, location id and uri tuple from scrub queue
        """
        if not os.path.exists(self.scrubber_datadir):
            LOG.info(_LI("%s directory does not exist.") %
                     self.scrubber_datadir)
            return []

        ret = []
        for root, dirs, files in os.walk(self.scrubber_datadir):
            for image_id in files:
                if not utils.is_uuid_like(image_id):
                    continue
                with lockutils.lock("scrubber-%s" % image_id,
                                    lock_file_prefix='glance-', external=True):
                    file_path = os.path.join(self.scrubber_datadir, image_id)
                    records = self._read_queue_file(file_path)
                    loc_ids, uris, delete_times = records

                    remove_record_idxs = []
                    skipped = False
                    for (record_idx, delete_time) in enumerate(delete_times):
                        if delete_time > time.time():
                            # Not yet due for deletion; keep this record.
                            skipped = True
                            continue
                        else:
                            ret.append((image_id,
                                        loc_ids[record_idx],
                                        uris[record_idx]))
                            remove_record_idxs.append(record_idx)

                    if remove:
                        if skipped:
                            # NOTE(zhiyan): remove location records from
                            # the queue file.
                            self._update_queue_file(file_path,
                                                    remove_record_idxs)
                        else:
                            # All records consumed; drop the whole file.
                            utils.safe_remove(file_path)
        return ret

    def get_all_locations(self):
        """Returns a list of image id and location tuple from scrub queue.

        :retval a list of image id and location tuple from scrub queue
        """
        return self._walk_all_locations()

    def pop_all_locations(self):
        """Pop out a list of image id and location tuple from scrub queue.

        :retval a list of image id and location tuple from scrub queue
        """
        return self._walk_all_locations(remove=True)

    def has_image(self, image_id):
        """Returns whether the queue contains an image or not.

        :param image_id: The opaque image identifier

        :retval a boolean value to inform including or not
        """
        return os.path.exists(os.path.join(self.scrubber_datadir,
                                           str(image_id)))
class ScrubDBQueue(ScrubQueue):
    """Database-based image scrub queue class."""
    def __init__(self):
        super(ScrubDBQueue, self).__init__()
        # Admin context is required to delete image locations regardless
        # of which tenant owns the image.
        admin_tenant_name = CONF.admin_tenant_name
        admin_token = self.registry.auth_token
        self.admin_context = context.RequestContext(user=CONF.admin_user,
                                                    tenant=admin_tenant_name,
                                                    auth_token=admin_token)

    def add_location(self, image_id, location, user_context=None):
        """Adding image location to scrub queue.

        :param image_id: The opaque image identifier
        :param location: The opaque image location
        :param user_context: The user's request context

        :retval A boolean value to indicate success or not
        """
        # Marking the location 'pending_delete' in the DB is what queues it;
        # _walk_all_locations() later picks those rows up.
        loc_id = location.get('id')
        if loc_id:
            db_api.get_api().image_location_delete(self.admin_context,
                                                   image_id, loc_id,
                                                   'pending_delete')
            return True
        else:
            return False

    def _walk_all_locations(self, remove=False):
        """Returns a list of image id and location tuple from scrub queue.

        :param remove: Whether remove location from queue or not after walk

        :retval a list of image id, location id and uri tuple from scrub queue
        """
        filters = {'deleted': True,
                   'is_public': 'none',
                   'status': 'pending_delete'}
        ret = []
        for image in self.registry.get_images_detailed(filters=filters):
            deleted_at = image.get('deleted_at')
            if not deleted_at:
                continue

            # NOTE: Strip off microseconds which may occur after the last '.,'
            # Example: 2012-07-07T19:14:34.974216
            date_str = deleted_at.rsplit('.', 1)[0].rsplit(',', 1)[0]

            delete_time = calendar.timegm(time.strptime(date_str,
                                                        "%Y-%m-%dT%H:%M:%S"))

            # Honor the configured delay before scrubbing.
            if delete_time + self.scrub_time > time.time():
                continue

            for loc in image['location_data']:
                if loc['status'] != 'pending_delete':
                    continue

                if self.metadata_encryption_key:
                    uri = crypt.urlsafe_encrypt(self.metadata_encryption_key,
                                                loc['url'], 64)
                else:
                    uri = loc['url']

                ret.append((image['id'], loc['id'], uri))

                if remove:
                    # Mark the location deleted and flip the image status.
                    db_api.get_api().image_location_delete(self.admin_context,
                                                           image['id'],
                                                           loc['id'],
                                                           'deleted')
                    self.registry.update_image(image['id'],
                                               {'status': 'deleted'})
        return ret

    def get_all_locations(self):
        """Returns a list of image id and location tuple from scrub queue.

        :retval a list of image id and location tuple from scrub queue
        """
        return self._walk_all_locations()

    def pop_all_locations(self):
        """Pop out a list of image id and location tuple from scrub queue.

        :retval a list of image id and location tuple from scrub queue
        """
        return self._walk_all_locations(remove=True)

    def has_image(self, image_id):
        """Returns whether the queue contains an image or not.

        :param image_id: The opaque image identifier

        :retval a boolean value to inform including or not
        """
        try:
            image = self.registry.get_image(image_id)
            return image['status'] == 'pending_delete'
        except exception.NotFound:
            return False
# Lazily-constructed process-wide queue singletons.
_file_queue = None
_db_queue = None


def get_scrub_queues():
    """Return the (file queue, db queue) pair, creating them on first use."""
    global _file_queue, _db_queue
    if not _file_queue:
        _file_queue = ScrubFileQueue()
    if not _db_queue:
        _db_queue = ScrubDBQueue()
    return (_file_queue, _db_queue)
class Daemon(object):
    """Re-runs an application every ``wakeup_time`` seconds on an eventlet
    green-thread pool, until interrupted."""
    def __init__(self, wakeup_time=300, threads=1000):
        LOG.info(_LI("Starting Daemon: wakeup_time=%(wakeup_time)s "
                     "threads=%(threads)s"),
                 {'wakeup_time': wakeup_time, 'threads': threads})
        self.wakeup_time = wakeup_time
        self.event = eventlet.event.Event()
        self.pool = eventlet.greenpool.GreenPool(threads)

    def start(self, application):
        # Kick off the first run; subsequent runs are self-scheduled by _run.
        self._run(application)

    def wait(self):
        # Block until the event fires (not sent anywhere in this class)
        # or the process receives Ctrl-C.
        try:
            self.event.wait()
        except KeyboardInterrupt:
            msg = _LI("Daemon Shutdown on KeyboardInterrupt")
            LOG.info(msg)

    def _run(self, application):
        LOG.debug("Running application")
        self.pool.spawn_n(application.run, self.pool, self.event)
        # Re-schedule ourselves; this repeats for the life of the process.
        eventlet.spawn_after(self.wakeup_time, self._run, application)
        LOG.debug("Next run scheduled in %s seconds" % self.wakeup_time)
class Scrubber(object):
    """Deletes queued image locations from the backend store.

    Jobs come from two queues: the file queue (populated by glance-api on
    delayed delete) and the db queue (images in 'pending_delete' status).
    """
    def __init__(self, store_api):
        LOG.info(_LI("Initializing scrubber with configuration: %s") %
                 six.text_type({'scrubber_datadir': CONF.scrubber_datadir,
                                'cleanup': CONF.cleanup_scrubber,
                                'cleanup_time': CONF.cleanup_scrubber_time,
                                'registry_host': CONF.registry_host,
                                'registry_port': CONF.registry_port}))

        utils.safe_mkdirs(CONF.scrubber_datadir)

        self.store_api = store_api

        registry.configure_registry_client()
        registry.configure_registry_admin_creds()
        self.registry = registry.get_registry_client(context.RequestContext())

        # Here we create a request context with credentials to support
        # delayed delete when using multi-tenant backend storage
        admin_tenant = CONF.admin_tenant_name
        auth_token = self.registry.auth_token
        self.admin_context = context.RequestContext(user=CONF.admin_user,
                                                    tenant=admin_tenant,
                                                    auth_token=auth_token)

        (self.file_queue, self.db_queue) = get_scrub_queues()

    def _get_delete_jobs(self, queue, pop):
        """Fetch pending delete records from ``queue``, grouped by image.

        :param queue: queue to read from
        :param pop: if True, also remove the records from the queue
        :retval dict mapping image id -> list of (image_id, loc_id, uri)
        """
        try:
            if pop:
                records = queue.pop_all_locations()
            else:
                records = queue.get_all_locations()
        except Exception as err:
            LOG.error(_LE("Can not %(op)s scrub jobs from queue: %(err)s") %
                      {'op': 'pop' if pop else 'get',
                       'err': utils.exception_to_str(err)})
            return {}

        delete_jobs = {}
        for image_id, loc_id, loc_uri in records:
            if image_id not in delete_jobs:
                delete_jobs[image_id] = []
            delete_jobs[image_id].append((image_id, loc_id, loc_uri))
        return delete_jobs

    def _merge_delete_jobs(self, file_jobs, db_jobs):
        """Union of file-queue and db-queue jobs, de-duplicated per image."""
        ret = {}
        # NOTE: use six.iteritems() instead of the Python 2 only
        # dict.iteritems(), consistently with run() (py3 compatibility).
        for image_id, file_job_items in six.iteritems(file_jobs):
            ret[image_id] = file_job_items
            db_job_items = db_jobs.get(image_id, [])
            for db_item in db_job_items:
                if db_item not in file_job_items:
                    ret[image_id].append(db_item)
        for image_id, db_job_items in six.iteritems(db_jobs):
            if image_id not in ret:
                ret[image_id] = db_job_items
        return ret

    def run(self, pool, event=None):
        # File-queue jobs are popped (consumed); db-queue jobs are only
        # read, since their state transitions happen during scrubbing.
        file_jobs = self._get_delete_jobs(self.file_queue, True)
        db_jobs = self._get_delete_jobs(self.db_queue, False)
        delete_jobs = self._merge_delete_jobs(file_jobs, db_jobs)

        if delete_jobs:
            for image_id, jobs in six.iteritems(delete_jobs):
                self._scrub_image(pool, image_id, jobs)

        if CONF.cleanup_scrubber:
            self._cleanup(pool)

    def _scrub_image(self, pool, image_id, delete_jobs):
        if len(delete_jobs) == 0:
            return

        LOG.info(_LI("Scrubbing image %(id)s from %(count)d locations.") %
                 {'id': image_id, 'count': len(delete_jobs)})
        # NOTE(bourke): The starmap must be iterated to do work
        list(pool.starmap(self._delete_image_location_from_backend,
                          delete_jobs))

        # Only flip the image to 'deleted' once no queued locations remain.
        image = self.registry.get_image(image_id)
        if (image['status'] == 'pending_delete' and
                not self.file_queue.has_image(image_id)):
            self.registry.update_image(image_id, {'status': 'deleted'})

    def _delete_image_location_from_backend(self, image_id, loc_id, uri):
        if CONF.metadata_encryption_key:
            uri = crypt.urlsafe_decrypt(CONF.metadata_encryption_key, uri)

        try:
            LOG.debug("Deleting URI from image %s." % image_id)
            self.store_api.delete_from_backend(self.admin_context, uri)
            # '-' is the placeholder used when the location had no id.
            if loc_id != '-':
                db_api.get_api().image_location_delete(self.admin_context,
                                                       image_id,
                                                       int(loc_id),
                                                       'deleted')
            LOG.info(_LI("Image %s has been deleted.") % image_id)
        except Exception:
            LOG.warn(_LW("Unable to delete URI from image %s.") % image_id)

    def _read_cleanup_file(self, file_path):
        """Reading cleanup file to get the latest cleanup timestamp.

        :param file_path: Cleanup status file full path

        :retval latest cleanup timestamp, or None on any error
        """
        try:
            if not os.path.exists(file_path):
                msg = _("%s file does not exist.") % six.text_type(file_path)
                raise Exception(msg)
            atime = int(os.path.getatime(file_path))
            mtime = int(os.path.getmtime(file_path))
            if atime != mtime:
                msg = _("%s file contains conflicting cleanup "
                        "timestamp.") % six.text_type(file_path)
                raise Exception(msg)
            return atime
        except Exception as e:
            LOG.error(utils.exception_to_str(e))
            return None

    def _update_cleanup_file(self, file_path, cleanup_time):
        """Update latest cleanup timestamp to cleanup file.

        :param file_path: Cleanup status file full path
        :param cleanup_time: The Latest cleanup timestamp
        """
        try:
            # The timestamp is stored in the file's atime/mtime, not its body.
            open(file_path, 'w').close()
            os.chmod(file_path, 0o600)
            os.utime(file_path, (cleanup_time, cleanup_time))
        except Exception:
            LOG.error(_LE("%s file can not be created.") %
                      six.text_type(file_path))

    def _cleanup(self, pool):
        now = time.time()
        cleanup_file = os.path.join(CONF.scrubber_datadir, ".cleanup")
        if not os.path.exists(cleanup_file):
            self._update_cleanup_file(cleanup_file, now)
            return

        # NOTE(review): _read_cleanup_file() may return None, which would
        # raise TypeError on the addition below — confirm intended.
        last_cleanup_time = self._read_cleanup_file(cleanup_file)
        cleanup_time = last_cleanup_time + CONF.cleanup_scrubber_time
        if cleanup_time > now:
            return

        LOG.info(_LI("Getting images deleted before %s") %
                 CONF.cleanup_scrubber_time)
        self._update_cleanup_file(cleanup_file, now)

        delete_jobs = self._get_delete_jobs(self.db_queue, False)
        if not delete_jobs:
            return

        for image_id, jobs in six.iteritems(delete_jobs):
            with lockutils.lock("scrubber-%s" % image_id,
                                lock_file_prefix='glance-', external=True):
                if not self.file_queue.has_image(image_id):
                    # NOTE(zhiyan): scrubber should not cleanup this image
                    # since a queue file be created for this 'pending_delete'
                    # image concurrently before the code get lock and
                    # reach here. The checking only be worth if glance-api and
                    # glance-scrubber service be deployed on a same host.
                    self._scrub_image(pool, image_id, jobs)
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2006 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
from BaseHTTPServer import BaseHTTPRequestHandler
from Cookie import CookieError, BaseCookie, SimpleCookie
import cgi
from datetime import datetime
import errno
from hashlib import md5
import new
import mimetypes
import os
import socket
from StringIO import StringIO
import sys
import urlparse
from trac.core import Interface, TracError
from trac.util import get_last_traceback, unquote
from trac.util.datefmt import http_date, localtz
from trac.util.text import empty, to_unicode
from trac.util.translation import _
from trac.web.href import Href
from trac.web.wsgi import _FileWrapper
class IAuthenticator(Interface):
    """Extension point interface for components that can provide the name
    of the remote user."""

    # Trac extension-point methods are declared without `self`: they only
    # describe the contract implementing components must satisfy.
    def authenticate(req):
        """Return the name of the remote user, or `None` if the identity of the
        user is unknown."""
class IRequestHandler(Interface):
    """Decide which `trac.core.Component` handles which `Request`, and how."""

    def match_request(req):
        """Return whether the handler wants to process the given request."""

    def process_request(req):
        """Process the request.

        Return a `(template_name, data, content_type)` tuple,
        where `data` is a dictionary of substitutions for the Genshi template.

        "text/html" is assumed if `content_type` is `None`.

        Note that if template processing should not occur, this method can
        simply send the response itself and not return anything.

        :Since 1.0: Clearsilver templates are no longer supported.
        """
class IRequestFilter(Interface):
    """Enable components to interfere with the processing done by the
    main handler, either before and/or after it enters in action.
    """

    def pre_process_request(req, handler):
        """Called after initial handler selection, and can be used to change
        the selected handler or redirect request.

        Always returns the request handler, even if unchanged.
        """

    def post_process_request(req, template, data, content_type):
        """Do any post-processing the request might need; typically adding
        values to the template `data` dictionary, or changing the Genshi
        template or mime type.

        `data` may be updated in place.

        Always returns a tuple of (template, data, content_type), even if
        unchanged.

        Note that `template`, `data`, `content_type` will be `None` if:
         - called when processing an error page
         - the default request handler did not return any result

        :Since 0.11: there's a `data` argument for supporting Genshi templates;
           this introduced a difference in arity which made it possible to
           distinguish between the IRequestFilter components still targeted
           at ClearSilver templates and the newer ones targeted at Genshi
           templates.

        :Since 1.0: Clearsilver templates are no longer supported.
        """
class ITemplateStreamFilter(Interface):
    """Transform the generated content by filtering the Genshi event stream
    generated by the template, prior to its serialization.
    """

    def filter_stream(req, method, filename, stream, data):
        """Return a filtered Genshi event stream, or the original unfiltered
        stream if no match.

        `req` is the current request object, `method` is the Genshi render
        method (xml, xhtml or text), `filename` is the filename of the template
        to be rendered, `stream` is the event stream and `data` is the data for
        the current template.

        See the Genshi_ documentation for more information.

        .. _Genshi: http://genshi.edgewall.org/wiki/Documentation/filters.html
        """
# Map of status code -> title-cased reason phrase, taken from the stdlib's
# table of known HTTP responses.
HTTP_STATUS = dict((status, phrase.title())
                   for status, (phrase, _descr)
                   in BaseHTTPRequestHandler.responses.items())
class HTTPException(Exception):
    """Base class for exceptions carrying an HTTP status.

    Concrete per-status subclasses (e.g. ``HTTPNotFound``) are generated
    below via `subclass` and carry `code` and `reason` class attributes.
    """

    def __init__(self, detail, *args):
        """Create the exception.

        :param detail: a message string, or a `TracError` whose message and
                       title are reused as detail and reason
        :param args: optional values interpolated into `detail` with ``%``
        """
        if isinstance(detail, TracError):
            self.detail = detail.message
            self.reason = detail.title
        else:
            self.detail = detail
        if args:
            self.detail = self.detail % args
        Exception.__init__(self, '%s %s (%s)' % (self.code, self.reason,
                                                 self.detail))

    @classmethod
    def subclass(cls, name, code):
        """Create a new Exception class representing a HTTP status code."""
        reason = HTTP_STATUS.get(code, 'Unknown')
        # Use the `type()` builtin instead of `new.classobj`: the `new`
        # module was deprecated in Python 2.6 and removed in Python 3,
        # and `type()` is the documented equivalent.
        new_class = type(name, (HTTPException,), {
            '__doc__': 'Exception for HTTP %d %s' % (code, reason)
        })
        new_class.code = code
        new_class.reason = reason
        return new_class
_HTTPException_subclass_names = []
# Dynamically generate one HTTPException subclass per known 4xx/5xx status
# code (e.g. `HTTPNotFound` for 404) and publish each as an attribute of
# this module, recording the generated names for `__no_apidoc__` below.
for code in [code for code in HTTP_STATUS if code >= 400]:
    exc_name = HTTP_STATUS[code].replace(' ', '').replace('-', '')
    # 2.5 compatibility hack:
    if exc_name == 'InternalServerError':
        exc_name = 'InternalError'
    # Strip a leading "HTTP" from the reason (e.g. "HTTPVersionNotSupported")
    # so the unconditional prefix below doesn't double it.
    if exc_name.lower().startswith('http'):
        exc_name = exc_name[4:]
    exc_name = 'HTTP' + exc_name
    setattr(sys.modules[__name__], exc_name,
            HTTPException.subclass(exc_name, code))
    _HTTPException_subclass_names.append(exc_name)
# Don't leak the loop variables at module scope.
del code, exc_name
class _RequestArgs(dict):
"""Dictionary subclass that provides convenient access to request
parameters that may contain multiple values."""
def getfirst(self, name, default=None):
"""Return the first value for the specified parameter, or `default` if
the parameter was not provided.
"""
if name not in self:
return default
val = self[name]
if isinstance(val, list):
val = val[0]
return val
def getlist(self, name):
"""Return a list of values for the specified parameter, even if only
one value was provided.
"""
if name not in self:
return []
val = self[name]
if not isinstance(val, list):
val = [val]
return val
def parse_arg_list(query_string):
    """Parse a query string into a list of `(name, value)` tuples."""
    def _decode(raw):
        # '+' encodes a space in query strings; percent-decode, then promote
        # byte strings to unicode assuming UTF-8.
        raw = unquote(raw.replace('+', ' '))
        if isinstance(raw, str):
            raw = unicode(raw, 'utf-8')
        return raw

    args = []
    if not query_string:
        return args
    for field in query_string.split('&'):
        parts = field.split('=', 1)
        if len(parts) == 2:
            name, value = parts
        else:
            # Bare parameter without '=': use the `empty` marker value.
            name, value = parts[0], empty
        args.append((_decode(name), _decode(value)))
    return args
def arg_list_to_args(arg_list):
    """Convert a list of `(name, value)` tuples into a `_RequestArgs`."""
    args = _RequestArgs()
    for name, value in arg_list:
        if name not in args:
            # First occurrence: store the scalar value.
            args[name] = value
        elif isinstance(args[name], list):
            # Third and later occurrences: extend the existing list.
            args[name].append(value)
        else:
            # Second occurrence: promote the scalar to a list.
            args[name] = [args[name], value]
    return args
class RequestDone(Exception):
    """Marker exception that indicates that request processing has completed
    and a response was sent.
    """
class Cookie(SimpleCookie):
    """`SimpleCookie` variant that can tolerate unparseable cookie values.

    The stdlib parser raises `CookieError` on an invalid value; with
    ``ignore_parse_errors=True`` this subclass collects the offending keys
    instead and drops them after parsing.
    """

    def load(self, rawdata, ignore_parse_errors=False):
        if ignore_parse_errors:
            self.bad_cookies = []
            # Temporarily swap BaseCookie's name-mangled private setter
            # (`_BaseCookie__set`) for a lenient version during parsing.
            self._BaseCookie__set = self._loose_set
        SimpleCookie.load(self, rawdata)
        if ignore_parse_errors:
            # Restore the strict setter and discard the bad entries.
            self._BaseCookie__set = self._strict_set
            for key in self.bad_cookies:
                del self[key]

    # The original (strict) private setter inherited from BaseCookie.
    _strict_set = BaseCookie._BaseCookie__set

    def _loose_set(self, key, real_value, coded_value):
        # If a key appears multiple times, the first occurrence has the
        # narrowest scope, keep that
        if key in self:
            return
        try:
            self._strict_set(key, real_value, coded_value)
        except CookieError:
            # Remember the bad key so `load` can delete it afterwards; the
            # placeholder entry makes later duplicates be ignored too.
            self.bad_cookies.append(key)
            dict.__setitem__(self, key, None)
class Request(object):
"""Represents a HTTP request/response pair.
This class provides a convenience API over WSGI.
"""
def __init__(self, environ, start_response):
"""Create the request wrapper.
:param environ: The WSGI environment dict
:param start_response: The WSGI callback for starting the response
:param callbacks: A dictionary of functions that are used to lazily
evaluate attribute lookups
"""
self.environ = environ
self._start_response = start_response
self._write = None
self._status = '200 OK'
self._response = None
self._outheaders = []
self._outcharset = None
self.outcookie = Cookie()
self.callbacks = {
'arg_list': Request._parse_arg_list,
'args': lambda req: arg_list_to_args(req.arg_list),
'languages': Request._parse_languages,
'incookie': Request._parse_cookies,
'_inheaders': Request._parse_headers
}
self.redirect_listeners = []
self.base_url = self.environ.get('trac.base_url')
if not self.base_url:
self.base_url = self._reconstruct_url()
self.href = Href(self.base_path)
self.abs_href = Href(self.base_url)
def __getattr__(self, name):
"""Performs lazy attribute lookup by delegating to the functions in the
callbacks dictionary."""
if name in self.callbacks:
value = self.callbacks[name](self)
setattr(self, name, value)
return value
raise AttributeError(name)
def __repr__(self):
path_info = self.environ.get('PATH_INFO', '')
return '<%s "%s %r">' % (self.__class__.__name__, self.method,
path_info)
# Public API
@property
def method(self):
"""The HTTP method of the request"""
return self.environ['REQUEST_METHOD']
@property
def path_info(self):
"""Path inside the application"""
path_info = self.environ.get('PATH_INFO', '')
try:
return unicode(path_info, 'utf-8')
except UnicodeDecodeError:
raise HTTPNotFound(_("Invalid URL encoding (was %(path_info)r)",
path_info=path_info))
@property
def query_string(self):
"""Query part of the request"""
return self.environ.get('QUERY_STRING', '')
@property
def remote_addr(self):
"""IP address of the remote user"""
return self.environ.get('REMOTE_ADDR')
@property
def remote_user(self):
""" Name of the remote user.
Will be `None` if the user has not logged in using HTTP authentication.
"""
return self.environ.get('REMOTE_USER')
@property
def scheme(self):
"""The scheme of the request URL"""
return self.environ['wsgi.url_scheme']
@property
def base_path(self):
"""The root path of the application"""
return self.environ.get('SCRIPT_NAME', '')
@property
def server_name(self):
"""Name of the server"""
return self.environ['SERVER_NAME']
@property
def server_port(self):
"""Port number the server is bound to"""
return int(self.environ['SERVER_PORT'])
def add_redirect_listener(self, listener):
"""Add a callable to be called prior to executing a redirect.
The callable is passed the arguments to the `redirect()` call.
"""
self.redirect_listeners.append(listener)
def get_header(self, name):
"""Return the value of the specified HTTP header, or `None` if there's
no such header in the request.
"""
name = name.lower()
for key, value in self._inheaders:
if key == name:
return value
return None
def send_response(self, code=200):
"""Set the status code of the response."""
self._status = '%s %s' % (code, HTTP_STATUS.get(code, 'Unknown'))
def send_header(self, name, value):
"""Send the response header with the specified name and value.
`value` must either be an `unicode` string or can be converted to one
(e.g. numbers, ...)
"""
if name.lower() == 'content-type':
ctpos = value.find('charset=')
if ctpos >= 0:
self._outcharset = value[ctpos + 8:].strip()
elif name.lower() == 'content-length':
self._content_length = int(value)
self._outheaders.append((name, unicode(value).encode('utf-8')))
def end_headers(self):
"""Must be called after all headers have been sent and before the
actual content is written.
"""
self._send_cookie_headers()
self._write = self._start_response(self._status, self._outheaders)
def check_modified(self, datetime, extra=''):
"""Check the request "If-None-Match" header against an entity tag.
The entity tag is generated from the specified last modified time
(`datetime`), optionally appending an `extra` string to
indicate variants of the requested resource.
That `extra` parameter can also be a list, in which case the MD5 sum
of the list content will be used.
If the generated tag matches the "If-None-Match" header of the request,
this method sends a "304 Not Modified" response to the client.
Otherwise, it adds the entity tag as an "ETag" header to the response
so that consecutive requests can be cached.
"""
if isinstance(extra, list):
m = md5()
for elt in extra:
m.update(repr(elt))
extra = m.hexdigest()
etag = 'W/"%s/%s/%s"' % (self.authname, http_date(datetime), extra)
inm = self.get_header('If-None-Match')
if (not inm or inm != etag):
self.send_header('ETag', etag)
else:
self.send_response(304)
self.send_header('Content-Length', 0)
self.end_headers()
raise RequestDone
def redirect(self, url, permanent=False):
"""Send a redirect to the client, forwarding to the specified URL.
The `url` may be relative or absolute, relative URLs will be translated
appropriately.
"""
for listener in self.redirect_listeners:
listener(self, url, permanent)
if permanent:
status = 301 # 'Moved Permanently'
elif self.method == 'POST':
status = 303 # 'See Other' -- safe to use in response to a POST
else:
status = 302 # 'Found' -- normal temporary redirect
self.send_response(status)
if not url.startswith(('http://', 'https://')):
# Make sure the URL is absolute
scheme, host = urlparse.urlparse(self.base_url)[:2]
url = urlparse.urlunparse((scheme, host, url, None, None, None))
# Workaround #10382, IE6+ bug when post and redirect with hash
if status == 303 and '#' in url and \
' MSIE ' in self.environ.get('HTTP_USER_AGENT', ''):
url = url.replace('#', '#__msie303:')
self.send_header('Location', url)
self.send_header('Content-Type', 'text/plain')
self.send_header('Content-Length', 0)
self.send_header('Pragma', 'no-cache')
self.send_header('Cache-Control', 'no-cache')
self.send_header('Expires', 'Fri, 01 Jan 1999 00:00:00 GMT')
self.end_headers()
raise RequestDone
def send(self, content, content_type='text/html', status=200):
self.send_response(status)
self.send_header('Cache-Control', 'must-revalidate')
self.send_header('Expires', 'Fri, 01 Jan 1999 00:00:00 GMT')
self.send_header('Content-Type', content_type + ';charset=utf-8')
self.send_header('Content-Length', len(content))
self.end_headers()
if self.method != 'HEAD':
self.write(content)
raise RequestDone
def send_error(self, exc_info, template='error.html',
content_type='text/html', status=500, env=None, data={}):
try:
if template.endswith('.html'):
if env:
from trac.web.chrome import Chrome
try:
data = Chrome(env).render_template(self, template,
data, 'text/html')
except Exception:
# second chance rendering, in "safe" mode
data['trac_error_rendering'] = True
data = Chrome(env).render_template(self, template,
data, 'text/html')
else:
content_type = 'text/plain'
data = '%s\n\n%s: %s' % (data.get('title'),
data.get('type'),
data.get('message'))
except Exception: # failed to render
data = get_last_traceback()
content_type = 'text/plain'
if isinstance(data, unicode):
data = data.encode('utf-8')
self.send_response(status)
self._outheaders = []
self.send_header('Cache-Control', 'must-revalidate')
self.send_header('Expires', 'Fri, 01 Jan 1999 00:00:00 GMT')
self.send_header('Content-Type', content_type + ';charset=utf-8')
self.send_header('Content-Length', len(data))
self._send_cookie_headers()
self._write = self._start_response(self._status, self._outheaders,
exc_info)
if self.method != 'HEAD':
self.write(data)
raise RequestDone
def send_no_content(self):
self.send_response(204)
self.send_header('Content-Length', 0)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
raise RequestDone
def send_file(self, path, mimetype=None):
"""Send a local file to the browser.
This method includes the "Last-Modified", "Content-Type" and
"Content-Length" headers in the response, corresponding to the file
attributes. It also checks the last modification time of the local file
against the "If-Modified-Since" provided by the user agent, and sends a
"304 Not Modified" response if it matches.
"""
if not os.path.isfile(path):
raise HTTPNotFound(_("File %(path)s not found", path=path))
stat = os.stat(path)
mtime = datetime.fromtimestamp(stat.st_mtime, localtz)
last_modified = http_date(mtime)
if last_modified == self.get_header('If-Modified-Since'):
self.send_response(304)
self.send_header('Content-Length', 0)
self.end_headers()
raise RequestDone
if not mimetype:
mimetype = mimetypes.guess_type(path)[0] or \
'application/octet-stream'
self.send_response(200)
self.send_header('Content-Type', mimetype)
self.send_header('Content-Length', stat.st_size)
self.send_header('Last-Modified', last_modified)
use_xsendfile = getattr(self, 'use_xsendfile', False)
if use_xsendfile:
self.send_header('X-Sendfile', os.path.abspath(path))
self.end_headers()
if not use_xsendfile and self.method != 'HEAD':
fileobj = file(path, 'rb')
file_wrapper = self.environ.get('wsgi.file_wrapper', _FileWrapper)
self._response = file_wrapper(fileobj, 4096)
raise RequestDone
def read(self, size=None):
"""Read the specified number of bytes from the request body."""
fileobj = self.environ['wsgi.input']
if size is None:
size = self.get_header('Content-Length')
if size is None:
size = -1
else:
size = int(size)
data = fileobj.read(size)
return data
def write(self, data):
"""Write the given data to the response body.
`data` *must* be a `str` string, encoded with the charset
which has been specified in the ''Content-Type'' header
or 'utf-8' otherwise.
Note that the ''Content-Length'' header must have been specified.
Its value either corresponds to the length of `data`, or, if there
are multiple calls to `write`, to the cumulated length of the `data`
arguments.
"""
if not self._write:
self.end_headers()
if not hasattr(self, '_content_length'):
raise RuntimeError("No Content-Length header set")
if isinstance(data, unicode):
raise ValueError("Can't send unicode content")
try:
self._write(data)
except (IOError, socket.error), e:
if e.args[0] in (errno.EPIPE, errno.ECONNRESET, 10053, 10054):
raise RequestDone
raise
# Internal methods
def _parse_arg_list(self):
"""Parse the supplied request parameters into a list of
`(name, value)` tuples.
"""
fp = self.environ['wsgi.input']
# Avoid letting cgi.FieldStorage consume the input stream when the
# request does not contain form data
ctype = self.get_header('Content-Type')
if ctype:
ctype, options = cgi.parse_header(ctype)
if ctype not in ('application/x-www-form-urlencoded',
'multipart/form-data'):
fp = StringIO('')
# Python 2.6 introduced a backwards incompatible change for
# FieldStorage where QUERY_STRING is no longer ignored for POST
# requests. We'll keep the pre 2.6 behaviour for now...
if self.method == 'POST':
qs_on_post = self.environ.pop('QUERY_STRING', '')
fs = cgi.FieldStorage(fp, environ=self.environ, keep_blank_values=True)
if self.method == 'POST':
self.environ['QUERY_STRING'] = qs_on_post
args = []
for value in fs.list or ():
name = value.name
if not value.filename:
value = unicode(value.value, 'utf-8')
args.append((name, value))
return args
def _parse_cookies(self):
cookies = Cookie()
header = self.get_header('Cookie')
if header:
cookies.load(header, ignore_parse_errors=True)
return cookies
def _parse_headers(self):
headers = [(name[5:].replace('_', '-').lower(), value)
for name, value in self.environ.items()
if name.startswith('HTTP_')]
if 'CONTENT_LENGTH' in self.environ:
headers.append(('content-length', self.environ['CONTENT_LENGTH']))
if 'CONTENT_TYPE' in self.environ:
headers.append(('content-type', self.environ['CONTENT_TYPE']))
return headers
def _parse_languages(self):
"""The list of languages preferred by the remote user, taken from the
``Accept-Language`` header.
"""
header = self.get_header('Accept-Language') or 'en-us'
langs = []
for i, lang in enumerate(header.split(',')):
code, params = cgi.parse_header(lang)
q = 1
if 'q' in params:
try:
q = float(params['q'])
except ValueError:
q = 0
langs.append((-q, i, code))
langs.sort()
return [code for q, i, code in langs]
def _reconstruct_url(self):
"""Reconstruct the absolute base URL of the application."""
host = self.get_header('Host')
if not host:
# Missing host header, so reconstruct the host from the
# server name and port
default_port = {'http': 80, 'https': 443}
if self.server_port and self.server_port != \
default_port[self.scheme]:
host = '%s:%d' % (self.server_name, self.server_port)
else:
host = self.server_name
return urlparse.urlunparse((self.scheme, host, self.base_path, None,
None, None))
def _send_cookie_headers(self):
for name in self.outcookie.keys():
path = self.outcookie[name].get('path')
if path:
path = path.replace(' ', '%20') \
.replace(';', '%3B') \
.replace(',', '%3C')
self.outcookie[name]['path'] = path
cookies = to_unicode(self.outcookie.output(header='')).encode('utf-8')
for cookie in cookies.splitlines():
self._outheaders.append(('Set-Cookie', cookie.strip()))
# Tell the API documentation generator to skip the dynamically generated
# HTTPException subclasses.
__no_apidoc__ = _HTTPException_subclass_names
| |
from sqlalchemy.testing import assert_raises_message, eq_, \
AssertsCompiledSQL, is_
from sqlalchemy.testing import fixtures
from sqlalchemy.orm import relationships, foreign, remote
from sqlalchemy import MetaData, Table, Column, ForeignKey, Integer, \
select, ForeignKeyConstraint, exc, func, and_, String, Boolean
from sqlalchemy.orm.interfaces import ONETOMANY, MANYTOONE, MANYTOMANY
from sqlalchemy.testing import mock
class _JoinFixtures(object):
    @classmethod
    def setup_class(cls):
        """Build the shared `Table` fixtures used by the join-condition tests.

        All tables live in a single `MetaData`; each attribute models one
        relationship topology (one-to-many, many-to-one, many-to-many,
        self-referential, composite FKs, joined inheritance, ...).
        """
        m = MetaData()
        # Plain o2m/m2o pair: rgt.lid -> lft.id.
        cls.left = Table('lft', m,
                         Column('id', Integer, primary_key=True),
                         Column('x', Integer),
                         Column('y', Integer),
                         )
        cls.right = Table('rgt', m,
                          Column('id', Integer, primary_key=True),
                          Column('lid', Integer, ForeignKey('lft.id')),
                          Column('x', Integer),
                          Column('y', Integer),
                          )
        # Two candidate FKs to the same parent -> ambiguous join.
        cls.right_multi_fk = Table('rgt_multi_fk', m,
                                   Column('id', Integer, primary_key=True),
                                   Column('lid1', Integer,
                                          ForeignKey('lft.id')),
                                   Column('lid2', Integer,
                                          ForeignKey('lft.id')),
                                   )
        # Simple self-referential table.
        cls.selfref = Table('selfref', m,
                            Column('id', Integer, primary_key=True),
                            Column('sid', Integer, ForeignKey('selfref.id'))
                            )
        # Self-referential with a composite FK sharing the group_id column.
        cls.composite_selfref = Table('composite_selfref', m,
                                      Column('id', Integer, primary_key=True),
                                      Column('group_id', Integer,
                                             primary_key=True),
                                      Column('parent_id', Integer),
                                      ForeignKeyConstraint(
                                          ['parent_id', 'group_id'],
                                          ['composite_selfref.id',
                                           'composite_selfref.group_id']
                                      )
                                      )
        # Many-to-many: two plain endpoints and several secondary variants.
        cls.m2mleft = Table('m2mlft', m,
                            Column('id', Integer, primary_key=True),
                            )
        cls.m2mright = Table('m2mrgt', m,
                             Column('id', Integer, primary_key=True),
                             )
        cls.m2msecondary = Table('m2msecondary', m,
                                 Column('lid', Integer,
                                        ForeignKey('m2mlft.id'),
                                        primary_key=True),
                                 Column('rid', Integer,
                                        ForeignKey('m2mrgt.id'),
                                        primary_key=True),
                                 )
        cls.m2msecondary_no_fks = Table('m2msecondary_no_fks', m,
                                        Column('lid', Integer,
                                               primary_key=True),
                                        Column('rid', Integer,
                                               primary_key=True),
                                        )
        cls.m2msecondary_ambig_fks = Table('m2msecondary_ambig_fks', m,
                                           Column('lid1', Integer,
                                                  ForeignKey('m2mlft.id'),
                                                  primary_key=True),
                                           Column('rid1', Integer,
                                                  ForeignKey('m2mrgt.id'),
                                                  primary_key=True),
                                           Column('lid2', Integer,
                                                  ForeignKey('m2mlft.id'),
                                                  primary_key=True),
                                           Column('rid2', Integer,
                                                  ForeignKey('m2mrgt.id'),
                                                  primary_key=True),
                                           )
        # Mutually-referencing pair for joined-inheritance style fixtures.
        cls.base_w_sub_rel = Table('base_w_sub_rel', m,
                                   Column('id', Integer, primary_key=True),
                                   Column('sub_id', Integer,
                                          ForeignKey('rel_sub.id'))
                                   )
        cls.rel_sub = Table('rel_sub', m,
                            Column('id', Integer,
                                   ForeignKey('base_w_sub_rel.id'),
                                   primary_key=True)
                            )
        # Classic joined-inheritance base/sub tables.
        cls.base = Table('base', m,
                         Column('id', Integer, primary_key=True),
                         Column('flag', Boolean)
                         )
        cls.sub = Table('sub', m,
                        Column('id', Integer, ForeignKey('base.id'),
                               primary_key=True),
                        )
        cls.sub_w_base_rel = Table('sub_w_base_rel', m,
                                   Column('id', Integer,
                                          ForeignKey('base.id'),
                                          primary_key=True),
                                   Column('base_id', Integer,
                                          ForeignKey('base.id'))
                                   )
        cls.sub_w_sub_rel = Table('sub_w_sub_rel', m,
                                  Column('id', Integer,
                                         ForeignKey('base.id'),
                                         primary_key=True),
                                  Column('sub_id', Integer,
                                         ForeignKey('sub.id'))
                                  )
        cls.right_w_base_rel = Table('right_w_base_rel', m,
                                     Column('id', Integer, primary_key=True),
                                     Column('base_id', Integer,
                                            ForeignKey('base.id'))
                                     )
        # Three tables whose join conditions overlap.
        cls.three_tab_a = Table('three_tab_a', m,
                                Column('id', Integer, primary_key=True),
                                )
        cls.three_tab_b = Table('three_tab_b', m,
                                Column('id', Integer, primary_key=True),
                                Column('aid', Integer,
                                       ForeignKey('three_tab_a.id'))
                                )
        cls.three_tab_c = Table('three_tab_c', m,
                                Column('id', Integer, primary_key=True),
                                Column('aid', Integer,
                                       ForeignKey('three_tab_a.id')),
                                Column('bid', Integer,
                                       ForeignKey('three_tab_b.id'))
                                )
        # Composite-FK target referenced twice, sharing the 'oid' column.
        cls.composite_target = Table('composite_target', m,
                                     Column('uid', Integer, primary_key=True),
                                     Column('oid', Integer, primary_key=True),
                                     )
        cls.composite_multi_ref = Table('composite_multi_ref', m,
                                        Column('uid1', Integer),
                                        Column('uid2', Integer),
                                        Column('oid', Integer),
                                        ForeignKeyConstraint(
                                            ("uid1", "oid"),
                                            ("composite_target.uid",
                                             "composite_target.oid")),
                                        ForeignKeyConstraint(
                                            ("uid2", "oid"),
                                            ("composite_target.uid",
                                             "composite_target.oid")),
                                        )
        # Single non-FK column, for LIKE-based join conditions.
        cls.purely_single_col = Table('purely_single_col', m,
                                      Column('path', String)
                                      )
def _join_fixture_overlapping_three_tables(self, **kw):
def _can_sync(*cols):
for c in cols:
if self.three_tab_c.c.contains_column(c):
return False
else:
return True
return relationships.JoinCondition(
self.three_tab_a,
self.three_tab_b,
self.three_tab_a,
self.three_tab_b,
support_sync=False,
can_be_synced_fn=_can_sync,
primaryjoin=and_(
self.three_tab_a.c.id == self.three_tab_b.c.aid,
self.three_tab_c.c.bid == self.three_tab_b.c.id,
self.three_tab_c.c.aid == self.three_tab_a.c.id
)
)
    def _join_fixture_m2m(self, **kw):
        """Plain many-to-many join via the ``m2msecondary`` table."""
        return relationships.JoinCondition(
            self.m2mleft,
            self.m2mright,
            self.m2mleft,
            self.m2mright,
            secondary=self.m2msecondary,
            **kw
        )
    def _join_fixture_m2m_backref(self, **kw):
        """return JoinCondition in the same way RelationshipProperty
        calls it for a backref on an m2m.

        Returns the forward condition plus the reversed one, which reuses
        the forward condition's minus-local join fragments.
        """
        j1 = self._join_fixture_m2m()
        return j1, relationships.JoinCondition(
            self.m2mright,
            self.m2mleft,
            self.m2mright,
            self.m2mleft,
            secondary=self.m2msecondary,
            primaryjoin=j1.secondaryjoin_minus_local,
            secondaryjoin=j1.primaryjoin_minus_local
        )
    def _join_fixture_o2m(self, **kw):
        """Plain one-to-many join: ``lft`` -> ``rgt`` via ``rgt.lid``."""
        return relationships.JoinCondition(
            self.left,
            self.right,
            self.left,
            self.right,
            **kw
        )
    def _join_fixture_m2o(self, **kw):
        """Plain many-to-one join: ``rgt`` -> ``lft`` via ``rgt.lid``."""
        return relationships.JoinCondition(
            self.right,
            self.left,
            self.right,
            self.left,
            **kw
        )
    def _join_fixture_o2m_selfref(self, **kw):
        """Self-referential one-to-many on ``selfref``; direction is
        inferred from the FK."""
        return relationships.JoinCondition(
            self.selfref,
            self.selfref,
            self.selfref,
            self.selfref,
            **kw
        )
    def _join_fixture_m2o_selfref(self, **kw):
        """Self-referential many-to-one; ``remote_side`` forces the 'id'
        column to be the remote side."""
        return relationships.JoinCondition(
            self.selfref,
            self.selfref,
            self.selfref,
            self.selfref,
            remote_side=set([self.selfref.c.id]),
            **kw
        )
    def _join_fixture_o2m_composite_selfref(self, **kw):
        """Self-referential one-to-many over the composite FK of
        ``composite_selfref``."""
        return relationships.JoinCondition(
            self.composite_selfref,
            self.composite_selfref,
            self.composite_selfref,
            self.composite_selfref,
            **kw
        )
    def _join_fixture_m2o_composite_selfref(self, **kw):
        """Self-referential many-to-one over the composite FK;
        ``remote_side`` names both referenced columns."""
        return relationships.JoinCondition(
            self.composite_selfref,
            self.composite_selfref,
            self.composite_selfref,
            self.composite_selfref,
            remote_side=set([self.composite_selfref.c.id,
                             self.composite_selfref.c.group_id]),
            **kw
        )
    def _join_fixture_o2m_composite_selfref_func(self, **kw):
        """Composite self-referential join where one comparison wraps a
        column in a SQL function (no remote/foreign annotations)."""
        return relationships.JoinCondition(
            self.composite_selfref,
            self.composite_selfref,
            self.composite_selfref,
            self.composite_selfref,
            primaryjoin=and_(
                self.composite_selfref.c.group_id ==
                func.foo(self.composite_selfref.c.group_id),
                self.composite_selfref.c.parent_id ==
                self.composite_selfref.c.id
            ),
            **kw
        )
    def _join_fixture_o2m_composite_selfref_func_remote_side(self, **kw):
        """Same functional composite join, but with an explicit
        ``remote_side`` to disambiguate direction."""
        return relationships.JoinCondition(
            self.composite_selfref,
            self.composite_selfref,
            self.composite_selfref,
            self.composite_selfref,
            primaryjoin=and_(
                self.composite_selfref.c.group_id ==
                func.foo(self.composite_selfref.c.group_id),
                self.composite_selfref.c.parent_id ==
                self.composite_selfref.c.id
            ),
            remote_side=set([self.composite_selfref.c.parent_id]),
            **kw
        )
    def _join_fixture_o2m_composite_selfref_func_annotated(self, **kw):
        """Same functional composite join, disambiguated with inline
        ``remote()`` annotations instead of ``remote_side``."""
        return relationships.JoinCondition(
            self.composite_selfref,
            self.composite_selfref,
            self.composite_selfref,
            self.composite_selfref,
            primaryjoin=and_(
                remote(self.composite_selfref.c.group_id) ==
                func.foo(self.composite_selfref.c.group_id),
                remote(self.composite_selfref.c.parent_id) ==
                self.composite_selfref.c.id
            ),
            **kw
        )
    def _join_fixture_compound_expression_1(self, **kw):
        """Join on arithmetic expressions; the right side is annotated as
        both remote and foreign."""
        return relationships.JoinCondition(
            self.left,
            self.right,
            self.left,
            self.right,
            primaryjoin=(self.left.c.x + self.left.c.y) == \
                relationships.remote(relationships.foreign(
                    self.right.c.x * self.right.c.y
                )),
            **kw
        )
    def _join_fixture_compound_expression_2(self, **kw):
        """Join on arithmetic expressions; the right side is annotated as
        foreign only."""
        return relationships.JoinCondition(
            self.left,
            self.right,
            self.left,
            self.right,
            primaryjoin=(self.left.c.x + self.left.c.y) == \
                relationships.foreign(
                    self.right.c.x * self.right.c.y
                ),
            **kw
        )
    def _join_fixture_compound_expression_1_non_annotated(self, **kw):
        """Join on arithmetic expressions with no annotations at all —
        used to exercise the "non-simple column elements" warning."""
        return relationships.JoinCondition(
            self.left,
            self.right,
            self.left,
            self.right,
            primaryjoin=(self.left.c.x + self.left.c.y) == \
                (
                    self.right.c.x * self.right.c.y
                ),
            **kw
        )
    def _join_fixture_base_to_joined_sub(self, **kw):
        """Join from a base table to a joined-inheritance selectable."""
        # see test/orm/inheritance/test_abc_inheritance:TestaTobM2O
        # and others there
        right = self.base_w_sub_rel.join(
            self.rel_sub,
            self.base_w_sub_rel.c.id == self.rel_sub.c.id
        )
        return relationships.JoinCondition(
            self.base_w_sub_rel,
            right,
            self.base_w_sub_rel,
            self.rel_sub,
            primaryjoin=self.base_w_sub_rel.c.sub_id == \
                self.rel_sub.c.id,
            **kw
        )
    def _join_fixture_o2m_joined_sub_to_base(self, **kw):
        """One-to-many from a joined-inheritance selectable back to base."""
        # NOTE(review): **kw is accepted but not forwarded to JoinCondition,
        # unlike most sibling fixtures — presumably intentional; confirm.
        left = self.base.join(self.sub_w_base_rel,
                              self.base.c.id == self.sub_w_base_rel.c.id)
        return relationships.JoinCondition(
            left,
            self.base,
            self.sub_w_base_rel,
            self.base,
            primaryjoin=self.sub_w_base_rel.c.base_id == self.base.c.id
        )
    def _join_fixture_m2o_joined_sub_to_sub_on_base(self, **kw):
        """Many-to-one between two joined-inheritance selectables, joining
        on the shared base columns."""
        # this is a late add - a variant of the test case
        # in #2491 where we join on the base cols instead. only
        # m2o has a problem at the time of this test.
        left = self.base.join(self.sub, self.base.c.id == self.sub.c.id)
        right = self.base.join(self.sub_w_base_rel,
                               self.base.c.id == self.sub_w_base_rel.c.id)
        return relationships.JoinCondition(
            left,
            right,
            self.sub,
            self.sub_w_base_rel,
            primaryjoin=self.sub_w_base_rel.c.base_id == self.base.c.id,
        )
    def _join_fixture_o2m_joined_sub_to_sub(self, **kw):
        """One-to-many between two joined-inheritance selectables, joining
        on the sub-table columns."""
        left = self.base.join(self.sub, self.base.c.id == self.sub.c.id)
        right = self.base.join(self.sub_w_sub_rel,
                               self.base.c.id == self.sub_w_sub_rel.c.id)
        return relationships.JoinCondition(
            left,
            right,
            self.sub,
            self.sub_w_sub_rel,
            primaryjoin=self.sub.c.id == self.sub_w_sub_rel.c.sub_id
        )
    def _join_fixture_m2o_sub_to_joined_sub(self, **kw):
        """Many-to-one from a plain table to a joined-inheritance
        selectable, relying on FK inference for the join condition."""
        # see test.orm.test_mapper:MapperTest.test_add_column_prop_deannotate,
        right = self.base.join(self.right_w_base_rel,
                               self.base.c.id == self.right_w_base_rel.c.id)
        return relationships.JoinCondition(
            self.right_w_base_rel,
            right,
            self.right_w_base_rel,
            self.right_w_base_rel,
        )
    def _join_fixture_m2o_sub_to_joined_sub_func(self, **kw):
        """Same as `_join_fixture_m2o_sub_to_joined_sub` but with a SQL
        function wrapping the referenced column."""
        # see test.orm.test_mapper:MapperTest.test_add_column_prop_deannotate,
        right = self.base.join(self.right_w_base_rel,
                               self.base.c.id == self.right_w_base_rel.c.id)
        return relationships.JoinCondition(
            self.right_w_base_rel,
            right,
            self.right_w_base_rel,
            self.right_w_base_rel,
            primaryjoin=self.right_w_base_rel.c.base_id == \
                func.foo(self.base.c.id)
        )
    def _join_fixture_o2o_joined_sub_to_base(self, **kw):
        """One-to-one from a joined-inheritance selectable to one of its
        component tables."""
        left = self.base.join(self.sub,
                              self.base.c.id == self.sub.c.id)
        # see test_relationships->AmbiguousJoinInterpretedAsSelfRef
        return relationships.JoinCondition(
            left,
            self.sub,
            left,
            self.sub,
        )
    def _join_fixture_o2m_to_annotated_func(self, **kw):
        """One-to-many whose FK side is a function wrapped in an inline
        ``foreign()`` annotation."""
        return relationships.JoinCondition(
            self.left,
            self.right,
            self.left,
            self.right,
            primaryjoin=self.left.c.id ==
            foreign(func.foo(self.right.c.lid)),
            **kw
        )
    def _join_fixture_o2m_to_oldstyle_func(self, **kw):
        """One-to-many with a function on the FK side declared via the
        old-style ``consider_as_foreign_keys`` list."""
        return relationships.JoinCondition(
            self.left,
            self.right,
            self.left,
            self.right,
            primaryjoin=self.left.c.id ==
            func.foo(self.right.c.lid),
            consider_as_foreign_keys=[self.right.c.lid],
            **kw
        )
    def _join_fixture_overlapping_composite_fks(self, **kw):
        """Composite FKs sharing the ``oid`` column; the second constraint
        is selected explicitly via ``consider_as_foreign_keys``."""
        return relationships.JoinCondition(
            self.composite_target,
            self.composite_multi_ref,
            self.composite_target,
            self.composite_multi_ref,
            consider_as_foreign_keys=[self.composite_multi_ref.c.uid2,
                                      self.composite_multi_ref.c.oid],
            **kw
        )
cls.left = Table('lft', m,
Column('id', Integer, primary_key=True),
Column('x', Integer),
Column('y', Integer),
)
cls.right = Table('rgt', m,
Column('id', Integer, primary_key=True),
Column('lid', Integer, ForeignKey('lft.id')),
Column('x', Integer),
Column('y', Integer),
)
    def _join_fixture_o2m_o_side_none(self, **kw):
        """One-to-many whose primaryjoin includes a criterion against a
        literal (``x == 5``) on the "one" side."""
        return relationships.JoinCondition(
            self.left,
            self.right,
            self.left,
            self.right,
            primaryjoin=and_(self.left.c.id == self.right.c.lid,
                             self.left.c.x == 5),
            **kw
        )
    def _join_fixture_purely_single_o2m(self, **kw):
        """Self-referential one-to-many built entirely from a LIKE
        comparison with inline remote()/foreign() annotations."""
        return relationships.JoinCondition(
            self.purely_single_col,
            self.purely_single_col,
            self.purely_single_col,
            self.purely_single_col,
            support_sync=False,
            primaryjoin=
            self.purely_single_col.c.path.like(
                remote(
                    foreign(
                        self.purely_single_col.c.path.concat('%')
                    )
                )
            )
        )
    def _join_fixture_purely_single_m2o(self, **kw):
        """Fixture: m2o counterpart of the single-column LIKE join; here
        remote() wraps the left-hand column while foreign() wraps the
        concat() expression, inverting the direction of the o2m version."""
        return relationships.JoinCondition(
            self.purely_single_col,
            self.purely_single_col,
            self.purely_single_col,
            self.purely_single_col,
            support_sync=False,
            primaryjoin=
            remote(self.purely_single_col.c.path).like(
                foreign(self.purely_single_col.c.path.concat('%'))
            )
        )
    def _join_fixture_remote_local_multiple_ref(self, **kw):
        """Fixture: the same column pair referenced twice inside an OR.
        The inline comment below explains why one side carries a
        do-nothing annotation; keep the construction exactly as written."""
        fn = lambda a, b: ((a == b) | (b == a))
        return relationships.JoinCondition(
            self.selfref, self.selfref,
            self.selfref, self.selfref,
            support_sync=False,
            primaryjoin=fn(
                # we're putting a do-nothing annotation on
                # "a" so that the left/right is preserved;
                # annotation vs. non seems to affect __eq__ behavior
                self.selfref.c.sid._annotate({"foo": "bar"}),
                foreign(remote(self.selfref.c.sid)))
        )
    def _join_fixture_inh_selfref_w_entity(self, **kw):
        """Fixture: inheritance-style self-referential join where each join
        column is annotated with a 'parentmapper' entity.  Mock objects
        stand in for the mapper/parent; fake_logger silences prop.logger
        calls made during condition analysis."""
        fake_logger = mock.Mock(info=lambda *arg, **kw: None)
        prop = mock.Mock(
            parent=mock.Mock(),
            mapper=mock.Mock(),
            logger=fake_logger
        )
        local_selectable = self.base.join(self.sub)
        remote_selectable = self.base.join(self.sub_w_sub_rel)
        # annotate each column with the mock mapper that "owns" it
        sub_w_sub_rel__sub_id = self.sub_w_sub_rel.c.sub_id._annotate(
            {'parentmapper': prop.mapper})
        sub__id = self.sub.c.id._annotate({'parentmapper': prop.parent})
        sub_w_sub_rel__flag = self.base.c.flag._annotate(
            {"parentmapper": prop.mapper})
        return relationships.JoinCondition(
            local_selectable, remote_selectable,
            local_selectable, remote_selectable,
            primaryjoin=and_(
                sub_w_sub_rel__sub_id == sub__id,
                sub_w_sub_rel__flag == True
            ),
            prop=prop
        )
def _assert_non_simple_warning(self, fn):
assert_raises_message(
exc.SAWarning,
"Non-simple column elements in "
"primary join condition for property "
r"None - consider using remote\(\) "
"annotations to mark the remote side.",
fn
)
def _assert_raises_no_relevant_fks(self, fn, expr, relname,
primary, *arg, **kw):
assert_raises_message(
exc.ArgumentError,
r"Could not locate any relevant foreign key columns "
r"for %s join condition '%s' on relationship %s. "
r"Ensure that referencing columns are associated with "
r"a ForeignKey or ForeignKeyConstraint, or are annotated "
r"in the join condition with the foreign\(\) annotation."
% (
primary, expr, relname
),
fn, *arg, **kw
)
def _assert_raises_no_equality(self, fn, expr, relname,
primary, *arg, **kw):
assert_raises_message(
exc.ArgumentError,
"Could not locate any simple equality expressions "
"involving locally mapped foreign key columns for %s join "
"condition '%s' on relationship %s. "
"Ensure that referencing columns are associated with a "
"ForeignKey or ForeignKeyConstraint, or are annotated in "
r"the join condition with the foreign\(\) annotation. "
"To allow comparison operators other than '==', "
"the relationship can be marked as viewonly=True." % (
primary, expr, relname
),
fn, *arg, **kw
)
def _assert_raises_ambig_join(self, fn, relname, secondary_arg,
*arg, **kw):
if secondary_arg is not None:
assert_raises_message(
exc.AmbiguousForeignKeysError,
"Could not determine join condition between "
"parent/child tables on relationship %s - "
"there are multiple foreign key paths linking the "
"tables via secondary table '%s'. "
"Specify the 'foreign_keys' argument, providing a list "
"of those columns which should be counted as "
"containing a foreign key reference from the "
"secondary table to each of the parent and child tables."
% (relname, secondary_arg),
fn, *arg, **kw)
else:
assert_raises_message(
exc.AmbiguousForeignKeysError,
"Could not determine join condition between "
"parent/child tables on relationship %s - "
"there are no foreign keys linking these tables. "
% (relname,),
fn, *arg, **kw)
def _assert_raises_no_join(self, fn, relname, secondary_arg,
*arg, **kw):
if secondary_arg is not None:
assert_raises_message(
exc.NoForeignKeysError,
"Could not determine join condition between "
"parent/child tables on relationship %s - "
"there are no foreign keys linking these tables "
"via secondary table '%s'. "
"Ensure that referencing columns are associated "
"with a ForeignKey "
"or ForeignKeyConstraint, or specify 'primaryjoin' and "
"'secondaryjoin' expressions"
% (relname, secondary_arg),
fn, *arg, **kw)
else:
assert_raises_message(
exc.NoForeignKeysError,
"Could not determine join condition between "
"parent/child tables on relationship %s - "
"there are no foreign keys linking these tables. "
"Ensure that referencing columns are associated "
"with a ForeignKey "
"or ForeignKeyConstraint, or specify a 'primaryjoin' "
"expression."
% (relname,),
fn, *arg, **kw)
class ColumnCollectionsTest(_JoinFixtures, fixtures.TestBase,
                            AssertsCompiledSQL):
    """Verify the column collections derived by relationships.JoinCondition
    (local_remote_pairs, synchronize_pairs, remote_columns, local_columns)
    for each fixture defined on _JoinFixtures."""

    def test_determine_local_remote_pairs_o2o_joined_sub_to_base(self):
        joincond = self._join_fixture_o2o_joined_sub_to_base()
        eq_(
            joincond.local_remote_pairs,
            [(self.base.c.id, self.sub.c.id)]
        )

    def test_determine_synchronize_pairs_o2m_to_annotated_func(self):
        joincond = self._join_fixture_o2m_to_annotated_func()
        eq_(
            joincond.synchronize_pairs,
            [(self.left.c.id, self.right.c.lid)]
        )

    def test_determine_synchronize_pairs_o2m_to_oldstyle_func(self):
        joincond = self._join_fixture_o2m_to_oldstyle_func()
        eq_(
            joincond.synchronize_pairs,
            [(self.left.c.id, self.right.c.lid)]
        )

    # NOTE(review): name is missing an underscore ("determinelocal");
    # left unrenamed so test IDs / selection scripts keep working.
    def test_determinelocal_remote_m2o_joined_sub_to_sub_on_base(self):
        joincond = self._join_fixture_m2o_joined_sub_to_sub_on_base()
        eq_(
            joincond.local_remote_pairs,
            [(self.base.c.id, self.sub_w_base_rel.c.base_id)]
        )

    def test_determine_local_remote_base_to_joined_sub(self):
        joincond = self._join_fixture_base_to_joined_sub()
        eq_(
            joincond.local_remote_pairs,
            [
                (self.base_w_sub_rel.c.sub_id, self.rel_sub.c.id)
            ]
        )

    def test_determine_local_remote_o2m_joined_sub_to_base(self):
        joincond = self._join_fixture_o2m_joined_sub_to_base()
        eq_(
            joincond.local_remote_pairs,
            [
                (self.sub_w_base_rel.c.base_id, self.base.c.id)
            ]
        )

    def test_determine_local_remote_m2o_sub_to_joined_sub(self):
        joincond = self._join_fixture_m2o_sub_to_joined_sub()
        eq_(
            joincond.local_remote_pairs,
            [
                (self.right_w_base_rel.c.base_id, self.base.c.id)
            ]
        )

    # NOTE(review): despite the "remote_columns" name this asserts
    # local_remote_pairs; left as-is to preserve the test ID.
    def test_determine_remote_columns_o2m_joined_sub_to_sub(self):
        joincond = self._join_fixture_o2m_joined_sub_to_sub()
        eq_(
            joincond.local_remote_pairs,
            [
                (self.sub.c.id, self.sub_w_sub_rel.c.sub_id)
            ]
        )

    def test_determine_remote_columns_compound_1(self):
        joincond = self._join_fixture_compound_expression_1(
            support_sync=False)
        eq_(
            joincond.remote_columns,
            set([self.right.c.x, self.right.c.y])
        )

    def test_determine_local_remote_compound_1(self):
        joincond = self._join_fixture_compound_expression_1(
            support_sync=False)
        eq_(
            joincond.local_remote_pairs,
            [
                (self.left.c.x, self.right.c.x),
                (self.left.c.x, self.right.c.y),
                (self.left.c.y, self.right.c.x),
                (self.left.c.y, self.right.c.y)
            ]
        )

    def test_determine_local_remote_compound_2(self):
        joincond = self._join_fixture_compound_expression_2(
            support_sync=False)
        eq_(
            joincond.local_remote_pairs,
            [
                (self.left.c.x, self.right.c.x),
                (self.left.c.x, self.right.c.y),
                (self.left.c.y, self.right.c.x),
                (self.left.c.y, self.right.c.y)
            ]
        )

    def test_determine_local_remote_compound_3(self):
        joincond = self._join_fixture_compound_expression_1()
        eq_(
            joincond.local_remote_pairs,
            [
                (self.left.c.x, self.right.c.x),
                (self.left.c.x, self.right.c.y),
                (self.left.c.y, self.right.c.x),
                (self.left.c.y, self.right.c.y),
            ]
        )

    def test_err_local_remote_compound_1(self):
        self._assert_raises_no_relevant_fks(
            self._join_fixture_compound_expression_1_non_annotated,
            r'lft.x \+ lft.y = rgt.x \* rgt.y',
            "None", "primary"
        )

    def test_determine_remote_columns_compound_2(self):
        joincond = self._join_fixture_compound_expression_2(
            support_sync=False)
        eq_(
            joincond.remote_columns,
            set([self.right.c.x, self.right.c.y])
        )

    def test_determine_remote_columns_o2m(self):
        joincond = self._join_fixture_o2m()
        eq_(
            joincond.remote_columns,
            set([self.right.c.lid])
        )

    def test_determine_remote_columns_o2m_selfref(self):
        joincond = self._join_fixture_o2m_selfref()
        eq_(
            joincond.remote_columns,
            set([self.selfref.c.sid])
        )

    def test_determine_local_remote_pairs_o2m_composite_selfref(self):
        joincond = self._join_fixture_o2m_composite_selfref()
        eq_(
            joincond.local_remote_pairs,
            [
                (self.composite_selfref.c.group_id,
                 self.composite_selfref.c.group_id),
                (self.composite_selfref.c.id,
                 self.composite_selfref.c.parent_id),
            ]
        )

    def test_determine_local_remote_pairs_o2m_composite_selfref_func_warning(self):
        self._assert_non_simple_warning(
            self._join_fixture_o2m_composite_selfref_func
        )

    def test_determine_local_remote_pairs_o2m_composite_selfref_func_rs(self):
        # no warning
        self._join_fixture_o2m_composite_selfref_func_remote_side()

    def test_determine_local_remote_pairs_o2m_overlap_func_warning(self):
        self._assert_non_simple_warning(
            self._join_fixture_m2o_sub_to_joined_sub_func
        )

    def test_determine_local_remote_pairs_o2m_composite_selfref_func_annotated(self):
        joincond = self._join_fixture_o2m_composite_selfref_func_annotated()
        eq_(
            joincond.local_remote_pairs,
            [
                (self.composite_selfref.c.group_id,
                 self.composite_selfref.c.group_id),
                (self.composite_selfref.c.id,
                 self.composite_selfref.c.parent_id),
            ]
        )

    def test_determine_remote_columns_m2o_composite_selfref(self):
        joincond = self._join_fixture_m2o_composite_selfref()
        eq_(
            joincond.remote_columns,
            set([self.composite_selfref.c.id,
                 self.composite_selfref.c.group_id])
        )

    def test_determine_remote_columns_m2o(self):
        joincond = self._join_fixture_m2o()
        eq_(
            joincond.remote_columns,
            set([self.left.c.id])
        )

    def test_determine_local_remote_pairs_o2m(self):
        joincond = self._join_fixture_o2m()
        eq_(
            joincond.local_remote_pairs,
            [(self.left.c.id, self.right.c.lid)]
        )

    def test_determine_synchronize_pairs_m2m(self):
        joincond = self._join_fixture_m2m()
        eq_(
            joincond.synchronize_pairs,
            [(self.m2mleft.c.id, self.m2msecondary.c.lid)]
        )
        eq_(
            joincond.secondary_synchronize_pairs,
            [(self.m2mright.c.id, self.m2msecondary.c.rid)]
        )

    def test_determine_local_remote_pairs_o2m_backref(self):
        # the m2o is built from the o2m's reversed primaryjoin, as a
        # backref would be
        joincond = self._join_fixture_o2m()
        joincond2 = self._join_fixture_m2o(
            primaryjoin=joincond.primaryjoin_reverse_remote,
        )
        eq_(
            joincond2.local_remote_pairs,
            [(self.right.c.lid, self.left.c.id)]
        )

    def test_determine_local_remote_pairs_m2m(self):
        joincond = self._join_fixture_m2m()
        eq_(
            joincond.local_remote_pairs,
            [(self.m2mleft.c.id, self.m2msecondary.c.lid),
             (self.m2mright.c.id, self.m2msecondary.c.rid)]
        )

    def test_determine_local_remote_pairs_m2m_backref(self):
        j1, j2 = self._join_fixture_m2m_backref()
        eq_(
            j1.local_remote_pairs,
            [(self.m2mleft.c.id, self.m2msecondary.c.lid),
             (self.m2mright.c.id, self.m2msecondary.c.rid)]
        )
        eq_(
            j2.local_remote_pairs,
            [
                (self.m2mright.c.id, self.m2msecondary.c.rid),
                (self.m2mleft.c.id, self.m2msecondary.c.lid),
            ]
        )

    def test_determine_local_columns_m2m_backref(self):
        j1, j2 = self._join_fixture_m2m_backref()
        eq_(
            j1.local_columns,
            set([self.m2mleft.c.id])
        )
        eq_(
            j2.local_columns,
            set([self.m2mright.c.id])
        )

    def test_determine_remote_columns_m2m_backref(self):
        j1, j2 = self._join_fixture_m2m_backref()
        eq_(
            j1.remote_columns,
            set([self.m2msecondary.c.lid, self.m2msecondary.c.rid])
        )
        eq_(
            j2.remote_columns,
            set([self.m2msecondary.c.lid, self.m2msecondary.c.rid])
        )

    def test_determine_remote_columns_m2o_selfref(self):
        joincond = self._join_fixture_m2o_selfref()
        eq_(
            joincond.remote_columns,
            set([self.selfref.c.id])
        )

    def test_determine_local_remote_cols_three_tab_viewonly(self):
        joincond = self._join_fixture_overlapping_three_tables()
        eq_(
            joincond.local_remote_pairs,
            [(self.three_tab_a.c.id, self.three_tab_b.c.aid)]
        )
        eq_(
            joincond.remote_columns,
            set([self.three_tab_b.c.id, self.three_tab_b.c.aid])
        )

    def test_determine_local_remote_overlapping_composite_fks(self):
        joincond = self._join_fixture_overlapping_composite_fks()
        eq_(
            joincond.local_remote_pairs,
            [
                (self.composite_target.c.uid, self.composite_multi_ref.c.uid2,),
                (self.composite_target.c.oid, self.composite_multi_ref.c.oid,)
            ]
        )

    def test_determine_local_remote_pairs_purely_single_col_o2m(self):
        joincond = self._join_fixture_purely_single_o2m()
        eq_(
            joincond.local_remote_pairs,
            [(self.purely_single_col.c.path, self.purely_single_col.c.path)]
        )

    def test_determine_local_remote_pairs_inh_selfref_w_entities(self):
        joincond = self._join_fixture_inh_selfref_w_entity()
        eq_(
            joincond.local_remote_pairs,
            [(self.sub.c.id, self.sub_w_sub_rel.c.sub_id)]
        )
        eq_(
            joincond.remote_columns,
            set([self.base.c.flag, self.sub_w_sub_rel.c.sub_id])
        )
class DirectionTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL):
    """Verify the ONETOMANY / MANYTOONE direction that JoinCondition
    derives for each fixture."""

    def test_determine_direction_compound_2(self):
        joincond = self._join_fixture_compound_expression_2(
            support_sync=False)
        is_(
            joincond.direction,
            ONETOMANY
        )

    def test_determine_direction_o2m(self):
        joincond = self._join_fixture_o2m()
        is_(joincond.direction, ONETOMANY)

    def test_determine_direction_o2m_selfref(self):
        joincond = self._join_fixture_o2m_selfref()
        is_(joincond.direction, ONETOMANY)

    def test_determine_direction_m2o_selfref(self):
        joincond = self._join_fixture_m2o_selfref()
        is_(joincond.direction, MANYTOONE)

    def test_determine_direction_o2m_composite_selfref(self):
        joincond = self._join_fixture_o2m_composite_selfref()
        is_(joincond.direction, ONETOMANY)

    def test_determine_direction_m2o_composite_selfref(self):
        joincond = self._join_fixture_m2o_composite_selfref()
        is_(joincond.direction, MANYTOONE)

    def test_determine_direction_m2o(self):
        joincond = self._join_fixture_m2o()
        is_(joincond.direction, MANYTOONE)

    def test_determine_direction_purely_single_o2m(self):
        joincond = self._join_fixture_purely_single_o2m()
        is_(joincond.direction, ONETOMANY)

    def test_determine_direction_purely_single_m2o(self):
        joincond = self._join_fixture_purely_single_m2o()
        is_(joincond.direction, MANYTOONE)
class DetermineJoinTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL):
    """Verify the SQL that JoinCondition produces for its primaryjoin /
    secondaryjoin, and the errors raised for ambiguous or missing FKs."""

    __dialect__ = 'default'

    def test_determine_join_o2m(self):
        joincond = self._join_fixture_o2m()
        self.assert_compile(
            joincond.primaryjoin,
            "lft.id = rgt.lid"
        )

    def test_determine_join_o2m_selfref(self):
        joincond = self._join_fixture_o2m_selfref()
        self.assert_compile(
            joincond.primaryjoin,
            "selfref.id = selfref.sid"
        )

    def test_determine_join_m2o_selfref(self):
        joincond = self._join_fixture_m2o_selfref()
        self.assert_compile(
            joincond.primaryjoin,
            "selfref.id = selfref.sid"
        )

    def test_determine_join_o2m_composite_selfref(self):
        joincond = self._join_fixture_o2m_composite_selfref()
        self.assert_compile(
            joincond.primaryjoin,
            "composite_selfref.group_id = composite_selfref.group_id "
            "AND composite_selfref.id = composite_selfref.parent_id"
        )

    def test_determine_join_m2o_composite_selfref(self):
        joincond = self._join_fixture_m2o_composite_selfref()
        self.assert_compile(
            joincond.primaryjoin,
            "composite_selfref.group_id = composite_selfref.group_id "
            "AND composite_selfref.id = composite_selfref.parent_id"
        )

    def test_determine_join_m2o(self):
        joincond = self._join_fixture_m2o()
        self.assert_compile(
            joincond.primaryjoin,
            "lft.id = rgt.lid"
        )

    def test_determine_join_ambiguous_fks_o2m(self):
        # multiple FK paths with no disambiguation must raise
        assert_raises_message(
            exc.AmbiguousForeignKeysError,
            "Could not determine join condition between "
            "parent/child tables on relationship None - "
            "there are multiple foreign key paths linking "
            "the tables. Specify the 'foreign_keys' argument, "
            "providing a list of those columns which "
            "should be counted as containing a foreign "
            "key reference to the parent table.",
            relationships.JoinCondition,
            self.left,
            self.right_multi_fk,
            self.left,
            self.right_multi_fk,
        )

    def test_determine_join_no_fks_o2m(self):
        self._assert_raises_no_join(
            relationships.JoinCondition,
            "None", None,
            self.left,
            self.selfref,
            self.left,
            self.selfref,
        )

    def test_determine_join_ambiguous_fks_m2m(self):
        self._assert_raises_ambig_join(
            relationships.JoinCondition,
            "None", self.m2msecondary_ambig_fks,
            self.m2mleft,
            self.m2mright,
            self.m2mleft,
            self.m2mright,
            secondary=self.m2msecondary_ambig_fks
        )

    def test_determine_join_no_fks_m2m(self):
        self._assert_raises_no_join(
            relationships.JoinCondition,
            "None", self.m2msecondary_no_fks,
            self.m2mleft,
            self.m2mright,
            self.m2mleft,
            self.m2mright,
            secondary=self.m2msecondary_no_fks
        )

    def _join_fixture_fks_ambig_m2m(self):
        # ambiguous secondary resolved via consider_as_foreign_keys
        return relationships.JoinCondition(
            self.m2mleft,
            self.m2mright,
            self.m2mleft,
            self.m2mright,
            secondary=self.m2msecondary_ambig_fks,
            consider_as_foreign_keys=[
                self.m2msecondary_ambig_fks.c.lid1,
                self.m2msecondary_ambig_fks.c.rid1]
        )

    def test_determine_join_w_fks_ambig_m2m(self):
        joincond = self._join_fixture_fks_ambig_m2m()
        self.assert_compile(
            joincond.primaryjoin,
            "m2mlft.id = m2msecondary_ambig_fks.lid1"
        )
        self.assert_compile(
            joincond.secondaryjoin,
            "m2mrgt.id = m2msecondary_ambig_fks.rid1"
        )
class AdaptedJoinTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL):
    """Verify JoinCondition.join_targets(): the primaryjoin adapted
    against aliased parent/child selectables."""

    __dialect__ = 'default'

    def test_join_targets_o2m_selfref(self):
        joincond = self._join_fixture_o2m_selfref()
        # alias the parent side; the adapted join references the alias
        left = select([joincond.parent_selectable]).alias('pj')
        pj, sj, sec, adapter, ds = joincond.join_targets(
            left,
            joincond.child_selectable,
            True)
        self.assert_compile(
            pj, "pj.id = selfref.sid"
        )
        # now alias the child side instead
        right = select([joincond.child_selectable]).alias('pj')
        pj, sj, sec, adapter, ds = joincond.join_targets(
            joincond.parent_selectable,
            right,
            True)
        self.assert_compile(
            pj, "selfref.id = pj.sid"
        )

    def test_join_targets_o2m_plain(self):
        joincond = self._join_fixture_o2m()
        pj, sj, sec, adapter, ds = joincond.join_targets(
            joincond.parent_selectable,
            joincond.child_selectable,
            False)
        self.assert_compile(
            pj, "lft.id = rgt.lid"
        )

    def test_join_targets_o2m_left_aliased(self):
        joincond = self._join_fixture_o2m()
        left = select([joincond.parent_selectable]).alias('pj')
        pj, sj, sec, adapter, ds = joincond.join_targets(
            left,
            joincond.child_selectable,
            True)
        self.assert_compile(
            pj, "pj.id = rgt.lid"
        )

    def test_join_targets_o2m_right_aliased(self):
        joincond = self._join_fixture_o2m()
        right = select([joincond.child_selectable]).alias('pj')
        pj, sj, sec, adapter, ds = joincond.join_targets(
            joincond.parent_selectable,
            right,
            True)
        self.assert_compile(
            pj, "lft.id = pj.lid"
        )

    def test_join_targets_o2m_composite_selfref(self):
        joincond = self._join_fixture_o2m_composite_selfref()
        right = select([joincond.child_selectable]).alias('pj')
        pj, sj, sec, adapter, ds = joincond.join_targets(
            joincond.parent_selectable,
            right,
            True)
        self.assert_compile(
            pj,
            "pj.group_id = composite_selfref.group_id "
            "AND composite_selfref.id = pj.parent_id"
        )

    def test_join_targets_m2o_composite_selfref(self):
        joincond = self._join_fixture_m2o_composite_selfref()
        right = select([joincond.child_selectable]).alias('pj')
        pj, sj, sec, adapter, ds = joincond.join_targets(
            joincond.parent_selectable,
            right,
            True)
        self.assert_compile(
            pj,
            "pj.group_id = composite_selfref.group_id "
            "AND pj.id = composite_selfref.parent_id"
        )
class LazyClauseTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL):
    """Verify JoinCondition.create_lazy_clause(): the bind-parameterized
    WHERE clause used for lazy loads, in both directions."""

    __dialect__ = 'default'

    def test_lazy_clause_o2m(self):
        joincond = self._join_fixture_o2m()
        lazywhere, bind_to_col, equated_columns = joincond.create_lazy_clause()
        self.assert_compile(
            lazywhere,
            ":param_1 = rgt.lid"
        )

    def test_lazy_clause_o2m_reverse(self):
        joincond = self._join_fixture_o2m()
        lazywhere, bind_to_col, equated_columns =\
            joincond.create_lazy_clause(reverse_direction=True)
        self.assert_compile(
            lazywhere,
            "lft.id = :param_1"
        )

    def test_lazy_clause_o2m_o_side_none(self):
        # test for #2948. When the join is "o.id == m.oid AND o.something == something",
        # we don't want 'o' brought into the lazy load for 'm'
        joincond = self._join_fixture_o2m_o_side_none()
        lazywhere, bind_to_col, equated_columns = joincond.create_lazy_clause()
        self.assert_compile(
            lazywhere,
            ":param_1 = rgt.lid AND :param_2 = :x_1",
            checkparams={'param_1': None, 'param_2': None, 'x_1': 5}
        )

    def test_lazy_clause_o2m_o_side_none_reverse(self):
        # continued test for #2948.
        joincond = self._join_fixture_o2m_o_side_none()
        lazywhere, bind_to_col, equated_columns = joincond.create_lazy_clause(reverse_direction=True)
        self.assert_compile(
            lazywhere,
            "lft.id = :param_1 AND lft.x = :x_1",
            checkparams= {'param_1': None, 'x_1': 5}
        )

    def test_lazy_clause_remote_local_multiple_ref(self):
        joincond = self._join_fixture_remote_local_multiple_ref()
        lazywhere, bind_to_col, equated_columns = joincond.create_lazy_clause()
        self.assert_compile(
            lazywhere,
            ":param_1 = selfref.sid OR selfref.sid = :param_1",
            checkparams={'param_1': None}
        )
| |
#
# Copyright (c) 2014, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
import unittest
import traceback
from mock import Mock
import ztpserver.serializers
import ztpserver.topology
from ztpserver.app import enable_handler_console # pylint: disable=W0611
from ztpserver.topology import Pattern, PatternError
from ztpserver.topology import Topology, TopologyError
from ztpserver.topology import Node, NodeError
from server_test_lib import random_string
from server_test_lib import create_node, create_neighbordb, create_pattern
class NodeUnitTests(unittest.TestCase):
    """Unit tests for ztpserver.topology.Node construction, neighbor
    handling and serialization."""

    def test_create_node_success(self):
        node = create_node()
        systemmac = node.systemmac
        kwargs = node.as_dict()
        node = Node(**kwargs)
        self.assertEqual(node.systemmac, systemmac)

    def test_create_node_systemmac_only(self):
        systemmac = random_string()
        node = Node(systemmac=systemmac)
        self.assertEqual(node.systemmac, systemmac)

    def test_create_node_failure(self):
        # Node requires a systemmac: constructing with no arguments must
        # raise TypeError.  BUG FIX: the original version passed silently
        # when Node() raised nothing at all; the else clause closes that
        # hole.
        try:
            Node()
        except TypeError:
            pass
        except Exception:
            self.fail('Node() raised an unexpected exception type')
        else:
            self.fail('Node() should have raised TypeError')

    def test_create_node_neighbors_valid(self):
        nodeattrs = create_node()
        device = random_string()
        port = random_string()
        neighbors = {'Ethernet1': {'device': device, 'port': port}}
        nodeattrs.add_neighbors(neighbors)
        kwargs = nodeattrs.as_dict()
        node = Node(**kwargs)
        self.assertIsNotNone(node.neighbors('Ethernet1'))
        self.assertEqual(node.neighbors('Ethernet1')[0].device, device)
        self.assertEqual(node.neighbors('Ethernet1')[0].port, port)

    def test_create_node_neighbors_port_missing(self):
        # a neighbor entry lacking 'port' must raise NodeError
        nodeattrs = create_node()
        device = random_string()
        neighbors = {'Ethernet1': {'device': device}}
        nodeattrs.add_neighbors(neighbors)
        kwargs = nodeattrs.as_dict()
        try:
            node = None
            node = Node(**kwargs)
        except NodeError:
            pass
        except Exception as exc:
            self.fail(exc)
        finally:
            self.assertIsNone(node)

    def test_create_node_neighbors_device_missing(self):
        # a neighbor entry lacking 'device' must raise NodeError
        nodeattrs = create_node()
        port = random_string()
        neighbors = {'Ethernet1': {'port': port}}
        nodeattrs.add_neighbors(neighbors)
        kwargs = nodeattrs.as_dict()
        try:
            node = None
            node = Node(**kwargs)
        except NodeError:
            pass
        except Exception as exc:
            self.fail(exc)
        finally:
            self.assertIsNone(node)

    def test_add_neighbor(self):
        systemmac = random_string()
        peer = Mock()
        intf = random_string()
        node = Node(systemmac=systemmac)
        node.add_neighbor(intf, [dict(device=peer.device, port=peer.port)])
        self.assertIsNotNone(node.neighbors(intf))
        self.assertEqual(node.neighbors(intf)[0].device, peer.device)
        self.assertEqual(node.neighbors(intf)[0].port, peer.port)

    def test_add_neighbor_existing_interface(self):
        # adding a second neighbor list on the same interface raises
        systemmac = random_string()
        peer = Mock()
        intf = random_string()
        node = Node(systemmac=systemmac)
        node.add_neighbor(intf, [dict(device=peer.device, port=peer.port)])
        self.assertRaises(ztpserver.topology.NodeError, node.add_neighbor,
                          intf, [dict(device=peer.device, port=peer.port)])

    def test_add_neighbors_success(self):
        nodeattrs = create_node()
        device = random_string()
        port = random_string()
        neighbors = {'Ethernet1': [{'device': device, 'port': port}]}
        kwargs = nodeattrs.as_dict()
        node = Node(**kwargs)
        node.add_neighbors(neighbors)
        self.assertIsNotNone(node.neighbors('Ethernet1'))
        self.assertEqual(node.neighbors('Ethernet1')[0].device, device)
        self.assertEqual(node.neighbors('Ethernet1')[0].port, port)

    def test_serialize_success(self):
        # round-trip: a Node built from as_dict() serializes back to it
        nodeattrs = create_node()
        kwargs = nodeattrs.as_dict()
        node = Node(**kwargs)
        result = node.serialize()
        self.assertEqual(result, nodeattrs.as_dict())
# class ResourcePoolUnitTests(unittest.TestCase):
# def setUp(self):
# self.pool = ResourcePool()
# self.pool.load_from_file = Mock()
# self.pool.dump_to_file = Mock()
# self.pooldata = dict()
# for i in range(0, 10): # pylint: disable=W0612
# self.pooldata[random_string()] = None
# self.pool.data = self.pooldata
# def test_serialize_success(self):
# resp = self.pool.serialize()
# self.assertEqual(resp, self.pool.data)
# def test_deserialize_success(self):
# pooldata = dict()
# for i in range(0, 10): # pylint: disable=W0612
# pooldata[random_string()] = None
# self.pool.deserialize(pooldata)
# self.assertEqual(pooldata, self.pool.data)
# def test_allocate_success(self):
# node = create_node()
# resp = self.pool.allocate(random_string(), node)
# self.assertIsNotNone(resp)
# self.assertIn(resp, self.pooldata.keys())
# def test_allocate_success_existing(self):
# node = create_node()
# key = random_string()
# self.pool.data[key] = node.systemmac
# resp = self.pool.allocate(random_string(), node)
# self.assertIsNotNone(resp)
# self.assertIn(resp, key)
# def test_allocate_success_no_resource(self):
# node = create_node()
# key = random_string()
# for key in self.pool.data.keys():
# self.pool.data[key] = random_string()
# self.assertRaises(ResourcePoolError, self.pool.allocate,
# random_string(), node)
# def test_lookup_success_found_key(self):
# node = create_node()
# key = random_string()
# self.pool.data[key] = node.systemmac
# resp = self.pool.lookup(random_string(), node)
# self.assertEqual(resp, key)
# def test_lookup_success_no_key(self):
# node = create_node()
# resp = self.pool.lookup(random_string(), node)
# self.assertIsNone(resp)
class FunctionsUnitTests(unittest.TestCase):
    """Exercise the match functions (Exact/Includes/Excludes/Regex) in
    ztpserver.topology."""

    def test_exactfunction_true(self):
        token = random_string()
        matcher = ztpserver.topology.ExactFunction(token)
        self.assertTrue(matcher.match(token))

    def test_exactfunction_false(self):
        matcher = ztpserver.topology.ExactFunction(random_string())
        self.assertFalse(matcher.match(random_string()))

    def test_includesfunction_true(self):
        token = random_string()
        matcher = ztpserver.topology.IncludesFunction(token)
        self.assertTrue(matcher.match(token))

    def test_includesfunction_false(self):
        matcher = ztpserver.topology.IncludesFunction(random_string())
        self.assertFalse(matcher.match(random_string()))

    def test_excludesfunction_true(self):
        matcher = ztpserver.topology.ExcludesFunction(random_string())
        self.assertTrue(matcher.match(random_string()))

    def test_excludesfunction_false(self):
        token = random_string()
        matcher = ztpserver.topology.ExcludesFunction(token)
        self.assertFalse(matcher.match(token))

    def test_regexfunction_true(self):
        # pattern matches any word character
        matcher = ztpserver.topology.RegexFunction('[\w+]')
        self.assertTrue(matcher.match(random_string()))

    def test_regexfunction_false(self):
        # pattern matches only non-alphanumerics, which random_string
        # does not produce
        matcher = ztpserver.topology.RegexFunction('[^a-zA-Z0-9]')
        self.assertFalse(matcher.match(random_string()))
class TestPattern(unittest.TestCase):
    """Construction tests for ztpserver.topology.Pattern."""

    def test_create_pattern_with_defaults(self):
        pattern = ztpserver.topology.Pattern(None, None, None)
        self.assertIsInstance(pattern, ztpserver.topology.Pattern)

    def test_create_pattern_with_kwargs(self):
        pattern = ztpserver.topology.Pattern(
            name='test',
            definition='test',
            node='abc123',
            variables={'var': 'test'},
            interfaces=[{'Ethernet1': 'any'}])
        self.assertEqual(pattern.name, 'test')
        self.assertEqual(pattern.definition, 'test')
        self.assertEqual(pattern.node, 'abc123')
        self.assertDictEqual({'var': 'test'}, pattern.variables)
        self.assertEqual(1, len(pattern.interfaces))

    def test_add_interface(self):
        pattern = ztpserver.topology.Pattern(None, None, [{'Ethernet1': 'any'}])
        self.assertEqual(len(pattern.interfaces), 1)
class PatternUnitTests(unittest.TestCase):
    """Unit tests for Pattern creation and interface handling."""

    def test_create_pattern(self):
        pattern = Pattern(random_string())
        self.assertIsInstance(pattern, Pattern)

    def test_create_pattern_kwargs(self):
        kwargs = dict(name=random_string(),
                      definition=random_string(),
                      interfaces=None)
        pattern = Pattern(**kwargs)
        self.assertIsInstance(pattern, Pattern)

    def test_add_interface_success(self):
        kwargs = dict(name=random_string(),
                      definition=random_string(),
                      interfaces=None)
        pattern = Pattern(**kwargs)
        remote_device = random_string()
        remote_intf = random_string()
        neighbors = dict(Ethernet1={'device': remote_device,
                                    'port': remote_intf})
        try:
            pattern.add_interface(neighbors)
        except Exception:
            # BUG FIX: original read "print traceback.print_exc(exc)" --
            # a Python 2 print statement, and print_exc() takes no
            # exception argument and returns None.  Print the traceback
            # for diagnosis, then flag the failure.
            traceback.print_exc()
            self.fail('add_interface raised an exception unexpectedly')

    def test_add_interface_failure(self):
        kwargs = dict(name=random_string(),
                      definition=random_string(),
                      interfaces=None)
        pattern = Pattern(**kwargs)
        self.assertRaises(PatternError, pattern.add_interface, random_string())
class TestInterfacePattern(unittest.TestCase):
def test_create_interface_pattern(self):
intf = random_string()
device = random_string()
port = random_string()
obj = ztpserver.topology.InterfacePattern(intf, device, port)
reprobj = 'InterfacePattern(interface=%s, device=%s, port=%s)' % \
(intf, device, port)
self.assertEqual(repr(obj), reprobj)
def test_match_success(self):
interface = random_string()
device = random_string()
port = random_string()
neighbors = [dict(device=device, port=port)]
pattern = ztpserver.topology.InterfacePattern(interface, device, port)
pattern.match_neighbors = Mock(return_value=True)
pattern.match_interface = Mock(return_value=True)
neighbor = ztpserver.topology.Neighbor(device, port)
result = pattern.match(interface, neighbor)
self.assertTrue(result)
def test_match_false_interface(self):
interface = random_string()
device = random_string()
port = random_string()
neighbors = [dict(device=device, port=port)]
pattern = ztpserver.topology.InterfacePattern(interface, device, port)
pattern.match_neighbors = Mock(return_value=True)
pattern.match_interface = Mock(return_value=False)
neighbor = ztpserver.topology.Neighbor(device, port)
result = pattern.match(interface, neighbor)
self.assertFalse(pattern.match_neighbors.called)
self.assertFalse(result)
def test_match_false_neighbor(self):
interface = random_string()
device = random_string()
port = random_string()
neighbors = [dict(device=device, port=port)]
pattern = ztpserver.topology.InterfacePattern(interface, device, port)
pattern.match_neighbors = Mock(return_value=False)
pattern.match_interface = Mock(return_value=True)
neighbor = ztpserver.topology.Neighbor(device, port)
self.assertRaises(ztpserver.topology.InterfacePatternError,
pattern.match, interface, neighbor)
def compile_known_function(self, interface, cls):
pattern = ztpserver.topology.InterfacePattern(interface,
random_string(),
random_string())
self.assertIsInstance(pattern.interface_re, cls)
def test_compile_exact_function(self):
interface = 'exact(\'%s\')' % random_string()
self.compile_known_function(interface, ztpserver.topology.ExactFunction)
def test_compile_includes_function(self):
    """An includes(...) spec compiles to an IncludesFunction matcher."""
    spec = "includes('%s')" % random_string()
    self.compile_known_function(spec, ztpserver.topology.IncludesFunction)
def test_compile_excludes_function(self):
    """An excludes(...) spec compiles to an ExcludesFunction matcher."""
    spec = "excludes('%s')" % random_string()
    self.compile_known_function(spec, ztpserver.topology.ExcludesFunction)
def test_compile_regex_function(self):
    """A regex(...) spec compiles to a RegexFunction matcher."""
    spec = "regex('%s')" % random_string()
    self.compile_known_function(spec, ztpserver.topology.RegexFunction)
def test_compile_no_function(self):
    """A bare string with no function wrapper defaults to ExactFunction."""
    spec = random_string()
    self.compile_known_function(spec, ztpserver.topology.ExactFunction)
def test_compile_unknown_function(self):
    """An unrecognized function name raises InterfacePatternError.

    Removed the unused `device` and `port` locals; fresh random strings
    are passed directly to the constructor, as in the original.
    """
    interface = "%s('%s')" % (random_string(), random_string())
    self.assertRaises(ztpserver.topology.InterfacePatternError,
                      ztpserver.topology.InterfacePattern,
                      interface, random_string(), random_string())
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This executable module is a console application for presenting itself as a
MODBUS server accepting read and write MODBUS PDUs.
"""
import os
import logging

from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolecmd import ConsoleCmd
from bacpypes.consolelogging import ArgumentParser
from bacpypes.comm import Client, bind
from bacpypes.core import run

# ReadInputRegistersResponse added: it is used by
# SimpleServer.do_ReadInputRegistersRequest but was never imported.
from .pdu import ExceptionResponse, \
    ReadCoilsResponse, ReadDiscreteInputsResponse, ReadInputRegistersResponse, \
    ReadMultipleRegistersResponse, \
    WriteSingleCoilResponse, WriteSingleRegisterResponse, WriteMultipleRegistersResponse
from .app import ModbusServer, ModbusException
# some debugging
_debug = 0
_log = ModuleLogger(globals())
_commlog = logging.getLogger(__name__ + "._commlog")
# settings
SERVER_HOST = os.getenv("SERVER_HOST", "")
SERVER_PORT = int(os.getenv("SERVER_PORT", 502))
IDLE_TIMEOUT = int(os.getenv('IDLE_TIMEOUT', 0)) or None
#
# SimpleServer
#
@bacpypes_debugging
class SimpleServer(Client):

    """
    Simple Server

    Serves ten coils and ten holding registers for a single MODBUS unit
    number.  Each incoming request PDU is dispatched by reflection to a
    ``do_<RequestClassName>`` handler; unknown request types produce an
    ILLEGAL_FUNCTION exception response.
    """

    def __init__(self, unitNumber=1):
        if _debug: SimpleServer._debug("__init__")
        Client.__init__(self)

        # save the unit number
        self.unitNumber = unitNumber

        # create some coils and registers
        self.coils = [False] * 10
        self.registers = [0] * 10

    def confirmation(self, req):
        """Got a request from a client."""
        if _debug: SimpleServer._debug("confirmation %r", req)
        _commlog.debug(">>> %r %r", req.pduSource, req)

        # if its an exception, punt
        if isinstance(req, Exception):
            if _debug: SimpleServer._debug(" - punt exceptions")
            return

        # if it's not for us, dump it
        if req.mpduUnitID != self.unitNumber:
            if _debug: SimpleServer._debug(" - not for us")
            return

        try:
            # look up a matching function
            try:
                fn = getattr(self, "do_" + req.__class__.__name__)
            except AttributeError:
                raise ModbusException(ExceptionResponse.ILLEGAL_FUNCTION)

            # try to execute it
            resp = fn(req)
        except ModbusException as err:
            # create an exception response
            resp = ExceptionResponse(req.mpduFunctionCode, err.errCode)

        # match the transaction information
        resp.pduDestination = req.pduSource
        resp.mpduTransactionID = req.mpduTransactionID
        resp.mpduUnitID = req.mpduUnitID
        _commlog.debug("<<< %r %r", resp.pduDestination, resp)

        # send the response back
        self.request(resp)

    def pull_coils(self, address, count):
        """Called when there is a request for the current value of a coil."""
        if _debug: SimpleServer._debug("pull_coils %r %r", address, count)

    def push_coils(self, address, count):
        """Called when a MODBUS service has changed the value of one or more coils."""
        if _debug: SimpleServer._debug("push_coils %r %r", address, count)

    def pull_registers(self, address, count):
        """Called when a MODBUS client is requesting the current value of one
        or more registers."""
        if _debug: SimpleServer._debug("pull_registers %r %r", address, count)

    def push_registers(self, address, count):
        """Called when a MODBUS service has changed the value of one or more
        registers."""
        if _debug: SimpleServer._debug("push_registers %r %r", address, count)

    # ---------- Coils ----------

    def do_ReadCoilsRequest(self, req):
        """Return the current values of `count` coils starting at `address`."""
        # debug call is now guarded by _debug, consistent with the other methods
        if _debug: SimpleServer._debug('do_ReadCoilsRequest %r', req)

        if (req.address + req.count) > len(self.coils):
            raise ModbusException(ExceptionResponse.ILLEGAL_DATA_ADDRESS)

        self.pull_coils(req.address, req.count)
        return ReadCoilsResponse(self.coils[req.address:req.address + req.count])

    def do_WriteSingleCoilRequest(self, req):
        """Write one coil; 0x0000 clears it, 0xFF00 sets it (per the MODBUS spec)."""
        if _debug: SimpleServer._debug('do_WriteSingleCoilRequest %r', req)

        # BUG FIX: was `>`, which let address == len(self.coils) through
        # and raised IndexError instead of a proper MODBUS exception.
        if req.address >= len(self.coils):
            raise ModbusException(ExceptionResponse.ILLEGAL_DATA_ADDRESS)

        # check the value and save it
        if (req.value == 0x0000):
            self.coils[req.address] = 0
        elif (req.value == 0xFF00):
            self.coils[req.address] = 1
        else:
            raise ModbusException(ExceptionResponse.ILLEGAL_DATA_VALUE)
        self.push_coils(req.address, 1)

        # return the new value
        return WriteSingleCoilResponse(req.address, req.value)

    # ---------- Descrete Inputs (mapped as a coil) ----------
    # NOTE: "Descrete" [sic] must match the request class name in .pdu --
    # the dispatch in confirmation() is by "do_" + class name, so the
    # method name cannot be corrected independently of the PDU class.

    def do_ReadDescreteInputsRequest(self, req):
        """Read discrete inputs; backed by the same storage as the coils."""
        if _debug: SimpleServer._debug('do_ReadDescreteInputsRequest %r', req)

        if (req.address + req.count) > len(self.coils):
            raise ModbusException(ExceptionResponse.ILLEGAL_DATA_ADDRESS)

        self.pull_coils(req.address, req.count)
        return ReadDiscreteInputsResponse(self.coils[req.address:req.address + req.count])

    # ---------- Registers ----------

    def do_ReadMultipleRegistersRequest(self, req):
        """Return the current values of `count` registers starting at `address`."""
        if _debug: SimpleServer._debug('do_ReadMultipleRegistersRequest %r', req)

        if (req.address + req.count) > len(self.registers):
            raise ModbusException(ExceptionResponse.ILLEGAL_DATA_ADDRESS)

        self.pull_registers(req.address, req.count)
        return ReadMultipleRegistersResponse(self.registers[req.address:req.address + req.count])

    def do_WriteSingleRegisterRequest(self, req):
        """Write one holding register and echo the new value back."""
        if _debug: SimpleServer._debug('do_WriteSingleRegisterRequest %r', req)

        # BUG FIX: was `>`, which let address == len(self.registers) through
        # and raised IndexError instead of a proper MODBUS exception.
        if req.address >= len(self.registers):
            raise ModbusException(ExceptionResponse.ILLEGAL_DATA_ADDRESS)

        # save the value
        self.registers[req.address] = req.value
        self.push_registers(req.address, 1)

        # return the new value
        return WriteSingleRegisterResponse(req.address, req.value)

    def do_WriteMultipleRegistersRequest(self, req):
        """Write `count` registers starting at `address`."""
        if _debug: SimpleServer._debug('do_WriteMultipleRegistersRequest %r', req)

        if (req.address + req.count) > len(self.registers):
            raise ModbusException(ExceptionResponse.ILLEGAL_DATA_ADDRESS)

        # save the values
        for i in range(req.count):
            self.registers[req.address + i] = req.registers[i]
        self.push_registers(req.address, req.count)

        return WriteMultipleRegistersResponse(req.address, req.count)

    # ---------- Input Registers (mapped as a register) ----------

    def do_ReadInputRegistersRequest(self, req):
        """Read input registers; backed by the same storage as the holding registers."""
        if _debug: SimpleServer._debug('do_ReadInputRegistersRequest %r', req)

        if (req.address + req.count) > len(self.registers):
            raise ModbusException(ExceptionResponse.ILLEGAL_DATA_ADDRESS)

        self.pull_registers(req.address, req.count)
        # relies on ReadInputRegistersResponse being imported from .pdu
        return ReadInputRegistersResponse(self.registers[req.address:req.address + req.count])
#
# main
#
def main():
    """Parse command-line options, bind a SimpleServer to a ModbusServer
    transport, and run the bacpypes event loop until interrupted."""
    # parse the command line arguments
    parser = ArgumentParser(description=__doc__)

    # listener arguments
    parser.add_argument(
        "--host", type=str,
        help="address of host (default {!r})".format(SERVER_HOST),
        default=SERVER_HOST,
    )
    parser.add_argument(
        "--port", type=int,
        help="server port (default {!r})".format(SERVER_PORT),
        default=SERVER_PORT,
    )

    # connection timeout arguments
    parser.add_argument(
        "--idle-timeout", nargs='?', type=int,
        help="idle connection timeout",
        default=IDLE_TIMEOUT,
    )
    args = parser.parse_args()

    if _debug: _log.debug("initialization")
    if _debug: _log.debug(" - args: %r", args)

    # local IO functions
    # NOTE(review): args.host is parsed above but never passed to
    # ModbusServer -- confirm whether ModbusServer accepts a host/address
    # argument, otherwise the --host flag is silently ignored.
    bind(SimpleServer(), ModbusServer(port=args.port, idle_timeout=args.idle_timeout))

    _log.debug("running")
    run()
    _log.debug("fini")


# Run as a script.
if __name__ == "__main__":
    main()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Models related to the State of Missouri's Legislative sessions.
"""
from __future__ import unicode_literals
from django.db import models
from general.models import (
AMBaseModel,
Organization,
Person,
)
from .bill import Bill
from legislative.models import BodyMembership
from django.utils.encoding import python_2_unicode_compatible
from django_react_templatetags.mixins import RepresentationMixin
@python_2_unicode_compatible
class BillTitle(AMBaseModel):
    """
    An alternate title for a Bill.
    """
    # Owning bill; reverse accessor is bill.other_titles.
    bill = models.ForeignKey(
        Bill,
        related_name='other_titles',
        help_text='Reference to the bill with the alternate title.',
    )
    title = models.TextField(
        help_text='Alternate title for the bill.',
    )
    note = models.TextField(
        help_text='A note describing the origin of the title.',
        blank=True,
    )

    def __str__(self):
        # e.g. "Some Act (HB 123)"
        return '{0} ({1})'.format(self.title, self.bill.identifier)
@python_2_unicode_compatible
class BillAbstract(AMBaseModel):
    """
    An available abstract (sometimes called a summary) for a Bill.
    """
    # Owning bill; reverse accessor is bill.abstracts.
    bill = models.ForeignKey(
        Bill,
        related_name='abstracts',
        help_text='Reference to the bill with the abstract.',
    )
    abstract = models.TextField(
        help_text='Text of the abstract.',
    )
    note = models.TextField(
        blank=True,
        help_text='A note about the origin of the summary.',
    )

    def __str__(self):
        return '{0} abstract'.format(self.bill.identifier)
@python_2_unicode_compatible
class BillAction(RepresentationMixin, AMBaseModel):
    """
    An individual action on a bill.
    """
    # Owning bill; reverse accessor is bill.actions.
    bill = models.ForeignKey(
        Bill,
        related_name='actions',
        help_text='Reference to the bill that was acted on.',
    )
    organization = models.ForeignKey(
        Organization,
        related_name='actions',
        help_text='Reference to the Organization that the action took place '
                  'within.',
        null=True,
    )
    description = models.TextField(
        help_text='Description of the action.',
    )
    date = models.DateField(
        help_text='Date when the action occurred.',
    )
    order = models.PositiveIntegerField(
        help_text="Order of the action in the Bill's activity history.",
    )

    def to_react_representation(self, context=None):
        """Return the dict rendered by django_react_templatetags.

        ``context`` is unused but kept for interface compatibility with
        RepresentationMixin.  FIX: its default was a mutable ``{}``,
        replaced with ``None`` to avoid the shared-default pitfall.
        """
        return {
            'description': self.description,
            'date': self.date.strftime('%Y-%m-%d')
        }

    def serialize_to_dict(self):
        """Return a plain-dict representation of this action."""
        d = {}
        d['bill_id'] = self.bill.id
        d['description'] = self.description
        # BUG FIX: was `self.strftime(...)`, which raised AttributeError --
        # the date lives on self.date.
        d['date'] = self.date.strftime('%Y-%m-%d')
        return d

    class Meta:
        """
        Model options.
        """
        # Most recent date first, then latest action within the day.
        ordering = ['-date', '-order']

    def __str__(self):
        return '{0} action on {1}'.format(self.bill.identifier, self.date)
@python_2_unicode_compatible
class BillVersion(AMBaseModel):
    """
    A version of a Bill.
    """
    # Owning bill; reverse accessor is bill.versions.
    bill = models.ForeignKey(
        Bill,
        related_name='versions',
        help_text='Reference to the Bill to which the version belongs.',
    )
    # Optional suffix of the Legislative Reference number.
    lr_number = models.CharField(
        max_length=4,
        null=True,
        blank=True,
        help_text="Second part of Legislative Reference number (after '.').",
    )
    note = models.CharField(
        max_length=300,
        help_text='Note describing the version.',
    )
    date = models.DateField(
        help_text='Date when the version was published.',
    )
    url = models.URLField(
        max_length=2000,
        help_text='Official URL where the text of the version is available.',
    )

    def __str__(self):
        return '{0} version of {1}'.format(self.date, self.bill)
@python_2_unicode_compatible
class BillAmendment(AMBaseModel):
    """
    A proposed amendment to the Bill.
    """
    # Owning bill; reverse accessor is bill.amendments.
    bill = models.ForeignKey(
        Bill,
        related_name='amendments',
        help_text='Reference to the Bill to which the amendment was proposed.',
    )
    member = models.ForeignKey(
        BodyMembership,
        related_name='amendments',
        help_text='Reference to the member who proposed the bill amendment.',
    )
    # Free-text status, not a constrained choices field.
    status = models.CharField(
        max_length=300,
        help_text='Status of the bill amendment (e.g., "Distributed", '
                  '"Withdrawn").',
    )
    url = models.URLField(
        max_length=2000,
        help_text='Official URL where the text of the amendments is available.',
    )

    def __str__(self):
        return '{0} amendment to {1}'.format(self.status, self.bill)
@python_2_unicode_compatible
class BillSponsorship(AMBaseModel):
    """
    A sponsorship of Bill by a member of a legislative body.
    """
    # Sponsored bill; reverse accessor is bill.sponsorships.
    bill = models.ForeignKey(
        Bill,
        related_name='sponsorships',
        help_text='Reference to the sponsored Bill.',
    )
    # Nullable: the specific membership may be unknown even when the
    # person is known.
    member = models.ForeignKey(
        BodyMembership,
        related_name='bill_sponsorships',
        help_text='Reference to the membership record for who sponsored the Bill.',
        null=True,
    )
    person = models.ForeignKey(
        Person,
        related_name='bill_sponsorships',
        help_text='The person who sponsored the bill.',
    )
    primary = models.BooleanField(
        default=False,
        help_text='Indicates the member is a primary sponsor of the Bill.',
    )
    sponsored_at = models.DateField(
        null=True,
        blank=True,
        help_text='Date and time when the member sponsored the bill.',
    )

    def __str__(self):
        return '{} sponsorship of {}'.format(
            self.person,
            self.bill,
        )

    class Meta:
        # Most recent sponsorships first; primary sponsors last within a date.
        ordering = ['-sponsored_at', '-primary',]
| |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Registry for models, hyperparameter settings, problem types, and datasets.
Define a new model by subclassing T2TModel and register it:
```
@registry.register_model
class MyModel(T2TModel):
...
```
Access by snake-cased name: `registry.model("my_model")`. If you're using
`t2t_trainer.py`, you can pass on the command-line: `--model=my_model`.
See all the models registered: `registry.list_models()`.
For hyperparameter sets:
* Register: `registry.register_hparams`
* List: `registry.list_hparams`
* Retrieve by name: `registry.hparams`
* Command-line flag in `t2t_trainer.py`: `--hparams_set=name`
For hyperparameter ranges:
* Register: `registry.register_ranged_hparams`
* List: `registry.list_ranged_hparams`
* Retrieve by name: `registry.ranged_hparams`
* Command-line flag in `t2t_trainer.py`: `--hparams_range=name`
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import re
# Dependency imports
import six
from tensorflow.python.eager import context
# Global registries, keyed by snake-cased name.
_MODELS = {}
_HPARAMS = {}
_RANGED_HPARAMS = {}
_PROBLEMS = {}


class Modalities(object):
  # String constants naming each modality family.
  SYMBOL = "symbol"
  IMAGE = "image"
  AUDIO = "audio"
  CLASS_LABEL = "class_label"
  GENERIC = "generic"
  REAL = "real"


# One registry dict per modality family, keyed by the Modalities constants.
_MODALITIES = {
    Modalities.SYMBOL: {},
    Modalities.IMAGE: {},
    Modalities.AUDIO: {},
    Modalities.CLASS_LABEL: {},
    Modalities.GENERIC: {},
    Modalities.REAL: {},
}

# Camel case to snake case utils
_first_cap_re = re.compile("(.)([A-Z][a-z0-9]+)")
_all_cap_re = re.compile("([a-z0-9])([A-Z])")
def _convert_camel_to_snake(name):
s1 = _first_cap_re.sub(r"\1_\2", name)
return _all_cap_re.sub(r"\1_\2", s1).lower()
def _reset():
  """Empty every registry, including all per-family modality registries."""
  registries = [_MODELS, _HPARAMS, _RANGED_HPARAMS]
  registries.extend(_MODALITIES.values())
  for registry in registries:
    registry.clear()
def default_name(obj_class):
  """Convert a class name to the registry's default name for the class.

  Args:
    obj_class: the name of a class

  Returns:
    The registry's default name for the class.
  """
  class_name = obj_class.__name__
  return _convert_camel_to_snake(class_name)
def default_object_name(obj):
  """Convert an object to the registry's default name for the object class.

  Args:
    obj: an object instance

  Returns:
    The registry's default name for the class of the object.
  """
  obj_class = obj.__class__
  return default_name(obj_class)
def register_model(name=None):
  """Register a model. name defaults to class name snake-cased."""

  def _do_register(model_cls, registration_name=None):
    """Insert model_cls into _MODELS under its registration name."""
    key = registration_name or default_name(model_cls)
    # Re-registration is only tolerated under eager execution, where
    # modules may be re-imported.
    if key in _MODELS and not context.in_eager_mode():
      raise LookupError("Model %s already registered." % key)
    model_cls.REGISTERED_NAME = key
    _MODELS[key] = model_cls
    return model_cls

  # Bare usage: @register_model applied directly to the class.
  if callable(name):
    return _do_register(name, registration_name=default_name(name))
  # Parenthesized usage: @register_model("custom_name").
  return lambda model_cls: _do_register(model_cls, name)
def model(name):
  """Look up a registered model class by its snake-cased name."""
  if name in _MODELS:
    return _MODELS[name]
  known = "\n".join(list_models())
  raise LookupError("Model %s never registered. Available models:\n %s" % (
      name, known))
def list_models():
  """Return the sorted list of registered model names."""
  # sorted() already returns a list; the previous list(sorted(...))
  # wrapper was redundant.
  return sorted(_MODELS)
def register_hparams(name=None):
  """Register an HParams set. name defaults to function name snake-cased."""

  def _do_register(hp_fn, registration_name=None):
    """Insert hp_fn into _HPARAMS under its registration name."""
    key = registration_name or default_name(hp_fn)
    # Re-registration is only tolerated under eager execution.
    if key in _HPARAMS and not context.in_eager_mode():
      raise LookupError("HParams set %s already registered." % key)
    _HPARAMS[key] = hp_fn
    return hp_fn

  # Bare usage: @register_hparams applied directly to the function.
  if callable(name):
    return _do_register(name, registration_name=default_name(name))
  # Parenthesized usage: @register_hparams("custom_name").
  return lambda hp_fn: _do_register(hp_fn, name)
def hparams(name):
  """Look up a registered HParams-set function by name."""
  if name in _HPARAMS:
    return _HPARAMS[name]
  listing = display_list_by_prefix(list_hparams(), starting_spaces=4)
  raise LookupError(
      "HParams set %s never registered. Sets registered:\n%s" % (name, listing))
def list_hparams():
  # Registered HParams-set names, in registration (dict) order.
  return list(_HPARAMS)
def register_ranged_hparams(name=None):
  """Register a RangedHParams set. name defaults to fn name snake-cased."""

  def decorator(rhp_fn, registration_name=None):
    """Registers & returns rhp_fn with registration_name or default name."""
    rhp_name = registration_name or default_name(rhp_fn)
    if rhp_name in _RANGED_HPARAMS:
      raise LookupError("RangedHParams set %s already registered." % rhp_name)
    # Check that the fn takes a single argument.
    # FIX: inspect.getargspec was removed in Python 3.11; use
    # getfullargspec when available and fall back for old interpreters.
    if hasattr(inspect, "getfullargspec"):
      spec = inspect.getfullargspec(rhp_fn)
      args, varargs, keywords = spec.args, spec.varargs, spec.varkw
    else:
      args, varargs, keywords, _ = inspect.getargspec(rhp_fn)
    if len(args) != 1 or varargs is not None or keywords is not None:
      raise ValueError("RangedHParams set function must take a single "
                       "argument, the RangedHParams object.")

    _RANGED_HPARAMS[rhp_name] = rhp_fn
    return rhp_fn

  # Handle if decorator was used without parens
  if callable(name):
    rhp_fn = name
    return decorator(rhp_fn, registration_name=default_name(rhp_fn))

  return lambda rhp_fn: decorator(rhp_fn, name)
def ranged_hparams(name):
  """Look up a registered RangedHParams function by name."""
  rhp_fn = _RANGED_HPARAMS.get(name)
  if rhp_fn is None:
    raise LookupError("RangedHParams set %s never registered." % name)
  return rhp_fn
def list_ranged_hparams():
  # Registered RangedHParams-set names, in registration (dict) order.
  return list(_RANGED_HPARAMS)
def register_problem(name=None):
  """Register a Problem. name defaults to cls name snake-cased."""

  def _do_register(p_cls, registration_name=None):
    """Insert p_cls into _PROBLEMS under its registration name."""
    key = registration_name or default_name(p_cls)
    # Re-registration is only tolerated under eager execution.
    if key in _PROBLEMS and not context.in_eager_mode():
      raise LookupError("Problem %s already registered." % key)
    _PROBLEMS[key] = p_cls
    p_cls.name = key
    return p_cls

  # Bare usage: @register_problem applied directly to the class.
  if callable(name):
    return _do_register(name, registration_name=default_name(name))
  # Parenthesized usage: @register_problem("custom_name").
  return lambda p_cls: _do_register(p_cls, name)
def problem(name):
  """Retrieve a problem by name.

  A name may carry any number of trailing "_rev" / "_copy" tags; they are
  stripped (from the end, repeatedly) to find the registered base problem,
  and the presence of each tag is passed to the problem constructor.
  """
  base_name = name
  was_reversed = False
  was_copy = False
  stripping = True
  while stripping:
    if base_name.endswith("_rev"):
      base_name = base_name[:-4]
      was_reversed = True
    elif base_name.endswith("_copy"):
      base_name = base_name[:-5]
      was_copy = True
    else:
      stripping = False

  if base_name not in _PROBLEMS:
    all_problem_names = sorted(list_problems())
    error_lines = ["%s not in the set of supported problems:" % base_name
                  ] + all_problem_names
    raise LookupError("\n * ".join(error_lines))
  return _PROBLEMS[base_name](was_reversed, was_copy)
def list_problems():
  # Registered base problem names, in registration (dict) order.
  return list(_PROBLEMS)
def _internal_get_modality(name, mod_collection, collection_str):
if name is None:
name = "default"
if name not in mod_collection:
raise LookupError("%s modality %s never registered." % (collection_str,
name))
return mod_collection[name]
# Thin lookup wrappers, one per modality family.  `name=None` selects the
# family's "default" modality.
def symbol_modality(name=None):
  return _internal_get_modality(name, _MODALITIES[Modalities.SYMBOL],
                                Modalities.SYMBOL.capitalize())


def generic_modality(name=None):
  return _internal_get_modality(name, _MODALITIES[Modalities.GENERIC],
                                Modalities.GENERIC.capitalize())


def audio_modality(name=None):
  return _internal_get_modality(name, _MODALITIES[Modalities.AUDIO],
                                Modalities.AUDIO.capitalize())


def image_modality(name=None):
  return _internal_get_modality(name, _MODALITIES[Modalities.IMAGE],
                                Modalities.IMAGE.capitalize())


def class_label_modality(name=None):
  return _internal_get_modality(name, _MODALITIES[Modalities.CLASS_LABEL],
                                Modalities.CLASS_LABEL.capitalize())


def real_modality(name=None):
  return _internal_get_modality(name, _MODALITIES[Modalities.REAL],
                                Modalities.REAL.capitalize())
def _internal_register_modality(name, mod_collection, collection_str):
  """Register a modality into mod_collection."""

  def _do_register(mod_cls, registration_name=None):
    """Insert mod_cls into mod_collection under its registration name."""
    key = registration_name or default_name(mod_cls)
    # Re-registration is only tolerated under eager execution.
    if key in mod_collection and not context.in_eager_mode():
      raise LookupError("%s modality %s already registered." % (collection_str,
                                                                key))
    mod_collection[key] = mod_cls
    return mod_cls

  # Bare decorator usage (applied without parentheses).
  if callable(name):
    return _do_register(name, registration_name=default_name(name))
  # Parenthesized usage with an explicit name.
  return lambda mod_cls: _do_register(mod_cls, name)
# Thin registration wrappers, one per modality family.
def register_symbol_modality(name=None):
  """Register a symbol modality. name defaults to class name snake-cased."""
  return _internal_register_modality(name, _MODALITIES[Modalities.SYMBOL],
                                     Modalities.SYMBOL.capitalize())


def register_generic_modality(name=None):
  """Register a generic modality. name defaults to class name snake-cased."""
  return _internal_register_modality(name, _MODALITIES[Modalities.GENERIC],
                                     Modalities.GENERIC.capitalize())


def register_real_modality(name=None):
  """Register a real modality. name defaults to class name snake-cased."""
  return _internal_register_modality(name, _MODALITIES[Modalities.REAL],
                                     Modalities.REAL.capitalize())


def register_audio_modality(name=None):
  """Register an audio modality. name defaults to class name snake-cased."""
  return _internal_register_modality(name, _MODALITIES[Modalities.AUDIO],
                                     Modalities.AUDIO.capitalize())


def register_image_modality(name=None):
  """Register an image modality. name defaults to class name snake-cased."""
  return _internal_register_modality(name, _MODALITIES[Modalities.IMAGE],
                                     Modalities.IMAGE.capitalize())


def register_class_label_modality(name=None):
  """Register a class-label modality. name defaults to class name snake-cased."""
  return _internal_register_modality(name, _MODALITIES[Modalities.CLASS_LABEL],
                                     Modalities.CLASS_LABEL.capitalize())
def list_modalities():
  """Return all registered modalities as "family:name" strings."""
  all_modalities = []
  for modality_type, modalities in six.iteritems(_MODALITIES):
    for modality_name in modalities:
      all_modalities.append("%s:%s" % (modality_type, modality_name))
  return all_modalities
def parse_modality_name(name):
  """Split "type:name" into (type, name); a bare type gets name "default"."""
  parts = name.split(":")
  if len(parts) < 2:
    parts.append("default")
  # Unpacking (rather than indexing) preserves the original ValueError
  # on names with more than one ":".
  modality_type, modality_name = parts
  return modality_type, modality_name
def create_modality(modality_spec, model_hparams):
  """Create modality.

  Args:
    modality_spec: tuple, ("modality_type:modality_name", vocab_size).
    model_hparams: HParams object.

  Returns:
    Modality instance.

  Raises:
    LookupError: if modality_type is not recognized. See Modalities class for
      accepted types.
  """
  # Map each modality family to its lookup function.
  retrieval_fns = {
      Modalities.SYMBOL: symbol_modality,
      Modalities.AUDIO: audio_modality,
      Modalities.IMAGE: image_modality,
      Modalities.CLASS_LABEL: class_label_modality,
      Modalities.GENERIC: generic_modality,
      Modalities.REAL: real_modality,
  }

  full_name, vocab_size = modality_spec
  modality_type, modality_name = parse_modality_name(full_name)
  if modality_type not in retrieval_fns:
    raise LookupError("Modality type %s not recognized. Options are: %s" %
                      (modality_type, list(_MODALITIES)))

  modality_cls = retrieval_fns[modality_type](modality_name)
  return modality_cls(model_hparams, vocab_size)
def display_list_by_prefix(names_list, starting_spaces=0):
  """Creates a help string for names_list grouped by prefix."""
  indent = " " * starting_spaces
  lines = []
  previous_prefix = None
  for name in sorted(names_list):
    # A name's group is everything before its first underscore.
    prefix = name.split("_", 1)[0]
    if prefix != previous_prefix:
      lines.append("%s%s:" % (indent, prefix))
      previous_prefix = prefix
    lines.append("%s  * %s" % (indent, name))
  return "\n".join(lines)
def help_string():
  """Generate help string with contents of registry."""
  # Template filled with one prefix-grouped listing per registry.
  help_str = """
Registry contents:
------------------
Models:
%s
HParams:
%s
RangedHParams:
%s
Modalities:
%s
Problems:
%s
"""
  m, hp, rhp, mod, probs = [
      display_list_by_prefix(entries, starting_spaces=4)
      for entries in [
          list_models(),
          list_hparams(),
          list_ranged_hparams(),
          list_modalities(),
          list_problems()
      ]
  ]
  return help_str % (m, hp, rhp, mod, probs)
| |
import datetime
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.core.handlers.wsgi import WSGIRequest
from django.test import Client
from django.test import TestCase
from registration import forms
from registration import signals
from registration.admin import RegistrationAdmin
from registration.backends import get_backend
from registration.backends.default import DefaultBackend
from registration.models import RegistrationProfile
class _MockRequestClient(Client):
    """
    A ``django.test.Client`` subclass which can return mock
    ``HttpRequest`` objects.
    """
    def request(self, **request):
        """
        Rather than issuing a request and returning the response, this
        simply constructs an ``HttpRequest`` object and returns it.
        """
        # Minimal WSGI environ for a GET / against "testserver".
        environ = {
            # NOTE(review): self.cookies is a SimpleCookie object, not a
            # header string -- confirm WSGIRequest tolerates this.
            'HTTP_COOKIE': self.cookies,
            'PATH_INFO': '/',
            'QUERY_STRING': '',
            'REMOTE_ADDR': '127.0.0.1',
            'REQUEST_METHOD': 'GET',
            'SCRIPT_NAME': '',
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': '80',
            'SERVER_PROTOCOL': 'HTTP/1.1',
            'wsgi.version': (1,0),
            'wsgi.url_scheme': 'http',
            'wsgi.errors': self.errors,
            'wsgi.multiprocess': True,
            'wsgi.multithread': False,
            'wsgi.run_once': False,
        }
        # Test-client defaults first, then caller overrides.
        environ.update(self.defaults)
        environ.update(request)
        return WSGIRequest(environ)
def _mock_request():
    """
    Construct and return a mock ``HttpRequest`` object; this is used
    in testing backend methods which expect an ``HttpRequest`` but
    which are not being called from views.
    """
    client = _MockRequestClient()
    return client.request()
class BackendRetrievalTests(TestCase):
    """
    Test that utilities for retrieving the active backend work
    properly.
    """
    def test_get_backend(self):
        """
        Verify that ``get_backend()`` returns the correct value when
        passed a valid backend.
        """
        # assertTrue replaces the long-deprecated failUnless alias,
        # which was removed from unittest in Python 3.12.
        self.assertTrue(isinstance(get_backend('registration.backends.default.DefaultBackend'),
                                   DefaultBackend))

    def test_backend_error_invalid(self):
        """
        Test that a nonexistent/unimportable backend raises the
        correct exception.
        """
        self.assertRaises(ImproperlyConfigured, get_backend,
                          'registration.backends.doesnotexist.NonExistentBackend')

    def test_backend_attribute_error(self):
        """
        Test that a backend module which exists but does not have a
        class of the specified name raises the correct exception.
        """
        self.assertRaises(ImproperlyConfigured, get_backend,
                          'registration.backends.default.NonexistentBackend')
class DefaultRegistrationBackendTests(TestCase):
"""
Test the default registration backend.
Running these tests successfull will require two templates to be
created for the sending of activation emails; details on these
templates and their contexts may be found in the documentation for
the default backend.
"""
def setUp(self):
"""
Create an instance of the default backend for use in testing,
and set ``ACCOUNT_ACTIVATION_DAYS`` if it's not set already.
"""
from registration.backends.default import DefaultBackend
self.backend = DefaultBackend()
self.old_activation = getattr(settings, 'ACCOUNT_ACTIVATION_DAYS', None)
if self.old_activation is None:
settings.ACCOUNT_ACTIVATION_DAYS = 7
def tearDown(self):
"""
Yank out ``ACCOUNT_ACTIVATION_DAYS`` back out if it wasn't
originally set.
"""
if self.old_activation is None:
settings.ACCOUNT_ACTIVATION_DAYS = self.old_activation
def test_registration(self):
"""
Test the registration process: registration creates a new
inactive account and a new profile with activation key,
populates the correct account data and sends an activation
email.
"""
new_user = self.backend.register(_mock_request(),
username='bob',
email='bob@example.com',
password1='secret')
# Details of the returned user must match what went in.
self.assertEqual(new_user.username, 'bob')
self.failUnless(new_user.check_password('secret'))
self.assertEqual(new_user.email, 'bob@example.com')
# New user must not be active.
self.failIf(new_user.is_active)
# A registration profile was created, and an activation email
# was sent.
self.assertEqual(RegistrationProfile.objects.count(), 1)
self.assertEqual(len(mail.outbox), 1)
def test_registration_no_sites(self):
"""
Test that registration still functions properly when
``django.contrib.sites`` is not installed; the fallback will
be a ``RequestSite`` instance.
"""
Site._meta.installed = False
new_user = self.backend.register(_mock_request(),
username='bob',
email='bob@example.com',
password1='secret')
self.assertEqual(new_user.username, 'bob')
self.failUnless(new_user.check_password('secret'))
self.assertEqual(new_user.email, 'bob@example.com')
self.failIf(new_user.is_active)
self.assertEqual(RegistrationProfile.objects.count(), 1)
self.assertEqual(len(mail.outbox), 1)
Site._meta.installed = True
def test_valid_activation(self):
"""
Test the activation process: activating within the permitted
window sets the account's ``is_active`` field to ``True`` and
resets the activation key.
"""
valid_user = self.backend.register(_mock_request(),
username='alice',
email='alice@example.com',
password1='swordfish')
valid_profile = RegistrationProfile.objects.get(user=valid_user)
activated = self.backend.activate(_mock_request(),
valid_profile.activation_key)
self.assertEqual(activated.username, valid_user.username)
self.failUnless(activated.is_active)
# Fetch the profile again to verify its activation key has
# been reset.
valid_profile = RegistrationProfile.objects.get(user=valid_user)
self.assertEqual(valid_profile.activation_key,
RegistrationProfile.ACTIVATED)
def test_invalid_activation(self):
"""
Test the activation process: trying to activate outside the
permitted window fails, and leaves the account inactive.
"""
expired_user = self.backend.register(_mock_request(),
username='bob',
email='bob@example.com',
password1='secret')
expired_user.date_joined = expired_user.date_joined - datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)
expired_user.save()
expired_profile = RegistrationProfile.objects.get(user=expired_user)
self.failIf(self.backend.activate(_mock_request(),
expired_profile.activation_key))
self.failUnless(expired_profile.activation_key_expired())
def test_allow(self):
"""
Test that the setting ``REGISTRATION_OPEN`` appropriately
controls whether registration is permitted.
"""
old_allowed = getattr(settings, 'REGISTRATION_OPEN', True)
settings.REGISTRATION_OPEN = True
self.failUnless(self.backend.registration_allowed(_mock_request()))
settings.REGISTRATION_OPEN = False
self.failIf(self.backend.registration_allowed(_mock_request()))
settings.REGISTRATION_OPEN = old_allowed
def test_form_class(self):
"""
Test that the default form class returned is
``registration.forms.RegistrationForm``.
"""
self.failUnless(self.backend.get_form_class(_mock_request()) is forms.RegistrationForm)
def test_post_registration_redirect(self):
"""
Test that the default post-registration redirect is the named
pattern ``registration_complete``.
"""
self.assertEqual(self.backend.post_registration_redirect(_mock_request(), User()),
('registration_complete', (), {}))
def test_registration_signal(self):
"""
Test that registering a user sends the ``user_registered``
signal.
"""
def receiver(sender, **kwargs):
self.failUnless('user' in kwargs)
self.assertEqual(kwargs['user'].username, 'bob')
self.failUnless('request' in kwargs)
self.failUnless(isinstance(kwargs['request'], WSGIRequest))
received_signals.append(kwargs.get('signal'))
received_signals = []
signals.user_registered.connect(receiver, sender=self.backend.__class__)
self.backend.register(_mock_request(),
username='bob',
email='bob@example.com',
password1='secret')
self.assertEqual(len(received_signals), 1)
self.assertEqual(received_signals, [signals.user_registered])
def test_activation_signal_success(self):
"""
Test that successfully activating a user sends the
``user_activated`` signal.
"""
def receiver(sender, **kwargs):
self.failUnless('user' in kwargs)
self.assertEqual(kwargs['user'].username, 'bob')
self.failUnless('request' in kwargs)
self.failUnless(isinstance(kwargs['request'], WSGIRequest))
received_signals.append(kwargs.get('signal'))
received_signals = []
signals.user_activated.connect(receiver, sender=self.backend.__class__)
new_user = self.backend.register(_mock_request(),
username='bob',
email='bob@example.com',
password1='secret')
profile = RegistrationProfile.objects.get(user=new_user)
self.backend.activate(_mock_request(), profile.activation_key)
self.assertEqual(len(received_signals), 1)
self.assertEqual(received_signals, [signals.user_activated])
def test_activation_signal_failure(self):
"""
Test that an unsuccessful activation attempt does not send the
``user_activated`` signal.
"""
receiver = lambda sender, **kwargs: received_signals.append(kwargs.get('signal'))
received_signals = []
signals.user_activated.connect(receiver, sender=self.backend.__class__)
new_user = self.backend.register(_mock_request(),
username='bob',
email='bob@example.com',
password1='secret')
new_user.date_joined -= datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS + 1)
new_user.save()
profile = RegistrationProfile.objects.get(user=new_user)
self.backend.activate(_mock_request(), profile.activation_key)
self.assertEqual(len(received_signals), 0)
def test_email_send_action(self):
"""
Test re-sending of activation emails via admin action.
"""
admin_class = RegistrationAdmin(RegistrationProfile, admin.site)
alice = self.backend.register(_mock_request(),
username='alice',
email='alice@example.com',
password1='swordfish')
admin_class.resend_activation_email(_mock_request(),
RegistrationProfile.objects.all())
self.assertEqual(len(mail.outbox), 2) # One on registering, one more on the resend.
RegistrationProfile.objects.filter(user=alice).update(activation_key=RegistrationProfile.ACTIVATED)
admin_class.resend_activation_email(_mock_request(),
RegistrationProfile.objects.all())
self.assertEqual(len(mail.outbox), 2) # No additional email because the account has activated.
def test_activation_action(self):
"""
Test manual activation of users view admin action.
"""
admin_class = RegistrationAdmin(RegistrationProfile, admin.site)
alice = self.backend.register(_mock_request(),
username='alice',
email='alice@example.com',
password1='swordfish')
admin_class.activate_users(_mock_request(),
RegistrationProfile.objects.all())
self.failUnless(User.objects.get(username='alice').is_active)
| |
from sympy.core import (Expr, S, C, Symbol, Equality, Interval, sympify, Wild,
Tuple, Dummy)
from sympy.solvers import solve
from sympy.utilities import flatten
class Sum(Expr):
    """Represents an unevaluated summation.

    The stored args are ``(function, limit, limit, ...)`` where each limit
    is a ``Tuple`` of length 1 ``(i,)``, 2 ``(i, v)`` or 3 ``(i, a, b)``.
    """

    def __new__(cls, f, *symbols, **assumptions):
        # Coerce the summand and normalize each summation-variable spec
        # (Symbol, Equality or tuple/list) into a Tuple-based limit.
        f = sympify(f)

        if not symbols:
            raise ValueError("No symbols given.")
        else:
            limits = []

            for V in symbols:
                if isinstance(V, Symbol):
                    # bare variable: (i,)
                    limits.append(Tuple(V))
                    continue
                elif isinstance(V, Equality):
                    if isinstance(V.lhs, Symbol):
                        if isinstance(V.rhs, Interval):
                            # i == Interval(a, b)  ->  (i, a, b)
                            limits.append(Tuple(V.lhs, V.rhs.start, V.rhs.end))
                        else:
                            # i == v  ->  (i, v)
                            limits.append(Tuple(V.lhs, V.rhs))
                        continue
                elif isinstance(V, (tuple, list, Tuple)):
                    V = flatten(V)
                    if len(V) == 1:
                        if isinstance(V[0], Symbol):
                            limits.append(Tuple(V[0]))
                        continue
                    elif len(V) in (2, 3):
                        if isinstance(V[0], Symbol):
                            limits.append(Tuple(*map(sympify, V)))
                        continue

                # Anything that did not hit a `continue` above is malformed.
                raise ValueError("Invalid summation variable or limits")

        obj = Expr.__new__(cls, **assumptions)
        arglist = [f]
        arglist.extend(limits)
        obj._args = tuple(arglist)

        return obj

    @property
    def function(self):
        """The summand (first stored argument)."""
        return self._args[0]

    @property
    def limits(self):
        """The limit Tuples (all stored arguments after the summand)."""
        return self._args[1:]

    @property
    def variables(self):
        """Return a list of the summation variables

        >>> from sympy import Sum
        >>> from sympy.abc import x, i
        >>> Sum(x**i, (i, 1, 3)).variables
        [i]
        """
        return [l[0] for l in self.limits]

    @property
    def free_symbols(self):
        """
        This method returns the symbols that will exist when the
        summation is evaluated. This is useful if one is trying to
        determine whether a sum is dependent on a certain
        symbol or not.

        >>> from sympy import Sum
        >>> from sympy.abc import x, y
        >>> Sum(x, (x, y, 1)).free_symbols
        set([y])
        """
        # analyze the summation
        # >>> Sum(x*y,(x,1,2),(y,1,3)).args
        # (x*y, Tuple(x, 1, 2), Tuple(y, 1, 3))
        # >>> Sum(x, x, y).args
        # (x, Tuple(x), Tuple(y))
        intgrl = self
        args = intgrl.args
        integrand, limits = args[0], args[1:]
        if integrand.is_zero:
            return set()
        isyms = integrand.free_symbols
        for ilim in limits:
            if len(ilim) == 1:
                # bare variable limit: the variable itself stays free
                isyms.add(ilim[0])
                continue
            # take out the target symbol
            if ilim[0] in isyms:
                isyms.remove(ilim[0])
            if len(ilim) == 3 and ilim[1] == ilim[2]:
                # if two limits are the same the sum is 0
                # and there are no symbols
                return set()
            # add in the new symbols
            for i in ilim[1:]:
                isyms.update(i.free_symbols)
        return isyms

    def doit(self, **hints):
        """Evaluate the summation.

        NOTE(review): the unpacking below assumes every limit is a full
        3-tuple (i, a, b); 1- or 2-element limits would raise -- confirm
        callers always supply bounds.
        """
        #if not hints.get('sums', True):
        #    return self

        f = self.function
        for i, a, b in self.limits:
            f = eval_sum(f, (i, a, b))
            if f is None:
                # no closed form for this limit; keep the Sum unevaluated
                return self

        if hints.get('deep', True):
            return f.doit(**hints)
        else:
            return f

    def _eval_summation(self, f, x):
        # Hook for subclasses; the base class has no special summation rule.
        return

    def euler_maclaurin(self, m=0, n=0, eps=0, eval_integral=True):
        """
        Return an Euler-Maclaurin approximation of self, where m is the
        number of leading terms to sum directly and n is the number of
        terms in the tail.

        With m = n = 0, this is simply the corresponding integral
        plus a first-order endpoint correction.

        Returns (s, e) where s is the Euler-Maclaurin approximation
        and e is the estimated error (taken to be the magnitude of
        the first omitted term in the tail):

        >>> from sympy.abc import k, a, b
        >>> from sympy import Sum
        >>> Sum(1/k, (k, 2, 5)).doit().evalf()
        1.28333333333333
        >>> s, e = Sum(1/k, (k, 2, 5)).euler_maclaurin()
        >>> s
        7/20 - log(2) + log(5)
        >>> from sympy import sstr
        >>> print sstr((s.evalf(), e.evalf()), full_prec=True)
        (1.26629073187416, 0.0175000000000000)

        The endpoints may be symbolic:

        >>> s, e = Sum(1/k, (k, a, b)).euler_maclaurin()
        >>> s
        -log(a) + log(b) + 1/(2*a) + 1/(2*b)
        >>> e
        Abs(-1/(12*b**2) + 1/(12*a**2))

        If the function is a polynomial of degree at most 2n+1, the
        Euler-Maclaurin formula becomes exact (and e = 0 is returned):

        >>> Sum(k, (k, 2, b)).euler_maclaurin()
        (-1 + b/2 + b**2/2, 0)
        >>> Sum(k, (k, 2, b)).doit()
        -1 + b/2 + b**2/2

        With a nonzero eps specified, the summation is ended
        as soon as the remainder term is less than the epsilon.
        """
        m = int(m)
        n = int(n)
        f = self.function
        # only single sums are supported here
        assert len(self.limits) == 1
        i, a, b = self.limits[0]
        s = S.Zero
        if m:
            # sum the first m terms directly, stopping early if eps is hit
            for k in range(m):
                term = f.subs(i, a+k)
                if (eps and term and abs(term.evalf(3)) < eps):
                    return s, abs(term)
                s += term
            a += m
        x = Dummy('x')
        # the integral part of the Euler-Maclaurin formula
        I = C.Integral(f.subs(i, x), (x, a, b))
        if eval_integral:
            I = I.doit()
        s += I

        def fpoint(expr):
            # evaluate expr at both endpoints (0 at an infinite upper bound)
            if b is S.Infinity:
                return expr.subs(i, a), 0
            return expr.subs(i, a), expr.subs(i, b)
        fa, fb = fpoint(f)
        iterm = (fa + fb)/2
        g = f.diff(i)
        # tail correction: Bernoulli-number terms built from odd derivatives
        for k in xrange(1, n+2):
            ga, gb = fpoint(g)
            term = C.bernoulli(2*k)/C.Factorial(2*k)*(gb-ga)
            if (eps and term and abs(term.evalf(3)) < eps) or (k > n):
                break
            s += term
            g = g.diff(i, 2)
        # the first omitted term serves as the error estimate
        return s + iterm, abs(term)

    def _eval_subs(self, old, new):
        # Substitute in the summand and the limit bounds, but never in the
        # summation variables themselves.
        if self == old:
            return new
        newlimits = []
        for lim in self.limits:
            if lim[0] == old:
                # cannot substitute a bound variable; leave the Sum untouched
                return self
            # NOTE(review): assumes each limit is a 3-tuple (i, a, b);
            # shorter limits would raise IndexError here -- confirm.
            newlimits.append( (lim[0],lim[1].subs(old,new),lim[2].subs(old,new)) )
        return Sum(self.args[0].subs(old, new), *newlimits)
def summation(f, *symbols, **kwargs):
    """
    Compute the summation of f with respect to symbols.

    The notation for symbols is similar to the notation used in Integral:
    summation(f, (i, a, b)) computes the sum of f with respect to i from
    a to b.  If no closed form is found, an unevaluated Sum object is
    returned.  Repeated sums can be computed by introducing additional
    symbols tuples::

        >>> from sympy import summation, oo, symbols, log
        >>> i, n, m = symbols('i n m', integer=True)

        >>> summation(2*i - 1, (i, 1, n))
        n**2
        >>> summation(1/2**i, (i, 0, oo))
        2
        >>> summation(1/log(n)**n, (n, 2, oo))
        Sum(log(n)**(-n), (n, 2, oo))
        >>> summation(i, (i, 0, n), (n, 0, m))
        m/3 + m**2/2 + m**3/6

    """
    unevaluated_sum = Sum(f, *symbols, **kwargs)
    return unevaluated_sum.doit(deep=False)
def telescopic_direct(L, R, n, limits):
    """Returns the direct summation of the terms of a telescopic sum

    L is the term with lower index
    R is the term with higher index
    n difference between the indexes of L and R

    For example:

    >>> from sympy.concrete.summations import telescopic_direct
    >>> from sympy.abc import k, a, b
    >>> telescopic_direct(1/k, -1/(k+2), 2, (k, a, b))
    1/a + 1/(1 + a) - 1/(1 + b) - 1/(2 + b)

    """
    # BUGFIX: the tuple parameter `(i, a, b)` in the signature (PEP 3113)
    # was removed in Python 3; unpack explicitly instead.  Callers still
    # pass a single 3-tuple, so the call syntax is unchanged.
    (i, a, b) = limits
    s = 0
    for m in xrange(n):
        s += L.subs(i, a+m) + R.subs(i, b-m)
    return s
def telescopic(L, R, limits):
    '''Tries to perform the summation of L + R over (i, a, b) using the
    telescopic property.

    Returns None if the terms do not telescope.
    '''
    # BUGFIX: tuple parameters (PEP 3113) were removed in Python 3;
    # unpack the limits explicitly.  Call syntax is unchanged.
    (i, a, b) = limits
    if L.is_Add or R.is_Add:
        return None
    s = None
    # First we try to solve using match
    # Maybe this should go inside solve
    k = Wild("k")
    sol = (-R).match(L.subs(i, i + k))
    if sol and k in sol:
        if L.subs(i, i + sol[k]) == -R:
            # sometimes match fail(f(x+2).match(-f(x+k))->{k: -2 - 2x}))
            s = sol[k]
    # Then we try to solve using solve
    if not s or not s.is_Integer:
        m = Symbol("m")
        try:
            s = solve(L.subs(i, i + m) + R, m)[0]
        except IndexError:  # (ValueError, IndexError):
            pass
    if s and s.is_Integer:
        if s < 0:
            return telescopic_direct(R, L, abs(s), (i, a, b))
        elif s > 0:
            return telescopic_direct(L, R, s, (i, a, b))
    return None
def eval_sum(f, limits):
    """Sum f over the limits (i, a, b); returns None when no form is found."""
    # BUGFIX: tuple parameters (PEP 3113) were removed in Python 3;
    # unpack the limits explicitly.  Call syntax is unchanged.
    (i, a, b) = limits
    if f.is_Number:
        if f is S.NaN:
            return S.NaN
        elif f is S.Zero:
            return S.Zero

    if not f.has(i):
        # constant summand: a + ... + b has (b - a + 1) terms
        return f*(b-a+1)

    definite = a.is_Integer and b.is_Integer
    # Doing it directly may be faster if there are very few terms.
    if definite and (b-a < 100):
        return eval_sum_direct(f, (i, a, b))

    # Try to do it symbolically. Even when the number of terms is known,
    # this can save time when b-a is big.
    # We should try to transform to partial fractions
    value = eval_sum_symbolic(f.expand(), (i, a, b))
    if value is not None:
        return value

    # Do it directly
    if definite:
        return eval_sum_direct(f, (i, a, b))
def eval_sum_symbolic(f, limits):
    """Try to find a closed form for Sum(f, (i, a, b)); None on failure."""
    # BUGFIX: tuple parameters (PEP 3113) were removed in Python 3;
    # unpack the limits explicitly.  Call syntax is unchanged.
    (i, a, b) = limits
    if not f.has(i):
        return f*(b-a+1)

    # Linearity
    if f.is_Mul:
        L, R = f.as_two_terms()
        if not L.has(i):
            sR = eval_sum_symbolic(R, (i, a, b))
            if sR: return L*sR
        if not R.has(i):
            sL = eval_sum_symbolic(L, (i, a, b))
            if sL: return R*sL

    if f.is_Add:
        L, R = f.as_two_terms()
        lrsum = telescopic(L, R, (i, a, b))
        if lrsum: return lrsum
        lsum = eval_sum_symbolic(L, (i, a, b))
        rsum = eval_sum_symbolic(R, (i, a, b))
        if None not in (lsum, rsum):
            return lsum + rsum

    # Polynomial terms with Faulhaber's formula
    p = C.Wild('p')
    e = f.match(i**p)
    if e is not None:
        c = p.subs(e)
        B = C.bernoulli
        if c.is_integer and c >= 0:
            s = (B(c+1, b+1) - B(c+1, a))/(c+1)
            return s.expand()

    # Geometric terms
    c1 = C.Wild('c1', exclude=[i])
    c2 = C.Wild('c2', exclude=[i])
    c3 = C.Wild('c3', exclude=[i])

    e = f.match(c1**(c2*i+c3))
    if e is not None:
        c1 = c1.subs(e)
        c2 = c2.subs(e)
        c3 = c3.subs(e)

        # TODO: more general limit handling
        return c1**c3 * (c1**(a*c2) - c1**(c2+b*c2)) / (1 - c1**c2)

    return None
def eval_sum_direct(expr, limits):
    """Evaluate Sum(expr, (i, a, b)) term-by-term over integer bounds."""
    # BUGFIX: tuple parameters (PEP 3113) were removed in Python 3;
    # unpack the limits explicitly.  Call syntax is unchanged.
    (i, a, b) = limits
    s = S.Zero
    if expr.has(i):
        for j in xrange(a, b+1):
            s += expr.subs(i, j)
    else:
        # constant summand: still accumulate to preserve the original
        # behaviour exactly (e.g. with non-commutative terms)
        for j in xrange(a, b+1):
            s += expr
    return s
| |
from py2neo import neo4j
from flask import jsonify, abort, redirect,url_for, render_template
import ast
import json, uuid
import logging
from utils import getGraph, save_file, send_email, _remove_file
from uuid_token import generate_confirmation_token
from datetime import datetime, date
from user_authentification import User
import flask_login
from imageConverter import *
import os
basedir = os.path.abspath(os.path.dirname(__file__))
# input: python dict {'fullname':'Juan Lopez','email': 'jj@gmail.com', 'username': 'jlopezvi',
# 'position': 'employee', 'group': 'IT', 'password': 'MD5password',
# 'host_email': 'asdf@das' / None, 'ifpublicprofile': True/ False,
# 'ifregistrationfromemail': True / False, 'profilepic_url': 'static/.../sth.jpg'}
# output: json
# 1. Wrong --> {"result":"Wrong","ifemailexists":true,"ifemailexists_msg":ifemailexists_msg[true]}
# 2. OK (registered participant but e-mail not verified yet. Sends new e-mail for verification) -->
# {"result":"OK","ifemailexists":true,"ifemailexists_msg":ifemailexists_msg[true],
# "ifemailverified":false,"ifemailverified_msg":ifemailverified_msg[false]}
# 3. OK (4 different normal cases of registration)
# {"result":"OK", "ifhost":true/false,"ifhost_msg":ifhost_msg[ifhost],
# "ifemailverified":true/false,"ifemailverified_msg":ifemailverified_msg[email_verified]})
def registration_aux(inputdict):
    """Register the participant described by *inputdict*; returns a JSON verdict.

    Three situations are handled:
      1. the e-mail belongs to a verified participant -> "Wrong";
      2. it belongs to an unverified participant -> resend verification e-mail;
      3. it is new -> create the participant (normal registration).
    """
    email = inputdict.get('email')
    ifemailverified_msg = ["E-mail not verified. E-mail verification sent. " \
        "Close this window and check your e-mail within the next few minutes ", None]
    ifhost_msg = [None, "You will be following your host in Consensus"]
    if _get_participant_node(email, 'all'):  # email exists? (Exceptional cases of registration)
        ifemailexists = True
        ifemailexists_msg = "Participant already exists"
        if _get_participant_node(email):  # participant's email is verified?
            ifemailverified = True
            return jsonify({"result": "Wrong", "ifemailexists": ifemailexists, "ifemailexists_msg": ifemailexists_msg})
        else:
            ifemailverified = False
            result_send_emailverification = _registration_send_emailverification(email)
            # BUGFIX: compare strings with ==, not "is" -- identity comparison
            # only worked by accident of CPython string interning.
            if result_send_emailverification == "OK":
                return jsonify({"result": "OK: Participant registered previously, resend email verification",
                    "ifemailexists": ifemailexists, "ifemailexists_msg": ifemailexists_msg,
                    "ifemailverified": ifemailverified, "ifemailverified_msg": ifemailverified_msg[ifemailverified]})
            else:
                raise NameError('PROBLEM')
    # (Normal cases of registration)
    # save data for new (verified / unverified) participant in database
    ifemailverified = inputdict.get('ifregistrationfromemail')
    _newParticipant(inputdict)
    if ifemailverified is True:
        user = User(email)
        flask_login.login_user(user)
    else:
        result_send_emailverification2 = _registration_send_emailverification(email)
        # BUGFIX: == instead of "is" (see above).
        if result_send_emailverification2 == "OK":
            pass
        else:
            raise NameError('PROBLEM')
    ifhost = False
    if inputdict.get('host_email') is not None:
        # current_participant (verified/unverified) follows host
        # BUGFIX: the helper may return [False, msg] (a truthy list); coerce
        # to a strict bool so ifhost can safely index ifhost_msg below.
        ifhost = _if_added_following_contact_to_user(inputdict.get('host_email'), email) is True
    return jsonify({"result": "OK", "ifhost": ifhost, "ifhost_msg": ifhost_msg[ifhost],
        "ifemailverified": ifemailverified, "ifemailverified_msg": ifemailverified_msg[ifemailverified]})
# Used By <registration_aux>
def _newParticipant(participantdict):
    """Create the participant node in the graph and index it by e-mail.

    Verified registrations (ifregistrationfromemail=True) are labelled
    "participant"; unverified ones "unverified_participant".
    """
    email = participantdict.get('email')
    timestamp = (datetime.now()).strftime("%d.%m.%Y %H:%M:%S")
    # image goes from base64 to separate JPG file
    code_uuid = str(uuid.uuid4())
    # BUGFIX: read 'profilepic' with .get() so a missing key falls back to
    # the default avatar instead of raising KeyError (every other field in
    # this function is already read with .get()).
    if participantdict.get('profilepic') is None:
        profilepic_url = '/static/images/perfil-mediano.png'
    else:
        profilepic_url = base64ToJGP(participantdict['profilepic'], '/profilepics/'+code_uuid)
    newparticipant, = getGraph().create({"fullname": participantdict.get('fullname'), "email": email,
        "username": participantdict.get('username'), "position": participantdict.get('position'),
        "group": participantdict.get('group'), "password": participantdict.get('password'),
        "ifpublicprofile": participantdict.get('ifpublicprofile'),
        "profilepic_url": profilepic_url, "ifsupportingproposalsvisible": True,
        "ifrejectingproposalsvisible": True, "timestamp": timestamp
        })
    if participantdict.get('ifregistrationfromemail') is True:
        newparticipant.add_labels("participant")
        _addToParticipantsIndex(email, newparticipant)
    elif participantdict.get('ifregistrationfromemail') is False:
        newparticipant.add_labels("unverified_participant")
        _addToUnverifiedParticipantsIndex(email, newparticipant)
    return {'result': 'OK'}
def modify_user_data_aux(user_data, user_email):
    """Update a participant's editable fields, picture and/or e-mail.

    Only whitelisted fields are copied onto the node.  A new profile
    picture replaces (and unlinks) the old one; a new e-mail re-indexes
    the node and re-logs the user in.
    """
    participant = _get_participant_node(user_email)
    fields = ['position', 'group', 'password', 'ifsupportingproposalsvisible',
              'ifrejectingproposalsvisible',
              'username', 'ifpublicprofile', 'fullname']
    data = {}
    for k, v in user_data.items():
        if k in fields:
            data[k] = v
    for k, v in data.items():
        participant[k] = v
    if 'profilepic' in user_data:
        # image goes from base64 to separate JPG file
        code_uuid = str(uuid.uuid4())
        if user_data['profilepic'] is None:
            profilepic_url = '/static/images/perfil-mediano.png'
        else:
            profilepic_url = base64ToJGP(user_data['profilepic'], '/profilepics/' + code_uuid)
        path = basedir + participant["profilepic_url"]
        # BUGFIX: only unlink per-user pictures; the old code could delete
        # the shared default avatar.  Same guard as remove_user_aux.
        if participant["profilepic_url"].startswith('/static/images/profilepics/') and os.path.isfile(path):
            os.remove(path)
        participant["profilepic_url"] = profilepic_url
    if 'new_email' in user_data:
        if user_data['new_email'] != user_email:
            new_email = user_data['new_email']
            if _get_participant_node(new_email, 'all'):  # email exists?
                return jsonify({'result': 'Wrong: New e-mail already exists'})
            _removeFromParticipantsIndex(user_email, participant)
            _addToParticipantsIndex(new_email, participant)
            # this line may be unnecessary or even a problem
            participant['email'] = new_email
            user = User(new_email)
            flask_login.login_user(user)
    # TODO: change profilepic_url in database and profilepic filename, provided there is a picture
    return jsonify({'result': 'OK'})
def modify_user_password_aux(user_password_data, user_email):
    """Change the participant's password after verifying the current one."""
    participant = _get_participant_node(user_email)
    if participant['password'] != user_password_data['old_password']:
        return jsonify({"result": "Wrong: Wrong current password"})
    participant['password'] = user_password_data['new_password']
    return jsonify({"result": "OK"})
def get_user_data_aux(user_email):
    """Return the complete property map of the participant node as JSON."""
    properties = _get_participant_node(user_email).get_properties()
    return jsonify({"result": "OK", "data": properties})
def remove_user_aux(user_email) :
    """Delete a participant: their created ideas, every relationship touching
    them, their uploaded profile picture, and finally the node itself.

    Order matters: ideas and relationships must go before node deletion.
    """
    from ideaManager import _remove_idea
    user = _get_participant_node(user_email, 'all')
    created_ideas = [x.end_node for x in list(getGraph().match(start_node=user, rel_type="CREATED"))]
    for created_idea in created_ideas:
        _remove_idea(created_idea)
        # for rel in getGraph().match(start_node=created_idea, bidirectional=True):
        #     rel.delete()
        # created_idea.delete()
    for rel2 in getGraph().match(start_node=user, bidirectional=True):
        rel2.delete()
    # Only unlink per-user pictures, never the shared default avatar.
    if user['profilepic_url'].startswith('/static/images/profilepics/'):
        _remove_file(user['profilepic_url'])
    user.delete()
    return jsonify({'result': 'OK'})
def get_participant_data_aux(participant_email, user_email):
    """Summary profile data for *participant_email* as seen by *user_email*.

    Allowed when the requester is the participant, follows them, or the
    profile is public; otherwise only ifallowed=False is returned.
    """
    from ideaManager import _get_ideas_created_by_participant_for_user
    user = _get_participant_node(user_email)
    participant = _get_participant_node(participant_email)
    # Fetch the property map once instead of one round-trip per field.
    props = participant.get_properties()
    ifpublicprofile = props['ifpublicprofile']
    participant_data = {}
    if participant_email == user_email or _getIfContactRelationshipExists(participant, user) is True \
            or ifpublicprofile is True:
        ifallowed = True
        followers_num = len(_get_participant_followers(participant_email))
        followings_num = len(_get_participant_followings(participant_email))
        ideas_num = len(_get_ideas_created_by_participant_for_user(participant_email, user_email)['ideas_indices'])
        participant_data.update({'id': participant_email, 'profilepic_url': props['profilepic_url'],
                                 'username': props['username'], 'fullname': props['fullname'],
                                 'ideas_num': ideas_num,
                                 'followers_num': followers_num,
                                 'followings_num': followings_num})
    else:
        ifallowed = False
    return jsonify({"result": "OK", 'ifallowed': ifallowed, "participant_data": participant_data})
def get_participant_data_by_email_unrestricted_aux(participant_email, user_email):
    """JSON wrapper around the unrestricted participant summary."""
    summary = _get_participant_summary_data_unrestricted(participant_email, user_email)
    return jsonify(summary)
def if_participant_exists_by_email_aux(participant_email):
    """JSON boolean: does any participant (verified or not) use this e-mail?"""
    node = _get_participant_node(participant_email, 'all')
    if node:
        return jsonify({"result": True})
    return jsonify({"result": False})
def get_fullname_for_participant_aux(participant_email, user_email):
    """Full name of a participant, subject to the visibility rules."""
    user = _get_participant_node(user_email)
    participant = _get_participant_node(participant_email)
    is_public = participant.get_properties()['ifpublicprofile']
    # Visible to oneself, to followers of the participant, or when public.
    ifallowed = (participant_email == user_email
                 or _getIfContactRelationshipExists(participant, user) is True
                 or is_public is True)
    fullname = participant["fullname"] if ifallowed else None
    return jsonify({"result": "OK", "ifallowed": ifallowed, "fullname": fullname})
def get_fullname_for_participant_unrestricted_aux(participant_email):
    """Full name of a participant, with no visibility checks."""
    node = _get_participant_node(participant_email)
    return jsonify({"result": "OK", "fullname": node["fullname"]})
def get_participant_followings_info_aux(participant_email, user_email):
    """Followings of a participant; the detailed list only when visible.

    The count is always returned; the per-contact info only when the
    requester is the participant, follows them, or the profile is public.
    """
    user = _get_participant_node(user_email)
    participant = _get_participant_node(participant_email)
    is_public = participant['ifpublicprofile']
    ifallowed = ((participant_email == user_email)
                 or (_getIfContactRelationshipExists(participant, user) is True)
                 or (is_public is True))
    followings = _get_participant_followings(participant_email)
    followings_num = len(followings)
    followings_info = []
    if ifallowed:
        for node in followings:
            followings_info.append({'email': node['email'], 'username': node['username'],
                                    'fullname': node['fullname'], 'profilepic_url': node['profilepic_url']})
    return jsonify({"result": "OK", "ifallowed": ifallowed, "followings_num": followings_num, "followings_info": followings_info})
def get_participant_followers_info_aux(participant_email, user_email):
    """Followers of a participant; the detailed list only when visible.

    The count is always returned; the per-contact info only when the
    requester is the participant, follows them, or the profile is public.
    """
    user = _get_participant_node(user_email)
    participant = _get_participant_node(participant_email)
    is_public = participant['ifpublicprofile']
    ifallowed = ((participant_email == user_email)
                 or (_getIfContactRelationshipExists(participant, user) is True)
                 or (is_public is True))
    followers = _get_participant_followers(participant_email)
    followers_num = len(followers)
    followers_info = []
    if ifallowed:
        for node in followers:
            followers_info.append({'email': node['email'], 'username': node['username'],
                                   'fullname': node['fullname'], 'profilepic_url': node['profilepic_url']})
    return jsonify({"result": "OK", "ifallowed": ifallowed, "followers_num": followers_num, "followers_info": followers_info})
def add_following_contact_to_user_aux(followingcontact_email, user_email):
    """Make the user follow a contact; notify the contact on success."""
    if _if_added_following_contact_to_user(followingcontact_email, user_email) is True:
        _add_newfollower_notification_from_participant1_to_participant2(user_email, followingcontact_email)
        return jsonify({"result": "OK", "result_msg": "Following contact was added"})
    return jsonify({"result": "Wrong", "result_msg": "Following contact not possible or exists already"})
def remove_following_contact_to_user_aux(followingcontact_email, user_email):
    """Stop the user following a contact."""
    removed = _if_removed_following_contact_to_user(followingcontact_email, user_email)
    if removed is not True:
        return jsonify({"result": "Wrong", "result_msg": "Following contact does not exist"})
    return jsonify({"result": "OK", "result_msg": "Following contact was removed"})
def get_all_participants_admin_aux():
    """Property maps of every verified participant (admin view)."""
    return [node.get_properties()
            for node in _getParticipantsIndex().query("email:*")]
def get_all_public_participants_for_user_aux(user):
    """List every public participant except *user*, with a follow flag."""
    participant = _get_participant_node(user)
    allnodes = _getParticipantsIndex().query("email:*")
    participants = []
    for node in allnodes:
        # One property fetch per node instead of six separate
        # get_properties() round-trips.
        props = node.get_properties()
        if props['ifpublicprofile'] is True and props['email'] != user:
            participants.append(
                {'email': props['email'], 'fullname': props['fullname'],
                 'position': props['position'], 'group': props['group'],
                 'profilepic_url': props['profilepic_url'],
                 'if_following': _getIfContactRelationshipExists(participant, node)})
    return participants
###############################################
# <USED BY MANY FUNCTIONS>
def _addToParticipantsIndex(email, newparticipant) :
    """Index a verified participant node under its e-mail address."""
    index = getGraph().get_or_create_index(neo4j.Node, "Participants")
    index.add("email", email, newparticipant)
def _addToUnverifiedParticipantsIndex(email, newparticipant):
    """Index an unverified participant node under its e-mail address."""
    index = getGraph().get_or_create_index(neo4j.Node, "UnverifiedParticipants")
    index.add("email", email, newparticipant)
def _removeFromParticipantsIndex(email, participant):
    """Drop a verified participant's e-mail entry from the index."""
    index = getGraph().get_or_create_index(neo4j.Node, "Participants")
    index.remove("email", email, participant)
def _removeFromUnverifiedParticipantsIndex(email, participant):
    """Drop an unverified participant's e-mail entry from the index."""
    index = getGraph().get_or_create_index(neo4j.Node, "UnverifiedParticipants")
    index.remove("email", email, participant)
def _getParticipantsIndex():
    """The (lazily created) neo4j index of verified participants."""
    graph = getGraph()
    return graph.get_or_create_index(neo4j.Node, "Participants")
def _getUnverifiedParticipantsIndex():
    """The (lazily created) neo4j index of unverified participants."""
    graph = getGraph()
    return graph.get_or_create_index(neo4j.Node, "UnverifiedParticipants")
# <USED BY MANY FUNCTIONS>
#input: email, ifemailverified_category ('all'/True/False)
#output:
# -> participant_node
# -> None
#TODO: participantFound[0] is a node or a dictionary?
def _get_participant_node(email, ifemailverified_category=True) :
    """Look up a participant node by e-mail.

    ifemailverified_category selects which index to search: True (verified
    only), False (unverified only) or 'all' (verified first, then
    unverified).  Returns the node, or None when not found.
    """
    if ifemailverified_category in ('all', True):
        verified_hits = _getParticipantsIndex().get("email", email)
        if verified_hits:
            return verified_hits[0]  # node
    if ifemailverified_category in ('all', False):
        unverified_hits = _getUnverifiedParticipantsIndex().get("email", email)
        if unverified_hits:
            return unverified_hits[0]  # node
    return None
# follower, following are participant nodes
def _getIfContactRelationshipExists(follower, following):
    """True iff a FOLLOWS relationship exists from *follower* to *following*."""
    rel = getGraph().match_one(start_node=follower, end_node=following, rel_type="FOLLOWS")
    return rel is not None
# Used By <registration_aux>
# input: email to be verified as an argument
# output: e-mail to the email account with a URL token link for email verification
# and returns "OK"
def _registration_send_emailverification(email):
    """E-mail a tokenized confirmation link to *email*; returns "OK"."""
    token = generate_confirmation_token(email)
    confirmation_link = url_for('.registration_receive_emailverification', token=token, _external=True)
    body_html = render_template('login/verification_email.html', confirm_url=confirmation_link)
    send_email(email, "Please confirm your email", body_html)
    return "OK"
# <used by registration_send_invitation>
def _get_fullname_for_participant(participant_email):
    """Return the stored full name for a verified participant."""
    node = _get_participant_node(participant_email)
    return node["fullname"]
# input: participant_email
# output: list of nodes of following contacts
def _get_participant_followings(participant_email) :
    """Nodes this participant follows (end nodes of outgoing FOLLOWS)."""
    participant = _get_participant_node(participant_email)
    return [rel.end_node
            for rel in getGraph().match(start_node=participant, rel_type="FOLLOWS")]
# input: participant_email
# output: list of nodes of follower contacts
def _get_participant_followers(participant_email) :
    """Nodes following this participant (start nodes of incoming FOLLOWS)."""
    participant = _get_participant_node(participant_email)
    return [rel.start_node
            for rel in getGraph().match(end_node=participant, rel_type="FOLLOWS")]
# <USED BY: add_following_contact_to_user_aux(), registration_aux()>
# input: user email, new following contact email
# output:
# -> True
# -> False
# -> [False,'Following contact exists already']
def _if_added_following_contact_to_user(followingcontact_email, user_email) :
    """Create a FOLLOWS relationship user -> followingcontact.

    Returns True on success, False when the contact does not exist or is
    the user itself, and [False, 'Following contact exists already'] when
    the relationship already exists.  NOTE(review): the list return is
    truthy -- callers must compare with ``is True``, not truthiness.
    """
    timestamp = (datetime.now()).strftime("%d.%m.%Y %H:%M:%S")
    user = _get_participant_node(user_email, 'all') # current's email could be unverified
    followingcontact = _get_participant_node(followingcontact_email)
    # NOTE(review): "followingcontact is user" is an identity check; it
    # presumably relies on the index returning the same node object for the
    # same e-mail -- confirm for the py2neo version in use.
    if (followingcontact is None) or (followingcontact is user) :
        return False
    if _getIfContactRelationshipExists(user, followingcontact) is True:
        return [False,'Following contact exists already']
    getGraph().create((user, "FOLLOWS", followingcontact, {"timestamp": timestamp}))
    return True
# <USED BY: remove_following_contact_to_user_aux()>
# input: user email, following contact email
# output:
# -> True
# -> False
def _if_removed_following_contact_to_user(followingcontact_email, user_email) :
    """Delete the FOLLOWS relationship user -> contact; True iff it existed."""
    user = _get_participant_node(user_email, 'all')  # current's email could be unverified
    followingcontact = _get_participant_node(followingcontact_email)
    if _getIfContactRelationshipExists(user, followingcontact) is not True:
        return False
    contact_rel = getGraph().match_one(start_node=user, rel_type="FOLLOWS", end_node=followingcontact)
    getGraph().delete(contact_rel)
    return True
# <Used by /get_participant_data_by_email_unrestricted>
def _get_participant_summary_data_unrestricted(participant_email, user_email):
    """Summary profile dict for a participant, with no visibility checks."""
    from ideaManager import _get_ideas_created_by_participant_for_user
    participant = _get_participant_node(participant_email)
    ideas = _get_ideas_created_by_participant_for_user(participant_email, user_email)
    participant_data = {
        'id': participant_email,
        'profilepic_url': participant['profilepic_url'],
        'username': participant['username'],
        'fullname': participant['fullname'],
        'position': participant['position'],
        'group': participant['group'],
        'ideas_num': len(ideas['ideas_indices']),
        'followers_num': len(_get_participant_followers(participant_email)),
        'followings_num': len(_get_participant_followings(participant_email)),
    }
    return {"result": "OK", "participant_data": participant_data}
####################
####################
# NOTIFICATIONS
###################
####################
from utils import send_email
def get_participantnotifications_for_user_aux(user_email):
    """Collect pending notifications for *user_email* (currently: new followers)."""
    user = _get_participant_node(user_email)
    notifications = []
    # notifications from new followers
    follower_rels = list(getGraph().match(end_node=user, rel_type="FOLLOWS"))
    for follower_rel in follower_rels:
        if follower_rel["ifnotification_newfollower"] is True:
            # BUGFIX: build a fresh dict per notification.  The old code
            # reused a single dict created outside the loop, so every
            # appended entry aliased (and was overwritten by) the last one.
            notifications.append({'notification_type': 'newfollower',
                                  'participant_index': follower_rel.start_node['email']})
    #
    return jsonify({"result": "OK", "data": notifications})
# Used By <remove_notification_to_participant>
def remove_notification_from_participant1_to_participant2_aux(participant1_index, participant2_index, notification_type):
    """Clear the `ifnotification_<type>` flag on the FOLLOWS relationship."""
    participant_sender = _get_participant_node(participant1_index)
    participant_receiver = _get_participant_node(participant2_index)
    notification_field_str = 'ifnotification_' + notification_type
    # remove notification from new followers
    follower_rel_found = getGraph().match_one(start_node=participant_sender, end_node=participant_receiver,
                                              rel_type="FOLLOWS")
    # BUGFIX: match_one returns None when no FOLLOWS relationship exists;
    # guard before subscripting to avoid a TypeError.
    if follower_rel_found is not None and follower_rel_found[notification_field_str] is not None:
        follower_rel_found[notification_field_str] = False
    #
    return jsonify({"result": "OK", "result_msg": "Notification was deleted"})
####################
# PURE INTERNAL
###################
# <Used by add_following_contact_to_user_aux>
# "notification_type": "newfollower"
def _add_newfollower_notification_from_participant1_to_participant2(participant1_email, participant2_email):
    """Flag the FOLLOWS relationship from participant1 to participant2
    so that participant2 receives a 'newfollower' notification.

    NOTE(review): assumes the FOLLOWS relationship already exists (this
    is called right after it is created); `match_one` would return None
    otherwise and the assignment below would raise -- confirm callers.
    NOTE(review): the property is set on the local relationship object;
    whether this persists without an explicit push() depends on the
    py2neo version in use -- verify.
    """
    participant1 = _get_participant_node(participant1_email)
    participant2 = _get_participant_node(participant2_email)
    following_rel = getGraph().match_one(start_node=participant1, rel_type="FOLLOWS", end_node=participant2)
    following_rel["ifnotification_newfollower"] = True
    return
# <Used by add_following_contact_to_user_aux>
# "notification_type": "newfollower"
def _send_newfollower_notification_email_from_participant1_to_participant2(participant1_email, participant2_email):
    """Email participant2 that participant1 started following them."""
    follower = _get_participant_node(participant1_email)
    email_subject = "Consensus, New Notifications"
    email_body = render_template('emails/participant_newfollower.html',
                                 msg_proposal=follower['fullname'])
    send_email(participant2_email, email_subject, email_body)
    return
| |
import time
import numpy as np
from os import makedirs
from os.path import exists, join, basename
class ReadEEG:
    """Reads an EEG recording from disk into a channels x samples array.

    Supports Biosemi BDF ('.bdf') and BrainVision ('.eeg' with companion
    '.vhdr'/'.vmrk') files.  After construction the instance exposes:
    rawdata, labelsChannel, sampleRate, markerTime, markerValue,
    startDate, startTime, dataRecorded, durationRecorded and fileType.

    NOTE(review): the byte-level parsing below relies on Python 2
    semantics (bytes/str equivalence, integer '/') -- confirm before
    porting to Python 3.
    """
    def __init__(self, filename, newSampleRate=0, channels2exclude=[]):
        # Dispatch on the file extension; newSampleRate=0 means "keep
        # the native rate".  NOTE(review): mutable default argument --
        # harmless here because the list is only read, never mutated.
        self.filename = filename
        if filename[-3:] == 'bdf':
            self.readBDF()
        elif filename[-3:] == 'eeg':
            self.readBrainVision()
        # Exclude Channels if necessary
        if channels2exclude != []:
            self.excludeChannels(channels2exclude)
        # Reslice Data if necessary
        if newSampleRate != 0:
            self.resliceData(newSampleRate)
    def readBDF(self):
        """Parses a Biosemi BDF file (24-bit samples) and caches the
        decoded float32 data as a '.lineviewer' memmap next to the
        source file so later loads skip the decode step."""
        self.eegFile = self.filename.replace('.bdf', '.lineviewer')
        with open(self.filename, 'rb') as f:
            # Fixed header offsets follow the BDF/EDF header layout
            offset = 168
            f.seek(offset)
            startDate = f.read(8).strip()
            startTime = f.read(8).strip()
            offset += 68
            f.seek(offset)
            dataRecorded = int(f.read(8))
            durationRecorded = int(f.read(8))
            nbChannels = int(f.read(4))
            labelsChannel = np.array(
                [f.read(16).strip() for i in range(nbChannels)])
            offset += 20 + 216 * nbChannels
            f.seek(offset)
            sampleRate = [int(f.read(8)) for i in range(nbChannels)]
            # assumes all channels share the first channel's sampling
            # rate -- TODO confirm for mixed-rate recordings
            sampleRate = sampleRate[0]
            offset += (40) * nbChannels
        if not exists(self.eegFile):
            # First load: decode the 24-bit little-endian integers to
            # float32 and write them into the cache memmap
            with open(self.filename, 'rb') as f:
                f.seek(offset)
                rawdata = np.fromfile(f, dtype='uint8').reshape(-1, 3)
                zeroarray = np.zeros((rawdata.shape[0], 1), dtype='uint8')
                # 8-bit shift for negative integers
                zeroarray[rawdata[:, 2] >= 128] += 255
                rawdata = np.array(
                    np.hstack((rawdata, zeroarray)), dtype='uint8')
                rawdata = (
                    rawdata.flatten().view('i4') / 32.).astype('float32')
                rawdata = rawdata.reshape(
                    dataRecorded, nbChannels, sampleRate)
                rawdata = np.rollaxis(
                    rawdata, 1).reshape(nbChannels, -1)
                eegFile = np.memmap(self.eegFile, mode='w+', dtype='float32',
                                    shape=rawdata.shape)
                eegFile[:] = rawdata[:]
        # Re-open the cache read-only; data stays on disk until sliced
        rawdata = np.memmap(self.eegFile, mode='r', dtype='float32',
                            shape=(nbChannels, sampleRate * dataRecorded))
        # Create Marker value and timestamp
        status = rawdata[np.where(labelsChannel == 'Status')][0]
        status = status - np.median(status)
        timepoint = (np.diff(status) != 0).nonzero()[0] + 1
        markerTime = timepoint[np.where(status[timepoint] != 0)]
        markerValue = np.uint8(status[markerTime] * 32)
        # Prepare output (the trailing Status channel is dropped)
        self.rawdata = rawdata[:-1, :]
        self.startDate = '20%s/%s/%s' % (
            startDate[6:8], startDate[3:5], startDate[:2])
        self.startTime = startTime
        self.dataRecorded = dataRecorded
        self.durationRecorded = durationRecorded
        self.labelsChannel = labelsChannel[:-1]
        self.sampleRate = sampleRate
        self.markerTime = markerTime
        self.markerValue = np.copy(markerValue).astype('|S16')
        self.fileType = 'BDF'
    def readBrainVision(self):
        """Parses a BrainVision triplet: .vhdr (header), .vmrk (markers)
        and .eeg (binary data, memmapped read-only)."""
        self.eegFile = self.filename
        hdrFile = self.eegFile.replace('.eeg', '.vhdr')
        markerFile = self.eegFile.replace('.eeg', '.vmrk')
        # Aggregate Header Information
        with open(hdrFile) as f:
            tmpHeader = f.readlines()
        tmpHeader = [t.strip() for t in tmpHeader]
        self.labelsChannel = []
        readChannelNames = False
        for line in tmpHeader:
            if readChannelNames:
                # Inside the [Channel Infos] section: lines look like
                # 'ChN=<name>,...'; ';' starts a comment line
                if len(line) == 0 or line[0] == ';':
                    continue
                elif line[0:2] == 'Ch':
                    channelName = line.split(',')[0]
                    idEqual = channelName.index('=')
                    channelName = channelName[idEqual + 1:]
                    self.labelsChannel.append(channelName)
                else:
                    readChannelNames = False
            elif 'SamplingInterval=' in line:
                # Sampling interval is given in microseconds
                sampleInterval = int(line.split('=')[1])
                self.sampleRate = int(1e6 / float(sampleInterval))
            elif 'BinaryFormat=' in line:
                binaryFormat = line.split('=')[1]
            elif 'recording started at' in line:
                self.startTime = line.split('started at')[-1][1:9]
            elif '[Channel Infos]' in line:
                readChannelNames = True
        self.labelsChannel = np.asarray(self.labelsChannel)
        # Aggregate Marker Information
        with open(markerFile) as f:
            tmpMarker = f.readlines()
        tmpMarker = [t.strip() for t in tmpMarker]
        tmpMarker = [m.split(',') for m in tmpMarker if m[:2] == 'Mk']
        self.markerValue = []
        self.markerTime = []
        for e in tmpMarker:
            if 'New Segment' in e[0]:
                # The 'New Segment' marker carries the recording date
                recDate = e[5][:8]
                self.startDate = '%s/%s/%s' % (
                    recDate[6:], recDate[4:6], recDate[:4])
            else:
                self.markerValue.append(e[1])
                self.markerTime.append(int(e[2]))
        self.markerValue = np.array(self.markerValue).astype('|S16')
        self.markerTime = np.array(self.markerTime)
        # Aggregate Data Information
        numberFormat = {'INT_16': np.int16,
                        'IEEE_FLOAT_32': np.float32}
        dataType = numberFormat[binaryFormat]
        rawdata = np.memmap(self.eegFile, dataType, 'r')
        nbChannels = self.labelsChannel.shape[0]
        # NOTE(review): '/' must be integer division for the reshape
        # below to work -- Python 2 semantics assumed
        timepoints = rawdata.shape[0] / nbChannels
        self.dataRecorded = float(timepoints) / self.sampleRate
        self.durationRecorded = 1
        self.rawdata = np.rollaxis(rawdata.reshape(timepoints, nbChannels), 1)
        self.fileType = 'BrainVision'
    def resliceData(self, newSampleRate):
        """Downsamples rawdata and markerTime to newSampleRate by plain
        decimation (every int(divisor)-th sample; no anti-alias filter)."""
        if newSampleRate != self.sampleRate:
            divisor = float(self.sampleRate) / newSampleRate
            # Rewrite markerTime
            self.markerTime = (self.markerTime / divisor).astype('int')
            # Reslice rawdata with slicer
            slicer = np.arange(0, self.rawdata.shape[1], int(divisor))
            self.rawdata = self.rawdata[:, slicer]
            # Rewrite sampleRate
            self.sampleRate = newSampleRate
    def excludeChannels(self, channels2exclude):
        """Drops the named channels from labelsChannel and rawdata."""
        keepID = [i for i, e in enumerate(self.labelsChannel)
                  if e not in channels2exclude]
        self.labelsChannel = self.labelsChannel[keepID]
        self.rawdata = self.rawdata[keepID]
class ReadXYZ:
    """Reads an electrode coordinate (.xyz) file.

    The first line is a header; every following line holds
    "x y z label".  Exposes `coord` (n x 3 float array), `labels`
    (n string array) and `frontalID` (indices of the 10% most frontal
    channels, i.e. largest x coordinate).
    """
    def __init__(self, filename):
        # Read XYZ File
        with open(filename) as xyz:
            lines = xyz.readlines()
        coords = []
        names = []
        # Skip the header line, then parse one electrode per line
        for line in lines[1:]:
            tokens = line.split()
            coords.append([float(tokens[0]),
                           float(tokens[1]),
                           float(tokens[2])])
            names.append(tokens[3])
        self.coord = np.array(coords)
        self.labels = np.array(names)
        # Get ID of 10% most frontal channels
        xValue = self.coord[:, 0]
        self.frontalID = np.where(xValue >= np.percentile(xValue, 90))[0]
class SaveTVA:
    """Writes one Cartool .tva file per dataset.

    Each file starts with a 'TV01' header and has one line per usable
    epoch: "<accepted>\t0\t<marker>", where accepted is 1 for epochs
    whose selection state starts with 'ok_'.
    """
    def __init__(self, data, precut, postcut):
        # Usable epochs per dataset: markers more than `precut` points
        # after the start and `postcut` before the end of the recording
        epochCounts = []
        for dataset in data.Datasets:
            limit = dataset.rawdata.shape[1] - postcut
            epochCounts.append(
                len([m for m in dataset.markerTime
                     if m > precut and m < limit]))
        # 1 = accepted ('ok_*'), 0 = rejected
        tvaMarker = []
        for state in data.Results.matrixSelected:
            tvaMarker.append(1 if 'ok_' in state else 0)
        # Go through the files and save TVA for each; `counter` runs
        # over the concatenated epochs of all files
        counter = 0
        for i, name in enumerate(data.Filenames):
            tvaPath = join(data.DirPath, name + '.lineviewer.tva')
            with open(tvaPath, 'w') as tva:
                tva.writelines('TV01\n')
                for _ in range(epochCounts[i]):
                    tva.writelines('%d\t0\t%s\n' % (
                        tvaMarker[counter], data.Results.markers[counter]))
                    counter += 1
class SaveERP:
    """Writes per-marker averaged ERP results to Cartool .eph text files.

    For every marker that is not hidden, two data files are written --
    the GFP trace ('<name>.ERP_<marker>.GFP.eph') and the per-electrode
    averages ('<name>.ERP_<marker>.eph') -- each with a companion .mrk
    file pointing at the epoch origin (preFrame).
    """
    def __init__(self, resultsName, resultsPath, results, markers2hide,
                 preFrame):
        # Create output folder if it doesn't exist
        if not exists(resultsPath):
            makedirs(resultsPath)
        # Go through all the markers
        for i, m in enumerate(results.uniqueMarkers):
            # Do nothing if marker was hidden
            if m in markers2hide:
                continue
            # Write GFP data into ERP file
            # BUGFIX: read the shape of the current marker's data
            # (index i); the original always used index 0, which is
            # wrong whenever per-marker GFP traces differ in length.
            nTimepoint = results.avgGFP[i].shape[0]
            filename = '%s.ERP_%s.GFP.eph' % (resultsName, m)
            with open(join(resultsPath, filename), 'w') as f:
                f.writelines('{:>15}\t{:>15}\t{:>25}\n'.format(
                    1, nTimepoint, results.sampleRate))
                for tValue in results.avgGFP[i]:
                    f.writelines('{:>15}\n'.format(round(tValue, 7)))
            # Write GFP marker file
            filename += '.mrk'
            with open(join(resultsPath, filename), 'w') as f:
                f.writelines(
                    'TL02\n{:>12}\t{:>12}\t"Origin"\n'.format(preFrame,
                                                              preFrame))
            # Write electrode data into ERP file
            # BUGFIX: same index fix as above for the electrode averages
            nSignal, nTimepoint = results.avgEpochs[i].shape
            filename = '%s.ERP_%s.eph' % (resultsName, m)
            with open(join(resultsPath, filename), 'w') as f:
                f.writelines('{:>15}\t{:>15}\t{:>25}\n'.format(
                    nSignal, nTimepoint, results.sampleRate))
                # One line per timepoint, one tab-separated column per
                # electrode (format string hoisted out of the loop)
                formatString = '{:>15}\t' * nSignal
                formatString = formatString[:-1] + '\n'
                for tValues in results.avgEpochs[i].T:
                    f.writelines(
                        formatString.format(*np.round(tValues, 7).tolist()))
            # Write ERP marker file
            filename += '.mrk'
            with open(join(resultsPath, filename), 'w') as f:
                f.writelines(
                    'TL02\n{:>12}\t{:>12}\t"Origin"\n'.format(preFrame,
                                                              preFrame))
class SaveEpochs:
    """Writes every accepted epoch to its own Cartool .eph file.

    File names follow 'Epoch_NNNN_<marker>.eph' (1-based index); a
    companion .mrk file marks the epoch origin at `preFrame`.
    """
    def __init__(self, resultsPath, results, preFrame):
        # Create output folder if it doesn't exist
        if not exists(resultsPath):
            makedirs(resultsPath)
        # Go through all epochs, skipping the rejected ones
        for idx, epoch in enumerate(results.epochs):
            if not results.okID[idx]:
                continue
            # Write electrode data into ERP file
            nSignal, nTimepoint = epoch.shape
            ephName = 'Epoch_%.4d_%s.eph' % (idx + 1, results.markers[idx])
            rowFormat = ('{:>15}\t' * nSignal)[:-1] + '\n'
            with open(join(resultsPath, ephName), 'w') as out:
                out.writelines('{:>15}\t{:>15}\t{:>25}\n'.format(
                    nSignal, nTimepoint, results.sampleRate))
                # One line per timepoint, one column per electrode
                for tValues in epoch.T:
                    out.writelines(
                        rowFormat.format(*np.round(tValues, 7).tolist()))
            # Write Epoch marker file
            with open(join(resultsPath, ephName + '.mrk'), 'w') as out:
                out.writelines(
                    'TL02\n{:>12}\t{:>12}\t"Origin"\n'.format(preFrame,
                                                              preFrame))
class SaveFigures:
    """Exports the result figures as PNG files into the results folder.

    NOTE: `resultsName` is unused here but kept for interface parity
    with the other Save* classes.
    """
    def __init__(self, resultsName, resultsPath, figures):
        def _save(figure, pngName):
            figure.savefig(join(resultsPath, pngName), bbox_inches='tight')
        _save(figures.Overview.figure, 'plot_Overview.png')
        _save(figures.GFPSummary.figure, 'plot_GFP_Summary.png')
        _save(figures.GFPDetail.figure, 'plot_GFP_Detail.png')
        # Redraw the ERP summary once per marker and export each view.
        # The first combo entry is skipped -- presumably a placeholder
        # item, TODO confirm.
        for marker in figures.ERPSummary.ComboMarkers.GetItems()[1:]:
            figures.ERPSummary.update(str(marker))
            _save(figures.ERPSummary.figure,
                  'plot_ERP_Marker_%s.png' % str(marker))
class SaveVerbose:
    """Writes a human-readable .vrb report summarizing the analysis.

    The report lists preprocessing settings, ERP statistics, channel and
    marker outlier overviews, then per-input-file metadata and the
    output location.  The exact text layout is the file format -- do not
    reflow the format strings below.
    """
    def __init__(self, resultsName, resultsPath, data):
        """Writes '<resultsName>.vrb' into `resultsPath` from `data`."""
        # abbreviation to shorten variable name
        res = data.Results
        # Write Verbose File
        with open(join(resultsPath, '%s.vrb' % resultsName), 'w') as f:
            f.writelines('Verbose File\n============\n\n')
            f.writelines('LINEViewer (Version %s)\n' % data.VERSION)
            f.writelines('%s\n\n\n' % time.strftime('%Y/%m/%d %H:%M:%S'))
            # Information about the preprocessing
            f.writelines(
                'Processing Information:\n-----------------------\n\n')
            f.writelines('DC removed\t\t\t:\t%s\n' % res.removeDC)
            f.writelines('Reference to\t\t:\t%s\n' % res.newReference)
            # A cutoff of 0 means the filter was disabled
            highcut = res.highcut if res.highcut != 0 else 'None'
            lowcut = res.lowcut if res.lowcut != 0 else 'None'
            f.writelines('High-pass filter\t:\t%s\n' % highcut)
            f.writelines('Low-pass filter\t\t:\t%s\n' % lowcut)
            notch = res.notchValue if res.doNotch else 'None'
            f.writelines('Notch\t\t\t\t:\t%s\n' % notch)
            f.writelines('\n')
            f.writelines('Epoch duration pre\t:\t%sms / %s sampling points\n' %
                         (res.preEpoch, res.preFrame))
            f.writelines(
                'Epoch duration post\t:\t%sms / %s sampling points\n' %
                (res.postEpoch, res.postFrame))
            f.writelines('Baseline correction\t:\t%s\n' % res.baselineCorr)
            f.writelines('Blink correction\t:\t%s\n' % res.blinkCorr)
            f.writelines('Thresh. correction\t:\t%s\n' % res.thresholdCorr)
            f.writelines('Threshold [mikroV]\t:\t%s\n' % res.threshold)
            f.writelines('Channels ignored\t:\t%s\n' %
                         data.Specs.channels2ignore)
            f.writelines('\n')
            if hasattr(res, 'collapsedTransform'):
                collapsedInfo = [
                    '%s -> %s' % (res.collapsedTransform[e], e)
                    for e in res.collapsedTransform]
                collapsedInfo = ', '.join(collapsedInfo)
            else:
                collapsedInfo = 'None'
            f.writelines('Markers collapsed\t:\t%s\n' % collapsedInfo)
            f.writelines('Markers hidden\t\t:\t%s\n' % data.markers2hide)
            f.writelines('\n')
            f.writelines('Interpolated ch.\t:\t%s\n' %
                         data.Specs.channels2interpolate)
            xyzFile = data.Specs.xyzFile if data.Specs.xyzFile != '' \
                else 'None'
            f.writelines('XYZ-file path\t\t:\t%s\n' % xyzFile)
            f.writelines('\n\n')
            # Information about the ERP output
            f.writelines('ERP Information:\n----------------\n\n')
            epochsTotal = res.epochs.shape[0]
            epochsOK = res.okID.sum()
            percentOK = np.round(float(epochsOK) / epochsTotal, 2) * 100
            f.writelines('Channels #\t\t\t:\t%s\n' % res.epochs.shape[1])
            f.writelines('Markers #\t\t\t:\t%s\n' % len(res.uniqueMarkers))
            f.writelines('Marker value\t\t:\t%s\n' %
                         ', '.join(res.uniqueMarkers.astype('str')))
            f.writelines('Epochs total #\t\t:\t%s\n' % epochsTotal)
            f.writelines(
                'Epochs in ERP\t:\t{0} / {1}%\n'.format(epochsOK,
                                                        percentOK))
            f.writelines('\n')
            # Per-state epoch counts from the selection matrix
            selected = len(np.where(res.matrixSelected == 'selected')[0])
            ok_normal = len(np.where(res.matrixSelected == 'ok_normal')[0])
            threshold = len(np.where(res.matrixSelected == 'threshold')[0])
            ok_thresh = len(np.where(res.matrixSelected == 'ok_thresh')[0])
            blink = len(np.where(res.matrixSelected == 'blink')[0])
            ok_blink = len(np.where(res.matrixSelected == 'ok_blink')[0])
            modeInfo = '[automatic detection / manually switched]'
            f.writelines('Epochs accepted\t\t:\t%s / %s %s\n' %
                         (ok_normal, selected, modeInfo))
            f.writelines('Epochs Threshold\t:\t%s / %s %s\n' %
                         (threshold, ok_thresh, modeInfo))
            f.writelines('Epochs Blink\t\t:\t%s / %s %s\n' %
                         (blink, ok_blink, modeInfo))
            f.writelines('\n\n')
            # Information about the channel overview
            f.writelines(
                'Overview Channel Outliers:\n--------------------------\n\n')
            nFaultyChannels = res.OxaxisChannel.shape[0]
            distChannelThreshold = res.OdistChannelThreshold[-nFaultyChannels:]
            percentageChannel = np.round(
                res.OpercentageChannels[-nFaultyChannels:], 3) * 100
            percentageMarker = np.round(res.OpercentageMarker, 3) * 100
            nChannelOutliers = sum(distChannelThreshold)
            f.writelines('Outliers Total #\t:\t%s\n' % nChannelOutliers)
            f.writelines('Outliers Selected\t:\t{0} / {1}%\n'.format(
                res.OnSelectedOutliers, np.round(
                    float(res.OnSelectedOutliers) / epochsTotal, 3) * 100))
            f.writelines('Outliers Broken\t\t:\t{0} / {1}%\n'.format(
                res.OBroken, np.round(
                    float(res.OBroken) / epochsTotal, 3) * 100))
            f.writelines('Outliers Blink\t\t:\t{0} / {1}%\n'.format(
                res.OBlink, np.round(float(res.OBlink) / epochsTotal,
                                     3) * 100))
            f.writelines('\n')
            f.writelines('Channel Name\t\t:%s\n' %
                         ('{:>6}' * nFaultyChannels).format(
                             *res.OxaxisChannel))
            f.writelines('Outliers %\t\t\t:{0}\n'.format(
                ('{:>6}' * nFaultyChannels).format(*percentageChannel)))
            f.writelines('Threshold #\t\t\t:%s\n' % (
                '{:>6}' * nFaultyChannels).format(*distChannelThreshold))
            f.writelines('\n\n')
            # Information about the marker overview
            f.writelines(
                'Overview Marker Outliers:\n-------------------------\n\n')
            nMarkers = len(res.OxaxisMarker)
            f.writelines('Outliers Total\t\t:\t{0} / {1}%\n'.format(
                res.OoutlierEpochs, np.round(
                    float(res.OoutlierEpochs) / epochsTotal, 3) * 100))
            f.writelines('Marker Name\t\t\t:%s\n' %
                         ('{:>6}' * nMarkers).format(*res.OxaxisMarker))
            f.writelines('Outliers %\t\t\t:{0}\n'.format(
                ('{:>6}' * nMarkers).format(*percentageMarker)))
            f.writelines('OK #\t\t\t\t:%s\n' %
                         ('{:>6}' * nMarkers).format(*res.OdistMarkerOK))
            f.writelines('Selected #\t\t\t:%s\n' %
                         ('{:>6}' * nMarkers).format(*res.OdistMarkerSelected))
            f.writelines('Threshold #\t\t\t:%s\n' %
                         ('{:>6}' * nMarkers).format(
                             *res.OdistMarkerThreshold))
            f.writelines('Blink #\t\t\t\t:%s\n' %
                         ('{:>6}' * nMarkers).format(*res.OdistMarkerBlink))
            f.writelines('Broken #\t\t\t:%s\n' %
                         ('{:>6}' * nMarkers).format(*res.OdistMarkerBroken))
            f.writelines('Outlier ID\t\t\t:\t%s\n' %
                         ', '.join(np.where(res.badID)[0].astype('str')))
            f.writelines('\n\n')
            # Information about input files
            f.writelines('Input File(s):\n--------------\n\n')
            f.writelines('Number of input file(s):\t%s\n\n' %
                         len(data.Datasets))
            for i, d in enumerate(data.Datasets):
                f.writelines('File #\t\t\t\t:\t%s\n' % i)
                f.writelines('File name\t\t\t:\t%s\n' % basename(d.filename))
                f.writelines('File path\t\t\t:\t%s\n' % d.filename)
                f.writelines('Timestamp\t\t\t:\t%s:%s %s\n' %
                             (d.startTime[:2], d.startTime[3:5], d.startDate))
                duration = round(float(d.dataRecorded) * d.durationRecorded, 1)
                # NOTE(review): 'Lenght' is a typo preserved in the
                # output format; the '/' below relies on Python 2
                # integer division -- confirm before porting
                f.writelines('Lenght\t\t\t\t:\t%sm%ss\n' %
                             (int(duration) / 60, int(duration) % 60))
                f.writelines('Sampling freq.\t\t:\t%s Hz\n' % d.sampleRate)
                f.writelines('Sampling points\t\t:\t%s\n' % d.rawdata.shape[1])
                f.writelines('Channels #\t\t\t:\t%s\n' % d.labelsChannel.shape)
                uniqueMarkers = np.unique(d.markerValue)
                f.writelines('Markers #\t\t\t:\t%s\n' % len(uniqueMarkers))
                f.writelines('Marker value\t\t:\t%s\n' %
                             ', '.join(uniqueMarkers.astype('str')))
                f.writelines('\n')
            f.writelines('\n')
            # Information about output files
            f.writelines('Output Files:\n-------------\n\n')
            f.writelines('Folder name\t\t\t:\t%s\n' % resultsName)
            f.writelines('Output path\t\t\t:\t%s\n' % resultsPath)
| |
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslo_serialization import jsonutils
import webob
from glance.api.v2 import metadef_namespaces as namespaces
from glance.api.v2 import metadef_objects as objects
from glance.api.v2 import metadef_properties as properties
from glance.api.v2 import metadef_resource_types as resource_types
from glance.api.v2 import metadef_tags as tags
import glance.gateway
from glance.tests.unit import base
import glance.tests.unit.utils as unit_test_utils
DATETIME = datetime.datetime(2012, 5, 16, 15, 27, 36, 325355)
ISOTIME = '2012-05-16T15:27:36Z'
# Fixture identifiers shared by the metadef controller tests below.
NAMESPACE1 = 'Namespace1'
NAMESPACE2 = 'Namespace2'
NAMESPACE3 = 'Namespace3'
NAMESPACE4 = 'Namespace4'
NAMESPACE5 = 'Namespace5'
NAMESPACE6 = 'Namespace6'
PROPERTY1 = 'Property1'
PROPERTY2 = 'Property2'
PROPERTY3 = 'Property3'
PROPERTY4 = 'Property4'
OBJECT1 = 'Object1'
OBJECT2 = 'Object2'
OBJECT3 = 'Object3'
RESOURCE_TYPE1 = 'ResourceType1'
RESOURCE_TYPE2 = 'ResourceType2'
RESOURCE_TYPE3 = 'ResourceType3'
RESOURCE_TYPE4 = 'ResourceType4'
TAG1 = 'Tag1'
TAG2 = 'Tag2'
TAG3 = 'Tag3'
TAG4 = 'Tag4'
TAG5 = 'Tag5'
# Arbitrary but well-formed tenant UUIDs used as namespace owners.
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'
TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81'
TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8'
TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4'
PREFIX1 = 'pref'
def _db_namespace_fixture(namespace, **kwargs):
obj = {
'namespace': namespace,
'display_name': None,
'description': None,
'visibility': 'public',
'protected': False,
'owner': None,
}
obj.update(kwargs)
return obj
def _db_property_fixture(name, **kwargs):
obj = {
'name': name,
'json_schema': {"type": "string", "title": "title"},
}
obj.update(kwargs)
return obj
def _db_object_fixture(name, **kwargs):
obj = {
'name': name,
'description': None,
'json_schema': {},
'required': '[]',
}
obj.update(kwargs)
return obj
def _db_resource_type_fixture(name, **kwargs):
obj = {
'name': name,
'protected': False,
}
obj.update(kwargs)
return obj
def _db_tag_fixture(name, **kwargs):
obj = {
'name': name
}
obj.update(kwargs)
return obj
def _db_tags_fixture(tag_names=None):
    """Builds MetadefTag domain objects for the given names.

    Defaults to [TAG1, TAG2, TAG3] when no names are supplied.
    """
    names = tag_names or [TAG1, TAG2, TAG3]
    tag_list = []
    for tag_name in names:
        tag = tags.MetadefTag()
        tag.name = tag_name
        tag_list.append(tag)
    return tag_list
def _db_namespace_resource_type_fixture(name, **kwargs):
obj = {
'name': name,
'properties_target': None,
'prefix': None,
}
obj.update(kwargs)
return obj
class TestMetadefsControllers(base.IsolatedUnitTest):
    def setUp(self):
        """Builds an isolated fake DB/policy/notifier stack, seeds every
        metadef fixture kind, and instantiates one controller (plus the
        request deserializers) per metadef resource kind."""
        super(TestMetadefsControllers, self).setUp()
        self.db = unit_test_utils.FakeDB(initialize=False)
        self.policy = unit_test_utils.FakePolicyEnforcer()
        self.notifier = unit_test_utils.FakeNotifier()
        # Seed fixtures; namespaces must exist before their contents
        self._create_namespaces()
        self._create_properties()
        self._create_objects()
        self._create_resource_types()
        self._create_namespaces_resource_types()
        self._create_tags()
        self.namespace_controller = namespaces.NamespaceController(
            self.db, self.policy, self.notifier)
        self.property_controller = properties.NamespacePropertiesController(
            self.db, self.policy, self.notifier)
        self.object_controller = objects.MetadefObjectsController(
            self.db, self.policy, self.notifier)
        self.rt_controller = resource_types.ResourceTypeController(
            self.db, self.policy, self.notifier)
        self.tag_controller = tags.TagsController(
            self.db, self.policy, self.notifier)
        self.deserializer = objects.RequestDeserializer()
        self.property_deserializer = properties.RequestDeserializer()
def _create_namespaces(self):
req = unit_test_utils.get_fake_request()
self.namespaces = [
_db_namespace_fixture(NAMESPACE1, owner=TENANT1,
visibility='private', protected=True),
_db_namespace_fixture(NAMESPACE2, owner=TENANT2,
visibility='private'),
_db_namespace_fixture(NAMESPACE3, owner=TENANT3),
_db_namespace_fixture(NAMESPACE5, owner=TENANT4),
_db_namespace_fixture(NAMESPACE6, owner=TENANT4),
]
[self.db.metadef_namespace_create(req.context, namespace)
for namespace in self.namespaces]
def _create_properties(self):
req = unit_test_utils.get_fake_request()
self.properties = [
(NAMESPACE3, _db_property_fixture(PROPERTY1)),
(NAMESPACE3, _db_property_fixture(PROPERTY2)),
(NAMESPACE1, _db_property_fixture(PROPERTY1)),
(NAMESPACE6, _db_property_fixture(PROPERTY4)),
]
[self.db.metadef_property_create(req.context, namespace, property)
for namespace, property in self.properties]
def _create_objects(self):
req = unit_test_utils.get_fake_request()
self.objects = [
(NAMESPACE3, _db_object_fixture(OBJECT1)),
(NAMESPACE3, _db_object_fixture(OBJECT2)),
(NAMESPACE1, _db_object_fixture(OBJECT1)),
]
[self.db.metadef_object_create(req.context, namespace, object)
for namespace, object in self.objects]
def _create_resource_types(self):
req = unit_test_utils.get_fake_request()
self.resource_types = [
_db_resource_type_fixture(RESOURCE_TYPE1),
_db_resource_type_fixture(RESOURCE_TYPE2),
_db_resource_type_fixture(RESOURCE_TYPE4),
]
[self.db.metadef_resource_type_create(req.context, resource_type)
for resource_type in self.resource_types]
def _create_tags(self):
req = unit_test_utils.get_fake_request()
self.tags = [
(NAMESPACE3, _db_tag_fixture(TAG1)),
(NAMESPACE3, _db_tag_fixture(TAG2)),
(NAMESPACE1, _db_tag_fixture(TAG1)),
]
[self.db.metadef_tag_create(req.context, namespace, tag)
for namespace, tag in self.tags]
def _create_namespaces_resource_types(self):
req = unit_test_utils.get_fake_request(is_admin=True)
self.ns_resource_types = [
(NAMESPACE1, _db_namespace_resource_type_fixture(RESOURCE_TYPE1)),
(NAMESPACE3, _db_namespace_resource_type_fixture(RESOURCE_TYPE1)),
(NAMESPACE2, _db_namespace_resource_type_fixture(RESOURCE_TYPE1)),
(NAMESPACE2, _db_namespace_resource_type_fixture(RESOURCE_TYPE2)),
(NAMESPACE6, _db_namespace_resource_type_fixture(RESOURCE_TYPE4,
prefix=PREFIX1)),
]
[self.db.metadef_resource_type_association_create(req.context,
namespace,
ns_resource_type)
for namespace, ns_resource_type in self.ns_resource_types]
def assertNotificationLog(self, expected_event_type, expected_payloads):
events = [{'type': expected_event_type,
'payload': payload} for payload in expected_payloads]
self.assertNotificationsLog(events)
def assertNotificationsLog(self, expected_events):
output_logs = self.notifier.get_logs()
expected_logs_count = len(expected_events)
self.assertEqual(expected_logs_count, len(output_logs))
for output_log, event in zip(output_logs, expected_events):
self.assertEqual('INFO', output_log['notification_type'])
self.assertEqual(event['type'], output_log['event_type'])
self.assertDictContainsSubset(event['payload'],
output_log['payload'])
self.notifier.log = []
def test_namespace_index(self):
request = unit_test_utils.get_fake_request()
output = self.namespace_controller.index(request)
output = output.to_dict()
self.assertEqual(4, len(output['namespaces']))
actual = set([namespace.namespace for
namespace in output['namespaces']])
expected = set([NAMESPACE1, NAMESPACE3, NAMESPACE5, NAMESPACE6])
self.assertEqual(expected, actual)
def test_namespace_index_admin(self):
request = unit_test_utils.get_fake_request(is_admin=True)
output = self.namespace_controller.index(request)
output = output.to_dict()
self.assertEqual(5, len(output['namespaces']))
actual = set([namespace.namespace for
namespace in output['namespaces']])
expected = set([NAMESPACE1, NAMESPACE2, NAMESPACE3, NAMESPACE5,
NAMESPACE6])
self.assertEqual(expected, actual)
def test_namespace_index_visibility_public(self):
request = unit_test_utils.get_fake_request(tenant=TENANT3)
filters = {'visibility': 'public'}
output = self.namespace_controller.index(request, filters=filters)
output = output.to_dict()
self.assertEqual(3, len(output['namespaces']))
actual = set([namespace.namespace for namespace
in output['namespaces']])
expected = set([NAMESPACE3, NAMESPACE5, NAMESPACE6])
self.assertEqual(expected, actual)
def test_namespace_index_resource_type(self):
request = unit_test_utils.get_fake_request()
filters = {'resource_types': [RESOURCE_TYPE1]}
output = self.namespace_controller.index(request, filters=filters)
output = output.to_dict()
self.assertEqual(2, len(output['namespaces']))
actual = set([namespace.namespace for namespace
in output['namespaces']])
expected = set([NAMESPACE1, NAMESPACE3])
self.assertEqual(expected, actual)
def test_namespace_show(self):
request = unit_test_utils.get_fake_request()
output = self.namespace_controller.show(request, NAMESPACE1)
output = output.to_dict()
self.assertEqual(NAMESPACE1, output['namespace'])
self.assertEqual(TENANT1, output['owner'])
self.assertTrue(output['protected'])
self.assertEqual('private', output['visibility'])
def test_namespace_show_with_related_resources(self):
request = unit_test_utils.get_fake_request()
output = self.namespace_controller.show(request, NAMESPACE3)
output = output.to_dict()
self.assertEqual(NAMESPACE3, output['namespace'])
self.assertEqual(TENANT3, output['owner'])
self.assertFalse(output['protected'])
self.assertEqual('public', output['visibility'])
self.assertEqual(2, len(output['properties']))
actual = set([property for property in output['properties']])
expected = set([PROPERTY1, PROPERTY2])
self.assertEqual(expected, actual)
self.assertEqual(2, len(output['objects']))
actual = set([object.name for object in output['objects']])
expected = set([OBJECT1, OBJECT2])
self.assertEqual(expected, actual)
self.assertEqual(1, len(output['resource_type_associations']))
actual = set([rt.name for rt in output['resource_type_associations']])
expected = set([RESOURCE_TYPE1])
self.assertEqual(expected, actual)
def test_namespace_show_with_property_prefix(self):
request = unit_test_utils.get_fake_request()
rt = resource_types.ResourceTypeAssociation()
rt.name = RESOURCE_TYPE2
rt.prefix = 'pref'
rt = self.rt_controller.create(request, rt, NAMESPACE3)
object = objects.MetadefObject()
object.name = OBJECT3
object.required = []
property = properties.PropertyType()
property.name = PROPERTY2
property.type = 'string'
property.title = 'title'
object.properties = {'prop1': property}
object = self.object_controller.create(request, object, NAMESPACE3)
self.assertNotificationsLog([
{
'type': 'metadef_resource_type.create',
'payload': {
'namespace': NAMESPACE3,
'name': RESOURCE_TYPE2,
'prefix': 'pref',
'properties_target': None,
}
},
{
'type': 'metadef_object.create',
'payload': {
'name': OBJECT3,
'namespace': NAMESPACE3,
'properties': [{
'name': 'prop1',
'additionalItems': None,
'confidential': None,
'title': u'title',
'default': None,
'pattern': None,
'enum': None,
'maximum': None,
'minItems': None,
'minimum': None,
'maxItems': None,
'minLength': None,
'uniqueItems': None,
'maxLength': None,
'items': None,
'type': u'string',
'description': None
}],
'required': [],
'description': None,
}
}
])
filters = {'resource_type': RESOURCE_TYPE2}
output = self.namespace_controller.show(request, NAMESPACE3, filters)
output = output.to_dict()
[self.assertTrue(property_name.startswith(rt.prefix)) for
property_name in output['properties'].keys()]
for object in output['objects']:
[self.assertTrue(property_name.startswith(rt.prefix)) for
property_name in object.properties.keys()]
@mock.patch('glance.api.v2.metadef_namespaces.LOG')
def test_cleanup_namespace_success(self, mock_log):
fake_gateway = glance.gateway.Gateway(db_api=self.db,
notifier=self.notifier,
policy_enforcer=self.policy)
req = unit_test_utils.get_fake_request()
ns_factory = fake_gateway.get_metadef_namespace_factory(
req.context)
ns_repo = fake_gateway.get_metadef_namespace_repo(req.context)
namespace = namespaces.Namespace()
namespace.namespace = 'FakeNamespace'
new_namespace = ns_factory.new_namespace(**namespace.to_dict())
ns_repo.add(new_namespace)
self.namespace_controller._cleanup_namespace(ns_repo, namespace, True)
mock_log.debug.assert_called_with(
"Cleaned up namespace %(namespace)s ",
{'namespace': namespace.namespace})
@mock.patch('glance.api.v2.metadef_namespaces.LOG')
@mock.patch('glance.api.authorization.MetadefNamespaceRepoProxy.remove')
def test_cleanup_namespace_exception(self, mock_remove, mock_log):
mock_remove.side_effect = Exception(u'Mock remove was called')
fake_gateway = glance.gateway.Gateway(db_api=self.db,
notifier=self.notifier,
policy_enforcer=self.policy)
req = unit_test_utils.get_fake_request()
ns_factory = fake_gateway.get_metadef_namespace_factory(
req.context)
ns_repo = fake_gateway.get_metadef_namespace_repo(req.context)
namespace = namespaces.Namespace()
namespace.namespace = 'FakeNamespace'
new_namespace = ns_factory.new_namespace(**namespace.to_dict())
ns_repo.add(new_namespace)
self.namespace_controller._cleanup_namespace(ns_repo, namespace, True)
called_msg = 'Failed to delete namespace %(namespace)s.' \
'Exception: %(exception)s'
called_args = {'exception': u'Mock remove was called',
'namespace': u'FakeNamespace'}
mock_log.error.assert_called_with((called_msg, called_args))
mock_remove.assert_called_once_with(mock.ANY)
def test_namespace_show_non_existing(self):
request = unit_test_utils.get_fake_request()
self.assertRaises(webob.exc.HTTPNotFound,
self.namespace_controller.show, request, 'FakeName')
def test_namespace_show_non_visible(self):
request = unit_test_utils.get_fake_request()
self.assertRaises(webob.exc.HTTPNotFound,
self.namespace_controller.show, request, NAMESPACE2)
def test_namespace_delete(self):
request = unit_test_utils.get_fake_request(tenant=TENANT2)
self.namespace_controller.delete(request, NAMESPACE2)
self.assertNotificationLog("metadef_namespace.delete",
[{'namespace': NAMESPACE2}])
self.assertRaises(webob.exc.HTTPNotFound,
self.namespace_controller.show, request, NAMESPACE2)
def test_namespace_delete_notification_disabled(self):
self.config(disabled_notifications=["metadef_namespace.delete"])
request = unit_test_utils.get_fake_request(tenant=TENANT2)
self.namespace_controller.delete(request, NAMESPACE2)
self.assertNotificationsLog([])
self.assertRaises(webob.exc.HTTPNotFound,
self.namespace_controller.show, request, NAMESPACE2)
def test_namespace_delete_notification_group_disabled(self):
self.config(disabled_notifications=["metadef_namespace"])
request = unit_test_utils.get_fake_request(tenant=TENANT2)
self.namespace_controller.delete(request, NAMESPACE2)
self.assertNotificationsLog([])
self.assertRaises(webob.exc.HTTPNotFound,
self.namespace_controller.show, request, NAMESPACE2)
def test_namespace_delete_notification_create_disabled(self):
self.config(disabled_notifications=["metadef_namespace.create"])
request = unit_test_utils.get_fake_request(tenant=TENANT2)
self.namespace_controller.delete(request, NAMESPACE2)
self.assertNotificationLog("metadef_namespace.delete",
[{'namespace': NAMESPACE2}])
self.assertRaises(webob.exc.HTTPNotFound,
self.namespace_controller.show, request, NAMESPACE2)
def test_namespace_delete_non_existing(self):
    """Deleting a nonexistent namespace raises 404 and emits no notification."""
    request = unit_test_utils.get_fake_request()
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.namespace_controller.delete, request,
                      'FakeName')
    self.assertNotificationsLog([])
def test_namespace_delete_non_visible(self):
    """Deleting another tenant's namespace is reported as 404 (not 403) for non-admins."""
    request = unit_test_utils.get_fake_request()
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.namespace_controller.delete, request,
                      NAMESPACE2)
    self.assertNotificationsLog([])
def test_namespace_delete_non_visible_admin(self):
    """Admins can delete namespaces they do not own; notification still fires."""
    request = unit_test_utils.get_fake_request(is_admin=True)
    self.namespace_controller.delete(request, NAMESPACE2)
    self.assertNotificationLog("metadef_namespace.delete",
                               [{'namespace': NAMESPACE2}])
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.namespace_controller.show, request, NAMESPACE2)
def test_namespace_delete_protected(self):
    """A protected namespace cannot be deleted by its owner (403)."""
    request = unit_test_utils.get_fake_request()
    self.assertRaises(webob.exc.HTTPForbidden,
                      self.namespace_controller.delete, request,
                      NAMESPACE1)
    self.assertNotificationsLog([])
def test_namespace_delete_protected_admin(self):
    """Protection applies even to admins: deleting a protected namespace is 403."""
    request = unit_test_utils.get_fake_request(is_admin=True)
    self.assertRaises(webob.exc.HTTPForbidden,
                      self.namespace_controller.delete, request,
                      NAMESPACE1)
    self.assertNotificationsLog([])
def test_namespace_delete_with_contents(self):
    """Deleting a namespace cascades: its objects and properties also vanish."""
    request = unit_test_utils.get_fake_request(tenant=TENANT3)
    self.namespace_controller.delete(request, NAMESPACE3)
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.namespace_controller.show, request, NAMESPACE3)
    self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.show,
                      request, NAMESPACE3, OBJECT1)
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.property_controller.show, request, NAMESPACE3,
                      OBJECT1)
def test_namespace_delete_properties(self):
    """delete_properties empties the property list and notifies once."""
    request = unit_test_utils.get_fake_request(tenant=TENANT3)
    self.namespace_controller.delete_properties(request, NAMESPACE3)
    output = self.property_controller.index(request, NAMESPACE3)
    output = output.to_dict()
    self.assertEqual(0, len(output['properties']))
    self.assertNotificationLog("metadef_namespace.delete_properties",
                               [{'namespace': NAMESPACE3}])
def test_namespace_delete_properties_other_owner(self):
    """Non-owners cannot bulk-delete another tenant's properties (403)."""
    request = unit_test_utils.get_fake_request()
    self.assertRaises(webob.exc.HTTPForbidden,
                      self.namespace_controller.delete_properties,
                      request,
                      NAMESPACE3)
    self.assertNotificationsLog([])
def test_namespace_delete_properties_other_owner_admin(self):
    """Admins may bulk-delete properties in namespaces they do not own."""
    request = unit_test_utils.get_fake_request(is_admin=True)
    self.namespace_controller.delete_properties(request, NAMESPACE3)
    output = self.property_controller.index(request, NAMESPACE3)
    output = output.to_dict()
    self.assertEqual(0, len(output['properties']))
    self.assertNotificationLog("metadef_namespace.delete_properties",
                               [{'namespace': NAMESPACE3}])
def test_namespace_non_existing_delete_properties(self):
    """Bulk property delete on a missing namespace raises 404, no notification."""
    request = unit_test_utils.get_fake_request()
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.namespace_controller.delete_properties,
                      request,
                      NAMESPACE4)
    self.assertNotificationsLog([])
def test_namespace_delete_objects(self):
    """delete_objects empties the object list and notifies once."""
    request = unit_test_utils.get_fake_request(tenant=TENANT3)
    self.namespace_controller.delete_objects(request, NAMESPACE3)
    output = self.object_controller.index(request, NAMESPACE3)
    output = output.to_dict()
    self.assertEqual(0, len(output['objects']))
    self.assertNotificationLog("metadef_namespace.delete_objects",
                               [{'namespace': NAMESPACE3}])
def test_namespace_delete_objects_other_owner(self):
    """Non-owners cannot bulk-delete another tenant's objects (403)."""
    request = unit_test_utils.get_fake_request()
    self.assertRaises(webob.exc.HTTPForbidden,
                      self.namespace_controller.delete_objects,
                      request,
                      NAMESPACE3)
    self.assertNotificationsLog([])
def test_namespace_delete_objects_other_owner_admin(self):
    """Admins may bulk-delete objects in namespaces they do not own."""
    request = unit_test_utils.get_fake_request(is_admin=True)
    self.namespace_controller.delete_objects(request, NAMESPACE3)
    output = self.object_controller.index(request, NAMESPACE3)
    output = output.to_dict()
    self.assertEqual(0, len(output['objects']))
    self.assertNotificationLog("metadef_namespace.delete_objects",
                               [{'namespace': NAMESPACE3}])
def test_namespace_non_existing_delete_objects(self):
    """Bulk object delete on a missing namespace raises 404, no notification."""
    request = unit_test_utils.get_fake_request()
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.namespace_controller.delete_objects,
                      request,
                      NAMESPACE4)
    self.assertNotificationsLog([])
def test_namespace_delete_tags(self):
    """delete_tags empties the tag list and notifies once."""
    request = unit_test_utils.get_fake_request(tenant=TENANT3)
    self.namespace_controller.delete_tags(request, NAMESPACE3)
    output = self.tag_controller.index(request, NAMESPACE3)
    output = output.to_dict()
    self.assertEqual(0, len(output['tags']))
    self.assertNotificationLog("metadef_namespace.delete_tags",
                               [{'namespace': NAMESPACE3}])
def test_namespace_delete_tags_other_owner(self):
    """Non-owners cannot bulk-delete another tenant's tags (403)."""
    request = unit_test_utils.get_fake_request()
    self.assertRaises(webob.exc.HTTPForbidden,
                      self.namespace_controller.delete_tags,
                      request,
                      NAMESPACE3)
    self.assertNotificationsLog([])
def test_namespace_delete_tags_other_owner_admin(self):
    """Admins may bulk-delete tags in namespaces they do not own."""
    request = unit_test_utils.get_fake_request(is_admin=True)
    self.namespace_controller.delete_tags(request, NAMESPACE3)
    output = self.tag_controller.index(request, NAMESPACE3)
    output = output.to_dict()
    self.assertEqual(0, len(output['tags']))
    self.assertNotificationLog("metadef_namespace.delete_tags",
                               [{'namespace': NAMESPACE3}])
def test_namespace_non_existing_delete_tags(self):
    """Bulk tag delete on a missing namespace raises 404, no notification."""
    request = unit_test_utils.get_fake_request()
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.namespace_controller.delete_tags,
                      request,
                      NAMESPACE4)
    self.assertNotificationsLog([])
def test_namespace_create(self):
    """Create a namespace, verify the create notification, and re-read it."""
    request = unit_test_utils.get_fake_request()

    namespace = namespaces.Namespace()
    namespace.namespace = NAMESPACE4
    namespace = self.namespace_controller.create(request, namespace)
    self.assertEqual(NAMESPACE4, namespace.namespace)
    self.assertNotificationLog("metadef_namespace.create",
                               [{'namespace': NAMESPACE4}])
    # Round-trip through show to confirm persistence.
    namespace = self.namespace_controller.show(request, NAMESPACE4)
    self.assertEqual(NAMESPACE4, namespace.namespace)
def test_namespace_create_with_4byte_character(self):
    """Names containing 4-byte UTF-8 characters are rejected with 400."""
    request = unit_test_utils.get_fake_request()

    namespace = namespaces.Namespace()
    namespace.namespace = u'\U0001f693'

    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.namespace_controller.create, request,
                      namespace)
def test_namespace_create_duplicate(self):
    """Creating the same namespace twice raises 409 on the second attempt."""
    request = unit_test_utils.get_fake_request()
    namespace = namespaces.Namespace()
    namespace.namespace = 'new-namespace'
    new_ns = self.namespace_controller.create(request, namespace)
    self.assertEqual('new-namespace', new_ns.namespace)
    self.assertRaises(webob.exc.HTTPConflict,
                      self.namespace_controller.create,
                      request, namespace)
def test_namespace_create_different_owner(self):
    """Non-admins may not create a namespace owned by another tenant (403)."""
    request = unit_test_utils.get_fake_request()

    namespace = namespaces.Namespace()
    namespace.namespace = NAMESPACE4
    namespace.owner = TENANT4
    self.assertRaises(webob.exc.HTTPForbidden,
                      self.namespace_controller.create, request, namespace)
    self.assertNotificationsLog([])
def test_namespace_create_different_owner_admin(self):
    """Admins may create a namespace on behalf of another tenant."""
    request = unit_test_utils.get_fake_request(is_admin=True)

    namespace = namespaces.Namespace()
    namespace.namespace = NAMESPACE4
    namespace.owner = TENANT4
    namespace = self.namespace_controller.create(request, namespace)
    self.assertEqual(NAMESPACE4, namespace.namespace)
    self.assertNotificationLog("metadef_namespace.create",
                               [{'namespace': NAMESPACE4}])
    namespace = self.namespace_controller.show(request, NAMESPACE4)
    self.assertEqual(NAMESPACE4, namespace.namespace)
def test_namespace_create_with_related_resources(self):
    """Create a namespace with embedded properties and objects in one call.

    Verifies that both the create response and a subsequent show carry
    the two properties and two objects, and that one notification is
    emitted per created resource (1 namespace + 2 objects + 2 properties).
    """
    request = unit_test_utils.get_fake_request()

    namespace = namespaces.Namespace()
    namespace.namespace = NAMESPACE4

    prop1 = properties.PropertyType()
    prop1.type = 'string'
    prop1.title = 'title'
    prop2 = properties.PropertyType()
    prop2.type = 'string'
    prop2.title = 'title'
    namespace.properties = {PROPERTY1: prop1, PROPERTY2: prop2}

    object1 = objects.MetadefObject()
    object1.name = OBJECT1
    object1.required = []
    object1.properties = {}
    object2 = objects.MetadefObject()
    object2.name = OBJECT2
    object2.required = []
    object2.properties = {}
    namespace.objects = [object1, object2]

    output = self.namespace_controller.create(request, namespace)
    # Assert on the controller's RETURN value; the original test asserted
    # on the input object ``namespace``, which is a tautology.
    self.assertEqual(NAMESPACE4, output.namespace)

    output = output.to_dict()
    self.assertEqual(2, len(output['properties']))
    actual = set([property for property in output['properties']])
    expected = set([PROPERTY1, PROPERTY2])
    self.assertEqual(expected, actual)

    self.assertEqual(2, len(output['objects']))
    actual = set([object.name for object in output['objects']])
    expected = set([OBJECT1, OBJECT2])
    self.assertEqual(expected, actual)

    # Round-trip through show and verify the persisted record as well.
    output = self.namespace_controller.show(request, NAMESPACE4)
    self.assertEqual(NAMESPACE4, output.namespace)

    output = output.to_dict()
    self.assertEqual(2, len(output['properties']))
    actual = set([property for property in output['properties']])
    expected = set([PROPERTY1, PROPERTY2])
    self.assertEqual(expected, actual)

    self.assertEqual(2, len(output['objects']))
    actual = set([object.name for object in output['objects']])
    expected = set([OBJECT1, OBJECT2])
    self.assertEqual(expected, actual)

    self.assertNotificationsLog([
        {
            'type': 'metadef_namespace.create',
            'payload': {
                'namespace': NAMESPACE4,
                'owner': TENANT1,
            }
        },
        {
            'type': 'metadef_object.create',
            'payload': {
                'namespace': NAMESPACE4,
                'name': OBJECT1,
                'properties': [],
            }
        },
        {
            'type': 'metadef_object.create',
            'payload': {
                'namespace': NAMESPACE4,
                'name': OBJECT2,
                'properties': [],
            }
        },
        {
            'type': 'metadef_property.create',
            'payload': {
                'namespace': NAMESPACE4,
                'type': 'string',
                'title': 'title',
            }
        },
        {
            'type': 'metadef_property.create',
            'payload': {
                'namespace': NAMESPACE4,
                'type': 'string',
                'title': 'title',
            }
        }
    ])
def test_namespace_create_conflict(self):
    """Creating a namespace whose name already exists raises 409."""
    request = unit_test_utils.get_fake_request()

    namespace = namespaces.Namespace()
    namespace.namespace = NAMESPACE1

    self.assertRaises(webob.exc.HTTPConflict,
                      self.namespace_controller.create, request, namespace)
    self.assertNotificationsLog([])
def test_namespace_update(self):
    """Update a namespace field (protected) and verify persistence + notification."""
    request = unit_test_utils.get_fake_request()
    namespace = self.namespace_controller.show(request, NAMESPACE1)
    namespace.protected = False
    namespace = self.namespace_controller.update(request, namespace,
                                                 NAMESPACE1)
    self.assertFalse(namespace.protected)
    self.assertNotificationLog("metadef_namespace.update", [
        {'namespace': NAMESPACE1, 'protected': False}
    ])
    namespace = self.namespace_controller.show(request, NAMESPACE1)
    self.assertFalse(namespace.protected)
def test_namespace_update_non_existing(self):
    """Updating a nonexistent namespace raises 404 and emits no notification."""
    request = unit_test_utils.get_fake_request()

    namespace = namespaces.Namespace()
    namespace.namespace = NAMESPACE4

    self.assertRaises(webob.exc.HTTPNotFound,
                      self.namespace_controller.update, request, namespace,
                      NAMESPACE4)
    self.assertNotificationsLog([])
def test_namespace_update_non_visible(self):
    """Updating another tenant's namespace is reported as 404 for non-admins."""
    request = unit_test_utils.get_fake_request()

    namespace = namespaces.Namespace()
    namespace.namespace = NAMESPACE2

    self.assertRaises(webob.exc.HTTPNotFound,
                      self.namespace_controller.update, request, namespace,
                      NAMESPACE2)
    self.assertNotificationsLog([])
def test_namespace_update_non_visible_admin(self):
    """Admins can update namespaces they do not own."""
    request = unit_test_utils.get_fake_request(is_admin=True)
    namespace = self.namespace_controller.show(request, NAMESPACE2)
    namespace.protected = False
    namespace = self.namespace_controller.update(request, namespace,
                                                 NAMESPACE2)
    self.assertFalse(namespace.protected)
    self.assertNotificationLog("metadef_namespace.update", [
        {'namespace': NAMESPACE2, 'protected': False}
    ])
    namespace = self.namespace_controller.show(request, NAMESPACE2)
    self.assertFalse(namespace.protected)
def test_namespace_update_name(self):
    """Renaming a namespace makes it visible under the new name only."""
    request = unit_test_utils.get_fake_request()
    namespace = self.namespace_controller.show(request, NAMESPACE1)
    namespace.namespace = NAMESPACE4
    namespace = self.namespace_controller.update(request, namespace,
                                                 NAMESPACE1)
    self.assertEqual(NAMESPACE4, namespace.namespace)
    # The update payload carries both the new and the old name.
    self.assertNotificationLog("metadef_namespace.update", [
        {'namespace': NAMESPACE4, 'namespace_old': NAMESPACE1}
    ])
    namespace = self.namespace_controller.show(request, NAMESPACE4)
    self.assertEqual(NAMESPACE4, namespace.namespace)
    # The old name must no longer resolve.
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.namespace_controller.show, request, NAMESPACE1)
def test_namespace_update_with_4byte_character(self):
    """Renaming to a 4-byte UTF-8 name is rejected with 400."""
    request = unit_test_utils.get_fake_request()
    namespace = self.namespace_controller.show(request, NAMESPACE1)
    namespace.namespace = u'\U0001f693'

    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.namespace_controller.update, request,
                      namespace, NAMESPACE1)
def test_namespace_update_name_conflict(self):
    """Renaming onto an existing namespace name raises 409, no notification."""
    request = unit_test_utils.get_fake_request()
    namespace = self.namespace_controller.show(request, NAMESPACE1)
    namespace.namespace = NAMESPACE2
    self.assertRaises(webob.exc.HTTPConflict,
                      self.namespace_controller.update, request, namespace,
                      NAMESPACE1)
    self.assertNotificationsLog([])
def test_property_index(self):
    """Listing NAMESPACE3 returns exactly its two seeded properties."""
    req = unit_test_utils.get_fake_request()
    listing = self.property_controller.index(req, NAMESPACE3)
    self.assertEqual(2, len(listing.properties))
    self.assertEqual(set([PROPERTY1, PROPERTY2]),
                     set(listing.properties))
def test_property_index_empty(self):
    """A namespace with no properties yields an empty listing."""
    request = unit_test_utils.get_fake_request(tenant=TENANT2)
    output = self.property_controller.index(request, NAMESPACE2)
    self.assertEqual(0, len(output.properties))
def test_property_index_non_existing_namespace(self):
    """Listing properties of a missing namespace raises 404."""
    request = unit_test_utils.get_fake_request(tenant=TENANT2)
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.property_controller.index, request, NAMESPACE4)
def test_property_show(self):
    """Showing an existing property returns it by name."""
    request = unit_test_utils.get_fake_request()
    output = self.property_controller.show(request, NAMESPACE3, PROPERTY1)
    self.assertEqual(PROPERTY1, output.name)
def test_property_show_specific_resource_type(self):
    """With a resource_type filter, the property prefix is stripped from the name."""
    request = unit_test_utils.get_fake_request()
    # The stored name is PREFIX1 + PROPERTY4; filtering by RESOURCE_TYPE4
    # returns it under the unprefixed name.
    output = self.property_controller.show(
        request, NAMESPACE6, ''.join([PREFIX1, PROPERTY4]),
        filters={'resource_type': RESOURCE_TYPE4})
    self.assertEqual(PROPERTY4, output.name)
def test_property_show_prefix_mismatch(self):
    """Requesting the unprefixed name while filtering by resource type is 404."""
    request = unit_test_utils.get_fake_request()
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.property_controller.show, request, NAMESPACE6,
                      PROPERTY4, filters={'resource_type': RESOURCE_TYPE4})
def test_property_show_non_existing_resource_type(self):
    """Filtering by an unknown resource type raises 404."""
    request = unit_test_utils.get_fake_request()
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.property_controller.show, request, NAMESPACE2,
                      PROPERTY1, filters={'resource_type': 'test'})
def test_property_show_non_existing(self):
    """Showing a property absent from the namespace raises 404."""
    request = unit_test_utils.get_fake_request()
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.property_controller.show, request, NAMESPACE2,
                      PROPERTY1)
def test_property_show_non_visible(self):
    """Properties in another tenant's private namespace are hidden (404)."""
    request = unit_test_utils.get_fake_request(tenant=TENANT2)
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.property_controller.show, request, NAMESPACE1,
                      PROPERTY1)
def test_property_show_non_visible_admin(self):
    """Admins can see properties in namespaces they do not own."""
    request = unit_test_utils.get_fake_request(tenant=TENANT2,
                                               is_admin=True)
    output = self.property_controller.show(request, NAMESPACE1, PROPERTY1)
    self.assertEqual(PROPERTY1, output.name)
def test_property_delete(self):
    """Owner delete removes the property and emits a delete notification."""
    request = unit_test_utils.get_fake_request(tenant=TENANT3)
    self.property_controller.delete(request, NAMESPACE3, PROPERTY1)
    self.assertNotificationLog("metadef_property.delete",
                               [{'name': PROPERTY1,
                                 'namespace': NAMESPACE3}])
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.property_controller.show, request, NAMESPACE3,
                      PROPERTY1)
def test_property_delete_disabled_notification(self):
    """Disabling metadef_property.delete suppresses the notification only."""
    self.config(disabled_notifications=["metadef_property.delete"])
    request = unit_test_utils.get_fake_request(tenant=TENANT3)
    self.property_controller.delete(request, NAMESPACE3, PROPERTY1)
    self.assertNotificationsLog([])
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.property_controller.show, request, NAMESPACE3,
                      PROPERTY1)
def test_property_delete_other_owner(self):
    """Non-owners cannot delete another tenant's property (403)."""
    request = unit_test_utils.get_fake_request()
    self.assertRaises(webob.exc.HTTPForbidden,
                      self.property_controller.delete, request, NAMESPACE3,
                      PROPERTY1)
    self.assertNotificationsLog([])
def test_property_delete_other_owner_admin(self):
    """Admins may delete properties in namespaces they do not own."""
    request = unit_test_utils.get_fake_request(is_admin=True)
    self.property_controller.delete(request, NAMESPACE3, PROPERTY1)
    self.assertNotificationLog("metadef_property.delete",
                               [{'name': PROPERTY1,
                                 'namespace': NAMESPACE3}])
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.property_controller.show, request, NAMESPACE3,
                      PROPERTY1)
def test_property_delete_non_existing(self):
    """Deleting a property absent from the namespace raises 404."""
    request = unit_test_utils.get_fake_request()
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.property_controller.delete, request, NAMESPACE5,
                      PROPERTY2)
    self.assertNotificationsLog([])
def test_property_delete_non_existing_namespace(self):
    """Deleting a property under a missing namespace raises 404."""
    request = unit_test_utils.get_fake_request()
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.property_controller.delete, request, NAMESPACE4,
                      PROPERTY1)
    self.assertNotificationsLog([])
def test_property_delete_non_visible(self):
    """Deleting another tenant's hidden property is reported as 404."""
    request = unit_test_utils.get_fake_request(tenant=TENANT2)
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.property_controller.delete, request, NAMESPACE1,
                      PROPERTY1)
    self.assertNotificationsLog([])
def test_property_delete_admin_protected(self):
    """Even admins cannot delete a property in a protected namespace (403)."""
    request = unit_test_utils.get_fake_request(is_admin=True)
    self.assertRaises(webob.exc.HTTPForbidden,
                      self.property_controller.delete, request, NAMESPACE1,
                      PROPERTY1)
    self.assertNotificationsLog([])
def test_property_create(self):
    """Create a property; verify returned fields, notification, and re-read."""
    request = unit_test_utils.get_fake_request()

    property = properties.PropertyType()
    property.name = PROPERTY2
    property.type = 'string'
    property.title = 'title'

    property = self.property_controller.create(request, NAMESPACE1,
                                               property)
    self.assertEqual(PROPERTY2, property.name)
    self.assertEqual('string', property.type)
    self.assertEqual('title', property.title)
    self.assertNotificationLog("metadef_property.create",
                               [{'name': PROPERTY2,
                                 'namespace': NAMESPACE1}])
    # Round-trip through show to confirm persistence.
    property = self.property_controller.show(request, NAMESPACE1,
                                             PROPERTY2)
    self.assertEqual(PROPERTY2, property.name)
    self.assertEqual('string', property.type)
    self.assertEqual('title', property.title)
def test_property_create_overlimit_name(self):
    """A property name longer than 255 characters fails deserialization (400)."""
    request = unit_test_utils.get_fake_request('/metadefs/namespaces/'
                                               'Namespace3/'
                                               'properties')
    request.body = jsonutils.dump_as_bytes({'name': 'a' * 256})

    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.property_deserializer.create,
                      request)
def test_property_create_with_4byte_character(self):
    """A property name with a 4-byte UTF-8 character is rejected (400)."""
    request = unit_test_utils.get_fake_request()

    property = properties.PropertyType()
    property.name = u'\U0001f693'
    property.type = 'string'
    property.title = 'title'

    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.property_controller.create,
                      request, NAMESPACE1, property)
def test_property_create_with_operators(self):
    """The optional ``operators`` list is stored and returned unchanged."""
    request = unit_test_utils.get_fake_request()

    property = properties.PropertyType()
    property.name = PROPERTY2
    property.type = 'string'
    property.title = 'title'
    property.operators = ['<or>']

    property = self.property_controller.create(request, NAMESPACE1,
                                               property)
    self.assertEqual(PROPERTY2, property.name)
    self.assertEqual('string', property.type)
    self.assertEqual('title', property.title)
    self.assertEqual(['<or>'], property.operators)

    property = self.property_controller.show(request, NAMESPACE1,
                                             PROPERTY2)
    self.assertEqual(PROPERTY2, property.name)
    self.assertEqual('string', property.type)
    self.assertEqual('title', property.title)
    self.assertEqual(['<or>'], property.operators)
def test_property_create_conflict(self):
    """Creating a property whose name already exists raises 409."""
    request = unit_test_utils.get_fake_request()

    property = properties.PropertyType()
    property.name = PROPERTY1
    property.type = 'string'
    property.title = 'title'

    self.assertRaises(webob.exc.HTTPConflict,
                      self.property_controller.create, request, NAMESPACE1,
                      property)
    self.assertNotificationsLog([])
def test_property_create_non_visible_namespace(self):
    """Non-owners cannot create properties in another tenant's namespace (403)."""
    request = unit_test_utils.get_fake_request(tenant=TENANT2)

    property = properties.PropertyType()
    property.name = PROPERTY1
    property.type = 'string'
    property.title = 'title'

    self.assertRaises(webob.exc.HTTPForbidden,
                      self.property_controller.create, request, NAMESPACE1,
                      property)
    self.assertNotificationsLog([])
def test_property_create_non_visible_namespace_admin(self):
    """Admins may create properties in namespaces they do not own."""
    request = unit_test_utils.get_fake_request(tenant=TENANT2,
                                               is_admin=True)

    property = properties.PropertyType()
    property.name = PROPERTY2
    property.type = 'string'
    property.title = 'title'

    property = self.property_controller.create(request, NAMESPACE1,
                                               property)
    self.assertEqual(PROPERTY2, property.name)
    self.assertEqual('string', property.type)
    self.assertEqual('title', property.title)
    self.assertNotificationLog("metadef_property.create",
                               [{'name': PROPERTY2,
                                 'namespace': NAMESPACE1}])
    property = self.property_controller.show(request, NAMESPACE1,
                                             PROPERTY2)
    self.assertEqual(PROPERTY2, property.name)
    self.assertEqual('string', property.type)
    self.assertEqual('title', property.title)
def test_property_create_non_existing_namespace(self):
    """Creating a property under a missing namespace raises 404."""
    request = unit_test_utils.get_fake_request()

    property = properties.PropertyType()
    property.name = PROPERTY1
    property.type = 'string'
    property.title = 'title'

    self.assertRaises(webob.exc.HTTPNotFound,
                      self.property_controller.create, request, NAMESPACE4,
                      property)
    self.assertNotificationsLog([])
def test_property_create_duplicate(self):
    """Creating the same property twice raises 409 on the second attempt."""
    request = unit_test_utils.get_fake_request()

    property = properties.PropertyType()
    property.name = 'new-property'
    property.type = 'string'
    property.title = 'title'

    new_property = self.property_controller.create(request, NAMESPACE1,
                                                   property)
    self.assertEqual('new-property', new_property.name)
    self.assertRaises(webob.exc.HTTPConflict,
                      self.property_controller.create, request,
                      NAMESPACE1, property)
def test_property_update(self):
    """Update type/title of a property and verify persistence + notification."""
    request = unit_test_utils.get_fake_request(tenant=TENANT3)

    property = self.property_controller.show(request, NAMESPACE3,
                                             PROPERTY1)
    property.name = PROPERTY1
    property.type = 'string123'
    property.title = 'title123'

    property = self.property_controller.update(request, NAMESPACE3,
                                               PROPERTY1, property)
    self.assertEqual(PROPERTY1, property.name)
    self.assertEqual('string123', property.type)
    self.assertEqual('title123', property.title)
    self.assertNotificationLog("metadef_property.update", [
        {
            'name': PROPERTY1,
            'namespace': NAMESPACE3,
            'type': 'string123',
            'title': 'title123',
        }
    ])
    # Re-fetch to confirm the changes were persisted.
    property = self.property_controller.show(request, NAMESPACE3,
                                             PROPERTY1)
    self.assertEqual(PROPERTY1, property.name)
    self.assertEqual('string123', property.type)
    self.assertEqual('title123', property.title)
def test_property_update_name(self):
    """Rename a property and verify it is retrievable under the new name."""
    request = unit_test_utils.get_fake_request(tenant=TENANT3)

    property = self.property_controller.show(request, NAMESPACE3,
                                             PROPERTY1)
    property.name = PROPERTY3
    property.type = 'string'
    property.title = 'title'

    property = self.property_controller.update(request, NAMESPACE3,
                                               PROPERTY1, property)
    self.assertEqual(PROPERTY3, property.name)
    self.assertEqual('string', property.type)
    self.assertEqual('title', property.title)
    # The update payload carries both the new and the old name.
    self.assertNotificationLog("metadef_property.update", [
        {
            'name': PROPERTY3,
            'name_old': PROPERTY1,
            'namespace': NAMESPACE3,
            'type': 'string',
            'title': 'title',
        }
    ])
    # Re-fetch under the NEW name to prove the rename took effect.  The
    # original test fetched the unrelated PROPERTY2 here, which verified
    # nothing about the rename.
    property = self.property_controller.show(request, NAMESPACE3,
                                             PROPERTY3)
    self.assertEqual(PROPERTY3, property.name)
    self.assertEqual('string', property.type)
    self.assertEqual('title', property.title)
def test_property_update_conflict(self):
    """Renaming a property onto an existing name raises 409, no notification."""
    request = unit_test_utils.get_fake_request(tenant=TENANT3)

    property = self.property_controller.show(request, NAMESPACE3,
                                             PROPERTY1)
    property.name = PROPERTY2
    property.type = 'string'
    property.title = 'title'

    self.assertRaises(webob.exc.HTTPConflict,
                      self.property_controller.update, request, NAMESPACE3,
                      PROPERTY1, property)
    self.assertNotificationsLog([])
def test_property_update_with_4byte_character(self):
    """Renaming a property to a 4-byte UTF-8 name is rejected (400)."""
    request = unit_test_utils.get_fake_request(tenant=TENANT3)

    property = self.property_controller.show(request, NAMESPACE3,
                                             PROPERTY1)
    property.name = u'\U0001f693'
    property.type = 'string'
    property.title = 'title'

    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.property_controller.update, request,
                      NAMESPACE3, PROPERTY1, property)
def test_property_update_non_existing(self):
    """Updating a property absent from the namespace raises 404."""
    request = unit_test_utils.get_fake_request(tenant=TENANT3)

    property = properties.PropertyType()
    property.name = PROPERTY1
    property.type = 'string'
    property.title = 'title'

    self.assertRaises(webob.exc.HTTPNotFound,
                      self.property_controller.update, request, NAMESPACE5,
                      PROPERTY1, property)
    self.assertNotificationsLog([])
def test_property_update_namespace_non_existing(self):
    """Updating a property under a missing namespace raises 404."""
    request = unit_test_utils.get_fake_request(tenant=TENANT3)

    property = properties.PropertyType()
    property.name = PROPERTY1
    property.type = 'string'
    property.title = 'title'

    self.assertRaises(webob.exc.HTTPNotFound,
                      self.property_controller.update, request, NAMESPACE4,
                      PROPERTY1, property)
    self.assertNotificationsLog([])
def test_object_index(self):
    """Listing NAMESPACE3 returns exactly its two seeded objects."""
    req = unit_test_utils.get_fake_request()
    listing = self.object_controller.index(req, NAMESPACE3).to_dict()
    self.assertEqual(2, len(listing['objects']))
    self.assertEqual(set([OBJECT1, OBJECT2]),
                     set(obj.name for obj in listing['objects']))
def test_object_index_zero_limit(self):
    """A limit of 0 in the query string fails index deserialization (400)."""
    request = unit_test_utils.get_fake_request('/metadefs/namespaces/'
                                               'Namespace3/'
                                               'objects?limit=0')
    self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index,
                      request)
def test_object_index_empty(self):
    """A namespace with no objects yields an empty listing."""
    request = unit_test_utils.get_fake_request()
    output = self.object_controller.index(request, NAMESPACE5)
    output = output.to_dict()
    self.assertEqual(0, len(output['objects']))
def test_object_index_non_existing_namespace(self):
    """Listing objects of a missing namespace raises 404."""
    request = unit_test_utils.get_fake_request()
    self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.index,
                      request, NAMESPACE4)
def test_object_show(self):
    """Showing an existing object returns it by name."""
    request = unit_test_utils.get_fake_request()
    output = self.object_controller.show(request, NAMESPACE3, OBJECT1)
    self.assertEqual(OBJECT1, output.name)
def test_object_show_non_existing(self):
    """Showing an object absent from the namespace raises 404."""
    request = unit_test_utils.get_fake_request()
    self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.show,
                      request, NAMESPACE5, OBJECT1)
def test_object_show_non_visible(self):
    """Objects in another tenant's private namespace are hidden (404)."""
    request = unit_test_utils.get_fake_request(tenant=TENANT2)
    self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.show,
                      request, NAMESPACE1, OBJECT1)
def test_object_show_non_visible_admin(self):
    """Admins can see objects in namespaces they do not own."""
    request = unit_test_utils.get_fake_request(tenant=TENANT2,
                                               is_admin=True)
    output = self.object_controller.show(request, NAMESPACE1, OBJECT1)
    self.assertEqual(OBJECT1, output.name)
def test_object_delete(self):
    """Owner delete removes the object and emits a delete notification."""
    request = unit_test_utils.get_fake_request(tenant=TENANT3)
    self.object_controller.delete(request, NAMESPACE3, OBJECT1)
    self.assertNotificationLog("metadef_object.delete",
                               [{'name': OBJECT1,
                                 'namespace': NAMESPACE3}])
    self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.show,
                      request, NAMESPACE3, OBJECT1)
def test_object_delete_disabled_notification(self):
    """Disabling metadef_object.delete suppresses the notification only."""
    self.config(disabled_notifications=["metadef_object.delete"])
    request = unit_test_utils.get_fake_request(tenant=TENANT3)
    self.object_controller.delete(request, NAMESPACE3, OBJECT1)
    self.assertNotificationsLog([])
    self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.show,
                      request, NAMESPACE3, OBJECT1)
def test_object_delete_other_owner(self):
    """Non-owners cannot delete another tenant's object (403)."""
    request = unit_test_utils.get_fake_request()
    self.assertRaises(webob.exc.HTTPForbidden,
                      self.object_controller.delete, request, NAMESPACE3,
                      OBJECT1)
    self.assertNotificationsLog([])
def test_object_delete_other_owner_admin(self):
    """Admins may delete objects in namespaces they do not own."""
    request = unit_test_utils.get_fake_request(is_admin=True)
    self.object_controller.delete(request, NAMESPACE3, OBJECT1)
    self.assertNotificationLog("metadef_object.delete",
                               [{'name': OBJECT1,
                                 'namespace': NAMESPACE3}])
    self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.show,
                      request, NAMESPACE3, OBJECT1)
def test_object_delete_non_existing(self):
    """Deleting an object absent from the namespace raises 404."""
    request = unit_test_utils.get_fake_request()
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.object_controller.delete, request, NAMESPACE5,
                      OBJECT1)
    self.assertNotificationsLog([])
def test_object_delete_non_existing_namespace(self):
    """Deleting an object under a missing namespace raises 404."""
    request = unit_test_utils.get_fake_request()
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.object_controller.delete, request, NAMESPACE4,
                      OBJECT1)
    self.assertNotificationsLog([])
def test_object_delete_non_visible(self):
    """Deleting another tenant's hidden object is reported as 404."""
    request = unit_test_utils.get_fake_request(tenant=TENANT2)
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.object_controller.delete, request, NAMESPACE1,
                      OBJECT1)
    self.assertNotificationsLog([])
def test_object_delete_admin_protected(self):
    """Even admins cannot delete an object in a protected namespace (403)."""
    request = unit_test_utils.get_fake_request(is_admin=True)
    self.assertRaises(webob.exc.HTTPForbidden,
                      self.object_controller.delete, request, NAMESPACE1,
                      OBJECT1)
    self.assertNotificationsLog([])
def test_object_create(self):
    """Create an object; verify returned fields, notification, and re-read."""
    request = unit_test_utils.get_fake_request()

    object = objects.MetadefObject()
    object.name = OBJECT2
    object.required = []
    object.properties = {}
    object = self.object_controller.create(request, object, NAMESPACE1)
    self.assertEqual(OBJECT2, object.name)
    self.assertEqual([], object.required)
    self.assertEqual({}, object.properties)
    self.assertNotificationLog("metadef_object.create",
                               [{'name': OBJECT2,
                                 'namespace': NAMESPACE1,
                                 'properties': []}])
    # Round-trip through show to confirm persistence.
    object = self.object_controller.show(request, NAMESPACE1, OBJECT2)
    self.assertEqual(OBJECT2, object.name)
    self.assertEqual([], object.required)
    self.assertEqual({}, object.properties)
def test_object_create_overlimit_name(self):
    """An object name longer than 255 characters fails deserialization (400)."""
    request = unit_test_utils.get_fake_request('/metadefs/namespaces/'
                                               'Namespace3/'
                                               'objects')
    request.body = jsonutils.dump_as_bytes({'name': 'a' * 256})

    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.deserializer.create,
                      request)
def test_object_create_duplicate(self):
    """Creating the same object twice raises 409 on the second attempt."""
    request = unit_test_utils.get_fake_request()

    object = objects.MetadefObject()
    object.name = 'New-Object'
    object.required = []
    object.properties = {}

    new_obj = self.object_controller.create(request, object, NAMESPACE3)
    self.assertEqual('New-Object', new_obj.name)
    self.assertRaises(webob.exc.HTTPConflict,
                      self.object_controller.create, request, object,
                      NAMESPACE3)
def test_object_create_conflict(self):
    """Creating an object whose name already exists raises 409."""
    request = unit_test_utils.get_fake_request()

    object = objects.MetadefObject()
    object.name = OBJECT1
    object.required = []
    object.properties = {}

    self.assertRaises(webob.exc.HTTPConflict,
                      self.object_controller.create, request, object,
                      NAMESPACE1)
    self.assertNotificationsLog([])
def test_object_create_with_4byte_character(self):
    """An object name with a 4-byte UTF-8 character is rejected (400)."""
    request = unit_test_utils.get_fake_request()

    object = objects.MetadefObject()
    object.name = u'\U0001f693'
    object.required = []
    object.properties = {}

    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.object_controller.create, request,
                      object, NAMESPACE1)
def test_object_create_non_existing_namespace(self):
    """Creating an object under a missing namespace raises 404."""
    request = unit_test_utils.get_fake_request()

    object = objects.MetadefObject()
    # NOTE(review): the object is named with the PROPERTY1 constant —
    # harmless here because the namespace lookup fails first, but OBJECT1
    # was probably intended.
    object.name = PROPERTY1
    object.required = []
    object.properties = {}

    self.assertRaises(webob.exc.HTTPNotFound,
                      self.object_controller.create, request, object,
                      NAMESPACE4)
    self.assertNotificationsLog([])
def test_object_create_non_visible_namespace(self):
    """Non-owners cannot create objects in another tenant's namespace (403)."""
    request = unit_test_utils.get_fake_request(tenant=TENANT2)

    object = objects.MetadefObject()
    object.name = OBJECT1
    object.required = []
    object.properties = {}

    self.assertRaises(webob.exc.HTTPForbidden,
                      self.object_controller.create, request, object,
                      NAMESPACE1)
    self.assertNotificationsLog([])
def test_object_create_non_visible_namespace_admin(self):
    """Admins may create objects in namespaces they do not own."""
    request = unit_test_utils.get_fake_request(tenant=TENANT2,
                                               is_admin=True)

    object = objects.MetadefObject()
    object.name = OBJECT2
    object.required = []
    object.properties = {}
    object = self.object_controller.create(request, object, NAMESPACE1)
    self.assertEqual(OBJECT2, object.name)
    self.assertEqual([], object.required)
    self.assertEqual({}, object.properties)
    self.assertNotificationLog("metadef_object.create",
                               [{'name': OBJECT2,
                                 'namespace': NAMESPACE1}])
    object = self.object_controller.show(request, NAMESPACE1, OBJECT2)
    self.assertEqual(OBJECT2, object.name)
    self.assertEqual([], object.required)
    self.assertEqual({}, object.properties)
def test_object_create_missing_properties(self):
request = unit_test_utils.get_fake_request()
object = objects.MetadefObject()
object.name = OBJECT2
object.required = []
object = self.object_controller.create(request, object, NAMESPACE1)
self.assertEqual(OBJECT2, object.name)
self.assertEqual([], object.required)
self.assertNotificationLog("metadef_object.create",
[{'name': OBJECT2,
'namespace': NAMESPACE1,
'properties': []}])
object = self.object_controller.show(request, NAMESPACE1, OBJECT2)
self.assertEqual(OBJECT2, object.name)
self.assertEqual([], object.required)
self.assertEqual({}, object.properties)
def test_object_update(self):
request = unit_test_utils.get_fake_request(tenant=TENANT3)
object = self.object_controller.show(request, NAMESPACE3, OBJECT1)
object.name = OBJECT1
object.description = 'description'
object = self.object_controller.update(request, object, NAMESPACE3,
OBJECT1)
self.assertEqual(OBJECT1, object.name)
self.assertEqual('description', object.description)
self.assertNotificationLog("metadef_object.update", [
{
'name': OBJECT1,
'namespace': NAMESPACE3,
'description': 'description',
}
])
property = self.object_controller.show(request, NAMESPACE3, OBJECT1)
self.assertEqual(OBJECT1, property.name)
self.assertEqual('description', object.description)
def test_object_update_name(self):
request = unit_test_utils.get_fake_request()
object = self.object_controller.show(request, NAMESPACE1, OBJECT1)
object.name = OBJECT2
object = self.object_controller.update(request, object, NAMESPACE1,
OBJECT1)
self.assertEqual(OBJECT2, object.name)
self.assertNotificationLog("metadef_object.update", [
{
'name': OBJECT2,
'name_old': OBJECT1,
'namespace': NAMESPACE1,
}
])
object = self.object_controller.show(request, NAMESPACE1, OBJECT2)
self.assertEqual(OBJECT2, object.name)
def test_object_update_with_4byte_character(self):
request = unit_test_utils.get_fake_request()
object = self.object_controller.show(request, NAMESPACE1, OBJECT1)
object.name = u'\U0001f693'
self.assertRaises(webob.exc.HTTPBadRequest,
self.object_controller.update, request,
object, NAMESPACE1, OBJECT1)
def test_object_update_conflict(self):
request = unit_test_utils.get_fake_request(tenant=TENANT3)
object = self.object_controller.show(request, NAMESPACE3, OBJECT1)
object.name = OBJECT2
self.assertRaises(webob.exc.HTTPConflict,
self.object_controller.update, request, object,
NAMESPACE3, OBJECT1)
self.assertNotificationsLog([])
def test_object_update_non_existing(self):
request = unit_test_utils.get_fake_request(tenant=TENANT3)
object = objects.MetadefObject()
object.name = OBJECT1
object.required = []
object.properties = {}
self.assertRaises(webob.exc.HTTPNotFound,
self.object_controller.update, request, object,
NAMESPACE5, OBJECT1)
self.assertNotificationsLog([])
def test_object_update_namespace_non_existing(self):
request = unit_test_utils.get_fake_request(tenant=TENANT3)
object = objects.MetadefObject()
object.name = OBJECT1
object.required = []
object.properties = {}
self.assertRaises(webob.exc.HTTPNotFound,
self.object_controller.update, request, object,
NAMESPACE4, OBJECT1)
self.assertNotificationsLog([])
def test_resource_type_index(self):
request = unit_test_utils.get_fake_request()
output = self.rt_controller.index(request)
self.assertEqual(3, len(output.resource_types))
actual = set([rtype.name for rtype in output.resource_types])
expected = set([RESOURCE_TYPE1, RESOURCE_TYPE2, RESOURCE_TYPE4])
self.assertEqual(expected, actual)
def test_resource_type_show(self):
request = unit_test_utils.get_fake_request()
output = self.rt_controller.show(request, NAMESPACE3)
self.assertEqual(1, len(output.resource_type_associations))
actual = set([rt.name for rt in output.resource_type_associations])
expected = set([RESOURCE_TYPE1])
self.assertEqual(expected, actual)
def test_resource_type_show_empty(self):
request = unit_test_utils.get_fake_request()
output = self.rt_controller.show(request, NAMESPACE5)
self.assertEqual(0, len(output.resource_type_associations))
def test_resource_type_show_non_visible(self):
request = unit_test_utils.get_fake_request()
self.assertRaises(webob.exc.HTTPNotFound, self.rt_controller.show,
request, NAMESPACE2)
def test_resource_type_show_non_visible_admin(self):
request = unit_test_utils.get_fake_request(tenant=TENANT2,
is_admin=True)
output = self.rt_controller.show(request, NAMESPACE2)
self.assertEqual(2, len(output.resource_type_associations))
actual = set([rt.name for rt in output.resource_type_associations])
expected = set([RESOURCE_TYPE1, RESOURCE_TYPE2])
self.assertEqual(expected, actual)
def test_resource_type_show_non_existing_namespace(self):
request = unit_test_utils.get_fake_request()
self.assertRaises(webob.exc.HTTPNotFound, self.rt_controller.show,
request, NAMESPACE4)
def test_resource_type_association_delete(self):
request = unit_test_utils.get_fake_request(tenant=TENANT3)
self.rt_controller.delete(request, NAMESPACE3, RESOURCE_TYPE1)
self.assertNotificationLog("metadef_resource_type.delete",
[{'name': RESOURCE_TYPE1,
'namespace': NAMESPACE3}])
output = self.rt_controller.show(request, NAMESPACE3)
self.assertEqual(0, len(output.resource_type_associations))
def test_resource_type_association_delete_disabled_notification(self):
self.config(disabled_notifications=["metadef_resource_type.delete"])
request = unit_test_utils.get_fake_request(tenant=TENANT3)
self.rt_controller.delete(request, NAMESPACE3, RESOURCE_TYPE1)
self.assertNotificationsLog([])
output = self.rt_controller.show(request, NAMESPACE3)
self.assertEqual(0, len(output.resource_type_associations))
def test_resource_type_association_delete_other_owner(self):
request = unit_test_utils.get_fake_request()
self.assertRaises(webob.exc.HTTPForbidden, self.rt_controller.delete,
request, NAMESPACE3, RESOURCE_TYPE1)
self.assertNotificationsLog([])
def test_resource_type_association_delete_other_owner_admin(self):
request = unit_test_utils.get_fake_request(is_admin=True)
self.rt_controller.delete(request, NAMESPACE3, RESOURCE_TYPE1)
self.assertNotificationLog("metadef_resource_type.delete",
[{'name': RESOURCE_TYPE1,
'namespace': NAMESPACE3}])
output = self.rt_controller.show(request, NAMESPACE3)
self.assertEqual(0, len(output.resource_type_associations))
def test_resource_type_association_delete_non_existing(self):
request = unit_test_utils.get_fake_request()
self.assertRaises(webob.exc.HTTPNotFound, self.rt_controller.delete,
request, NAMESPACE1, RESOURCE_TYPE2)
self.assertNotificationsLog([])
def test_resource_type_association_delete_non_existing_namespace(self):
request = unit_test_utils.get_fake_request()
self.assertRaises(webob.exc.HTTPNotFound, self.rt_controller.delete,
request, NAMESPACE4, RESOURCE_TYPE1)
self.assertNotificationsLog([])
def test_resource_type_association_delete_non_visible(self):
request = unit_test_utils.get_fake_request(tenant=TENANT3)
self.assertRaises(webob.exc.HTTPNotFound, self.rt_controller.delete,
request, NAMESPACE1, RESOURCE_TYPE1)
self.assertNotificationsLog([])
def test_resource_type_association_delete_protected_admin(self):
request = unit_test_utils.get_fake_request(is_admin=True)
self.assertRaises(webob.exc.HTTPForbidden, self.rt_controller.delete,
request, NAMESPACE1, RESOURCE_TYPE1)
self.assertNotificationsLog([])
def test_resource_type_association_create(self):
request = unit_test_utils.get_fake_request()
rt = resource_types.ResourceTypeAssociation()
rt.name = RESOURCE_TYPE2
rt.prefix = 'pref'
rt = self.rt_controller.create(request, rt, NAMESPACE1)
self.assertEqual(RESOURCE_TYPE2, rt.name)
self.assertEqual('pref', rt.prefix)
self.assertNotificationLog("metadef_resource_type.create",
[{'name': RESOURCE_TYPE2,
'namespace': NAMESPACE1}])
output = self.rt_controller.show(request, NAMESPACE1)
self.assertEqual(2, len(output.resource_type_associations))
actual = set([x.name for x in output.resource_type_associations])
expected = set([RESOURCE_TYPE1, RESOURCE_TYPE2])
self.assertEqual(expected, actual)
def test_resource_type_association_create_conflict(self):
request = unit_test_utils.get_fake_request()
rt = resource_types.ResourceTypeAssociation()
rt.name = RESOURCE_TYPE1
rt.prefix = 'pref'
self.assertRaises(webob.exc.HTTPConflict, self.rt_controller.create,
request, rt, NAMESPACE1)
self.assertNotificationsLog([])
def test_resource_type_association_create_non_existing_namespace(self):
request = unit_test_utils.get_fake_request()
rt = resource_types.ResourceTypeAssociation()
rt.name = RESOURCE_TYPE1
rt.prefix = 'pref'
self.assertRaises(webob.exc.HTTPNotFound, self.rt_controller.create,
request, rt, NAMESPACE4)
self.assertNotificationsLog([])
def test_resource_type_association_create_non_existing_resource_type(self):
request = unit_test_utils.get_fake_request()
rt = resource_types.ResourceTypeAssociation()
rt.name = RESOURCE_TYPE3
rt.prefix = 'pref'
self.assertRaises(webob.exc.HTTPNotFound, self.rt_controller.create,
request, rt, NAMESPACE1)
self.assertNotificationsLog([])
def test_resource_type_association_create_non_visible_namespace(self):
request = unit_test_utils.get_fake_request(tenant=TENANT2)
rt = resource_types.ResourceTypeAssociation()
rt.name = RESOURCE_TYPE2
rt.prefix = 'pref'
self.assertRaises(webob.exc.HTTPForbidden, self.rt_controller.create,
request, rt, NAMESPACE1)
self.assertNotificationsLog([])
def test_resource_type_association_create_non_visible_namesp_admin(self):
request = unit_test_utils.get_fake_request(tenant=TENANT2,
is_admin=True)
rt = resource_types.ResourceTypeAssociation()
rt.name = RESOURCE_TYPE2
rt.prefix = 'pref'
rt = self.rt_controller.create(request, rt, NAMESPACE1)
self.assertEqual(RESOURCE_TYPE2, rt.name)
self.assertEqual('pref', rt.prefix)
self.assertNotificationLog("metadef_resource_type.create",
[{'name': RESOURCE_TYPE2,
'namespace': NAMESPACE1}])
output = self.rt_controller.show(request, NAMESPACE1)
self.assertEqual(2, len(output.resource_type_associations))
actual = set([x.name for x in output.resource_type_associations])
expected = set([RESOURCE_TYPE1, RESOURCE_TYPE2])
self.assertEqual(expected, actual)
def test_tag_index(self):
request = unit_test_utils.get_fake_request()
output = self.tag_controller.index(request, NAMESPACE3)
output = output.to_dict()
self.assertEqual(2, len(output['tags']))
actual = set([tag.name for tag in output['tags']])
expected = set([TAG1, TAG2])
self.assertEqual(expected, actual)
def test_tag_index_empty(self):
request = unit_test_utils.get_fake_request()
output = self.tag_controller.index(request, NAMESPACE5)
output = output.to_dict()
self.assertEqual(0, len(output['tags']))
def test_tag_index_non_existing_namespace(self):
request = unit_test_utils.get_fake_request()
self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.index,
request, NAMESPACE4)
def test_tag_show(self):
request = unit_test_utils.get_fake_request()
output = self.tag_controller.show(request, NAMESPACE3, TAG1)
self.assertEqual(TAG1, output.name)
def test_tag_show_non_existing(self):
request = unit_test_utils.get_fake_request()
self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.show,
request, NAMESPACE5, TAG1)
def test_tag_show_non_visible(self):
request = unit_test_utils.get_fake_request(tenant=TENANT2)
self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.show,
request, NAMESPACE1, TAG1)
def test_tag_show_non_visible_admin(self):
request = unit_test_utils.get_fake_request(tenant=TENANT2,
is_admin=True)
output = self.tag_controller.show(request, NAMESPACE1, TAG1)
self.assertEqual(TAG1, output.name)
def test_tag_delete(self):
request = unit_test_utils.get_fake_request(tenant=TENANT3)
self.tag_controller.delete(request, NAMESPACE3, TAG1)
self.assertNotificationLog("metadef_tag.delete",
[{'name': TAG1,
'namespace': NAMESPACE3}])
self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.show,
request, NAMESPACE3, TAG1)
def test_tag_delete_disabled_notification(self):
self.config(disabled_notifications=["metadef_tag.delete"])
request = unit_test_utils.get_fake_request(tenant=TENANT3)
self.tag_controller.delete(request, NAMESPACE3, TAG1)
self.assertNotificationsLog([])
self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.show,
request, NAMESPACE3, TAG1)
def test_tag_delete_other_owner(self):
request = unit_test_utils.get_fake_request()
self.assertRaises(webob.exc.HTTPForbidden,
self.tag_controller.delete, request, NAMESPACE3,
TAG1)
self.assertNotificationsLog([])
def test_tag_delete_other_owner_admin(self):
request = unit_test_utils.get_fake_request(is_admin=True)
self.tag_controller.delete(request, NAMESPACE3, TAG1)
self.assertNotificationLog("metadef_tag.delete",
[{'name': TAG1,
'namespace': NAMESPACE3}])
self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.show,
request, NAMESPACE3, TAG1)
def test_tag_delete_non_existing(self):
request = unit_test_utils.get_fake_request()
self.assertRaises(webob.exc.HTTPNotFound,
self.tag_controller.delete, request, NAMESPACE5,
TAG1)
self.assertNotificationsLog([])
def test_tag_delete_non_existing_namespace(self):
request = unit_test_utils.get_fake_request()
self.assertRaises(webob.exc.HTTPNotFound,
self.tag_controller.delete, request, NAMESPACE4,
TAG1)
self.assertNotificationsLog([])
def test_tag_delete_non_visible(self):
request = unit_test_utils.get_fake_request(tenant=TENANT2)
self.assertRaises(webob.exc.HTTPNotFound,
self.tag_controller.delete, request, NAMESPACE1,
TAG1)
self.assertNotificationsLog([])
def test_tag_delete_admin_protected(self):
request = unit_test_utils.get_fake_request(is_admin=True)
self.assertRaises(webob.exc.HTTPForbidden,
self.tag_controller.delete, request, NAMESPACE1,
TAG1)
self.assertNotificationsLog([])
def test_tag_create(self):
request = unit_test_utils.get_fake_request()
tag = self.tag_controller.create(request, NAMESPACE1, TAG2)
self.assertEqual(TAG2, tag.name)
self.assertNotificationLog("metadef_tag.create",
[{'name': TAG2,
'namespace': NAMESPACE1}])
tag = self.tag_controller.show(request, NAMESPACE1, TAG2)
self.assertEqual(TAG2, tag.name)
def test_tag_create_overlimit_name(self):
request = unit_test_utils.get_fake_request()
self.assertRaises(webob.exc.HTTPBadRequest,
self.tag_controller.create,
request, NAMESPACE1, 'a' * 256)
def test_tag_create_with_4byte_character(self):
request = unit_test_utils.get_fake_request()
self.assertRaises(webob.exc.HTTPBadRequest,
self.tag_controller.create,
request, NAMESPACE1, u'\U0001f693')
def test_tag_create_tags(self):
request = unit_test_utils.get_fake_request()
metadef_tags = tags.MetadefTags()
metadef_tags.tags = _db_tags_fixture()
output = self.tag_controller.create_tags(
request, metadef_tags, NAMESPACE1)
output = output.to_dict()
self.assertEqual(3, len(output['tags']))
actual = set([tag.name for tag in output['tags']])
expected = set([TAG1, TAG2, TAG3])
self.assertEqual(expected, actual)
self.assertNotificationLog(
"metadef_tag.create", [
{'name': TAG1, 'namespace': NAMESPACE1},
{'name': TAG2, 'namespace': NAMESPACE1},
{'name': TAG3, 'namespace': NAMESPACE1},
]
)
def test_tag_create_duplicate_tags(self):
request = unit_test_utils.get_fake_request()
metadef_tags = tags.MetadefTags()
metadef_tags.tags = _db_tags_fixture([TAG4, TAG5, TAG4])
self.assertRaises(
webob.exc.HTTPConflict,
self.tag_controller.create_tags,
request, metadef_tags, NAMESPACE1)
self.assertNotificationsLog([])
def test_tag_create_duplicate_with_pre_existing_tags(self):
request = unit_test_utils.get_fake_request()
metadef_tags = tags.MetadefTags()
metadef_tags.tags = _db_tags_fixture([TAG1, TAG2, TAG3])
output = self.tag_controller.create_tags(
request, metadef_tags, NAMESPACE1)
output = output.to_dict()
self.assertEqual(3, len(output['tags']))
actual = set([tag.name for tag in output['tags']])
expected = set([TAG1, TAG2, TAG3])
self.assertEqual(expected, actual)
self.assertNotificationLog(
"metadef_tag.create", [
{'name': TAG1, 'namespace': NAMESPACE1},
{'name': TAG2, 'namespace': NAMESPACE1},
{'name': TAG3, 'namespace': NAMESPACE1},
]
)
metadef_tags = tags.MetadefTags()
metadef_tags.tags = _db_tags_fixture([TAG4, TAG5, TAG4])
self.assertRaises(
webob.exc.HTTPConflict,
self.tag_controller.create_tags,
request, metadef_tags, NAMESPACE1)
self.assertNotificationsLog([])
output = self.tag_controller.index(request, NAMESPACE1)
output = output.to_dict()
self.assertEqual(3, len(output['tags']))
actual = set([tag.name for tag in output['tags']])
expected = set([TAG1, TAG2, TAG3])
self.assertEqual(expected, actual)
def test_tag_create_conflict(self):
request = unit_test_utils.get_fake_request()
self.assertRaises(webob.exc.HTTPConflict,
self.tag_controller.create, request,
NAMESPACE1, TAG1)
self.assertNotificationsLog([])
def test_tag_create_non_existing_namespace(self):
request = unit_test_utils.get_fake_request()
self.assertRaises(webob.exc.HTTPNotFound,
self.tag_controller.create, request,
NAMESPACE4, TAG1)
self.assertNotificationsLog([])
def test_tag_create_non_visible_namespace(self):
request = unit_test_utils.get_fake_request(tenant=TENANT2)
self.assertRaises(webob.exc.HTTPForbidden,
self.tag_controller.create, request,
NAMESPACE1, TAG1)
self.assertNotificationsLog([])
def test_tag_create_non_visible_namespace_admin(self):
request = unit_test_utils.get_fake_request(tenant=TENANT2,
is_admin=True)
tag = self.tag_controller.create(request, NAMESPACE1, TAG2)
self.assertEqual(TAG2, tag.name)
self.assertNotificationLog("metadef_tag.create",
[{'name': TAG2,
'namespace': NAMESPACE1}])
tag = self.tag_controller.show(request, NAMESPACE1, TAG2)
self.assertEqual(TAG2, tag.name)
def test_tag_update(self):
request = unit_test_utils.get_fake_request(tenant=TENANT3)
tag = self.tag_controller.show(request, NAMESPACE3, TAG1)
tag.name = TAG3
tag = self.tag_controller.update(request, tag, NAMESPACE3, TAG1)
self.assertEqual(TAG3, tag.name)
self.assertNotificationLog("metadef_tag.update", [
{'name': TAG3, 'namespace': NAMESPACE3}
])
property = self.tag_controller.show(request, NAMESPACE3, TAG3)
self.assertEqual(TAG3, property.name)
def test_tag_update_name(self):
request = unit_test_utils.get_fake_request()
tag = self.tag_controller.show(request, NAMESPACE1, TAG1)
tag.name = TAG2
tag = self.tag_controller.update(request, tag, NAMESPACE1, TAG1)
self.assertEqual(TAG2, tag.name)
self.assertNotificationLog("metadef_tag.update", [
{'name': TAG2, 'name_old': TAG1, 'namespace': NAMESPACE1}
])
tag = self.tag_controller.show(request, NAMESPACE1, TAG2)
self.assertEqual(TAG2, tag.name)
def test_tag_update_with_4byte_character(self):
request = unit_test_utils.get_fake_request()
tag = self.tag_controller.show(request, NAMESPACE1, TAG1)
tag.name = u'\U0001f693'
self.assertRaises(webob.exc.HTTPBadRequest,
self.tag_controller.update, request, tag,
NAMESPACE1, TAG1)
def test_tag_update_conflict(self):
request = unit_test_utils.get_fake_request(tenant=TENANT3)
tag = self.tag_controller.show(request, NAMESPACE3, TAG1)
tag.name = TAG2
self.assertRaises(webob.exc.HTTPConflict,
self.tag_controller.update, request, tag,
NAMESPACE3, TAG1)
self.assertNotificationsLog([])
def test_tag_update_non_existing(self):
request = unit_test_utils.get_fake_request(tenant=TENANT3)
tag = tags.MetadefTag()
tag.name = TAG1
self.assertRaises(webob.exc.HTTPNotFound,
self.tag_controller.update, request, tag,
NAMESPACE5, TAG1)
self.assertNotificationsLog([])
def test_tag_update_namespace_non_existing(self):
request = unit_test_utils.get_fake_request(tenant=TENANT3)
tag = tags.MetadefTag()
tag.name = TAG1
self.assertRaises(webob.exc.HTTPNotFound,
self.tag_controller.update, request, tag,
NAMESPACE4, TAG1)
self.assertNotificationsLog([])
| |
from __future__ import print_function, absolute_import, division
import warnings
from distutils.version import LooseVersion
import pytest
import numpy as np
import astropy
from astropy.wcs import WCS
from astropy import units as u
from astropy.io import fits
from ..spectral_cube import SpectralCube, VarianceWarning
from .helpers import assert_allclose
# the back of the book
# Hand-computed reference moments for the 3x3x3 cube built by moment_cube()
# below: pixel scales along velocity (v), declination (y), RA (x).
dv = 3e-2 * u.Unit('m/s')
dy = 2e-5 * u.Unit('deg')
dx = 1e-5 * u.Unit('deg')
data_unit = u.K
# Zeroth moments (sums along each axis, scaled by the pixel size).
m0v = np.array([[27, 30, 33],
                [36, 39, 42],
                [45, 48, 51]]) * data_unit * dv
m0y = np.array([[9, 12, 15],
                [36, 39, 42],
                [63, 66, 69]]) * data_unit * dy
m0x = np.array([[3, 12, 21],
                [30, 39, 48],
                [57, 66, 75]]) * data_unit * dx
# M1V is a special case, where we return the actual coordinate
m1v = np.array([[1.66666667, 1.6, 1.54545455],
                [1.5, 1.46153846, 1.42857143],
                [1.4, 1.375, 1.35294118]]) * dv + 2 * u.Unit('m/s')
m1y = np.array([[1.66666667, 1.5, 1.4],
                [1.16666667, 1.15384615, 1.14285714],
                [1.0952381, 1.09090909, 1.08695652]]) * dy
m1x = np.array([[1.66666667, 1.16666667, 1.0952381],
                [1.06666667, 1.05128205, 1.04166667],
                [1.03508772, 1.03030303, 1.02666667]]) * dx
# Second moments (variances about the first moment, in coordinate units^2).
m2v = np.array([[0.22222222, 0.30666667, 0.36914601],
                [0.41666667, 0.45364892, 0.4829932],
                [0.50666667, 0.52604167, 0.54209919]]) * dv ** 2
m2y = np.array([[0.22222222, 0.41666667, 0.50666667],
                [0.63888889, 0.64299803, 0.6462585],
                [0.65759637, 0.6584022, 0.65910523]]) * dy ** 2
m2x = np.array([[0.22222222, 0.63888889, 0.65759637],
                [0.66222222, 0.66403682, 0.66493056],
                [0.66543552, 0.66574839, 0.66595556]]) * dx ** 2
# Indexed as MOMENTS[order][axis] with axis 0=spectral, 1=y, 2=x.
MOMENTS = [[m0v, m0y, m0x], [m1v, m1y, m1x], [m2v, m2y, m2x]]
# In issue 184, the cubes were corrected such that they all have valid units
# Therefore, no separate tests are needed for moments-with-units and those
# without
MOMENTSu = MOMENTS
def moment_cube():
    """Build the 3x3x3 synthetic spectral cube used by every test here.

    Returns an `astropy.io.fits.PrimaryHDU` whose data are 0..26 and whose
    WCS uses tiny cdelt/crval values so spherical distortions are negligible
    and the hand-computed MOMENTS arrays above hold.
    """
    # `np.float` was deprecated in NumPy 1.20 and later removed; the builtin
    # `float` is the documented equivalent (float64).
    cube = np.arange(27).reshape([3, 3, 3]).astype(float)
    wcs = WCS(naxis=3)
    wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VELO']
    # choose values to minimize spherical distortions
    wcs.wcs.cdelt = np.array([-1, 2, 3], dtype='float32') / 1e5
    wcs.wcs.crpix = np.array([1, 1, 1], dtype='float32')
    wcs.wcs.crval = np.array([0, 1e-3, 2e-3], dtype='float32')
    wcs.wcs.cunit = ['deg', 'deg', 'km/s']
    header = wcs.to_header()
    header['BUNIT'] = 'K'
    hdu = fits.PrimaryHDU(data=cube, header=header)
    return hdu
# Reusable decorator: run a test once for every (axis, order) pair of the
# 3-D cube, in the same row-major sequence as before.
axis_order = pytest.mark.parametrize(('axis', 'order'),
                                     [(axis, order)
                                      for axis in range(3)
                                      for order in range(3)])
# NOTE(review): only the first three characters of the version string are
# compared (e.g. "1.0"), presumably to avoid LooseVersion choking on dev
# suffixes; this would mis-slice a two-digit minor like "1.10" to "1.1",
# though the >= '1.0' comparison still holds — confirm before reusing.
if LooseVersion(astropy.__version__[:3]) >= LooseVersion('1.0'):
    # The relative error is slightly larger on astropy-dev
    # There is no obvious reason for this.
    rtol = 2e-7
    atol = 1e-30
else:
    rtol = 1e-7
    atol = 0.0
@axis_order
def test_strategies_consistent(axis, order):
    """The cube/slice/ray strategies agree for every axis and order."""
    sc = SpectralCube.read(moment_cube())
    reference = sc.moment(axis=axis, order=order, how='cube')
    for strategy in ('slice', 'ray'):
        result = sc.moment(axis=axis, order=order, how=strategy)
        assert_allclose(reference, result, rtol=rtol, atol=atol)
@pytest.mark.parametrize(('order', 'axis', 'how'),
                         [(o, a, h)
                          for o in range(3)
                          for a in range(3)
                          for h in ('cube', 'slice', 'auto', 'ray')])
def test_reference(order, axis, how):
    """Every strategy reproduces the hand-computed reference moments."""
    hdu = moment_cube()
    cube = SpectralCube.read(hdu)
    assert_allclose(cube.moment(order=order, axis=axis, how=how),
                    MOMENTS[order][axis])
@axis_order
def test_consistent_mask_handling(axis, order):
    """All strategies agree when a (T > 4 K) mask is applied."""
    sc = SpectralCube.read(moment_cube())
    sc._mask = sc > 4 * u.K
    baseline = sc.moment(axis=axis, order=order, how='cube')
    for strategy in ('slice', 'ray'):
        masked = sc.moment(axis=axis, order=order, how=strategy)
        assert_allclose(baseline, masked, rtol=rtol, atol=atol)
def test_convenience_methods():
    """moment0/1/2 shortcuts match moment() at the corresponding order."""
    sc = SpectralCube.read(moment_cube())
    for shortcut, order, axis in ((sc.moment0, 0, 0),
                                  (sc.moment1, 1, 2),
                                  (sc.moment2, 2, 1)):
        assert_allclose(shortcut(axis=axis), MOMENTS[order][axis])
def test_linewidth():
    """moment2 warns that it is a variance map; linewidth_* do not warn
    and are sqrt(m2) and sqrt(m2) * 2*sqrt(2*ln 2) respectively."""
    mc_hdu = moment_cube()
    sc = SpectralCube.read(mc_hdu)
    with warnings.catch_warnings(record=True) as w:
        # moment2 with no axis argument: must produce exactly one
        # VarianceWarning with this exact wording.
        assert_allclose(sc.moment2(), MOMENTS[2][0])
        assert len(w) == 1
        assert w[0].category == VarianceWarning
        assert str(w[0].message) == ("Note that the second moment returned will be a "
                                     "variance map. To get a linewidth map, use the "
                                     "SpectralCube.linewidth_fwhm() or "
                                     "SpectralCube.linewidth_sigma() methods instead.")
    with warnings.catch_warnings(record=True) as w:
        assert_allclose(sc.linewidth_sigma(), MOMENTS[2][0] ** 0.5)
        # 2.3548... == 2*sqrt(2*ln 2), the Gaussian sigma-to-FWHM factor.
        assert_allclose(sc.linewidth_fwhm(), MOMENTS[2][0] ** 0.5 * 2.3548200450309493)
        # The linewidth helpers themselves must not warn.
        assert len(w) == 0
def test_preserve_unit():
    """Converting the spectral axis to km/s carries through to moments."""
    cube_kms = SpectralCube.read(moment_cube()).with_spectral_unit(u.km/u.s)
    zeroth = cube_kms.moment0(axis=0)
    first = cube_kms.moment1(axis=0)
    assert_allclose(zeroth, MOMENTS[0][0].to(u.K*u.km/u.s))
    assert_allclose(first, MOMENTS[1][0].to(u.km/u.s))
def test_with_flux_unit():
    """
    As of Issue 184, redundant with test_reference
    """
    sc = SpectralCube.read(moment_cube())
    sc._unit = u.K
    converted = sc.with_spectral_unit(u.km/u.s)
    zeroth = converted.moment0(axis=0)
    first = converted.moment1(axis=0)
    assert sc.unit == u.K
    assert sc.filled_data[:].unit == u.K
    assert_allclose(zeroth, MOMENTS[0][0].to(u.K*u.km/u.s))
    assert_allclose(first, MOMENTS[1][0].to(u.km/u.s))
@pytest.mark.parametrize(('order', 'axis', 'how'),
                         [(o, a, h)
                          for o in range(3)
                          for a in range(3)
                          for h in ('cube', 'slice', 'auto', 'ray')])
def test_how_withfluxunit(order, axis, how):
    """
    Regression test for issue 180

    As of issue 184, this is mostly redundant with test_reference except that
    it (kind of) checks that units are set
    """
    cube = SpectralCube.read(moment_cube())
    cube._unit = u.K
    result = cube.moment(order=order, axis=axis, how=how)
    assert cube.unit == u.K
    assert cube.filled_data[:].unit == u.K
    assert_allclose(result, MOMENTSu[order][axis])
| |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
    # Auto-generated hoverlabel node for the scatterpolargl trace type.

    # class properties
    # --------------------
    # Dotted location of this node in the figure hierarchy; used by the
    # plotly base classes for validation and serialization.
    _parent_path_str = "scatterpolargl"
    _path_str = "scatterpolargl.hoverlabel"
    # Property names this node accepts; anything else is rejected.
    _valid_props = {
        "align",
        "alignsrc",
        "bgcolor",
        "bgcolorsrc",
        "bordercolor",
        "bordercolorsrc",
        "font",
        "namelength",
        "namelengthsrc",
    }
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for align .
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for bgcolor .
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
bordercolor .
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
    """
    Font used in hover labels.

    The 'font' property is an instance of Font that may be specified as:
      - An instance of
        :class:`plotly.graph_objs.scatterpolargl.hoverlabel.Font`
      - A dict of string/value properties that will be passed to the
        Font constructor. Supported keys: color, colorsrc, family
        (HTML font family; multiple comma-separated families express
        preference order — Chart Studio servers support "Arial",
        "Balto", "Courier New", "Droid Sans", "Droid Serif",
        "Droid Sans Mono", "Gravitas One", "Old Standard TT",
        "Open Sans", "Overpass", "PT Sans Narrow", "Raleway",
        "Times New Roman"), familysrc, size, sizesrc. The *src keys
        are Chart Studio Cloud source references for their base key.

    Returns
    -------
    plotly.graph_objs.scatterpolargl.hoverlabel.Font
    """
    return self["font"]

@font.setter
def font(self, value):
    """Assign the hover-label font (dict or Font instance)."""
    self["font"] = value
# namelength
# ----------
@property
def namelength(self):
    """
    Default length (in number of characters) of the trace name in the
    hover labels for all traces. -1 shows the whole name regardless of
    length. 0-3 shows the first 0-3 characters; an integer >3 shows the
    whole name if it is shorter than that, otherwise truncates to
    `namelength - 3` characters and adds an ellipsis.

    The 'namelength' property is an integer and may be specified as:
      - An int (or float that will be cast to an int) in the
        interval [-1, 9223372036854775807]
      - A tuple, list, or one-dimensional numpy array of the above

    Returns
    -------
    int|numpy.ndarray
    """
    return self["namelength"]

@namelength.setter
def namelength(self, value):
    """Assign the hover-label trace-name length limit."""
    self["namelength"] = value
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
    """
    Source reference on Chart Studio Cloud for `namelength`.

    The 'namelengthsrc' property must be specified as a string or
    as a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["namelengthsrc"]

@namelengthsrc.setter
def namelengthsrc(self, value):
    """Assign the Chart Studio Cloud source reference for `namelength`."""
    self["namelengthsrc"] = value
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
    # Plain-text description of every supported property; consumed by
    # the figure help/validation machinery, so the string content is
    # runtime data and must not be reworded.
    return """\
        align
            Sets the horizontal alignment of the text content
            within hover label box. Has an effect only if the hover
            label text spans more two or more lines
        alignsrc
            Sets the source reference on Chart Studio Cloud for
            align .
        bgcolor
            Sets the background color of the hover labels for this
            trace
        bgcolorsrc
            Sets the source reference on Chart Studio Cloud for
            bgcolor .
        bordercolor
            Sets the border color of the hover labels for this
            trace.
        bordercolorsrc
            Sets the source reference on Chart Studio Cloud for
            bordercolor .
        font
            Sets the font used in hover labels.
        namelength
            Sets the default length (in number of characters) of
            the trace name in the hover labels for all traces. -1
            shows the whole name regardless of length. 0-3 shows
            the first 0-3 characters, and an integer >3 will show
            the whole name if it is less than that many characters,
            but if it is longer, will truncate to `namelength - 3`
            characters and add an ellipsis.
        namelengthsrc
            Sets the source reference on Chart Studio Cloud for
            namelength .
        """
def __init__(
    self,
    arg=None,
    align=None,
    alignsrc=None,
    bgcolor=None,
    bgcolorsrc=None,
    bordercolor=None,
    bordercolorsrc=None,
    font=None,
    namelength=None,
    namelengthsrc=None,
    **kwargs
):
    """
    Construct a new Hoverlabel object

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or
        an instance of
        :class:`plotly.graph_objs.scatterpolargl.Hoverlabel`
    align
        Horizontal alignment of the text content within the hover
        label box; only relevant when the label spans two or more
        lines.
    alignsrc
        Chart Studio Cloud source reference for `align`.
    bgcolor
        Background color of the hover labels for this trace.
    bgcolorsrc
        Chart Studio Cloud source reference for `bgcolor`.
    bordercolor
        Border color of the hover labels for this trace.
    bordercolorsrc
        Chart Studio Cloud source reference for `bordercolor`.
    font
        Font used in hover labels.
    namelength
        Default length (in characters) of the trace name shown in
        hover labels; -1 shows the whole name, 0-3 show that many
        leading characters, larger values truncate with an ellipsis.
    namelengthsrc
        Chart Studio Cloud source reference for `namelength`.

    Returns
    -------
    Hoverlabel
    """
    super(Hoverlabel, self).__init__("hoverlabel")

    # Fast path: adopt an existing parent without re-validating.
    if "_parent" in kwargs:
        self._parent = kwargs["_parent"]
        return

    # Normalize `arg` into a private dict we are free to consume.
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        arg = _copy.copy(arg)
    else:
        raise ValueError(
            """\
The first argument to the plotly.graph_objs.scatterpolargl.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolargl.Hoverlabel`"""
        )

    # Handle skip_invalid / validation flags.
    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Populate the data dict: an explicit keyword argument wins over the
    # corresponding entry popped from `arg`; None means "not provided".
    for prop_name, explicit in (
        ("align", align),
        ("alignsrc", alignsrc),
        ("bgcolor", bgcolor),
        ("bgcolorsrc", bgcolorsrc),
        ("bordercolor", bordercolor),
        ("bordercolorsrc", bordercolorsrc),
        ("font", font),
        ("namelength", namelength),
        ("namelengthsrc", namelengthsrc),
    ):
        from_arg = arg.pop(prop_name, None)
        chosen = explicit if explicit is not None else from_arg
        if chosen is not None:
            self[prop_name] = chosen

    # Whatever remains in `arg`, plus unrecognized kwargs.
    self._process_kwargs(**dict(arg, **kwargs))

    # Reset skip_invalid
    self._skip_invalid = False
| |
"""Lists all available ISO files."""
from baseCmd import *
from baseResponse import *
class listIsosCmd(baseCmd):
    """Lists all available ISO files."""
    # NOTE: class-level dict, shared by all instances (mirrors the
    # generated-code convention of this API binding).
    typeInfo = {}

    def __init__(self):
        self.isAsync = "false"
        # (parameter name, API type, default value) for every request field.
        request_fields = (
            ("account", "string", None),       # list by account; use with domainId
            ("bootable", "boolean", None),     # true if the ISO is bootable
            ("domainid", "uuid", None),        # restrict to the given domain
            ("hypervisor", "string", None),    # restrict search to a hypervisor
            ("id", "uuid", None),              # list ISO by ID
            ("isofilter", "string", None),     # featured|self|selfexecutable|sharedexecutable|executable|community|all
            ("ispublic", "boolean", None),     # true if publicly available to all users
            ("isready", "boolean", None),      # true if ready to be deployed
            ("isrecursive", "boolean", None),  # recurse from domainId down to leaves
            ("keyword", "string", None),       # list by keyword
            ("listall", "boolean", None),      # list everything the caller may see
            ("name", "string", None),          # list all ISOs by name
            ("page", "integer", None),
            ("pagesize", "integer", None),
            ("projectid", "uuid", None),       # list objects by project
            ("showremoved", "boolean", None),  # include removed ISOs
            ("tags", "map", []),               # filter by tags (key/value pairs)
            ("zoneid", "uuid", None),          # the ID of the zone
        )
        for field_name, field_type, default in request_fields:
            setattr(self, field_name, default)
            self.typeInfo[field_name] = field_type
        # No parameter is mandatory for this call.
        self.required = []
class listIsosResponse(baseResponse):
    """Response schema for the listIsos API call."""
    # NOTE: class-level dict, shared by all instances (mirrors the
    # generated-code convention of this API binding).
    typeInfo = {}

    def __init__(self):
        # (field name, API type) for every scalar response field.
        response_fields = (
            ("id", "string"),                    # the template ID
            ("account", "string"),               # owning account name
            ("accountid", "string"),             # owning account id
            ("bootable", "boolean"),             # true if the ISO is bootable
            ("checksum", "string"),              # checksum of the template
            ("created", "date"),                 # creation date
            ("crossZones", "boolean"),           # managed across all zones
            ("details", "map"),                  # additional key/value details
            ("displaytext", "string"),           # display text
            ("domain", "string"),                # owning domain name
            ("domainid", "string"),              # owning domain id
            ("format", "imageformat"),           # image format
            ("hostid", "string"),                # secondary storage host id
            ("hostname", "string"),              # secondary storage host name
            ("hypervisor", "string"),            # hypervisor the template runs on
            ("isdynamicallyscalable", "boolean"),  # contains XS tools for dynamic scaling
            ("isextractable", "boolean"),        # extractable by users
            ("isfeatured", "boolean"),           # featured template
            ("ispublic", "boolean"),             # public template
            ("isready", "boolean"),              # ready to deploy from
            ("name", "string"),                  # template name
            ("ostypeid", "string"),              # OS type id
            ("ostypename", "string"),            # OS type name
            ("passwordenabled", "boolean"),      # reset-password feature enabled
            ("project", "string"),               # project name
            ("projectid", "string"),             # project id
            ("removed", "date"),                 # removal date
            ("size", "long"),                    # template size
            ("sourcetemplateid", "string"),      # parent template id, if any
            ("sshkeyenabled", "boolean"),        # sshkey enabled
            ("status", "string"),                # template status
            ("templatetag", "string"),           # template tag
            ("templatetype", "string"),          # template type
            ("zoneid", "string"),                # zone id
            ("zonename", "string"),              # zone name
        )
        for field_name, field_type in response_fields:
            setattr(self, field_name, None)
            self.typeInfo[field_name] = field_type
        # List of resource tags associated with the template.
        self.tags = []
        # Bookkeeping for the latest async job acting on this object.
        self.jobid = None
        self.typeInfo['jobid'] = ''
        self.jobstatus = None
        self.typeInfo['jobstatus'] = ''

    class tags:
        """A single resource tag (key/value pair) attached to a template."""

        def __init__(self):
            self.account = None       # account associated with the tag
            self.customer = None      # customer associated with the tag
            self.domain = None        # domain associated with the tag
            self.domainid = None      # ID of the domain associated with the tag
            self.key = None           # tag key name
            self.project = None       # project name the tag belongs to
            self.projectid = None     # project id the tag belongs to
            self.resourceid = None    # id of the tagged resource
            self.resourcetype = None  # type of the tagged resource
            self.value = None         # tag value
| |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
import os
import re
from flexget import plugin
from flexget.event import event
from flexget.utils.template import render_from_entry, RenderError
from flexget.utils import archive
log = logging.getLogger('decompress')
def fail_entry_with_error(entry, error):
    """
    Log *error* at ERROR level and mark *entry* as failed with the same
    message.
    """
    # Log first so the reason is visible even if fail() triggers further
    # handling downstream.
    log.error(error)
    entry.fail(error)
def open_archive_entry(entry):
    """
    Convenience method for opening archives from entries.

    Returns an archive.Archive object, or None when the entry has no
    'location' field or the archive cannot be opened (in which case the
    problem is logged and, for archive errors, the entry is failed).
    """
    # A missing 'location' means the entry is not a local file at all.
    try:
        archive_path = entry['location']
    except KeyError:
        log.error('Entry does not appear to represent a local file.')
        return None

    try:
        return archive.open_archive(archive_path)
    except archive.BadArchive as error:
        fail_entry_with_error(entry, 'Bad archive: %s (%s)' % (archive_path, error))
    except archive.NeedFirstVolume:
        # Multi-volume archive opened at the wrong volume; not fatal for
        # the entry, just skip it.
        log.error('Not the first volume: %s', archive_path)
    except archive.ArchiveError as error:
        fail_entry_with_error(entry, 'Failed to open Archive: %s (%s)' % (archive_path, error))
    return None
def get_destination_path(path, to, keep_dirs):
    """
    Generate the destination path for a given archived file.

    When *keep_dirs* is true the archive-internal path is preserved under
    *to*; otherwise only the file's basename is used.
    """
    suffix = path if keep_dirs else os.path.basename(path)
    return os.path.join(to, suffix)
def is_dir(info):
    """
    Return True when the archive member described by *info* is a directory.

    Backends that expose an ``isdir()`` predicate are trusted directly;
    otherwise a member whose filename has an empty basename (i.e. ends in
    a path separator) is treated as a directory.
    """
    if hasattr(info, 'isdir'):
        return info.isdir()
    return not os.path.basename(info.filename)
def makepath(path):
    """
    Create *path* (including intermediate directories) if it does not exist.

    Uses ``exist_ok=True`` so that a directory created concurrently between
    the existence check and ``makedirs()`` no longer raises — the original
    exists-then-create sequence had a TOCTOU race.
    """
    if not os.path.exists(path):
        log.debug('Creating path: %s', path)
        os.makedirs(path, exist_ok=True)
class Decompress(object):
    r"""
    Extracts files from Zip or RAR archives. By default this plugin will extract to the same
    directory as the source archive, preserving directory structure from the archive.

    This plugin requires the rarfile Python module and unrar command line utility to extract RAR
    archives.

    Configuration:

    to:                 Destination path; supports Jinja2 templating on the input entry. Fields such
                        as series_name must be populated prior to input into this plugin using
                        metainfo_series or similar. If no path is specified, archive contents will
                        be extracted in the same directory as the archive itself.
    keep_dirs:          [yes|no] (default: yes) Indicates whether to preserve the directory
                        structure from within the archive in the destination path.
    mask:               Shell-style file mask; any matching files will be extracted. When used, this
                        field will override regexp.
    regexp:             Regular expression pattern; any matching files will be extracted. Overridden
                        by mask if specified.
    unrar_tool:         Specifies the path of the unrar tool. Only necessary if its location is not
                        defined in the operating system's PATH environment variable.
    delete_archive:     [yes|no] (default: no) Delete this archive after extraction is completed.

    Example:

      decompress:
        to: '/Volumes/External/TV/{{series_name}}/Season {{series_season}}/'
        keep_dirs: yes
        regexp: '.*s\d{1,2}e\d{1,2}.*\.mkv'
    """

    schema = {
        'anyOf': [
            {'type': 'boolean'},
            {
                'type': 'object',
                'properties': {
                    'to': {'type': 'string'},
                    'keep_dirs': {'type': 'boolean'},
                    'mask': {'type': 'string'},
                    'regexp': {'type': 'string', 'format': 'regex'},
                    'unrar_tool': {'type': 'string'},
                    'delete_archive': {'type': 'boolean'}
                },
                'additionalProperties': False
            }
        ]
    }

    def prepare_config(self, config):
        """
        Normalize *config* into a dict with every option present.

        A bare boolean config becomes a dict of defaults; a 'mask' option is
        translated into an equivalent regexp (overriding any 'regexp'); if
        neither is given, every file in the archive matches.
        """
        from fnmatch import translate

        if not isinstance(config, dict):
            config = {}

        config.setdefault('to', '')
        config.setdefault('keep_dirs', True)
        config.setdefault('unrar_tool', '')
        config.setdefault('delete_archive', False)

        # If mask was specified, turn it in to a regexp
        if 'mask' in config:
            config['regexp'] = translate(config['mask'])
        # If no mask or regexp specified, accept all files
        if 'regexp' not in config:
            config['regexp'] = '.'

        return config

    def handle_entry(self, entry, config):
        """
        Extract matching files from the entry's archive into the configured
        destination (a Jinja2 template rendered against the entry), or next
        to the archive when no destination is set.

        Optionally delete the original archive if config['delete_archive']
        is True.
        """
        match = re.compile(config['regexp'], re.IGNORECASE).match
        archive_path = entry.get('location')
        if not archive_path:
            log.warning('Entry does not appear to represent a local file.')
            return
        archive_dir = os.path.dirname(archive_path)

        if not os.path.exists(archive_path):
            log.warning('File no longer exists: %s', archive_path)
            return

        arch = open_archive_entry(entry)
        if not arch:
            return

        to = config['to']
        if to:
            try:
                to = render_from_entry(to, entry)
            except RenderError as error:
                log.error('Could not render path: %s', to)
                entry.fail(error)
                return
        else:
            to = archive_dir

        for info in arch.infolist():
            destination = get_destination_path(info.filename, to, config['keep_dirs'])
            dest_dir = os.path.dirname(destination)
            arch_file = os.path.basename(info.filename)

            if is_dir(info):
                log.debug('Appears to be a directory: %s', info.filename)
                continue

            # Match against the basename only, not the archive-internal path.
            if not match(arch_file):
                log.debug('File did not match regexp: %s', arch_file)
                continue

            log.debug('Found matching file: %s', info.filename)

            log.debug('Creating path: %s', dest_dir)
            makepath(dest_dir)

            if os.path.exists(destination):
                log.verbose('File already exists: %s', destination)
                continue

            error_message = ''
            log.debug('Attempting to extract: %s to %s', arch_file, destination)
            try:
                arch.extract_file(info, destination)
            except archive.FSError as error:
                error_message = 'OS error while creating file: %s (%s)' % (destination, error)
            except archive.ArchiveError as error:
                error_message = 'Failed to extract file: %s in %s (%s)' % (info.filename,
                                                                           archive_path, error)
            if error_message:
                log.error(error_message)
                # BUG FIX: previously entry.fail(entry) was called, passing
                # the entry object itself as the failure reason instead of
                # the error text.
                entry.fail(error_message)

                if os.path.exists(destination):
                    log.debug('Cleaning up partially extracted file: %s', destination)
                    os.remove(destination)

                return

        if config['delete_archive']:
            arch.delete()
        else:
            arch.close()

    @plugin.priority(255)
    def on_task_output(self, task, config):
        """Task handler for archive_extract"""
        if isinstance(config, bool) and not config:
            return

        config = self.prepare_config(config)
        archive.rarfile_set_tool_path(config)

        archive.rarfile_set_path_sep(os.path.sep)

        for entry in task.accepted:
            self.handle_entry(entry, config)
@event('plugin.register')
def register_plugin():
    """Register the Decompress plugin with FlexGet under the name
    'decompress' (plugin API version 2)."""
    plugin.register(Decompress, 'decompress', api_ver=2)
| |
#
# AllOrNothing.py : all-or-nothing package transformations
#
# Part of the Python Cryptography Toolkit
#
# Written by Andrew M. Kuchling and others
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""This file implements all-or-nothing package transformations.
An all-or-nothing package transformation is one in which some text is
transformed into message blocks, such that all blocks must be obtained before
the reverse transformation can be applied. Thus, if any blocks are corrupted
or lost, the original message cannot be reproduced.
An all-or-nothing package transformation is not encryption, although a block
cipher algorithm is used. The encryption key is randomly generated and is
extractable from the message blocks.
This class implements the All-Or-Nothing package transformation algorithm
described in:
Ronald L. Rivest. "All-Or-Nothing Encryption and The Package Transform"
http://theory.lcs.mit.edu/~rivest/fusion.pdf
"""
__revision__ = "$Id$"
import operator
import sys
from Crypto.Util.number import bytes_to_long, long_to_bytes
from Crypto.Util.py3compat import *
from functools import reduce
def isInt(x):
    """Return 1 when *x* can be added to an int (duck-typed numeric
    check, so floats and longs also qualify), else 0."""
    try:
        0 + x
    except TypeError:
        return 0
    return 1
class AllOrNothing:
    """Class implementing the All-or-Nothing package transform.

    Methods for subclassing:

        _inventkey(key_size):
            Returns a randomly generated key.  Subclasses can use this to
            implement better random key generating algorithms.  The default
            algorithm is probably not very cryptographically secure.
    """

    def __init__(self, ciphermodule, mode=None, IV=None):
        """AllOrNothing(ciphermodule, mode=None, IV=None)

        ciphermodule is a module implementing the cipher algorithm to
        use.  It must provide the PEP272 interface.

        Note that the encryption key is randomly generated
        automatically when needed.  Optional arguments mode and IV are
        passed directly through to the ciphermodule.new() method; they
        are the feedback mode and initialization vector to use.  All
        three arguments must be the same for the object used to create
        the digest, and to undigest'ify the message blocks.
        """
        self.__ciphermodule = ciphermodule
        self.__mode = mode
        self.__IV = IV
        # Fall back to a 16-byte key when the module does not advertise a
        # usable integer key_size (key_size == 0 means "variable length").
        self.__key_size = ciphermodule.key_size
        if not isInt(self.__key_size) or self.__key_size == 0:
            self.__key_size = 16

    # Byte repeated to build K0, the fixed, publicly-known key used for
    # the hash-block cipher.
    __K0digit = bchr(0x69)

    def digest(self, text):
        """digest(text:string) : [string]

        Perform the All-or-Nothing package transform on the given
        string.  Output is a list of message blocks describing the
        transformed text, where each block is a string of bit length equal
        to the ciphermodule's block_size.
        """
        # generate a random session key and K0, the key used to encrypt the
        # hash blocks.  Rivest calls this a fixed, publically-known encryption
        # key, but says nothing about the security implications of this key or
        # how to choose it.
        key = self._inventkey(self.__key_size)
        K0 = self.__K0digit * self.__key_size

        # we need two cipher objects here, one that is used to encrypt the
        # message blocks and one that is used to encrypt the hashes.  The
        # former uses the randomly generated key, while the latter uses the
        # well-known key.
        mcipher = self.__newcipher(key)
        hcipher = self.__newcipher(K0)

        # Pad the text so that its length is a multiple of the cipher's
        # block_size.  Pad with trailing spaces, which will be eliminated in
        # the undigest() step.
        block_size = self.__ciphermodule.block_size
        padbytes = block_size - (len(text) % block_size)
        text = text + b(' ') * padbytes

        # Run through the algorithm:
        # s: number of message blocks (size of text / block_size)
        # input sequence: m1, m2, ... ms
        # random key K' (`key' in the code)
        # Compute output sequence: m'1, m'2, ... m's' for s' = s + 1
        # Let m'i = mi ^ E(K', i) for i = 1, 2, 3, ..., s
        # Let m's' = K' ^ h1 ^ h2 ^ ... hs
        # where hi = E(K0, m'i ^ i) for i = 1, 2, ... s
        #
        # The one complication I add is that the last message block is hard
        # coded to the number of padbytes added, so that these can be stripped
        # during the undigest() step
        s = divmod(len(text), block_size)[0]
        blocks = []
        hashes = []
        for i in range(1, s+1):
            start = (i-1) * block_size
            end = start + block_size
            mi = text[start:end]
            assert len(mi) == block_size
            cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
            mticki = bytes_to_long(mi) ^ bytes_to_long(cipherblock)
            blocks.append(mticki)
            # calculate the hash block for this block
            hi = hcipher.encrypt(long_to_bytes(mticki ^ i, block_size))
            hashes.append(bytes_to_long(hi))

        # Add the padbytes length as a message block
        # (note: i carries over from the loop above, so this is block s+1)
        i = i + 1
        cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
        mticki = padbytes ^ bytes_to_long(cipherblock)
        blocks.append(mticki)

        # calculate this block's hash
        hi = hcipher.encrypt(long_to_bytes(mticki ^ i, block_size))
        hashes.append(bytes_to_long(hi))

        # Now calculate the last message block of the sequence 1..s'.  This
        # will contain the random session key XOR'd with all the hash blocks,
        # so that for undigest(), once all the hash blocks are calculated, the
        # session key can be trivially extracted.  Calculating all the hash
        # blocks requires that all the message blocks be received, thus the
        # All-or-Nothing algorithm succeeds.
        mtick_stick = bytes_to_long(key) ^ reduce(operator.xor, hashes)
        blocks.append(mtick_stick)

        # we convert the blocks to strings since in Python, byte sequences are
        # always represented as strings.  This is more consistent with the
        # model that encryption and hash algorithms always operate on strings.
        return [long_to_bytes(i,self.__ciphermodule.block_size) for i in blocks]

    def undigest(self, blocks):
        """undigest(blocks : [string]) : string

        Perform the reverse package transformation on a list of message
        blocks.  Note that the ciphermodule used for both transformations
        must be the same.  blocks is a list of strings of bit length
        equal to the ciphermodule's block_size.
        """
        # better have at least 2 blocks, for the padbytes package and the hash
        # block accumulator
        if len(blocks) < 2:
            raise ValueError("List must be at least length 2.")

        # blocks is a list of strings.  We need to deal with them as long
        # integers
        blocks = list(map(bytes_to_long, blocks))

        # Calculate the well-known key, to which the hash blocks are
        # encrypted, and create the hash cipher.
        K0 = self.__K0digit * self.__key_size
        hcipher = self.__newcipher(K0)
        block_size = self.__ciphermodule.block_size

        # Since we have all the blocks (or this method would have been called
        # prematurely), we can calculate all the hash blocks.
        hashes = []
        for i in range(1, len(blocks)):
            mticki = blocks[i-1] ^ i
            hi = hcipher.encrypt(long_to_bytes(mticki, block_size))
            hashes.append(bytes_to_long(hi))

        # now we can calculate K' (key).  remember the last block contains
        # m's' which we don't include here
        key = blocks[-1] ^ reduce(operator.xor, hashes)

        # and now we can create the cipher object
        mcipher = self.__newcipher(long_to_bytes(key, self.__key_size))

        # And we can now decode the original message blocks
        parts = []
        for i in range(1, len(blocks)):
            cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
            mi = blocks[i-1] ^ bytes_to_long(cipherblock)
            parts.append(mi)

        # The last message block contains the number of pad bytes appended to
        # the original text string, such that its length was an even multiple
        # of the cipher's block_size.  This number should be small enough that
        # the conversion from long integer to integer should never overflow
        padbytes = int(parts[-1])
        text = b('').join(map(long_to_bytes, parts[:-1]))
        return text[:-padbytes]

    def _inventkey(self, key_size):
        # Return key_size random bytes
        from Crypto import Random
        return Random.new().read(key_size)

    def __newcipher(self, key):
        # Build a cipher object with whichever of mode/IV were supplied at
        # construction time (the PEP272 new() signature is positional).
        if self.__mode is None and self.__IV is None:
            return self.__ciphermodule.new(key)
        elif self.__IV is None:
            return self.__ciphermodule.new(key, self.__mode)
        else:
            return self.__ciphermodule.new(key, self.__mode, self.__IV)
if __name__ == '__main__':
    import sys
    import getopt
    import base64

    usagemsg = '''\
Test module usage: %(program)s [-c cipher] [-l] [-h]

Where:
    --cipher module
    -c module
        Cipher module to use.  Default: %(ciphermodule)s
    --aslong
    -l
        Print the encoded message blocks as long integers instead of base64
        encoded strings
    --help
    -h
        Print this help message
'''

    ciphermodule = 'AES'
    aslong = 0

    def usage(code, msg=None):
        """Print optional *msg* plus the usage text, then exit with *code*."""
        if msg:
            print(msg)
        print(usagemsg % {'program': sys.argv[0],
                          'ciphermodule': ciphermodule})
        sys.exit(code)

    try:
        opts, args = getopt.getopt(sys.argv[1:],
                                   'c:l', ['cipher=', 'aslong'])
    except getopt.error as msg:
        usage(1, msg)

    if args:
        usage(1, 'Too many arguments')

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-c', '--cipher'):
            ciphermodule = arg
        elif opt in ('-l', '--aslong'):
            aslong = 1

    # ugly hack to force __import__ to give us the end-path module
    module = __import__('Crypto.Cipher.' + ciphermodule, None, None, ['new'])

    x = AllOrNothing(module)
    print('Original text:\n==========')
    print(__doc__)
    print('==========')
    msgblocks = x.digest(b(__doc__))
    print('message blocks:')
    for i, blk in enumerate(msgblocks):
        # base64 adds a trailing newline
        print(' %3d' % i, end=' ')
        if aslong:
            print(bytes_to_long(blk))
        else:
            # BUG FIX: base64.encodestring() was deprecated and removed in
            # Python 3.9; encodebytes() is the drop-in replacement with
            # identical output.
            print(base64.encodebytes(blk)[:-1])
    #
    # get a new undigest-only object so there's no leakage
    y = AllOrNothing(module)
    text = y.undigest(msgblocks)
    if text == b(__doc__):
        print('They match!')
    else:
        print('They differ!')
| |
"""The tests for Humidifier device actions."""
import pytest
import voluptuous_serialize
import homeassistant.components.automation as automation
from homeassistant.components.humidifier import DOMAIN, const, device_action
from homeassistant.const import STATE_ON
from homeassistant.helpers import config_validation as cv, device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
    """Return an empty, loaded, mock device registry bound to *hass*."""
    return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
    """Return an empty, loaded, mock entity registry bound to *hass*."""
    return mock_registry(hass)
async def test_get_actions(hass, device_reg, entity_reg):
    """Test we get the expected actions from a humidifier."""
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
    hass.states.async_set("humidifier.test_5678", STATE_ON, {})
    hass.states.async_set(
        "humidifier.test_5678", "attributes", {"supported_features": 1}
    )
    # With mode support (supported_features == 1) every action type is offered.
    expected_actions = [
        {
            "domain": DOMAIN,
            "type": action_type,
            "device_id": device_entry.id,
            "entity_id": "humidifier.test_5678",
        }
        for action_type in (
            "turn_on",
            "turn_off",
            "toggle",
            "set_humidity",
            "set_mode",
        )
    ]
    actions = await async_get_device_automations(hass, "action", device_entry.id)
    assert_lists_same(actions, expected_actions)
async def test_get_action_no_modes(hass, device_reg, entity_reg):
    """Test we get the expected actions from a humidifier."""
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
    hass.states.async_set("humidifier.test_5678", STATE_ON, {})
    hass.states.async_set(
        "humidifier.test_5678", "attributes", {"supported_features": 0}
    )
    # Without mode support (supported_features == 0) "set_mode" is not offered.
    expected_actions = [
        {
            "domain": DOMAIN,
            "type": action_type,
            "device_id": device_entry.id,
            "entity_id": "humidifier.test_5678",
        }
        for action_type in ("turn_on", "turn_off", "toggle", "set_humidity")
    ]
    actions = await async_get_device_automations(hass, "action", device_entry.id)
    assert_lists_same(actions, expected_actions)
async def test_get_action_no_state(hass, device_reg, entity_reg):
    """Test we get the expected actions from a humidifier."""
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
    # No state is set here, so mode support cannot be detected and
    # "set_mode" is not offered.
    expected_actions = [
        {
            "domain": DOMAIN,
            "type": action_type,
            "device_id": device_entry.id,
            "entity_id": "humidifier.test_5678",
        }
        for action_type in ("turn_on", "turn_off", "toggle", "set_humidity")
    ]
    actions = await async_get_device_automations(hass, "action", device_entry.id)
    assert_lists_same(actions, expected_actions)
async def test_action(hass):
    """Test for actions."""
    hass.states.async_set(
        "humidifier.entity",
        STATE_ON,
        {const.ATTR_AVAILABLE_MODES: [const.MODE_HOME, const.MODE_AWAY]},
    )

    def automation_for(event_type, extra_action):
        """Build one automation triggered by `event_type` running `extra_action`."""
        action = {
            "domain": DOMAIN,
            "device_id": "abcdefgh",
            "entity_id": "humidifier.entity",
        }
        action.update(extra_action)
        return {
            "trigger": {"platform": "event", "event_type": event_type},
            "action": action,
        }

    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                automation_for("test_event_turn_off", {"type": "turn_off"}),
                automation_for("test_event_turn_on", {"type": "turn_on"}),
                automation_for("test_event_toggle", {"type": "toggle"}),
                automation_for(
                    "test_event_set_humidity",
                    {"type": "set_humidity", "humidity": 35},
                ),
                automation_for(
                    "test_event_set_mode",
                    {"type": "set_mode", "mode": const.MODE_AWAY},
                ),
            ]
        },
    )

    # Register the mocks in the same order as before: set_humidity, set_mode,
    # turn_on, turn_off, toggle.
    calls = {
        service: async_mock_service(hass, "humidifier", service)
        for service in ("set_humidity", "set_mode", "turn_on", "turn_off", "toggle")
    }

    def assert_call_counts(**expected):
        """Assert the number of recorded calls per mocked service (default 0)."""
        for service, mocked in calls.items():
            assert len(mocked) == expected.get(service, 0)

    assert_call_counts()

    hass.bus.async_fire("test_event_set_humidity")
    await hass.async_block_till_done()
    assert_call_counts(set_humidity=1)

    hass.bus.async_fire("test_event_set_mode")
    await hass.async_block_till_done()
    assert_call_counts(set_humidity=1, set_mode=1)

    hass.bus.async_fire("test_event_turn_off")
    await hass.async_block_till_done()
    assert_call_counts(set_humidity=1, set_mode=1, turn_off=1)

    hass.bus.async_fire("test_event_turn_on")
    await hass.async_block_till_done()
    assert_call_counts(set_humidity=1, set_mode=1, turn_off=1, turn_on=1)

    hass.bus.async_fire("test_event_toggle")
    await hass.async_block_till_done()
    assert_call_counts(set_humidity=1, set_mode=1, turn_off=1, turn_on=1, toggle=1)
async def test_capabilities(hass):
    """Test getting capabilities."""

    async def serialized_capabilities(action_type):
        """Fetch action capabilities and serialize their extra fields."""
        capabilities = await device_action.async_get_action_capabilities(
            hass,
            {
                "domain": DOMAIN,
                "device_id": "abcdefgh",
                "entity_id": "humidifier.entity",
                "type": action_type,
            },
        )
        assert capabilities and "extra_fields" in capabilities
        return voluptuous_serialize.convert(
            capabilities["extra_fields"], custom_serializer=cv.custom_serializer
        )

    # Without a state the mode selector has no options.
    assert await serialized_capabilities("set_mode") == [
        {"name": "mode", "options": [], "required": True, "type": "select"}
    ]

    # Set state
    hass.states.async_set(
        "humidifier.entity",
        STATE_ON,
        {const.ATTR_AVAILABLE_MODES: [const.MODE_HOME, const.MODE_AWAY]},
    )

    # Set humidity
    assert await serialized_capabilities("set_humidity") == [
        {"name": "humidity", "required": True, "type": "integer"}
    ]

    # Set mode: options now come from the entity's available modes.
    assert await serialized_capabilities("set_mode") == [
        {
            "name": "mode",
            "options": [("home", "home"), ("away", "away")],
            "required": True,
            "type": "select",
        }
    ]
| |
from CGATReport.Tracker import *
from collections import OrderedDict as odict
from RnaseqTranscriptsReport import *
class TrackerGenemodels(RnaseqTranscriptsTracker):
    """Base tracker for gene-model tables.

    Tracks are the distinct values of the ``track`` column in this
    tracker's table.
    """
    mPattern = "_gene_expression"

    def getTracks(self, subset=None):
        statement = "SELECT DISTINCT track FROM %s" % self.name
        return self.getValues(statement)
class GeneModelsBenchmark(TrackerGenemodels):
    """Cuffcompare benchmark metrics (sensitivity/specificity per level,
    plus percentages of missed and novel exons/loci) for one track."""
    name = "agg_agg_agg_cuffcompare_benchmark"

    def getSlices(self, subset=None):
        '''slice by contig'''
        return ('all',)  # self.getValues( "SELECT DISTINCT contig FROM transcripts_compare" )

    def __call__(self, track, slice=None):
        # ``self.members(locals())`` supplies name, track and slice for the
        # %()s placeholders below; the local names must not be changed.
        return self.getRow( """
        SELECT baselevel_sp, baselevel_sn,
        exonlevel_sp, exonlevel_sn,
        transcriptlevel_sp, transcriptlevel_sn,
        intronlevel_sp, intronlevel_sn,
        locuslevel_sp, locuslevel_sn,
        100.0 * missedexons_counts / missedexons_total AS missed_exons,
        100.0 * missedloci_counts / missedloci_total AS missed_loci,
        100.0 * novelexons_counts / novelexons_total AS wrong_exons,
        100.0 * novelloci_counts / novelloci_total AS wrong_loci
        FROM %(name)s WHERE track = '%(track)s' AND contig = '%(slice)s'
        """ % self.members(locals()))
class GeneModelsCodes(RnaseqTranscriptsTracker):
    """Count transfrags per cuffcompare class code for each tracking table."""
    pattern = "(.*)_cuffcompare_tracking"
    as_tables = True

    def getSlices(self, subset=None):
        # One slice per cuffcompare class code character.
        return tuple("=cjeiopruxs.*")

    def __call__(self, track, slice=None):
        # NOTE(review): %(track)s/%(slice)s are not interpolated here --
        # presumably the CGATReport tracker base fills them from the caller's
        # locals; verify before refactoring.
        return self.getValue( """SELECT COUNT(*) AS ntransfrags FROM %(track)s WHERE code = '%(slice)s'""" )
class GeneModelsSharedLoci(RnaseqTranscriptsTracker):
    '''number of times a locus appears in experiments.'''
    mPattern = "_cuffcompare_loci"
    mAsTables = True

    def __call__(self, track, slice=None):
        # Histogram: number of experiments -> number of loci shared by that many.
        # NOTE(review): %(track)s is presumably interpolated by the tracker base
        # from the caller's locals.
        return self.getDict("SELECT nexperiments, count(*) AS nloci FROM %(track)s group by nexperiments")
class GeneModelsSharedTransfrags(RnaseqTranscriptsTracker):
    '''number of times a transfrag appears in experiments.'''
    mPattern = "_cuffcompare_tracking"
    mAsTables = True

    def getSlices(self, subset=None):
        # One slice per cuffcompare class code character.
        return tuple("=cjeiopruxs.*")

    def __call__(self, track, slice=None):
        # Histogram per class code: number of experiments -> transfrag count.
        # NOTE(review): placeholders are presumably interpolated by the tracker
        # base from the caller's locals.
        data = self.getDict(
            "SELECT nexperiments, count(*) AS ntransfrags FROM %(track)s WHERE code = '%(slice)s' group by nexperiments")
        return data
class ExpressionByClass(RnaseqTranscriptsTracker):
    '''average FPKM of transfrags, grouped by cuffcompare class code.'''
    mPattern = "_cuffcompare_tracking"
    mAsTables = False

    def getSlices(self, subset=None):
        return tuple("=cjeiopruxs.*")

    def __call__(self, track, slice=None):
        statement = """SELECT avg(FPKM)
        FROM %(track)s_cuffcompare_tracking AS t,
        %(track)s_cuffcompare_transcripts AS a
        WHERE code = '%(slice)s' AND
        a.transfrag_id = t.transfrag_id
        GROUP BY a.transfrag_id""" % {"track": track, "slice": slice}
        fpkm_values = self.getValues(statement)
        return odict((("fpkm", fpkm_values), ))
class TransfragCorrelation(RnaseqTranscriptsTracker):
    '''return correlation table
    '''
    mPattern = "_reproducibility"

    def getSlices(self, subset=None):
        # One slice per cuffcompare class code character.
        return tuple("=cjeiopruxs.*")

    def __call__(self, track, slice=None):
        # Pairwise reproducibility statistics for this track/code.
        # NOTE(review): %(track)s/%(slice)s are presumably interpolated by the
        # tracker base from the caller's locals -- verify before refactoring.
        data = self.getAll( """SELECT track1, track2,
        coeff, pvalue, significance,
        pairs, both_null, null1, null2,
        method, alternative
        FROM %(track)s_reproducibility
        WHERE code = '%(slice)s'""" )
        return data
class TransfragReproducibility2(RnaseqTranscriptsTracker):
    '''return proportion of transfrags present in a pair of replicates.
    '''
    pattern = "(.*)_reproducibility"

    def getSlices(self, subset=None):
        # One slice per cuffcompare class code character.
        return tuple("=cjeiopruxs.*")

    def __call__(self, track, slice):
        # pcalled: fraction of transfrags called in both replicates,
        # excluding pairs where both are null.
        # NOTE(review): placeholders presumably interpolated by the tracker base.
        data = self.getAll( """SELECT track1, track2,
        ROUND( CAST( not_null AS FLOAT) / (pairs-both_null),2) AS pcalled,
        ROUND( coeff, 2) as correlation
        FROM %(track)s_reproducibility
        WHERE code = '%(slice)s'""" )
        return data
class TransfragReproducibility(RnaseqTranscriptsTracker):
    '''return proportion of transfrags present in a pair of replicates.
    '''
    mPattern = "_reproducibility"

    def getSlices(self, subset=None):
        # One slice per cuffcompare class code character.
        return tuple("=cjeiopruxs.*")

    def __call__(self, track, slice=None):
        # Keyed by "track1_x_track2"; values are (pcalled, correlation).
        # NOTE(review): placeholders presumably interpolated by the tracker base.
        data = self.getDict( """SELECT track1 || '_x_' || track2 AS tracks,
        ROUND( CAST( not_null AS FLOAT) / (pairs-both_null),2) AS pcalled,
        ROUND( coeff, 2) as correlation
        FROM %(track)s_reproducibility
        WHERE code = '%(slice)s'""" )
        return data
class GenesetSummary(RnaseqTranscriptsTracker, SingleTableTrackerRows):
    '''summary properties of genesets.'''
    # One row per value of `column` in `table` (SingleTableTrackerRows contract).
    table = "geneset_stats"
    column = "track"
class GenesetMappability(RnaseqTranscriptsTracker):
    '''return average mappability for all transcripts.'''
    mPattern = "_mappability"

    def __call__(self, track, slice=None):
        # NOTE(review): %(track)s is presumably interpolated by the tracker base
        # from the caller's locals.
        return odict( (('mean', self.getValues( '''SELECT mean FROM %(track)s_mappability''' )), ))
class TranscriptClassCounts(RnaseqTranscriptsTracker):
    '''return number of transcripts within each class.'''
    pattern = "(.*)_class"

    def getSlices(self, subset=None):
        '''slice by contig'''
        # The extra "-" slice selects rows whose source is NULL.
        return self.getValues("SELECT DISTINCT source FROM agg_agg_agg_class") + ["-"]

    def __call__(self, track, slice=None):
        # Build the WHERE fragment for the source column; "-" means NULL.
        if slice == "-":
            stmt = "is NULL"
        else:
            stmt = "= '%(slice)s'" % locals()
        # Counts per "class-sense" label, where sense is decoded to
        # sense/antisense/anysense.
        # NOTE(review): %(track)s/%(stmt)s are presumably interpolated by the
        # tracker base from the caller's locals.
        return self.getDict( '''SELECT class || '-' || CASE WHEN sense = 's' THEN 'sense' WHEN sense = 'a' THEN 'antisense' ELSE 'anysense' END,
        COUNT(*) AS ntranscripts
        FROM %(track)s_class
        WHERE source %(stmt)s
        GROUP BY class, sense''' )
class TranscriptClassCountsSummaryBySource(RnaseqTranscriptsTracker):
    '''return number of transcripts within each class.'''
    pattern = "(.*)_class$"

    def __call__(self, track, slice=None):
        # Count transcripts per annotation source.
        # NOTE(review): %(track)s presumably interpolated by the tracker base.
        return self.getDict( '''SELECT source, COUNT(*) AS ntranscripts FROM %(track)s_class GROUP BY source''')
class TranscriptClassCountsSummaryByClass(RnaseqTranscriptsTracker):
    '''return number of transcripts within each class.'''
    pattern = "(.*)_class$"

    def __call__(self, track, slice=None):
        # Count transcripts per class label.
        # NOTE(review): %(track)s presumably interpolated by the tracker base.
        return self.getDict( '''SELECT class, COUNT(*) AS ntranscripts FROM %(track)s_class GROUP BY class''')
class BuildSummary(RnaseqTranscriptsTracker):
    '''summary of gene set construction.'''
    pattern = "(.*)_build_summary"

    def __call__(self, track, slice=None):
        # Per-category transcript counts recorded while building the gene set.
        # NOTE(review): %(track)s presumably interpolated by the tracker base.
        return self.getAll( '''SELECT category, transcripts FROM %(track)s_build_summary''')
| |
# coding=utf-8
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Basic classes and functions for Wikigeo items.'''
from absl import logging
import geopandas as gpd
from geopandas import GeoDataFrame, GeoSeries
import numpy as np
import os
import pandas as pd
import re
from shapely.geometry.point import Point
from shapely.geometry import box, mapping, LineString
import sys
import swifter
from typing import Any, Dict, Text
import torch
from transformers import DistilBertTokenizerFast
import attr
from cabby.model import util as mutil
from cabby.geo import util as gutil
from cabby.model.text import util
tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
CELLID_DIM = 64
@attr.s
class TextGeoDataset:
  """Container for the splits and S2Cell metadata of a text-geo dataset.

  `train` is the train split.
  `valid` is the valid split.
  `test` is the test split.
  `unique_cellids` is the unique S2Cells.
  `unique_cellids_binary` is the binary tensor of the unique S2Cells.
  `label_to_cellid` is the dictionary mapping labels to cellids.
  """
  train: Any = attr.ib()
  valid: Any = attr.ib()
  test: Any = attr.ib()
  unique_cellids: np.ndarray = attr.ib()
  unique_cellids_binary: torch.tensor = attr.ib()
  label_to_cellid: Dict[int, int] = attr.ib()

  @classmethod
  def from_TextGeoSplit(cls, train, valid, test, unique_cellids,
                        unique_cellids_binary, label_to_cellid):
    """Construct a TextGeoDataset."""
    return TextGeoDataset(
      train,
      valid,
      test,
      unique_cellids,
      unique_cellids_binary,
      label_to_cellid,
    )

  @classmethod
  def load(cls, dataset_path: Text, train_path_dataset: Text,
           valid_path_dataset: Text, test_path_dataset: Text,
           unique_cellid_path: Text, tensor_cellid_path: Text,
           label_to_cellid_path: Text):
    """Load a previously saved dataset from the given file paths."""
    logging.info("Loading dataset from <== {}.".format(dataset_path))
    train_dataset = torch.load(train_path_dataset)
    valid_dataset = torch.load(valid_path_dataset)
    test_dataset = torch.load(test_path_dataset)
    # allow_pickle is needed because the saved arrays contain Python objects.
    unique_cellid = np.load(unique_cellid_path, allow_pickle='TRUE')
    label_to_cellid = np.load(
      label_to_cellid_path, allow_pickle='TRUE').item()
    tens_cells = torch.load(tensor_cellid_path)
    n_cells = len(unique_cellid)  # NOTE(review): computed but never used.
    dataset_text = TextGeoDataset(
      train_dataset, valid_dataset, test_dataset,
      unique_cellid, tens_cells, label_to_cellid)
    return dataset_text

  @classmethod
  def save(cls, dataset_text: Any, dataset_path: Text,
           train_path_dataset: Text, valid_path_dataset: Text,
           test_path_dataset: Text, unique_cellid_path: Text,
           tensor_cellid_path: Text, label_to_cellid_path: Text):
    """Save the dataset splits and cell metadata under dataset_path."""
    # Raises FileExistsError if dataset_path already exists.
    os.mkdir(dataset_path)
    torch.save(dataset_text.train, train_path_dataset)
    torch.save(dataset_text.valid, valid_path_dataset)
    torch.save(dataset_text.test, test_path_dataset)
    np.save(unique_cellid_path, dataset_text.unique_cellids)
    torch.save(dataset_text.unique_cellids_binary, tensor_cellid_path)
    np.save(label_to_cellid_path, dataset_text.label_to_cellid)
    logging.info("Saved data to ==> {}.".format(dataset_path))
class TextGeoSplit(torch.utils.data.Dataset):
  """A split of of the RUN dataset.

  `points`: The ground true end-points of the samples.
  `labels`: The ground true label of the cellid.
  `cellids`: The ground truth S2Cell id.
  `neighbor_cells`: One neighbor cell id of the ground truth S2Cell id.
  `far_cells`: One far away cell id (in the region defined) of the ground truth
    S2Cell id.
  `dprob`: Gamma distribution probability.
  """

  def __init__(self, data: pd.DataFrame, s2level: int,
               unique_cells_df: pd.DataFrame, cellid_to_label: Dict[int, int],
               dprob: mutil.DistanceProbability):
    # Convert raw end-point coordinate lists into shapely Points.
    points = data.end_point.swifter.apply(
      lambda x: gutil.point_from_list_coord(x))
    data = data.assign(point=points)
    # Ground-truth S2 cell id for each end point at the requested level.
    data['cellid'] = data.point.swifter.apply(
      lambda x: gutil.cellid_from_point(x, s2level))
    # One neighboring cell per sample (used as a near negative).
    data['neighbor_cells'] = data.cellid.apply(
      lambda x: gutil.neighbor_cellid(x))
    start_points = data.start_point.swifter.apply(
      lambda x: gutil.point_from_list_coord(x))
    # Distances from every start point to each unique cell's point.
    dist_lists = start_points.apply(
      lambda start: calc_dist(start, unique_cells_df)
    )
    # Tokenize instructions.
    self.encodings = tokenizer(
      data.instructions.tolist(), truncation=True,
      padding=True, add_special_tokens=True)
    # Distance-based probability over the unique cells, one list per sample.
    # NOTE(review): apply(..., axis=1) suggests dist_lists is a DataFrame
    # (one column per unique cell) -- confirm before refactoring.
    self.prob = dist_lists.swifter.apply(
      lambda row: [dprob(dist) for dist in row.values.tolist()], axis=1) #.tolist()
    self.prob = self.prob.tolist()
    # One far-away cell per sample (hard negative), looked up from the
    # precomputed `far` column of unique_cells_df.
    data['far_cells'] = data.cellid.swifter.apply(
      lambda cellid: unique_cells_df[unique_cells_df['cellid']==cellid].far.iloc[0])
    cellids_array = np.array(data.cellid.tolist())
    neighbor_cells_array = np.array(data.neighbor_cells.tolist())
    far_cells_array = np.array(data.far_cells.tolist())
    self.points = data.point.apply(
      lambda x: gutil.tuple_from_point(x)).tolist()
    self.labels = data.cellid.apply(lambda x: cellid_to_label[x]).tolist()
    # Bit-vector representations of the cell ids (CELLID_DIM bits each).
    self.cellids = util.binary_representation(cellids_array, dim = CELLID_DIM)
    self.neighbor_cells = util.binary_representation(
      neighbor_cells_array, dim = CELLID_DIM)
    self.far_cells = util.binary_representation(
      far_cells_array, dim = CELLID_DIM)

  def __getitem__(self, idx: int):
    '''Supports indexing such that TextGeoDataset[i] can be used to get
    i-th sample.

    Arguments:
      idx: The index for which a sample from the dataset will be returned.
    Returns:
      A single sample including text, the correct cellid, a neighbor cellid,
      a far cellid, a point of the cellid and the label of the cellid.
    '''
    text = {key: torch.tensor(val[idx])
        for key, val in self.encodings.items()}
    cellid = torch.tensor(self.cellids[idx])
    neighbor_cells = torch.tensor(self.neighbor_cells[idx])
    far_cells = torch.tensor(self.far_cells[idx])
    point = torch.tensor(self.points[idx])
    label = torch.tensor(self.labels[idx])
    prob = torch.tensor(self.prob[idx])

    sample = {'text': text, 'cellid': cellid, 'neighbor_cells': neighbor_cells,
          'far_cells': far_cells, 'point': point, 'label': label, 'prob': prob}
    return sample

  def __len__(self):
    return len(self.cellids)
def calc_dist(start, unique_cells_df):
  """Distance from `start` to the point of every row in `unique_cells_df`."""
  def distance_to(row):
    return gutil.get_distance_between_points(start, row.point)
  return unique_cells_df.swifter.apply(distance_to, axis=1)
| |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from code import Code
from model import PropertyType
import any_helper
import cpp_util
class CppTypeGenerator(object):
  """Manages the types of properties and provides utilities for getting the
  C++ type out of a model.Property
  """
  def __init__(self, root_namespace, namespace=None, cpp_namespace=None):
    """Creates a cpp_type_generator. The given root_namespace should be of the
    format extensions::api::sub. The generator will generate code suitable for
    use in the given namespace.
    """
    self._type_namespaces = {}
    self._root_namespace = root_namespace.split('::')
    self._cpp_namespaces = {}
    if namespace and cpp_namespace:
      self._namespace = namespace
      self.AddNamespace(namespace, cpp_namespace)

  def AddNamespace(self, namespace, cpp_namespace):
    """Maps a model.Namespace to its C++ namespace name. All mappings are
    beneath the root namespace.

    Raises ValueError if a type's qualified name is already registered by a
    different namespace.
    """
    for type_ in namespace.types:
      qualified_name = self._QualifyName(namespace, type_)
      if qualified_name in self._type_namespaces:
        raise ValueError('Type %s is declared in both %s and %s' %
            (qualified_name, namespace.name,
             self._type_namespaces[qualified_name].name))
      self._type_namespaces[qualified_name] = namespace
    self._cpp_namespaces[namespace] = cpp_namespace

  def GetExpandedChoicesInParams(self, params):
    """Returns the given parameters with PropertyType.CHOICES parameters
    expanded so that each choice is a separate parameter and sets a unix_name
    for each choice.
    """
    expanded = []
    for param in params:
      if param.type_ == PropertyType.CHOICES:
        for choice in param.choices.values():
          # e.g. a choice of type string on param "foo" becomes "foo_string".
          choice.unix_name = (
              param.unix_name + '_' + choice.type_.name.lower())
          expanded.append(choice)
      else:
        expanded.append(param)
    return expanded

  def GetCppNamespaceName(self, namespace):
    """Gets the mapped C++ namespace name for the given namespace relative to
    the root namespace.
    """
    return self._cpp_namespaces[namespace]

  def GetRootNamespaceStart(self):
    """Get opening root namespace declarations.
    """
    c = Code()
    for namespace in self._root_namespace:
      c.Append('namespace %s {' % namespace)
    return c

  def GetRootNamespaceEnd(self):
    """Get closing root namespace declarations.
    """
    c = Code()
    for namespace in reversed(self._root_namespace):
      c.Append('}  // %s' % namespace)
    return c

  def GetNamespaceStart(self):
    """Get opening self._namespace namespace declaration.
    """
    return Code().Append('namespace %s {' %
                         self.GetCppNamespaceName(self._namespace))

  def GetNamespaceEnd(self):
    """Get closing self._namespace namespace declaration.
    """
    return Code().Append('}  // %s' %
                         self.GetCppNamespaceName(self._namespace))

  def GetEnumNoneValue(self, prop):
    """Gets the enum value in the given model.Property indicating no value has
    been set.
    """
    return '%s_NONE' % prop.unix_name.upper()

  def GetEnumValue(self, prop, enum_value):
    """Gets the enum value of the given model.Property of the given type.

    e.g VAR_STRING
    """
    return '%s_%s' % (
        prop.unix_name.upper(), cpp_util.Classname(enum_value.upper()))

  def GetChoicesEnumType(self, prop):
    """Gets the type of the enum for the given model.Property.

    e.g VarType
    """
    return cpp_util.Classname(prop.name) + 'Type'

  def GetType(self, prop, pad_for_generics=False, wrap_optional=False):
    """Translates a model.Property into its C++ type.

    If REF types from different namespaces are referenced, will resolve
    using self._type_namespaces.

    Use pad_for_generics when using as a generic to avoid operator ambiguity.

    Use wrap_optional to wrap the type in a scoped_ptr<T> if the Property is
    optional.

    Raises KeyError for an unresolvable REF and NotImplementedError for an
    unknown PropertyType.
    """
    cpp_type = None
    if prop.type_ == PropertyType.REF:
      dependency_namespace = self._ResolveTypeNamespace(prop.ref_type)
      if not dependency_namespace:
        raise KeyError('Cannot find referenced type: %s' % prop.ref_type)
      if self._namespace != dependency_namespace:
        # Qualify with the dependency's C++ namespace.
        cpp_type = '%s::%s' % (self._cpp_namespaces[dependency_namespace],
                               prop.ref_type)
      else:
        cpp_type = prop.ref_type
    elif prop.type_ == PropertyType.BOOLEAN:
      cpp_type = 'bool'
    elif prop.type_ == PropertyType.INTEGER:
      cpp_type = 'int'
    elif prop.type_ == PropertyType.DOUBLE:
      cpp_type = 'double'
    elif prop.type_ == PropertyType.STRING:
      cpp_type = 'std::string'
    elif prop.type_ == PropertyType.ENUM:
      cpp_type = cpp_util.Classname(prop.name)
    elif prop.type_ == PropertyType.ADDITIONAL_PROPERTIES:
      cpp_type = 'base::DictionaryValue'
    elif prop.type_ == PropertyType.ANY:
      cpp_type = any_helper.ANY_CLASS
    elif prop.type_ == PropertyType.OBJECT:
      cpp_type = cpp_util.Classname(prop.name)
    elif prop.type_ == PropertyType.ARRAY:
      # Arrays of class-like items hold them via linked_ptr.
      if prop.item_type.type_ in (
          PropertyType.REF, PropertyType.ANY, PropertyType.OBJECT):
        cpp_type = 'std::vector<linked_ptr<%s> > '
      else:
        cpp_type = 'std::vector<%s> '
      cpp_type = cpp_type % self.GetType(
          prop.item_type, pad_for_generics=True)
    elif prop.type_ == PropertyType.BINARY:
      cpp_type = 'base::BinaryValue'
    else:
      raise NotImplementedError(prop.type_)

    # Enums aren't wrapped because C++ won't allow it. Optional enums have a
    # NONE value generated instead.
    if wrap_optional and prop.optional and prop.type_ != PropertyType.ENUM:
      cpp_type = 'scoped_ptr<%s> ' % cpp_type
    if pad_for_generics:
      return cpp_type
    return cpp_type.strip()

  def GenerateForwardDeclarations(self):
    """Returns the forward declarations for self._namespace.

    Use after GetRootNamespaceStart. Assumes all namespaces are relative to
    self._root_namespace.
    """
    c = Code()
    for namespace, types in sorted(self._NamespaceTypeDependencies().items()):
      c.Append('namespace %s {' % namespace.name)
      for type_ in types:
        if namespace.types[type_].type_ == PropertyType.STRING:
          c.Append('typedef std::string %s;' % type_)
        elif namespace.types[type_].type_ == PropertyType.ARRAY:
          c.Append('typedef std::vector<%(item_type)s> %(name)s;')
          c.Substitute({'name': type_, 'item_type':
              self.GetType(namespace.types[type_].item_type,
                           wrap_optional=True)})
        else:
          c.Append('struct %s;' % type_)
      c.Append('}')
    c.Concat(self.GetNamespaceStart())
    for (name, type_) in self._namespace.types.items():
      # Functions declared inline within a type are not forward declared.
      if not type_.functions and type_.type_ == PropertyType.OBJECT:
        c.Append('struct %s;' % name)
    c.Concat(self.GetNamespaceEnd())
    return c

  def GenerateIncludes(self):
    """Returns the #include lines for self._namespace.
    """
    c = Code()
    for dependency in sorted(self._NamespaceTypeDependencies().keys()):
      c.Append('#include "%s/%s.h"' % (
          dependency.source_file_dir,
          self._cpp_namespaces[dependency]))
    return c

  def _ResolveTypeNamespace(self, ref_type):
    """Resolves a type name to its enclosing namespace.

    Searches for the ref_type first as an explicitly qualified name, then within
    the enclosing namespace, then within other namespaces that the current
    namespace depends upon. Returns None when unresolved.
    """
    if ref_type in self._type_namespaces:
      return self._type_namespaces[ref_type]

    qualified_name = self._QualifyName(self._namespace, ref_type)
    if qualified_name in self._type_namespaces:
      return self._type_namespaces[qualified_name]

    for (type_name, namespace) in self._type_namespaces.items():
      if type_name == self._QualifyName(namespace, ref_type):
        return namespace

    return None

  def GetReferencedProperty(self, prop):
    """Returns the property a property of type REF is referring to.

    If the property passed in is not of type PropertyType.REF, it will be
    returned unchanged.
    """
    if prop.type_ != PropertyType.REF:
      return prop
    return self._ResolveTypeNamespace(prop.ref_type).types.get(prop.ref_type,
                                                               None)

  def _QualifyName(self, namespace, name):
    # Qualified names use "." (schema convention), not "::".
    return '.'.join([namespace.name, name])

  def _NamespaceTypeDependencies(self):
    """Returns a dict containing a mapping of model.Namespace to the C++ type
    of type dependencies for self._namespace.
    """
    dependencies = set()
    for function in self._namespace.functions.values():
      for param in function.params:
        dependencies |= self._PropertyTypeDependencies(param)
      if function.callback:
        for param in function.callback.params:
          dependencies |= self._PropertyTypeDependencies(param)
    for type_ in self._namespace.types.values():
      for prop in type_.properties.values():
        dependencies |= self._PropertyTypeDependencies(prop)

    dependency_namespaces = dict()
    for dependency in dependencies:
      namespace = self._ResolveTypeNamespace(dependency)
      # Self-references are not external dependencies.
      if namespace != self._namespace:
        dependency_namespaces.setdefault(namespace, [])
        dependency_namespaces[namespace].append(dependency)
    return dependency_namespaces

  def _PropertyTypeDependencies(self, prop):
    """Recursively gets all the type dependencies of a property.
    """
    deps = set()
    if prop:
      if prop.type_ == PropertyType.REF:
        deps.add(prop.ref_type)
      elif prop.type_ == PropertyType.ARRAY:
        deps = self._PropertyTypeDependencies(prop.item_type)
      elif prop.type_ == PropertyType.OBJECT:
        for p in prop.properties.values():
          deps |= self._PropertyTypeDependencies(p)
    return deps

  def GeneratePropertyValues(self, property, line, nodoc=False):
    """Generates the Code to display all value-containing properties.

    NOTE: the parameter name `property` shadows the builtin; kept for
    interface compatibility with existing callers.
    """
    c = Code()
    if not nodoc:
      c.Comment(property.description)

    if property.has_value:
      c.Append(line % {
          "type": self._GetPrimitiveType(property.type_),
          "name": property.name,
          "value": property.value
        })
    else:
      has_child_code = False
      c.Sblock('namespace %s {' % property.name)
      for child_property in property.properties.values():
        child_code = self.GeneratePropertyValues(
            child_property,
            line,
            nodoc=nodoc)
        if child_code:
          has_child_code = True
          c.Concat(child_code)
      c.Eblock('}  // namespace %s' % property.name)
      if not has_child_code:
        c = None
    return c

  def _GetPrimitiveType(self, type_):
    """Like |GetType| but only accepts and returns C++ primitive types.
    """
    if type_ == PropertyType.BOOLEAN:
      return 'bool'
    elif type_ == PropertyType.INTEGER:
      return 'int'
    elif type_ == PropertyType.DOUBLE:
      return 'double'
    elif type_ == PropertyType.STRING:
      return 'char*'
    # BUG FIX: `type_ + ' is not primitive'` raised TypeError (cannot
    # concatenate a PropertyType to str) instead of the intended message.
    raise Exception('%s is not primitive' % type_)
| |
#
# This file contains implementations of the LLDB display panes in VIM
#
# The most generic way to define a new window is to inherit from VimPane
# and to implement:
# - get_content() - returns a string with the pane contents
#
# Optionally, to highlight text, implement:
# - get_highlights() - returns a map
#
# And call:
# - define_highlight(unique_name, colour)
# at some point in the constructor.
#
#
# If the pane shows some key-value data that is in the context of a
# single frame, inherit from FrameKeyValuePane and implement:
# - get_frame_content(self, SBFrame frame)
#
#
# If the pane presents some information that can be retrieved with
# a simple LLDB command while the subprocess is stopped, inherit
# from StoppedCommandPane and call:
# - self.setCommand(command, command_args)
# at some point in the constructor.
#
# Optionally, you can implement:
# - get_selected_line()
# to highlight a selected line and place the cursor there.
#
#
# FIXME: implement WatchlistPane to displayed watched expressions
# FIXME: define interface for interactive panes, like catching enter
# presses to change selected frame/thread...
#
import lldb
import vim
import sys
# ==============================================================
# Get the description of an lldb object or None if not available
# ==============================================================
# Shamelessly copy/pasted from lldbutil.py in the test suite
def get_description(obj, option=None):
    """Calls lldb_obj.GetDescription() and returns a string, or None.

    For SBTarget, SBBreakpointLocation, and SBWatchpoint lldb objects, an extra
    option can be passed in to describe the detailed level of description
    desired:
        o lldb.eDescriptionLevelBrief
        o lldb.eDescriptionLevelFull
        o lldb.eDescriptionLevelVerbose
    """
    # BUG FIX: getattr() without a default raises AttributeError for objects
    # lacking GetDescription; the documented contract is to return None.
    method = getattr(obj, 'GetDescription', None)
    if not method:
        return None
    # These classes accept an optional description-level argument.
    # (Renamed from `tuple`, which shadowed the builtin.)
    level_classes = (lldb.SBTarget, lldb.SBBreakpointLocation, lldb.SBWatchpoint)
    if isinstance(obj, level_classes):
        if option is None:
            option = lldb.eDescriptionLevelBrief

    stream = lldb.SBStream()
    if option is None:
        success = method(stream)
    else:
        success = method(stream, option)
    if not success:
        return None
    return stream.GetData()
def get_selected_thread(target):
    """ Return (thread, error); thread is None when it cannot be obtained. """
    process = target.GetProcess()
    if process is None or not process.IsValid():
        return (None, VimPane.MSG_NO_PROCESS)
    selected = process.GetSelectedThread()
    if selected is None or not selected.IsValid():
        return (None, VimPane.MSG_NO_THREADS)
    return (selected, "")
def get_selected_frame(target):
    """ Return (frame, error); frame is None when it cannot be obtained. """
    (selected_thread, error) = get_selected_thread(target)
    if selected_thread is None:
        return (None, error)
    selected_frame = selected_thread.GetSelectedFrame()
    if selected_frame is None or not selected_frame.IsValid():
        return (None, VimPane.MSG_NO_FRAME)
    return (selected_frame, "")
def _cmd(cmd):
    """ Execute a vim (ex) command.

    Fixed: the previous implementation first ran `call confirm('<cmd>')`,
    which popped a blocking confirmation dialog before every command --
    clearly a debugging leftover -- and could also break on commands
    containing a single quote.
    """
    vim.command(cmd)
def move_cursor(line, col=0):
    """ moves cursor to specified line and col
    NOTE(review): `col` is currently ignored -- only the line is set (via a
    "normal <line>gg" command); confirm whether column positioning is needed.
    """
    cw = vim.current.window
    if cw.cursor[0] != line:
        vim.command("execute \"normal %dgg\"" % line)
def winnr():
    """ Return the number of the currently selected window. """
    current = vim.eval("winnr()")
    return int(current)
def bufwinnr(name):
    """ Return the window number showing buffer `name` (-1 when none). """
    expr = "bufwinnr('%s')" % name
    return int(vim.eval(expr))
def goto_window(nr):
    """ Switch to window number `nr` (no-op when already selected). """
    if nr == winnr():
        return
    vim.command(str(nr) + ' wincmd w')
def goto_next_window():
    """ Cycle to the next window; return (window number, buffer name). """
    vim.command('wincmd w')
    num = winnr()
    bufname = vim.current.buffer.name
    return (num, bufname)
def goto_previous_window():
    """ go to previously selected window (the normal-mode CTRL-W p motion) """
    vim.command("execute \"normal \\<c-w>p\"")
def have_gui():
    """ True when vim runs under a GUI (gvim/MacVim), False otherwise. """
    result = vim.eval("has('gui_running')")
    return int(result) == 1
class PaneLayout(object):
    """ A container for a (vertical) group layout of VimPanes.

    Panes register themselves via registerForUpdates(); the layout can then
    draw (prepare), hide and refresh them as a group.
    """

    def __init__(self):
        # Maps pane name -> VimPane instance.
        self.panes = {}

    def havePane(self, name):
        """ Returns True if name is a registered pane, False otherwise """
        return name in self.panes

    def prepare(self, panes=()):
        """ Draw panes on screen. If an empty sequence is provided, show all.

        Fixed: the default was a mutable list literal (shared between calls,
        a Python anti-pattern); an empty tuple has identical semantics here.
        """
        # If we can't select a window contained in the layout, we are doing a
        # first draw.
        first_draw = not self.selectWindow(True)
        did_first_draw = False
        # Prepare each registered pane.
        for name in self.panes:
            if name in panes or not panes:
                if first_draw:
                    # First window in layout will be created with :vsp, and
                    # closed later.
                    vim.command(":vsp")
                    first_draw = False
                    did_first_draw = True
                self.panes[name].prepare()
        if did_first_draw:
            # Close the split window.
            vim.command(":q")
        self.selectWindow(False)

    def contains(self, bufferName=None):
        """ Returns True if a window named bufferName is contained in the
        layout, False otherwise.  If bufferName is None, the currently
        selected window is checked.
        """
        if not bufferName:
            bufferName = vim.current.buffer.name
        for p in self.panes:
            if bufferName is not None and bufferName.endswith(p):
                return True
        return False

    def selectWindow(self, select_contained=True):
        """ Selects a window contained in the layout (if select_contained is
        True) and returns True.  If select_contained is False, a window that
        is not contained is selected.  Returns False if no such window can
        be selected.
        """
        if select_contained == self.contains():
            # Simple case: we are already selected.
            return True
        # Otherwise, switch to next window until we find a matching window,
        # or reach the first window again.
        first = winnr()
        (curnum, curname) = goto_next_window()
        while not select_contained == self.contains(curname) and curnum != first:
            (curnum, curname) = goto_next_window()
        return self.contains(curname) == select_contained

    def hide(self, panes=()):
        """ Hide panes specified. If an empty sequence is provided, hide all. """
        for name in self.panes:
            if name in panes or not panes:
                self.panes[name].destroy()

    def registerForUpdates(self, p):
        """ Register pane p (keyed by its name) for layout management. """
        self.panes[p.name] = p

    def update(self, target, controller):
        """ Refresh the contents of every registered pane. """
        for name in self.panes:
            self.panes[name].update(target, controller)
class VimPane(object):
    """ A generic base class for a pane that displays stuff in a dedicated
    vim buffer/window.

    Subclasses provide get_content() (and optionally get_highlights() and
    get_selected_line()); this class manages creating, destroying and
    refreshing the window.
    """
    # Highlight groups: a stock group is reused under GUI vim; a custom group
    # (registered via define_highlight) is used in terminal vim.
    CHANGED_VALUE_HIGHLIGHT_NAME_GUI = 'ColorColumn'
    CHANGED_VALUE_HIGHLIGHT_NAME_TERM = 'lldb_changed'
    CHANGED_VALUE_HIGHLIGHT_COLOUR_TERM = 'darkred'
    SELECTED_HIGHLIGHT_NAME_GUI = 'Cursor'
    SELECTED_HIGHLIGHT_NAME_TERM = 'lldb_selected'
    SELECTED_HIGHLIGHT_COLOUR_TERM = 'darkblue'
    # Canned messages shown when the debugger state is unavailable.
    MSG_NO_TARGET = "Target does not exist."
    MSG_NO_PROCESS = "Process does not exist."
    MSG_NO_THREADS = "No valid threads."
    MSG_NO_FRAME = "No valid frame."
    # list of defined highlights, so we avoid re-defining them
    # (class-level: shared across all pane instances)
    highlightTypes = []
    def __init__(self, owner, name, open_below=False, height=3):
        # owner: the PaneLayout managing this pane; name: the buffer name.
        self.owner = owner
        self.name = name
        self.buffer = None
        self.maxHeight = 20
        self.openBelow = open_below
        self.height = height
        # Register with the owning layout so update() gets called on us.
        self.owner.registerForUpdates(self)
    def isPrepared(self):
        """ check window is OK (buffer valid and window currently visible) """
        # len(dir(buffer)) == 0 is how an invalidated vim buffer manifests.
        if self.buffer is None or len(
                dir(self.buffer)) == 0 or bufwinnr(self.name) == -1:
            return False
        return True
    def prepare(self, method='new'):
        """ check window is OK, if not then create """
        if not self.isPrepared():
            self.create(method)
    def on_create(self):
        # Hook for subclasses; called right after the window is created.
        pass
    def destroy(self):
        """ destroy window """
        if self.buffer is None or len(dir(self.buffer)) == 0:
            return
        vim.command('bdelete ' + self.name)
    def create(self, method):
        """ create window using the given vim split command ('new', 'vsp', ...) """
        if method != 'edit':
            belowcmd = "below" if self.openBelow else ""
            vim.command('silent %s %s %s' % (belowcmd, method, self.name))
        else:
            vim.command('silent %s %s' % (method, self.name))
        self.window = vim.current.window
        # Set LLDB pane options
        vim.command("setlocal buftype=nofile")  # Don't try to open a file
        vim.command("setlocal noswapfile")  # Don't use a swap file
        vim.command("set nonumber")  # Don't display line numbers
        # vim.command("set nowrap")  # Don't wrap text
        # Save some parameters and reference to buffer
        self.buffer = vim.current.buffer
        self.width = int(vim.eval("winwidth(0)"))
        self.height = int(vim.eval("winheight(0)"))
        self.on_create()
        goto_previous_window()
    def update(self, target, controller):
        """ updates buffer contents, restoring (or repositioning) the cursor """
        self.target = target
        if not self.isPrepared():
            # Window is hidden, or otherwise not ready for an update
            return
        original_cursor = self.window.cursor
        # Select pane
        goto_window(bufwinnr(self.name))
        # Clean and update content, and apply any highlights.
        self.clean()
        if self.write(self.get_content(target, controller)):
            self.apply_highlights()
            cursor = self.get_selected_line()
            if cursor is None:
                # Place the cursor at its original position in the window,
                # clamped so it stays inside the new contents.
                cursor_line = min(original_cursor[0], len(self.buffer))
                cursor_col = min(
                    original_cursor[1], len(
                        self.buffer[
                            cursor_line - 1]))
            else:
                # Place the cursor at the location requested by a VimPane
                # implementation
                cursor_line = min(cursor, len(self.buffer))
                cursor_col = self.window.cursor[1]
            self.window.cursor = (cursor_line, cursor_col)
        goto_previous_window()
    def get_selected_line(self):
        """ Returns the line number to move the cursor to, or None to leave
        it where the user last left it.
        Subclasses implement this to define custom behaviour.
        """
        return None
    def apply_highlights(self):
        """ Highlights each set of lines in each highlight group """
        highlights = self.get_highlights()
        for highlightType in highlights:
            lines = highlights[highlightType]
            if len(lines) == 0:
                continue
            # Build a :match pattern of the form /\%12l\|\%15l/ matching
            # exactly the listed line numbers.
            cmd = 'match %s /' % highlightType
            lines = ['\%' + '%d' % line + 'l' for line in lines]
            cmd += '\\|'.join(lines)
            cmd += '/'
            vim.command(cmd)
    def define_highlight(self, name, colour):
        """ Defines a highlight group; no-op if it already exists """
        if name in VimPane.highlightTypes:
            # highlight already defined
            return
        vim.command(
            "highlight %s ctermbg=%s guibg=%s" %
            (name, colour, colour))
        VimPane.highlightTypes.append(name)
    def write(self, msg):
        """ replace buffer contents with msg; returns False when vim is exiting """
        self.prepare()
        # NOTE(review): under Python 3 str(bytes) yields a "b'...'" literal;
        # this line looks written for Python 2 -- confirm target interpreter.
        msg = str(msg.encode("utf-8", "replace")).split('\n')
        try:
            self.buffer.append(msg)
            # Delete the spurious blank first line left behind by append().
            vim.command("execute \"normal ggdd\"")
        except vim.error:
            # cannot update window; happens when vim is exiting.
            return False
        move_cursor(1, 0)
        return True
    def clean(self):
        """ delete all lines in the buffer """
        self.prepare()
        # :%d deletes every line of the current buffer.
        vim.command(':%d')
        #self.buffer[:] = None
    def get_content(self, target, controller):
        """ subclasses implement this to provide pane content """
        # NOTE: assert is stripped under `python -O`; subclasses must override.
        assert(0 and "pane subclass must implement this")
        pass
    def get_highlights(self):
        """ Subclasses implement this to provide pane highlights.
        This function is expected to return a map of:
        { highlight_name ==> [line_number, ...], ... }
        """
        return {}
class FrameKeyValuePane(VimPane):
    """ Pane base class for key/value data scoped to the selected frame.

    Subclasses implement get_frame_content(frame) returning (key, value)
    pairs; values that changed since the previous stop are highlighted.
    """
    def __init__(self, owner, name, open_below):
        """ Initialize parent, define member variables, choose which highlight
        to use based on whether or not we have a gui (MacVim/Gvim).
        """
        VimPane.__init__(self, owner, name, open_below)
        # Map-of-maps key/value history { frame --> { variable_name,
        # variable_value } } -- used to detect values that changed between
        # stops so they can be highlighted.
        self.frameValues = {}
        if have_gui():
            self.changedHighlight = VimPane.CHANGED_VALUE_HIGHLIGHT_NAME_GUI
        else:
            self.changedHighlight = VimPane.CHANGED_VALUE_HIGHLIGHT_NAME_TERM
            self.define_highlight(VimPane.CHANGED_VALUE_HIGHLIGHT_NAME_TERM,
                                  VimPane.CHANGED_VALUE_HIGHLIGHT_COLOUR_TERM)
    def format_pair(self, key, value, changed=False):
        """ Formats a key/value pair. Appends a '*' if changed == True """
        marker = '*' if changed else ' '
        return "%s %s = %s\n" % (marker, key, value)
    def get_content(self, target, controller):
        """ Get content for a frame-aware pane. Also builds the list of lines that
        need highlighting (i.e. changed values.)
        """
        if target is None or not target.IsValid():
            return VimPane.MSG_NO_TARGET
        self.changedLines = []
        (frame, err) = get_selected_frame(target)
        if frame is None:
            return err
        # Line 1 of the pane is the frame description itself.
        output = get_description(frame)
        lineNum = 1
        # Retrieve the last values displayed for this frame; the history is
        # keyed by the description of the frame's block.
        frameId = get_description(frame.GetBlock())
        if frameId in self.frameValues:
            frameOldValues = self.frameValues[frameId]
        else:
            frameOldValues = {}
        # Read the frame variables
        vals = self.get_frame_content(frame)
        for (key, value) in vals:
            lineNum += 1
            # Unchanged when there is no history yet, or the stored value
            # matches; otherwise mark the pair (and its line) as changed.
            if len(frameOldValues) == 0 or (
                    key in frameOldValues and frameOldValues[key] == value):
                output += self.format_pair(key, value)
            else:
                output += self.format_pair(key, value, True)
                self.changedLines.append(lineNum)
        # Save values as oldValues
        newValues = {}
        for (key, value) in vals:
            newValues[key] = value
        self.frameValues[frameId] = newValues
        return output
    def get_highlights(self):
        # Highlight every line whose value changed since the previous stop.
        ret = {}
        ret[self.changedHighlight] = self.changedLines
        return ret
class LocalsPane(FrameKeyValuePane):
    """ Pane that displays the local variables of the selected frame. """

    def __init__(self, owner, name='locals'):
        FrameKeyValuePane.__init__(self, owner, name, open_below=True)
        # FIXME: allow users to customize display of args/locals/statics/scope
        self.arguments = True
        self.show_locals = True
        self.show_statics = True
        self.show_in_scope_only = True

    def format_variable(self, var):
        """ Return ("(Type) Name", "Value") strings for SBValue `var`. """
        value_str = var.GetValue()
        # SBValue.GetValue() returns None when the value is too big; show
        # an ellipsis instead.
        if value_str is None:
            value_str = "..."
        name_str = "(%s) %s" % (var.GetTypeName(), var.GetName())
        return (name_str, "%s" % value_str)

    def get_frame_content(self, frame):
        """ Return (key, value) pairs for the local variables in `frame`. """
        variables = frame.GetVariables(self.arguments,
                                       self.show_locals,
                                       self.show_statics,
                                       self.show_in_scope_only)
        return [self.format_variable(v) for v in variables]
class RegistersPane(FrameKeyValuePane):
    """ Pane that displays the contents of registers. """

    def __init__(self, owner, name='registers'):
        FrameKeyValuePane.__init__(self, owner, name, open_below=True)

    def format_register(self, reg):
        """ Return ("name", "value") strings for SBRegister `reg`. """
        value = reg.GetValue()
        # A register without a readable value is shown as an ellipsis.
        if value is None:
            value = "..."
        return (reg.GetName(), value.strip())

    def get_frame_content(self, frame):
        """ Return (name, value) pairs for all registers in `frame`,
        with a pseudo-entry marking each register group. """
        rows = []
        for reg_set in frame.GetRegisters():
            # hack the register group name into the list of registers...
            rows.append((" = = %s =" % reg_set.GetName(), ""))
            rows.extend(self.format_register(r) for r in reg_set)
        return rows
class CommandPane(VimPane):
    """ Pane that displays the output of an LLDB command. """

    def __init__(self, owner, name, open_below, process_required=True):
        VimPane.__init__(self, owner, name, open_below)
        # When True, a live process must exist before the command is run.
        self.process_required = process_required

    def setCommand(self, command, args=""):
        """ Configure the LLDB command (and arguments) this pane runs. """
        self.command = command
        self.args = args

    def get_content(self, target, controller):
        """ Run the configured command and return its output, or an error
        message when the target/process prerequisites are not met. """
        if not target:
            return VimPane.MSG_NO_TARGET
        if self.process_required and not target.GetProcess():
            return VimPane.MSG_NO_PROCESS
        (success, output) = controller.getCommandOutput(self.command, self.args)
        return output
class StoppedCommandPane(CommandPane):
    """ Pane that displays the output of an LLDB command when the process is
    stopped; otherwise displays process status. This class also implements
    highlighting for a single line (to show a single-line selected entity.)
    """

    def __init__(self, owner, name, open_below):
        """ Initialize parent and define highlight to use for selected line. """
        CommandPane.__init__(self, owner, name, open_below)
        if have_gui():
            self.selectedHighlight = VimPane.SELECTED_HIGHLIGHT_NAME_GUI
        else:
            self.selectedHighlight = VimPane.SELECTED_HIGHLIGHT_NAME_TERM
            self.define_highlight(VimPane.SELECTED_HIGHLIGHT_NAME_TERM,
                                  VimPane.SELECTED_HIGHLIGHT_COLOUR_TERM)

    def get_content(self, target, controller):
        """ Returns the output of a command that relies on the process being stopped.
        If the process is not in 'stopped' state, the process status is returned.
        """
        output = ""
        if not target or not target.IsValid():
            output = VimPane.MSG_NO_TARGET
        elif not target.GetProcess() or not target.GetProcess().IsValid():
            output = VimPane.MSG_NO_PROCESS
        elif target.GetProcess().GetState() == lldb.eStateStopped:
            (success, output) = controller.getCommandOutput(
                self.command, self.args)
        else:
            (success, output) = controller.getCommandOutput("process", "status")
        return output

    def get_highlights(self):
        """ Highlight the line under the cursor. Users moving the cursor has
        no effect on the selected line.
        """
        # Fixed: removed an unreachable duplicate `return ret` statement that
        # followed the return below.
        ret = {}
        line = self.get_selected_line()
        if line is not None:
            ret[self.selectedHighlight] = [line]
        return ret

    def get_selected_line(self):
        """ Subclasses implement this to control where the cursor (and selected
        highlight) is placed.  Returns None to leave the cursor alone.
        """
        return None
class DisassemblyPane(CommandPane):
    """ Pane that displays disassembly around the program counter. """

    def __init__(self, owner, name='disassembly'):
        super(DisassemblyPane, self).__init__(owner, name, open_below=True)
        # FIXME: let users customize the number of instructions to disassemble
        self.setCommand("disassemble", "-c %d -p" % self.maxHeight)
class ThreadPane(StoppedCommandPane):
    """ Pane that displays the list of threads. """

    def __init__(self, owner, name='threads'):
        super(ThreadPane, self).__init__(owner, name, open_below=False)
        self.setCommand("thread", "list")

    # FIXME: the method below assumed threads are listed in sequential order,
    # which turns out not to be the case, so selected-thread highlighting is
    # disabled until this can be fixed. LLDB prints a '*' beside the selected
    # thread anyway, so this is not too big of a problem.
    # def get_selected_line(self):
    #     """ Formula: selected_line = selected_thread_id + 1 """
    #     (thread, err) = get_selected_thread(self.target)
    #     if thread is None:
    #         return None
    #     return thread.GetIndexID() + 1
class BacktracePane(StoppedCommandPane):
    """ Pane that displays the backtrace of the selected thread. """

    def __init__(self, owner, name='backtrace'):
        super(BacktracePane, self).__init__(owner, name, open_below=False)
        self.setCommand("bt", "")

    def get_selected_line(self):
        """ Return the buffer line number of the selected frame.
        Formula: selected_line = selected_frame_id + 2
        FIXME: the above formula hack does not work when the function return
        value is printed in the bt window; the wrong line is highlighted.
        """
        (frame, err) = get_selected_frame(self.target)
        if frame is None:
            return None
        return frame.GetFrameID() + 2
class BreakpointsPane(CommandPane):
    """ Pane that lists breakpoints; usable even without a live process. """

    def __init__(self, owner, name='breakpoints'):
        CommandPane.__init__(self, owner, name,
                             open_below=False, process_required=False)
        self.setCommand("breakpoint", "list")
| |
# Copyright (c) 2021, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
from ...models._deprecation import deprecated as _deprecated
from coremltools.models.neural_network import NeuralNetworkBuilder # type: ignore
from coremltools.models import datatypes, MLModel # type: ignore
from coremltools.proto import FeatureTypes_pb2 as ft # type: ignore
from coremltools import (
_MINIMUM_CUSTOM_LAYER_SPEC_VERSION as IOS_11_2_SPEC_VERSION,
) # iOS 11.2
from coremltools import (
_MINIMUM_CUSTOM_MODEL_SPEC_VERSION as IOS_12_SPEC_VERSION,
) # iOS 12.0
from coremltools import _MINIMUM_NDARRAY_SPEC_VERSION as IOS_13_SPEC_VERSION # iOS 13.0
from coremltools import __version__ as ct_version
from coremltools.models import _METADATA_VERSION, _METADATA_SOURCE
from coremltools._deps import _HAS_ONNX
# ML model passes
from coremltools.converters.mil.backend.nn.passes.mlmodel_passes import (
remove_disconnected_layers,
transform_conv_crop,
)
if _HAS_ONNX:
import onnx
from onnx import shape_inference
from onnx import TensorProto
from typing import Tuple
from typing import Text, Union, Optional, Dict, Any, Iterable, Sequence, Callable, List
from ._operators import (
_convert_node,
_SEQUENCE_LAYERS_REGISTRY,
_ONNX_NODE_REGISTRY,
_add_const_inputs_if_required,
)
from ._operators_nd import _ONNX_NODE_REGISTRY_ND, _convert_node_nd
from ._graph import Graph, EdgeInfo, Transformer
from ._transformers import (
ConvAddFuser,
DropoutRemover,
ReshapeInitTensorFuser,
BNBroadcastedMulFuser,
BNBroadcastedAddFuser,
PixelShuffleFuser,
OutputRenamer,
AddModelInputsOutputs,
ConstantsToInitializers,
ImageScalerRemover,
ShapeOpRemover,
ConstantRemover,
ConstantFillToInitializers,
ReshapeTransposeReshape_pattern1,
CastOpRemover,
DeadCodeElimination,
PaddingOpRemover,
)
from ._error_utils import ErrorHandling
from ._graph_viz import plot_graph # type: ignore
USE_SHAPE_MAPPING = True
DEBUG = False
class SupportedVersion:
    """ Describes which iOS deployment targets the converter supports and
    which CoreML specification version each one maps to. """

    # Supported iOS Version
    # New OS Version must be added at the end to maintain backward version index
    supported_ios_version = ["11.2", "12", "13"]
    IOS_13_VERSION = supported_ios_version.index("13")
    # NOTE: historical misspelling kept for backward compatibility.
    ND_ARRARY_SUPPORT = IOS_13_VERSION

    @staticmethod
    def ios_support_check(minimum_ios_deployment_target):
        """ Return True when the target iOS version string is supported. """
        return minimum_ios_deployment_target in SupportedVersion.supported_ios_version

    @staticmethod
    def is_nd_array_supported(minimum_ios_deployment_target):
        """ Return True when the target iOS supports ND-array mapping
        (iOS 13 or later).  Raises TypeError for an unsupported version. """
        if not SupportedVersion.ios_support_check(minimum_ios_deployment_target):
            raise TypeError(
                "{} not supported. Please provide one of target iOS: {}".format(
                    minimum_ios_deployment_target,
                    SupportedVersion.supported_ios_version
                )
            )
        target_index = SupportedVersion.supported_ios_version.index(
            minimum_ios_deployment_target
        )
        return SupportedVersion.ND_ARRARY_SUPPORT <= target_index

    @staticmethod
    def get_supported_ios():
        """ Return the list of supported iOS version strings. """
        return SupportedVersion.supported_ios_version

    @staticmethod
    def get_specification_version(minimum_ios_deployment_target):
        """ Map a supported iOS version string to its CoreML spec version.
        Raises TypeError for an unsupported version. """
        if not SupportedVersion.ios_support_check(minimum_ios_deployment_target):
            # Fixed: the original passed the format arguments directly to
            # TypeError without ever formatting the message string.
            raise TypeError(
                "{} not supported. Please provide one of target iOS: {}".format(
                    minimum_ios_deployment_target,
                    SupportedVersion.supported_ios_version,
                )
            )
        if minimum_ios_deployment_target == "11.2":
            return IOS_11_2_SPEC_VERSION
        elif minimum_ios_deployment_target == "12":
            return IOS_12_SPEC_VERSION
        else:
            return IOS_13_SPEC_VERSION
"""
inputs: list of tuples.
[Tuple]: [(name, type, shape)]
"""
def _make_coreml_input_features(
    graph, onnx_coreml_input_shape_map, disable_coreml_rank5_mapping=False
):  # type: (...) -> Sequence[Tuple[Text, datatypes.Array]]
    """
    Build the CoreML input feature descriptions (name, datatypes.Array) for
    every graph input.  Each input_ is a (name, type, shape) tuple.

    Side effect: records the chosen mapping in
    graph.onnx_coreml_shape_mapping (when USE_SHAPE_MAPPING is on).

    If "disable_coreml_rank5_mapping" is False, then:
    ONNX shapes to CoreML static shapes mapping
    length==1: [C]
    length==2: [B,C]
    length==3: [C,H,W] or [Seq,B,C]
    length==4: [B,C,H,W]
    If "disable_coreml_rank5_mapping" is True, then
    onnx shapes are mapped "as is" to CoreML.
    """
    inputs = graph.inputs
    op_types = graph.blob_to_op_type
    features = []
    for input_ in inputs:
        shape = input_[2]
        # iOS 13+ ND-array path: shapes pass through unchanged (rank <= 5).
        if disable_coreml_rank5_mapping:
            if len(shape) > 5:
                raise ValueError(
                    "ONNX input %s has a rank greater than 5, which is not supported in CoreML framework"
                    % str(input_[0])
                )
            else:
                features.append((str(input_[0]), datatypes.Array(*shape)))
                continue
        # User-supplied mapping overrides the heuristics below.
        # Convention: 0=Seq, 1=Batch, 2=channel, 3=height, 4=width.
        if USE_SHAPE_MAPPING and input_[0] in onnx_coreml_input_shape_map:
            mapp = onnx_coreml_input_shape_map[input_[0]]
            if len(mapp) != len(shape):
                raise ValueError(
                    "Incorrect value in onnx_coreml_input_shape_map argument"
                )
            graph.onnx_coreml_shape_mapping[input_[0]] = mapp
            # Project the ONNX dims onto the CoreML (C,H,W) axes.
            coreml_shape = [1, 1, 1]
            for i in range(3):
                if (i + 2) in mapp:
                    coreml_shape[i] = shape[mapp.index(i + 2)]
            shape = coreml_shape
        else:
            # Heuristic rank-based mapping (see docstring).
            if len(shape) == 0:
                shape = [1, 1, 1]
            elif len(shape) == 1:
                # assume [C]
                if USE_SHAPE_MAPPING:
                    graph.onnx_coreml_shape_mapping[input_[0]] = [2]
            elif len(shape) == 2:
                # assume [Batch,C]
                shape = [shape[1]]
                if USE_SHAPE_MAPPING:
                    graph.onnx_coreml_shape_mapping[input_[0]] = [1, 2]
            elif len(shape) == 3:
                # assume [C,H,W] unless its connected an op that bestows another mapping
                if input_[0] in op_types and len(op_types[input_[0]]) == 1:
                    if str(op_types[input_[0]][0]) in _SEQUENCE_LAYERS_REGISTRY:
                        # (Seq,B,C)
                        shape = [shape[2]]
                        if USE_SHAPE_MAPPING:
                            graph.onnx_coreml_shape_mapping[input_[0]] = [0, 1, 2]
                    elif str(op_types[input_[0]][0]) in [
                        "MaxPool",
                        "AveragePool",
                        "BatchNormalization",
                        "GlobalAveragePool",
                        "GlobalLpPool",
                        "GlobalMaxPool",
                        "InstanceNormalization",
                        "LRN",
                        "LpPool",
                        "Conv",
                        "ConvTranspose",
                    ]:
                        # (B,C,W)
                        shape = [shape[1], 1, shape[2]]
                        if USE_SHAPE_MAPPING:
                            graph.onnx_coreml_shape_mapping[input_[0]] = [1, 2, 4]
                    else:
                        if USE_SHAPE_MAPPING:
                            graph.onnx_coreml_shape_mapping[input_[0]] = [2, 3, 4]
                else:
                    if USE_SHAPE_MAPPING:
                        graph.onnx_coreml_shape_mapping[input_[0]] = [2, 3, 4]
            elif len(shape) == 4:  # (B,C,H,W) --> (C,H,W)
                shape = shape[1:]
                if USE_SHAPE_MAPPING:
                    graph.onnx_coreml_shape_mapping[input_[0]] = [1, 2, 3, 4]
            else:
                raise ValueError(
                    "CoreML input cannot be more than rank 4. Input shape: %s, input: '%s' "
                    % (str(shape), str(input_[0]))
                )
        features.append((str(input_[0]), datatypes.Array(*shape)))
    return features
"""
outputs: list of tuples.
[Tuple]: [(name, type, shape)]
"""
def _make_coreml_output_features(
    graph, forceShape=False, disable_coreml_rank5_mapping=False
):  # type: (...) -> Sequence[Tuple[Text, datatypes.Array]]
    """
    Build the CoreML output feature descriptions for every graph output.
    Each output_ is a (name, type, shape) tuple.  Unless forceShape is set,
    shapes are left unspecified (None) and inferred by CoreML.
    """
    features = []
    outputs = graph.outputs
    op_types = graph.blob_from_op_type
    # Ops whose outputs may legitimately be zero-dimensional; promote those
    # to shape (1,) so CoreML accepts them.
    ops_allowing_zerod_output = {"Size"}
    for output_ in outputs:
        if op_types[output_[0]] in ops_allowing_zerod_output and len(output_[2]) == 0:
            output_ = list(output_)
            output_[2] = (1,)
        # iOS 13+ ND-array path: shapes pass through unchanged (rank <= 5).
        if disable_coreml_rank5_mapping:
            shape = output_[2]
            if len(shape) > 5:
                raise ValueError(
                    "ONNX output %s has a rank greater than 5, which is not supported in CoreML framework"
                    % str(output_[0])
                )
            else:
                features.append((str(output_[0]), datatypes.Array(*shape)))
            continue
        if not forceShape:
            # Leave the shape unspecified; CoreML will infer it.
            features.append((str(output_[0]), None))
        else:
            shape = output_[2]
            if len(shape) == 0:
                shape = [1, 1, 1]
            elif len(shape) == 1:
                pass
            elif len(shape) == 3:
                if (
                    output_[0] in op_types
                    and str(op_types[output_[0]]) in _SEQUENCE_LAYERS_REGISTRY
                ):
                    # onnx shape: (Seq,B,C)
                    shape = [shape[2]]
            elif len(shape) == 4:  # (B,C,H,W) --> (C,H,W)
                shape = shape[1:]
            else:
                shape = None  # output shape need not be specified for CoreML.
            if shape is None:
                features.append((str(output_[0]), shape))
            else:
                features.append((str(output_[0]), datatypes.Array(*shape)))
    return features
def _check_unsupported_ops(
    nodes, disable_coreml_rank5_mapping=False
):  # type: (...) -> None
    """ Raise NotImplementedError listing every ONNX op type in `nodes` that
    has no registered CoreML conversion.

    When disable_coreml_rank5_mapping is True the ND (iOS 13) registry is
    consulted, otherwise the rank-5 (iOS <= 12) registry.
    """
    # Pick the registry once instead of re-testing the flag for every node;
    # this also removes the previously duplicated scan logic.
    registry = (
        _ONNX_NODE_REGISTRY_ND if disable_coreml_rank5_mapping else _ONNX_NODE_REGISTRY
    )
    unsupported_op_types = []  # type: List[Text]
    for node in nodes:
        # Preserve first-seen order, de-duplicated, for a stable message.
        if node.op_type not in registry and node.op_type not in unsupported_op_types:
            unsupported_op_types.append(node.op_type)

    if not unsupported_op_types:
        return

    coreml_3_rerun_message = ""
    if not disable_coreml_rank5_mapping:
        # Fixed typo: "additonal" -> "additional".
        coreml_3_rerun_message = (
            "\nPlease try converting again by providing the additional argument, "
            "minimum_ios_deployment_target=13"
            " and making sure you have the latest coremltools package"
        )
    raise NotImplementedError(
        "Unsupported ONNX ops of type: %s %s"
        % (",".join(unsupported_op_types), coreml_3_rerun_message)
    )
def _update_multiarray_to_float32(
    feature,  # type: Any
):  # type : (...) -> None
    """ Force a multiArrayType feature's dataType to FLOAT32 (no-op for
    other feature types). """
    if not feature.type.HasField("multiArrayType"):
        return
    feature.type.multiArrayType.dataType = ft.ArrayFeatureType.FLOAT32
def _update_multiarray_to_int32(
    feature,  # type: Any
):  # type : (...) -> None
    """ Force a multiArrayType feature's dataType to INT32 (no-op for
    other feature types). """
    if not feature.type.HasField("multiArrayType"):
        return
    feature.type.multiArrayType.dataType = ft.ArrayFeatureType.INT32
def _transform_coreml_dtypes(
    builder,  # type : NeuralNetworkBuilder
    inputs,  # type: List[EdgeInfo]
    outputs,  # type: List[EdgeInfo]
):
    # type: (...) -> None
    """ Make sure ONNX input/output data types are mapped to the equivalent
    CoreML types.

    FLOAT and BOOL map to FLOAT32, INT32/INT64 map to INT32, DOUBLE is left
    unchanged; any other dtype raises TypeError.
    """
    def _apply(feature, onnx_type, what):
        # Map a single ONNX tensor dtype onto the CoreML multi-array dtype.
        if onnx_type == TensorProto.FLOAT or onnx_type == TensorProto.BOOL:
            # BOOL has no CoreML equivalent and is widened to float.
            _update_multiarray_to_float32(feature)
        elif onnx_type == TensorProto.DOUBLE:
            pass  # doubles are left as-is
        elif onnx_type == TensorProto.INT32 or onnx_type == TensorProto.INT64:
            _update_multiarray_to_int32(feature)
        else:
            # Fixed typo: message used to read "must be of of type".
            raise TypeError(
                "%s must be of type FLOAT, DOUBLE, INT32 or INT64" % what)

    for i, input_ in enumerate(inputs):
        _apply(builder.spec.description.input[i], input_[1], "Input")
    for i, output_ in enumerate(outputs):
        _apply(builder.spec.description.output[i], output_[1], "Output")
def _convert_multiarray_output_to_image(
    spec,  # type: Any
    feature_name,  # type: Text
    is_bgr=False,  # type: bool
):
    # type: (...) -> None
    """ Rewrite the named multi-array output of `spec` into an image output.

    Rank-2 arrays become GRAYSCALE images; rank-3/4 arrays become GRAYSCALE
    (1 channel) or RGB/BGR (3 channels) images.  Mutates `spec` in place.
    """
    for output in spec.description.output:
        if output.name != feature_name:
            continue
        if output.type.WhichOneof("Type") != "multiArrayType":
            raise ValueError("{} is not a multiarray type".format(output.name,))
        array_shape = tuple(output.type.multiArrayType.shape)
        if len(array_shape) == 2:
            # (H, W): single-channel image.
            height, width = array_shape
            output.type.imageType.colorSpace = ft.ImageFeatureType.ColorSpace.Value(
                "GRAYSCALE"
            )
        else:
            if len(array_shape) == 4:
                # (B, C, H, W): only batch size 1 can be expressed as an image.
                if array_shape[0] != 1:
                    raise ValueError(
                        "Shape {} is not supported for image output".format(
                            array_shape,
                        )
                    )
                array_shape = array_shape[1:]
            channels, height, width = array_shape
            if channels == 1:
                output.type.imageType.colorSpace = ft.ImageFeatureType.ColorSpace.Value(
                    "GRAYSCALE"
                )
            elif channels == 3:
                if is_bgr:
                    output.type.imageType.colorSpace = ft.ImageFeatureType.ColorSpace.Value(
                        "BGR"
                    )
                else:
                    output.type.imageType.colorSpace = ft.ImageFeatureType.ColorSpace.Value(
                        "RGB"
                    )
            else:
                raise ValueError(
                    "Channel Value {} is not supported for image output".format(
                        channels,
                    )
                )
        output.type.imageType.width = width
        output.type.imageType.height = height
def _set_deprocessing(
    is_grayscale,  # type: bool
    builder,  # type: NeuralNetworkBuilder
    deprocessing_args,  # type: Dict[Text, Any]
    input_name,  # type: Text
    output_name,  # type: Text
):
    # type: (...) -> None
    """ Append a scale layer that undoes image preprocessing (per-channel
    scale + bias) between input_name and output_name. """
    is_bgr = deprocessing_args.get("is_bgr", False)
    image_scale = deprocessing_args.get("image_scale", 1.0)
    if is_grayscale:
        W = np.array([image_scale])
        b = np.array([deprocessing_args.get("gray_bias", 0.0)])
    else:
        W = np.array([image_scale] * 3)
        biases = [deprocessing_args.get(color + "_bias", 0.0)
                  for color in ("red", "green", "blue")]
        if is_bgr:
            # BGR order is simply the RGB biases reversed.
            biases.reverse()
        b = np.array(biases)
    builder.add_scale(
        name=input_name,
        W=W,
        b=b,
        has_bias=True,
        shape_scale=W.shape,
        shape_bias=b.shape,
        input_name=input_name,
        output_name=output_name,
    )
def _prepare_onnx_graph(
    graph, transformers, onnx_ir_version
):  # type: (Graph, Iterable[Transformer], int) -> Graph
    """ Build the internal Graph from an ONNX graph and run the given
    transformer passes over it.  When DEBUG is set, the raw and optimized
    graphs are plotted to /tmp. """
    prepared = Graph.from_onnx(graph, onnx_ir_version)
    if DEBUG:
        plot_graph(prepared, graph_img_path="/tmp/graph_raw.pdf")
    prepared = prepared.transformed(transformers)
    if DEBUG:
        plot_graph(prepared, graph_img_path="/tmp/graph_opt.pdf")
    return prepared
@_deprecated()
def convert(
model, # type: Union[onnx.ModelProto, Text]
mode=None, # type: Optional[Text]
image_input_names=[], # type: Sequence[Text]
preprocessing_args={}, # type: Dict[Text, Any]
image_output_names=[], # type: Sequence[Text]
deprocessing_args={}, # type: Dict[Text, Any]
class_labels=None, # type: Union[Text, Iterable[Text], None]
predicted_feature_name="classLabel", # type: Text
add_custom_layers=False, # type: bool
custom_conversion_functions={}, # type: Dict[Text, Any]
onnx_coreml_input_shape_map={}, # type: Dict[Text, List[int,...]]
minimum_ios_deployment_target="12",
):
# type: (...) -> MLModel
"""
WARNING: This function is deprecated. It will be removed in the 6.0.
Convert ONNX model to CoreML.
Parameters
----------
model:
An ONNX model with parameters loaded in the ONNX package, or path to file
with models.
mode: 'classifier', 'regressor' or None
Mode of the converted coreml model:
* ``'classifier'``: a NeuralNetworkClassifier spec will be constructed.
* ``'regressor'``: a NeuralNetworkRegressor spec will be constructed.
preprocessing_args:
The ``'is_bgr'``, ``'red_bias'``, ``'green_bias'``, ``'blue_bias'``, ``'gray_bias'``,
and ``'image_scale'`` keys have the same meaning as the pre-processing arguments for
`NeuralNetworkBuilder <https://coremltools.readme.io/reference/modelsneural_network>`_.
deprocessing_args:
Same as ``'preprocessing_args'`` but for de-processing.
class_labels:
* As a string, it represents the name of the file which contains
the classification labels (one per line).
* As a list of strings, it represents a list of categories that map
the index of the output of a neural network to labels in a classifier.
predicted_feature_name:
Name of the output feature for the class labels exposed in the Core ML
model (applies to classifiers only). Defaults to ``'classLabel'``.
add_custom_layers: bool
Flag to turn on additional custom CoreML layers for unsupported ONNX ops or
attributes within a supported op.
custom_conversion_functions: dict()
* A dictionary with keys corresponding to the names/types of ONNX ops and values as
functions taking an object of the ``coreml-tools`` class:
``'NeuralNetworkBuilder'``, ``'Graph'`` (see ``onnx-coreml/_graph.Graph``),
``'Node'`` (see ``onnx-coreml/_graph.Node``), and
``'ErrorHandling'`` (see ``onnx-coreml/_error_utils.ErrorHandling``).
* This custom conversion function gets full control and responsibility for
converting a given ONNX op.
* The function returns nothing and is responsible for adding an equivalent CoreML
layer via ``'NeuralNetworkBuilder'``.
onnx_coreml_input_shape_map: dict() (Optional)
* A dictionary with keys corresponding to the model input names.
* Values are a list of integers that specify how the shape of the input is mapped
to CoreML.
* Convention used for CoreML shapes is ``0: Sequence``, ``1: Batch``,
``2: channel``, ``3: height``, ``4: width``. For example, an input of rank 2
could be mapped as ``[3,4]`` (H,W) or ``[1,2]`` (B,C), and so on. This is
ignored if ``minimum_ios_deployment_target`` is set to ``13``.
minimum_ios_deployment_target: str
Target Deployment iOS Version (default: ``'12'``). Supported iOS version options:
``'11.2'``, ``'12'``, ``'13'``. CoreML model produced by the converter will be
compatible with the iOS version specified in this argument. For example, if
``minimum_ios_deployment_target = '12'``, the converter would utilize only CoreML
features released up to version iOS12 (equivalent to macOS 10.14, watchOS 5, and
so on). iOS 11.2 (CoreML 0.8) does not support ``resize_bilinear`` and
``crop_resize`` layers. See `supported v0.8 features <https://github.com/apple/coremltools/releases/tag/v0.8>`_.
iOS 12 (CoreML 2.0), see `supported v2.0 features <https://github.com/apple/coremltools/releases/tag/v2.0>`_.
iSO 13 (CoreML 3.0), see `supported v3.0 features <https://github.com/apple/coremltools/releases/tag/3.0-beta6>`_.
Returns
-------
model: A coreml model.
"""
if not _HAS_ONNX:
raise ModuleNotFoundError("Missing ONNX package.")
if isinstance(model, Text):
onnx_model = onnx.load(model)
elif isinstance(model, onnx.ModelProto):
onnx_model = model
else:
raise TypeError("Model must be file path to .onnx file or onnx loaded model")
if not SupportedVersion.ios_support_check(minimum_ios_deployment_target):
raise TypeError(
"{} not supported. Please provide one of target iOS: {}",
minimum_ios_deployment_target,
SupportedVersion.get_supported_ios(),
)
global USE_SHAPE_MAPPING
disable_coreml_rank5_mapping = False
if SupportedVersion.is_nd_array_supported(minimum_ios_deployment_target):
disable_coreml_rank5_mapping = True
if disable_coreml_rank5_mapping:
USE_SHAPE_MAPPING = False
else:
USE_SHAPE_MAPPING = True
"""
First, apply a few optimizations to the ONNX graph,
in preparation for conversion to CoreML.
"""
    # No-op transformer: substituted into the pipeline below to conditionally
    # disable a transformation without changing the pipeline's shape.
    class DummyTransformation(object):
        """Identity graph transformer; returns the graph unchanged."""

        def __call__(self, graph):
            return graph
transformers = [
ConstantsToInitializers(),
ShapeOpRemover(),
ConstantRemover(),
CastOpRemover(),
PaddingOpRemover(),
ReshapeInitTensorFuser(),
DropoutRemover(),
DeadCodeElimination(),
ConvAddFuser(),
BNBroadcastedMulFuser(),
BNBroadcastedAddFuser(),
ReshapeTransposeReshape_pattern1(),
PixelShuffleFuser(),
AddModelInputsOutputs()
if not disable_coreml_rank5_mapping
else DummyTransformation(),
ConstantFillToInitializers(),
] # type: Iterable[Transformer]
onnx_model = onnx.shape_inference.infer_shapes(onnx_model)
graph = _prepare_onnx_graph(onnx_model.graph, transformers, onnx_model.ir_version)
"""
Check for ImageScalar nodes in ONNX, this will indicate whether input image preprocessing needs
to be added to the CoreML graph or not.
"""
# are there ImageScaler nodes in the Graph?
# If yes then add the info from it to the "preprocessing_args" dictionary, if the dictionary is not
# already provided by the user
if not bool(preprocessing_args):
for node in graph.nodes:
if node.op_type == "ImageScaler":
inp_name = node.inputs[0]
scale = node.attrs.get("scale", 1.0)
bias = node.attrs.get("bias", [0, 0, 0])
if not (len(bias) == 1 or len(bias) == 3):
continue
if "image_scale" in preprocessing_args:
preprocessing_args["image_scale"][inp_name] = scale
else:
preprocessing_args["image_scale"] = {inp_name: scale}
if len(bias) == 3:
for i, color in enumerate(["red", "green", "blue"]):
if color + "_bias" in preprocessing_args:
preprocessing_args[color + "_bias"][inp_name] = bias[i]
else:
preprocessing_args[color + "_bias"] = {inp_name: bias[i]}
else:
if "gray_bias" in preprocessing_args:
preprocessing_args["gray_bias"][inp_name] = bias[0]
else:
preprocessing_args["gray_bias"] = {inp_name: bias[0]}
if inp_name not in image_input_names:
image_input_names.append(inp_name) # type: ignore
# remove all ImageScaler ops
graph = graph.transformed([ImageScalerRemover()])
"""
Gather information (name, shape) for model inputs and outputs
This information is then used to initialize the neural network builder object of coremltools.
The builder object is later used to add layers to the CoreML model.
"""
# Make CoreML input and output features by gathering shape info and
# interpreting it for CoreML
input_features = _make_coreml_input_features(
graph, onnx_coreml_input_shape_map, disable_coreml_rank5_mapping
)
if len(image_output_names) > 0:
output_features = _make_coreml_output_features(
graph,
forceShape=True,
disable_coreml_rank5_mapping=disable_coreml_rank5_mapping,
)
else:
output_features = _make_coreml_output_features(
graph, disable_coreml_rank5_mapping=disable_coreml_rank5_mapping
)
builder = NeuralNetworkBuilder(
input_features,
output_features,
mode=mode,
disable_rank5_shape_mapping=disable_coreml_rank5_mapping,
)
# TODO: To be removed once, auto-downgrading of spec version is enabled
builder.spec.specificationVersion = SupportedVersion.get_specification_version(
minimum_ios_deployment_target
)
"""
Set CoreML input,output types (float, double, int) same as onnx types, if supported
"""
_transform_coreml_dtypes(builder, graph.inputs, graph.outputs)
"""what follows is some book-keeping to support outputs of type image.
"""
is_deprocess_bgr_only = (len(deprocessing_args) == 1) and (
"is_bgr" in deprocessing_args
)
add_deprocess = (
(len(image_output_names) > 0)
and (len(deprocessing_args) > 0)
and (not is_deprocess_bgr_only)
)
if add_deprocess:
mapping = {}
for f in output_features:
output_name = f[0]
mapping[output_name] = graph.get_unique_edge_name(output_name)
graph = OutputRenamer(mapping)(graph)
if len(image_input_names) > 0:
builder.set_pre_processing_parameters(
image_input_names=image_input_names,
is_bgr=preprocessing_args.get("is_bgr", False),
red_bias=preprocessing_args.get("red_bias", 0.0),
green_bias=preprocessing_args.get("green_bias", 0.0),
blue_bias=preprocessing_args.get("blue_bias", 0.0),
gray_bias=preprocessing_args.get("gray_bias", 0.0),
image_scale=preprocessing_args.get("image_scale", 1.0),
)
preprocessing_args.clear()
if len(image_output_names) > 0:
for f in output_features:
f_name = f[0]
if f_name in image_output_names:
is_bgr = deprocessing_args.get("is_bgr", False)
_convert_multiarray_output_to_image(builder.spec, f_name, is_bgr=is_bgr)
"""
Iterate through all the ONNX ops and translate them to CoreML layers, one by one.
"""
"""
before proceeding to start the layer translation process,
check whether there is an op in the ONNX graph, whose translation function is not yet
implemented in the converter or which is not supported in the CoreML framework. If so,
raise an error before starting the process.
(if the user desires to add a custom layer then this check is not required)
"""
if not add_custom_layers:
_check_unsupported_ops(graph.nodes, disable_coreml_rank5_mapping)
"""
ErrorHandling is a generic class, useful to store a variety of parameters during the conversion process
"""
err = ErrorHandling(add_custom_layers, custom_conversion_functions)
for i, node in enumerate(graph.nodes):
print(
"%d/%d: Converting Node Type %s" % (i + 1, len(graph.nodes), node.op_type)
)
if disable_coreml_rank5_mapping:
_convert_node_nd(builder, node, graph, err)
else:
_add_const_inputs_if_required(builder, node, graph, err)
_convert_node(builder, node, graph, err)
if DEBUG:
plot_graph(
graph,
graph_img_path="/tmp/after_conversion.pdf",
show_coreml_mapped_shapes=not disable_coreml_rank5_mapping,
)
if add_deprocess:
for f in output_features:
output_name = f[0]
if output_name not in image_output_names:
continue
output_shape = f[1].dimensions
if len(output_shape) == 2 or output_shape[0] == 1:
is_grayscale = True
elif output_shape[0] == 3:
is_grayscale = False
else:
raise ValueError("Output must be RGB image or Grayscale")
_set_deprocessing(
is_grayscale,
builder,
deprocessing_args,
mapping[output_name],
output_name,
)
if class_labels is not None:
if isinstance(class_labels, Text):
labels = [
l.strip() for l in open(class_labels).readlines()
] # type: Sequence[Text]
elif isinstance(class_labels, list):
labels = class_labels
else:
raise TypeError(
"synset variable of unknown type. Type found: {}. \
Expected either string or list of strings.".format(
type(class_labels),
)
)
builder.set_class_labels(
class_labels=labels, predicted_feature_name=predicted_feature_name
)
    def _add_informative_description(feature, raise_error=True):
        """Append a human-readable shape note to a multiArrayType feature.

        Uses the ONNX shape and the onnx->coreml axis mapping recorded on the
        enclosing `graph` (closure variable) to reconstruct the rank-5 CoreML
        shape, then appends it to the feature's shortDescription.

        :param feature: a CoreML spec feature description (input or output).
        :param raise_error: when True, assert that the mapping and the ONNX
            shape have the same rank instead of silently skipping.
        """
        if feature.type.WhichOneof("Type") == "multiArrayType":
            # Only annotate when both the axis mapping and the ONNX shape are known.
            if (
                feature.name in graph.onnx_coreml_shape_mapping
                and feature.name in graph.shape_dict
            ):
                mapp = graph.onnx_coreml_shape_mapping[feature.name]
                onnx_shape = graph.shape_dict[feature.name]
                if raise_error:
                    assert len(mapp) == len(onnx_shape), "Something wrong in shape"
                if len(mapp) == len(onnx_shape):
                    # Expand to the 5 CoreML axes; axes absent from the
                    # mapping get size 1.
                    shape = []
                    for i in range(5):
                        if i in mapp:
                            shape += [int(onnx_shape[mapp.index(i)])]
                        else:
                            shape += [1]
                    msg = "MultiArray of shape {}. The first and second dimensions correspond to sequence and batch size, respectively".format(
                        str(tuple(shape))
                    )
                    feature.shortDescription += msg
optional_input_names = []
for tup in graph.optional_inputs:
optional_input_names.append(tup[0])
optional_output_names = []
for tup in graph.optional_outputs:
optional_output_names.append(tup[0])
# add description for inputs and outputs shapes
remove_input_id = []
for i, input_ in enumerate(builder.spec.description.input):
if input_.name not in optional_input_names:
if not disable_coreml_rank5_mapping:
_add_informative_description(input_)
else:
remove_input_id.append(i)
remove_output_id = []
for i, output_ in enumerate(builder.spec.description.output):
if output_.name not in optional_output_names:
if not disable_coreml_rank5_mapping:
_add_informative_description(output_, raise_error=False)
else:
remove_output_id.append(i)
for index in sorted(remove_input_id, reverse=True):
del builder.spec.description.input[index]
for index in sorted(remove_output_id, reverse=True):
del builder.spec.description.output[index]
if len(graph.optional_inputs) > 0 or len(graph.optional_outputs):
builder.add_optionals(graph.optional_inputs, graph.optional_outputs)
# Check for specification version and target ios compatibility
if (
minimum_ios_deployment_target == "11.2"
and builder.spec.WhichOneof("Type") == "neuralNetwork"
):
nn_spec = builder.spec.neuralNetwork
for layer in nn_spec.layers:
if (
layer.WhichOneof("layer") == "resizeBilinear"
or layer.WhichOneof("layer") == "cropResize"
):
raise TypeError(
"{} not supported with target iOS 11.2 please provide higher target iOS".format(
layer.WhichOneof("layer")
)
)
# Optimize ML Model Spec
ml_model_passes = [remove_disconnected_layers, transform_conv_crop]
for opt in ml_model_passes:
opt(builder.spec)
print("Translation to CoreML spec completed. Now compiling the CoreML model.")
try:
if DEBUG:
import coremltools
coremltools.models.utils.save_spec(
builder.spec, "/tmp/node_model_raw_spec.mlmodel"
)
from coremltools.models.neural_network.printer import print_network_spec
print_network_spec(builder.spec, style="coding")
mlmodel = MLModel(builder.spec)
except RuntimeError as e:
raise ValueError("Compilation failed: {}".format(str(e)))
print("Model Compilation done.")
# print information about all ops for which custom layers have been added
if len(err.custom_layer_nodes) > 0:
print("\n")
print(
"Custom layers have been added to the CoreML model "
"corresponding to the following ops in the onnx model: "
)
for i, node in enumerate(err.custom_layer_nodes):
input_info = []
for input_ in node.inputs:
input_info.append(
(
str(input_),
graph.shape_dict.get(input_, str("Shape not available")),
)
)
output_info = []
for output_ in node.outputs:
output_info.append(
(
str(output_),
graph.shape_dict.get(output_, str("Shape not available")),
)
)
print(
"{}/{}: op type: {}, op input names and shapes: {}, op output names and shapes: {}".format(
i + 1,
len(err.custom_layer_nodes),
node.op_type,
str(input_info),
str(output_info),
)
)
mlmodel.user_defined_metadata[_METADATA_VERSION] = ct_version
mlmodel.user_defined_metadata[_METADATA_SOURCE] = "onnx=={0}".format(
onnx.__version__
)
return mlmodel
| |
from importlib import import_module
import itertools
import os
import re
from django.apps import apps
from django.conf import global_settings, settings
from django.contrib.sites.requests import RequestSite
from django.contrib.admin.models import LogEntry
from django.contrib.auth.models import User
from django.core import mail
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import QueryDict, HttpRequest
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils.six.moves.urllib.parse import urlparse, ParseResult
from django.utils.translation import LANGUAGE_SESSION_KEY
from django.utils._os import upath
from django.test import TestCase, override_settings
from django.test.utils import patch_logger
from django.middleware.csrf import CsrfViewMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import (AuthenticationForm, PasswordChangeForm,
SetPasswordForm)
# Needed so model is installed when tests are run independently:
from django.contrib.auth.tests.custom_user import CustomUser # NOQA
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.auth.views import login as login_view
@override_settings(
    LANGUAGES=(
        ('en', 'English'),
    ),
    LANGUAGE_CODE='en',
    TEMPLATE_LOADERS=global_settings.TEMPLATE_LOADERS,
    TEMPLATE_DIRS=(
        os.path.join(os.path.dirname(upath(__file__)), 'templates'),
    ),
    USE_TZ=False,
    PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class AuthViewsTestCase(TestCase):
    """
    Helper base class for all the following test cases.
    """
    fixtures = ['authtestdata.json']
    urls = 'django.contrib.auth.tests.urls'

    def login(self, password='password'):
        """Log the fixture user in and assert a session was established."""
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password,
        })
        # assertIn gives a clearer failure message than assertTrue(x in y).
        self.assertIn(SESSION_KEY, self.client.session)
        return response

    def logout(self):
        """Log out via the admin logout view and assert the session is gone."""
        response = self.client.get('/admin/logout/')
        self.assertEqual(response.status_code, 200)
        self.assertNotIn(SESSION_KEY, self.client.session)

    def assertFormError(self, response, error):
        """Assert that error is found in response.context['form'] errors"""
        # NOTE: replaces the inherited TestCase.assertFormError with a simpler
        # two-argument form used throughout these tests.
        form_errors = list(itertools.chain(*response.context['form'].errors.values()))
        self.assertIn(force_text(error), form_errors)

    def assertURLEqual(self, url, expected, parse_qs=False):
        """
        Given two URLs, make sure all their components (the ones given by
        urlparse) are equal, only comparing components that are present in both
        URLs.

        If `parse_qs` is True, then the querystrings are parsed with QueryDict.
        This is useful if you don't want the order of parameters to matter.
        Otherwise, the query strings are compared as-is.
        """
        fields = ParseResult._fields
        for attr, x, y in zip(fields, urlparse(url), urlparse(expected)):
            if parse_qs and attr == 'query':
                x, y = QueryDict(x), QueryDict(y)
            # Only components present in both URLs are compared.
            if x and y and x != y:
                self.fail("%r != %r (%s doesn't match)" % (url, expected, attr))
@skipIfCustomUser
class AuthViewNamedURLTests(AuthViewsTestCase):
    """Smoke tests for the URL names declared by django.contrib.auth.urls."""
    urls = 'django.contrib.auth.urls'

    def test_named_urls(self):
        "Named URLs should be reversible"
        # Most auth URL patterns take no arguments; build those uniformly,
        # then add the two special cases in their original order.
        expected_named_urls = [(url_name, [], {}) for url_name in [
            'login',
            'logout',
            'password_change',
            'password_change_done',
            'password_reset',
            'password_reset_done',
        ]]
        expected_named_urls.append(('password_reset_confirm', [], {
            'uidb64': 'aaaaaaa',
            'token': '1111-aaaaa',
        }))
        expected_named_urls.append(('password_reset_complete', [], {}))
        for url_name, url_args, url_kwargs in expected_named_urls:
            try:
                reverse(url_name, args=url_args, kwargs=url_kwargs)
            except NoReverseMatch:
                self.fail("Reversal of url named '%s' failed with NoReverseMatch" % url_name)
@skipIfCustomUser
class PasswordResetTest(AuthViewsTestCase):
    # Exercises the full password-reset flow: requesting the reset email,
    # following the confirm link, token validation, and redirect targets.

    def test_email_not_found(self):
        """If the provided email is not registered, don't raise any error but
        also don't send any email."""
        response = self.client.get('/password_reset/')
        self.assertEqual(response.status_code, 200)
        response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 0)

    def test_email_found(self):
        "Email is sent if a valid email address is provided for password reset"
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertTrue("http://" in mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
        # optional multipart text/html email has been added. Make sure original,
        # default functionality is 100% the same
        self.assertFalse(mail.outbox[0].message().is_multipart())

    def test_html_mail_template(self):
        """
        A multipart email with text/plain and text/html is sent
        if the html_email_template parameter is passed to the view
        """
        response = self.client.post('/password_reset/html_email_template/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0].message()
        # Exactly two payloads: the plain-text part and the HTML part.
        self.assertEqual(len(message.get_payload()), 2)
        self.assertTrue(message.is_multipart())
        self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
        self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
        self.assertTrue('<html>' not in message.get_payload(0).get_payload())
        self.assertTrue('<html>' in message.get_payload(1).get_payload())

    def test_email_found_custom_from(self):
        "Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
        response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)

    @override_settings(ALLOWED_HOSTS=['adminsite.com'])
    def test_admin_reset(self):
        "If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
        response = self.client.post('/admin_password_reset/',
            {'email': 'staffmember@example.com'},
            HTTP_HOST='adminsite.com'
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        # The reset link in the email must use the request's host.
        self.assertTrue("http://adminsite.com" in mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)

    # Skip any 500 handler action (like sending more mail...)
    @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
    def test_poisoned_http_host(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails"
        # This attack is based on the way browsers handle URLs. The colon
        # should be used to separate the port, but if the URL contains an @,
        # the colon is interpreted as part of a username for login purposes,
        # making 'evil.com' the request domain. Since HTTP_HOST is used to
        # produce a meaningful reset URL, we need to be certain that the
        # HTTP_HOST header isn't poisoned. This is done as a check when get_host()
        # is invoked, but we check here as a practical consequence.
        with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
            response = self.client.post(
                '/password_reset/',
                {'email': 'staffmember@example.com'},
                HTTP_HOST='www.example:dr.frankenstein@evil.tld'
            )
            self.assertEqual(response.status_code, 400)
            self.assertEqual(len(mail.outbox), 0)
            # Exactly one DisallowedHost error must have been logged.
            self.assertEqual(len(logger_calls), 1)

    # Skip any 500 handler action (like sending more mail...)
    @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
    def test_poisoned_http_host_admin_site(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
        with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
            response = self.client.post(
                '/admin_password_reset/',
                {'email': 'staffmember@example.com'},
                HTTP_HOST='www.example:dr.frankenstein@evil.tld'
            )
            self.assertEqual(response.status_code, 400)
            self.assertEqual(len(mail.outbox), 0)
            self.assertEqual(len(logger_calls), 1)

    def _test_confirm_start(self):
        # Start by creating the email
        self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])

    def _read_signup_email(self, email):
        # Extract the (full URL, path) of the reset confirm link in the email.
        urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertTrue(urlmatch is not None, "No URL found in sent email")
        return urlmatch.group(), urlmatch.groups()[0]

    def test_confirm_valid(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertContains(response, "Please enter your new password")

    def test_confirm_invalid(self):
        url, path = self._test_confirm_start()
        # Let's munge the token in the path, but keep the same length,
        # in case the URLconf will reject a different length.
        path = path[:-5] + ("0" * 4) + path[-1]
        response = self.client.get(path)
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_invalid_user(self):
        # Ensure that we get a 200 response for a nonexistent user, not a 404
        response = self.client.get('/reset/123456/1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_overflow_user(self):
        # Ensure that we get a 200 response for a base36 user id that overflows int
        response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_invalid_post(self):
        # Same as test_confirm_invalid, but trying
        # to do a POST instead.
        url, path = self._test_confirm_start()
        path = path[:-5] + ("0" * 4) + path[-1]
        self.client.post(path, {
            'new_password1': 'anewpassword',
            'new_password2': ' anewpassword',
        })
        # Check the password has not been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(not u.check_password("anewpassword"))

    def test_confirm_complete(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        # Check the password has been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(u.check_password("anewpassword"))
        # Check we can't use the link again
        response = self.client.get(path)
        self.assertContains(response, "The password reset link was invalid")

    def test_confirm_different_passwords(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'x'})
        self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])

    def test_reset_redirect_default(self):
        response = self.client.post('/password_reset/',
            {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/done/')

    def test_reset_custom_redirect(self):
        response = self.client.post('/password_reset/custom_redirect/',
            {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_reset_custom_redirect_named(self):
        response = self.client.post('/password_reset/custom_redirect/named/',
            {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')

    def test_confirm_redirect_default(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/reset/done/')

    def test_confirm_redirect_custom(self):
        url, path = self._test_confirm_start()
        path = path.replace('/reset/', '/reset/custom/')
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_confirm_redirect_custom_named(self):
        url, path = self._test_confirm_start()
        path = path.replace('/reset/', '/reset/custom/named/')
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')

    def test_confirm_display_user_from_form(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # #16919 -- The ``password_reset_confirm`` view should pass the user
        # object to the ``SetPasswordForm``, even on GET requests.
        # For this test, we render ``{{ form.user }}`` in the template
        # ``registration/password_reset_confirm.html`` so that we can test this.
        username = User.objects.get(email='staffmember@example.com').username
        self.assertContains(response, "Hello, %s." % username)
        # However, the view should NOT pass any user object on a form if the
        # password reset link was invalid.
        response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
        self.assertContains(response, "Hello, .")
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserPasswordResetTest(AuthViewsTestCase):
    """Password-reset flow smoke test with a custom user model active."""
    fixtures = ['custom_user.json']

    def _test_confirm_start(self):
        # Trigger the reset email and hand back the confirm link it contains.
        resp = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])

    def _read_signup_email(self, email):
        # Return (full URL, path) of the first reset link in the email body.
        match = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertTrue(match is not None, "No URL found in sent email")
        return match.group(0), match.group(1)

    def test_confirm_valid_custom_user(self):
        url, path = self._test_confirm_start()
        resp = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertContains(resp, "Please enter your new password")
@skipIfCustomUser
class ChangePasswordTest(AuthViewsTestCase):
    # Tests for the password_change view: validation, success, and redirects.

    def fail_login(self, password='password'):
        """Attempt a login that must fail and assert the invalid-login error."""
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password,
        })
        self.assertFormError(response, AuthenticationForm.error_messages['invalid_login'] % {
            'username': User._meta.get_field('username').verbose_name
        })

    def logout(self):
        # Overrides the base class logout (which goes through /admin/logout/).
        self.client.get('/logout/')

    def test_password_change_fails_with_invalid_old_password(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'donuts',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertFormError(response, PasswordChangeForm.error_messages['password_incorrect'])

    def test_password_change_fails_with_mismatched_passwords(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'donuts',
        })
        self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])

    def test_password_change_succeeds(self):
        self.login()
        self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        # Old password must no longer work; the new one must.
        self.fail_login()
        self.login(password='password1')

    def test_password_change_done_succeeds(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_change/done/')

    @override_settings(LOGIN_URL='/login/')
    def test_password_change_done_fails(self):
        # The done page requires login; anonymous access redirects to LOGIN_URL.
        response = self.client.get('/password_change/done/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/?next=/password_change/done/')

    def test_password_change_redirect_default(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_change/done/')

    def test_password_change_redirect_custom(self):
        self.login()
        response = self.client.post('/password_change/custom/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')

    def test_password_change_redirect_custom_named(self):
        self.login()
        response = self.client.post('/password_change/custom/named/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')
@skipIfCustomUser
class LoginTest(AuthViewsTestCase):
    # Tests for the login view: context, open-redirect protection,
    # request-aware forms, and CSRF token rotation.

    def test_current_site_in_context_after_login(self):
        response = self.client.get(reverse('login'))
        self.assertEqual(response.status_code, 200)
        if apps.is_installed('django.contrib.sites'):
            # With the sites framework installed, the current Site object is
            # exposed in the template context.
            Site = apps.get_model('sites.Site')
            site = Site.objects.get_current()
            self.assertEqual(response.context['site'], site)
            self.assertEqual(response.context['site_name'], site.name)
        else:
            # Without it, a RequestSite built from the request is used instead.
            self.assertIsInstance(response.context['site'], RequestSite)
        self.assertTrue(isinstance(response.context['form'], AuthenticationForm),
                        'Login form is not an AuthenticationForm')

    def test_security_check(self, password='password'):
        """The 'next' redirect target must reject external/unsafe URLs."""
        login_url = reverse('login')
        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '//example.com',
                        'javascript:alert("XSS")'):
            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urlquote(bad_url),
            }
            response = self.client.post(nasty_url, {
                'username': 'testclient',
                'password': password,
            })
            self.assertEqual(response.status_code, 302)
            self.assertFalse(bad_url in response.url,
                             "%s should be blocked" % bad_url)
        # These URLs *should* still pass the security check
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https:///',
                         'HTTPS:///',
                         '//testserver/',
                         '/url%20with%20spaces/'):  # see ticket #12534
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urlquote(good_url),
            }
            response = self.client.post(safe_url, {
                'username': 'testclient',
                'password': password,
            })
            self.assertEqual(response.status_code, 302)
            self.assertTrue(good_url in response.url,
                            "%s should be allowed" % good_url)

    def test_login_form_contains_request(self):
        # 15198
        self.client.post('/custom_requestauth_login/', {
            'username': 'testclient',
            'password': 'password',
        }, follow=True)
        # the custom authentication form used by this login asserts
        # that a request is passed to the form successfully.

    def test_login_csrf_rotate(self, password='password'):
        """
        Makes sure that a login rotates the currently-used CSRF token.
        """
        # Do a GET to establish a CSRF token
        # TestClient isn't used here as we're testing middleware, essentially.
        req = HttpRequest()
        CsrfViewMiddleware().process_view(req, login_view, (), {})
        req.META["CSRF_COOKIE_USED"] = True
        resp = login_view(req)
        resp2 = CsrfViewMiddleware().process_response(req, resp)
        csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
        token1 = csrf_cookie.coded_value
        # Prepare the POST request
        req = HttpRequest()
        req.COOKIES[settings.CSRF_COOKIE_NAME] = token1
        req.method = "POST"
        req.POST = {'username': 'testclient', 'password': password, 'csrfmiddlewaretoken': token1}
        # Use POST request to log in
        SessionMiddleware().process_request(req)
        CsrfViewMiddleware().process_view(req, login_view, (), {})
        req.META["SERVER_NAME"] = "testserver"  # Required to have redirect work in login view
        req.META["SERVER_PORT"] = 80
        resp = login_view(req)
        resp2 = CsrfViewMiddleware().process_response(req, resp)
        csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
        token2 = csrf_cookie.coded_value
        # Check the CSRF token switched
        self.assertNotEqual(token1, token2)
@skipIfCustomUser
class LoginURLSettings(AuthViewsTestCase):
    """Tests for settings.LOGIN_URL."""

    def assertLoginURLEquals(self, url, parse_qs=False):
        # Hitting a login_required view anonymously must redirect to LOGIN_URL.
        resp = self.client.get('/login_required/')
        self.assertEqual(resp.status_code, 302)
        self.assertURLEqual(resp.url, url, parse_qs=parse_qs)

    @override_settings(LOGIN_URL='/login/')
    def test_standard_login_url(self):
        self.assertLoginURLEquals('/login/?next=/login_required/')

    @override_settings(LOGIN_URL='login')
    def test_named_login_url(self):
        self.assertLoginURLEquals('/login/?next=/login_required/')

    @override_settings(LOGIN_URL='http://remote.example.com/login')
    def test_remote_login_url(self):
        expected_url = 'http://remote.example.com/login?next=%s' % urlquote(
            'http://testserver/login_required/')
        self.assertLoginURLEquals(expected_url)

    @override_settings(LOGIN_URL='https:///login/')
    def test_https_login_url(self):
        expected_url = 'https:///login/?next=%s' % urlquote(
            'http://testserver/login_required/')
        self.assertLoginURLEquals(expected_url)

    @override_settings(LOGIN_URL='/login/?pretty=1')
    def test_login_url_with_querystring(self):
        self.assertLoginURLEquals('/login/?pretty=1&next=/login_required/', parse_qs=True)

    @override_settings(LOGIN_URL='http://remote.example.com/login/?next=/default/')
    def test_remote_login_url_with_next_querystring(self):
        expected_url = 'http://remote.example.com/login/?next=%s' % urlquote(
            'http://testserver/login_required/')
        self.assertLoginURLEquals(expected_url)
@skipIfCustomUser
class LoginRedirectUrlTest(AuthViewsTestCase):
    """Tests for settings.LOGIN_REDIRECT_URL."""

    def assertLoginRedirectURLEqual(self, url):
        # A successful login must 302-redirect to the configured URL.
        resp = self.login()
        self.assertEqual(resp.status_code, 302)
        self.assertURLEqual(resp.url, url)

    def test_default(self):
        self.assertLoginRedirectURLEqual('/accounts/profile/')

    @override_settings(LOGIN_REDIRECT_URL='/custom/')
    def test_custom(self):
        self.assertLoginRedirectURLEqual('/custom/')

    @override_settings(LOGIN_REDIRECT_URL='password_reset')
    def test_named(self):
        self.assertLoginRedirectURLEqual('/password_reset/')

    @override_settings(LOGIN_REDIRECT_URL='http://remote.example.com/welcome/')
    def test_remote(self):
        self.assertLoginRedirectURLEqual('http://remote.example.com/welcome/')
@skipIfCustomUser
class LogoutTest(AuthViewsTestCase):
    """Tests for the logout view and its redirect handling."""

    def confirm_logged_out(self):
        # After logout the session must no longer hold an authenticated user.
        self.assertTrue(SESSION_KEY not in self.client.session)

    def test_logout_default(self):
        "Logout without next_page option renders the default template"
        self.login()
        response = self.client.get('/logout/')
        self.assertContains(response, 'Logged out')
        self.confirm_logged_out()

    def test_14377(self):
        # Bug 14377
        self.login()
        response = self.client.get('/logout/')
        self.assertTrue('site' in response.context)

    def test_logout_with_overridden_redirect_url(self):
        # Bug 11223
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')

        response = self.client.get('/logout/next_page/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/')

        self.confirm_logged_out()

    def test_logout_with_next_page_specified(self):
        "Logout with next_page option given redirects to specified resource"
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')
        self.confirm_logged_out()

    def test_logout_with_redirect_argument(self):
        "Logout with query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/')
        self.confirm_logged_out()

    def test_logout_with_custom_redirect_argument(self):
        "Logout with custom query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/custom_query/?follow=/somewhere/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')
        self.confirm_logged_out()

    def test_logout_with_named_redirect(self):
        "Logout resolves names or URLs passed as next_page."
        self.login()
        response = self.client.get('/logout/next_page/named/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')
        self.confirm_logged_out()

    def test_security_check(self, password='password'):
        logout_url = reverse('logout')

        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '//example.com',
                        'javascript:alert("XSS")'):
            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': logout_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urlquote(bad_url),
            }
            self.login()
            response = self.client.get(nasty_url)
            self.assertEqual(response.status_code, 302)
            self.assertFalse(bad_url in response.url,
                             "%s should be blocked" % bad_url)
            self.confirm_logged_out()

        # These URLs *should* still pass the security check
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https:///',
                         'HTTPS:///',
                         '//testserver/',
                         '/url%20with%20spaces/'):  # see ticket #12534
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': logout_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urlquote(good_url),
            }
            self.login()
            response = self.client.get(safe_url)
            self.assertEqual(response.status_code, 302)
            self.assertTrue(good_url in response.url,
                            "%s should be allowed" % good_url)
            self.confirm_logged_out()

    def test_logout_preserve_language(self):
        """Check that language stored in session is preserved after logout"""
        # Create a new session with language
        engine = import_module(settings.SESSION_ENGINE)
        session = engine.SessionStore()
        session[LANGUAGE_SESSION_KEY] = 'pl'
        session.save()
        self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key

        self.client.get('/logout/')
        self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], 'pl')
@skipIfCustomUser
@override_settings(
    PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class ChangelistTests(AuthViewsTestCase):
    """Tests for the user admin changelist/change views."""

    urls = 'django.contrib.auth.tests.urls_admin'

    def setUp(self):
        # Make me a superuser before logging in.
        User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True)
        self.login()
        self.admin = User.objects.get(pk=1)

    def get_user_data(self, user):
        # Build the full POST payload the admin change form expects, including
        # the split date/time widgets and their initial-* counterparts.
        return {
            'username': user.username,
            'password': user.password,
            'email': user.email,
            'is_active': user.is_active,
            'is_staff': user.is_staff,
            'is_superuser': user.is_superuser,
            'last_login_0': user.last_login.strftime('%Y-%m-%d'),
            'last_login_1': user.last_login.strftime('%H:%M:%S'),
            'initial-last_login_0': user.last_login.strftime('%Y-%m-%d'),
            'initial-last_login_1': user.last_login.strftime('%H:%M:%S'),
            'date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
            'date_joined_1': user.date_joined.strftime('%H:%M:%S'),
            'initial-date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
            'initial-date_joined_1': user.date_joined.strftime('%H:%M:%S'),
            'first_name': user.first_name,
            'last_name': user.last_name,
        }

    # #20078 - users shouldn't be allowed to guess password hashes via
    # repeated password__startswith queries.
    def test_changelist_disallows_password_lookups(self):
        # A lookup that tries to filter on password isn't OK
        with patch_logger('django.security.DisallowedModelAdminLookup', 'error') as logger_calls:
            response = self.client.get('/admin/auth/user/?password__startswith=sha1$')
            self.assertEqual(response.status_code, 400)
        self.assertEqual(len(logger_calls), 1)

    def test_user_change_email(self):
        data = self.get_user_data(self.admin)
        data['email'] = 'new_' + data['email']
        response = self.client.post('/admin/auth/user/%s/' % self.admin.pk, data)
        self.assertRedirects(response, '/admin/auth/user/')
        row = LogEntry.objects.latest('id')
        self.assertEqual(row.change_message, 'Changed email.')

    def test_user_not_change(self):
        response = self.client.post('/admin/auth/user/%s/' % self.admin.pk,
                                    self.get_user_data(self.admin)
                                    )
        self.assertRedirects(response, '/admin/auth/user/')
        row = LogEntry.objects.latest('id')
        self.assertEqual(row.change_message, 'No fields changed.')

    def test_user_change_password(self):
        response = self.client.post('/admin/auth/user/%s/password/' % self.admin.pk, {
            'password1': 'password1',
            'password2': 'password1',
        })
        self.assertRedirects(response, '/admin/auth/user/%s/' % self.admin.pk)
        row = LogEntry.objects.latest('id')
        self.assertEqual(row.change_message, 'Changed password.')
        self.logout()
        self.login(password='password1')
| |
"""
Module is used to infer Django model fields.
"""
from jedi._compatibility import Parameter
from jedi import debug
from jedi.inference.cache import inference_state_function_cache
from jedi.inference.base_value import ValueSet, iterator_to_value_set, ValueWrapper
from jedi.inference.filters import DictFilter, AttributeOverwrite
from jedi.inference.names import NameWrapper, BaseTreeParamName
from jedi.inference.compiled.value import EmptyCompiledName
from jedi.inference.value.instance import TreeInstance
from jedi.inference.value.klass import ClassMixin
from jedi.inference.gradual.base import GenericClass
from jedi.inference.gradual.generics import TupleGenericManager
from jedi.inference.signature import AbstractSignature
# Maps a Django model field class name to the Python type its instance
# attribute evaluates to, as (module, attribute); module None means builtin.
mapping = {
    'IntegerField': (None, 'int'),
    'BigIntegerField': (None, 'int'),
    'PositiveIntegerField': (None, 'int'),
    'SmallIntegerField': (None, 'int'),
    'CharField': (None, 'str'),
    'TextField': (None, 'str'),
    'EmailField': (None, 'str'),
    'GenericIPAddressField': (None, 'str'),
    'URLField': (None, 'str'),
    'FloatField': (None, 'float'),
    'BinaryField': (None, 'bytes'),
    'BooleanField': (None, 'bool'),
    'DecimalField': ('decimal', 'Decimal'),
    'TimeField': ('datetime', 'time'),
    'DurationField': ('datetime', 'timedelta'),
    'DateField': ('datetime', 'date'),
    'DateTimeField': ('datetime', 'datetime'),
    'UUIDField': ('uuid', 'UUID'),
}
# QuerySet/Manager methods that take model fields as keyword arguments and
# therefore get their signatures overwritten by this plugin.
_FILTER_LIKE_METHODS = ('create', 'filter', 'exclude', 'update', 'get',
                        'get_or_create', 'update_or_create')
@inference_state_function_cache()
def _get_deferred_attributes(inference_state):
    # Class-level access to a model field yields a DeferredAttribute instance;
    # the result is cached per inference state by the decorator.
    return inference_state.import_module(
        ('django', 'db', 'models', 'query_utils')
    ).py__getattribute__('DeferredAttribute').execute_annotation()
def _infer_scalar_field(inference_state, field_name, field_tree_instance, is_instance):
    """Infer the value type of a scalar model field.

    Returns None when the field class is not in ``mapping`` (i.e. not a
    scalar field), a DeferredAttribute set for class-level access, or the
    executed target type (e.g. ``int``, ``datetime.date``) for instances.
    """
    lookup = mapping.get(field_tree_instance.py__name__())
    if lookup is None:
        return None

    if not is_instance:
        # On the class itself a field is a DeferredAttribute, not a value.
        return _get_deferred_attributes(inference_state)

    module_name, attribute_name = lookup
    if module_name is None:
        module = inference_state.builtins_module
    else:
        module = inference_state.import_module((module_name,))

    for attribute in module.py__getattribute__(attribute_name):
        return attribute.execute_with_values()
@iterator_to_value_set
def _get_foreign_key_values(cls, field_tree_instance):
    # Yield the model class(es) a relational field points at.  The target may
    # be given either directly as a class or as a string name, which is then
    # looked up in the module that defines ``cls``.
    if isinstance(field_tree_instance, TreeInstance):
        # TODO private access..
        argument_iterator = field_tree_instance._arguments.unpack()
        # Only the first positional argument (the related model) is relevant.
        key, lazy_values = next(argument_iterator, (None, None))
        if key is None and lazy_values is not None:
            for value in lazy_values.infer():
                if value.py__name__() == 'str':
                    # String reference, e.g. ForeignKey("Author").
                    foreign_key_class_name = value.get_safe_value()
                    module = cls.get_root_context()
                    for v in module.py__getattribute__(foreign_key_class_name):
                        if v.is_class():
                            yield v
                elif value.is_class():
                    yield value
def _infer_field(cls, field_name, is_instance):
    """Infer the type of the model attribute *field_name* on *cls*.

    Scalar fields resolve to builtin/stdlib value types, relational fields to
    the related model (a RelatedManager for many-to-many).  Falls back to the
    unmodified inference result when the field class is not recognised.
    """
    inference_state = cls.inference_state
    result = field_name.infer()
    for field_tree_instance in result:
        scalar_field = _infer_scalar_field(
            inference_state, field_name, field_tree_instance, is_instance)
        if scalar_field is not None:
            return scalar_field

        name = field_tree_instance.py__name__()
        is_many_to_many = name == 'ManyToManyField'
        if name in ('ForeignKey', 'OneToOneField') or is_many_to_many:
            if not is_instance:
                # Class-level access yields DeferredAttribute, like scalars.
                return _get_deferred_attributes(inference_state)

            values = _get_foreign_key_values(cls, field_tree_instance)
            if is_many_to_many:
                # filter(None, ...) drops models for which no RelatedManager
                # could be created.
                return ValueSet(filter(None, [
                    _create_manager_for(v, 'RelatedManager') for v in values
                ]))
            else:
                return values.execute_with_values()

    debug.dbg('django plugin: fail to infer `%s` from class `%s`',
              field_name.string_name, cls.py__name__())
    return result
class DjangoModelName(NameWrapper):
    """Name wrapper that infers Django model fields to their value types."""

    def __init__(self, cls, name, is_instance):
        super(DjangoModelName, self).__init__(name)
        self._cls = cls
        # True when accessed on an instance (field value) rather than on the
        # class (DeferredAttribute).
        self._is_instance = is_instance

    def infer(self):
        return _infer_field(self._cls, self._wrapped_name, self._is_instance)
def _create_manager_for(cls, manager_cls='BaseManager'):
    """Return ``manager_cls`` parametrised with the model class *cls*.

    Returns None if no suitable manager class could be found.
    """
    managers = cls.inference_state.import_module(
        ('django', 'db', 'models', 'manager')
    ).py__getattribute__(manager_cls)
    for m in managers:
        if m.is_class_mixin():
            # Build Manager[cls] and return its annotated execution result.
            generics_manager = TupleGenericManager((ValueSet([cls]),))
            for c in GenericClass(m, generics_manager).execute_annotation():
                return c
    return None
def _new_dict_filter(cls, is_instance):
    # Collect all attribute names visible on the model class (base classes
    # processed first via reversed() so subclasses override) and wrap each in
    # a DjangoModelName so fields infer to their value types.
    filters = list(cls.get_filters(
        is_instance=is_instance,
        include_metaclasses=False,
        include_type_when_class=False)
    )
    dct = {
        name.string_name: DjangoModelName(cls, name, is_instance)
        for filter_ in reversed(filters)
        for name in filter_.values()
    }
    if is_instance:
        # Replace the objects with a name that amounts to nothing when accessed
        # in an instance. This is not perfect and still completes "objects" in
        # that case, but at least it does not infer stuff like `.objects.filter`.
        # It would be nicer to do that in a better way, so that it also doesn't
        # show up in completions, but it's probably just not worth doing that
        # for the extra amount of work.
        dct['objects'] = EmptyCompiledName(cls.inference_state, 'objects')
    return DictFilter(dct)
def is_django_model_base(value):
    """Return True when *value* is Django's ``ModelBase`` metaclass."""
    if value.py__name__() != 'ModelBase':
        return False
    return value.get_root_context().py__name__() == 'django.db.models.base'
def get_metaclass_filters(func):
    """Plugin hook: use Django-aware filters for ModelBase metaclasses."""
    def wrapper(cls, metaclasses, is_instance):
        if any(is_django_model_base(m) for m in metaclasses):
            return [_new_dict_filter(cls, is_instance)]
        # Not a Django model - defer to the wrapped implementation.
        return func(cls, metaclasses, is_instance)
    return wrapper
def tree_name_to_values(func):
    """Plugin hook wrapping tree-name inference with Django-specific results."""
    def wrapper(inference_state, context, tree_name):
        result = func(inference_state, context, tree_name)
        if tree_name.value in _FILTER_LIKE_METHODS:
            # Here we try to overwrite stuff like User.objects.filter. We need
            # this to make sure that keyword param completion works on these
            # kind of methods.
            for v in result:
                if v.get_qualified_names() == ('_BaseQuerySet', tree_name.value) \
                        and v.parent_context.is_module() \
                        and v.parent_context.py__name__() == 'django.db.models.query':
                    qs = context.get_value()
                    generics = qs.get_generics()
                    if len(generics) >= 1:
                        # Bind the queryset method to each concrete model so
                        # its signature can expose the model's fields.
                        return ValueSet(QuerySetMethodWrapper(v, model)
                                        for model in generics[0])
        elif tree_name.value == 'BaseManager' and context.is_module() \
                and context.py__name__() == 'django.db.models.manager':
            return ValueSet(ManagerWrapper(r) for r in result)
        elif tree_name.value == 'Field' and context.is_module() \
                and context.py__name__() == 'django.db.models.fields':
            return ValueSet(FieldWrapper(r) for r in result)
        return result
    return wrapper
def _find_fields(cls):
    # Yield the names of the model's fields: exactly those class attributes
    # whose inference resolved to Django's DeferredAttribute.
    for name in _new_dict_filter(cls, is_instance=False).values():
        for value in name.infer():
            if value.name.get_qualified_names(include_module_names=True) \
                    == ('django', 'db', 'models', 'query_utils', 'DeferredAttribute'):
                yield name
def _get_signatures(cls):
    # A single constructor signature whose keyword params are the model fields.
    return [DjangoModelSignature(cls, field_names=list(_find_fields(cls)))]
def get_metaclass_signatures(func):
    """Plugin hook: provide field-based signatures for Django model classes.

    Falls back to the wrapped implementation when none of the metaclasses is
    Django's ModelBase.
    """
    def wrapper(cls, metaclasses):
        for metaclass in metaclasses:
            if is_django_model_base(metaclass):
                return _get_signatures(cls)
        # Fix: pass the whole metaclasses sequence through.  The previous code
        # passed the loop variable `metaclass`, which is the wrong value and is
        # unbound (UnboundLocalError) whenever `metaclasses` is empty.
        return func(cls, metaclasses)
    return wrapper
class ManagerWrapper(ValueWrapper):
    """Wraps BaseManager so subscripting yields a generic-aware manager."""

    def py__getitem__(self, index_value_set, contextualized_node):
        return ValueSet(
            GenericManagerWrapper(generic)
            for generic in self._wrapped_value.py__getitem__(
                index_value_set, contextualized_node)
        )
class GenericManagerWrapper(AttributeOverwrite, ClassMixin):
    """A parametrised manager class; descriptor access re-binds the model."""

    def py__get__on_class(self, calling_instance, instance, class_value):
        # Re-parametrise the manager with the concrete model class it is being
        # accessed on, then instantiate it with the original arguments.
        return calling_instance.class_value.with_generics(
            (ValueSet({class_value}),)
        ).py__call__(calling_instance._arguments)

    def with_generics(self, generics_tuple):
        return self._wrapped_value.with_generics(generics_tuple)
class FieldWrapper(ValueWrapper):
    """Wraps models.Field so subscripting yields a generic-aware field."""

    def py__getitem__(self, index_value_set, contextualized_node):
        return ValueSet(
            GenericFieldWrapper(generic)
            for generic in self._wrapped_value.py__getitem__(
                index_value_set, contextualized_node)
        )
class GenericFieldWrapper(AttributeOverwrite, ClassMixin):
    """A parametrised field class with a short-circuited descriptor."""

    def py__get__on_class(self, calling_instance, instance, class_value):
        # This is mostly an optimization to avoid Jedi aborting inference,
        # because of too many function executions of Field.__get__.
        return ValueSet({calling_instance})
class DjangoModelSignature(AbstractSignature):
    """Constructor signature for a model: one keyword param per field."""

    def __init__(self, value, field_names):
        super(DjangoModelSignature, self).__init__(value)
        self._field_names = field_names

    def get_param_names(self, resolve_stars=False):
        return [DjangoParamName(name) for name in self._field_names]
class DjangoParamName(BaseTreeParamName):
    """A keyword-only constructor parameter backed by a model field name."""

    def __init__(self, field_name):
        super(DjangoParamName, self).__init__(field_name.parent_context, field_name.tree_name)
        self._field_name = field_name

    def get_kind(self):
        # Model fields are passed to the constructor by keyword only.
        return Parameter.KEYWORD_ONLY

    def infer(self):
        return self._field_name.infer()
class QuerySetMethodWrapper(ValueWrapper):
    """Wraps an unbound queryset method, remembering the model class."""

    def __init__(self, method, model_cls):
        super(QuerySetMethodWrapper, self).__init__(method)
        self._model_cls = model_cls

    def py__get__(self, instance, class_value):
        # Binding produces wrappers that expose the model's field signature.
        return ValueSet({QuerySetBoundMethodWrapper(v, self._model_cls)
                         for v in self._wrapped_value.py__get__(instance, class_value)})
class QuerySetBoundMethodWrapper(ValueWrapper):
    """A bound queryset method whose signature lists the model's fields."""

    def __init__(self, method, model_cls):
        super(QuerySetBoundMethodWrapper, self).__init__(method)
        self._model_cls = model_cls

    def get_signatures(self):
        return _get_signatures(self._model_cls)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for documentation parser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
import os
import sys
from tensorflow.python.platform import googletest
from tensorflow.tools.docs import parser
def test_function(unused_arg, unused_kwarg='default'):
    """Docstring for test function."""
    # Fixture: the docstring and signature above are asserted on by ParserTest.
    pass
def test_function_with_args_kwargs(unused_arg, *unused_args, **unused_kwargs):
    """Docstring for second test function."""
    # Fixture: exercises *args/**kwargs signature extraction in ParserTest.
    pass
class TestClass(object):
    """Docstring for TestClass itself."""

    # NOTE: all docstrings in this fixture class are asserted on by ParserTest;
    # do not reword them.

    def a_method(self, arg='default'):
        """Docstring for a method."""
        pass

    class ChildClass(object):
        """Docstring for a child class."""
        pass

    @property
    def a_property(self):
        """Docstring for a property."""
        pass

    CLASS_MEMBER = 'a class member'
class DummyVisitor(object):
    """Minimal visitor stand-in exposing only what ReferenceResolver needs."""

    def __init__(self, index, duplicate_of):
        self.index = index
        self.duplicate_of = duplicate_of
class ParserTest(googletest.TestCase):
    """Tests for the documentation parser's public entry points."""

    def test_documentation_path(self):
        self.assertEqual('test.md', parser.documentation_path('test'))
        self.assertEqual('test/module.md', parser.documentation_path('test.module'))

    def test_replace_references(self):
        class HasOneMember(object):

            def foo(self):
                pass

        string = ('A @{tf.reference}, another @{tf.reference}, '
                  'a member @{tf.reference.foo}, and a @{tf.third}.')
        duplicate_of = {'tf.third': 'tf.fourth'}
        index = {'tf.reference': HasOneMember,
                 'tf.reference.foo': HasOneMember.foo,
                 'tf.third': HasOneMember,
                 'tf.fourth': HasOneMember}

        visitor = DummyVisitor(index, duplicate_of)

        reference_resolver = parser.ReferenceResolver.from_visitor(
            visitor=visitor, doc_index={}, py_module_names=['tf'])
        result = reference_resolver.replace_references(string, '../..')
        self.assertEqual(
            'A [`tf.reference`](../../tf/reference.md), another '
            '[`tf.reference`](../../tf/reference.md), '
            'a member [`tf.reference.foo`](../../tf/reference.md#foo), '
            'and a [`tf.third`](../../tf/fourth.md).',
            result)

    def test_doc_replace_references(self):
        string = '@{$doc1} @{$doc1#abc} @{$doc1$link} @{$doc1#def$zelda} @{$do/c2}'

        class DocInfo(object):
            pass

        doc1 = DocInfo()
        doc1.title = 'Title1'
        doc1.url = 'URL1'
        doc2 = DocInfo()
        doc2.title = 'Two words'
        doc2.url = 'somewhere/else'
        doc_index = {'doc1': doc1, 'do/c2': doc2}

        visitor = DummyVisitor(index={}, duplicate_of={})

        reference_resolver = parser.ReferenceResolver.from_visitor(
            visitor=visitor, doc_index=doc_index, py_module_names=['tf'])
        result = reference_resolver.replace_references(string, 'python')
        self.assertEqual(
            '[Title1](../URL1) [Title1](../URL1#abc) [link](../URL1) '
            '[zelda](../URL1#def) [Two words](../somewhere/else)',
            result)

    def test_docs_for_class(self):
        index = {
            'TestClass': TestClass,
            'TestClass.a_method': TestClass.a_method,
            'TestClass.a_property': TestClass.a_property,
            'TestClass.ChildClass': TestClass.ChildClass,
            'TestClass.CLASS_MEMBER': TestClass.CLASS_MEMBER
        }

        visitor = DummyVisitor(index=index, duplicate_of={})

        reference_resolver = parser.ReferenceResolver.from_visitor(
            visitor=visitor, doc_index={}, py_module_names=['tf'])

        tree = {
            'TestClass': ['a_method', 'a_property', 'ChildClass', 'CLASS_MEMBER']
        }
        parser_config = parser.ParserConfig(
            reference_resolver=reference_resolver,
            duplicates={},
            duplicate_of={},
            tree=tree,
            index=index,
            reverse_index={},
            guide_index={},
            base_dir='/')

        page_info = parser.docs_for_object(
            full_name='TestClass', py_object=TestClass, parser_config=parser_config)

        # Make sure the brief docstring is present
        self.assertEqual(
            inspect.getdoc(TestClass).split('\n')[0], page_info.doc.brief)

        # Make sure the method is present
        self.assertEqual(TestClass.a_method, page_info.methods[0].obj)

        # Make sure that the signature is extracted properly and omits self.
        self.assertEqual(["arg='default'"], page_info.methods[0].signature)

        # Make sure the property is present
        self.assertIs(TestClass.a_property, page_info.properties[0].obj)

        # Make sure there is a link to the child class and it points the right way.
        self.assertIs(TestClass.ChildClass, page_info.classes[0].obj)

        # Make sure this file is contained as the definition location.
        self.assertEqual(os.path.relpath(__file__, '/'), page_info.defined_in.path)

    def test_docs_for_module(self):
        # Get the current module.
        module = sys.modules[__name__]

        index = {
            'TestModule': module,
            'TestModule.test_function': test_function,
            'TestModule.test_function_with_args_kwargs':
                test_function_with_args_kwargs,
            'TestModule.TestClass': TestClass,
        }

        visitor = DummyVisitor(index=index, duplicate_of={})

        reference_resolver = parser.ReferenceResolver.from_visitor(
            visitor=visitor, doc_index={}, py_module_names=['tf'])

        tree = {
            'TestModule': ['TestClass', 'test_function',
                           'test_function_with_args_kwargs']
        }
        parser_config = parser.ParserConfig(
            reference_resolver=reference_resolver,
            duplicates={},
            duplicate_of={},
            tree=tree,
            index=index,
            reverse_index={},
            guide_index={},
            base_dir='/')

        page_info = parser.docs_for_object(
            full_name='TestModule', py_object=module, parser_config=parser_config)

        # Make sure the brief docstring is present
        self.assertEqual(inspect.getdoc(module).split('\n')[0], page_info.doc.brief)

        # Make sure that the members are there
        funcs = {f_info.obj for f_info in page_info.functions}
        self.assertEqual({test_function, test_function_with_args_kwargs}, funcs)

        classes = {cls_info.obj for cls_info in page_info.classes}
        self.assertEqual({TestClass}, classes)

        # Make sure this file is contained as the definition location.
        self.assertEqual(os.path.relpath(__file__, '/'), page_info.defined_in.path)

    def test_docs_for_function(self):
        index = {
            'test_function': test_function
        }

        visitor = DummyVisitor(index=index, duplicate_of={})

        reference_resolver = parser.ReferenceResolver.from_visitor(
            visitor=visitor, doc_index={}, py_module_names=['tf'])

        tree = {
            '': ['test_function']
        }
        parser_config = parser.ParserConfig(
            reference_resolver=reference_resolver,
            duplicates={},
            duplicate_of={},
            tree=tree,
            index=index,
            reverse_index={},
            guide_index={},
            base_dir='/')

        page_info = parser.docs_for_object(
            full_name='test_function',
            py_object=test_function,
            parser_config=parser_config)

        # Make sure the brief docstring is present
        self.assertEqual(
            inspect.getdoc(test_function).split('\n')[0], page_info.doc.brief)

        # Make sure the extracted signature is good.
        self.assertEqual(['unused_arg', "unused_kwarg='default'"],
                         page_info.signature)

        # Make sure this file is contained as the definition location.
        self.assertEqual(os.path.relpath(__file__, '/'), page_info.defined_in.path)

    def test_docs_for_function_with_kwargs(self):
        index = {
            'test_function_with_args_kwargs': test_function_with_args_kwargs
        }

        visitor = DummyVisitor(index=index, duplicate_of={})

        reference_resolver = parser.ReferenceResolver.from_visitor(
            visitor=visitor, doc_index={}, py_module_names=['tf'])

        tree = {
            '': ['test_function_with_args_kwargs']
        }
        parser_config = parser.ParserConfig(
            reference_resolver=reference_resolver,
            duplicates={},
            duplicate_of={},
            tree=tree,
            index=index,
            reverse_index={},
            guide_index={},
            base_dir='/')

        page_info = parser.docs_for_object(
            full_name='test_function_with_args_kwargs',
            py_object=test_function_with_args_kwargs,
            parser_config=parser_config)

        # Make sure the brief docstring is present
        self.assertEqual(
            inspect.getdoc(test_function_with_args_kwargs).split('\n')[0],
            page_info.doc.brief)

        # Make sure the extracted signature is good.
        self.assertEqual(['unused_arg', '*unused_args', '**unused_kwargs'],
                         page_info.signature)

    def test_parse_md_docstring(self):

        def test_function_with_fancy_docstring(arg):
            """Function with a fancy docstring.

            And a bunch of references: @{tf.reference}, another @{tf.reference},
            a member @{tf.reference.foo}, and a @{tf.third}.

            Args:
              arg: An argument.

            Raises:
              an exception

            Returns:
              arg: the input, and
              arg: the input, again.

            @compatibility(numpy)
            NumPy has nothing as awesome as this function.
            @end_compatibility

            @compatibility(theano)
            Theano has nothing as awesome as this function.

            Check it out.
            @end_compatibility

            """
            return arg, arg

        class HasOneMember(object):

            def foo(self):
                pass

        duplicate_of = {'tf.third': 'tf.fourth'}
        index = {
            'tf.fancy': test_function_with_fancy_docstring,
            'tf.reference': HasOneMember,
            'tf.reference.foo': HasOneMember.foo,
            'tf.third': HasOneMember,
            'tf.fourth': HasOneMember
        }

        visitor = DummyVisitor(index=index, duplicate_of=duplicate_of)

        reference_resolver = parser.ReferenceResolver.from_visitor(
            visitor=visitor, doc_index={}, py_module_names=['tf'])

        doc_info = parser._parse_md_docstring(test_function_with_fancy_docstring,
                                              '../..', reference_resolver)

        # References and special sections must be stripped from the body.
        self.assertNotIn('@', doc_info.docstring)
        self.assertNotIn('compatibility', doc_info.docstring)
        self.assertNotIn('Raises:', doc_info.docstring)

        self.assertEqual(len(doc_info.function_details), 3)
        self.assertEqual(set(doc_info.compatibility.keys()), {'numpy', 'theano'})

        self.assertEqual(doc_info.compatibility['numpy'],
                         'NumPy has nothing as awesome as this function.\n')

    def test_generate_index(self):
        module = sys.modules[__name__]

        index = {
            'TestModule': module,
            'test_function': test_function,
            'TestModule.test_function': test_function,
            'TestModule.TestClass': TestClass,
            'TestModule.TestClass.a_method': TestClass.a_method,
            'TestModule.TestClass.a_property': TestClass.a_property,
            'TestModule.TestClass.ChildClass': TestClass.ChildClass,
        }
        duplicate_of = {
            'TestModule.test_function': 'test_function'
        }

        visitor = DummyVisitor(index=index, duplicate_of=duplicate_of)

        reference_resolver = parser.ReferenceResolver.from_visitor(
            visitor=visitor, doc_index={}, py_module_names=['tf'])

        docs = parser.generate_global_index('TestLibrary', index=index,
                                            reference_resolver=reference_resolver)

        # Make sure duplicates and non-top-level symbols are in the index, but
        # methods and properties are not.
        self.assertNotIn('a_method', docs)
        self.assertNotIn('a_property', docs)
        self.assertIn('TestModule.TestClass', docs)
        self.assertIn('TestModule.TestClass.ChildClass', docs)
        self.assertIn('TestModule.test_function', docs)
        # Leading backtick to make sure it's included top-level.
        # This depends on formatting, but should be stable.
        self.assertIn('`test_function', docs)

    def test_argspec_for_functools_partial(self):
        # pylint: disable=unused-argument
        def test_function_for_partial1(arg1, arg2, kwarg1=1, kwarg2=2):
            pass

        def test_function_for_partial2(arg1, arg2, *my_args, **my_kwargs):
            pass
        # pylint: enable=unused-argument

        # pylint: disable=protected-access
        # Make sure everything works for regular functions.
        expected = inspect.ArgSpec(['arg1', 'arg2', 'kwarg1', 'kwarg2'], None, None,
                                   (1, 2))
        self.assertEqual(expected, parser._get_arg_spec(test_function_for_partial1))

        # Make sure doing nothing works.
        expected = inspect.ArgSpec(['arg1', 'arg2', 'kwarg1', 'kwarg2'], None, None,
                                   (1, 2))
        partial = functools.partial(test_function_for_partial1)
        self.assertEqual(expected, parser._get_arg_spec(partial))

        # Make sure setting args from the front works.
        expected = inspect.ArgSpec(['arg2', 'kwarg1', 'kwarg2'], None, None, (1, 2))
        partial = functools.partial(test_function_for_partial1, 1)
        self.assertEqual(expected, parser._get_arg_spec(partial))

        expected = inspect.ArgSpec(['kwarg2',], None, None, (2,))
        partial = functools.partial(test_function_for_partial1, 1, 2, 3)
        self.assertEqual(expected, parser._get_arg_spec(partial))

        # Make sure setting kwargs works.
        expected = inspect.ArgSpec(['arg1', 'arg2', 'kwarg2'], None, None, (2,))
        partial = functools.partial(test_function_for_partial1, kwarg1=0)
        self.assertEqual(expected, parser._get_arg_spec(partial))

        expected = inspect.ArgSpec(['arg1', 'arg2', 'kwarg1'], None, None, (1,))
        partial = functools.partial(test_function_for_partial1, kwarg2=0)
        self.assertEqual(expected, parser._get_arg_spec(partial))

        expected = inspect.ArgSpec(['arg1'], None, None, ())
        partial = functools.partial(test_function_for_partial1,
                                    arg2=0, kwarg1=0, kwarg2=0)
        self.assertEqual(expected, parser._get_arg_spec(partial))

        # Make sure *args, *kwargs is accounted for.
        expected = inspect.ArgSpec([], 'my_args', 'my_kwargs', ())
        partial = functools.partial(test_function_for_partial2, 0, 1)
        self.assertEqual(expected, parser._get_arg_spec(partial))

        # pylint: enable=protected-access

    def testSaveReferenceResolver(self):
        you_cant_serialize_this = object()

        duplicate_of = {'AClass': ['AClass2']}
        doc_index = {'doc': you_cant_serialize_this}
        is_class = {
            'tf': False,
            'tf.AClass': True,
            'tf.AClass2': True,
            'tf.function': False
        }
        is_module = {
            'tf': True,
            'tf.AClass': False,
            'tf.AClass2': False,
            'tf.function': False
        }
        py_module_names = ['tf', 'tfdbg']

        resolver = parser.ReferenceResolver(duplicate_of, doc_index, is_class,
                                            is_module, py_module_names)
        outdir = googletest.GetTempDir()
        filepath = os.path.join(outdir, 'resolver.json')
        resolver.to_json_file(filepath)
        resolver2 = parser.ReferenceResolver.from_json_file(filepath, doc_index)

        # There are no __slots__, so all fields are visible in __dict__.
        self.assertEqual(resolver.__dict__, resolver2.__dict__)
RELU_DOC = """Computes rectified linear: `max(features, 0)`
Args:
features: A `Tensor`. Must be one of the following types: `float32`,
`float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`,
`half`.
name: A name for the operation (optional)
Returns:
A `Tensor`. Has the same type as `features`
"""
class TestParseFunctionDetails(googletest.TestCase):
    """Tests that _parse_function_details round-trips a docstring."""

    def testParseFunctionDetails(self):
        docstring, function_details = parser._parse_function_details(RELU_DOC)

        self.assertEqual(len(function_details), 2)
        args = function_details[0]
        self.assertEqual(args.keyword, 'Args')
        self.assertEmpty(args.header)
        self.assertEqual(len(args.items), 2)
        self.assertEqual(args.items[0][0], 'features')
        self.assertEqual(args.items[1][0], 'name')
        self.assertEqual(args.items[1][1],
                         ' A name for the operation (optional)\n\n')
        returns = function_details[1]
        self.assertEqual(returns.keyword, 'Returns')

        relu_doc_lines = RELU_DOC.split('\n')
        # The brief docstring is the first line plus its trailing blank line.
        self.assertEqual(docstring, relu_doc_lines[0] + '\n\n')
        self.assertEqual(returns.header, relu_doc_lines[-2] + '\n')

        # Re-joining the parsed pieces must reproduce the original docstring.
        self.assertEqual(
            RELU_DOC,
            docstring + ''.join(str(detail) for detail in function_details))
# Allow running this test module directly (e.g. `python parser_test.py`).
if __name__ == '__main__':
    googletest.main()
| |
# -*- coding: utf-8 -*-
import datetime
import io
from bson.objectid import ObjectId
from girder import events, logger
from girder.api import rest
from .model_base import Model
from girder.exceptions import GirderException, ValidationException, NoAssetstoreAdapter
from girder.settings import SettingKey
from girder.utility import RequestBodyStream
from girder.utility.progress import noProgress
class Upload(Model):
"""
This model stores temporary records for uploads that have been approved
but are not yet complete, so that they can be uploaded in chunks of
arbitrary size. The chunks must be uploaded in order.
"""
def initialize(self):
    # Model registration hook: name the backing collection and index the
    # sha512 content hash used for upload lookups.
    self.name = 'upload'
    self.ensureIndex('sha512')
def _getChunkSize(self, minSize=32 * 1024**2):
    """
    Return a chunk size to use in file uploads. This is the maximum of
    the setting for minimum upload chunk size and the specified size.

    :param minSize: the minimum size to return (defaults to 32 MiB).
    :return: chunk size to use for file uploads.
    """
    # NOTE(review): local import - presumably avoids a circular import at
    # module load time; confirm before moving to the top of the file.
    from .setting import Setting
    minChunkSize = Setting().get(SettingKey.UPLOAD_MINIMUM_CHUNK_SIZE)
    return max(minChunkSize, minSize)
def uploadFromFile(self, obj, size, name, parentType=None, parent=None,
user=None, mimeType=None, reference=None,
assetstore=None, attachParent=False):
"""
This method wraps the entire upload process into a single function to
facilitate "internal" uploads from a file-like object. Example:
.. code-block:: python
size = os.path.getsize(filename)
with open(filename, 'rb') as f:
Upload().uploadFromFile(f, size, filename, 'item', parentItem, user)
:param obj: The object representing the content to upload.
:type obj: file-like
:param size: The total size of the file.
:type size: int
:param name: The name of the file to create.
:type name: str
:param parentType: The type of the parent: "folder" or "item".
:type parentType: str
:param parent: The parent (item or folder) to upload into.
:type parent: dict
:param user: The user who is creating the file.
:type user: dict
:param mimeType: MIME type of the file.
:type mimeType: str
:param reference: An optional reference string that will be sent to the
data.process event.
:param assetstore: An optional assetstore to use to store the file. If
unspecified, the current assetstore is used.
:type reference: str
:param attachParent: if True, instead of creating an item within the
parent or giving the file an itemId, set itemId to None and set
attachedToType and attachedToId instead (using the values passed in
parentType and parent). This is intended for files that shouldn't
appear as direct children of the parent, but are still associated
with it.
:type attachParent: boolean
"""
upload = self.createUpload(
user=user, name=name, parentType=parentType, parent=parent,
size=size, mimeType=mimeType, reference=reference,
assetstore=assetstore, attachParent=attachParent)
if size == 0:
return self.finalizeUpload(upload)
# The greater of 32 MB or the the upload minimum chunk size.
chunkSize = self._getChunkSize()
while True:
data = obj.read(chunkSize)
if not data:
break
upload = self.handleChunk(upload, RequestBodyStream(io.BytesIO(data), len(data)))
return upload
def validate(self, doc):
if doc['size'] < 0:
raise ValidationException('File size must not be negative.')
if doc['received'] > doc['size']:
raise ValidationException('Received too many bytes.')
doc['updated'] = datetime.datetime.utcnow()
return doc
    def handleChunk(self, upload, chunk, filter=False, user=None):
        """
        When a chunk is uploaded, this should be called to process the chunk.
        If this is the final chunk of the upload, this method will finalize
        the upload automatically.

        This method will return EITHER an upload or a file document. If this
        is the final chunk of the upload, the upload is finalized and the created
        file document is returned. Otherwise, it returns the upload document
        with the relevant fields modified.

        :param upload: The upload document to update.
        :type upload: dict
        :param chunk: The file object representing the chunk that was uploaded.
        :type chunk: file
        :param filter: Whether the model should be filtered. Only affects
            behavior when returning a file model, not the upload model.
        :type filter: bool
        :param user: The current user. Only affects behavior if filter=True.
        :type user: dict or None
        """
        from .assetstore import Assetstore
        from .file import File
        from girder.utility import assetstore_utilities

        assetstore = Assetstore().load(upload['assetstoreId'])
        adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
        # The adapter writes the chunk bytes and updates 'received' on the doc.
        upload = adapter.uploadChunk(upload, chunk)
        # NOTE(review): progress is persisted whenever the upload was already
        # saved ('_id' present) or is still incomplete; a never-saved upload
        # that just completed skips this write, presumably because
        # finalizeUpload handles it -- confirm.
        if '_id' in upload or upload['received'] != upload['size']:
            upload = self.save(upload)
        # If upload is finished, we finalize it
        if upload['received'] == upload['size']:
            file = self.finalizeUpload(upload, assetstore)
            if filter:
                return File().filter(file, user=user)
            else:
                return file
        else:
            return upload
def requestOffset(self, upload):
"""
Requests the offset that should be used to resume uploading. This
makes the request from the assetstore adapter.
"""
from .assetstore import Assetstore
from girder.utility import assetstore_utilities
assetstore = Assetstore().load(upload['assetstoreId'])
adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
return adapter.requestOffset(upload)
    def finalizeUpload(self, upload, assetstore=None):
        """
        This should only be called manually in the case of creating an
        empty file, i.e. one that has no chunks.

        :param upload: The upload document.
        :type upload: dict
        :param assetstore: If known, the containing assetstore for the upload.
        :type assetstore: dict
        :returns: The file object that was created.
        """
        from .assetstore import Assetstore
        from .file import File
        from .item import Item
        from girder.utility import assetstore_utilities

        events.trigger('model.upload.finalize', upload)
        if assetstore is None:
            assetstore = Assetstore().load(upload['assetstoreId'])

        if 'fileId' in upload:  # Updating an existing file's contents
            file = File().load(upload['fileId'], force=True)
            # Delete the previous file contents from the containing assetstore
            assetstore_utilities.getAssetstoreAdapter(
                Assetstore().load(file['assetstoreId'])).deleteFile(file)
            item = Item().load(file['itemId'], force=True)
            # Propagate the size delta up to the containing item/folders.
            File().propagateSizeChange(item, upload['size'] - file['size'])
            # Update file info
            file['creatorId'] = upload['userId']
            file['created'] = datetime.datetime.utcnow()
            file['assetstoreId'] = assetstore['_id']
            file['size'] = upload['size']
            # If the file was previously imported, it is no longer.
            if file.get('imported'):
                file['imported'] = False
        else:  # Creating a new file record
            if upload.get('attachParent'):
                # Attached files deliberately have no owning item.
                item = None
            elif upload['parentType'] == 'folder':
                # Create a new item with the name of the file.
                item = Item().createItem(
                    name=upload['name'], creator={'_id': upload['userId']},
                    folder={'_id': upload['parentId']})
            elif upload['parentType'] == 'item':
                item = Item().load(id=upload['parentId'], force=True)
            else:
                item = None
            # saveFile=False: the record is saved below, after the adapter and
            # the finalizeUpload.before event have had a chance to modify it.
            file = File().createFile(
                item=item, name=upload['name'], size=upload['size'],
                creator={'_id': upload['userId']}, assetstore=assetstore,
                mimeType=upload['mimeType'], saveFile=False)
            if upload.get('attachParent'):
                if upload['parentType'] and upload['parentId']:
                    file['attachedToType'] = upload['parentType']
                    file['attachedToId'] = upload['parentId']

        adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
        file = adapter.finalizeUpload(upload, file)

        event_document = {'file': file, 'upload': upload}
        events.trigger('model.file.finalizeUpload.before', event_document)
        file = File().save(file)
        events.trigger('model.file.finalizeUpload.after', event_document)

        # Remove the now-complete upload record, if it was ever persisted.
        if '_id' in upload:
            self.remove(upload)

        # NOTE(review): this reads upload['_id'] unconditionally; an upload
        # created with save=False would raise KeyError here -- confirm that
        # path never reaches finalize.
        logger.info('Upload complete. Upload=%s File=%s User=%s' % (
            upload['_id'], file['_id'], upload['userId']))

        # Add an async event for handlers that wish to process this file.
        eventParams = {
            'file': file,
            'assetstore': assetstore,
            'currentToken': rest.getCurrentToken(),
            'currentUser': rest.getCurrentUser()
        }
        if 'reference' in upload:
            eventParams['reference'] = upload['reference']
        events.daemon.trigger('data.process', eventParams)

        return file
def getTargetAssetstore(self, modelType, resource, assetstore=None):
"""
Get the assetstore for a particular target resource, i.e. where new
data within the resource should be stored. In Girder core, this is
always just the current assetstore, but plugins may override this
behavior to allow for more granular assetstore selection.
:param modelType: the type of the resource that will be stored.
:param resource: the resource to be stored.
:param assetstore: if specified, the preferred assetstore where the
resource should be located. This may be overridden.
:returns: the selected assetstore.
"""
from .assetstore import Assetstore
eventParams = {'model': modelType, 'resource': resource}
event = events.trigger('model.upload.assetstore', eventParams)
if event.responses:
assetstore = event.responses[-1]
elif not assetstore:
assetstore = Assetstore().getCurrent()
return assetstore
def createUploadToFile(self, file, user, size, reference=None,
assetstore=None):
"""
Creates a new upload record into a file that already exists. This
should be used when updating the contents of a file. Deletes any
previous file content from the assetstore it was in. This will upload
into the current assetstore rather than assetstore the file was
previously contained in.
:param file: The file record to update.
:param user: The user performing this upload.
:param size: The size of the new file contents.
:param reference: An optional reference string that will be sent to the
data.process event.
:type reference: str
:param assetstore: An optional assetstore to use to store the file. If
unspecified, the current assetstore is used.
"""
from girder.utility import assetstore_utilities
assetstore = self.getTargetAssetstore('file', file, assetstore)
adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
now = datetime.datetime.utcnow()
upload = {
'created': now,
'updated': now,
'userId': user['_id'],
'fileId': file['_id'],
'assetstoreId': assetstore['_id'],
'size': size,
'name': file['name'],
'mimeType': file['mimeType'],
'received': 0
}
if reference is not None:
upload['reference'] = reference
upload = adapter.initUpload(upload)
return self.save(upload)
def createUpload(self, user, name, parentType, parent, size, mimeType=None,
reference=None, assetstore=None, attachParent=False,
save=True):
"""
Creates a new upload record, and creates its temporary file
that the chunks will be written into. Chunks should then be sent
in order using the _id of the upload document generated by this method.
:param user: The user performing the upload.
:type user: dict
:param name: The name of the file being uploaded.
:type name: str
:param parentType: The type of the parent being uploaded into.
:type parentType: str ('folder' or 'item')
:param parent: The document representing the parent.
:type parent: dict.
:param size: Total size in bytes of the whole file.
:type size: int
:param mimeType: The mimeType of the file.
:type mimeType: str
:param reference: An optional reference string that will be sent to the
data.process event.
:type reference: str
:param assetstore: An optional assetstore to use to store the file. If
unspecified, the current assetstore is used.
:param attachParent: if True, instead of creating an item within the
parent or giving the file an itemId, set itemId to None and set
attachedToType and attachedToId instead (using the values passed in
parentType and parent). This is intended for files that shouldn't
appear as direct children of the parent, but are still associated
with it.
:type attachParent: boolean
:param save: if True, save the document after it is created.
:type save: boolean
:returns: The upload document that was created.
"""
from girder.utility import assetstore_utilities
assetstore = self.getTargetAssetstore(parentType, parent, assetstore)
adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
now = datetime.datetime.utcnow()
if not mimeType:
mimeType = 'application/octet-stream'
upload = {
'created': now,
'updated': now,
'assetstoreId': assetstore['_id'],
'size': size,
'name': name,
'mimeType': mimeType,
'received': 0
}
if reference is not None:
upload['reference'] = reference
if parentType and parent:
upload['parentType'] = parentType.lower()
upload['parentId'] = parent['_id']
else:
upload['parentType'] = None
upload['parentId'] = None
if attachParent:
upload['attachParent'] = attachParent
if user:
upload['userId'] = user['_id']
else:
upload['userId'] = None
upload = adapter.initUpload(upload)
if save:
upload = self.save(upload)
return upload
    def moveFileToAssetstore(self, file, user, assetstore, progress=noProgress):
        """
        Move a file from whatever assetstore it is located in to a different
        assetstore. This is done by downloading and re-uploading the file.

        :param file: the file to move.
        :param user: the user that is authorizing the move.
        :param assetstore: the destination assetstore.
        :param progress: optional progress context.
        :returns: the original file if it is not moved, or the newly 'uploaded'
            file if it is.
        """
        from .file import File
        if file['assetstoreId'] == assetstore['_id']:
            # Already in the destination assetstore; nothing to do.
            return file
        # Allow an event to cancel the move. This could be done, for instance,
        # on files that could change dynamically.
        event = events.trigger('model.upload.movefile', {
            'file': file, 'assetstore': assetstore})
        if event.defaultPrevented:
            raise GirderException(
                'The file %s could not be moved to assetstore %s' % (
                    file['_id'], assetstore['_id']))
        # Create a new upload record into the existing file
        upload = self.createUploadToFile(
            file=file, user=user, size=int(file['size']), assetstore=assetstore)
        if file['size'] == 0:
            # Empty file: no chunks to stream, finalize immediately.
            return File().filter(self.finalizeUpload(upload), user)
        # Uploads need to be chunked for some assetstores
        chunkSize = self._getChunkSize()
        chunk = None
        # Accumulate downloaded pieces until at least chunkSize bytes are
        # buffered, then re-upload that buffer as one chunk.
        for data in File().download(file, headers=False)():
            if chunk is not None:
                chunk += data
            else:
                chunk = data
            if len(chunk) >= chunkSize:
                upload = self.handleChunk(upload, RequestBodyStream(io.BytesIO(chunk), len(chunk)))
                progress.update(increment=len(chunk))
                chunk = None
        if chunk is not None:
            # Flush the final, possibly short, buffer.
            upload = self.handleChunk(upload, RequestBodyStream(io.BytesIO(chunk), len(chunk)))
            progress.update(increment=len(chunk))
        # handleChunk returns the finalized file document once the last chunk
        # has been processed, so this is the new file record.
        return upload
def list(self, limit=0, offset=0, sort=None, filters=None):
"""
Search for uploads or simply list all visible uploads.
:param limit: Result set size limit.
:param offset: Offset into the results.
:param sort: The sort direction.
:param filters: if not None, a dictionary that can contain ids that
must match the uploads, plus an minimumAge value.
"""
query = {}
if filters:
for key in ('uploadId', 'userId', 'parentId', 'assetstoreId'):
if key in filters:
id = filters[key]
if id and not isinstance(id, ObjectId):
id = ObjectId(id)
if id:
if key == 'uploadId':
query['_id'] = id
else:
query[key] = id
if 'minimumAge' in filters:
query['updated'] = {
'$lte': datetime.datetime.utcnow() - datetime.timedelta(
days=float(filters['minimumAge']))
}
# Perform the find; we'll do access-based filtering of the result
# set afterward.
return self.find(query, limit=limit, sort=sort, offset=offset)
def cancelUpload(self, upload):
"""
Discard an upload that is in progress. This asks the assetstore to
discard the data, then removes the item from the upload database.
:param upload: The upload document to remove.
:type upload: dict
"""
from .assetstore import Assetstore
from girder.utility import assetstore_utilities
assetstore = Assetstore().load(upload['assetstoreId'])
# If the assetstore was deleted, the upload may still be in our
# database
if assetstore:
adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
try:
adapter.cancelUpload(upload)
except ValidationException:
# this assetstore is currently unreachable, so skip it
pass
if '_id' in upload:
self.remove(upload)
    def untrackedUploads(self, action='list', assetstoreId=None):
        """
        List or discard any uploads that an assetstore knows about but that our
        database doesn't have in it.

        :param action: 'delete' to discard the untracked uploads, anything else
            to just return with a list of them.
        :type action: str
        :param assetstoreId: if present, only include untracked items from the
            specified assetstore.
        :type assetstoreId: str
        :returns: a list of items that were removed or could be removed.
        """
        from .assetstore import Assetstore
        from girder.utility import assetstore_utilities
        results = []
        # Everything our database does track; adapters diff against this.
        knownUploads = list(self.list())
        # Iterate through all assetstores
        for assetstore in Assetstore().list():
            if assetstoreId and assetstoreId != assetstore['_id']:
                continue
            try:
                adapter = assetstore_utilities.getAssetstoreAdapter(assetstore)
            except NoAssetstoreAdapter:
                # No adapter registered for this assetstore type; nothing to
                # reconcile.
                continue
            try:
                results.extend(adapter.untrackedUploads(
                    knownUploads, delete=(action == 'delete')))
            except ValidationException:
                # this assetstore is currently unreachable, so skip it
                pass
        return results
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure files have the right permissions.
Some developers have broken SCM configurations that flip the executable
permission on for no good reason. Unix developers who run ls --color will then
see .cc files in green and get confused.
- For file extensions that must be executable, add it to EXECUTABLE_EXTENSIONS.
- For file extensions that must not be executable, add it to
NOT_EXECUTABLE_EXTENSIONS.
- To ignore all the files inside a directory, add it to IGNORED_PATHS.
- For file base name with ambiguous state and that should not be checked for
shebang, add it to IGNORED_FILENAMES.
Any file not matching the above will be opened and looked if it has a shebang
or an ELF header. If this does not match the executable bit on the file, the
file will be flagged.
Note that all directory separators must be slashes (Unix-style) and not
backslashes. All directories should be relative to the source root and all
file paths should be only lowercase.
"""
import json
import logging
import optparse
import os
import stat
import string
import subprocess
import sys
#### USER EDITABLE SECTION STARTS HERE ####
# Files with these extensions must have executable bit set.
#
# Case-sensitive.
EXECUTABLE_EXTENSIONS = (
'bat',
'dll',
'dylib',
'exe',
)
# These files must have executable bit set.
#
# Case-insensitive, lower-case only.
EXECUTABLE_PATHS = (
'chrome/test/data/app_shim/app_shim_32_bit.app/contents/'
'macos/app_mode_loader',
'chrome/test/data/extensions/uitest/plugins/plugin.plugin/contents/'
'macos/testnetscapeplugin',
'chrome/test/data/extensions/uitest/plugins_private/plugin.plugin/contents/'
'macos/testnetscapeplugin',
)
# These files must not have the executable bit set. This is mainly a performance
# optimization as these files are not checked for shebang. The list was
# partially generated from:
# git ls-files | grep "\\." | sed 's/.*\.//' | sort | uniq -c | sort -b -g
#
# Case-sensitive.
NON_EXECUTABLE_EXTENSIONS = (
'1',
'3ds',
'S',
'am',
'applescript',
'asm',
'c',
'cc',
'cfg',
'chromium',
'cpp',
'crx',
'cs',
'css',
'cur',
'def',
'der',
'expected',
'gif',
'grd',
'gyp',
'gypi',
'h',
'hh',
'htm',
'html',
'hyph',
'ico',
'idl',
'java',
'jpg',
'js',
'json',
'm',
'm4',
'mm',
'mms',
'mock-http-headers',
'nexe',
'nmf',
'onc',
'pat',
'patch',
'pdf',
'pem',
'plist',
'png',
'proto',
'rc',
'rfx',
'rgs',
'rules',
'spec',
'sql',
'srpc',
'svg',
'tcl',
'test',
'tga',
'txt',
'vcproj',
'vsprops',
'webm',
'word',
'xib',
'xml',
'xtb',
'zip',
)
# These files must not have executable bit set.
#
# Case-insensitive, lower-case only.
NON_EXECUTABLE_PATHS = (
'build/android/tests/symbolize/liba.so',
'build/android/tests/symbolize/libb.so',
'chrome/installer/mac/sign_app.sh.in',
'chrome/installer/mac/sign_versioned_dir.sh.in',
'chrome/test/data/extensions/uitest/plugins/plugin32.so',
'chrome/test/data/extensions/uitest/plugins/plugin64.so',
'chrome/test/data/extensions/uitest/plugins_private/plugin32.so',
'chrome/test/data/extensions/uitest/plugins_private/plugin64.so',
'courgette/testdata/elf-32-1',
'courgette/testdata/elf-32-2',
'courgette/testdata/elf-64',
)
# File names that are always whitelisted. (These are mostly autoconf spew.)
#
# Case-sensitive.
IGNORED_FILENAMES = (
'config.guess',
'config.sub',
'configure',
'depcomp',
'install-sh',
'missing',
'mkinstalldirs',
'naclsdk',
'scons',
)
# File paths starting with one of these will be ignored as well.
# Please consider fixing your file permissions, rather than adding to this list.
#
# Case-insensitive, lower-case only.
IGNORED_PATHS = (
'native_client_sdk/src/build_tools/sdk_tools/third_party/fancy_urllib/'
'__init__.py',
'out/',
# TODO(maruel): Fix these.
'third_party/bintrees/',
'third_party/closure_linter/',
'third_party/devscripts/licensecheck.pl.vanilla',
'third_party/hyphen/',
'third_party/jemalloc/',
'third_party/lcov-1.9/contrib/galaxy/conglomerate_functions.pl',
'third_party/lcov-1.9/contrib/galaxy/gen_makefile.sh',
'third_party/lcov/contrib/galaxy/conglomerate_functions.pl',
'third_party/lcov/contrib/galaxy/gen_makefile.sh',
'third_party/libevent/autogen.sh',
'third_party/libevent/test/test.sh',
'third_party/libxml/linux/xml2-config',
'third_party/libxml/src/ltmain.sh',
'third_party/mesa/',
'third_party/protobuf/',
'third_party/python_gflags/gflags.py',
'third_party/sqlite/',
'third_party/talloc/script/mksyms.sh',
'third_party/tcmalloc/',
'third_party/tlslite/setup.py',
# TODO(nednguyen): Remove this when telemetry is moved to catapult
'tools/telemetry/third_party/',
)
#### USER EDITABLE SECTION ENDS HERE ####
# Sanity-check the user-editable tables above at import time: nothing may be
# listed as both executable and non-executable, and every path entry must be
# a lowercase Unix-style path.
assert set(EXECUTABLE_EXTENSIONS) & set(NON_EXECUTABLE_EXTENSIONS) == set()
assert set(EXECUTABLE_PATHS) & set(NON_EXECUTABLE_PATHS) == set()

VALID_CHARS = set(string.ascii_lowercase + string.digits + '/-_.')
for paths in (EXECUTABLE_PATHS, NON_EXECUTABLE_PATHS, IGNORED_PATHS):
  assert all([set(path).issubset(VALID_CHARS) for path in paths])
def capture(cmd, cwd):
  """Runs a command and returns its stdout as a byte string.

  The exit code and stderr are intentionally discarded.
  """
  logging.debug('%s; cwd=%s' % (' '.join(cmd), cwd))
  env = os.environ.copy()
  # Force English output so callers can parse messages reliably.
  env['LANGUAGE'] = 'en_US.UTF-8'
  proc = subprocess.Popen(
      cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, env=env)
  stdout, _ = proc.communicate()
  return stdout
def get_git_root(dir_path):
  """Returns the top-level directory of the git checkout, or None."""
  root = capture(['git', 'rev-parse', '--show-toplevel'], dir_path).strip()
  return root or None
def is_ignored(rel_path):
  """Returns True if rel_path is in our whitelist of files to ignore."""
  # Normalize once; the second .lower() in the original was redundant since
  # rel_path is already lowercased here.
  rel_path = rel_path.lower()
  return (
      os.path.basename(rel_path) in IGNORED_FILENAMES or
      rel_path.startswith(IGNORED_PATHS))
def must_be_executable(rel_path):
  """The file name represents a file type that must have the executable bit
  set.
  """
  extension = os.path.splitext(rel_path)[1][1:]
  return (extension in EXECUTABLE_EXTENSIONS or
          rel_path.lower() in EXECUTABLE_PATHS)
def must_not_be_executable(rel_path):
  """The file name represents a file type that must not have the executable
  bit set.
  """
  extension = os.path.splitext(rel_path)[1][1:]
  return (extension in NON_EXECUTABLE_EXTENSIONS or
          rel_path.lower() in NON_EXECUTABLE_PATHS)
def has_executable_bit(full_path):
  """Returns True if any of the user/group/other executable bits is set."""
  exec_mask = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
  mode = os.stat(full_path).st_mode
  return bool(mode & exec_mask)
def has_shebang_or_is_elf(full_path):
  """Returns a (has_shebang, is_elf) pair for the file at full_path.

  full_path is the absolute path to the file; only the first four bytes are
  inspected.
  """
  with open(full_path, 'rb') as f:
    header = f.read(4)
  is_shebang = header[:3] == '#!/' or header == '#! /'
  is_elf = header == '\x7fELF'
  return (is_shebang, is_elf)
def check_file(root_path, rel_path):
  """Checks the permissions of the file whose path is root_path + rel_path and
  returns an error dict if it is inconsistent, or None on success.

  It is assumed that the file is not ignored by is_ignored().

  If the file name is matched with must_be_executable() or
  must_not_be_executable(), only its executable bit is checked.
  Otherwise, the first few bytes of the file are read to verify if it has a
  shebang or ELF header and compares this with the executable bit on the file.
  """
  full_path = os.path.join(root_path, rel_path)

  def error_for(message):
    return {
        'error': message,
        'full_path': full_path,
        'rel_path': rel_path,
    }

  try:
    is_exec = has_executable_bit(full_path)
  except OSError:
    # It's faster to catch exception than call os.path.islink(). Chromium
    # tree happens to have invalid symlinks under
    # third_party/openssl/openssl/test/.
    return None

  if must_be_executable(rel_path):
    return None if is_exec else error_for('Must have executable bit set')
  if must_not_be_executable(rel_path):
    return error_for('Must not have executable bit set') if is_exec else None

  # For the others, it depends on the file header.
  (shebang, elf) = has_shebang_or_is_elf(full_path)
  if is_exec == (shebang or elf):
    return None
  if is_exec:
    return error_for('Has executable bit but not shebang or ELF header')
  if shebang:
    return error_for('Has shebang but not executable bit')
  return error_for('Has ELF header but not executable bit')
def check_files(root, files):
  """Returns the permission errors found among the non-ignored files."""
  candidates = (check_file(root, f) for f in files if not is_ignored(f))
  return filter(None, candidates)
class ApiBase(object):
  """Baseline checker that walks the filesystem directly (no SCM help)."""

  def __init__(self, root_dir, bare_output):
    self.root_dir = root_dir
    self.bare_output = bare_output
    # Total files examined, and how many required reading the file header.
    self.count = 0
    self.count_read_header = 0

  def check_file(self, rel_path):
    # Thin counting wrapper around the module-level check_file().
    logging.debug('check_file(%s)' % rel_path)
    self.count += 1
    if (not must_be_executable(rel_path) and
        not must_not_be_executable(rel_path)):
      # Neither extension list matched, so check_file() will have to sniff
      # the header for a shebang/ELF signature.
      self.count_read_header += 1
    return check_file(self.root_dir, rel_path)

  def check_dir(self, rel_path):
    return self.check(rel_path)

  def check(self, start_dir):
    """Check the files in start_dir, recursively check its subdirectories."""
    errors = []
    items = self.list_dir(start_dir)
    logging.info('check(%s) -> %d' % (start_dir, len(items)))
    for item in items:
      full_path = os.path.join(self.root_dir, start_dir, item)
      rel_path = full_path[len(self.root_dir) + 1:]
      if is_ignored(rel_path):
        continue
      if os.path.isdir(full_path):
        # Depth first.
        errors.extend(self.check_dir(rel_path))
      else:
        error = self.check_file(rel_path)
        if error:
          errors.append(error)
    return errors

  def list_dir(self, start_dir):
    """Lists all the files and directory inside start_dir."""
    # Dotfiles (.git etc.) are never interesting here.
    return sorted(
      x for x in os.listdir(os.path.join(self.root_dir, start_dir))
      if not x.startswith('.')
    )
class ApiAllFilesAtOnceBase(ApiBase):
  """Base for checkers whose SCM can enumerate every file in one call."""
  # Cached, sorted list of all tracked files; fetched lazily on first use.
  _files = None

  def list_dir(self, start_dir):
    """Lists all the files and directory inside start_dir."""
    if self._files is None:
      self._files = sorted(self._get_all_files())
      if not self.bare_output:
        print 'Found %s files' % len(self._files)
    # Convert start_dir to a root-relative prefix, then keep only the cached
    # entries underneath it (with the prefix stripped).
    start_dir = start_dir[len(self.root_dir) + 1:]
    return [
      x[len(start_dir):] for x in self._files if x.startswith(start_dir)
    ]

  def _get_all_files(self):
    """Lists all the files and directory inside self._root_dir."""
    raise NotImplementedError()
class ApiGit(ApiAllFilesAtOnceBase):
  """SCM-aware checker that asks git for the tracked file list."""

  def _get_all_files(self):
    # 'git ls-files' prints one tracked path per line, relative to root_dir.
    return capture(['git', 'ls-files'], cwd=self.root_dir).splitlines()
def get_scm(dir_path, bare):
  """Returns a properly configured ApiBase instance."""
  fallback = dir_path or os.getcwd()
  root = get_git_root(fallback)
  if root:
    if not bare:
      print('Found git repository at %s' % root)
    return ApiGit(dir_path or root, bare)
  # No supported SCM detected; fall back to a plain filesystem walker.
  if not bare:
    print('Failed to determine the SCM for %s' % dir_path)
  return ApiBase(fallback, bare)
def main():
  """Parses command-line options and runs the permission checks.

  Returns 0 on success, 1 if any file failed a check.
  """
  usage = """Usage: python %prog [--root <root>] [tocheck]
tocheck  Specifies the directory, relative to root, to check. This defaults
         to "." so it checks everything.

Examples:
  python %prog
  python %prog --root /path/to/source chrome"""

  parser = optparse.OptionParser(usage=usage)
  parser.add_option(
      '--root',
      help='Specifies the repository root. This defaults '
          'to the checkout repository root')
  parser.add_option(
      '-v', '--verbose', action='count', default=0, help='Print debug logging')
  parser.add_option(
      '--bare',
      action='store_true',
      default=False,
      help='Prints the bare filename triggering the checks')
  parser.add_option(
      '--file', action='append', dest='files',
      help='Specifics a list of files to check the permissions of. Only these '
          'files will be checked')
  parser.add_option('--json', help='Path to JSON output file')
  options, args = parser.parse_args()

  # Each -v raises verbosity one level, capped at DEBUG.
  levels = [logging.ERROR, logging.INFO, logging.DEBUG]
  logging.basicConfig(level=levels[min(len(levels) - 1, options.verbose)])

  if len(args) > 1:
    parser.error('Too many arguments used')

  if options.root:
    options.root = os.path.abspath(options.root)

  if options.files:
    # --file implies --bare (for PRESUBMIT.py).
    options.bare = True
    # NOTE(review): this path uses options.root directly; it may be None if
    # --root was not given -- confirm callers always pass --root with --file.
    errors = check_files(options.root, options.files)
  else:
    api = get_scm(options.root, options.bare)
    start_dir = args[0] if args else api.root_dir
    errors = api.check(start_dir)
    if not options.bare:
      print('Processed %s files, %d files where tested for shebang/ELF '
            'header' % (api.count, api.count_read_header))

  if options.json:
    with open(options.json, 'w') as f:
      json.dump(errors, f)

  if errors:
    if options.bare:
      print '\n'.join(e['full_path'] for e in errors)
    else:
      print '\nFAILED\n'
      print '\n'.join('%s: %s' % (e['full_path'], e['error']) for e in errors)
    return 1
  if not options.bare:
    print '\nSUCCESS\n'
  return 0


if '__main__' == __name__:
  sys.exit(main())
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from __future__ import unicode_literals
import base64
import logging
import re
from collections import defaultdict
from datetime import datetime
from types import NoneType
import cloudstorage
from babel import Locale
from babel.numbers import format_currency
from dateutil.relativedelta import relativedelta
from google.appengine.api import urlfetch
from google.appengine.api.taskqueue import taskqueue
from google.appengine.ext import db, deferred, ndb
from mcfw.consts import MISSING, REST_TYPE_TO
from mcfw.exceptions import HttpBadRequestException, HttpForbiddenException
from mcfw.properties import azzert, get_members
from mcfw.restapi import rest, GenericRESTRequestHandler
from mcfw.rpc import returns, arguments, serialize_complex_value
from rogerthat.bizz.forms import FormNotFoundException
from rogerthat.bizz.gcs import get_serving_url
from rogerthat.bizz.maps.poi.models import PointOfInterest
from rogerthat.bizz.maps.services.places import get_place_types
from rogerthat.bizz.rtemail import EMAIL_REGEX
from rogerthat.bizz.service import AvatarImageNotSquareException, InvalidValueException
from rogerthat.consts import DEBUG, SCHEDULED_QUEUE
from rogerthat.dal import parent_key, put_and_invalidate_cache, parent_key_unsafe, put_in_chunks, parent_ndb_key
from rogerthat.dal.profile import get_user_profile, get_profile_key, get_service_profile
from rogerthat.models import ServiceIdentity
from rogerthat.models.news import MediaType
from rogerthat.rpc import users
from rogerthat.rpc.service import BusinessException
from rogerthat.rpc.users import get_current_session
from rogerthat.service.api import system
from rogerthat.settings import get_server_settings
from rogerthat.to import ReturnStatusTO, RETURNSTATUS_TO_SUCCESS
from rogerthat.to.friends import FriendListResultTO
from rogerthat.to.messaging import BaseMemberTO
from rogerthat.to.service import UserDetailsTO
from rogerthat.translations import DEFAULT_LANGUAGE
from rogerthat.utils.app import get_human_user_from_app_user, sanitize_app_user, \
get_app_id_from_app_user, get_app_user_tuple
from rogerthat.utils.channel import send_message
from rogerthat.utils.service import create_service_identity_user, remove_slash_default
from shop.bizz import add_service_admin, get_service_admins
from shop.dal import get_customer, get_customer_signups
from shop.exceptions import InvalidEmailFormatException
from shop.models import Customer, CustomerSignup
from solutions import translate as common_translate
from solutions.common.bizz import get_next_free_spots_in_service_menu, common_provision, timezone_offset, \
broadcast_updates_pending, SolutionModule, delete_file_blob, create_file_blob, \
create_news_publisher, delete_news_publisher, enable_or_disable_solution_module, \
validate_enable_or_disable_solution_module, OrganizationType, get_organization_type, validate_before_provision, \
auto_publish
from solutions.common.bizz.branding_settings import save_branding_settings
from solutions.common.bizz.events import update_events_from_google, get_google_authenticate_url, get_google_calendars
from solutions.common.bizz.group_purchase import save_group_purchase, delete_group_purchase, broadcast_group_purchase, \
new_group_purchase_subscription
from solutions.common.bizz.images import upload_file, list_files
from solutions.common.bizz.inbox import send_statistics_export_email, send_inbox_info_messages_to_services
from solutions.common.bizz.loyalty import update_user_data_admins
from solutions.common.bizz.menu import _put_default_menu, get_menu_item_qr_url
from solutions.common.bizz.messaging import validate_broadcast_url, send_reply, delete_all_trash
from solutions.common.bizz.paddle import get_paddle_info, populate_info_from_paddle
from solutions.common.bizz.repair import send_message_for_repair_order, delete_repair_order
from solutions.common.bizz.sandwich import ready_sandwich_order, delete_sandwich_order, reply_sandwich_order
from solutions.common.bizz.service import new_inbox_message, send_inbox_message_update, set_customer_signup_status
from solutions.common.bizz.settings import save_settings, set_logo, set_avatar, save_rss_urls, get_service_info
from solutions.common.bizz.static_content import put_static_content as bizz_put_static_content, delete_static_content
from solutions.common.consts import TRANSLATION_MAPPING, OCA_FILES_BUCKET, AUTO_PUBLISH_MINUTES
from solutions.common.dal import get_solution_settings, get_static_content_list, get_solution_group_purchase_settings, \
get_solution_calendars, get_solution_inbox_messages, \
get_solution_identity_settings, get_solution_settings_or_identity_settings, \
get_solution_news_publishers, is_existing_friend
from solutions.common.dal.appointment import get_solution_appointment_settings
from solutions.common.dal.repair import get_solution_repair_orders, get_solution_repair_settings
from solutions.common.integrations.jcc.jcc_appointments import get_jcc_settings, save_jcc_settings
from solutions.common.integrations.qmatic.qmatic import get_qmatic_settings, save_qmatic_settings, QMaticSettingsTO
from solutions.common.integrations.timeblockr.settings import get_timeblockr_settings, save_timeblockr_settings
from solutions.common.localizer import translations
from solutions.common.models import SolutionBrandingSettings, SolutionSettings, SolutionInboxMessage, \
SolutionRssScraperSettings
from solutions.common.models.agenda import SolutionCalendar
from solutions.common.models.appointment import SolutionAppointmentWeekdayTimeframe, SolutionAppointmentSettings
from solutions.common.models.cityapp import PaddleSettings, PaddleMapping, PaddleOrganizationalUnits
from solutions.common.models.forms import OcaForm
from solutions.common.models.group_purchase import SolutionGroupPurchase
from solutions.common.models.repair import SolutionRepairSettings
from solutions.common.models.sandwich import SandwichType, SandwichTopping, SandwichOption, SandwichSettings, \
SandwichOrder
from solutions.common.models.static_content import SolutionStaticContent
from solutions.common.to import ServiceMenuFreeSpotsTO, SolutionStaticContentTO, SolutionSettingsTO, \
MenuTO, EventItemTO, PublicEventItemTO, SolutionAppointmentWeekdayTimeframeTO, BrandingSettingsTO, \
SolutionRepairOrderTO, SandwichSettingsTO, SandwichOrderTO, SolutionGroupPurchaseTO, \
SolutionGroupPurchaseSettingsTO, SolutionCalendarTO, SolutionInboxForwarder, SolutionInboxesTO, \
SolutionInboxMessageTO, SolutionAppointmentSettingsTO, \
SolutionRepairSettingsTO, UrlReturnStatusTO, ImageReturnStatusTO, SolutionUserKeyLabelTO, \
SolutionCalendarWebTO, BrandingSettingsAndMenuItemsTO, ServiceMenuItemWithCoordinatesTO, \
ServiceMenuItemWithCoordinatesListTO, SolutionGoogleCalendarStatusTO, PictureReturnStatusTO, \
AppUserRolesTO, CustomerSignupTO, SolutionRssSettingsTO, CreateEventItemTO
from solutions.common.to.forms import UploadedFileTO, GalleryFileTO
from solutions.common.to.paddle import PaddleSettingsTO, PaddleSettingsServicesTO, SimpleServiceTO
from solutions.common.to.statistics import StatisticsResultTO
from solutions.common.utils import is_default_service_identity, create_service_identity_user_wo_default
@rest("/solutions/common/public/menu/load", "get", authenticated=False)
@returns(dict)
@arguments(service_user_email=unicode)
def public_load_menu(service_user_email):
    """Return the public restaurant menu of a service as a dict, or None when unavailable."""
    from solutions.common.dal import get_restaurant_menu
    if service_user_email in (None, MISSING):
        logging.debug("Could not load public menu (service_user_email None|MISSING)")
        return None
    service_user = users.User(service_user_email)
    sln_settings = get_solution_settings(service_user)
    if not sln_settings:
        logging.debug("Could not load public menu for: %s (SolutionSettings==None)", service_user_email)
        return None
    restaurant_menu = get_restaurant_menu(service_user, sln_settings.solution)
    if not restaurant_menu:
        logging.debug("Could not load public menu for: %s (Menu==None)", service_user_email)
        return None
    menu_dict = serialize_complex_value(MenuTO.fromMenuObject(restaurant_menu), MenuTO, False)
    # Prices are stored as long (cents); render them as localized currency strings.
    for category in menu_dict['categories']:
        for item in category['items']:
            item['price'] = format_currency((item['price'] or 0) / 100.0, sln_settings.currency, '#,##0.00',
                                            locale=sln_settings.main_language)
    return menu_dict
@rest("/solutions/common/public/events/load", "get", authenticated=False)
@returns([PublicEventItemTO])
@arguments(service_user_email=unicode)
def public_load_events(service_user_email):
    """Return the public event list of a service, or None when no usable email was given."""
    from solutions.common.dal import get_public_event_list
    # Guard clause instead of if/else: bail out on missing email.
    if not service_user_email or service_user_email == MISSING:
        logging.debug("Could not load public events (service_user_email None|MISSING)")
        return None
    service_user = users.User(service_user_email)
    events = get_public_event_list(service_user)
    return [PublicEventItemTO.fromPublicEventItemObject(event) for event in events]
@rest("/solutions/common/public/group_purchase/picture", "get", authenticated=False, silent_result=True)
@returns(PictureReturnStatusTO)
@arguments(service_user_email=unicode, group_purchase_id=long, picture_version=long, service_identity=unicode)
def public_group_purchase_picture(service_user_email, group_purchase_id, picture_version=0, service_identity=None):
    """Serve the picture of a group purchase; picture_version only serves as a cache buster in the URL."""
    service_user = users.User(service_user_email)
    sln_settings = get_solution_settings(service_user)
    service_identity_user = create_service_identity_user_wo_default(service_user, service_identity)
    parent = parent_key_unsafe(service_identity_user, sln_settings.solution)
    group_purchase = SolutionGroupPurchase.get_by_id(group_purchase_id, parent)
    if group_purchase and group_purchase.picture:
        response = GenericRESTRequestHandler.getCurrentResponse()
        # The URL is versioned, so browsers may cache the picture indefinitely.
        response.headers['Cache-Control'] = "public, max-age=31536000"  # Cache forever (1 year)
        response.headers['Access-Control-Allow-Origin'] = '*'
        return PictureReturnStatusTO.create(picture=unicode(group_purchase.picture))
    return PictureReturnStatusTO.create(False, None)
@rest("/common/service_menu/get_free_spots", "get", read_only_access=True)
@returns(ServiceMenuFreeSpotsTO)
@arguments(count=int)
def get_free_spots(count=10):
    """Return up to `count` free coordinates in the service menu.

    Static-content items with unprovisioned changes are taken into account:
    a visible item claims its spot, a deleted/hidden one frees it.
    """
    service_user = users.get_current_user()
    menu = system.get_menu()
    taken = [item.coords for item in menu.items]
    for sc in SolutionStaticContent.list_changed(service_user):
        if not sc.deleted and sc.visible:
            taken.append(sc.coords)
        else:
            try:
                taken.remove(sc.coords)
            except ValueError:
                pass
    return ServiceMenuFreeSpotsTO.fromList(get_next_free_spots_in_service_menu(taken, count))
@rest("/common/static_content/load", "get", read_only_access=True, silent_result=True)
@returns([SolutionStaticContentTO])
@arguments()
def get_static_content():
    """List the static content of the current service, ordered by menu position (coords reversed)."""
    service_user = users.get_current_user()
    ordered = sorted(get_static_content_list(service_user), key=lambda sc: tuple(reversed(sc.coords)))
    return [SolutionStaticContentTO.fromModel(sc) for sc in ordered]
@rest("/common/static_content/put", "post")
@returns(ReturnStatusTO)
@arguments(static_content=SolutionStaticContentTO)
def put_static_content(static_content):
    """Create or update a static content item; returns an error status on failure."""
    service_user = users.get_current_user()
    try:
        bizz_put_static_content(service_user, static_content)
    except BusinessException as ex:
        return ReturnStatusTO.create(False, ex.message)
    return RETURNSTATUS_TO_SUCCESS
@rest("/common/static_content/delete", "post")
@returns(ReturnStatusTO)
@arguments(static_content_id=(int, long, NoneType))
def rest_delete_static_content(static_content_id=None):
    """Delete a static content item by id; returns an error status on failure."""
    service_user = users.get_current_user()
    try:
        delete_static_content(service_user, static_content_id)
    except BusinessException as ex:
        return ReturnStatusTO.create(False, ex.message)
    return RETURNSTATUS_TO_SUCCESS
@rest("/common/inbox/load/all", "get", read_only_access=True)
@returns([SolutionInboxesTO])
@arguments()
def inbox_load_all():
    """Load the first page (10 messages) of every inbox folder of the current service identity.

    Folders are returned in a fixed order: unread, starred, read, trash.
    """
    service_user = users.get_current_user()
    service_identity = users.get_current_session().service_identity
    sln_settings = get_solution_settings(service_user)
    service_info = get_service_info(service_user, service_identity)
    folder_names = [SolutionInboxMessage.INBOX_NAME_UNREAD,
                    SolutionInboxMessage.INBOX_NAME_STARRED,
                    SolutionInboxMessage.INBOX_NAME_READ,
                    SolutionInboxMessage.INBOX_NAME_TRASH]
    inboxes = []
    for name in folder_names:
        cursor, messages, has_more = get_solution_inbox_messages(service_user, service_identity, 10, name)
        inboxes.append(SolutionInboxesTO.fromModel(name, cursor, messages, has_more,
                                                   sln_settings, service_info, True))
    return inboxes
@rest("/common/inbox/load/more", "get", read_only_access=True)
@returns(SolutionInboxesTO)
@arguments(name=unicode, count=(int, long), cursor=unicode)
def inbox_load_more(name, count, cursor):
    """Load the next `count` messages of inbox folder `name`, continuing from `cursor`."""
    service_user = users.get_current_user()
    service_identity = users.get_current_session().service_identity
    sln_settings = get_solution_settings(service_user)
    service_info = get_service_info(service_user, service_identity)
    next_cursor, messages, has_more = get_solution_inbox_messages(service_user, service_identity, count, name, cursor)
    return SolutionInboxesTO.fromModel(name, next_cursor, messages, has_more, sln_settings, service_info, True)
@rest("/common/inbox/load/detail", "get", read_only_access=True)
@returns([SolutionInboxMessageTO])
@arguments(key=unicode)
def inbox_load_detail(key):
    """Return an inbox message followed by its child messages (the whole thread)."""
    service_user = users.get_current_user()
    service_identity = users.get_current_session().service_identity
    sln_settings = get_solution_settings(service_user)
    parent_message = SolutionInboxMessage.get(key)
    thread = [parent_message]
    thread.extend(parent_message.get_child_messages())
    service_info = get_service_info(service_user, service_identity)
    return [SolutionInboxMessageTO.fromModel(m, sln_settings, service_info, False) for m in thread]
@rest("/common/inbox/message/update/reply", "post")
@returns(ReturnStatusTO)
@arguments(key=unicode, message=unicode)
def inbox_message_update_reply(key, message):
    """Send a reply on the inbox message identified by `key`."""
    service_user = users.get_current_user()
    try:
        send_reply(service_user, key, message)
    except BusinessException as ex:
        return ReturnStatusTO.create(False, ex.message)
    return RETURNSTATUS_TO_SUCCESS
@rest("/common/inbox/message/forward", "post")
@returns(ReturnStatusTO)
@arguments(key=unicode, to_email=unicode)
def inbox_message_forward(key, to_email):
    """Forward an inbox message to another service (only city service to community service)."""
    try:
        original = SolutionInboxMessage.get(key)
        to_sln_settings = get_solution_settings(users.User(to_email))
        forwarded = new_inbox_message(to_sln_settings, original.message,
                                      user_details=original.get_sender().to_user_details(),
                                      category=original.category,
                                      category_key=original.category_key,
                                      reply_enabled=original.reply_enabled,
                                      send_to_forwarders=True)
        send_inbox_message_update(to_sln_settings, forwarded)
    except BusinessException as ex:
        return ReturnStatusTO.create(False, ex.message)
    return RETURNSTATUS_TO_SUCCESS
@rest("/common/inbox/message/update/starred", "post")
@returns(ReturnStatusTO)
@arguments(key=unicode, starred=bool)
def inbox_message_update_starred(key, starred):
    """Set the starred flag of an inbox message and broadcast the update."""
    message = SolutionInboxMessage.get(key)
    message.starred = starred
    message.put()
    return _after_inbox_message_updated(message)
@rest("/common/inbox/message/update/read", "post")
@returns(ReturnStatusTO)
@arguments(key=unicode, read=bool)
def inbox_message_update_read(key, read):
    """Set the read flag of an inbox message and broadcast the update."""
    message = SolutionInboxMessage.get(key)
    message.read = read
    message.put()
    return _after_inbox_message_updated(message)
def _after_inbox_message_updated(inbox_message):
    # type: (SolutionInboxMessage) -> ReturnStatusTO
    """Push an updated inbox message to the dashboard and refresh admin user data asynchronously."""
    try:
        service_user = users.get_current_user()
        service_identity = users.get_current_session().service_identity
        sln_settings = get_solution_settings(service_user)
        service_info = get_service_info(service_user, service_identity)
        message_to = SolutionInboxMessageTO.fromModel(inbox_message, sln_settings, service_info, True)
        send_message(service_user, u"solutions.common.messaging.update",
                     service_identity=service_identity,
                     message=message_to.to_dict())
        deferred.defer(update_user_data_admins, service_user, service_identity)
    except BusinessException as ex:
        return ReturnStatusTO.create(False, ex.message)
    return RETURNSTATUS_TO_SUCCESS
@rest("/common/inbox/message/update/trashed", "post")
@returns(ReturnStatusTO)
@arguments(key=unicode, trashed=bool)
def inbox_message_update_trashed(key, trashed):
    """Move an inbox message into or out of the trash and broadcast the update.

    Trashing a message that is already trashed marks it as permanently deleted;
    in that case the admin user-data refresh is skipped.
    """
    service_user = users.get_current_user()
    session_ = users.get_current_session()
    service_identity = session_.service_identity
    sln_settings = get_solution_settings(service_user)
    service_info = get_service_info(service_user, service_identity)
    try:
        sim = SolutionInboxMessage.get(key)
        # Second trash on an already-trashed message == permanent delete.
        if trashed and sim.trashed:
            sim.deleted = True
        sim.trashed = trashed
        sim.put()
        send_message(service_user, u"solutions.common.messaging.update",
                     service_identity=service_identity,
                     message=SolutionInboxMessageTO.fromModel(sim, sln_settings, service_info, True).to_dict())
        if not sim.deleted:
            deferred.defer(update_user_data_admins, service_user, service_identity)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest("/common/inbox/message/update/deleted", "post")
@returns(ReturnStatusTO)
@arguments()
def inbox_message_update_deleted():
    """Empty the trash folder of the current service identity."""
    service_user = users.get_current_user()
    service_identity = users.get_current_session().service_identity
    try:
        delete_all_trash(service_user, service_identity)
    except BusinessException as ex:
        return ReturnStatusTO.create(False, ex.message)
    return RETURNSTATUS_TO_SUCCESS
@rest("/common/inbox/messages/export", "get")
@returns(ReturnStatusTO)
@arguments(email=unicode)
def export_inbox_messages(email=''):
    """Email an export of the current service identity's inbox messages to `email`."""
    service_user = users.get_current_user()
    service_identity = users.get_current_session().service_identity
    sln_settings = get_solution_settings(service_user)
    try:
        send_statistics_export_email(service_user, service_identity, email, sln_settings)
    except InvalidEmailFormatException as ex:
        error_msg = common_translate(sln_settings.main_language, 'invalid_email_format',
                                     email=ex.email)
        return ReturnStatusTO.create(False, error_msg)
    return RETURNSTATUS_TO_SUCCESS
@rest('/common/inbox/services', 'post')
@returns()
@arguments(organization_types=[int], message=unicode)
def api_send_message_to_services(organization_types=None, message=None):
    """Send an info inbox message, as a city service, to every other service in the
    community whose customer has one of the given organization types.

    Raises HttpForbiddenException when the current service has no CITY_APP module,
    HttpBadRequestException when organization_types or message is missing.
    """
    service_user = users.get_current_user()
    sln_settings = get_solution_settings(service_user)
    def _validation_err(translation_key):
        # Raises a 400 with a "<field>: this field is required" style message.
        field = common_translate(sln_settings.main_language, translation_key)
        raise HttpBadRequestException(
            '%s: %s' % (field, common_translate(sln_settings.main_language, 'this_field_is_required')))
    if SolutionModule.CITY_APP not in sln_settings.modules:
        raise HttpForbiddenException()
    if not organization_types:
        _validation_err('organization_type')
    message = (message or '').strip()
    if not message:
        _validation_err('message')
    services = []
    service_profile = get_service_profile(service_user)
    for customer in Customer.list_by_community_id(service_profile.community_id):
        if customer.organization_type in organization_types:
            # Don't send message to self
            if customer.service_email and customer.service_email != service_user.email():
                services.append(customer.service_user)
    send_inbox_info_messages_to_services(services, service_user, message, SolutionInboxMessage.CATEGORY_CITY_MESSAGE)
@rest('/common/news/rss', 'get', read_only_access=True)
@returns(SolutionRssSettingsTO)
@arguments()
def rest_get_news_rss_feeds():
    """Return the RSS scraper settings of the current service identity."""
    service_user = users.get_current_user()
    service_identity = users.get_current_session().service_identity
    settings_key = SolutionRssScraperSettings.create_key(service_user, service_identity)
    return SolutionRssSettingsTO.from_model(settings_key.get())
@rest("/common/broadcast/rss/validate", "get", read_only_access=True, silent_result=True)
@returns(dict)
@arguments(url=unicode)
def rest_validate_rss_feed(url):
    """Fetch and parse an RSS feed; return its items, or the error message on failure."""
    from solutions.common.cron.news.rss import parse_rss_items
    try:
        response = urlfetch.fetch(url, deadline=10)  # type: urlfetch._URLFetchResult
        items, _ = parse_rss_items(response.content, url)
    except Exception as e:
        logging.exception('Failed to validate url')
        return {'exception': e.message}
    results = []
    for item in items:
        results.append({
            'title': item.title,
            'url': item.url,
            'guid': item.guid,
            'id': item.id,
            'message': item.message,
            'date': str(item.date),
            'rss_url': item.rss_url,
            'image_url': item.image_url,
        })
    return {'items': results}
@rest('/common/news/rss', 'put', type=REST_TYPE_TO)
@returns(SolutionRssSettingsTO)
@arguments(data=SolutionRssSettingsTO)
def rest_save_news_rss_feeds(data):
    # type: (SolutionRssSettingsTO) -> SolutionRssSettingsTO
    """Store the RSS urls for the current service identity and return the saved settings."""
    service_user = users.get_current_user()
    service_identity = users.get_current_session().service_identity
    saved = save_rss_urls(service_user, service_identity, data)
    return SolutionRssSettingsTO.from_model(saved)
@rest('/common/news/rss/validate', 'post')
@returns(UrlReturnStatusTO)
@arguments(url=unicode, allow_empty=bool)
def rest_news_validate_rss(url, allow_empty=False):
    """Normalize and validate an RSS url; an empty url passes when allow_empty is set."""
    try:
        service_user = users.get_current_user()
        sln_settings = get_solution_settings(service_user)
        url = url.strip()
        if url or not allow_empty:
            # Default to plain http when no scheme was supplied.
            if not url.startswith(("http://", "https://")):
                url = "http://%s" % url
            validate_broadcast_url(url, sln_settings.main_language)
        return UrlReturnStatusTO.create(True, None, url)
    except BusinessException as ex:
        return UrlReturnStatusTO.create(False, ex.message, url)
@rest('/common/settings', 'put')
@returns(SolutionSettingsTO)
@arguments(data=SolutionSettingsTO)
def settings_save(data):
    # type: (SolutionSettingsTO) -> SolutionSettingsTO
    """Persist the service settings; raises HTTP 400 on validation errors."""
    service_user = users.get_current_user()
    service_identity = users.get_current_session().service_identity
    try:
        sln_settings, sln_i_settings = save_settings(service_user, service_identity, data)
    except BusinessException as ex:
        raise HttpBadRequestException(ex.message)
    return SolutionSettingsTO.fromModel(sln_settings, sln_i_settings)
@rest('/common/settings', 'get', read_only_access=True)
@returns(SolutionSettingsTO)
@arguments()
def settings_load():
    """Return the settings of the current service identity."""
    service_user = users.get_current_user()
    service_identity = users.get_current_session().service_identity
    sln_settings = get_solution_settings(service_user)
    identity_settings = get_solution_settings_or_identity_settings(sln_settings, service_identity)
    return SolutionSettingsTO.fromModel(sln_settings, identity_settings)
@rest('/common/available-place-types', 'get', read_only_access=True, silent_result=True)
@returns(dict)
@arguments()
def rest_get_place_types():
    """Return the available place types translated in the service's language.

    Returns ``{'results': [[place_type, label], ...]}``, sorted
    case-insensitively on the translated label.
    """
    service_user = users.get_current_user()
    sln_settings = get_solution_settings(service_user)
    place_types = get_place_types(sln_settings.main_language)
    # The original built an extra `result_list` in a loop that was never used;
    # a single sorted pass over the mapping is all that is needed.
    return {'results': sorted(([place_type, label] for place_type, label in place_types.iteritems()),
                              key=lambda x: x[1].lower())}
@rest('/common/countries', 'get', read_only_access=True, silent_result=True)
@returns(dict)
@arguments()
def rest_get_countries():
    """Return all country codes with their names in the service's locale, sorted by name."""
    service_user = users.get_current_user()
    sln_settings = get_solution_settings(service_user)
    territories = Locale(sln_settings.locale).territories
    countries = sorted(([code, name] for code, name in territories.iteritems()),
                       key=lambda entry: entry[1].lower())
    return {'countries': countries}
def is_place_visible_for_customer(place_details, customer):
    """Return True when the place has no organization-type restriction, or when the
    customer's organization type is one of the allowed types."""
    allowed_types = place_details.organization_types
    if not allowed_types:
        return True
    return bool(customer) and customer.organization_type in allowed_types
def _get_city_services(community_id):
    """Return simple TOs for every enabled city customer in the community that has a service account."""
    city_customers = Customer.list_enabled_by_organization_type_in_community(community_id, OrganizationType.CITY)
    result = []
    for customer in city_customers:
        if customer.service_email:
            result.append(SimpleServiceTO(name=customer.name, service_email=customer.service_email))
    return result
@rest('/common/settings/paddle', 'get', type=REST_TYPE_TO, silent_result=True)
@returns(PaddleSettingsServicesTO)
@arguments()
def rest_get_paddle_settings():
    # type: () -> PaddleSettingsServicesTO
    """Return the paddle settings of the current service plus the city services it can map to."""
    service_user = users.get_current_user()
    settings_key = PaddleSettings.create_key(service_user)
    paddle_settings = settings_key.get() or PaddleSettings(key=settings_key)
    service_profile = get_service_profile(service_user)
    city_services = _get_city_services(service_profile.community_id)
    return PaddleSettingsServicesTO(settings=PaddleSettingsTO.from_model(paddle_settings),
                                    services=city_services)
@rest('/common/settings/paddle', 'put', type=REST_TYPE_TO, silent_result=True)
@returns(PaddleSettingsServicesTO)
@arguments(data=PaddleSettingsTO)
def rest_save_paddle_settings(data):
    # type: (PaddleSettingsTO) -> PaddleSettingsServicesTO
    """Save the paddle base url and the paddle-unit -> service mapping.

    When the base url changed, the organizational units are re-fetched from
    paddle and the mapping is rebuilt (or cleared when the url was removed).
    """
    service_user = users.get_current_user()
    settings_key = PaddleSettings.create_key(service_user)
    settings = settings_key.get() or PaddleSettings(key=settings_key)
    base_url = data.base_url.rstrip('/') if data.base_url else None
    base_url_changed = base_url != settings.base_url
    settings.base_url = base_url
    # Copy the submitted service assignments onto the stored mapping entries.
    for m in settings.mapping:
        for mapping in data.mapping:
            if mapping.paddle_id == m.paddle_id:
                m.service_email = mapping.service_email
    to_put = [settings]
    paddle_info = None
    if base_url_changed:
        if settings.base_url:
            paddle_info = get_paddle_info(settings)
            # Overwrite existing mapping
            settings.mapping = [PaddleMapping(service_email=None, paddle_id=m.node.nid, title=m.node.title)
                                for m in paddle_info.units]
            to_put.append(paddle_info)
        else:
            # Base url cleared: drop the cached paddle data and the mapping.
            PaddleOrganizationalUnits.create_key(service_user).delete()
            settings.mapping = []
    else:
        paddle_info = PaddleOrganizationalUnits.create_key(service_user).get()
    ndb.put_multi(to_put)
    if paddle_info:
        # NOTE(review): presumably pushes the fetched paddle data to the mapped
        # services — confirm in populate_info_from_paddle.
        populate_info_from_paddle(settings, paddle_info)
    service_profile = get_service_profile(service_user)
    return PaddleSettingsServicesTO(settings=PaddleSettingsTO.from_model(settings),
                                    services=_get_city_services(service_profile.community_id))
@rest("/common/settings/publish_changes", "post")
@returns(ReturnStatusTO)
@arguments(friends=[BaseMemberTO])
def settings_publish_changes(friends=None):
    """Provision the pending changes of the current service, optionally limited to `friends`."""
    service_user = users.get_current_user()
    try:
        common_provision(service_user, friends=friends, run_checks=True)
        return RETURNSTATUS_TO_SUCCESS
    except InvalidValueException as ex:
        property_ = ex.fields.get('property')
        reason = ex.fields.get('reason')
        logging.warning("Invalid value for property %s: %s", property_, reason, exc_info=1)
        return ReturnStatusTO.create(False, reason or ex.message)
    except AvatarImageNotSquareException:
        sln_settings = get_solution_settings(service_user)
        return ReturnStatusTO.create(
            False, common_translate(sln_settings.main_language, 'please_select_valid_avatar_image'))
    except BusinessException as ex:
        return ReturnStatusTO.create(False, ex.message)
@rest('/common/settings/auto-publish', 'post')
@returns(dict)
@arguments()
def api_check_auto_publish():
    """Validate pending changes and (re)schedule the deferred auto-publish task.

    Returns a dict with 'valid' (no validation errors), 'publish_date'
    (ISO-8601 string with 'Z' suffix, or None) and 'errors' (list of messages).
    """
    service_user = users.get_current_user()
    sln_settings = get_solution_settings(service_user)
    error_lines = []
    countdown_seconds = 60 * AUTO_PUBLISH_MINUTES
    if sln_settings.updates_pending:
        if not sln_settings.update_date or not sln_settings.auto_publish_date:
            should_auto_publish = True
        else:
            expected_auto_publish_date = sln_settings.update_date + relativedelta(seconds=countdown_seconds)
            diff = expected_auto_publish_date - sln_settings.auto_publish_date
            # Only update the auto publish date when the update date is longer than 1 minute after the auto publish date
            should_auto_publish = expected_auto_publish_date > sln_settings.auto_publish_date and diff.seconds > 60
        if should_auto_publish:
            error_lines = validate_before_provision(service_user, sln_settings)
            valid = not error_lines
            # A previously scheduled task must always be cancelled before a new
            # one can be scheduled (task names are unique per queue).
            if sln_settings.auto_publish_task_id:
                logging.debug('Canceling scheduled auto publish task')
                taskqueue.Queue(SCHEDULED_QUEUE).delete_tasks(taskqueue.Task(name=sln_settings.auto_publish_task_id))
                sln_settings.auto_publish_date = None
                sln_settings.auto_publish_task_id = None
            if valid:
                logging.debug('Scheduling auto publish task')
                sln_settings.auto_publish_date = datetime.now() + relativedelta(seconds=countdown_seconds)
                new_task = deferred.defer(auto_publish, service_user, _countdown=countdown_seconds,
                                          _queue=SCHEDULED_QUEUE)  # type: taskqueue.Task
                sln_settings.auto_publish_task_id = new_task.name
            sln_settings.put()
    return {
        'valid': not error_lines,
        'publish_date': sln_settings.auto_publish_date and (sln_settings.auto_publish_date.isoformat() + 'Z'),
        'errors': error_lines,
    }
@rest("/common/settings/publish_changes/users", "post")
@returns()
@arguments(user_keys=[unicode])
def settings_save_publish_changes_users(user_keys):
    """Store the user keys that should be used when publishing changes."""
    sln_settings = get_solution_settings(users.get_current_user())
    sln_settings.publish_changes_users = user_keys
    sln_settings.put()
@rest('/common/settings/logo', 'put')
@returns(BrandingSettingsTO)
@arguments(data=BrandingSettingsTO)
def rest_update_logo(data):
    """Set the logo of the current service identity from data.logo_url."""
    service_user = users.get_current_user()
    service_identity = users.get_current_session().service_identity
    branding_settings = set_logo(service_user, data.logo_url, service_identity)
    return BrandingSettingsTO.from_model(branding_settings)
@rest('/common/settings/avatar', 'put', type=REST_TYPE_TO)
@returns(BrandingSettingsTO)
@arguments(data=BrandingSettingsTO)
def rest_update_avatar(data):
    """Set the avatar of the current service from data.avatar_url."""
    branding_settings = set_avatar(users.get_current_user(), data.avatar_url)
    return BrandingSettingsTO.from_model(branding_settings)
@rest("/common/menu/save", "post")
@returns(ReturnStatusTO)
@arguments(menu=MenuTO)
def menu_save(menu):
    """Save the restaurant menu of the current service."""
    from solutions.common.bizz.menu import save_menu
    service_user = users.get_current_user()
    try:
        save_menu(service_user, menu)
    except BusinessException as ex:
        return ReturnStatusTO.create(False, ex.message)
    return RETURNSTATUS_TO_SUCCESS
@rest("/common/menu/import", "post", silent=True)
@returns(ReturnStatusTO)
@arguments(file_contents=str)
def menu_import(file_contents):
    """Import the restaurant menu from an uploaded excel file."""
    from solutions.common.bizz.menu import import_menu_from_excel
    service_user = users.get_current_user()
    try:
        import_menu_from_excel(service_user, file_contents)
    except BusinessException as ex:
        return ReturnStatusTO.create(False, ex.message)
    return RETURNSTATUS_TO_SUCCESS
@rest("/common/menu/save_name", "post")
@returns(ReturnStatusTO)
@arguments(name=unicode)
def menu_save_name(name):
    """Rename the restaurant menu of the current service."""
    from solutions.common.bizz.menu import save_menu_name
    service_user = users.get_current_user()
    try:
        save_menu_name(service_user, name)
    except BusinessException as ex:
        return ReturnStatusTO.create(False, ex.message)
    return RETURNSTATUS_TO_SUCCESS
@rest("/common/menu/load", "get", read_only_access=True)
@returns(MenuTO)
@arguments()
def load_menu():
    """Return the restaurant menu of the current service, creating the default menu when absent."""
    from solutions.common.dal import get_restaurant_menu
    service_user = users.get_current_user()
    menu = get_restaurant_menu(service_user)
    if not menu:
        logging.info('Setting menu')
        menu = _put_default_menu(service_user)
    return MenuTO.fromMenuObject(menu)
@rest("/common/bulkinvite", "post")
@returns(ReturnStatusTO)
@arguments(emails=[unicode], invitation_message=unicode)
def bulk_invite(emails, invitation_message):
    """Send service invitations to a list of email addresses."""
    # The bizz-level bulk_invite deliberately shadows this endpoint's name.
    from solutions.common.bizz.bulk_invite import bulk_invite
    try:
        service_user = users.get_current_user()
        service_identity = users.get_current_session().service_identity
        bulk_invite(service_user, service_identity, emails, invitation_message)
    except BusinessException as ex:
        return ReturnStatusTO.create(False, ex.message)
    return RETURNSTATUS_TO_SUCCESS
@rest("/common/calendar/delete", "post")
@returns(ReturnStatusTO)
@arguments(calendar_id=(int, long))
def delete_calendar(calendar_id):
    """Soft-delete a calendar; refused when it still contains events.

    Marks the solution settings as having pending updates so the change is provisioned.
    """
    service_user = users.get_current_user()
    sln_settings = get_solution_settings(service_user)
    try:
        # NOTE(review): .get() may return None for an unknown calendar_id, which would
        # raise AttributeError below instead of a translated error — confirm callers
        # always pass a valid id.
        sc = SolutionCalendar.create_key(calendar_id, service_user, sln_settings.solution).get()
        if sc.events.count(1) > 0:
            raise BusinessException(common_translate(sln_settings.main_language,
                                                     'calendar-remove-failed-has-events'))
        sc.deleted = True
        sc.put()
        sln_settings.updates_pending = True
        put_and_invalidate_cache(sln_settings)
        broadcast_updates_pending(sln_settings)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest("/common/calendar/save", "post")
@returns(ReturnStatusTO)
@arguments(calendar=SolutionCalendarTO)
def save_calendar(calendar):
    """Create a calendar (no id) or rename an existing one (id set).

    Calendar names must be unique case-insensitively; a duplicate name on a
    different calendar yields an error status. Marks updates as pending.
    """
    service_user = users.get_current_user()
    sln_settings = get_solution_settings(service_user)
    try:
        if calendar.id:
            sc = SolutionCalendar.create_key(calendar.id, service_user, sln_settings.solution).get()
        else:
            sc = SolutionCalendar(parent=parent_ndb_key(service_user, sln_settings.solution))
        # Reject the name when another calendar (different id) already uses it.
        for c in get_solution_calendars(service_user, sln_settings.solution):
            if c.name.lower() == calendar.name.lower():
                if calendar.id != c.calendar_id:
                    raise BusinessException(common_translate(sln_settings.main_language,
                                                             'calendar-name-already-exists', name=calendar.name))
        sc.name = calendar.name
        sc.put()
        sln_settings.updates_pending = True
        put_and_invalidate_cache(sln_settings)
        broadcast_updates_pending(sln_settings)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest("/common/calendar/load", "get", silent_result=True, read_only_access=True)
@returns([SolutionCalendarWebTO])
@arguments()
def load_calendar():
    """Return all calendars of the current service as web TOs."""
    service_user = users.get_current_user()
    sln_settings = get_solution_settings(service_user)
    base_url = get_server_settings().baseUrl
    calendars = get_solution_calendars(service_user, sln_settings.solution)
    return [SolutionCalendarWebTO.fromSolutionCalendar(sln_settings, c, base_url, True) for c in calendars]
@rest("/common/calendar/load/more", "get", silent_result=True, read_only_access=True)
@returns(SolutionCalendarWebTO)
@arguments(calendar_id=(int, long), cursor=unicode)
def load_calendar_more(calendar_id, cursor=None):
    """Return the next page of one calendar's contents, continuing from `cursor`."""
    service_user = users.get_current_user()
    sln_settings = get_solution_settings(service_user)
    calendar = SolutionCalendar.create_key(calendar_id, service_user, sln_settings.solution).get()
    base_url = get_server_settings().baseUrl
    return SolutionCalendarWebTO.fromSolutionCalendar(sln_settings, calendar, base_url, True, cursor)
@rest("/common/calendar/google/authenticate/url", "get", read_only_access=True)
@returns(unicode)
@arguments(calendar_id=(int, long))
def calendar_google_authenticate_url(calendar_id):
    """Return the url where the user can authenticate with Google for this calendar."""
    authenticate_url = get_google_authenticate_url(calendar_id)
    return authenticate_url
@rest("/common/calendar/google/load", "get", read_only_access=True)
@returns(SolutionGoogleCalendarStatusTO)
@arguments(calendar_id=(int, long))
def calendar_google_load(calendar_id):
    """Return the Google-calendar status for the given calendar of the current user."""
    return get_google_calendars(users.get_current_user(), calendar_id)
@rest("/common/calendar/import/google/put", "post")
@returns(ReturnStatusTO)
@arguments(calendar_id=(int, long), google_calendars=[SolutionUserKeyLabelTO])
def calendar_put_google_import(calendar_id, google_calendars):
    """Store which Google calendars must be synced into the given calendar and trigger a sync."""
    service_user = users.get_current_user()
    sln_settings = get_solution_settings(service_user)
    try:
        def trans():
            sc = SolutionCalendar.create_key(calendar_id, service_user, sln_settings.solution).get()
            if not sc:
                raise BusinessException(
                    common_translate(sln_settings.main_language, 'Calendar not found'))
            # Deferred with _transactional=True: the sync only runs when the
            # transaction commits.
            deferred.defer(update_events_from_google, service_user, calendar_id, _transactional=True)
            sc.google_calendar_ids = [gc.key for gc in google_calendars]
            sc.google_calendar_names = [gc.label for gc in google_calendars]
            sc.google_sync_events = bool(sc.google_calendar_ids)
            sc.put()

        ndb.transaction(trans)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as ex:
        return ReturnStatusTO.create(False, ex.message)
@rest("/common/events/put", "post", type=REST_TYPE_TO)
@returns(EventItemTO)
@arguments(data=CreateEventItemTO)
def put_event(data):
    """Create or update an agenda event and return its transfer object.

    Raises:
        HttpBadRequestException: when the business layer rejects the data.
    """
    from solutions.common.bizz.events import put_event as put_event_bizz
    try:
        current_user = users.get_current_user()
        settings = get_solution_settings(current_user)
        organization_type = get_organization_type(current_user)
        profile = get_service_profile(current_user)
        saved_event = put_event_bizz(settings, data, profile.community_id, organization_type)
        base_url = get_server_settings().baseUrl
        return EventItemTO.from_model(saved_event, base_url, destination_app=False)
    except BusinessException as e:
        raise HttpBadRequestException(e.message)
@rest("/common/events/delete", "post")
@returns(ReturnStatusTO)
@arguments(event_id=(int, long))
def delete_event(event_id):
    """Delete one agenda event; business errors become an error status."""
    from solutions.common.bizz.events import delete_event as delete_event_bizz
    try:
        delete_event_bizz(users.get_current_user(), event_id)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest("/common/events/uit/actor/load", "get", read_only_access=True)
@returns(unicode)
@arguments()
def load_uit_actor_id():
    """Return the UiTdatabank actor id stored on the solution settings."""
    sln_settings = get_solution_settings(users.get_current_user())
    return sln_settings.uitdatabank_actor_id
@rest("/common/events/uit/actor/put", "post")
@returns(ReturnStatusTO)
@arguments(uit_id=unicode)
def put_uit_actor_id(uit_id):
    """Store the UiTdatabank actor id on the solution settings."""
    sln_settings = get_solution_settings(users.get_current_user())
    sln_settings.uitdatabank_actor_id = uit_id
    sln_settings.put()
    return RETURNSTATUS_TO_SUCCESS
@rest("/common/settings/getTimezoneOffset", "get", read_only_access=True)
@returns(long)
@arguments()
def get_timezone_offset():
    """Return the offset for the timezone configured on the solution settings."""
    sln_settings = get_solution_settings(users.get_current_user())
    return timezone_offset(sln_settings.timezone)
@rest("/common/friends/load", "get", read_only_access=True)
@returns(FriendListResultTO)
@arguments(batch_count=int, cursor=unicode)
def load_friends_list(batch_count, cursor):
    """Return one page of the friend list; each friend's e-mail is encrypted."""
    from rogerthat.service.api.friends import list_friends
    from rogerthat.utils.crypto import encrypt
    current_user = users.get_current_user()
    identity = users.get_current_session().service_identity
    result = list_friends(identity, cursor, batch_count=batch_count)
    # encrypt every friend's e-mail before handing the result to the client
    for friend in result.friends:
        friend.email = unicode(encrypt(current_user, friend.email))
    return result
@rest("/common/statistics/load", "get", silent_result=True, read_only_access=True)
@returns(StatisticsResultTO)
@arguments()
def load_service_statistics():
    """Return the usage statistics of the current service identity."""
    identity = users.get_current_session().service_identity
    result = StatisticsResultTO()
    result.service_identity_statistics = system.get_statistics(identity)
    return result
@rest('/common/users/search', 'get', read_only_access=True)
@returns([UserDetailsTO])
@arguments(query=unicode, app_id=unicode)
def search_connected_users(query, app_id=None):
    """Search users connected to this service identity by name or e-mail."""
    from rogerthat.bizz.profile import search_users_via_friend_connection_and_name_or_email
    current_user = users.get_current_user()
    identity = users.get_current_session().service_identity
    if identity is None:
        identity = ServiceIdentity.DEFAULT
    identity_user = create_service_identity_user(current_user, identity)
    connection = remove_slash_default(identity_user).email()
    return search_users_via_friend_connection_and_name_or_email(connection, query, app_id, True)
@rest("/common/users/roles/load", "get")
@returns([AppUserRolesTO])
@arguments()
def users_load_roles():
    """
    Gather inbox forwarders, calendar admins and news publishers
    from different places/models

    Returns one AppUserRolesTO per distinct user email, with all roles for
    that email (mobile/e-mail inbox forwarder, news publisher) merged on it.
    """
    service_user = users.get_current_user()
    sln_settings = get_solution_settings(service_user)
    # get inbox forwarders and news publishers
    inbox_forwarders = inbox_load_forwarders()
    news_publishers = get_solution_news_publishers(service_user, sln_settings.solution)
    # create AppUserRolesTO for every role type
    # with different additional information for every type
    all_user_roles = defaultdict(AppUserRolesTO)  # keyed by (app) user email
    mobile_inbox_forwarders = [f for f in inbox_forwarders if f.type == SolutionSettings.INBOX_FORWARDER_TYPE_MOBILE]
    email_inbox_forwarders = [f for f in inbox_forwarders if f.type == SolutionSettings.INBOX_FORWARDER_TYPE_EMAIL]
    # mobile forwarders may have an app id
    for forwarder in mobile_inbox_forwarders:
        email = forwarder.key
        try:
            # mobile forwarder keys look like '<email>:<app_id>' — the part
            # after the first ':' is the app id (absent for bare emails)
            app_id = email.split(':')[1]
        except IndexError:
            app_id = None
        user_roles = all_user_roles[email]
        user_roles.app_user_email = email
        user_roles.app_id = app_id
        # additional info: forwarder type
        user_roles.add_forwarder_type(SolutionSettings.INBOX_FORWARDER_TYPE_MOBILE)
    for publisher in news_publishers:
        email = publisher.app_user.email()
        user_roles = all_user_roles[email]
        user_roles.app_user_email = email
        user_roles.app_id = get_app_id_from_app_user(publisher.app_user)
        user_roles.news_publisher = True
    # because email forwarders are stored only by email, without an app id
    # after gathering all roles, check if a user with this email
    # is a mobile forwarder, then just append the type
    for forwarder in email_inbox_forwarders:
        email = forwarder.key # email only
        has_roles = False # check if any roles have this email
        for user_email, user_roles in all_user_roles.iteritems():
            # user_email may contain an app id, so check if it contains
            # email, then append the email forwarder type
            # NOTE(review): plain substring matching can over-match
            # (e.g. 'a@x.com' is contained in 'aa@x.com:app') — confirm intended.
            if email in user_email:
                user_roles.add_forwarder_type(SolutionSettings.INBOX_FORWARDER_TYPE_EMAIL)
                has_roles = True
        # no user roles for this email, then create it
        if not has_roles:
            user_roles = all_user_roles[email]
            user_roles.app_user_email = email
            user_roles.add_forwarder_type(SolutionSettings.INBOX_FORWARDER_TYPE_EMAIL)
    return all_user_roles.values()
@rest("/common/users/roles/add", "post")
@returns(ReturnStatusTO)
@arguments(key=unicode, user_roles=AppUserRolesTO)
def users_add_user_roles(key, user_roles):
    # type: (str, AppUserRolesTO) -> ReturnStatusTO
    """ set different app roles for a user

    Adds the user as inbox forwarder (per requested forwarder type) and,
    when the user is an existing friend of the identity, as news publisher.
    """
    try:
        service_user = users.get_current_user()
        session_ = users.get_current_session()
        service_identity = session_.service_identity
        sln_settings = get_solution_settings(service_user)
        sln_i_settings = get_solution_settings_or_identity_settings(sln_settings,
                                                                    service_identity)
        # try first to get the user from user key
        app_user = users.User(key)
        email, app_id = get_app_user_tuple(app_user)
        is_existing_user = is_existing_friend(email.email(), app_id, service_identity)
        # add inbox forwarder
        if user_roles.inbox_forwarder:
            forwarder_types = user_roles.forwarder_types
            for forwarder_type in forwarder_types:
                if forwarder_type:
                    # NOTE: the parameter `key` is intentionally reused here as
                    # the forwarder key stored in the settings
                    if forwarder_type == SolutionSettings.INBOX_FORWARDER_TYPE_EMAIL:
                        # only email without an app id
                        key = get_human_user_from_app_user(app_user).email()
                    else:
                        key = app_user.email()
                    if not EMAIL_REGEX.match(key):
                        return ReturnStatusTO.create(False,
                                                     common_translate(sln_settings.main_language,
                                                                      'Please provide a valid e-mail address'))
                    forwarders = sln_i_settings.get_forwarders_by_type(forwarder_type)
                    if key not in forwarders:
                        forwarders.append(key)
                        sln_i_settings.put()
        if is_existing_user:
            # add as news publisher
            if user_roles.news_publisher:
                create_news_publisher(app_user, service_user, sln_settings.solution)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest("/common/users/roles/delete", "post")
@returns(ReturnStatusTO)
@arguments(key=unicode, forwarder_types=[unicode], calendar_ids=[(int, long)])
def users_delete_user_roles(key, forwarder_types, calendar_ids):
    """ remove all the user app roles

    Removes the user from the inbox forwarders of each given type and deletes
    the user's news-publisher role.
    """
    try:
        service_user = users.get_current_user()
        session_ = users.get_current_session()
        service_identity = session_.service_identity
        sln_settings = get_solution_settings(service_user)
        sln_i_settings = get_solution_settings_or_identity_settings(sln_settings,
                                                                    service_identity)
        app_user = users.User(key)
        # inbox
        if forwarder_types:
            key = app_user.email()
            for forwarder_type in forwarder_types:
                # NOTE(review): once `key` is switched to the human e-mail for
                # an EMAIL type, it is never reset for a later MOBILE entry in
                # the same list — confirm the order of forwarder_types makes
                # this harmless.
                if forwarder_type == SolutionSettings.INBOX_FORWARDER_TYPE_EMAIL:
                    key = get_human_user_from_app_user(app_user).email()
                forwarders = sln_i_settings.get_forwarders_by_type(forwarder_type)
                if key in forwarders:
                    forwarders.remove(key)
                    sln_i_settings.put()
        # news
        delete_news_publisher(app_user, service_user, sln_settings.solution)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest('/common/users/admins', 'get')
@returns([unicode])
@arguments()
def rest_load_service_admins():
    """Return the admin e-mail addresses of the current service."""
    return get_service_admins(users.get_current_user())
@rest('/common/users/admins', 'post')
@returns(ReturnStatusTO)
@arguments(user_email=unicode)
def rest_add_service_email(user_email):
    """Add *user_email* as an admin of the current service."""
    request = GenericRESTRequestHandler.getCurrentRequest()
    base_url = request.headers.get('Origin') or get_server_settings().baseUrl
    try:
        current_user = users.get_current_user()
        if not EMAIL_REGEX.match(user_email):
            language = get_solution_settings(current_user).main_language
            message = common_translate(language, 'invalid_email_format', email=user_email)
            return ReturnStatusTO.create(False, message)
        add_service_admin(current_user, user_email, base_url)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest("/common/inbox/forwarders/load", "get", read_only_access=True)
@returns([SolutionInboxForwarder])
@arguments()
def inbox_load_forwarders():
    """Return all inbox forwarders (mobile and e-mail) of the current identity.

    Mobile forwarders whose user profile no longer exists are dropped from the
    result and removed from the stored settings in a transaction.
    """
    service_user = users.get_current_user()
    session_ = users.get_current_session()
    service_identity = session_.service_identity
    if is_default_service_identity(service_identity):
        sln_i_settings = get_solution_settings(service_user)
    else:
        sln_i_settings = get_solution_identity_settings(service_user, service_identity)
    # map forwarder key -> profile model; db.get returns None for missing profiles
    forwarder_profiles = dict(zip(sln_i_settings.inbox_forwarders,
                                  db.get(
                                      [get_profile_key(u) for u in map(users.User, sln_i_settings.inbox_forwarders)])))
    forwarders_to_be_removed = list()
    sifs = []
    for fw_type, forwarders in [(SolutionSettings.INBOX_FORWARDER_TYPE_MOBILE, sln_i_settings.inbox_forwarders),
                                (SolutionSettings.INBOX_FORWARDER_TYPE_EMAIL, sln_i_settings.inbox_mail_forwarders), ]:
        for forwarder in forwarders:
            sif = SolutionInboxForwarder()
            sif.type = fw_type
            sif.key = forwarder
            if fw_type == SolutionSettings.INBOX_FORWARDER_TYPE_MOBILE:
                up = forwarder_profiles[forwarder]
                if up:
                    sif.label = u"%s (%s)" % (up.name, get_human_user_from_app_user(up.user).email())
                else:
                    # profile is gone: hide it from the result and clean up below
                    forwarders_to_be_removed.append(forwarder)
                    continue
            else:
                sif.label = forwarder
            sifs.append(sif)
    if forwarders_to_be_removed:
        logging.info('Inbox forwarders %s do not exist anymore', forwarders_to_be_removed)
        def trans():
            # re-fetch the settings inside the transaction so concurrent edits
            # are not clobbered by the in-memory copy above
            if is_default_service_identity(service_identity):
                sln_i_settings = get_solution_settings(service_user)
            else:
                sln_i_settings = get_solution_identity_settings(service_user, service_identity)
            for fwd in forwarders_to_be_removed:
                sln_i_settings.inbox_forwarders.remove(fwd)
            sln_i_settings.put()
        db.run_in_transaction(trans)
    return sifs
@rest("/common/inbox/forwarders/add", "post")
@returns(ReturnStatusTO)
@arguments(key=unicode, forwarder_type=unicode)
def inbox_add_forwarder(key, forwarder_type=SolutionSettings.INBOX_FORWARDER_TYPE_MOBILE):
    """Register *key* as an inbox forwarder of the given type.

    E-mail forwarder keys must be valid e-mail addresses; mobile forwarder
    keys are sanitized to an app-user e-mail first.
    """
    current_user = users.get_current_user()
    identity = users.get_current_session().service_identity
    settings = get_solution_settings(current_user)
    identity_settings = get_solution_settings_or_identity_settings(settings, identity)
    if forwarder_type == SolutionSettings.INBOX_FORWARDER_TYPE_EMAIL:
        if not EMAIL_REGEX.match(key):
            message = common_translate(settings.main_language, 'Please provide a valid e-mail address')
            return ReturnStatusTO.create(False, message)
    elif forwarder_type == SolutionSettings.INBOX_FORWARDER_TYPE_MOBILE:
        app_user = sanitize_app_user(users.User(key))
        get_user_profile(app_user)  # presumably validates the user exists — TODO confirm
        key = app_user.email()
    forwarders = identity_settings.get_forwarders_by_type(forwarder_type)
    if key not in forwarders:
        forwarders.append(key)
        identity_settings.put()
    return RETURNSTATUS_TO_SUCCESS
@rest("/common/inbox/forwarders/delete", "post")
@returns(ReturnStatusTO)
@arguments(key=unicode, forwarder_type=unicode)
def inbox_delete_forwarder(key, forwarder_type=SolutionSettings.INBOX_FORWARDER_TYPE_MOBILE):
    """Remove the inbox forwarder *key* of the given type, if it is present."""
    current_user = users.get_current_user()
    identity = users.get_current_session().service_identity
    if is_default_service_identity(identity):
        identity_settings = get_solution_settings(current_user)
    else:
        identity_settings = get_solution_identity_settings(current_user, identity)
    forwarders = identity_settings.get_forwarders_by_type(forwarder_type)
    if key not in forwarders:
        logging.warn('%s inbox forwarder "%s" not found', forwarder_type, key)
    else:
        forwarders.remove(key)
        identity_settings.put()
    return RETURNSTATUS_TO_SUCCESS
@rest("/common/appointment/settings/load", "get", read_only_access=True)
@returns(SolutionAppointmentSettingsTO)
@arguments()
def load_appointment_settings():
    """Return the appointment settings of the current service."""
    current_user = users.get_current_user()
    settings = get_solution_settings(current_user)
    appointment_settings = get_solution_appointment_settings(current_user)
    return SolutionAppointmentSettingsTO.fromModel(appointment_settings, settings.main_language)
@rest("/common/appointment/settings/put", "post")
@returns(ReturnStatusTO)
@arguments(text_1=unicode)
def put_appointment_settings(text_1):
    """Store the appointment info text and broadcast that updates are pending."""
    current_user = users.get_current_user()
    try:
        settings_key = SolutionAppointmentSettings.create_key(current_user)
        appointment_settings = SolutionAppointmentSettings.get(settings_key)
        if appointment_settings is None:
            appointment_settings = SolutionAppointmentSettings(key=settings_key)
        appointment_settings.text_1 = text_1
        appointment_settings.put()
        settings = get_solution_settings(current_user)
        settings.updates_pending = True
        put_and_invalidate_cache(appointment_settings, settings)
        broadcast_updates_pending(settings)
        send_message(current_user, u"solutions.common.appointment.settings.update")
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest("/common/appointment/settings/timeframe/put", "post")
@returns(ReturnStatusTO)
@arguments(appointment_id=(int, long, NoneType), day=int, time_from=int, time_until=int)
def put_appointment_weekday_timeframe(appointment_id, day, time_from, time_until):
    """Create or update a weekday timeframe for appointments."""
    from solutions.common.bizz.appointment import \
        put_appointment_weekday_timeframe as put_appointment_weekday_timeframe_bizz
    try:
        current_user = users.get_current_user()
        put_appointment_weekday_timeframe_bizz(current_user, appointment_id, day, time_from, time_until)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest("/common/appointment/settings/timeframe/load", "get", read_only_access=True)
@returns([SolutionAppointmentWeekdayTimeframeTO])
@arguments()
def load_appointment_weekday_timeframes():
    """Return all appointment weekday timeframes, localized."""
    current_user = users.get_current_user()
    settings = get_solution_settings(current_user)
    language = settings.main_language or DEFAULT_LANGUAGE
    timeframes = SolutionAppointmentWeekdayTimeframe.list(current_user, settings.solution)
    return [SolutionAppointmentWeekdayTimeframeTO.fromModel(tf, language) for tf in timeframes]
@rest("/common/appointment/settings/timeframe/delete", "post")
@returns(ReturnStatusTO)
@arguments(appointment_id=(int, long))
def delete_appointment_weekday_timeframe(appointment_id):
    """Delete one appointment weekday timeframe."""
    from solutions.common.bizz.appointment import \
        delete_appointment_weekday_timeframe as delete_appointment_weekday_timeframe_bizz
    try:
        delete_appointment_weekday_timeframe_bizz(users.get_current_user(), appointment_id)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest("/common/settings/branding", "get", read_only_access=True)
@returns(BrandingSettingsTO)
@arguments()
def get_branding_settings():
    """Return the branding settings of the current service."""
    model = SolutionBrandingSettings.get_by_user(users.get_current_user())
    return BrandingSettingsTO.from_model(model)
@rest("/common/settings/branding_and_menu", "get", read_only_access=True)
@returns(BrandingSettingsAndMenuItemsTO)
@arguments()
def rest_get_branding_settings_and_menu():
    """Return the branding settings plus the service menu laid out as a grid.

    The menu is rendered as 3 rows of 4 columns on z-level 0. Row 0 holds the
    built-in items (about, history, call, recommend); the remaining cells are
    filled from the service menu items keyed by their 'xXyXz' coordinates.
    """
    branding_settings = SolutionBrandingSettings.get_by_user(users.get_current_user())
    branding_settings_to = BrandingSettingsTO.from_model(branding_settings)
    service_menu = system.get_menu_item()
    # index menu items by their coordinate string, e.g. '1x2x0'
    smi_dict = {'x'.join(map(str, smi.coords)): smi for smi in service_menu.items}
    service_menu_items = list()
    z = 0  # only the first menu page is rendered
    for y in xrange(3):
        row = list()
        for x in xrange(4):
            coords = 'x'.join(map(str, [x, y, z]))
            label = None
            icon_name = None
            if y == 0:
                # row 0 is reserved for the built-in items with fallback labels
                if x == 0:
                    label = service_menu.aboutLabel or u'About'
                    icon_name = u'fa-info'
                elif x == 1:
                    label = service_menu.messagesLabel or u'History'
                    icon_name = u'fa-envelope'
                elif x == 2:
                    # call item only appears when a phone number is configured
                    if service_menu.phoneNumber:
                        label = service_menu.callLabel or u'Call'
                        icon_name = u'fa-phone'
                elif x == 3:
                    # recommend item only appears when a share QR exists
                    if service_menu.shareQRId:
                        label = service_menu.shareLabel or u'Recommend'
                        icon_name = u'fa-thumbs-o-up'
                icon_url = None
            else:
                smi = smi_dict.get(coords)
                label = smi.label if smi else None
                icon_url = smi.iconUrl if smi else None
                icon_name = smi.iconName if smi else None
            row.append(ServiceMenuItemWithCoordinatesTO.create(label, icon_name, icon_url, coords))
        service_menu_items.append(ServiceMenuItemWithCoordinatesListTO.create(row))
    return BrandingSettingsAndMenuItemsTO.create(branding_settings_to, service_menu_items)
@rest("/common/settings/branding", "post")
@returns(ReturnStatusTO)
@arguments(branding_settings=BrandingSettingsTO)
def rest_save_branding_settings(branding_settings):
    """Persist branding settings for the current service.

    Args:
        branding_settings (BrandingSettingsTO)
    """
    try:
        # colors are reset to MISSING so this endpoint never overwrites them
        branding_settings.background_color = MISSING
        branding_settings.text_color = MISSING
        save_branding_settings(users.get_current_user(), branding_settings)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest('/common/q-matic', 'get')
@returns(QMaticSettingsTO)
@arguments()
def rest_get_qmatic_settings():
    """Return the Q-Matic settings of the current service."""
    current_user = users.get_current_user()
    return QMaticSettingsTO.from_model(get_qmatic_settings(current_user))
@rest('/common/q-matic', 'put')
@returns(QMaticSettingsTO)
@arguments(data=QMaticSettingsTO)
def rest_save_qmatic_settings(data):
    """Save the Q-Matic settings and return the stored result."""
    current_user = users.get_current_user()
    saved = save_qmatic_settings(current_user, data)
    return QMaticSettingsTO.from_model(saved)
@rest('/common/jcc-appointments', 'get')
@returns(dict)
@arguments()
def rest_get_jcc_settings():
    """Return the JCC appointment settings as a plain dict."""
    current_user = users.get_current_user()
    return get_jcc_settings(current_user).to_dict()
@rest('/common/jcc-appointments', 'put')
@returns(dict)
@arguments(data=dict)
def rest_save_jcc_settings(data):
    """Save the JCC appointment settings and return them as a plain dict."""
    current_user = users.get_current_user()
    return save_jcc_settings(current_user, **data).to_dict()
@rest('/common/timeblockr', 'get')
@returns(dict)
@arguments()
def rest_get_timeblockr_settings():
    """Return the Timeblockr settings as a plain dict."""
    current_user = users.get_current_user()
    return get_timeblockr_settings(current_user).to_dict()
@rest('/common/timeblockr', 'put')
@returns(dict)
@arguments(data=dict)
def rest_save_timeblockr_settings(data):
    """Save the Timeblockr settings and return them as a plain dict."""
    current_user = users.get_current_user()
    return save_timeblockr_settings(current_user, **data).to_dict()
@rest("/common/repair/settings/load", "get", read_only_access=True)
@returns(SolutionRepairSettingsTO)
@arguments()
def repair_settings_load():
    """Return the repair settings of the current service."""
    current_user = users.get_current_user()
    settings = get_solution_settings(current_user)
    repair_settings = get_solution_repair_settings(current_user)
    return SolutionRepairSettingsTO.fromModel(repair_settings, settings.main_language)
@rest("/common/repair/settings/put", "post")
@returns(ReturnStatusTO)
@arguments(text_1=unicode)
def put_repair_settings(text_1):
    """Store the repair info text and broadcast that updates are pending."""
    current_user = users.get_current_user()
    try:
        settings_key = SolutionRepairSettings.create_key(current_user)
        repair_settings = SolutionRepairSettings.get(settings_key)
        if repair_settings is None:
            repair_settings = SolutionRepairSettings(key=settings_key)
        repair_settings.text_1 = text_1
        repair_settings.put()
        settings = get_solution_settings(current_user)
        settings.updates_pending = True
        put_and_invalidate_cache(repair_settings, settings)
        broadcast_updates_pending(settings)
        send_message(current_user, u"solutions.common.repair.settings.update")
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest("/common/repair_order/delete", "post")
@returns(ReturnStatusTO)
@arguments(order_key=unicode, message=unicode)
def repair_order_delete(order_key, message):
    """Delete a repair order, notifying the customer with *message*."""
    try:
        delete_repair_order(users.get_current_user(), order_key, message)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest("/common/repair_order/load", "get", read_only_access=True)
@returns([SolutionRepairOrderTO])
@arguments()
def repair_orders_load():
    """Return all repair orders of the current service identity."""
    current_user = users.get_current_user()
    identity = users.get_current_session().service_identity
    settings = get_solution_settings(current_user)
    orders = get_solution_repair_orders(current_user, identity, settings.solution)
    return [SolutionRepairOrderTO.fromModel(order) for order in orders]
@rest("/common/repair_order/sendmessage", "post")
@returns(ReturnStatusTO)
@arguments(order_key=unicode, order_status=int, message=unicode)
def repair_order_send_message(order_key, order_status, message):
    """Send a status message to the customer of a repair order."""
    try:
        current_user = users.get_current_user()
        send_message_for_repair_order(current_user, order_key, order_status, message)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest("/common/sandwich/settings/load", "get", read_only_access=True)
@returns(SandwichSettingsTO)
@arguments()
def rest_load_sandwich_settings():
    """Return the sandwich settings plus all types, toppings and options."""
    current_user = users.get_current_user()
    settings = get_solution_settings(current_user)
    solution = settings.solution
    sandwich_types = SandwichType.list(current_user, solution).run()
    sandwich_toppings = SandwichTopping.list(current_user, solution).run()
    sandwich_options = SandwichOption.list(current_user, solution).run()
    model = SandwichSettings.get_settings(current_user, solution)
    return SandwichSettingsTO.from_model(model, sandwich_types, sandwich_toppings, sandwich_options,
                                         settings.currency)
@rest("/common/sandwich/settings/save", "post")
@returns(SandwichSettingsTO)
@arguments(sandwich_settings=SandwichSettingsTO)
def save_sandwich_settings(sandwich_settings):
    """
    Persist the sandwich settings and its types, toppings and options.

    Args:
        sandwich_settings (SandwichSettingsTO): partial update; members left at
            MISSING are not touched.
    Returns:
        sandwich_settings (SandwichSettingsTO): the freshly loaded settings when
            new items were created (so the client learns their ids), else None.
    """
    service_user = users.get_current_user()
    sln_settings = get_solution_settings(service_user)
    simple_members = get_members(SandwichSettingsTO)[1]
    to_put = []
    # only load/update the settings model when at least one simple member was sent
    if any(getattr(sandwich_settings, name) is not MISSING for name, _ in simple_members):
        sandwich_settings_model = SandwichSettings.get_settings(service_user, sln_settings.solution)
        if sandwich_settings.show_prices != MISSING:
            sandwich_settings_model.show_prices = sandwich_settings.show_prices
        if sandwich_settings.days != MISSING:
            sandwich_settings_model.status_days = sandwich_settings.days
        if sandwich_settings.from_ != MISSING:
            sandwich_settings_model.time_from = sandwich_settings.from_
        if sandwich_settings.till != MISSING:
            sandwich_settings_model.time_until = sandwich_settings.till
        if sandwich_settings.leap_time is not MISSING:
            sandwich_settings_model.leap_time = sandwich_settings.leap_time
        if sandwich_settings.leap_time_enabled is not MISSING:
            sandwich_settings_model.leap_time_enabled = sandwich_settings.leap_time_enabled
        if sandwich_settings.leap_time_type is not MISSING:
            sandwich_settings_model.leap_time_type = sandwich_settings.leap_time_type
        to_put.append(sandwich_settings_model)
    has_new = False

    def update(items, clazz):
        # Create or update the item models of type `clazz`; queues them on
        # to_put and reports whether any items without an id (new) were seen.
        has_new_items = False
        # XXX: multiget
        for item in items:
            if item.id is MISSING:
                item_model = clazz(parent=parent_key(service_user, sln_settings.solution))
                has_new_items = True
            else:
                item_model = clazz.get_by_id(item.id, parent_key(service_user, sln_settings.solution))
            if item.deleted:
                item_model.deleted = True
            item_model.description = item.description
            item_model.price = item.price
            item_model.order = item.order
            to_put.append(item_model)
        return has_new_items

    # BUG FIX: this used `has_new = has_new or update(...)`, whose short-circuit
    # skipped the update() call for the later lists as soon as an earlier list
    # contained a new item — silently discarding topping/option edits.
    # Evaluating update() first guarantees every provided list is processed.
    if sandwich_settings.types != MISSING:
        has_new = update(sandwich_settings.types, SandwichType) or has_new
    if sandwich_settings.toppings != MISSING:
        has_new = update(sandwich_settings.toppings, SandwichTopping) or has_new
    if sandwich_settings.options != MISSING:
        has_new = update(sandwich_settings.options, SandwichOption) or has_new
    sln_settings.updates_pending = True
    to_put.append(sln_settings)
    put_in_chunks(to_put)
    broadcast_updates_pending(sln_settings)
    if has_new:
        # reload so the client receives the datastore-assigned ids
        return rest_load_sandwich_settings()
@rest("/common/sandwich/orders/load", "get", read_only_access=True)
@returns([SandwichOrderTO])
@arguments()
def load_sandwich_orders():
    """Return all sandwich orders of the current service identity."""
    current_user = users.get_current_user()
    identity = users.get_current_session().service_identity
    settings = get_solution_settings(current_user)
    orders = SandwichOrder.list(current_user, identity, settings.solution)
    return [SandwichOrderTO.fromModel(order) for order in orders]
@rest("/common/sandwich/orders/reply", "post")
@returns(ReturnStatusTO)
@arguments(sandwich_id=unicode, message=unicode)
def sandwich_order_reply(sandwich_id, message):
    """Send a reply *message* on a sandwich order."""
    current_user = users.get_current_user()
    identity = users.get_current_session().service_identity
    try:
        reply_sandwich_order(current_user, identity, sandwich_id, message)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest("/common/sandwich/orders/ready", "post")
@returns(ReturnStatusTO)
@arguments(sandwich_id=unicode, message=unicode)
def sandwich_order_ready(sandwich_id, message):
    """Mark a sandwich order as ready, notifying the customer with *message*."""
    current_user = users.get_current_user()
    identity = users.get_current_session().service_identity
    try:
        ready_sandwich_order(current_user, identity, sandwich_id, message)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest("/common/sandwich/orders/delete", "post")
@returns(ReturnStatusTO)
@arguments(sandwich_id=unicode, message=unicode)
def sandwich_order_delete(sandwich_id, message):
    """Delete a sandwich order, notifying the customer with *message*."""
    current_user = users.get_current_user()
    identity = users.get_current_session().service_identity
    try:
        delete_sandwich_order(current_user, identity, sandwich_id, message)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest("/common/group_purchase/broadcast", "post")
@returns(ReturnStatusTO)
@arguments(group_purchase_id=(int, long), message=unicode)
def group_purchase_broadcast(group_purchase_id, message=None):
    """Broadcast *message* to the members of a group purchase.

    BUG FIX: the default was `message=unicode`, which binds the *type object*
    `unicode` as the default value — clearly a typo. Use None instead; the
    @arguments decorator still declares the parameter as a unicode string.
    """
    service_user = users.get_current_user()
    session_ = users.get_current_session()
    service_identity = session_.service_identity
    try:
        broadcast_group_purchase(service_user, service_identity, group_purchase_id, message)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest("/common/group_purchase/load", "get", silent_result=True, read_only_access=True)
@returns([SolutionGroupPurchaseTO])
@arguments()
def group_purchase_load():
    """Return every group purchase of this service identity, including picture
    and subscription data."""
    service_user = users.get_current_user()
    session_ = users.get_current_session()
    service_identity = session_.service_identity
    sln_settings = get_solution_settings(service_user)
    # NOTE(review): 'incude_subscriptions' looks misspelled, but it must match
    # the keyword accepted by SolutionGroupPurchaseTO.fromModel — confirm the
    # callee before renaming it here.
    return [SolutionGroupPurchaseTO.fromModel(m, include_picture=True, incude_subscriptions=True) for m in
            SolutionGroupPurchase.list(service_user, service_identity, sln_settings.solution)]
@rest("/common/group_purchase/save", "post")
@returns(ReturnStatusTO)
@arguments(group_purchase=SolutionGroupPurchaseTO)
def group_purchase_save(group_purchase):
    """Create or update a group purchase for the current service identity."""
    current_user = users.get_current_user()
    identity = users.get_current_session().service_identity
    try:
        save_group_purchase(current_user, identity, group_purchase)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest("/common/group_purchase/delete", "post")
@returns(ReturnStatusTO)
@arguments(group_purchase_id=(int, long))
def group_purchase_delete(group_purchase_id):
    """Delete one group purchase of the current service identity."""
    current_user = users.get_current_user()
    identity = users.get_current_session().service_identity
    try:
        delete_group_purchase(current_user, identity, group_purchase_id)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest("/common/group_purchase/subscriptions/add", "post")
@returns(ReturnStatusTO)
@arguments(group_purchase_id=(int, long), name=unicode, units=int)
def group_purchase_subscription_add(group_purchase_id, name, units):
    """Subscribe *name* for *units* units on a group purchase."""
    try:
        current_user = users.get_current_user()
        new_group_purchase_subscription(current_user, None, group_purchase_id, name, None, units)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest("/common/group_purchase/settings/load", "get", read_only_access=True)
@returns(SolutionGroupPurchaseSettingsTO)
@arguments()
def group_purchase_settings_load():
    """Return the group-purchase settings of the current service."""
    current_user = users.get_current_user()
    settings = get_solution_settings(current_user)
    model = get_solution_group_purchase_settings(current_user, settings.solution)
    return SolutionGroupPurchaseSettingsTO.fromModel(model)
@rest("/common/group_purchase/settings/save", "post")
@returns(ReturnStatusTO)
@arguments(group_purchase_settings=SolutionGroupPurchaseSettingsTO)
def group_purchase_settings_save(group_purchase_settings):
    """Persist group-purchase settings inside a cross-group transaction."""
    current_user = users.get_current_user()
    try:
        def trans():
            settings = get_solution_settings(current_user)
            gp_settings = get_solution_group_purchase_settings(current_user, settings.solution)
            gp_settings.visible = group_purchase_settings.visible
            settings.updates_pending = True
            put_and_invalidate_cache(gp_settings, settings)
            return settings
        # xg=True: the two models live in different entity groups
        options = db.create_transaction_options(xg=True)
        updated_settings = db.run_in_transaction_options(options, trans)
        broadcast_updates_pending(updated_settings)
        return RETURNSTATUS_TO_SUCCESS
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest('/common/menu/item/image/upload', 'post', read_only_access=False, silent_result=True)
@returns(ImageReturnStatusTO)
@arguments(image=unicode, image_id_to_delete=(int, long, NoneType))
def upload_menu_item_image(image=None, image_id_to_delete=None):
    """Store a menu item image uploaded as a base64 data-url.

    Optionally deletes a previous image first. Returns the new blob id on
    success, or an error status when the business layer rejects the upload.
    """
    current_user = users.get_current_user()
    settings = get_solution_settings(current_user)
    logging.info('%s uploaded a small file', settings.name)
    azzert(image)
    if image_id_to_delete:
        delete_file_blob(current_user, image_id_to_delete)
    try:
        # strip the 'data:...;base64,' prefix before decoding the payload
        raw_bytes = base64.b64decode(image.split(',', 1)[1])
        blob = create_file_blob(current_user, raw_bytes)
        return ImageReturnStatusTO.create(True, None, blob.key().id())
    except BusinessException as ex:
        return ImageReturnStatusTO.create(False, ex.message, None)
@rest('/common/menu/item/image/qr_url', 'get')
@returns(unicode)
@arguments(category_index=(int, long), item_index=(int, long))
def menu_item_qr_url(category_index, item_index):
    """Return the QR-code URL for one menu item."""
    current_user = users.get_current_user()
    return get_menu_item_qr_url(current_user, category_index, item_index)
@rest('/common/menu/item/image/remove', 'post', read_only_access=False)
@returns(ReturnStatusTO)
@arguments(image_id=(int, long))
def remove_file_blob(image_id):
    """Delete the file blob with *image_id* belonging to the current user.

    BUG FIX: the endpoint is declared @returns(ReturnStatusTO) but the
    original fell off the end and returned None; return an explicit
    success status like every sibling endpoint does.
    """
    delete_file_blob(users.get_current_user(), image_id)
    return RETURNSTATUS_TO_SUCCESS
@rest('/common/customer/signup/all', 'get')
@returns([CustomerSignupTO])
@arguments()
def rest_get_customer_signups():
    """List every customer signup for the current (city) service user."""
    city_customer = get_customer(users.get_current_user())
    signups = get_customer_signups(city_customer)
    return [CustomerSignupTO.from_model(signup) for signup in signups]
@rest('/common/customer/signup/reply', 'post')
@returns(ReturnStatusTO)
@arguments(signup_key=unicode, message=unicode)
def rest_customer_signup_reply(signup_key, message):
    """Deny a customer signup, sending *message* to the applicant as reason.

    Despite the generic endpoint name this handler only rejects
    (approved=False). A missing, already-handled or locked signup is
    silently ignored.
    """
    signup = db.get(signup_key)  # type: CustomerSignup
    if signup and not signup.done and signup.can_update:
        service_user = users.get_current_user()
        city_customer = get_customer(service_user)
        # approved=False: this endpoint only handles rejections.
        set_customer_signup_status(city_customer, signup, approved=False, reason=message)
    return RETURNSTATUS_TO_SUCCESS
@rest('/common/functionalities/modules/activated', 'get')
@returns([unicode])
@arguments()
def rest_get_activated_modules():
    """Return the effective module list for the current service user.

    The result is the stored modules plus the ones queued for addition
    (modules_to_put) minus the ones queued for removal (modules_to_remove).
    """
    service_user = users.get_current_user()
    sln_settings = get_solution_settings(service_user)
    # Work on a copy: the previous code appended to / removed from
    # sln_settings.modules itself, mutating the (possibly cached) model
    # instance as a side effect of a read-only request.
    modules = list(sln_settings.modules)
    for module in sln_settings.modules_to_put:
        if module not in modules:
            modules.append(module)
    for module in sln_settings.modules_to_remove:
        if module in modules:
            modules.remove(module)
    return modules
@rest('/common/functionalities/modules/enable', 'post')
@returns(ReturnStatusTO)
@arguments(name=unicode, enabled=bool)
def rest_enable_or_disable_module(name, enabled):
    """Enable or disable a solution module by name for the current user.

    The transition is validated first; a rejected transition returns a
    localized failure message, a BusinessException returns its message.
    """
    try:
        service_user = users.get_current_user()
        if not validate_enable_or_disable_solution_module(service_user, name, enabled):
            language = get_solution_settings(service_user).main_language
            return ReturnStatusTO.create(False, common_translate(language, 'cannot_enable_solution_module'))
        enable_or_disable_solution_module(service_user, name, enabled)
        return ReturnStatusTO.create()
    except BusinessException as e:
        return ReturnStatusTO.create(False, e.message)
@rest('/common/files/<prefix:.*>', 'post')
@returns(UploadedFileTO)
@arguments(prefix=unicode)
def rest_upload_file(prefix):
    """Upload a file under *prefix*, optionally linked to a reference entity.

    Supported reference types: 'form' (which also overrides the storage
    prefix), 'branding_settings' and 'point_of_interest'. Raises when the
    referenced form or point of interest does not exist.
    """
    request = GenericRESTRequestHandler.getCurrentRequest()
    uploaded_file = request.POST.get('file')
    reference_type = request.POST.get('reference_type')
    ref_id = request.POST.get('reference')
    media_type = request.POST.get('type')
    service_user = users.get_current_user()
    reference = None
    if ref_id and reference_type:
        if reference_type == 'form':
            form_id = long(ref_id)
            form_key = OcaForm.create_key(form_id, service_user)
            form = form_key.get()
            if not form:
                raise FormNotFoundException(form_id)
            # Form uploads are grouped per form id regardless of the URL prefix.
            prefix = 'forms/%d' % form_id
            reference = form_key
        elif reference_type == 'branding_settings':
            reference = ndb.Key.from_old_key(SolutionBrandingSettings.create_key(service_user))
        elif reference_type == 'point_of_interest':
            poi_id = long(ref_id)
            reference = PointOfInterest.create_key(poi_id)
            if not reference.get():
                raise BusinessException('Point of interest %d not found' % poi_id)
    result = upload_file(users.get_current_user(), uploaded_file, prefix, media_type, reference)
    return UploadedFileTO.from_model(result)
@rest('/common/files', 'get', read_only_access=True, silent_result=True)
@returns([UploadedFileTO])
@arguments(media_type=unicode, prefix=unicode, reference=(int, long, NoneType), reference_type=unicode)
def rest_list_uploaded_files(media_type, prefix=None, reference=None, reference_type=None):
    """List the current user's uploaded files, optionally filtered."""
    found = list_files(users.get_current_user(), media_type, prefix, reference_type, reference)
    return [UploadedFileTO.from_model(uploaded) for uploaded in found]
@rest('/common/image-gallery/<prefix:[^/]+>', 'get', read_only_access=True, silent_result=True)
@returns([GalleryFileTO])
@arguments(prefix=unicode)
def rest_list_gallery_images(prefix):
    """List the stock gallery images stored under image-library/<prefix>.

    In DEBUG a fixed sample set is returned instead of querying cloud
    storage (local development has no bucket access).
    """
    path = '/%s/image-library/%s/' % (OCA_FILES_BUCKET, prefix)
    if DEBUG:
        # De-duplicated from four copy-pasted constructor calls; the URLs
        # produced are identical to before.
        sample_names = ('merchant', 'community-service', 'association', 'care')
        return [GalleryFileTO(url='https://storage.googleapis.com/oca-files/image-library/%s/%s.jpg' % (prefix, name),
                              content_type='image/jpeg',
                              type=MediaType.IMAGE,
                              size=-1)
                for name in sample_names]
    # Skip the directory placeholder entry itself.
    return [GalleryFileTO(url=get_serving_url(f.filename), content_type=f.content_type, size=f.st_size,
                          type=MediaType.IMAGE)
            for f in cloudstorage.listbucket(path) if f.filename != path]
@rest('/common/i18n/<prefix:[^/]+>/<lang:[^/]+>.json', 'get', read_only_access=True, authenticated=False, silent=True,
      silent_result=True)
@returns(dict)
@arguments(lang=unicode, prefix=unicode)
def api_get_translations(lang, prefix):
    """Serve the translations for *lang* belonging to *prefix* as a dict.

    Keys namespaced as '<prefix>.<key>' are returned with the prefix
    stripped. The keys listed in TRANSLATION_MAPPING[prefix] are also
    exposed, with python-style '%(var)s' placeholders rewritten to
    '{{ var }}'.
    """
    language_translations = translations.get(lang, {})
    prefix_with_dot = prefix + '.'
    mapping = {key.replace(prefix_with_dot, ''): translation
               for key, translation in language_translations.iteritems()
               if key.startswith(prefix_with_dot)}
    # Non-greedy match: the previous greedy r'%\((.*)\)s' collapsed a string
    # with two placeholders ("%(a)s ... %(b)s") into one bogus variable
    # spanning both of them.
    translation_re = re.compile(r'%\((.*?)\)s')
    # Replace %(var)s with {{ var }}
    for key in TRANSLATION_MAPPING.get(prefix, []):
        if key in language_translations:
            mapping[key] = translation_re.sub(r'{{ \1 }}', language_translations[key])
        elif DEBUG:
            logging.warning('Translation not found for language %s: %s', lang, key)
    return {prefix: mapping}
@rest('/common/consts', 'get', read_only_access=True, silent=True, silent_result=True)
@returns(dict)
@arguments()
def api_get_consts():
    """Expose session-derived constants to the frontend."""
    current_session = get_current_session()
    consts = {'is_shop_user': current_session.shop}
    return consts
| |
# Copyright (c) 2015 SONATA-NFV, Thales Communications & Security
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, Thales Communications & Security
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
"""son-analyze command line tool"""
import sys
import os
import logging
import signal
import urllib.parse
import uuid
from argparse import ArgumentParser, Namespace, ArgumentTypeError
import typing # noqa pylint: disable=unused-import
from typing import List
from pkg_resources import resource_filename # type: ignore
from docker import APIClient # type: ignore
from son_analyze import __version__
from son_analyze.cli import fetch_cmd
from son_analyze.core import types
from son_analyze.ops.fetch import Kind
_LOGGER = logging.getLogger(__name__)
_IMAGE_TAG = 'son-analyze-scikit'
def bootstrap(args: Namespace) -> None:
    """Create the images used by son-analyze in the current host.

    Builds the anaconda-based docker image (tag: son-analyze-scikit) using
    the repository root as build context, then exits the process: 0 on
    success, 1 as soon as the build stream yields a non-"stream" payload.
    """
    cli = APIClient(base_url=args.docker_socket)
    # Build context is the repository root, three levels above the
    # son_analyze.cli package, so the Dockerfile can access the sources.
    root_context = os.path.realpath(
        resource_filename('son_analyze.cli', '../../..'))
    _LOGGER.info('The root context path is: %s', root_context)
    path = resource_filename('son_analyze.cli.resources',
                             'anaconda.Dockerfile')
    # docker-py expects the dockerfile path relative to the build context.
    path = os.path.relpath(path, root_context)
    _LOGGER.info('The relative path to the bootstrap dockerfile is: %s', path)
    # import pdb; pdb.set_trace()
    for line in cli.build(path=root_context, tag=_IMAGE_TAG,
                          dockerfile=path, rm=True, decode=True):
        if "stream" in line:
            print('> ', line["stream"], end="")
        else:
            # NOTE(review): any non-"stream" payload aborts with exit code 1 —
            # confirm the docker build stream only emits such payloads on error.
            print(line)
            sys.exit(1)
    sys.exit(0)
def run(args: Namespace) -> None:
    """Run an analysis framework environment.

    Starts a Jupyter notebook container from the bootstrap image, mounting
    the host entropy devices and the Sonata token file (read-only), then
    blocks until the container exits or the user interrupts. The container
    is removed before the process exits.
    """
    cli = APIClient(base_url=args.docker_socket)
    token_path = os.path.join(args.son_workspace, 'platforms', 'token.txt')
    _LOGGER.info('The path to the Sonata token file is: %s', token_path)
    if not (os.path.isfile(token_path) and os.access(token_path, os.R_OK)):
        _LOGGER.error('The Sonata token file %s is not a readable file',
                      token_path)
        sys.exit(1)
    binds = {
        '/dev/random': {
            'bind': '/dev/random'
        },
        '/dev/urandom': {
            'bind': '/dev/urandom'
        },
        token_path: {
            'bind': token_path,
            'mode': 'ro'
        }
    }  # type: typing.Dict[str, typing.Dict[str, str]]
    if args.dynamic_mount:
        # Developer mode: mount the working tree over the installed code.
        field_name = os.path.realpath(
            resource_filename('son_analyze.cli', '../../..'))
        new_entry = {
            field_name: {
                'bind': '/son-analyze',
                'mode': 'rw'
            }
        }
        binds.update(new_entry)
    host_config = cli.create_host_config(
        port_bindings={8888: args.jupiter_port},
        binds=binds)
    container = cli.create_container(image=_IMAGE_TAG+':latest',
                                     labels=['com.sonata.analyze'],
                                     ports=[8888],
                                     host_config=host_config,
                                     user='root',
                                     environment=['GRANT_SUDO=yes'],
                                     command=['start-notebook.sh',
                                              "--NotebookApp.token=''",
                                              ("--NotebookApp."
                                               "iopub_data_rate_limit="
                                               "10000000000")])
    container_id = container.get('Id')
    cli.start(container=container_id)

    def cleanup():
        """Remove the container"""
        cli.remove_container(container=container_id, force=True)

    def signal_term_handler(unused1, unused2):  # noqa pylint: disable=unused-argument
        """Catch signal to clean the containers"""
        print('Interruption detected, stopping environment')
        cleanup()
        sys.exit(1)

    signal.signal(signal.SIGTERM, signal_term_handler)
    signal.signal(signal.SIGINT, signal_term_handler)
    print('Browse http://localhost:{} \n'
          'Type Ctrl-C to exit'.format(args.jupiter_port))
    exit_code = 0
    # Block until the notebook container stops, then propagate its status.
    exit_code = cli.wait(container=container_id)
    cleanup()
    sys.exit(exit_code)
def version(args: Namespace) -> None:
    """Print the current version and exit"""
    if args.short:
        print(__version__)
    else:
        print('son-analyze version: {}'.format(__version__))
    sys.exit(0)
def resource_target(raw_target: str) -> types.ResourceTargetTuple:
    """Parse a target given as <vendor>,<name>,<version> or as a uuid.

    Returns a ResourceTargetTuple with either the triple or the uuid field
    populated. Raises ArgumentTypeError on malformed input so argparse can
    report it as a usage error.
    """
    if ',' in raw_target:
        try:
            rvendor, rname, rversion = raw_target.split(',')
            return types.ResourceTargetTuple(vendor=rvendor, name=rname,
                                             version=rversion, uuid=None)
        except ValueError as exc:
            # Wrong number of comma-separated fields. The previous bare
            # `except:` also swallowed SystemExit/KeyboardInterrupt.
            raise ArgumentTypeError("Target must have the form: "
                                    "<vendor>,<name>,<version>") from exc
    else:
        try:
            tid = str(uuid.UUID(raw_target))
            return types.ResourceTargetTuple(vendor=None, name=None,
                                             version=None, uuid=tid)
        except ValueError as exc:
            raise ArgumentTypeError("Target uuid must have the form: "
                                    "<12345678-1234-1234-1234-"
                                    "123456789012>") from exc
def url_type(raw_url: str) -> urllib.parse.ParseResult:
    """Define the type of a URL"""
    parsed = urllib.parse.urlparse(raw_url, scheme='http')
    if parsed.scheme and parsed.netloc:
        return parsed
    raise ArgumentTypeError("Url is not valid")
def fetch_func(args: Namespace) -> None:
    """Fetch data"""
    endpoint = args.endpoint
    workspace = args.son_workspace
    fetch_cmd.fetch_cmd(endpoint, workspace, args.kind, args.target)
    sys.exit(0)
def dispatch(raw_args: List) -> None:
    """Parse the raw_args and dispatch the control flow.

    Builds the argparse tree (version / bootstrap / run / fetch), configures
    logging and invokes the selected sub-command handler. Every handler
    terminates the process via sys.exit, so this function never returns.
    """
    parser = ArgumentParser(description=('An analysis framework '
                                         'creation tool for Sonata'))
    parser.add_argument('-v', '--verbose', default=logging.WARNING,
                        action="store_const", dest="logLevel",
                        const=logging.INFO, help='increase verbosity')
    parser.add_argument('--docker-socket', type=str,
                        default='unix://var/run/docker.sock',
                        action='store',
                        help=('An uri to the docker socket '
                              '(default: %(default)s)'))
    dworkspace_dir = os.path.join(os.path.expanduser('~'), '.son-workspace')
    parser.add_argument('--son-workspace', default=dworkspace_dir,
                        action='store', type=str, metavar='DIR',
                        help='A path to the Sonata workspace directory')

    def no_command(_: Namespace) -> None:
        """Print the help usage and exit"""
        parser.print_help()
        sys.exit(0)

    # Default handler when no sub-command is given.
    parser.set_defaults(func=no_command)
    subparsers = parser.add_subparsers()
    parser_version = subparsers.add_parser('version', help='Show the version')
    parser_version.add_argument('--short', default=False, action='store_true',
                                help='Shows only the version')
    parser_version.set_defaults(func=version)
    parser_bootstrap = subparsers.add_parser('bootstrap',
                                             help='Bootstrap son-analyze')
    parser_bootstrap.set_defaults(func=bootstrap)
    parser_run = subparsers.add_parser('run', help='Run an environment')
    parser_run.add_argument('--dynamic-mount', default=False,
                            action='store_true',
                            help=('(Dev) Dynamically mount the R code'
                                  ' inside the environment'))
    parser_run.add_argument('--port', type=int,
                            default=8888, action='store', dest='jupiter_port',
                            help=('The listening port for the Jupiter '
                                  'server (default: %(default)d)'))
    parser_run.set_defaults(func=run)
    parser_fetch = subparsers.add_parser('fetch', help='Fetch data/metrics')
    default_val = urllib.parse.urlparse(
        'http://sp.int2.sonata-nfv.eu:9090', scheme='http')
    help_msg = 'A Gatekeeper endpoint (default: {})'.format(
        default_val.geturl())
    parser_fetch.add_argument('--endpoint', action='store', help=help_msg,
                              metavar='URL', type=url_type,
                              default=default_val)
    choices = tuple(elt.name for elt in Kind)  # type: ignore
    parser_fetch.add_argument('kind', help="The resource's type",
                              type=str, choices=choices)
    parser_fetch.add_argument('target', type=resource_target,
                              help=('A resource specified by: '
                                    '<vendor>,<name>,<version> or <uuid>'))
    parser_fetch.set_defaults(func=fetch_func)
    args = parser.parse_args(raw_args)
    logging.basicConfig(level=args.logLevel)
    _LOGGER.debug("Setting level to %s", args.logLevel)
    args.func(args)
    assert False  # this line is impossible to reach
def main() -> None:
    """Main entry point for the son-analyze command line tool"""
    # Strip the program name; dispatch() terminates the process itself.
    dispatch(sys.argv[1:])
| |
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import functools
import os
import mock
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder import test
from cinder.volume.drivers import remotefs
from cinder.volume.drivers import smbfs
def requires_allocation_data_update(expected_size):
    """Decorator factory for tests: after running the wrapped test method,
    assert that the driver updated its disk allocation data exactly once
    for the fake volume with *expected_size*."""
    def decorator(test_method):
        @functools.wraps(test_method)
        def wrapped(self, *args, **kwargs):
            patcher = mock.patch.object(self._smbfs_driver,
                                        'update_disk_allocation_data')
            with patcher as update_mock:
                test_method(self, *args, **kwargs)
                update_mock.assert_called_once_with(self._FAKE_VOLUME,
                                                    expected_size)
        return wrapped
    return decorator
class SmbFsTestCase(test.TestCase):
    """Unit tests for the SMBFS volume driver (cinder.volume.drivers.smbfs)."""

    # Fake share / mount-point fixtures shared by all tests.
    _FAKE_SHARE = '//1.2.3.4/share1'
    _FAKE_SHARE_HASH = 'db0bf952c1734092b83e8990bd321131'
    _FAKE_MNT_BASE = '/mnt'
    _FAKE_VOLUME_NAME = 'volume-4f711859-4928-4cb7-801a-a50c37ceaccc'
    _FAKE_TOTAL_SIZE = '2048'
    _FAKE_TOTAL_AVAILABLE = '1024'
    _FAKE_TOTAL_ALLOCATED = 1024
    # Minimal volume dict as passed around by the driver under test.
    _FAKE_VOLUME = {'id': '4f711859-4928-4cb7-801a-a50c37ceaccc',
                    'size': 1,
                    'provider_location': _FAKE_SHARE,
                    'name': _FAKE_VOLUME_NAME,
                    'status': 'available'}
    _FAKE_MNT_POINT = os.path.join(_FAKE_MNT_BASE, _FAKE_SHARE_HASH)
    _FAKE_VOLUME_PATH = os.path.join(_FAKE_MNT_POINT, _FAKE_VOLUME_NAME)
    _FAKE_SNAPSHOT_ID = '5g811859-4928-4cb7-801a-a50c37ceacba'
    _FAKE_SNAPSHOT = {'id': _FAKE_SNAPSHOT_ID,
                      'volume': _FAKE_VOLUME,
                      'status': 'available',
                      'volume_size': 1}
    _FAKE_SNAPSHOT_PATH = (
        _FAKE_VOLUME_PATH + '-snapshot' + _FAKE_SNAPSHOT_ID)
    # Mount options in flag-string form and their parsed dict equivalent.
    _FAKE_SHARE_OPTS = '-o username=Administrator,password=12345'
    _FAKE_OPTIONS_DICT = {'username': 'Administrator',
                          'password': '12345'}
    _FAKE_ALLOCATION_DATA_PATH = os.path.join('fake_dir',
                                              'fake_allocation_data')
    # Config double used by the setup/eligibility tests.
    _FAKE_SMBFS_CONFIG = mock.MagicMock()
    _FAKE_SMBFS_CONFIG.smbfs_oversub_ratio = 2
    _FAKE_SMBFS_CONFIG.smbfs_used_ratio = 0.5
    _FAKE_SMBFS_CONFIG.smbfs_shares_config = '/fake/config/path'
    _FAKE_SMBFS_CONFIG.smbfs_default_volume_format = 'raw'
    _FAKE_SMBFS_CONFIG.smbfs_sparsed_volumes = False
    def setUp(self):
        """Build a driver instance with mocked remotefs client and helpers."""
        super(SmbFsTestCase, self).setUp()
        self._smbfs_driver = smbfs.SmbfsDriver(configuration=mock.Mock())
        self._smbfs_driver._remotefsclient = mock.Mock()
        self._smbfs_driver._local_volume_dir = mock.Mock(
            return_value=self._FAKE_MNT_POINT)
        self._smbfs_driver._execute = mock.Mock()
        self._smbfs_driver.base = self._FAKE_MNT_BASE
        self._smbfs_driver._alloc_info_file_path = (
            self._FAKE_ALLOCATION_DATA_PATH)
def _get_fake_allocation_data(self):
return {self._FAKE_SHARE_HASH: {
'total_allocated': self._FAKE_TOTAL_ALLOCATED}}
    @mock.patch('__builtin__.open')
    @mock.patch('os.path.exists')
    @mock.patch.object(fileutils, 'ensure_tree')
    @mock.patch('json.load')
    def _test_setup_allocation_data(self, mock_json_load, mock_ensure_tree,
                                    mock_exists, mock_open,
                                    allocation_data_exists=False):
        """_setup_allocation_data loads the JSON file when it exists;
        otherwise it creates the parent directory and writes a fresh file."""
        mock_exists.return_value = allocation_data_exists
        self._smbfs_driver._update_allocation_data_file = mock.Mock()

        self._smbfs_driver._setup_allocation_data()
        if allocation_data_exists:
            # The open() context manager's file object is fed to json.load.
            fd = mock_open.return_value.__enter__.return_value
            mock_json_load.assert_called_once_with(fd)
            self.assertEqual(self._smbfs_driver._allocation_data,
                             mock_json_load.return_value)
        else:
            mock_ensure_tree.assert_called_once_with(
                os.path.dirname(self._FAKE_ALLOCATION_DATA_PATH))
            update_func = self._smbfs_driver._update_allocation_data_file
            update_func.assert_called_once_with()

    def test_setup_allocation_data_file_unexisting(self):
        self._test_setup_allocation_data()

    def test_setup_allocation_data_file_existing(self):
        self._test_setup_allocation_data(allocation_data_exists=True)
    def _test_update_allocation_data(self, virtual_size_gb=None,
                                     volume_exists=True):
        """update_disk_allocation_data adjusts the per-share totals:
        virtual_size_gb=None means deletion (entry removed, total reduced),
        otherwise the volume's new virtual size is recorded."""
        self._smbfs_driver._update_allocation_data_file = mock.Mock()
        update_func = self._smbfs_driver._update_allocation_data_file

        fake_alloc_data = self._get_fake_allocation_data()
        if volume_exists:
            fake_alloc_data[self._FAKE_SHARE_HASH][
                self._FAKE_VOLUME_NAME] = self._FAKE_VOLUME['size']
        self._smbfs_driver._allocation_data = fake_alloc_data

        self._smbfs_driver.update_disk_allocation_data(self._FAKE_VOLUME,
                                                       virtual_size_gb)
        vol_allocated_size = fake_alloc_data[self._FAKE_SHARE_HASH].get(
            self._FAKE_VOLUME_NAME, None)
        if not virtual_size_gb:
            expected_total_allocated = (self._FAKE_TOTAL_ALLOCATED -
                                        self._FAKE_VOLUME['size'])
            self.assertIsNone(vol_allocated_size)
        else:
            expected_total_allocated = (self._FAKE_TOTAL_ALLOCATED +
                                        virtual_size_gb -
                                        self._FAKE_VOLUME['size'])
            self.assertEqual(virtual_size_gb, vol_allocated_size)
        update_func.assert_called_once_with()

        self.assertEqual(
            expected_total_allocated,
            fake_alloc_data[self._FAKE_SHARE_HASH]['total_allocated'])

    def test_update_allocation_data_volume_deleted(self):
        self._test_update_allocation_data()

    def test_update_allocation_data_volume_extended(self):
        self._test_update_allocation_data(
            virtual_size_gb=self._FAKE_VOLUME['size'] + 1)

    def test_update_allocation_data_volume_created(self):
        self._test_update_allocation_data(
            virtual_size_gb=self._FAKE_VOLUME['size'])
    @requires_allocation_data_update(expected_size=None)
    def test_delete_volume(self):
        """delete_volume mounts the share and deletes both the volume file
        and its '.info' companion; allocation data is updated (size None)."""
        drv = self._smbfs_driver
        fake_vol_info = self._FAKE_VOLUME_PATH + '.info'
        drv._ensure_share_mounted = mock.MagicMock()
        fake_ensure_mounted = drv._ensure_share_mounted
        drv._local_volume_dir = mock.Mock(
            return_value=self._FAKE_MNT_POINT)
        drv.get_active_image_from_info = mock.Mock(
            return_value=self._FAKE_VOLUME_NAME)
        drv._delete = mock.Mock()
        drv._local_path_volume_info = mock.Mock(
            return_value=fake_vol_info)

        with mock.patch('os.path.exists', lambda x: True):
            drv.delete_volume(self._FAKE_VOLUME)
            fake_ensure_mounted.assert_called_once_with(self._FAKE_SHARE)
            drv._delete.assert_any_call(
                self._FAKE_VOLUME_PATH)
            drv._delete.assert_any_call(fake_vol_info)
    @mock.patch('os.path.exists')
    @mock.patch.object(image_utils, 'check_qemu_img_version')
    def _test_setup(self, mock_check_qemu_img_version,
                    mock_exists, config, share_config_exists=True):
        """do_setup validates the config (shares file present, oversub ratio
        > 0, used ratio within [0, 1]) and mounts the shares; any invalid
        combination raises SmbfsException."""
        mock_exists.return_value = share_config_exists
        fake_ensure_mounted = mock.MagicMock()
        self._smbfs_driver._ensure_shares_mounted = fake_ensure_mounted
        self._smbfs_driver.configuration = config

        if not (config.smbfs_shares_config and share_config_exists and
                config.smbfs_oversub_ratio > 0 and
                0 <= config.smbfs_used_ratio <= 1):
            self.assertRaises(exception.SmbfsException,
                              self._smbfs_driver.do_setup,
                              None)
        else:
            self._smbfs_driver.do_setup(mock.sentinel.context)
            mock_check_qemu_img_version.assert_called_once_with()
            self.assertEqual({}, self._smbfs_driver.shares)
            fake_ensure_mounted.assert_called_once_with()

    def test_setup_missing_shares_config_option(self):
        fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
        fake_config.smbfs_shares_config = None
        self._test_setup(config=fake_config,
                         share_config_exists=False)

    def test_setup_missing_shares_config_file(self):
        self._test_setup(config=self._FAKE_SMBFS_CONFIG,
                         share_config_exists=False)

    def test_setup_invlid_oversub_ratio(self):
        # NOTE(review): method name has a typo ("invlid"); kept as-is so the
        # externally visible test id does not change.
        fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
        fake_config.smbfs_oversub_ratio = -1
        self._test_setup(config=fake_config)

    def test_setup_invalid_used_ratio(self):
        fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
        fake_config.smbfs_used_ratio = -1
        self._test_setup(config=fake_config)

    def test_setup_invalid_used_ratio2(self):
        fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
        fake_config.smbfs_used_ratio = 1.1
        self._test_setup(config=fake_config)
    def _test_create_volume(self, volume_exists=False, volume_format=None):
        """_do_create_volume dispatches to the creation helper matching the
        volume format ('vhd'/'vhdx' -> _create_windows_image, other named
        formats -> _create_<fmt>_file, default -> _create_regular_file) and
        makes the new file rw for all. An already-existing volume raises."""
        fake_method = mock.MagicMock()
        self._smbfs_driver.configuration = copy.copy(self._FAKE_SMBFS_CONFIG)
        self._smbfs_driver._set_rw_permissions_for_all = mock.MagicMock()
        fake_set_permissions = self._smbfs_driver._set_rw_permissions_for_all
        self._smbfs_driver.get_volume_format = mock.MagicMock()

        windows_image_format = False
        fake_vol_path = self._FAKE_VOLUME_PATH
        self._smbfs_driver.get_volume_format.return_value = volume_format

        if volume_format:
            if volume_format in ('vhd', 'vhdx'):
                windows_image_format = volume_format
                if volume_format == 'vhd':
                    # qemu uses the legacy 'vpc' name for VHD images.
                    windows_image_format = 'vpc'
                method = '_create_windows_image'
                fake_vol_path += '.' + volume_format
            else:
                method = '_create_%s_file' % volume_format
                if volume_format == 'sparsed':
                    self._smbfs_driver.configuration.smbfs_sparsed_volumes = (
                        True)
        else:
            method = '_create_regular_file'

        setattr(self._smbfs_driver, method, fake_method)

        with mock.patch('os.path.exists', new=lambda x: volume_exists):
            if volume_exists:
                self.assertRaises(exception.InvalidVolume,
                                  self._smbfs_driver._do_create_volume,
                                  self._FAKE_VOLUME)
                return

            self._smbfs_driver._do_create_volume(self._FAKE_VOLUME)
            if windows_image_format:
                fake_method.assert_called_once_with(
                    fake_vol_path,
                    self._FAKE_VOLUME['size'],
                    windows_image_format)
            else:
                fake_method.assert_called_once_with(
                    fake_vol_path, self._FAKE_VOLUME['size'])
            fake_set_permissions.assert_called_once_with(fake_vol_path)

    def test_create_existing_volume(self):
        self._test_create_volume(volume_exists=True)

    def test_create_vhdx(self):
        self._test_create_volume(volume_format='vhdx')

    def test_create_qcow2(self):
        self._test_create_volume(volume_format='qcow2')

    def test_create_sparsed(self):
        self._test_create_volume(volume_format='sparsed')

    def test_create_regular(self):
        self._test_create_volume()
    def _test_find_share(self, existing_mounted_shares=True,
                         eligible_shares=True):
        """_find_share picks the eligible mounted share with the least
        allocated space; raises when nothing is mounted or nothing fits."""
        if existing_mounted_shares:
            mounted_shares = ('fake_share1', 'fake_share2', 'fake_share3')
        else:
            mounted_shares = None

        self._smbfs_driver._mounted_shares = mounted_shares
        self._smbfs_driver._is_share_eligible = mock.Mock(
            return_value=eligible_shares)
        # Allocated sizes per share, in iteration order: 3, 2, 1.
        self._smbfs_driver._get_total_allocated = mock.Mock(
            side_effect=[3, 2, 1])

        if not mounted_shares:
            self.assertRaises(exception.SmbfsNoSharesMounted,
                              self._smbfs_driver._find_share,
                              self._FAKE_VOLUME['size'])
        elif not eligible_shares:
            self.assertRaises(exception.SmbfsNoSuitableShareFound,
                              self._smbfs_driver._find_share,
                              self._FAKE_VOLUME['size'])
        else:
            ret_value = self._smbfs_driver._find_share(
                self._FAKE_VOLUME['size'])
            # The eligible share with the minimum allocated space
            # will be selected
            self.assertEqual('fake_share3', ret_value)

    def test_find_share(self):
        self._test_find_share()

    def test_find_share_missing_mounted_shares(self):
        self._test_find_share(existing_mounted_shares=False)

    def test_find_share_missing_eligible_shares(self):
        self._test_find_share(eligible_shares=False)
    def _test_is_share_eligible(self, capacity_info, volume_size):
        """Run _is_share_eligible with capacity_info as GB values
        (total, available, allocated) converted to bytes; returns the
        driver's verdict for placing volume_size GB on the fake share."""
        self._smbfs_driver._get_capacity_info = mock.Mock(
            return_value=[float(x << 30) for x in capacity_info])
        self._smbfs_driver.configuration = self._FAKE_SMBFS_CONFIG
        return self._smbfs_driver._is_share_eligible(self._FAKE_SHARE,
                                                     volume_size)

    def test_share_volume_above_used_ratio(self):
        # 4 GB total, 1 GB free: adding 2 GB exceeds the configured
        # 0.5 used ratio.
        fake_capacity_info = (4, 1, 1)
        fake_volume_size = 2
        ret_value = self._test_is_share_eligible(fake_capacity_info,
                                                 fake_volume_size)
        self.assertFalse(ret_value)

    def test_eligible_share(self):
        fake_capacity_info = (4, 4, 0)
        fake_volume_size = 1
        ret_value = self._test_is_share_eligible(fake_capacity_info,
                                                 fake_volume_size)
        self.assertTrue(ret_value)

    def test_share_volume_above_oversub_ratio(self):
        # 7 GB already allocated on 4 GB with oversub ratio 2: no room left.
        fake_capacity_info = (4, 4, 7)
        fake_volume_size = 2
        ret_value = self._test_is_share_eligible(fake_capacity_info,
                                                 fake_volume_size)
        self.assertFalse(ret_value)

    def test_share_reserved_above_oversub_ratio(self):
        fake_capacity_info = (4, 4, 10)
        fake_volume_size = 1
        ret_value = self._test_is_share_eligible(fake_capacity_info,
                                                 fake_volume_size)
        self.assertFalse(ret_value)
    def test_parse_options(self):
        """parse_options splits the mount flags into (list, dict) form."""
        (opt_list,
         opt_dict) = self._smbfs_driver.parse_options(
            self._FAKE_SHARE_OPTS)
        expected_ret = ([], self._FAKE_OPTIONS_DICT)
        self.assertEqual(expected_ret, (opt_list, opt_dict))

    def test_parse_credentials(self):
        """parse_credentials rewrites DOMAIN\\user into a plain username."""
        fake_smb_options = r'-o user=MyDomain\Administrator,noperm'
        expected_flags = '-o username=Administrator,noperm'
        flags = self._smbfs_driver.parse_credentials(fake_smb_options)
        self.assertEqual(expected_flags, flags)
    @mock.patch.object(smbfs.SmbfsDriver, '_get_local_volume_path_template')
    @mock.patch.object(smbfs.SmbfsDriver, '_lookup_local_volume_path')
    @mock.patch.object(smbfs.SmbfsDriver, 'get_volume_format')
    def _test_get_volume_path(self, mock_get_volume_format, mock_lookup_volume,
                              mock_get_path_template, volume_exists=True,
                              volume_format='raw'):
        """local_path reuses an existing image path when one is found;
        otherwise it derives the path from the volume format (vhd/vhdx add
        an extension) and only then consults get_volume_format."""
        drv = self._smbfs_driver
        mock_get_path_template.return_value = self._FAKE_VOLUME_PATH
        expected_vol_path = self._FAKE_VOLUME_PATH
        if volume_format in (drv._DISK_FORMAT_VHD, drv._DISK_FORMAT_VHDX):
            expected_vol_path += '.' + volume_format

        mock_lookup_volume.return_value = (
            expected_vol_path if volume_exists else None)
        mock_get_volume_format.return_value = volume_format

        ret_val = drv.local_path(self._FAKE_VOLUME)

        if volume_exists:
            self.assertFalse(mock_get_volume_format.called)
        else:
            mock_get_volume_format.assert_called_once_with(self._FAKE_VOLUME)
        self.assertEqual(expected_vol_path, ret_val)

    def test_get_existing_volume_path(self):
        self._test_get_volume_path()

    def test_get_new_raw_volume_path(self):
        self._test_get_volume_path(volume_exists=False)

    def test_get_new_vhd_volume_path(self):
        self._test_get_volume_path(volume_exists=False, volume_format='vhd')
    @mock.patch.object(smbfs.SmbfsDriver, '_local_volume_dir')
    def test_get_local_volume_path_template(self, mock_get_local_dir):
        """The path template is <mount point>/<volume name>."""
        mock_get_local_dir.return_value = self._FAKE_MNT_POINT
        ret_val = self._smbfs_driver._get_local_volume_path_template(
            self._FAKE_VOLUME)
        self.assertEqual(self._FAKE_VOLUME_PATH, ret_val)
    @mock.patch('os.path.exists')
    def test_lookup_local_volume_path(self, mock_exists):
        """_lookup_local_volume_path probes the '', '.vhd' and '.vhdx'
        suffixes and returns the first path that exists."""
        expected_path = self._FAKE_VOLUME_PATH + '.vhdx'
        mock_exists.side_effect = lambda x: x == expected_path

        ret_val = self._smbfs_driver._lookup_local_volume_path(
            self._FAKE_VOLUME_PATH)

        possible_paths = [self._FAKE_VOLUME_PATH + ext
                          for ext in ('', '.vhd', '.vhdx')]
        mock_exists.assert_has_calls(
            [mock.call(path) for path in possible_paths])
        self.assertEqual(expected_path, ret_val)
    @mock.patch.object(smbfs.SmbfsDriver, '_get_local_volume_path_template')
    @mock.patch.object(smbfs.SmbfsDriver, '_lookup_local_volume_path')
    @mock.patch.object(smbfs.SmbfsDriver, '_qemu_img_info')
    @mock.patch.object(smbfs.SmbfsDriver, '_get_volume_format_spec')
    def _mock_get_volume_format(self, mock_get_format_spec, mock_qemu_img_info,
                                mock_lookup_volume, mock_get_path_template,
                                qemu_format=False, volume_format='raw',
                                volume_exists=True):
        """get_volume_format asks qemu-img for existing images and falls
        back to the configured format spec for new volumes; qemu_format=True
        maps 'vhd' to qemu's legacy 'vpc' name."""
        mock_get_path_template.return_value = self._FAKE_VOLUME_PATH
        mock_lookup_volume.return_value = (
            self._FAKE_VOLUME_PATH if volume_exists else None)
        mock_qemu_img_info.return_value.file_format = volume_format
        mock_get_format_spec.return_value = volume_format

        ret_val = self._smbfs_driver.get_volume_format(self._FAKE_VOLUME,
                                                       qemu_format)

        if volume_exists:
            mock_qemu_img_info.assert_called_once_with(self._FAKE_VOLUME_PATH,
                                                       self._FAKE_VOLUME_NAME)
            self.assertFalse(mock_get_format_spec.called)
        else:
            mock_get_format_spec.assert_called_once_with(self._FAKE_VOLUME)
            self.assertFalse(mock_qemu_img_info.called)
        return ret_val

    def test_get_existing_raw_volume_format(self):
        fmt = self._mock_get_volume_format()
        self.assertEqual('raw', fmt)

    def test_get_new_vhd_volume_format(self):
        expected_fmt = 'vhd'
        fmt = self._mock_get_volume_format(volume_format=expected_fmt,
                                           volume_exists=False)
        self.assertEqual(expected_fmt, fmt)

    def test_get_new_vhd_legacy_volume_format(self):
        img_fmt = 'vhd'
        expected_fmt = 'vpc'
        ret_val = self._mock_get_volume_format(volume_format=img_fmt,
                                               volume_exists=False,
                                               qemu_format=True)
        self.assertEqual(expected_fmt, ret_val)
    def test_initialize_connection(self):
        """initialize_connection exposes the share export, image format,
        volume name and mount options so the consumer can mount the share."""
        self._smbfs_driver.get_active_image_from_info = mock.Mock(
            return_value=self._FAKE_VOLUME_NAME)
        self._smbfs_driver._get_mount_point_base = mock.Mock(
            return_value=self._FAKE_MNT_BASE)
        self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}
        self._smbfs_driver._qemu_img_info = mock.Mock(
            return_value=mock.Mock(file_format='raw'))

        fake_data = {'export': self._FAKE_SHARE,
                     'format': 'raw',
                     'name': self._FAKE_VOLUME_NAME,
                     'options': self._FAKE_SHARE_OPTS}
        expected = {
            'driver_volume_type': 'smbfs',
            'data': fake_data,
            'mount_point_base': self._FAKE_MNT_BASE}
        ret_val = self._smbfs_driver.initialize_connection(
            self._FAKE_VOLUME, None)
        self.assertEqual(expected, ret_val)
    def _test_extend_volume(self, extend_failed=False, image_format='raw'):
        """extend_volume resizes raw images in place; vhdx/legacy-vhd images
        are converted to a raw temp file, resized, then converted back. A
        post-resize size mismatch raises ExtendVolumeError."""
        drv = self._smbfs_driver

        drv.local_path = mock.Mock(
            return_value=self._FAKE_VOLUME_PATH)
        drv._check_extend_volume_support = mock.Mock(
            return_value=True)
        drv._is_file_size_equal = mock.Mock(
            return_value=not extend_failed)
        drv._qemu_img_info = mock.Mock(
            return_value=mock.Mock(file_format=image_format))
        drv._delete = mock.Mock()

        with mock.patch.object(image_utils, 'resize_image') as fake_resize, \
                mock.patch.object(image_utils, 'convert_image') as \
                fake_convert:
            if extend_failed:
                self.assertRaises(exception.ExtendVolumeError,
                                  drv.extend_volume,
                                  self._FAKE_VOLUME, mock.sentinel.new_size)
            else:
                drv.extend_volume(self._FAKE_VOLUME, mock.sentinel.new_size)
                if image_format in (drv._DISK_FORMAT_VHDX,
                                    drv._DISK_FORMAT_VHD_LEGACY):
                    fake_tmp_path = self._FAKE_VOLUME_PATH + '.tmp'
                    fake_convert.assert_any_call(self._FAKE_VOLUME_PATH,
                                                 fake_tmp_path, 'raw')
                    fake_resize.assert_called_once_with(
                        fake_tmp_path, mock.sentinel.new_size)
                    fake_convert.assert_any_call(fake_tmp_path,
                                                 self._FAKE_VOLUME_PATH,
                                                 image_format)
                else:
                    fake_resize.assert_called_once_with(
                        self._FAKE_VOLUME_PATH, mock.sentinel.new_size)

    @requires_allocation_data_update(expected_size=mock.sentinel.new_size)
    def test_extend_volume(self):
        self._test_extend_volume()

    def test_extend_volume_failed(self):
        self._test_extend_volume(extend_failed=True)

    @requires_allocation_data_update(expected_size=mock.sentinel.new_size)
    def test_extend_vhd_volume(self):
        self._test_extend_volume(image_format='vpc')
    def _test_check_extend_support(self, has_snapshots=False,
                                   is_eligible=True):
        """Exercise _check_extend_volume_support.

        A volume with snapshots cannot be extended (InvalidVolume); a share
        without enough free space raises ExtendVolumeError; otherwise the
        share eligibility check must run exactly once.
        """
        self._smbfs_driver.local_path = mock.Mock(
            return_value=self._FAKE_VOLUME_PATH)
        if has_snapshots:
            # Active image differing from the volume path implies snapshots.
            active_file_path = self._FAKE_SNAPSHOT_PATH
        else:
            active_file_path = self._FAKE_VOLUME_PATH
        self._smbfs_driver.get_active_image_from_info = mock.Mock(
            return_value=active_file_path)
        self._smbfs_driver._is_share_eligible = mock.Mock(
            return_value=is_eligible)
        if has_snapshots:
            self.assertRaises(exception.InvalidVolume,
                              self._smbfs_driver._check_extend_volume_support,
                              self._FAKE_VOLUME, 2)
        elif not is_eligible:
            self.assertRaises(exception.ExtendVolumeError,
                              self._smbfs_driver._check_extend_volume_support,
                              self._FAKE_VOLUME, 2)
        else:
            self._smbfs_driver._check_extend_volume_support(
                self._FAKE_VOLUME, 2)
            # Eligibility is checked for the size delta (presumably new size
            # 2 minus the current 1 GB volume size) -- TODO confirm.
            self._smbfs_driver._is_share_eligible.assert_called_once_with(
                self._FAKE_SHARE, 1)
    def test_check_extend_support(self):
        """Happy path: no snapshots, eligible share."""
        self._test_check_extend_support()
    def test_check_extend_volume_with_snapshots(self):
        """Snapshots present: extend support check must fail."""
        self._test_check_extend_support(has_snapshots=True)
    def test_check_extend_volume_uneligible_share(self):
        """Ineligible share: extend support check must fail."""
        self._test_check_extend_support(is_eligible=False)
    @requires_allocation_data_update(expected_size=_FAKE_VOLUME['size'])
    @mock.patch.object(remotefs.RemoteFSSnapDriver, 'create_volume')
    def test_create_volume_base(self, mock_create_volume):
        """create_volume delegates to the RemoteFS base implementation."""
        self._smbfs_driver.create_volume(self._FAKE_VOLUME)
        mock_create_volume.assert_called_once_with(self._FAKE_VOLUME)
    @requires_allocation_data_update(expected_size=_FAKE_VOLUME['size'])
    @mock.patch.object(smbfs.SmbfsDriver,
                       '_create_volume_from_snapshot')
    def test_create_volume_from_snapshot(self, mock_create_volume):
        """create_volume_from_snapshot delegates to the private helper."""
        self._smbfs_driver.create_volume_from_snapshot(self._FAKE_VOLUME,
                                                       self._FAKE_SNAPSHOT)
        mock_create_volume.assert_called_once_with(self._FAKE_VOLUME,
                                                   self._FAKE_SNAPSHOT)
    @requires_allocation_data_update(expected_size=_FAKE_VOLUME['size'])
    @mock.patch.object(smbfs.SmbfsDriver, '_create_cloned_volume')
    def test_create_cloned_volume(self, mock_create_volume):
        """create_cloned_volume delegates to the private helper."""
        self._smbfs_driver.create_cloned_volume(self._FAKE_VOLUME,
                                                mock.sentinel.src_vol)
        mock_create_volume.assert_called_once_with(self._FAKE_VOLUME,
                                                   mock.sentinel.src_vol)
def test_create_volume_from_in_use_snapshot(self):
fake_snapshot = {'status': 'in-use'}
self.assertRaises(
exception.InvalidSnapshot,
self._smbfs_driver.create_volume_from_snapshot,
self._FAKE_VOLUME, fake_snapshot)
    def test_copy_volume_from_snapshot(self):
        """_copy_volume_from_snapshot converts the snapshot's backing file to
        the new volume path and extends the volume to the requested size."""
        drv = self._smbfs_driver
        fake_volume_info = {self._FAKE_SNAPSHOT_ID: 'fake_snapshot_file_name'}
        fake_img_info = mock.MagicMock()
        fake_img_info.backing_file = self._FAKE_VOLUME_NAME
        drv.get_volume_format = mock.Mock(
            return_value='raw')
        drv._local_path_volume_info = mock.Mock(
            return_value=self._FAKE_VOLUME_PATH + '.info')
        drv._local_volume_dir = mock.Mock(
            return_value=self._FAKE_MNT_POINT)
        drv._read_info_file = mock.Mock(
            return_value=fake_volume_info)
        drv._qemu_img_info = mock.Mock(
            return_value=fake_img_info)
        # Destination path is made to differ from the source so the
        # conversion arguments can be asserted unambiguously below.
        drv.local_path = mock.Mock(
            return_value=self._FAKE_VOLUME_PATH[:-1])
        drv._extend_volume = mock.Mock()
        drv._set_rw_permissions_for_all = mock.Mock()
        with mock.patch.object(image_utils, 'convert_image') as (
                fake_convert_image):
            drv._copy_volume_from_snapshot(
                self._FAKE_SNAPSHOT, self._FAKE_VOLUME,
                self._FAKE_VOLUME['size'])
            drv._extend_volume.assert_called_once_with(
                self._FAKE_VOLUME, self._FAKE_VOLUME['size'])
            fake_convert_image.assert_called_once_with(
                self._FAKE_VOLUME_PATH, self._FAKE_VOLUME_PATH[:-1], 'raw')
    def test_ensure_mounted(self):
        """_ensure_share_mounted mounts with the configured share options."""
        self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}
        self._smbfs_driver._ensure_share_mounted(self._FAKE_SHARE)
        self._smbfs_driver._remotefsclient.mount.assert_called_once_with(
            self._FAKE_SHARE, self._FAKE_SHARE_OPTS.split())
    def _test_copy_image_to_volume(self, wrong_size_after_fetch=False):
        """Exercise copy_image_to_volume.

        The image is fetched in the volume's format, then the volume is
        extended to its nominal size.  If the fetched image's virtual size
        exceeds the volume size, ImageUnacceptable is expected.
        """
        drv = self._smbfs_driver
        # Volume size is in GB; shift converts to bytes.
        vol_size_bytes = self._FAKE_VOLUME['size'] << 30
        fake_img_info = mock.MagicMock()
        if wrong_size_after_fetch:
            # Fetched image reports twice the volume size -> must be rejected.
            fake_img_info.virtual_size = 2 * vol_size_bytes
        else:
            fake_img_info.virtual_size = vol_size_bytes
        drv.get_volume_format = mock.Mock(
            return_value=drv._DISK_FORMAT_VHDX)
        drv.local_path = mock.Mock(
            return_value=self._FAKE_VOLUME_PATH)
        drv._do_extend_volume = mock.Mock()
        drv.configuration = mock.MagicMock()
        drv.configuration.volume_dd_blocksize = (
            mock.sentinel.block_size)
        with mock.patch.object(image_utils, 'fetch_to_volume_format') as \
                fake_fetch, mock.patch.object(image_utils, 'qemu_img_info') as \
                fake_qemu_img_info:
            fake_qemu_img_info.return_value = fake_img_info
            if wrong_size_after_fetch:
                self.assertRaises(
                    exception.ImageUnacceptable,
                    drv.copy_image_to_volume,
                    mock.sentinel.context, self._FAKE_VOLUME,
                    mock.sentinel.image_service,
                    mock.sentinel.image_id)
            else:
                drv.copy_image_to_volume(
                    mock.sentinel.context, self._FAKE_VOLUME,
                    mock.sentinel.image_service,
                    mock.sentinel.image_id)
                fake_fetch.assert_called_once_with(
                    mock.sentinel.context, mock.sentinel.image_service,
                    mock.sentinel.image_id, self._FAKE_VOLUME_PATH,
                    drv._DISK_FORMAT_VHDX,
                    mock.sentinel.block_size)
                drv._do_extend_volume.assert_called_once_with(
                    self._FAKE_VOLUME_PATH,
                    self._FAKE_VOLUME['size'],
                    self._FAKE_VOLUME['name'])
    def test_copy_image_to_volume(self):
        """Image matching the volume size is fetched and extended."""
        self._test_copy_image_to_volume()
    def test_copy_image_to_volume_wrong_size_after_fetch(self):
        """Oversized fetched image raises ImageUnacceptable."""
        self._test_copy_image_to_volume(wrong_size_after_fetch=True)
    def test_get_capacity_info(self):
        """_get_capacity_info derives (total, free, allocated) bytes from
        df-style output plus the driver's allocation records."""
        fake_block_size = 4096.0
        fake_total_blocks = 1024
        fake_avail_blocks = 512
        # Simulated command output: "<block size> <total> <available>".
        fake_df = ('%s %s %s' % (fake_block_size, fake_total_blocks,
                                 fake_avail_blocks), None)
        self._smbfs_driver._get_mount_point_for_share = mock.Mock(
            return_value=self._FAKE_MNT_POINT)
        self._smbfs_driver._get_total_allocated = mock.Mock(
            return_value=self._FAKE_TOTAL_ALLOCATED)
        self._smbfs_driver._execute.return_value = fake_df
        ret_val = self._smbfs_driver._get_capacity_info(self._FAKE_SHARE)
        expected = (fake_block_size * fake_total_blocks,
                    fake_block_size * fake_avail_blocks,
                    self._FAKE_TOTAL_ALLOCATED)
        self.assertEqual(expected, ret_val)
| |
'''Delta 5 hardware interface layer.'''
import smbus # For i2c comms
import gevent # For threads and timing
from gevent.lock import BoundedSemaphore # To limit i2c calls
from Node import Node
from BaseHardwareInterface import BaseHardwareInterface
# I2C command codes shared with the node (Arduino) firmware.
READ_ADDRESS = 0x00 # Gets i2c address of arduino (1 byte)
READ_FREQUENCY = 0x03 # Gets channel frequency (2 byte)
READ_LAP_STATS = 0x05
READ_CALIBRATION_THRESHOLD = 0x15
READ_CALIBRATION_MODE = 0x16
READ_CALIBRATION_OFFSET = 0x17
READ_TRIGGER_THRESHOLD = 0x18
READ_FILTER_RATIO = 0x19
WRITE_FREQUENCY = 0x51 # Sets frequency (2 byte)
WRITE_CALIBRATION_THRESHOLD = 0x65
WRITE_CALIBRATION_MODE = 0x66
WRITE_CALIBRATION_OFFSET = 0x67
WRITE_TRIGGER_THRESHOLD = 0x68
WRITE_FILTER_RATIO = 0x69
# Timing/retry tuning (seconds).
UPDATE_SLEEP = 0.1 # Main update loop delay
I2C_CHILL_TIME = 0.075 # Delay after i2c read/write
I2C_RETRY_COUNT = 5 # Limit of i2c retries
def unpack_8(data):
    '''Returns the full variable from a 1 byte input.'''
    value = data[0]
    return value
def pack_8(data):
    '''Returns a single-element array holding the full variable.'''
    packed = [data]
    return packed
def unpack_16(data):
    '''Returns the full variable from 2 bytes input (big-endian).'''
    high, low = data[0], data[1]
    return (high << 8) | low
def pack_16(data):
    '''Returns a 2 part array from the full variable (big-endian).'''
    return [data >> 8, data & 0xFF]
def unpack_32(data):
    '''Returns the full variable from 4 bytes input (big-endian).'''
    value = 0
    for byte in (data[0], data[1], data[2], data[3]):
        value = (value << 8) | byte
    return value
def validate_checksum(data):
    '''Returns True if the trailing byte matches the 8-bit sum of the rest.'''
    if data is None:
        return False
    payload, expected = data[:-1], data[-1]
    return (sum(payload) & 0xFF) == expected
class Delta5Interface(BaseHardwareInterface):
    '''Talks to Delta 5 timing nodes over i2c: discovers nodes, reads and
    writes settings with checksums/retries, and polls lap statistics.'''
    def __init__(self):
        BaseHardwareInterface.__init__(self)
        self.update_thread = None # Thread for running the main update loop
        self.pass_record_callback = None # Function added in server.py
        self.hardware_log_callback = None # Function added in server.py
        self.i2c = smbus.SMBus(1) # Start i2c bus
        self.semaphore = BoundedSemaphore(1) # Limits i2c to 1 read/write at a time
        self.i2c_timestamp = -1
        # Scans all i2c_addrs to populate nodes array
        self.nodes = [] # Array to hold each node object
        i2c_addrs = [8, 10, 12, 14, 16, 18, 20, 22] # Software limited to 8 nodes
        for index, addr in enumerate(i2c_addrs):
            try:
                # A successful 1-byte read means a node answers at this addr.
                self.i2c.read_i2c_block_data(addr, READ_ADDRESS, 1)
                print "Node FOUND at address {0}".format(addr)
                gevent.sleep(I2C_CHILL_TIME)
                node = Node() # New node instance
                node.i2c_addr = addr # Set current loop i2c_addr
                node.index = index
                self.nodes.append(node) # Add new node to Delta5Interface
            except IOError as err:
                print "No node at address {0}".format(addr)
                gevent.sleep(I2C_CHILL_TIME)
        # NOTE(review): settings are read only from node 0 and pushed to the
        # others; if no node answered at the first address the 'else' branch
        # hits unset attributes (AttributeError) -- confirm node 0 is always
        # present.
        for node in self.nodes:
            node.frequency = self.get_value_16(node, READ_FREQUENCY)
            if node.index == 0:
                self.calibration_threshold = self.get_value_16(node,
                    READ_CALIBRATION_THRESHOLD)
                self.calibration_offset = self.get_value_16(node,
                    READ_CALIBRATION_OFFSET)
                self.trigger_threshold = self.get_value_16(node,
                    READ_TRIGGER_THRESHOLD)
                self.filter_ratio = self.get_value_8(node,
                    READ_FILTER_RATIO)
            else:
                self.set_calibration_threshold(node.index, self.calibration_threshold)
                self.set_calibration_offset(node.index, self.calibration_offset)
                self.set_trigger_threshold(node.index, self.trigger_threshold)
    #
    # Class Functions
    #
    def log(self, message):
        '''Hardware log of messages.'''
        if callable(self.hardware_log_callback):
            string = 'Delta 5 Log: {0}'.format(message)
            self.hardware_log_callback(string)
    #
    # Update Loop
    #
    def start(self):
        # Spawns the polling greenlet once; subsequent calls are no-ops.
        if self.update_thread is None:
            self.log('Starting background thread.')
            self.update_thread = gevent.spawn(self.update_loop)
    def update_loop(self):
        # Polls every node forever, pausing UPDATE_SLEEP between passes.
        while True:
            self.update()
            gevent.sleep(UPDATE_SLEEP)
    def update(self):
        # Reads the 17-byte lap-stats record from each node and fires the
        # pass-record callback when the node reports a new lap id.
        for node in self.nodes:
            data = self.read_block(node.i2c_addr, READ_LAP_STATS, 17)
            if data != None:
                lap_id = data[0]
                ms_since_lap = unpack_32(data[1:])
                node.current_rssi = unpack_16(data[5:])
                node.trigger_rssi = unpack_16(data[7:])
                node.peak_rssi_raw = unpack_16(data[9:])
                node.peak_rssi = unpack_16(data[11:])
                node.loop_time = unpack_32(data[13:])
                if lap_id != node.last_lap_id:
                    # last_lap_id == -1 marks the first read after startup;
                    # that lap is deliberately not reported.
                    if node.last_lap_id != -1 and callable(self.pass_record_callback):
                        self.pass_record_callback(node, ms_since_lap)
                    node.last_lap_id = lap_id
    #
    # I2C Common Functions
    #
    def i2c_sleep(self):
        # Enforces at least I2C_CHILL_TIME between bus transactions.
        if self.i2c_timestamp == -1:
            return
        time_passed = self.milliseconds() - self.i2c_timestamp
        time_remaining = (I2C_CHILL_TIME * 1000) - time_passed
        if (time_remaining > 0):
            # print("i2c sleep {0}".format(time_remaining))
            gevent.sleep(time_remaining / 1000.0)
    def read_block(self, addr, offset, size):
        '''Read i2c data given an address, code, and data size.'''
        # Reads size+1 bytes (payload plus trailing checksum); retries up to
        # I2C_RETRY_COUNT times; on success returns the payload only.
        # NOTE(review): if every attempt fails the checksum, the last raw
        # buffer (checksum still attached) is returned instead of None --
        # confirm callers tolerate this.
        success = False
        retry_count = 0
        data = None
        while success is False and retry_count < I2C_RETRY_COUNT:
            try:
                with self.semaphore: # Wait if i2c comms is already in progress
                    self.i2c_sleep()
                    data = self.i2c.read_i2c_block_data(addr, offset, size + 1)
                    self.i2c_timestamp = self.milliseconds()
                    if validate_checksum(data):
                        success = True
                        data = data[:-1]
                    else:
                        # self.log('Invalid Checksum ({0}): {1}'.format(retry_count, data))
                        retry_count = retry_count + 1
            except IOError as err:
                self.log(err)
                self.i2c_timestamp = self.milliseconds()
                retry_count = retry_count + 1
        return data
    def write_block(self, addr, offset, data):
        '''Write i2c data given an address, code, and data.'''
        # NOTE(review): 'data_with_checksum = data' aliases the caller's
        # list, so offset and checksum are appended to it in place.  This is
        # harmless for the pack_8/pack_16 temporaries used today, but a copy
        # (list(data)) would be safer.
        success = False
        retry_count = 0
        data_with_checksum = data
        data_with_checksum.append(offset)
        data_with_checksum.append(sum(data_with_checksum) & 0xFF)
        while success is False and retry_count < I2C_RETRY_COUNT:
            try:
                with self.semaphore: # Wait if i2c comms is already in progress
                    self.i2c_sleep()
                    self.i2c.write_i2c_block_data(addr, offset, data_with_checksum)
                    self.i2c_timestamp = self.milliseconds()
                    success = True
            except IOError as err:
                self.log(err)
                self.i2c_timestamp = self.milliseconds()
                retry_count = retry_count + 1
        return success
    #
    # Internal helper fucntions for setting single values
    #
    def get_value_8(self, node, command):
        # Reads a one-byte value; None when the i2c read fails.
        data = self.read_block(node.i2c_addr, command, 1)
        result = None
        if data != None:
            result = unpack_8(data)
        return result
    def get_value_16(self, node, command):
        # Reads a two-byte big-endian value; None when the i2c read fails.
        data = self.read_block(node.i2c_addr, command, 2)
        result = None
        if data != None:
            result = unpack_16(data)
        return result
    def set_and_validate_value_8(self, node, write_command, read_command, in_value):
        # Writes a byte then reads it back until it matches (bounded by
        # I2C_RETRY_COUNT); reports in_value when read-back returned nothing.
        success = False
        retry_count = 0
        out_value = None
        while success is False and retry_count < I2C_RETRY_COUNT:
            self.write_block(node.i2c_addr, write_command, pack_8(in_value))
            out_value = self.get_value_8(node, read_command)
            if out_value == in_value:
                success = True
            else:
                retry_count = retry_count + 1
                self.log('Value Not Set ({0})'.format(retry_count))
        if out_value == None:
            out_value = in_value
        return out_value
    def set_and_validate_value_16(self, node, write_command, read_command, in_value):
        # 16-bit variant of set_and_validate_value_8.
        success = False
        retry_count = 0
        out_value = None
        while success is False and retry_count < I2C_RETRY_COUNT:
            self.write_block(node.i2c_addr, write_command, pack_16(in_value))
            out_value = self.get_value_16(node, read_command)
            if out_value == in_value:
                success = True
            else:
                retry_count = retry_count + 1
                self.log('Value Not Set ({0})'.format(retry_count))
        if out_value == None:
            out_value = in_value
        return out_value
    #
    # External functions for setting data
    #
    def set_frequency(self, node_index, frequency):
        # Tunes one node's receiver and caches the confirmed frequency.
        node = self.nodes[node_index]
        node.frequency = self.set_and_validate_value_16(node,
            WRITE_FREQUENCY,
            READ_FREQUENCY,
            frequency)
    def set_calibration_threshold(self, node_index, threshold):
        node = self.nodes[node_index]
        node.calibration_threshold = self.set_and_validate_value_16(node,
            WRITE_CALIBRATION_THRESHOLD,
            READ_CALIBRATION_THRESHOLD,
            threshold)
    def set_calibration_threshold_global(self, threshold):
        # Applies the threshold to every node and remembers it globally.
        self.calibration_threshold = threshold
        for node in self.nodes:
            self.set_calibration_threshold(node.index, threshold)
        return self.calibration_threshold
    def set_calibration_mode(self, node_index, calibration_mode):
        node = self.nodes[node_index]
        self.set_and_validate_value_8(node,
            WRITE_CALIBRATION_MODE,
            READ_CALIBRATION_MODE,
            calibration_mode)
    def enable_calibration_mode(self):
        # Puts every node into calibration mode.
        for node in self.nodes:
            self.set_calibration_mode(node.index, True);
    def set_calibration_offset(self, node_index, offset):
        node = self.nodes[node_index]
        node.calibration_offset = self.set_and_validate_value_16(node,
            WRITE_CALIBRATION_OFFSET,
            READ_CALIBRATION_OFFSET,
            offset)
    def set_calibration_offset_global(self, offset):
        self.calibration_offset = offset
        for node in self.nodes:
            self.set_calibration_offset(node.index, offset)
        return self.calibration_offset
    def set_trigger_threshold(self, node_index, threshold):
        node = self.nodes[node_index]
        node.trigger_threshold = self.set_and_validate_value_16(node,
            WRITE_TRIGGER_THRESHOLD,
            READ_TRIGGER_THRESHOLD,
            threshold)
    def set_trigger_threshold_global(self, threshold):
        self.trigger_threshold = threshold
        for node in self.nodes:
            self.set_trigger_threshold(node.index, threshold)
        return self.trigger_threshold
    def set_filter_ratio(self, node_index, filter_ration):
        # NOTE(review): 'filter_ration' (parameter and node attribute) looks
        # like a typo for 'filter_ratio'; renaming would touch external
        # readers of node.filter_ration, so it is only flagged here.
        node = self.nodes[node_index]
        node.filter_ration = self.set_and_validate_value_8(node,
            WRITE_FILTER_RATIO,
            READ_FILTER_RATIO,
            filter_ration)
    def set_filter_ratio_global(self, filter_ratio):
        self.filter_ratio = filter_ratio
        for node in self.nodes:
            self.set_filter_ratio(node.index, filter_ratio)
        return self.filter_ratio
    def intf_simulate_lap(self, node_index):
        # Injects fixed dummy stats and reports a pass 100 ms in the past.
        node = self.nodes[node_index]
        node.current_rssi = 11
        node.trigger_rssi = 22
        node.peak_rssi_raw = 33
        node.peak_rssi = 44
        node.loop_time = 55
        self.pass_record_callback(node, 100)
def get_hardware_interface():
    '''Returns the delta 5 interface object.'''
    interface = Delta5Interface()
    return interface
| |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Allow this unittest to access _members.
# pylint: disable=W0212
import json
import logging
import unittest
from appengine_module.test_results.model import jsonresults
from appengine_module.test_results.model.jsonresults import (
JsonResults,
TEXT,
FAIL,
LEAK,
PASS,
TIMEOUT,
IMAGE,
NO_DATA,
IMAGE_PLUS_TEXT,
CRASH,
NOTRUN,
UNKNOWN,
TestFile,
)
from appengine_module.test_results.handlers import master_config
from google.appengine.ext import testbed
FULL_RESULT_EXAMPLE = """ADD_RESULTS({
"seconds_since_epoch": 1368146629,
"tests": {
"media": {
"encrypted-media": {
"encrypted-media-v2-events.html": {
"bugs": ["crbug.com/1234"],
"expected": "TIMEOUT",
"actual": "TIMEOUT",
"time": 6.0
},
"encrypted-media-v2-syntax.html": {
"expected": "TIMEOUT",
"actual": "TIMEOUT"
}
},
"progress-events-generated-correctly.html": {
"expected": "PASS FAIL IMAGE TIMEOUT CRASH MISSING",
"actual": "TIMEOUT",
"time": 6.0
},
"W3C": {
"audio": {
"src": {
"src_removal_does_not_trigger_loadstart.html": {
"expected": "PASS",
"actual": "PASS",
"time": 3.5
}
}
},
"video": {
"src": {
"src_removal_does_not_trigger_loadstart.html": {
"expected": "PASS",
"actual": "PASS",
"time": 1.1
},
"notrun.html": {
"expected": "NOTRUN",
"actual": "SKIP",
"time": 1.1
}
}
}
},
"unexpected-skip.html": {
"expected": "PASS",
"actual": "SKIP"
},
"unexpected-fail.html": {
"expected": "PASS",
"actual": "FAIL"
},
"flaky-failed.html": {
"expected": "PASS FAIL",
"actual": "FAIL"
},
"media-document-audio-repaint.html": {
"expected": "IMAGE",
"actual": "IMAGE",
"time": 0.1
},
"unexpected-leak.html": {
"expected": "PASS",
"actual": "LEAK"
},
"unexpected-flake.html": {
"expected": "PASS",
"actual": "FAIL PASS"
},
"unexpected-unexpected.html": {
"expected": "PASS",
"actual": "NOT_A_REAL_RESULT_TYPE"
}
}
},
"skipped": 2,
"num_regressions": 0,
"build_number": "3",
"interrupted": false,
"layout_tests_dir": "\/tmp\/cr\/src\/third_party\/WebKit\/LayoutTests",
"version": 3,
"builder_name": "Webkit",
"num_passes": 10,
"pixel_tests_enabled": true,
"blink_revision": "1234",
"has_pretty_patch": true,
"fixable": 25,
"num_flaky": 0,
"num_failures_by_type": {
"CRASH": 3,
"MISSING": 0,
"TEXT": 3,
"IMAGE": 1,
"PASS": 10,
"SKIP": 2,
"TIMEOUT": 16,
"IMAGE+TEXT": 0,
"FAIL": 2,
"AUDIO": 0,
"LEAK": 1
},
"has_wdiff": true,
"chromium_revision": "5678"
});"""
# Aggregate-file template in the legacy (version 3) layout, which carried
# allFixableCount/fixableCounts instead of num_failures_by_type.
JSON_RESULTS_OLD_TEMPLATE = (
    '{"[BUILDER_NAME]":{'
    '"allFixableCount":[[TESTDATA_COUNT]],'
    '"blinkRevision":[[TESTDATA_WEBKITREVISION]],'
    '"buildNumbers":[[TESTDATA_BUILDNUMBERS]],'
    '"chromeRevision":[[TESTDATA_CHROMEREVISION]],'
    '"failure_map": %s,'
    '"fixableCount":[[TESTDATA_COUNT]],'
    '"fixableCounts":[[TESTDATA_COUNTS]],'
    '"secondsSinceEpoch":[[TESTDATA_TIMES]],'
    '"tests":{[TESTDATA_TESTS]}'
    '},'
    '"version":[VERSION]'
    '}') % json.dumps(jsonresults.CHAR_TO_FAILURE)
# Per-failure-type count lists keyed by every failure NAME known to the model.
JSON_RESULTS_COUNTS = ('{"' + '":[[TESTDATA_COUNT]],"'.join(
    [char for char in jsonresults.CHAR_TO_FAILURE.values()])
    + '":[[TESTDATA_COUNT]]}')
# Aggregate-file template in the current (version 4) layout.
JSON_RESULTS_TEMPLATE = (
    '{"[BUILDER_NAME]":{'
    '"blinkRevision":[[TESTDATA_WEBKITREVISION]],'
    '"buildNumbers":[[TESTDATA_BUILDNUMBERS]],'
    '"chromeRevision":[[TESTDATA_CHROMEREVISION]],'
    '"failure_map": %s,'
    '"num_failures_by_type":%s,'
    '"secondsSinceEpoch":[[TESTDATA_TIMES]],'
    '"tests":{[TESTDATA_TESTS]}'
    '},'
    '"version":[VERSION]'
    '}') % (json.dumps(jsonresults.CHAR_TO_FAILURE), JSON_RESULTS_COUNTS)
# Per-failure-type count lists keyed by the failure CHARACTER codes.
JSON_RESULTS_COUNTS_TEMPLATE = '{"' + '":[TESTDATA],"'.join(
    [char for char in jsonresults.CHAR_TO_FAILURE]) + '":[TESTDATA]}'
# Minimal shape returned by JsonResults.get_test_list.
JSON_RESULTS_TEST_LIST_TEMPLATE = '{"Webkit":{"tests":{[TESTDATA_TESTS]}}}'
class MockFile(object):
  """In-memory stand-in for a stored results file; save() just records data."""

  def __init__(self, name='results.json', data=''):
    self.master = 'MockMasterName'
    self.builder = 'MockBuilderName'
    self.test_type = 'MockTestType'
    self.build_number = 0
    self.name = name
    self.data = data

  @property
  def file_information(self):
    """Human-readable identification of this mock file."""
    return ("master: {}, builder: {}, test_type: {}, build_number: {!r}, "
            "name: {}.".format(self.master, self.builder, self.test_type,
                               self.build_number, self.name))

  def save(self, data):
    """Record the written data; always reports success."""
    self.data = data
    return True
class JsonResultsTest(unittest.TestCase):
  def setUp(self):
    """Silence sub-ERROR logging for the duration of each test."""
    self._builder = "Webkit"
    self.old_log_level = logging.root.level
    logging.root.setLevel(logging.ERROR)
  def tearDown(self):
    """Restore the log level saved in setUp."""
    logging.root.setLevel(self.old_log_level)
# Use this to get better error messages than just string compare gives.
def assert_json_equal(self, a, b):
self.maxDiff = None
a = json.loads(a) if isinstance(a, str) else a
b = json.loads(b) if isinstance(b, str) else b
self.assertEqual(a, b)
def test_strip_prefix_suffix(self):
json_string = "['contents']"
stripped = jsonresults.JsonResults._strip_prefix_suffix(
"ADD_RESULTS(" + json_string + ");")
self.assertEqual(stripped, json_string)
self.assertEqual(JsonResults._strip_prefix_suffix(json_string), json_string)
  @staticmethod
  def _make_test_json(test_data, json_string=JSON_RESULTS_TEMPLATE,
                      builder_name="Webkit"):
    """Render an aggregate-JSON string from a template.

    test_data is {"builds": [...], "tests": {...}} plus optional "version";
    returns "" when test_data, its builds, or its tests are empty.
    """
    if not test_data:
      return ""
    builds = test_data["builds"]
    tests = test_data["tests"]
    if not builds or not tests:
      return ""
    # Derive per-build synthetic numbers/revisions/timestamps from the
    # build id so each build's values are distinct but predictable.
    counts = []
    build_numbers = []
    webkit_revision = []
    chrome_revision = []
    times = []
    for build in builds:
      counts.append(JSON_RESULTS_COUNTS_TEMPLATE.replace("[TESTDATA]", build))
      build_numbers.append("1000%s" % build)
      webkit_revision.append("2000%s" % build)
      chrome_revision.append("3000%s" % build)
      times.append("100000%s000" % build)
    json_string = json_string.replace("[BUILDER_NAME]", builder_name)
    json_string = json_string.replace("[TESTDATA_COUNTS]", ",".join(counts))
    json_string = json_string.replace("[TESTDATA_COUNT]", ",".join(builds))
    json_string = json_string.replace(
        "[TESTDATA_BUILDNUMBERS]", ",".join(build_numbers))
    json_string = json_string.replace(
        "[TESTDATA_WEBKITREVISION]", ",".join(webkit_revision))
    json_string = json_string.replace(
        "[TESTDATA_CHROMEREVISION]", ",".join(chrome_revision))
    json_string = json_string.replace("[TESTDATA_TIMES]", ",".join(times))
    version = str(test_data["version"]) if "version" in test_data else "4"
    json_string = json_string.replace("[VERSION]", version)
    json_string = json_string.replace("{[TESTDATA_TESTS]}",
        json.dumps(tests, separators=(',', ':'), sort_keys=True))
    return json_string
  def _test_merge(self, aggregated_data, incremental_data, expected_data,
                  max_builds=jsonresults.JSON_RESULTS_MAX_BUILDS):
    """Merge incremental into aggregated results and check the outcome.

    expected_data=None means the merge is expected to fail (non-200 status).
    """
    aggregated_results = self._make_test_json(
        aggregated_data, builder_name=self._builder)
    incremental_results = self._make_test_json(
        incremental_data, builder_name=self._builder)
    # FIXME: Why is this called if we ignore the result?
    JsonResults._get_incremental_json(self._builder,
        JsonResults.load_json(aggregated_results),
        is_full_results_format=False)
    merged_results, status_code = JsonResults.merge(self._builder,
        aggregated_results, JsonResults.load_json(incremental_results),
        num_runs=max_builds, sort_keys=True)
    if expected_data:
      expected_results = self._make_test_json(
          expected_data, builder_name=self._builder)
      self.assert_json_equal(merged_results, expected_results)
      self.assertEqual(status_code, 200)
    else:
      self.assertTrue(status_code != 200)
  def _test_get_test_list(self, input_data, expected_data):
    """get_test_list on rendered input_data must equal expected_data."""
    input_results = self._make_test_json(input_data)
    expected_results = JSON_RESULTS_TEST_LIST_TEMPLATE.replace(
        "{[TESTDATA_TESTS]}", json.dumps(expected_data, separators=(',', ':')))
    actual_results = JsonResults.get_test_list(self._builder, input_results)
    self.assert_json_equal(actual_results, expected_results)
  def test_update_files_empty_aggregate_data(self):
    """With no existing aggregate, both files get the incremental data."""
    small_file = MockFile(name='results-small.json')
    large_file = MockFile(name='results.json')
    incremental_data = {
        "builds": ["2", "1"],
        "tests": {
            "001.html": {
                "results": [[200, jsonresults.TEXT]],
                "times": [[200, 0]],
            }
        }
    }
    incremental_string = self._make_test_json(
        incremental_data, builder_name=small_file.builder)
    incremental_json = JsonResults.load_json(incremental_string)
    self.assertTrue(JsonResults.update_files(small_file.builder,
        incremental_json, small_file, large_file, is_full_results_format=False))
    self.assert_json_equal(small_file.data, incremental_string)
    self.assert_json_equal(large_file.data, incremental_string)
  def test_update_files_null_incremental_data(self):
    """An empty incremental payload is rejected (403) and files untouched."""
    small_file = MockFile(name='results-small.json')
    large_file = MockFile(name='results.json')
    aggregated_data = {
        "builds": ["2", "1"],
        "tests": {
            "001.html": {
                "results": [[200, jsonresults.TEXT]],
                "times": [[200, 0]],
            }
        }
    }
    aggregated_string = self._make_test_json(
        aggregated_data, builder_name=small_file.builder)
    small_file.data = large_file.data = aggregated_string
    incremental_string = ""
    results_tuple = JsonResults.update_files(small_file.builder,
        incremental_string, small_file, large_file,
        is_full_results_format=False)
    self.assertEqual(results_tuple, ('No incremental JSON data to merge.', 403))
    self.assert_json_equal(small_file.data, aggregated_string)
    self.assert_json_equal(large_file.data, aggregated_string)
  def test_update_files_empty_incremental_data(self):
    """Incremental data with no builds/tests is rejected (403)."""
    small_file = MockFile(name='results-small.json')
    large_file = MockFile(name='results.json')
    aggregated_data = {
        "builds": ["2", "1"],
        "tests": {
            "001.html": {
                "results": [[200, jsonresults.TEXT]],
                "times": [[200, 0]],
            }
        }
    }
    aggregated_string = self._make_test_json(
        aggregated_data, builder_name=small_file.builder)
    small_file.data = large_file.data = aggregated_string
    incremental_data = {
        "builds": [],
        "tests": {}
    }
    incremental_string = self._make_test_json(
        incremental_data, builder_name=small_file.builder)
    results_tuple = JsonResults.update_files(small_file.builder,
        incremental_string, small_file, large_file,
        is_full_results_format=False)
    self.assertEqual(results_tuple, ('No incremental JSON data to merge.', 403))
    self.assert_json_equal(small_file.data, aggregated_string)
    self.assert_json_equal(large_file.data, aggregated_string)
  def test_merge_with_empty_aggregated_results(self):
    """Merging into an empty aggregate yields the incremental results."""
    incremental_data = {
        "builds": ["2", "1"],
        "tests": {
            "001.html": {
                "results": [[200, jsonresults.TEXT]],
                "times": [[200, 0]],
            }
        }
    }
    incremental_json = JsonResults.load_json(
        self._make_test_json(incremental_data))
    incremental_results, _ = JsonResults._get_incremental_json(
        self._builder, incremental_json, is_full_results_format=False)
    aggregated_results = ""
    merged_results, _ = JsonResults.merge(self._builder, aggregated_results,
        incremental_results, num_runs=jsonresults.JSON_RESULTS_MAX_BUILDS,
        sort_keys=True)
    self.assert_json_equal(merged_results, incremental_results)
  def test_failures_by_type_added(self):
    """Merging two version-3 (old-format) aggregates upgrades the result to
    the current format (run counts summed, version bumped)."""
    aggregated_results = self._make_test_json({
        "builds": ["2", "1"],
        "tests": {
            "001.html": {
                "results": [[100, TEXT], [100, FAIL]],
                "times": [[200, 0]],
            }
        }
    }, json_string=JSON_RESULTS_OLD_TEMPLATE)
    incremental_results = self._make_test_json({
        "builds": ["3"],
        "tests": {
            "001.html": {
                "results": [[1, TEXT]],
                "times": [[1, 0]],
            }
        }
    }, json_string=JSON_RESULTS_OLD_TEMPLATE)
    incremental_json, _ = JsonResults._get_incremental_json(self._builder,
        JsonResults.load_json(incremental_results),
        is_full_results_format=False)
    merged_results, _ = JsonResults.merge(self._builder, aggregated_results,
        incremental_json, num_runs=201, sort_keys=True)
    # Expected output is rendered with the default (new-format) template.
    self.assert_json_equal(merged_results, self._make_test_json({
        "builds": ["3", "2", "1"],
        "tests": {
            "001.html": {
                "results": [[101, TEXT], [100, FAIL]],
                "times": [[201, 0]],
            }
        }
    }))
  def test_merge_full_results_format(self):
    """A full-results upload merged into an empty aggregate produces the
    aggregated (version 4) representation below."""
    expected_incremental_results = {
        "Webkit": {
            "blinkRevision": ["1234"],
            "buildNumbers": ["3"],
            "chromeRevision": ["5678"],
            "failure_map": jsonresults.CHAR_TO_FAILURE,
            "num_failures_by_type": {
                "AUDIO": [0],
                "CRASH": [3],
                "FAIL": [2],
                "IMAGE": [1],
                "IMAGE+TEXT": [0],
                "MISSING": [0],
                "PASS": [10],
                "SKIP": [2],
                "TEXT": [3],
                "TIMEOUT": [16],
                "LEAK": [1]
            },
            "secondsSinceEpoch": [1368146629],
            "tests": {
                "media": {
                    "W3C": {
                        "audio": {
                            "src": {
                                "src_removal_does_not_trigger_loadstart.html": {
                                    "results": [[1, PASS]],
                                    "times": [[1, 4]],
                                }
                            }
                        }
                    },
                    "encrypted-media": {
                        "encrypted-media-v2-events.html": {
                            "bugs": ["crbug.com/1234"],
                            "expected": "TIMEOUT",
                            "results": [[1, TIMEOUT]],
                            "times": [[1, 6]],
                        },
                        "encrypted-media-v2-syntax.html": {
                            "expected": "TIMEOUT",
                            "results": [[1, TIMEOUT]],
                            "times": [[1, 0]],
                        }
                    },
                    "media-document-audio-repaint.html": {
                        "expected": "IMAGE",
                        "results": [[1, IMAGE]],
                        "times": [[1, 0]],
                    },
                    "progress-events-generated-correctly.html": {
                        "expected": "PASS FAIL IMAGE TIMEOUT CRASH MISSING",
                        "results": [[1, TIMEOUT]],
                        "times": [[1, 6]],
                    },
                    "flaky-failed.html": {
                        "expected": "PASS FAIL",
                        "results": [[1, FAIL]],
                        "times": [[1, 0]],
                    },
                    "unexpected-fail.html": {
                        "results": [[1, FAIL]],
                        "times": [[1, 0]],
                    },
                    "unexpected-leak.html": {
                        "results": [[1, LEAK]],
                        "times": [[1, 0]],
                    },
                    "unexpected-flake.html": {
                        "results": [[1, FAIL + PASS]],
                        "times": [[1, 0]],
                    },
                    "unexpected-unexpected.html": {
                        "results": [[1, UNKNOWN]],
                        "times": [[1, 0]],
                    },
                }
            }
        },
        "version": 4
    }
    aggregated_results = ""
    incremental_json, _ = JsonResults._get_incremental_json(self._builder,
        JsonResults.load_json(FULL_RESULT_EXAMPLE),
        is_full_results_format=True)
    merged_results, _ = JsonResults.merge("Webkit", aggregated_results,
        incremental_json, num_runs=jsonresults.JSON_RESULTS_MAX_BUILDS,
        sort_keys=True)
    self.assert_json_equal(merged_results, expected_incremental_results)
  def test_merge_empty_aggregated_results(self):
    """Template-driven variant: empty aggregate, merge echoes incremental."""
    # No existing aggregated results.
    # Merged results == new incremental results.
    self._test_merge(
        # Aggregated results
        None,
        # Incremental results
        {"builds": ["2", "1"],
         "tests": {"001.html": {
             "results": [[200, TEXT]],
             "times": [[200, 0]]}}},
        # Expected result
        {"builds": ["2", "1"],
         "tests": {"001.html": {
             "results": [[200, TEXT]],
             "times": [[200, 0]]}}})
  def test_merge_duplicate_build_number(self):
    """Re-uploading an already-aggregated build number must fail."""
    self._test_merge(
        # Aggregated results
        {"builds": ["2", "1"],
         "tests": {"001.html": {
             "results": [[100, TEXT]],
             "times": [[100, 0]]}}},
        # Incremental results
        {"builds": ["2"],
         "tests": {"001.html": {
             "results": [[1, TEXT]],
             "times": [[1, 0]]}}},
        # Expected results
        None)
  def test_merge_incremental_single_test_single_run_same_result(self):
    """A new build repeating the latest result extends its run-length."""
    # Incremental results has the latest build and same test results for
    # that run.
    # Insert the incremental results at the first place and sum number
    # of runs for TEXT (200 + 1) to get merged results.
    self._test_merge(
        # Aggregated results
        {"builds": ["2", "1"],
         "tests": {"001.html": {
             "results": [[200, TEXT]],
             "times": [[200, 0]]}}},
        # Incremental results
        {"builds": ["3"],
         "tests": {"001.html": {
             "results": [[1, TEXT]],
             "times": [[1, 0]]}}},
        # Expected results
        {"builds": ["3", "2", "1"],
         "tests": {"001.html": {
             "results": [[201, TEXT]],
             "times": [[201, 0]]}}})
  def test_merge_single_test_single_run_different_result(self):
    """A new build with a different result prepends a new run-length entry."""
    # Incremental results has the latest build but different test results
    # for that run.
    # Insert the incremental results at the first place.
    self._test_merge(
        # Aggregated results
        {"builds": ["2", "1"],
         "tests": {"001.html": {
             "results": [[200, TEXT]],
             "times": [[200, 0]]}}},
        # Incremental results
        {"builds": ["3"],
         "tests": {"001.html": {
             "results": [[1, IMAGE]],
             "times": [[1, 1]]}}},
        # Expected results
        {"builds": ["3", "2", "1"],
         "tests": {"001.html": {
             "results": [[1, IMAGE], [200, TEXT]],
             "times": [[1, 1], [200, 0]]}}})
    def test_merge_single_test_single_run_result_changed(self):
        # Incremental results has the latest build but results which differ from
        # the latest result (but are the same as an older result).
        # The older IMAGE entry must not be coalesced with the new one.
        self._test_merge(
            # Aggregated results
            {"builds": ["2", "1"],
             "tests": {"001.html": {
                 "results": [[200, TEXT], [10, IMAGE]],
                 "times": [[200, 0], [10, 1]]}}},
            # Incremental results
            {"builds": ["3"],
             "tests": {"001.html": {
                 "results": [[1, IMAGE]],
                 "times": [[1, 1]]}}},
            # Expected results
            {"builds": ["3", "2", "1"],
             "tests": {"001.html": {
                 "results": [[1, IMAGE], [200, TEXT], [10, IMAGE]],
                 "times": [[1, 1], [200, 0], [10, 1]]}}})
    def test_merge_multiple_tests_single_run(self):
        # All tests have incremental updates; each test's run counts are
        # summed independently.
        self._test_merge(
            # Aggregated results
            {"builds": ["2", "1"],
             "tests": {"001.html": {
                 "results": [[200, TEXT]],
                 "times": [[200, 0]]},
                 "002.html": {
                     "results": [[100, IMAGE]],
                     "times": [[100, 1]]}}},
            # Incremental results
            {"builds": ["3"],
             "tests": {"001.html": {
                 "results": [[1, TEXT]],
                 "times": [[1, 0]]},
                 "002.html": {
                     "results": [[1, IMAGE]],
                     "times": [[1, 1]]}}},
            # Expected results
            {"builds": ["3", "2", "1"],
             "tests": {"001.html": {
                 "results": [[201, TEXT]],
                 "times": [[201, 0]]},
                 "002.html": {
                     "results": [[101, IMAGE]],
                     "times": [[101, 1]]}}})
    def test_merge_multiple_tests_single_run_one_no_result(self):
        # 001.html is absent from the incremental run, so it gets a NO_DATA
        # placeholder for the new build instead of a real result.
        self._test_merge(
            # Aggregated results
            {"builds": ["2", "1"],
             "tests": {"001.html": {
                 "results": [[200, TEXT]],
                 "times": [[200, 0]]},
                 "002.html": {
                     "results": [[100, IMAGE]],
                     "times": [[100, 1]]}}},
            # Incremental results
            {"builds": ["3"],
             "tests": {"002.html": {
                 "results": [[1, IMAGE]],
                 "times": [[1, 1]]}}},
            # Expected results
            {"builds": ["3", "2", "1"],
             "tests": {"001.html": {
                 "results": [[1, NO_DATA], [200, TEXT]],
                 "times": [[201, 0]]},
                 "002.html": {
                     "results": [[101, IMAGE]],
                     "times": [[101, 1]]}}})
    def test_merge_single_test_multiple_runs(self):
        # Two new builds arrive at once; their run-length entries are placed
        # ahead of the existing aggregated entries.
        self._test_merge(
            # Aggregated results
            {"builds": ["2", "1"],
             "tests": {"001.html": {
                 "results": [[200, TEXT]],
                 "times": [[200, 0]]}}},
            # Incremental results
            {"builds": ["4", "3"],
             "tests": {"001.html": {
                 "results": [[2, IMAGE], [1, FAIL]],
                 "times": [[3, 2]]}}},
            # Expected results
            {"builds": ["4", "3", "2", "1"],
             "tests": {"001.html": {
                 "results": [[1, FAIL], [2, IMAGE], [200, TEXT]],
                 "times": [[3, 2], [200, 0]]}}})
    def test_merge_multiple_tests_multiple_runs(self):
        # Multiple tests and multiple new builds merged in one pass.
        self._test_merge(
            # Aggregated results
            {"builds": ["2", "1"],
             "tests": {"001.html": {
                 "results": [[200, TEXT]],
                 "times": [[200, 0]]},
                 "002.html": {
                     "results": [[10, IMAGE_PLUS_TEXT]],
                     "times": [[10, 0]]}}},
            # Incremental results
            {"builds": ["4", "3"],
             "tests": {"001.html": {
                 "results": [[2, IMAGE]],
                 "times": [[2, 2]]},
                 "002.html": {
                     "results": [[1, CRASH]],
                     "times": [[1, 1]]}}},
            # Expected results
            {"builds": ["4", "3", "2", "1"],
             "tests": {"001.html": {
                 "results": [[2, IMAGE], [200, TEXT]],
                 "times": [[2, 2], [200, 0]]},
                 "002.html": {
                     "results": [[1, CRASH], [10, IMAGE_PLUS_TEXT]],
                     "times": [[1, 1], [10, 0]]}}})
    def test_merge_incremental_result_older_build(self):
        # Test the build in incremental results is older than the most recent
        # build in aggregated results.
        self._test_merge(
            # Aggregated results
            {"builds": ["3", "1"],
             "tests": {"001.html": {
                 "results": [[5, TEXT]],
                 "times": [[5, 0]]}}},
            # Incremental results
            {"builds": ["2"],
             "tests": {"001.html": {
                 "results": [[1, TEXT]],
                 "times": [[1, 0]]}}},
            # Expected: the merge still happens — the older build is simply
            # prepended and its counts folded in.
            {"builds": ["2", "3", "1"],
             "tests": {"001.html": {
                 "results": [[6, TEXT]],
                 "times": [[6, 0]]}}})
    def test_merge_incremental_result_same_build(self):
        # Test the build in incremental results is same as the build in
        # aggregated results.
        self._test_merge(
            # Aggregated results
            {"builds": ["2", "1"],
             "tests": {"001.html": {
                 "results": [[5, TEXT]],
                 "times": [[5, 0]]}}},
            # Incremental results
            {"builds": ["3", "2"],
             "tests": {"001.html": {
                 "results": [[2, TEXT]],
                 "times": [[2, 0]]}}},
            # Expected: the merge happens anyway, leaving a duplicated "2" in
            # the builds list and the run counts summed.
            {"builds": ["3", "2", "2", "1"],
             "tests": {"001.html": {
                 "results": [[7, TEXT]],
                 "times": [[7, 0]]}}})
    def test_merge_remove_new_test(self):
        # Tests that are new in the incremental run but uninteresting
        # (all PASS, NOTRUN, or NO_DATA) are pruned from the merged output.
        self._test_merge(
            # Aggregated results
            {"builds": ["2", "1"],
             "tests": {"001.html": {
                 "results": [[199, TEXT]],
                 "times": [[199, 0]]},
             }},
            # Incremental results
            {"builds": ["3"],
             "tests": {"001.html": {
                 "results": [[1, TEXT]],
                 "times": [[1, 0]]},
                 "002.html": {
                     "results": [[1, PASS]],
                     "times": [[1, 0]]},
                 "notrun.html": {
                     "results": [[1, NOTRUN]],
                     "times": [[1, 0]]},
                 "003.html": {
                     "results": [[1, NO_DATA]],
                     "times": [[1, 0]]},
             }},
            # Expected results
            {"builds": ["3", "2", "1"],
             "tests": {"001.html": {
                 "results": [[200, TEXT]],
                 "times": [[200, 0]]},
             }},
            max_builds=200)
    def test_merge_remove_test(self):
        # Existing tests whose recent history becomes all-passing (within the
        # max_builds window) are dropped from the merged output, including
        # whole directory subtrees that become empty.
        self._test_merge(
            # Aggregated results
            {
                "builds": ["2", "1"],
                "tests": {
                    "directory": {
                        "directory": {
                            "001.html": {
                                "results": [[200, PASS]],
                                "times": [[200, 0]]
                            }
                        }
                    },
                    "002.html": {
                        "results": [[10, TEXT]],
                        "times": [[10, 0]]
                    },
                    "003.html": {
                        "results": [[190, PASS], [9, NO_DATA], [1, TEXT]],
                        "times": [[200, 0]]
                    },
                }
            },
            # Incremental results
            {
                "builds": ["3"],
                "tests": {
                    "directory": {
                        "directory": {
                            "001.html": {
                                "results": [[1, PASS]],
                                "times": [[1, 0]]
                            }
                        }
                    },
                    "002.html": {
                        "results": [[1, PASS]],
                        "times": [[1, 0]]
                    },
                    "003.html": {
                        "results": [[1, PASS]],
                        "times": [[1, 0]]
                    },
                }
            },
            # Expected results: only 002.html (still failing recently) is kept.
            {
                "builds": ["3", "2", "1"],
                "tests": {
                    "002.html": {
                        "results": [[1, PASS], [10, TEXT]],
                        "times": [[11, 0]]
                    }
                }
            },
            max_builds=200)
    def test_merge_updates_expected(self):
        # "expected" and "bugs" annotations are taken from the incremental
        # results; tests whose annotation becomes plain PASS (and which pass)
        # are pruned entirely.
        self._test_merge(
            # Aggregated results
            {
                "builds": ["2", "1"],
                "tests": {
                    "directory": {
                        "directory": {
                            "001.html": {
                                "expected": "FAIL",
                                "results": [[200, PASS]],
                                "times": [[200, 0]]
                            }
                        }
                    },
                    "002.html": {
                        "bugs": ["crbug.com/1234"],
                        "expected": "FAIL",
                        "results": [[10, TEXT]],
                        "times": [[10, 0]]
                    },
                    "003.html": {
                        "expected": "FAIL",
                        "results": [[190, PASS], [9, NO_DATA], [1, TEXT]],
                        "times": [[200, 0]]
                    },
                    "004.html": {
                        "results": [[199, PASS], [1, TEXT]],
                        "times": [[200, 0]]
                    },
                }
            },
            # Incremental results
            {
                "builds": ["3"],
                "tests": {
                    "002.html": {
                        "expected": "PASS",
                        "results": [[1, PASS]],
                        "times": [[1, 0]]
                    },
                    "003.html": {
                        "expected": "TIMEOUT",
                        "results": [[1, PASS]],
                        "times": [[1, 0]]
                    },
                    "004.html": {
                        "bugs": ["crbug.com/1234"],
                        "results": [[1, PASS]],
                        "times": [[1, 0]]
                    },
                }
            },
            # Expected results
            {
                "builds": ["3", "2", "1"],
                "tests": {
                    "002.html": {
                        "results": [[1, PASS], [10, TEXT]],
                        "times": [[11, 0]]
                    },
                    "003.html": {
                        "expected": "TIMEOUT",
                        "results": [[191, PASS], [9, NO_DATA]],
                        "times": [[200, 0]]
                    },
                    "004.html": {
                        "bugs": ["crbug.com/1234"],
                        "results": [[200, PASS]],
                        "times": [[200, 0]]
                    },
                }
            },
            max_builds=200)
    def test_merge_keep_test_with_all_pass_but_slow_time(self):
        # An all-passing test is normally pruned, but is kept when any of its
        # recorded times reaches JSON_RESULTS_MIN_TIME (i.e. it is slow).
        self._test_merge(
            # Aggregated results
            {"builds": ["2", "1"],
             "tests": {"001.html": {
                 "results": [[200, PASS]],
                 "times": [[200, jsonresults.JSON_RESULTS_MIN_TIME]]},
                 "002.html": {
                     "results": [[10, TEXT]],
                     "times": [[10, 0]]}}},
            # Incremental results
            {"builds": ["3"],
             "tests": {"001.html": {
                 "results": [[1, PASS]],
                 "times": [[1, 1]]},
                 "002.html": {
                     "results": [[1, PASS]],
                     "times": [[1, 0]]}}},
            # Expected results
            {"builds": ["3", "2", "1"],
             "tests": {"001.html": {
                 "results": [[201, PASS]],
                 "times": [[1, 1], [200, jsonresults.JSON_RESULTS_MIN_TIME]]},
                 "002.html": {
                     "results": [[1, PASS], [10, TEXT]],
                     "times": [[11, 0]]}}})
    def test_merge_pruning_slow_tests_for_debug_builders(self):
        # Debug builders use a higher slowness threshold, so 003.html (at
        # exactly JSON_RESULTS_MIN_TIME) is still pruned while 001.html
        # (3x the threshold) is kept.
        self._builder = "MockBuilder(dbg)"
        self._test_merge(
            # Aggregated results
            {"builds": ["2", "1"],
             "tests": {"001.html": {
                 "results": [[200, PASS]],
                 "times": [[200, 3 * jsonresults.JSON_RESULTS_MIN_TIME]]},
                 "002.html": {
                     "results": [[10, TEXT]],
                     "times": [[10, 0]]}}},
            # Incremental results
            {"builds": ["3"],
             "tests": {"001.html": {
                 "results": [[1, PASS]],
                 "times": [[1, 1]]},
                 "002.html": {
                     "results": [[1, PASS]],
                     "times": [[1, 0]]},
                 "003.html": {
                     "results": [[1, PASS]],
                     "times": [[1, jsonresults.JSON_RESULTS_MIN_TIME]]}}},
            # Expected results
            {"builds": ["3", "2", "1"],
             "tests": {"001.html": {
                 "results": [[201, PASS]],
                 "times": [[1, 1], [200, 3 * jsonresults.JSON_RESULTS_MIN_TIME]]},
                 "002.html": {
                     "results": [[1, PASS], [10, TEXT]],
                     "times": [[11, 0]]}}})
    def test_merge_prune_extra_results(self):
        # Remove items from test results and times that exceed the max number
        # of builds to track (the oldest [1, IMAGE] entry falls off).
        max_builds = jsonresults.JSON_RESULTS_MAX_BUILDS
        self._test_merge(
            # Aggregated results
            {"builds": ["2", "1"],
             "tests": {"001.html": {
                 "results": [[max_builds, TEXT], [1, IMAGE]],
                 "times": [[max_builds, 0], [1, 1]]}}},
            # Incremental results
            {"builds": ["3"],
             "tests": {"001.html": {
                 "results": [[1, TIMEOUT]],
                 "times": [[1, 1]]}}},
            # Expected results
            {"builds": ["3", "2", "1"],
             "tests": {"001.html": {
                 "results": [[1, TIMEOUT], [max_builds, TEXT]],
                 "times": [[1, 1], [max_builds, 0]]}}})
    def test_merge_prune_extra_results_small(self):
        # Remove items from test results and times that exceed the max number
        # of builds to track, using the smaller threshold passed explicitly.
        max_builds = jsonresults.JSON_RESULTS_MAX_BUILDS_SMALL
        self._test_merge(
            # Aggregated results
            {"builds": ["2", "1"],
             "tests": {"001.html": {
                 "results": [[max_builds, TEXT], [1, IMAGE]],
                 "times": [[max_builds, 0], [1, 1]]}}},
            # Incremental results
            {"builds": ["3"],
             "tests": {"001.html": {
                 "results": [[1, TIMEOUT]],
                 "times": [[1, 1]]}}},
            # Expected results
            {"builds": ["3", "2", "1"],
             "tests": {"001.html": {
                 "results": [[1, TIMEOUT], [max_builds, TEXT]],
                 "times": [[1, 1], [max_builds, 0]]}}},
            int(max_builds))
    def test_merge_prune_extra_results_with_new_result_of_same_type(self):
        # Test that merging in a new result of the same type as the last result
        # causes old results to fall off (TEXT count stays capped at
        # max_builds and the trailing NO_DATA entry is dropped).
        max_builds = jsonresults.JSON_RESULTS_MAX_BUILDS_SMALL
        self._test_merge(
            # Aggregated results
            {"builds": ["2", "1"],
             "tests": {"001.html": {
                 "results": [[max_builds, TEXT], [1, NO_DATA]],
                 "times": [[max_builds, 0], [1, 1]]}}},
            # Incremental results
            {"builds": ["3"],
             "tests": {"001.html": {
                 "results": [[1, TEXT]],
                 "times": [[1, 0]]}}},
            # Expected results
            {"builds": ["3", "2", "1"],
             "tests": {"001.html": {
                 "results": [[max_builds, TEXT]],
                 "times": [[max_builds, 0]]}}},
            int(max_builds))
    def test_merge_build_directory_hierarchy(self):
        # Nested directory trees merge independently: missing tests get
        # NO_DATA padding, new directories are added alongside existing ones.
        self._test_merge(
            # Aggregated results
            {"builds": ["2", "1"],
             "tests": {"bar": {"baz": {
                 "003.html": {
                     "results": [[25, TEXT]],
                     "times": [[25, 0]]}}},
                 "foo": {
                     "001.html": {
                         "results": [[50, TEXT]],
                         "times": [[50, 0]]},
                     "002.html": {
                         "results": [[100, IMAGE]],
                         "times": [[100, 0]]}}},
             "version": 4},
            # Incremental results
            {"builds": ["3"],
             "tests": {"baz": {
                 "004.html": {
                     "results": [[1, IMAGE]],
                     "times": [[1, 0]]}},
                 "foo": {
                     "001.html": {
                         "results": [[1, TEXT]],
                         "times": [[1, 0]]},
                     "002.html": {
                         "results": [[1, IMAGE]],
                         "times": [[1, 0]]}}},
             "version": 4},
            # Expected results
            {"builds": ["3", "2", "1"],
             "tests": {"bar": {"baz": {
                 "003.html": {
                     "results": [[1, NO_DATA], [25, TEXT]],
                     "times": [[26, 0]]}}},
                 "baz": {
                     "004.html": {
                         "results": [[1, IMAGE]],
                         "times": [[1, 0]]}},
                 "foo": {
                     "001.html": {
                         "results": [[51, TEXT]],
                         "times": [[51, 0]]},
                     "002.html": {
                         "results": [[101, IMAGE]],
                         "times": [[101, 0]]}}},
             "version": 4})
def test_merge_treats_multiple_results_as_a_unique_type(self):
self._test_merge(
# Aggregated results
{"builds": ["3", "1"],
"tests": {
"001.html": {
"results": [[5, TEXT]],
"times": [[5, 0]]},
"002.html": {
"results": [[3, TEXT + FAIL]],
"times": [[3, 0]]}}},
# Incremental results
{"builds": ["2"],
"tests": {
"001.html": {
"results": [[1, TEXT + IMAGE + FAIL]],
"times": [[1, 0]]},
"002.html": {
"results": [[1, TEXT + FAIL]],
"times": [[1, 0]]}}},
# Expected no merge for 001, full merge for 002.
{"builds": ["2", "3", "1"],
"tests": {
"001.html": {
"results": [[1, TEXT + IMAGE + FAIL], [5, TEXT]],
"times": [[6, 0]]},
"002.html": {
"results": [[4, TEXT + FAIL]],
"times": [[4, 0]]}}});
# FIXME(aboxhall): Add some tests for xhtml/svg test results.
    def test_get_test_name_list(self):
        # Get test name list only. Don't include non-test-list data and
        # of test result details.
        # FIXME: This also tests a temporary bug in the data where directory-level
        # results have a results and times values. Once that bug is fixed,
        # remove this test-case and assert we don't ever hit it.
        self._test_get_test_list(
            # Input results
            {"builds": ["3", "2", "1"],
             "tests": {"foo": {
                 "001.html": {
                     "results": [[200, PASS]],
                     "times": [[200, 0]]},
                 "results": [[1, NO_DATA]],
                 "times": [[1, 0]]},
                 "002.html": {
                     "results": [[10, TEXT]],
                     "times": [[10, 0]]}}},
            # Expected results: names only, all detail dicts emptied.
            {"foo": {"001.html": {}}, "002.html": {}})
    def test_gtest(self):
        # gtest-style flat names ("suite.test") merge the same way as layout
        # test paths; the version field is bumped to the incremental one.
        self._test_merge(
            # Aggregated results
            {"builds": ["2", "1"],
             "tests": {"foo.bar": {
                 "results": [[50, TEXT]],
                 "times": [[50, 0]]},
                 "foo.bar2": {
                     "results": [[100, IMAGE]],
                     "times": [[100, 0]]},
                 "test.failed": {
                     "results": [[5, FAIL]],
                     "times": [[5, 0]]},
             },
             "version": 3},
            # Incremental results
            {"builds": ["3"],
             "tests": {"foo.bar2": {
                 "results": [[1, IMAGE]],
                 "times": [[1, 0]]},
                 "foo.bar3": {
                     "results": [[1, TEXT]],
                     "times": [[1, 0]]},
                 "test.failed": {
                     "results": [[5, FAIL]],
                     "times": [[5, 0]]},
             },
             "version": 4},
            # Expected results
            {"builds": ["3", "2", "1"],
             "tests": {"foo.bar": {
                 "results": [[1, NO_DATA], [50, TEXT]],
                 "times": [[51, 0]]},
                 "foo.bar2": {
                     "results": [[101, IMAGE]],
                     "times": [[101, 0]]},
                 "foo.bar3": {
                     "results": [[1, TEXT]],
                     "times": [[1, 0]]},
                 "test.failed": {
                     "results": [[10, FAIL]],
                     "times": [[10, 0]]},
             },
             "version": 4})
    def test_deprecated_master_name(self):
        # Uploads keyed by a master's deprecated name and by its url_name must
        # end up merged under the url_name key.
        tb = testbed.Testbed()
        tb.activate()
        tb.init_datastore_v3_stub()
        tb.init_blobstore_stub()

        master = master_config.getMaster('chromium.chromiumos')
        builder = 'test-builder'
        test_type = 'test-type'
        # Two full-results payloads for two different builds of one builder.
        test_data = [
            {
                'tests': {
                    'Test1.testproc1': {
                        'expected': 'PASS',
                        'actual': 'PASS',
                        'time': 1,
                    }
                },
                'build_number': '123',
                'version': jsonresults.JSON_RESULTS_HIERARCHICAL_VERSION,
                'builder_name': builder,
                'blink_revision': '12345',
                'seconds_since_epoch': 1406123456,
                'num_failures_by_type': {
                    'FAIL': 0,
                    'SKIP': 0,
                    'PASS': 1
                },
                'chromium_revision': '761b2a4cbc3103ef5e48cc7e77184f57eb50f6d4',
            },
            {
                'tests': {
                    'Test2.testproc2': {
                        'expected': 'PASS',
                        'actual': 'FAIL',
                        'time': 2,
                    }
                },
                'build_number': '456',
                'version': jsonresults.JSON_RESULTS_HIERARCHICAL_VERSION,
                'builder_name': builder,
                'blink_revision': '54321',
                'seconds_since_epoch': 1406654321,
                'num_failures_by_type': {
                    'FAIL': 1,
                    'SKIP': 0,
                    'PASS': 0
                },
                'chromium_revision': '761b2a4cbc3103ef5e48cc7e77184f57eb50f6d5',
            },
        ]

        # Upload a file using old master name
        # Seed results files using the old name.
        JsonResults.update(
            master['name'], builder, test_type, test_data[0], None, True)
        # Update results files using the new name.
        JsonResults.update(master['url_name'], builder, test_type, test_data[1],
                           master['name'], True)
        # Verify that the file keyed by url_name contains both sets of results.
        files = TestFile.get_files(
            master['url_name'], builder, test_type, None, None, limit=3)
        self.assertEqual(len(files), 2)
        for f in files:
            j = json.loads(f.data)
            self.assertItemsEqual(j[builder]['blinkRevision'], ['12345', '54321'])
        tb.deactivate()
    def test_is_invalid_full_results_json_not_dict(self):
        # The top-level full-results payload must be a dict.
        self.assertFalse(JsonResults.is_valid_full_results_json([]))
        self.assertFalse(JsonResults.is_valid_full_results_json("foo"))
    def test_is_invalid_full_results_json_missing_required_fields(self):
        # An empty dict lacks every required field and must be rejected.
        self.assertFalse(JsonResults.is_valid_full_results_json({}))
    def test_is_invalid_full_results_json_incorrect_int_fields(self):
        # Integer-typed fields holding non-numeric strings are rejected.
        self.assertFalse(JsonResults.is_valid_full_results_json({
            'chromium_revision': 'foobar',
            'blink_revision': 'foobar',
            'build_number': 'foobar',
            'version': 'foobar',
            'builder_name': 'foobar',
            'seconds_since_epoch': 'foobar',
            'num_failures_by_type': 'foobar',
            'tests': 'foobar',
        }))
    def test_is_invalid_full_results_json_incorrect_dict_fields(self):
        # num_failures_by_type and tests must be dicts, not strings.
        self.assertFalse(JsonResults.is_valid_full_results_json({
            'chromium_revision': '761b2a4cbc3103ef5e48cc7e77184f57eb50f6d4',
            'blink_revision': '12345',
            'build_number': '12345',
            'version': '5',
            'builder_name': 'foobar',
            'seconds_since_epoch': '12345',
            'num_failures_by_type': 'foobar',
            'tests': 'foobar',
        }))
    def test_is_invalid_full_results_json_incorrect_failure_type_value(self):
        # num_failures_by_type keys must be strings (failure-type names).
        self.assertFalse(JsonResults.is_valid_full_results_json({
            'chromium_revision': '761b2a4cbc3103ef5e48cc7e77184f57eb50f6d4',
            'blink_revision': '12345',
            'build_number': '12345',
            'version': '5',
            'builder_name': 'foobar',
            'seconds_since_epoch': '12345',
            'num_failures_by_type': {10: 123},
            'tests': {},
        }))
    def test_is_invalid_full_results_json_incorrect_failure_type_count(self):
        # num_failures_by_type values must be numeric counts.
        self.assertFalse(JsonResults.is_valid_full_results_json({
            'chromium_revision': '761b2a4cbc3103ef5e48cc7e77184f57eb50f6d4',
            'blink_revision': '12345',
            'build_number': '12345',
            'version': '5',
            'builder_name': 'foobar',
            'seconds_since_epoch': '12345',
            'num_failures_by_type': {'FAIL': 'foobar'},
            'tests': {},
        }))
    def test_is_invalid_full_results_json_incorrect_test_name(self):
        # Test names (keys of 'tests') must be strings.
        self.assertFalse(JsonResults.is_valid_full_results_json({
            'chromium_revision': '761b2a4cbc3103ef5e48cc7e77184f57eb50f6d4',
            'blink_revision': '12345',
            'build_number': '12345',
            'version': '5',
            'builder_name': 'foobar',
            'seconds_since_epoch': '12345',
            'num_failures_by_type': {'FAIL': '123'},
            'tests': {10: 123},
        }))
    def test_is_invalid_full_results_json_incorrect_test_config_type(self):
        # Each test entry must itself be a dict of result details.
        self.assertFalse(JsonResults.is_valid_full_results_json({
            'chromium_revision': '761b2a4cbc3103ef5e48cc7e77184f57eb50f6d4',
            'blink_revision': '12345',
            'build_number': '12345',
            'version': '5',
            'builder_name': 'foobar',
            'seconds_since_epoch': '12345',
            'num_failures_by_type': {'FAIL': '123'},
            'tests': {'test': 123},
        }))
    def test_is_invalid_full_results_json_missing_required_fields_in_test(self):
        # A test entry with 'actual' but no 'expected' is incomplete.
        self.assertFalse(JsonResults.is_valid_full_results_json({
            'chromium_revision': '761b2a4cbc3103ef5e48cc7e77184f57eb50f6d4',
            'blink_revision': '12345',
            'build_number': '12345',
            'version': '5',
            'builder_name': 'foobar',
            'seconds_since_epoch': '12345',
            'num_failures_by_type': {'FAIL': '123'},
            'tests': {'test': {'actual': '10'}},
        }))
    def test_is_invalid_full_results_json_incorrect_actual_expected_type(self):
        # 'actual' and 'expected' must be result-name strings, not ints.
        self.assertFalse(JsonResults.is_valid_full_results_json({
            'chromium_revision': '761b2a4cbc3103ef5e48cc7e77184f57eb50f6d4',
            'blink_revision': '12345',
            'build_number': '12345',
            'version': '5',
            'builder_name': 'foobar',
            'seconds_since_epoch': '12345',
            'num_failures_by_type': {'FAIL': '123'},
            'tests': {'test': {'actual': 10, 'expected': 20}},
        }))
    def test_is_invalid_full_results_json_incorrect_time_type(self):
        # 'time' must be numeric.
        self.assertFalse(JsonResults.is_valid_full_results_json({
            'chromium_revision': '761b2a4cbc3103ef5e48cc7e77184f57eb50f6d4',
            'blink_revision': '12345',
            'build_number': '12345',
            'version': '5',
            'builder_name': 'foobar',
            'seconds_since_epoch': '12345',
            'num_failures_by_type': {'FAIL': '123'},
            'tests': {'test': {'actual': 'FAIL', 'expected': 'FAIL',
                               'time': 'foobar'}},
        }))
    # Some projects still report numeric chromium_revision in their json.
    def test_is_valid_full_results_json_numeric_chromium_revision(self):
        # A digits-only chromium_revision must still be accepted.
        self.assertTrue(JsonResults.is_valid_full_results_json({
            'chromium_revision': '12345',
            'blink_revision': '12345',
            'build_number': '12345',
            'version': '5',
            'builder_name': 'foobar',
            'seconds_since_epoch': '12345',
            'num_failures_by_type': {'FAIL': '123'},
            'tests': {'test': {'actual': 'FAIL', 'expected': 'PASS',
                               'time': '10'}},
        }))
    def test_is_valid_full_results_json(self):
        # A fully well-formed payload must be accepted.
        self.assertTrue(JsonResults.is_valid_full_results_json({
            'chromium_revision': '761b2a4cbc3103ef5e48cc7e77184f57eb50f6d4',
            'blink_revision': '12345',
            'build_number': '12345',
            'version': '5',
            'builder_name': 'foobar',
            'seconds_since_epoch': '12345',
            'num_failures_by_type': {'FAIL': '123'},
            'tests': {'test': {'actual': 'FAIL', 'expected': 'PASS', 'time': '10'}},
        }))
    @staticmethod
    def test_normalize_results_with_top_level_results_key_does_not_crash():
        # Regression guard: a directory-level "results" key (bad data that
        # exists in the wild) must not make _normalize_results raise.
        aggregated_json = {
            'Linux Tests': {
                'results': {'foo': {'results': [(1, 'P')],
                                    'times': [(1, 1)]}},
            }
        }
        JsonResults._normalize_results(aggregated_json, 1, 2)
# Allow running this suite directly as a script.
if __name__ == '__main__':
    unittest.main()
| |
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.api.v2 import attributes as attr
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import db_base_plugin_v2
from neutron.db import portsecurity_db
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import portsecurity as psec
from neutron import manager
from neutron.tests.unit import test_db_plugin
# Dotted path of the stub plugin used when no plugin is supplied to setUp().
DB_PLUGIN_KLASS = ('neutron.tests.unit.test_extension_allowedaddresspairs.'
                   'AllowedAddressPairTestPlugin')
class AllowedAddressPairTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
    # Base test case; records whether the loaded plugin supports the
    # port-security extension so dependent tests can skip themselves.
    def setUp(self, plugin=None, ext_mgr=None):
        # NOTE(review): ext_mgr is accepted but not forwarded to the base
        # setUp — confirm this is intentional before relying on it.
        super(AllowedAddressPairTestCase, self).setUp(plugin)

        # Check if a plugin supports security groups
        plugin_obj = manager.NeutronManager.get_plugin()
        self._skip_port_security = ('port-security' not in
                                    plugin_obj.supported_extension_aliases)
class AllowedAddressPairTestPlugin(portsecurity_db.PortSecurityDbMixin,
                                   db_base_plugin_v2.NeutronDbPluginV2,
                                   addr_pair_db.AllowedAddressPairsMixin):

    """Test plugin that implements necessary calls on create/delete port for
    associating ports with port security and allowed address pairs.
    """

    supported_extension_aliases = ["allowed-address-pairs"]

    def create_port(self, context, port):
        # Create the port and, in the same DB transaction, persist any
        # allowed address pairs supplied by the caller.
        p = port['port']
        with context.session.begin(subtransactions=True):
            neutron_db = super(AllowedAddressPairTestPlugin, self).create_port(
                context, port)
            p.update(neutron_db)

            if attr.is_attr_set(p.get(addr_pair.ADDRESS_PAIRS)):
                self._process_create_allowed_address_pairs(
                    context, p,
                    p[addr_pair.ADDRESS_PAIRS])
            else:
                # Normalize "unset" to an explicit None in the returned body.
                p[addr_pair.ADDRESS_PAIRS] = None

        return port['port']

    def update_port(self, context, id, port):
        delete_addr_pairs = self._check_update_deletes_allowed_address_pairs(
            port)
        has_addr_pairs = self._check_update_has_allowed_address_pairs(port)

        with context.session.begin(subtransactions=True):
            ret_port = super(AllowedAddressPairTestPlugin, self).update_port(
                context, id, port)
            # copy values over - but not fixed_ips
            port['port'].pop('fixed_ips', None)
            ret_port.update(port['port'])

            if (delete_addr_pairs or has_addr_pairs):
                # Delete existing address pairs and re-add the new set.
                self._delete_allowed_address_pairs(context, id)
                self._process_create_allowed_address_pairs(
                    context, ret_port,
                    ret_port[addr_pair.ADDRESS_PAIRS])
        return ret_port
class AllowedAddressPairDBTestCase(AllowedAddressPairTestCase):
    """Run the allowed-address-pairs tests against the stub DB plugin."""

    def setUp(self, plugin=None, ext_mgr=None):
        # Fall back to the in-tree test plugin when none is supplied.
        if not plugin:
            plugin = DB_PLUGIN_KLASS
        super(AllowedAddressPairDBTestCase, self).setUp(
            plugin=plugin, ext_mgr=ext_mgr)
class TestAllowedAddressPairs(AllowedAddressPairDBTestCase):
    # Functional tests for the allowed-address-pairs port attribute.
    def test_create_port_allowed_address_pairs(self):
        # Pairs supplied at create time are echoed back in the port body.
        with self.network() as net:
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '10.0.0.1'}]
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=(addr_pair.ADDRESS_PAIRS,),
                                    allowed_address_pairs=address_pairs)
            port = self.deserialize(self.fmt, res)
            self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS],
                             address_pairs)
            self._delete('ports', port['port']['id'])

    def test_create_port_security_true_allowed_address_pairs(self):
        # Address pairs are compatible with port security enabled.
        if self._skip_port_security:
            self.skipTest("Plugin does not implement port-security extension")

        with self.network() as net:
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '10.0.0.1'}]
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=('port_security_enabled',
                                              addr_pair.ADDRESS_PAIRS,),
                                    port_security_enabled=True,
                                    allowed_address_pairs=address_pairs)
            port = self.deserialize(self.fmt, res)
            self.assertEqual(port['port'][psec.PORTSECURITY], True)
            self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS],
                             address_pairs)
            self._delete('ports', port['port']['id'])

    def test_create_port_security_false_allowed_address_pairs(self):
        # Address pairs with port security disabled conflict (409).
        if self._skip_port_security:
            self.skipTest("Plugin does not implement port-security extension")

        with self.network() as net:
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '10.0.0.1'}]
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=('port_security_enabled',
                                              addr_pair.ADDRESS_PAIRS,),
                                    port_security_enabled=False,
                                    allowed_address_pairs=address_pairs)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, 409)

    def test_create_port_bad_mac(self):
        # Malformed MAC address is rejected with 400.
        address_pairs = [{'mac_address': 'invalid_mac',
                          'ip_address': '10.0.0.1'}]
        self._create_port_with_address_pairs(address_pairs, 400)

    def test_create_port_bad_ip(self):
        # Malformed IP address is rejected with 400.
        address_pairs = [{'mac_address': '00:00:00:00:00:01',
                          'ip_address': '10.0.0.1222'}]
        self._create_port_with_address_pairs(address_pairs, 400)

    def test_create_missing_ip_field(self):
        # ip_address is mandatory for each pair.
        address_pairs = [{'mac_address': '00:00:00:00:00:01'}]
        self._create_port_with_address_pairs(address_pairs, 400)

    def test_create_duplicate_mac_ip(self):
        # Identical pairs in one request are rejected.
        address_pairs = [{'mac_address': '00:00:00:00:00:01',
                          'ip_address': '10.0.0.1'},
                         {'mac_address': '00:00:00:00:00:01',
                          'ip_address': '10.0.0.1'}]
        self._create_port_with_address_pairs(address_pairs, 400)

    def test_create_port_extra_args(self):
        # Unknown keys in a pair are rejected.
        address_pairs = [{'mac_address': '00:00:00:00:00:01',
                          'ip_address': '10.0.0.1',
                          'icbb': 'agreed'}]
        self._create_port_with_address_pairs(address_pairs, 400)

    def _create_port_with_address_pairs(self, address_pairs, ret_code):
        # Helper: create a port with the given pairs and assert the HTTP code.
        with self.network() as net:
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=(addr_pair.ADDRESS_PAIRS,),
                                    allowed_address_pairs=address_pairs)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, ret_code)

    def test_update_add_address_pairs(self):
        # Pairs can be added to an existing port via update.
        with self.network() as net:
            res = self._create_port(self.fmt, net['network']['id'])
            port = self.deserialize(self.fmt, res)
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '10.0.0.1'}]
            update_port = {'port': {addr_pair.ADDRESS_PAIRS:
                                    address_pairs}}
            req = self.new_update_request('ports', update_port,
                                          port['port']['id'])
            port = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS],
                             address_pairs)
            self._delete('ports', port['port']['id'])

    def test_create_address_gets_port_mac(self):
        # A pair without mac_address defaults to the port's own MAC.
        with self.network() as net:
            address_pairs = [{'ip_address': '23.23.23.23'}]
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=('port_security_enabled',
                                              addr_pair.ADDRESS_PAIRS,),
                                    allowed_address_pairs=address_pairs)
            port = self.deserialize(self.fmt, res)['port']
            port_addr_mac = port[addr_pair.ADDRESS_PAIRS][0]['mac_address']
            self.assertEqual(port_addr_mac,
                             port['mac_address'])
            self._delete('ports', port['id'])

    def test_update_port_security_off_address_pairs(self):
        # Disabling port security while pairs exist conflicts (409).
        if self._skip_port_security:
            self.skipTest("Plugin does not implement port-security extension")
        with self.network() as net:
            with self.subnet(network=net):
                address_pairs = [{'mac_address': '00:00:00:00:00:01',
                                  'ip_address': '10.0.0.1'}]
                res = self._create_port(self.fmt, net['network']['id'],
                                        arg_list=('port_security_enabled',
                                                  addr_pair.ADDRESS_PAIRS,),
                                        port_security_enabled=True,
                                        allowed_address_pairs=address_pairs)
                port = self.deserialize(self.fmt, res)
                update_port = {'port': {psec.PORTSECURITY: False}}
                # If plugin implements security groups we also need to remove
                # the security group on port.
                plugin_obj = manager.NeutronManager.get_plugin()
                if 'security-groups' in plugin_obj.supported_extension_aliases:
                    update_port['port']['security_groups'] = []
                req = self.new_update_request('ports', update_port,
                                              port['port']['id'])
                res = req.get_response(self.api)
                self.assertEqual(res.status_int, 409)
                self._delete('ports', port['port']['id'])

    def test_create_port_remove_allowed_address_pairs(self):
        # Updating with an empty list clears the pairs.
        with self.network() as net:
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '10.0.0.1'}]
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=(addr_pair.ADDRESS_PAIRS,),
                                    allowed_address_pairs=address_pairs)
            port = self.deserialize(self.fmt, res)
            update_port = {'port': {addr_pair.ADDRESS_PAIRS: []}}
            req = self.new_update_request('ports', update_port,
                                          port['port']['id'])
            port = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS], [])
            self._delete('ports', port['port']['id'])
class TestAllowedAddressPairsXML(TestAllowedAddressPairs):
    # Re-run the entire suite above with the XML serializer.
    fmt = 'xml'
| |
# Authors: Tal Linzen <linzen@nyu.edu>
# Teon Brooks <teon.brooks@gmail.com>
# Denis A. Engemann <denis.engemann@gmail.com>
# Jona Sassenhagen <jona.sassenhagen@gmail.com>
# Marijn van Vliet <w.m.vanvliet@gmail.com>
#
# License: BSD (3-clause)
from inspect import isgenerator
from collections import namedtuple
import numpy as np
from scipy import linalg, sparse
from ..externals.six import string_types
from ..source_estimate import SourceEstimate
from ..epochs import BaseEpochs
from ..evoked import Evoked, EvokedArray
from ..utils import logger, _reject_data_segments, warn
from ..io.pick import pick_types, pick_info
def linear_regression(inst, design_matrix, names=None):
    """Fit Ordinary Least Squares regression (OLS).

    Parameters
    ----------
    inst : instance of Epochs | iterable of SourceEstimate
        The data to be regressed. Contains all the trials, sensors, and time
        points for the regression. For Source Estimates, accepts either a list
        or a generator object.
    design_matrix : ndarray, shape (n_observations, n_regressors)
        The regressors to be used. Must be a 2d array with as many rows as
        the first dimension of `data`. The first column of this matrix will
        typically consist of ones (intercept column).
    names : list-like | None
        Optional parameter to name the regressors. If provided, the length must
        correspond to the number of columns present in regressors
        (including the intercept, if present).
        Otherwise the default names are x0, x1, x2...xn for n regressors.

    Returns
    -------
    results : dict of namedtuple
        For each regressor (key) a namedtuple is provided with the
        following attributes:

            beta : regression coefficients
            stderr : standard error of regression coefficients
            t_val : t statistics (beta / stderr)
            p_val : two-sided p-value of t statistic under the t distribution
            mlog10_p_val : -log10 transformed p-value.

        The tuple members are numpy arrays. The shape of each numpy array is
        the shape of the data minus the first dimension; e.g., if the shape of
        the original data was (n_observations, n_channels, n_timepoints),
        then the shape of each of the arrays will be
        (n_channels, n_timepoints).
    """
    if names is None:
        names = ['x%i' % i for i in range(design_matrix.shape[1])]

    if isinstance(inst, BaseEpochs):
        # Fit only to data channels; warn if picking looks off.
        picks = pick_types(inst.info, meg=True, eeg=True, ref_meg=True,
                           stim=False, eog=False, ecg=False,
                           emg=False, exclude=['bads'])
        if [inst.ch_names[p] for p in picks] != inst.ch_names:
            warn('Fitting linear model to non-data or bad channels. '
                 'Check picking')
        msg = 'Fitting linear model to epochs'
        data = inst.get_data()
        out = EvokedArray(np.zeros(data.shape[1:]), inst.info, inst.tmin)
    elif isgenerator(inst):
        # Pull the first estimate to use as an output template.
        msg = 'Fitting linear model to source estimates (generator input)'
        out = next(inst)
        data = np.array([out.data] + [i.data for i in inst])
    elif isinstance(inst, list) and isinstance(inst[0], SourceEstimate):
        msg = 'Fitting linear model to source estimates (list input)'
        out = inst[0]
        data = np.array([i.data for i in inst])
    else:
        raise ValueError('Input must be epochs or iterable of source '
                         'estimates')
    # np.prod: np.product is deprecated and removed in NumPy 2.0.
    logger.info(msg + ', (%s targets, %s regressors)' %
                (np.prod(data.shape[1:]), len(names)))
    lm_params = _fit_lm(data, design_matrix, names)
    lm = namedtuple('lm', 'beta stderr t_val p_val mlog10_p_val')
    lm_fits = {}
    for name in names:
        parameters = [p[name] for p in lm_params]
        for ii, value in enumerate(parameters):
            # Wrap each statistic map in a copy of the output container.
            out_ = out.copy()
            if not isinstance(out_, (SourceEstimate, Evoked)):
                raise RuntimeError('Invalid container.')
            out_._data[:] = value
            parameters[ii] = out_
        lm_fits[name] = lm(*parameters)
    logger.info('Done')
    return lm_fits
def _fit_lm(data, design_matrix, names):
"""Aux function."""
from scipy import stats
n_samples = len(data)
n_features = np.product(data.shape[1:])
if design_matrix.ndim != 2:
raise ValueError('Design matrix must be a 2d array')
n_rows, n_predictors = design_matrix.shape
if n_samples != n_rows:
raise ValueError('Number of rows in design matrix must be equal '
'to number of observations')
if n_predictors != len(names):
raise ValueError('Number of regressor names must be equal to '
'number of column in design matrix')
y = np.reshape(data, (n_samples, n_features))
betas, resid_sum_squares, _, _ = linalg.lstsq(a=design_matrix, b=y)
df = n_rows - n_predictors
sqrt_noise_var = np.sqrt(resid_sum_squares / df).reshape(data.shape[1:])
design_invcov = linalg.inv(np.dot(design_matrix.T, design_matrix))
unscaled_stderrs = np.sqrt(np.diag(design_invcov))
tiny = np.finfo(np.float64).tiny
beta, stderr, t_val, p_val, mlog10_p_val = (dict() for _ in range(5))
for x, unscaled_stderr, predictor in zip(betas, unscaled_stderrs, names):
beta[predictor] = x.reshape(data.shape[1:])
stderr[predictor] = sqrt_noise_var * unscaled_stderr
p_val[predictor] = np.empty_like(stderr[predictor])
t_val[predictor] = np.empty_like(stderr[predictor])
stderr_pos = (stderr[predictor] > 0)
beta_pos = (beta[predictor] > 0)
t_val[predictor][stderr_pos] = (beta[predictor][stderr_pos] /
stderr[predictor][stderr_pos])
cdf = stats.t.cdf(np.abs(t_val[predictor][stderr_pos]), df)
p_val[predictor][stderr_pos] = np.clip((1. - cdf) * 2., tiny, 1.)
# degenerate cases
mask = (~stderr_pos & beta_pos)
t_val[predictor][mask] = np.inf * np.sign(beta[predictor][mask])
p_val[predictor][mask] = tiny
# could do NaN here, but hopefully this is safe enough
mask = (~stderr_pos & ~beta_pos)
t_val[predictor][mask] = 0
p_val[predictor][mask] = 1.
mlog10_p_val[predictor] = -np.log10(p_val[predictor])
return beta, stderr, t_val, p_val, mlog10_p_val
def linear_regression_raw(raw, events, event_id=None, tmin=-.1, tmax=1,
                          covariates=None, reject=None, flat=None, tstep=1.,
                          decim=1, picks=None, solver='cholesky'):
    """Estimate regression-based evoked potentials/fields by linear modeling.

    This models the full M/EEG time course, including correction for
    overlapping potentials and allowing for continuous/scalar predictors.
    Internally, this constructs a predictor matrix X of size
    n_samples * (n_conds * window length), solving the linear system
    ``Y = bX`` and returning ``b`` as evoked-like time series split by
    condition. See [1]_.

    Parameters
    ----------
    raw : instance of Raw
        A raw object. Note: be very careful about data that is not
        downsampled, as the resulting matrices can be enormous and easily
        overload your computer. Typically, 100 Hz sampling rate is
        appropriate - or using the decim keyword (see below).
    events : ndarray of int, shape (n_events, 3)
        An array where the first column corresponds to samples in raw
        and the last to integer codes in event_id.
    event_id : dict | None
        As in Epochs; a dictionary where the values may be integers or
        iterables of integers, corresponding to the 3rd column of
        events, and the keys are condition names.
        If None, uses all events in the events array.
    tmin : float | dict
        If float, gives the lower limit (in seconds) for the time window for
        which all event types' effects are estimated. If a dict, can be used to
        specify time windows for specific event types: keys correspond to keys
        in event_id and/or covariates; for missing values, the default (-.1) is
        used.
    tmax : float | dict
        If float, gives the upper limit (in seconds) for the time window for
        which all event types' effects are estimated. If a dict, can be used to
        specify time windows for specific event types: keys correspond to keys
        in event_id and/or covariates; for missing values, the default (1.) is
        used.
    covariates : dict-like | None
        If dict-like (e.g., a pandas DataFrame), values have to be array-like
        and of the same length as the rows in ```events```. Keys correspond
        to additional event types/conditions to be estimated and are matched
        with the time points given by the first column of ```events```. If
        None, only binary events (from event_id) are used.
    reject : None | dict
        For cleaning raw data before the regression is performed: set up
        rejection parameters based on peak-to-peak amplitude in continuously
        selected subepochs. If None, no rejection is done.
        If dict, keys are types ('grad' | 'mag' | 'eeg' | 'eog' | 'ecg')
        and values are the maximal peak-to-peak values to select rejected
        epochs, e.g.::

            reject = dict(grad=4000e-12, # T / m (gradiometers)
                          mag=4e-11, # T (magnetometers)
                          eeg=40e-5, # V (EEG channels)
                          eog=250e-5 # V (EOG channels))

    flat : None | dict
        For cleaning raw data before the regression is performed: set up
        rejection parameters based on flatness of the signal. If None, no
        rejection is done. If a dict, keys are ('grad' | 'mag' |
        'eeg' | 'eog' | 'ecg') and values are minimal peak-to-peak values to
        select rejected epochs.
    tstep : float
        Length of windows for peak-to-peak detection for raw data cleaning.
    decim : int
        Decimate by choosing only a subsample of data points. Highly
        recommended for data recorded at high sampling frequencies, as
        otherwise huge intermediate matrices have to be created and inverted.
    picks : None | list
        List of indices of channels to be included. If None, defaults to all
        MEG and EEG channels.
    solver : str | function
        Either a function which takes as its inputs the sparse predictor
        matrix X and the observation matrix Y, and returns the coefficient
        matrix b; or a string.
        X is of shape (n_times, n_predictors * time_window_length).
        y is of shape (n_channels, n_times).
        If str, must be ``'cholesky'``, in which case the solver used is
        ``linalg.solve(dot(X.T, X), dot(X.T, y))``.

    Returns
    -------
    evokeds : dict
        A dict where the keys correspond to conditions and the values are
        Evoked objects with the ER[F/P]s. These can be used exactly like any
        other Evoked object, including e.g. plotting or statistics.

    References
    ----------
    .. [1] Smith, N. J., & Kutas, M. (2015). Regression-based estimation of ERP
           waveforms: II. Non-linear effects, overlap correction, and practical
           considerations. Psychophysiology, 52(2), 169-189.
    """
    if isinstance(solver, string_types):
        if solver not in {"cholesky"}:
            raise ValueError("No such solver: {0}".format(solver))
        if solver == 'cholesky':
            def solver(X, y):
                a = (X.T * X).toarray()  # dot product of sparse matrices
                # assume_a='pos' replaces the `sym_pos=True` keyword, which
                # was deprecated and removed in SciPy 1.11; both request the
                # Cholesky code path for a symmetric positive-definite a.
                return linalg.solve(a, X.T * y, assume_a='pos',
                                    overwrite_a=True, overwrite_b=True).T
    elif callable(solver):
        pass
    else:
        raise TypeError("The solver must be a str or a callable.")

    # build data
    data, info, events = _prepare_rerp_data(raw, events, picks=picks,
                                            decim=decim)
    if event_id is None:
        event_id = dict((str(v), v) for v in set(events[:, 2]))

    # build predictors
    X, conds, cond_length, tmin_s, tmax_s = _prepare_rerp_preds(
        n_samples=data.shape[1], sfreq=info["sfreq"], events=events,
        event_id=event_id, tmin=tmin, tmax=tmax, covariates=covariates)

    # remove "empty" and contaminated data points
    X, data = _clean_rerp_input(X, data, reject, flat, decim, info, tstep)

    # solve linear system
    coefs = solver(X, data.T)
    if coefs.shape[0] != data.shape[0]:
        # typo fix: was "unexcepted shape"
        raise ValueError("solver output has unexpected shape. Supply a "
                         "function that returns coefficients in the form "
                         "(n_targets, n_features), where targets == channels.")

    # construct Evoked objects to be returned from output
    evokeds = _make_evokeds(coefs, conds, cond_length, tmin_s, tmax_s, info)
    return evokeds
def _prepare_rerp_data(raw, events, picks=None, decim=1):
    """Prepare events and data, primarily for `linear_regression_raw`."""
    def _has_duplicates(samples):
        # set() collapses repeated sample indices
        return len(set(samples)) < len(samples)

    if picks is None:
        picks = pick_types(raw.info, meg=True, eeg=True, ref_meg=True)
    info = pick_info(raw.info, picks)
    decim = int(decim)
    # decimation lowers the effective sampling frequency
    info["sfreq"] /= decim
    data, _ = raw[:]
    data = data[picks, ::decim]
    if _has_duplicates(events[:, 0]):
        raise ValueError("`events` contains duplicate time points. Make "
                         "sure all entries in the first column of `events` "
                         "are unique.")
    events = events.copy()
    # express event samples relative to the (decimated) data array
    events[:, 0] -= raw.first_samp
    events[:, 0] //= decim
    if _has_duplicates(events[:, 0]):
        raise ValueError("After decimating, `events` contains duplicate time "
                         "points. This means some events are too closely "
                         "spaced for the requested decimation factor. Choose "
                         "different events, drop close events, or choose a "
                         "different decimation factor.")
    return data, info, events
def _prepare_rerp_preds(n_samples, sfreq, events, event_id=None, tmin=-.1,
tmax=1, covariates=None):
"""Build predictor matrix and metadata (e.g. condition time windows)."""
conds = list(event_id)
if covariates is not None:
conds += list(covariates)
# time windows (per event type) are converted to sample points from times
# int(round()) to be safe and match Epochs constructor behavior
if isinstance(tmin, (float, int)):
tmin_s = dict((cond, int(round(tmin * sfreq))) for cond in conds)
else:
tmin_s = dict((cond, int(round(tmin.get(cond, -.1) * sfreq)))
for cond in conds)
if isinstance(tmax, (float, int)):
tmax_s = dict(
(cond, int(round((tmax * sfreq)) + 1)) for cond in conds)
else:
tmax_s = dict((cond, int(round(tmax.get(cond, 1.) * sfreq)) + 1)
for cond in conds)
# Construct predictor matrix
# We do this by creating one array per event type, shape (lags, samples)
# (where lags depends on tmin/tmax and can be different for different
# event types). Columns correspond to predictors, predictors correspond to
# time lags. Thus, each array is mostly sparse, with one diagonal of 1s
# per event (for binary predictors).
cond_length = dict()
xs = []
for cond in conds:
tmin_, tmax_ = tmin_s[cond], tmax_s[cond]
n_lags = int(tmax_ - tmin_) # width of matrix
if cond in event_id: # for binary predictors
ids = ([event_id[cond]]
if isinstance(event_id[cond], int)
else event_id[cond])
onsets = -(events[np.in1d(events[:, 2], ids), 0] + tmin_)
values = np.ones((len(onsets), n_lags))
else: # for predictors from covariates, e.g. continuous ones
covs = covariates[cond]
if len(covs) != len(events):
error = ("Condition {0} from ```covariates``` is "
"not the same length as ```events```").format(cond)
raise ValueError(error)
onsets = -(events[np.where(covs != 0), 0] + tmin_)[0]
v = np.asarray(covs)[np.nonzero(covs)].astype(float)
values = np.ones((len(onsets), n_lags)) * v[:, np.newaxis]
cond_length[cond] = len(onsets)
xs.append(sparse.dia_matrix((values, onsets),
shape=(n_samples, n_lags)))
return sparse.hstack(xs), conds, cond_length, tmin_s, tmax_s
def _clean_rerp_input(X, data, reject, flat, decim, info, tstep):
"""Remove empty and contaminated points from data & predictor matrices."""
# find only those positions where at least one predictor isn't 0
has_val = np.unique(X.nonzero()[0])
# reject positions based on extreme steps in the data
if reject is not None:
_, inds = _reject_data_segments(data, reject, flat, decim=None,
info=info, tstep=tstep)
for t0, t1 in inds:
has_val = np.setdiff1d(has_val, range(t0, t1))
return X.tocsr()[has_val], data[:, has_val]
def _make_evokeds(coefs, conds, cond_length, tmin_s, tmax_s, info):
    """Create a dictionary of Evoked objects.

    These will be created from a coefs matrix and condition durations.
    """
    evokeds = dict()
    offset = 0
    for cond in conds:
        n_lags = tmax_s[cond] - tmin_s[cond]  # window width in samples
        evokeds[cond] = EvokedArray(
            coefs[:, offset:offset + n_lags], info=info, comment=cond,
            tmin=tmin_s[cond] / float(info["sfreq"]),
            nave=cond_length[cond],
            kind='average')  # nave and kind are technically incorrect
        offset += n_lags
    return evokeds
| |
"""
A collection of tests covering legacy user management in DC/OS.
Legacy user management is considered to be the user management API offered by
`dcos-oauth` up to DC/OS release 1.12.
Assume that access control is activated in Master Admin Router (could be
disabled with `oauth_enabled`) and therefore authenticate individual HTTP
dcos_api_session.
One aspect of legacy DC/OS user management is that once authenticated a user can
add other users. Unauthenticated HTTP dcos_api_session are rejected by Master
Admin Router and user management fails (this is the coarse-grained authorization
model of (open) DC/OS). Here, test that unauthenticated HTTP dcos_api_session
cannot manage users. However, do not test that newly added users can add other
users: in this test suite we are limited to having authentication state for just
a single user available. This is why we can test managing other users only from
that first user's point of view. That is, we can not test that a user (e.g.
user2) which was added by the first user (user1) can add another user (user3).
"""
import logging
import uuid
import pytest
from dcos_test_utils import dcos_cli
from test_helpers import get_expanded_config
__maintainer__ = 'jgehrcke'
__contact__ = 'security-team@mesosphere.io'
log = logging.getLogger(__name__)
# Skip entire module in downstream integration tests.
@pytest.fixture(autouse=True)
def skip_in_downstream():
    # The enterprise (downstream) variant sets `security` in the expanded
    # config; these legacy user management tests only apply upstream.
    if 'security' in get_expanded_config():
        pytest.skip(
            'Skip upstream-specific user management tests',
            allow_module_level=True
        )
def get_users(apisession):
    """Return all IAM users as a dict keyed by uid."""
    response = apisession.get('/acs/api/v1/users')
    response.raise_for_status()
    return {entry['uid']: entry for entry in response.json()['array']}
def delete_user(apisession, uid):
    """Delete user `uid`, expecting a 204 (no content) response."""
    response = apisession.delete('/acs/api/v1/users/%s' % (uid, ))
    response.raise_for_status()
    assert response.status_code == 204
@pytest.fixture()
def remove_users_added_by_test(dcos_api_session):
    """Snapshot the user database and delete any users the test adds."""
    uids_before = set(get_users(dcos_api_session))
    log.info('remove_users_added_by_test pre test: users are %s', uids_before)
    try:
        yield
    finally:
        uids_after = set(get_users(dcos_api_session))
        for uid in uids_after - uids_before:
            log.info('remove_users_added_by_test post test: remove `%s`', uid)
            delete_user(dcos_api_session, uid)
def test_users_get(dcos_api_session):
    """The user collection contains at least one well-formed user."""
    users = get_users(dcos_api_session)
    assert users
    for userdict in users.values():
        assert 'uid' in userdict
        assert 'description' in userdict
def test_user_put_no_email_uid_empty_body(dcos_api_session):
# This test mainly demonstrates a subtle API difference between dcos-oauth
# (legacy) and Bouncer.
r = dcos_api_session.put('/acs/api/v1/users/user1')
# This is the old behavior in dcos-oauth.
# assert r.status_code == 500
# assert 'invalid email' in r.text
# With Bouncer non-email uids are valid, and the request fails as of the
# missing request body.
assert r.status_code == 400
assert 'Request has bad Content-Type or lacks JSON data' in r.text
@pytest.mark.usefixtures('remove_users_added_by_test')
def test_legacy_user_creation_with_empty_json_doc(dcos_api_session):
    # Legacy clients built for dcos-oauth (e.g. the web UI up to DC/OS 1.12)
    # insert users whose uid looks like an email address with a JSON body
    # lacking `public_key`/`password` (or entirely empty), expecting a
    # remote user usable with the legacy OIDC ID Token login through the
    # 'https://dcos.auth0.com/' provider. Bouncer keeps this behavior for
    # backwards compatibility.
    resp = dcos_api_session.put('/acs/api/v1/users/user@domain.foo', json={})
    assert resp.status_code == 201, resp.text
    # Bouncer annotates the created user (this is new compared to dcos-oauth).
    resp = dcos_api_session.get('/acs/api/v1/users/user@domain.foo')
    body = resp.json()
    assert body['provider_type'] == 'oidc'
    assert body['provider_id'] == 'https://dcos.auth0.com/'
    assert body['is_remote'] is True
    # For a uid that does not look like an email address Bouncer's saner
    # behavior applies: an empty (meaningless) JSON body results in a
    # useful error message.
    resp = dcos_api_session.put('/acs/api/v1/users/user1', json={})
    assert resp.status_code == 400
    assert 'One of `password` or `public_key` must be provided' in resp.text
@pytest.mark.usefixtures('remove_users_added_by_test')
def test_user_put_email_uid_and_description(dcos_api_session):
    """A user with an email-like uid and a description can be created."""
    response = dcos_api_session.put(
        '/acs/api/v1/users/user1@domain.foo',
        json={'description': 'integration test user'}
    )
    assert response.status_code == 201, response.text
    users = get_users(dcos_api_session)
    assert len(users) > 1
    assert 'user1@domain.foo' in users
@pytest.mark.usefixtures('remove_users_added_by_test')
def test_user_put_with_legacy_body(dcos_api_session):
    # The UI up to DC/OS 1.12 sends `creator_uid` and `cluster_url` even
    # though dcos-oauth never used them; Bouncer tolerates both properties
    # for legacy reasons. Note(JP): as a follow-up the UI should stop
    # sending these, after which they can be removed from Bouncer's
    # UserCreate JSON schema again, ideally within the 1.13 cycle.
    response = dcos_api_session.put(
        '/acs/api/v1/users/user2@domain.foo',
        json={'creator_uid': 'any@thing.bla', 'cluster_url': 'foobar'}
    )
    assert response.status_code == 201, response.text
@pytest.mark.usefixtures('remove_users_added_by_test')
def test_user_conflict(dcos_api_session):
    # The second PUT fails because the uid exists already; the empty
    # request body is not the decisive criterion here.
    url = '/acs/api/v1/users/user2@domain.foo'
    first = dcos_api_session.put(url, json={})
    assert first.status_code == 201, first.text
    second = dcos_api_session.put(url, json={})
    assert second.status_code == 409, second.text
@pytest.mark.usefixtures('remove_users_added_by_test')
def test_user_delete(dcos_api_session):
    """A freshly created user can be deleted and vanishes from the list."""
    user_url = '/acs/api/v1/users/user6@domain.foo'
    created = dcos_api_session.put(user_url, json={})
    created.raise_for_status()
    assert created.status_code == 201
    removed = dcos_api_session.delete(user_url)
    removed.raise_for_status()
    assert removed.status_code == 204
    assert 'user6@domain.foo' not in get_users(dcos_api_session)
def test_user_put_requires_authentication(noauth_api_session):
r = noauth_api_session.put('/acs/api/v1/users/user7@domain.foo', json={})
assert r.status_code == 401, r.text
def test_dynamic_ui_config(dcos_api_session):
r = dcos_api_session.get('/dcos-metadata/ui-config.json')
data = r.json()
assert not data['clusterConfiguration']['firstUser']
assert 'id' in data['clusterConfiguration']
assert 'uiConfiguration' in data
def test_dcos_add_user(dcos_api_session):
    """Verify that the dcos_add_user.py script creates an IAM user."""
    email_address = uuid.uuid4().hex + '@example.com'
    cli = dcos_cli.DcosCli('', '', '')
    cli.exec_command(
        ['python', '/opt/mesosphere/bin/dcos_add_user.py', email_address])
    try:
        response = dcos_api_session.get('/acs/api/v1/users')
        response.raise_for_status()
        expected_user_data = {
            "uid": email_address,
            "description": "",
            "url": "/acs/api/v1/users/" + email_address,
            "is_remote": True,
            "is_service": False,
            "provider_type": "oidc",
            "provider_id": "https://dcos.auth0.com/"
        }
        assert expected_user_data in response.json()['array']
    finally:
        # Clean up regardless of the assertion outcome.
        delete_user(dcos_api_session, email_address)
def test_check_message_on_adding_user_twice(dcos_api_session):
    """Adding the same user twice emits the 'already exists' notice."""
    email_address = uuid.uuid4().hex + '@example.com'
    cli = dcos_cli.DcosCli('', '', '')
    command = ['python', '/opt/mesosphere/bin/dcos_add_user.py', email_address]
    stdout, stderr = cli.exec_command(command)
    try:
        # The script logs to stderr, not stdout.
        assert stdout == ''
        assert stderr == '[INFO] Created IAM user `' + email_address + '`\n'
        stdout, stderr = cli.exec_command(command)
        assert stderr == '[INFO] User `' + email_address + '` already exists\n'
        assert stdout == ''
    finally:
        delete_user(dcos_api_session, email_address)
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates test runner factory and tests for GTests."""
# pylint: disable=W0212
import fnmatch
import glob
import logging
import os
import shutil
import sys
from pylib import android_commands
from pylib import cmd_helper
from pylib import constants
from pylib.gtest import test_package_apk
from pylib.gtest import test_package_exe
from pylib.gtest import test_runner
sys.path.insert(0,
os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib',
'common'))
import unittest_util # pylint: disable=F0401
# Maps test suite names to their .isolate files (paths relative to src/).
_ISOLATE_FILE_PATHS = {
    'base_unittests': 'base/base_unittests.isolate',
    'blink_heap_unittests':
      'third_party/WebKit/Source/heap/BlinkHeapUnitTests.isolate',
    'breakpad_unittests': 'breakpad/breakpad_unittests.isolate',
    'cc_perftests': 'cc/cc_perftests.isolate',
    'components_unittests': 'components/components_unittests.isolate',
    'content_browsertests': 'content/content_browsertests.isolate',
    'content_unittests': 'content/content_unittests.isolate',
    'media_perftests': 'media/media_perftests.isolate',
    'media_unittests': 'media/media_unittests.isolate',
    'net_unittests': 'net/net_unittests.isolate',
    'ui_unittests': 'ui/ui_unittests.isolate',
    'unit_tests': 'chrome/unit_tests.isolate',
    'webkit_unit_tests':
      'third_party/WebKit/Source/web/WebKitUnitTests.isolate',
}
# Paths relative to third_party/webrtc/ (kept separate for readability).
_WEBRTC_ISOLATE_FILE_PATHS = {
    'audio_decoder_unittests':
      'modules/audio_coding/neteq4/audio_decoder_unittests.isolate',
    'common_audio_unittests': 'common_audio/common_audio_unittests.isolate',
    'common_video_unittests': 'common_video/common_video_unittests.isolate',
    'modules_tests': 'modules/modules_tests.isolate',
    'modules_unittests': 'modules/modules_unittests.isolate',
    'neteq_unittests': 'modules/audio_coding/neteq/neteq_unittests.isolate',
    'system_wrappers_unittests':
      'system_wrappers/source/system_wrappers_unittests.isolate',
    'test_support_unittests': 'test/test_support_unittests.isolate',
    'tools_unittests': 'tools/tools_unittests.isolate',
    'video_engine_core_unittests':
      'video_engine/video_engine_core_unittests.isolate',
    'voice_engine_unittests': 'voice_engine/voice_engine_unittests.isolate',
}
# Append the WebRTC tests with the full path from Chromium's src/ root.
for webrtc_test, isolate_path in _WEBRTC_ISOLATE_FILE_PATHS.items():
  _ISOLATE_FILE_PATHS[webrtc_test] = 'third_party/webrtc/%s' % isolate_path
# Used for filtering large data deps at a finer grain than what's allowed in
# isolate files since pushing deps to devices is expensive.
# Wildcards are allowed.
_DEPS_EXCLUSION_LIST = [
    'chrome/test/data/extensions/api_test',
    'chrome/test/data/extensions/secure_shell',
    'chrome/test/data/firefox*',
    'chrome/test/data/gpu',
    'chrome/test/data/image_decoding',
    'chrome/test/data/import',
    'chrome/test/data/page_cycler',
    'chrome/test/data/perf',
    'chrome/test/data/pyauto_private',
    'chrome/test/data/safari_import',
    'chrome/test/data/scroll',
    'chrome/test/data/third_party',
    'third_party/hunspell_dictionaries/*.dic',
    # crbug.com/258690
    'webkit/data/bmp_decoder',
    'webkit/data/ico_decoder',
]
# Swarming's isolate.py, used to materialize each suite's dependency dir.
_ISOLATE_SCRIPT = os.path.join(
    constants.DIR_SOURCE_ROOT, 'tools', 'swarming_client', 'isolate.py')
def _GenerateDepsDirUsingIsolate(suite_name):
  """Generate the dependency dir for the test suite using isolate.

  Args:
    suite_name: Name of the test suite (e.g. base_unittests).
  """
  # Always start from a clean dependency directory.
  if os.path.isdir(constants.ISOLATE_DEPS_DIR):
    shutil.rmtree(constants.ISOLATE_DEPS_DIR)
  isolate_rel_path = _ISOLATE_FILE_PATHS.get(suite_name)
  if not isolate_rel_path:
    # Suites without an isolate file simply have no data deps to push.
    logging.info('Did not find an isolate file for the test suite.')
    return
  isolate_abs_path = os.path.join(constants.DIR_SOURCE_ROOT, isolate_rel_path)
  isolated_abs_path = os.path.join(
      constants.GetOutDirectory(), '%s.isolated' % suite_name)
  assert os.path.exists(isolate_abs_path)
  # This needs to be kept in sync with the cmd line options for isolate.py
  # in src/build/isolate.gypi.
  isolate_cmd = [
      'python', _ISOLATE_SCRIPT,
      'remap',
      '--isolate', isolate_abs_path,
      '--isolated', isolated_abs_path,
      '--outdir', constants.ISOLATE_DEPS_DIR,
      '--path-variable', 'PRODUCT_DIR', constants.GetOutDirectory(),
      '--config-variable', 'OS', 'android',
      '--config-variable', 'chromeos', '0',
      '--config-variable', 'component', 'static_library',
      '--config-variable', 'icu_use_data_file_flag', '0',
      '--config-variable', 'use_openssl', '0',
  ]
  # RunCmd returns a non-zero exit code on failure.
  assert not cmd_helper.RunCmd(isolate_cmd)
  # We're relying on the fact that timestamps are preserved
  # by the remap command (hardlinked). Otherwise, all the data
  # will be pushed to the device once we move to using time diff
  # instead of md5sum. Perform a sanity check here.
  for root, _, filenames in os.walk(constants.ISOLATE_DEPS_DIR):
    if filenames:
      linked_file = os.path.join(root, filenames[0])
      orig_file = os.path.join(
          constants.DIR_SOURCE_ROOT,
          os.path.relpath(linked_file, constants.ISOLATE_DEPS_DIR))
      # A hardlink shares its inode with the original file.
      if os.stat(linked_file).st_ino == os.stat(orig_file).st_ino:
        break
      else:
        raise Exception('isolate remap command did not use hardlinks.')
  # Delete excluded files as defined by _DEPS_EXCLUSION_LIST.
  old_cwd = os.getcwd()
  try:
    # chdir so the exclusion wildcards resolve relative to the deps dir.
    os.chdir(constants.ISOLATE_DEPS_DIR)
    excluded_paths = [x for y in _DEPS_EXCLUSION_LIST for x in glob.glob(y)]
    if excluded_paths:
      logging.info('Excluding the following from dependency list: %s',
                   excluded_paths)
    for p in excluded_paths:
      if os.path.isdir(p):
        shutil.rmtree(p)
      else:
        os.remove(p)
  finally:
    os.chdir(old_cwd)
  # On Android, all pak files need to be in the top-level 'paks' directory.
  paks_dir = os.path.join(constants.ISOLATE_DEPS_DIR, 'paks')
  os.mkdir(paks_dir)
  for root, _, filenames in os.walk(os.path.join(constants.ISOLATE_DEPS_DIR,
                                                 'out')):
    for filename in fnmatch.filter(filenames, '*.pak'):
      shutil.move(os.path.join(root, filename), paks_dir)
  # Move everything in PRODUCT_DIR to top level.
  deps_product_dir = os.path.join(constants.ISOLATE_DEPS_DIR, 'out',
                                  constants.GetBuildType())
  if os.path.isdir(deps_product_dir):
    for p in os.listdir(deps_product_dir):
      shutil.move(os.path.join(deps_product_dir, p), constants.ISOLATE_DEPS_DIR)
    os.rmdir(deps_product_dir)
  os.rmdir(os.path.join(constants.ISOLATE_DEPS_DIR, 'out'))
def _GetDisabledTestsFilterFromFile(suite_name):
"""Returns a gtest filter based on the *_disabled file.
Args:
suite_name: Name of the test suite (e.g. base_unittests).
Returns:
A gtest filter which excludes disabled tests.
Example: '*-StackTrace.*:StringPrintfTest.StringPrintfMisc'
"""
filter_file_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'filter', '%s_disabled' % suite_name)
if not filter_file_path or not os.path.exists(filter_file_path):
logging.info('No filter file found at %s', filter_file_path)
return '*'
filters = [x for x in [x.strip() for x in file(filter_file_path).readlines()]
if x and x[0] != '#']
disabled_filter = '*-%s' % ':'.join(filters)
logging.info('Applying filter "%s" obtained from %s',
disabled_filter, filter_file_path)
return disabled_filter
def _GetTestsFromDevice(runner_factory, devices):
  """Get a list of tests from a device.

  Args:
    runner_factory: Callable that takes device and shard_index and returns
        a TestRunner.
    devices: A list of device ids.

  Returns:
    All the tests in the test suite.
  """
  # Try devices one at a time; the first responsive device wins.
  for device in devices:
    try:
      logging.info('Obtaining tests from %s', device)
      return runner_factory(device, 0).GetAllTests()
    # NOTE: Python 2 exception syntax (`except ..., e`).
    except (android_commands.errors.WaitForResponseTimedOutError,
            android_commands.errors.DeviceUnresponsiveError), e:
      logging.warning('Failed obtaining test list from %s with exception: %s',
                      device, e)
  # All devices failed or timed out.
  raise Exception('Failed to obtain test list from devices.')
def _FilterTestsUsingPrefixes(all_tests, pre=False, manual=False):
"""Removes tests with disabled prefixes.
Args:
all_tests: List of tests to filter.
pre: If True, include tests with PRE_ prefix.
manual: If True, include tests with MANUAL_ prefix.
Returns:
List of tests remaining.
"""
filtered_tests = []
filter_prefixes = ['DISABLED_', 'FLAKY_', 'FAILS_']
if not pre:
filter_prefixes.append('PRE_')
if not manual:
filter_prefixes.append('MANUAL_')
for t in all_tests:
test_case, test = t.split('.', 1)
if not any([test_case.startswith(prefix) or test.startswith(prefix) for
prefix in filter_prefixes]):
filtered_tests.append(t)
return filtered_tests
def _FilterDisabledTests(tests, suite_name, has_gtest_filter):
  """Removes disabled tests from |tests|.

  Applies the following filters in order:
    1. Remove tests with disabled prefixes.
    2. Remove tests specified in the *_disabled files in the 'filter' dir

  Args:
    tests: List of tests.
    suite_name: Name of the test suite (e.g. base_unittests).
    has_gtest_filter: Whether a gtest_filter is provided.

  Returns:
    List of tests remaining.
  """
  # An explicit gtest_filter re-enables PRE_/MANUAL_ prefixed tests.
  remaining = _FilterTestsUsingPrefixes(
      tests, has_gtest_filter, has_gtest_filter)
  disabled_filter = _GetDisabledTestsFilterFromFile(suite_name)
  return unittest_util.FilterTestNames(remaining, disabled_filter)
def Setup(test_options, devices):
  """Create the test runner factory and tests.

  Args:
    test_options: A GTestOptions object.
    devices: A list of attached devices.

  Returns:
    A tuple of (TestRunnerFactory, tests).
  """
  # Prefer the APK flavor of the suite; fall back to the executable flavor.
  test_package = test_package_apk.TestPackageApk(test_options.suite_name)
  if not os.path.exists(test_package.suite_path):
    test_package = test_package_exe.TestPackageExecutable(
        test_options.suite_name)
    if not os.path.exists(test_package.suite_path):
      raise Exception(
          'Did not find %s target. Ensure it has been built.'
          % test_options.suite_name)
  logging.warning('Found target %s', test_package.suite_path)
  # Materialize the suite's data dependencies before any runner starts.
  _GenerateDepsDirUsingIsolate(test_options.suite_name)
  # Constructs a new TestRunner with the current options.
  def TestRunnerFactory(device, _shard_index):
    return test_runner.TestRunner(
        test_options,
        device,
        test_package)
  # Query an attached device for the full test list.
  tests = _GetTestsFromDevice(TestRunnerFactory, devices)
  if test_options.run_disabled:
    # Pass the flag through to gtest instead of filtering on the host.
    test_options = test_options._replace(
        test_arguments=('%s --gtest_also_run_disabled_tests' %
                        test_options.test_arguments))
  else:
    tests = _FilterDisabledTests(tests, test_options.suite_name,
                                 bool(test_options.gtest_filter))
  if test_options.gtest_filter:
    tests = unittest_util.FilterTestNames(tests, test_options.gtest_filter)
  # Coalesce unit tests into a single test per device
  # (xrange: this file targets Python 2).
  if test_options.suite_name != 'content_browsertests':
    num_devices = len(devices)
    tests = [':'.join(tests[i::num_devices]) for i in xrange(num_devices)]
    tests = [t for t in tests if t]
  return (TestRunnerFactory, tests)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import
"""Read images and perform augmentations for object detection."""
from __future__ import absolute_import, print_function
import random
import logging
import json
import numpy as np
from ..base import numeric_types
from .. import ndarray as nd
from ..ndarray._internal import _cvcopyMakeBorder as copyMakeBorder
from .. import io
from .image import RandomOrderAug, ColorJitterAug, LightingAug, ColorNormalizeAug
from .image import ResizeAug, ForceResizeAug, CastAug, HueJitterAug, RandomGrayAug
from .image import fixed_crop, ImageIter, Augmenter
class DetAugmenter(object):
    """Detection base augmenter"""
    def __init__(self, **kwargs):
        def _plain(value):
            # NDArray -> numpy -> list so the kwargs stay JSON-serializable.
            if isinstance(value, nd.NDArray):
                value = value.asnumpy()
            return value.tolist() if isinstance(value, np.ndarray) else value
        self._kwargs = {key: _plain(value) for key, value in kwargs.items()}

    def dumps(self):
        """Saves the Augmenter to string

        Returns
        -------
        str
            JSON formatted string that describes the Augmenter.
        """
        return json.dumps([self.__class__.__name__.lower(), self._kwargs])

    def __call__(self, src, label):
        """Abstract implementation body"""
        raise NotImplementedError("Must override implementation.")
class DetBorrowAug(DetAugmenter):
    """Wrap a classification augmenter that is known not to affect labels.
    Parameters
    ----------
    augmenter : mx.image.Augmenter
        The borrowed standard augmenter which has no effect on label
    """
    def __init__(self, augmenter):
        if not isinstance(augmenter, Augmenter):
            raise TypeError('Borrowing from invalid Augmenter')
        super(DetBorrowAug, self).__init__(augmenter=augmenter.dumps())
        self.augmenter = augmenter
    def dumps(self):
        """Serialize as [name, wrapped dump] to avoid dumping the wrapped
        augmenter twice (it is already stored in ``_kwargs``)."""
        return [self.__class__.__name__.lower(), self.augmenter.dumps()]
    def __call__(self, src, label):
        """Apply the wrapped augmenter to the image; the label is untouched."""
        return (self.augmenter(src), label)
class DetRandomSelectAug(DetAugmenter):
    """Randomly select one augmenter to apply, with chance to skip all.
    Parameters
    ----------
    aug_list : list of DetAugmenter
        The random selection will be applied to one of the augmenters
    skip_prob : float
        The probability to skip all augmenters and return input directly
    """
    def __init__(self, aug_list, skip_prob=0):
        super(DetRandomSelectAug, self).__init__(skip_prob=skip_prob)
        if not isinstance(aug_list, (list, tuple)):
            aug_list = [aug_list]
        invalid = [a for a in aug_list if not isinstance(a, DetAugmenter)]
        if invalid:
            raise ValueError('Allow DetAugmenter in list only')
        if not aug_list:
            skip_prob = 1  # nothing to select from; always skip
        self.aug_list = aug_list
        self.skip_prob = skip_prob
    def dumps(self):
        """Override default."""
        return [self.__class__.__name__.lower(), [x.dumps() for x in self.aug_list]]
    def __call__(self, src, label):
        """Either skip entirely or apply one randomly-chosen augmenter."""
        if random.random() < self.skip_prob:
            return (src, label)
        # Shuffle-then-take-first, matching the original selection strategy
        # (the list is intentionally reordered in place).
        random.shuffle(self.aug_list)
        return self.aug_list[0](src, label)
class DetHorizontalFlipAug(DetAugmenter):
    """Random horizontal flipping.
    Parameters
    ----------
    p : float
        chance [0, 1] to flip
    """
    def __init__(self, p):
        super(DetHorizontalFlipAug, self).__init__(p=p)
        self.p = p
    def __call__(self, src, label):
        """Flip image and boxes together with probability ``p``."""
        if random.random() >= self.p:
            return (src, label)
        flipped = nd.flip(src, axis=1)
        self._flip_label(label)
        return (flipped, label)
    def _flip_label(self, label):
        """Mirror the normalized xmin/xmax columns of ``label`` in place."""
        # Reflect both x-coordinates and swap them so xmin stays <= xmax.
        new_xmin = 1.0 - label[:, 3]
        new_xmax = 1.0 - label[:, 1]
        label[:, 1] = new_xmin
        label[:, 3] = new_xmax
class DetRandomCropAug(DetAugmenter):
    """Random cropping with constraints
    Parameters
    ----------
    min_object_covered : float, default=0.1
        The cropped area of the image must contain at least this fraction of
        any bounding box supplied. The value of this parameter should be non-negative.
        In the case of 0, the cropped area does not need to overlap any of the
        bounding boxes supplied.
    min_eject_coverage : float, default=0.3
        The minimum coverage of cropped sample w.r.t its original size. With this
        constraint, objects that have marginal area after crop will be discarded.
    aspect_ratio_range : tuple of floats, default=(0.75, 1.33)
        The cropped area of the image must have an aspect ratio = width / height
        within this range.
    area_range : tuple of floats, default=(0.05, 1.0)
        The cropped area of the image must contain a fraction of the supplied
        image within in this range.
    max_attempts : int, default=50
        Number of attempts at generating a cropped/padded region of the image of the
        specified constraints. After max_attempts failures, return the original image.
    """
    def __init__(self, min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33),
                 area_range=(0.05, 1.0), min_eject_coverage=0.3, max_attempts=50):
        # Promote scalar range arguments to degenerate (x, x) tuples so the
        # rest of the class can treat them uniformly.
        if not isinstance(aspect_ratio_range, (tuple, list)):
            assert isinstance(aspect_ratio_range, numeric_types)
            logging.info('Using fixed aspect ratio: %s in DetRandomCropAug',
                         str(aspect_ratio_range))
            aspect_ratio_range = (aspect_ratio_range, aspect_ratio_range)
        if not isinstance(area_range, (tuple, list)):
            assert isinstance(area_range, numeric_types)
            logging.info('Using fixed area range: %s in DetRandomCropAug', area_range)
            area_range = (area_range, area_range)
        super(DetRandomCropAug, self).__init__(min_object_covered=min_object_covered,
                                               aspect_ratio_range=aspect_ratio_range,
                                               area_range=area_range,
                                               min_eject_coverage=min_eject_coverage,
                                               max_attempts=max_attempts)
        self.min_object_covered = min_object_covered
        self.min_eject_coverage = min_eject_coverage
        self.max_attempts = max_attempts
        self.aspect_ratio_range = aspect_ratio_range
        self.area_range = area_range
        self.enabled = False
        # Invalid ranges disable the augmenter (no-op with a warning) instead
        # of raising. Use logging.warning: logging.warn is a deprecated alias
        # removed in Python 3.13.
        if (area_range[1] <= 0 or area_range[0] > area_range[1]):
            logging.warning('Skip DetRandomCropAug due to invalid area_range: %s', area_range)
        elif (aspect_ratio_range[0] > aspect_ratio_range[1] or aspect_ratio_range[0] <= 0):
            logging.warning('Skip DetRandomCropAug due to invalid aspect_ratio_range: %s',
                            aspect_ratio_range)
        else:
            self.enabled = True
    def __call__(self, src, label):
        """Crop the image (and convert labels) if a valid proposal is found;
        otherwise return the input unchanged."""
        crop = self._random_crop_proposal(label, src.shape[0], src.shape[1])
        if crop:
            x, y, w, h, label = crop
            src = fixed_crop(src, x, y, w, h, None)
        return (src, label)
    def _calculate_areas(self, label):
        """Calculate areas for multiple labels (columns are xmin/ymin/xmax/ymax)."""
        heights = np.maximum(0, label[:, 3] - label[:, 1])
        widths = np.maximum(0, label[:, 2] - label[:, 0])
        return heights * widths
    def _intersect(self, label, xmin, ymin, xmax, ymax):
        """Calculate intersect areas, normalized. Boxes with no overlap are zeroed."""
        left = np.maximum(label[:, 0], xmin)
        right = np.minimum(label[:, 2], xmax)
        top = np.maximum(label[:, 1], ymin)
        bot = np.minimum(label[:, 3], ymax)
        invalid = np.where(np.logical_or(left >= right, top >= bot))[0]
        out = label.copy()
        out[:, 0] = left
        out[:, 1] = top
        out[:, 2] = right
        out[:, 3] = bot
        out[invalid, :] = 0
        return out
    def _check_satisfy_constraints(self, label, xmin, ymin, xmax, ymax, width, height):
        """Return True if the candidate crop covers every overlapped object by
        at least ``min_object_covered``; False otherwise."""
        if (xmax - xmin) * (ymax - ymin) < 2:
            return False  # only 1 pixel
        x1 = float(xmin) / width
        y1 = float(ymin) / height
        x2 = float(xmax) / width
        y2 = float(ymax) / height
        object_areas = self._calculate_areas(label[:, 1:])
        valid_objects = np.where(object_areas * width * height > 2)[0]
        if valid_objects.size < 1:
            return False
        intersects = self._intersect(label[valid_objects, 1:], x1, y1, x2, y2)
        coverages = self._calculate_areas(intersects) / object_areas[valid_objects]
        coverages = coverages[np.where(coverages > 0)[0]]
        if coverages.size > 0 and np.amin(coverages) > self.min_object_covered:
            return True
        # BUG FIX: previously fell off the end returning None; make the
        # boolean contract explicit (behavior-equivalent, None is falsy).
        return False
    def _update_labels(self, label, crop_box, height, width):
        """Convert labels according to crop box; returns None if no valid
        object survives the crop."""
        xmin = float(crop_box[0]) / width
        ymin = float(crop_box[1]) / height
        w = float(crop_box[2]) / width
        h = float(crop_box[3]) / height
        out = label.copy()
        # Columns 1-4 are xmin/ymin/xmax/ymax in normalized image coords.
        out[:, (1, 3)] -= xmin
        out[:, (2, 4)] -= ymin
        out[:, (1, 3)] /= w
        out[:, (2, 4)] /= h
        out[:, 1:5] = np.maximum(0, out[:, 1:5])
        out[:, 1:5] = np.minimum(1, out[:, 1:5])
        # Discard boxes whose surviving area is below min_eject_coverage.
        coverage = self._calculate_areas(out[:, 1:]) * w * h / self._calculate_areas(label[:, 1:])
        valid = np.logical_and(out[:, 3] > out[:, 1], out[:, 4] > out[:, 2])
        valid = np.logical_and(valid, coverage > self.min_eject_coverage)
        valid = np.where(valid)[0]
        if valid.size < 1:
            return None
        out = out[valid, :]
        return out
    def _random_crop_proposal(self, label, height, width):
        """Propose cropping areas; returns (x, y, w, h, new_label) or ()."""
        from math import sqrt
        if not self.enabled or height <= 0 or width <= 0:
            return ()
        min_area = self.area_range[0] * height * width
        max_area = self.area_range[1] * height * width
        for _ in range(self.max_attempts):
            ratio = random.uniform(*self.aspect_ratio_range)
            if ratio <= 0:
                continue
            h = int(round(sqrt(min_area / ratio)))
            max_h = int(round(sqrt(max_area / ratio)))
            if round(max_h * ratio) > width:
                # find smallest max_h satifying round(max_h * ratio) <= width
                max_h = int((width + 0.4999999) / ratio)
            if max_h > height:
                max_h = height
            if h > max_h:
                h = max_h
            if h < max_h:
                # generate random h in range [h, max_h]
                h = random.randint(h, max_h)
            w = int(round(h * ratio))
            assert w <= width
            # trying to fix rounding problems
            area = w * h
            if area < min_area:
                h += 1
                w = int(round(h * ratio))
                area = w * h
            if area > max_area:
                h -= 1
                w = int(round(h * ratio))
                area = w * h
            if (area < min_area or area > max_area or w > width or h > height \
                or w <= 0 or h <= 0):
                continue
            y = random.randint(0, max(0, height - h))
            x = random.randint(0, max(0, width - w))
            if self._check_satisfy_constraints(label, x, y, x + w, y + h, width, height):
                new_label = self._update_labels(label, (x, y, w, h), height, width)
                if new_label is not None:
                    return (x, y, w, h, new_label)
        return ()
class DetRandomPadAug(DetAugmenter):
    """Random padding augmenter.
    Parameters
    ----------
    aspect_ratio_range : tuple of floats, default=(0.75, 1.33)
        The padded area of the image must have an aspect ratio = width / height
        within this range.
    area_range : tuple of floats, default=(1.0, 3.0)
        The padded area of the image must be larger than the original area
    max_attempts : int, default=50
        Number of attempts at generating a padded region of the image of the
        specified constraints. After max_attempts failures, return the original image.
    pad_val: float or tuple of float, default=(128, 128, 128)
        pixel value to be filled when padding is enabled.
    """
    def __init__(self, aspect_ratio_range=(0.75, 1.33), area_range=(1.0, 3.0),
                 max_attempts=50, pad_val=(128, 128, 128)):
        if not isinstance(pad_val, (list, tuple)):
            assert isinstance(pad_val, numeric_types)
            # BUG FIX: `(pad_val)` was a parenthesized scalar, not a tuple —
            # the trailing comma makes the scalar-to-tuple normalization real,
            # mirroring the range normalizations below. (Assumes copyMakeBorder
            # accepts a 1-tuple for `values` — confirm against its signature.)
            pad_val = (pad_val,)
        if not isinstance(aspect_ratio_range, (list, tuple)):
            assert isinstance(aspect_ratio_range, numeric_types)
            logging.info('Using fixed aspect ratio: %s in DetRandomPadAug',
                         str(aspect_ratio_range))
            aspect_ratio_range = (aspect_ratio_range, aspect_ratio_range)
        if not isinstance(area_range, (tuple, list)):
            assert isinstance(area_range, numeric_types)
            logging.info('Using fixed area range: %s in DetRandomPadAug', area_range)
            area_range = (area_range, area_range)
        super(DetRandomPadAug, self).__init__(aspect_ratio_range=aspect_ratio_range,
                                              area_range=area_range, max_attempts=max_attempts,
                                              pad_val=pad_val)
        self.pad_val = pad_val
        self.aspect_ratio_range = aspect_ratio_range
        self.area_range = area_range
        self.max_attempts = max_attempts
        self.enabled = False
        # Invalid ranges disable the augmenter (no-op with a warning).
        # logging.warning replaces the deprecated logging.warn alias
        # (removed in Python 3.13).
        if (area_range[1] <= 1.0 or area_range[0] > area_range[1]):
            logging.warning('Skip DetRandomPadAug due to invalid parameters: %s', area_range)
        elif (aspect_ratio_range[0] <= 0 or aspect_ratio_range[0] > aspect_ratio_range[1]):
            logging.warning('Skip DetRandomPadAug due to invalid aspect_ratio_range: %s',
                            aspect_ratio_range)
        else:
            self.enabled = True
    def __call__(self, src, label):
        """Pad the image (and convert labels) if a valid proposal is found;
        otherwise return the input unchanged."""
        height, width, _ = src.shape
        pad = self._random_pad_proposal(label, height, width)
        if pad:
            x, y, w, h, label = pad
            # 16 is the border type passed through to the underlying cv
            # copyMakeBorder; pad_val fills the new border pixels.
            src = copyMakeBorder(src, y, h-y-height, x, w-x-width, 16, values=self.pad_val)
        return (src, label)
    def _update_labels(self, label, pad_box, height, width):
        """Re-normalize box coordinates relative to the padded canvas."""
        out = label.copy()
        out[:, (1, 3)] = (out[:, (1, 3)] * width + pad_box[0]) / pad_box[2]
        out[:, (2, 4)] = (out[:, (2, 4)] * height + pad_box[1]) / pad_box[3]
        return out
    def _random_pad_proposal(self, label, height, width):
        """Generate random padding region; returns (x, y, w, h, new_label) or ()."""
        from math import sqrt
        if not self.enabled or height <= 0 or width <= 0:
            return ()
        min_area = self.area_range[0] * height * width
        max_area = self.area_range[1] * height * width
        for _ in range(self.max_attempts):
            ratio = random.uniform(*self.aspect_ratio_range)
            if ratio <= 0:
                continue
            h = int(round(sqrt(min_area / ratio)))
            max_h = int(round(sqrt(max_area / ratio)))
            # Grow h until the padded region encloses the original image.
            if round(h * ratio) < width:
                h = int((width + 0.499999) / ratio)
            if h < height:
                h = height
            if h > max_h:
                h = max_h
            if h < max_h:
                h = random.randint(h, max_h)
            w = int(round(h * ratio))
            if (h - height) < 2 or (w - width) < 2:
                continue  # marginal padding is not helpful
            y = random.randint(0, max(0, h - height))
            x = random.randint(0, max(0, w - width))
            new_label = self._update_labels(label, (x, y, w, h), height, width)
            return (x, y, w, h, new_label)
        return ()
def CreateMultiRandCropAugmenter(min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33),
                                 area_range=(0.05, 1.0), min_eject_coverage=0.3,
                                 max_attempts=50, skip_prob=0):
    """Helper function to create multiple random crop augmenters.

    Each parameter may be a scalar (applied to every augmenter) or a list;
    scalar parameters are broadcast to the length of the longest list.

    Parameters
    ----------
    min_object_covered : float or list of float, default=0.1
        The cropped area of the image must contain at least this fraction of
        any bounding box supplied. Non-negative; 0 means the crop need not
        overlap any box.
    min_eject_coverage : float or list of float, default=0.3
        The minimum coverage of a cropped object w.r.t. its original size;
        objects with marginal remaining area are discarded.
    aspect_ratio_range : tuple of floats or list of tuple of floats, default=(0.75, 1.33)
        Allowed width/height ratio range of the cropped area.
    area_range : tuple of floats or list of tuple of floats, default=(0.05, 1.0)
        Allowed fraction of the original image covered by the crop.
    max_attempts : int or list of int, default=50
        Attempts at generating a valid crop before giving up and returning
        the original image.
    skip_prob : float, default=0
        Probability of skipping all crop augmenters entirely.

    Returns
    -------
    DetRandomSelectAug
        A selector that randomly applies one of the created crop augmenters.

    Examples
    --------
    >>> # An example of creating multiple random crop augmenters
    >>> min_object_covered = [0.1, 0.3, 0.5, 0.7, 0.9]  # use 5 augmenters
    >>> aspect_ratio_range = (0.75, 1.33)  # use same range for all augmenters
    >>> area_range = [(0.1, 1.0), (0.2, 1.0), (0.2, 1.0), (0.3, 0.9), (0.5, 1.0)]
    >>> aug = mx.image.det.CreateMultiRandCropAugmenter(min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range, area_range=area_range,
        min_eject_coverage=0.3, max_attempts=50, skip_prob=0)
    >>> aug.dumps()  # show some details
    """
    def align_parameters(params):
        """Broadcast scalar parameters so every list shares the same length."""
        listed = [p if isinstance(p, list) else [p] for p in params]
        num = max(len(p) for p in listed)
        for idx, p in enumerate(listed):
            if len(p) != num:
                # Only length-1 lists may be broadcast; mismatched lengths
                # indicate caller error.
                assert len(p) == 1
                listed[idx] = p * num
        return listed

    aligned = align_parameters([min_object_covered, aspect_ratio_range, area_range,
                                min_eject_coverage, max_attempts])
    augs = [DetRandomCropAug(min_object_covered=moc, aspect_ratio_range=arr,
                             area_range=ar, min_eject_coverage=mec, max_attempts=ma)
            for moc, arr, ar, mec, ma in zip(*aligned)]
    return DetRandomSelectAug(augs, skip_prob=skip_prob)
def CreateDetAugmenter(data_shape, resize=0, rand_crop=0, rand_pad=0, rand_gray=0,
                       rand_mirror=False, mean=None, std=None, brightness=0, contrast=0,
                       saturation=0, pca_noise=0, hue=0, inter_method=2, min_object_covered=0.1,
                       aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 3.0),
                       min_eject_coverage=0.3, max_attempts=50, pad_val=(127, 127, 127)):
    """Create augmenters for detection.
    Parameters
    ----------
    data_shape : tuple of int
        Shape for output data
    resize : int
        Resize shorter edge if larger than 0 at the begining
    rand_crop : float
        [0, 1], probability to apply random cropping
    rand_pad : float
        [0, 1], probability to apply random padding
    rand_gray : float
        [0, 1], probability to convert to grayscale for all channels
    rand_mirror : bool
        Whether to apply horizontal flip to image with probability 0.5
    mean : np.ndarray or None
        Mean pixel values for [r, g, b]
    std : np.ndarray or None
        Standard deviations for [r, g, b]
    brightness : float
        Brightness jittering range (percent)
    contrast : float
        Contrast jittering range (percent)
    saturation : float
        Saturation jittering range (percent)
    hue : float
        Hue jittering range (percent)
    pca_noise : float
        Pca noise level (percent)
    inter_method : int, default=2(Area-based)
        Interpolation method for all resizing operations
        Possible values:
        0: Nearest Neighbors Interpolation.
        1: Bilinear interpolation.
        2: Area-based (resampling using pixel area relation). It may be a
        preferred method for image decimation, as it gives moire-free
        results. But when the image is zoomed, it is similar to the Nearest
        Neighbors method. (used by default).
        3: Bicubic interpolation over 4x4 pixel neighborhood.
        4: Lanczos interpolation over 8x8 pixel neighborhood.
        9: Cubic for enlarge, area for shrink, bilinear for others
        10: Random select from interpolation method metioned above.
        Note:
        When shrinking an image, it will generally look best with AREA-based
        interpolation, whereas, when enlarging an image, it will generally look best
        with Bicubic (slow) or Bilinear (faster but still looks OK).
    min_object_covered : float
        The cropped area of the image must contain at least this fraction of
        any bounding box supplied. The value of this parameter should be non-negative.
        In the case of 0, the cropped area does not need to overlap any of the
        bounding boxes supplied.
    min_eject_coverage : float
        The minimum coverage of cropped sample w.r.t its original size. With this
        constraint, objects that have marginal area after crop will be discarded.
    aspect_ratio_range : tuple of floats
        The cropped area of the image must have an aspect ratio = width / height
        within this range.
    area_range : tuple of floats
        The cropped area of the image must contain a fraction of the supplied
        image within in this range.
    max_attempts : int
        Number of attempts at generating a cropped/padded region of the image of the
        specified constraints. After max_attempts failures, return the original image.
    pad_val: float
        Pixel value to be filled when padding is enabled. pad_val will automatically
        be subtracted by mean and divided by std if applicable.
    Examples
    --------
    >>> # An example of creating multiple augmenters
    >>> augs = mx.image.CreateDetAugmenter(data_shape=(3, 300, 300), rand_crop=0.5,
    ...    rand_pad=0.5, rand_mirror=True, mean=True, brightness=0.125, contrast=0.125,
    ...    saturation=0.125, pca_noise=0.05, inter_method=10, min_object_covered=[0.3, 0.5, 0.9],
    ...    area_range=(0.3, 3.0))
    >>> # dump the details
    >>> for aug in augs:
    ...    aug.dumps()
    """
    # The augmenters are appended in pipeline order; geometry-changing ops
    # (resize, crop, mirror, pad, force-resize) come before color ops so the
    # color transforms see the final image geometry.
    auglist = []
    if resize > 0:
        auglist.append(DetBorrowAug(ResizeAug(resize, inter_method)))
    if rand_crop > 0:
        # Crop parameters may be scalars or lists; the helper broadcasts them
        # and wraps everything in a random selector with skip probability.
        crop_augs = CreateMultiRandCropAugmenter(min_object_covered, aspect_ratio_range,
                                                 area_range, min_eject_coverage,
                                                 max_attempts, skip_prob=(1 - rand_crop))
        auglist.append(crop_augs)
    if rand_mirror > 0:
        auglist.append(DetHorizontalFlipAug(0.5))
    # apply random padding as late as possible to save computation
    if rand_pad > 0:
        # Padding area must be >= 1.0x the image, hence the (1.0, upper) range.
        pad_aug = DetRandomPadAug(aspect_ratio_range,
                                  (1.0, area_range[1]), max_attempts, pad_val)
        auglist.append(DetRandomSelectAug([pad_aug], 1 - rand_pad))
    # force resize
    auglist.append(DetBorrowAug(ForceResizeAug((data_shape[2], data_shape[1]), inter_method)))
    auglist.append(DetBorrowAug(CastAug()))
    if brightness or contrast or saturation:
        auglist.append(DetBorrowAug(ColorJitterAug(brightness, contrast, saturation)))
    if hue:
        auglist.append(DetBorrowAug(HueJitterAug(hue)))
    if pca_noise > 0:
        # Fixed PCA eigenvalues/eigenvectors (AlexNet-style lighting noise).
        eigval = np.array([55.46, 4.794, 1.148])
        eigvec = np.array([[-0.5675, 0.7192, 0.4009],
                           [-0.5808, -0.0045, -0.8140],
                           [-0.5836, -0.6948, 0.4203]])
        auglist.append(DetBorrowAug(LightingAug(pca_noise, eigval, eigvec)))
    if rand_gray > 0:
        auglist.append(DetBorrowAug(RandomGrayAug(rand_gray)))
    # mean/std may be True (use ImageNet defaults), an ndarray, or None.
    if mean is True:
        mean = np.array([123.68, 116.28, 103.53])
    elif mean is not None:
        assert isinstance(mean, np.ndarray) and mean.shape[0] in [1, 3]
    if std is True:
        std = np.array([58.395, 57.12, 57.375])
    elif std is not None:
        assert isinstance(std, np.ndarray) and std.shape[0] in [1, 3]
    if mean is not None or std is not None:
        auglist.append(DetBorrowAug(ColorNormalizeAug(mean, std)))
    return auglist
class ImageDetIter(ImageIter):
    """Image iterator with a large number of augmentation choices for detection.
    Parameters
    ----------
    aug_list : list or None
        Augmenter list for generating distorted images
    batch_size : int
        Number of examples per batch.
    data_shape : tuple
        Data shape in (channels, height, width) format.
        For now, only RGB image with 3 channels is supported.
    path_imgrec : str
        Path to image record file (.rec).
        Created with tools/im2rec.py or bin/im2rec.
    path_imglist : str
        Path to image list (.lst).
        Created with tools/im2rec.py or with custom script.
        Format: Tab separated record of index, one or more labels and relative_path_from_root.
    imglist: list
        A list of images with the label(s).
        Each item is a list [imagelabel: float or list of float, imgpath].
    path_root : str
        Root folder of image files.
    path_imgidx : str
        Path to image index file. Needed for partition and shuffling when using .rec source.
    shuffle : bool
        Whether to shuffle all images at the start of each iteration or not.
        Can be slow for HDD.
    part_index : int
        Partition index.
    num_parts : int
        Total number of partitions.
    data_name : str
        Data name for provided symbols.
    label_name : str
        Name for detection labels
    kwargs : ...
        More arguments for creating augmenter. See mx.image.CreateDetAugmenter.
    """
    def __init__(self, batch_size, data_shape,
                 path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None,
                 shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None,
                 data_name='data', label_name='label', **kwargs):
        # Pass an empty aug_list to the base class: detection-aware
        # augmentation is handled here, not by ImageIter.
        super(ImageDetIter, self).__init__(batch_size=batch_size, data_shape=data_shape,
                                           path_imgrec=path_imgrec, path_imglist=path_imglist,
                                           path_root=path_root, path_imgidx=path_imgidx,
                                           shuffle=shuffle, part_index=part_index,
                                           num_parts=num_parts, aug_list=[], imglist=imglist,
                                           data_name=data_name, label_name=label_name)
        if aug_list is None:
            self.auglist = CreateDetAugmenter(data_shape, **kwargs)
        else:
            self.auglist = aug_list
        # went through all labels to get the proper label shape
        label_shape = self._estimate_label_shape()
        self.provide_label = [(label_name, (self.batch_size, label_shape[0], label_shape[1]))]
        self.label_shape = label_shape
    def _check_valid_label(self, label):
        """Validate label and its shape.

        Raises
        ------
        RuntimeError
            If the label is not 2-D with at least 5 columns, or if no row
            satisfies id >= 0, xmax > xmin and ymax > ymin.
        """
        if len(label.shape) != 2 or label.shape[1] < 5:
            msg = "Label with shape (1+, 5+) required, %s received." % str(label)
            raise RuntimeError(msg)
        # BUG FIX: np.logical_and(a, b, c) treats the third positional
        # argument as the `out` parameter, so the y-coordinate check was
        # silently dropped. Nest the calls to AND all three conditions.
        valid_label = np.where(np.logical_and(label[:, 0] >= 0,
                                              np.logical_and(label[:, 3] > label[:, 1],
                                                             label[:, 4] > label[:, 2])))[0]
        if valid_label.size < 1:
            raise RuntimeError('Invalid label occurs.')
    def _estimate_label_shape(self):
        """Helper function to estimate label shape by scanning the dataset
        for the maximum number of objects per image."""
        max_count = 0
        self.reset()
        try:
            while True:
                label, _ = self.next_sample()
                label = self._parse_label(label)
                max_count = max(max_count, label.shape[0])
        except StopIteration:
            pass
        self.reset()
        # NOTE(review): `label` is unbound if the dataset yields no samples
        # at all; assumes at least one sample exists — confirm upstream.
        return (max_count, label.shape[1])
    def _parse_label(self, label):
        """Helper function to parse object detection label.
        Format for raw label:
        n \t k \t ... \t [id \t xmin\t ymin \t xmax \t ymax \t ...] \t [repeat]
        where n is the width of header, 2 or larger
        k is the width of each object annotation, can be arbitrary, at least 5
        """
        if isinstance(label, nd.NDArray):
            label = label.asnumpy()
        raw = label.ravel()
        # Minimum: 2 header values + one 5-wide object annotation.
        if raw.size < 7:
            raise RuntimeError("Label shape is invalid: " + str(raw.shape))
        header_width = int(raw[0])
        obj_width = int(raw[1])
        if (raw.size - header_width) % obj_width != 0:
            msg = "Label shape %s inconsistent with annotation width %d." \
                %(str(raw.shape), obj_width)
            raise RuntimeError(msg)
        out = np.reshape(raw[header_width:], (-1, obj_width))
        # remove bad ground-truths
        valid = np.where(np.logical_and(out[:, 3] > out[:, 1], out[:, 4] > out[:, 2]))[0]
        if valid.size < 1:
            raise RuntimeError('Encounter sample with no valid label.')
        return out[valid, :]
    def reshape(self, data_shape=None, label_shape=None):
        """Reshape iterator for data_shape or label_shape.
        Parameters
        ----------
        data_shape : tuple or None
            Reshape the data_shape to the new shape if not None
        label_shape : tuple or None
            Reshape label shape to new shape if not None
        """
        if data_shape is not None:
            self.check_data_shape(data_shape)
            self.provide_data = [(self.provide_data[0][0], (self.batch_size,) + data_shape)]
        if label_shape is not None:
            self.check_label_shape(label_shape)
            self.provide_label = [(self.provide_label[0][0], (self.batch_size,) + label_shape)]
    def next(self):
        """Override the function for returning next batch.

        Invalid images/labels are skipped; unused label slots are padded
        with -1. Raises StopIteration only if no valid sample was collected.
        """
        batch_size = self.batch_size
        c, h, w = self.data_shape
        batch_data = nd.zeros((batch_size, c, h, w))
        batch_label = nd.empty(self.provide_label[0][1])
        batch_label[:] = -1
        i = 0
        try:
            while i < batch_size:
                label, s = self.next_sample()
                data = self.imdecode(s)
                try:
                    self.check_valid_image([data])
                    label = self._parse_label(label)
                    data, label = self.augmentation_transform(data, label)
                    self._check_valid_label(label)
                except RuntimeError as e:
                    logging.debug('Invalid image, skipping: %s', str(e))
                    continue
                for datum in [data]:
                    assert i < batch_size, 'Batch size must be multiples of augmenter output length'
                    batch_data[i] = self.postprocess_data(datum)
                    num_object = label.shape[0]
                    batch_label[i][0:num_object] = nd.array(label)
                    if num_object < batch_label[i].shape[0]:
                        batch_label[i][num_object:] = -1
                    i += 1
        except StopIteration:
            if not i:
                raise StopIteration
        # The last (partial) batch is padded; pad count = batch_size - i.
        return io.DataBatch([batch_data], [batch_label], batch_size - i)
    def augmentation_transform(self, data, label):  # pylint: disable=arguments-differ
        """Override Transforms input data with specified augmentations."""
        for aug in self.auglist:
            data, label = aug(data, label)
        return (data, label)
    def check_label_shape(self, label_shape):
        """Checks if the new label shape is valid"""
        if not len(label_shape) == 2:
            raise ValueError('label_shape should have length 2')
        if label_shape[0] < self.label_shape[0]:
            msg = 'Attempts to reduce label count from %d to %d, not allowed.' \
                % (self.label_shape[0], label_shape[0])
            raise ValueError(msg)
        if label_shape[1] != self.provide_label[0][1][2]:
            msg = 'label_shape object width inconsistent: %d vs %d.' \
                % (self.provide_label[0][1][2], label_shape[1])
            raise ValueError(msg)
    def draw_next(self, color=None, thickness=2, mean=None, std=None, clip=True,
                  waitKey=None, window_name='draw_next'):
        """Display next image with bounding boxes drawn.
        Parameters
        ----------
        color : tuple
            Bounding box color in RGB, use None for random color
        thickness : int
            Bounding box border thickness
        mean : True or numpy.ndarray
            Compensate for the mean to have better visual effect
        std : True or numpy.ndarray
            Revert standard deviations
        clip : bool
            If true, clip to [0, 255] for better visual effect
        waitKey : None or int
            Hold the window for waitKey milliseconds if set, skip ploting if None
        window_name : str
            Plot window name if waitKey is set.
        Returns
        -------
        numpy.ndarray
        Examples
        --------
        >>> # use draw_next to get images with bounding boxes drawn
        >>> iterator = mx.image.ImageDetIter(1, (3, 600, 600), path_imgrec='train.rec')
        >>> for image in iterator.draw_next(waitKey=None):
        ...     # display image
        >>> # or let draw_next display using cv2 module
        >>> for image in iterator.draw_next(waitKey=0, window_name='disp'):
        ...     pass
        """
        try:
            import cv2
        except ImportError as e:
            # logging.warn is a deprecated alias; use logging.warning.
            logging.warning('Unable to import cv2, skip drawing: %s', str(e))
            # BUG FIX (PEP 479): `raise StopIteration` inside a generator is
            # converted to RuntimeError on Python 3.7+; `return` is the
            # correct way to end the generator.
            return
        count = 0
        try:
            while True:
                label, s = self.next_sample()
                data = self.imdecode(s)
                try:
                    self.check_valid_image([data])
                    label = self._parse_label(label)
                except RuntimeError as e:
                    logging.debug('Invalid image, skipping: %s', str(e))
                    continue
                count += 1
                data, label = self.augmentation_transform(data, label)
                image = data.asnumpy()
                # revert color_normalize
                if std is True:
                    std = np.array([58.395, 57.12, 57.375])
                elif std is not None:
                    assert isinstance(std, np.ndarray) and std.shape[0] in [1, 3]
                if std is not None:
                    image *= std
                if mean is True:
                    mean = np.array([123.68, 116.28, 103.53])
                elif mean is not None:
                    assert isinstance(mean, np.ndarray) and mean.shape[0] in [1, 3]
                if mean is not None:
                    image += mean
                # swap RGB to BGR for cv2 display conventions
                image[:, :, (0, 1, 2)] = image[:, :, (2, 1, 0)]
                if clip:
                    image = np.maximum(0, np.minimum(255, image))
                if color:
                    color = color[::-1]
                image = image.astype(np.uint8)
                height, width, _ = image.shape
                for i in range(label.shape[0]):
                    x1 = int(label[i, 1] * width)
                    if x1 < 0:
                        continue
                    y1 = int(label[i, 2] * height)
                    x2 = int(label[i, 3] * width)
                    y2 = int(label[i, 4] * height)
                    bc = np.random.rand(3) * 255 if not color else color
                    cv2.rectangle(image, (x1, y1), (x2, y2), bc, thickness)
                if waitKey is not None:
                    cv2.imshow(window_name, image)
                    cv2.waitKey(waitKey)
                yield image
        except StopIteration:
            if not count:
                # BUG FIX (PEP 479): re-raising StopIteration from a generator
                # becomes RuntimeError on Python 3.7+; just end the generator.
                return
    def sync_label_shape(self, it, verbose=False):
        """Synchronize label shape with the input iterator. This is useful when
        train/validation iterators have different label padding.
        Parameters
        ----------
        it : ImageDetIter
            The other iterator to synchronize
        verbose : bool
            Print verbose log if true
        Returns
        -------
        ImageDetIter
            The synchronized other iterator, the internal label shape is updated as well.
        Examples
        --------
        >>> train_iter = mx.image.ImageDetIter(32, (3, 300, 300), path_imgrec='train.rec')
        >>> val_iter = mx.image.ImageDetIter(32, (3, 300, 300), path_imgrec='val.rec')
        >>> train_iter.label_shape
        (30, 6)
        >>> val_iter.label_shape
        (25, 6)
        >>> val_iter = train_iter.sync_label_shape(val_iter, verbose=False)
        >>> train_iter.label_shape
        (30, 6)
        >>> val_iter.label_shape
        (30, 6)
        """
        assert isinstance(it, ImageDetIter), 'Synchronize with invalid iterator.'
        train_label_shape = self.label_shape
        val_label_shape = it.label_shape
        assert train_label_shape[1] == val_label_shape[1], "object width mismatch."
        max_count = max(train_label_shape[0], val_label_shape[0])
        if max_count > train_label_shape[0]:
            self.reshape(None, (max_count, train_label_shape[1]))
        if max_count > val_label_shape[0]:
            it.reshape(None, (max_count, val_label_shape[1]))
        if verbose and max_count > min(train_label_shape[0], val_label_shape[0]):
            logging.info('Resized label_shape to (%d, %d).', max_count, train_label_shape[1])
        return it
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import wtforms
import warehouse.utils.otp as otp
import warehouse.utils.webauthn as webauthn
from warehouse import forms
from warehouse.accounts.forms import (
NewEmailMixin,
NewPasswordMixin,
PasswordMixin,
TOTPValueMixin,
WebAuthnCredentialMixin,
)
from warehouse.i18n import localize as _
class RoleNameMixin:
    # Dropdown for choosing a collaborator role. The empty first choice acts
    # as a placeholder; DataRequired rejects it, forcing an explicit pick.
    role_name = wtforms.SelectField(
        "Select role",
        choices=[("", "Select role"), ("Maintainer", "Maintainer"), ("Owner", "Owner")],
        validators=[wtforms.validators.DataRequired(message="Select role")],
    )
class UsernameMixin:
    """Form mixin providing a required ``username`` field that must resolve
    to an existing user via the host form's ``user_service``."""

    username = wtforms.StringField(
        validators=[wtforms.validators.DataRequired(message="Specify username")]
    )

    def validate_username(self, field):
        # The host form is responsible for setting ``self.user_service``.
        if self.user_service.find_userid(field.data) is None:
            raise wtforms.validators.ValidationError(
                "No user found with that username. Try again."
            )
class CreateRoleForm(RoleNameMixin, UsernameMixin, forms.Form):
    # Grant a named user a role on a project: role choice from RoleNameMixin,
    # username (validated against user_service) from UsernameMixin.
    def __init__(self, *args, user_service, **kwargs):
        super().__init__(*args, **kwargs)
        # Required by UsernameMixin.validate_username.
        self.user_service = user_service
class ChangeRoleForm(RoleNameMixin, forms.Form):
    # Role selection only — unlike CreateRoleForm there is no username field,
    # since the target user is already known when changing a role.
    pass
class SaveAccountForm(forms.Form):
    """Profile settings form: display name plus which verified email
    address (if any) to show publicly."""

    __params__ = ["name", "public_email"]

    name = wtforms.StringField(
        validators=[
            wtforms.validators.Length(
                max=100,
                message=_(
                    "The name is too long. "
                    "Choose a name with 100 characters or less."
                ),
            )
        ]
    )
    public_email = wtforms.SelectField(choices=[("", "Not displayed")])

    def __init__(self, *args, user_service, user_id, **kwargs):
        super().__init__(*args, **kwargs)
        self.user_service = user_service
        self.user_id = user_id
        # Offer only this user's verified addresses as public choices.
        user = user_service.get_user(user_id)
        verified = [e.email for e in user.emails if e.verified]
        self.public_email.choices.extend((email, email) for email in verified)

    def validate_public_email(self, field):
        # Empty means "not displayed", which is always acceptable.
        if not field.data:
            return
        user = self.user_service.get_user(self.user_id)
        verified = {e.email for e in user.emails if e.verified}
        if field.data not in verified:
            raise wtforms.validators.ValidationError(
                "%s is not a verified email for %s" % (field.data, user.username)
            )
class AddEmailForm(NewEmailMixin, forms.Form):
    """Form for adding a new email address to an account."""

    __params__ = ["email"]

    def __init__(self, *args, user_service, user_id, **kwargs):
        super().__init__(*args, **kwargs)
        # Presumably consumed by NewEmailMixin's validators (defined in
        # warehouse.accounts.forms, not visible here) — confirm there.
        self.user_service = user_service
        self.user_id = user_id
class ChangePasswordForm(PasswordMixin, NewPasswordMixin, forms.Form):
    """Form for changing the account password: current password plus the
    new password and its confirmation."""

    __params__ = ["password", "new_password", "password_confirm"]

    def __init__(self, *args, user_service, **kwargs):
        super().__init__(*args, **kwargs)
        self.user_service = user_service
class ConfirmPasswordForm(UsernameMixin, PasswordMixin, forms.Form):
    """Form requiring the user to re-enter username and password to
    confirm a sensitive action."""

    # NOTE(review): __params__ lists "confirm_password" while the mixins
    # define ``username`` and ``password`` fields — verify this matches
    # how the consuming view/template reads the parameters.
    __params__ = ["confirm_password"]

    def __init__(self, *args, user_service, **kwargs):
        super().__init__(*args, **kwargs)
        self.user_service = user_service
class DeleteTOTPForm(ConfirmPasswordForm):
    """Password-confirmation form used when disabling TOTP 2FA.

    Currently identical to ConfirmPasswordForm.
    """

    # TODO: delete?
    pass
class ProvisionTOTPForm(TOTPValueMixin, forms.Form):
    """Form confirming TOTP provisioning with a first valid code against
    the freshly generated secret."""

    __params__ = ["totp_value"]

    def __init__(self, *args, totp_secret, **kwargs):
        super().__init__(*args, **kwargs)
        self.totp_secret = totp_secret

    def validate_totp_value(self, field):
        # The otp helper expects the candidate code as bytes.
        candidate = field.data.encode("utf8")
        if otp.verify_totp(self.totp_secret, candidate):
            return
        raise wtforms.validators.ValidationError("Invalid TOTP code. Try again?")
class DeleteWebAuthnForm(forms.Form):
    """Form for removing a WebAuthn security key, identified by its label.

    On successful validation the matching credential is stashed on
    ``self.webauthn`` so the view can delete it without a second lookup.
    """

    __params__ = ["confirm_device_name"]

    label = wtforms.StringField(
        validators=[
            wtforms.validators.DataRequired(message="Specify a device name"),
            wtforms.validators.Length(
                max=64, message=("Label must be 64 characters or less")
            ),
        ]
    )

    def __init__(self, *args, user_service, user_id, **kwargs):
        super().__init__(*args, **kwargs)
        self.user_service = user_service
        self.user_id = user_id

    def validate_label(self, field):
        # Use a local name that does not shadow the module-level
        # ``warehouse.utils.webauthn`` import (the original local was
        # called ``webauthn``).
        record = self.user_service.get_webauthn_by_label(self.user_id, field.data)
        if record is None:
            raise wtforms.validators.ValidationError("No WebAuthn key with given label")
        self.webauthn = record
class ProvisionWebAuthnForm(WebAuthnCredentialMixin, forms.Form):
    """Form validating registration of a new WebAuthn credential.

    On success the verified credential is stored on
    ``self.validated_credential``.
    """

    __params__ = ["label", "credential"]

    label = wtforms.StringField(
        validators=[
            wtforms.validators.DataRequired(message="Specify a label"),
            wtforms.validators.Length(
                max=64, message=("Label must be 64 characters or less")
            ),
        ]
    )

    def __init__(
        self, *args, user_service, user_id, challenge, rp_id, origin, **kwargs
    ):
        super().__init__(*args, **kwargs)
        self.user_service = user_service
        self.user_id = user_id
        self.challenge = challenge
        self.rp_id = rp_id
        self.origin = origin

    def validate_credential(self, field):
        payload = field.data.encode("utf-8")

        # First make sure the payload is well-formed JSON at all.
        try:
            json.loads(payload)
        except json.JSONDecodeError:
            raise wtforms.validators.ValidationError(
                "Invalid WebAuthn credential: Bad payload"
            )

        # Then hand it to the user service for full WebAuthn verification
        # against the stored challenge, relying party id, and origin.
        try:
            self.validated_credential = self.user_service.verify_webauthn_credential(
                payload,
                challenge=self.challenge,
                rp_id=self.rp_id,
                origin=self.origin,
            )
        except webauthn.RegistrationRejectedError as e:
            raise wtforms.validators.ValidationError(str(e))

    def validate_label(self, field):
        # Labels must be unique per user.
        if self.user_service.get_webauthn_by_label(self.user_id, field.data) is not None:
            raise wtforms.validators.ValidationError(
                f"Label '{field.data}' already in use"
            )
class CreateMacaroonForm(forms.Form):
    """Form for creating a new API token (macaroon).

    Validates that the token name is unique per user and that the scope is
    either the whole user account or one of the user's projects.
    """

    __params__ = ["description", "token_scope"]

    # Field definitions placed before __init__ for consistency with every
    # other form class in this module (class-body order does not affect
    # wtforms behavior).
    description = wtforms.StringField(
        validators=[
            wtforms.validators.DataRequired(message="Specify a token name"),
            wtforms.validators.Length(
                max=100, message="Description must be 100 characters or less"
            ),
        ]
    )

    token_scope = wtforms.StringField(
        validators=[wtforms.validators.DataRequired(message="Specify the token scope")]
    )

    def __init__(self, *args, user_id, macaroon_service, project_names, **kwargs):
        super().__init__(*args, **kwargs)
        self.user_id = user_id
        self.macaroon_service = macaroon_service
        self.project_names = project_names

    def validate_description(self, field):
        # Token names must be unique per user.
        description = field.data
        if (
            self.macaroon_service.get_macaroon_by_description(self.user_id, description)
            is not None
        ):
            raise wtforms.validators.ValidationError("API token name already in use")

    def validate_token_scope(self, field):
        """Parse a scope of the form ``token:<kind>[:<project>]``.

        Sets ``self.validated_scope`` to the string "user" or to
        ``{"projects": [<project name>]}``.

        NOTE: raises wtforms.validators.ValidationError throughout for
        consistency with the rest of this module (the original mixed in the
        equivalent re-export ``wtforms.ValidationError``).
        """
        scope = field.data
        try:
            _, scope_kind = scope.split(":", 1)
        except ValueError:
            raise wtforms.validators.ValidationError(f"Unknown token scope: {scope}")

        if scope_kind == "unspecified":
            raise wtforms.validators.ValidationError("Specify the token scope")

        if scope_kind == "user":
            self.validated_scope = scope_kind
            return

        try:
            scope_kind, scope_value = scope_kind.split(":", 1)
        except ValueError:
            raise wtforms.validators.ValidationError(f"Unknown token scope: {scope}")

        if scope_kind != "project":
            raise wtforms.validators.ValidationError(f"Unknown token scope: {scope}")
        if scope_value not in self.project_names:
            raise wtforms.validators.ValidationError(
                f"Unknown or invalid project name: {scope_value}"
            )

        self.validated_scope = {"projects": [scope_value]}
class DeleteMacaroonForm(UsernameMixin, PasswordMixin, forms.Form):
    """Form confirming deletion of an API token by its identifier,
    re-authenticated with username and password."""

    __params__ = ["confirm_password", "macaroon_id"]

    macaroon_id = wtforms.StringField(
        validators=[wtforms.validators.DataRequired(message="Identifier required")]
    )

    def __init__(self, *args, macaroon_service, user_service, **kwargs):
        super().__init__(*args, **kwargs)
        self.user_service = user_service
        self.macaroon_service = macaroon_service

    def validate_macaroon_id(self, field):
        # The identifier must refer to an existing macaroon.
        if self.macaroon_service.find_macaroon(field.data) is None:
            raise wtforms.validators.ValidationError("No such macaroon")
class Toggle2FARequirementForm(forms.Form):
    """Form used to toggle a two-factor-authentication requirement."""

    __params__ = ["two_factor_requirement_sentinel"]

    # Hidden field with no validators; appears to serve only as a marker
    # that the toggle form was submitted — confirm against the consuming view.
    two_factor_requirement_sentinel = wtforms.HiddenField()
| |
"""
Test the lldb command line completion mechanism.
"""
from __future__ import print_function
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbplatform
from lldbsuite.test import lldbutil
class CommandLineCompletionTestCase(TestBase):
    """Exercise lldb's interactive command-line tab completion.

    Each test drives complete_from_to / completions_match /
    check_completion_with_desc from the lldb test harness.
    """

    mydir = TestBase.compute_mydir(__file__)

    NO_DEBUG_INFO_TESTCASE = True

    @classmethod
    def classCleanup(cls):
        """Cleanup the test byproducts."""
        try:
            os.remove("child_send.txt")
            os.remove("child_read.txt")
        except OSError:
            # Catch only filesystem errors (files may simply not exist);
            # the original bare ``except:`` would also hide real bugs such
            # as NameError or KeyboardInterrupt.
            pass

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_at(self):
        """Test that 'at' completes to 'attach '."""
        self.complete_from_to('at', 'attach ')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_de(self):
        """Test that 'de' completes to 'detach '."""
        self.complete_from_to('de', 'detach ')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_frame_variable(self):
        """Test completion of 'frame variable' with and without debug info."""
        self.build()
        self.main_source = "main.cpp"
        self.main_source_spec = lldb.SBFileSpec(self.main_source)

        (target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(self,
                                          '// Break here', self.main_source_spec)
        self.assertEqual(process.GetState(), lldb.eStateStopped)
        # FIXME: This pulls in the debug information to make the completions work,
        # but the completions should also work without.
        self.runCmd("frame variable fooo")

        self.complete_from_to('frame variable fo',
                              'frame variable fooo')
        self.complete_from_to('frame variable fooo.',
                              'frame variable fooo.')
        self.complete_from_to('frame variable fooo.dd',
                              'frame variable fooo.dd')

        self.complete_from_to('frame variable ptr_fooo->',
                              'frame variable ptr_fooo->')
        self.complete_from_to('frame variable ptr_fooo->dd',
                              'frame variable ptr_fooo->dd')

        self.complete_from_to('frame variable cont',
                              'frame variable container')
        self.complete_from_to('frame variable container.',
                              'frame variable container.MemberVar')
        self.complete_from_to('frame variable container.Mem',
                              'frame variable container.MemberVar')

        self.complete_from_to('frame variable ptr_cont',
                              'frame variable ptr_container')
        self.complete_from_to('frame variable ptr_container->',
                              'frame variable ptr_container->MemberVar')
        self.complete_from_to('frame variable ptr_container->Mem',
                              'frame variable ptr_container->MemberVar')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_process_attach_dash_dash_con(self):
        """Test that 'process attach --con' completes to 'process attach --continue '."""
        self.complete_from_to(
            'process attach --con',
            'process attach --continue ')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_process_launch_arch(self):
        """Test that 'process launch --arch ' offers architecture candidates."""
        self.complete_from_to('process launch --arch ',
                              ['mips',
                               'arm64'])

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_ambiguous_long_opt(self):
        """Test that an ambiguous long option lists all matching options."""
        self.completions_match('breakpoint modify --th',
                               ['--thread-id',
                                '--thread-index',
                                '--thread-name'])

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_plugin_load(self):
        """Test that 'plugin load ' produces no (bogus) completions."""
        self.complete_from_to('plugin load ', [])

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_quoted_command(self):
        """Test completion of a double-quoted command name."""
        self.complete_from_to('"set',
                              ['"settings" '])

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_quoted_arg_with_quoted_command(self):
        """Test completion of a quoted argument after a quoted command."""
        self.complete_from_to('"settings" "repl',
                              ['"replace" '])

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_quoted_arg_without_quoted_command(self):
        """Test completion of a quoted argument after an unquoted command."""
        self.complete_from_to('settings "repl',
                              ['"replace" '])

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_single_quote_command(self):
        """Test completion of a single-quoted command name."""
        self.complete_from_to("'set",
                              ["'settings' "])

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_terminated_quote_command(self):
        # This should not crash, but we don't get any
        # reasonable completions from this.
        self.complete_from_to("'settings'", [])

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_process_launch_arch_arm(self):
        """Test that 'process launch --arch arm' completes to arm64."""
        self.complete_from_to('process launch --arch arm',
                              ['arm64'])

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_target_symbols_add_shlib(self):
        # Doesn't seem to work, but at least it shouldn't crash.
        self.complete_from_to('target symbols add --shlib ', [])

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_log_file(self):
        # Complete in our source directory which contains a 'main.cpp' file.
        src_dir = os.path.dirname(os.path.realpath(__file__)) + '/'
        self.complete_from_to('log enable lldb expr -f ' + src_dir,
                              ['main.cpp'])

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_log_dir(self):
        # Complete our source directory.
        src_dir = os.path.dirname(os.path.realpath(__file__))
        self.complete_from_to('log enable lldb expr -f ' + src_dir,
                              [src_dir + os.sep], turn_off_re_match=True)

    # <rdar://problem/11052829>
    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_infinite_loop_while_completing(self):
        """Test that 'process print hello\' completes to itself and does not infinite loop."""
        self.complete_from_to('process print hello\\', 'process print hello\\',
                              turn_off_re_match=True)

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_watchpoint_co(self):
        """Test that 'watchpoint co' completes to 'watchpoint command '."""
        self.complete_from_to('watchpoint co', 'watchpoint command ')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_watchpoint_command_space(self):
        """Test that 'watchpoint command ' completes to ['add', 'delete', 'list']."""
        self.complete_from_to(
            'watchpoint command ', [
                'add', 'delete', 'list'])

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_watchpoint_command_a(self):
        """Test that 'watchpoint command a' completes to 'watchpoint command add '."""
        self.complete_from_to(
            'watchpoint command a',
            'watchpoint command add ')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_watchpoint_set_ex(self):
        """Test that 'watchpoint set ex' completes to 'watchpoint set expression '."""
        self.complete_from_to(
            'watchpoint set ex',
            'watchpoint set expression ')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_watchpoint_set_var(self):
        """Test that 'watchpoint set var' completes to 'watchpoint set variable '."""
        self.complete_from_to('watchpoint set var', 'watchpoint set variable ')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_help_fi(self):
        """Test that 'help fi' completes to ['file', 'finish']."""
        self.complete_from_to(
            'help fi', [
                'file', 'finish'])

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_help_watchpoint_s(self):
        """Test that 'help watchpoint s' completes to 'help watchpoint set '."""
        self.complete_from_to('help watchpoint s', 'help watchpoint set ')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_settings_append_target_er(self):
        """Test that 'settings append target.er' completes to 'settings append target.error-path'."""
        self.complete_from_to(
            'settings append target.er',
            'settings append target.error-path')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_settings_insert_after_target_en(self):
        """Test that 'settings insert-after target.env' completes to 'settings insert-after target.env-vars'."""
        self.complete_from_to(
            'settings insert-after target.env',
            'settings insert-after target.env-vars')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_settings_insert_before_target_en(self):
        """Test that 'settings insert-before target.env' completes to 'settings insert-before target.env-vars'."""
        self.complete_from_to(
            'settings insert-before target.env',
            'settings insert-before target.env-vars')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_settings_replace_target_ru(self):
        """Test that 'settings replace target.ru' completes to 'settings replace target.run-args'."""
        self.complete_from_to(
            'settings replace target.ru',
            'settings replace target.run-args')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_settings_show_term(self):
        """Test that 'settings show term-' completes to 'settings show term-width'."""
        self.complete_from_to(
            'settings show term-',
            'settings show term-width')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_settings_list_term(self):
        """Test that 'settings list term-' completes to 'settings list term-width'."""
        self.complete_from_to(
            'settings list term-',
            'settings list term-width')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_settings_remove_term(self):
        """Test that 'settings remove term-' completes to 'settings remove term-width'."""
        self.complete_from_to(
            'settings remove term-',
            'settings remove term-width')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_settings_s(self):
        """Test that 'settings s' completes to ['set', 'show']."""
        self.complete_from_to(
            'settings s', [
                'set', 'show'])

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_settings_set_th(self):
        """Test that 'settings set thread-f' completes to 'settings set thread-format'."""
        self.complete_from_to('settings set thread-f', 'settings set thread-format')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_settings_s_dash(self):
        """Test that 'settings set --g' completes to 'settings set --global'."""
        self.complete_from_to('settings set --g', 'settings set --global')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_settings_clear_th(self):
        """Test that 'settings clear thread-f' completes to 'settings clear thread-format'."""
        self.complete_from_to(
            'settings clear thread-f',
            'settings clear thread-format')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_settings_set_ta(self):
        """Test that 'settings set ta' completes to 'settings set target.'."""
        self.complete_from_to(
            'settings set target.ma',
            'settings set target.max-')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_settings_set_target_exec(self):
        """Test that 'settings set target.exec' completes to 'settings set target.exec-search-paths '."""
        self.complete_from_to(
            'settings set target.exec',
            'settings set target.exec-search-paths')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_settings_set_target_pr(self):
        """Test that 'settings set target.pr' completes to [
        'target.prefer-dynamic-value', 'target.process.']."""
        self.complete_from_to('settings set target.pr',
                              ['target.prefer-dynamic-value',
                               'target.process.'])

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_settings_set_target_process(self):
        """Test that 'settings set target.process' completes to 'settings set target.process.'."""
        self.complete_from_to(
            'settings set target.process',
            'settings set target.process.')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_settings_set_target_process_dot(self):
        """Test that 'settings set target.process.t' completes to 'settings set target.process.thread.'."""
        self.complete_from_to(
            'settings set target.process.t',
            'settings set target.process.thread.')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_settings_set_target_process_thread_dot(self):
        """Test that 'settings set target.process.thread.' completes to [
        'target.process.thread.step-avoid-regexp', 'target.process.thread.trace-thread']."""
        self.complete_from_to('settings set target.process.thread.',
                              ['target.process.thread.step-avoid-regexp',
                               'target.process.thread.trace-thread'])

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_target_space(self):
        """Test that 'target ' completes to ['create', 'delete', 'list',
        'modules', 'select', 'stop-hook', 'variable']."""
        self.complete_from_to('target ',
                              ['create',
                               'delete',
                               'list',
                               'modules',
                               'select',
                               'stop-hook',
                               'variable'])

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_target_create_dash_co(self):
        """Test that 'target create --co' completes to 'target variable --core '."""
        self.complete_from_to('target create --co', 'target create --core ')

    @skipIfFreeBSD  # timing out on the FreeBSD buildbot
    def test_target_va(self):
        """Test that 'target va' completes to 'target variable '."""
        self.complete_from_to('target va', 'target variable ')

    def test_command_argument_completion(self):
        """Test completion of command arguments"""
        self.complete_from_to("watchpoint set variable -", ["-w", "-s"])
        self.complete_from_to('watchpoint set variable -w', 'watchpoint set variable -w ')
        self.complete_from_to("watchpoint set variable --", ["--watch", "--size"])
        self.complete_from_to("watchpoint set variable --w", "watchpoint set variable --watch")
        self.complete_from_to('watchpoint set variable -w ', ['read', 'write', 'read_write'])
        self.complete_from_to("watchpoint set variable --watch ", ["read", "write", "read_write"])
        self.complete_from_to("watchpoint set variable --watch w", "watchpoint set variable --watch write")
        self.complete_from_to('watchpoint set variable -w read_', 'watchpoint set variable -w read_write')
        # Now try the same thing with a variable name (non-option argument) to
        # test that getopts arg reshuffling doesn't confuse us.
        self.complete_from_to("watchpoint set variable foo -", ["-w", "-s"])
        self.complete_from_to('watchpoint set variable foo -w', 'watchpoint set variable foo -w ')
        self.complete_from_to("watchpoint set variable foo --", ["--watch", "--size"])
        self.complete_from_to("watchpoint set variable foo --w", "watchpoint set variable foo --watch")
        self.complete_from_to('watchpoint set variable foo -w ', ['read', 'write', 'read_write'])
        self.complete_from_to("watchpoint set variable foo --watch ", ["read", "write", "read_write"])
        self.complete_from_to("watchpoint set variable foo --watch w", "watchpoint set variable foo --watch write")
        self.complete_from_to('watchpoint set variable foo -w read_', 'watchpoint set variable foo -w read_write')

    def test_completion_description_commands(self):
        """Test descriptions of top-level command completions"""
        self.check_completion_with_desc("", [
            ["command", "Commands for managing custom LLDB commands."],
            ["bugreport", "Commands for creating domain-specific bug reports."]
        ])

        self.check_completion_with_desc("pl", [
            ["platform", "Commands to manage and create platforms."],
            ["plugin", "Commands for managing LLDB plugins."]
        ])

        # Just check that this doesn't crash.
        self.check_completion_with_desc("comman", [])
        self.check_completion_with_desc("non-existent-command", [])

    def test_completion_description_command_options(self):
        """Test descriptions of command options"""
        # Short options
        self.check_completion_with_desc("breakpoint set -", [
            ["-h", "Set the breakpoint on exception catcH."],
            ["-w", "Set the breakpoint on exception throW."]
        ])

        # Long options.
        self.check_completion_with_desc("breakpoint set --", [
            ["--on-catch", "Set the breakpoint on exception catcH."],
            ["--on-throw", "Set the breakpoint on exception throW."]
        ])

        # Ambiguous long options.
        self.check_completion_with_desc("breakpoint set --on-", [
            ["--on-catch", "Set the breakpoint on exception catcH."],
            ["--on-throw", "Set the breakpoint on exception throW."]
        ])

        # Unknown long option.
        self.check_completion_with_desc("breakpoint set --Z", [
        ])

    @expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24489")
    def test_symbol_name(self):
        """Test that symbol-name completion escapes special characters."""
        self.build()
        self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
        self.complete_from_to('breakpoint set -n Fo',
                              'breakpoint set -n Foo::Bar(int,\\ int)',
                              turn_off_re_match=True)
        # No completion for Qu because the candidate is
        # (anonymous namespace)::Quux().
        self.complete_from_to('breakpoint set -n Qu', '')
| |
# Copyright 2012 SINA Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute import attach_interfaces \
as attach_interfaces_v21
from nova.compute import api as compute_api
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network_cache_model
# Fake identifiers used throughout the tests below.
FAKE_UUID1 = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUID2 = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
FAKE_PORT_ID1 = '11111111-1111-1111-1111-111111111111'
FAKE_PORT_ID2 = '22222222-2222-2222-2222-222222222222'
FAKE_PORT_ID3 = '33333333-3333-3333-3333-333333333333'
# Port id that the fake helpers treat as nonexistent (raises PortNotFound).
FAKE_NOT_FOUND_PORT_ID = '00000000-0000-0000-0000-000000000000'
FAKE_NET_ID1 = '44444444-4444-4444-4444-444444444444'
FAKE_NET_ID2 = '55555555-5555-5555-5555-555555555555'
FAKE_NET_ID3 = '66666666-6666-6666-6666-666666666666'
# Network id that the fakes treat as nonexistent (raises NetworkNotFound).
FAKE_BAD_NET_ID = '00000000-0000-0000-0000-000000000000'
# Neutron-style port dicts served by fake_show_port.
port_data1 = {
    "id": FAKE_PORT_ID1,
    "network_id": FAKE_NET_ID1,
    "admin_state_up": True,
    "status": "ACTIVE",
    "mac_address": "aa:aa:aa:aa:aa:aa",
    "fixed_ips": ["10.0.1.2"],
    "device_id": FAKE_UUID1,
}
port_data2 = {
    "id": FAKE_PORT_ID2,
    "network_id": FAKE_NET_ID2,
    "admin_state_up": True,
    "status": "ACTIVE",
    "mac_address": "bb:bb:bb:bb:bb:bb",
    "fixed_ips": ["10.0.2.2"],
    "device_id": FAKE_UUID1,
}
# Port with an empty device_id, i.e. not attached to any instance.
# NOTE(review): mac/fixed_ips duplicate port_data2 — presumably irrelevant
# to the tests, but confirm if a test starts keying on those values.
port_data3 = {
    "id": FAKE_PORT_ID3,
    "network_id": FAKE_NET_ID3,
    "admin_state_up": True,
    "status": "ACTIVE",
    "mac_address": "bb:bb:bb:bb:bb:bb",
    "fixed_ips": ["10.0.2.2"],
    "device_id": '',
}
fake_networks = [FAKE_NET_ID1, FAKE_NET_ID2]
ports = [port_data1, port_data2, port_data3]
def fake_show_port(context, port_id, **kwargs):
    """Fake network_api.show_port: serve a port from the module fixtures or
    raise PortNotFound for unknown ids."""
    found = next((p for p in ports if p['id'] == port_id), None)
    if found is None:
        raise exception.PortNotFound(port_id=port_id)
    return {'port': found}
def fake_attach_interface(self, context, instance, network_id, port_id,
                          requested_ip='192.168.1.3', tag=None):
    """Fake compute_api.attach_interface returning a fake VIF.

    Falls back to the first default network when no network is given, and
    to that network's fixture port when no port is given.
    """
    # If no network_id is given when adding a port to an instance, use the
    # first default network.
    network_id = network_id or fake_networks[0]
    if network_id == FAKE_BAD_NET_ID:
        raise exception.NetworkNotFound(network_id=network_id)
    port_id = port_id or ports[fake_networks.index(network_id)]['id']
    if port_id == FAKE_NOT_FOUND_PORT_ID:
        raise exception.PortNotFound(port_id=port_id)
    vif = fake_network_cache_model.new_vif()
    vif['id'] = port_id
    vif['network']['id'] = network_id
    vif['network']['subnets'][0]['ips'][0]['address'] = requested_ip
    return vif
def fake_detach_interface(self, context, instance, port_id):
    """Fake compute_api.detach_interface: succeed (return None) iff the
    port id is one of the module fixtures."""
    if not any(p['id'] == port_id for p in ports):
        raise exception.PortNotFound(port_id=port_id)
def fake_get_instance(self, context, instance_id, expected_attrs=None,
                      cell_down_support=False):
    # Fake compute_api.API.get: always return a fresh fake instance object
    # owned by the request context's project (never raises NotFound).
    return fake_instance.fake_instance_obj(
        context, id=1, uuid=instance_id, project_id=context.project_id)
class InterfaceAttachTestsV21(test.NoDBTestCase):
    """Tests for the v2.1 os-interface (attach_interfaces) controller.

    The class attributes parameterize controller class and expected
    exception types so microversion subclasses can override them.
    """

    controller_cls = attach_interfaces_v21.InterfaceAttachmentController
    validate_exc = exception.ValidationError
    in_use_exc = exc.HTTPConflict
    not_found_exc = exc.HTTPNotFound
    not_usable_exc = exc.HTTPBadRequest
    def setUp(self):
        super(InterfaceAttachTestsV21, self).setUp()
        self.flags(timeout=30, group='neutron')
        # Instance lookups never hit a real DB; they return a fake object.
        self.stub_out('nova.compute.api.API.get', fake_get_instance)
        # What the controller should render for port_data1 / FAKE_PORT_ID1.
        self.expected_show = {'interfaceAttachment':
            {'net_id': FAKE_NET_ID1,
             'port_id': FAKE_PORT_ID1,
             'mac_addr': port_data1['mac_address'],
             'port_state': port_data1['status'],
             'fixed_ips': port_data1['fixed_ips'],
             }}
        self.attachments = self.controller_cls()
        # Patch show_port for the whole test; stopped automatically on cleanup.
        show_port_patch = mock.patch.object(self.attachments.network_api,
                                            'show_port', fake_show_port)
        show_port_patch.start()
        self.addCleanup(show_port_patch.stop)
        self.req = fakes.HTTPRequest.blank('')
@mock.patch.object(compute_api.API, 'get',
side_effect=exception.InstanceNotFound(instance_id=''))
def _test_instance_not_found(self, func, args, mock_get, kwargs=None):
if not kwargs:
kwargs = {}
self.assertRaises(exc.HTTPNotFound, func, self.req, *args, **kwargs)
    def test_show_instance_not_found(self):
        """show() on a missing instance yields 404."""
        self._test_instance_not_found(self.attachments.show, ('fake', 'fake'))
    def test_index_instance_not_found(self):
        """index() on a missing instance yields 404."""
        self._test_instance_not_found(self.attachments.index, ('fake', ))
    def test_detach_interface_instance_not_found(self):
        """delete() on a missing instance yields 404."""
        self._test_instance_not_found(self.attachments.delete,
                                      ('fake', 'fake'))
    def test_attach_interface_instance_not_found(self):
        """create() on a missing instance yields 404."""
        self._test_instance_not_found(self.attachments.create, ('fake', ),
                                      kwargs={'body': {'interfaceAttachment': {}}})
def test_show(self):
result = self.attachments.show(self.req, FAKE_UUID1, FAKE_PORT_ID1)
self.assertEqual(self.expected_show, result)
    def test_show_with_port_not_found(self):
        # FAKE_UUID2 owns no fixture ports, so show() must 404.
        self.assertRaises(exc.HTTPNotFound,
                          self.attachments.show, self.req, FAKE_UUID2,
                          FAKE_PORT_ID1)
    def test_show_forbidden(self):
        # A neutron Forbidden error must surface as HTTP 403.
        with mock.patch.object(self.attachments.network_api, 'show_port',
                               side_effect=exception.Forbidden):
            self.assertRaises(exc.HTTPForbidden,
                              self.attachments.show, self.req, FAKE_UUID1,
                              FAKE_PORT_ID1)
    def test_delete(self):
        """Deleting an attached port returns 202 and loads device metadata."""
        self.stub_out('nova.compute.api.API.detach_interface',
                      fake_detach_interface)
        req_context = self.req.environ['nova.context']
        inst = objects.Instance(uuid=FAKE_UUID1,
                                project_id=req_context.project_id)
        with mock.patch.object(common, 'get_instance',
                               return_value=inst) as mock_get_instance:
            result = self.attachments.delete(self.req, FAKE_UUID1,
                                             FAKE_PORT_ID1)
            # NOTE: on v2.1, http status code is set as wsgi_code of API
            # method instead of status_int in a response object.
            if isinstance(self.attachments,
                          attach_interfaces_v21.InterfaceAttachmentController):
                status_int = self.attachments.delete.wsgi_code
            else:
                status_int = result.status_int
            self.assertEqual(202, status_int)
        ctxt = self.req.environ['nova.context']
        # The instance lookup must request device metadata for the detach.
        mock_get_instance.assert_called_with(
            self.attachments.compute_api, ctxt, FAKE_UUID1,
            expected_attrs=['device_metadata'])
    def test_detach_interface_instance_locked(self):
        # A locked instance must surface as HTTP 409 on detach.
        def fake_detach_interface_from_locked_server(self, context,
                                                     instance, port_id):
            raise exception.InstanceIsLocked(instance_uuid=FAKE_UUID1)

        self.stub_out('nova.compute.api.API.detach_interface',
                      fake_detach_interface_from_locked_server)

        self.assertRaises(exc.HTTPConflict,
                          self.attachments.delete,
                          self.req,
                          FAKE_UUID1,
                          FAKE_PORT_ID1)
    def test_delete_interface_not_found(self):
        # Deleting an unknown port id must surface as HTTP 404.
        self.stub_out('nova.compute.api.API.detach_interface',
                      fake_detach_interface)

        self.assertRaises(exc.HTTPNotFound,
                          self.attachments.delete,
                          self.req,
                          FAKE_UUID1,
                          'invalid-port-id')
    def test_attach_interface_instance_locked(self):
        # A locked instance must surface as HTTP 409 on attach.
        def fake_attach_interface_to_locked_server(self, context,
                instance, network_id, port_id, requested_ip, tag=None):
            raise exception.InstanceIsLocked(instance_uuid=FAKE_UUID1)

        self.stub_out('nova.compute.api.API.attach_interface',
                      fake_attach_interface_to_locked_server)
        body = {}
        self.assertRaises(exc.HTTPConflict,
                          self.attachments.create, self.req, FAKE_UUID1,
                          body=body)
def test_attach_interface_without_network_id(self):
self.stub_out('nova.compute.api.API.attach_interface',
fake_attach_interface)
body = {}
result = self.attachments.create(self.req, FAKE_UUID1, body=body)
self.assertEqual(result['interfaceAttachment']['net_id'],
FAKE_NET_ID1)
    @mock.patch.object(
        compute_api.API, 'attach_interface',
        side_effect=exception.NetworkInterfaceTaggedAttachNotSupported())
    def test_interface_tagged_attach_not_supported(self, mock_attach):
        # Unsupported tagged attach must surface as HTTP 400.
        body = {'interfaceAttachment': {'net_id': FAKE_NET_ID2}}
        self.assertRaises(exc.HTTPBadRequest, self.attachments.create,
                          self.req, FAKE_UUID1, body=body)
def test_attach_interface_with_network_id(self):
self.stub_out('nova.compute.api.API.attach_interface',
fake_attach_interface)
body = {'interfaceAttachment': {'net_id': FAKE_NET_ID2}}
result = self.attachments.create(self.req, FAKE_UUID1, body=body)
self.assertEqual(result['interfaceAttachment']['net_id'],
FAKE_NET_ID2)
    def _attach_interface_bad_request_case(self, body):
        # Helper: attaching with this body must surface as HTTP 400.
        self.stub_out('nova.compute.api.API.attach_interface',
                      fake_attach_interface)
        self.assertRaises(exc.HTTPBadRequest,
                          self.attachments.create, self.req, FAKE_UUID1,
                          body=body)
    def _attach_interface_not_found_case(self, body):
        # Helper: attaching with this body must raise the subclass-defined
        # not-found exception (404 by default).
        self.stub_out('nova.compute.api.API.attach_interface',
                      fake_attach_interface)
        self.assertRaises(self.not_found_exc,
                          self.attachments.create, self.req, FAKE_UUID1,
                          body=body)
def test_attach_interface_with_port_and_network_id(self):
body = {
'interfaceAttachment': {
'port_id': FAKE_PORT_ID1,
'net_id': FAKE_NET_ID2
}
}
self._attach_interface_bad_request_case(body)
def test_attach_interface_with_not_found_network_id(self):
body = {
'interfaceAttachment': {
'net_id': FAKE_BAD_NET_ID
}
}
self._attach_interface_not_found_case(body)
def test_attach_interface_with_not_found_port_id(self):
body = {
'interfaceAttachment': {
'port_id': FAKE_NOT_FOUND_PORT_ID
}
}
self._attach_interface_not_found_case(body)
    def test_attach_interface_with_invalid_state(self):
        # An invalid instance state must surface as HTTP 409.
        def fake_attach_interface_invalid_state(*args, **kwargs):
            raise exception.InstanceInvalidState(
                instance_uuid='', attr='', state='',
                method='attach_interface')

        self.stub_out('nova.compute.api.API.attach_interface',
                      fake_attach_interface_invalid_state)
        body = {'interfaceAttachment': {'net_id': FAKE_NET_ID1}}
        self.assertRaises(exc.HTTPConflict,
                          self.attachments.create, self.req, FAKE_UUID1,
                          body=body)
    def test_attach_interface_port_limit_exceeded(self):
        """Tests the scenario where nova-compute attempts to create a port to
        attach but the tenant port quota is exceeded and PortLimitExceeded
        is raised from the neutron API code which results in a 403 response.
        """
        with mock.patch.object(self.attachments.compute_api,
                               'attach_interface',
                               side_effect=exception.PortLimitExceeded):
            body = {'interfaceAttachment': {}}
            ex = self.assertRaises(
                exc.HTTPForbidden, self.attachments.create,
                self.req, FAKE_UUID1, body=body)
        self.assertIn('Maximum number of ports exceeded', str(ex))
    def test_detach_interface_with_invalid_state(self):
        # An invalid instance state must surface as HTTP 409 on detach.
        def fake_detach_interface_invalid_state(*args, **kwargs):
            raise exception.InstanceInvalidState(
                instance_uuid='', attr='', state='',
                method='detach_interface')

        self.stub_out('nova.compute.api.API.detach_interface',
                      fake_detach_interface_invalid_state)
        self.assertRaises(exc.HTTPConflict,
                          self.attachments.delete,
                          self.req,
                          FAKE_UUID1,
                          FAKE_NET_ID1)
    @mock.patch.object(compute_api.API, 'detach_interface',
                       side_effect=NotImplementedError())
    def test_detach_interface_with_not_implemented(self, _mock):
        # A driver without detach support must surface as HTTP 501.
        self.assertRaises(exc.HTTPNotImplemented,
                          self.attachments.delete,
                          self.req, FAKE_UUID1, FAKE_NET_ID1)
def test_attach_interface_invalid_fixed_ip(self):
body = {
'interfaceAttachment': {
'net_id': FAKE_NET_ID1,
'fixed_ips': [{'ip_address': 'invalid_ip'}]
}
}
self.assertRaises(self.validate_exc,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
    @mock.patch.object(compute_api.API, 'get')
    @mock.patch.object(compute_api.API, 'attach_interface')
    def test_attach_interface_fixed_ip_already_in_use(self,
                                                      attach_mock,
                                                      get_mock):
        # FixedIpAlreadyInUse from the compute API must map to the
        # subclass-defined in-use exception (409 by default).
        req_context = self.req.environ['nova.context']
        fake_instance = objects.Instance(uuid=FAKE_UUID1,
                                         project_id=req_context.project_id)
        get_mock.return_value = fake_instance
        attach_mock.side_effect = exception.FixedIpAlreadyInUse(
            address='10.0.2.2', instance_uuid=FAKE_UUID1)
        body = {}
        self.assertRaises(self.in_use_exc,
                          self.attachments.create, self.req, FAKE_UUID1,
                          body=body)
        ctxt = self.req.environ['nova.context']
        # An empty body means network, port, and IP are all unspecified.
        attach_mock.assert_called_once_with(ctxt, fake_instance, None,
                                            None, None, tag=None)
        get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
                                         expected_attrs=None,
                                         cell_down_support=False)
    @mock.patch.object(compute_api.API, 'get')
    @mock.patch.object(compute_api.API, 'attach_interface')
    def test_attach_interface_port_in_use(self,
                                          attach_mock,
                                          get_mock):
        """PortInUse from the compute API maps to the "in use" HTTP error."""
        req_context = self.req.environ['nova.context']
        fake_instance = objects.Instance(uuid=FAKE_UUID1,
                                         project_id=req_context.project_id)
        get_mock.return_value = fake_instance
        attach_mock.side_effect = exception.PortInUse(
            port_id=FAKE_PORT_ID1)
        body = {}
        self.assertRaises(self.in_use_exc,
                          self.attachments.create, self.req, FAKE_UUID1,
                          body=body)
        # Verify the controller passed through the expected arguments.
        ctxt = self.req.environ['nova.context']
        attach_mock.assert_called_once_with(ctxt, fake_instance, None,
                                            None, None, tag=None)
        get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
                                         expected_attrs=None,
                                         cell_down_support=False)
    @mock.patch.object(compute_api.API, 'get')
    @mock.patch.object(compute_api.API, 'attach_interface')
    def test_attach_interface_port_not_usable(self,
                                              attach_mock,
                                              get_mock):
        """PortNotUsable from the compute API maps to the "not usable"
        HTTP error for this microversion.
        """
        req_context = self.req.environ['nova.context']
        fake_instance = objects.Instance(uuid=FAKE_UUID1,
                                         project_id=req_context.project_id)
        get_mock.return_value = fake_instance
        attach_mock.side_effect = exception.PortNotUsable(
            port_id=FAKE_PORT_ID1,
            instance=fake_instance.uuid)
        body = {}
        self.assertRaises(self.not_usable_exc,
                          self.attachments.create, self.req, FAKE_UUID1,
                          body=body)
        # Verify the controller passed through the expected arguments.
        ctxt = self.req.environ['nova.context']
        attach_mock.assert_called_once_with(ctxt, fake_instance, None,
                                            None, None, tag=None)
        get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
                                         expected_attrs=None,
                                         cell_down_support=False)
    @mock.patch.object(compute_api.API, 'get')
    @mock.patch.object(compute_api.API, 'attach_interface')
    def test_attach_interface_failed_no_network(self, attach_mock, get_mock):
        """InterfaceAttachFailedNoNetwork (no network available for the
        project) maps to HTTP 400.
        """
        req_context = self.req.environ['nova.context']
        fake_instance = objects.Instance(uuid=FAKE_UUID1,
                                         project_id=req_context.project_id)
        get_mock.return_value = fake_instance
        attach_mock.side_effect = (
            exception.InterfaceAttachFailedNoNetwork(project_id=FAKE_UUID2))
        self.assertRaises(exc.HTTPBadRequest, self.attachments.create,
                          self.req, FAKE_UUID1, body={})
        # Verify the controller passed through the expected arguments.
        ctxt = self.req.environ['nova.context']
        attach_mock.assert_called_once_with(ctxt, fake_instance, None,
                                            None, None, tag=None)
        get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
                                         expected_attrs=None,
                                         cell_down_support=False)
    @mock.patch.object(compute_api.API, 'get')
    @mock.patch.object(compute_api.API, 'attach_interface')
    def test_attach_interface_no_more_fixed_ips(self,
                                                attach_mock,
                                                get_mock):
        """NoMoreFixedIps (network address pool exhausted) maps to
        HTTP 400.
        """
        req_context = self.req.environ['nova.context']
        fake_instance = objects.Instance(uuid=FAKE_UUID1,
                                         project_id=req_context.project_id)
        get_mock.return_value = fake_instance
        attach_mock.side_effect = exception.NoMoreFixedIps(
            net=FAKE_NET_ID1)
        body = {}
        self.assertRaises(exc.HTTPBadRequest,
                          self.attachments.create, self.req, FAKE_UUID1,
                          body=body)
        # Verify the controller passed through the expected arguments.
        ctxt = self.req.environ['nova.context']
        attach_mock.assert_called_once_with(ctxt, fake_instance, None,
                                            None, None, tag=None)
        get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
                                         expected_attrs=None,
                                         cell_down_support=False)
    @mock.patch.object(compute_api.API, 'get')
    @mock.patch.object(compute_api.API, 'attach_interface')
    def test_attach_interface_failed_securitygroup_cannot_be_applied(
            self, attach_mock, get_mock):
        """SecurityGroupCannotBeApplied from the compute API maps to
        HTTP 400.
        """
        req_context = self.req.environ['nova.context']
        fake_instance = objects.Instance(uuid=FAKE_UUID1,
                                         project_id=req_context.project_id)
        get_mock.return_value = fake_instance
        attach_mock.side_effect = (
            exception.SecurityGroupCannotBeApplied())
        self.assertRaises(exc.HTTPBadRequest, self.attachments.create,
                          self.req, FAKE_UUID1, body={})
        # Verify the controller passed through the expected arguments.
        ctxt = self.req.environ['nova.context']
        attach_mock.assert_called_once_with(ctxt, fake_instance, None,
                                            None, None, tag=None)
        get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
                                         expected_attrs=None,
                                         cell_down_support=False)
    def _test_attach_interface_with_invalid_parameter(self, param):
        """Helper: POST the given attachment params and expect schema
        validation to reject them.
        """
        self.stub_out('nova.compute.api.API.attach_interface',
                      fake_attach_interface)
        body = {'interface_attachment': param}
        self.assertRaises(exception.ValidationError,
                          self.attachments.create, self.req, FAKE_UUID1,
                          body=body)
    def test_attach_interface_instance_with_non_uuid_net_id(self):
        """A non-UUID net_id is rejected by schema validation."""
        param = {'net_id': 'non_uuid'}
        self._test_attach_interface_with_invalid_parameter(param)
    def test_attach_interface_instance_with_non_uuid_port_id(self):
        """A non-UUID port_id is rejected by schema validation."""
        param = {'port_id': 'non_uuid'}
        self._test_attach_interface_with_invalid_parameter(param)
    def test_attach_interface_instance_with_non_array_fixed_ips(self):
        """fixed_ips must be a list; a bare string fails validation."""
        param = {'fixed_ips': 'non_array'}
        self._test_attach_interface_with_invalid_parameter(param)
class InterfaceAttachTestsV249(test.NoDBTestCase):
    """os-interface API tests for microversion 2.49 (tagged attach)."""

    controller_cls = attach_interfaces_v21.InterfaceAttachmentController

    def setUp(self):
        super(InterfaceAttachTestsV249, self).setUp()
        self.attachments = self.controller_cls()
        # 2.49 is the first microversion accepting a device tag.
        self.req = fakes.HTTPRequest.blank('', version='2.49')

    def test_tagged_interface_attach_invalid_tag_comma(self):
        """A comma is not allowed in a device tag."""
        body = {'interfaceAttachment': {'net_id': FAKE_NET_ID2,
                                        'tag': ','}}
        self.assertRaises(exception.ValidationError, self.attachments.create,
                          self.req, FAKE_UUID1, body=body)

    def test_tagged_interface_attach_invalid_tag_slash(self):
        """A slash is not allowed in a device tag."""
        body = {'interfaceAttachment': {'net_id': FAKE_NET_ID2,
                                        'tag': '/'}}
        self.assertRaises(exception.ValidationError, self.attachments.create,
                          self.req, FAKE_UUID1, body=body)

    def test_tagged_interface_attach_invalid_tag_too_long(self):
        """A tag longer than the schema's maximum length is rejected."""
        tag = ''.join(map(str, range(10, 41)))
        body = {'interfaceAttachment': {'net_id': FAKE_NET_ID2,
                                        'tag': tag}}
        self.assertRaises(exception.ValidationError, self.attachments.create,
                          self.req, FAKE_UUID1, body=body)

    @mock.patch('nova.compute.api.API.attach_interface')
    @mock.patch('nova.compute.api.API.get', fake_get_instance)
    def test_tagged_interface_attach_valid_tag(self, _):
        """A simple alphanumeric tag passes validation and attaches."""
        body = {'interfaceAttachment': {'net_id': FAKE_NET_ID2,
                                        'tag': 'foo'}}
        with mock.patch.object(self.attachments, 'show'):
            self.attachments.create(self.req, FAKE_UUID1, body=body)
class InterfaceAttachTestsV270(test.NoDBTestCase):
    """os-interface API tests for microversion 2.70"""

    def setUp(self):
        super(InterfaceAttachTestsV270, self).setUp()
        self.attachments = (
            attach_interfaces_v21.InterfaceAttachmentController())
        # 2.70 adds the "tag" field to show/index responses.
        self.req = fakes.HTTPRequest.blank('', version='2.70')
        self.stub_out('nova.compute.api.API.get', fake_get_instance)

    @mock.patch('nova.objects.VirtualInterface.get_by_uuid', return_value=None)
    def test_show_interface_no_vif(self, mock_get_by_uuid):
        """Tests GET /servers/{server_id}/os-interface/{id} where there is no
        corresponding VirtualInterface database record for the attached port.
        """
        with mock.patch.object(self.attachments.network_api, 'show_port',
                               fake_show_port):
            attachment = self.attachments.show(
                self.req, FAKE_UUID1, FAKE_PORT_ID1)['interfaceAttachment']
        # Without a VIF record the tag key must still be present but null.
        self.assertIn('tag', attachment)
        self.assertIsNone(attachment['tag'])
        ctxt = self.req.environ['nova.context']
        mock_get_by_uuid.assert_called_once_with(ctxt, FAKE_PORT_ID1)

    @mock.patch('nova.objects.VirtualInterfaceList.get_by_instance_uuid',
                return_value=objects.VirtualInterfaceList())
    def test_list_interfaces_no_vifs(self, mock_get_by_instance_uuid):
        """Tests GET /servers/{server_id}/os-interface where there is no
        corresponding VirtualInterface database record for the attached ports.
        """
        with mock.patch.object(self.attachments.network_api, 'list_ports',
                               return_value={'ports': ports}) as list_ports:
            attachments = self.attachments.index(
                self.req, FAKE_UUID1)['interfaceAttachments']
        # Every attachment reports a null tag when no VIF records exist.
        for attachment in attachments:
            self.assertIn('tag', attachment)
            self.assertIsNone(attachment['tag'])
        ctxt = self.req.environ['nova.context']
        list_ports.assert_called_once_with(ctxt, device_id=FAKE_UUID1)
        mock_get_by_instance_uuid.assert_called_once_with(
            self.req.environ['nova.context'], FAKE_UUID1)
| |
from CrowdStrikeFalconX import Client,\
send_uploaded_file_to_sandbox_analysis_command, send_url_to_sandbox_analysis_command,\
get_full_report_command, get_report_summary_command, get_analysis_status_command,\
check_quota_status_command, find_sandbox_reports_command, find_submission_id_command, run_polling_command, \
pop_polling_related_args, is_new_polling_search, arrange_args_for_upload_func, remove_polling_related_args
from TestsInput.context import SEND_UPLOADED_FILE_TO_SENDBOX_ANALYSIS_CONTEXT, SEND_URL_TO_SANDBOX_ANALYSIS_CONTEXT,\
GET_FULL_REPORT_CONTEXT, GET_REPORT_SUMMARY_CONTEXT, GET_ANALYSIS_STATUS_CONTEXT, CHECK_QUOTA_STATUS_CONTEXT,\
FIND_SANDBOX_REPORTS_CONTEXT, FIND_SUBMISSION_ID_CONTEXT, MULTIPLE_ERRORS_RESULT, GET_FULL_REPORT_CONTEXT_EXTENDED
from TestsInput.http_responses import SEND_UPLOADED_FILE_TO_SENDBOX_ANALYSIS_HTTP_RESPONSE,\
SEND_URL_TO_SANDBOX_ANALYSIS_HTTP_RESPONSE, GET_FULL_REPORT_HTTP_RESPONSE, GET_REPORT_SUMMARY_HTTP_RESPONSE,\
CHECK_QUOTA_STATUS_HTTP_RESPONSE, FIND_SANDBOX_REPORTS_HTTP_RESPONSE, FIND_SUBMISSION_ID_HTTP_RESPONSE,\
GET_ANALYSIS_STATUS_HTTP_RESPONSE, MULTI_ERRORS_HTTP_RESPONSE, NO_ERRORS_HTTP_RESPONSE, \
GET_FULL_REPORT_HTTP_RESPONSE_EMPTY
import pytest
class ResMocker:
    """Lightweight stand-in for an HTTP response object.

    Exposes the two attributes the client code inspects: ``ok`` (always
    False, so error-handling paths are exercised) and ``json()``, which
    returns the canned payload supplied at construction time.
    """

    def __init__(self, http_response):
        # Mark the response as failed so callers take their error branch.
        self.ok = False
        self.http_response = http_response

    def json(self):
        """Return the canned response body."""
        return self.http_response
# Canned demisto argument dicts fed to the commands under test.
# ("SENDBOX" is a long-standing typo kept for consistency with the
# context/http-response fixtures imported above.)

# Plain (non-polling) file submission arguments.
SEND_UPLOADED_FILE_TO_SENDBOX_ANALYSIS_ARGS = {
    "sha256": "sha256",
    "environment_id": "160: Windows 10",
    "action_script": "",
    "command_line": "",
    "document_password": "",
    "enable_tor": "false",
    "submit_name": "",
    "system_date": "",
    "system_time": ""
}

# File submission arguments with the polling flow enabled.
SEND_UPLOADED_FILE_TO_SENDBOX_ANALYSIS_ARGS_POLLING = {
    "sha256": "sha256",
    "environment_id": "160: Windows 10",
    "action_script": "",
    "command_line": "",
    "document_password": "",
    "enable_tor": "false",
    "submit_name": "",
    "system_date": "",
    "system_time": "",
    "polling": True,
    "interval_in_seconds": "60",
    "extended_data": "true"
}

# Plain (non-polling) URL submission arguments.
SEND_URL_TO_SANDBOX_ANALYSIS_ARGS = {
    "url": "https://www.google.com",
    "environment_id": "160: Windows 10",
    "enable_tor": "False",
    "action_script": "",
    "command_line": "",
    "document_password": "",
    "submit_name": "",
    "system_date": "",
    "system_time": ""
}

# URL submission arguments with the polling flow enabled.
SEND_URL_TO_SANDBOX_ANALYSIS_ARGS_POLLING = {
    "url": "https://www.google.com",
    "environment_id": "160: Windows 10",
    "enable_tor": "False",
    "action_script": "",
    "command_line": "",
    "document_password": "",
    "submit_name": "",
    "system_date": "",
    "system_time": "",
    "polling": "true",
    "interval_in_seconds": "10",
    "extended_data": "true"
}

# Report retrieval arguments, without and with extended data.
GET_FULL_REPORT_ARGS = {
    "ids": "ids",
    "extended_data": "false"
}
GET_FULL_REPORT_ARGS_EXTENDED = {
    "ids": "ids",
    "extended_data": "true"
}
GET_REPORT_SUMMARY_ARGS = {
    "ids": "ids",
}
GET_ANALYSIS_STATUS_ARGS = {
    "ids": "ids",
}

# Search arguments: empty filters exercise the default query paths.
FIND_SANDBOX_REPORTS_ARGS = {
    "offset": "",
    "limit": "",
    "sort": "",
    "filter": "",
}
FIND_SUBMISSION_ID_ARGS = {
    "offset": "",
    "limit": "",
    "sort": "",
    "filter": "",
}
@pytest.mark.parametrize('command, args, http_response, context', [
    (get_report_summary_command, GET_REPORT_SUMMARY_ARGS, GET_REPORT_SUMMARY_HTTP_RESPONSE, GET_REPORT_SUMMARY_CONTEXT),
    (get_analysis_status_command, GET_ANALYSIS_STATUS_ARGS, GET_ANALYSIS_STATUS_HTTP_RESPONSE,
     GET_ANALYSIS_STATUS_CONTEXT),
    (check_quota_status_command, {}, CHECK_QUOTA_STATUS_HTTP_RESPONSE, CHECK_QUOTA_STATUS_CONTEXT),
    (find_sandbox_reports_command, FIND_SANDBOX_REPORTS_ARGS, FIND_SANDBOX_REPORTS_HTTP_RESPONSE,
     FIND_SANDBOX_REPORTS_CONTEXT),
    (find_submission_id_command, FIND_SUBMISSION_ID_ARGS, FIND_SUBMISSION_ID_HTTP_RESPONSE, FIND_SUBMISSION_ID_CONTEXT),
])
def test_cs_falconx_commands(command, args, http_response, context, mocker):
    """Unit test
    Given
    - demisto args
    - raw response of the http request
    When
    - mock the http request result
    Then
    - convert the result to human readable table
    - create the context
    - validate the expected_result and the created context
    """
    # Skip real authentication; the token is never used because the HTTP
    # layer is mocked below.
    mocker.patch.object(Client, '_generate_token')
    client = Client(server_url="https://api.crowdstrike.com/", username="user1", password="12345", use_ssl=False,
                    proxy=False)
    mocker.patch.object(Client, '_http_request', return_value=http_response)
    # These commands return (readable_output, outputs, raw_response).
    _, outputs, _ = command(client, **args)
    assert outputs == context
@pytest.mark.parametrize('command, args, http_response, context', [
    (send_uploaded_file_to_sandbox_analysis_command, SEND_UPLOADED_FILE_TO_SENDBOX_ANALYSIS_ARGS,
     SEND_UPLOADED_FILE_TO_SENDBOX_ANALYSIS_HTTP_RESPONSE, SEND_UPLOADED_FILE_TO_SENDBOX_ANALYSIS_CONTEXT),
    (send_url_to_sandbox_analysis_command, SEND_URL_TO_SANDBOX_ANALYSIS_ARGS,
     SEND_URL_TO_SANDBOX_ANALYSIS_HTTP_RESPONSE, SEND_URL_TO_SANDBOX_ANALYSIS_CONTEXT),
    (get_full_report_command, GET_FULL_REPORT_ARGS, GET_FULL_REPORT_HTTP_RESPONSE, GET_FULL_REPORT_CONTEXT),
    (get_full_report_command, GET_FULL_REPORT_ARGS_EXTENDED, GET_FULL_REPORT_HTTP_RESPONSE,
     GET_FULL_REPORT_CONTEXT_EXTENDED)
])
def test_cs_falcon_x_polling_related_commands(command, args, http_response, context, mocker):
    """Unit test
    Given
    - demisto args
    - raw response of the http request
    When
    - mock the http request result
    Then
    - convert the result to human readable table
    - create the context
    - validate the expected_result and the created context
    """
    # Skip real authentication; the HTTP layer is mocked below.
    mocker.patch.object(Client, '_generate_token')
    client = Client(server_url="https://api.crowdstrike.com/", username="user1", password="12345", use_ssl=False,
                    proxy=False)
    mocker.patch.object(Client, '_http_request', return_value=http_response)
    # get_full_report_command additionally returns the report status flag;
    # the submit commands return just the CommandResults object.
    if command == get_full_report_command:
        command_res, status = command(client, **args)
        assert command_res.outputs == context
    else:
        command_res = command(client, **args)
        assert command_res.outputs == context
@pytest.mark.parametrize('http_response, output', [
    (MULTI_ERRORS_HTTP_RESPONSE, MULTIPLE_ERRORS_RESULT),
    (NO_ERRORS_HTTP_RESPONSE, "")
])
def test_handle_errors(http_response, output, mocker):
    """Unit test
    Given
    - raw response of the http request
    When
    - there are or there are no errors
    Then
    - show the exception content
    """
    mocker.patch.object(Client, '_generate_token')
    client = Client(server_url="https://api.crowdstrike.com/", username="user1", password="12345", use_ssl=False,
                    proxy=False)
    # NOTE(review): if no exception is raised (the NO_ERRORS case) this
    # test passes without asserting anything, and the tuple unpack in the
    # try block rebinds the `output` parameter — verify this is intended.
    try:
        mocker.patch.object(client._session, 'request', return_value=ResMocker(http_response))
        _, output, _ = check_quota_status_command(client)
    except Exception as e:
        assert (str(e) == str(output))
def test_running_polling_command_success_for_url(mocker):
    """
    Given:
     An upload request of a url or a file using the polling flow, that was already initiated priorly and is now
     complete.
    When:
     When, while in the polling flow, we are checking the status of on an upload that was initiated earlier and is
     already complete.
    Then:
     Return a command results object, without scheduling a new command.
    """
    # 'ids' present (and no 'polling' key) means we are resuming an
    # existing polling search rather than starting a new upload.
    args = {'ids': "1234", "extended_data": "true"}
    mocker.patch('CommonServerPython.ScheduledCommand.raise_error_if_not_supported')
    mocker.patch.object(Client, '_generate_token')
    client = Client(server_url="https://api.crowdstrike.com/", username="user1", password="12345", use_ssl=False,
                    proxy=False)
    mocker.patch.object(Client, 'send_url_to_sandbox_analysis', return_value=SEND_URL_TO_SANDBOX_ANALYSIS_HTTP_RESPONSE)
    # A non-empty full report marks the analysis as complete.
    mocker.patch.object(Client, 'get_full_report', return_value=GET_FULL_REPORT_HTTP_RESPONSE)
    expected_outputs = GET_FULL_REPORT_CONTEXT_EXTENDED
    command_results = run_polling_command(client, args, 'cs-fx-submit-url', send_url_to_sandbox_analysis_command,
                                          get_full_report_command, 'URL')
    assert command_results.outputs == expected_outputs
    assert command_results.scheduled_command is None
def test_running_polling_command_success_for_file(mocker):
    """
    Given:
     An upload request of a url or a file using the polling flow, that was already initiated priorly and is now
     complete.
    When:
     When, while in the polling flow, we are checking the status of on an upload that was initiated earlier and is
     already complete.
    Then:
     Return a command results object, without scheduling a new command.
    """
    # 'ids' present (and no 'polling' key) means we are resuming an
    # existing polling search rather than starting a new upload.
    args = {'ids': "1234", "extended_data": "true"}
    mocker.patch('CommonServerPython.ScheduledCommand.raise_error_if_not_supported')
    mocker.patch.object(Client, '_generate_token')
    client = Client(server_url="https://api.crowdstrike.com/", username="user1", password="12345", use_ssl=False,
                    proxy=False)
    mocker.patch.object(Client, 'send_url_to_sandbox_analysis', return_value=SEND_URL_TO_SANDBOX_ANALYSIS_HTTP_RESPONSE)
    # A non-empty full report marks the analysis as complete.
    mocker.patch.object(Client, 'get_full_report', return_value=GET_FULL_REPORT_HTTP_RESPONSE)
    expected_outputs = GET_FULL_REPORT_CONTEXT_EXTENDED
    command_results = run_polling_command(client, args, 'cs-fx-submit-uploaded-file',
                                          send_uploaded_file_to_sandbox_analysis_command,
                                          get_full_report_command, 'FILE')
    assert command_results.outputs == expected_outputs
    assert command_results.scheduled_command is None
def test_running_polling_command_pending_for_url(mocker):
    """
    Given:
     An upload request of a url or a file using the polling flow, that was already initiated priorly and is not
     completed yet.
    When:
     When, while in the polling flow, we are checking the status of on an upload that was initiated earlier and is
     not complete yet.
    Then:
     Return a command results object, with scheduling a new command.
    """
    args = {'ids': "1234", "extended_data": "true"}
    mocker.patch('CommonServerPython.ScheduledCommand.raise_error_if_not_supported')
    mocker.patch.object(Client, '_generate_token')
    client = Client(server_url="https://api.crowdstrike.com/", username="user1", password="12345", use_ssl=False,
                    proxy=False)
    mocker.patch.object(Client, 'send_url_to_sandbox_analysis', return_value=SEND_URL_TO_SANDBOX_ANALYSIS_HTTP_RESPONSE)
    # An empty report means the analysis is still running, so the command
    # must schedule itself to poll again.
    mocker.patch.object(Client, 'get_full_report', return_value=GET_FULL_REPORT_HTTP_RESPONSE_EMPTY)
    command_results = run_polling_command(client, args, 'cs-fx-submit-url', send_url_to_sandbox_analysis_command,
                                          get_full_report_command, 'URL')
    assert command_results.outputs is None
    assert command_results.scheduled_command is not None
def test_running_polling_command_pending_for_file(mocker):
    """
    Given:
     An upload request of a url or a file using the polling flow, that was already initiated priorly and is not
     completed yet.
    When:
     When, while in the polling flow, we are checking the status of on an upload that was initiated earlier and is
     not complete yet.
    Then:
     Return a command results object, with scheduling a new command.
    """
    args = {'ids': "1234", "extended_data": "true"}
    mocker.patch('CommonServerPython.ScheduledCommand.raise_error_if_not_supported')
    mocker.patch.object(Client, '_generate_token')
    client = Client(server_url="https://api.crowdstrike.com/", username="user1", password="12345", use_ssl=False,
                    proxy=False)
    mocker.patch.object(Client, 'send_url_to_sandbox_analysis', return_value=SEND_URL_TO_SANDBOX_ANALYSIS_HTTP_RESPONSE)
    # An empty report means the analysis is still running, so the command
    # must schedule itself to poll again.
    mocker.patch.object(Client, 'get_full_report', return_value=GET_FULL_REPORT_HTTP_RESPONSE_EMPTY)
    command_results = run_polling_command(client, args, 'cs-fx-submit-uploaded-file',
                                          send_uploaded_file_to_sandbox_analysis_command,
                                          get_full_report_command, 'FILE')
    assert command_results.outputs is None
    assert command_results.scheduled_command is not None
def test_running_polling_command_new_search_for_url(mocker):
    """
    Given:
     An upload request of a url using the polling flow, that was already initiated priorly and is not
     completed yet.
    When:
     When, while in the polling flow, we are checking the status of on an upload that was initiated earlier and is
     not complete yet.
    Then:
     Return a command results object, with scheduling a new command.
    """
    # Full submit args (with 'polling') start a brand-new search.
    args = SEND_URL_TO_SANDBOX_ANALYSIS_ARGS_POLLING
    mocker.patch('CommonServerPython.ScheduledCommand.raise_error_if_not_supported')
    mocker.patch.object(Client, '_generate_token')
    client = Client(server_url="https://api.crowdstrike.com/", username="user1", password="12345", use_ssl=False,
                    proxy=False)
    mocker.patch.object(Client, 'send_url_to_sandbox_analysis',
                        return_value=SEND_UPLOADED_FILE_TO_SENDBOX_ANALYSIS_HTTP_RESPONSE)
    mocker.patch.object(Client, 'get_full_report', return_value=GET_FULL_REPORT_HTTP_RESPONSE)
    expected_outputs = SEND_UPLOADED_FILE_TO_SENDBOX_ANALYSIS_CONTEXT
    command_results = run_polling_command(client, args, 'cs-fx-submit-url', send_url_to_sandbox_analysis_command,
                                          get_full_report_command, 'URL')
    # The submit command wraps its context in a list for the URL flow.
    assert command_results.outputs == [expected_outputs]
    assert command_results.scheduled_command is not None
def test_running_polling_command_new_search_for_file(mocker):
    """
    Given:
     An upload request of a file using the polling flow, that was already initiated priorly and is not
     completed yet.
    When:
     When, while in the polling flow, we are checking the status of on an upload that was initiated earlier and is
     not complete yet.
    Then:
     Return a command results object, with scheduling a new command.
    """
    # Full submit args (with 'polling') start a brand-new search.
    args = SEND_UPLOADED_FILE_TO_SENDBOX_ANALYSIS_ARGS_POLLING
    mocker.patch('CommonServerPython.ScheduledCommand.raise_error_if_not_supported')
    mocker.patch.object(Client, '_generate_token')
    client = Client(server_url="https://api.crowdstrike.com/", username="user1", password="12345", use_ssl=False,
                    proxy=False)
    mocker.patch.object(Client, 'send_uploaded_file_to_sandbox_analysis',
                        return_value=SEND_UPLOADED_FILE_TO_SENDBOX_ANALYSIS_HTTP_RESPONSE)
    mocker.patch.object(Client, 'get_full_report', return_value=GET_FULL_REPORT_HTTP_RESPONSE)
    expected_outputs = SEND_UPLOADED_FILE_TO_SENDBOX_ANALYSIS_CONTEXT
    command_results = run_polling_command(client, args, 'cs-fx-submit-uploaded-file',
                                          send_uploaded_file_to_sandbox_analysis_command,
                                          get_full_report_command, 'FILE')
    assert command_results.outputs == expected_outputs
    assert command_results.scheduled_command is not None
def test_pop_polling_related_args():
    """pop_polling_related_args must strip every polling-flow key from the
    args dict in place while leaving 'ids' untouched.
    """
    polling_keys = ('submit_file', 'enable_tor', 'interval_in_seconds',
                    'polling')
    args = {key: key for key in polling_keys + ('ids',)}

    pop_polling_related_args(args)

    for key in polling_keys:
        assert key not in args
    assert 'ids' in args
def test_is_new_polling_search():
    """Args carrying 'polling' start a new search; args carrying only
    'ids' resume an existing one.
    """
    assert is_new_polling_search({'polling': 'a'})
    assert not is_new_polling_search({'ids': 'a'})
def test_arrange_args_for_upload_func():
    """arrange_args_for_upload_func must pop the polling-only keys from the
    args dict and hand back the extracted 'extended_data' value.
    """
    args = {key: key for key in (
        'submit_file', 'enable_tor', 'interval_in_seconds',
        'polling', 'ids', 'extended_data')}

    returned = arrange_args_for_upload_func(args)

    assert returned == 'extended_data'
    for popped in ('interval_in_seconds', 'polling', 'extended_data'):
        assert popped not in args
def test_remove_polling_related_args():
    """remove_polling_related_args must drop the scheduling keys from the
    args dict in place.
    """
    args = {key: key for key in (
        'interval_in_seconds', 'polling', 'ids', 'extended_data')}

    remove_polling_related_args(args)

    assert 'interval_in_seconds' not in args
    assert 'extended_data' not in args
| |
#!/usr/bin/env python
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE: XenServer still only supports Python 2.4 in it's dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features
#
# XenAPI plugin for reading/writing information to xenstore
#
try:
import json
except ImportError:
import simplejson as json
import utils # noqa
import XenAPIPlugin # noqa
import pluginlib_nova as pluginlib # noqa
# Route this plugin's log output under the "xenstore" logger name.
pluginlib.configure_logging("xenstore")
class XenstoreError(pluginlib.PluginError):
    """Errors that occur when calling xenstore-* through subprocesses."""

    def __init__(self, cmd, return_code, stderr, stdout):
        # Keep the raw pieces available to callers (e.g. read_record
        # inspects stderr) in addition to the formatted message.
        msg = "cmd: %s; returncode: %d; stderr: %s; stdout: %s"
        msg = msg % (cmd, return_code, stderr, stdout)
        self.cmd = cmd
        self.return_code = return_code
        self.stderr = stderr
        self.stdout = stdout
        pluginlib.PluginError.__init__(self, msg)
def jsonify(fnc):
    """Decorator guaranteeing the wrapped function's result is a valid
    JSON-encoded string.

    If the returned value already parses as JSON it is passed through
    untouched; otherwise it is encoded with json.dumps.
    """
    def wrapper(*args, **kwargs):
        result = fnc(*args, **kwargs)
        try:
            json.loads(result)
        except ValueError:
            # Value should already be JSON-encoded, but some operations
            # may write raw string values; this will catch those and
            # properly encode them.
            result = json.dumps(result)
        return result
    return wrapper
def record_exists(arg_dict):
    """Returns whether or not the given record exists. The record path
    is determined from the given path and dom_id in the arg_dict.

    Note: this file must stay Python 2.4-compatible (XenServer dom0),
    hence the old-style `except XenstoreError, e` syntax.
    """
    cmd = ["xenstore-exists", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
    try:
        _run_command(cmd)
        return True
    except XenstoreError, e:  # noqa
        if e.stderr == '':
            # if stderr was empty, this just means the path did not exist
            return False
        # otherwise there was a real problem
        raise
@jsonify
def read_record(self, arg_dict):
    """Returns the value stored at the given path for the given dom_id.
    These must be encoded as key/value pairs in arg_dict. You can
    optionally include a key 'ignore_missing_path'; if this is present
    and boolean True, attempting to read a non-existent path will return
    the string 'None' instead of raising an exception.
    """
    cmd = ["xenstore-read", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
    try:
        result = _run_command(cmd)
        return result.strip()
    except XenstoreError, e:  # noqa
        if not arg_dict.get("ignore_missing_path", False):
            raise
        if not record_exists(arg_dict):
            return "None"
        # Just try again in case the agent write won the race against
        # the record_exists check. If this fails again, it will likely raise
        # an equally meaningful XenstoreError as the one we just caught
        result = _run_command(cmd)
        return result.strip()
@jsonify
def write_record(self, arg_dict):
    """Writes to xenstore at the specified path. If there is information
    already stored in that location, it is overwritten. As in read_record,
    the dom_id and path must be specified in the arg_dict; additionally,
    you must specify a 'value' key, whose value must be a string. Typically,
    you can json-ify more complex values and store the json output.
    """
    record_path = "/local/domain/%(dom_id)s/%(path)s" % arg_dict
    value = arg_dict["value"]
    _run_command(["xenstore-write", record_path, value])
    # Echo the written value back so the caller gets it json-ified.
    return value
@jsonify
def list_records(self, arg_dict):
    """Returns all the stored data at or below the given path for the
    given dom_id. The data is returned as a json-ified dict, with the
    path as the key and the stored value as the value. If the path
    doesn't exist, an empty dict is returned.
    """
    dirpath = "/local/domain/%(dom_id)s/%(path)s" % arg_dict
    cmd = ["xenstore-ls", dirpath.rstrip("/")]
    try:
        recs = _run_command(cmd)
    except XenstoreError, e:  # noqa
        if not record_exists(arg_dict):
            return {}
        # Just try again in case the path was created in between
        # the "ls" and the existence check. If this fails again, it will
        # likely raise an equally meaningful XenstoreError
        recs = _run_command(cmd)
    base_path = arg_dict["path"]
    paths = _paths_from_ls(recs)
    ret = {}
    # Read each child record individually; arg_dict["path"] is rewritten
    # per iteration so read_record sees the absolute child path.
    for path in paths:
        if base_path:
            arg_dict["path"] = "%s/%s" % (base_path, path)
        else:
            arg_dict["path"] = path
        rec = read_record(self, arg_dict)
        try:
            val = json.loads(rec)
        except ValueError:
            # Not JSON — keep the raw string value.
            val = rec
        ret[path] = val
    return ret
@jsonify
def delete_record(self, arg_dict):
    """Just like it sounds: it removes the record for the specified
    VM and the specified path from xenstore.
    """
    cmd = ["xenstore-rm", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
    try:
        return _run_command(cmd)
    except XenstoreError, e:  # noqa
        if 'could not remove path' in e.stderr:
            # Entry already gone. We're good to go.
            return ''
        raise
def _paths_from_ls(recs):
"""The xenstore-ls command returns a listing that isn't terribly
useful. This method cleans that up into a dict with each path
as the key, and the associated string as the value.
"""
last_nm = ""
level = 0
path = []
ret = []
for ln in recs.splitlines():
nm, val = ln.rstrip().split(" = ")
barename = nm.lstrip()
this_level = len(nm) - len(barename)
if this_level == 0:
ret.append(barename)
level = 0
path = []
elif this_level == level:
# child of same parent
ret.append("%s/%s" % ("/".join(path), barename))
elif this_level > level:
path.append(last_nm)
ret.append("%s/%s" % ("/".join(path), barename))
level = this_level
elif this_level < level:
path = path[:this_level]
ret.append("%s/%s" % ("/".join(path), barename))
level = this_level
last_nm = barename
return ret
def _run_command(cmd):
    """Wrap utils.run_command to raise XenstoreError on failure

    Translates the generic SubprocessException into the plugin-specific
    XenstoreError so callers can inspect stderr/returncode uniformly.
    """
    try:
        return utils.run_command(cmd)
    except utils.SubprocessException, e:  # noqa
        raise XenstoreError(e.cmdline, e.ret, e.err, e.out)
if __name__ == "__main__":
    # Register the XenAPI-callable entry points for this plugin.
    XenAPIPlugin.dispatch(
        {"read_record": read_record,
         "write_record": write_record,
         "list_records": list_records,
         "delete_record": delete_record})
| |
"""
Copyright (c) 2020 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import os
from scapy.layers.l2 import Ether, ARP
from scapy.layers.inet import IP, UDP
import cocotb_test.simulator
import cocotb
from cocotb.log import SimLog
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge
from cocotbext.eth import GmiiFrame, GmiiSource, GmiiSink
from cocotbext.eth import XgmiiFrame, XgmiiSource, XgmiiSink
class TB:
    """Cocotb testbench harness for the FPGA design under test.

    Wires GMII and four 10G XGMII source/sink pairs to the DUT's PHY
    interfaces, drives the clocks, and ties off the board inputs
    (buttons, switches, UART).
    """

    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        # Core clock: 6.4 ns period (156.25 MHz).
        cocotb.start_soon(Clock(dut.clk, 6.4, units="ns").start())

        # Ethernet
        # GMII PHY interface runs at 8 ns (125 MHz).
        cocotb.start_soon(Clock(dut.phy_gmii_clk, 8, units="ns").start())

        self.gmii_source = GmiiSource(dut.phy_gmii_rxd, dut.phy_gmii_rx_er, dut.phy_gmii_rx_dv,
                                      dut.phy_gmii_clk, dut.phy_gmii_rst, dut.phy_gmii_clk_en)
        self.gmii_sink = GmiiSink(dut.phy_gmii_txd, dut.phy_gmii_tx_er, dut.phy_gmii_tx_en,
                                  dut.phy_gmii_clk, dut.phy_gmii_rst, dut.phy_gmii_clk_en)

        # Keep the GMII clock-enable permanently asserted.
        dut.phy_gmii_clk_en.setimmediatevalue(1)

        # QSFP lanes 1-4: independent RX/TX clocks with XGMII models.
        cocotb.start_soon(Clock(dut.qsfp_rx_clk_1, 6.4, units="ns").start())
        self.qsfp_1_source = XgmiiSource(dut.qsfp_rxd_1, dut.qsfp_rxc_1, dut.qsfp_rx_clk_1, dut.qsfp_rx_rst_1)
        cocotb.start_soon(Clock(dut.qsfp_tx_clk_1, 6.4, units="ns").start())
        self.qsfp_1_sink = XgmiiSink(dut.qsfp_txd_1, dut.qsfp_txc_1, dut.qsfp_tx_clk_1, dut.qsfp_tx_rst_1)

        cocotb.start_soon(Clock(dut.qsfp_rx_clk_2, 6.4, units="ns").start())
        self.qsfp_2_source = XgmiiSource(dut.qsfp_rxd_2, dut.qsfp_rxc_2, dut.qsfp_rx_clk_2, dut.qsfp_rx_rst_2)
        cocotb.start_soon(Clock(dut.qsfp_tx_clk_2, 6.4, units="ns").start())
        self.qsfp_2_sink = XgmiiSink(dut.qsfp_txd_2, dut.qsfp_txc_2, dut.qsfp_tx_clk_2, dut.qsfp_tx_rst_2)

        cocotb.start_soon(Clock(dut.qsfp_rx_clk_3, 6.4, units="ns").start())
        self.qsfp_3_source = XgmiiSource(dut.qsfp_rxd_3, dut.qsfp_rxc_3, dut.qsfp_rx_clk_3, dut.qsfp_rx_rst_3)
        cocotb.start_soon(Clock(dut.qsfp_tx_clk_3, 6.4, units="ns").start())
        self.qsfp_3_sink = XgmiiSink(dut.qsfp_txd_3, dut.qsfp_txc_3, dut.qsfp_tx_clk_3, dut.qsfp_tx_rst_3)

        cocotb.start_soon(Clock(dut.qsfp_rx_clk_4, 6.4, units="ns").start())
        self.qsfp_4_source = XgmiiSource(dut.qsfp_rxd_4, dut.qsfp_rxc_4, dut.qsfp_rx_clk_4, dut.qsfp_rx_rst_4)
        cocotb.start_soon(Clock(dut.qsfp_tx_clk_4, 6.4, units="ns").start())
        self.qsfp_4_sink = XgmiiSink(dut.qsfp_txd_4, dut.qsfp_txc_4, dut.qsfp_tx_clk_4, dut.qsfp_tx_rst_4)

        # Tie off unused board-level inputs.
        dut.btnu.setimmediatevalue(0)
        dut.btnl.setimmediatevalue(0)
        dut.btnd.setimmediatevalue(0)
        dut.btnr.setimmediatevalue(0)
        dut.btnc.setimmediatevalue(0)
        dut.sw.setimmediatevalue(0)
        dut.uart_rxd.setimmediatevalue(0)
        dut.uart_cts.setimmediatevalue(0)

    async def init(self):
        """Pulse all resets: deassert, hold asserted for 10 core-clock
        cycles, then release.

        NOTE(review): the `signal <= value` assignment form is the
        deprecated cocotb syntax (removed in cocotb 2.0) — presumably this
        targets cocotb 1.x; confirm before upgrading.
        """
        self.dut.rst.setimmediatevalue(0)
        self.dut.phy_gmii_rst.setimmediatevalue(0)
        self.dut.qsfp_rx_rst_1.setimmediatevalue(0)
        self.dut.qsfp_tx_rst_1.setimmediatevalue(0)
        self.dut.qsfp_rx_rst_2.setimmediatevalue(0)
        self.dut.qsfp_tx_rst_2.setimmediatevalue(0)
        self.dut.qsfp_rx_rst_3.setimmediatevalue(0)
        self.dut.qsfp_tx_rst_3.setimmediatevalue(0)
        self.dut.qsfp_rx_rst_4.setimmediatevalue(0)
        self.dut.qsfp_tx_rst_4.setimmediatevalue(0)

        for k in range(10):
            await RisingEdge(self.dut.clk)

        # Assert all resets together.
        self.dut.rst <= 1
        self.dut.phy_gmii_rst <= 1
        self.dut.qsfp_rx_rst_1 <= 1
        self.dut.qsfp_tx_rst_1 <= 1
        self.dut.qsfp_rx_rst_2 <= 1
        self.dut.qsfp_tx_rst_2 <= 1
        self.dut.qsfp_rx_rst_3 <= 1
        self.dut.qsfp_tx_rst_3 <= 1
        self.dut.qsfp_rx_rst_4 <= 1
        self.dut.qsfp_tx_rst_4 <= 1

        for k in range(10):
            await RisingEdge(self.dut.clk)

        # Release all resets.
        self.dut.rst <= 0
        self.dut.phy_gmii_rst <= 0
        self.dut.qsfp_rx_rst_1 <= 0
        self.dut.qsfp_tx_rst_1 <= 0
        self.dut.qsfp_rx_rst_2 <= 0
        self.dut.qsfp_tx_rst_2 <= 0
        self.dut.qsfp_rx_rst_3 <= 0
        self.dut.qsfp_tx_rst_3 <= 0
        self.dut.qsfp_rx_rst_4 <= 0
        self.dut.qsfp_tx_rst_4 <= 0
@cocotb.test()
async def run_test(dut):
    """Top-level smoke test.

    Exercises three scenarios:
      1. UDP echo via QSFP 1 (including the initial ARP exchange),
      2. the gigabit tap inserted on the RX path (``sw = 0x8``),
      3. the gigabit tap inserted on the TX path (``sw = 0xC``).

    NOTE: signal writes use ``sig.value = x`` rather than the deprecated
    ``sig <= x`` overload (removed in cocotb 2.0).
    """

    def build_test_packet():
        # 256-byte incrementing payload inside an Ethernet/IPv4/UDP frame.
        payload = bytes([x % 256 for x in range(256)])
        eth = Ether(src='5a:51:52:53:54:55', dst='02:00:00:00:00:00')
        ip = IP(src='192.168.1.100', dst='192.168.1.128')
        udp = UDP(sport=5678, dport=1234)
        return eth / ip / udp / payload

    def check_udp_echo(rx_pkt, test_pkt):
        # The DUT must echo the UDP datagram with MAC/IP/port pairs swapped
        # and the payload intact.
        assert rx_pkt.dst == test_pkt.src
        assert rx_pkt.src == test_pkt.dst
        assert rx_pkt[IP].dst == test_pkt[IP].src
        assert rx_pkt[IP].src == test_pkt[IP].dst
        assert rx_pkt[UDP].dport == test_pkt[UDP].sport
        assert rx_pkt[UDP].sport == test_pkt[UDP].dport
        assert rx_pkt[UDP].payload == test_pkt[UDP].payload

    tb = TB(dut)

    await tb.init()

    tb.log.info("test UDP RX packet")

    test_pkt = build_test_packet()
    test_frame = XgmiiFrame.from_payload(test_pkt.build())

    await tb.qsfp_1_source.send(test_frame)

    tb.log.info("receive ARP request")

    rx_frame = await tb.qsfp_1_sink.recv()
    rx_pkt = Ether(bytes(rx_frame.get_payload()))

    tb.log.info("RX packet: %s", repr(rx_pkt))

    # The DUT does not know the sender's MAC yet, so it must broadcast an
    # ARP request for the sender's IP before it can answer the UDP packet.
    assert rx_pkt.dst == 'ff:ff:ff:ff:ff:ff'
    assert rx_pkt.src == test_pkt.dst
    assert rx_pkt[ARP].hwtype == 1
    assert rx_pkt[ARP].ptype == 0x0800
    assert rx_pkt[ARP].hwlen == 6
    assert rx_pkt[ARP].plen == 4
    assert rx_pkt[ARP].op == 1
    assert rx_pkt[ARP].hwsrc == test_pkt.dst
    assert rx_pkt[ARP].psrc == test_pkt[IP].dst
    assert rx_pkt[ARP].hwdst == '00:00:00:00:00:00'
    assert rx_pkt[ARP].pdst == test_pkt[IP].src

    tb.log.info("send ARP response")

    eth = Ether(src=test_pkt.src, dst=test_pkt.dst)
    arp = ARP(hwtype=1, ptype=0x0800, hwlen=6, plen=4, op=2,
              hwsrc=test_pkt.src, psrc=test_pkt[IP].src,
              hwdst=test_pkt.dst, pdst=test_pkt[IP].dst)
    resp_pkt = eth / arp
    resp_frame = XgmiiFrame.from_payload(resp_pkt.build())

    await tb.qsfp_1_source.send(resp_frame)

    tb.log.info("receive UDP packet")

    rx_frame = await tb.qsfp_1_sink.recv()
    rx_pkt = Ether(bytes(rx_frame.get_payload()))

    tb.log.info("RX packet: %s", repr(rx_pkt))

    check_udp_echo(rx_pkt, test_pkt)

    tb.log.info("test gigabit tap, RX side")

    # insert tap on the RX path
    await RisingEdge(dut.clk)
    dut.sw.value = 0x8
    await RisingEdge(dut.clk)

    test_pkt = build_test_packet()
    test_frame = GmiiFrame.from_payload(test_pkt.build())

    await tb.gmii_source.send(test_frame)

    tb.log.info("loop back packet on XGMII interface")

    rx_frame = await tb.qsfp_1_sink.recv()
    rx_pkt = Ether(bytes(rx_frame.get_payload()))

    tb.log.info("RX packet: %s", repr(rx_pkt))

    await tb.qsfp_1_source.send(rx_frame)

    tb.log.info("receive UDP packet")

    rx_frame = await tb.gmii_sink.recv()
    rx_pkt = Ether(bytes(rx_frame.get_payload()))

    tb.log.info("RX packet: %s", repr(rx_pkt))

    check_udp_echo(rx_pkt, test_pkt)

    tb.log.info("test gigabit tap, TX side")

    # insert tap on the TX path
    await RisingEdge(dut.clk)
    dut.sw.value = 0xC
    await RisingEdge(dut.clk)

    test_pkt = build_test_packet()
    test_frame = GmiiFrame.from_payload(test_pkt.build())

    await tb.gmii_source.send(test_frame)

    tb.log.info("loop back packet on XGMII interface")

    rx_frame = await tb.qsfp_1_sink.recv()
    rx_pkt = Ether(bytes(rx_frame.get_payload()))

    tb.log.info("RX packet: %s", repr(rx_pkt))

    await tb.qsfp_1_source.send(rx_frame)

    tb.log.info("receive UDP packet")

    rx_frame = await tb.gmii_sink.recv()
    rx_pkt = Ether(bytes(rx_frame.get_payload()))

    tb.log.info("RX packet: %s", repr(rx_pkt))

    check_udp_echo(rx_pkt, test_pkt)

    await RisingEdge(dut.clk)
    await RisingEdge(dut.clk)
# cocotb-test
# Absolute paths to the HDL source trees consumed by test_fpga_core() below.
tests_dir = os.path.abspath(os.path.dirname(__file__))
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))  # project RTL
lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib'))  # vendored libraries
axis_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'eth', 'lib', 'axis', 'rtl'))  # AXI-stream components
eth_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'eth', 'rtl'))  # Ethernet components
def test_fpga_core(request):
    """Configure and launch the cocotb simulation for the fpga_core toplevel."""
    dut = "fpga_core"
    module = os.path.splitext(os.path.basename(__file__))[0]
    toplevel = dut

    # HDL dependencies, grouped by library directory (order preserved).
    eth_sources = [
        "eth_mac_1g_fifo.v",
        "eth_mac_1g.v",
        "axis_gmii_rx.v",
        "axis_gmii_tx.v",
        "eth_mac_10g_fifo.v",
        "eth_mac_10g.v",
        "axis_xgmii_rx_64.v",
        "axis_xgmii_tx_64.v",
        "lfsr.v",
        "eth_axis_rx.v",
        "eth_axis_tx.v",
        "udp_complete_64.v",
        "udp_checksum_gen_64.v",
        "udp_64.v",
        "udp_ip_rx_64.v",
        "udp_ip_tx_64.v",
        "ip_complete_64.v",
        "ip_64.v",
        "ip_eth_rx_64.v",
        "ip_eth_tx_64.v",
        "ip_arb_mux.v",
        "arp.v",
        "arp_cache.v",
        "arp_eth_rx.v",
        "arp_eth_tx.v",
        "eth_arb_mux.v",
    ]
    axis_sources = [
        "arbiter.v",
        "priority_encoder.v",
        "axis_adapter.v",
        "axis_fifo.v",
        "axis_switch.v",
        "axis_register.v",
        "axis_async_fifo.v",
        "axis_async_fifo_adapter.v",
    ]

    verilog_sources = [os.path.join(rtl_dir, f"{dut}.v")]
    verilog_sources += [os.path.join(eth_rtl_dir, name) for name in eth_sources]
    verilog_sources += [os.path.join(axis_rtl_dir, name) for name in axis_sources]

    parameters = {}
    # parameters['A'] = val

    extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}

    # One sim_build directory per parametrized test instance.
    sim_build = os.path.join(tests_dir, "sim_build",
        request.node.name.replace('[', '-').replace(']', ''))

    cocotb_test.simulator.run(
        python_search=[tests_dir],
        verilog_sources=verilog_sources,
        toplevel=toplevel,
        module=module,
        parameters=parameters,
        sim_build=sim_build,
        extra_env=extra_env,
    )
| |
from __future__ import print_function
import copy
import time
import warnings
# Wall-clock start, used later to report total evaluation time.
program_start_time = time.time()
warnings.simplefilter("ignore", UserWarning)  # cuDNN warning

import logging
import formatting

# Module-wide logger; a per-dataset FileHandler is added in evaluateModel().
logger_evaluate = logging.getLogger('evaluate')
logger_evaluate.setLevel(logging.INFO)
# Console format uses $BOLD/$RESET placeholders expanded by formatting.formatter_message.
FORMAT = '[$BOLD%(filename)s$RESET:%(lineno)d][%(levelname)-5s]: %(message)s '
formatter = logging.Formatter(formatting.formatter_message(FORMAT, False))
# Verbose timestamped format (unused here; presumably kept for file logs -- TODO confirm).
formatter2 = logging.Formatter('%(asctime)s - %(name)-5s - %(levelname)-10s - (%(filename)s:%(lineno)d): %(message)s')
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger_evaluate.addHandler(ch)

print("\n * Importing libraries...")
from RNN_tools_lstm import *
import general_tools
import preprocessWavs
import fixDataset.transform as transform
from readData import *
class modelEvaluator:
    """Evaluate a stored RNN phoneme-classification model on several datasets.

    All of the work happens in ``__init__``: the network is built, stored
    weights are loaded, and :meth:`evaluateModel` is run once per configured
    dataset/TEST-set combination.
    """

    def __init__(self, model_dataset="TCDTIMIT"):
        ###########################
        # network parameters
        nbMFCCs = 39  # num of features to use -> see 'utils.py' in convertToPkl under processDatabase
        nbPhonemes = 39
        N_HIDDEN_LIST = [64, 64]
        BIDIRECTIONAL = True
        ADD_DENSE_LAYERS = False

        batch_size = 256

        root = os.path.expanduser("~/TCDTIMIT/audioSR/")
        # where preprocessed data will be stored in PKL format
        data_store_dir = root + "dataPreparedForEvaluation/batch_size" + str(batch_size) + "/"

        # get all the wavDirs we're going to evaluate
        evaluate_datasets = ['TIMIT', 'TCDTIMIT']
        dataNames = ['/TEST']  # ,'/TRAIN']

        # MODEL and log locations
        self.model_dataset = model_dataset  # the dataset the model has been trained on
        model_dir = root + model_dataset + "/results/"
        meanStd_path = root + model_dataset + "/binary39/" + model_dataset + "MeanStd.pkl"
        store_dir = root + model_dataset + "/evaluations"

        #################
        # locations for LOG, PARAMETERS, TRAIN info (automatically generated)
        model_name = str(len(N_HIDDEN_LIST)) + "_LSTMLayer" + '_'.join([str(layer) for layer in N_HIDDEN_LIST]) \
                     + "_nbMFCC" + str(nbMFCCs) + (
                         "_bidirectional" if BIDIRECTIONAL else "_unidirectional") + (
                         "_withDenseLayers" if ADD_DENSE_LAYERS else "") + "_" + model_dataset + "____TESTSliceLayer"
        store_dir = store_dir + os.sep + model_name
        if not os.path.exists(store_dir):
            os.makedirs(store_dir)

        # model parameters and network_training_info
        model_load = os.path.join(model_dir, model_name + ".npz")

        #### BUILDING MODEL ####
        logger_evaluate.info('* Building network ...')
        self.RNN_network = NeuralNetwork('RNN', batch_size=batch_size, num_features=nbMFCCs,
                                         n_hidden_list=N_HIDDEN_LIST, num_output_units=nbPhonemes,
                                         bidirectional=BIDIRECTIONAL, addDenseLayers=ADD_DENSE_LAYERS,
                                         seed=0, logger=logger_evaluate)

        # Try to load stored model
        # NOTE(review): ROUND_PARAMS is not defined in this file; presumably it
        # comes from the star import of RNN_tools_lstm -- verify.
        logger_evaluate.info(' Network built. Trying to load stored model: %s', model_load)
        returnVal = self.RNN_network.load_model(model_load, roundParams=ROUND_PARAMS, logger=logger_evaluate)
        if returnVal != 0:
            raise IOError("Model not found, no weights loaded. Train the model first with RNN.py")

        # print number of parameters
        nb_params = lasagne.layers.count_params(self.RNN_network.network_lout_batch)
        logger_evaluate.info(" Number of parameters of this network: %s", nb_params)

        ##### COMPILING FUNCTIONS #####
        logger_evaluate.info("* Compiling functions ...")
        self.RNN_network.build_functions(debug=False, train=False, logger=logger_evaluate)

        ## EVALUATION TIME :) ##
        # loop over the train/test sets of the different datasets
        for evaluate_dataset in evaluate_datasets:
            for dataName in dataNames:
                wavDir = os.path.expanduser("~/TCDTIMIT/audioSR/" + evaluate_dataset + "/fixed39/") \
                         + evaluate_dataset + dataName
                dataName = evaluate_dataset + dataName
                self.evaluateModel(BIDIRECTIONAL, N_HIDDEN_LIST, batch_size, dataName, wavDir,
                                   data_store_dir, meanStd_path, model_load, nbMFCCs, store_dir,
                                   force_overwrite=True)

    def evaluateModel(self, BIDIRECTIONAL, N_HIDDEN_LIST, batch_size, dataName, wavDir, data_store_dir,
                      meanStd_path, model_load, nbMFCCs, store_dir, force_overwrite=False):
        """Run one evaluation pass of the loaded network over the data in *wavDir*.

        Preprocessed data is cached under *data_store_dir* so subsequent runs
        skip the WAV preprocessing; a per-dataset log file is written under
        *store_dir*. When no .phn label files are found, only predictions are
        produced (no accuracy figure).
        """
        logger_evaluate.info("\n\n\n")

        ####### THE DATA you want to evaluate ##########
        data_store_path = data_store_dir + dataName.replace('/', '_') + "_nbMFCC" + str(nbMFCCs)
        if not os.path.exists(data_store_dir):
            os.makedirs(data_store_dir)
        predictions_path = store_dir + os.sep + dataName.replace('/', '_') + "_predictions.pkl"

        # log file
        logFile = store_dir + os.sep + "Evaluation" + dataName.replace('/', '_') + '.log'
        if os.path.exists(logFile) and not force_overwrite:
            from general_tools import query_yes_no
            if query_yes_no("Log file already exists at %s\n Do you want to evaluate again and overwrite?", "y"):
                pass
            else:
                logger_evaluate.info("Log file already exists, not re-evaluating.... ")
                return 0

        fh = logging.FileHandler(logFile, 'w')  # create new logFile
        fh.setLevel(logging.INFO)
        fh.setFormatter(formatter)
        logger_evaluate.addHandler(fh)

        logger_evaluate.info("\n MODEL: %s", model_load)
        logger_evaluate.info("\n WAV_DIR: %s", wavDir)
        logger_evaluate.info("\n PREDICTS: %s", predictions_path)
        logger_evaluate.info("\n LOG: %s", logFile)
        logger_evaluate.info("\n")

        # GATHERING DATA: use cached pickles when available, otherwise
        # preprocess the WAV files (and cache the result).
        logger_evaluate.info("* Gathering Data ...")
        if os.path.exists(data_store_path + ".pkl"):
            [inputs, targets, valid_frames] = unpickle(data_store_path + ".pkl")
            calculateAccuracy = True
            logger_evaluate.info("Successfully loaded preprocessed data, with targets")
        elif os.path.exists(data_store_path + "_noTargets.pkl"):
            # TODO: make it work for unlabeled datasets. see RNN_tools_lstm.py, eg iterate_minibatch_noTargets.
            [inputs] = unpickle(data_store_path + "_noTargets.pkl")
            calculateAccuracy = False  # we can't as we don't know the correct labels
            logger_evaluate.info("Successfully loaded preprocessed data, no targets")
        else:
            logger_evaluate.info("Data not found, preprocessing...")

            # From WAVS, generate X, y and valid_frames; also store under data_store_dir
            def preprocessLabeledWavs(wavDir, store_dir, name):
                # fixWavs -> suppose this is done
                # convert to pkl
                X, y, valid_frames = preprocessWavs.preprocess_dataset(source_path=wavDir, nbMFCCs=nbMFCCs,
                                                                       logger=logger_evaluate)
                X_data_type = 'float32'
                X = preprocessWavs.set_type(X, X_data_type)
                y_data_type = 'int32'
                y = preprocessWavs.set_type(y, y_data_type)
                valid_frames_data_type = 'int32'
                valid_frames = preprocessWavs.set_type(valid_frames, valid_frames_data_type)
                return X, y, valid_frames

            def preprocessUnlabeledWavs(wavDir, store_dir, name):  # TODO
                # fixWavs -> suppose this is done
                # convert to pkl
                X = preprocessWavs.preprocess_unlabeled_dataset(source_path=wavDir, nbMFCCs=nbMFCCs,
                                                                logger=logger_evaluate)
                X_data_type = 'float32'
                X = preprocessWavs.set_type(X, X_data_type)
                return X

            # load wavs and labels
            wav_files = transform.loadWavs(wavDir)
            # keep the last four path components of each wav for readable reporting
            wav_filenames = [str(
                os.path.basename(
                    os.path.dirname(os.path.dirname(os.path.dirname(wav_file)))) + os.sep + os.path.basename(
                    os.path.dirname(os.path.dirname(wav_file))) + os.sep + os.path.basename(
                    os.path.dirname(wav_file)) + os.sep + os.path.basename(wav_file)) for wav_file in wav_files]
            logger_evaluate.info("Found %s files to evaluate \n Example: %s", len(wav_filenames), wav_filenames[0])
            label_files = transform.loadPhns(wavDir)

            # if source dir doesn't contain labels, we can't calculate accuracy
            calculateAccuracy = True
            if not (len(wav_files) == len(label_files)):
                calculateAccuracy = False
                inputs = preprocessUnlabeledWavs(wavDir=wavDir, store_dir=store_dir, name=dataName)
            else:
                inputs, targets, valid_frames = preprocessLabeledWavs(wavDir=wavDir, store_dir=store_dir,
                                                                      name=dataName)

            # normalize inputs using dataset Mean and Std_dev; convert to float32 for GPU evaluation
            with open(meanStd_path, 'rb') as cPickle_file:
                [mean_val, std_val] = cPickle.load(cPickle_file)
            inputs = preprocessWavs.normalize(inputs, mean_val, std_val)

            # just to be sure
            X_data_type = 'float32'
            inputs = preprocessWavs.set_type(inputs, X_data_type)

            # Print some information
            logger_evaluate.debug("* Data information")
            logger_evaluate.debug(' inputs')
            logger_evaluate.debug('%s %s', type(inputs), len(inputs))
            logger_evaluate.debug('%s %s', type(inputs[0]), inputs[0].shape)
            logger_evaluate.debug('%s %s', type(inputs[0][0]), inputs[0][0].shape)
            logger_evaluate.debug('%s', type(inputs[0][0][0]))
            # BUGFIX: only inspect targets when they exist; the original logged
            # them unconditionally, raising NameError for unlabeled datasets.
            if calculateAccuracy:
                logger_evaluate.debug('y train')
                logger_evaluate.debug(' %s %s', type(targets), len(targets))
                logger_evaluate.debug(' %s %s', type(targets[0]), targets[0].shape)
                logger_evaluate.debug(' %s %s', type(targets[0][0]), targets[0][0].shape)

            # slice to have a number of inputs that is a multiple of batch size
            logger_evaluate.info("Not evaluating %s last files (batch size mismatch)", len(inputs) % batch_size)
            inputs = inputs[:-(len(inputs) % batch_size) or None]
            if calculateAccuracy:
                targets = targets[:-(len(targets) % batch_size) or None]
                valid_frames = valid_frames[:-(len(valid_frames) % batch_size) or None]

            # pad the inputs to process batches easily
            inputs = pad_sequences_X(inputs)
            if calculateAccuracy:
                targets = pad_sequences_y(targets)

            # save the preprocessed data
            logger_evaluate.info("storing preprocessed data to: %s", data_store_path)
            if calculateAccuracy:
                general_tools.saveToPkl(data_store_path + '.pkl', [inputs, targets, valid_frames])
            else:
                general_tools.saveToPkl(data_store_path + '_noTargets.pkl', [inputs])

        # Gather filenames; for debugging
        wav_files = transform.loadWavs(wavDir)
        wav_filenames = [str(
            os.path.basename(
                os.path.dirname(os.path.dirname(os.path.dirname(wav_file)))) + os.sep + os.path.basename(
                os.path.dirname(os.path.dirname(wav_file))) + os.sep + os.path.basename(
                os.path.dirname(wav_file)) + os.sep + os.path.basename(wav_file)) for wav_file in wav_files]
        logger_evaluate.debug(" # inputs: %s, # wav files: %s", len(inputs), len(wav_files))

        # make copy of data because we might need to use it again for calculating
        # accuracy, and the iterator will remove elements from the array
        inputs_bak = copy.deepcopy(inputs)
        if calculateAccuracy:
            targets_bak = copy.deepcopy(targets)
            valid_frames_bak = copy.deepcopy(valid_frames)

        logger_evaluate.info("* Evaluating: pass over Evaluation Set")
        if calculateAccuracy:  # if .phn files are provided, we can check our predictions
            logger_evaluate.info("Getting predictions and calculating accuracy...")
            avg_error, avg_acc, predictions = self.RNN_network.run_epoch(
                X=inputs, y=targets, valid_frames=valid_frames,
                get_predictions=True, batch_size=batch_size)
            logger_evaluate.info("All batches, avg Accuracy: %s", avg_acc)
            inputs = inputs_bak
            targets = targets_bak
            valid_frames = valid_frames_bak
            # uncomment if you want to save everything in one place (takes quite a lot of storage space)
            # general_tools.saveToPkl(predictions_path, [inputs, predictions, targets, valid_frames, avg_acc])
        else:
            # TODO fix this
            # BUGFIX: 'predictions' was read before assignment in this branch.
            predictions = []
            for inputs, masks, seq_lengths in tqdm(
                    iterate_minibatches_noTargets(inputs, batch_size=batch_size, shuffle=False),
                    total=len(inputs)):
                # get predictions
                nb_inputs = len(inputs)  # usually batch size, but could be lower
                prediction = self.RNN_network.predictions_fn(inputs, masks)
                prediction = np.reshape(prediction, (nb_inputs, -1))
                prediction = list(prediction)
                predictions = predictions + prediction
            inputs = inputs_bak
            # general_tools.saveToPkl(predictions_path, [inputs, predictions])

        # Print information about the predictions
        logger_evaluate.info("* Done")
        end_evaluation_time = time.time()
        eval_duration = end_evaluation_time - program_start_time
        logger_evaluate.info('Total time: {:.3f}'.format(eval_duration))

        # Print the results
        # BUGFIX: printEvaluation needs targets/valid_frames/avg_acc, which only
        # exist when labels were available; the original called it
        # unconditionally and fell into a bare 'except: pdb.set_trace()'.
        if calculateAccuracy:
            try:
                printEvaluation(wav_filenames, inputs, predictions, targets, valid_frames, avg_acc,
                                range(len(inputs)), logger=logger_evaluate, only_final_accuracy=True)
            except Exception:  # narrowed from bare 'except:' (no longer traps KeyboardInterrupt)
                logger_evaluate.exception("printEvaluation failed")
                pdb.set_trace()
        else:
            logger_evaluate.info("No targets available; skipping accuracy report.")

        logger_evaluate.info('Evaluation duration: {:.3f}'.format(eval_duration))
        logger_evaluate.info('Printing duration: {:.3f}'.format(time.time() - end_evaluation_time))

        # close the log handler
        fh.close()
        logger_evaluate.removeHandler(fh)
evaluator = modelEvaluator()
| |
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pyOCD.target.target import TARGET_RUNNING
import logging
from struct import unpack
from time import time
from flash_builder import FLASH_PAGE_ERASE, FLASH_CHIP_ERASE, FlashBuilder
# Default timing weights used by getPageInfo()/getFlashInfo() when an
# algorithm does not supply its own estimates.
DEFAULT_PAGE_PROGRAM_WEIGHT = 0.130
DEFAULT_PAGE_ERASE_WEIGHT = 0.048
DEFAULT_CHIP_ERASE_WEIGHT = 0.174

# Program to compute the CRC of sectors. This works on cortex-m processors.
# Code is relocatable and only needs to be on a 4 byte boundary.
# 200 bytes of executable data below + 1024 byte crc table = 1224 bytes
# Usage requirements:
# -In memory reserve 0x600 for code & table
# -Make sure data buffer is big enough to hold 4 bytes for each page that could be checked (ie. >= num pages * 4)
# Written to 'analyzer_address' by callFunction(init=True); the CRC-32
# polynomial constant 0xEDB88320 is visible in the penultimate word.
analyzer = (
    0x2180468c, 0x2600b5f0, 0x4f2c2501, 0x447f4c2c, 0x1c2b0049, 0x425b4033, 0x40230872, 0x085a4053,
    0x425b402b, 0x40534023, 0x402b085a, 0x4023425b, 0x085a4053, 0x425b402b, 0x40534023, 0x402b085a,
    0x4023425b, 0x085a4053, 0x425b402b, 0x40534023, 0x402b085a, 0x4023425b, 0x085a4053, 0x425b402b,
    0x40534023, 0xc7083601, 0xd1d2428e, 0x2b004663, 0x4663d01f, 0x46b4009e, 0x24ff2701, 0x44844d11,
    0x1c3a447d, 0x88418803, 0x4351409a, 0xd0122a00, 0x22011856, 0x780b4252, 0x40533101, 0x009b4023,
    0x0a12595b, 0x42b1405a, 0x43d2d1f5, 0x4560c004, 0x2000d1e7, 0x2200bdf0, 0x46c0e7f8, 0x000000b6,
    0xedb88320, 0x00000044,
)
def _msb( n ):
ndx = 0
while ( 1 < n ):
n = ( n >> 1 )
ndx += 1
return ndx
def _same(d1, d2):
if len(d1) != len(d2):
return False
for i in range(len(d1)):
if d1[i] != d2[i]:
return False
return True
class PageInfo(object):
    """Geometry and timing info for one flash page; returned by Flash.getPageInfo()."""
    def __init__(self):
        self.base_addr = None # Start address of this page
        self.erase_weight = None # Time it takes to erase a page
        self.program_weight = None # Time it takes to program a page (Not including data transfer time)
        self.size = None # Size of page
        self.crc_supported = None # Is the function computeCrcs supported?
class FlashInfo(object):
    """Info about the flash as a whole; returned by Flash.getFlashInfo()."""
    def __init__(self):
        self.rom_start = None # Starting address of ROM
        self.erase_weight = None # Time it takes to perform a chip erase
        # Declared here for consistency with PageInfo; previously this
        # attribute only appeared dynamically in Flash.getFlashInfo().
        self.crc_supported = None # Is the function computeCrcs supported?
class Flash(object):
"""
This class is responsible to flash a new binary in a target
"""
def __init__(self, target, flash_algo):
self.target = target
self.flash_algo = flash_algo
self.flash_algo_debug = False
if flash_algo is not None:
self.end_flash_algo = flash_algo['load_address'] + len(flash_algo)*4
self.begin_stack = flash_algo['begin_stack']
self.begin_data = flash_algo['begin_data']
self.static_base = flash_algo['static_base']
self.min_program_length = flash_algo.get('min_program_length', 0)
# Check for double buffering support.
if flash_algo.has_key('page_buffers'):
self.page_buffers = flash_algo['page_buffers']
else:
self.page_buffers = [self.begin_data]
self.double_buffer_supported = len(self.page_buffers) > 1
else:
self.end_flash_algo = None
self.begin_stack = None
self.begin_data = None
self.static_base = None
@property
def minimumProgramLength(self):
return self.min_program_length
def init(self):
"""
Download the flash algorithm in RAM
"""
self.target.halt()
self.target.setTargetState("PROGRAM")
# update core register to execute the init subroutine
result = self.callFunctionAndWait(self.flash_algo['pc_init'], init=True)
# check the return code
if result != 0:
logging.error('init error: %i', result)
def computeCrcs(self, sectors):
data = []
# Convert address, size pairs into commands
# for the crc computation algorithm to preform
for addr, size in sectors:
size_val = _msb(size)
addr_val = addr // size
# Size must be a power of 2
assert (1 << size_val) == size
# Address must be a multiple of size
assert (addr % size) == 0
val = (size_val << 0) | (addr_val << 16)
data.append(val)
self.target.writeBlockMemoryAligned32(self.begin_data, data)
# update core register to execute the subroutine
result = self.callFunctionAndWait(self.flash_algo['analyzer_address'], self.begin_data, len(data))
# Read back the CRCs for each section
data = self.target.readBlockMemoryAligned32(self.begin_data, len(data))
return data
def eraseAll(self):
"""
Erase all the flash
"""
# update core register to execute the eraseAll subroutine
result = self.callFunctionAndWait(self.flash_algo['pc_eraseAll'])
# check the return code
if result != 0:
logging.error('eraseAll error: %i', result)
def erasePage(self, flashPtr):
"""
Erase one page
"""
# update core register to execute the erasePage subroutine
result = self.callFunctionAndWait(self.flash_algo['pc_erase_sector'], flashPtr)
# check the return code
if result != 0:
logging.error('erasePage(0x%x) error: %i', flashPtr, result)
def programPage(self, flashPtr, bytes):
"""
Flash one page
"""
# prevent security settings from locking the device
bytes = self.overrideSecurityBits(flashPtr, bytes)
# first transfer in RAM
self.target.writeBlockMemoryUnaligned8(self.begin_data, bytes)
# get info about this page
page_info = self.getPageInfo(flashPtr)
# update core register to execute the program_page subroutine
result = self.callFunctionAndWait(self.flash_algo['pc_program_page'], flashPtr, page_info.size, self.begin_data)
# check the return code
if result != 0:
logging.error('programPage(0x%x) error: %i', flashPtr, result)
def getPageBufferCount(self):
return len(self.page_buffers)
def isDoubleBufferingSupported(self):
return self.double_buffer_supported
def startProgramPageWithBuffer(self, bufferNumber, flashPtr):
"""
Flash one page
"""
assert bufferNumber < len(self.page_buffers), "Invalid buffer number"
# get info about this page
page_info = self.getPageInfo(flashPtr)
# update core register to execute the program_page subroutine
result = self.callFunction(self.flash_algo['pc_program_page'], flashPtr, page_info.size, self.page_buffers[bufferNumber])
def loadPageBuffer(self, bufferNumber, flashPtr, bytes):
assert bufferNumber < len(self.page_buffers), "Invalid buffer number"
# prevent security settings from locking the device
bytes = self.overrideSecurityBits(flashPtr, bytes)
# transfer the buffer to device RAM
self.target.writeBlockMemoryUnaligned8(self.page_buffers[bufferNumber], bytes)
def programPhrase(self, flashPtr, bytes):
"""
Flash a portion of a page.
"""
# Get min programming length. If one was not specified, use the page size.
if self.min_program_length:
min_len = self.min_program_length
else:
min_len = self.getPageInfo(flashPtr).size
# Require write address and length to be aligned to min write size.
if (flashPtr % min_len) or (len(bytes) % min_len):
raise RuntimeError("unaligned address or length")
# prevent security settings from locking the device
bytes = self.overrideSecurityBits(flashPtr, bytes)
# first transfer in RAM
self.target.writeBlockMemoryUnaligned8(self.begin_data, bytes)
# update core register to execute the program_page subroutine
result = self.callFunctionAndWait(self.flash_algo['pc_program_page'], flashPtr, bytes_len, self.begin_data)
# check the return code
if result != 0:
logging.error('programPhrase(0x%x) error: %i', flashPtr, result)
def getPageInfo(self, addr):
"""
Get info about the page that contains this address
Override this function if variable page sizes are supported
"""
region = self.target.getMemoryMap().getRegionForAddress(addr)
if not region:
return None
info = PageInfo()
info.erase_weight = DEFAULT_PAGE_ERASE_WEIGHT
info.program_weight = DEFAULT_PAGE_PROGRAM_WEIGHT
info.size = region.blocksize
info.base_addr = addr - (addr % info.size)
return info
def getFlashInfo(self):
"""
Get info about the flash
Override this function to return differnt values
"""
boot_region = self.target.getMemoryMap().getBootMemory()
info = FlashInfo()
info.rom_start = boot_region.start if boot_region else 0
info.erase_weight = DEFAULT_CHIP_ERASE_WEIGHT
info.crc_supported = self.flash_algo['analyzer_supported']
return info
def getFlashBuilder(self):
return FlashBuilder(self, self.getFlashInfo().rom_start)
def flashBlock(self, addr, data, smart_flash = True, chip_erase = None, progress_cb = None, fast_verify = False):
"""
Flash a block of data
"""
flash_start = self.getFlashInfo().rom_start
fb = FlashBuilder(self, flash_start)
fb.addData(addr, data)
info = fb.program(chip_erase, progress_cb, smart_flash, fast_verify)
return info
def flashBinary(self, path_file, flashPtr = None, smart_flash = True, chip_erase = None, progress_cb = None, fast_verify = False):
"""
Flash a binary
"""
if flashPtr is None:
flashPtr = self.getFlashInfo().rom_start
f = open(path_file, "rb")
with open(path_file, "rb") as f:
data = f.read()
data = unpack(str(len(data)) + 'B', data)
self.flashBlock(flashPtr, data, smart_flash, chip_erase, progress_cb, fast_verify)
def callFunction(self, pc, r0=None, r1=None, r2=None, r3=None, init=False):
reg_list = []
data_list = []
if self.flash_algo_debug:
# Save vector catch state for use in waitForCompletion()
self._vector_catch_enabled = self.target.getVectorCatchFault()
self._reset_catch_enabled = self.target.getVectorCatchReset()
self.target.setVectorCatchFault(True)
self.target.setVectorCatchReset(True)
if init:
# download flash algo in RAM
self.target.writeBlockMemoryAligned32(self.flash_algo['load_address'], self.flash_algo['instructions'])
if self.flash_algo['analyzer_supported']:
self.target.writeBlockMemoryAligned32(self.flash_algo['analyzer_address'], analyzer)
reg_list.append('pc')
data_list.append(pc)
if r0 is not None:
reg_list.append('r0')
data_list.append(r0)
if r1 is not None:
reg_list.append('r1')
data_list.append(r1)
if r2 is not None:
reg_list.append('r2')
data_list.append(r2)
if r3 is not None:
reg_list.append('r3')
data_list.append(r3)
if init:
reg_list.append('r9')
data_list.append(self.static_base)
if init:
reg_list.append('sp')
data_list.append(self.begin_stack)
reg_list.append('lr')
data_list.append(self.flash_algo['load_address'] + 1)
self.target.writeCoreRegistersRaw(reg_list, data_list)
# resume target
self.target.resume()
    ## @brief Wait until the breakpoint is hit.
    #
    # Spins until the target core halts (the flash algorithm ends by hitting
    # the breakpoint set at its load address). When flash_algo_debug is
    # enabled, verifies that the algorithm left the frame pointer, stack
    # pointer and PC in their expected post-run state. Finally restores the
    # vector-catch settings saved before the call and returns the algorithm's
    # result value (register r0).
    def waitForCompletion(self):
        # Busy-wait until the core stops running.
        while(self.target.getState() == TARGET_RUNNING):
            pass
        if self.flash_algo_debug:
            # Expected post-run CPU state, taken from the flash algo blob.
            analyzer_supported = self.flash_algo['analyzer_supported']
            expected_fp = self.flash_algo['static_base']
            expected_sp = self.flash_algo['begin_stack']
            expected_pc = self.flash_algo['load_address']
            expected_flash_algo = self.flash_algo['instructions']
            if analyzer_supported:
                # NOTE(review): `analyzer` appears to be a module-level table
                # defined elsewhere in this file — confirm.
                expected_analyzer = analyzer
            final_fp = self.target.readCoreRegister('r9')
            final_sp = self.target.readCoreRegister('sp')
            final_pc = self.target.readCoreRegister('pc')
            #TODO - uncomment if Read/write and zero init sections can be moved into a separate flash algo section
            #final_flash_algo = self.target.readBlockMemoryAligned32(self.flash_algo['load_address'], len(self.flash_algo['instructions']))
            #if analyzer_supported:
            #    final_analyzer = self.target.readBlockMemoryAligned32(self.flash_algo['analyzer_address'], len(analyzer))
            error = False
            if final_fp != expected_fp:
                # Frame pointer should not change
                logging.error("Frame pointer should be 0x%x but is 0x%x" % (expected_fp, final_fp))
                error = True
            if final_sp != expected_sp:
                # Stack pointer should return to original value after function call
                logging.error("Stack pointer should be 0x%x but is 0x%x" % (expected_sp, final_sp))
                error = True
            if final_pc != expected_pc:
                # PC should be pointing to breakpoint address
                logging.error("PC should be 0x%x but is 0x%x" % (expected_pc, final_pc))
                error = True
            #TODO - uncomment if Read/write and zero init sections can be moved into a separate flash algo section
            #if not _same(expected_flash_algo, final_flash_algo):
            #    logging.error("Flash algorithm overwritten!")
            #    error = True
            #if analyzer_supported and not _same(expected_analyzer, final_analyzer):
            #    logging.error("Analyzer overwritten!")
            #    error = True
            assert error == False
        # Restore the vector catch state saved when the call was set up.
        self.target.setVectorCatchFault(self._vector_catch_enabled)
        self.target.setVectorCatchReset(self._reset_catch_enabled)
        # Flash algo routines leave their status/result in r0.
        return self.target.readCoreRegister('r0')
    def callFunctionAndWait(self, pc, r0=None, r1=None, r2=None, r3=None, init=False):
        """Run the flash-algo routine at *pc* and block until it halts.

        Convenience wrapper: sets up registers and resumes the target via
        callFunction(), then waits for the breakpoint via waitForCompletion()
        and returns the routine's result (r0).
        """
        self.callFunction(pc, r0, r1, r2, r3, init)
        return self.waitForCompletion()
def setFlashAlgoDebug(self, enable):
"""
Turn on extra flash algorithm checking
When set this will greatly slow down flash algo performance
"""
self.flash_algo_debug = enable
def overrideSecurityBits(self, address, data):
return data
| |
""" Defines the PlotLabel class.
"""
from __future__ import with_statement
from enable.font_metrics_provider import font_metrics_provider
from traits.api import DelegatesTo, Enum, Instance, Str, Trait
from abstract_overlay import AbstractOverlay
from label import Label
# Reusable delegation descriptor: every PlotLabel trait assigned
# LabelDelegate is delegated to the wrapped Label stored in `_label`.
LabelDelegate = DelegatesTo("_label")
class PlotLabel(AbstractOverlay):
    """ A label used by plots.
    This class wraps a simple Label instance, and delegates some traits to it.
    """
    # The text of the label.
    text = LabelDelegate
    # The color of the label text.
    color = DelegatesTo("_label")
    # The font for the label text.
    font = LabelDelegate
    # The angle of rotation of the label.
    angle = DelegatesTo("_label", "rotate_angle")
    # Remaining cosmetic traits are likewise delegated to the wrapped Label.
    bgcolor = LabelDelegate
    border_width = LabelDelegate
    border_color = LabelDelegate
    border_visible = LabelDelegate
    margin = LabelDelegate
    line_spacing = LabelDelegate
    #------------------------------------------------------------------------
    # Layout-related traits
    #------------------------------------------------------------------------
    # Horizontal justification used if the label has more horizontal space
    # than it needs.
    hjustify = Enum("center", "left", "right")
    # Vertical justification used if the label has more vertical space than it
    # needs.
    vjustify = Enum("center", "bottom", "top")
    # The position of this label relative to the object it is overlaying.
    # Can be "top", "left", "right", "bottom", and optionally can be preceded
    # by the words "inside" or "outside", separated by a space. If "inside"
    # and "outside" are not provided, then defaults to "outside".
    # Examples:
    # inside top
    # outside right
    overlay_position = Trait("outside top", Str, None)
    # Should this PlotLabel modify the padding on its underlying component
    # if there is not enough room to lay out the text?
    # FIXME: This could cause cycles in layout, so not implemented for now
    #modify_component = Bool(True)
    # By default, this acts like a component and will render on the main
    # "plot" layer unless its **component** attribute gets set.
    draw_layer = "plot"
    #------------------------------------------------------------------------
    # Private traits
    #------------------------------------------------------------------------
    # The label has a fixed height and can be resized horizontally. (Overrides
    # PlotComponent.)
    resizable = "h"
    # The Label instance this plot label is wrapping.
    _label = Instance(Label, args=())
    def __init__(self, text="", *args, **kw):
        # Initialize the base overlay first so trait delegation to `_label`
        # is in place before `text` is assigned.
        super(PlotLabel, self).__init__(*args, **kw)
        self.text = text
        return
    def overlay(self, component, gc, view_bounds=None, mode="normal"):
        """ Draws this label overlaid on another component.
        Overrides AbstractOverlay.
        """
        self._draw_overlay(gc, view_bounds, mode)
        return
    def get_preferred_size(self):
        """ Returns the label's preferred size.
        Overrides PlotComponent.
        """
        # Measure with a dummy graphics context; only font metrics matter.
        dummy_gc = font_metrics_provider()
        size = self._label.get_bounding_box(dummy_gc)
        return size
    def do_layout(self):
        """ Tells this component to do layout.
        Overrides PlotComponent.
        """
        if self.component is not None:
            self._layout_as_overlay()
        else:
            self._layout_as_component()
        return
    def _draw_overlay(self, gc, view_bounds=None, mode="normal"):
        """ Draws the overlay layer of a component.
        Overrides PlotComponent.
        """
        # Perform justification and compute the correct offsets for
        # the label position
        width, height = self._label.get_bounding_box(gc)
        if self.hjustify == "left":
            x_offset = 0
        elif self.hjustify == "right":
            x_offset = self.width - width
        elif self.hjustify == "center":
            x_offset = int((self.width - width) / 2)
        if self.vjustify == "bottom":
            y_offset = 0
        elif self.vjustify == "top":
            y_offset = self.height - height
        elif self.vjustify == "center":
            y_offset = int((self.height - height) / 2)
        with gc:
            # XXX: Uncomment this after we fix kiva GL backend's clip stack
            #gc.clip_to_rect(self.x, self.y, self.width, self.height)
            # We have to translate to our position because the label
            # tries to draw at (0,0).
            gc.translate_ctm(self.x + x_offset, self.y + y_offset)
            self._label.draw(gc)
        return
    def _draw_plot(self, gc, view_bounds=None, mode="normal"):
        # Draws the label when acting as a standalone component.
        if self.component is None:
            # We are not overlaying anything else, so we should render
            # on this layer
            self._draw_overlay(gc, view_bounds, mode)
    def _layout_as_component(self, size=None, force=False):
        # No-op: when used as a plain component the container drives bounds.
        pass
    def _layout_as_overlay(self, size=None, force=False):
        """ Lays out the label as an overlay on another component.
        """
        if self.component is not None:
            orientation = self.overlay_position
            outside = True
            # Strip the optional "inside"/"outside" prefix; "outside" is the
            # default when no prefix is present.
            if "inside" in orientation:
                tmp = orientation.split()
                tmp.remove("inside")
                orientation = tmp[0]
                outside = False
            elif "outside" in orientation:
                tmp = orientation.split()
                tmp.remove("outside")
                orientation = tmp[0]
            if orientation in ("left", "right"):
                # Span the component vertically; width comes from the padding
                # area (outside) or from the text's own width (inside).
                self.y = self.component.y
                self.height = self.component.height
                if not outside:
                    gc = font_metrics_provider()
                    self.width = self._label.get_bounding_box(gc)[0]
                if orientation == "left":
                    if outside:
                        self.x = self.component.outer_x
                        self.width = self.component.padding_left
                    else:
                        self.outer_x = self.component.x
                elif orientation == "right":
                    if outside:
                        self.x = self.component.x2 + 1
                        self.width = self.component.padding_right
                    else:
                        self.x = self.component.x2 - self.outer_width
            elif orientation in ("bottom", "top"):
                # Span the component horizontally; height comes from the
                # padding area (outside) or the text's own height (inside).
                self.x = self.component.x
                self.width = self.component.width
                if not outside:
                    gc = font_metrics_provider()
                    self.height = self._label.get_bounding_box(gc)[1]
                if orientation == "bottom":
                    if outside:
                        self.y = self.component.outer_y
                        self.height = self.component.padding_bottom
                    else:
                        self.outer_y = self.component.y
                elif orientation == "top":
                    if outside:
                        self.y = self.component.y2 + 1
                        self.height = self.component.padding_top
                    else:
                        self.y = self.component.y2 - self.outer_height
            else:
                # Leave the position alone
                pass
        return
    def _text_changed(self, old, new):
        # Push the new text down to the wrapped Label and re-layout.
        self._label.text = new
        self.do_layout()
        return
    def _font_changed(self, old, new):
        # Push the new font down to the wrapped Label and re-layout.
        self._label.font = new
        self.do_layout()
        return
    def _angle_changed(self, old, new):
        # The trait is named "angle" here but "rotate_angle" on the Label.
        self._label.rotate_angle = new
        self.do_layout()
        return
    def _overlay_position_changed(self):
        self.do_layout()
    def _component_changed(self, old, new):
        # Render on the overlay layer when attached to a component,
        # otherwise on the main "plot" layer.
        if new:
            self.draw_layer = "overlay"
        else:
            self.draw_layer = "plot"
        return
| |
# -*- coding: utf-8 -*-
"""Diff module."""
#
# (C) Pywikibot team, 2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: ee41492e16f464fb9f159339e791a13fdad11d8c $'
import difflib
import math
import sys
from collections import Sequence
if sys.version_info[0] > 2:
from itertools import zip_longest
else:
from itertools import izip_longest as zip_longest
import pywikibot
from pywikibot.tools import chars
from pywikibot.backports import format_range_unified # introduced in 2.7.2
from pywikibot.tools import deprecated_args
class Hunk(object):

    """One change hunk between a and b.

    Note: parts of this code are taken from by difflib.get_grouped_opcodes().
    """

    # Review states.
    APPR = 1
    NOT_APPR = -1
    PENDING = 0

    def __init__(self, a, b, grouped_opcode):
        """
        Constructor.

        @param a: sequence of lines
        @param b: sequence of lines
        @param grouped_opcode: list of 5-tuples describing how to turn a into b.
            it has the same format as returned by difflib.get_opcodes().
        """
        self.a = a
        self.b = b
        self.group = grouped_opcode
        self.header = u''
        self.colors = {
            '+': 'lightgreen',
            '-': 'lightred',
        }
        self.diff = list(self.create_diff())
        self.diff_plain_text = u''.join(self.diff)
        self.diff_text = u''.join(self.format_diff())
        # Line ranges of a and b covered by this hunk.
        first, last = self.group[0], self.group[-1]
        self.a_rng = (first[1], last[2])
        self.b_rng = (first[3], last[4])
        self.header = self.get_header()
        self.diff_plain_text = u'%s\n%s' % (self.header, self.diff_plain_text)
        self.diff_text = u'%s' % self.diff_text
        self.reviewed = self.PENDING

    def get_header(self):
        """Provide header of unified diff."""
        return self.get_header_text(self.a_rng, self.b_rng) + '\n'

    @staticmethod
    def get_header_text(a_rng, b_rng, affix='@@'):
        """Provide header for any ranges."""
        a_rng = format_range_unified(*a_rng)
        b_rng = format_range_unified(*b_rng)
        return '{0} -{1} +{2} {0}'.format(affix, a_rng, b_rng)

    def create_diff(self):
        """Generator of diff text for this hunk, without formatting."""
        # make sure each line ends with '\n' to prevent
        # behaviour like http://bugs.python.org/issue2142
        def check_line(l):
            if not l.endswith('\n'):
                return l + '\n'
            return l

        # Bug fix: the original used `if tag in ('delete')` etc., which is a
        # *substring* test against a plain string (the parentheses do not
        # make a tuple); replaced with an explicit comparison chain.
        for tag, i1, i2, j1, j2 in self.group:
            # equal/delete/insert add additional space after the sign as it's
            # what difflib.ndiff does do too.
            if tag == 'equal':
                for line in self.a[i1:i2]:
                    yield ' ' + check_line(line)
            elif tag == 'delete':
                for line in self.a[i1:i2]:
                    yield '- ' + check_line(line)
            elif tag == 'insert':
                for line in self.b[j1:j2]:
                    yield '+ ' + check_line(line)
            elif tag == 'replace':
                for line in difflib.ndiff(self.a[i1:i2], self.b[j1:j2]):
                    yield check_line(line)

    def format_diff(self):
        """Color diff lines."""
        diff = iter(self.diff)
        l1, l2 = '', next(diff)
        for line in diff:
            l1, l2 = l2, line
            # do not show lines starting with '?'.
            if l1.startswith('?'):
                continue
            if l2.startswith('?'):
                yield self.color_line(l1, l2)
            else:
                yield self.color_line(l1)
        # handle last line
        if not l2.startswith('?'):
            yield self.color_line(l2)

    def color_line(self, line, line_ref=None):
        """Color line characters.

        If line_ref is None, the whole line is colored.
        If line_ref[i] is not blank, line[i] is colored.
        Color depends if line starts with +/-.

        line: string
        line_ref: string.
        """
        color = line[0]
        if line_ref is None:
            if color in self.colors:
                colored_line = '\03{%s}%s\03{default}' % (self.colors[color], line)
                return colored_line
            else:
                return line
        colored_line = u''
        color_closed = True
        for char, char_ref in zip_longest(line, line_ref.strip(), fillvalue=' '):
            char_tagged = char
            if color_closed:
                if char_ref != ' ':
                    # Open a color tag at the start of a changed run.
                    char_tagged = '\03{%s}%s' % (self.colors[color], char)
                    color_closed = False
            else:
                if char_ref == ' ':
                    # Close the color tag when the changed run ends.
                    char_tagged = '\03{default}%s' % char
                    color_closed = True
            colored_line += char_tagged
        if not color_closed:
            colored_line += '\03{default}'
        return colored_line

    def apply(self):
        """Turn a into b for this hunk."""
        return self.b[self.b_rng[0]:self.b_rng[1]]

    def __str__(self):
        """Return the diff as plain text."""
        return u''.join(self.diff_plain_text)

    def __repr__(self):
        """Return a reconstructable representation."""
        # TODO
        return '%s(a, b, %s)' \
               % (self.__class__.__name__, self.group)
class _SuperHunk(Sequence):
def __init__(self, hunks):
self._hunks = hunks
self.a_rng = (self._hunks[0].a_rng[0], self._hunks[-1].a_rng[1])
self.b_rng = (self._hunks[0].b_rng[0], self._hunks[-1].b_rng[1])
self.pre_context = self._hunks[0].pre_context
self.post_context = self._hunks[0].post_context
def __getitem__(self, idx):
return self._hunks[idx]
def __len__(self):
return len(self._hunks)
def split(self):
return [_SuperHunk([hunk]) for hunk in self._hunks]
@property
def reviewed(self):
assert(len(set(hunk.reviewed for hunk in self._hunks)) == 1)
return self._hunks[0].reviewed
@reviewed.setter
def reviewed(self, reviewed):
for hunk in self._hunks:
hunk.reviewed = reviewed
class PatchManager(object):

    """Apply patches to text_a to obtain a new text.

    If all hunks are approved, text_b will be obtained.
    """

    @deprecated_args(n='context')
    def __init__(self, text_a, text_b, context=0, by_letter=False,
                 replace_invisible=False):
        """Constructor.

        @param text_a: base text
        @type text_a: basestring
        @param text_b: target text
        @type text_b: basestring
        @param context: number of lines which are context
        @type context: int
        @param by_letter: if text_a and text_b are single lines, comparison can be done
            letter by letter.
        @type by_letter: bool
        @param replace_invisible: Replace invisible characters like U+200e with
            the charnumber in brackets (e.g. <200e>).
        @type replace_invisible: bool
        """
        if '\n' in text_a or '\n' in text_b:
            self.a = text_a.splitlines(1)
            self.b = text_b.splitlines(1)
        else:
            if by_letter:
                # Single lines compared character by character.
                self.a = text_a
                self.b = text_b
            else:
                self.a = text_a.splitlines(1)
                self.b = text_b.splitlines(1)
        # groups and hunk have same order (one hunk correspond to one group).
        s = difflib.SequenceMatcher(None, self.a, self.b)
        self.groups = list(s.get_grouped_opcodes(0))
        self.hunks = []
        previous_hunk = None
        for group in self.groups:
            hunk = Hunk(self.a, self.b, group)
            self.hunks.append(hunk)
            # pre_context is the number of unchanged lines between this hunk
            # and the previous one (or the start of the text).
            hunk.pre_context = hunk.a_rng[0]
            if previous_hunk:
                hunk.pre_context -= previous_hunk.a_rng[1]
                previous_hunk.post_context = hunk.pre_context
            previous_hunk = hunk
        if self.hunks:
            self.hunks[-1].post_context = len(self.a) - self.hunks[-1].a_rng[1]
        # blocks are a superset of hunk, as include also parts not
        # included in any hunk.
        self.blocks = self.get_blocks()
        self.context = context
        self._super_hunks = self._generate_super_hunks()
        self._replace_invisible = replace_invisible

    def get_blocks(self):
        """Return list with blocks of indexes which compose a and, where applicable, b.

        Format of each block::

            [-1, (i1, i2), (-1, -1)] -> block a[i1:i2] does not change from a to b
                then is there is no corresponding hunk.
            [hunk index, (i1, i2), (j1, j2)] -> block a[i1:i2] becomes b[j1:j2]
        """
        blocks = []
        i2 = 0
        for hunk_idx, group in enumerate(self.groups):
            first, last = group[0], group[-1]
            i1, prev_i2, i2 = first[1], i2, last[2]
            # there is a section of unchanged text before this hunk.
            if prev_i2 < i1:
                rng = (-1, (prev_i2, i1), (-1, -1))
                blocks.append(rng)
            rng = (hunk_idx, (first[1], last[2]), (first[3], last[4]))
            blocks.append(rng)
        # there is a section of unchanged text at the end of a, b.
        if i2 < len(self.a):
            rng = (-1, (i2, len(self.a)), (-1, -1))
            blocks.append(rng)
        return blocks

    def print_hunks(self):
        """Print the headers and diff texts of all hunks to the output."""
        if self.hunks:
            pywikibot.output('\n'.join(self._generate_diff(super_hunk)
                                       for super_hunk in self._super_hunks))

    def _generate_super_hunks(self, hunks=None):
        # Group hunks whose context regions touch or overlap into _SuperHunk
        # instances so they can be reviewed as a single unit.
        if hunks is None:
            hunks = self.hunks
        if self.context:
            # Determine if two hunks are connected by self.context
            super_hunk = []
            super_hunks = [super_hunk]
            for hunk in hunks:
                # self.context * 2, because if self.context is 2 the hunks would be
                # directly adjacent when 4 lines in between and for anything
                # below 4 they share lines.
                # not super_hunk == first hunk as any other super_hunk is
                # created with one hunk
                if (not super_hunk or
                        hunk.pre_context <= self.context * 2):
                    # previous hunk has shared/adjacent self.context lines
                    super_hunk += [hunk]
                else:
                    super_hunk = [hunk]
                    super_hunks += [super_hunk]
        else:
            super_hunks = [[hunk] for hunk in hunks]
        return [_SuperHunk(sh) for sh in super_hunks]

    def _get_context_range(self, super_hunk):
        """Dynamically determine context range for a super hunk."""
        return ((super_hunk.a_rng[0] - min(super_hunk.pre_context, self.context),
                 super_hunk.a_rng[1] + min(super_hunk.post_context, self.context)),
                (super_hunk.b_rng[0] - min(super_hunk.pre_context, self.context),
                 super_hunk.b_rng[1] + min(super_hunk.post_context, self.context)))

    def _generate_diff(self, hunks):
        """Generate a diff text for the given hunks."""
        def extend_context(start, end):
            """Add context lines."""
            return ''.join(' {0}\n'.format(line.rstrip())
                           for line in self.a[start:end])

        context_range = self._get_context_range(hunks)
        # Header line, leading context, each hunk separated by the context
        # between it and its predecessor, then trailing context.
        output = ('\03{aqua}' +
                  Hunk.get_header_text(*context_range) + '\03{default}\n' +
                  extend_context(context_range[0][0], hunks[0].a_rng[0]))
        previous_hunk = None
        for hunk in hunks:
            if previous_hunk:
                output += extend_context(previous_hunk.a_rng[1], hunk.a_rng[0])
            previous_hunk = hunk
            output += hunk.diff_text
        output += extend_context(hunks[-1].a_rng[1], context_range[0][1])
        if self._replace_invisible:
            output = chars.replace_invisible(output)
        return output

    def review_hunks(self):
        """Review hunks."""
        def find_pending(start, end):
            # Return the index of the first pending super hunk scanning from
            # `start` towards `end` (exclusive), or None if none is left.
            step = -1 if start > end else +1
            for pending in range(start, end, step):
                if super_hunks[pending].reviewed == Hunk.PENDING:
                    return pending

        # TODO: Missing commands (compared to git --patch): edit and search
        help_msg = {'y': 'accept this hunk',
                    'n': 'do not accept this hunk',
                    'q': 'do not accept this hunk and quit reviewing',
                    'a': 'accept this hunk and all other pending',
                    'd': 'do not apply this hunk or any of the later hunks in the file',
                    'g': 'select a hunk to go to',
                    'j': 'leave this hunk undecided, see next undecided hunk',
                    'J': 'leave this hunk undecided, see next hunk',
                    'k': 'leave this hunk undecided, see previous undecided hunk',
                    'K': 'leave this hunk undecided, see previous hunk',
                    's': 'split this hunk into smaller ones',
                    '?': 'help',
                    }
        super_hunks = self._generate_super_hunks(
            h for h in self.hunks if h.reviewed == Hunk.PENDING)
        position = 0
        # Loop until every hunk has been accepted or rejected.
        while any(any(hunk.reviewed == Hunk.PENDING for hunk in super_hunk)
                  for super_hunk in super_hunks):
            super_hunk = super_hunks[position]
            next_pending = find_pending(position + 1, len(super_hunks))
            prev_pending = find_pending(position - 1, -1)
            # Offer only the commands that make sense at this position.
            answers = ['y', 'n', 'q', 'a', 'd', 'g']
            if next_pending is not None:
                answers += ['j']
            if position < len(super_hunks) - 1:
                answers += ['J']
            if prev_pending is not None:
                answers += ['k']
            if position > 0:
                answers += ['K']
            if len(super_hunk) > 1:
                answers += ['s']
            answers += ['?']
            pywikibot.output(self._generate_diff(super_hunk))
            choice = pywikibot.input('Accept this hunk [{0}]?'.format(
                ','.join(answers)))
            if choice not in answers:
                choice = '?'
            if choice == 'y' or choice == 'n':
                super_hunk.reviewed = Hunk.APPR if choice == 'y' else Hunk.NOT_APPR
                if next_pending is not None:
                    position = next_pending
                else:
                    position = find_pending(0, position)
            elif choice == 'q':
                # Reject everything that is still pending and stop.
                for super_hunk in super_hunks:
                    for hunk in super_hunk:
                        if hunk.reviewed == Hunk.PENDING:
                            hunk.reviewed = Hunk.NOT_APPR
            elif choice == 'a' or choice == 'd':
                # Accept/reject this and all later pending hunks.
                for super_hunk in super_hunks[position:]:
                    for hunk in super_hunk:
                        if hunk.reviewed == Hunk.PENDING:
                            hunk.reviewed = Hunk.APPR if choice == 'a' else Hunk.NOT_APPR
                position = find_pending(0, position)
            elif choice == 'g':
                # Build a one-line summary per super hunk so the user can
                # jump to one by number.
                hunk_list = []
                rng_width = 18
                for index, super_hunk in enumerate(super_hunks, start=1):
                    if super_hunk.reviewed == Hunk.PENDING:
                        status = ' '
                    elif super_hunk.reviewed == Hunk.APPR:
                        status = '+'
                    elif super_hunk.reviewed == Hunk.NOT_APPR:
                        status = '-'
                    else:
                        assert(False)
                    if super_hunk[0].a_rng[1] - super_hunk[0].a_rng[0] > 0:
                        mode = '-'
                        first = self.a[super_hunk[0].a_rng[0]]
                    else:
                        mode = '+'
                        first = self.b[super_hunk[0].b_rng[0]]
                    hunk_list += [(status, index,
                                   Hunk.get_header_text(
                                       *self._get_context_range(super_hunk), affix=''),
                                   mode, first)]
                    rng_width = max(len(hunk_list[-1][2]), rng_width)
                line_template = ('{0}{1} {2: >' +
                                 str(int(math.log10(len(super_hunks)) + 1)) +
                                 '}: {3: <' + str(rng_width) + '} {4}{5}')
                # the last entry is the first changed line which usually ends
                # with a \n (only the last may not, which is covered by the
                # if-condition following this block)
                hunk_list = ''.join(
                    line_template.format(
                        '*' if hunk_entry[1] == position + 1 else ' ', *hunk_entry)
                    for hunk_entry in hunk_list)
                if hunk_list.endswith('\n'):
                    hunk_list = hunk_list[:-1]
                pywikibot.output(hunk_list)
                next_hunk = pywikibot.input('Go to which hunk?')
                try:
                    next_hunk_position = int(next_hunk) - 1
                except ValueError:
                    next_hunk_position = False
                if (next_hunk_position is not False and
                        0 <= next_hunk_position < len(super_hunks)):
                    position = next_hunk_position
                elif next_hunk:  # nothing entered is silently ignored
                    pywikibot.error('Invalid hunk number "{0}"'.format(next_hunk))
            elif choice == 'j':
                position = next_pending
            elif choice == 'J':
                position += 1
            elif choice == 'k':
                position = prev_pending
            elif choice == 'K':
                position -= 1
            elif choice == 's':
                # Replace the current super hunk by its single-hunk parts.
                super_hunks = (super_hunks[:position] +
                               super_hunks[position].split() +
                               super_hunks[position + 1:])
                pywikibot.output('Split into {0} hunks'.format(len(super_hunk._hunks)))
            elif choice == '?':
                pywikibot.output(
                    '\03{purple}%s\03{default}' % '\n'.join(
                        '{0} -> {1}'.format(answer, help_msg[answer])
                        for answer in answers))
            else:
                assert(False)

    def apply(self):
        """Apply changes. If there are undecided changes, ask to review."""
        if any(h.reviewed == h.PENDING for h in self.hunks):
            pywikibot.output("There are unreviewed hunks.\n"
                             "Please review them before proceeding.\n")
            self.review_hunks()
        l_text = []
        for hunk_idx, (i1, i2), (j1, j2) in self.blocks:
            # unchanged text.
            if hunk_idx < 0:
                l_text.extend(self.a[i1:i2])
            # changed text; check if hunk is approved.
            else:
                hunk = self.hunks[hunk_idx]
                if hunk.reviewed == hunk.APPR:
                    l_text.extend(self.b[j1:j2])
                else:
                    l_text.extend(self.a[i1:i2])
        # Make a sanity check in case all are approved.
        if all(h.reviewed == h.APPR for h in self.hunks):
            assert u''.join(l_text) == u''.join(self.b)
        return l_text
def cherry_pick(oldtext, newtext, n=0, by_letter=False):
    """Propose a list of changes for approval.

    Text with approved changes will be returned.

    @param oldtext: base text
    @param newtext: target text
    @param n: lines of context as defined in difflib.get_grouped_opcodes().
    @param by_letter: if oldtext and newtext are single lines, comparison can
        be done letter by letter.
    """
    # Pass the context size under its current keyword name instead of the
    # deprecated 'n' alias, avoiding the deprecated_args warning machinery.
    patch = PatchManager(oldtext, newtext, context=n, by_letter=by_letter)
    pywikibot.output('\03{{lightpurple}}\n{0:*^50}\03{{default}}\n'.format(' ALL CHANGES '))
    for hunk in patch.hunks:
        pywikibot.output(hunk.diff_text)
    pywikibot.output('\03{{lightpurple}}\n{0:*^50}\03{{default}}\n'.format(' REVIEW CHANGES '))
    text_list = patch.apply()
    pywikibot.output('\03{{lightpurple}}\n{0:*^50}\03{{default}}\n'.format(' APPROVED CHANGES '))
    if any(hunk.reviewed == hunk.APPR for hunk in patch.hunks):
        for hunk in patch.hunks:
            if hunk.reviewed == hunk.APPR:
                pywikibot.output(hunk.diff_text)
    else:
        pywikibot.output('\03{{lightpurple}}{0:^50}\03{{default}}'.format('None.'))
    text = ''.join(text_list)
    return text
def html_comparator(compare_string):
    """List of added and deleted contexts from 'action=compare' html string.

    This function is useful when combined with site.py's "compare" method.
    Site.compare() returns HTML that is useful for displaying on a page.
    Here we use BeautifulSoup to un-HTML-ify the context of changes.
    Finally we present the added and deleted contexts.

    @param compare_string: HTML string from mediawiki API
    @type compare_string: str
    @return: deleted and added list of contexts
    @rtype: dict
    """
    from bs4 import BeautifulSoup

    comparands = {'deleted-context': [], 'added-context': []}
    # Name the stdlib parser explicitly: without it BeautifulSoup warns and
    # the result depends on which optional parser (e.g. lxml) is installed.
    soup = BeautifulSoup(compare_string, 'html.parser')
    for change_type, css_class in (('deleted-context', 'diff-deletedline'),
                                   ('added-context', 'diff-addedline')):
        crutons = soup.find_all('td', class_=css_class)
        for cruton in crutons:
            cruton_string = ''.join(cruton.strings)
            comparands[change_type].append(cruton_string)
    return comparands
| |
"""Polynomial factorization routines in characteristic zero. """
from sympy.polys.galoistools import (
gf_from_int_poly, gf_to_int_poly,
gf_degree, gf_from_dict,
gf_lshift, gf_add_mul, gf_mul,
gf_div, gf_rem,
gf_gcd, gf_gcdex,
gf_sqf_p,
gf_factor_sqf
)
from sympy.polys.densebasic import (
dup_LC, dmp_LC, dmp_ground_LC,
dup_TC, dmp_TC, dmp_ground_TC,
dup_convert, dmp_convert,
dup_degree, dmp_degree,
dmp_degree_in, dmp_degree_list,
dup_from_dict, dmp_from_dict,
dmp_zero, dmp_zero_p,
dmp_one, dmp_one_p,
dmp_nest, dmp_raise,
dup_strip, dmp_strip,
dmp_ground,
dup_inflate,
dmp_exclude, dmp_include,
dmp_inject, dmp_eject,
dmp_terms_gcd
)
from sympy.polys.densearith import (
dup_neg, dmp_neg,
dup_add, dmp_add,
dup_sub, dmp_sub,
dup_mul, dmp_mul,
dup_pow, dmp_pow,
dup_div, dmp_div,
dup_rem, dmp_rem,
dup_exquo, dmp_exquo,
dup_expand, dmp_expand,
dup_add_mul, dmp_add_mul,
dup_sub_mul, dmp_sub_mul,
dup_max_norm, dmp_max_norm,
dup_l1_norm, dmp_l1_norm,
dup_mul_ground, dmp_mul_ground,
dup_exquo_ground, dmp_exquo_ground
)
from sympy.polys.densetools import (
dup_gcd, dmp_gcd,
dup_sqf_p, dmp_sqf_p,
dup_sqf_part, dmp_sqf_part,
dup_trunc, dmp_ground_trunc,
dup_content, dmp_ground_content,
dup_monic, dmp_ground_monic,
dup_primitive, dmp_primitive, dmp_ground_primitive,
dup_ground_to_ring, dmp_ground_to_ring,
dup_eval, dmp_eval_tail,
dmp_eval_in, dmp_diff_eval_in,
dup_inner_gcd, dmp_inner_gcd,
dup_sqf_norm, dmp_sqf_norm,
dup_compose, dmp_compose,
dup_taylor
)
from sympy.polys.polyutils import (
_sort_factors
)
from sympy.polys.polyerrors import (
ExtraneousFactors, DomainError, EvaluationFailed
)
from sympy.ntheory import nextprime, isprime, factorint
from sympy.utilities import any, all, subsets, cythonized
from math import ceil, log
from random import randint
@cythonized("k")
def dup_trial_division(f, factors, K):
    """Determine multiplicities of factors using trial division. """
    result = []

    for factor in factors:
        # Divide by `factor` for as long as the division is exact.
        k = 0
        q, r = dup_div(f, factor, K)

        while not r:
            f, k = q, k + 1
            q, r = dup_div(f, factor, K)

        result.append((factor, k))

    return _sort_factors(result)
@cythonized("u,k")
def dmp_trial_division(f, factors, u, K):
    """Determine multiplicities of factors using trial division. """
    result = []

    for factor in factors:
        # Divide by `factor` for as long as the division is exact.
        k = 0

        while True:
            q, r = dmp_div(f, factor, u, K)

            if dmp_zero_p(r, u):
                f, k = q, k+1
            else:
                break

        result.append((factor, k))

    # NOTE: a second, unreachable `return sort_factors_if_mult(result)`
    # (referencing an undefined name) followed here; it has been removed.
    return _sort_factors(result)
def dup_zz_mignotte_bound(f, K):
    """Mignotte bound for univariate polynomials in `K[x]`. """
    # sqrt(n+1) * 2**n * max|coeff| * |lc| bounds the coefficients of any
    # integer factor of f.
    max_norm = dup_max_norm(f, K)
    lead = abs(dup_LC(f, K))
    n = dup_degree(f)

    return K.sqrt(n + 1)*2**n*max_norm*lead
def dmp_zz_mignotte_bound(f, u, K):
    """Mignotte bound for multivariate polynomials in `K[X]`. """
    # Same bound as the univariate case, with the total degree in all
    # variables playing the role of n.
    max_norm = dmp_max_norm(f, u, K)
    lead = abs(dmp_ground_LC(f, u, K))
    n = sum(dmp_degree_list(f, u))

    return K.sqrt(n + 1)*2**n*max_norm*lead
def dup_zz_hensel_step(m, f, g, h, s, t, K):
    """One step in Hensel lifting in `Z[x]`.

    Given positive integer `m` and `Z[x]` polynomials `f`, `g`, `h`, `s`
    and `t` such that::

        f == g*h (mod m)
        s*g + t*h == 1 (mod m)

        lc(f) is not a zero divisor (mod m)
        lc(h) == 1

        deg(f) == deg(g) + deg(h)
        deg(s) < deg(h)
        deg(t) < deg(g)

    returns polynomials `G`, `H`, `S` and `T`, such that::

        f == G*H (mod m**2)
        S*G + T*H == 1 (mod m**2)

    References
    ==========

    .. [Gathen99] J. von zur Gathen, J. Gerhard, Modern Computer Algebra,
        First Edition, Cambridge University Press, 1999, pp. 418
    """
    M = m**2

    # Lift the factorization: e is the current defect f - g*h, taken mod M.
    e = dup_sub_mul(f, g, h, K)
    e = dup_trunc(e, M, K)

    q, r = dup_div(dup_mul(s, e, K), h, K)

    q = dup_trunc(q, M, K)
    r = dup_trunc(r, M, K)

    u = dup_add(dup_mul(t, e, K), dup_mul(q, g, K), K)
    G = dup_trunc(dup_add(g, u, K), M, K)
    H = dup_trunc(dup_add(h, r, K), M, K)

    # Lift the Bezout cofactors: b is the defect s*G + t*H - 1, taken mod M.
    u = dup_add(dup_mul(s, G, K), dup_mul(t, H, K), K)
    b = dup_trunc(dup_sub(u, [K.one], K), M, K)

    c, d = dup_div(dup_mul(s, b, K), H, K)

    c = dup_trunc(c, M, K)
    d = dup_trunc(d, M, K)

    u = dup_add(dup_mul(t, b, K), dup_mul(c, G, K), K)
    S = dup_trunc(dup_sub(s, d, K), M, K)
    T = dup_trunc(dup_sub(t, u, K), M, K)

    return G, H, S, T
@cythonized("l,r,k,d")
def dup_zz_hensel_lift(p, f, f_list, l, K):
    """Multifactor Hensel lifting in `Z[x]`.

    Given a prime `p`, polynomial `f` over `Z[x]` such that `lc(f)`
    is a unit modulo `p`, monic pair-wise coprime polynomials `f_i`
    over `Z[x]` satisfying::

        f = lc(f) f_1 ... f_r (mod p)

    and a positive integer `l`, returns a list of monic polynomials
    `F_1`, `F_2`, ..., `F_r` satisfying::

       f = lc(f) F_1 ... F_r (mod p**l)

       F_i = f_i (mod p), i = 1..r

    References
    ==========

    .. [Gathen99] J. von zur Gathen, J. Gerhard, Modern Computer Algebra,
        First Edition, Cambridge University Press, 1999, pp. 424
    """
    r = len(f_list)
    lc = dup_LC(f, K)

    if r == 1:
        # Single factor: just make f monic modulo p**l.
        F = dup_mul_ground(f, K.gcdex(lc, p**l)[0], K)
        return [ dup_trunc(F, p**l, K) ]

    m = p
    k = r // 2
    d = int(ceil(log(l, 2)))

    # Split the factor list in two halves: g = lc * f_0..f_{k-1} and
    # h = f_k..f_{r-1} (mod p), then lift the pair and recurse on each half.
    g = gf_from_int_poly([lc], p)

    for f_i in f_list[:k]:
        g = gf_mul(g, gf_from_int_poly(f_i, p), p, K)

    h = gf_from_int_poly(f_list[k], p)

    for f_i in f_list[k+1:]:
        h = gf_mul(h, gf_from_int_poly(f_i, p), p, K)

    # Bezout cofactors of g and h mod p, required by the Hensel step.
    s, t, _ = gf_gcdex(g, h, p, K)

    g = gf_to_int_poly(g, p)
    h = gf_to_int_poly(h, p)
    s = gf_to_int_poly(s, p)
    t = gf_to_int_poly(t, p)

    # Quadratic lifting: d steps raise the modulus from p to at least p**l.
    for _ in range(1, d+1):
        (g, h, s, t), m = dup_zz_hensel_step(m, f, g, h, s, t, K), m**2

    return dup_zz_hensel_lift(p, g, f_list[:k], l, K) \
         + dup_zz_hensel_lift(p, h, f_list[k:], l, K)
@cythonized("l,s")
def dup_zz_zassenhaus(f, K):
    """Factor primitive square-free polynomials in `Z[x]`. """
    n = dup_degree(f)

    if n == 1:
        return [f]

    # Coefficient bound B (Mignotte-style) for integer factors of f.
    A = dup_max_norm(f, K)
    b = dup_LC(f, K)
    B = int(abs(K.sqrt(n+1)*2**n*A*b))
    C = int((n+1)**(2*n)*A**(2*n-1))
    gamma = int(ceil(2*log(C, 2)))
    bound = int(2*gamma*log(gamma))

    # Find a prime p not dividing lc(f) for which f stays square-free mod p.
    for p in xrange(3, bound+1):
        if not isprime(p) or b % p == 0:
            continue

        p = K.convert(p)

        F = gf_from_int_poly(f, p)

        if gf_sqf_p(F, p, K):
            break

    # Lift the modular factorization from mod p to mod p**l with p**l > 2*B.
    l = int(ceil(log(2*B + 1, p)))

    modular = []

    for ff in gf_factor_sqf(F, p, K)[1]:
        modular.append(gf_to_int_poly(ff, p))

    g = dup_zz_hensel_lift(p, f, modular, l, K)

    # Recombination: try subsets S of modular factors of growing size s;
    # accept S when the candidate factor G and cofactor H pass the norm test.
    T = set(range(len(g)))
    factors, s = [], 1

    while 2*s <= len(T):
        for S in subsets(T, s):
            G, H = [b], [b]

            S = set(S)

            for i in S:
                G = dup_mul(G, g[i], K)
            for i in T-S:
                H = dup_mul(H, g[i], K)

            G = dup_trunc(G, p**l, K)
            H = dup_trunc(H, p**l, K)

            G_norm = dup_l1_norm(G, K)
            H_norm = dup_l1_norm(H, K)

            if G_norm*H_norm <= B:
                # True factor found: keep G, continue with the cofactor.
                T = T - S

                G = dup_primitive(G, K)[1]
                f = dup_primitive(H, K)[1]

                factors.append(G)
                b = dup_LC(f, K)

                break
        else:
            s += 1

    return factors + [f]
def dup_zz_irreducible_p(f, K):
    """Test irreducibility using Eisenstein's criterion. """
    lc = dup_LC(f, K)
    tc = dup_TC(f, K)

    # Content of all non-leading coefficients; its prime factors are the
    # only candidates for an Eisenstein prime.
    e_fc = dup_content(f[1:], K)

    if not e_fc:
        return

    for p in factorint(int(e_fc)).iterkeys():
        # Eisenstein: p divides all non-leading coefficients, p does not
        # divide lc(f), and p**2 does not divide tc(f).
        if (lc % p) and (tc % p**2):
            return True
@cythonized("n,p,k")
def dup_zz_cyclotomic_poly(n, K):
    """Efficiently generate the n-th cyclotomic polynomial. """
    # Start from Phi_1 = x - 1 and, for every prime power p**k dividing n,
    # apply Phi_{p*m}(x) = Phi_m(x**p)/Phi_m(x), then inflate by p**(k-1).
    h = [K.one, -K.one]

    for p, k in factorint(n).iteritems():
        h = dup_exquo(dup_inflate(h, p, K), h, K)
        h = dup_inflate(h, p**(k-1), K)

    return h
@cythonized("n,p,k,i")
def _dup_cyclotomic_decompose(n, K):
    """Decompose `x**n - 1` into a list of its cyclotomic factors. """
    factors = [[K.one, -K.one]]

    for p, k in factorint(n).iteritems():
        # Extend the decomposition by the prime p, then by each higher
        # power of p via inflation.
        fresh = [ dup_exquo(dup_inflate(h, p, K), h, K) for h in factors ]
        factors.extend(fresh)

        for i in xrange(1, k):
            fresh = [ dup_inflate(q, p, K) for q in fresh ]
            factors.extend(fresh)

    return factors
@cythonized("n")
def dup_zz_cyclotomic_factor(f, K):
    """Efficiently factor polynomials `x**n - 1` and `x**n + 1` in `Z[x]`.

    Given a univariate polynomial `f` in `Z[x]` returns a list of factors
    of `f`, provided that `f` is in the form `x**n - 1` or `x**n + 1` for
    `n >= 1`. Otherwise returns None.

    Factorization is performed using cyclotomic decomposition of `f`,
    which makes this method much faster than any other direct factorization
    approach (e.g. Zassenhaus's).

    References
    ==========

    .. [Weisstein09] Eric W. Weisstein, Cyclotomic Polynomial, From MathWorld - A
        Wolfram Web Resource, http://mathworld.wolfram.com/CyclotomicPolynomial.html
    """
    lc_f, tc_f = dup_LC(f, K), dup_TC(f, K)

    if dup_degree(f) <= 0:
        return None

    # Only monic polynomials with constant term +1 or -1 can qualify.
    if lc_f != 1 or tc_f not in [-1, 1]:
        return None

    # All interior coefficients must vanish, i.e. f == x**n +/- 1.
    if any([ bool(cf) for cf in f[1:-1] ]):
        return None

    n = dup_degree(f)
    F = _dup_cyclotomic_decompose(n, K)

    if not K.is_one(tc_f):
        # f == x**n - 1: its factors are exactly the decomposition of n.
        return F
    else:
        # f == x**n + 1 == (x**(2n) - 1)/(x**n - 1): keep only the
        # cyclotomic factors of 2n that do not already divide x**n - 1.
        H = []

        for h in _dup_cyclotomic_decompose(2*n, K):
            if h not in F:
                H.append(h)

        return H
@cythonized("n")
def dup_zz_factor_sqf(f, K, **args):
    """Factor square-free (non-primitive) polynomials in `Z[x]`. """
    cont, g = dup_primitive(f, K)

    n = dup_degree(g)

    # Normalize so the leading coefficient is positive.
    if dup_LC(g, K) < 0:
        cont, g = -cont, dup_neg(g, K)

    if n <= 0:
        return cont, []

    if n == 1 or dup_zz_irreducible_p(g, K):
        return cont, [(g, 1)]

    factors = []

    if args.get('cyclotomic', True):
        # Fast path for x**n +/- 1; returns None when g is not of that form.
        factors = dup_zz_cyclotomic_factor(g, K)

    if factors is None:
        factors = dup_zz_zassenhaus(g, K)

    return cont, _sort_factors(factors, multiple=False)
@cythonized("n,k")
def dup_zz_factor(f, K, **args):
    """Factor (non square-free) polynomials in `Z[x]`.

    Given a univariate polynomial `f` in `Z[x]` computes its complete
    factorization `f_1, ..., f_n` into irreducibles over integers::

        f = content(f) f_1**k_1 ... f_n**k_n

    The factorization is computed by reducing the input polynomial
    into a primitive square-free polynomial and factoring it using
    Zassenhaus algorithm. Trial division is used to recover the
    multiplicities of factors.

    The result is returned as a tuple consisting of::

        (content(f), [(f_1, k_1), ..., (f_n, k_n)])

    Consider polynomial `f = 2*x**4 - 2`::

        >>> from sympy.polys.factortools import dup_zz_factor
        >>> from sympy.polys.algebratools import ZZ

        >>> dup_zz_factor([2, 0, 0, 0, -2], ZZ)
        (2, [([1, -1], 1), ([1, 1], 1), ([1, 0, 1], 1)])

    In result we got the following factorization::

        f = 2 (x - 1) (x + 1) (x**2 + 1)

    Note that this is a complete factorization over integers,
    however over Gaussian integers we can factor the last term.

    By default, polynomials `x**n - 1` and `x**n + 1` are factored
    using cyclotomic decomposition to speedup computations. To
    disable this behaviour set cyclotomic=False.

    References
    ==========

    .. [Gathen99] J. von zur Gathen, J. Gerhard, Modern Computer Algebra,
        First Edition, Cambridge University Press, 1999, pp. 427
    """
    cont, g = dup_primitive(f, K)

    n = dup_degree(g)

    # Normalize so the leading coefficient is positive.
    if dup_LC(g, K) < 0:
        cont, g = -cont, dup_neg(g, K)

    if n <= 0:
        return cont, []

    if n == 1 or dup_zz_irreducible_p(g, K):
        return cont, [(g, 1)]

    g = dup_sqf_part(g, K)
    H, factors = None, []

    if args.get('cyclotomic', True):
        H = dup_zz_cyclotomic_factor(g, K)

    if H is None:
        H = dup_zz_zassenhaus(g, K)

    # Recover the multiplicity of each square-free factor by trial division
    # against the original (non square-free) polynomial.
    for h in H:
        k = 0

        while True:
            q, r = dup_div(f, h, K)

            if not r:
                f, k = q, k+1
            else:
                break

        factors.append((h, k))

    return cont, _sort_factors(factors)
def dmp_zz_wang_non_divisors(E, cs, ct, K):
    """Wang/EEZ: Compute a set of valid divisors.

    Each entry of `E` is reduced against `cs*ct` and all previously
    accepted entries; if any entry collapses to one, no valid divisor
    set exists and None is returned.
    """
    divisors = [cs*ct]
    for value in E:
        candidate = abs(value)
        # Strip from the candidate every prime it shares with earlier entries.
        for seen in reversed(divisors):
            while seen != 1:
                seen = K.gcd(seen, candidate)
                candidate = candidate // seen
            if K.is_one(candidate):
                return None
        divisors.append(candidate)
    # The seed cs*ct is only used for reduction, not part of the answer.
    return divisors[1:]
@cythonized("u,v")
def dmp_zz_wang_test_points(f, T, ct, A, u, K):
    """Wang/EEZ: Test evaluation points for suitability.

    Raises EvaluationFailed if the evaluation point `A` is unusable:
    the leading coefficient vanishes, the evaluated image is not
    square-free, or no valid divisor set exists.
    """
    # The leading coefficient must not vanish at A.
    if not dmp_eval_tail(dmp_LC(f, K), A, u-1, K):
        raise EvaluationFailed('no luck')
    g = dmp_eval_tail(f, A, u, K)
    # The univariate image must remain square-free.
    if not dup_sqf_p(g, K):
        raise EvaluationFailed('no luck')
    c, h = dup_primitive(g, K)
    if K.is_negative(dup_LC(h, K)):
        c, h = -c, dup_neg(h, K)
    v = u-1
    # Evaluate the factors of lc(f) at A and check for a valid divisor set.
    E = [ dmp_eval_tail(t, A, v, K) for t, _ in T ]
    D = dmp_zz_wang_non_divisors(E, c, ct, K)
    if D is not None:
        return c, h, E
    else:
        raise EvaluationFailed('no luck')
@cythonized("u,v,i,j,k")
def dmp_zz_wang_lead_coeffs(f, T, cs, E, H, A, u, K):
    """Wang/EEZ: Compute correct leading coefficients.

    Distributes the factors `T` of lc(f) among the univariate factors `H`
    by matching their evaluations `E` against the leading coefficients,
    returning the (possibly rescaled) `f`, factors and leading coefficients.
    Raises ExtraneousFactors when the distribution is inconsistent.
    """
    C, J, v = [], [0]*len(E), u-1
    for h in H:
        c = dmp_one(v, K)
        d = dup_LC(h, K)*cs
        # Assign each factor t of lc(f) to h according to how many times
        # its evaluation e divides d.
        for i in reversed(xrange(len(E))):
            k, e, (t, _) = 0, E[i], T[i]
            while not (d % e):
                d, k = d//e, k+1
            if k != 0:
                c, J[i] = dmp_mul(c, dmp_pow(t, k, v, K), v, K), 1
        C.append(c)
    # Every factor of lc(f) must have been used at least once.
    if any([ not j for j in J ]):
        raise ExtraneousFactors # pragma: no cover
    CC, HH = [], []
    for c, h in zip(C, H):
        d = dmp_eval_tail(c, A, v, K)
        lc = dup_LC(h, K)
        if K.is_one(cs):
            cc = lc//d
        else:
            # Absorb the common scalar cs into h and c as far as possible.
            g = K.gcd(lc, d)
            d, cc = d//g, lc//g
            h, cs = dup_mul_ground(h, d, K), cs//d
        c = dmp_mul_ground(c, cc, v, K)
        CC.append(c)
        HH.append(h)
    if K.is_one(cs):
        return f, HH, CC
    # Leftover scalar: scale every factor and compensate in f itself.
    CCC, HHH = [], []
    for c, h in zip(CC, HH):
        CCC.append(dmp_mul_ground(c, cs, v, K))
        HHH.append(dmp_mul_ground(h, cs, 0, K))
    f = dmp_mul_ground(f, cs**(len(H)-1), u, K)
    return f, HHH, CCC
@cythonized("m")
def dup_zz_diophantine(F, m, p, K):
    """Wang/EEZ: Solve univariate Diophantine equations.

    Finds polynomials `s_i` with `sum(s_i * prod(F)/F_i) = x**m (mod p)`,
    with each `s_i` reduced modulo its `F_i`.
    """
    if len(F) == 2:
        # Base case: extended Euclid in GF(p) gives s*f + t*g = 1,
        # then shift by x**m and reduce s modulo f.
        a, b = F
        f = gf_from_int_poly(a, p)
        g = gf_from_int_poly(b, p)
        s, t, G = gf_gcdex(g, f, p, K)
        s = gf_lshift(s, m, K)
        t = gf_lshift(t, m, K)
        q, s = gf_div(s, f, p, K)
        t = gf_add_mul(t, q, g, p, K)
        s = gf_to_int_poly(s, p)
        t = gf_to_int_poly(t, p)
        result = [s, t]
    else:
        # General case: peel factors off one at a time using the
        # two-factor solver, then reduce each solution mod its factor.
        G = [F[-1]]
        for f in reversed(F[1:-1]):
            G.insert(0, dup_mul(f, G[0], K))
        S, T = [], [[1]]
        for f, g in zip(F, G):
            t, s = dmp_zz_diophantine([g, f], T[-1], [], 0, p, 1, K)
            T.append(t)
            S.append(s)
        result, S = [], S + [T[-1]]
        for s, f in zip(S, F):
            s = gf_from_int_poly(s, p)
            f = gf_from_int_poly(f, p)
            r = gf_rem(gf_lshift(s, m, K), f, p, K)
            s = gf_to_int_poly(r, p)
            result.append(s)
    return result
@cythonized("u,v,d,n,i,j,k")
def dmp_zz_diophantine(F, c, A, d, p, u, K):
    """Wang/EEZ: Solve multivariate Diophantine equations.

    Finds `S = [s_1, ..., s_r]` with `sum(s_i * prod(F)/F_i) = c (mod p)`
    by recursing on the evaluation points `A` down to the univariate case.
    """
    if not A:
        # Univariate case: solve coefficient-by-coefficient of c using
        # the univariate Diophantine solver, accumulating mod p.
        S = [ [] for _ in F ]
        n = dup_degree(c)
        for i, coeff in enumerate(c):
            if not coeff:
                continue
            T = dup_zz_diophantine(F, n-i, p, K)
            for j, (s, t) in enumerate(zip(S, T)):
                t = dup_mul_ground(t, coeff, K)
                S[j] = dup_trunc(dup_add(s, t, K), p, K)
    else:
        # Multivariate case: evaluate at the last point a, solve the
        # smaller problem, then lift by Taylor expansion in (x_n - a).
        n = len(A)
        e = dmp_expand(F, u, K)
        a, A = A[-1], A[:-1]
        B, G = [], []
        for f in F:
            B.append(dmp_exquo(e, f, u, K))
            G.append(dmp_eval_in(f, a, n, u, K))
        C = dmp_eval_in(c, a, n, u, K)
        v = u - 1
        S = dmp_zz_diophantine(G, C, A, d, p, v, K)
        S = [ dmp_raise(s, 1, v, K) for s in S ]
        for s, b in zip(S, B):
            c = dmp_sub_mul(c, s, b, u, K)
        c = dmp_ground_trunc(c, p, u, K)
        m = dmp_nest([K.one, -a], n, K)
        M = dmp_one(n, K)
        # Correct the solution order by order in powers of (x_n - a).
        for k in xrange(0, d):
            if dmp_zero_p(c, u):
                break
            M = dmp_mul(M, m, u, K)
            C = dmp_diff_eval_in(c, k+1, a, n, u, K)
            if not dmp_zero_p(C, v):
                C = dmp_exquo_ground(C, K.factorial(k+1), v, K)
                T = dmp_zz_diophantine(G, C, A, d, p, v, K)
                for i, t in enumerate(T):
                    T[i] = dmp_mul(dmp_raise(t, 1, v, K), M, u, K)
                for i, (s, t) in enumerate(zip(S, T)):
                    S[i] = dmp_add(s, t, u, K)
                for t, b in zip(T, B):
                    c = dmp_sub_mul(c, t, b, u, K)
                c = dmp_ground_trunc(c, p, u, K)
        S = [ dmp_ground_trunc(s, p, u, K) for s in S ]
    return S
@cythonized("u,v,d,dj,n,i,j,k,w")
def dmp_zz_wang_hensel_lifting(f, H, LC, A, p, u, K):
    """Wang/EEZ: Parallel Hensel lifting algorithm.

    Lifts the univariate factors `H` of `f` evaluated at `A`, one variable
    at a time, to true multivariate factors modulo `p`, using the assigned
    leading coefficients `LC`. Raises ExtraneousFactors if the lifted
    product does not reproduce `f`.
    """
    S, n, v = [f], len(A), u-1
    H = list(H)
    # Build the chain of partial evaluations of f, from fewest to most variables.
    for i, a in enumerate(reversed(A[1:])):
        s = dmp_eval_in(S[0], a, n-i, u-i, K)
        S.insert(0, dmp_ground_trunc(s, p, v-i, K))
    d = max(dmp_degree_list(f, u)[1:])
    # Lift one variable per iteration: j counts variables, s is the target image.
    for j, s, a in zip(xrange(2, n+2), S, A):
        G, w = list(H), j-1
        I, J = A[:j-2], A[j-1:]
        # Impose the correct leading coefficients before lifting.
        for i, (h, lc) in enumerate(zip(H, LC)):
            lc = dmp_ground_trunc(dmp_eval_tail(lc, J, v, K), p, w-1, K)
            H[i] = [lc] + dmp_raise(h[1:], 1, w-1, K)
        m = dmp_nest([K.one, -a], w, K)
        M = dmp_one(w, K)
        # c is the current error: target minus product of candidate factors.
        c = dmp_sub(s, dmp_expand(H, w, K), w, K)
        dj = dmp_degree_in(s, w, w)
        # Newton-style correction in powers of (x_w - a).
        for k in xrange(0, dj):
            if dmp_zero_p(c, w):
                break
            M = dmp_mul(M, m, w, K)
            C = dmp_diff_eval_in(c, k+1, a, w, w, K)
            if not dmp_zero_p(C, w-1):
                C = dmp_exquo_ground(C, K.factorial(k+1), w-1, K)
                T = dmp_zz_diophantine(G, C, I, d, p, w-1, K)
                for i, (h, t) in enumerate(zip(H, T)):
                    h = dmp_add_mul(h, dmp_raise(t, 1, w-1, K), M, w, K)
                    H[i] = dmp_ground_trunc(h, p, w, K)
                h = dmp_sub(s, dmp_expand(H, w, K), w, K)
                c = dmp_ground_trunc(h, p, w, K)
    # Final sanity check: the lifted factors must multiply back to f exactly.
    if dmp_expand(H, u, K) != f:
        raise ExtraneousFactors # pragma: no cover
    else:
        return H
# Tuning knobs for the evaluation-point search in dmp_zz_wang():
EEZ_NUM_OK = 3    # number of usable evaluation configurations to collect
EEZ_NUM_TRY = 5   # random points tried per round before widening the range
EEZ_MOD_STEP = 2  # how much to widen the sampling bound after a failed round
@cythonized("u,mod,i,j,s_arg,negative")
def dmp_zz_wang(f, u, K, **args):
    """Factor primitive square-free polynomials in `Z[X]`.

    Given a multivariate polynomial `f` in `Z[x_1,...,x_n]`, which
    is primitive and square-free in `x_1`, computes factorization
    of `f` into irreducibles over integers.

    The procedure is based on Wang's Enhanced Extended Zassenhaus
    algorithm. The algorithm works by viewing `f` as a univariate
    polynomial in `Z[x_2,...,x_n][x_1]`, for which an evaluation
    mapping is computed::

        x_2 -> a_2, ..., x_n -> a_n

    where `a_i`, for `i = 2, ..., n`, are carefully chosen integers.
    The mapping is used to transform `f` into a univariate polynomial
    in `Z[x_1]`, which can be factored efficiently using Zassenhaus
    algorithm. The last step is to lift univariate factors to obtain
    true multivariate factors. For this purpose a parallel Hensel
    lifting procedure is used.

    References
    ==========

    .. [Wang78] P. S. Wang, An Improved Multivariate Polynomial Factoring
        Algorithm, Math. of Computation 32, 1978, pp. 1215--1231

    .. [Geddes92] K. Geddes, S. R. Czapor, G. Labahn, Algorithms for
        Computer Algebra, Springer, 1992, pp. 264--272
    """
    # Factor the leading coefficient; its factors get distributed later.
    ct, T = dmp_zz_factor(dmp_LC(f, K), u-1, K)
    b = dmp_zz_mignotte_bound(f, u, K)
    p = K(nextprime(b))
    eez_mod = args.get('mod', None)
    if eez_mod is None:
        if u == 1:
            eez_mod = 2
        else:
            eez_mod = 1
    history, configs, A, r = set([]), [], [K.zero]*u, None
    # First try the all-zero evaluation point; it is often good and cheap.
    try:
        cs, s, E = dmp_zz_wang_test_points(f, T, ct, A, u, K)
        _, H = dup_zz_factor_sqf(s, K)
        r = len(H)
        if r == 1:
            return [f]
        # NOTE(review): bad_points is assigned but never read afterwards.
        bad_points = set([tuple(A)])
        configs = [(s, cs, E, H, A)]
    except EvaluationFailed:
        pass
    # Collect EEZ_NUM_OK usable random evaluation points, widening the
    # sampling bound whenever a whole round yields too few.
    while len(configs) < EEZ_NUM_OK:
        for _ in xrange(EEZ_NUM_TRY):
            A = [ K(randint(-eez_mod, eez_mod)) for _ in xrange(u) ]
            if tuple(A) not in history:
                history.add(tuple(A))
            else:
                continue
            try:
                cs, s, E = dmp_zz_wang_test_points(f, T, ct, A, u, K)
            except EvaluationFailed:
                continue
            _, H = dup_zz_factor_sqf(s, K)
            rr = len(H)
            # Keep only configurations with the minimal number of factors:
            # fewer univariate factors means fewer extraneous candidates.
            if r is not None:
                if rr != r: # pragma: no cover
                    if rr < r:
                        configs, r = [], rr
                    else:
                        continue
            else:
                r = rr
            if r == 1:
                return [f]
            configs.append((s, cs, E, H, A))
            if len(configs) == EEZ_NUM_OK:
                break
        else:
            # for-else: the round completed without filling the quota.
            eez_mod += EEZ_MOD_STEP
    # Pick the configuration whose univariate image has the smallest norm.
    s_norm, s_arg, i = None, 0, 0
    for s, _, _, _, _ in configs:
        _s_norm = dup_max_norm(s, K)
        if s_norm is not None:
            if _s_norm < s_norm:
                s_norm = _s_norm
                s_arg = i
        else:
            s_norm = _s_norm
        i += 1
    _, cs, E, H, A = configs[s_arg]
    try:
        f, H, LC = dmp_zz_wang_lead_coeffs(f, T, cs, E, H, A, u, K)
        factors = dmp_zz_wang_hensel_lifting(f, H, LC, A, p, u, K)
    except ExtraneousFactors: # pragma: no cover
        # Restart with a wider sampling range, unless the caller forbids it.
        if args.get('restart', True):
            return dmp_zz_wang(f, u, K, mod=eez_mod+1)
        else:
            raise ExtraneousFactors("we need to restart algorithm with better parameters")
    # NOTE(review): `negative` is assigned but never used.
    negative, result = 0, []
    for f in factors:
        _, f = dmp_ground_primitive(f, u, K)
        if K.is_negative(dmp_ground_LC(f, u, K)):
            f = dmp_neg(f, u, K)
        result.append(f)
    return result
@cythonized("u,d,k")
def dmp_zz_factor(f, u, K):
    """Factor (non square-free) polynomials in `Z[X]`.

    Given a multivariate polynomial `f` in `Z[x]` computes its complete
    factorization `f_1, ..., f_n` into irreducibles over integers::

        f = content(f) f_1**k_1 ... f_n**k_n

    The factorization is computed by reducing the input polynomial
    into a primitive square-free polynomial and factoring it using
    Enhanced Extended Zassenhaus (EEZ) algorithm. Trial division
    is used to recover the multiplicities of factors.

    The result is returned as a tuple consisting of::

        (content(f), [(f_1, k_1), ..., (f_n, k_n)])

    Consider polynomial `f = 2*(x**2 - y**2)`::

        >>> from sympy.polys.factortools import dmp_zz_factor
        >>> from sympy.polys.algebratools import ZZ

        >>> dmp_zz_factor([[2], [], [-2, 0, 0]], 1, ZZ)
        (2, [([[1], [-1, 0]], 1), ([[1], [1, 0]], 1)])

    In result we got the following factorization::

        f = 2 (x - y) (x + y)

    References
    ==========

    .. [Gathen99] J. von zur Gathen, J. Gerhard, Modern Computer Algebra,
        First Edition, Cambridge University Press, 1999, pp. 427
    """
    if not u:
        # Univariate input: delegate to the dedicated solver.
        return dup_zz_factor(f, K)
    if dmp_zero_p(f, u):
        return K.zero, []
    cont, g = dmp_ground_primitive(f, u, K)
    if dmp_ground_LC(g, u, K) < 0:
        cont, g = -cont, dmp_neg(g, u, K)
    if all([ d <= 0 for d in dmp_degree_list(g, u) ]):
        return cont, []
    # Split off the content in the remaining variables, factored recursively below.
    G, g = dmp_primitive(g, u, K)
    factors = []
    if dmp_degree(g, u) > 0:
        g = dmp_sqf_part(g, u, K)
        H = dmp_zz_wang(g, u, K)
        # Trial division against the original f recovers multiplicities.
        for h in H:
            k = 0
            while True:
                q, r = dmp_div(f, h, u, K)
                if dmp_zero_p(r, u):
                    f, k = q, k+1
                else:
                    break
            factors.append((h, k))
    # Prepend the factors of the multivariate content.
    for g, k in dmp_zz_factor(G, u-1, K)[1]:
        factors.insert(0, ([g], k))
    return cont, _sort_factors(factors)
def dup_ext_factor(f, K):
    """Factor univariate polynomials over algebraic number fields.

    Factors via the square-free norm: the norm `r` is factored over the
    base domain and each factor is mapped back by gcd computation and a
    Taylor shift by `s*K.unit`.
    """
    n, lc = dup_degree(f), dup_LC(f, K)
    f = dup_monic(f, K)
    if n <= 0:
        return lc, []
    if n == 1:
        return lc, [(f, 1)]
    # Work with the square-free part; F keeps the original for trial division.
    f, F = dup_sqf_part(f, K), f
    s, g, r = dup_sqf_norm(f, K)
    factors = dup_factor_list(r, K.dom, include=True)
    if len(factors) == 1:
        # Irreducible norm: f is a power of a single irreducible factor.
        return lc, [(f, n//dup_degree(f))]
    H = s*K.unit
    for i, (factor, _) in enumerate(factors):
        h = dup_convert(factor, K.dom, K)
        h, _, g = dup_inner_gcd(h, g, K)
        # Undo the shift applied when forming the square-free norm.
        h = dup_taylor(h, H, K)
        factors[i] = h
    factors = dup_trial_division(F, factors, K)
    return lc, factors
@cythonized("u")
def dmp_ext_factor(f, u, K):
    """Factor multivariate polynomials over algebraic number fields.

    Multivariate analogue of dup_ext_factor(), using the square-free
    norm and composition to map factors back into `K[X]`.
    """
    if not u:
        return dup_ext_factor(f, K)
    lc = dmp_ground_LC(f, u, K)
    f = dmp_ground_monic(f, u, K)
    if all([ d <= 0 for d in dmp_degree_list(f, u) ]):
        return lc, []
    # Work with the square-free part; F keeps the original for trial division.
    f, F = dmp_sqf_part(f, u, K), f
    s, g, r = dmp_sqf_norm(f, u, K)
    factors = dmp_factor_list(r, u, K.dom, include=True)
    if len(factors) == 1:
        # Irreducible norm: f itself is the only factor.
        coeff, factors = lc, [f]
    else:
        # Undo the x -> x + s*unit shift used when forming the norm.
        H = dmp_raise([K.one, s*K.unit], u, 0, K)
        for i, (factor, _) in enumerate(factors):
            h = dmp_convert(factor, u, K.dom, K)
            h, _, g = dmp_inner_gcd(h, g, u, K)
            h = dmp_compose(h, H, u, K)
            factors[i] = h
    return lc, dmp_trial_division(F, factors, u, K)
@cythonized("i,k,u")
def dup_factor_list(f, K0, **args):
    """Factor polynomials into irreducibles in `K[x]`.

    Dispatches on the domain `K0`: algebraic fields, rings of integers,
    and polynomial rings are handled; fields are cleared to their ground
    ring first and converted back afterwards. With ``include=True`` the
    leading content is folded into the first factor.
    """
    if not K0.has_CharacteristicZero: # pragma: no cover
        raise DomainError('only characteristic zero allowed')
    if K0.is_Algebraic:
        coeff, factors = dup_ext_factor(f, K0)
    else:
        if not K0.is_Exact:
            # Factor over the exact counterpart, convert back at the end.
            K0_inexact, K0 = K0, K0.get_exact()
            f = dup_convert(f, K0_inexact, K0)
        else:
            K0_inexact = None
        if K0.has_Field:
            # Clear denominators and factor over the associated ring.
            K = K0.get_ring()
            denom, f = dup_ground_to_ring(f, K0, K)
            f = dup_convert(f, K0, K)
        else:
            K = K0
        if K.is_ZZ:
            coeff, factors = dup_zz_factor(f, K, **args)
        elif K.is_Poly:
            # Inject the coefficient variables and factor multivariately.
            f, u = dmp_inject(f, 0, K)
            coeff, factors = dmp_factor_list(f, u, K.dom, **args)
            for i, (f, k) in enumerate(factors):
                factors[i] = (dmp_eject(f, u, K), k)
            coeff = K.convert(coeff, K.dom)
        else: # pragma: no cover
            raise DomainError('factorization not supported over %s' % K0)
        if K0.has_Field:
            for i, (f, k) in enumerate(factors):
                factors[i] = (dup_convert(f, K, K0), k)
            coeff = K0.convert(coeff, K)
            denom = K0.convert(denom, K)
            coeff = K0.quo(coeff, denom)
        if K0_inexact is not None:
            for i, (f, k) in enumerate(factors):
                factors[i] = (dup_convert(f, K0, K0_inexact), k)
            coeff = K0_inexact.convert(coeff, K0)
    if not args.get('include', False):
        return coeff, factors
    else:
        if not factors:
            return [(dup_strip([coeff]), 1)]
        else:
            # NOTE(review): `K` is only bound in the non-algebraic branch;
            # include=True over an algebraic field would hit an unbound name.
            g = dup_mul_ground(factors[0][0], coeff, K)
            return [(g, factors[0][1])] + factors[1:]
@cythonized("u,v,i,k")
def _dmp_inner_factor(f, u, K):
    """Simplify factorization in `Z[X]` as much as possible.

    Pulls out the monomial GCD and excludes unused variables before
    calling the full factorizer, then re-inserts both afterwards.
    """
    gcd, f = dmp_terms_gcd(f, u, K)
    J, f, v = dmp_exclude(f, u, K)
    coeff, factors = dmp_zz_factor(f, v, K)
    # Put the excluded variables back into each factor.
    for i, (f, k) in enumerate(factors):
        factors[i] = (dmp_include(f, J, v, K), k)
    # Re-insert the monomial GCD as single-variable factors x_i**g.
    for i, g in enumerate(reversed(gcd)):
        if not g:
            continue
        term = {(0,)*(u-i) + (1,) + (0,)*i: K.one}
        factors.insert(0, (dmp_from_dict(term, u, K), g))
    return coeff, factors
@cythonized("u,v,i,k")
def dmp_factor_list(f, u, K0, **args):
    """Factor polynomials into irreducibles in `K[X]`.

    Multivariate analogue of dup_factor_list(): dispatches on the domain
    `K0`, clearing fields to their ground ring and converting results
    back. With ``include=True`` the content is folded into the first factor.
    """
    if not u:
        return dup_factor_list(f, K0, **args)
    if not K0.has_CharacteristicZero: # pragma: no cover
        raise DomainError('only characteristic zero allowed')
    if K0.is_Algebraic:
        coeff, factors = dmp_ext_factor(f, u, K0)
    else:
        if not K0.is_Exact:
            # Factor over the exact counterpart, convert back at the end.
            K0_inexact, K0 = K0, K0.get_exact()
            f = dmp_convert(f, u, K0_inexact, K0)
        else:
            K0_inexact = None
        if K0.has_Field:
            # Clear denominators and factor over the associated ring.
            K = K0.get_ring()
            denom, f = dmp_ground_to_ring(f, u, K0, K)
            f = dmp_convert(f, u, K0, K)
        else:
            K = K0
        if K.is_ZZ:
            coeff, factors = _dmp_inner_factor(f, u, K)
        elif K.is_Poly:
            # Inject the coefficient variables and recurse.
            f, v = dmp_inject(f, u, K)
            coeff, factors = dmp_factor_list(f, v, K.dom, **args)
            for i, (f, k) in enumerate(factors):
                factors[i] = (dmp_eject(f, v, K), k)
            coeff = K.convert(coeff, K.dom)
        else: # pragma: no cover
            raise DomainError('factorization not supported over %s' % K0)
        if K0.has_Field:
            for i, (f, k) in enumerate(factors):
                factors[i] = (dmp_convert(f, u, K, K0), k)
            coeff = K0.convert(coeff, K)
            denom = K0.convert(denom, K)
            coeff = K0.quo(coeff, denom)
        if K0_inexact is not None:
            for i, (f, k) in enumerate(factors):
                factors[i] = (dmp_convert(f, u, K0, K0_inexact), k)
            coeff = K0_inexact.convert(coeff, K0)
    if not args.get('include', False):
        return coeff, factors
    else:
        if not factors:
            return [(dmp_ground(coeff, u), 1)]
        else:
            # NOTE(review): `K` is only bound in the non-algebraic branch;
            # include=True over an algebraic field would hit an unbound name.
            g = dmp_mul_ground(factors[0][0], coeff, u, K)
            return [(g, factors[0][1])] + factors[1:]
| |
import urllib2
import urllib
import re
import os
# structure : http://<ip>/<device>/<app>?t=<password>&p=<command>
class UrlCommands:
pw = "SeniorProject"
ip = "10.5.5.9"
device = ""
app = ""
command = ""
url = ""
current = ""
imageid = ""
def __init__(self):
self.command = ""
# http://<ip>/camera/VR?t=<password>&p=%00
def build_url(self, dev, a, com):
self.url = "http://" + self.ip + "/" + dev + "/" + a + "?t=" + self.pw + "&p=" + "%" + com
try:
urllib2.urlopen(self.url)
print self.url + " Command Sent.."
print self.current # print the command and current stored debug info
current = ""
except urllib2.URLError:
print "An error occurred while attempting to retrieve url: " + self.url
# BACPAC device commands: off, on, change_mode, start/stop capture
# http://<ip>/bacpac/PW?t=<password>&p=%01
def turn_off(self):
device = "bacpac"
app = "PW"
command = "00"
current = "\tBACPAC:PW:00: Power down.."
self.build_url(device, app, command)
def turn_on(self):
device = "bacpac"
app = "PW"
command = "01"
current = "\tBACPAC:PW:01: Power On.."
self.build_url(device, app, command)
def change_mode(self):
device = "bacpac"
app = "PW"
command = "02"
current = "\tBACPAC:PW:02: Change Mode."
self.build_url(device, app, command)
# http://<ip>/bacpac/SH?t=<password>&p=%01
def start_capture(self):
device = "bacpac"
app = "SH"
command = "01"
current = "\tBACPAC:SH:01: * Start Capture"
self.build_url(device, app, command)
def stop_capture(self):
device = "bacpac"
app = "SH"
command = "00"
current = "\tBACPAC:SH:00: * Stop Capture"
self.build_url(device, app, command)
# Camera preview methods on/off
# http://<ip>/camera/PV?t=<password>&p=%02
def preview_on(self):
device = "camera"
app = "PV"
command = "02"
current = "\tCAMERA:PV:02: Preview On"
self.build_url(device, app, command)
def preview_off(self):
device = "camera"
app = "PV"
command = "00"
current = "\tCAMERA:PV:00: Preview Off"
self.build_url(device, app, command)
# Camera mode methods
# http://<ip>/camera/CM?t=<password>&p=%00
def enable_camera_mode(self):
device = "camera"
app = "CM"
command = "00"
self.build_url(device, app, command)
def enable_photo_mode(self):
device = "camera"
app = "CM"
command = "01"
current = "\tCAMERA:CM:01: Photo Mode Enabled"
self.build_url(device, app, command)
def enable_burst_mode(self):
device = "camera"
app = "CM"
command = "02"
self.build_url(device, app, command)
def enable_tl_mode(self):
device = "camera"
app = "CM"
command = "03"
self.build_url(device, app, command)
def enable_tl2_mode(self):
device = "camera"
app = "CM"
command = "04"
self.build_url(device, app, command)
# Camera orientation methods
# http://<ip>/camera/UP?t=<password>&p=%00
def set_head_up(self):
device = "camera"
app = "UP"
command = "00"
self.build_url(device, app, command)
def set_head_down(self):
device = "camera"
app = "UP"
command = "01"
self.build_url(device, app, command)
# Video resolution methods
# http://<ip>/camera/VR?t=<password>&p=%00
def set_video_resolution(self, r):
if r == 'WVGA-60':
command = "00"
elif r == 'WVGA-120':
command = "01"
elif r == '720-30':
command = "02"
elif r == '720-60':
command = "03"
elif r == '960-30':
command = "04"
elif r == '960-60':
command = "05"
elif r == '1080-30':
command = "06"
else:
print "Error occurred while setting camera's video resolution: " + r + " is not a valid parameter"
command = "00"
device = "camera"
app = "VR"
self.build_url(device, app, command)
# methods to set photo resolutions
# http://<ip>/camera/PR?t=<password>&p=%00
def set_photo_resolution(self, r):
if r == '11MP_WIDE':
command = "00"
elif r == '8MP_MEDIUM':
command = "01"
elif r == '5MP_WIDE':
command = "02"
elif r == '5MP_MEDIUM':
command = "03"
else:
print "Error occurred while setting camera's photo resolution: " + r + " is not a valid parameter"
command = "00"
device = "camera"
app = "PR"
self.build_url(device, app, command)
# Camera timer settings
# http://<ip>/camera/TI?t=<password>&p=%00
def set_timer(self, tm):
if tm == 0.5:
command = "00"
elif tm == 1:
command = "01"
elif tm == 2:
command = "02"
elif tm == 5:
command = "03"
elif tm == 10:
command = "04"
elif tm == 30:
command = "05"
elif tm == 60:
command = "06"
else:
print "Error occurred::<set_timer>::timer interval: " + tm + " is not a valid parameter"
command = "00"
device = "camera"
app = "TI"
self.build_url(device, app, command)
# home_directory = "http://10.5.5.9:8080/videos/DCIM/100GOPRO/"
def get_photo(self):
home_dir = "http://" + self.ip + ":8080/videos/DCIM/100GOPRO/" # Cherokee Web Server
repeat = True
# Waiting for the server to update
while repeat == True:
path = urllib2.urlopen(home_dir) # Get the Cherokee URL
string = path.read() # Get the HTML info from web server
pattern = re.compile('\w\w\w\w\w\w\w\w.JPG"') # regular expression to find all JPG names
files = pattern.findall(string) # get the filenames into a list
temp = files[-1].replace('\"', "")
if temp != self.imageid:
self.imageid = temp
repeat = False
# This will staydisabled during the testing phase, in order to follow frames captured
# for i in os.listdir("media/external/img"): # remove all previous images
# os.remove(i)
urllib.urlretrieve(home_dir+self.imageid, self.imageid) # get the last element from the list
def get_image_id(self):
return self.imageid
| |
# BUGFIX: this region contained unresolved git merge-conflict markers
# (<<<<<<< HEAD / ======= / >>>>>>>) wrapping three byte-identical copies
# of the module, which is a syntax error. Resolved to a single copy.
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Fixer for dict methods.

d.keys() -> list(d.keys())
d.items() -> list(d.items())
d.values() -> list(d.values())

d.iterkeys() -> iter(d.keys())
d.iteritems() -> iter(d.items())
d.itervalues() -> iter(d.values())

d.viewkeys() -> d.keys()
d.viewitems() -> d.items()
d.viewvalues() -> d.values()

Except in certain very specific contexts: the iter() can be dropped
when the context is list(), sorted(), iter() or for...in; the list()
can be dropped when the context is list() or sorted() (but not iter()
or for...in!). Special contexts that apply to both: list(), sorted(), tuple()
set(), any(), all(), sum().

Note: iter(d.keys()) could be written as iter(d) but since the
original d.iterkeys() was also redundant we don't fix this. And there
are (rare) contexts where it makes a difference (e.g. when passing it
as an argument to a function that introspects the argument).
"""

# Local imports
from .. import pytree
from .. import patcomp
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, LParen, RParen, ArgList, Dot
from .. import fixer_util


iter_exempt = fixer_util.consuming_calls | {"iter"}


class FixDict(fixer_base.BaseFix):
    BM_compatible = True

    PATTERN = """
    power< head=any+
         trailer< '.' method=('keys'|'items'|'values'|
                              'iterkeys'|'iteritems'|'itervalues'|
                              'viewkeys'|'viewitems'|'viewvalues') >
         parens=trailer< '(' ')' >
         tail=any*
    >
    """

    def transform(self, node, results):
        head = results["head"]
        method = results["method"][0] # Extract node for method name
        tail = results["tail"]
        syms = self.syms
        method_name = method.value
        isiter = method_name.startswith("iter")
        isview = method_name.startswith("view")
        if isiter or isview:
            method_name = method_name[4:]
        assert method_name in ("keys", "items", "values"), repr(method)
        head = [n.clone() for n in head]
        tail = [n.clone() for n in tail]
        special = not tail and self.in_special_context(node, isiter)
        args = head + [pytree.Node(syms.trailer,
                                   [Dot(),
                                    Name(method_name,
                                         prefix=method.prefix)]),
                       results["parens"].clone()]
        new = pytree.Node(syms.power, args)
        if not (special or isview):
            new.prefix = ""
            new = Call(Name("iter" if isiter else "list"), [new])
        if tail:
            new = pytree.Node(syms.power, [new] + tail)
        new.prefix = node.prefix
        return new

    P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
    p1 = patcomp.compile_pattern(P1)

    P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
            | comp_for< 'for' any 'in' node=any any* >
         """
    p2 = patcomp.compile_pattern(P2)

    def in_special_context(self, node, isiter):
        if node.parent is None:
            return False
        results = {}
        if (node.parent.parent is not None and
               self.p1.match(node.parent.parent, results) and
               results["node"] is node):
            if isiter:
                # iter(d.iterkeys()) -> iter(d.keys()), etc.
                return results["func"].value in iter_exempt
            else:
                # list(d.keys()) -> list(d.keys()), etc.
                return results["func"].value in fixer_util.consuming_calls
        if not isiter:
            return False
        # for ... in d.iterkeys() -> for ... in d.keys(), etc.
        return self.p2.match(node.parent, results) and results["node"] is node
| |
from cStringIO import StringIO
from couchdbkit import ResourceNotFound
from datetime import datetime, timedelta
from django.template.defaultfilters import yesno
from django.utils.translation import ugettext as _
from corehq.apps.fixtures.exceptions import FixtureDownloadError
from corehq.apps.fixtures.models import FixtureDataType, FixtureDataItem, _id_from_doc
from corehq.apps.fixtures.upload import DELETE_HEADER
from couchexport.export import export_raw
from couchexport.models import Format
from soil import DownloadBase
from soil.util import expose_cached_download
def prepare_fixture_download(table_ids, domain, task, download_id):
    """Prepare fixture data for Excel download.

    Writes the "types" sheet plus one sheet per fixture table to an XLSX
    workbook and exposes it as a cached download keyed by ``download_id``.
    """
    data_types_book, excel_sheets = _prepare_fixture(table_ids, domain, task=task)

    header_groups = [("types", excel_sheets["types"]["headers"])]
    value_groups = [("types", excel_sheets["types"]["rows"])]
    for data_type in data_types_book:
        header_groups.append((data_type.tag, excel_sheets[data_type.tag]["headers"]))
        value_groups.append((data_type.tag, excel_sheets[data_type.tag]["rows"]))

    # Renamed from `file`/`format`: don't shadow the builtins.
    workbook_file = StringIO()
    file_format = Format.XLS_2007
    export_raw(tuple(header_groups), tuple(value_groups), workbook_file, file_format)
    return expose_cached_download(
        workbook_file.getvalue(),
        60 * 60 * 2,  # keep the download cached for two hours
        file_extension=".xlsx",
        mimetype=Format.from_format(file_format).mimetype,
        content_disposition='attachment; filename="%s_fixtures.xlsx"' % domain,
        download_id=download_id,
    )
def prepare_fixture_html(table_ids, domain):
    """Prepare fixture data for HTML view.

    Runs the shared ``_prepare_fixture`` pipeline in HTML mode and returns
    only the sheet dictionary (the data-types list is not needed here).
    """
    _, sheets = _prepare_fixture(table_ids, domain, html_response=True)
    return sheets
def _prepare_fixture(table_ids, domain, html_response=False, task=None):
    """Assemble fixture tables into sheet-shaped data for export or display.

    :param table_ids: ids of ``FixtureDataType`` docs to export; a falsy or
        empty first element means "all tables in the domain".
    :param domain: domain whose fixture tables are read.
    :param html_response: when True, only the first table is prepared and the
        first two columns (UID / delete flag) are stripped from each row.
    :param task: optional task handle for throttled progress reporting.
    :return: ``(data_types_book, excel_sheets)`` where ``excel_sheets`` maps a
        sheet name to ``{"headers": tuple, "rows": tuple-of-row-tuples}``.
    :raises FixtureDownloadError: when an explicitly requested table is not
        found and ``html_response`` is True.
    """
    if table_ids and table_ids[0]:
        try:
            data_types_view = [FixtureDataType.get(id) for id in table_ids]
        except ResourceNotFound:
            if html_response:
                raise FixtureDownloadError(
                    _("Sorry, we couldn't find that table. If you think this "
                      "is a mistake please report an issue."))
            # Non-HTML callers fall back to exporting every table.
            data_types_view = FixtureDataType.by_domain(domain)
    else:
        data_types_view = FixtureDataType.by_domain(domain)

    if html_response:
        # HTML preview only ever shows the first table.
        data_types_view = list(data_types_view)[0:1]

    total_tables = len(data_types_view)
    # when total_tables < 4 the final percentage can be >= 100%, but for
    # a small number of tables it renders more accurate progress
    total_events = (total_tables + (0 if total_tables < 4 else 1)) * 10

    now = datetime.utcnow
    last_update = [now()]
    # (sic: "upate") throttle interval for progress updates
    upate_period = timedelta(seconds=1)  # do not update progress more than once a second

    def _update_progress(event_count, item_count, items_in_table):
        # Report at most once per second; each table contributes 10 "events".
        if task and now() - last_update[0] > upate_period:
            last_update[0] = now()
            processed = event_count * 10 + (10. * item_count / items_in_table)
            processed = min(processed, total_events)  # limit at 100%
            DownloadBase.set_progress(task, processed, total_events)

    # book-keeping data from view_results for repeated use
    data_types_book = []
    data_items_book_by_type = {}
    item_helpers_by_type = {}
    """
    Contains all excel sheets in following format
    excel_sheets = {
        "types": {
            "headers": [],
            "rows": [(row), (row), (row)]
        }
        "next-sheet": {
            "headers": [],
            "rows": [(row), (row), (row)]
        },
        ...
    }
    """
    excel_sheets = {}

    def empty_padding_list(length):
        # Pads ragged rows so every row has the same column count.
        return ["" for x in range(0, length)]

    max_fields = 0
    max_item_attributes = 0
    """
    - Helper to generate headers like "field 2: property 1"
    - Captures max_num_of_properties for any field of any type at the list-index.
    Example values:
        [0, 1] -> "field 2: property 1" (first-field has zero-props, second has 1 property)
        [1, 1] -> "field 1: property 1" (first-field has 1 property, second has 1 property)
        [0, 2] -> "field 2: property 1", "field 2: property 2"
    """
    field_prop_count = []
    """
    captures all possible 'field-property' values for each data-type
    Example value
      {
        u'clinics': {'field 2 : property 1': u'lang'},
        u'growth_chart': {'field 2 : property 2': u'maxWeight'}
      }
    """
    type_field_properties = {}
    get_field_prop_format = lambda x, y: "field " + str(x) + " : property " + str(y)

    # First pass: collect per-table maxima and cache every item row so the
    # sheet-building passes below can size their columns correctly.
    for event_count, data_type in enumerate(data_types_view):
        # Helpers to generate 'types' sheet
        type_field_properties[data_type.tag] = {}
        data_types_book.append(data_type)
        if len(data_type.fields) > max_fields:
            max_fields = len(data_type.fields)
        if len(data_type.item_attributes) > max_item_attributes:
            max_item_attributes = len(data_type.item_attributes)
        for index, field in enumerate(data_type.fields):
            if len(field_prop_count) <= index:
                field_prop_count.append(len(field.properties))
            elif field_prop_count[index] <= len(field.properties):
                field_prop_count[index] = len(field.properties)
            if len(field.properties) > 0:
                for prop_index, property in enumerate(field.properties):
                    prop_key = get_field_prop_format(index + 1, prop_index + 1)
                    type_field_properties[data_type.tag][prop_key] = property

        # Helpers to generate item-sheets
        data_items_book_by_type[data_type.tag] = []
        max_users = 0
        max_groups = 0
        max_locations = 0
        max_field_prop_combos = {field_name: 0 for field_name in data_type.fields_without_attributes}
        fixture_data = FixtureDataItem.by_data_type(domain, data_type.get_id)
        num_rows = len(fixture_data)
        for n, item_row in enumerate(fixture_data):
            _update_progress(event_count, n, num_rows)
            data_items_book_by_type[data_type.tag].append(item_row)
            max_groups = max(max_groups, len(item_row.groups))
            max_users = max(max_users, len(item_row.users))
            max_locations = max(max_locations, len(item_row.locations))
            for field_key in item_row.fields:
                if field_key in max_field_prop_combos:
                    max_combos = max_field_prop_combos[field_key]
                    cur_combo_len = len(item_row.fields[field_key].field_list)
                    max_combos = cur_combo_len if cur_combo_len > max_combos else max_combos
                    max_field_prop_combos[field_key] = max_combos

        item_helpers = {
            "max_users": max_users,
            "max_groups": max_groups,
            "max_locations": max_locations,
            "max_field_prop_combos": max_field_prop_combos,
        }
        item_helpers_by_type[data_type.tag] = item_helpers

    # Prepare 'types' sheet data
    types_sheet = {"headers": [], "rows": []}
    types_sheet["headers"] = [DELETE_HEADER, "table_id", 'is_global?']
    types_sheet["headers"].extend(["field %d" % x for x in range(1, max_fields + 1)])
    types_sheet["headers"].extend(["property %d" % x for x in range(1, max_item_attributes + 1)])
    field_prop_headers = []
    for field_num, prop_num in enumerate(field_prop_count):
        if prop_num > 0:
            for c in range(0, prop_num):
                prop_key = get_field_prop_format(field_num + 1, c + 1)
                field_prop_headers.append(prop_key)
                types_sheet["headers"].append(prop_key)

    for data_type in data_types_book:
        common_vals = ["N", data_type.tag, yesno(data_type.is_global)]
        field_vals = ([field.field_name for field in data_type.fields]
                      + empty_padding_list(max_fields - len(data_type.fields)))
        item_att_vals = (data_type.item_attributes + empty_padding_list(
            max_item_attributes - len(data_type.item_attributes)
        ))
        prop_vals = []
        if data_type.tag in type_field_properties:
            props = type_field_properties.get(data_type.tag)
            prop_vals.extend([props.get(key, "") for key in field_prop_headers])
        # HTML mode drops the first two columns (delete flag / table_id).
        row = tuple(common_vals[2 if html_response else 0:] + field_vals + item_att_vals + prop_vals)
        types_sheet["rows"].append(row)

    types_sheet["rows"] = tuple(types_sheet["rows"])
    types_sheet["headers"] = tuple(types_sheet["headers"])
    excel_sheets["types"] = types_sheet

    # Prepare 'items' sheet data for each data-type
    for n, data_type in enumerate(data_types_book):
        _update_progress(total_tables, n, total_tables)
        item_sheet = {"headers": [], "rows": []}
        item_helpers = item_helpers_by_type[data_type.tag]
        max_users = item_helpers["max_users"]
        max_groups = item_helpers["max_groups"]
        max_locations = item_helpers["max_locations"]
        max_field_prop_combos = item_helpers["max_field_prop_combos"]
        common_headers = ["UID", DELETE_HEADER]
        user_headers = ["user %d" % x for x in range(1, max_users + 1)]
        group_headers = ["group %d" % x for x in range(1, max_groups + 1)]
        location_headers = ["location %d" % x for x in range(1, max_locations + 1)]
        field_headers = []
        item_att_headers = ["property: " + attribute for attribute in data_type.item_attributes]
        for field in data_type.fields:
            if len(field.properties) == 0:
                field_headers.append("field: " + field.field_name)
            else:
                # Fields with properties expand into one column group per
                # field/property combination, up to the observed maximum.
                prop_headers = []
                for x in range(1, max_field_prop_combos[field.field_name] + 1):
                    for property in field.properties:
                        prop_headers.append("%(name)s: %(prop)s %(count)s" % {
                            "name": field.field_name,
                            "prop": property,
                            "count": x
                        })
                    prop_headers.append("field: %(name)s %(count)s" % {
                        "name": field.field_name,
                        "count": x
                    })
                field_headers.extend(prop_headers)
        item_sheet["headers"] = tuple(
            common_headers[2 if html_response else 0:]
            + field_headers
            + item_att_headers
            + user_headers
            + group_headers
            + location_headers
        )
        excel_sheets[data_type.tag] = item_sheet
        for item_row in data_items_book_by_type[data_type.tag]:
            common_vals = [str(_id_from_doc(item_row)), "N"]
            user_vals = ([user.raw_username for user in item_row.users]
                         + empty_padding_list(max_users - len(item_row.users)))
            group_vals = ([group.name for group in item_row.groups]
                          + empty_padding_list(max_groups - len(item_row.groups)))
            # NOTE(review): padding below uses max_groups where max_locations
            # looks intended — rows with fewer locations than max_locations
            # may be mis-padded. Confirm before changing.
            location_vals = ([loc.site_code for loc in item_row.locations]
                             + empty_padding_list(max_groups - len(item_row.locations)))
            field_vals = []
            item_att_vals = [item_row.item_attributes[attribute] for attribute in data_type.item_attributes]
            for field in data_type.fields:
                if len(field.properties) == 0:
                    fixture_fields = item_row.fields.get(field.field_name)
                    if fixture_fields and any(fixture_fields.field_list):
                        value = item_row.fields.get(field.field_name).field_list[0].field_value
                    else:
                        value = ""
                    field_vals.append(value)
                else:
                    field_prop_vals = []
                    cur_combo_count = len(item_row.fields.get(field.field_name).field_list)
                    cur_prop_count = len(field.properties)
                    for count, field_prop_combo in enumerate(item_row.fields.get(field.field_name).field_list):
                        for property in field.properties:
                            field_prop_vals.append(field_prop_combo.properties.get(property, None) or "")
                        field_prop_vals.append(field_prop_combo.field_value)
                    # Pad to the widest combo seen anywhere for this field.
                    padding_list_len = ((max_field_prop_combos[field.field_name] - cur_combo_count)
                                        * (cur_prop_count + 1))
                    field_prop_vals.extend(empty_padding_list(padding_list_len))
                    field_vals.extend(field_prop_vals)
            row = tuple(
                common_vals[2 if html_response else 0:]
                + field_vals
                + item_att_vals
                + user_vals
                + group_vals
                + location_vals
            )
            item_sheet["rows"].append(row)
        item_sheet["rows"] = tuple(item_sheet["rows"])
        excel_sheets[data_type.tag] = item_sheet

    return data_types_book, excel_sheets
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import inspect
import os
import pickle
import shutil
import sys
import types
import warnings
from tempfile import TemporaryDirectory
from textwrap import dedent
from typing import Any, Callable, Dict, Iterable, List, Optional, Union
import dill
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.models.skipmixin import SkipMixin
from airflow.models.taskinstance import _CURRENT_CONTEXT
from airflow.utils.operator_helpers import determine_kwargs
from airflow.utils.process_utils import execute_in_subprocess
from airflow.utils.python_virtualenv import prepare_virtualenv, write_python_script
def task(python_callable: Optional[Callable] = None, multiple_outputs: Optional[bool] = None, **kwargs):
    """
    Deprecated function that calls @task.python and allows users to turn a python function into
    an Airflow task. Please use the following instead:

        from airflow.decorators import task

        @task
        def my_task()

    :param python_callable: A reference to an object that is callable
    :type python_callable: python callable
    :param op_kwargs: a dictionary of keyword arguments that will get unpacked
        in your function (templated)
    :type op_kwargs: dict
    :param op_args: a list of positional arguments that will get unpacked when
        calling your callable (templated)
    :type op_args: list
    :param multiple_outputs: if set, function return value will be
        unrolled to multiple XCom values. Dict will unroll to xcom values with keys as keys.
        Defaults to False.
    :type multiple_outputs: bool
    :return: the task decorator produced by ``airflow.decorators.python.python_task``
    """
    # To maintain backwards compatibility, we import the task object into this file
    # This prevents breakages in dags that use `from airflow.operators.python import task`
    from airflow.decorators.python import python_task

    # stacklevel=2 points the DeprecationWarning at the caller's code.
    warnings.warn(
        """airflow.operators.python.task is deprecated. Please use the following instead
        from airflow.decorators import task
        @task
        def my_task()""",
        DeprecationWarning,
        stacklevel=2,
    )
    return python_task(python_callable=python_callable, multiple_outputs=multiple_outputs, **kwargs)
class PythonOperator(BaseOperator):
    """
    Executes a Python callable

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:PythonOperator`

    :param python_callable: A reference to an object that is callable
    :type python_callable: python callable
    :param op_kwargs: a dictionary of keyword arguments that will get unpacked
        in your function
    :type op_kwargs: dict (templated)
    :param op_args: a list of positional arguments that will get unpacked when
        calling your callable
    :type op_args: list (templated)
    :param templates_dict: a dictionary where the values are templates that
        will get templated by the Airflow engine sometime between
        ``__init__`` and ``execute`` takes place and are made available
        in your callable's context after the template has been applied. (templated)
    :type templates_dict: dict[str]
    :param templates_exts: a list of file extensions to resolve while
        processing templated fields, for examples ``['.sql', '.hql']``
    :type templates_exts: list[str]
    """

    template_fields = ('templates_dict', 'op_args', 'op_kwargs')
    template_fields_renderers = {"templates_dict": "json", "op_args": "py", "op_kwargs": "py"}
    BLUE = '#ffefeb'
    ui_color = BLUE

    # since we won't mutate the arguments, we should just do the shallow copy
    # there are some cases we can't deepcopy the objects(e.g protobuf).
    shallow_copy_attrs = (
        'python_callable',
        'op_kwargs',
    )

    def __init__(
        self,
        *,
        python_callable: Callable,
        op_args: Optional[List] = None,
        op_kwargs: Optional[Dict] = None,
        templates_dict: Optional[Dict] = None,
        templates_exts: Optional[List[str]] = None,
        **kwargs,
    ) -> None:
        # BUGFIX: `provide_context` must be removed from kwargs even when it
        # is passed with a falsy value (e.g. provide_context=False); it was
        # previously popped only when truthy, so BaseOperator would reject it
        # as an unexpected argument. Warn only when it was actually enabled.
        if kwargs.pop('provide_context', None):
            warnings.warn(
                "provide_context is deprecated as of 2.0 and is no longer required",
                DeprecationWarning,
                stacklevel=2,
            )
        super().__init__(**kwargs)
        if not callable(python_callable):
            raise AirflowException('`python_callable` param must be callable')
        self.python_callable = python_callable
        self.op_args = op_args or []
        self.op_kwargs = op_kwargs or {}
        self.templates_dict = templates_dict
        if templates_exts:
            self.template_ext = templates_exts

    def execute(self, context: Dict):
        """Render kwargs from the task context, run the callable, return its result."""
        context.update(self.op_kwargs)
        context['templates_dict'] = self.templates_dict

        # Narrow the merged context down to the kwargs the callable accepts.
        self.op_kwargs = determine_kwargs(self.python_callable, self.op_args, context)

        return_value = self.execute_callable()
        self.log.info("Done. Returned value was: %s", return_value)
        return return_value

    def execute_callable(self):
        """
        Calls the python callable with the given arguments.

        :return: the return value of the call.
        :rtype: any
        """
        return self.python_callable(*self.op_args, **self.op_kwargs)
class BranchPythonOperator(PythonOperator, SkipMixin):
    """
    Branching operator: runs its callable and follows only the path(s) it
    names.

    The callable must return a single task_id, or a list of task_ids, that
    are directly downstream of this task. Every other directly-downstream
    task is marked ``skipped``; the skipped state propagates further
    downstream so the DAG run's overall state can still be inferred.
    """

    def execute(self, context: Dict):
        chosen_branch = super().execute(context)
        self.skip_all_except(context['ti'], chosen_branch)
        return chosen_branch
class ShortCircuitOperator(PythonOperator, SkipMixin):
    """
    Operator that lets a workflow continue only when a condition holds;
    otherwise it "short-circuits" the DAG.

    Derived from :class:`PythonOperator`: the wrapped callable is executed
    and its result is the condition. A falsy result marks every downstream
    task ``skipped``; a truthy result lets downstream tasks run normally.
    """

    def execute(self, context: Dict):
        condition = super().execute(context)
        self.log.info("Condition result is %s", condition)

        # Truthy condition: nothing to skip, continue as normal.
        if condition:
            self.log.info('Proceeding with downstream tasks...')
            return

        self.log.info('Skipping downstream tasks...')
        tasks_to_skip = context['task'].get_flat_relatives(upstream=False)
        self.log.debug("Downstream task_ids %s", tasks_to_skip)
        if tasks_to_skip:
            self.skip(context['dag_run'], context['ti'].execution_date, tasks_to_skip)
        self.log.info("Done.")
class PythonVirtualenvOperator(PythonOperator):
    """
    Allows one to run a function in a virtualenv that is created and destroyed
    automatically (with certain caveats).

    The function must be defined using def, and not be
    part of a class. All imports must happen inside the function
    and no variables outside of the scope may be referenced. A global scope
    variable named virtualenv_string_args will be available (populated by
    string_args). In addition, one can pass stuff through op_args and op_kwargs, and one
    can use a return value.
    Note that if your virtualenv runs in a different Python major version than Airflow,
    you cannot use return values, op_args, op_kwargs, or use any macros that are being provided to
    Airflow through plugins. You can use string_args though.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:PythonVirtualenvOperator`

    :param python_callable: A python function with no references to outside variables,
        defined with def, which will be run in a virtualenv
    :type python_callable: function
    :param requirements: A list of requirements as specified in a pip install command
    :type requirements: list[str]
    :param python_version: The Python version to run the virtualenv with. Note that
        both 2 and 2.7 are acceptable forms.
    :type python_version: Optional[Union[str, int, float]]
    :param use_dill: Whether to use dill to serialize
        the args and result (pickle is default). This allow more complex types
        but requires you to include dill in your requirements.
    :type use_dill: bool
    :param system_site_packages: Whether to include
        system_site_packages in your virtualenv.
        See virtualenv documentation for more information.
    :type system_site_packages: bool
    :param op_args: A list of positional arguments to pass to python_callable.
    :type op_args: list
    :param op_kwargs: A dict of keyword arguments to pass to python_callable.
    :type op_kwargs: dict
    :param string_args: Strings that are present in the global var virtualenv_string_args,
        available to python_callable at runtime as a list[str]. Note that args are split
        by newline.
    :type string_args: list[str]
    :param templates_dict: a dictionary where the values are templates that
        will get templated by the Airflow engine sometime between
        ``__init__`` and ``execute`` takes place and are made available
        in your callable's context after the template has been applied
    :type templates_dict: dict of str
    :param templates_exts: a list of file extensions to resolve while
        processing templated fields, for examples ``['.sql', '.hql']``
    :type templates_exts: list[str]
    """

    # Context keys whose values are plain serializable types; always safe to
    # ship into the virtualenv subprocess regardless of what is installed.
    BASE_SERIALIZABLE_CONTEXT_KEYS = {
        'ds_nodash',
        'inlets',
        'next_ds',
        'next_ds_nodash',
        'outlets',
        'params',
        'prev_ds',
        'prev_ds_nodash',
        'run_id',
        'task_instance_key_str',
        'test_mode',
        'tomorrow_ds',
        'tomorrow_ds_nodash',
        'ts',
        'ts_nodash',
        'ts_nodash_with_tz',
        'yesterday_ds',
        'yesterday_ds_nodash',
    }
    # Context keys only serializable when the pendulum stack is available in
    # the virtualenv (see _get_serializable_context_keys below).
    PENDULUM_SERIALIZABLE_CONTEXT_KEYS = {
        'execution_date',
        'next_execution_date',
        'prev_execution_date',
        'prev_execution_date_success',
        'prev_start_date_success',
    }
    # Context keys only serializable when apache-airflow itself is importable
    # inside the virtualenv.
    AIRFLOW_SERIALIZABLE_CONTEXT_KEYS = {'macros', 'conf', 'dag', 'dag_run', 'task'}

    def __init__(
        self,
        *,
        python_callable: Callable,
        requirements: Optional[Iterable[str]] = None,
        python_version: Optional[Union[str, int, float]] = None,
        use_dill: bool = False,
        system_site_packages: bool = True,
        op_args: Optional[List] = None,
        op_kwargs: Optional[Dict] = None,
        string_args: Optional[Iterable[str]] = None,
        templates_dict: Optional[Dict] = None,
        templates_exts: Optional[List[str]] = None,
        **kwargs,
    ):
        # Reject anything that is not a plain `def` function: the source must
        # be retrievable and re-executable in the subprocess. NOTE(review):
        # by operator precedence this reads `not A or (B and C)` — appears
        # intentional, since lambdas are FunctionType instances too.
        if (
            not isinstance(python_callable, types.FunctionType)
            or isinstance(python_callable, types.LambdaType)
            and python_callable.__name__ == "<lambda>"
        ):
            raise AirflowException('PythonVirtualenvOperator only supports functions for python_callable arg')
        # Args/kwargs cross the process boundary via pickle/dill, which does
        # not work across major Python versions.
        if (
            python_version
            and str(python_version)[0] != str(sys.version_info.major)
            and (op_args or op_kwargs)
        ):
            raise AirflowException(
                "Passing op_args or op_kwargs is not supported across different Python "
                "major versions for PythonVirtualenvOperator. Please use string_args."
            )
        if not shutil.which("virtualenv"):
            raise AirflowException('PythonVirtualenvOperator requires virtualenv, please install it.')
        super().__init__(
            python_callable=python_callable,
            op_args=op_args,
            op_kwargs=op_kwargs,
            templates_dict=templates_dict,
            templates_exts=templates_exts,
            **kwargs,
        )
        self.requirements = list(requirements or [])
        self.string_args = string_args or []
        self.python_version = python_version
        self.use_dill = use_dill
        self.system_site_packages = system_site_packages
        # When the venv is isolated, dill must be pip-installed into it.
        if not self.system_site_packages and self.use_dill and 'dill' not in self.requirements:
            self.requirements.append('dill')
        self.pickling_library = dill if self.use_dill else pickle

    def execute(self, context: Dict):
        """Run the callable, restricting the context to serializable keys."""
        serializable_context = {key: context[key] for key in self._get_serializable_context_keys()}
        return super().execute(context=serializable_context)

    def execute_callable(self):
        """Create the virtualenv, run the callable in it, and return its result.

        Builds a throwaway venv in a temp dir, pickles args in, renders a
        driver script, runs it with the venv's interpreter, and unpickles the
        result from the output file.
        """
        with TemporaryDirectory(prefix='venv') as tmp_dir:
            if self.templates_dict:
                self.op_kwargs['templates_dict'] = self.templates_dict

            input_filename = os.path.join(tmp_dir, 'script.in')
            output_filename = os.path.join(tmp_dir, 'script.out')
            string_args_filename = os.path.join(tmp_dir, 'string_args.txt')
            script_filename = os.path.join(tmp_dir, 'script.py')

            prepare_virtualenv(
                venv_directory=tmp_dir,
                python_bin=f'python{self.python_version}' if self.python_version else None,
                system_site_packages=self.system_site_packages,
                requirements=self.requirements,
            )

            self._write_args(input_filename)
            self._write_string_args(string_args_filename)
            write_python_script(
                jinja_context=dict(
                    op_args=self.op_args,
                    op_kwargs=self.op_kwargs,
                    pickling_library=self.pickling_library.__name__,
                    python_callable=self.python_callable.__name__,
                    python_callable_source=self.get_python_source(),
                ),
                filename=script_filename,
                render_template_as_native_obj=self.dag.render_template_as_native_obj,
            )

            execute_in_subprocess(
                cmd=[
                    f'{tmp_dir}/bin/python',
                    script_filename,
                    input_filename,
                    output_filename,
                    string_args_filename,
                ]
            )

            return self._read_result(output_filename)

    def get_python_source(self):
        """
        Returns the source of self.python_callable
        @return:
        """
        # dedent so a method-indented source still parses at module level.
        return dedent(inspect.getsource(self.python_callable))

    def _write_args(self, filename):
        # Serialize args/kwargs for the subprocess; skipped when there are none.
        if self.op_args or self.op_kwargs:
            with open(filename, 'wb') as file:
                self.pickling_library.dump({'args': self.op_args, 'kwargs': self.op_kwargs}, file)

    def _get_serializable_context_keys(self):
        """Return the set of context keys safe to pass into the virtualenv."""
        def _is_airflow_env():
            return self.system_site_packages or 'apache-airflow' in self.requirements

        def _is_pendulum_env():
            return 'pendulum' in self.requirements and 'lazy_object_proxy' in self.requirements

        serializable_context_keys = self.BASE_SERIALIZABLE_CONTEXT_KEYS.copy()
        if _is_airflow_env():
            serializable_context_keys.update(self.AIRFLOW_SERIALIZABLE_CONTEXT_KEYS)
        if _is_pendulum_env() or _is_airflow_env():
            serializable_context_keys.update(self.PENDULUM_SERIALIZABLE_CONTEXT_KEYS)
        return serializable_context_keys

    def _write_string_args(self, filename):
        # One string per line; the generated script reads them back as a list.
        with open(filename, 'w') as file:
            file.write('\n'.join(map(str, self.string_args)))

    def _read_result(self, filename):
        # An empty output file means the callable returned nothing.
        if os.stat(filename).st_size == 0:
            return None
        with open(filename, 'rb') as file:
            try:
                return self.pickling_library.load(file)
            except ValueError:
                self.log.error(
                    "Error deserializing result. Note that result deserialization "
                    "is not supported across major Python versions."
                )
                raise

    def __deepcopy__(self, memo):
        # module objects can't be copied at all
        memo[id(self.pickling_library)] = self.pickling_library
        return super().__deepcopy__(memo)
def get_current_context() -> Dict[str, Any]:
    """
    Return the execution context of the operator that is currently running,
    without requiring the user's function to declare ``**context``.

    **Old style:**

    .. code:: python

        def my_task(**context):
            ti = context["ti"]

    **New style:**

    .. code:: python

        from airflow.operators.python import get_current_context

        def my_task():
            context = get_current_context()
            ti = context["ti"]

    Only returns a value when called while an operator is executing; raises
    otherwise.
    """
    context_stack = _CURRENT_CONTEXT
    if not context_stack:
        raise AirflowException(
            "Current context was requested but no context was found! "
            "Are you running within an airflow task?"
        )
    # The innermost (most recently pushed) context belongs to this task.
    return context_stack[-1]
| |
import random
import itertools
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import BasicLSTMCell
from basic.read_data import DataSet
from my.tensorflow import get_initializer
from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d
from my.tensorflow.rnn import bidirectional_dynamic_rnn
from my.tensorflow.rnn_cell import SwitchableDropoutWrapper, AttentionCell
from basic.attention_gru_cell import AttentionGRUCell
def get_multi_gpu_models(config):
    """Build one Model replica per configured GPU.

    Replica 0 is the representative (``rep=True``); each replica is placed
    on its own device under its own name scope.
    """
    replicas = []
    for gpu_idx in range(config.num_gpus):
        device = "/{}:{}".format(config.device_type, gpu_idx)
        with tf.name_scope("model_{}".format(gpu_idx)) as scope, tf.device(device):
            replicas.append(Model(config, scope, rep=gpu_idx == 0))
    return replicas
class Model(object):
    def __init__(self, config, scope, rep=True):
        """Build the full graph (inputs, forward pass, loss, EMA) for one replica.

        :param config: experiment configuration object (sizes, mode, etc.).
        :param scope: name scope this replica lives under.
        :param rep: True for the representative replica, which additionally
            builds the variable EMA.
        """
        self.scope = scope
        self.config = config
        # Global training step; incremented by the optimizer elsewhere.
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, M, JX], name='x')    # presumably context word ids — confirm
        self.cx = tf.placeholder('int32', [N, None, None, W], name='cx')  # context char ids per word
        self.x_mask = tf.placeholder('bool', [N, None, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, JQ], name='q')       # question word ids
        self.cq = tf.placeholder('int32', [N, None, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, None], name='q_mask')
        self.y = tf.placeholder('bool', [N, None, None], name='y')    # answer target; y2 looks like span end — confirm
        self.y2 = tf.placeholder('bool', [N, None, None], name='y2')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        self.var_ema = None
        if rep:
            self._build_var_ema()
        if config.mode == 'train':
            self._build_ema()

        # NOTE(review): the first assignment is immediately overwritten by the
        # scoped merge on the next line, so merge_all_summaries() is dead here.
        self.summary = tf.merge_all_summaries()
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope))
def _build_forward(self):
config = self.config
N, M, JX, JQ, VW, VC, d, W = \
config.batch_size, config.max_num_sents, config.max_sent_size, \
config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.hidden_size, \
config.max_word_size
JX = tf.shape(self.x)[2]
JQ = tf.shape(self.q)[1]
print("M: ", M)
M = tf.shape(self.x)[1]
dc, dw, dco = config.char_emb_size, config.word_emb_size, config.char_out_size
with tf.variable_scope("emb"):
if config.use_char_emb:
with tf.variable_scope("emb_var"), tf.device("/cpu:0"):
char_emb_mat = tf.get_variable("char_emb_mat", shape=[VC, dc], dtype='float')
with tf.variable_scope("char"):
Acx = tf.nn.embedding_lookup(char_emb_mat, self.cx) # [N, M, JX, W, dc]
Acq = tf.nn.embedding_lookup(char_emb_mat, self.cq) # [N, JQ, W, dc]
Acx = tf.reshape(Acx, [-1, JX, W, dc])
Acq = tf.reshape(Acq, [-1, JQ, W, dc])
filter_sizes = list(map(int, config.out_channel_dims.split(',')))
heights = list(map(int, config.filter_heights.split(',')))
assert sum(filter_sizes) == dco, (filter_sizes, dco)
with tf.variable_scope("conv"):
xx = multi_conv1d(Acx, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="xx")
if config.share_cnn_weights:
tf.get_variable_scope().reuse_variables()
qq = multi_conv1d(Acq, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="xx")
else:
qq = multi_conv1d(Acq, filter_sizes, heights, "VALID", self.is_train, config.keep_prob, scope="qq")
xx = tf.reshape(xx, [-1, M, JX, dco])
qq = tf.reshape(qq, [-1, JQ, dco])
if config.use_word_emb:
with tf.variable_scope("emb_var"), tf.device("/cpu:0"):
if config.mode == 'train':
word_emb_mat = tf.get_variable("word_emb_mat", dtype='float', shape=[VW, dw], initializer=get_initializer(config.emb_mat))
else:
word_emb_mat = tf.get_variable("word_emb_mat", shape=[VW, dw], dtype='float')
if config.use_glove_for_unk:
word_emb_mat = tf.concat(0, [word_emb_mat, self.new_emb_mat])
with tf.name_scope("word"):
Ax = tf.nn.embedding_lookup(word_emb_mat, self.x) # [N, M, JX, d]
Aq = tf.nn.embedding_lookup(word_emb_mat, self.q) # [N, JQ, d]
self.tensor_dict['x'] = Ax
self.tensor_dict['q'] = Aq
if config.use_char_emb:
xx = tf.concat(3, [xx, Ax]) # [N, M, JX, di]
qq = tf.concat(2, [qq, Aq]) # [N, JQ, di]
else:
xx = Ax
qq = Aq
# highway network
if config.highway:
with tf.variable_scope("highway"):
xx = highway_network(xx, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train)
tf.get_variable_scope().reuse_variables()
qq = highway_network(qq, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train)
self.tensor_dict['xx'] = xx
self.tensor_dict['qq'] = qq
x_len = tf.reduce_sum(tf.cast(self.x_mask, 'int32'), 2) # [N, M]
q_len = tf.reduce_sum(tf.cast(self.q_mask, 'int32'), 1) # [N]
with tf.variable_scope("prepro"):
(fw_u, bw_u), ((_, fw_u_f), (_, bw_u_f)) = bidirectional_dynamic_rnn(self.get_drnncell(), self.get_drnncell(), qq, q_len, dtype='float', scope='u1') # [N, J, d], [N, d]
u = tf.concat(2, [fw_u, bw_u])
if config.share_lstm_weights:
tf.get_variable_scope().reuse_variables()
(fw_h, bw_h), _ = bidirectional_dynamic_rnn(self.get_rnncell(), self.get_rnncell(), xx, x_len, dtype='float', scope='u1') # [N, M, JX, 2d]
h = tf.concat(3, [fw_h, bw_h]) # [N, M, JX, 2d]
else:
(fw_h, bw_h), _ = bidirectional_dynamic_rnn(self.get_rnncell(), self.get_rnncell(), xx, x_len, dtype='float', scope='h1') # [N, M, JX, 2d]
h = tf.concat(3, [fw_h, bw_h]) # [N, M, JX, 2d]
self.tensor_dict['u'] = u
self.tensor_dict['h'] = h
with tf.variable_scope("main"):
if config.dynamic_att:
p0 = h
print("M : ", M)
u = tf.reshape(tf.tile(tf.expand_dims(u, 1), [1, M, 1, 1]), [N * M, JQ, 2 * d])
q_mask = tf.reshape(tf.tile(tf.expand_dims(self.q_mask, 1), [1, M, 1]), [N * M, JQ])
first_cell_fw = AttentionCell(self.get_rnncell(), u, mask=q_mask, mapper='sim',
input_keep_prob=self.config.input_keep_prob, is_train=self.is_train)
first_cell_bw = AttentionCell(self.get_rnncell(), u, mask=q_mask, mapper='sim',
input_keep_prob=self.config.input_keep_prob, is_train=self.is_train)
else:
print("u_shape :", u.get_shape().as_list(), N, M)
r = inference(config, h, u, d) #[N, M, 2*d]
#r = np.ones((N, 2*d), dtype=np.float32)
p0 = attention_layer(config, self.is_train, h, u, r, h_mask=self.x_mask, u_mask=self.q_mask, scope="p0", tensor_dict=self.tensor_dict)
first_cell_fw = self.get_drnncell()
first_cell_bw = self.get_drnncell()
(fw_g0, bw_g0), _ = bidirectional_dynamic_rnn(first_cell_fw, first_cell_bw, p0, x_len, dtype='float', scope='g0') # [N, M, JX, 2d]
g0 = tf.concat(3, [fw_g0, bw_g0])
if config.dynamic_att:
first_cell_fw = AttentionCell(self.get_rnncell(), u, mask=q_mask, mapper='sim',
input_keep_prob=self.config.input_keep_prob, is_train=self.is_train)
first_cell_bw = AttentionCell(self.get_rnncell(), u, mask=q_mask, mapper='sim',
input_keep_prob=self.config.input_keep_prob, is_train=self.is_train)
else:
first_cell_fw = self.get_drnncell()
first_cell_bw = self.get_drnncell()
(fw_g1, bw_g1), _ = bidirectional_dynamic_rnn(first_cell_fw, first_cell_bw, g0, x_len, dtype='float', scope='g1') # [N, M, JX, 2d]
g1 = tf.concat(3, [fw_g1, bw_g1])
logits = get_logits([g1, p0], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob,
mask=self.x_mask, is_train=self.is_train, func=config.answer_func, scope='logits1')
a1i = softsel(tf.reshape(g1, [N, M * JX, 2 * d]), tf.reshape(logits, [N, M * JX]))
a1i = tf.tile(tf.expand_dims(tf.expand_dims(a1i, 1), 1), [1, M, JX, 1])
(fw_g2, bw_g2), _ = bidirectional_dynamic_rnn(self.get_drnncell(), self.get_drnncell(), tf.concat(3, [p0, g1, a1i, g1 * a1i]),
x_len, dtype='float', scope='g2') # [N, M, JX, 2d]
g2 = tf.concat(3, [fw_g2, bw_g2])
logits2 = get_logits([g2, p0], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob,
mask=self.x_mask,
is_train=self.is_train, func=config.answer_func, scope='logits2')
flat_logits = tf.reshape(logits, [-1, M * JX])
flat_yp = tf.nn.softmax(flat_logits) # [-1, M*JX]
yp = tf.reshape(flat_yp, [-1, M, JX])
flat_logits2 = tf.reshape(logits2, [-1, M * JX])
flat_yp2 = tf.nn.softmax(flat_logits2)
yp2 = tf.reshape(flat_yp2, [-1, M, JX])
print("M 2: ", M)
self.tensor_dict['g1'] = g1
self.tensor_dict['g2'] = g2
self.logits = flat_logits
self.logits2 = flat_logits2
self.yp = yp
self.yp2 = yp2
def get_rnncell(self):
    """Build a plain LSTM cell sized by ``config.hidden_size`` (no dropout)."""
    return BasicLSTMCell(self.config.hidden_size, state_is_tuple=True)
def get_drnncell(self):
    """Build an LSTM cell with switchable input dropout (active only while training)."""
    base_cell = BasicLSTMCell(self.config.hidden_size, state_is_tuple=True)
    return SwitchableDropoutWrapper(
        base_cell, self.is_train, input_keep_prob=self.config.input_keep_prob)
def _build_loss(self):
    """Build the start/end answer-position cross-entropy losses and combine
    everything registered in the 'losses' collection into self.loss."""
    config = self.config
    JX = tf.shape(self.x)[2]
    M = tf.shape(self.x)[1]
    JQ = tf.shape(self.q)[1]
    # 1.0 for examples whose question has at least one real token, else 0.0.
    loss_mask = tf.reduce_max(tf.cast(self.q_mask, 'float'), 1)
    # Start-position loss over the flattened [M * JX] position distribution.
    losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=tf.cast(tf.reshape(self.y, [-1, M * JX]), 'float'))
    ce_loss = tf.reduce_mean(loss_mask * losses)
    tf.add_to_collection('losses', ce_loss)
    # NOTE(review): unlike ce_loss, the end-position loss below is not
    # multiplied by loss_mask — confirm this asymmetry is intentional.
    ce_loss2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits2, labels=tf.cast(tf.reshape(self.y2, [-1, M * JX]), 'float')))
    tf.add_to_collection("losses", ce_loss2)
    # Total loss = sum of all losses registered under this model's scope.
    self.loss = tf.add_n(tf.get_collection('losses', scope=self.scope), name='loss')
    tf.summary.scalar(self.loss.op.name, self.loss)
    tf.add_to_collection('ema/scalar', self.loss)
def _build_ema(self):
    """Track exponential moving averages of the tensors registered under the
    'ema/scalar' and 'ema/vector' collections and summarize the averages;
    self.loss is re-wrapped so evaluating it updates the averages."""
    self.ema = tf.train.ExponentialMovingAverage(self.config.decay)
    tracked = tf.get_collection("ema/scalar", scope=self.scope) + \
        tf.get_collection("ema/vector", scope=self.scope)
    ema_op = self.ema.apply(tracked)
    # Emit one scalar summary per averaged tensor (scalars first, then vectors).
    for tracked_tensor in tracked:
        averaged = self.ema.average(tracked_tensor)
        tf.summary.scalar(averaged.op.name, averaged)
    with tf.control_dependencies([ema_op]):
        self.loss = tf.identity(self.loss)
def _build_var_ema(self):
    """Maintain moving averages over every trainable variable; evaluating
    self.loss afterwards also updates those averages."""
    self.var_ema = tf.train.ExponentialMovingAverage(self.config.var_decay)
    apply_op = self.var_ema.apply(tf.trainable_variables())
    with tf.control_dependencies([apply_op]):
        self.loss = tf.identity(self.loss)
def get_loss(self):
    """Return the total loss tensor assembled by _build_loss."""
    return self.loss
def get_yp(self):
    """Return the [-1, M, JX] softmax over answer start positions."""
    return self.yp
def get_yp2(self):
    """Return the [-1, M, JX] softmax over answer end positions."""
    return self.yp2
def get_global_step(self):
    """Return the model's global step variable (created elsewhere in this class)."""
    return self.global_step
def get_var_list(self):
    """Return the list of variables this model trains (set elsewhere in this class)."""
    return self.var_list
def get_feed_dict(self, batch, is_train, supervised=True):
    """Convert a DataSet batch into a feed dict of padded numpy arrays.

    Words/chars are mapped to ids via the batch's shared vocabularies; when
    supervised, one randomly chosen answer span per example is encoded as
    boolean start (y) and end (y2) position tensors.
    """
    assert isinstance(batch, DataSet)
    config = self.config
    N, M, JX, JQ, VW, VC, d, W = \
        config.batch_size, config.max_num_sents, config.max_sent_size, \
        config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.hidden_size, config.max_word_size
    feed_dict = {}
    if config.len_opt:
        """
        Note that this optimization results in variable GPU RAM usage (i.e. can cause OOM in the middle of training.)
        First test without len_opt and make sure no OOM, and use len_opt
        """
        # Shrink the sentence/question axes to this batch's actual maxima.
        if sum(len(sent) for para in batch.data['x'] for sent in para) == 0:
            new_JX = 1
        else:
            new_JX = max(len(sent) for para in batch.data['x'] for sent in para)
        JX = min(JX, new_JX)
        if sum(len(ques) for ques in batch.data['q']) == 0:
            new_JQ = 1
        else:
            new_JQ = max(len(ques) for ques in batch.data['q'])
        JQ = min(JQ, new_JQ)
    if config.cpu_opt:
        # Likewise shrink the number-of-sentences axis.
        if sum(len(para) for para in batch.data['x']) == 0:
            new_M = 1
        else:
            new_M = max(len(para) for para in batch.data['x'])
        M = min(M, new_M)
    # Zero-padded id/mask arrays; zeros double as padding positions.
    x = np.zeros([N, M, JX], dtype='int32')
    cx = np.zeros([N, M, JX, W], dtype='int32')
    x_mask = np.zeros([N, M, JX], dtype='bool')
    q = np.zeros([N, JQ], dtype='int32')
    cq = np.zeros([N, JQ, W], dtype='int32')
    q_mask = np.zeros([N, JQ], dtype='bool')
    feed_dict[self.x] = x
    feed_dict[self.x_mask] = x_mask
    feed_dict[self.cx] = cx
    feed_dict[self.q] = q
    feed_dict[self.cq] = cq
    feed_dict[self.q_mask] = q_mask
    feed_dict[self.is_train] = is_train
    if config.use_glove_for_unk:
        feed_dict[self.new_emb_mat] = batch.shared['new_emb_mat']
    X = batch.data['x']
    CX = batch.data['cx']
    if supervised:
        y = np.zeros([N, M, JX], dtype='bool')
        y2 = np.zeros([N, M, JX], dtype='bool')
        feed_dict[self.y] = y
        feed_dict[self.y2] = y2
        for i, (xi, cxi, yi) in enumerate(zip(X, CX, batch.data['y'])):
            # Each example may have several gold spans; pick one at random.
            start_idx, stop_idx = random.choice(yi)
            j, k = start_idx
            j2, k2 = stop_idx
            if config.single:
                # Keep only the sentence containing the answer.
                X[i] = [xi[j]]
                CX[i] = [cxi[j]]
                j, j2 = 0, 0
            if config.squash:
                # Collapse all sentences into one; re-base word offsets.
                offset = sum(map(len, xi[:j]))
                j, k = 0, k + offset
                offset = sum(map(len, xi[:j2]))
                j2, k2 = 0, k2 + offset
            y[i, j, k] = True
            # stop_idx is exclusive, hence k2-1 marks the last answer word.
            y2[i, j2, k2-1] = True
    def _get_word(word):
        # Try several casings; fall back to the glove-extension vocab, then UNK (1).
        d = batch.shared['word2idx']
        for each in (word, word.lower(), word.capitalize(), word.upper()):
            if each in d:
                return d[each]
        if config.use_glove_for_unk:
            d2 = batch.shared['new_word2idx']
            for each in (word, word.lower(), word.capitalize(), word.upper()):
                if each in d2:
                    return d2[each] + len(d)
        return 1
    def _get_char(char):
        # Char id, or UNK (1) when out of vocabulary.
        d = batch.shared['char2idx']
        if char in d:
            return d[char]
        return 1
    # Fill context word ids and mask, truncating to the configured maxima.
    for i, xi in enumerate(X):
        if self.config.squash:
            xi = [list(itertools.chain(*xi))]
        for j, xij in enumerate(xi):
            if j == config.max_num_sents:
                break
            for k, xijk in enumerate(xij):
                if k == config.max_sent_size:
                    break
                each = _get_word(xijk)
                assert isinstance(each, int), each
                x[i, j, k] = each
                x_mask[i, j, k] = True
    # Fill context character ids.
    for i, cxi in enumerate(CX):
        if self.config.squash:
            cxi = [list(itertools.chain(*cxi))]
        for j, cxij in enumerate(cxi):
            if j == config.max_num_sents:
                break
            for k, cxijk in enumerate(cxij):
                if k == config.max_sent_size:
                    break
                for l, cxijkl in enumerate(cxijk):
                    if l == config.max_word_size:
                        break
                    cx[i, j, k, l] = _get_char(cxijkl)
    # Fill question word/char ids and mask.
    for i, qi in enumerate(batch.data['q']):
        for j, qij in enumerate(qi):
            q[i, j] = _get_word(qij)
            q_mask[i, j] = True
    for i, cqi in enumerate(batch.data['cq']):
        for j, cqij in enumerate(cqi):
            for k, cqijk in enumerate(cqij):
                cq[i, j, k] = _get_char(cqijk)
                if k + 1 == config.max_word_size:
                    break
    return feed_dict
def bi_attention(config, is_train, h, u, h_mask=None, u_mask=None, scope=None, tensor_dict=None):
    """BiDAF-style bi-directional attention between context h and query u.

    Returns (u_a, h_a): the query-aware context representation and the
    max-attended context vector tiled over context positions.  When
    tensor_dict is given, attention maps and this scope's variables are
    recorded in it for inspection.
    """
    with tf.variable_scope(scope or "bi_attention"):
        JX = tf.shape(h)[2]
        M = tf.shape(h)[1]
        JQ = tf.shape(u)[1]
        # Tile h and u against each other so every (context pos, query pos)
        # pair can be scored.
        h_aug = tf.tile(tf.expand_dims(h, 3), [1, 1, 1, JQ, 1])
        u_aug = tf.tile(tf.expand_dims(tf.expand_dims(u, 1), 1), [1, M, JX, 1, 1])
        if h_mask is None:
            hu_mask = None
        else:
            # A pair is valid only when both its context and query tokens are real.
            h_mask_aug = tf.tile(tf.expand_dims(h_mask, 3), [1, 1, 1, JQ])
            u_mask_aug = tf.tile(tf.expand_dims(tf.expand_dims(u_mask, 1), 1), [1, M, JX, 1])
            hu_mask = h_mask_aug & u_mask_aug
        u_logits = get_logits([h_aug, u_aug], None, True, wd=config.wd, mask=hu_mask,
                              is_train=is_train, func=config.logit_func, scope='u_logits')  # [N, M, JX, JQ]
        u_a = softsel(u_aug, u_logits)  # [N, M, JX, d]
        h_a = softsel(h, tf.reduce_max(u_logits, 3))  # [N, M, d]
        h_a = tf.tile(tf.expand_dims(h_a, 2), [1, 1, JX, 1])
        if tensor_dict is not None:
            a_u = tf.nn.softmax(u_logits)  # [N, M, JX, JQ]
            a_h = tf.nn.softmax(tf.reduce_max(u_logits, 3))
            tensor_dict['a_u'] = a_u
            tensor_dict['a_h'] = a_h
            # Also expose this scope's variables (tf.GraphKeys.VARIABLES is the
            # pre-TF-1.x name of GLOBAL_VARIABLES).
            variables = tf.get_collection(tf.GraphKeys.VARIABLES, scope=tf.get_variable_scope().name)
            for var in variables:
                tensor_dict[var.name] = var
        return u_a, h_a
def attention_layer(config, is_train, h, u, r, h_mask=None, u_mask=None, scope=None, tensor_dict=None):
    """Fuse context h, query u and memory r into the attended matrix p0.

    Depending on config.c2q_att / config.q2c_att, the context-to-query and
    query-to-context attention terms are included or replaced by a mean-pooled
    query vector.
    """
    with tf.variable_scope(scope or "attention_layer"):
        JX = config.max_sent_size
        M = config.max_num_sents
        JQ = config.max_ques_size
        # Broadcast the per-sentence memory r over word positions.
        r_expand = tf.tile(tf.expand_dims(r, 2), [1, 1, JX, 1])
        if config.q2c_att or config.c2q_att:
            u_a, h_a = bi_attention(config, is_train, h, u, h_mask=h_mask, u_mask=u_mask, tensor_dict=tensor_dict)
        if not config.c2q_att:
            # Without c2q attention, use the mean query vector everywhere.
            u_a = tf.tile(tf.expand_dims(tf.expand_dims(tf.reduce_mean(u, 1), 1), 1), [1, M, JX, 1])
        print("r_expand: ", r_expand.get_shape())
        print("h: ", h.get_shape())
        if config.q2c_att:
            p0 = tf.concat(3, [h, u_a, h * u_a, h * h_a, h * r_expand])
        else:
            p0 = tf.concat(3, [h, u_a, h * u_a])
        return p0
def get_attention(q_vec, prev_memory, fact_vec, reuse):
    """Use question vector and previous memory to create scalar attention for current fact"""
    # NOTE(review): the outer scope hard-codes reuse=None, so the `reuse`
    # argument only affects the two inner fully-connected scopes — confirm
    # this is intentional.
    with tf.variable_scope("attention", reuse=None):
        #memory = tf.reduce_sum(prev_memory, 1)
        memory = prev_memory
        # DMN-style interaction features between fact, question and memory.
        features = [fact_vec*q_vec,
                    fact_vec*memory,
                    tf.abs(fact_vec - q_vec),
                    tf.abs(fact_vec - memory)]
        # NOTE(review): as_list is not called (missing parentheses), so these
        # print the bound method rather than the shape list.
        print("memory: ", memory.get_shape().as_list)
        print("fact_vec: ", fact_vec.get_shape().as_list)
        print("q_vec: ", q_vec.get_shape().as_list)
        feature_vec = tf.concat(1, features)
        #feature_vec = tf.pack(features, axis=1)
        # Two-layer scorer: tanh hidden layer, then a single linear logit.
        with tf.variable_scope('first_fnn', reuse=reuse) as scope:
            #tf.get_variable('weights')
            attention = tf.contrib.layers.fully_connected(feature_vec,
                                                          60,
                                                          activation_fn=tf.nn.tanh)
            scope.reuse_variables()
        with tf.variable_scope('second_fnn', reuse=reuse) as scope:
            #tf.get_variable('weights')
            attention = tf.contrib.layers.fully_connected(attention,
                                                          1,
                                                          activation_fn=None)
            scope.reuse_variables()
    return attention
def generate_episode(sattentions, memory, q_vec, fact_vecs, hop_index, N, d):
    """Generate episode by applying attention to current fact vectors through a modified GRU"""
    # tf.unpack/tf.pack are the pre-TF-1.0 names of unstack/stack.
    memoryu = tf.unpack(memory, axis=1)
    factsu = tf.unpack(fact_vecs, axis=1)
    temp = list(zip(factsu, memoryu))
    # One scalar attention per fact; variables are reused after the first fact
    # of the first hop (bool(hop_index) or bool(i)).
    attentions = [tf.squeeze(
        get_attention(q_vec, fv[1], fv[0], bool(hop_index) or bool(i)), [1])
        for i, fv in enumerate(temp)]
    #fv : [N, 2*d]
    attentions = tf.transpose(tf.pack(attentions))
    # Keep raw (pre-softmax) attentions for later inspection.
    sattentions.append(attentions)
    print("attentions: ", attentions.get_shape())
    attentions = tf.nn.softmax(attentions)
    print("attentions: ", attentions.get_shape())
    attentions = tf.expand_dims(attentions, -1) #[N, 1]
    reuse = True if hop_index > 0 else False
    # concatenate fact vectors and attentions for input into attGRU
    gru_inputs = tf.concat(2, [fact_vecs, attentions]) #[N, M, 2d+1]
    #gru_inputs = tf.expand_dims(gru_inputs, 1)
    print("gru: ", gru_inputs.get_shape())
    # NOTE(review): sequence_length is fixed to 1 per example — confirm the
    # attention GRU is meant to run a single step here.
    input_len = np.ones((N, ), dtype=np.int32)
    with tf.variable_scope('attention_gru', reuse=reuse):
        epicout, episode = tf.nn.dynamic_rnn(AttentionGRUCell(2*d), gru_inputs, dtype=np.float32, sequence_length=input_len)
    return epicout
def inference(config, h, u, d):
    """Performs inference on the DMN model"""
    # Summarize the query and each context sentence, then iterate a fixed
    # number of memory hops, each producing an episode that updates memory.
    JX = tf.shape(h)[2]
    #N = tf.shape(h)[0]
    M = config.max_num_sents
    JQ = tf.shape(u)[1]
    N = config.batch_size
    print("u : ", u.get_shape().as_list())
    print("h : ", h.get_shape())
    # Question representation: sum over query positions.
    q_vec = tf.reduce_sum(u, 1) #[N, 2*d]
    q_vec2 = tf.tile(tf.expand_dims(q_vec, 1), [1, M, 1])
    print("q : ", q_vec.get_shape().as_list())
    #fact_vecs = tf.reduce_sum(tf.reduce_sum(h, 2), 1) #[N, 2*d]
    # Fact representation: sum each sentence's hidden states over words.
    fact_vecs = tf.reduce_sum(h, 2) #[N, M, 2d]
    sattentions = []
    with tf.variable_scope("memory", initializer=tf.contrib.layers.xavier_initializer()):
        # memory module
        # generate n_hops episodes
        prev_memory = q_vec2 #[N, M, 2*d]
        #prev_memory = q_vec #[N, 2*d]
        num_hops = 3
        for i in range(num_hops):
            # get a new episode
            #print '==> generating episode', i
            episode = generate_episode(sattentions, prev_memory, q_vec, fact_vecs, i, N, d) #[N, M, 2*d]
            # untied weights for memory update
            with tf.variable_scope("hop_%d" % i):
                prev_memory = tf.contrib.layers.fully_connected(tf.concat(2, [prev_memory, episode, q_vec2]),
                                                                2*d,
                                                                activation_fn=tf.nn.relu)
        output = prev_memory
    return output
| |
#! /usr/bin/python
# Copyright 2019 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# gen_overlay_widgets.py:
# Code generation for overlay widgets. Should be run when the widgets declaration file,
# overlay_widgets.json, is changed.
# NOTE: don't run this script directly. Run scripts/run_code_generation.py.
from datetime import date
import json
import sys
# Generated C++ file and the JSON widget-declaration file it is built from.
out_file = 'Overlay_autogen.cpp'
in_file = 'overlay_widgets.json'
# Skeleton of the generated .cpp; filled in by main().  Doubled braces are
# literal braces in str.format output.
template_out_file = u"""// GENERATED FILE - DO NOT EDIT.
// Generated by {script_name} using data from {input_file_name}.
//
// Copyright {copyright_year} The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// {out_file_name}:
// Autogenerated overlay widget declarations.
#include "libANGLE/renderer/driver_utils.h"
#include "libANGLE/Overlay.h"
#include "libANGLE/OverlayWidgets.h"
#include "libANGLE/Overlay_font_autogen.h"
namespace gl
{{
using namespace overlay;
namespace
{{
int GetFontSize(int fontSize, bool largeFont)
{{
if (largeFont && fontSize > 0)
{{
return fontSize - 1;
}}
return fontSize;
}}
}} // anonymous namespace
void Overlay::initOverlayWidgets()
{{
const bool kLargeFont = rx::IsAndroid();
{init_widgets}
}}
}} // namespace gl
"""
# Per-widget initialization snippet; filled in by generate_widget_init_helper().
# {subwidget} is '' for a top-level widget or 'description.' for a graph's
# description sub-widget.
template_init_widget = u"""{{
const int32_t fontSize = GetFontSize({font_size}, kLargeFont);
const int32_t offsetX = {offset_x};
const int32_t offsetY = {offset_y};
const int32_t width = {width};
const int32_t height = {height};
widget->{subwidget}type = WidgetType::{type};
widget->{subwidget}fontSize = fontSize;
widget->{subwidget}coords[0] = {coord0};
widget->{subwidget}coords[1] = {coord1};
widget->{subwidget}coords[2] = {coord2};
widget->{subwidget}coords[3] = {coord3};
widget->{subwidget}color[0] = {color_r};
widget->{subwidget}color[1] = {color_g};
widget->{subwidget}color[2] = {color_b};
widget->{subwidget}color[3] = {color_a};
}}
"""
def extract_type_and_constructor(properties):
    """Split the 'type' property into (type name, constructor expression).

    A plain type like 'Text' yields ('Text', 'Text'); a constructor call like
    'RunningGraph(10)' yields ('RunningGraph', 'RunningGraph(10)').
    """
    constructor = properties['type']
    paren_index = constructor.find('(')
    if paren_index < 0:
        return constructor, constructor
    return constructor[:paren_index], constructor
def get_font_size_constant(properties):
    """Map the widget's 'font' property to its kFontLayer* constant name."""
    return ''.join(('kFontLayer', properties['font'].capitalize()))
def is_graph_type(type):
    """Return True for widget types rendered as graphs rather than text."""
    return type in ('RunningGraph', 'RunningHistogram')
def is_text_type(type):
    """Return True for widget types rendered as text (anything not a graph)."""
    return type not in ('RunningGraph', 'RunningHistogram')
class OverlayWidget:
    """One overlay widget parsed from the JSON declarations.

    Graph-type widgets recursively contain an anonymous text sub-widget
    (self.description) that labels them.
    """

    def __init__(self, properties, is_graph_description=False):
        # Only top-level widgets have a name; graph descriptions are anonymous.
        if not is_graph_description:
            self.name = properties['name']
        # Bug fix: the type must be extracted unconditionally.  extract_common()
        # below dispatches on self.type, and description sub-widgets (whose
        # 'type' is forced to 'Text' before the recursive call) previously
        # never had self.type set, raising AttributeError.
        self.type, self.constructor = extract_type_and_constructor(properties)
        self.extract_common(properties)
        if is_graph_type(self.type):
            description_properties = properties['description']
            description_properties['type'] = 'Text'
            self.description = OverlayWidget(description_properties, True)

    def extract_common(self, properties):
        """Extract fields shared by all widgets plus the type-specific ones."""
        self.color = properties['color']
        self.coords = properties['coords']
        if is_graph_type(self.type):
            self.bar_width = properties['bar_width']
            self.height = properties['height']
        else:
            self.font = get_font_size_constant(properties)
            self.length = properties['length']
        # Filled in later by set_alignment_flags().
        self.negative_alignment = [False, False]
def is_negative_coord(coords, axis, widgets_so_far):
    """Return whether the coordinate on `axis` anchors to the negative
    (right/bottom) side of the screen.

    coords[axis] is either a number (its sign decides) or a string reference
    "other_widget.edge.mode", in which case the referenced widget's already
    computed alignment is inherited.  NOTE: `unicode` makes this Python-2 only.
    """
    if isinstance(coords[axis], unicode):
        coord_split = coords[axis].split('.')
        # The coordinate is in the form other_widget.edge.mode
        # We simply need to know if other_widget's coordinate is negative or not.
        return widgets_so_far[coord_split[0]].negative_alignment[axis]
    return coords[axis] < 0
def set_alignment_flags(overlay_widget, widgets_so_far):
    """Record per axis whether the widget anchors to the negative
    (right/bottom) screen side; recurses into a graph's description."""
    for axis in (0, 1):
        overlay_widget.negative_alignment[axis] = is_negative_coord(
            overlay_widget.coords, axis, widgets_so_far)
    if is_graph_type(overlay_widget.type):
        set_alignment_flags(overlay_widget.description, widgets_so_far)
def get_offset_helper(widget, axis, smaller_coord_side):
    """Resolve widget.coords[axis] to (offset expression, is-smaller-side-edge).

    NOTE: `unicode` makes this Python-2 only.
    """
    # Assume axis is X. This function returns two values:
    # - An offset where the bounding box is placed at,
    # - Whether this offset is for the left or right edge.
    #
    # The input coordinate (widget.coord[axis]) is either:
    #
    # - a number: in this case, the offset is that number, and its sign determines whether this refers to the left or right edge of the bounding box.
    # - other_widget.edge.mode: this has multiple possibilities:
    #   * edge=left, mode=align: the offset is other_widget.left, the edge is left.
    #   * edge=left, mode=adjacent: the offset is other_widget.left, the edge is right.
    #   * edge=right, mode=align: the offset is other_widget.right, the edge is right.
    #   * edge=right, mode=adjacent: the offset is other_widget.right, the edge is left.
    #
    # The case for the Y axis is similar, with the edge values being top or bottom.
    coord = widget.coords[axis]
    if not isinstance(coord, unicode):
        is_left = coord >= 0
        return coord, is_left
    coord_split = coord.split('.')
    is_left = coord_split[1] == smaller_coord_side
    is_align = coord_split[2] == 'align'
    # Reference the other widget's already-initialized coords at runtime.
    other_widget_coords = 'mState.mOverlayWidgets[WidgetId::' + coord_split[0] + ']->coords'
    other_widget_coord_index = axis + (0 if is_left else 2)
    offset = other_widget_coords + '[' + str(other_widget_coord_index) + ']'
    return offset, is_left == is_align
def get_offset_x(widget):
    """Resolve the widget's X offset; returns (offset expression, is_left_edge)."""
    return get_offset_helper(widget, 0, 'left')
def get_offset_y(widget):
    """Resolve the widget's Y offset; returns (offset expression, is_top_edge)."""
    return get_offset_helper(widget, 1, 'top')
def get_bounding_box_coords(offset, width, offset_is_left, is_left_aligned):
    """Build C++ expressions for the two bounding-box edges on one axis.

    See comment in generate_widget_init_helper. This implements:

    - offset_is_left && is_left_aligned:   [offset, offset + width]
    - offset_is_left && !is_left_aligned:  [offset, std::min(offset + width, -1)]
    - !offset_is_left && is_left_aligned:  [std::max(1, offset - width), offset]
    - !offset_is_left && !is_left_aligned: [offset - width, offset]
    """
    if offset_is_left:
        coord_left = offset
        coord_right = offset + ' + ' + width
        if not is_left_aligned:
            # Right-aligned boxes must stay on the negative side of the screen.
            coord_right = 'std::min(' + coord_right + ', -1)'
    else:
        coord_left = offset + ' - ' + width
        coord_right = offset
        if is_left_aligned:
            # Left-aligned boxes must stay on the positive side of the screen.
            coord_left = 'std::max(' + coord_left + ', 1)'
    return coord_left, coord_right
def generate_widget_init_helper(widget, is_graph_description=False):
    """Produce the template_init_widget text for one widget (or a graph's
    description sub-widget when is_graph_description is True)."""
    font_size = '0'
    # Common attributes
    color = [channel / 255.0 for channel in widget.color]
    offset_x, offset_x_is_left = get_offset_x(widget)
    offset_y, offset_y_is_top = get_offset_y(widget)
    if is_text_type(widget.type):
        # Attributes derived from text properties
        font_size = widget.font
        width = str(widget.length) + ' * kFontGlyphWidths[fontSize]'
        height = 'kFontGlyphHeights[fontSize]'
    else:
        # Attributes derived from graph properties
        width = str(widget.bar_width) + ' * static_cast<uint32_t>(widget->runningValues.size())'
        height = widget.height
    is_left_aligned = not widget.negative_alignment[0]
    is_top_aligned = not widget.negative_alignment[1]
    # We have offset_x, offset_y, width and height which together determine the bounding box.  If
    # offset_x_is_left, the bounding box X would be in [offset_x, offset_x + width], otherwise it
    # would be in [offset_x - width, offset_x].  Similarly for y.  Since we use negative values to
    # mean aligned to the right side of the screen, we need to make sure that:
    #
    # - if left aligned: offset_x - width is at minimum 1
    # - if right aligned: offset_x + width is at maximum -1
    #
    # We therefore have the following combinations for the X axis:
    #
    # - offset_x_is_left && is_left_aligned:   [offset_x, offset_x + width]
    # - offset_x_is_left && !is_left_aligned:  [offset_x, std::min(offset_x + width, -1)]
    # - !offset_x_is_left && is_left_aligned:  [std::max(1, offset_x - width), offset_x]
    # - !offset_x_is_left && !is_left_aligned: [offset_x - width, offset_x]
    #
    # Similarly for y.
    coord0, coord2 = get_bounding_box_coords('offsetX', 'width', offset_x_is_left, is_left_aligned)
    coord1, coord3 = get_bounding_box_coords('offsetY', 'height', offset_y_is_top, is_top_aligned)
    return template_init_widget.format(
        subwidget='description.' if is_graph_description else '',
        offset_x=offset_x,
        offset_y=offset_y,
        width=width,
        height=height,
        type=widget.type,
        font_size=font_size,
        coord0=coord0,
        coord1=coord1,
        coord2=coord2,
        coord3=coord3,
        color_r=color[0],
        color_g=color[1],
        color_b=color[2],
        color_a=color[3])
def generate_widget_init(widget):
    """Generate the full C++ initialization scope for one widget, including a
    graph widget's description sub-widget."""
    widget_init = '{\n' + widget.type + ' *widget = new ' + widget.constructor + ';\n'
    widget_init += generate_widget_init_helper(widget)
    widget_init += 'mState.mOverlayWidgets[WidgetId::' + widget.name + '].reset(widget);\n'
    if is_graph_type(widget.type):
        widget_init += generate_widget_init_helper(widget.description, True)
    widget_init += '}\n'
    return widget_init
def main():
    """Generate Overlay_autogen.cpp from overlay_widgets.json.

    When invoked with a single 'inputs' or 'outputs' argument, print the
    corresponding file list for the code-generation driver and exit.
    """
    if len(sys.argv) == 2 and sys.argv[1] == 'inputs':
        print(in_file)
        return
    if len(sys.argv) == 2 and sys.argv[1] == 'outputs':
        print(out_file)
        return
    with open(in_file) as fin:
        # json.load reads straight from the file object (was loads(fin.read())).
        layout = json.load(fin)
    # Read the layouts from the json file and determine alignment of widgets (as they can refer to
    # other widgets.
    overlay_widgets = {}
    for widget_properties in layout['widgets']:
        widget = OverlayWidget(widget_properties)
        overlay_widgets[widget.name] = widget
        set_alignment_flags(widget, overlay_widgets)
    # Go over the widgets again and generate initialization code. Note that we need to iterate over
    # the widgets in order, so we can't use the overlay_widgets dictionary for iteration.
    init_widgets = []
    for widget_properties in layout['widgets']:
        init_widgets.append(generate_widget_init(overlay_widgets[widget_properties['name']]))
    # The with-statement closes the file; the previous explicit close() inside
    # the block was redundant.
    with open(out_file, 'w') as outfile:
        outfile.write(
            template_out_file.format(
                script_name=__file__,
                copyright_year=date.today().year,
                input_file_name=in_file,
                out_file_name=out_file,
                init_widgets='\n'.join(init_widgets)))
| |
"""
gene.py realize the methods that are related to system recommendation.
@author: Bowen
"""
from system.models import gene, reaction, compound, reaction_compound, compound_gene, pathway, pathway_compound, organism
from system.fasta_reader import parse_fasta_str
from elasticsearch import Elasticsearch
import traceback
import urllib2
import json
from django.db.models import Q
def search_compound(keyword):
    """
    search compound based on the keyword
    @param keyword: the keyword that the user typed. Which would be used in search
    @type keyword: str
    @return: return a list that contains searched compounds
    @rtype: list
    """
    es_client = Elasticsearch()
    raw_hits = fuzzy_search_compound(es_client, keyword)
    return format_fuzzy_result(raw_hits)
def fuzzy_search_compound(es, keyword):
    """
    fuzzy search compound based on the keyword with elasticsearch
    @param es: the elasticsearch object
    @param keyword: the search keyword
    @type es: Elasticsearch
    @type keyword: str
    @return a dict generated by the elasticsearch, which contains the search result
    @rtype: dict
    """
    # Return at most the first 20 fuzzy matches against the compound name.
    # NOTE(review): "fuzzy_like_this" is an Elasticsearch 1.x query type that
    # was removed in ES 2.0 — confirm the deployed ES version supports it.
    query_body = {
        "from" : 0,
        "size" : 20,
        "query" : {
            "fuzzy_like_this" : {
                "fields" : ["name"],
                "like_text" : keyword,
                "max_query_terms" : 20
            }
        }
    }
    result = es.search(index="biodesigners", doc_type="compounds", body=query_body)
    return result
def format_fuzzy_result(es_result):
    """
    format the es search result to front end processable format
    @param es_result: the es search result
    @type es_result: dict
    @return: the front end processable format, while will be like this::
        [{'compound_id': id, 'name': name},...]
    @rtype: list
    """
    hits = es_result['hits']['hits']
    # Keep only the id and name of each hit; an empty hit list yields [].
    return [
        {
            'compound_id': hit['_source']['compound_id'],
            'name': hit['_source']['name'],
        }
        for hit in hits
    ]
def get_gene_info(gid):
    """
    get gene information from the database
    @param gid: the gene id
    @type gid: str
    @return: (True, gene information dict) on success, (False, None) on failure
    @rtype: tuple
    """
    base_gene_url = 'http://www.ncbi.nlm.nih.gov/gene/'
    try:
        gene_obj = gene.objects.get(gene_id=gid)
        result = {
            'gene_id': gene_obj.gene_id,
            'name': gene_obj.name,
            'definition': gene_obj.definition,
            'organism_short': gene_obj.organism_short,
            'organism': gene_obj.organism,
            'gene_url' : base_gene_url + gene_obj.gene_id
        }
        return True, result
    except:
        # NOTE(review): bare except hides the failure kind (missing gene vs.
        # programming error); narrowing to gene.DoesNotExist would be safer.
        traceback.print_exc()
        return False, None
def get_compound_info(cid):
    """
    get a specific compound's information
    @param cid: compound id
    @type cid: str
    @return: a tuple of (whether the compound could be retrieved, its information dict or None)
    @rtype: tuple
    """
    try:
        compound_obj = compound.objects.get(compound_id=cid)
        result = {
            'compound_id' : compound_obj.compound_id,
            'name': compound_obj.name,
            # Nicknames are stored '_'-separated; show one per line.
            'nicknames' : compound_obj.nicknames.replace('_', '\n'),
            'formula' : compound_obj.formula,
            'exact_mass' : compound_obj.exact_mass,
            'mol_weight' : compound_obj.mol_mass
        }
        return True, result
    except:
        # NOTE(review): bare except also swallows unexpected errors; narrowing
        # to compound.DoesNotExist would be safer.
        traceback.print_exc()
        return False, None
class gene_graph:
    """
    gene graph, including calculation and generation of the gene & protein relation graph
    """
    def __init__(self, cid_list, ogm):
        """
        constructor for gene_graph class
        @param cid_list: compound id list, underscore-separated (e.g. "_C1_C2_")
        @type cid_list: str
        @param ogm: organisms, underscore-separated, or None for all organisms
        @type ogm: str
        """
        # Strip optional leading/trailing separators before splitting.
        if cid_list.startswith('_'):
            cid_list = cid_list[1:]
        if cid_list.endswith('_'):
            cid_list = cid_list[:-1]
        self.cid_list = cid_list.split('_')
        self.nodes = list()       # graph nodes: {'name': ..., 'id': ...}
        self.edges = list()       # graph edges: {'source': idx, 'target': idx, 'relation': name}
        self.index_dict = dict()  # node id -> index into self.nodes
        self.index = 0            # next node index to assign
        if ogm != None:
            if ogm.startswith('_'):
                ogm = ogm[1:]
            if ogm.endswith('_'):
                ogm = ogm[:-1]
            self.organisms = ogm.split('_')
        else:
            self.organisms = None
    def get_compound_object(self, cid):
        """
        get compound object by compound id
        @param cid: compound id
        @type cid: str
        @return: compound object or none if not found
        @rtype: compound
        """
        try:
            compound_obj = compound.objects.get(compound_id=cid)
            return compound_obj
        except:
            # NOTE(review): bare except also swallows unexpected DB errors.
            return None
    def retrive_gene_detain(self, gid):
        """
        get gene data (name, description, organism) from NCBI esummary
        @param gid: gene id
        @type gid: str
        @return: gene information in dict or none
        @rtype: dict
        """
        #get information from ncbi
        baseUrl = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=gene&retmode=json&version=2.0&id='
        try:
            req = urllib2.Request(baseUrl + gid)
            response = urllib2.urlopen(req)
            resStr = response.read()
            result = json.loads(resStr)
            infos = result['result'][gid]
            detail_info = dict()
            detail_info['name'] = infos['name']
            detail_info['definition'] = infos['description']
            detail_info['organism'] = infos['organism']['scientificname']
            return detail_info
        except:
            traceback.print_exc()
            return None
    def related_compound(self, cid):
        """
        find a compound's related compounds (sharing a pathway, optionally
        restricted to self.organisms)
        @param cid: compound id
        @type cid: str
        @return: list of related compound
        @rtype: list
        """
        compound_obj = self.get_compound_object(cid)
        if self.organisms != None:
            organism_pathway_id_list = pathway.objects.filter(organism_id__in=self.organisms).values_list('pathway_id', flat=True)
        else:
            # NOTE(review): this branch yields pathway objects rather than
            # pathway_id values like the branch above — confirm the
            # pathway_id__in filter below behaves as intended with objects.
            organism_pathway_id_list = pathway.objects.all()
        valued_pathway_id_list = pathway_compound.objects.filter(pathway_id__in=organism_pathway_id_list, compound=compound_obj)
        valued_compound_list = pathway_compound.objects.filter(Q(pathway_id__in=valued_pathway_id_list), ~Q(compound=compound_obj)).values_list('compound', flat=True)
        compound_list = compound.objects.filter(compound_id__in=valued_compound_list)
        return compound_list
    def create_node(self, name, id):
        """
        create a node (gene or compound) in the graph
        @param name: name for the node
        @param id: id for the node
        @type name : str
        @type id : str
        """
        node_info = {
            'name': name,
            'id': id
        }
        # NOTE(review): the node is appended before the duplicate check, so a
        # repeated id adds a second entry to self.nodes while keeping its
        # original index — confirm duplicates are acceptable here.
        self.nodes.append(node_info)
        if id in self.index_dict.keys():
            return True
        self.index_dict[id] = self.index
        self.index += 1
        return True
    def create_n_link(self, center_node, compound_obj):
        """
        create gene nodes related to compound_obj and link them to center_node
        @param center_node: id of the source node
        @type center_node: str
        @param compound_obj: compound object whose name labels the edges
        @type compound_obj: compound
        @return: the first related gene id
        """
        gene_list = self.search_gene(compound_obj)
        # NOTE(review): search_gene may return None or an empty list, in which
        # case gene_list[0] below raises — verify callers tolerate that.
        for gene_id in gene_list:
            try:
                gene_obj = gene.objects.get(gene_id=gene_id)
                if self.create_node(gene_obj.name, gene_obj.gene_id):
                    edge_info = {
                        'source' : self.index_dict[center_node],
                        'target' : self.index_dict[gene_obj.gene_id],
                        'relation' : compound_obj.name
                    }
                    self.edges.append(edge_info)
            except:
                traceback.print_exc()
                pass
        return gene_list[0]
    def get_or_create_gene(self, gid):
        """
        find gene in database, if found, return gene, or search in ncbi
        @param gid: gene id
        @type gid: str
        @return gene object (or None if NCBI lookup fails)
        @rtype: gene
        """
        #get in database
        try:
            gene_obj = gene.objects.get(gene_id=gid)
            return gene_obj
        except:
            #get from ncbi
            baseUrl = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=nuccore&rettype=fasta&id='
            req = urllib2.Request(baseUrl + gid)
            response = urllib2.urlopen(req)
            resStr = response.read()
            gene_dict = parse_fasta_str(resStr)
            # Save the first sequence that can be enriched with NCBI details.
            for gn in gene_dict.keys():
                gid = gn.split('|')[1]
                #get detail information
                new_gene_obj = gene(gene_id=gid)
                detail_info = self.retrive_gene_detain(gid)
                if detail_info == None:
                    continue
                new_gene_obj.name = detail_info['name']
                new_gene_obj.definition = detail_info['definition']
                new_gene_obj.organism = detail_info['organism']
                new_gene_obj.ntseq = gene_dict[gn]
                new_gene_obj.ntseq_length = len(gene_dict[gn])
                try:
                    new_gene_obj.save()
                    return new_gene_obj
                except:
                    pass
            return None
    def save_relation_to_db(self, geneIdList, compound_obj):
        """
        save relation between compound_obj and gene to database
        @param geneIdList: gene id in a list
        @type geneIdList: list
        @param compound_obj: compound object
        @type compound_obj: compound
        """
        #create new obj
        for gid in geneIdList:
            new_rela_obj = compound_gene(compound=compound_obj)
            gene_obj = self.get_or_create_gene(gid)
            if gene_obj == None:
                continue
            new_rela_obj.gene = gene_obj
            try:
                new_rela_obj.save()
            except:
                pass
    def search_gene(self, compound_obj):
        """
        find genes related to a compound (at most two); uses cached database
        relations first, then falls back to an NCBI search by compound name
        @param compound_obj: the compound object
        @type compound_obj: compound
        @return related genes (list of gene ids) or None on lookup failure
        @rtype: list
        """
        #search in database
        obj_list = compound_gene.objects.filter(compound=compound_obj)
        if len(obj_list) != 0:
            geneIdList = list()
            for obj in obj_list:
                geneIdList.append(obj.gene.gene_id)
            return geneIdList[:2]
        else:
            baseGeneFindUrl = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=gene&retmode=json&term='
            try:
                req = urllib2.Request(baseGeneFindUrl + compound_obj.name)
                response = urllib2.urlopen(req)
                resStr = response.read()
            except:
                traceback.print_exc()
                return None
            if len(resStr) == 0:
                return None
            result = json.loads(resStr)
            geneIdList = result['esearchresult']['idlist']
            # Cache the relation so future lookups hit the database.
            self.save_relation_to_db(geneIdList, compound_obj)
            return geneIdList[:2]
    def cal_graph(self):
        """
        calculate the relation graph
        """
        for cid in self.cid_list:
            center_compound_obj = self.get_compound_object(cid)
            if center_compound_obj == None:
                continue
            self.create_node(center_compound_obj.name, center_compound_obj.compound_id)
            # Expand up to 5 related compounds, then 2 deeper ones for each.
            related_list = self.related_compound(center_compound_obj.compound_id)[:5]
            for compound_obj in related_list:
                # NOTE(review): the returned first gene id is never used.
                new_center = self.create_n_link(center_compound_obj.compound_id, compound_obj)
                self.create_node(compound_obj.name, compound_obj.compound_id)
                # NOTE(review): edge_info is built but never appended to
                # self.edges — looks like a missing self.edges.append(edge_info);
                # confirm intended behavior before "fixing".
                edge_info = {
                    'source': self.index_dict[center_compound_obj.compound_id],
                    'target': self.index_dict[compound_obj.compound_id],
                    'relation': compound_obj.name,
                }
                deep_related_list = self.related_compound(compound_obj.compound_id)[:2]
                for deep_compound_obj in deep_related_list:
                    self.create_n_link(compound_obj.compound_id, deep_compound_obj)
    def get_graph(self):
        """
        get the graph
        @return: the graph as {'nodes': [...], 'edges': [...]}
        @rtype: dict
        """
        result = {
            'nodes': self.nodes,
            'edges' : self.edges
        }
        return result
'''
def find_related_compound(cid_str):
"""
find the compound that are related to current compound in reaction
@param cid: list of compound id
@type cid: list
@return: dict of compound that are related to the compound, empty list will be returned if there is no related compound
@rtype: dict
"""
result = dict()
nodes = list()
edges = list()
all_genes = list()
index_dict = dict()
index = 0
if cid_str.endswith('_'):
cid_str = cid_str[:-1]
cid_list = cid_str.split('_')
for cid in cid_list:
try:
compound_obj = compound.objects.get(compound_id=cid)
#get first gene and create new node
cen_gene_id = None
try:
cen_gene_id = search_gene_in_ncbi(compound_obj.name,)[0]
if not cen_gene_id in all_genes:
all_genes.append(cen_gene_id)
gene_obj = gene.objects.get(gene_id=cen_gene_id)
node_info = {
'name': gene_obj.name,
'id': gene_obj.gene_id
}
nodes.append(node_info)
index_dict[cen_gene_id] = index
index += 1
except:
pass
# find related reactions
rid_list = reaction_compound.objects.filter(compound=compound_obj, isReactant=True).values_list('reaction_id', flat=True)
cname_list = list()
for rid in rid_list:
rs = reaction_compound.objects.filter(Q(reaction_id=rid), ~Q(compound=compound_obj))[:5]
for r in rs:
cname_list.append(r.compound.name)
for cname in cname_list:
# find genes
gene_list = search_gene_in_ncbi(cname, expect=cen_gene_id, index=1)
for gene_id in gene_list:
if gene_id in all_genes:
continue
try:
gene_obj = gene.objects.get(gene_id=gene_id)
#create new node
all_genes.append(gene_id)
node_info = {
'name' : gene_obj.name,
'id': gene_obj.gene_id
}
nodes.append(node_info)
index_dict[gene_obj.gene_id] = index
index += 1
# add edge
edge_info = {
'source': index_dict[cen_gene_id],
'target': index_dict[gene_obj.gene_id],
'relation': cname
}
edges.append(edge_info)
except:
traceback.print_exc()
pass
except:
traceback.print_exc()
pass
result = {
'nodes': nodes,
'edges': edges
}
return result
'''
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from obs import ObsClient
from pycarbon.core.carbon_reader import make_carbon_reader, make_batch_carbon_reader
from pycarbon.integration.pytorch import decimal_friendly_collate, DataLoader
from pycarbon.integration.tensorflow import TensorFlow
def make_reader(dataset_url=None,
                workers_count=10,
                results_queue_size=100,
                num_epochs=1,
                obs_client=None,
                shuffle=True,
                schema_fields=None,
                is_batch=True,
                reader_pool_type='thread',
                data_format='carbon',
                cache_properties=None,
                **properties
                ):
    """
    A unified API for reading datasets in different data formats.

    :param dataset_url: a filepath or a url to a carbon directory,
        e.g. ``'hdfs://some_hdfs_cluster/user/yevgeni/carbon8'``, or ``'file:///tmp/mydataset'``
        or ``'s3a://bucket/mydataset'``.
    :param data_format: dataset data format (default: carbon)
    :param is_batch: return single record or batch records (default: True)
    :param obs_client: obs client object (provides access key, secret key and
        endpoint), or ``None`` when the url is directly accessible.
    :param schema_fields: Can be: a list of unischema fields and/or regex pattern strings; ``None`` to read all
        fields; an NGram object, then it will return an NGram of the specified fields.
    :param reader_pool_type: A string denoting the reader pool type. Should be one of ['thread', 'process', 'dummy']
        denoting a thread pool, process pool, or running everything in the master thread. Defaults to 'thread'
    :param workers_count: An int for the number of workers to use in the reader pool. This only is used for the
        thread or process pool. Defaults to 10
    :param results_queue_size: Size of the results queue to store prefetched rows. Currently only applicable to
        thread reader pool type.
    :param shuffle: Whether to shuffle partition (the order in which full partition are read)
    :param num_epochs: An epoch is a single pass over all rows in the dataset. Setting ``num_epochs`` to
        ``None`` will result in an infinite number of epochs.
    :param cache_properties: an optional dict of cache parameters; unspecified
        keys default to ``None`` (cache_type, cache_location, cache_size_limit,
        cache_row_size_estimate, cache_extra_settings).
    :param properties: other parameters passed through to the reader
    :return: A :class:`Reader` object
    :raises ValueError: when ``is_batch`` is not a bool or ``obs_client`` is
        neither an ``ObsClient`` nor ``None``
    :raises NotImplementedError: for any ``data_format`` other than 'carbon'
    """
    # Validate the simple arguments before touching any reader machinery.
    if is_batch is not True and is_batch is not False:
        raise ValueError("""the value of is_batch is invalid, it should be set True or False""")
    if data_format != 'carbon':
        raise NotImplementedError("""not support other data format datset""")

    # NOTE: the previous implementation used a mutable dict as the default
    # value of cache_properties. Defaulting to None and merging here avoids
    # the shared-mutable-default pitfall and also tolerates partial dicts.
    cache_defaults = {'cache_type': None, 'cache_location': None,
                      'cache_size_limit': None,
                      'cache_row_size_estimate': None,
                      'cache_extra_settings': None}
    if cache_properties:
        cache_defaults.update(cache_properties)
    cache_properties = cache_defaults

    # Dispatch once on is_batch instead of duplicating both obs branches.
    reader_factory = make_batch_carbon_reader if is_batch is True \
        else make_carbon_reader

    if isinstance(obs_client, ObsClient):
        # Derive the endpoint scheme from the client's security setting.
        if obs_client.is_secure is True:
            endpoint = "https://" + obs_client.server
        else:
            endpoint = "http://" + obs_client.server
        return reader_factory(dataset_url,
                              key=obs_client.securityProvider.access_key_id,
                              secret=obs_client.securityProvider.secret_access_key,
                              endpoint=endpoint,
                              proxy=obs_client.proxy_host,
                              proxy_port=obs_client.proxy_port,
                              schema_fields=schema_fields,
                              reader_pool_type=reader_pool_type,
                              workers_count=workers_count,
                              results_queue_size=results_queue_size,
                              shuffle_blocklets=shuffle,
                              num_epochs=num_epochs,
                              cache_type=cache_properties['cache_type'],
                              cache_location=cache_properties['cache_location'],
                              cache_size_limit=cache_properties['cache_size_limit'],
                              cache_row_size_estimate=cache_properties['cache_row_size_estimate'],
                              cache_extra_settings=cache_properties['cache_extra_settings'],
                              **properties)
    elif obs_client is None:
        return reader_factory(dataset_url,
                              schema_fields=schema_fields,
                              reader_pool_type=reader_pool_type,
                              workers_count=workers_count,
                              results_queue_size=results_queue_size,
                              shuffle_blocklets=shuffle,
                              num_epochs=num_epochs,
                              cache_type=cache_properties['cache_type'],
                              cache_location=cache_properties['cache_location'],
                              cache_size_limit=cache_properties['cache_size_limit'],
                              cache_row_size_estimate=cache_properties['cache_row_size_estimate'],
                              cache_extra_settings=cache_properties['cache_extra_settings'],
                              **properties)
    else:
        raise ValueError("""obs_client should be a ObsClient object or None""")
def make_dataset(reader):
    """Create a `tensorflow.data.Dataset <https://www.tensorflow.org/api_docs/python/tf/data/Dataset>`_
    object from a PyCarbon reader.

    NGrams are not yet supported by this function.

    :param reader: An instance of :class:`Reader` object that would serve as a data source.
    :return: A ``tf.data.Dataset`` instance.
    """
    return TensorFlow().make_dataset(reader)
def make_tensor(reader, shuffling_queue_capacity=0, min_after_dequeue=0):
    """Bridge between the python-only Reader interface (``next(reader)``) and
    the TensorFlow world.

    Returns a named tuple of tensors from the dataset. If the reader was
    created with an ``ngram=NGram(...)`` parameter, a dictionary of named
    tuples indexed by time is returned instead. An optional shuffling queue
    is created when ``shuffling_queue_capacity`` is greater than 0.

    Note that a unischema field that is unicode (``np.unicode_`` or
    ``np.str_``) is represented by TensorFlow as a ``tf.string`` (an array of
    bytes); under python3 it may need decoding back to ``str``.

    :param reader: An instance of Reader object used as the data source
    :param shuffling_queue_capacity: Queue capacity passed to the underlying
        :class:`tf.RandomShuffleQueue`. If set to 0, no shuffling is done.
    :param min_after_dequeue: If ``shuffling_queue_capacity > 0``, this value
        is passed to the underlying :class:`tf.RandomShuffleQueue`.
    :return: A named tuple of tensors (or, with ngram reading enabled, a
        dictionary of named tuples indexed by time).
    """
    integration = TensorFlow()
    return integration.make_tensor(reader, shuffling_queue_capacity,
                                   min_after_dequeue)
def make_data_loader(reader, batch_size=1, collate_fn=decimal_friendly_collate):
    """Initialize a PyTorch data loader with a default collate function.

    The number of epochs is defined by the configuration of the reader
    argument.

    :param reader: PyCarbon Reader instance
    :param batch_size: the number of items to return per batch; factored into the len() of this reader
    :param collate_fn: an optional callable to merge a list of samples to form a mini-batch.
    :return: a :class:`DataLoader` wrapping *reader*
    """
    loader = DataLoader(reader, batch_size=batch_size, collate_fn=collate_fn)
    return loader
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Drivers for volumes.
"""
import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import excutils
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
# Configuration options common to all volume drivers. Individual drivers
# append these to their own configuration object in VolumeDriver.__init__.
volume_opts = [
    cfg.IntOpt('num_shell_tries',
               default=3,
               help='Number of times to attempt to run flakey shell commands'),
    cfg.IntOpt('reserved_percentage',
               default=0,
               help='The percentage of backend capacity is reserved'),
    cfg.IntOpt('iscsi_num_targets',
               default=100,
               help='The maximum number of iSCSI target IDs per host'),
    cfg.StrOpt('iscsi_target_prefix',
               default='iqn.2010-10.org.openstack:',
               help='Prefix for iSCSI volumes'),
    cfg.StrOpt('iscsi_ip_address',
               default='$my_ip',
               help='The IP address that the iSCSI daemon is listening on'),
    cfg.IntOpt('iscsi_port',
               default=3260,
               help='The port that the iSCSI daemon is listening on'),
    cfg.IntOpt('num_volume_device_scan_tries',
               deprecated_name='num_iscsi_scan_tries',
               default=3,
               help='The maximum number of times to rescan targets'
                    ' to find volume'),
    cfg.StrOpt('volume_backend_name',
               default=None,
               help='The backend name for a given driver implementation'),
    cfg.BoolOpt('use_multipath_for_image_xfer',
                default=False,
                help='Do we attach/detach volumes in cinder using multipath '
                     'for volume to image and image to volume transfers?'),
    cfg.StrOpt('volume_clear',
               default='zero',
               help='Method used to wipe old volumes (valid options are: '
                    'none, zero, shred)'),
    cfg.IntOpt('volume_clear_size',
               default=0,
               help='Size in MiB to wipe at start of old volumes. 0 => all'),
    cfg.StrOpt('volume_clear_ionice',
               default=None,
               help='The flag to pass to ionice to alter the i/o priority '
                    'of the process used to zero a volume after deletion, '
                    'for example "-c3" for idle only priority.'),
    cfg.StrOpt('iscsi_helper',
               default='tgtadm',
               help='iSCSI target user-land tool to use. tgtadm is default, '
                    'use lioadm for LIO iSCSI support, iseradm for the ISER '
                    'protocol, or fake for testing.'),
    cfg.StrOpt('volumes_dir',
               default='$state_path/volumes',
               help='Volume configuration file storage '
                    'directory'),
    cfg.StrOpt('iet_conf',
               default='/etc/iet/ietd.conf',
               help='IET configuration file'),
    cfg.StrOpt('lio_initiator_iqns',
               default='',
               help='This option is deprecated and unused. '
                    'It will be removed in the next release.'),
    cfg.StrOpt('iscsi_iotype',
               default='fileio',
               help=('Sets the behavior of the iSCSI target '
                     'to either perform blockio or fileio '
                     'optionally, auto can be set and Cinder '
                     'will autodetect type of backing device')),
    cfg.StrOpt('volume_dd_blocksize',
               default='1M',
               help='The default block size used when copying/clearing '
                    'volumes'),
    cfg.StrOpt('volume_copy_blkio_cgroup_name',
               default='cinder-volume-copy',
               help='The blkio cgroup name to be used to limit bandwidth '
                    'of volume copy'),
    cfg.IntOpt('volume_copy_bps_limit',
               default=0,
               help='The upper limit of bandwidth of volume copy. '
                    '0 => unlimited'),
    cfg.StrOpt('iscsi_write_cache',
               default='on',
               help='Sets the behavior of the iSCSI target to either '
                    'perform write-back(on) or write-through(off). '
                    'This parameter is valid if iscsi_helper is set '
                    'to tgtadm or iseradm.'),
    cfg.StrOpt('driver_client_cert_key',
               default=None,
               help='The path to the client certificate key for verification, '
                    'if the driver supports it.'),
    cfg.StrOpt('driver_client_cert',
               default=None,
               help='The path to the client certificate for verification, '
                    'if the driver supports it.'),
    cfg.BoolOpt('driver_use_ssl',
                default=False,
                help='Tell driver to use SSL for connection to backend '
                     'storage if the driver supports it.'),
]
# for backward compatibility
iser_opts = [
    cfg.IntOpt('num_iser_scan_tries',
               default=3,
               help='The maximum number of times to rescan iSER target'
                    'to find volume'),
    cfg.IntOpt('iser_num_targets',
               default=100,
               help='The maximum number of iSER target IDs per host'),
    cfg.StrOpt('iser_target_prefix',
               default='iqn.2010-10.org.openstack:',
               help='Prefix for iSER volumes'),
    cfg.StrOpt('iser_ip_address',
               default='$my_ip',
               help='The IP address that the iSER daemon is listening on'),
    cfg.IntOpt('iser_port',
               default=3260,
               help='The port that the iSER daemon is listening on'),
    cfg.StrOpt('iser_helper',
               default='tgtadm',
               help='The name of the iSER target user-land tool to use'),
]
CONF = cfg.CONF
# Register globally so the options are recognized in cinder.conf even before
# any driver instance appends them to its own configuration group.
CONF.register_opts(volume_opts)
CONF.register_opts(iser_opts)
class VolumeDriver(object):
    """Executes commands relating to Volumes.
    Base Driver for Cinder Volume Control Path,
    This includes supported/required implementation
    for API calls. Also provides *generic* implementation
    of core features like cloning, copy_image_to_volume etc,
    this way drivers that inherit from this base class and
    don't offer their own impl can fall back on a general
    solution here.
    Key thing to keep in mind with this driver is that it's
    intended that these drivers ONLY implement Control Path
    details (create, delete, extend...), while transport or
    data path related implementation should be a *member object*
    that we call a connector. The point here is that for example
    don't allow the LVM driver to implement iSCSI methods, instead
    call whatever connector it has configured via conf file
    (iSCSI{LIO, TGT, IET}, FC, etc).
    In the base class and for example the LVM driver we do this via a has-a
    relationship and just provide an interface to the specific connector
    methods. How you do this in your own driver is of course up to you.
    """
    # Driver version string reported to callers via get_version();
    # subclasses override with their own semantic version.
    VERSION = "N/A"
    def __init__(self, execute=utils.execute, *args, **kwargs):
        """Initialize the base driver.

        :param execute: callable used to run shell commands; injected so
            tests can substitute a fake executor.

        Recognized keyword arguments: ``db`` (database API, set by the
        manager), ``host`` and ``configuration``.
        """
        # NOTE(vish): db is set by Manager
        self.db = kwargs.get('db')
        self.host = kwargs.get('host')
        self.configuration = kwargs.get('configuration', None)
        if self.configuration:
            # Expose the shared driver options on this driver's config group.
            self.configuration.append_config_values(volume_opts)
            self.configuration.append_config_values(iser_opts)
        self.set_execute(execute)
        # Cached stats returned by get_volume_stats and reported pools.
        self._stats = {}
        self.pools = []
        # We set these mappings up in the base driver so they
        # can be used by children
        # (intended for LVM and BlockDevice, but others could use as well)
        self.target_mapping = {
            'fake': 'cinder.volume.targets.fake.FakeTarget',
            'ietadm': 'cinder.volume.targets.iet.IetAdm',
            'iseradm': 'cinder.volume.targets.iser.ISERTgtAdm',
            'lioadm': 'cinder.volume.targets.lio.LioAdm',
            'tgtadm': 'cinder.volume.targets.tgt.TgtAdm', }
        # set True by manager after successful check_for_setup
        self._initialized = False
def _is_non_recoverable(self, err, non_recoverable_list):
for item in non_recoverable_list:
if item in err:
return True
return False
def _try_execute(self, *command, **kwargs):
# NOTE(vish): Volume commands can partially fail due to timing, but
# running them a second time on failure will usually
# recover nicely.
non_recoverable = kwargs.pop('no_retry_list', [])
tries = 0
while True:
try:
self._execute(*command, **kwargs)
return True
except processutils.ProcessExecutionError as ex:
tries = tries + 1
if tries >= self.configuration.num_shell_tries or\
self._is_non_recoverable(ex.stderr, non_recoverable):
raise
LOG.exception(_LE("Recovering from a failed execute. "
"Try number %s"), tries)
time.sleep(tries ** 2)
    def _detach_volume(self, context, attach_info, volume, properties,
                       force=False, remote=False):
        """Disconnect the volume from the host.

        :param attach_info: dict returned by :meth:`_attach_volume`
            (keys 'conn', 'device', 'connector').
        :param properties: connector properties from brick.
        :param force: passed through to terminate_connection.
        :param remote: terminate the connection via the remote volume
            manager instead of the local driver.
        :raises: VolumeBackendAPIException, RemoveExportException
        """
        # Use Brick's code to do attach/detach
        connector = attach_info['connector']
        connector.disconnect_volume(attach_info['conn']['data'],
                                    attach_info['device'])
        if remote:
            # Call remote manager's terminate_connection which includes
            # driver's terminate_connection and remove export
            rpcapi = volume_rpcapi.VolumeAPI()
            rpcapi.terminate_connection(context, volume, properties,
                                        force=force)
        else:
            # Call local driver's terminate_connection and remove export.
            # NOTE(avishay) This is copied from the manager's code - need to
            # clean this up in the future.
            try:
                self.terminate_connection(volume, properties, force=force)
            except Exception as err:
                err_msg = (_('Unable to terminate volume connection: %(err)s')
                           % {'err': err})
                LOG.error(err_msg)
                raise exception.VolumeBackendAPIException(data=err_msg)
            try:
                LOG.debug(("volume %s: removing export"), volume['id'])
                self.remove_export(context, volume)
            except Exception as ex:
                LOG.exception(_LE("Error detaching volume %(volume)s, "
                                  "due to remove export failure."),
                              {"volume": volume['id']})
                raise exception.RemoveExportException(volume=volume['id'],
                                                      reason=ex)
    def set_execute(self, execute):
        """Inject the callable used to run shell commands."""
        self._execute = execute
    def set_initialized(self):
        """Mark the driver ready; called by the manager after the driver's
        setup check succeeds."""
        self._initialized = True
    @property
    def initialized(self):
        # True once the manager has called set_initialized().
        return self._initialized
    def get_version(self):
        """Get the current version of this driver."""
        return self.VERSION
    def check_for_setup_error(self):
        """Verify the backend is reachable and correctly configured.

        Called by the manager at startup; must raise on any problem.
        Subclasses must override.
        """
        raise NotImplementedError()
    def create_volume(self, volume):
        """Creates a volume. Can optionally return a Dictionary of
        changes to the volume object to be persisted.
        If volume_type extra specs includes
        'capabilities:replication <is> True' the driver
        needs to create a volume replica (secondary), and setup replication
        between the newly created volume and the secondary volume.
        Returned dictionary should include:
        volume['replication_status'] = 'copying'
        volume['replication_extended_status'] = driver specific value
        volume['driver_data'] = driver specific value
        """
        raise NotImplementedError()
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot.
        If volume_type extra specs includes 'replication: <is> True'
        the driver needs to create a volume replica (secondary),
        and setup replication between the newly created volume and
        the secondary volume.
        """
        raise NotImplementedError()
    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume.
        If volume_type extra specs includes 'replication: <is> True' the
        driver needs to create a volume replica (secondary)
        and setup replication between the newly created volume
        and the secondary volume.
        """
        raise NotImplementedError()
    def create_replica_test_volume(self, volume, src_vref):
        """Creates a test replica clone of the specified replicated volume.
        Create a clone of the replicated (secondary) volume.
        """
        raise NotImplementedError()
    def delete_volume(self, volume):
        """Deletes a volume.
        If volume_type extra specs includes 'replication: <is> True'
        then the driver needs to delete the volume replica too.
        """
        raise NotImplementedError()
    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        raise NotImplementedError()
    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        raise NotImplementedError()
    def local_path(self, volume):
        """Return the local filesystem path of the volume's block device,
        for drivers that expose one (used for dd-style data copies)."""
        raise NotImplementedError()
    def get_volume_stats(self, refresh=False):
        """Return the current state of the volume service. If 'refresh' is
        True, run the update first.
        For replication the following state should be reported:
        replication_support = True (None or false disables replication)
        """
        # Base implementation reports nothing; drivers override.
        return None
    def copy_volume_data(self, context, src_vol, dest_vol, remote=None):
        """Copy data from src_vol to dest_vol.

        Attaches both volumes (locally or via the remote manager depending
        on *remote*), block-copies the data with dd semantics, and detaches
        both sides even when the copy fails.

        :param remote: one of None, 'src', 'dest' or 'both' — which side(s)
            must be attached through the remote volume manager.
        :raises: re-raises any attach or copy failure after cleanup.
        """
        LOG.debug(('copy_data_between_volumes %(src)s -> %(dest)s.')
                  % {'src': src_vol['name'], 'dest': dest_vol['name']})
        properties = utils.brick_get_connector_properties()
        dest_remote = True if remote in ['dest', 'both'] else False
        dest_orig_status = dest_vol['status']
        try:
            dest_attach_info = self._attach_volume(context,
                                                   dest_vol,
                                                   properties,
                                                   remote=dest_remote)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Roll the destination back to its pre-copy status before
                # re-raising the attach failure.
                msg = _("Failed to attach volume %(vol)s")
                LOG.error(msg % {'vol': dest_vol['id']})
                self.db.volume_update(context, dest_vol['id'],
                                      {'status': dest_orig_status})
        src_remote = True if remote in ['src', 'both'] else False
        src_orig_status = src_vol['status']
        try:
            src_attach_info = self._attach_volume(context,
                                                  src_vol,
                                                  properties,
                                                  remote=src_remote)
        except Exception:
            with excutils.save_and_reraise_exception():
                msg = _("Failed to attach volume %(vol)s")
                LOG.error(msg % {'vol': src_vol['id']})
                self.db.volume_update(context, src_vol['id'],
                                      {'status': src_orig_status})
            # NOTE(review): save_and_reraise_exception re-raises when the
            # 'with' block above exits, so this detach looks unreachable and
            # the destination attach may be leaked when the source attach
            # fails — confirm against oslo excutils semantics.
            self._detach_volume(context, dest_attach_info, dest_vol,
                                properties, force=True, remote=dest_remote)
        copy_error = True
        try:
            size_in_mb = int(src_vol['size']) * 1024    # vol size is in GB
            volume_utils.copy_volume(
                src_attach_info['device']['path'],
                dest_attach_info['device']['path'],
                size_in_mb,
                self.configuration.volume_dd_blocksize)
            copy_error = False
        except Exception:
            with excutils.save_and_reraise_exception():
                msg = _("Failed to copy volume %(src)s to %(dest)s.")
                LOG.error(msg % {'src': src_vol['id'], 'dest': dest_vol['id']})
        finally:
            # Always detach both sides; force cleanup when the copy failed.
            self._detach_volume(context, dest_attach_info, dest_vol,
                                properties, force=copy_error,
                                remote=dest_remote)
            self._detach_volume(context, src_attach_info, src_vol,
                                properties, force=copy_error,
                                remote=src_remote)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
LOG.debug(('copy_image_to_volume %s.') % volume['name'])
properties = utils.brick_get_connector_properties()
attach_info = self._attach_volume(context, volume, properties)
try:
image_utils.fetch_to_raw(context,
image_service,
image_id,
attach_info['device']['path'],
self.configuration.volume_dd_blocksize,
size=volume['size'])
finally:
self._detach_volume(context, attach_info, volume, properties)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
LOG.debug(('copy_volume_to_image %s.') % volume['name'])
properties = utils.brick_get_connector_properties()
attach_info = self._attach_volume(context, volume, properties)
try:
image_utils.upload_volume(context,
image_service,
image_meta,
attach_info['device']['path'])
finally:
self._detach_volume(context, attach_info, volume, properties)
def _attach_volume(self, context, volume, properties, remote=False):
"""Attach the volume."""
if remote:
# Call remote manager's initialize_connection which includes
# driver's create_export and initialize_connection
rpcapi = volume_rpcapi.VolumeAPI()
conn = rpcapi.initialize_connection(context, volume, properties)
else:
# Call local driver's create_export and initialize_connection.
# NOTE(avishay) This is copied from the manager's code - need to
# clean this up in the future.
model_update = None
try:
LOG.debug(("Volume %s: creating export"), volume['id'])
model_update = self.create_export(context, volume)
if model_update:
volume = self.db.volume_update(context, volume['id'],
model_update)
except exception.CinderException as ex:
if model_update:
LOG.exception(_LE("Failed updating model of volume "
"%(volume_id)s with driver provided "
"model %(model)s") %
{'volume_id': volume['id'],
'model': model_update})
raise exception.ExportFailure(reason=ex)
try:
conn = self.initialize_connection(volume, properties)
except Exception as err:
try:
err_msg = (_('Unable to fetch connection information from '
'backend: %(err)s') % {'err': err})
LOG.error(err_msg)
LOG.debug("Cleaning up failed connect initialization.")
self.remove_export(context, volume)
except Exception as ex:
ex_msg = (_('Error encountered during cleanup '
'of a failed attach: %(ex)s') % {'ex': ex})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=ex_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
return self._connect_device(conn)
def _connect_device(self, conn):
# Use Brick's code to do attach/detach
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn)
device = connector.connect_volume(conn['data'])
host_device = device['path']
# Secure network file systems will NOT run as root.
root_access = not self.secure_file_operations_enabled()
if not connector.check_valid_device(host_device, root_access):
raise exception.DeviceUnavailable(path=host_device,
reason=(_("Unable to access "
"the backend storage "
"via the path "
"%(path)s.") %
{'path': host_device}))
return {'conn': conn, 'device': device, 'connector': connector}
    def clone_image(self, context, volume,
                    image_location, image_meta,
                    image_service):
        """Create a volume efficiently from an existing image.
        image_location is a string whose format depends on the
        image service backend in use. The driver should use it
        to determine whether cloning is possible.
        image_meta is a dictionary that includes 'disk_format' (e.g.
        raw, qcow2) and other image attributes that allow drivers to
        decide whether they can clone the image without first requiring
        conversion.
        image_service is the reference of the image_service to use.
        Note that this is needed to be passed here for drivers that
        will want to fetch images from the image service directly.
        Returns a dict of volume properties eg. provider_location,
        boolean indicating whether cloning occurred
        """
        # Base driver cannot clone efficiently; returning (None, False)
        # makes the caller fall back to a full image copy.
        return None, False
    def backup_volume(self, context, backup, backup_service):
        """Create a new backup from an existing volume.

        Attaches the volume, hands an open file object for its device path
        to *backup_service*, and always detaches afterwards.
        """
        volume = self.db.volume_get(context, backup['volume_id'])
        LOG.debug(('Creating a new backup for volume %s.') %
                  volume['name'])
        properties = utils.brick_get_connector_properties()
        attach_info = self._attach_volume(context, volume, properties)
        try:
            volume_path = attach_info['device']['path']
            # Secure network file systems will not chown files.
            if self.secure_file_operations_enabled():
                with fileutils.file_open(volume_path) as volume_file:
                    backup_service.backup(backup, volume_file)
            else:
                # Temporarily take ownership so the device is readable.
                with utils.temporary_chown(volume_path):
                    with fileutils.file_open(volume_path) as volume_file:
                        backup_service.backup(backup, volume_file)
        finally:
            self._detach_volume(context, attach_info, volume, properties)
    def restore_backup(self, context, backup, volume, backup_service):
        """Restore an existing backup to a new or existing volume.

        Attaches the volume, hands a writable file object for its device
        path to *backup_service*, and always detaches afterwards.
        """
        LOG.debug(('Restoring backup %(backup)s to '
                   'volume %(volume)s.') %
                  {'backup': backup['id'],
                   'volume': volume['name']})
        properties = utils.brick_get_connector_properties()
        attach_info = self._attach_volume(context, volume, properties)
        try:
            volume_path = attach_info['device']['path']
            # Secure network file systems will not chown files.
            if self.secure_file_operations_enabled():
                with fileutils.file_open(volume_path, 'wb') as volume_file:
                    backup_service.restore(backup, volume['id'], volume_file)
            else:
                # Temporarily take ownership so the device is writable.
                with utils.temporary_chown(volume_path):
                    with fileutils.file_open(volume_path, 'wb') as volume_file:
                        backup_service.restore(backup, volume['id'],
                                               volume_file)
        finally:
            self._detach_volume(context, attach_info, volume, properties)
    def clear_download(self, context, volume):
        """Clean up after an interrupted image copy."""
        # No-op by default; drivers override when cleanup is required.
        pass
    def extend_volume(self, volume, new_size):
        # Optional capability; drivers that support growing volumes override.
        msg = _("Extend volume not implemented")
        raise NotImplementedError(msg)
    def migrate_volume(self, context, volume, host):
        """Migrate the volume to the specified host.
        Returns a boolean indicating whether the migration occurred, as well as
        model_update.
        :param ctxt: Context
        :param volume: A dictionary describing the volume to migrate
        :param host: A dictionary describing the host to migrate to, where
                     host['host'] is its name, and host['capabilities'] is a
                     dictionary of its reported capabilities.
        """
        # Base driver cannot migrate; the manager falls back to generic copy.
        return (False, None)
    def retype(self, context, volume, new_type, diff, host):
        """Convert the volume to be of the new type.
        Returns either:
        A boolean indicating whether the retype occurred, or
        A tuple (retyped, model_update) where retyped is a boolean
        indicating if the retype occurred, and the model_update includes
        changes for the volume db.
        if diff['extra_specs'] includes 'replication' then:
            if ('True', _ ) then replication should be disabled:
                Volume replica should be deleted
                volume['replication_status'] should be changed to 'disabled'
                volume['replication_extended_status'] = None
                volume['replication_driver_data'] = None
            if (_, 'True') then replication should be enabled:
                Volume replica (secondary) should be created, and replication
                should be setup between the volume and the newly created
                replica
                volume['replication_status'] = 'copying'
                volume['replication_extended_status'] = driver specific value
                volume['replication_driver_data'] = driver specific value
        :param ctxt: Context
        :param volume: A dictionary describing the volume to migrate
        :param new_type: A dictionary describing the volume type to convert to
        :param diff: A dictionary with the difference between the two types
        :param host: A dictionary describing the host to migrate to, where
                     host['host'] is its name, and host['capabilities'] is a
                     dictionary of its reported capabilities.
        """
        # Base driver cannot retype in place.
        return False, None
    def accept_transfer(self, context, volume, new_user, new_project):
        """Accept the transfer of a volume for a new user/project."""
        # Most backends need no action when ownership changes.
        pass
def manage_existing(self, volume, existing_ref):
"""Brings an existing backend storage object under Cinder management.
existing_ref is passed straight through from the API request's
manage_existing_ref value, and it is up to the driver how this should
be interpreted. It should be sufficient to identify a storage object
that the driver should somehow associate with the newly-created cinder
volume structure.
There are two ways to do this:
1. Rename the backend storage object so that it matches the,
volume['name'] which is how drivers traditionally map between a
cinder volume and the associated backend storage object.
2. Place some metadata on the volume, or somewhere in the backend, that
allows other driver requests (e.g. delete, clone, attach, detach...)
to locate the backend storage object when required.
If the existing_ref doesn't make sense, or doesn't refer to an existing
backend storage object, raise a ManageExistingInvalidReference
exception.
The volume may have a volume_type, and the driver can inspect that and
compare against the properties of the referenced backend storage
object. If they are incompatible, raise a
ManageExistingVolumeTypeMismatch, specifying a reason for the failure.
"""
msg = _("Manage existing volume not implemented.")
raise NotImplementedError(msg)
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
"""
msg = _("Manage existing volume not implemented.")
raise NotImplementedError(msg)
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object.
For most drivers, this will not need to do anything. However, some
drivers might use this call as an opportunity to clean up any
Cinder-specific configuration that they have associated with the
backend storage object.
"""
pass
def attach_volume(self, context, volume, instance_uuid, host_name,
mountpoint):
"""Callback for volume attached to instance or host."""
pass
def detach_volume(self, context, volume):
"""Callback for volume detached."""
pass
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
pass
def validate_connector(self, connector):
"""Fail if connector doesn't contain all the data needed by driver."""
pass
@staticmethod
def validate_connector_has_setting(connector, setting):
pass
def reenable_replication(self, context, volume):
"""Re-enable replication between the replica and primary volume.
This is used to re-enable/fix the replication between primary
and secondary. One use is as part of the fail-back process, when
you re-synchorize your old primary with the promoted volume
(the old replica).
Returns model_update for the volume to reflect the actions of the
driver.
The driver is expected to update the following entries:
'replication_status'
'replication_extended_status'
'replication_driver_data'
Possible 'replication_status' values (in model_update) are:
'error' - replication in error state
'copying' - replication copying data to secondary (inconsistent)
'active' - replication copying data to secondary (consistent)
'active-stopped' - replication data copy on hold (consistent)
'inactive' - replication data copy on hold (inconsistent)
Values in 'replication_extended_status' and 'replication_driver_data'
are managed by the driver.
:param context: Context
:param volume: A dictionary describing the volume
"""
msg = _("sync_replica not implemented.")
raise NotImplementedError(msg)
def get_replication_status(self, context, volume):
"""Query the actual volume replication status from the driver.
Returns model_update for the volume.
The driver is expected to update the following entries:
'replication_status'
'replication_extended_status'
'replication_driver_data'
Possible 'replication_status' values (in model_update) are:
'error' - replication in error state
'copying' - replication copying data to secondary (inconsistent)
'active' - replication copying data to secondary (consistent)
'active-stopped' - replication data copy on hold (consistent)
'inactive' - replication data copy on hold (inconsistent)
Values in 'replication_extended_status' and 'replication_driver_data'
are managed by the driver.
:param context: Context
:param volume: A dictionary describing the volume
"""
return None
def promote_replica(self, context, volume):
"""Promote the replica to be the primary volume.
Following this command, replication between the volumes at
the storage level should be stopped, the replica should be
available to be attached, and the replication status should
be in status 'inactive'.
Returns model_update for the volume.
The driver is expected to update the following entries:
'replication_status'
'replication_extended_status'
'replication_driver_data'
Possible 'replication_status' values (in model_update) are:
'error' - replication in error state
'inactive' - replication data copy on hold (inconsistent)
Values in 'replication_extended_status' and 'replication_driver_data'
are managed by the driver.
:param context: Context
:param volume: A dictionary describing the volume
"""
msg = _("promote_replica not implemented.")
raise NotImplementedError(msg)
# ####### Interface methods for DataPath (Connector) ########
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a volume."""
raise NotImplementedError()
def create_export(self, context, volume):
"""Exports the volume.
Can optionally return a Dictionary of changes
to the volume object to be persisted.
"""
raise NotImplementedError()
def remove_export(self, context, volume):
"""Removes an export for a volume."""
raise NotImplementedError()
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info."""
raise NotImplementedError()
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector"""
def create_consistencygroup(self, context, group):
"""Creates a consistencygroup."""
raise NotImplementedError()
def delete_consistencygroup(self, context, group):
"""Deletes a consistency group."""
raise NotImplementedError()
def create_cgsnapshot(self, context, cgsnapshot):
"""Creates a cgsnapshot."""
raise NotImplementedError()
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes a cgsnapshot."""
raise NotImplementedError()
def get_pool(self, volume):
"""Return pool name where volume reside on.
:param volume: The volume hosted by the the driver.
:return: name of the pool where given volume is in.
"""
return None
def secure_file_operations_enabled(self):
"""Determine if driver is running in Secure File Operations mode.
The Cinder Volume driver needs to query if this driver is running
in a secure file operations mode. By default, it is False: any driver
that does support secure file operations should override this method.
"""
return False
def update_migrated_volume(self, ctxt, volume, new_volume):
"""Return model update for migrated volume.
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:return model_update to update DB with any needed changes
"""
return None
class ProxyVD(object):
    """Marker base class for proxy volume drivers.

    Drivers that delegate through __setattr__/__getattr__ instead of
    inheriting the base volume driver can subclass this so callers can
    both detect the proxy and reach the wrapped driver object.
    """
    def _get_driver(self):
        """Return the wrapped driver object (overridable by the proxy)."""
        return getattr(self, "driver", None)
class ISCSIDriver(VolumeDriver):
    """Executes commands relating to ISCSI volumes.
    We make use of model provider properties as follows:
    ``provider_location``
        if present, contains the iSCSI target information in the same
        format as an ietadm discovery
        i.e. '<ip>:<port>,<portal> <target IQN>'
    ``provider_auth``
        if present, contains a space-separated triple:
        '<auth method> <auth username> <auth password>'.
        `CHAP` is the only auth_method in use at the moment.
    """
    def __init__(self, *args, **kwargs):
        super(ISCSIDriver, self).__init__(*args, **kwargs)
    def _do_iscsi_discovery(self, volume):
        # Run 'iscsiadm -m discovery' against the volume's host and return
        # the first target line that mentions both the configured iSCSI IP
        # and the volume name; returns None on failure or no match.
        # TODO(justinsb): Deprecate discovery and use stored info
        # NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
        LOG.warn(_LW("ISCSI provider_location not "
                     "stored, using discovery"))
        volume_name = volume['name']
        try:
            # NOTE(griff) We're doing the split straight away which should be
            # safe since using '@' in hostname is considered invalid
            (out, _err) = self._execute('iscsiadm', '-m', 'discovery',
                                        '-t', 'sendtargets', '-p',
                                        volume['host'].split('@')[0],
                                        run_as_root=True)
        except processutils.ProcessExecutionError as ex:
            LOG.error(_LE("ISCSI discovery attempt failed for:%s") %
                      volume['host'].split('@')[0])
            LOG.debug("Error from iscsiadm -m discovery: %s" % ex.stderr)
            return None
        for target in out.splitlines():
            if (self.configuration.iscsi_ip_address in target
                    and volume_name in target):
                return target
        return None
    def _get_iscsi_properties(self, volume):
        """Gets iscsi configuration
        We ideally get saved information in the volume entity, but fall back
        to discovery if need be. Discovery may be completely removed in future
        The properties are:
        :target_discovered:    boolean indicating whether discovery was used
        :target_iqn:    the IQN of the iSCSI target
        :target_portal:    the portal of the iSCSI target
        :target_lun:    the lun of the iSCSI target
        :volume_id:    the id of the volume (currently used by xen)
        :auth_method:, :auth_username:, :auth_password:
            the authentication details. Right now, either auth_method is not
            present meaning no authentication, or auth_method == `CHAP`
            meaning use CHAP with the specified credentials.
        :access_mode:    the volume access mode allow client used
                         ('rw' or 'ro' currently supported)
        """
        properties = {}
        location = volume['provider_location']
        if location:
            # provider_location is the same format as iSCSI discovery output
            properties['target_discovered'] = False
        else:
            location = self._do_iscsi_discovery(volume)
            if not location:
                msg = (_("Could not find iSCSI export for volume %s") %
                       (volume['name']))
                raise exception.InvalidVolume(reason=msg)
            LOG.debug("ISCSI Discovery: Found %s" % (location))
            properties['target_discovered'] = True
        # location format: '<ip>:<port>,<portal> <IQN>[ <lun>]'
        results = location.split(" ")
        properties['target_portal'] = results[0].split(",")[0]
        properties['target_iqn'] = results[1]
        try:
            properties['target_lun'] = int(results[2])
        except (IndexError, ValueError):
            # No explicit LUN in the location string: tgtadm/iseradm-backed
            # LVM drivers export at LUN 1, everything else defaults to 0.
            if (self.configuration.volume_driver in
                    ['cinder.volume.drivers.lvm.LVMISCSIDriver',
                     'cinder.volume.drivers.lvm.LVMISERDriver',
                     'cinder.volume.drivers.lvm.ThinLVMVolumeDriver'] and
                    self.configuration.iscsi_helper in ('tgtadm', 'iseradm')):
                properties['target_lun'] = 1
            else:
                properties['target_lun'] = 0
        properties['volume_id'] = volume['id']
        auth = volume['provider_auth']
        if auth:
            # provider_auth: '<method> <username> <password>'
            (auth_method, auth_username, auth_secret) = auth.split()
            properties['auth_method'] = auth_method
            properties['auth_username'] = auth_username
            properties['auth_password'] = auth_secret
        # provider_geometry (optional): '<physical_bs> <logical_bs>'
        geometry = volume.get('provider_geometry', None)
        if geometry:
            (physical_block_size, logical_block_size) = geometry.split()
            properties['physical_block_size'] = physical_block_size
            properties['logical_block_size'] = logical_block_size
        encryption_key_id = volume.get('encryption_key_id', None)
        properties['encrypted'] = encryption_key_id is not None
        return properties
    def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs):
        # Run an iscsiadm node-mode command against the volume's target.
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = self._execute('iscsiadm', '-m', 'node', '-T',
                                   iscsi_properties['target_iqn'],
                                   '-p', iscsi_properties['target_portal'],
                                   *iscsi_command, run_as_root=True,
                                   check_exit_code=check_exit_code)
        LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
                  (iscsi_command, out, err))
        return (out, err)
    def _run_iscsiadm_bare(self, iscsi_command, **kwargs):
        # Run iscsiadm without the node/target boilerplate.
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = self._execute('iscsiadm',
                                   *iscsi_command,
                                   run_as_root=True,
                                   check_exit_code=check_exit_code)
        LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
                  (iscsi_command, out, err))
        return (out, err)
    def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
                         **kwargs):
        # Convenience wrapper for 'iscsiadm --op update -n <key> -v <value>'.
        iscsi_command = ('--op', 'update', '-n', property_key,
                         '-v', property_value)
        return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs)
    def initialize_connection(self, volume, connector):
        """Initializes the connection and returns connection info.
        The iscsi driver returns a driver_volume_type of 'iscsi'.
        The format of the driver data is defined in _get_iscsi_properties.
        Example return value::
            {
                'driver_volume_type': 'iscsi'
                'data': {
                    'target_discovered': True,
                    'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                    'target_portal': '127.0.0.0.1:3260',
                    'volume_id': 1,
                    'access_mode': 'rw'
                }
            }
        """
        # NOTE(jdg): Yes, this is duplicated in the volume/target
        # drivers, for now leaving it as there are 3'rd party
        # drivers that don't use target drivers, but inherit from
        # this base class and use this init data
        iscsi_properties = self._get_iscsi_properties(volume)
        return {
            'driver_volume_type': 'iscsi',
            'data': iscsi_properties
        }
    def validate_connector(self, connector):
        # iSCSI drivers require the initiator information
        # err_msg is a (format, args) tuple unpacked into LOG.error so the
        # logging call performs the interpolation.
        required = 'initiator'
        if required not in connector:
            err_msg = (_LE('The volume driver requires %(data)s '
                           'in the connector.'), {'data': required})
            LOG.error(*err_msg)
            raise exception.InvalidConnectorException(missing=required)
    def terminate_connection(self, volume, connector, **kwargs):
        # No per-connection teardown needed for the generic iSCSI driver.
        pass
    def get_volume_stats(self, refresh=False):
        """Get volume stats.
        If 'refresh' is True, run update the stats first.
        """
        if refresh:
            self._update_volume_stats()
        return self._stats
    def _update_volume_stats(self):
        """Retrieve stats info from volume group."""
        LOG.debug("Updating volume stats")
        data = {}
        backend_name = self.configuration.safe_get('volume_backend_name')
        data["volume_backend_name"] = backend_name or 'Generic_iSCSI'
        data["vendor_name"] = 'Open Source'
        data["driver_version"] = '1.0'
        data["storage_protocol"] = 'iSCSI'
        data["pools"] = []
        # NOTE(review): capacities are reported as 0 here -- subclasses are
        # expected to fill in real numbers; confirm before relying on these.
        if self.pools:
            for pool in self.pools:
                new_pool = {}
                new_pool.update(dict(
                    pool_name=pool,
                    total_capacity_gb=0,
                    free_capacity_gb=0,
                    provisioned_capacity_gb=0,
                    reserved_percentage=100,
                    QoS_support=False
                ))
                data["pools"].append(new_pool)
        else:
            # No pool configured, the whole backend will be treated as a pool
            single_pool = {}
            single_pool.update(dict(
                pool_name=data["volume_backend_name"],
                total_capacity_gb=0,
                free_capacity_gb=0,
                provisioned_capacity_gb=0,
                reserved_percentage=100,
                QoS_support=False
            ))
            data["pools"].append(single_pool)
        self._stats = data
class FakeISCSIDriver(ISCSIDriver):
    """iSCSI driver stand-in that records commands instead of running them."""
    def __init__(self, *args, **kwargs):
        super(FakeISCSIDriver, self).__init__(execute=self.fake_execute,
                                              *args, **kwargs)
    def create_volume(self, volume):
        """No-op in fake mode."""
        pass
    def check_for_setup_error(self):
        """No setup necessary in fake mode."""
        pass
    def initialize_connection(self, volume, connector):
        """Return a minimal fake iSCSI connection description."""
        return {
            'driver_volume_type': 'iscsi',
            'data': {'access_mode': 'rw'}
        }
    def terminate_connection(self, volume, connector, **kwargs):
        """No-op in fake mode."""
        pass
    @staticmethod
    def fake_execute(cmd, *_args, **_kwargs):
        """Log the would-be command and pretend it succeeded."""
        LOG.debug("FAKE ISCSI: %s", cmd)
        return None, None
    def create_volume_from_snapshot(self, volume, snapshot):
        """No-op in fake mode."""
        pass
    def create_cloned_volume(self, volume, src_vref):
        """No-op in fake mode."""
        pass
    def delete_volume(self, volume):
        """No-op in fake mode."""
        pass
    def create_snapshot(self, snapshot):
        """No-op in fake mode."""
        pass
    def delete_snapshot(self, snapshot):
        """No-op in fake mode."""
        pass
    def local_path(self, volume):
        """Return the canonical fake device path for ``volume``."""
        return '/tmp/volume-%s' % volume.id
    def ensure_export(self, context, volume):
        """No-op in fake mode."""
        pass
    def create_export(self, context, volume):
        """No-op in fake mode; returns no model changes."""
        pass
    def remove_export(self, context, volume):
        """No-op in fake mode."""
        pass
class ISERDriver(ISCSIDriver):
    """Driver base for volumes exported over iSER (iSCSI over RDMA).

    Model provider properties mirror the iSCSI driver:
    ``provider_location`` holds '<ip>:<port>,<portal> <target IQN>' in
    ietadm-discovery format, and ``provider_auth`` holds the
    space-separated triple '<auth method> <auth username> <auth password>'
    (CHAP is the only auth method currently used).
    """
    def __init__(self, *args, **kwargs):
        super(ISERDriver, self).__init__(*args, **kwargs)
        # Mirror the iSER-specific options onto the generic iSCSI names so
        # older code reading the iscsi_* settings keeps working.
        conf = self.configuration
        conf.num_volume_device_scan_tries = conf.num_iser_scan_tries
        conf.iscsi_num_targets = conf.iser_num_targets
        conf.iscsi_target_prefix = conf.iser_target_prefix
        conf.iscsi_ip_address = conf.iser_ip_address
        conf.iscsi_port = conf.iser_port
    def initialize_connection(self, volume, connector):
        """Return iSER connection info for ``volume``.

        Same shape as the iSCSI variant but with driver_volume_type
        'iser'; the data dict layout comes from _get_iscsi_properties
        (target_discovered, target_iqn, target_portal, volume_id, ...).
        """
        connection_data = self._get_iscsi_properties(volume)
        return {'driver_volume_type': 'iser',
                'data': connection_data}
    def _update_volume_stats(self):
        """Build and cache the stats dict describing this backend."""
        LOG.debug("Updating volume stats")
        backend_name = self.configuration.safe_get('volume_backend_name')
        data = {
            "volume_backend_name": backend_name or 'Generic_iSER',
            "vendor_name": 'Open Source',
            "driver_version": '1.0',
            "storage_protocol": 'iSER',
            "pools": [],
        }
        # Every reported pool carries the same placeholder capacities.
        pool_template = dict(
            total_capacity_gb=0,
            free_capacity_gb=0,
            reserved_percentage=100,
            QoS_support=False,
        )
        if self.pools:
            for pool_name in self.pools:
                data["pools"].append(dict(pool_template,
                                          pool_name=pool_name))
        else:
            # Whole backend acts as a single pool when none are configured.
            data["pools"].append(dict(pool_template,
                                      pool_name=data["volume_backend_name"]))
        self._stats = data
class FakeISERDriver(FakeISCSIDriver):
    """iSER driver stand-in that records commands instead of running them."""
    def __init__(self, *args, **kwargs):
        super(FakeISERDriver, self).__init__(execute=self.fake_execute,
                                             *args, **kwargs)
    def initialize_connection(self, volume, connector):
        """Return a minimal fake iSER connection description."""
        return {'driver_volume_type': 'iser',
                'data': {}}
    @staticmethod
    def fake_execute(cmd, *_args, **_kwargs):
        """Log the would-be command and pretend it succeeded."""
        LOG.debug("FAKE ISER: %s", cmd)
        return None, None
class FibreChannelDriver(VolumeDriver):
    """Base class for drivers that expose volumes over Fibre Channel."""
    def __init__(self, *args, **kwargs):
        super(FibreChannelDriver, self).__init__(*args, **kwargs)
    def initialize_connection(self, volume, connector):
        """Build the connection info dict for a Fibre Channel attach.

        Concrete drivers must override this.  The result uses
        driver_volume_type 'fibre_channel'; data['target_wwn'] is either
        a single WWN string or a list of remote WWNs exporting the
        volume, e.g.::

            {'driver_volume_type': 'fibre_channel',
             'data': {'target_discovered': True,
                      'target_lun': 1,
                      'target_wwn': '1234567890123',
                      'access_mode': 'rw'}}

        or with 'target_wwn': ['1234567890123', '0987654321321'].
        """
        msg = _("Driver must implement initialize_connection")
        raise NotImplementedError(msg)
    def validate_connector(self, connector):
        """Ensure the connector carries the wwpns and wwnns FC needs."""
        for setting in ('wwpns', 'wwnns'):
            self.validate_connector_has_setting(connector, setting)
    @staticmethod
    def validate_connector_has_setting(connector, setting):
        """Raise InvalidConnectorException when ``setting`` is absent/empty."""
        if setting not in connector or not connector[setting]:
            # (format, args) tuple unpacked into LOG.error below.
            msg = (_LE(
                "FibreChannelDriver validate_connector failed. "
                "No '%(setting)s'. Make sure HBA state is Online."),
                {'setting': setting})
            LOG.error(*msg)
            raise exception.InvalidConnectorException(missing=setting)
| |
#!/usr/bin/python
#
# set_defs.py
#
# Class inclues all set defintions
# Allow defining set of accepted values and quick membership checking
#
import re;
from collections import deque;
from qutil import *
import nltk;
# set definitions here
# allows for quick change
# and quick intersection / union, subtraction of sets
# be CAREFUL of abbreviations (converts to lower for check)
months = set(['january', 'february', 'march', 'april', 'may', 'june',
              'july', 'august', 'september', 'october', 'november', 'december',
              'jan', 'feb', 'mar', 'apr', 'may', 'jun',
              'jul', 'aug', 'sep', 'sept', 'oct', 'nov', 'dec'])
# BUG FIX: the original literal was missing the comma after 'sun', so
# implicit string concatenation produced 'sunthursday' and silently dropped
# both 'sun' and 'thursday' from the set.
days = set(['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
            'saturday', 'sunday',
            'mon', 'tue', 'tues', 'wed', 'thur', 'thu', 'fri', 'sat', 'sun'])
timewords = set(['today', 'tomorrow', 'yesterday'])
qWords = set(['who', 'what', 'where', 'when', 'why', 'did', 'do', 'does',
              'is', 'was', 'how'])
namePre = set(['mr.', 'mrs.', 'ms.', 'dr.', 'miss'])
linkVerb = set(['is', 'am', 'are', 'was'])
endPhrasePunc = set(['!', ',', '.', ';', '?'])
subPronouns = set(['he', 'she', 'we', 'they', 'i'])
objPronouns = set(['her', 'him', 'me', 'us', 'them'])
posPronouns = set(['their', 'his', 'her', 'our', 'my'])
beingV = set(['is', 'are', 'was', 'were'])
# from wikipedia -- full state names plus two-letter abbreviations
# (membership tests against this set are case-sensitive)
states = set(['Alabama','Alaska','Arizona','Arkansas','California','Colorado','Connecticut','Delaware','District of Columbia','Florida','Georgia','Hawaii','Idaho','Illinois','Indiana','Iowa','Kansas','Kentucky','Louisiana','Maine','Maryland','Massachusetts','Michigan','Minnesota','Mississippi','Missouri','Montana','Nebraska','Nevada','New Hampshire','New Jersey','New Mexico','New York','North Carolina','North Dakota','Ohio','Oklahoma','Oregon','Pennsylvania','Rhode Island','South Carolina','South Dakota','Tennessee','Texas','Utah','Vermont','Virginia','Washington','West Virginia','Wisconsin','Wyoming','AL','AK','AZ','AR','CA','CO','CT','DE','DC','FL','GA','HI','ID','IL','IN','IA','KS','KY','LA','ME','MD','MA','MI','MN','MS','MO','MT','NE','NV','NH','NJ','NM','NY','NC','ND','OH','OK','OR','PA','RI','SC','SD','TN','TX','UT','VT','VA','WA','WV','WI','WY'])
countries = set([])
## REGULAR EXPRESSION STRINGS
# (note there is an alernative way of savying the expression,
# but that is mostly applied when used multiple times)
# Regex patterns are now raw strings so backslash escapes like \d are not
# subject to (deprecated) string-literal escape interpretation.
# dates divided by foward slashes or dashes
# accept both year/month/day and month/day/year
# also year-month-day and month-day-year
# with both the year as 2 or 4 digits;
# does not check value of digits
RE_DATE_FSLH1 = r'\d{1,2}/\d{1,2}/(\d{4}|\d{2})$'
RE_DATE_FSLH2 = r'(\d{4}|\d{2})/\d{1,2}/\d{1,2}$'
RE_DATE_DASH1 = r'\d{1,2}-\d{1,2}-(\d{4}|\d{2})$'
RE_DATE_DASH2 = r'(\d{4}|\d{2})-\d{1,2}-\d{1,2}$'
# tag sequence is number [anything] number
RE_CD_EP_CD = r'CD (?P<mid>[^\s]{1,4}) CD'
# tag sequence is [not_number] proper_noun number
RE_X_NNP_CD = r'([^C][^D]+) NNP CD'
#uses python sets for speed.
class Identity(object):
    """Membership and pattern tests over word/POS-tag sequences.

    Thin wrapper around the module-level word sets plus a few
    regex-driven date and named-entity heuristics.  Membership checks
    lower-case the word first, so callers may pass any case.
    """
    def isBeingVerb(self, word):
        """True when ``word`` is a form of 'to be' (is/are/was/were)."""
        return word.lower() in beingV
    def isEndPhrasePunc(self, word):
        """True when ``word`` is phrase-ending punctuation."""
        return word.lower() in endPhrasePunc
    def isReplacablePronoun(self, word):
        """Classify a pronoun replaceable by an antecedent.

        :returns: 1 for subject pronouns, -1 for object pronouns,
                  2 for 'it', and 0 ("false") when not a pronoun.
        """
        lowered = word.lower()
        if lowered in subPronouns:
            return 1
        elif lowered in objPronouns:
            return -1
        elif lowered == "it":
            return 2
        # not pronoun
        else:
            return 0
    def isMonth(self, word):
        """True when ``word`` names (or abbreviates) a month."""
        return word.lower() in months
    def isDayOfWeek(self, word):
        """True when ``word`` names (or abbreviates) a weekday."""
        return word.lower() in days
    def isTimeWord(self, word):
        """True for relative day words: today, tomorrow, yesterday."""
        return word.lower() in timewords
    def isQuestionWord(self, word):
        """True when ``word`` commonly introduces a question."""
        return word.lower() in qWords
    def isNamePre(self, word):
        """True for honorifics such as 'mr.' or 'dr.' (periods included)."""
        return word.lower() in namePre
    def isLinkVerb(self, word):
        """True when ``word`` is a linking verb (is/am/are/was)."""
        return word.lower() in linkVerb
    def isPlace(self, first, second):
        """True when two (token, tag) pairs look like 'City, State'.

        Both tokens must be proper nouns (via qutil's is_propN) and the
        second token must match a known state/country (case-sensitive).
        """
        (firstTok, firstTag) = first
        (secondTok, secondTag) = second
        if is_propN(firstTag) and is_propN(secondTag):
            state = secondTok in states
            country = secondTok in countries
            return state or country
        else:
            return False
    # timewords: today, friday, yesterday, etc
    def isTemporal(self, word):
        """True for weekday names or relative day words."""
        return word.lower() in (days | timewords)
    def isTimeDep(self, wordList, ckCode):
        """True when ``wordList`` contains a time-dependent word.

        :param ckCode: > 0 checks weekdays only; < 0 checks
                       today/tomorrow/yesterday only; == 0 checks both.
        """
        for word in wordList:
            if ckCode < 0 and self.isTimeWord(word):
                return True
            # BUG FIX: was self.isDayOfWeeK (capital K), which raised
            # AttributeError for every ckCode > 0 call.
            elif ckCode > 0 and self.isDayOfWeek(word):
                return True
            elif ckCode == 0 and self.isTemporal(word):
                return True
        return False
    # return dates in a given phrase
    # TODO pin down numerical constraints better
    def findDates(self, wordList, tagList):
        """Locate date expressions in a tagged sentence.

        Slides a three-tag window across the sentence and returns
        (start_index, length) pairs for spans that look like dates.
        NOTE(review): 'start' can go negative for the first two tokens,
        silently wrapping via negative indexing -- confirm intended.
        """
        n = len(wordList)
        tagset = deque(["", ""])
        locations = []
        for idx in range(0, n):
            # index of the first tag inside the 3-wide window
            start = idx - 2
            tagset.append(tagList[idx])
            newStr = q2str(tagset, 3)
            m = re.match(RE_CD_EP_CD, newStr)
            if m:
                midTag = m.groupdict()['mid']
                if len(midTag) >= 2 and midTag[0:2] == "NN":
                    # 'CD <noun> CD' with a month word -> 3-token date
                    if self.isMonth(wordList[start + 1]):
                        locations.append((start, 3))
                elif len(midTag) > 0 and midTag == ",":
                    # 'CD , CD' preceded by a month -> 4-token date
                    if idx > 0 and self.isMonth(wordList[start - 1]):
                        locations.append((start - 1, 4))
            # case for a month and day without year
            # contains errors with regular expression
            elif re.match(RE_X_NNP_CD, newStr) or newStr == ' NNP CD':
                if self.isMonth(wordList[start + 1]):
                    locations.append((start + 1, 2))
            elif tagList[idx] == "CD":
                word = wordList[idx]
                # case for numeric date separated by slashes or dashes
                if re.match(RE_DATE_FSLH1, word) or \
                   re.match(RE_DATE_FSLH2, word) or \
                   re.match(RE_DATE_DASH1, word) or \
                   re.match(RE_DATE_DASH2, word):
                    locations.append((idx, 1))
                # case for a bare 4-digit year
                elif re.match(r'\d{4}$', word):
                    if idx < n - 1 and not self.isMonth(wordList[idx + 1]):
                        if int(word) > 0 and int(word) < 2100:
                            locations.append((idx, 1))
            tagset.popleft()
        return locations
    # cheap Named Entity Recognition (really only identifying
    # capitalized strings of words; does not take meaning into account)
    def findNER(self, wordList, tagList):
        """Spot runs of two or more proper-noun tokens.

        Returns (start_index, length) pairs.  The sentence-initial word
        only counts when both the supplied tag and nltk's tagger agree
        it is NNP (leading capitalization is otherwise ambiguous).
        NOTE(review): a proper-noun run that reaches the end of the
        sentence is never emitted -- confirm this is intended.
        """
        n = len(wordList)
        nltkTag = nltk.pos_tag(wordList)
        locations = []
        idx = 0
        propStrLen = 0
        if tagList[idx] == "NNP" and nltkTag[idx][1] == "NNP":
            tag = tagList[idx]
        elif n > 1:
            idx += 1
            tag = tagList[idx]
        prevTag = None
        while idx < n:
            if idx > 0:
                prevTag = tagList[idx - 1]
            if self.isPropN(wordList[idx], tagList[idx]):
                propStrLen += 1
            elif prevTag == "NNP" and tagList[idx] == "CD":
                # trailing number after a proper noun extends the run
                propStrLen += 1
            else:
                if propStrLen > 1:
                    locations.append((idx - propStrLen, propStrLen))
                propStrLen = 0
            idx += 1
        return locations
    def isPropN(self, word, tag):
        """True when ``word`` is tagged NNP or starts with a capital."""
        return tag == "NNP" or word[0].isupper()
    def NNPoftheNNP(self, wordList, tagList):
        """Length of a leading proper prepositional phrase, or 0.

        Recognizes 'NNP of/and/: NNP' (length 3) and
        'NNP of/and/: the NNP' (length 4) starting at index 0.
        """
        n = len(tagList)
        if n >= 3:
            if self.isPropN(wordList[0], tagList[0]) and \
               (tagList[1] == "IN" or tagList[1] == "CC" or tagList[1] == ":"):
                if self.isPropN(wordList[2], tagList[2]):
                    return 3
                elif n >= 4:
                    if tagList[2] == "DT":
                        if self.isPropN(wordList[3], tagList[3]):
                            return 4
        return 0
    # finds locations of Proper Prepositional phrases such as
    # Lord of the Rings, Harry Potter and the _______
    def findPropPrep(self, wordList, tagList):
        """Wrapper over NNPoftheNNP; returns (start, length) pairs."""
        locations = []
        for idx in range(len(tagList)):
            if self.isPropN(wordList[idx], tagList[idx]):
                span = self.NNPoftheNNP(wordList[idx:idx + 4],
                                        tagList[idx:idx + 4])
                if span > 0:
                    locations.append((idx, span))
        return locations
| |
#import optimizers.BollingerOptimizer as Optimizer
import optimizers.BollingerOptimizer as Optimizer
import models.PortfolioModel, models.PositionModel, models.OrderModel, models.StrategyDataModel
import tables as pt, numpy as np
from optparse import OptionParser
import sys, time
import Portfolio, Position, Order, DataAccess as da , StrategyData
import os
import dircache
import numpy as np
#import curveFittingOptimizer
#import optimizers.BollingerOptimizer as Optimizer
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
class Simulator():
    def __init__(self, cash, stocks, startTime, endTime, interval, minCom, comPerShare, isTable, maxEffect, arrayFile, listOfStocksFile):
        """Set up a market simulation run.

        :param cash: starting cash balance for the portfolio
        :param stocks: starting stock positions for the portfolio
        :param startTime: timestamp marking the beginning of the simulation
        :param endTime: timestamp marking the end of the simulation
        :param interval: time between iterations of the strategy
        :param minCom: minimum commission charged per transaction
        :param comPerShare: per-share commission (used when it exceeds minCom)
        :param isTable: True to use the pytables-backed data path
        :param maxEffect: max fractional price impact of one transaction
        :param arrayFile: data file for the array backend (currently unused
                          -- the array branch below is commented out)
        :param listOfStocksFile: path to a file listing stock symbols
        """
        # strategy contains a reference to the strategy method specified in the command line
        # self.strategy = strategy
        # startTime/endTime are the timestamps marking the beginning and end of the time for which the simulation should run
        self.startTime = startTime
        self.currTimestamp = startTime
        self.endTime = endTime
        # interval is the amount of time between iterations of the strategy
        self.interval = interval
        # minCom is the minimum commission per transaction
        self.minCom = minCom
        # comPerShare is the calculated commission per share--if this is greater than the minimum commission, this is what gets used
        self.comPerShare = comPerShare
        # timeStampIndex and currDataTimeIndex are markers to track the current position in the list of timestamps
        self.timeStampIndex = 0
        self.currDataTimeIndex = 0
        # maxEffect is the maximum percentage change in price a single transaction can have on the actual market price
        self.maxEffect = maxEffect
        # times becomes the list of timestamps
        self.times = []
        # isTable tells the simulator whether to use the table- or array-specific methods
        self.isTable = isTable
        #starting portfolio, position, and order initializations
        self.portfolio = Portfolio.Portfolio(cash, stocks)
        self.position = Position.Position()
        self.order = Order.Order(self.isTable)
        #populate the strategyData with the relevant type of data storage
        if isTable:
#            self.h5f= pt.openFile(pytablesFile, mode = "a") # if mode ='w' is used here then the file gets overwritten!
            listOfPaths=list()
            #listOfPaths.append("C:\\generated data files\\one stock per file\\maintain folder structure\\US_NASDAQ\\")
            #listOfPaths.append("C:\\temp\\")
            #listOfPaths.append("C:\\tempoutput\\")
            listOfPaths.append("/hzr71/research/QSData/tempdata/") #Modification for gekko
            self.listOfStocks= self.getStocks(listOfStocksFile, listOfPaths)
            self.dataAccess= da.DataAccess (True, listOfPaths, "/StrategyData", "StrategyData", True, self.listOfStocks, self.startTime, self.endTime)
            self.strategyData = StrategyData.StrategyData("someRandomStringToNotBreakTheCode", self.dataAccess, self.isTable)
#        else:
#            self.strategyData = StrategyData.StrategyData(arrayFile,self.isTable)
def getStocks(self, pathToFile, listOfPaths):
listOfStocks=list()
if (os.path.exists(pathToFile)):
print "Reading in stock names from file..."
f= open(pathToFile)
lines= f.readlines()
f.close()
for line1 in lines:
listOfStocks.append(line1.partition("\n")[0])
#for done
else:
#Path does not exist
print "Reading in all stock names..."
fileExtensionToRemove=".h5"
for path in listOfPaths:
stocksAtThisPath=list ()
stocksAtThisPath= dircache.listdir(str(path))
#Next, throw away everything that is not a .h5 And these are our stocks!
stocksAtThisPath = filter (lambda x:(str(x).find(str(fileExtensionToRemove)) > -1), stocksAtThisPath)
#Now, we remove the .h5 to get the name of the stock
stocksAtThisPath = map(lambda x:(x.partition(str(fileExtensionToRemove))[0]),stocksAtThisPath)
for stock in stocksAtThisPath:
listOfStocks.append(stock)
return listOfStocks
#readStocksFromFile done
def addTimeStamps(self):
# generates the list of timestamps
global timersActive
temp = []
if timersActive:
print 'Generating valid timestamps'
cnt = 0
cycTime = time.time()
# for i in self.strategyData.strategyData.iterrows():
for ts in self.dataAccess.getTimestampArray():
if ts not in temp:
temp.append(ts)
if timersActive:
if(cnt%1000000==0):
print '%i rows finished: %i secs elapsed'%(cnt,time.time()-cycTime)
cnt+=1
if timersActive:
print 'all rows added: %i secs elapsed'%(time.time()-cycTime)
#Put the list in order, convert it to a NumPy array
temp.sort()
temp = np.array(temp)
return temp
def calcCommission(self, volume):
'''
@summary: returns the commission on a given trade given the volume
'''
return max(minCom,volume * self.comPerShare)
def getCurrentDataTimestamp(self):
'''
@summary: returns the timestamp of the most recent data available
'''
while self.times[self.currDataTimeIndex+1]<self.currTimestamp:
self.currDataTimeIndex += 1
return self.times[self.currDataTimeIndex]
def getExecutionTimestamp(self):
'''
@summary: returns the timestamp of the current execution timestamp
@attention: Orders placed on the last day can not be executed after it..so they will be executed on that day. Possible bug?
'''
while self.times[self.timeStampIndex]<self.currTimestamp:
self.timeStampIndex += 1
if (self.timeStampIndex+1 < len (self.times)):
idealTime = self.times[self.timeStampIndex+1]
else:
idealTime = self.times[self.timeStampIndex]
return idealTime
def calcEffect(self, maxVol, shares):
# calculates the effect in the market of a given trade
return float(shares)/maxVol * self.maxEffect
# def getVolumePerDay(self, symbol, timestamp):
# '''
# COMMENT BY SHREYAS JOSHI. THIS FUNCTION IS NOT NECESSARY. 22 JUN 2010. HENCE REMOVING IT.
# @summary: returns the volume of a given stock for the given day (used in conjunction with calcEffect). Call with startTime = endTime = desired timestamp to get just that timestamp
# '''
#
#
## stocks = self.strategyData.getStocks(timestamp, timestamp+1, symbol)
# stocks= self.dataAccess.getData(symbol, 'volume', timestamp, timestamp+1) # we need only the volume here
# if len(stocks) > 0:
# myStockasDict = stocks[0] #Grab the first dictionary in the list
# return myStockasDict['volume'] # Get the volume
# return None
def buyStock(self, newOrder):
'''
@summary: function takes in an instance of OrderDetails, executes the changes to the portfolio and adds the order to the order table
@param newOrder: an instance of OrderDetails representing the new order
@warning: The Order should not be added to the order table before calling this function
'''
ts = self.getCurrentDataTimestamp()
maxVol4Day = self.dataAccess.getStockDataItem(newOrder['symbol'], 'volume', ts)#self.getVolumePerDay(newOrder['symbol'], ts)
if newOrder['order_type'] == 'moo':
#market order open
# price = strategyData.getPrice(ts, newOrder['symbol'], 'adj_open')
price = self.dataAccess.getStockDataItem(newOrder['symbol'], 'adj_open', ts)
if price == None or np.isnan (price):
if noisy:
print "Price data unavailable for ts:",ts,'stock:',newOrder['symbol']
return None
elif maxVol4Day == None or np.isnan(maxVol4Day):
if noisy:
print "Volume Data Not Available for ts:", ts, 'stock:', newOrder['symbol']
return None
else:
print "Checking cash..."
checkAmount = min(abs(newOrder['shares']),maxVol4Day)
# New is cost the original total price (price * shares) + effect*Total Price
# Basically, you raise the cost as you buy
cost = (checkAmount * price[0]['adj_open'] + (checkAmount * price[0]['adj_open'] * self.calcEffect(maxVol4Day, checkAmount))) + self.calcCommission(checkAmount)
if(cost>self.portfolio.currCash):
#Not enough cash to buy stock
print "Not enough cash to buy stock."
#print "Apparently not enough cash. I don't believe this. Current cash: " + str (self.portfolio.currCash) + " total cost: "+ str (cost)+ ", cost of one share: "+str (self.dataAccess.getStockDataItem(newOrder['symbol'], 'adj_open', ts))
return None
if abs(newOrder['shares']) > maxVol4Day:
if newOrder['shares'] < 0:
newOrder['shares'] = -maxVol4Day
else:
newOrder['shares'] = maxVol4Day
newOrder.update()
self.order.order.flush()
#__execute trade__
#populate fill field in order
newOrder['fill/timestamp'] = ts
newOrder['fill/quantity'] = newOrder['shares'] if (newOrder['task'].upper() == 'BUY') else -newOrder['shares']
newOrder['fill/cashChange'] = -price
newOrder['fill/commission'] = self.calcCommission(newOrder['shares'])
newOrder['fill/impactCost'] = newOrder['shares'] * price * self.calcEffect(maxVol4Day, newOrder['shares']) # This is the CHANGE in the total cost - what effect the volume has
#add trade to portfolio
self.portfolio.buyTransaction(newOrder)
#add position
self.position.addPosition(ts,newOrder['symbol'],newOrder['fill/quantity'],price)
elif newOrder['order_type'] == 'moc':
#market order close
# price = self.strategyData.getPrice(ts, newOrder['symbol'], 'adj_close')
# price = self.dataAccess.getData(newOrder['symbol'], 'adj_close', ts, ts)[0]['adj_close']
price = self.dataAccess.getStockDataItem(newOrder['symbol'], 'adj_close', ts)
if price == None or np.isnan (price):
if noisy:
print "Price data unavailable for ts:",ts,'stock:',newOrder['symbol']
return None
elif maxVol4Day == None or np.isnan(maxVol4Day):
if noisy:
print "Volume Data Not Available for ts:", ts, 'stock:', newOrder['symbol']
return None
else:
checkAmount = min(abs(newOrder['shares']),maxVol4Day)
# New is cost the original total price (price * shares) + effect*Total Price
# Basically, you raise the cost as you buy
# cost = (checkAmount + (checkAmount * self.calcEffect(maxVol4Day, checkAmount))) + self.calcCommission(checkAmount)
cost = (checkAmount * price + (checkAmount * price * self.calcEffect(maxVol4Day, checkAmount))) + self.calcCommission(checkAmount)
if(cost>self.portfolio.currCash):
#Not enough cash to buy stock
print "Not enough cash. Current cash: " + str (self.portfolio.currCash) + " total cost: "+ str (cost)+ ", cost of one share: "+str (self.dataAccess.getStockDataItem(newOrder['symbol'], 'adj_close', ts))
return None
if abs(newOrder['shares']) > maxVol4Day:
if newOrder['shares'] < 0:
newOrder['shares'] = -maxVol4Day
else:
newOrder['shares'] = maxVol4Day
newOrder.update()
self.order.order.flush()
newOrder['fill/timestamp'] = ts
newOrder['fill/quantity'] = newOrder['shares'] if (newOrder['task'].upper() == 'BUY') else -newOrder['shares']
newOrder['fill/cashChange'] = -price
newOrder['fill/commission'] = self.calcCommission(newOrder['shares'])
newOrder['fill/impactCost'] = newOrder['shares'] * price * self.calcEffect(maxVol4Day, newOrder['shares']) # This is the CHANGE in the total cost - what effect the volume has
#add trade to portfolio
self.portfolio.buyTransaction(newOrder)
#add position
self.position.addPosition(ts,newOrder['symbol'],newOrder['fill/quantity'],price)
elif newOrder['order_type'] == 'limit':
#limit order
price = newOrder['limit_price']
if price == None or np.isnan (price):
if noisy:
print "Price data unavailable for ts:",ts,'stock:',newOrder['symbol']
return None
elif maxVol4Day == None or np.isnan(maxVol4Day):
if noisy:
print "Volume Data Not Available for ts:", ts, 'stock:', newOrder['symbol']
return None
else:
# if ((newOrder['limit_price'] > self.strategyData.getPrice(ts, newOrder['symbol'], 'adj_high')) or ( newOrder['limit_price'] < self.strategyData.getPrice(ts, newOrder['symbol'], 'adj_low'))):
if ((newOrder['limit_price'] > self.dataAccess.getStockDataItem(newOrder['symbol'], 'adj_high', ts)) or ( newOrder['limit_price'] < self.dataAccess.getData(newOrder['symbol'], 'adj_low', ts))):
#limit price outside of daily range
return None
checkAmount = min(abs(newOrder['shares']),maxVol4Day)
# New is cost the original total price (price * shares) + effect*Total Price
# Basically, you raise the cost as you buy
cost = (checkAmount * price + (checkAmount * price * self.calcEffect(maxVol4Day, checkAmount))) + self.calcCommission(checkAmount)
if(cost>self.portfolio.currCash):
#Not enough cash to buy stock
return None
if abs(newOrder['shares']) > maxVol4Day:
if newOrder['shares'] < 0:
newOrder['shares'] = -maxVol4Day
else:
newOrder['shares'] = maxVol4Day
newOrder.update()
self.order.order.flush()
#__execute trade__
#populate fill field in order
newOrder['fill/timestamp'] = ts
newOrder['fill/quantity'] = newOrder['shares'] if (newOrder['task'].upper() == 'BUY') else -newOrder['shares']
newOrder['fill/cashChange'] = -price
newOrder['fill/commission'] = self.calcCommission(newOrder['shares'])
newOrder['fill/impactCost'] = newOrder['shares'] * price * self.calcEffect(maxVol4Day, newOrder['shares']) # This is the CHANGE in the total cost - what effect the volume has
#add trade to portfolio
self.portfolio.buyTransaction(newOrder)
#add position
self.position.addPosition(ts,newOrder['symbol'],newOrder['fill/quantity'],price)
elif newOrder['order_type'] == 'vwap':
#volume weighted average price
# price = strategyData.getPrice(ts, newOrder['symbol'], 'adj_open')
# price = self.dataAccess.getData(newOrder['symbol'], 'adj_open', ts, ts)[0]['adj_close']
price = self.dataAccess.getStockDataItem(newOrder['symbol'], 'adj_open', ts)
if price == None or np.isnan (price):
if noisy:
print "Price data unavailable for ts:",ts,'stock:',newOrder['symbol']
return None
elif maxVol4Day == None or np.isnan(maxVol4Day):
if noisy:
print "Volume Data Not Available for ts:", ts, 'stock:', newOrder['symbol']
return None
else:
checkAmount = min(abs(newOrder['shares']),maxVol4Day)
# New is cost the original total price (price * shares) + effect*Total Price
# Basically, you raise the cost as you buy
price += self.dataAccess.getStockDataItem(newOrder['symbol'], 'adj_close', ts)#[0]['adj_close'] #strategyData.getPrice(ts, newOrder['symbol'], 'adj_close')
price += self.dataAccess.getStockDataItem(newOrder['symbol'], 'adj_high', ts)#[0]['adj_high'] #strategyData.getPrice(ts, newOrder['symbol'], 'adj_high')
price += self.dataAccess.getStockDataItem(newOrder['symbol'], 'adj_low', ts)#[0]['adj_low'] #strategyData.getPrice(ts, newOrder['symbol'], 'adj_low')
price = price / 4.
cost = (checkAmount * price + (checkAmount * price * self.calcEffect(maxVol4Day, checkAmount))) + self.calcCommission(checkAmount)
if(cost>self.portfolio.currCash):
#Not enough cash to buy stock
return None
if abs(newOrder['shares']) > maxVol4Day:
if newOrder['shares'] < 0:
newOrder['shares'] = -maxVol4Day
else:
newOrder['shares'] = maxVol4Day
newOrder.update()
self.order.order.flush()
# New is cost the original total price (price * shares) + effect*Total Price
# Basically, you raise the cost the more you buy.
#__execute trade__
#populate fill field in order
newOrder['fill/timestamp'] = ts
newOrder['fill/quantity'] = newOrder['shares'] if (newOrder['task'].upper() == 'BUY') else -newOrder['shares']
newOrder['fill/cashChange'] = -price
newOrder['fill/commission'] = self.calcCommission(newOrder['shares'])
newOrder['fill/impactCost'] = newOrder['shares'] * price * self.calcEffect(maxVol4Day, newOrder['shares']) # This is the CHANGE in the total cost - what effect the volume has
#add trade to portfolio
self.portfolio.buyTransaction(newOrder)
#add position
self.position.addPosition(ts,newOrder['symbol'],newOrder['fill/quantity'],price)
else:
#throw invalid type error
raise TypeError("Not an existing trade type '%s'." % str(newOrder['order_type']))
newOrder.update()
self.order.order.flush()
return price
def sellStock(self,newOrder):
'''
@summary: function takes in an instance of OrderDetails, executes the changes to the portfolio and adds the order to the order table
@param newOrder: an instance of OrderDetails representing the new order
@warning: The Order should not be added to the order table before calling this function
'''
ts = self.getCurrentDataTimestamp() #need a function to get the next available time we can trade
maxVol4Day = self.dataAccess.getStockDataItem(newOrder['symbol'], 'volume', ts)#self.getVolumePerDay(newOrder['symbol'], ts)
if newOrder['order_type'] == 'moo':
#market order open
price = self.dataAccess.getStockDataItem(newOrder['symbol'], 'adj_open', ts)#[0]['adj_open'] #self.strategyData.getPrice(ts, newOrder['symbol'], 'adj_open')
if price == None or np.isnan (price):
if noisy:
print "Price data unavailable for",ts,newOrder['symbol']
return None
elif maxVol4Day == None or np.isnan(maxVol4Day):
if noisy:
print "Volume Data Not Available for ts:", ts, 'stock:', newOrder['symbol']
return None
else:
checkAmount = min(abs(newOrder['shares']),maxVol4Day)
if newOrder['task'].upper() == 'SELL':
if not (self.portfolio.hasStock(newOrder['symbol'],checkAmount)): # NEW
#Not enough shares owned to sell requested amount
print "Not enough shares owned to sell the requested amount"
return None
else:
if not (self.portfolio.hasStock(newOrder['symbol'],-checkAmount)): # NEW
#Not enough shares owned to sell requested amount
print "Not enough shares owned to sell the requested amount"
return None
cost = (checkAmount * price + (checkAmount * price * self.calcEffect(maxVol4Day, checkAmount))) + self.calcCommission(checkAmount)
if(cost>self.portfolio.currCash) and (newOrder['shares'] < 0):
#Not enough cash to cover stock
print "Not enough cash to cover stock"
return None
#__execute trade__
#populate fill field in order
if abs(newOrder['shares']) > maxVol4Day:
if newOrder['shares'] < 0:
newOrder['shares'] = -maxVol4Day
else:
newOrder['shares'] = maxVol4Day
newOrder.update()
self.order.order.flush()
newOrder['fill/timestamp'] = ts
newOrder['fill/quantity'] = newOrder['shares'] if (newOrder['task'].upper() == 'SELL') else -newOrder['shares']
newOrder['fill/cashChange'] = price #NEW
newOrder['fill/commission'] = self.calcCommission(newOrder['shares'])
newOrder['fill/impactCost'] = newOrder['shares'] * price * self.calcEffect(maxVol4Day, newOrder['shares']) # This is the CHANGE in the total cost - what effect the volume has
#add trade to portfolio
self.portfolio.sellTransaction(newOrder)
#remove positions according to lifo/fifo
self.position.removePosition(newOrder['symbol'],newOrder['shares'] if (newOrder['task'].upper() == 'SELL') else -newOrder['shares'],newOrder['close_type'])
elif newOrder['order_type'] == 'moc':
#market order close
price = self.dataAccess.getStockDataItem(newOrder['symbol'], 'adj_close', ts)#[0]['adj_close'] #strategyData.getPrice(ts, newOrder['symbol'], 'adj_close')
if price == None or np.isnan (price):
if noisy:
print "Price data unavailable for",ts,newOrder['symbol']
return None
elif maxVol4Day == None or np.isnan(maxVol4Day):
if noisy:
print "Volume Data Not Available for ts:", ts, 'stock:', newOrder['symbol']
return None
else:
checkAmount = min(abs(newOrder['shares']),maxVol4Day)
if newOrder['shares'] > 0:
if not (self.portfolio.hasStock(newOrder['symbol'],checkAmount)): # NEW
#Not enough shares owned to sell requested amount
print "Not enough shares owned to sell the requested amount"
return None
else:
if not (self.portfolio.hasStock(newOrder['symbol'],-checkAmount)): # NEW
#Not enough shares owned to sell requested amount
print "Not enough shares owned to sell the requested amount"
return None
cost = (checkAmount * price + (checkAmount * price * self.calcEffect(maxVol4Day, checkAmount))) + self.calcCommission(checkAmount)
if(cost>self.portfolio.currCash) and (newOrder['shares'] < 0):
#Not enough cash to cover stock
print "Not enough cash to cover stock"
return None
#__execute trade__
#populate fill field in order
if abs(newOrder['shares']) > maxVol4Day:
if newOrder['shares'] < 0:
newOrder['shares'] = -maxVol4Day
else:
newOrder['shares'] = maxVol4Day
newOrder.update()
self.order.order.flush()
newOrder['fill/timestamp'] = ts
newOrder['fill/quantity'] = newOrder['shares'] if (newOrder['task'].upper() == 'SELL') else -newOrder['shares']
newOrder['fill/cashChange'] = price
newOrder['fill/commission'] = self.calcCommission(newOrder['shares'])
newOrder['fill/impactCost'] = newOrder['shares'] * price * self.calcEffect(maxVol4Day, newOrder['shares']) # This is the CHANGE in the total cost - what effect the volume has
#add trade to portfolio
self.portfolio.sellTransaction(newOrder)
#remove positions according to lifo/fifo
self.position.removePosition(newOrder['symbol'],newOrder['shares'] if (newOrder['task'].upper() == 'SELL') else -newOrder['shares'],newOrder['close_type'])
elif newOrder['order_type'] == 'limit':
#limit order
price = newOrder['limit_price']
if price == None or np.isnan (price):
if noisy:
print "Price data unavailable for",ts,newOrder['symbol']
return None
elif maxVol4Day == None or np.isnan(maxVol4Day):
if noisy:
print "Volume Data Not Available for ts:", ts, 'stock:', newOrder['symbol']
return None
else:
checkAmount = min(abs(newOrder['shares']),maxVol4Day)
if newOrder['shares'] > 0:
if not (self.portfolio.hasStock(newOrder['symbol'],checkAmount)): # NEW
#Not enough shares owned to sell requested amount
print "Not enough shares owned to sell the requested amount"
return None
else:
if not (self.portfolio.hasStock(newOrder['symbol'],-checkAmount)): # NEW
#Not enough shares owned to sell requested amount
return None
cost = (checkAmount * price + (checkAmount * price * self.calcEffect(maxVol4Day, checkAmount))) + self.calcCommission(checkAmount)
if(cost>self.portfolio.currCash) and (newOrder['shares'] < 0):
#Not enough cash to cover stock
print "Not enough cash to cover stock"
return None
#__execute trade__
#populate fill field in order
# if ((newOrder['limit_price'] > strategyData.getPrice(ts, newOrder['symbol'], 'adj_high')) or ( newOrder['limit_price'] < strategyData.getPrice(ts, newOrder['symbol'], 'adj_low'))):
if ((newOrder['limit_price'] > self.dataAccess.getStockDataItem(newOrder['symbol'], 'adj_high', ts)) or ( newOrder['limit_price'] < self.dataAccess.getStockDataItem(newOrder['symbol'], 'adj_low', ts))):
#limit price outside of daily range
return None
if abs(newOrder['shares']) > maxVol4Day:
if newOrder['shares'] < 0:
newOrder['shares'] = -maxVol4Day
else:
newOrder['shares'] = maxVol4Day
newOrder.update()
self.order.order.flush()
#__execute trade__
#populate fill field in order
newOrder['fill/timestamp'] = ts
newOrder['fill/quantity'] = newOrder['shares'] if (newOrder['task'].upper() == 'SELL') else -newOrder['shares']
newOrder['fill/cashChange'] = price
newOrder['fill/commission'] = self.calcCommission(newOrder['shares'])
newOrder['fill/impactCost'] = newOrder['shares'] * price * self.calcEffect(maxVol4Day, newOrder['shares']) # This is the CHANGE in the total cost - what effect the volume has
#add trade to portfolio
self.portfolio.sellTransaction(newOrder)
#remove positions according to lifo/fifo
self.position.removePosition(newOrder['symbol'],newOrder['shares'] if (newOrder['task'].upper() == 'SELL') else -newOrder['shares'],newOrder['close_type'])
elif newOrder.order_type == 'vwap':
#volume weighted average price
price = self.dataAccess.getStockDataItem(newOrder['symbol'], 'adj_open', ts)#[0]['adj_open'] #strategyData.getPrice(ts, newOrder['symbol'], 'adj_open')
if price == None or np.isnan (price):
if noisy:
print "Price data unavailable for",ts,newOrder['symbol']
return None
elif maxVol4Day == None or np.isnan(maxVol4Day):
if noisy:
print "Volume Data Not Available for ts:", ts, 'stock:', newOrder['symbol']
return None
else:
checkAmount = min(abs(newOrder['shares']),maxVol4Day)
if newOrder['shares'] > 0:
if not (self.portfolio.hasStock(newOrder['symbol'],checkAmount)): # NEW
#Not enough shares owned to sell requested amount
print "Not enough shares owned to sell the requested amount"
return None
else:
if not (self.portfolio.hasStock(newOrder['symbol'],-checkAmount)): # NEW
#Not enough shares owned to sell requested amount
print "Not enough shares owned to sell the requested amount"
return None
price += self.dataAccess.getStockDataItem(newOrder['symbol'], 'adj_close', ts)#[0]['adj_close'] #strategyData.getPrice(ts, newOrder['symbol'], 'adj_close')
price += self.dataAccess.getStockDataItem(newOrder['symbol'], 'adj_high', ts)#[0]['adj_high'] #strategyData.getPrice(ts, newOrder['symbol'], 'adj_high')
price += self.dataAccess.getStockDataItem(newOrder['symbol'], 'adj_low', ts)#[0]['adj_low'] #strategyData.getPrice(ts, newOrder['symbol'], 'adj_low')
price = price / 4.
cost = (checkAmount * price + (checkAmount * price * self.calcEffect(maxVol4Day, checkAmount))) + self.calcCommission(checkAmount)
if(cost>self.portfolio.currCash) and (newOrder['shares'] < 0):
#Not enough cash to cover stock
print "Not enough cash to cover stock"
return None
#__execute trade__
#populate fill field in order
if abs(newOrder['shares']) > maxVol4Day:
if newOrder['shares'] < 0:
newOrder['shares'] = -maxVol4Day
else:
newOrder['shares'] = maxVol4Day
newOrder.update()
self.order.order.flush()
newOrder['fill/timestamp'] = ts
newOrder['fill/quantity'] = newOrder['shares'] if (newOrder['task'].upper() == 'SELL') else -newOrder['shares']
newOrder['fill/cashChange'] = price
newOrder['fill/commission'] = self.calcCommission(newOrder['shares'])
newOrder['fill/impactCost'] = newOrder['shares'] * price * self.calcEffect(maxVol4Day, newOrder['shares']) # This is the CHANGE in the total cost - what effect the volume has
#add trade to portfolio
self.portfolio.sellTransaction(newOrder)
#remove positions according to lifo/fifo
self.position.removePosition(newOrder['symbol'],newOrder['shares'] if (newOrder['task'].upper() == 'SELL') else -newOrder['shares'],newOrder['close_type'])
else:
#throw invalid type error
raise TypeError("Not an existing trade type '%s'." % str(newOrder.order_type))
newOrder.update()
self.order.order.flush()
return price
    def execute(self):
        '''
        @summary: This function iterates through the orders and attempts to execute all the ones that are still valid and unfilled
        @attention: BUY and SHORT orders are routed through buyStock, SELL and
            COVER through sellStock; logging is gated on the module-level
            ``noisy`` flag (defined elsewhere in this file)
        '''
        # count is only used to label orders in the log messages below
        count = 0
        for order in self.order.getOrders():
            # only orders placed strictly before the current timestamp...
            if (order['timestamp'] < self.currTimestamp):
                # ...that have not yet expired (duration is a time span)...
                if (order['duration'] + order['timestamp']) >= self.currTimestamp:
                    # ...and are still unfilled (0 marks "no fill yet")
                    if order['fill/timestamp'] == 0:
                        #Have unfilled, valid orders
                        if order['task'].upper() == "BUY":
                            #is a buy
                            # holding a long position already: a plain buy adds to it
                            if self.portfolio.hasStock(order['symbol'],1):
                                if order['shares']>0:
                                    result = self.buyStock(order)
                                    if noisy:
                                        if result is not None:
                                            print "Succeeded in buying %d shares of %s for %.2f as %s, with close type %s. Placed at: %d. Current timestamp: %d, order #%d" % (order['shares'], order['symbol'], result, order['order_type'], order['close_type'], order['timestamp'], self.currTimestamp, count)
                                    #else:
                                        #print "THIS IS MOST LIKELY WRONG- Did not succeed in buying %d shares of %s as %s; not enough cash. Order valid until %d. Placed at: %d. Current timestamp: %d, order #%d" %(order['shares'], order['symbol'], order['order_type'], order['duration'] + order['timestamp'], order['timestamp'], self.currTimestamp, count)
                                else:
                                    if noisy:
                                        print "Did not succeed in buying %d shares of %s as %s; negative values are not valid buy amounts. Order valid until %d. Placed at: %d. Current timestamp: %d, order #%d" %(order['shares'], order['symbol'], order['order_type'], order['duration'] + order['timestamp'], order['timestamp'], self.currTimestamp, count)
                            # currently short this symbol: must cover before buying
                            elif self.portfolio.hasStock(order['symbol'],-1):
                                if noisy:
                                    print "Did not succeed in buying %d shares of %s as %s; you must cover your shortsell before you can buy. Order valid until %d. Placed at: %d. Current timestamp: %d, order #%d" %(order['shares'], order['symbol'], order['order_type'], order['duration'] + order['timestamp'], order['timestamp'], self.currTimestamp, count)
                            # no position at all: open a new long position
                            else:
                                result = self.buyStock(order)
                                if noisy:
                                    if result:
                                        print "Succeeded in buying %d shares of %s for %.2f as %s. Placed at: %d. Current timestamp: %d, order #%d" % (order['shares'], order['symbol'], result, order['order_type'], order['timestamp'], self.currTimestamp, count)
                                    else:
                                        print "Did not succeed in buying %d shares of %s as %s. Order valid until %d. Placed at: %d. Current timestamp: %d, order #%d" %(order['shares'], order['symbol'], order['order_type'], order['duration'] + order['timestamp'], order['timestamp'], self.currTimestamp, count)
                        elif order['task'].upper() == "SELL":
                            # is a sell
                            if order['shares']>0:
                                result = self.sellStock(order)
                                if noisy:
                                    if result:
                                        print "Succeeded in selling %d shares of %s for %.2f as %s, with close type %s. Current timestamp: %d" % (order['shares'], order['symbol'], result, order['order_type'], order['close_type'], self.currTimestamp)
                                    #else:
                                        #print "Did not succeed in selling %d shares of %s as %s; not enough owned. Order valid until %d. Current timestamp: %d" %(order['shares'], order['symbol'], order['order_type'], order['duration'] + order['timestamp'], self.currTimestamp)
                            else:
                                if noisy:
                                    print "Did not succeed in selling %d shares of %s as %s; you cannot sell a non-positive amount. Order valid until %d. Current timestamp: %d" %(order['shares'], order['symbol'], order['order_type'], order['duration'] + order['timestamp'], self.currTimestamp)
                        elif order['task'].upper() == "SHORT":
                            #is a short sell
                            # NOTE(review): short sells are executed through
                            # buyStock -- confirm the sign conventions upstream
                            if self.portfolio.hasStock(order['symbol'],-1):
                                if order['shares']>0:
                                    result = self.buyStock(order)
                                    if noisy:
                                        if result:
                                            print "Succeeded in short selling %d shares of %s for %.2f as %s, with close type %s. Placed at: %d. Current timestamp: %d, order #%d" % (-order['shares'], order['symbol'], -result, order['order_type'], order['close_type'], order['timestamp'], self.currTimestamp, count)
                                        else:
                                            print "Did not succeed in short selling %d shares of %s as %s; not enough cash??? How do you not have enough cash for a short sell?. Order valid until %d. Placed at: %d. Current timestamp: %d, order #%d" %(order['shares'], order['symbol'], order['order_type'], order['duration'] + order['timestamp'], order['timestamp'], self.currTimestamp, count)
                                else:
                                    if noisy:
                                        print "Did not succeed in short selling %d shares of %s as %s; negative values are not valid short sell amounts. Order valid until %d. Placed at: %d. Current timestamp: %d, order #%d" %(-order['shares'], order['symbol'], order['order_type'], order['duration'] + order['timestamp'], order['timestamp'], self.currTimestamp, count)
                            # already long this symbol: cannot also short it
                            elif self.portfolio.hasStock(order['symbol'],1):
                                if noisy:
                                    print "Did not succeed in short selling %d shares of %s as %s; you cannot short sell a stock you already own. Order valid until %d. Placed at: %d. Current timestamp: %d, order #%d" %(-order['shares'], order['symbol'], order['order_type'], order['duration'] + order['timestamp'], order['timestamp'], self.currTimestamp, count)
                            # no position: open a new short position
                            else:
                                result = self.buyStock(order)
                                if noisy:
                                    if result:
                                        print "Succeeded in short selling %d shares of %s for %.2f as %s, with close type %s. Placed at: %d. Current timestamp: %d, order #%d" % (-order['shares'], order['symbol'], result, order['order_type'], order['close_type'], order['timestamp'], self.currTimestamp, count)
                                    else:
                                        print "Did not succeed in short selling %d shares of %s as %s; not enough cash??? How do you not have enough cash for a short sell?. Order valid until %d. Placed at: %d. Current timestamp: %d, order #%d" %(-order['shares'], order['symbol'], order['order_type'], order['duration'] + order['timestamp'], order['timestamp'], self.currTimestamp, count)
                        elif order['task'].upper() == "COVER":
                            # is a cover
                            if order['shares']>0:
                                result = self.sellStock(order)
                                if noisy:
                                    if result:
                                        print "Succeeded in covering %d shares of %s for %.2f as %s, with close type %s. Current timestamp: %d" % (-order['shares'], order['symbol'], result, order['order_type'], order['close_type'], self.currTimestamp)
                                    else:
                                        print "Did not succeed in covering %d shares of %s as %s; not short enough or not enough cash. Order valid until %d. Current timestamp: %d" %(-order['shares'], order['symbol'], order['order_type'], order['duration'] + order['timestamp'], self.currTimestamp)
                            else:
                                if noisy:
                                    print "Did not succeed in covering %d shares of %s as %s; you cannot cover a non-positive amount. Order valid until %d. Current timestamp: %d" %(-order['shares'], order['symbol'], order['order_type'], order['duration'] + order['timestamp'], self.currTimestamp)
                        else:
                            # unrecognized task string
                            if noisy:
                                print "'%s' is not a valid task. Order valid until %d. Current timestamp: %d" % (order['task'].upper(), order['duration'] + order['timestamp'], self.currTimestamp)
            count += 1
def addOrders(self,commands):
'''
@summary: takes in commands (return value of strategy), parses it, and adds it in the correct format to the order data storage
'''
if self.isTable:
for stock in commands:
newOrder = self.order.addOrder(self.getExecutionTimestamp(),stock[0],stock[1],stock[2],stock[3],stock[4],stock[5],stock[6])
newOrder.append()
self.order.order.flush()
else:
for stock in commands:
self.order.addOrder(self.getExecutionTimestamp(),stock[0],stock[1],stock[2],stock[3],stock[4],stock[5],stock[6])
    def run(self):
        '''
        @summary: Run the simulation.  Steps through every market timestamp
            between startTime and endTime: executes pending orders, asks the
            optimizer for new orders, optionally prints progress/timings, and
            finally plots the portfolio value curve via canvas.print_figure.
        '''
        optimizer= Optimizer.Optimizer(self.listOfStocks)
        #optimizer= curveFittingOptimizer.Optimizer(self.listOfStocks)
        timestamps= list(self.dataAccess.getTimestampArray())
        portfolioValList= list()
        # Advance ctr to the first timestamp >= startTime.
        # NOTE(review): raises IndexError if startTime is beyond the last
        # available timestamp -- confirm callers guarantee otherwise.
        ctr=0
        while (timestamps[ctr]< self.startTime):
            ctr+=1
        #while loop done
        self.currTimestamp = timestamps[ctr] #self.startTime
        # Advance ctr2 to the first timestamp >= endTime and snap self.endTime
        # onto an actual data timestamp (the last one if endTime is past data).
        ctr2= ctr
        while (timestamps[ctr2]< self.endTime):
            ctr2+=1
            if (ctr2>= len(timestamps)):
                break
        #while loop done
        if (ctr2>= len (timestamps)):
            self.endTime= timestamps[ctr2-1]
        else:
            self.endTime= timestamps[ctr2]
        if timersActive:
            print "Simulation timer started at "+ str(self.currTimestamp)
            totalTime = time.time()
        cycTime = time.clock()
        # self.strategyData.currTimestamp = self.currTimestamp
        # i counts completed cycles; currently it is only written, never read.
        i=1
        # Main loop: stop at endTime, and never simulate past the present
        # wall-clock time.
        while self.currTimestamp < self.endTime and self.currTimestamp < time.time():
            # While not yet reached the end timestamp AND not yet caught up to present:
            # execute the existing orders, then run the strategy and add the new orders
            beforeExec=time.clock()
            self.execute()
            afterExec= time.clock()
            # self.addOrders(self.strategy(self.portfolio,self.position,self.currTimestamp,self.strategyData))
            # self.addOrders(optimizer.execute(self.portfolio,self.position,self.currTimestamp,self.strategyData))
            beforeAddOrders= time.clock()
            self.addOrders(optimizer.execute(self.portfolio,self.position,self.currTimestamp,self.strategyData, self.dataAccess))
            afterAddOrders= time.clock()
            if noisy or timersActive:
                print '' #newline
                if mtm:
                    # Mark-to-market logging; portValue is a zero placeholder here.
                    #portValue = self.portfolio.currCash + self.strategyData.calculatePortValue(self.portfolio.currStocks,self.currTimestamp)
                    portValue= float (0.0)
                    print "| %i %.2f |"%(self.currTimestamp,portValue) + " Value from portfolio class: " + str (self.portfolio.calcPortfolioValue(self.currTimestamp, self.dataAccess))
            if timersActive and not noisy:
                print "Strategy at %i took %.4f secs"%(self.currTimestamp,(time.clock()-cycTime))
                i+=1
                cycTime = time.clock()
            if noisy and not timersActive:
                # NOTE(review): portfolio values are only accumulated for the
                # final plot when noisy is enabled.
                portValue = (self.portfolio.calcPortfolioValue(self.currTimestamp, self.dataAccess)) #self.portfolio.currCash + self.strategyData.calculatePortValue(self.portfolio.currStocks,self.currTimestamp)
                portfolioValList.append(portValue)
                print "Strategy at %d completed successfully." % self.currTimestamp
                print "Current cash: " + str(self.portfolio.currCash)
                print "Current stocks: %s."%self.portfolio.currStocks
                print "Current portfolio value: "+ str(portValue)+"\n\n"
                #print "Current portfolio value: %.2f.\n\n"%(portValue)
            if noisy and timersActive:
                portValue = float (self.portfolio.calcPortfolioValue(self.currTimestamp, self.dataAccess)) #self.portfolio.currCash + self.strategyData.calculatePortValue(self.portfolio.currStocks,self.currTimestamp)
                portfolioValList.append(portValue)
                print "Strategy at %i took %.4f secs"%(self.currTimestamp,(time.clock()-cycTime))
                print "Exec function took: " + str(afterExec - beforeExec)
                print "Time for addorders: " + str(afterAddOrders - beforeAddOrders)
                print "Strategy at %d completed successfully." % self.currTimestamp
                #print "Current cash: %.2f."%(self.portfolio.currCash)
                print "Current cash: " + str(self.portfolio.currCash)
                print "Current stocks: %s."%self.portfolio.currStocks
                #print "Current portfolio value: %.2f.\n\n"%(portValue)
                print "Current portfolio value: "+ str(portValue)+"\n\n"
                i+=1
                cycTime = time.clock()
            # Step to the next actual data timestamp rather than adding a fixed
            # interval -- a fixed interval does not work because of daylight
            # saving time complications.
            ctr+=1
            self.currTimestamp= timestamps[ctr]
            #self.strategyData.currTimestamp = self.currTimestamp
        if noisy:
            print "Simulation complete."
        if timersActive:
            print "Simulation complete in %i seconds."%(time.time() - totalTime)
        self.portfolio.close()
        self.position.close()
        self.order.close()
        #self.strategyData.close()
        # Plot the collected portfolio values and save the figure as
        # 'portfolio' (matplotlib appends the default image extension).
        fig = Figure()
        canvas = FigureCanvas(fig)
        ax = fig.add_subplot(111)
        ax.plot (portfolioValList)
        ax.set_title('Portfolio value')
        ax.grid(True)
        ax.set_xlabel('time')
        ax.set_ylabel('$')
        canvas.print_figure('portfolio')
    #def run ends
# Module-level simulation defaults; main() overrides these from the values
# parsed out of the config files.
cash = 0
comPerShare = 0.0
minCom = 0.
startTime = 0
endTime = 0
timeStep = 0
maxEffect = 0.
decayCycles = 0
noisy = False
timersActive = False
mtm = False
isTable = False
arrayFile = 'datafiles/defaultArrayFile.pk'
listOfStocksFile = "someRandomString"
def main():
global cash,comPerShare,minCom,startTime,endTime,timeStep,maxEffect,decayCycles,noisy,timersActive,mtm,isTable,arrayFile,listOfStocksFile
# NOTE: the OptionParser class is currently not necessary, as we can just access sys.argv[1:], but if we
# want to implement optional arguments, this will make it considerably easier.
parser = OptionParser()
# parser.parse_args() returns a tuple of (options, args)
# As of right now, we don't have any options for our program, so we only care about the three arguments:
# config file, strategy module name, strategy main function name
args = parser.parse_args()[1]
# if len(args) != 3 and len(args) != 2:
# print "FAILURE TO INCLUDE THE CORRECT NUMBER OF ARGUMENTS; TERMINATING."
# return
if len(args) != 1:
print "FAILURE TO INCLUDE THE CORRECT NUMBER OF ARGUMENTS; TERMINATING."
return
configFile = 'configfiles/'+args[0]
# if len(args) == 3:
# stratName = args[2]
# else:
# stratName = "strategyMain"
if noisy:
print "About to parse configuration files. Any invalid fields found in the user-specified file will use the relevant value from the default file instead."
for fileName in ["configfiles/default.ini",configFile]:
if noisy:
print "Parsing %s now..." % filename[12:]
thisFile = open(fileName,'r')
for line in thisFile.readlines():
# Separate the command in the config file from the arguments
if not ('#' in line):
line = line.strip().split('=')
command = line[0].strip().upper()
if(command == 'ARRAYFILE' or command =='PYTABLESFILE'):
if len(line)>1:
vals = line[1].split()
else:
vals = []
else:
if len(line)>1:
vals = line[1].upper().split()
else:
vals = []
# Parse commands, look for correct number of arguments, do rudimentary error checking, apply to simulator as appropriate
if command == 'CASH':
if len(vals) != 1:
print "WRONG NUMBER OF ARGUMENTS FOR CASH!"
else:
try:
cash = float(vals[0])
except ValueError:
print "ARGUMENT FOR CASH IS NOT A FLOAT!"
# Code for handling stocks in a starting portfolio. Implementation not correct; removing for the time being.
# elif command == "STOCK":
# if len(vals) != 2:
# print "WRONG NUMBER OF ARGUMENTS FOR STOCK!! RAAAAWR! ALSO, I NEED TO LEARN TO THROW ERRORS!"
# else:
# try:
# stocks.append([vals[0],int(vals[1])])
# except:
# print "STOCK TAKES IN A STOCK NAME AND AN INT! AND DON'T YOU FORGET IT!"
elif command == "COMPERSHARE":
if len(vals) != 1:
print "NEED EXACTLY ONE PARAMETER FOR COMMISSIONS PER SHARE."
else:
try:
comPerShare = float(vals[0])
except ValueError:
print "COMMISSIONS PER SHARE REQUIRES A FLOAT INPUT"
elif command == "MINCOM":
if len(vals) != 1:
print "NEED EXACTLY ONE PARAMETER FOR MINIMUM COMMISSION."
else:
try:
minCom = float(vals[0])
except ValueError:
print "MINIMUM COMMISSIONS REQUIRES A FLOAT INPUT"
elif command == "STARTTIME":
if len(vals) != 1:
print "NEED EXACTLY ONE PARAMETER FOR START TIME."
else:
try:
startTime = long(vals[0])
except ValueError:
print "START TIME REQUIRES A LONG INPUT"
elif command == "ENDTIME":
if len(vals) != 1:
print "NEED EXACTLY ONE PARAMETER FOR END TIME."
else:
try:
endTime = long(vals[0])
except ValueError:
print "END TIME REQUIRES A LONG INPUT"
elif command == "TIMESTEP":
if len(vals) != 1:
print "NEED EXACTLY ONE PARAMETER FOR TIME STEP."
else:
try:
timeStep = long(vals[0])
except ValueError:
print "TIME STEP REQUIRES A LONG INPUT"
elif command == "MAXMARKETEFFECT":
if len(vals) != 1:
print "NEED EXACTLY ONE PARAMETER FOR MAX MARKET EFFECT."
else:
try:
maxEffect = float(vals[0])
except ValueError:
print "MAX MARKET EFFECT REQUIRES A FLOAT INPUT"
elif command == "DECAYCYCLES":
if len(vals) != 1:
print "NEED EXACTLY ONE PARAMETER FOR DECAY CYCLES."
else:
try:
decayCycles = int(vals[0])
except ValueError:
print "DECAY CYCLES REQUIRES AN INTEGER INPUT"
elif command == "DATATYPE":
if len(vals) != 1:
print "NEED EXACTLY ONE PARAMETER FOR DATATYPE."
else:
if vals[0] == "TABLE":
isTable = True
elif vals[0] == "ARRAY":
isTable = False
else:
print "%s IS NOT A VALID PARAMETER FOR DATATYPE." % vals[0]
elif command == "ARRAYFILE":
if len(vals) != 1:
print "NEED EXACTLY ONE PARAMETER FOR ARRAYFILE."
else:
try:
arrayFile = str(vals[0])
except ValueError:
print "ARRAYFILE REQUIRES A STRING INPUT"
elif command == "PYTABLESFILE":
if len(vals) != 1:
print "NEED EXACTLY ONE PARAMETER FOR PYTABLESFILE."
else:
try:
pytablesFile = str(vals[0])
except ValueError:
print "PYTABLESFILE REQUIRES A STRING INPUT"
elif command == "NOISY":
noisy = True
elif command == "TIMER":
timersActive = True
elif command == "MTM":
mtm = True
elif command == "LISTOFSTOCKSFILE":
listOfStocksFile= str (vals[0])
if not (os.path.exists(listOfStocksFile)):
print "File containing list of stocks does not exist. Will read in all files at specified paths."
# raise ValueError
elif command != '':
print "Unrecognized command '%s'." % command
thisFile.close()
if noisy:
print "Config files finished parsing. Starting simulation."
# Add the strategies subdirectory to the system path so Python can find the module
sys.path.append(sys.path[0] + '/strategies')
# myStrategy = eval("__import__('%s').%s" % (args[1],stratName) )
mySim = Simulator(cash,{}, startTime, endTime, timeStep, minCom, comPerShare, isTable, maxEffect, arrayFile, listOfStocksFile)
# Add the timestamps
if isTable:
mySim.times = mySim.addTimeStamps()
#mySim.strategyData.timestampIndex = mySim.times
else:
pass
#mySim.times = mySim.strategyData.timestampIndex
mySim.run()
# Standard script entry point: run main() only when this file is executed
# directly from the command line, not when it is imported as a module.
if __name__ == "__main__":
    main()
| |
#@PydevCodeAnalysisIgnore
"""
Python Enumerations
See https://pypi.python.org/pypi/enum34
BSD License
"""
#pylint: disable-all
import sys as _sys
# Names exported by `from enum import *`.
__all__ = ['Enum', 'IntEnum', 'unique']
# Running interpreter version as a float (e.g. 2.7, 3.4); used for the
# version-specific code paths throughout this module.
pyver = float('%s.%s' % _sys.version_info[:2])
# Feature probes for very old interpreters: each try/except falls back
# gracefully when a builtin or stdlib name is unavailable.
try:
    any
except NameError:
    def any(iterable):
        # Fallback for the any() builtin on Python < 2.5.
        for element in iterable:
            if element:
                return True
        return False
try:
    from collections import OrderedDict
except ImportError:
    # Python < 2.7: member maps fall back to plain (unordered) dicts.
    OrderedDict = None
try:
    basestring
except NameError:
    # In Python 2 basestring is the ancestor of both str and unicode
    # in Python 3 it's just str, but was missing in 3.1
    basestring = str
class _RouteClassAttributeToGetattr(object):
    """Route attribute access on a class to __getattr__.
    This is a descriptor, used to define attributes that act differently when
    accessed through an instance and through a class.  Instance access remains
    normal, but access to an attribute through a class will be routed to the
    class's __getattr__ method; this is done by raising AttributeError.
    """
    def __init__(self, fget=None):
        # fget: getter called with the instance, like a read-only property.
        self.fget = fget
    def __get__(self, instance, ownerclass=None):
        # Class-level access (instance is None) must fail so Python falls
        # back to the (meta)class __getattr__, where enum members live.
        if instance is None:
            raise AttributeError()
        return self.fget(instance)
    def __set__(self, instance, value):
        # Read-only: defining __set__ also makes this a data descriptor.
        raise AttributeError("can't set attribute")
    def __delete__(self, instance):
        raise AttributeError("can't delete attribute")
def _is_descriptor(obj):
    """Return True if *obj* implements any part of the descriptor protocol."""
    return any(
        hasattr(obj, slot) for slot in ('__get__', '__set__', '__delete__')
    )
def _is_dunder(name):
    """Return True if *name* is a __dunder__ name, False otherwise."""
    # A real dunder needs at least one character between the double
    # underscores, and the characters adjacent to them must not themselves
    # be underscores (so '____' and '___x__' do not qualify).
    return (
        len(name) > 4
        and name.startswith('__')
        and name.endswith('__')
        and name[2] != '_'
        and name[-3] != '_'
    )
def _is_sunder(name):
    """Return True if *name* is a _sunder_ name, False otherwise."""
    # Exactly one leading and one trailing underscore, with a
    # non-underscore character adjacent on both sides.
    return (
        len(name) > 2
        and name[0] == '_'
        and name[-1] == '_'
        and name[1] != '_'
        and name[-2] != '_'
    )
def _make_class_unpicklable(cls):
    """Make the given class un-picklable."""
    def _refuse_to_reduce(self, protocol=None):
        raise TypeError('%r cannot be pickled' % self)
    # Sabotage the preferred pickle entry point so pickle.dumps fails fast,
    # and hide the real module so pickle-by-reference cannot find the class.
    cls.__reduce_ex__ = _refuse_to_reduce
    cls.__module__ = '<unknown>'
class _EnumDict(dict):
    """Track enum member order and ensure member names are not reused.
    EnumMeta will use the names found in self._member_names as the
    enumeration member names.
    """
    def __init__(self):
        super(_EnumDict, self).__init__()
        # Candidate member names, in definition order.
        self._member_names = []
    def __setitem__(self, key, value):
        """Changes anything not dundered or not a descriptor.
        If a descriptor is added with the same name as an enum member, the name
        is removed from _member_names (this may leave a hole in the numerical
        sequence of values).
        If an enum member name is used twice, an error is raised; duplicate
        values are not checked for.
        Single underscore (sunder) names are reserved.
        Note: in 3.x __order__ is simply discarded as a not necessary piece
        leftover from 2.x
        """
        # Check order matters: sunders always error, dunders are stored but
        # never become members, and only plain (non-descriptor) values are
        # recorded as candidate enum members.
        if pyver >= 3.0 and key == '__order__':
            return
        if _is_sunder(key):
            raise ValueError('_names_ are reserved for future Enum use')
        elif _is_dunder(key):
            pass
        elif key in self._member_names:
            # descriptor overwriting an enum?
            raise TypeError('Attempted to reuse key: %r' % key)
        elif not _is_descriptor(value):
            if key in self:
                # enum overwriting a descriptor?
                raise TypeError('Key already defined as: %r' % self[key])
            self._member_names.append(key)
        super(_EnumDict, self).__setitem__(key, value)
# Dummy value for Enum, as EnumMeta explicitly checks for it; until EnumMeta
# finishes running the first time, the Enum class does not exist yet.  This
# is also why there are checks in EnumMeta like `if Enum is not None`.
Enum = None
class EnumMeta(type):
    """Metaclass for Enum.

    Builds enum classes: collects candidate members from the class body,
    instantiates each member exactly once (folding duplicate values into
    aliases), and wires up pickling, comparison, and lookup behavior for
    both Python 2 and Python 3.
    """
    @classmethod
    def __prepare__(metacls, cls, bases):
        # Python 3 only: supply an _EnumDict so member definition order is
        # recorded and name reuse is rejected as the body executes.
        return _EnumDict()
    def __new__(metacls, cls, bases, classdict):
        # an Enum class is final once enumeration items have been defined; it
        # cannot be mixed with other types (int, float, etc.) if it has an
        # inherited __new__ unless a new __new__ is defined (or the resulting
        # class will fail).
        #
        # On Python 2 __prepare__ never runs, so replay a plain dict through
        # an _EnumDict to get the same validation.
        if type(classdict) is dict:
            original_dict = classdict
            classdict = _EnumDict()
            for k, v in original_dict.items():
                classdict[k] = v
        member_type, first_enum = metacls._get_mixins_(bases)
        __new__, save_new, use_args = metacls._find_new_(classdict, member_type,
                first_enum)
        # save enum items into separate mapping so they don't get baked into
        # the new class
        members = dict((k, classdict[k]) for k in classdict._member_names)
        for name in classdict._member_names:
            del classdict[name]
        # py2 support for definition order: without __order__, Python 2 falls
        # back to sorting members by value.
        __order__ = classdict.get('__order__')
        if __order__ is None:
            if pyver < 3.0:
                __order__ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])]
            else:
                __order__ = classdict._member_names
        else:
            del classdict['__order__']
            if pyver < 3.0:
                __order__ = __order__.replace(',', ' ').split()
                aliases = [name for name in members if name not in __order__]
                __order__ += aliases
        # check for illegal enum names (any others?)
        invalid_names = set(members) & set(['mro'])
        if invalid_names:
            raise ValueError('Invalid enum member name(s): %s' % (
                ', '.join(invalid_names), ))
        # create our new Enum type
        enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict)
        enum_class._member_names_ = []               # names in random order
        if OrderedDict is not None:
            enum_class._member_map_ = OrderedDict()
        else:
            enum_class._member_map_ = {}             # name->value map
        enum_class._member_type_ = member_type
        # Reverse value->name map for hashable values.
        enum_class._value2member_map_ = {}
        # instantiate them, checking for duplicates as we go
        # we instantiate first instead of checking for duplicates first in case
        # a custom __new__ is doing something funky with the values -- such as
        # auto-numbering ;)
        if __new__ is None:
            __new__ = enum_class.__new__
        for member_name in __order__:
            value = members[member_name]
            if not isinstance(value, tuple):
                args = (value, )
            else:
                args = value
            if member_type is tuple:   # special case for tuple enums
                args = (args, )        # wrap it one more time
            if not use_args or not args:
                enum_member = __new__(enum_class)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = value
            else:
                enum_member = __new__(enum_class, *args)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = member_type(*args)
            value = enum_member._value_
            enum_member._name_ = member_name
            enum_member.__objclass__ = enum_class
            enum_member.__init__(*args)
            # If another member with the same value was already defined, the
            # new member becomes an alias to the existing one.
            for name, canonical_member in enum_class._member_map_.items():
                if canonical_member.value == enum_member._value_:
                    enum_member = canonical_member
                    break
            else:
                # Aliases don't appear in member names (only in __members__).
                enum_class._member_names_.append(member_name)
            enum_class._member_map_[member_name] = enum_member
            try:
                # This may fail if value is not hashable. We can't add the value
                # to the map, and by-value lookups for this value will be
                # linear.
                enum_class._value2member_map_[value] = enum_member
            except TypeError:
                pass
        # If a custom type is mixed into the Enum, and it does not know how
        # to pickle itself, pickle.dumps will succeed but pickle.loads will
        # fail.  Rather than have the error show up later and possibly far
        # from the source, sabotage the pickle protocol for this class so
        # that pickle.dumps also fails.
        #
        # However, if the new class implements its own __reduce_ex__, do not
        # sabotage -- it's on them to make sure it works correctly.  We use
        # __reduce_ex__ instead of any of the others as it is preferred by
        # pickle over __reduce__, and it handles all pickle protocols.
        unpicklable = False
        if '__reduce_ex__' not in classdict:
            if member_type is not object:
                methods = ('__getnewargs_ex__', '__getnewargs__',
                        '__reduce_ex__', '__reduce__')
                if not any(m in member_type.__dict__ for m in methods):
                    _make_class_unpicklable(enum_class)
                    unpicklable = True
        # double check that repr and friends are not the mixin's or various
        # things break (such as pickle)
        for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
            class_method = getattr(enum_class, name)
            obj_method = getattr(member_type, name, None)
            enum_method = getattr(first_enum, name, None)
            if name not in classdict and class_method is not enum_method:
                if name == '__reduce_ex__' and unpicklable:
                    continue
                setattr(enum_class, name, enum_method)
        # method resolution and int's are not playing nice
        # Python's less than 2.6 use __cmp__
        if pyver < 2.6:
            if issubclass(enum_class, int):
                setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))
        elif pyver < 3.0:
            if issubclass(enum_class, int):
                for method in (
                        '__le__',
                        '__lt__',
                        '__gt__',
                        '__ge__',
                        '__eq__',
                        '__ne__',
                        '__hash__',
                        ):
                    setattr(enum_class, method, getattr(int, method))
        # replace any other __new__ with our own (as long as Enum is not None,
        # anyway) -- again, this is to support pickle
        if Enum is not None:
            # if the user defined their own __new__, save it before it gets
            # clobbered in case they subclass later
            if save_new:
                setattr(enum_class, '__member_new__', enum_class.__dict__['__new__'])
            setattr(enum_class, '__new__', Enum.__dict__['__new__'])
        return enum_class
    def __call__(cls, value, names=None, module=None, type=None):
        """Either returns an existing member, or creates a new enum class.
        This method is used both when an enum class is given a value to match
        to an enumeration member (i.e. Color(3)) and for the functional API
        (i.e. Color = Enum('Color', names='red green blue')).
        When used for the functional API: `module`, if set, will be stored in
        the new class' __module__ attribute; `type`, if set, will be mixed in
        as the first base class.
        Note: if `module` is not set this routine will attempt to discover the
        calling module by walking the frame stack; if this is unsuccessful
        the resulting class will not be pickleable.
        """
        if names is None:  # simple value lookup
            return cls.__new__(cls, value)
        # otherwise, functional API: we're creating a new Enum type
        return cls._create_(value, names, module=module, type=type)
    def __contains__(cls, member):
        # Only actual members (not raw values) are "in" an enumeration.
        return isinstance(member, cls) and member.name in cls._member_map_
    def __delattr__(cls, attr):
        # nicer error message when someone tries to delete an attribute
        # (see issue19025).
        if attr in cls._member_map_:
            raise AttributeError(
                    "%s: cannot delete Enum member." % cls.__name__)
        super(EnumMeta, cls).__delattr__(attr)
    def __dir__(self):
        return (['__class__', '__doc__', '__members__', '__module__'] +
                self._member_names_)
    @property
    def __members__(cls):
        """Returns a mapping of member name->value.
        This mapping lists all enum members, including aliases. Note that this
        is a copy of the internal mapping.
        """
        return cls._member_map_.copy()
    def __getattr__(cls, name):
        """Return the enum member matching `name`
        We use __getattr__ instead of descriptors or inserting into the enum
        class' __dict__ in order to support `name` and `value` being both
        properties for enum members (which live in the class' __dict__) and
        enum members themselves.
        """
        if _is_dunder(name):
            raise AttributeError(name)
        try:
            return cls._member_map_[name]
        except KeyError:
            raise AttributeError(name)
    def __getitem__(cls, name):
        return cls._member_map_[name]
    def __iter__(cls):
        # Iterate canonical members only (aliases excluded).
        return (cls._member_map_[name] for name in cls._member_names_)
    def __reversed__(cls):
        return (cls._member_map_[name] for name in reversed(cls._member_names_))
    def __len__(cls):
        return len(cls._member_names_)
    def __repr__(cls):
        return "<enum %r>" % cls.__name__
    def __setattr__(cls, name, value):
        """Block attempts to reassign Enum members.
        A simple assignment to the class namespace only changes one of the
        several possible ways to get an Enum member from the Enum class,
        resulting in an inconsistent Enumeration.
        """
        member_map = cls.__dict__.get('_member_map_', {})
        if name in member_map:
            raise AttributeError('Cannot reassign members.')
        super(EnumMeta, cls).__setattr__(name, value)
    def _create_(cls, class_name, names=None, module=None, type=None):
        """Convenience method to create a new Enum class.
        `names` can be:
        * A string containing member names, separated either with spaces or
          commas.  Values are auto-numbered from 1.
        * An iterable of member names.  Values are auto-numbered from 1.
        * An iterable of (member name, value) pairs.
        * A mapping of member name -> value.
        """
        metacls = cls.__class__
        if type is None:
            bases = (cls, )
        else:
            bases = (type, cls)
        classdict = metacls.__prepare__(class_name, bases)
        __order__ = []
        # special processing needed for names?
        if isinstance(names, basestring):
            names = names.replace(',', ' ').split()
        if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
            names = [(e, i+1) for (i, e) in enumerate(names)]
        # Here, names is either an iterable of (name, value) or a mapping.
        for item in names:
            if isinstance(item, basestring):
                member_name, member_value = item, names[item]
            else:
                member_name, member_value = item
            classdict[member_name] = member_value
            __order__.append(member_name)
        # only set __order__ in classdict if name/value was not from a mapping
        if not isinstance(item, basestring):
            classdict['__order__'] = ' '.join(__order__)
        enum_class = metacls.__new__(metacls, class_name, bases, classdict)
        # TODO: replace the frame hack if a blessed way to know the calling
        # module is ever developed
        if module is None:
            try:
                module = _sys._getframe(2).f_globals['__name__']
            except (AttributeError, ValueError):
                pass
        if module is None:
            _make_class_unpicklable(enum_class)
        else:
            enum_class.__module__ = module
        return enum_class
    @staticmethod
    def _get_mixins_(bases):
        """Returns the type for creating enum members, and the first inherited
        enum class.
        bases: the tuple of bases that was given to __new__
        """
        if not bases or Enum is None:
            return object, Enum
        # double check that we are not subclassing a class with existing
        # enumeration members; while we're at it, see if any other data
        # type has been mixed in so we can use the correct __new__
        member_type = first_enum = None
        for base in bases:
            if (base is not Enum and
                    issubclass(base, Enum) and
                    base._member_names_):
                raise TypeError("Cannot extend enumerations")
        # base is now the last base in bases
        if not issubclass(base, Enum):
            raise TypeError("new enumerations must be created as "
                    "`ClassName([mixin_type,] enum_type)`")
        # get correct mix-in type (either mix-in type of Enum subclass, or
        # first base if last base is Enum)
        if not issubclass(bases[0], Enum):
            member_type = bases[0]     # first data type
            first_enum = bases[-1]  # enum type
        else:
            for base in bases[0].__mro__:
                # most common: (IntEnum, int, Enum, object)
                # possible:    (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
                #               <class 'int'>, <Enum 'Enum'>,
                #               <class 'object'>)
                if issubclass(base, Enum):
                    if first_enum is None:
                        first_enum = base
                else:
                    if member_type is None:
                        member_type = base
        return member_type, first_enum
    if pyver < 3.0:
        @staticmethod
        def _find_new_(classdict, member_type, first_enum):
            """Returns the __new__ to be used for creating the enum members.
            classdict: the class dictionary given to __new__
            member_type: the data type whose __new__ will be used by default
            first_enum: enumeration to check for an overriding __new__
            """
            # now find the correct __new__, checking to see if one was defined
            # by the user; also check earlier enum classes in case a __new__ was
            # saved as __member_new__
            __new__ = classdict.get('__new__', None)
            if __new__:
                return None, True, True      # __new__, save_new, use_args
            N__new__ = getattr(None, '__new__')
            O__new__ = getattr(object, '__new__')
            if Enum is None:
                E__new__ = N__new__
            else:
                E__new__ = Enum.__dict__['__new__']
            # check all possibles for __member_new__ before falling back to
            # __new__
            for method in ('__member_new__', '__new__'):
                for possible in (member_type, first_enum):
                    try:
                        target = possible.__dict__[method]
                    except (AttributeError, KeyError):
                        target = getattr(possible, method, None)
                    if target not in [
                            None,
                            N__new__,
                            O__new__,
                            E__new__,
                            ]:
                        if method == '__member_new__':
                            classdict['__new__'] = target
                            return None, False, True
                        if isinstance(target, staticmethod):
                            target = target.__get__(member_type)
                        __new__ = target
                        break
                if __new__ is not None:
                    break
            else:
                __new__ = object.__new__
            # if a non-object.__new__ is used then whatever value/tuple was
            # assigned to the enum member name will be passed to __new__ and to the
            # new enum member's __init__
            if __new__ is object.__new__:
                use_args = False
            else:
                use_args = True
            return __new__, False, use_args
    else:
        @staticmethod
        def _find_new_(classdict, member_type, first_enum):
            """Returns the __new__ to be used for creating the enum members.
            classdict: the class dictionary given to __new__
            member_type: the data type whose __new__ will be used by default
            first_enum: enumeration to check for an overriding __new__
            """
            # now find the correct __new__, checking to see if one was defined
            # by the user; also check earlier enum classes in case a __new__ was
            # saved as __member_new__
            __new__ = classdict.get('__new__', None)
            # should __new__ be saved as __member_new__ later?
            save_new = __new__ is not None
            if __new__ is None:
                # check all possibles for __member_new__ before falling back to
                # __new__
                for method in ('__member_new__', '__new__'):
                    for possible in (member_type, first_enum):
                        target = getattr(possible, method, None)
                        if target not in (
                                None,
                                None.__new__,
                                object.__new__,
                                Enum.__new__,
                                ):
                            __new__ = target
                            break
                    if __new__ is not None:
                        break
                else:
                    __new__ = object.__new__
            # if a non-object.__new__ is used then whatever value/tuple was
            # assigned to the enum member name will be passed to __new__ and to the
            # new enum member's __init__
            if __new__ is object.__new__:
                use_args = False
            else:
                use_args = True
            return __new__, save_new, use_args
########################################################
# In order to support Python 2 and 3 with a single
# codebase we have to create the Enum methods separately
# and then use the `type(name, bases, dict)` method to
# create the class.
########################################################
temp_enum_dict = {}
temp_enum_dict['__doc__'] = "Generic enumeration.\n\n Derive from this class to define new enumerations.\n\n"
def __new__(cls, value):
    # all enum instances are actually created during class construction
    # without calling this method; this method is called by the metaclass'
    # __call__ (i.e. Color(3) ), and by pickle
    if type(value) is cls:
        # For lookups like Color(Color.red)
        value = value.value
        #return value
    # by-value search for a matching enum member
    # see if it's in the reverse mapping (for hashable values)
    try:
        if value in cls._value2member_map_:
            return cls._value2member_map_[value]
    except TypeError:
        # not there, now do long search -- O(n) behavior
        for member in cls._member_map_.values():
            if member.value == value:
                return member
    raise ValueError("%s is not a valid %s" % (value, cls.__name__))
temp_enum_dict['__new__'] = __new__
del __new__
def __repr__(self):
    return "<%s.%s: %r>" % (
            self.__class__.__name__, self._name_, self._value_)
temp_enum_dict['__repr__'] = __repr__
del __repr__
def __str__(self):
    return "%s.%s" % (self.__class__.__name__, self._name_)
temp_enum_dict['__str__'] = __str__
del __str__
def __dir__(self):
    # Hide private/internal attributes; expose the member API plus any
    # user-added behavior from the enum class body.
    added_behavior = [m for m in self.__class__.__dict__ if m[0] != '_']
    return (['__class__', '__doc__', '__module__', 'name', 'value'] + added_behavior)
temp_enum_dict['__dir__'] = __dir__
del __dir__
def __format__(self, format_spec):
    # mixed-in Enums should use the mixed-in type's __format__, otherwise
    # we can get strange results with the Enum name showing up instead of
    # the value
    # pure Enum branch
    if self._member_type_ is object:
        cls = str
        val = str(self)
    # mix-in branch
    else:
        cls = self._member_type_
        val = self.value
    return cls.__format__(val, format_spec)
temp_enum_dict['__format__'] = __format__
del __format__
####################################
# Python's less than 2.6 use __cmp__
if pyver < 2.6:
    def __cmp__(self, other):
        # Same-class members compare by identity; anything else defers.
        if type(other) is self.__class__:
            if self is other:
                return 0
            return -1
        return NotImplemented
        # NOTE(review): the raise below is unreachable -- every path above
        # has already returned.
        raise TypeError("unorderable types: %s() and %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__cmp__'] = __cmp__
    del __cmp__
else:
    # Python >= 2.6: plain Enums are deliberately unorderable.
    def __le__(self, other):
        raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__le__'] = __le__
    del __le__
    def __lt__(self, other):
        raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__lt__'] = __lt__
    del __lt__
    def __ge__(self, other):
        raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__ge__'] = __ge__
    del __ge__
    def __gt__(self, other):
        raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__gt__'] = __gt__
    del __gt__
def __eq__(self, other):
    # Members are singletons, so equality is identity within a class.
    if type(other) is self.__class__:
        return self is other
    return NotImplemented
temp_enum_dict['__eq__'] = __eq__
del __eq__
def __ne__(self, other):
    if type(other) is self.__class__:
        return self is not other
    return NotImplemented
temp_enum_dict['__ne__'] = __ne__
del __ne__
def __hash__(self):
    # Hash by name, which is unique within an enumeration.
    return hash(self._name_)
temp_enum_dict['__hash__'] = __hash__
del __hash__
def __reduce_ex__(self, proto):
    # Pickle as a by-value lookup: Class(value) recreates the member.
    return self.__class__, (self._value_, )
temp_enum_dict['__reduce_ex__'] = __reduce_ex__
del __reduce_ex__
# _RouteClassAttributeToGetattr is used to provide access to the `name`
# and `value` properties of enum members while keeping some measure of
# protection from modification, while still allowing for an enumeration
# to have members named `name` and `value`.  This works because enumeration
# members are not set directly on the enum class -- __getattr__ is
# used to look them up.
@_RouteClassAttributeToGetattr
def name(self):
    return self._name_
temp_enum_dict['name'] = name
del name
@_RouteClassAttributeToGetattr
def value(self):
    return self._value_
temp_enum_dict['value'] = value
del value
# Finally assemble the real Enum class from the accumulated method dict.
Enum = EnumMeta('Enum', (object, ), temp_enum_dict)
del temp_enum_dict
# Enum has now been created
###########################
class IntEnum(int, Enum):
    """Enum where members are also (and must be) ints"""
def unique(enumeration):
    """Class decorator that ensures only unique members exist in an enumeration."""
    # An alias is an entry whose dict key differs from the canonical member
    # name it maps to; any alias means two names shared one value.
    aliases = [
        (name, member.name)
        for name, member in enumeration.__members__.items()
        if name != member.name
    ]
    if aliases:
        duplicate_names = ', '.join(
            "%s -> %s" % (alias, target) for alias, target in aliases
        )
        raise ValueError('duplicate names found in %r: %s' %
                (enumeration, duplicate_names)
                )
    return enumeration
| |
import asyncio
from contextlib import contextmanager
import socket
import threading
from time import sleep
import pytest
from tornado import gen
from distributed import Scheduler, Worker, Client, config, default_client
from distributed.core import rpc
from distributed.metrics import time
from distributed.utils_test import ( # noqa: F401
cleanup,
cluster,
gen_cluster,
inc,
gen_test,
wait_for_port,
new_config,
)
from distributed.utils_test import ( # noqa: F401
loop,
tls_only_security,
security,
tls_client,
tls_cluster,
)
from distributed.utils import get_ip
def test_bare_cluster(loop):
    """Smoke test: a ten-worker cluster starts and tears down cleanly."""
    with cluster(nworkers=10) as (scheduler, _workers):
        pass
def test_cluster(loop):
    """The cluster fixture exposes a scheduler that answers identity() over rpc."""
    with cluster() as (info, [a, b]):
        with rpc(info["address"]) as scheduler:
            ident = loop.run_sync(scheduler.identity)
            assert ident["type"] == "Scheduler"
            assert len(ident["workers"]) == 2
@gen_cluster(client=True)
async def test_gen_cluster(c, s, a, b):
    """gen_cluster(client=True) injects a client, a scheduler and two workers."""
    assert isinstance(c, Client)
    assert isinstance(s, Scheduler)
    assert all(isinstance(w, Worker) for w in (a, b))
    assert s.nthreads == {w.address: w.nthreads for w in (a, b)}
    assert await c.submit(lambda: 123) == 123
@gen_cluster(client=True)
def test_gen_cluster_legacy_implicit(c, s, a, b):
    """Old-style (implicit coroutine) generator test bodies still work."""
    assert isinstance(c, Client)
    assert isinstance(s, Scheduler)
    assert all(isinstance(w, Worker) for w in (a, b))
    assert s.nthreads == {w.address: w.nthreads for w in (a, b)}
    assert (yield c.submit(lambda: 123)) == 123
@gen_cluster(client=True)
@gen.coroutine
def test_gen_cluster_legacy_explicit(c, s, a, b):
    # Legacy form: an explicitly decorated tornado coroutine must also be
    # supported by gen_cluster.
    assert isinstance(c, Client)
    assert isinstance(s, Scheduler)
    for w in [a, b]:
        assert isinstance(w, Worker)
    assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
    assert (yield c.submit(lambda: 123)) == 123
@pytest.mark.skip(reason="This hangs on travis")
def test_gen_cluster_cleans_up_client(loop):
    # The default 'get' config entry set up inside a gen_cluster test must not
    # leak out after the test body finishes.
    import dask.context

    assert not dask.config.get("get", None)

    @gen_cluster(client=True)
    async def f(c, s, a, b):
        assert dask.config.get("get", None)
        await c.submit(inc, 1)

    f()

    assert not dask.config.get("get", None)
@gen_cluster(client=False)
async def test_gen_cluster_without_client(s, a, b):
    # With client=False only the scheduler and workers are injected; a client
    # can still be created manually against the scheduler address.
    assert isinstance(s, Scheduler)
    for w in [a, b]:
        assert isinstance(w, Worker)
    assert s.nthreads == {w.address: w.nthreads for w in [a, b]}

    async with Client(s.address, asynchronous=True) as c:
        future = c.submit(lambda x: x + 1, 1)
        result = await future
        assert result == 2
@gen_cluster(
    client=True,
    scheduler="tls://127.0.0.1",
    nthreads=[("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)],
    security=tls_only_security(),
)
async def test_gen_cluster_tls(e, s, a, b):
    # All injected endpoints must use the tls:// transport when a TLS-only
    # security configuration is supplied.
    assert isinstance(e, Client)
    assert isinstance(s, Scheduler)
    assert s.address.startswith("tls://")
    for w in [a, b]:
        assert isinstance(w, Worker)
        assert w.address.startswith("tls://")
    assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
@gen_test()
async def test_gen_test():
    # gen_test must run a native coroutine test body.
    await asyncio.sleep(0.01)
@gen_test()
def test_gen_test_legacy_implicit():
    # gen_test must also drive a plain generator-style test body.
    yield asyncio.sleep(0.01)
@gen_test()
@gen.coroutine
def test_gen_test_legacy_explicit():
    # gen_test must also drive an explicit tornado coroutine test body.
    yield asyncio.sleep(0.01)
@contextmanager
def _listen(delay=0):
serv = socket.socket()
serv.bind(("127.0.0.1", 0))
e = threading.Event()
def do_listen():
e.set()
sleep(delay)
serv.listen(5)
ret = serv.accept()
if ret is not None:
cli, _ = ret
cli.close()
serv.close()
t = threading.Thread(target=do_listen)
t.daemon = True
t.start()
try:
e.wait()
sleep(0.01)
yield serv
finally:
t.join(5.0)
def test_wait_for_port():
    # An address nobody listens on must raise RuntimeError after the timeout.
    t1 = time()
    with pytest.raises(RuntimeError):
        wait_for_port((get_ip(), 9999), 0.5)
    t2 = time()
    assert t2 - t1 >= 0.5

    # A socket that listens immediately should be detected quickly.
    with _listen(0) as s1:
        t1 = time()
        wait_for_port(s1.getsockname())
        t2 = time()
        assert t2 - t1 <= 1.0

    # A socket that starts listening after 1s should be waited for.
    with _listen(1) as s1:
        t1 = time()
        wait_for_port(s1.getsockname())
        t2 = time()
        assert t2 - t1 <= 2.0
def test_new_config():
    """new_config() must apply its overrides inside the block and fully
    restore the previous configuration afterwards."""
    c = config.copy()
    with new_config({"xyzzy": 5}):
        # Bug fix: this was a bare comparison (`config["xyzzy"] == 5`) whose
        # result was discarded, so the override was never actually checked.
        assert config["xyzzy"] == 5

    assert config == c
    assert "xyzzy" not in config
def test_lingering_client():
    # A client left un-closed by a gen_cluster body must not remain the
    # default client once the cluster has shut down.
    @gen_cluster()
    async def f(s, a, b):
        await Client(s.address, asynchronous=True)

    f()

    with pytest.raises(ValueError):
        default_client()
# NOTE(review): this redefines `test_lingering_client`, so pytest never
# collects the previous test of the same name -- rename one of them.
def test_lingering_client(loop):
    with cluster() as (s, [a, b]):
        # The client is deliberately not closed; the surrounding fixtures
        # must still tear the cluster down cleanly.
        client = Client(s["address"], loop=loop)
def test_tls_cluster(tls_client):
    """A TLS-secured client can round-trip a simple computation."""
    # Bug fix: the result was computed and compared but never asserted, so a
    # wrong result could not fail the test.
    assert tls_client.submit(lambda x: x + 1, 10).result() == 11
    assert tls_client.security
@pytest.mark.asyncio
async def test_tls_scheduler(security, cleanup):
    # A scheduler built with the TLS security fixture must listen on tls://.
    async with Scheduler(security=security, host="localhost") as s:
        assert s.address.startswith("tls")
| |
# -*- coding: utf-8 -*-
"""
Shortest path algorithms for unweighted graphs.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2004-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['bidirectional_shortest_path',
'single_source_shortest_path',
'single_source_shortest_path_length',
'all_pairs_shortest_path',
'all_pairs_shortest_path_length',
'predecessor']
import networkx as nx
def single_source_shortest_path_length(G, source, cutoff=None):
    """Compute the shortest path lengths from source to all reachable nodes.

    Parameters
    ----------
    G : NetworkX graph

    source : node
       Starting node for path

    cutoff : integer, optional
        Depth to stop the search. Only paths of length <= cutoff are returned.

    Returns
    -------
    lengths : iterator
        (target, shortest path length) iterator

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> length = nx.single_source_shortest_path_length(G, 0)
    >>> dict(length)
    {0: 0, 1: 1, 2: 2, 3: 3, 4: 4}

    See Also
    --------
    shortest_path_length
    """
    if source not in G:
        raise nx.NodeNotFound('Source {} is not in G'.format(source))
    visited = {}                # node -> hop count when first reached
    depth = 0                   # current BFS depth
    frontier = {source: 1}      # nodes to expand at the next depth
    while frontier:
        # Advance one BFS layer, starting a fresh fringe for the next one.
        current, frontier = frontier, {}
        for node in current:
            if node in visited:
                continue
            visited[node] = depth
            frontier.update(G[node])    # queue the neighbors of `node`
            yield (node, depth)
        if cutoff is not None and cutoff <= depth:
            break
        depth += 1
    del visited
def all_pairs_shortest_path_length(G, cutoff=None):
    """Computes the shortest path lengths between all nodes in `G`.

    Parameters
    ----------
    G : NetworkX graph

    cutoff : integer, optional
        Depth at which to stop the search. Only paths of length at most
        `cutoff` are returned.

    Returns
    -------
    lengths : iterator
        (source, dictionary) iterator with dictionary keyed by target and
        shortest path length as the key value.

    Notes
    -----
    The iterator returned only has reachable node pairs.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> length = nx.all_pairs_shortest_path_length(G)
    >>> dict(length)[1]
    {0: 1, 1: 0, 2: 1, 3: 2, 4: 3}
    """
    # TODO This can be trivially parallelized.
    for node in G:
        yield (node, dict(single_source_shortest_path_length(G, node,
                                                             cutoff=cutoff)))
def bidirectional_shortest_path(G, source, target):
    """Return a list of nodes in a shortest path between source and target.

    Parameters
    ----------
    G : NetworkX graph

    source : node label
       starting node for path

    target : node label
       ending node for path

    Returns
    -------
    path: list
       List of nodes in a path from source to target.

    Raises
    ------
    NetworkXNoPath
       If no path exists between source and target.

    See Also
    --------
    shortest_path

    Notes
    -----
    This algorithm is used by shortest_path(G,source,target).
    """
    if source not in G or target not in G:
        msg = 'Either source {} or target {} is not in G'
        raise nx.NodeNotFound(msg.format(source, target))

    # Run the two-ended BFS; `meeting` is the node where the searches met.
    pred, succ, meeting = _bidirectional_pred_succ(G, source, target)

    # Walk predecessors back from the meeting node to the source ...
    path = []
    node = meeting
    while node is not None:
        path.append(node)
        node = pred[node]
    path.reverse()

    # ... then follow successors from the meeting node out to the target.
    node = succ[path[-1]]
    while node is not None:
        path.append(node)
        node = succ[node]
    return path
def _bidirectional_pred_succ(G, source, target):
"""Bidirectional shortest path helper.
Returns (pred,succ,w) where
pred is a dictionary of predecessors from w to the source, and
succ is a dictionary of successors from w to the target.
"""
# does BFS from both source and target and meets in the middle
if target == source:
return ({target:None},{source:None},source)
# handle either directed or undirected
if G.is_directed():
Gpred=G.predecessors
Gsucc=G.successors
else:
Gpred=G.neighbors
Gsucc=G.neighbors
# predecesssor and successors in search
pred={source:None}
succ={target:None}
# initialize fringes, start with forward
forward_fringe=[source]
reverse_fringe=[target]
while forward_fringe and reverse_fringe:
if len(forward_fringe) <= len(reverse_fringe):
this_level=forward_fringe
forward_fringe=[]
for v in this_level:
for w in Gsucc(v):
if w not in pred:
forward_fringe.append(w)
pred[w]=v
if w in succ: return pred,succ,w # found path
else:
this_level=reverse_fringe
reverse_fringe=[]
for v in this_level:
for w in Gpred(v):
if w not in succ:
succ[w]=v
reverse_fringe.append(w)
if w in pred: return pred,succ,w # found path
raise nx.NetworkXNoPath("No path between %s and %s." % (source, target))
def single_source_shortest_path(G, source, cutoff=None):
    """Compute shortest path between source
    and all other nodes reachable from source.

    Parameters
    ----------
    G : NetworkX graph

    source : node label
       Starting node for path

    cutoff : integer, optional
        Depth to stop the search. Only paths of length <= cutoff are returned.

    Returns
    -------
    lengths : dictionary
        Dictionary, keyed by target, of shortest paths.

    Examples
    --------
    >>> G=nx.path_graph(5)
    >>> path=nx.single_source_shortest_path(G,0)
    >>> path[4]
    [0, 1, 2, 3, 4]

    Notes
    -----
    The shortest path is not necessarily unique. So there can be multiple
    paths between the source and each target node, all of which have the
    same 'shortest' length. For each target node, this function returns
    only one of those paths.

    See Also
    --------
    shortest_path
    """
    if source not in G:
        raise nx.NodeNotFound("Source {} not in G".format(source))
    depth = 0                       # current BFS depth
    frontier = {source: 1}          # nodes to expand at the next depth
    paths = {source: [source]}      # shortest path found to each node
    if cutoff == 0:
        return paths
    while frontier:
        # Advance one BFS layer, starting a fresh fringe for the next one.
        current, frontier = frontier, {}
        for node in current:
            for nbr in G[node]:
                if nbr not in paths:
                    paths[nbr] = paths[node] + [nbr]
                    frontier[nbr] = 1
        depth += 1
        if cutoff is not None and cutoff <= depth:
            break
    return paths
def all_pairs_shortest_path(G, cutoff=None):
    """Compute shortest paths between all nodes.

    Parameters
    ----------
    G : NetworkX graph

    cutoff : integer, optional
        Depth at which to stop the search. Only paths of length at most
        `cutoff` are returned.

    Returns
    -------
    lengths : dictionary
        Dictionary, keyed by source and target, of shortest paths.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> path = nx.all_pairs_shortest_path(G)
    >>> print(path[0][4])
    [0, 1, 2, 3, 4]

    See Also
    --------
    floyd_warshall()
    """
    # TODO This can be trivially parallelized.
    paths = {}
    for node in G:
        paths[node] = single_source_shortest_path(G, node, cutoff=cutoff)
    return paths
def predecessor(G, source, target=None, cutoff=None, return_seen=None):
    """Returns dictionary of predecessors for the path from source to all
    nodes in G.

    Parameters
    ----------
    G : NetworkX graph

    source : node label
       Starting node for path

    target : node label, optional
       Ending node for path. If provided only predecessors between
       source and target are returned

    cutoff : integer, optional
        Depth to stop the search. Only paths of length <= cutoff are returned.

    return_seen : bool, optional
        Also return the BFS level at which each node was first seen.

    Returns
    -------
    pred : dictionary
        Dictionary, keyed by node, of predecessors in the shortest path.
        When ``target`` is given, only its predecessor list (or
        ``(predecessors, level)`` with ``return_seen``) is returned; an
        unreachable target yields ``[]`` (or ``([], -1)``).

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> list(G)
    [0, 1, 2, 3]
    >>> nx.predecessor(G, 0)
    {0: [], 1: [0], 2: [1], 3: [2]}
    """
    if source not in G:
        # Idiom fix: dropped a stray trailing semicolon from this line.
        raise nx.NodeNotFound("Source {} not in G".format(source))
    level = 0                  # the current level
    nextlevel = [source]       # list of nodes to check at next level
    seen = {source: level}     # level (number of hops) when seen in BFS
    pred = {source: []}        # predecessor dictionary
    while nextlevel:
        level = level + 1
        thislevel = nextlevel
        nextlevel = []
        for v in thislevel:
            for w in G[v]:
                if w not in seen:
                    pred[w] = [v]
                    seen[w] = level
                    nextlevel.append(w)
                elif seen[w] == level:  # add v to predecessor list if it
                    pred[w].append(v)   # is at the correct level
        # NOTE: a cutoff of 0 is falsy and therefore ignored here; this
        # matches the function's historical behavior.
        if cutoff and cutoff <= level:
            break

    if target is not None:
        if return_seen:
            # Idiom fix: `target not in pred` instead of `not target in pred`.
            if target not in pred:
                return ([], -1)  # no predecessor: target unreachable
            return (pred[target], seen[target])
        else:
            if target not in pred:
                return []  # no predecessor: target unreachable
            return pred[target]
    else:
        if return_seen:
            return (pred, seen)
        else:
            return pred
| |
"""
$Id$
$HeadURL$
Copyright (c) 2005, 2006, 2008 Peter Kropf. All rights reserved.
Module documentation goes here.
"""
# Subversion keyword expansions and package metadata.
__revision__ = '$LastChangedRevision$'
__id__ = '$Id$'
__headurl__ = '$HeadURL$'
__docformat__ = 'restructuredtext'
__version__ = '0.7.5dev'
import inspect
import locale
import os
import re
import subprocess
import sys
# Copied from trac.util.compat, but didn't exist in 0.11
try:
from hashlib import sha1
except ImportError:
from sha import new as sha1
from genshi.builder import Element, tag
from genshi.core import Markup
from trac.config import BoolOption, IntOption, Option
from trac.core import *
from trac.mimeview.api import Context, IHTMLPreviewRenderer, MIME_MAP
from trac.util import escape
from trac.util.text import to_unicode
from trac.util.translation import _
from trac.web.api import IRequestHandler
from trac.wiki.api import IWikiMacroProvider
from trac.wiki.formatter import extract_link
class Graphviz(Component):
    """
    Graphviz (http://trac-hacks.org/wiki/GraphvizPlugin) provides
    a plugin for Trac to render graphviz (http://www.graphviz.org/)
    drawings within a Trac wiki page.
    """
    implements(IWikiMacroProvider, IHTMLPreviewRenderer, IRequestHandler)
    # Available formats and processors, default first (dot/png)
    Processors = ['dot', 'neato', 'twopi', 'circo', 'fdp']
    Bitmap_Formats = ['png', 'jpg', 'gif']
    Vector_Formats = ['svg', 'svgz']
    Formats = Bitmap_Formats + Vector_Formats
    # Candidate installation directories for the graphviz binaries, keyed
    # by sys.platform.
    Cmd_Paths = {
        'linux2': ['/usr/bin',
                   '/usr/local/bin',],
        'win32': ['c:\\Program Files\\Graphviz\\bin',
                  'c:\\Program Files\\ATT\\Graphviz\\bin',
                  ],
        'freebsd6': ['/usr/local/bin',
                     ],
        'freebsd5': ['/usr/local/bin',
                     ],
        'darwin': ['/opt/local/bin',
                   '/sw/bin',],
        }
    # Note: the following options named "..._option" are those which need
    # some additional processing, see `_load_config()` below.
    DEFAULT_CACHE_DIR = 'gvcache'
    cache_dir_option = Option("graphviz", "cache_dir", DEFAULT_CACHE_DIR,
        """The directory that will be used to cache the generated images.
        Note that if different than the default (`%s`), this directory must
        exist.

        If not given as an absolute path, the path will be relative to
        the Trac environment's directory.
        """ % DEFAULT_CACHE_DIR)
    encoding = Option("graphviz", "encoding", 'utf-8',
        """The encoding which should be used for communicating with
        Graphviz (should match `-Gcharset` if given).
        """)
    cmd_path = Option("graphviz", "cmd_path", '',
        r"""Full path to the directory where the graphviz
        programs are located. If not specified, the
        default is `/usr/bin` on Linux,
        `C:\Program Files\ATT\Graphviz\bin` on Windows and
        `/usr/local/bin` on FreeBSD 6.
        """)
    out_format = Option("graphviz", "out_format", Formats[0],
        """Graph output format. Valid formats are: png, jpg,
        svg, svgz, gif. If not specified, the default is
        png. This setting can be overrided on a per-graph
        basis.
        """)
    processor = Option("graphviz", "processor", Processors[0],
        """Graphviz default processor. Valid processors
        are: dot, neato, twopi, fdp, circo. If not
        specified, the default is dot. This setting can
        be overrided on a per-graph basis.

        !GraphvizMacro will verify that the default
        processor is installed and will not work if it
        is missing. All other processors are optional.
        If any of the other processors are missing, a
        warning message will be sent to the trac log and
        !GraphvizMacro will continue to work.
        """)
    png_anti_alias = BoolOption("graphviz", "png_antialias", False,
        """If this entry exists in the configuration file,
        then PNG outputs will be antialiased.

        Note that this requires `rsvg` to be installed.
        """)
    rsvg_path_option = Option("graphviz", "rsvg_path", "",
        """Full path to the rsvg program (including the filename).
        The default is `<cmd_path>/rsvg`.
        """)
    cache_manager = BoolOption("graphviz", "cache_manager", False,
        """If this entry exists and set to true in the configuration file,
        then the cache management logic will be invoked
        and the cache_max_size, cache_min_size,
        cache_max_count and cache_min_count must be
        defined.
        """)
    cache_max_size = IntOption("graphviz", "cache_max_size", 1024*1024*10,
        """The maximum size in bytes that the cache should
        consume. This is the high watermark for disk space
        used.
        """)
    cache_min_size = IntOption("graphviz", "cache_min_size", 1024*1024*5,
        """When cleaning out the cache, remove files until
        this size in bytes is used by the cache. This is
        the low watermark for disk space used.
        """)
    cache_max_count = IntOption("graphviz", "cache_max_count", 2000,
        """The maximum number of files that the cache should
        contain. This is the high watermark for the
        directory entry count.
        """)
    cache_min_count = IntOption("graphviz", "cache_min_count", 1500,
        """The minimum number of files that the cache should
        contain. This is the low watermark for the
        directory entry count.
        """)
    dpi = IntOption('graphviz', 'default_graph_dpi', 96,
        """Default dpi setting for graphviz, used during SVG to PNG
        rasterization.
        """)
    def __init__(self):
        # Log the plugin version once when the component is instantiated.
        self.log.info('version: %s - id: %s' % (__version__, str(__id__)))
        #self.log.info('processors: %s' % str(Graphviz.Processors))
        #self.log.info('formats: %s' % str(Graphviz.Formats))
    # IHTMLPreviewRenderer methods
    # NOTE(review): the parentheses below do NOT make a tuple -- MIME_TYPES is
    # the plain string 'application/graphviz', so the `in` test in
    # get_quality_ratio() is a substring match; confirm whether
    # ('application/graphviz',) was intended.
    MIME_TYPES = ('application/graphviz')
    def get_quality_ratio(self, mimetype):
        """Return this renderer's priority (2) for graphviz content, else 0."""
        if mimetype in self.MIME_TYPES:
            return 2
        return 0
    def render(self, context, mimetype, content, filename=None, url=None):
        """Render an attached graphviz file by delegating to expand_macro()."""
        # The second dot-separated component of the filename selects the
        # macro variant (plain 'graphviz' or 'graphviz.<ext>').
        ext = filename.split('.')[1]
        name = ext == 'graphviz' and 'graphviz' or 'graphviz.%s' % ext
        text = hasattr(content, 'read') and content.read() or content
        return self.expand_macro(context, name, text)
    # IRequestHandler methods
    def match_request(self, req):
        # Handle every request below /graphviz (cached image downloads).
        return req.path_info.startswith('/graphviz')
    def process_request(self, req):
        """Serve a previously generated image straight from the cache."""
        # check and load the configuration
        errmsg = self._load_config()
        if errmsg:
            return self._error_div(errmsg)
        pieces = [item for item in req.path_info.split('/graphviz') if item]
        if pieces:
            pieces = [item for item in pieces[0].split('/') if item]
            if pieces:
                name = pieces[0]
                img_path = os.path.join(self.cache_dir, name)
                return req.send_file(img_path)
        # NOTE(review): implicitly returns None when the URL carries no image
        # name -- confirm Trac copes with a None response here.
    # IWikiMacroProvider methods
    def get_macros(self):
        """Return an iterable that provides the names of the provided macros."""
        self._load_config()
        # Advertise every processor/format combination, e.g.
        # 'graphviz', 'graphviz.dot', 'graphviz.dot/png', 'graphviz/png', ...
        for p in ['.' + p for p in Graphviz.Processors] + ['']:
            for f in ['/' + f for f in Graphviz.Formats] + ['']:
                yield 'graphviz%s%s' % (p, f)
    def get_macro_description(self, name):
        """
        Return a plain text description of the macro with the
        specified name. Only return a description for the base
        graphviz macro. All the other variants (graphviz/png,
        graphviz/svg, etc.) will have no description. This will
        cleanup the WikiMacros page a bit.
        """
        if name == 'graphviz':
            return inspect.getdoc(Graphviz)
        else:
            return None
    def expand_macro(self, formatter_or_context, name, content):
        """Return the HTML output of the macro.

        :param formatter_or_context: a Formatter when called as a macro,
               a Context when called by `GraphvizPlugin.render`

        :param name: Wiki macro command that resulted in this method being
               called. In this case, it should be 'graphviz', followed
               (or not) by the processor name, then by an output
               format, as following: graphviz.<processor>/<format>

               Valid processor names are: dot, neato, twopi, circo,
               and fdp.  The default is dot.

               Valid output formats are: jpg, png, gif, svg and svgz.
               The default is the value specified in the out_format
               configuration parameter. If out_format is not specified
               in the configuration, then the default is png.

               examples: graphviz.dot/png   -> dot    png
                         graphviz.neato/jpg -> neato  jpg
                         graphviz.circo     -> circo  png
                         graphviz/svg       -> dot    svg

        :param content: The text the user entered for the macro to process.
        """
        # check and load the configuration
        errmsg = self._load_config()
        if errmsg:
            return self._error_div(errmsg)
        ## Extract processor and format from name
        processor = out_format = None
        # first try with the RegExp engine
        try:
            m = re.match('graphviz\.?([a-z]*)\/?([a-z]*)', name)
            (processor, out_format) = m.group(1, 2)
        # or use the string.split method
        # NOTE(review): a bare except also hides unexpected errors from the
        # regex path above, not just match failures.
        except:
            (d_sp, s_sp) = (name.split('.'), name.split('/'))
            if len(d_sp) > 1:
                s_sp = d_sp[1].split('/')
                if len(s_sp) > 1:
                    out_format = s_sp[1]
                processor = s_sp[0]
            elif len(s_sp) > 1:
                out_format = s_sp[1]
        # assign default values, if instance ones are empty
        if not out_format:
            out_format = self.out_format
        if not processor:
            processor = self.processor
        if processor in Graphviz.Processors:
            proc_cmd = self.cmds[processor]
        else:
            self.log.error('render_macro: requested processor (%s) not found.' %
                           processor)
            return self._error_div('requested processor (%s) not found.' %
                                   processor)
        if out_format not in Graphviz.Formats:
            self.log.error('render_macro: requested format (%s) not found.' %
                           out_format)
            return self._error_div(
                tag.p(_("Graphviz macro processor error: "
                        "requested format (%(fmt)s) not valid.",
                        fmt=out_format)))
        # The cache key covers the command line and the graph source, so any
        # change to either produces a new cache entry.  (Python 2: `unicode`.)
        encoded_cmd = (processor + unicode(self.processor_options)) \
                      .encode(self.encoding)
        encoded_content = content.encode(self.encoding)
        sha_key  = sha1(encoded_cmd + encoded_content).hexdigest()
        img_name = '%s.%s.%s' % (sha_key, processor, out_format)
        # cache: hash.<dot>.<png>
        img_path = os.path.join(self.cache_dir, img_name)
        map_name = '%s.%s.map' % (sha_key, processor)
        # cache: hash.<dot>.map
        map_path = os.path.join(self.cache_dir, map_name)
        # Check for URL="" presence in graph code
        URL_in_graph = 'URL=' in content
        # Create image if not in cache
        if not os.path.exists(img_path):
            self._clean_cache()
            if URL_in_graph: # translate wiki TracLinks in URL
                if isinstance(formatter_or_context, Context):
                    context = formatter_or_context
                else:
                    context = formatter_or_context.context
                content = self._expand_wiki_links(context, out_format,
                                                  content)
                encoded_content = content.encode(self.encoding)
            # Antialias PNGs with rsvg, if requested
            if out_format == 'png' and self.png_anti_alias == True:
                # 1. SVG output
                failure, errmsg = self._launch(
                        encoded_content, proc_cmd, '-Tsvg',
                        '-o%s.svg' % img_path, *self.processor_options)
                if failure:
                    return self._error_div(errmsg)
                # 2. SVG to PNG rasterization
                failure, errmsg = self._launch(
                        None, self.rsvg_path, '--dpi-x=%d' % self.dpi,
                        '--dpi-y=%d' % self.dpi, '%s.svg' % img_path, img_path)
                if failure:
                    return self._error_div(errmsg)
            else: # Render other image formats
                failure, errmsg = self._launch(
                        encoded_content, proc_cmd, '-T%s' % out_format,
                        '-o%s' % img_path, *self.processor_options)
                if failure:
                    return self._error_div(errmsg)
            # Generate a map file for binary formats
            if URL_in_graph and out_format in Graphviz.Bitmap_Formats:
                # Create the map if not in cache
                if not os.path.exists(map_path):
                    failure, errmsg = self._launch(
                            encoded_content, proc_cmd, '-Tcmap',
                            '-o%s' % map_path, *self.processor_options)
                    if failure:
                        return self._error_div(errmsg)
        if errmsg:
            # there was a warning. Ideally we should be able to use
            # `add_warning` here, but that's not possible as the warnings
            # are already emitted at this point in the template processing
            return self._error_div(errmsg)
        # Generate HTML output
        img_url = formatter_or_context.href.graphviz(img_name)
        # for SVG(z)
        if out_format in Graphviz.Vector_Formats:
            try: # try to get SVG dimensions
                f = open(img_path, 'r')
                svg = f.readlines(1024) # don't read all
                f.close()
                svg = "".join(svg).replace('\n', '')
                w = re.search('width="([0-9]+)(.*?)" ', svg)
                h = re.search('height="([0-9]+)(.*?)"', svg)
                (w_val, w_unit) = w.group(1,2)
                (h_val, h_unit) = h.group(1,2)
                # Graphviz seems to underestimate height/width for SVG images,
                # so we have to adjust them.
                # The correction factor seems to be constant.
                w_val, h_val = [1.35 * float(x) for x in (w_val, h_val)]
                width = unicode(w_val) + w_unit
                height = unicode(h_val) + h_unit
            except ValueError:
                width = height = '100%'
            # insert SVG, IE compatibility
            return tag.object(
                    tag.embed(src=img_url, type="image/svg+xml",
                              width=width, height=height),
                    data=img_url, type="image/svg+xml",
                    width=width, height=height)
        # for binary formats, add map
        elif URL_in_graph and os.path.exists(map_path):
            f = open(map_path, 'r')
            map = f.readlines()
            f.close()
            map = "".join(map).replace('\n', '')
            return tag(tag.map(Markup(map), id='G'+sha_key, name='G'+sha_key),
                       tag.img(src=img_url, usemap="#G"+sha_key,
                               alt=_("GraphViz image")))
        else:
            return tag.img(src=img_url, alt=_("GraphViz image"))
    # Private methods
    def _expand_wiki_links(self, context, out_format, content):
        """Expand TracLinks that follow all URL= patterns."""
        def expand(match):
            # Resolve each URL="<TracLink>" to a concrete href (and tooltip).
            wiki_text = match.groups()[0] # TracLink ([1], source:file/, ...)
            link = extract_link(self.env, context, wiki_text)
            if isinstance(link, Element):
                href = link.attrib.get('href')
                name = link.children
                description = link.attrib.get('title', '')
            else:
                href = wiki_text
                description = None
            if out_format == 'svg':
                # SVG is embedded via <object>/<embed>; plain links would
                # navigate the embedded frame, hence the javascript redirect.
                format = 'URL="javascript:window.parent.location.href=\'%s\'"'
            else:
                format = 'URL="%s"'
            url = format % href
            if description:
                url += '\ntooltip="%s"' % description \
                       .replace('"', '').replace('\n', '')
            return url
        return re.sub(r'URL="(.*?)"', expand, content)
    def _load_config(self):
        """Preprocess the graphviz trac.ini configuration.

        Returns an error message (to show to the user) on failure, or None
        on success, after populating self.cache_dir, self.cmds,
        self.rsvg_path and self.processor_options.
        """
        # if 'graphviz' not in self.config.sections():
        #     ... so what? the defaults might be good enough
        # check for the cache_dir entry
        self.cache_dir = self.cache_dir_option
        if not self.cache_dir:
            return _("The [graphviz] section is missing the cache_dir field.")
        if not os.path.isabs(self.cache_dir):
            self.cache_dir = os.path.join(self.env.path, self.cache_dir)
        if not os.path.exists(self.cache_dir):
            # Only the default cache dir is created automatically; a custom
            # one must already exist.
            if self.cache_dir_option == self.DEFAULT_CACHE_DIR:
                os.mkdir(self.cache_dir)
            else:
                return _("The cache_dir '%(path)s' doesn't exist, "
                         "please create it.", path=self.cache_dir)
        # Get optional configuration parameters from trac.ini.
        # check for the cmd_path entry and setup the various command paths
        cmd_paths = Graphviz.Cmd_Paths.get(sys.platform, [])
        if self.cmd_path:
            if not os.path.exists(self.cmd_path):
                return _("The '[graphviz] cmd_path' configuration entry "
                         "is set to '%(path)s' but that path does not exist.",
                         path=self.cmd_path)
            cmd_paths = [self.cmd_path]
        if not cmd_paths:
            return _("The '[graphviz] cmd_path' configuration entry "
                     "is not set and there is no default for %(platform)s.",
                     platform=sys.platform)
        self.cmds = {}
        pname = self._find_cmd(self.processor, cmd_paths)
        if not pname:
            return _("The default processor '%(proc)s' was not found "
                     "in '%(paths)s'.", proc=self.processor, paths=cmd_paths)
        for name in Graphviz.Processors:
            pname = self._find_cmd(name, cmd_paths)
            if not pname:
                # NOTE(review): the warning interpolates `pname` (None here)
                # instead of `name`, and removing entries from the list being
                # iterated skips the following processor -- iterate a copy.
                self.log.warn('The %s program was not found. '
                              'The graphviz/%s macro will be disabled.' %
                              (pname, name))
                Graphviz.Processors.remove(name)
            self.cmds[name] = pname
        if self.png_anti_alias:
            self.rsvg_path = (self.rsvg_path_option or
                              self._find_cmd('rsvg', cmd_paths))
            if not (self.rsvg_path and os.path.exists(self.rsvg_path)):
                return _("The rsvg program is set to '%(path)s' but that path "
                         "does not exist.", path=self.rsvg_path)
        # get default graph/node/edge attributes
        self.processor_options = []
        defaults = [opt for opt in self.config.options('graphviz')
                    if opt[0].startswith('default_')]
        for name, value in defaults:
            # default_graph_* -> -G..., default_node_* -> -N...,
            # default_edge_* -> -E... command line options.
            for prefix, optkey in [
                    ('default_graph_', '-G'),
                    ('default_node_', '-N'),
                    ('default_edge_', '-E')]:
                if name.startswith(prefix):
                    self.processor_options.append("%s%s=%s" %
                            (optkey, name.replace(prefix,''), value))
        # setup mimetypes to support the IHTMLPreviewRenderer interface
        if 'graphviz' not in MIME_MAP:
            MIME_MAP['graphviz'] = 'application/graphviz'
            for processor in Graphviz.Processors:
                MIME_MAP[processor] = 'application/graphviz'
    def _launch(self, encoded_input, *args):
        """Launch a process (cmd), and returns exitcode, stdout + stderr"""
        # Note: subprocess.Popen doesn't support unicode options arguments
        # (http://bugs.python.org/issue1759845) so we have to encode them.
        # Anyway, dot expects utf-8 or the encoding specified with -Gcharset.
        encoded_cmd = []
        for arg in args:
            if isinstance(arg, unicode):
                arg = arg.encode(self.encoding, 'replace')
            encoded_cmd.append(arg)
        p = subprocess.Popen(encoded_cmd, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if encoded_input:
            p.stdin.write(encoded_input)
        p.stdin.close()
        # NOTE(review): reading stdout to completion before stderr can
        # deadlock if the child fills the stderr pipe first; communicate()
        # would be safer.
        out = p.stdout.read()
        err = p.stderr.read()
        failure = p.wait() != 0
        if failure or err or out:
            return (failure, tag.p(tag.br(), _("The command:"),
                         tag.pre(repr(' '.join(encoded_cmd))),
                         (_("succeeded but emitted the following output:"),
                          _("failed with the following output:"))[failure],
                         out and tag.pre(repr(out)),
                         err and tag.pre(repr(err))))
        else:
            return (False, None)
    def _error_div(self, msg):
        """Display msg in an error box, using Trac style."""
        if isinstance(msg, str):
            msg = to_unicode(msg)
        self.log.error(msg)
        if isinstance(msg, unicode):
            msg = tag.pre(escape(msg))
        return tag.div(
                tag.strong(_("Graphviz macro processor has detected an "
                             "error. Please fix the problem before "
                             "continuing.")),
                msg, class_="system-message")
    def _clean_cache(self):
        """
        The cache manager (clean_cache) is an attempt at keeping the
        cache directory under control. When the cache manager
        determines that it should clean up the cache, it will delete
        files based on the file access time. The files that were least
        accessed will be deleted first.

        The graphviz section of the trac configuration file should
        have an entry called cache_manager to enable the cache
        cleaning code. If it does, then the cache_max_size,
        cache_min_size, cache_max_count and cache_min_count entries
        must also be there.
        """
        if self.cache_manager:
            # os.stat gives back a tuple with: st_mode(0), st_ino(1),
            # st_dev(2), st_nlink(3), st_uid(4), st_gid(5),
            # st_size(6), st_atime(7), st_mtime(8), st_ctime(9)
            entry_list = {}
            atime_list = {}
            size_list = {}
            count = 0
            size = 0
            for name in os.listdir(self.cache_dir):
                #self.log.debug('clean_cache.entry: %s' % name)
                entry_list[name] = os.stat(os.path.join(self.cache_dir, name))
                atime_list.setdefault(entry_list[name][7], []).append(name)
                count = count + 1
                size_list.setdefault(entry_list[name][6], []).append(name)
                size = size + entry_list[name][6]
            # Python 2: dict.keys() returns a list, so in-place sort works.
            atime_keys = atime_list.keys()
            atime_keys.sort()
            #self.log.debug('clean_cache.atime_keys: %s' % atime_keys)
            #self.log.debug('clean_cache.count: %d' % count)
            #self.log.debug('clean_cache.size: %d' % size)
            # In the spirit of keeping the code fairly simple, the
            # clearing out of files from the cache directory may
            # result in the count dropping below cache_min_count if
            # multiple entries are have the same last access
            # time. Same for cache_min_size.
            if count > self.cache_max_count or size > self.cache_max_size:
                while atime_keys and (self.cache_min_count < count or
                                      self.cache_min_size < size):
                    key = atime_keys.pop(0)
                    for file in atime_list[key]:
                        os.unlink(os.path.join(self.cache_dir, file))
                        count = count - 1
                        size = size - entry_list[file][6]
    def _find_cmd(self, cmd, paths):
        """Return the full path of program `cmd` found in `paths`.

        Implicitly returns None when the program is not found in any of
        the candidate directories.
        """
        exe_suffix = ''
        if sys.platform == 'win32':
            exe_suffix = '.exe'
        for path in paths:
            p = os.path.join(path, cmd) + exe_suffix
            if os.path.exists(p):
                return p
| |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate pre-processed data with a trained model.
"""
import ast
import logging
import math
import os
import sys
from argparse import Namespace
from itertools import chain
import numpy as np
import torch
from omegaconf import DictConfig
from fairseq import checkpoint_utils, options, scoring, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
def main(cfg: DictConfig):
    """Validate the generation configuration and dispatch to ``_main``.

    Output is written to ``generate-<subset>.txt`` under ``results_path``
    when one is configured, otherwise to stdout.
    """
    # Accept legacy argparse namespaces by converting them up front.
    if isinstance(cfg, Namespace):
        cfg = convert_namespace_to_omegaconf(cfg)

    assert cfg.common_eval.path is not None, "--path required for generation!"
    assert (
        not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam
    ), "--sampling requires --nbest to be equal to --beam"
    assert (
        cfg.generation.replace_unk is None or cfg.dataset.dataset_impl == "raw"
    ), "--replace-unk requires a raw text dataset (--dataset-impl=raw)"

    # No results directory configured: stream everything to stdout.
    if cfg.common_eval.results_path is None:
        return _main(cfg, sys.stdout)

    os.makedirs(cfg.common_eval.results_path, exist_ok=True)
    output_path = os.path.join(
        cfg.common_eval.results_path,
        "generate-{}.txt".format(cfg.dataset.gen_subset),
    )
    # Line-buffered so progress is visible while generation is running.
    with open(output_path, "w", buffering=1, encoding="utf-8") as out_file:
        return _main(cfg, out_file)
def get_symbols_to_strip_from_output(generator):
    """Return the symbol ids the generator wants stripped from its output.

    Falls back to a singleton set holding the generator's EOS symbol when
    no explicit ``symbols_to_strip_from_output`` attribute is declared.
    """
    try:
        return generator.symbols_to_strip_from_output
    except AttributeError:
        return {generator.eos}
def _main(cfg: DictConfig, output_file):
    """Run generation/translation for ``cfg`` and write all records to
    ``output_file`` (S-/T-/H-/D-/P-/A-/I-/E- lines), returning the scorer
    so the caller can inspect the final metric.
    """
    # Logging is pointed at the same stream as the results file so the run
    # is self-describing.
    logging.basicConfig(
        format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        level=os.environ.get("LOGLEVEL", "INFO").upper(),
        stream=output_file,
    )
    logger = logging.getLogger("fairseq_cli.generate")
    utils.import_user_module(cfg.common)
    # Default batching budget when neither --max-tokens nor --batch-size given.
    if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
        cfg.dataset.max_tokens = 12000
    logger.info(cfg)
    # Fix seed for stochastic decoding
    if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
        np.random.seed(cfg.common.seed)
        utils.set_torch_seed(cfg.common.seed)
    use_cuda = torch.cuda.is_available() and not cfg.common.cpu
    # Load dataset splits
    task = tasks.setup_task(cfg.task)
    # Set dictionaries
    try:
        # Some tasks have no source dictionary (e.g. LM-style tasks).
        src_dict = getattr(task, "source_dictionary", None)
    except NotImplementedError:
        src_dict = None
    tgt_dict = task.target_dictionary
    overrides = ast.literal_eval(cfg.common_eval.model_overrides)
    # Load ensemble
    logger.info("loading model(s) from {}".format(cfg.common_eval.path))
    models, saved_cfg = checkpoint_utils.load_model_ensemble(
        utils.split_paths(cfg.common_eval.path),
        arg_overrides=overrides,
        task=task,
        suffix=cfg.checkpoint.checkpoint_suffix,
        strict=(cfg.checkpoint.checkpoint_shard_count == 1),
        num_shards=cfg.checkpoint.checkpoint_shard_count,
    )
    # loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config
    task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task)
    # Optional language model for LM fusion during decoding.
    if cfg.generation.lm_path is not None:
        overrides["data"] = cfg.task.data
        try:
            lms, _ = checkpoint_utils.load_model_ensemble(
                [cfg.generation.lm_path], arg_overrides=overrides, task=None
            )
        except:
            logger.warning(
                f"Failed to load language model! Please make sure that the language model dict is the same "
                f"as target dict and is located in the data dir ({cfg.task.data})"
            )
            raise
        assert len(lms) == 1
    else:
        # No LM fusion: keep a single None placeholder so the chain() below
        # and lms[0] stay valid.
        lms = [None]
    # Optimize ensemble for generation
    for model in chain(models, lms):
        if model is None:
            continue
        if cfg.common.fp16:
            model.half()
        if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
            model.cuda()
        model.prepare_for_inference_(cfg)
    # Load alignment dictionary for unknown word replacement
    # (None if no unknown word replacement, empty if no path to align dictionary)
    align_dict = utils.load_align_dict(cfg.generation.replace_unk)
    # Load dataset (possibly sharded)
    itr = task.get_batch_iterator(
        dataset=task.dataset(cfg.dataset.gen_subset),
        max_tokens=cfg.dataset.max_tokens,
        max_sentences=cfg.dataset.batch_size,
        max_positions=utils.resolve_max_positions(
            task.max_positions(), *[m.max_positions() for m in models]
        ),
        ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
        required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
        seed=cfg.common.seed,
        num_shards=cfg.distributed_training.distributed_world_size,
        shard_id=cfg.distributed_training.distributed_rank,
        num_workers=cfg.dataset.num_workers,
        data_buffer_size=cfg.dataset.data_buffer_size,
    ).next_epoch_itr(shuffle=False)
    progress = progress_bar.progress_bar(
        itr,
        log_format=cfg.common.log_format,
        log_interval=cfg.common.log_interval,
        default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
    )
    # Initialize generator
    gen_timer = StopwatchMeter()
    extra_gen_cls_kwargs = {"lm_model": lms[0], "lm_weight": cfg.generation.lm_weight}
    generator = task.build_generator(
        models, cfg.generation, extra_gen_cls_kwargs=extra_gen_cls_kwargs
    )
    # Handle tokenization and BPE
    tokenizer = task.build_tokenizer(cfg.tokenizer)
    bpe = task.build_bpe(cfg.bpe)
    def decode_fn(x):
        # Undo BPE first, then detokenize -- the reverse of the encoding order.
        if bpe is not None:
            x = bpe.decode(x)
        if tokenizer is not None:
            x = tokenizer.decode(x)
        return x
    scorer = scoring.build_scorer(cfg.scoring, tgt_dict)
    num_sentences = 0
    has_target = True
    wps_meter = TimeMeter()
    for sample in progress:
        sample = utils.move_to_cuda(sample) if use_cuda else sample
        if "net_input" not in sample:
            continue
        prefix_tokens = None
        # Force the first prefix_size target tokens during decoding.
        if cfg.generation.prefix_size > 0:
            prefix_tokens = sample["target"][:, : cfg.generation.prefix_size]
        constraints = None
        if "constraints" in sample:
            constraints = sample["constraints"]
        gen_timer.start()
        hypos = task.inference_step(
            generator,
            models,
            sample,
            prefix_tokens=prefix_tokens,
            constraints=constraints,
        )
        # Count only the top hypothesis tokens for throughput reporting.
        num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
        gen_timer.stop(num_generated_tokens)
        for i, sample_id in enumerate(sample["id"].tolist()):
            has_target = sample["target"] is not None
            # Remove padding
            if "src_tokens" in sample["net_input"]:
                src_tokens = utils.strip_pad(
                    sample["net_input"]["src_tokens"][i, :], tgt_dict.pad()
                )
            else:
                src_tokens = None
            target_tokens = None
            if has_target:
                target_tokens = (
                    utils.strip_pad(sample["target"][i, :], tgt_dict.pad()).int().cpu()
                )
            # Either retrieve the original sentences or regenerate them from tokens.
            if align_dict is not None:
                src_str = task.dataset(cfg.dataset.gen_subset).src.get_original_text(
                    sample_id
                )
                target_str = task.dataset(cfg.dataset.gen_subset).tgt.get_original_text(
                    sample_id
                )
            else:
                if src_dict is not None:
                    src_str = src_dict.string(src_tokens, cfg.common_eval.post_process)
                else:
                    src_str = ""
                if has_target:
                    target_str = tgt_dict.string(
                        target_tokens,
                        cfg.common_eval.post_process,
                        escape_unk=True,
                        extra_symbols_to_ignore=get_symbols_to_strip_from_output(
                            generator
                        ),
                    )
            src_str = decode_fn(src_str)
            if has_target:
                target_str = decode_fn(target_str)
            if not cfg.common_eval.quiet:
                if src_dict is not None:
                    print("S-{}\t{}".format(sample_id, src_str), file=output_file)
                if has_target:
                    print("T-{}\t{}".format(sample_id, target_str), file=output_file)
            # Process top predictions
            for j, hypo in enumerate(hypos[i][: cfg.generation.nbest]):
                hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
                    hypo_tokens=hypo["tokens"].int().cpu(),
                    src_str=src_str,
                    alignment=hypo["alignment"],
                    align_dict=align_dict,
                    tgt_dict=tgt_dict,
                    remove_bpe=cfg.common_eval.post_process,
                    extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator),
                )
                detok_hypo_str = decode_fn(hypo_str)
                if not cfg.common_eval.quiet:
                    score = hypo["score"] / math.log(2)  # convert to base 2
                    # original hypothesis (after tokenization and BPE)
                    print(
                        "H-{}\t{}\t{}".format(sample_id, score, hypo_str),
                        file=output_file,
                    )
                    # detokenized hypothesis
                    print(
                        "D-{}\t{}\t{}".format(sample_id, score, detok_hypo_str),
                        file=output_file,
                    )
                    # per-token positional scores
                    print(
                        "P-{}\t{}".format(
                            sample_id,
                            " ".join(
                                map(
                                    lambda x: "{:.4f}".format(x),
                                    # convert from base e to base 2
                                    hypo["positional_scores"]
                                    .div_(math.log(2))
                                    .tolist(),
                                )
                            ),
                        ),
                        file=output_file,
                    )
                    if cfg.generation.print_alignment == "hard":
                        print(
                            "A-{}\t{}".format(
                                sample_id,
                                " ".join(
                                    [
                                        "{}-{}".format(src_idx, tgt_idx)
                                        for src_idx, tgt_idx in alignment
                                    ]
                                ),
                            ),
                            file=output_file,
                        )
                    if cfg.generation.print_alignment == "soft":
                        print(
                            "A-{}\t{}".format(
                                sample_id,
                                " ".join(
                                    [",".join(src_probs) for src_probs in alignment]
                                ),
                            ),
                            file=output_file,
                        )
                    if cfg.generation.print_step:
                        print(
                            "I-{}\t{}".format(sample_id, hypo["steps"]),
                            file=output_file,
                        )
                    if cfg.generation.retain_iter_history:
                        # E- lines: one per refinement step of iterative decoders.
                        for step, h in enumerate(hypo["history"]):
                            _, h_str, _ = utils.post_process_prediction(
                                hypo_tokens=h["tokens"].int().cpu(),
                                src_str=src_str,
                                alignment=None,
                                align_dict=None,
                                tgt_dict=tgt_dict,
                                remove_bpe=None,
                            )
                            print(
                                "E-{}_{}\t{}".format(sample_id, step, h_str),
                                file=output_file,
                            )
                # Score only the top hypothesis
                if has_target and j == 0:
                    if (
                        align_dict is not None
                        or cfg.common_eval.post_process is not None
                    ):
                        # Convert back to tokens for evaluation with unk replacement and/or without BPE
                        target_tokens = tgt_dict.encode_line(
                            target_str, add_if_not_exist=True
                        )
                        hypo_tokens = tgt_dict.encode_line(
                            detok_hypo_str, add_if_not_exist=True
                        )
                    if hasattr(scorer, "add_string"):
                        scorer.add_string(target_str, detok_hypo_str)
                    else:
                        scorer.add(target_tokens, hypo_tokens)
        wps_meter.update(num_generated_tokens)
        progress.log({"wps": round(wps_meter.avg)})
        num_sentences += (
            sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
        )
    logger.info("NOTE: hypothesis and token scores are output in base 2")
    logger.info(
        "Translated {:,} sentences ({:,} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)".format(
            num_sentences,
            gen_timer.n,
            gen_timer.sum,
            num_sentences / gen_timer.sum,
            1.0 / gen_timer.avg,
        )
    )
    if has_target:
        if cfg.bpe and not cfg.generation.sacrebleu:
            if cfg.common_eval.post_process:
                logger.warning(
                    "BLEU score is being computed by splitting detokenized string on spaces, this is probably not what you want. Use --sacrebleu for standard 13a BLEU tokenization"
                )
            else:
                logger.warning(
                    "If you are using BPE on the target side, the BLEU score is computed on BPE tokens, not on proper words. Use --sacrebleu for standard 13a BLEU tokenization"
                )
        # use print to be consistent with other main outputs: S-, H-, T-, D- and so on
        print(
            "Generate {} with beam={}: {}".format(
                cfg.dataset.gen_subset, cfg.generation.beam, scorer.result_string()
            ),
            file=output_file,
        )
    return scorer
def cli_main():
    """Command-line entry point: parse generation args and run ``main``."""
    parser = options.get_generation_parser()
    # TODO: replace this workaround with refactoring of `AudioPretraining`
    parser.add_argument(
        "--arch",
        "-a",
        metavar="ARCH",
        default="wav2vec2",
        help="Model architecture. For constructing tasks that rely on "
        "model args (e.g. `AudioPretraining`)",
    )
    parsed_args = options.parse_args_and_arch(parser)
    main(parsed_args)


if __name__ == "__main__":
    cli_main()
| |
##
# Copyright (c) 2005-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from __future__ import print_function
from __future__ import with_statement
import gettext
import inspect
import os
import struct
import array
from locale import normalize
from twext.python.log import Logger
from pycalendar.duration import Duration
try:
from osx.corefoundation import CFError, CFDataRef
from osx.utils import CFLocaleRef, CFPropertyListRef
foundationImported = True
except ImportError:
foundationImported = False
from twistedcaldav.config import config
log = Logger()
"""
Localization module
How to use:
from __future__ import with_statement
from localization import translationTo
with translationTo('de'):
print(_("Hello"))
print(_("The event will last %(days)d days") % { 'days' : 4 })
... Hallo
... Die Veranstaltung dauert 4 Tage
Before you can actually get translated text, you need to:
1) Choose a "domain" for your code, such as 'calendarserver'
2) Run xgettext on your source to generate a <domain>.po file.
3) For each language, give the .po file to the person
who is doing the translation for editing
4) Run msgfmt.py on the translated .po to generate a binary .mo
5) Put the .mo into locales/<lang>/LC_MESSAGES/<domain>.mo
The German .po file for the above example would look like:
msgid "Hello"
msgstr "Hallo"
msgid "The event will last %(days)d days"
msgstr "Die Veranstaltung dauert %(days)d Tage"
The translationTo class automatically binds '_' to the appropriate translation
function for the duration of the "with" context. It's smart enough to allow
nesting of "with" contexts, as in:
with translationTo('de'):
print(_("Hello"))  # in German
with translationTo('fr'):
print(_("Hello"))  # in French
print(_("Hello"))  # in German
If a translation file cannot be found for the specified language, it will fall
back to 'en'. If 'en' can't be found, gettext will raise IOError.
If you use the with/as form, you will get an object that implements some
helper methods for date formatting:
with translationTo('en') as trans:
print(trans.dtDate(DateTime.getToday()))
... Thursday, October 23, 2008
with translationTo('fr') as trans:
print(trans.dtDate(DateTime.getToday()))
... Jeudi, Octobre 23, 2008
The .po files contain localizable strings for month and day names, as well as
date format strings, in case a locale likes these values in a different order
or with different punctuation.
TODO: recurrence
"""
class translationTo(object):
    """
    Context manager that rebinds the caller's '_' to a gettext translation
    function for the requested language for the duration of a "with" block,
    and provides helpers for localized date/time/duration formatting.
    """
    # Class-level cache of gettext translation objects, shared by all
    # instances; keyed by (lang, domain, localeDir).
    translations = {}
    def __init__(self, lang, domain='calendarserver', localeDir=None):
        # localeDir defaults to the server-configured locales directory.
        if localeDir is None:
            localeDir = config.Localization.LocalesDirectory
        # Cache gettext translation objects in class.translations
        key = (lang, domain, localeDir)
        self.translation = self.translations.get(key, None)
        if self.translation is None:
            # fallback=True: if no .mo exists for [lang, 'en'], gettext
            # returns a NullTranslations object instead of raising.
            self.translation = gettext.translation(
                domain=domain,
                localedir=localeDir, languages=[lang, 'en'], fallback=True)
            self.translations[key] = self.translation
    def __enter__(self):
        """Rebind '_' in the *caller's* module globals to our translator."""
        # Get the caller's globals so we can rebind their '_' to our translator
        caller_globals = inspect.stack()[1][0].f_globals
        # Store whatever '_' is already bound to so we can restore it later
        # (this is what makes nested "with translationTo(...)" blocks work)
        if '_' in caller_globals:
            self.prev = caller_globals['_']
        # Rebind '_' to our translator
        caller_globals['_'] = self.translation.ugettext
        # What we return here is accessible to the caller via the 'as' clause
        return self
    def __exit__(self, type, value, traceback):
        """Restore the caller's previous '_' binding, if there was one."""
        # Restore '_' if it previously had a value
        if hasattr(self, 'prev'):
            inspect.stack()[1][0].f_globals['_'] = self.prev
        # Don't swallow exceptions
        return False
    def monthAbbreviation(self, monthNumber):
        """Return the localized month abbreviation; monthNumber is 1-based."""
        return self.translation.ugettext(monthsAbbrev[monthNumber])
    def date(self, component):
        """Return the localized date string for the component's DTSTART."""
        dtStart = component.propertyValue("DTSTART")
        return self.dtDate(dtStart)
    def time(self, component):
        """
        Return a (time-range, duration) pair of localized strings for the
        component.
        Examples:
        3:30 PM to 4:30 PM PDT
        All day
        3:30 PM PDT
        3:30 PM PDT to 7:30 PM EDT
        1 day
        2 days
        1 day 1 hour
        1 day 4 hours 18 minutes
        """
        # Bind to '_' so pygettext.py will pick this up for translation
        _ = self.translation.ugettext
        tzStart = tzEnd = None
        dtStart = component.propertyValue("DTSTART")
        if dtStart.isDateOnly():
            return ("", _("All day"))
        else:
            tzStart = dtStart.timeZoneDescriptor()
        dtEnd = component.propertyValue("DTEND")
        if dtEnd:
            if not dtEnd.isDateOnly():
                tzEnd = dtEnd.timeZoneDescriptor()
            duration = dtEnd - dtStart
        else:
            # No DTEND: derive the end from DURATION, or default to one day
            # (per RFC 5545 defaulting rules -- TODO confirm against caller).
            tzEnd = tzStart
            duration = component.propertyValue("DURATION")
            if duration:
                dtEnd = dtStart + duration
            else:
                if dtStart.isDateOnly():
                    dtEnd = None
                    duration = Duration(days=1)
                else:
                    dtEnd = dtStart + Duration(days=1)
                    dtEnd.setHHMMSS(0, 0, 0)
                    duration = dtEnd - dtStart
        if dtStart == dtEnd:
            # Zero-length event: show only the start time.
            return (self.dtTime(dtStart), "")
        return (
            _("%(startTime)s to %(endTime)s")
            % {
                'startTime': self.dtTime(
                    dtStart,
                    includeTimezone=(tzStart != tzEnd)
                ),
                'endTime': self.dtTime(dtEnd),
            },
            self.dtDuration(duration)
        )
    def dtDate(self, val):
        """Return a fully spelled-out, localized date like
        'Thursday, October 23, 2008'."""
        # Bind to '_' so pygettext.py will pick this up for translation
        _ = self.translation.ugettext
        return (
            _("%(dayName)s, %(monthName)s %(dayNumber)d, %(yearNumber)d")
            % {
                # getDayOfWeek() is shifted so index 0 lines up with
                # Monday-first daysFull -- presumably it is Sunday-based.
                'dayName': _(daysFull[(val.getDayOfWeek() + 6) % 7]),
                'monthName': _(monthsFull[val.getMonth()]),
                'dayNumber': val.getDay(),
                'yearNumber': val.getYear(),
            }
        )
    def dtTime(self, val, includeTimezone=True):
        """Return a localized 12-hour clock time, e.g. '3:30 PM PDT'.
        Returns "" for date-only values."""
        if val.isDateOnly():
            return ""
        # Bind to '_' so pygettext.py will pick this up for translation
        _ = self.translation.ugettext
        ampm = _("AM") if val.getHours() < 12 else _("PM")
        hour12 = val.getHours() % 12
        if hour12 == 0:
            hour12 = 12
        result = (
            _("%(hour12Number)d:%(minuteNumber)02d %(ampm)s")
            % {
                # hour24Number is unused by the default format but offered
                # so translated format strings may use it.
                'hour24Number': val.getHours(), # 0-23
                'hour12Number': hour12, # 1-12
                'minuteNumber': val.getMinutes(), # 0-59
                'ampm': ampm,
            }
        )
        if includeTimezone and val.local():
            result += " %s" % (val.timeZoneDescriptor(),)
        return result
    def dtDuration(self, val):
        """Return a localized duration like '1 day 4 hours 18 minutes'."""
        # Bind to '_' so pygettext.py will pick this up for translation
        _ = self.translation.ugettext
        parts = []
        total = val.getTotalSeconds()
        # NOTE(review): relies on Python 2 integer division throughout;
        # under Python 3 these would produce floats -- confirm runtime.
        days = total / (24 * 60 * 60)
        if days == 1:
            parts.append(_("1 day"))
        elif days > 1:
            parts.append(
                _("%(dayCount)d days") %
                {'dayCount': days}
            )
        hours = divmod(total / 3600, 24)[1]
        minutes = divmod(total / 60, 60)[1]
        seconds = divmod(total, 60)[1]
        if hours == 1:
            parts.append(_("1 hour"))
        elif hours > 1:
            parts.append(
                _("%(hourCount)d hours") %
                {'hourCount': hours}
            )
        if minutes == 1:
            parts.append(_("1 minute"))
        elif minutes > 1:
            parts.append(
                _("%(minuteCount)d minutes") %
                {'minuteCount': minutes}
            )
        if seconds == 1:
            parts.append(_("1 second"))
        elif seconds > 1:
            parts.append(
                _("%(secondCount)d seconds") %
                {'secondCount': seconds}
            )
        return " ".join(parts)
# The strings below are wrapped in _( ) for the benefit of pygettext. We don't
# actually want them translated until they're used.
_ = lambda x: x
daysFull = [
_("Monday"),
_("Tuesday"),
_("Wednesday"),
_("Thursday"),
_("Friday"),
_("Saturday"),
_("Sunday"),
]
daysAbbrev = [
_("Mon"),
_("Tue"),
_("Wed"),
_("Thu"),
_("Fri"),
_("Sun"),
_("Sat"),
]
monthsFull = [
"month is 1-based",
_("January"),
_("February"),
_("March"),
_("April"),
_("May"),
_("June"),
_("July"),
_("August"),
_("September"),
_("October"),
_("November"),
_("December"),
]
monthsAbbrev = [
"month is 1-based",
_("JAN"),
_("FEB"),
_("MAR"),
_("APR"),
_("MAY"),
_("JUN"),
_("JUL"),
_("AUG"),
_("SEP"),
_("OCT"),
_("NOV"),
_("DEC"),
]
##
# String file conversion routines
##
def processLocalizationFiles(settings):
    """
    Scan the Apple translations directory for <lang>.lproj directories and
    convert any calendarserver.strings file that is newer than its
    corresponding gettext .mo file under the locales directory.
    No-op when the OS X Foundation bridge could not be imported.
    """
    lprojRoot = settings.TranslationsDirectory
    gnuRoot = settings.LocalesDirectory
    if not foundationImported:
        return
    # Do we have an Apple translations directory?
    if lprojRoot and gnuRoot and os.path.exists(lprojRoot):
        log.info("Looking for Apple .lproj directories in {r}", r=lprojRoot)
        # Make sure the gnutext translations directory exists
        if not os.path.exists(gnuRoot):
            try:
                os.mkdir(gnuRoot)
            except OSError:
                log.warn(
                    "Could not create gnuttext translation directory: {r}",
                    r=gnuRoot,
                )
                return
        # Scan for Apple translations (directories ending in .lproj)
        for item in os.listdir(lprojRoot):
            if item.endswith(".lproj"):
                stringsFile = os.path.join(
                    lprojRoot, item,
                    'calendarserver.strings'
                )
                # Strip the ".lproj" suffix and normalize to a locale name
                localeName = normalize(item[:-6])
                moFile = os.path.join(
                    gnuRoot, localeName, 'LC_MESSAGES',
                    'calendarserver.mo'
                )
                if os.path.exists(stringsFile):
                    # Convert only when the .mo is missing or stale.
                    if (
                        not os.path.exists(moFile) or
                        os.stat(stringsFile).st_mtime >
                        os.stat(moFile).st_mtime
                    ):
                        log.info("Converting {s} to {m}", s=stringsFile, m=moFile)
                        try:
                            convertStringsFile(stringsFile, moFile)
                        except Exception, e:
                            # Best-effort: log and continue with other locales.
                            log.error(
                                "Failed to convert {s} to {m}: {ex}",
                                s=stringsFile, m=moFile, ex=e,
                            )
                    else:
                        log.info("{m} is up to date", m=moFile)
class ParseError(Exception):
    """Raised when a .strings file cannot be parsed as a property list."""
def convertStringsFile(src, dest):
    """
    Convert an Apple binary-plist .strings file at ``src`` into a GNU
    gettext binary .mo file at ``dest``, creating the destination
    directory if needed. Raises ParseError when the plist cannot be read.
    """
    dir = os.path.dirname(dest)
    if not os.path.exists(dir):
        try:
            os.makedirs(dir)
        except OSError:
            # can't create directory to hold .po file
            return
    # Parse the binary plist .strings file:
    with open(src) as f:
        data = f.read()
    data = CFDataRef.fromString(data)
    try:
        parsed = CFPropertyListRef.createFromData(data)
        strings = parsed.toDict()
    except CFError as error:
        raise ParseError(error)
    # The format of GNUtext MO files is described here:
    # http://www.gnu.org/software/autoconf/manual/gettext/MO-Files.html
    # MO lookup requires the original strings to be sorted.
    originals = strings.keys()
    originals.sort()
    # descriptors holds (keyOffset, keyLen, valueOffset, valueLen) tuples,
    # with offsets relative to the start of the keys/values areas.
    descriptors = []
    keys = ''
    values = ''
    for original in originals:
        translation = strings[original]
        # MO files store raw bytes; encode any unicode as UTF-8.
        if isinstance(original, unicode):
            original = original.encode("UTF-8")
        if isinstance(translation, unicode):
            translation = translation.encode("UTF-8")
        descriptors.append(
            (
                len(keys), len(original), len(values),
                len(translation)
            )
        )
        keys += original + '\0' # <NUL> terminated
        values += translation + '\0'
    # The header is 28 bytes, each descriptor is 8 bytes, with two descriptors
    # per string (one pointing at original, one pointing at translation)
    keysOffset = 28 + len(originals) * 2 * 8
    valuesOffset = keysOffset + len(keys)
    # Each descriptor in the file is a (length, absolute offset) pair.
    keyDescriptors = []
    valueDescriptors = []
    for origOffset, origLen, transOffset, transLen in descriptors:
        keyDescriptors.append(origLen)
        keyDescriptors.append(keysOffset + origOffset)
        valueDescriptors.append(transLen)
        valueDescriptors.append(valuesOffset + transOffset)
    result = struct.pack(
        "Iiiiiii",
        0x950412DEL, # magic number
        0, # file format revision
        len(originals), # number of strings
        28, # offset of table with original strings
        28 + len(originals) * 8, # offset of table with translation strings
        0, # size of hashing table
        0 # offset of hashing table
    )
    result += array.array("i", keyDescriptors).tostring()
    result += array.array("i", valueDescriptors).tostring()
    result += keys
    result += values
    with open(dest, "wb") as outFile:
        outFile.write(result)
def _remapLanguageCode(code):
"""
Remap certain language codes to others, per the localization team
"""
if code == "zh-Hans": # Simplified Chinese
code = "zh_CN"
elif code == "zh-Hant": # Traditional Chinese
code = "zh_TW"
return code
def getLanguage(config):
    """
    If the language has been specified explicitly in the config, return it. Otherwise
    look it up via NSLocale on OS X. Failing that, return "en"

    @param config: The configuration object to examine
    @type config: ConfigDict
    @return: The language code -- on OS X the supported ones are:
        de, en, es, fr, it, ja, ko, nl, zh_CN, zh_TW
    @rtype: C{str}
    """
    if config.Localization.Language:
        return config.Localization.Language

    try:
        language = CFLocaleRef.preferredLanguages()[0]
        language = _remapLanguageCode(language)
    except Exception:
        # Was a bare "except:", which also swallowed KeyboardInterrupt and
        # SystemExit. Exception still covers the expected failures here
        # (CFLocaleRef undefined when the Foundation import failed, or the
        # locale lookup itself erroring); fall back to English.
        language = "en"
    return language
| |
# -*- coding: utf-8 -*-
from os import path as os_path
import re
def requires_ids_or_filenames(method):
    """
    A decorator for spectrum library methods that require either a list of Ids or a list of filenames.

    :param method:
        A method belonging to a sub-class of SpectrumLibrary.
    """

    def wrapper(model, *args, **kwargs):
        ids_supplied = kwargs.get("ids") is not None
        filenames_supplied = kwargs.get("filenames") is not None

        # Exactly one of the two selectors must be present.
        assert ids_supplied or filenames_supplied, "Must supply a list of Ids or a list of filenames"
        assert not (ids_supplied and filenames_supplied), "Must supply either a list of Ids or a list of filenames, not both."

        # Normalise a scalar id/filename into a one-entry tuple.
        for key in ("ids", "filenames"):
            value = kwargs.get(key)
            if value is not None and not isinstance(value, (list, tuple)):
                kwargs[key] = (value,)

        return method(model, *args, **kwargs)

    return wrapper
class SpectrumLibrary(object):
    """
    An abstract spectrum library object.
    Spectrum libraries are a bit like having a directory full of data files on disk, each containing a spectrum.
    However, they also include a database which can store arbitrary metadata about each spectrum -- for example,
    stellar parameters and abundances. It is possible to search a spectrum library based on metadata constraints.
    Various implementations of the SpectrumLibrary class are provided, storing the metadata in different flavours of
    SQL database. SQLite is probably the simplest and creates portable libraries that you can transfer to a different
    machine with all metadata intact. MySQL is a faster database engine, and probably a better option for data which
    doesn't need to move around.

    :ivar list[string] _metadata_fields:
        A list of the metadata fields set on spectra in this SpectrumLibrary
    """

    def __init__(self, path=None):
        # Subclasses are expected to populate this with the metadata field
        # names known to the library; the abstract base has none.
        self._metadata_fields = None

    def __str__(self):
        # Fixed: the format string previously lacked the closing ">",
        # producing "<module.Name instance" (compare __repr__ below).
        return "<{module}.{name} instance>".format(module=self.__module__,
                                                   name=type(self).__name__)

    def __repr__(self):
        return "<{0}.{1} object at {2}>".format(self.__module__,
                                                type(self).__name__, hex(id(self)))

    def purge(self):
        """
        This irrevocably deletes the spectrum library from the database and from your disk. You have been warned.
        :return:
            None
        """
        raise NotImplementedError("The purge method must be implemented by each SpectrumLibrary implementation.")

    def list_metadata_fields(self):
        """
        List all of the metadata fields set on spectra in this spectrum library.
        :return:
            List of strings
        """
        return self._metadata_fields

    def search(self, **kwargs):
        """
        Search for spectra within this SpectrumLibrary which fall within some metadata constraints.
        :param kwargs:
            A dictionary of metadata constraints. Constraints can be specified either as <key: value> pairs, in
            which case the value must match exactly, or as <key: [min,max]> in which case the value must fall within
            the specified range.
        :return:
            A tuple of objects, each representing a spectrum which matches the search criteria. Within each object,
            the properties <specId> and <filename> are defined as integers and strings respectively.
        """
        raise NotImplementedError("The search method must be implemented by each SpectrumLibrary implementation.")

    def open(self, ids=None, filenames=None):
        """
        Open some spectra from this spectrum library, and return them as a SpectrumArray object.
        :param ids:
            List of the integer ids of the spectra to receive this metadata, or None to select them by filename.
        :type ids:
            List of int, or None
        :param filenames:
            List of the filenames of the spectra to receive this metadata, or None to select them by integer id.
        :type filenames:
            List of str, or None
        :return:
            A SpectrumArray object.
        """
        raise NotImplementedError("The open method must be implemented by each SpectrumLibrary implementation.")

    def insert(self, spectra, filenames, origin="Undefined", metadata=None, overwrite=False):
        """
        Insert the spectra from a SpectrumArray object into this spectrum library.
        :param spectra:
            A SpectrumArray object contain the spectra to be inserted into this spectrum library.
        :type spectra:
            SpectrumArray
        :param filenames:
            A list of the filenames with which to save the spectra contained within this SpectrumArray
        :type filenames:
            List of str
        :param origin:
            A string describing where these spectra are being imported from. Normally the name of the module which is
            importing them.
        :type origin:
            str
        :param metadata:
            A list of dictionaries of metadata to set on each of the spectra in this SpectrumArray.
        :type metadata:
            List of dict
        :param overwrite:
            Boolean flag indicating whether we're allowed to overwrite pre-existing spectra with the same filenames
        :type overwrite:
            bool
        :return:
            None
        """
        raise NotImplementedError("The insert method must be implemented by each SpectrumLibrary implementation.")

    def import_from(self, other, overwrite=False, **kwargs):
        """
        Search for spectra within another SpectrumLibrary, and import all matching spectra into this library.
        :param other:
            The SpectrumLibrary from which we should import spectra.
        :type other:
            SpectrumLibrary
        :param overwrite:
            Boolean flag indicating whether we're allowed to overwrite pre-existing spectra with the same filenames
        :type overwrite:
            bool
        :param kwargs:
            A dictionary of metadata constraints. Constraints can be specified either as <key: value> pairs, in
            which case the value must match exactly, or as <key: [min,max]> in which case the value must fall within
            the specified range.
        :return:
            A tuple of objects, each representing a spectrum which matches the search criteria. Within each object,
            the properties <specId> and <filename> are defined as integers and strings respectively.
        """
        spectra = other.search(**kwargs)
        for spectrum in spectra:
            uid = spectrum["specId"]
            filename = spectrum["filename"]
            origin = spectrum["origin"]
            obj = other.open(ids=[uid])
            self.insert(spectra=obj, filenames=[filename], origin=origin, overwrite=overwrite)

    @classmethod
    def open_and_search(cls, library_spec, workspace, extra_constraints):
        """
        Helper function which allows you to open a spectrum library and search it in a single function call.
        The argument <library_spec> can take the form of the name of a library <my_library>, or can contain
        metadata constraints as a comma-separated list in square brackets, e.g. <my_library[continuum_normalised=1]>.
        This is useful if you want to write a script which allows the user to specify parameter cuts on to command line
        when they specify which spectrum library to operate on.
        Multiple constraints should be specified as a comma-separated list, e.g.:
        my_library[continuum_normalised=1,5000<Teff<6000]
        Constraints can be specified in two formats as shown above. You can either require equality, or require that
        the parameter falls within a range using the < operator.
        If you use the < operator, you must specify both lower and upper limit.
        :param library_spec:
            The name of the spectrum library to open, suffixed with any metadata constraints in [] brackets.
        :param workspace:
            The path of disk to where spectrum libraries are stored.
        :param extra_constraints:
            A dictionary containing any additional metadata constraints to be added to the ones supplied in
            library_spec.
        :return:
            Dictionary, containing:
            library -- a SpectrumLibrary instance
            items -- the result of the metadata search within the SpectrumLibrary
            constraints -- a list of the metadata constraints applied
        """
        # Raw string avoids invalid-escape warnings; the pattern is unchanged.
        test = re.match(r"([^\[]*)\[(.*)\]$", library_spec)
        constraints = {}
        if test is None:
            library_name = library_spec
        else:
            library_name = test.group(1)
            for constraint in test.group(2).split(","):
                words_1 = constraint.split("=")
                words_2 = constraint.split("<")
                if len(words_1) == 2:
                    # Equality constraint: <name>=<value>; numeric if possible.
                    constraint_name = words_1[0]
                    try:
                        constraint_value = float(words_1[1])
                    except ValueError:
                        constraint_value = words_1[1]
                    constraints[constraint_name] = constraint_value
                elif len(words_2) == 3:
                    # Range constraint: <min> < <name> < <max>.
                    constraint_name = words_2[1]
                    try:
                        constraint_value_a = float(words_2[0])
                        constraint_value_b = float(words_2[2])
                    except ValueError:
                        constraint_value_a = words_2[0]
                        constraint_value_b = words_2[2]
                    constraints[constraint_name] = (constraint_value_a, constraint_value_b)
                else:
                    assert False, "Could not parse constraint <{}>".format(constraint)
        constraints.update(extra_constraints)
        library_path = os_path.join(workspace, library_name)
        input_library = cls(path=library_path)
        library_items = input_library.search(**constraints)
        return {
            "library": input_library,
            "items": library_items,
            "constraints": constraints
        }
| |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 17 06:38:41 2014
@author: raymondyee
"""
import numpy as np
import pandas as pd
from pandas import Series
import census
import us
import settings
# Module-wide Census API client, authenticated with the key from local settings;
# all the generators below issue their queries through this object.
c = census.Census(key=settings.CENSUS_KEY)
# generators for the various census geographic entities of interest
def states(variables='NAME'):
    """Yield one record per state (plus DC), filtering out territories."""
    valid_fips = {state.fips for state in us.states.STATES}
    for record in c.sf1.get(variables, geo={'for': 'state:*'}):
        # need to filter out non-states
        if record['state'] in valid_fips:
            yield record
def counties(variables='NAME'):
    """ask for all the states in one call"""
    # fips codes for the states + DC, used to drop territory counties
    valid_state_fips = {s.fips for s in us.states.STATES}
    query = {'for': 'county:*',
             'in': 'state:*'}
    for record in c.sf1.get(variables, geo=query):
        # eliminate counties whose states aren't in a state or DC
        if record['state'] in valid_state_fips:
            yield record
def counties2(variables='NAME'):
    """generator for all counties"""
    # since we can get all the counties in one call, this function is for
    # demonstrating the use of walking through the states to get at the counties
    for state in us.states.STATES:
        query = {'for': 'county:*',
                 'in': 'state:{fips}'.format(fips=state.fips)}
        for record in c.sf1.get(variables, geo=query):
            yield record
def tracts(variables='NAME'):
    """Yield every census tract, walking state by state, county by county."""
    for state in us.states.STATES:
        # handy to print out state to monitor progress
        # print state.fips, state
        county_query = {'for': 'county:*',
                        'in': 'state:{fips}'.format(fips=state.fips)}
        for county in c.sf1.get('NAME', geo=county_query):
            # print county['state'], county['NAME']
            tract_query = {'for': 'tract:*',
                           'in': 'state:{s_fips} county:{c_fips}'.format(s_fips=state.fips,
                                                                         c_fips=county['county'])}
            for tract in c.sf1.get(variables, geo=tract_query):
                yield tract
def msas(variables="NAME"):
    """Yield metropolitan/micropolitan statistical areas, state by state."""
    for state in us.STATES:
        query = {
            'for': 'metropolitan statistical area/micropolitan statistical area:*',
            'in': 'state:{state_fips}'.format(state_fips=state.fips),
        }
        for msa in c.sf1.get(variables, geo=query):
            yield msa
def block_groups(variables='NAME'):
    """Generate block groups, walking county-by-county.

    Example query:
    http://api.census.gov/data/2010/sf1?get=P0010001&for=block+group:*&in=state:02+county:170
    """
    # reuse the county generator to enumerate every (state, county) pair
    for county in counties(variables):
        geo = {'for': 'block group:*',
               'in': 'state:{state} county:{county}'.format(
                   state=county['state'], county=county['county'])}
        # pass geo by keyword for consistency with the other generators
        for block_group in c.sf1.get(variables, geo=geo):
            yield block_group
def blocks(variables='NAME'):
    """Generate blocks, walking tract-by-tract.

    Example query:
    http://api.census.gov/data/2010/sf1?get=P0010001&for=block:*&in=state:02+county:290+tract:00100
    """
    # reuse the tract generator to enumerate every (state, county, tract)
    for tract in tracts(variables):
        geo = {'for': 'block:*',
               'in': 'state:{state} county:{county} tract:{tract}'.format(
                   state=tract['state'],
                   county=tract['county'],
                   tract=tract['tract'])}
        # pass geo by keyword for consistency with the other generators
        for block in c.sf1.get(variables, geo=geo):
            yield block
def csas(variables="NAME"):
# http://api.census.gov/data/2010/sf1?get=P0010001&for=combined+statistical+area:*&in=state:24
for state in us.STATES:
geo = {'for':'combined statistical area:*',
'in':'state:{state_fips}'.format(state_fips=state.fips)
}
for csa in c.sf1.get(variables, geo=geo):
yield csa
def districts(variables="NAME"):
# http://api.census.gov/data/2010/sf1?get=P0010001&for=congressional+district:*&in=state:24
for state in us.STATES:
geo = {'for':'congressional district:*',
'in':'state:{state_fips}'.format(state_fips=state.fips)
}
for district in c.sf1.get(variables, geo=geo):
yield district
def zip_code_tabulation_areas(variables="NAME"):
# http://api.census.gov/data/2010/sf1?get=P0010001&for=zip+code+tabulation+area:*&in=state:02
for state in us.STATES:
geo = {'for':'zip code tabulation area:*',
'in':'state:{state_fips}'.format(state_fips=state.fips)
}
for zip_code_tabulation_area in c.sf1.get(variables, geo=geo):
yield zip_code_tabulation_area
def census_labels(prefix='P005', n0=1, n1=17, field_width=4, include_name=True, join=False):
    """Generate zero-padded census variable labels, e.g. 'P0050001'...'P0050017'.

    Parameters:
        prefix: table prefix for every label.
        n0, n1: inclusive numeric range to generate.
        field_width: zero-padding width of the numeric suffix.
        include_name: prepend 'NAME' to the list.
        join: return a single comma-separated string instead of a list.
    """
    label_format = "{i:0%dd}" % field_width
    # range() (not the Python-2-only xrange) keeps this working on both 2 and 3
    variables = [prefix + label_format.format(i=i) for i in range(n0, n1 + 1)]
    if include_name:
        variables = ['NAME'] + variables
    if join:
        return ",".join(variables)
    else:
        return variables
def rdot_labels(other=True):
    """Return the racial-dot-map category labels.

    The trailing 'Other' bucket is included unless other=False.
    """
    labels = ['White', 'Black', 'Asian', 'Hispanic']
    return labels + ['Other'] if other else labels
# column order for final output frames: identifiers, raw category counts,
# per-category proportions (p_*), then the four diversity indices
FINAL_LABELS = ['NAME', 'Total'] + rdot_labels() + ['p_White', 'p_Black', 'p_Asian', 'p_Hispanic', 'p_Other'] + ['entropy5', 'entropy4', 'entropy_rice', 'gini_simpson']
def convert_to_rdotmap(row):
    """Collapse the P005 variables into Total/White/Black/Hispanic/Asian/Other.

    'Other' aggregates P0050005 and P0050007-P0050009; the returned Series
    is ordered Total, White, Black, Hispanic, Asian, Other.
    """
    other_total = (row['P0050005'] + row['P0050007']
                   + row['P0050008'] + row['P0050009'])
    values = {
        'Total': row['P0050001'],
        'White': row['P0050003'],
        'Black': row['P0050004'],
        'Asian': row['P0050006'],
        'Hispanic': row['P0050010'],
        'Other': other_total,
    }
    return pd.Series(values,
                     index=['Total', 'White', 'Black', 'Hispanic', 'Asian', 'Other'])
def normalize(s):
    """Scale a Series so that its entries sum to 1.0."""
    return s.astype('float') / np.sum(s)
def normalize_relabel(s):
    """Normalize a Series to sum to 1.0 and prefix each index label with 'p_'."""
    shares = s.astype('float') / np.sum(s)
    prefixed = ["p_" + label for label in s.index]
    return Series(list(shares), prefixed)
def entropy(series):
    """Normalized Shannon index of a series of counts.

    Returns 0.0 when fewer than two nonzero entries are present.  Note
    that normalization uses the length of the *input* series (zeros
    included), so a series of all-equal nonzero entries scores 1.0.
    """
    nonzero = series[series != 0]
    if len(nonzero) <= 1:
        # zero or one categories present: no diversity
        return 0.0
    # maximum entropy attainable for a series of this (total) length
    max_entropy = -np.log(1.0 / len(series))
    shares = nonzero.astype('float') / float(sum(nonzero))
    return sum(-shares * np.log(shares)) / max_entropy
def gini_simpson(s):
    """Gini-Simpson diversity index: 1 - sum of squared category shares.

    https://en.wikipedia.org/wiki/Diversity_index#Gini.E2.80.93Simpson_index
    """
    # normalize inline: category shares summing to 1.0
    shares = s.astype('float') / np.sum(s)
    return 1 - np.sum(shares * shares)
def entropy_rice(series):
    """Diversity score following the Rice Kinder Institute calculation.

    Expects a Series keyed by 'Asian','Black','Hispanic','White','Other'.
    http://kinder.rice.edu/uploadedFiles/Urban_Research_Center/Media/Houston%20Region%20Grows%20More%20Ethnically%20Diverse%202-13.pdf
    """
    shares = normalize(series)
    other_share = shares['Other']
    # guard against log(0) when the 'Other' bucket is empty
    other_term = other_share * np.log(other_share) if other_share > 0 else 0.0
    return (np.log(0.2) * entropy(series) - other_term) / np.log(0.25)
def diversity(df):
    """Takes a df with the P005 variables and does entropy calculation"""
    # convert populations to int
    # NOTE(review): this assignment mutates the caller's DataFrame in place
    # before df is rebound below — confirm callers expect that side effect
    df[census_labels(include_name=False)] = df[census_labels(include_name=False)].astype('int')
    # append the Total/White/Black/Hispanic/Asian/Other count columns
    df = pd.concat((df, df.apply(convert_to_rdotmap, axis=1)),axis=1)
    # append the p_* proportion columns derived from the count columns
    df = pd.concat((df,df[rdot_labels()].apply(normalize_relabel,axis=1)), axis=1)
    # four diversity indices, each computed row-wise from the count columns
    df['entropy5'] = df.apply(lambda x:entropy(x[rdot_labels()]), axis=1)
    df['entropy4'] = df.apply(lambda x:entropy(x[rdot_labels(other=False)]), axis=1)
    df['entropy_rice'] = df.apply(lambda x:entropy_rice(x[rdot_labels()]), axis=1)
    df['gini_simpson'] = df.apply(lambda x:gini_simpson(x[rdot_labels()]), axis=1)
    return df
| |
"""Test the DHCP discovery integration."""
import datetime
import threading
from unittest.mock import patch
from scapy.error import Scapy_Exception
from scapy.layers.dhcp import DHCP
from scapy.layers.l2 import Ether
from homeassistant import config_entries
from homeassistant.components import dhcp
from homeassistant.components.device_tracker.const import (
ATTR_HOST_NAME,
ATTR_IP,
ATTR_MAC,
ATTR_SOURCE_TYPE,
SOURCE_TYPE_ROUTER,
)
from homeassistant.const import (
EVENT_HOMEASSISTANT_STARTED,
EVENT_HOMEASSISTANT_STOP,
STATE_HOME,
STATE_NOT_HOME,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
# connect b8:b7:f1:6d:b5:33 192.168.210.56
RAW_DHCP_REQUEST = (
b"\xff\xff\xff\xff\xff\xff\xb8\xb7\xf1m\xb53\x08\x00E\x00\x01P\x06E"
b"\x00\x00\xff\x11\xb4X\x00\x00\x00\x00\xff\xff\xff\xff\x00D\x00C\x01<"
b"\x0b\x14\x01\x01\x06\x00jmjV\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb8\xb7\xf1m\xb53\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00c\x82Sc5\x01\x039\x02\x05\xdc2\x04\xc0\xa8\xd286"
b"\x04\xc0\xa8\xd0\x017\x04\x01\x03\x1c\x06\x0c\x07connect\xff\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
)
# iRobot-AE9EC12DD3B04885BCBFA36AFB01E1CC 50:14:79:03:85:2c 192.168.1.120
RAW_DHCP_RENEWAL = (
b"\x00\x15\x5d\x8e\xed\x02\x50\x14\x79\x03\x85\x2c\x08\x00\x45\x00"
b"\x01\x8e\x51\xd2\x40\x00\x40\x11\x63\xa1\xc0\xa8\x01\x78\xc0\xa8"
b"\x01\x23\x00\x44\x00\x43\x01\x7a\x12\x09\x01\x01\x06\x00\xd4\xea"
b"\xb2\xfd\xff\xff\x00\x00\xc0\xa8\x01\x78\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x50\x14\x79\x03\x85\x2c\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x63\x82\x53\x63\x35\x01\x03\x39\x02\x05"
b"\xdc\x3c\x45\x64\x68\x63\x70\x63\x64\x2d\x35\x2e\x32\x2e\x31\x30"
b"\x3a\x4c\x69\x6e\x75\x78\x2d\x33\x2e\x31\x38\x2e\x37\x31\x3a\x61"
b"\x72\x6d\x76\x37\x6c\x3a\x51\x75\x61\x6c\x63\x6f\x6d\x6d\x20\x54"
b"\x65\x63\x68\x6e\x6f\x6c\x6f\x67\x69\x65\x73\x2c\x20\x49\x6e\x63"
b"\x20\x41\x50\x51\x38\x30\x30\x39\x0c\x27\x69\x52\x6f\x62\x6f\x74"
b"\x2d\x41\x45\x39\x45\x43\x31\x32\x44\x44\x33\x42\x30\x34\x38\x38"
b"\x35\x42\x43\x42\x46\x41\x33\x36\x41\x46\x42\x30\x31\x45\x31\x43"
b"\x43\x37\x08\x01\x21\x03\x06\x1c\x33\x3a\x3b\xff"
)
async def test_dhcp_match_hostname_and_macaddress(hass):
    """Test matching based on hostname and macaddress."""
    dhcp_watcher = dhcp.DHCPWatcher(
        hass,
        {},
        [{"domain": "mock-domain", "hostname": "connect", "macaddress": "B8B7F1*"}],
    )
    packet = Ether(RAW_DHCP_REQUEST)
    with patch.object(hass.config_entries.flow, "async_init") as mock_init:
        dhcp_watcher.handle_dhcp_packet(packet)
        # Ensure no change is ignored
        dhcp_watcher.handle_dhcp_packet(packet)
    # the second, identical packet must not start another discovery flow
    assert len(mock_init.mock_calls) == 1
    assert mock_init.mock_calls[0][1][0] == "mock-domain"
    assert mock_init.mock_calls[0][2]["context"] == {
        "source": config_entries.SOURCE_DHCP
    }
    # discovery payload extracted from RAW_DHCP_REQUEST
    assert mock_init.mock_calls[0][2]["data"] == {
        dhcp.IP_ADDRESS: "192.168.210.56",
        dhcp.HOSTNAME: "connect",
        dhcp.MAC_ADDRESS: "b8b7f16db533",
    }
async def test_dhcp_renewal_match_hostname_and_macaddress(hass):
    """Test renewal matching based on hostname and macaddress."""
    dhcp_watcher = dhcp.DHCPWatcher(
        hass,
        {},
        [{"domain": "mock-domain", "hostname": "irobot-*", "macaddress": "501479*"}],
    )
    # a renewal packet (unicast request) rather than an initial broadcast
    packet = Ether(RAW_DHCP_RENEWAL)
    with patch.object(hass.config_entries.flow, "async_init") as mock_init:
        dhcp_watcher.handle_dhcp_packet(packet)
        # Ensure no change is ignored
        dhcp_watcher.handle_dhcp_packet(packet)
    assert len(mock_init.mock_calls) == 1
    assert mock_init.mock_calls[0][1][0] == "mock-domain"
    assert mock_init.mock_calls[0][2]["context"] == {
        "source": config_entries.SOURCE_DHCP
    }
    # hostname is reported lower-cased relative to the packet contents
    assert mock_init.mock_calls[0][2]["data"] == {
        dhcp.IP_ADDRESS: "192.168.1.120",
        dhcp.HOSTNAME: "irobot-ae9ec12dd3b04885bcbfa36afb01e1cc",
        dhcp.MAC_ADDRESS: "50147903852c",
    }
async def test_dhcp_match_hostname(hass):
    """Test matching based on hostname only."""
    # matcher omits macaddress: hostname alone should be enough to match
    dhcp_watcher = dhcp.DHCPWatcher(
        hass, {}, [{"domain": "mock-domain", "hostname": "connect"}]
    )
    packet = Ether(RAW_DHCP_REQUEST)
    with patch.object(hass.config_entries.flow, "async_init") as mock_init:
        dhcp_watcher.handle_dhcp_packet(packet)
    assert len(mock_init.mock_calls) == 1
    assert mock_init.mock_calls[0][1][0] == "mock-domain"
    assert mock_init.mock_calls[0][2]["context"] == {
        "source": config_entries.SOURCE_DHCP
    }
    assert mock_init.mock_calls[0][2]["data"] == {
        dhcp.IP_ADDRESS: "192.168.210.56",
        dhcp.HOSTNAME: "connect",
        dhcp.MAC_ADDRESS: "b8b7f16db533",
    }
async def test_dhcp_match_macaddress(hass):
    """Test matching based on macaddress only."""
    # matcher omits hostname: a mac OUI prefix alone should be enough to match
    dhcp_watcher = dhcp.DHCPWatcher(
        hass, {}, [{"domain": "mock-domain", "macaddress": "B8B7F1*"}]
    )
    packet = Ether(RAW_DHCP_REQUEST)
    with patch.object(hass.config_entries.flow, "async_init") as mock_init:
        dhcp_watcher.handle_dhcp_packet(packet)
    assert len(mock_init.mock_calls) == 1
    assert mock_init.mock_calls[0][1][0] == "mock-domain"
    assert mock_init.mock_calls[0][2]["context"] == {
        "source": config_entries.SOURCE_DHCP
    }
    assert mock_init.mock_calls[0][2]["data"] == {
        dhcp.IP_ADDRESS: "192.168.210.56",
        dhcp.HOSTNAME: "connect",
        dhcp.MAC_ADDRESS: "b8b7f16db533",
    }
async def test_dhcp_nomatch(hass):
    """Test not matching based on macaddress only."""
    # ABC123* does not match the packet's b8b7f1... mac, so no flow starts
    dhcp_watcher = dhcp.DHCPWatcher(
        hass, {}, [{"domain": "mock-domain", "macaddress": "ABC123*"}]
    )
    packet = Ether(RAW_DHCP_REQUEST)
    with patch.object(hass.config_entries.flow, "async_init") as mock_init:
        dhcp_watcher.handle_dhcp_packet(packet)
    assert len(mock_init.mock_calls) == 0
async def test_dhcp_nomatch_hostname(hass):
    """Test not matching based on hostname only."""
    # nomatch* does not match the packet's 'connect' hostname, so no flow starts
    dhcp_watcher = dhcp.DHCPWatcher(
        hass, {}, [{"domain": "mock-domain", "hostname": "nomatch*"}]
    )
    packet = Ether(RAW_DHCP_REQUEST)
    with patch.object(hass.config_entries.flow, "async_init") as mock_init:
        dhcp_watcher.handle_dhcp_packet(packet)
    assert len(mock_init.mock_calls) == 0
async def test_dhcp_nomatch_non_dhcp_packet(hass):
    """Test matching does not throw on a non-dhcp packet."""
    dhcp_watcher = dhcp.DHCPWatcher(
        hass, {}, [{"domain": "mock-domain", "hostname": "nomatch*"}]
    )
    # an empty Ether frame carries no DHCP layer at all
    packet = Ether(b"")
    with patch.object(hass.config_entries.flow, "async_init") as mock_init:
        dhcp_watcher.handle_dhcp_packet(packet)
    assert len(mock_init.mock_calls) == 0
async def test_dhcp_nomatch_non_dhcp_request_packet(hass):
    """Test nothing happens with the wrong message-type."""
    dhcp_watcher = dhcp.DHCPWatcher(
        hass, {}, [{"domain": "mock-domain", "hostname": "nomatch*"}]
    )
    packet = Ether(RAW_DHCP_REQUEST)
    # rewrite the options with a non-request message-type (4 instead of 3)
    packet[DHCP].options = [
        ("message-type", 4),
        ("max_dhcp_size", 1500),
        ("requested_addr", "192.168.210.56"),
        ("server_id", "192.168.208.1"),
        ("param_req_list", [1, 3, 28, 6]),
        ("hostname", b"connect"),
    ]
    with patch.object(hass.config_entries.flow, "async_init") as mock_init:
        dhcp_watcher.handle_dhcp_packet(packet)
    assert len(mock_init.mock_calls) == 0
async def test_dhcp_invalid_hostname(hass):
    """Test we ignore invalid hostnames."""
    dhcp_watcher = dhcp.DHCPWatcher(
        hass, {}, [{"domain": "mock-domain", "hostname": "nomatch*"}]
    )
    packet = Ether(RAW_DHCP_REQUEST)
    # hostname is given as str here — scapy normally yields bytes, so the
    # watcher should treat this value as invalid and skip the packet
    packet[DHCP].options = [
        ("message-type", 3),
        ("max_dhcp_size", 1500),
        ("requested_addr", "192.168.210.56"),
        ("server_id", "192.168.208.1"),
        ("param_req_list", [1, 3, 28, 6]),
        ("hostname", "connect"),
    ]
    with patch.object(hass.config_entries.flow, "async_init") as mock_init:
        dhcp_watcher.handle_dhcp_packet(packet)
    assert len(mock_init.mock_calls) == 0
async def test_dhcp_missing_hostname(hass):
    """Test we ignore missing hostnames."""
    dhcp_watcher = dhcp.DHCPWatcher(
        hass, {}, [{"domain": "mock-domain", "hostname": "nomatch*"}]
    )
    packet = Ether(RAW_DHCP_REQUEST)
    # hostname option present but None — should be ignored without raising
    packet[DHCP].options = [
        ("message-type", 3),
        ("max_dhcp_size", 1500),
        ("requested_addr", "192.168.210.56"),
        ("server_id", "192.168.208.1"),
        ("param_req_list", [1, 3, 28, 6]),
        ("hostname", None),
    ]
    with patch.object(hass.config_entries.flow, "async_init") as mock_init:
        dhcp_watcher.handle_dhcp_packet(packet)
    assert len(mock_init.mock_calls) == 0
async def test_dhcp_invalid_option(hass):
    """Test we ignore invalid hostname option."""
    dhcp_watcher = dhcp.DHCPWatcher(
        hass, {}, [{"domain": "mock-domain", "hostname": "nomatch*"}]
    )
    packet = Ether(RAW_DHCP_REQUEST)
    # the final option is a bare string, not a (key, value) tuple —
    # a malformed option must be skipped without raising
    packet[DHCP].options = [
        ("message-type", 3),
        ("max_dhcp_size", 1500),
        ("requested_addr", "192.168.208.55"),
        ("server_id", "192.168.208.1"),
        ("param_req_list", [1, 3, 28, 6]),
        ("hostname"),
    ]
    with patch.object(hass.config_entries.flow, "async_init") as mock_init:
        dhcp_watcher.handle_dhcp_packet(packet)
    assert len(mock_init.mock_calls) == 0
async def test_setup_and_stop(hass):
    """Test we can setup and stop."""
    assert await async_setup_component(
        hass,
        dhcp.DOMAIN,
        {},
    )
    await hass.async_block_till_done()
    # stub out the sniffer, socket setup, filter compilation and discovery
    # so no real network access happens during start/stop
    with patch("homeassistant.components.dhcp.AsyncSniffer.start") as start_call, patch(
        "homeassistant.components.dhcp._verify_l2socket_setup",
    ), patch("homeassistant.components.dhcp.compile_filter",), patch(
        "homeassistant.components.dhcp.DiscoverHosts.async_discover"
    ):
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
        await hass.async_block_till_done()
    # the sniffer must have been started exactly once by the STARTED event
    start_call.assert_called_once()
async def test_setup_fails_as_root(hass, caplog):
    """Test we handle sniff setup failing when running as root (euid 0).

    A Scapy_Exception from the l2 socket setup should be caught and
    logged, not raised.
    """
    assert await async_setup_component(
        hass,
        dhcp.DOMAIN,
        {},
    )
    await hass.async_block_till_done()
    # NOTE(review): a threading.Event used to be created here and set at the
    # end of the with-block, but nothing ever waited on it — removed as dead
    # code since it provided no synchronization.
    with patch("os.geteuid", return_value=0), patch(
        "homeassistant.components.dhcp._verify_l2socket_setup",
        side_effect=Scapy_Exception,
    ), patch("homeassistant.components.dhcp.DiscoverHosts.async_discover"):
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
        await hass.async_block_till_done()
    assert "Cannot watch for dhcp packets" in caplog.text
async def test_setup_fails_non_root(hass, caplog):
    """Test we handle sniff setup failing as non-root."""
    assert await async_setup_component(
        hass,
        dhcp.DOMAIN,
        {},
    )
    await hass.async_block_till_done()
    # euid 10: not root, so the error message should mention CAP_NET_RAW
    with patch("os.geteuid", return_value=10), patch(
        "homeassistant.components.dhcp._verify_l2socket_setup",
        side_effect=Scapy_Exception,
    ), patch("homeassistant.components.dhcp.DiscoverHosts.async_discover"):
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
        await hass.async_block_till_done()
    assert "Cannot watch for dhcp packets without root or CAP_NET_RAW" in caplog.text
async def test_setup_fails_with_broken_libpcap(hass, caplog):
    """Test we abort if libpcap is missing or broken."""
    assert await async_setup_component(
        hass,
        dhcp.DOMAIN,
        {},
    )
    await hass.async_block_till_done()
    # compile_filter raising ImportError simulates a broken/missing libpcap
    with patch("homeassistant.components.dhcp._verify_l2socket_setup",), patch(
        "homeassistant.components.dhcp.compile_filter",
        side_effect=ImportError,
    ) as compile_filter, patch(
        "homeassistant.components.dhcp.AsyncSniffer",
    ) as async_sniffer, patch(
        "homeassistant.components.dhcp.DiscoverHosts.async_discover"
    ):
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
        await hass.async_block_till_done()
    # filter compilation was attempted, but the sniffer must never start
    assert compile_filter.called
    assert not async_sniffer.called
    assert (
        "Cannot watch for dhcp packets without a functional packet filter"
        in caplog.text
    )
async def test_device_tracker_hostname_and_macaddress_exists_before_start(hass):
    """Test matching based on hostname and macaddress before start."""
    # seed the tracker state BEFORE the watcher starts; async_start should
    # scan existing states and still trigger discovery
    hass.states.async_set(
        "device_tracker.august_connect",
        STATE_HOME,
        {
            ATTR_HOST_NAME: "Connect",
            ATTR_IP: "192.168.210.56",
            ATTR_SOURCE_TYPE: SOURCE_TYPE_ROUTER,
            ATTR_MAC: "B8:B7:F1:6D:B5:33",
        },
    )
    with patch.object(hass.config_entries.flow, "async_init") as mock_init:
        device_tracker_watcher = dhcp.DeviceTrackerWatcher(
            hass,
            {},
            [{"domain": "mock-domain", "hostname": "connect", "macaddress": "B8B7F1*"}],
        )
        await device_tracker_watcher.async_start()
        await hass.async_block_till_done()
        await device_tracker_watcher.async_stop()
        await hass.async_block_till_done()
    assert len(mock_init.mock_calls) == 1
    assert mock_init.mock_calls[0][1][0] == "mock-domain"
    assert mock_init.mock_calls[0][2]["context"] == {
        "source": config_entries.SOURCE_DHCP
    }
    # hostname and mac are normalized to lower case / stripped colons
    assert mock_init.mock_calls[0][2]["data"] == {
        dhcp.IP_ADDRESS: "192.168.210.56",
        dhcp.HOSTNAME: "connect",
        dhcp.MAC_ADDRESS: "b8b7f16db533",
    }
async def test_device_tracker_hostname_and_macaddress_after_start(hass):
    """Test matching based on hostname and macaddress after start."""
    with patch.object(hass.config_entries.flow, "async_init") as mock_init:
        device_tracker_watcher = dhcp.DeviceTrackerWatcher(
            hass,
            {},
            [{"domain": "mock-domain", "hostname": "connect", "macaddress": "B8B7F1*"}],
        )
        await device_tracker_watcher.async_start()
        await hass.async_block_till_done()
        # state set AFTER the watcher started: the state-change listener
        # should pick it up and trigger discovery
        hass.states.async_set(
            "device_tracker.august_connect",
            STATE_HOME,
            {
                ATTR_HOST_NAME: "Connect",
                ATTR_IP: "192.168.210.56",
                ATTR_SOURCE_TYPE: SOURCE_TYPE_ROUTER,
                ATTR_MAC: "B8:B7:F1:6D:B5:33",
            },
        )
        await hass.async_block_till_done()
        await device_tracker_watcher.async_stop()
        await hass.async_block_till_done()
    assert len(mock_init.mock_calls) == 1
    assert mock_init.mock_calls[0][1][0] == "mock-domain"
    assert mock_init.mock_calls[0][2]["context"] == {
        "source": config_entries.SOURCE_DHCP
    }
    assert mock_init.mock_calls[0][2]["data"] == {
        dhcp.IP_ADDRESS: "192.168.210.56",
        dhcp.HOSTNAME: "connect",
        dhcp.MAC_ADDRESS: "b8b7f16db533",
    }
async def test_device_tracker_hostname_and_macaddress_after_start_not_home(hass):
    """Test matching based on hostname and macaddress after start but not home."""
    with patch.object(hass.config_entries.flow, "async_init") as mock_init:
        device_tracker_watcher = dhcp.DeviceTrackerWatcher(
            hass,
            {},
            [{"domain": "mock-domain", "hostname": "connect", "macaddress": "B8B7F1*"}],
        )
        await device_tracker_watcher.async_start()
        await hass.async_block_till_done()
        # STATE_NOT_HOME: device is away, so no discovery should trigger
        hass.states.async_set(
            "device_tracker.august_connect",
            STATE_NOT_HOME,
            {
                ATTR_HOST_NAME: "connect",
                ATTR_IP: "192.168.210.56",
                ATTR_SOURCE_TYPE: SOURCE_TYPE_ROUTER,
                ATTR_MAC: "B8:B7:F1:6D:B5:33",
            },
        )
        await hass.async_block_till_done()
        await device_tracker_watcher.async_stop()
        await hass.async_block_till_done()
    assert len(mock_init.mock_calls) == 0
async def test_device_tracker_hostname_and_macaddress_after_start_not_router(hass):
    """Test matching based on hostname and macaddress after start but not router."""
    with patch.object(hass.config_entries.flow, "async_init") as mock_init:
        device_tracker_watcher = dhcp.DeviceTrackerWatcher(
            hass,
            {},
            [{"domain": "mock-domain", "hostname": "connect", "macaddress": "B8B7F1*"}],
        )
        await device_tracker_watcher.async_start()
        await hass.async_block_till_done()
        # source type is not SOURCE_TYPE_ROUTER, so the watcher must ignore it
        hass.states.async_set(
            "device_tracker.august_connect",
            STATE_HOME,
            {
                ATTR_HOST_NAME: "connect",
                ATTR_IP: "192.168.210.56",
                ATTR_SOURCE_TYPE: "something_else",
                ATTR_MAC: "B8:B7:F1:6D:B5:33",
            },
        )
        await hass.async_block_till_done()
        await device_tracker_watcher.async_stop()
        await hass.async_block_till_done()
    assert len(mock_init.mock_calls) == 0
async def test_device_tracker_hostname_and_macaddress_after_start_hostname_missing(
    hass,
):
    """Test matching based on hostname and macaddress after start but missing hostname."""
    with patch.object(hass.config_entries.flow, "async_init") as mock_init:
        device_tracker_watcher = dhcp.DeviceTrackerWatcher(
            hass,
            {},
            [{"domain": "mock-domain", "hostname": "connect", "macaddress": "B8B7F1*"}],
        )
        await device_tracker_watcher.async_start()
        await hass.async_block_till_done()
        # no ATTR_HOST_NAME in the attributes: no discovery should trigger
        hass.states.async_set(
            "device_tracker.august_connect",
            STATE_HOME,
            {
                ATTR_IP: "192.168.210.56",
                ATTR_SOURCE_TYPE: SOURCE_TYPE_ROUTER,
                ATTR_MAC: "B8:B7:F1:6D:B5:33",
            },
        )
        await hass.async_block_till_done()
        await device_tracker_watcher.async_stop()
        await hass.async_block_till_done()
    assert len(mock_init.mock_calls) == 0
async def test_device_tracker_ignore_self_assigned_ips_before_start(hass):
    """Test matching ignores self assigned ip address."""
    # 169.254.x.x is a link-local (APIPA) address — not a DHCP lease,
    # so it must not produce a discovery
    hass.states.async_set(
        "device_tracker.august_connect",
        STATE_HOME,
        {
            ATTR_HOST_NAME: "connect",
            ATTR_IP: "169.254.210.56",
            ATTR_SOURCE_TYPE: SOURCE_TYPE_ROUTER,
            ATTR_MAC: "B8:B7:F1:6D:B5:33",
        },
    )
    with patch.object(hass.config_entries.flow, "async_init") as mock_init:
        device_tracker_watcher = dhcp.DeviceTrackerWatcher(
            hass,
            {},
            [{"domain": "mock-domain", "hostname": "connect", "macaddress": "B8B7F1*"}],
        )
        await device_tracker_watcher.async_start()
        await hass.async_block_till_done()
        await device_tracker_watcher.async_stop()
        await hass.async_block_till_done()
    assert len(mock_init.mock_calls) == 0
async def test_aiodiscover_finds_new_hosts(hass):
    """Test aiodiscover finds new host."""
    # async_discover is stubbed to return one matching host record
    with patch.object(hass.config_entries.flow, "async_init") as mock_init, patch(
        "homeassistant.components.dhcp.DiscoverHosts.async_discover",
        return_value=[
            {
                dhcp.DISCOVERY_IP_ADDRESS: "192.168.210.56",
                dhcp.DISCOVERY_HOSTNAME: "connect",
                dhcp.DISCOVERY_MAC_ADDRESS: "b8b7f16db533",
            }
        ],
    ):
        device_tracker_watcher = dhcp.NetworkWatcher(
            hass,
            {},
            [{"domain": "mock-domain", "hostname": "connect", "macaddress": "B8B7F1*"}],
        )
        await device_tracker_watcher.async_start()
        await hass.async_block_till_done()
        await device_tracker_watcher.async_stop()
        await hass.async_block_till_done()
    assert len(mock_init.mock_calls) == 1
    assert mock_init.mock_calls[0][1][0] == "mock-domain"
    assert mock_init.mock_calls[0][2]["context"] == {
        "source": config_entries.SOURCE_DHCP
    }
    assert mock_init.mock_calls[0][2]["data"] == {
        dhcp.IP_ADDRESS: "192.168.210.56",
        dhcp.HOSTNAME: "connect",
        dhcp.MAC_ADDRESS: "b8b7f16db533",
    }
async def test_aiodiscover_does_not_call_again_on_shorter_hostname(hass):
    """Verify longer hostnames generate a new flow but shorter ones do not.

    Some routers will truncate hostnames so we want to accept
    additional discovery where the hostname is longer and then
    reject shorter ones.
    """
    # three records for the same mac: short, longer, then short again —
    # only the first two should produce flows
    with patch.object(hass.config_entries.flow, "async_init") as mock_init, patch(
        "homeassistant.components.dhcp.DiscoverHosts.async_discover",
        return_value=[
            {
                dhcp.DISCOVERY_IP_ADDRESS: "192.168.210.56",
                dhcp.DISCOVERY_HOSTNAME: "irobot-abc",
                dhcp.DISCOVERY_MAC_ADDRESS: "b8b7f16db533",
            },
            {
                dhcp.DISCOVERY_IP_ADDRESS: "192.168.210.56",
                dhcp.DISCOVERY_HOSTNAME: "irobot-abcdef",
                dhcp.DISCOVERY_MAC_ADDRESS: "b8b7f16db533",
            },
            {
                dhcp.DISCOVERY_IP_ADDRESS: "192.168.210.56",
                dhcp.DISCOVERY_HOSTNAME: "irobot-abc",
                dhcp.DISCOVERY_MAC_ADDRESS: "b8b7f16db533",
            },
        ],
    ):
        device_tracker_watcher = dhcp.NetworkWatcher(
            hass,
            {},
            [
                {
                    "domain": "mock-domain",
                    "hostname": "irobot-*",
                    "macaddress": "B8B7F1*",
                }
            ],
        )
        await device_tracker_watcher.async_start()
        await hass.async_block_till_done()
        await device_tracker_watcher.async_stop()
        await hass.async_block_till_done()
    # exactly two flows: the repeat of the shorter hostname is rejected
    assert len(mock_init.mock_calls) == 2
    assert mock_init.mock_calls[0][1][0] == "mock-domain"
    assert mock_init.mock_calls[0][2]["context"] == {
        "source": config_entries.SOURCE_DHCP
    }
    assert mock_init.mock_calls[0][2]["data"] == {
        dhcp.IP_ADDRESS: "192.168.210.56",
        dhcp.HOSTNAME: "irobot-abc",
        dhcp.MAC_ADDRESS: "b8b7f16db533",
    }
    assert mock_init.mock_calls[1][1][0] == "mock-domain"
    assert mock_init.mock_calls[1][2]["context"] == {
        "source": config_entries.SOURCE_DHCP
    }
    assert mock_init.mock_calls[1][2]["data"] == {
        dhcp.IP_ADDRESS: "192.168.210.56",
        dhcp.HOSTNAME: "irobot-abcdef",
        dhcp.MAC_ADDRESS: "b8b7f16db533",
    }
async def test_aiodiscover_finds_new_hosts_after_interval(hass):
    """Test aiodiscover finds new host after interval."""
    # first scan returns nothing — no flows expected
    with patch.object(hass.config_entries.flow, "async_init") as mock_init, patch(
        "homeassistant.components.dhcp.DiscoverHosts.async_discover",
        return_value=[],
    ):
        device_tracker_watcher = dhcp.NetworkWatcher(
            hass,
            {},
            [{"domain": "mock-domain", "hostname": "connect", "macaddress": "B8B7F1*"}],
        )
        await device_tracker_watcher.async_start()
        await hass.async_block_till_done()
    assert len(mock_init.mock_calls) == 0
    # after the periodic rescan interval elapses, a matching host appears
    with patch.object(hass.config_entries.flow, "async_init") as mock_init, patch(
        "homeassistant.components.dhcp.DiscoverHosts.async_discover",
        return_value=[
            {
                dhcp.DISCOVERY_IP_ADDRESS: "192.168.210.56",
                dhcp.DISCOVERY_HOSTNAME: "connect",
                dhcp.DISCOVERY_MAC_ADDRESS: "b8b7f16db533",
            }
        ],
    ):
        # jump past the scan interval to fire the scheduled rescan
        async_fire_time_changed(hass, dt_util.utcnow() + datetime.timedelta(minutes=65))
        await hass.async_block_till_done()
        await device_tracker_watcher.async_stop()
        await hass.async_block_till_done()
    assert len(mock_init.mock_calls) == 1
    assert mock_init.mock_calls[0][1][0] == "mock-domain"
    assert mock_init.mock_calls[0][2]["context"] == {
        "source": config_entries.SOURCE_DHCP
    }
    assert mock_init.mock_calls[0][2]["data"] == {
        dhcp.IP_ADDRESS: "192.168.210.56",
        dhcp.HOSTNAME: "connect",
        dhcp.MAC_ADDRESS: "b8b7f16db533",
    }
| |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
import os
import unittest
from chatbase import Message, MessageSet, MessageTypes, InvalidMessageTypeError
class TestMessage(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestMessage, self).__init__(*args, **kwargs)
self.inst = Message()
def test_init(self):
self.assertEqual(self.inst.api_key, '')
self.assertEqual(self.inst.platform, '')
self.assertEqual(self.inst.message, '')
self.assertEqual(self.inst.intent, '')
self.assertEqual(self.inst.version, '')
self.assertEqual(self.inst.user_id, '')
self.assertTrue(type(self.inst.time_stamp) is int)
self.assertEqual(self.inst.type, MessageTypes.USER)
self.assertFalse(self.inst.not_handled)
self.assertFalse(self.inst.feedback)
def test_ts_method(self):
self.assertTrue(type(Message.get_current_timestamp()) is int)
def test_type_setting(self):
# instance the entity in function to eliminate test-case bleeding
i = Message()
i.set_as_type_user()
self.assertEqual(i.type, MessageTypes.USER)
i.set_as_type_agent()
self.assertEqual(i.type, MessageTypes.AGENT)
def test_not_handled_setting(self):
i = Message()
i.set_as_not_handled()
self.assertTrue(i.not_handled)
i.set_as_handled()
self.assertFalse(i.not_handled)
def test_feedback_setting(self):
i = Message()
i.set_as_feedback()
self.assertTrue(i.feedback)
i.set_as_not_feedback()
self.assertFalse(i.feedback)
def test_to_json(self):
api_key = '1234'
platform = '1'
message = '2'
intent = '3'
version = '4'
user_id = '5'
time_stamp = int(round(time.time() * 1e3))
i = Message(api_key=api_key, platform=platform, message=message,
intent=intent, version=version, user_id=user_id,
type=MessageTypes.USER, not_handled=True,
time_stamp=time_stamp)
i.set_as_feedback()
self.assertEqual(json.loads(i.to_json()), {
'api_key': api_key,
'platform': platform,
'message': message,
'intent': intent,
'version': version,
'user_id': user_id,
'time_stamp': time_stamp,
'type': MessageTypes.USER, # since we did not set as type agent
'not_handled': True,
'feedback': True
})
def test_message_set_append_message(self):
api_key = '1234'
platform = '1'
message = '2'
intent = '3'
version = '4'
user_id = '5'
time_stamp = int(round(time.time() * 1e3))
msg1 = Message(api_key=api_key, platform=platform, message=message,
intent=intent, version=version, user_id=user_id,
type=MessageTypes.USER, not_handled=True,
time_stamp=time_stamp)
msg1.set_as_feedback()
msg2 = Message(api_key=api_key, platform=platform, message=message,
version=version, user_id=user_id,
type=MessageTypes.AGENT)
message_set = MessageSet(api_key=api_key, platform=platform,
version=version, user_id=user_id)
message_set.append_message(msg1)
message_set.append_message(msg2)
msg1 = message_set.messages[0]
self.assertEqual(json.loads(msg1.to_json()), {
'api_key': api_key,
'platform': platform,
'message': message,
'intent': intent,
'version': version,
'user_id': user_id,
'time_stamp': time_stamp,
'type': MessageTypes.USER, # since we did not set as type agent
'not_handled': True,
'feedback': True
})
msg2 = message_set.messages[1]
self.assertEqual(json.loads(msg2.to_json()), {
'api_key': api_key,
'platform': platform,
'message': message,
'intent': msg2.intent,
'version': version,
'user_id': user_id,
'time_stamp': msg2.time_stamp,
'type': MessageTypes.AGENT, # since we did set as type agent
'not_handled': False,
'feedback': False
})
def test_message_set_new_message(self):
api_key = '1234'
platform = '1'
message = '2'
intent = '3'
version = '4'
user_id = '5'
time_stamp = int(round(time.time() * 1e3))
message_set = MessageSet(api_key=api_key, platform=platform,
version=version, user_id=user_id)
msg1 = message_set.new_message(intent=intent, message=message,
type=MessageTypes.USER,
not_handled=True, time_stamp=time_stamp)
msg1.set_as_feedback()
msg2 = message_set.new_message(message=message, type=MessageTypes.AGENT)
self.assertEqual(json.loads(msg1.to_json()), {
'api_key': api_key,
'platform': platform,
'message': message,
'intent': intent,
'version': version,
'user_id': user_id,
'time_stamp': time_stamp,
'type': MessageTypes.USER, # since we did not set as type agent
'not_handled': True,
'feedback': True
})
self.assertEqual(json.loads(msg2.to_json()), {
'api_key': api_key,
'platform': platform,
'message': message,
'intent': msg2.intent,
'version': version,
'user_id': user_id,
'time_stamp': msg2.time_stamp,
'type': MessageTypes.AGENT, # since we did not set as type agent
'not_handled': False,
'feedback': False
})
    def test_live_send(self):
        """Live integration test: send one Message to the real service.

        Skipped (early return) unless the CB_TEST_API_KEY environment
        variable supplies a usable API key.
        """
        test_api_key = os.environ.get('CB_TEST_API_KEY')
        if test_api_key is None:
            print("Warning: Skipping live integration test without test API key.")
            return
        i = Message(api_key=test_api_key,
                    platform="python-lib-test",
                    message="test-message",
                    intent="test-library",
                    version="0.1",
                    user_id="12345")
        resp = i.send()
        self.assertEqual(resp.status_code, 200)
    def test_live_set_send(self):
        """Live integration test: send a two-message MessageSet to the real
        service.  Skipped unless CB_TEST_API_KEY is set in the environment."""
        test_api_key = os.environ.get('CB_TEST_API_KEY')
        if test_api_key is None:
            print("Warning: Skipping live integration test without test API key.")
            return
        s = MessageSet(api_key=test_api_key,
                       platform="python-lib-test",
                       version="0.1",
                       user_id="12345")
        # Messages are mutated after creation; new_message registers them
        # with the set, so the later edits are included in the send.
        i = s.new_message()
        i.message = "msg-1"
        i.intent = "int-1"
        i = s.new_message()
        i.message = "msg-2"
        i.intent = "int-2"
        resp = s.send()
        self.assertEqual(resp.status_code, 200)
# Allow running this test module directly (python thisfile.py).
if __name__ == '__main__':
    unittest.main()
| |
from cattle import from_env
import pytest
import random
import requests
import os
import time
import logging
import paramiko
import inspect
import re
from docker import Client
# Module-level wiring: verbose logging plus the image uuids and defaults
# shared by the whole integration-test suite.  Several values can be
# overridden through environment variables.
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
TEST_IMAGE_UUID = os.environ.get('CATTLE_TEST_AGENT_IMAGE',
                                 'docker:cattle/test-agent:v7')
SSH_HOST_IMAGE_UUID = os.environ.get('CATTLE_SSH_HOST_IMAGE',
                                     'docker:rancher/ssh-host-container:' +
                                     'v0.1.0')
SOCAT_IMAGE_UUID = os.environ.get('CATTLE_CLUSTER_SOCAT_IMAGE',
                                  'docker:rancher/socat-docker:v0.2.0')
WEB_IMAGE_UUID = "docker:sangeetha/testlbsd:latest"
SSH_IMAGE_UUID = "docker:sangeetha/testclient:latest"
LB_HOST_ROUTING_IMAGE_UUID = "docker:sangeetha/testnewhostrouting:latest"
SSH_IMAGE_UUID_HOSTNET = "docker:sangeetha/testclient33:latest"
DEFAULT_TIMEOUT = 45
PRIVATE_KEY_FILENAME = "/tmp/private_key_host_ssh"
HOST_SSH_TEST_ACCOUNT = "ranchertest"
HOST_SSH_PUBLIC_PORT = 2222
# Session-scoped mutable state shared between the fixtures defined below.
socat_container_list = []
rancher_compose_con = {"container": None, "host": None, "port": "7878"}
MANAGED_NETWORK = "managed"
UNMANAGED_NETWORK = "bridge"
dns_labels = {"io.rancher.container.dns": "true",
              "io.rancher.scheduler.affinity:container_label_ne":
              "io.rancher.stack_service.name=${stack_name}/${service_name}"}
@pytest.fixture(scope='session')
def cattle_url():
    """Base URL of the Cattle API under test (CATTLE_TEST_URL override)."""
    default_url = 'http://localhost:8080/v1/schemas'
    return os.environ.get('CATTLE_TEST_URL', default_url)
def _admin_client():
    """Build a cattle client authenticated as the admin account.

    Credentials come from CATTLE_ACCESS_KEY / CATTLE_SECRET_KEY, falling
    back to the stock admin/adminpass pair.
    """
    credentials = {
        'access_key': os.environ.get("CATTLE_ACCESS_KEY", 'admin'),
        'secret_key': os.environ.get("CATTLE_SECRET_KEY", 'adminpass'),
    }
    return from_env(url=cattle_url(), cache=False, **credentials)
def _client_for_user(name, accounts):
    """Build a cattle client for a named user from the `accounts` fixture.

    `accounts[name]` is [public_value, secret_value, account] as produced
    by create_user().
    """
    return from_env(url=cattle_url(),
                    cache=False,
                    access_key=accounts[name][0],
                    secret_key=accounts[name][1])
def create_user(admin_client, user_name, kind=None):
    """Ensure an account and an active apiKey credential exist for
    *user_name* (the secret is user_name + 'pass').

    Returns [user_name, password, account], the shape expected by
    _client_for_user().
    """
    if kind is None:
        kind = user_name
    password = user_name + 'pass'
    # Fix: honour the caller-supplied `kind`.  Previously the computed
    # `kind` was ignored and the account was always created with
    # kind == user_name (all in-file callers pass kind == user_name or
    # rely on the default, so this is backward compatible).
    account = create_type_by_uuid(admin_client, 'account', user_name,
                                  kind=kind,
                                  name=user_name)
    # Reuse an existing matching apiKey credential if one is present.
    active_cred = None
    for cred in account.credentials():
        if cred.kind == 'apiKey' and cred.publicValue == user_name \
                and cred.secretValue == password:
            active_cred = cred
            break
    if active_cred is None:
        active_cred = admin_client.create_api_key({
            'accountId': account.id,
            'publicValue': user_name,
            'secretValue': password
        })
    active_cred = wait_success(admin_client, active_cred)
    if active_cred.state != 'active':
        wait_success(admin_client, active_cred.activate())
    return [user_name, password, account]
def acc_id(client):
    """Return the id of the account that owns the client's first API key."""
    first_key = client.list_api_key()[0]
    return first_key.account().id
def client_for_project(project):
    """Create fresh random API credentials on *project* and return a cattle
    client authenticated with them."""
    access_key = random_str()
    secret_key = random_str()
    admin_client = _admin_client()
    active_cred = None
    account = project
    # NOTE(review): access/secret keys were just randomly generated, so this
    # search can never match an existing credential; the loop appears to be
    # copied from create_user() and is effectively dead code.
    for cred in account.credentials():
        if cred.kind == 'apiKey' and cred.publicValue == access_key\
                and cred.secretValue == secret_key:
            active_cred = cred
            break
    if active_cred is None:
        active_cred = admin_client.create_api_key({
            'accountId': account.id,
            'publicValue': access_key,
            'secretValue': secret_key
        })
    active_cred = wait_success(admin_client, active_cred)
    if active_cred.state != 'active':
        wait_success(admin_client, active_cred.activate())
    return from_env(url=cattle_url(),
                    cache=False,
                    access_key=access_key,
                    secret_key=secret_key)
def wait_success(client, obj, timeout=DEFAULT_TIMEOUT):
    """Wait for *obj* to reach a success state, with the suite default timeout."""
    return client.wait_success(obj, timeout=timeout)
def create_type_by_uuid(admin_client, type, uuid, activate=True, validate=True,
                        **kw):
    """Get-or-create an object of *type* identified by *uuid*.

    Optionally activates an inactive object, and (when validate=True)
    asserts every keyword option round-tripped onto the resulting object.
    NOTE(review): the `type` parameter shadows the builtin of the same
    name; kept as-is for interface compatibility.
    """
    opts = dict(kw)
    opts['uuid'] = uuid
    objs = admin_client.list(type, uuid=uuid)
    obj = None
    if len(objs) == 0:
        obj = admin_client.create(type, **opts)
    else:
        obj = objs[0]
    obj = wait_success(admin_client, obj)
    if activate and obj.state == 'inactive':
        obj.activate()
        obj = wait_success(admin_client, obj)
    if validate:
        for k, v in opts.items():
            assert getattr(obj, k) == v
    return obj
@pytest.fixture(scope='session')
def accounts():
    """Session fixture: ensure the standard named accounts exist.

    Returns a dict mapping user name -> [name, password, account]; also
    includes the pre-existing 'system' account (no credentials).
    """
    result = {}
    admin_client = _admin_client()
    for user_name in ['admin', 'agent', 'user', 'agentRegister', 'test',
                      'readAdmin', 'token', 'superadmin', 'service']:
        result[user_name] = create_user(admin_client,
                                        user_name,
                                        kind=user_name)
    # 'admin' is created a second time without an explicit kind, overwriting
    # the loop entry -- NOTE(review): looks intentional, confirm.
    result['admin'] = create_user(admin_client, 'admin')
    system_account = admin_client.list_account(kind='system', uuid='system')[0]
    result['system'] = [None, None, system_account]
    return result
@pytest.fixture(scope='session')
def client(admin_client):
    """Session fixture: client scoped to the default admin project."""
    client = client_for_project(
        admin_client.list_project(uuid="adminProject")[0])
    assert client.valid()
    return client
@pytest.fixture(scope='session')
def admin_client():
    """Session fixture: validated admin-level cattle client."""
    admin_client = _admin_client()
    assert admin_client.valid()
    return admin_client
@pytest.fixture(scope='session')
def super_client(accounts):
    """Session fixture: client authenticated as the superadmin account."""
    ret = _client_for_user('superadmin', accounts)
    return ret
@pytest.fixture
def test_name():
    """Fresh random resource name for each test."""
    return random_str()
@pytest.fixture
def random_str():
    """Random 'test-<n>' string; also called directly as a helper in this file."""
    return 'test-{0}'.format(random_num())
@pytest.fixture
def random_num():
    """Random integer in [0, 1000000]; also called directly as a helper."""
    return random.randint(0, 1000000)
def wait_all_success(client, items, timeout=DEFAULT_TIMEOUT):
    """Wait for every item to reach a success state.

    Returns the refreshed objects in the same order as *items*.
    """
    return [client.wait_success(item, timeout=timeout) for item in items]
@pytest.fixture
def managed_network(client):
    """The single managed docker network (uuid 'managed-docker0')."""
    networks = client.list_network(uuid='managed-docker0')
    assert len(networks) == 1
    return networks[0]
@pytest.fixture(scope='session')
def unmanaged_network(client):
    """The single unmanaged network (uuid 'unmanaged')."""
    networks = client.list_network(uuid='unmanaged')
    assert len(networks) == 1
    return networks[0]
@pytest.fixture
def one_per_host(client, test_name):
    """Create one test-agent container (port 3000 published) on every docker
    host, verify the port mapping answers a ping, and return the containers.

    Requires at least 3 docker hosts in the environment.
    """
    instances = []
    hosts = client.list_host(kind='docker', removed_null=True)
    assert len(hosts) > 2
    for host in hosts:
        c = client.create_container(name=test_name,
                                    ports=['3000:3000'],
                                    networkMode=MANAGED_NETWORK,
                                    imageUuid=TEST_IMAGE_UUID,
                                    requestedHostId=host.id)
        instances.append(c)
    instances = wait_all_success(client, instances, timeout=120)
    for i in instances:
        ports = i.ports_link()
        assert len(ports) == 1
        port = ports[0]
        assert port.privatePort == 3000
        assert port.publicPort == 3000
        ping_port(port)
    return instances
def delete_all(client, items):
    """Delete every item and wait (up to 180s) for the deletions to settle."""
    reloaded = []
    for item in items:
        client.delete(item)
        # Fix: wait on the reloaded (post-delete) resources.  The reloaded
        # list was previously built but never used, so the wait ran against
        # the stale pre-delete objects.
        reloaded.append(client.reload(item))
    wait_all_success(client, reloaded, timeout=180)
def get_port_content(port, path, params=None):
    """GET http://<public ip>:<public port>/<path>, retrying for ~60s.

    Returns the response body text on the first success; re-raises the last
    exception if every attempt fails.
    """
    assert port.publicPort is not None
    assert port.publicIpAddressId is not None
    if params is None:
        # Fix: avoid the shared mutable default argument (`params={}`).
        params = {}
    url = 'http://{}:{}/{}'.format(port.publicIpAddress().address,
                                   port.publicPort,
                                   path)
    e = None
    for i in range(60):
        try:
            return requests.get(url, params=params, timeout=5).text
        except Exception as e1:
            e = e1
            logger.exception('Failed to call %s', url)
            time.sleep(1)
    if e is not None:
        raise e
    # Defensive: unreachable unless the loop body changes, kept as a guard.
    raise Exception('failed to call url {0} for port'.format(url))
def ping_port(port):
    """Assert the test-agent container behind *port* answers /ping with 'pong'."""
    pong = get_port_content(port, 'ping')
    assert pong == 'pong'
def ping_link(src, link_name, var=None, value=None):
    """Assert that *src*'s single instance link named *link_name* works: the
    env var *var*, read through the link, must equal *value*.

    Retries up to 3 times (1s apart) before the final assertion.
    """
    src_port = src.ports_link()[0]
    links = src.instanceLinks()
    assert len(links) == 1
    assert len(links[0].ports) == 1
    assert links[0].linkName == link_name
    for _ in range(3):
        from_link = get_port_content(src_port, 'get', params={
            'link': link_name,
            'path': 'env?var=' + var,
            'port': links[0].ports[0].privatePort
        })
        if from_link == value:
            # Fix: stop retrying once the expected value is seen.  The old
            # loop `continue`d on success and re-fetched, so a later flaky
            # read could fail the final assert.
            break
        time.sleep(1)
    assert from_link == value
def generate_RSA(bits=2048):
    '''
    Generate an RSA keypair.

    Returns (private_key_PEM, public_key_OpenSSH).  PyCrypto is imported
    lazily so the module can load without it.
    '''
    from Crypto.PublicKey import RSA
    new_key = RSA.generate(bits)
    public_key = new_key.publickey().exportKey('OpenSSH')
    private_key = new_key.exportKey()
    return private_key, public_key
@pytest.fixture(scope='session')
def host_ssh_containers(request, client):
    """Session fixture: run an ssh-host container on every docker host.

    Generates a throwaway RSA keypair, writes the private key to
    PRIVATE_KEY_FILENAME for get_ssh_to_host_ssh_container(), and injects
    the public key into each container.  Cleans up on session teardown.
    """
    keys = generate_RSA()
    host_key = keys[0]
    # Persist the private key where the ssh helper expects to find it.
    os.system("echo '" + host_key + "' >" + PRIVATE_KEY_FILENAME)
    hosts = client.list_host(kind='docker', removed_null=True)
    ssh_containers = []
    for host in hosts:
        env_var = {"SSH_KEY": keys[1]}
        # Mount the docker binary and socket so tests can drive docker
        # on the host from inside the container.
        docker_vol_value = ["/usr/bin/docker:/usr/bin/docker",
                            "/var/run/docker.sock:/var/run/docker.sock"
                            ]
        c = client.create_container(name="host_ssh_container",
                                    networkMode=MANAGED_NETWORK,
                                    imageUuid=SSH_HOST_IMAGE_UUID,
                                    requestedHostId=host.id,
                                    dataVolumes=docker_vol_value,
                                    environment=env_var,
                                    ports=[str(HOST_SSH_PUBLIC_PORT)+":22"]
                                    )
        ssh_containers.append(c)
    for c in ssh_containers:
        c = client.wait_success(c, 180)
        assert c.state == "running"

    def fin():
        # Teardown: delete the containers and the private key file.
        for c in ssh_containers:
            client.delete(c)
        os.system("rm " + PRIVATE_KEY_FILENAME)

    request.addfinalizer(fin)
def get_ssh_to_host_ssh_container(host):
    """Open an ssh connection to the host_ssh_containers container on *host*,
    using the private key written by that fixture."""
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(host.ipAddresses()[0].address, username=HOST_SSH_TEST_ACCOUNT,
                key_filename=PRIVATE_KEY_FILENAME, port=HOST_SSH_PUBLIC_PORT)
    return ssh
@pytest.fixture
def wait_for_condition(client, resource, check_function, fail_handler=None,
                       timeout=180):
    """Reload *resource* every 0.5s until check_function(resource) is truthy;
    raise (embedding check_function's source in the message) on timeout.

    NOTE(review): decorated as a fixture but called directly with arguments
    throughout this module -- this relies on older pytest permitting direct
    calls to fixture functions.
    """
    start = time.time()
    resource = client.reload(resource)
    while not check_function(resource):
        if time.time() - start > timeout:
            exceptionMsg = 'Timeout waiting for ' + resource.kind + \
                ' to satisfy condition: ' + \
                inspect.getsource(check_function)
            if (fail_handler):
                exceptionMsg = exceptionMsg + fail_handler(resource)
            raise Exception(exceptionMsg)
        time.sleep(.5)
        resource = client.reload(resource)
    return resource
def wait_for(callback, timeout=DEFAULT_TIMEOUT, timeout_message=None):
    """Poll *callback* every 0.5s until it returns something other than
    None/False, then return that value.  Raise Exception on timeout."""
    deadline = time.time() + timeout
    result = callback()
    while result is None or result is False:
        time.sleep(.5)
        if time.time() > deadline:
            raise Exception(timeout_message
                            if timeout_message
                            else 'Timeout waiting for condition')
        result = callback()
    return result
@pytest.fixture(scope='session')
def socat_containers(client, request):
    """Session fixture: expose each host's docker daemon on tcp:2375.

    When these tests run in the CI environment, the hosts don't expose the
    docker daemon over tcp, so we need to create a container that binds to
    the docker socket and exposes it on a port.  Idempotent via the
    module-level socat_container_list; removed on session teardown.
    """
    if len(socat_container_list) != 0:
        return
    hosts = client.list_host(kind='docker', removed_null=True, state='active')
    for host in hosts:
        socat_container = client.create_container(
            name='socat-%s' % random_str(),
            networkMode=MANAGED_NETWORK,
            imageUuid=SOCAT_IMAGE_UUID,
            ports='2375:2375/tcp',
            stdinOpen=False,
            tty=False,
            publishAllPorts=True,
            privileged=True,
            dataVolumes='/var/run/docker.sock:/var/run/docker.sock',
            requestedHostId=host.id)
        socat_container_list.append(socat_container)
    for socat_container in socat_container_list:
        wait_for_condition(
            client, socat_container,
            lambda x: x.state == 'running',
            lambda x: 'State is: ' + x.state)
    # Grace period for socat to start forwarding before tests hit it.
    time.sleep(10)

    def remove_socat():
        delete_all(client, socat_container_list)
    request.addfinalizer(remove_socat)
def get_docker_client(host):
    """Return a docker-py Client for *host*'s daemon on tcp:2375 (exposed by
    the socat_containers fixture), honouring DOCKER_API_VERSION."""
    address = host.ipAddresses()[0].address
    return Client(
        base_url='tcp://%s:%s' % (address, '2375'),
        version=os.getenv('DOCKER_API_VERSION', '1.18'))
def wait_for_scale_to_adjust(super_client, service):
    """Wait (max 30s) until *service* has an active expose map per scale
    unit, then wait for every mapped container to reach 'running'."""
    service = super_client.wait_success(service)
    instance_maps = super_client.list_serviceExposeMap(serviceId=service.id,
                                                       state="active")
    start = time.time()
    while len(instance_maps) != service.scale:
        time.sleep(.5)
        instance_maps = super_client.list_serviceExposeMap(
            serviceId=service.id, state="active")
        if time.time() - start > 30:
            raise Exception('Timed out waiting for Service Expose map to be ' +
                            'created for all instances')
    for instance_map in instance_maps:
        c = super_client.by_id('container', instance_map.instanceId)
        wait_for_condition(
            super_client, c,
            lambda x: x.state == "running",
            lambda x: 'State is: ' + x.state)
def check_service_map(super_client, service, instance, state):
    """Assert exactly one expose map ties *instance* to *service* in *state*."""
    maps = super_client.list_serviceExposeMap(serviceId=service.id,
                                              instanceId=instance.id,
                                              state=state)
    assert len(maps) == 1
def get_container_names_list(super_client, services):
    """Collect the short (12-char) docker ids of every running container
    backing the given services."""
    names = []
    for svc in services:
        for container in get_service_container_list(super_client, svc):
            if container.state == "running":
                names.append(container.externalId[:12])
    return names
def validate_add_service_link(super_client, service, consumedService):
    """Assert a single consume map links *service* to *consumedService* and
    wait for it to become active."""
    service_maps = super_client. \
        list_serviceConsumeMap(serviceId=service.id,
                               consumedServiceId=consumedService.id)
    assert len(service_maps) == 1
    service_map = service_maps[0]
    wait_for_condition(
        super_client, service_map,
        lambda x: x.state == "active",
        lambda x: 'State is: ' + x.state)
def validate_remove_service_link(super_client, service, consumedService):
    """Assert the consume map linking *service* to *consumedService* exists
    and wait for it to reach 'removed'."""
    service_maps = super_client. \
        list_serviceConsumeMap(serviceId=service.id,
                               consumedServiceId=consumedService.id)
    assert len(service_maps) == 1
    service_map = service_maps[0]
    wait_for_condition(
        super_client, service_map,
        lambda x: x.state == "removed",
        lambda x: 'State is: ' + x.state)
def get_service_container_list(super_client, service):
    """Return the containers backing *service* (one per active expose map),
    re-fetched by externalId with host data included."""
    container = []
    instance_maps = super_client.list_serviceExposeMap(serviceId=service.id,
                                                       state="active")
    for instance_map in instance_maps:
        c = super_client.by_id('container', instance_map.instanceId)
        # Re-list by docker id so the returned objects carry `hosts`.
        containers = super_client.list_container(
            externalId=c.externalId,
            include="hosts")
        assert len(containers) == 1
        container.append(containers[0])
    return container
def link_svc_with_port(super_client, service, linkservices, port):
    """Link *service* to each target service with the given port mapping,
    validating each link as it is added.  Returns the updated service."""
    for target in linkservices:
        service = service.addservicelink(
            serviceLink={"serviceId": target.id, "ports": [port]})
        validate_add_service_link(super_client, service, target)
    return service
def link_svc(super_client, service, linkservices):
    """Link *service* to each target service (no port mapping), validating
    each link as it is added.  Returns the updated service."""
    for target in linkservices:
        service = service.addservicelink(
            serviceLink={"serviceId": target.id})
        validate_add_service_link(super_client, service, target)
    return service
def activate_svc(client, service):
    """Activate *service*, wait up to 120s, and assert it reports active."""
    service.activate()
    activated = client.wait_success(service, 120)
    assert activated.state == "active"
    return activated
def validate_exposed_port_and_container_link(super_client, con, link_name,
                                             link_port, exposed_port):
    """Inspect *con*'s docker env for the <link_name>_PORT_<port>_TCP_ADDR /
    _TCP_PORT variables, then ssh into the container (via *exposed_port*)
    and verify the linked container serves its own name at that address."""
    time.sleep(10)
    # Validate that the environment variables relating to link containers are
    # set
    containers = super_client.list_container(externalId=con.externalId,
                                             include="hosts",
                                             removed_null=True)
    assert len(containers) == 1
    con = containers[0]
    host = super_client.by_id('host', con.hosts[0].id)
    docker_client = get_docker_client(host)
    inspect = docker_client.inspect_container(con.externalId)
    response = inspect["Config"]["Env"]
    logger.info(response)
    address = None
    port = None
    env_name_link_address = link_name + "_PORT_" + str(link_port) + "_TCP_ADDR"
    env_name_link_name = link_name + "_PORT_" + str(link_port) + "_TCP_PORT"
    for env_var in response:
        if env_name_link_address in env_var:
            address = env_var[env_var.index("=")+1:]
        if env_name_link_name in env_var:
            port = env_var[env_var.index("=")+1:]
    logger.info(address)
    logger.info(port)
    # Fix: the old `assert address and port is not None` parsed as
    # `address and (port is not None)` and never actually checked that
    # address was found.
    assert address is not None and port is not None
    # Validate port mapping
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(host.ipAddresses()[0].address, username="root",
                password="root", port=exposed_port)
    # Validate link containers
    cmd = "wget -O result.txt --timeout=20 --tries=1 http://" + \
          address+":"+port+"/name.html" + ";cat result.txt"
    logger.info(cmd)
    stdin, stdout, stderr = ssh.exec_command(cmd)
    response = stdout.readlines()
    assert len(response) == 1
    resp = response[0].strip("\n")
    logger.info(resp)
    assert link_name == resp
def wait_for_lb_service_to_become_active(super_client, client,
                                         services, lb_service):
    """Wait for the LB's host and target maps to become active, for config
    propagation, and for all LB agent containers to run; dumps each agent's
    haproxy.cfg for debugging."""
    lbs = client.list_loadBalancer(serviceId=lb_service.id)
    assert len(lbs) == 1
    lb = lbs[0]
    # Wait for host maps to get created and reach "active" state
    host_maps = wait_until_host_map_created(client, lb, lb_service.scale, 60)
    assert len(host_maps) == lb_service.scale
    logger.info("host_maps - " + str(host_maps))
    # Wait for target maps to get created and reach "active" state
    all_target_count = 0
    for service in services:
        all_target_count = all_target_count + service.scale
    target_maps = wait_until_target_map_created(
        client, lb, all_target_count, 60)
    logger.info(target_maps)
    wait_for_config_propagation(super_client, lb, host_maps)
    time.sleep(5)
    lb_containers = get_service_container_list(super_client, lb_service)
    assert len(lb_containers) == lb_service.scale
    # Get haproxy config from Lb Agents
    for lb_con in lb_containers:
        host = super_client.by_id('host', lb_con.hosts[0].id)
        docker_client = get_docker_client(host)
        haproxy = docker_client.copy(
            lb_con.externalId, "/etc/haproxy/haproxy.cfg")
        print "haproxy: " + haproxy.read()
def validate_lb_service_for_external_services(super_client, client, lb_service,
                                              port, container_list,
                                              hostheader=None, path=None):
    """Validate LB round-robin across the given standalone containers
    (identified by their short docker ids)."""
    names = [con.externalId[:12] for con in container_list]
    validate_lb_service_con_names(super_client, client, lb_service, port,
                                  names, hostheader, path)
def validate_lb_service(super_client, client, lb_service, port,
                        target_services, hostheader=None, path=None):
    """Validate LB round-robin across every running container of the target
    services; asserts the container count matches the summed scales."""
    expected_total = sum(svc.scale for svc in target_services)
    container_names = get_container_names_list(super_client,
                                               target_services)
    logger.info(container_names)
    assert len(container_names) == expected_total
    validate_lb_service_con_names(super_client, client, lb_service, port,
                                  container_names, hostheader, path)
def validate_lb_service_con_names(super_client, client, lb_service, port,
                                  container_names,
                                  hostheader=None, path=None):
    """For every LB host, wait until the LB answers on *port*, then verify
    round-robin distribution over *container_names* (optionally with a
    host header / path for L7 routing rules)."""
    lbs = client.list_loadBalancer(serviceId=lb_service.id)
    assert len(lbs) == 1
    lb = lbs[0]
    host_maps = client.list_loadBalancerHostMap(loadBalancerId=lb.id,
                                                removed_null=True,
                                                state="active")
    assert len(host_maps) == lb_service.scale
    lb_hosts = []
    for host_map in host_maps:
        host = client.by_id('host', host_map.hostId)
        lb_hosts.append(host)
        logger.info("host: " + host.name)
    for host in lb_hosts:
        wait_until_lb_is_active(host, port)
        if hostheader is not None or path is not None:
            check_round_robin_access(container_names, host, port,
                                     hostheader, path)
        else:
            check_round_robin_access(container_names, host, port)
def wait_until_target_map_created(client, lb, count, timeout=30):
    """Poll every 0.5s until the LB has *count* active target maps; raise
    Exception on timeout.  Returns the maps."""
    begin = time.time()
    maps = client.list_loadBalancerTarget(loadBalancerId=lb.id,
                                          removed_null=True,
                                          state="active")
    while len(maps) != count:
        time.sleep(.5)
        maps = client.list_loadBalancerTarget(loadBalancerId=lb.id,
                                              removed_null=True,
                                              state="active")
        if time.time() - begin > timeout:
            raise Exception('Timed out waiting for target map creation')
    return maps
def wait_until_host_map_created(client, lb, count, timeout=30):
    """Poll every 0.5s until the LB has *count* active host maps; raise
    Exception on timeout.  Returns the maps."""
    begin = time.time()
    maps = client.list_loadBalancerHostMap(loadBalancerId=lb.id,
                                           removed_null=True,
                                           state="active")
    while len(maps) != count:
        time.sleep(.5)
        maps = client.list_loadBalancerHostMap(loadBalancerId=lb.id,
                                               removed_null=True,
                                               state="active")
        if time.time() - begin > timeout:
            raise Exception('Timed out waiting for host map creation')
    return maps
def wait_until_target_maps_removed(super_client, lb, consumed_service):
    """For each instance of *consumed_service*, wait until its LB target map
    reaches 'removed'."""
    instance_maps = super_client.list_serviceExposeMap(
        serviceId=consumed_service.id)
    for instance_map in instance_maps:
        target_maps = super_client.list_loadBalancerTarget(
            loadBalancerId=lb.id, instanceId=instance_map.instanceId)
        assert len(target_maps) == 1
        target_map = target_maps[0]
        wait_for_condition(
            super_client, target_map,
            lambda x: x.state == "removed",
            lambda x: 'State is: ' + x.state)
def wait_until_lb_is_active(host, port, timeout=30):
    """Block (max *timeout* s) until the LB on host:port accepts connections."""
    start = time.time()
    while check_for_no_access(host, port):
        time.sleep(.5)
        print "No access yet"
        if time.time() - start > timeout:
            raise Exception('Timed out waiting for LB to become active')
    return
def check_for_no_access(host, port):
    """Return True while host:port refuses connections; False once the LB
    serves /name.html.  `port` is expected to be a string."""
    url = "http://" + host.ipAddresses()[0].address + ":" +\
          port + "/name.html"
    try:
        requests.get(url)
    except requests.ConnectionError:
        logger.info("Connection Error - " + url)
        return True
    return False
def validate_linked_service(super_client, service, consumed_services,
                            exposed_port, exclude_instance=None,
                            exclude_instance_purged=False):
    """Ssh into each container of *service* (via its published ssh port) and
    verify every consumed service both serves (wget by service name) and
    resolves (dig) from all of its containers.

    `exclude_instance` is left out of the expected answers;
    `exclude_instance_purged` additionally lowers the expected scale by one.
    Host-networked containers are expected to answer with the host's
    name/address instead of their own.
    """
    time.sleep(5)
    containers = get_service_container_list(super_client, service)
    assert len(containers) == service.scale
    for container in containers:
        host = super_client.by_id('host', container.hosts[0].id)
        for consumed_service in consumed_services:
            expected_dns_list = []
            expected_link_response = []
            dns_response = []
            consumed_containers = get_service_container_list(super_client,
                                                             consumed_service)
            if exclude_instance_purged:
                assert len(consumed_containers) == consumed_service.scale - 1
            else:
                assert len(consumed_containers) == consumed_service.scale
            for con in consumed_containers:
                if (exclude_instance is not None) \
                        and (con.id == exclude_instance.id):
                    logger.info("Excluded from DNS and wget list:" + con.name)
                else:
                    if con.networkMode == "host":
                        con_host = super_client.by_id('host', con.hosts[0].id)
                        expected_dns_list.append(
                            con_host.ipAddresses()[0].address)
                        expected_link_response.append(con_host.name)
                    else:
                        expected_dns_list.append(con.primaryIpAddress)
                        expected_link_response.append(con.externalId[:12])
            logger.info("Expected dig response List" + str(expected_dns_list))
            logger.info("Expected wget response List" +
                        str(expected_link_response))
            # Validate port mapping
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(host.ipAddresses()[0].address, username="root",
                        password="root", port=int(exposed_port))
            # Validate link containers
            cmd = "wget -O result.txt --timeout=20 --tries=1 http://" + \
                  consumed_service.name + ":80/name.html;cat result.txt"
            logger.info(cmd)
            stdin, stdout, stderr = ssh.exec_command(cmd)
            response = stdout.readlines()
            assert len(response) == 1
            resp = response[0].strip("\n")
            logger.info("Actual wget Response" + str(resp))
            assert resp in (expected_link_response)
            # Validate DNS resolution using dig
            cmd = "dig " + consumed_service.name + " +short"
            logger.info(cmd)
            stdin, stdout, stderr = ssh.exec_command(cmd)
            response = stdout.readlines()
            logger.info("Actual dig Response" + str(response))
            expected_entries_dig = consumed_service.scale
            if exclude_instance is not None:
                expected_entries_dig = expected_entries_dig - 1
            assert len(response) == expected_entries_dig
            for resp in response:
                dns_response.append(resp.strip("\n"))
            for address in expected_dns_list:
                assert address in dns_response
def validate_dns_service(super_client, service, consumed_services,
                         exposed_port, dnsname, exclude_instance=None,
                         exclude_instance_purged=False):
    """Ssh into each container of *service* and verify *dnsname* (a DNS
    service alias) serves and resolves to the union of all consumed
    services' containers.

    NOTE(review): the inner `for con in containers` loop reuses the outer
    loop variable `con`; harmless here because `host` is captured first,
    but worth confirming on any refactor.
    """
    time.sleep(5)
    service_containers = get_service_container_list(super_client, service)
    assert len(service_containers) == service.scale
    for con in service_containers:
        host = super_client.by_id('host', con.hosts[0].id)
        containers = []
        expected_dns_list = []
        expected_link_response = []
        dns_response = []
        for consumed_service in consumed_services:
            cons = get_service_container_list(super_client, consumed_service)
            if exclude_instance_purged:
                assert len(cons) == consumed_service.scale - 1
            else:
                assert len(cons) == consumed_service.scale
            containers = containers + cons
        for con in containers:
            if (exclude_instance is not None) \
                    and (con.id == exclude_instance.id):
                logger.info("Excluded from DNS and wget list:" + con.name)
            else:
                if con.networkMode == "host":
                    con_host = super_client.by_id('host', con.hosts[0].id)
                    expected_dns_list.append(con_host.ipAddresses()[0].address)
                    expected_link_response.append(con_host.name)
                else:
                    expected_dns_list.append(con.primaryIpAddress)
                    expected_link_response.append(con.externalId[:12])
        logger.info("Expected dig response List" + str(expected_dns_list))
        logger.info("Expected wget response List" +
                    str(expected_link_response))
        # Validate port mapping
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(host.ipAddresses()[0].address, username="root",
                    password="root", port=int(exposed_port))
        # Validate link containers
        cmd = "wget -O result.txt --timeout=20 --tries=1 http://" + dnsname + \
              ":80/name.html;cat result.txt"
        logger.info(cmd)
        stdin, stdout, stderr = ssh.exec_command(cmd)
        response = stdout.readlines()
        assert len(response) == 1
        resp = response[0].strip("\n")
        logger.info("Actual wget Response" + str(resp))
        assert resp in (expected_link_response)
        # Validate DNS resolution using dig
        cmd = "dig " + dnsname + " +short"
        logger.info(cmd)
        stdin, stdout, stderr = ssh.exec_command(cmd)
        response = stdout.readlines()
        logger.info("Actual dig Response" + str(response))
        assert len(response) == len(expected_dns_list)
        for resp in response:
            dns_response.append(resp.strip("\n"))
        for address in expected_dns_list:
            assert address in dns_response
def validate_external_service(super_client, service, ext_services,
                              exposed_port, container_list,
                              exclude_instance=None,
                              exclude_instance_purged=False):
    """Ssh into each container of *service* and verify every external
    service name serves (wget) and resolves (dig) to the given standalone
    containers, minus any excluded instance."""
    time.sleep(5)
    containers = get_service_container_list(super_client, service)
    assert len(containers) == service.scale
    for container in containers:
        print "Validation for container -" + str(container.name)
        host = super_client.by_id('host', container.hosts[0].id)
        for ext_service in ext_services:
            expected_dns_list = []
            expected_link_response = []
            dns_response = []
            for con in container_list:
                if (exclude_instance is not None) \
                        and (con.id == exclude_instance.id):
                    print "Excluded from DNS and wget list:" + con.name
                else:
                    expected_dns_list.append(con.primaryIpAddress)
                    expected_link_response.append(con.externalId[:12])
            print "Expected dig response List" + str(expected_dns_list)
            print "Expected wget response List" + str(expected_link_response)
            # Validate port mapping
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(host.ipAddresses()[0].address, username="root",
                        password="root", port=int(exposed_port))
            # Validate link containers
            cmd = "wget -O result.txt --timeout=20 --tries=1 http://" + \
                  ext_service.name + ":80/name.html;cat result.txt"
            print cmd
            stdin, stdout, stderr = ssh.exec_command(cmd)
            response = stdout.readlines()
            assert len(response) == 1
            resp = response[0].strip("\n")
            print "Actual wget Response" + str(resp)
            assert resp in (expected_link_response)
            # Validate DNS resolution using dig
            cmd = "dig " + ext_service.name + " +short"
            print cmd
            stdin, stdout, stderr = ssh.exec_command(cmd)
            response = stdout.readlines()
            print "Actual dig Response" + str(response)
            expected_entries_dig = len(container_list)
            if exclude_instance is not None:
                expected_entries_dig = expected_entries_dig - 1
            assert len(response) == expected_entries_dig
            for resp in response:
                dns_response.append(resp.strip("\n"))
            for address in expected_dns_list:
                assert address in dns_response
def validate_external_service_for_hostname(super_client, service, ext_services,
                                           exposed_port):
    """Ssh into each container of *service* and verify hostname-based
    external services resolve to the real site (expects Google's
    'About Google' marker in the fetched page), repeated 10 times."""
    time.sleep(5)
    containers = get_service_container_list(super_client, service)
    assert len(containers) == service.scale
    for container in containers:
        print "Validation for container -" + str(container.name)
        host = super_client.by_id('host', container.hosts[0].id)
        for ext_service in ext_services:
            expected_link_response = "About Google"
            # Validate port mapping
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(host.ipAddresses()[0].address, username="root",
                        password="root", port=int(exposed_port))
            cmd = "wget -O result.txt --timeout=20 --tries=1 http://" + \
                  ext_service.name + ";cat result.txt"
            print cmd
            # Validate link containers mutliple times
            for i in range(0, 10):
                stdin, stdout, stderr = ssh.exec_command(cmd)
                response = stdout.readlines()
                print "Actual wget Response" + str(response)
                assert expected_link_response in str(response)
@pytest.fixture(scope='session')
def rancher_compose_container(admin_client, client, request):
    """Session fixture: start an ssh-able container and install the
    rancher-compose binary inside it.

    Populates the module-level rancher_compose_con dict consumed by
    launch_rancher_compose(); idempotent; removed on session teardown.
    """
    if rancher_compose_con["container"] is not None:
        return
    setting = admin_client.by_id_setting(
        "default.cattle.rancher.compose.linux.url")
    rancher_compose_url = setting.value
    cmd1 = \
        "wget " + rancher_compose_url
    cmd2 = "tar xvf rancher-compose-linux-amd64.tar.gz"
    hosts = client.list_host(kind='docker', removed_null=True, state="active")
    assert len(hosts) > 0
    host = hosts[0]
    port = rancher_compose_con["port"]
    c = client.create_container(name="rancher-compose-client",
                                networkMode=MANAGED_NETWORK,
                                imageUuid="docker:sangeetha/testclient",
                                ports=[port+":22/tcp"],
                                requestedHostId=host.id
                                )
    c = client.wait_success(c, 120)
    assert c.state == "running"
    time.sleep(5)
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(host.ipAddresses()[0].address, username="root",
                password="root", port=int(port))
    cmd = cmd1+";"+cmd2
    print cmd
    stdin, stdout, stderr = ssh.exec_command(cmd)
    response = stdout.readlines()
    # The tar listing must show the extracted rancher-compose binary.
    found = False
    for resp in response:
        if "/rancher-compose" in resp:
            found = True
    assert found
    rancher_compose_con["container"] = c
    rancher_compose_con["host"] = host

    def remove_rancher_compose_container():
        delete_all(client, [rancher_compose_con["container"]])
    request.addfinalizer(remove_rancher_compose_container)
def launch_rancher_compose(client, env, testname):
    """Export *env*'s compose configs and run `rancher-compose up -d` for
    them inside the rancher_compose_container, asserting the environment
    '<env.name>rancher' gets created."""
    compose_configs = env.exportconfig()
    docker_compose = compose_configs["dockerComposeConfig"]
    rancher_compose = compose_configs["rancherComposeConfig"]
    access_key = client._access_key
    secret_key = client._secret_key
    docker_filename = testname + "-docker-compose.yml"
    rancher_filename = testname + "-rancher-compose.yml"
    project_name = env.name + "rancher"
    # Build one shell pipeline: export credentials, write both yml files,
    # then bring the project up detached.
    cmd1 = "export RANCHER_URL=" + cattle_url()
    cmd2 = "export RANCHER_ACCESS_KEY=" + access_key
    cmd3 = "export RANCHER_SECRET_KEY=" + secret_key
    cmd4 = "cd rancher-compose-v*"
    cmd5 = "echo '" + docker_compose + "' > " + docker_filename
    cmd6 = "echo '" + rancher_compose + "' > " + rancher_filename
    cmd7 = "./rancher-compose -p " + project_name + " -f " + docker_filename + \
           " -r " + rancher_filename + " up -d"
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(
        rancher_compose_con["host"].ipAddresses()[0].address, username="root",
        password="root", port=int(rancher_compose_con["port"]))
    cmd = cmd1+";"+cmd2+";"+cmd3+";"+cmd4+";"+cmd5+";"+cmd6+";"+cmd7
    print cmd
    stdin, stdout, stderr = ssh.exec_command(cmd)
    response = stdout.readlines()
    print str(response)
    expected_resp = "Creating environment " + project_name
    found = False
    for resp in response:
        if expected_resp in resp:
            found = True
    assert found
def create_env_with_svc_and_lb(client, scale_svc, scale_lb, port,
                               internal=False):
    """Create an environment with a web service and an LB service mapping
    *port* to 80 ('expose' instead of 'ports' when internal=True).

    Returns (env, service, lb_service), both services still inactive.
    """
    launch_config_svc = {"imageUuid": WEB_IMAGE_UUID}
    if internal:
        launch_config_lb = {"expose": [port+":80"]}
    else:
        launch_config_lb = {"ports": [port+":80"]}
    # Create Environment
    env = create_env(client)
    # Create Service
    random_name = random_str()
    service_name = random_name.replace("-", "")
    service = client.create_service(name=service_name,
                                    environmentId=env.id,
                                    launchConfig=launch_config_svc,
                                    scale=scale_svc)
    service = client.wait_success(service)
    assert service.state == "inactive"
    # Create LB Service
    random_name = random_str()
    service_name = "LB-" + random_name.replace("-", "")
    lb_service = client.create_loadBalancerService(
        name=service_name,
        environmentId=env.id,
        launchConfig=launch_config_lb,
        scale=scale_lb)
    lb_service = client.wait_success(lb_service)
    assert lb_service.state == "inactive"
    return env, service, lb_service
def create_env_with_ext_svc_and_lb(client, scale_lb, port):
    """Create an env with an external service plus an LB publishing *port*.

    Delegates to create_env_with_ext_svc (scale 1) for the external service
    and its two backing containers, then adds a load-balancer service.
    Everything is left "inactive".  Returns (env, lb_service, ext_service,
    con_list).
    """
    env, service, ext_service, con_list = create_env_with_ext_svc(
        client, 1, port)

    # LB service pointing host *port* at container port 80
    lb_name = "LB-" + random_str().replace("-", "")
    lb_service = client.create_loadBalancerService(
        name=lb_name,
        environmentId=env.id,
        launchConfig={"ports": [port + ":80"]},
        scale=scale_lb)
    lb_service = client.wait_success(lb_service)
    assert lb_service.state == "inactive"
    return env, lb_service, ext_service, con_list
def create_env_with_2_svc(client, scale_svc, scale_consumed_svc, port):
    """Create an env with an SSH-capable client service and a consumed one.

    The client service maps host *port* -> 22/tcp; the consumed service is
    a plain web image.  Both are left "inactive".  Returns (env, service,
    consumed_service).
    """
    ssh_config = {"imageUuid": SSH_IMAGE_UUID,
                  "ports": [port + ":22/tcp"]}
    web_config = {"imageUuid": WEB_IMAGE_UUID}

    env = create_env(client)

    # Client (SSH) service
    service = client.create_service(name=random_str().replace("-", ""),
                                    environmentId=env.id,
                                    launchConfig=ssh_config,
                                    scale=scale_svc)
    service = client.wait_success(service)
    assert service.state == "inactive"

    # Consumed (web) service
    consumed_service = client.create_service(
        name=random_str().replace("-", ""),
        environmentId=env.id,
        launchConfig=web_config,
        scale=scale_consumed_svc)
    consumed_service = client.wait_success(consumed_service)
    assert consumed_service.state == "inactive"
    return env, service, consumed_service
def create_env_with_2_svc_dns(client, scale_svc, scale_consumed_svc, port,
                              cross_linking=False):
    """Create a client service, two consumed web services and a DNS service.

    The SSH client service (host *port* -> 22/tcp) and the DNS service are
    created in the returned environment.  When cross_linking is True each
    consumed service is created in its own freshly created environment
    instead of the shared one.  All services are left "inactive"; the DNS
    service is only wait_success'ed, its state is not asserted.
    """
    launch_config_svc = {"imageUuid": SSH_IMAGE_UUID,
                         "ports": [port+":22/tcp"]}
    launch_config_consumed_svc = {"imageUuid": WEB_IMAGE_UUID}
    # Create Environment for dns service and client service
    env = create_env(client)
    random_name = random_str()
    service_name = random_name.replace("-", "")
    service = client.create_service(name=service_name,
                                    environmentId=env.id,
                                    launchConfig=launch_config_svc,
                                    scale=scale_svc)
    service = client.wait_success(service)
    assert service.state == "inactive"
    # Create Consumed Service1 (own env when cross-linking)
    if cross_linking:
        env_id = create_env(client).id
    else:
        env_id = env.id
    random_name = random_str()
    service_name = random_name.replace("-", "")
    consumed_service = client.create_service(
        name=service_name, environmentId=env_id,
        launchConfig=launch_config_consumed_svc, scale=scale_consumed_svc)
    consumed_service = client.wait_success(consumed_service)
    assert consumed_service.state == "inactive"
    # Create Consumed Service2 (own env when cross-linking)
    if cross_linking:
        env_id = create_env(client).id
    else:
        env_id = env.id
    random_name = random_str()
    service_name = random_name.replace("-", "")
    consumed_service1 = client.create_service(
        name=service_name, environmentId=env_id,
        launchConfig=launch_config_consumed_svc, scale=scale_consumed_svc)
    consumed_service1 = client.wait_success(consumed_service1)
    assert consumed_service1.state == "inactive"
    # Create DNS service (always in the primary env)
    dns = client.create_dnsService(name='WEB1',
                                   environmentId=env.id)
    dns = client.wait_success(dns)
    return env, service, consumed_service, consumed_service1, dns
def create_env_with_ext_svc(client, scale_svc, port, hostname=False):
    """Create an env with an SSH client service and an external service.

    With hostname=False (default) two standalone web containers are started
    and the external service targets their primary IPs; con_list holds those
    two containers.  With hostname=True the external service targets
    "google.com" and con_list is None.  Both services are left "inactive".
    """
    launch_config_svc = {"imageUuid": SSH_IMAGE_UUID,
                         "ports": [port+":22/tcp"]}
    # Create Environment
    env = create_env(client)
    # Create Service
    random_name = random_str()
    service_name = random_name.replace("-", "")
    service = client.create_service(name=service_name,
                                    environmentId=env.id,
                                    launchConfig=launch_config_svc,
                                    scale=scale_svc)
    service = client.wait_success(service)
    assert service.state == "inactive"
    con_list = None
    # Create external Service
    random_name = random_str()
    ext_service_name = random_name.replace("-", "")
    if not hostname:
        # Create 2 containers which would be the applications that need to be
        # serviced by the external service
        c1 = client.create_container(name=random_str(),
                                     imageUuid=WEB_IMAGE_UUID)
        c2 = client.create_container(name=random_str(),
                                     imageUuid=WEB_IMAGE_UUID)
        c1 = client.wait_success(c1, 120)
        assert c1.state == "running"
        c2 = client.wait_success(c2, 120)
        assert c2.state == "running"
        con_list = [c1, c2]
        ips = [c1.primaryIpAddress, c2.primaryIpAddress]
        ext_service = client.create_externalService(
            name=ext_service_name, environmentId=env.id,
            externalIpAddresses=ips)
    else:
        ext_service = client.create_externalService(
            name=ext_service_name, environmentId=env.id, hostname="google.com")
    ext_service = client.wait_success(ext_service)
    assert ext_service.state == "inactive"
    return env, service, ext_service, con_list
def create_env_and_svc(client, launch_config, scale):
    """Create a fresh environment plus one inactive service inside it.

    Note the return order is (service, env).
    """
    environment = create_env(client)
    return create_svc(client, environment, launch_config, scale), environment
def check_container_in_service(super_client, service):
    """Assert *service* has exactly service.scale containers, each "running"
    both per cattle state and per docker inspect on its host."""
    container_list = get_service_container_list(super_client, service)
    assert len(container_list) == service.scale
    for container in container_list:
        assert container.state == "running"
        # Re-list with host info included so we can reach the docker daemon
        # that actually hosts this container.
        containers = super_client.list_container(
            externalId=container.externalId,
            include="hosts",
            removed_null=True)
        docker_client = get_docker_client(containers[0].hosts[0])
        inspect = docker_client.inspect_container(container.externalId)
        logger.info("Checked for containers running - " + container.name)
        assert inspect["State"]["Running"]
def create_svc(client, env, launch_config, scale):
    """Create a randomly named service in *env*, wait until it settles in
    the "inactive" state, and return it."""
    svc_name = random_str().replace("-", "")
    svc = client.create_service(name=svc_name,
                                environmentId=env.id,
                                launchConfig=launch_config,
                                scale=scale)
    svc = client.wait_success(svc)
    assert svc.state == "inactive"
    return svc
def wait_until_instances_get_stopped(super_client, service, timeout=60):
    """Poll every 0.5s until service.scale containers of *service* report
    the "stopped" state; raise after *timeout* seconds."""
    start = time.time()
    stopped = 0
    while stopped != service.scale:
        time.sleep(.5)
        containers = get_service_container_list(super_client, service)
        stopped = sum(1 for con in containers if con.state == "stopped")
        if time.time() - start > timeout:
            raise Exception(
                'Timed out waiting for instances to get to stopped state')
def get_service_containers_with_name(super_client, service, name):
    """Return containers of *service* whose name matches "<name>_<n>".

    Only active service-expose mappings are considered; every match is
    re-listed with host info included.
    """
    container = []
    instance_maps = super_client.list_serviceExposeMap(serviceId=service.id,
                                                       state="active")
    # Cattle names service containers "<service-name>_<instance-number>".
    nameformat = re.compile(name + "_[0-9]{1,2}")
    for instance_map in instance_maps:
        c = super_client.by_id('container', instance_map.instanceId)
        print c.name
        if nameformat.match(c.name):
            containers = super_client.list_container(
                externalId=c.externalId,
                include="hosts")
            assert len(containers) == 1
            container.append(containers[0])
    return container
def wait_until_instances_get_stopped_for_service_with_sec_launch_configs(
        super_client, service, timeout=60):
    """Like wait_until_instances_get_stopped, but also counts sidekicks:
    expects scale * (1 + number of secondary launch configs) containers in
    the "stopped" state; raises after *timeout* seconds."""
    start = time.time()
    expected = service.scale * (len(service.secondaryLaunchConfigs) + 1)
    stopped = 0
    while stopped != expected:
        time.sleep(.5)
        containers = get_service_container_list(super_client, service)
        stopped = sum(1 for con in containers if con.state == "stopped")
        if time.time() - start > timeout:
            raise Exception(
                'Timed out waiting for instances to get to stopped state')
def validate_lb_service_for_no_access(client, lb_service, port,
                                      hostheader, path):
    """Verify every LB host answers 503 for hostheader/path.

    Waits for one host map per LB instance, then checks that each mapped
    host's LB is listening and returns "503 Service Unavailable" for the
    given Host header and path (i.e. no backend is wired up).
    """
    lbs = client.list_loadBalancer(serviceId=lb_service.id)
    assert len(lbs) == 1
    lb = lbs[0]
    host_maps = wait_until_host_map_created(client, lb, lb_service.scale)
    assert len(host_maps) == lb_service.scale
    lb_hosts = []
    for host_map in host_maps:
        host = client.by_id('host', host_map.hostId)
        lb_hosts.append(host)
        logger.info("host: " + host.name)
    for host in lb_hosts:
        wait_until_lb_is_active(host, port)
        check_for_service_unavailable(host, port, hostheader, path)
def check_for_service_unavailable(host, port, hostheader, path):
    """GET *path* from the LB on host:port with a Host header and assert
    the response is haproxy's 503 error page."""
    url = "http://" + host.ipAddresses()[0].address + ":" + port + path
    logger.info(url)
    headers = {"host": hostheader}
    logger.info(headers)
    reply = requests.get(url, headers=headers)
    body = reply.text.strip("\n")
    logger.info(body)
    reply.close()
    assert "503 Service Unavailable" in body
def check_round_robin_access(container_names, host, port,
                             hostheader=None, path="/name.html"):
    """Assert the LB on host:port round-robins over *container_names*.

    Phase 1: one request per expected backend must each return a distinct
    name from container_names; the order observed is recorded.  Phase 2:
    ten further requests must repeat exactly that order cyclically.  An
    optional Host header selects the LB's host-routing rule.
    """
    con_hostname = container_names[:]
    con_hostname_ordered = []
    url = "http://" + host.ipAddresses()[0].address +\
          ":" + port + path
    logger.info(url)
    headers = None
    if hostheader is not None:
        headers = {"host": hostheader}
    logger.info(headers)
    # Phase 1: every backend must be hit exactly once.
    for n in range(0, len(con_hostname)):
        if headers is not None:
            r = requests.get(url, headers=headers)
        else:
            r = requests.get(url)
        response = r.text.strip("\n")
        logger.info(response)
        r.close()
        assert response in con_hostname
        con_hostname.remove(response)
        con_hostname_ordered.append(response)
    logger.info(con_hostname_ordered)
    # Phase 2: the rotation must repeat in the recorded order.
    i = 0
    for n in range(0, 10):
        if headers is not None:
            r = requests.get(url, headers=headers)
        else:
            r = requests.get(url)
        response = r.text.strip("\n")
        r.close()
        logger.info(response)
        assert response == con_hostname_ordered[i]
        i = i + 1
        if i == len(con_hostname_ordered):
            i = 0
def create_env_with_multiple_svc_and_lb(client, scale_svc, scale_lb,
                                        ports, count, crosslinking=False):
    """Create *count* web services plus one LB service and activate them.

    *ports* holds 1 or 2 LB source ports, each optionally carrying a
    "/<protocol>" suffix (e.g. "80/tcp"); they are mapped to container
    ports 80 and 81 respectively.  With crosslinking=True every web
    service is created in its own new environment and only the LB
    service's activation is awaited here.  Returns (env, services,
    lb_service).
    """
    target_port = ["80", "81"]
    launch_config_svc = \
        {"imageUuid": LB_HOST_ROUTING_IMAGE_UUID}
    assert len(ports) in (1, 2)
    launch_port = []
    for i in range(0, len(ports)):
        listening_port = ports[i]+":"+target_port[i]
        if "/" in ports[i]:
            # Re-attach the protocol suffix after the mapping: src:dst/proto
            port_mode = ports[i].split("/")
            listening_port = port_mode[0]+":"+target_port[i]+"/"+port_mode[1]
        launch_port.append(listening_port)
    launch_config_lb = {"ports": launch_port}
    services = []
    # Create Environment
    env = create_env(client)
    # Create Service
    for i in range(0, count):
        random_name = random_str()
        service_name = random_name.replace("-", "")
        if crosslinking:
            env_serv = create_env(client)
            env_id = env_serv.id
        else:
            env_id = env.id
        service = client.create_service(name=service_name,
                                        environmentId=env_id,
                                        launchConfig=launch_config_svc,
                                        scale=scale_svc)
        service = client.wait_success(service)
        assert service.state == "inactive"
        services.append(service)
    # Create LB Service
    random_name = random_str()
    service_name = "LB-" + random_name.replace("-", "")
    lb_service = client.create_loadBalancerService(
        name=service_name,
        environmentId=env.id,
        launchConfig=launch_config_lb,
        scale=scale_lb)
    lb_service = client.wait_success(lb_service)
    assert lb_service.state == "inactive"
    env = env.activateservices()
    env = client.wait_success(env, 120)
    # Cross-linked services live in other envs, so activating this env only
    # covers them when crosslinking is off.
    if not crosslinking:
        for service in services:
            service = client.wait_success(service, 120)
            assert service.state == "active"
    lb_service = client.wait_success(lb_service, 120)
    assert lb_service.state == "active"
    return env, services, lb_service
def wait_for_config_propagation(super_client, lb, host_maps, timeout=30):
    """Block until each LB host agent has applied its haproxy config.

    For every host map the delegate agent is looked up by URI and its
    "haproxy" config item is polled until appliedVersion catches up with
    requestedVersion; raises after *timeout* seconds per agent.
    """
    for host_map in host_maps:
        uri = 'delegate:///?lbId={}&hostMapId={}'.\
            format(get_plain_id(super_client, lb),
                   get_plain_id(super_client, host_map))
        agents = super_client.list_agent(uri=uri)
        assert len(agents) == 1
        agent = agents[0]
        assert agent is not None
        item = get_config_item(agent, "haproxy")
        start = time.time()
        print "requested_version " + str(item.requestedVersion)
        print "applied_version " + str(item.appliedVersion)
        while item.requestedVersion != item.appliedVersion:
            print "requested_version " + str(item.requestedVersion)
            print "applied_version " + str(item.appliedVersion)
            time.sleep(.5)
            # Reload the agent to pick up fresh config-item status.
            agent = super_client.reload(agent)
            item = get_config_item(agent, "haproxy")
            if time.time() - start > timeout:
                raise Exception('Timed out waiting for config propagation')
    return
def get_config_item(agent, config_name):
    """Return the config-item status entry named *config_name* reported by
    *agent*, asserting that one exists."""
    found = None
    for status in agent.configItemStatuses():
        if status.name == config_name:
            found = status
            break
    assert found is not None
    return found
def get_plain_id(admin_client, obj=None):
    """Resolve the numeric (plain) database id of *obj* via an admin list.

    Calling-convention quirk: when invoked with a single argument, that
    argument is the object and a super client is obtained internally.
    """
    if obj is None:
        obj, admin_client = admin_client, super_client(None)
    matches = admin_client.list(obj.type, uuid=obj.uuid, _plainId='true')
    assert len(matches) == 1
    return matches[0].id
def create_env(client):
    """Create an environment (stack) with a random name, wait for it to
    reach the "active" state, and return it."""
    env_name = random_str().replace("-", "")
    environment = client.create_environment(name=env_name)
    environment = client.wait_success(environment)
    assert environment.state == "active"
    return environment
def get_env(super_client, service):
    """Fetch the environment (stack) that owns *service*."""
    return super_client.by_id('environment', service.environmentId)
def get_service_container_with_label(super_client, service, name, label):
    """Return the container of *service* named "<name>_<n>" whose
    "io.rancher.service.deployment.unit" label equals *label*.

    Asserts exactly one such container exists; it is returned with host
    info included.
    """
    containers = []
    found = False
    instance_maps = super_client.list_serviceExposeMap(serviceId=service.id,
                                                       state="active")
    nameformat = re.compile(name + "_[0-9]{1,2}")
    for instance_map in instance_maps:
        c = super_client.by_id('container', instance_map.instanceId)
        if nameformat.match(c.name) \
                and c.labels["io.rancher.service.deployment.unit"] == label:
            containers = super_client.list_container(
                externalId=c.externalId,
                include="hosts")
            assert len(containers) == 1
            found = True
            break
    assert found
    return containers[0]
def get_side_kick_container(super_client, container, service, service_name):
    """Return the sidekick (secondary launch config) container that shares
    *container*'s deployment unit, matched by the *service_name* pattern."""
    label = container.labels["io.rancher.service.deployment.unit"]
    print container.name + " - " + label
    secondary_con = get_service_container_with_label(
        super_client, service, service_name, label)
    return secondary_con
def validate_internal_lb(super_client, lb_service, services,
                         host, con_port, lb_port):
    """Validate an internal ("expose") LB from inside the managed network.

    SSHes into the client container published on host:con_port and, for
    each LB agent container, wgets http://<lb ip>:<lb_port>/name.html,
    asserting the answer is one of the backend container names of
    *services*.
    """
    # Access each of the LB Agent from the client container
    lb_containers = get_service_container_list(super_client, lb_service)
    assert len(lb_containers) == lb_service.scale
    for lb_con in lb_containers:
        lb_ip = lb_con.primaryIpAddress
        target_count = 0
        for service in services:
            target_count = target_count + service.scale
        expected_lb_response = get_container_names_list(super_client,
                                                        services)
        assert len(expected_lb_response) == target_count
        # Validate port mapping
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(host.ipAddresses()[0].address, username="root",
                    password="root", port=int(con_port))
        # Validate lb service from this container using LB agent's ip address
        cmd = "wget -O result.txt --timeout=20 --tries=1 http://" + lb_ip + \
              ":"+lb_port+"/name.html;cat result.txt"
        logger.info(cmd)
        stdin, stdout, stderr = ssh.exec_command(cmd)
        response = stdout.readlines()
        assert len(response) == 1
        resp = response[0].strip("\n")
        logger.info("Actual wget Response" + str(resp))
        assert resp in (expected_lb_response)
def create_env_with_2_svc_hostnetwork(
        client, scale_svc, scale_consumed_svc, port, sshport,
        isnetworkModeHost_svc=False,
        isnetworkModeHost_consumed_svc=False):
    """Create an SSH client service and a consumed web service, either of
    which may optionally run with host networking.

    A host-networked service gets networkMode "host" plus dns_labels (and
    the consumed one an empty port list); otherwise the client service
    publishes *port* -> *sshport*/tcp.  Both services are left "inactive".
    """
    launch_config_svc = {"imageUuid": SSH_IMAGE_UUID_HOSTNET}
    launch_config_consumed_svc = {"imageUuid": WEB_IMAGE_UUID}
    if isnetworkModeHost_svc:
        launch_config_svc["networkMode"] = "host"
        launch_config_svc["labels"] = dns_labels
    else:
        launch_config_svc["ports"] = [port+":"+sshport+"/tcp"]
    if isnetworkModeHost_consumed_svc:
        launch_config_consumed_svc["networkMode"] = "host"
        launch_config_consumed_svc["labels"] = dns_labels
        launch_config_consumed_svc["ports"] = []
    # Create Environment
    env = create_env(client)
    # Create Service
    random_name = random_str()
    service_name = random_name.replace("-", "")
    service = client.create_service(name=service_name,
                                    environmentId=env.id,
                                    launchConfig=launch_config_svc,
                                    scale=scale_svc)
    service = client.wait_success(service)
    assert service.state == "inactive"
    # Create Consumed Service
    random_name = random_str()
    service_name = random_name.replace("-", "")
    consumed_service = client.create_service(
        name=service_name, environmentId=env.id,
        launchConfig=launch_config_consumed_svc, scale=scale_consumed_svc)
    consumed_service = client.wait_success(consumed_service)
    assert consumed_service.state == "inactive"
    return env, service, consumed_service
def create_env_with_2_svc_dns_hostnetwork(
        client, scale_svc, scale_consumed_svc, port,
        cross_linking=False, isnetworkModeHost_svc=False,
        isnetworkModeHost_consumed_svc=False):
    """DNS round-robin fixture with optional host networking/cross-linking.

    Creates an SSH client service, two consumed web services pinned to two
    different hosts (requires at least 2 active docker hosts), and a DNS
    service.  cross_linking places each consumed service in its own new
    environment; the networkModeHost flags behave as in
    create_env_with_2_svc_hostnetwork.  Services are left "inactive".
    """
    launch_config_svc = {"imageUuid": SSH_IMAGE_UUID_HOSTNET}
    launch_config_consumed_svc = {"imageUuid": WEB_IMAGE_UUID}
    if isnetworkModeHost_svc:
        launch_config_svc["networkMode"] = "host"
        launch_config_svc["labels"] = dns_labels
    else:
        launch_config_svc["ports"] = [port+":33/tcp"]
    if isnetworkModeHost_consumed_svc:
        launch_config_consumed_svc["networkMode"] = "host"
        launch_config_consumed_svc["labels"] = dns_labels
        launch_config_consumed_svc["ports"] = []
    # Create Environment for dns service and client service
    env = create_env(client)
    random_name = random_str()
    service_name = random_name.replace("-", "")
    service = client.create_service(name=service_name,
                                    environmentId=env.id,
                                    launchConfig=launch_config_svc,
                                    scale=scale_svc)
    service = client.wait_success(service)
    assert service.state == "inactive"
    # Force containers of 2 different services to be in different hosts
    hosts = client.list_host(kind='docker', removed_null=True, state='active')
    assert len(hosts) > 1
    # Create Consumed Service1
    if cross_linking:
        env_id = create_env(client).id
    else:
        env_id = env.id
    random_name = random_str()
    service_name = random_name.replace("-", "")
    # NOTE: the same launch-config dict is reused for both consumed
    # services; requestedHostId is overwritten before each create call.
    launch_config_consumed_svc["requestedHostId"] = hosts[0].id
    consumed_service = client.create_service(
        name=service_name, environmentId=env_id,
        launchConfig=launch_config_consumed_svc, scale=scale_consumed_svc)
    consumed_service = client.wait_success(consumed_service)
    assert consumed_service.state == "inactive"
    # Create Consumed Service2
    if cross_linking:
        env_id = create_env(client).id
    else:
        env_id = env.id
    random_name = random_str()
    service_name = random_name.replace("-", "")
    launch_config_consumed_svc["requestedHostId"] = hosts[1].id
    consumed_service1 = client.create_service(
        name=service_name, environmentId=env_id,
        launchConfig=launch_config_consumed_svc, scale=scale_consumed_svc)
    consumed_service1 = client.wait_success(consumed_service1)
    assert consumed_service1.state == "inactive"
    # Create DNS service
    dns = client.create_dnsService(name='WEB1',
                                   environmentId=env.id)
    dns = client.wait_success(dns)
    return env, service, consumed_service, consumed_service1, dns
| |
import os
import sys
import clusto
import logging
import commands
from ConfigParser import SafeConfigParser
from optparse import OptionParser, make_option
# Candidate locations searched (in order) for "clusto-*" command scripts.
scriptpaths = [os.path.realpath(os.path.join(os.curdir, 'scripts')),
               '/etc/clusto/scripts',
               '/usr/local/bin',
               '/usr/bin',
               ] #+ filter(lambda x: not x.endswith('.egg'), sys.path)
def list_clusto_scripts(path):
    """
    Return paths of executable "clusto-*" scripts found at *path*.

    *path* may be a directory (scanned non-recursively) or a single file;
    a missing path yields an empty list.  Editor backups ("...~") and
    non-executable entries are skipped.
    """
    if not os.path.exists(path):
        return []
    candidates = os.listdir(path) if os.path.isdir(path) else [path]
    matches = filter(lambda entry: entry.startswith("clusto-")
                     and not entry.endswith('~')
                     and os.access(os.path.join(path, entry), os.X_OK),
                     candidates)
    return map(lambda entry: os.path.join(path, entry), matches)
def runcmd(args):
    """Replace the current process with the clusto subcommand in args[0].

    args[0] holds the bare subcommand name (e.g. "attr"); it is rewritten
    in place to "clusto-attr", resolved against PATH, and exec'd with the
    remaining args and the current environment.  Raises CommandError when
    no matching executable exists on PATH.  On success this call never
    returns (os.execvpe replaces the process image).
    """
    args[0] = 'clusto-' + args[0]
    cmdname = args[0]
    paths = os.environ['PATH'].split(':')
    cmd = None
    for path in paths:
        cmdtest = os.path.join(path, cmdname)
        if os.path.exists(cmdtest):
            cmd = cmdtest
            break
    if not cmd:
        raise CommandError(cmdname + " is not a clusto-command.")
    # Fix: exec the executable we actually located above; the original
    # passed the bare name and relied on execvpe's own PATH search, leaving
    # the resolved `cmd` unused (and potentially running a different file).
    os.execvpe(cmd, args, os.environ)
def get_command(cmdname):
    """Search scriptpaths for the script implementing *cmdname*.

    A script matches when the text between "clusto-" and the first "." of
    its path equals cmdname.  Returns the script path, or None when no
    candidate matches.
    """
    for directory in scriptpaths:
        for script in list_clusto_scripts(directory):
            if script.split('-')[1].split('.')[0] == cmdname:
                return script
    return None
def get_command_help(cmdname):
    """Return the short description a clusto subcommand prints for its
    --help-description flag (output captured via commands.getoutput)."""
    fullpath = get_command(cmdname)
    return commands.getoutput(fullpath + " --help-description")
def get_clusto_config(filename=None):
    """Find, parse, and return the configuration data needed by clusto.

    Resolution order: the explicit *filename* argument, then the
    CLUSTOCONFIG environment variable, then /etc/clusto/clusto.conf.
    Ensures a [clusto] section exists and lets the CLUSTODSN environment
    variable override the configured dsn.  Raises CmdLineError when the
    file is missing or no dsn can be determined.
    """
    default_paths = ['/etc/clusto/clusto.conf']
    conf_file = filename or os.environ.get('CLUSTOCONFIG') or default_paths[0]
    if not os.path.exists(os.path.realpath(conf_file)):
        raise CmdLineError("Config file %s doesn't exist." % conf_file)
    config = SafeConfigParser()
    config.read([conf_file])
    if not config.has_section('clusto'):
        config.add_section('clusto')
    if 'CLUSTODSN' in os.environ:
        config.set('clusto', 'dsn', os.environ['CLUSTODSN'])
    if not config.has_option('clusto', 'dsn'):
        raise CmdLineError("No database given for clusto data.")
    return config
def init_script(name=os.path.basename(sys.argv[0]), configfile=None,
                initializedb=False):
    """Initialize the clusto environment for clusto scripts.

    Loads the config (get_clusto_config), connects clusto to its database,
    optionally initializes the schema, and configures logging.  Returns
    the (config, logger) pair.

    Note: the default for *name* is evaluated once, at import time.
    """
    config = get_clusto_config(filename=configfile)
    clusto.connect(config=config)
    if initializedb:
        clusto.init_clusto()
    return (config, setup_logging(config=config, name=name))
def setup_logging(config=None, name="clusto.script"):
    """Configure default logging and return the named logger.

    The root logger records WARNING+ to /var/log/clusto.log when that file
    (or directory) is writable, falling back to os.devnull otherwise; an
    INFO-level console handler is attached to the returned logger.
    #FIXME shouldn't ignore the config
    """
    log_dir = "/var/log"
    log_file = os.path.join(log_dir, 'clusto.log')
    writable = (os.access(log_dir, os.W_OK)
                or (os.path.exists(log_file)
                    and os.access(log_file, os.W_OK)))
    if not writable:
        log_file = os.path.devnull
    logging.basicConfig(
        level=logging.WARNING,
        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
        datefmt='%m-%d %H:%M',
        filename=log_file,
    )
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(
        logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s'))
    log = logging.getLogger(name)
    log.addHandler(console)
    return log
def setup_clusto_env(options):
    """
    Take clusto parameters and put it into the shell environment.

    Copies the --dsn/--configfile option values into CLUSTODSN and
    CLUSTOCONFIG, loads the config, and back-fills CLUSTODSN from the
    config file when the variable is still unset.  Returns the parsed
    config.
    """
    if options.dsn:
        os.environ['CLUSTODSN'] = options.dsn
    if options.configfile:
        os.environ['CLUSTOCONFIG'] = options.configfile
    # 'in' instead of dict.has_key(), which was removed in Python 3 and is
    # non-idiomatic in Python 2 as well.
    if 'CLUSTOCONFIG' in os.environ:
        config = get_clusto_config(os.environ['CLUSTOCONFIG'])
    else:
        config = get_clusto_config()
    if 'CLUSTODSN' not in os.environ:
        os.environ['CLUSTODSN'] = config.get('clusto', 'dsn')
    return config
class CmdLineError(Exception):
    """Raised for user-facing command-line problems (bad usage, missing
    config file, missing dsn)."""
    pass
class CommandError(Exception):
    """Raised when a requested clusto subcommand cannot be found."""
    pass
class ClustoScript(object):
    """Base class for clusto command-line scripts.

    Subclasses override the class attributes below and implement
    main(argv, options, config, log); runscript() drives the life cycle.
    """
    usage = "%prog [options]"            # optparse usage template
    option_list = []                     # extra optparse Options
    num_args = None                      # exact positional-arg count (None = any)
    num_args_min = 0                     # minimum positional-arg count
    short_description = "sample short descripton"

    def __init__(self):
        self.parser = OptionParser(usage=self.usage,
                                   option_list=self.option_list)
        # --help-description prints short_description and exits; used by
        # the top-level dispatcher to build its command listing.
        self.parser.add_option("--help-description",
                               action="callback",
                               callback=self._help_description,
                               dest="helpdesc",
                               help="print out the short command description")

    def _help_description(self, option, opt_str, value, parser, *args, **kwargs):
        # optparse callback: emit the one-line description and stop.
        print self.short_description
        sys.exit(0)
def runscript(scriptclass):
script = scriptclass()
(options, argv) = script.parser.parse_args(sys.argv)
config, logger = init_script()
try:
if (script.num_args != None and script.num_args != (len(argv)-1)) or script.num_args_min > (len(argv)-1):
raise CmdLineError("Wrong number of arguments.")
retval = script.main(argv,
options,
config=config,
log=logger)
except (CmdLineError, LookupError), msg:
print msg
script.parser.print_help()
return 1
return sys.exit(retval)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.