# gt stringclasses 1 value | context stringlengths 2.49k 119k
# |---|---|
# Copyright 2015 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
import operator
import docker
from oslo_config import cfg as oslo_cfg
from oslo_log import log as logging
from oslo_service import periodic_task
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common.notification import EndNotification
from trove.guestagent import dbaas
from trove.guestagent import guest_log
from trove.guestagent import volume
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.module import driver_manager
from trove.guestagent.module import module_manager
from trove.guestagent.strategies import replication as repl_strategy
from trove.instance import service_status
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Manager(periodic_task.PeriodicTasks):
    """This is the base class for all datastore managers. Over time, common
    functionality should be pulled back here from the existing managers.
    """

    # Keys used in guest-log definition dicts (see get_datastore_log_defs).
    GUEST_LOG_TYPE_LABEL = 'type'
    GUEST_LOG_USER_LABEL = 'user'
    GUEST_LOG_FILE_LABEL = 'file'
    GUEST_LOG_SECTION_LABEL = 'section'
    GUEST_LOG_ENABLE_LABEL = 'enable'
    GUEST_LOG_DISABLE_LABEL = 'disable'
    GUEST_LOG_RESTART_LABEL = 'restart'

    GUEST_LOG_BASE_DIR = '/var/log/trove'
    GUEST_LOG_DATASTORE_DIRNAME = 'datastore'

    GUEST_LOG_DEFS_GUEST_LABEL = 'guest'
    GUEST_LOG_DEFS_GENERAL_LABEL = 'general'
    GUEST_LOG_DEFS_ERROR_LABEL = 'error'
    GUEST_LOG_DEFS_SLOW_QUERY_LABEL = 'slow_query'

    MODULE_APPLY_TO_ALL = module_manager.ModuleManager.MODULE_APPLY_TO_ALL

    # Class-level cache; populated lazily by the docker_client property.
    _docker_client = None

    @property
    def docker_client(self):
        """Lazily create and cache a docker client from the environment."""
        if self._docker_client:
            return self._docker_client
        self._docker_client = docker.from_env()
        return self._docker_client

    def __init__(self, manager_name):
        """Initialize the manager.

        :param manager_name: name of the datastore manager (e.g. 'mysql').
        :raises exception.GuestError: if a container registry is configured
            and logging in to it fails.
        """
        super(Manager, self).__init__(CONF)

        # Manager properties
        self.__manager_name = manager_name
        self.__manager = None
        self.__prepare_error = False

        # Guest log
        self._guest_log_context = None
        self._guest_log_loaded_context = None
        self._guest_log_cache = None
        self._guest_log_defs = None

        # Module
        self.module_driver_manager = driver_manager.ModuleDriverManager()

        # Drivers should implement
        self.adm = None
        self.app = None
        self.status = None

        if CONF.guest_agent.container_registry:
            try:
                self.docker_client.login(
                    CONF.guest_agent.container_registry_username,
                    CONF.guest_agent.container_registry_password,
                    email="",
                    registry=CONF.guest_agent.container_registry)
            except Exception as exc:
                raise exception.GuestError(f"Failed to login the container "
                                           f"registry, error: {str(exc)}")

    @property
    def manager_name(self):
        """This returns the passed-in name of the manager."""
        return self.__manager_name

    @property
    def manager(self):
        """This returns the name of the manager."""
        if not self.__manager:
            self.__manager = CONF.datastore_manager or self.__manager_name
        return self.__manager

    @property
    def prepare_error(self):
        # True when do_prepare raised; consulted when ending the install.
        return self.__prepare_error

    @prepare_error.setter
    def prepare_error(self, prepare_error):
        self.__prepare_error = prepare_error

    @property
    def configuration_manager(self):
        """If the datastore supports the new-style configuration manager,
        it should override this to return it.
        """
        return None

    @property
    def replication(self):
        """If the datastore supports replication, return an instance of
        the strategy.
        """
        try:
            return repl_strategy.get_instance(self.manager)
        except Exception as ex:
            LOG.warning("Cannot get replication instance for '%(manager)s': "
                        "%(msg)s", {'manager': self.manager, 'msg': str(ex)})
        return None

    @property
    def replication_strategy(self):
        """If the datastore supports replication, return the strategy."""
        try:
            return repl_strategy.get_strategy(self.manager)
        except Exception as ex:
            LOG.debug("Cannot get replication strategy for '%(manager)s': "
                      "%(msg)s", {'manager': self.manager, 'msg': str(ex)})
        return None

    @property
    def guestagent_log_defs(self):
        """These are log files that should be available on every Trove
        instance. By definition, these should be of type LogType.SYS
        """
        log_dir = CONF.log_dir or '/var/log/trove/'
        log_file = CONF.log_file or 'trove-guestagent.log'
        guestagent_log = guestagent_utils.build_file_path(log_dir, log_file)
        return {
            self.GUEST_LOG_DEFS_GUEST_LABEL: {
                self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.SYS,
                self.GUEST_LOG_USER_LABEL: None,
                self.GUEST_LOG_FILE_LABEL: guestagent_log,
            },
        }

    @property
    def guest_log_context(self):
        return self._guest_log_context

    @guest_log_context.setter
    def guest_log_context(self, context):
        self._guest_log_context = context

    @periodic_task.periodic_task
    def update_status(self, context):
        """Update the status of the trove instance."""
        if not self.status.is_installed:
            LOG.info("Database service is not installed, skip status check")
            return

        LOG.debug("Starting to check database service status")
        status = self.get_service_status()
        self.status.set_status(status)

    def get_service_status(self):
        return self.status.get_actual_db_status()

    def rpc_ping(self, context):
        LOG.debug("Responding to RPC ping.")
        return True

    #################
    # Instance related
    #################
    def prepare(self, context, packages, databases, memory_mb, users,
                device_path=None, mount_point=None, backup_info=None,
                config_contents=None, root_password=None, overrides=None,
                cluster_config=None, snapshot=None, modules=None,
                ds_version=None):
        """Set up datastore on a Guest Instance."""
        with EndNotification(context, instance_id=CONF.guest_id):
            self._prepare(context, packages, databases, memory_mb, users,
                          device_path, mount_point, backup_info,
                          config_contents, root_password, overrides,
                          cluster_config, snapshot, modules,
                          ds_version=ds_version)

    def _prepare(self, context, packages, databases, memory_mb, users,
                 device_path, mount_point, backup_info,
                 config_contents, root_password, overrides,
                 cluster_config, snapshot, modules, ds_version=None):
        LOG.info("Starting datastore prepare for '%s:%s'.", self.manager,
                 ds_version)
        self.status.begin_install()
        post_processing = True if cluster_config else False
        try:
            # Since all module handling is common, don't pass it down to the
            # individual 'do_prepare' methods.
            self.do_prepare(context, packages, databases, memory_mb,
                            users, device_path, mount_point, backup_info,
                            config_contents, root_password, overrides,
                            cluster_config, snapshot, ds_version=ds_version)
        except Exception as ex:
            self.prepare_error = True
            LOG.exception("Failed to prepare datastore: %s", ex)
            raise
        finally:
            LOG.info("Ending datastore prepare for '%s'.", self.manager)
            self.status.end_install(error_occurred=self.prepare_error,
                                    post_processing=post_processing)

        # At this point critical 'prepare' work is done and the instance
        # is now in the correct 'ACTIVE' 'INSTANCE_READY' or 'ERROR' state.
        # Of course if an error has occurred, none of the code that follows
        # will run.
        LOG.info("Completed setup of '%s' datastore successfully.",
                 self.manager)

        # The following block performs additional instance initialization.
        # Failures will be recorded, but won't stop the provisioning
        # or change the instance state.
        try:
            if modules:
                LOG.info("Applying modules (called from 'prepare').")
                self.module_apply(context, modules)
                LOG.info('Module apply completed.')
        except Exception as ex:
            LOG.exception("An error occurred applying modules: "
                          "%s", str(ex))

        # The following block performs single-instance initialization.
        # Failures will be recorded, but won't stop the provisioning
        # or change the instance state.
        if not cluster_config:
            try:
                if databases:
                    LOG.info("Creating databases (called from 'prepare').")
                    self.create_database(context, databases)
                    LOG.info('Databases created successfully.')
            except Exception as ex:
                LOG.warning("An error occurred creating databases: %s",
                            str(ex))
            try:
                if users:
                    LOG.info("Creating users (called from 'prepare')")
                    self.create_user(context, users)
                    LOG.info('Users created successfully.')
            except Exception as ex:
                LOG.warning("An error occurred creating users: "
                            "%s", str(ex))

            # We only enable-root automatically if not restoring a backup
            # that may already have root enabled in which case we keep it
            # unchanged.
            if root_password and not backup_info:
                try:
                    LOG.info("Enabling root user (with password).")
                    self.enable_root_on_prepare(context, root_password)
                    LOG.info('Root enabled successfully.')
                except Exception as ex:
                    LOG.exception("An error occurred enabling root user: "
                                  "%s", str(ex))

        try:
            LOG.info("Starting post prepare for '%s' datastore.", self.manager)
            self.post_prepare(context, packages, databases, memory_mb,
                              users, device_path, mount_point, backup_info,
                              config_contents, root_password, overrides,
                              cluster_config, snapshot)
            LOG.info("Post prepare for '%s' datastore completed.",
                     self.manager)
        except Exception as ex:
            LOG.exception("An error occurred in post prepare: %s",
                          str(ex))
            raise

    @abc.abstractmethod
    def do_prepare(self, context, packages, databases, memory_mb, users,
                   device_path, mount_point, backup_info, config_contents,
                   root_password, overrides, cluster_config, snapshot,
                   ds_version=None):
        """This is called from prepare when the Trove instance first comes
        online. 'Prepare' is the first rpc message passed from the
        task manager. do_prepare handles all the base configuration of
        the instance and is where the actual work is done. Once this method
        completes, the datastore is considered either 'ready' for use (or
        for final connections to other datastores) or in an 'error' state,
        and the status is changed accordingly. Each datastore must
        implement this method.
        """
        pass

    def post_prepare(self, context, packages, databases, memory_mb, users,
                     device_path, mount_point, backup_info, config_contents,
                     root_password, overrides, cluster_config, snapshot):
        """This is called after prepare has completed successfully.
        Processing done here should be limited to things that will not
        affect the actual 'running' status of the datastore (for example,
        creating databases and users, although these are now handled
        automatically). Any exceptions are caught, logged and rethrown,
        however no status changes are made and the end-user will not be
        informed of the error.
        """
        LOG.info('No post_prepare work has been defined.')
        pass

    def start_db_with_conf_changes(self, context, config_contents, ds_version):
        """Start the database with given configuration.

        This method is called after resize.
        """
        self.app.start_db_with_conf_changes(config_contents, ds_version)

    def stop_db(self, context):
        self.app.stop_db()

    def restart(self, context):
        self.app.restart()

    def rebuild(self, context, ds_version, config_contents=None,
                config_overrides=None):
        raise exception.DatastoreOperationNotSupported(
            operation='rebuild', datastore=self.manager)

    def pre_upgrade(self, context):
        """Prepares the guest for upgrade, returning a dict to be passed
        to post_upgrade
        """
        return {}

    def upgrade(self, context, upgrade_info):
        """Upgrade the database."""
        pass

    def post_upgrade(self, context, upgrade_info):
        """Recovers the guest after the image is upgraded using information
        from the pre_upgrade step
        """
        pass

    #####################
    # File System related
    #####################
    def get_filesystem_stats(self, context, fs_path):
        """Gets the filesystem stats for the path given."""
        # TODO(peterstac) - note that fs_path is not used in this method.
        mount_point = CONF.get(self.manager).mount_point
        LOG.debug("Getting file system stats for '%s'", mount_point)
        return dbaas.get_filesystem_volume_stats(mount_point)

    def mount_volume(self, context, device_path=None, mount_point=None,
                     write_to_fstab=False):
        LOG.debug("Mounting the device %(path)s at the mount point "
                  "%(mount_point)s.", {'path': device_path,
                                       'mount_point': mount_point})
        device = volume.VolumeDevice(device_path)
        device.mount(mount_point, write_to_fstab=write_to_fstab)

    def unmount_volume(self, context, device_path=None, mount_point=None):
        LOG.debug("Unmounting the device %(path)s from the mount point "
                  "%(mount_point)s.", {'path': device_path,
                                       'mount_point': mount_point})
        device = volume.VolumeDevice(device_path)
        device.unmount(mount_point)

    def resize_fs(self, context, device_path=None, mount_point=None,
                  online=False):
        LOG.info(f"Resizing the filesystem at {mount_point}, online: {online}")
        device = volume.VolumeDevice(device_path)
        device.resize_fs(mount_point, online=online)

    ###############
    # Configuration
    ###############
    def reset_configuration(self, context, configuration):
        """Reset database base configuration.

        The default implementation should be sufficient if a
        configuration_manager is provided. Even if one is not, this
        method needs to be implemented to allow the rollback of
        flavor-resize on the guestagent side.
        """
        if self.configuration_manager:
            LOG.info("Resetting configuration.")
            config_contents = configuration['config_contents']
            self.configuration_manager.reset_configuration(config_contents)

    def apply_overrides_on_prepare(self, context, overrides):
        self.update_overrides(context, overrides)
        self.restart(context)

    def update_overrides(self, context, overrides, remove=False):
        LOG.info(f"Updating config options: {overrides}, remove={remove}")
        if remove:
            self.app.remove_overrides()
        self.app.update_overrides(overrides)

    def apply_overrides(self, context, overrides):
        raise exception.DatastoreOperationNotSupported(
            operation='apply_overrides', datastore=self.manager)

    #################
    # Cluster related
    #################
    def cluster_complete(self, context):
        LOG.info("Cluster creation complete, starting status checks.")
        self.status.end_install()

    #############
    # Log related
    #############
    def get_datastore_log_defs(self):
        """Any datastore-specific log files should be overridden in this dict
        by the corresponding Manager class.

        Format of a dict entry:

        'name_of_log': {self.GUEST_LOG_TYPE_LABEL:
                            Specified by the Enum in guest_log.LogType,
                        self.GUEST_LOG_USER_LABEL:
                            User that owns the file,
                        self.GUEST_LOG_FILE_LABEL:
                            Path on filesystem where the log resides,
                        self.GUEST_LOG_SECTION_LABEL:
                            Section where to put config (if ini style)
                        self.GUEST_LOG_ENABLE_LABEL: {
                            Dict of config_group settings to enable log},
                        self.GUEST_LOG_DISABLE_LABEL: {
                            Dict of config_group settings to disable log},

        See guestagent_log_defs for an example.
        """
        return {}

    def is_log_enabled(self, logname):
        # Datastores that support toggling USER logs should override this.
        return False

    def get_guest_log_defs(self):
        """Return all the guest log defs."""
        if not self._guest_log_defs:
            self._guest_log_defs = dict(self.get_datastore_log_defs())
            self._guest_log_defs.update(self.guestagent_log_defs)
        return self._guest_log_defs

    def get_guest_log_cache(self):
        """Make sure the guest_log_cache is loaded and return it."""
        self._refresh_guest_log_cache()
        return self._guest_log_cache

    def _refresh_guest_log_cache(self):
        if self._guest_log_cache:
            # Replace the context if it's changed
            if self._guest_log_loaded_context != self.guest_log_context:
                for log_name in self._guest_log_cache.keys():
                    self._guest_log_cache[log_name].context = (
                        self.guest_log_context)
        else:
            # Load the initial cache
            self._guest_log_cache = {}
            if self.guest_log_context:
                gl_defs = self.get_guest_log_defs()
                try:
                    exposed_logs = CONF.get(self.manager).get(
                        'guest_log_exposed_logs')
                except oslo_cfg.NoSuchOptError:
                    exposed_logs = ''
                LOG.debug("Available log defs: %s", ",".join(gl_defs.keys()))
                exposed_logs = exposed_logs.lower().replace(',', ' ').split()
                LOG.debug("Exposing log defs: %s", ",".join(exposed_logs))
                expose_all = 'all' in exposed_logs

                for log_name in gl_defs.keys():
                    gl_def = gl_defs[log_name]
                    exposed = expose_all or log_name in exposed_logs
                    guestlog = guest_log.GuestLog(
                        self.guest_log_context, log_name,
                        gl_def[self.GUEST_LOG_TYPE_LABEL],
                        gl_def[self.GUEST_LOG_USER_LABEL],
                        gl_def[self.GUEST_LOG_FILE_LABEL],
                        exposed)

                    if (gl_def[self.GUEST_LOG_TYPE_LABEL] ==
                            guest_log.LogType.USER):
                        guestlog.enabled = self.is_log_enabled(log_name)
                        guestlog.status = (guest_log.LogStatus.Enabled
                                           if guestlog.enabled
                                           else guest_log.LogStatus.Disabled)

                    self._guest_log_cache[log_name] = guestlog

        self._guest_log_loaded_context = self.guest_log_context

    def guest_log_list(self, context):
        LOG.info("Getting list of guest logs.")
        self.guest_log_context = context
        gl_cache = self.get_guest_log_cache()
        # Materialize a list (not a lazy filter object) so the result can be
        # serialized back over RPC.
        result = [gl_cache[log_name].show() for log_name in gl_cache.keys()
                  if gl_cache[log_name].exposed]
        return result

    def guest_log_action(self, context, log_name, enable, disable,
                         publish, discard):
        if enable and disable:
            raise exception.BadRequest("Cannot enable and disable log '%s'." %
                                       log_name)

        # Enable if we are publishing, unless told to disable
        if publish and not disable:
            enable = True
        LOG.info("Processing guest log '%(log)s' "
                 "(enable=%(en)s, disable=%(dis)s, "
                 "publish=%(pub)s, discard=%(disc)s).",
                 {'log': log_name, 'en': enable, 'dis': disable,
                  'pub': publish, 'disc': discard})

        self.guest_log_context = context
        gl_cache = self.get_guest_log_cache()
        if log_name in gl_cache:
            LOG.debug(f"Found log {log_name}, type={gl_cache[log_name].type}, "
                      f"enable={gl_cache[log_name].enabled}")

            # system log can only be published
            if ((gl_cache[log_name].type == guest_log.LogType.SYS) and
                    not publish):
                if enable or disable:
                    if enable:
                        action_text = "enable"
                    else:
                        action_text = "disable"
                    raise exception.BadRequest("Cannot %s a SYSTEM log ('%s')."
                                               % (action_text, log_name))

            if gl_cache[log_name].type == guest_log.LogType.USER:
                requires_change = (
                    (gl_cache[log_name].enabled and disable) or
                    (not gl_cache[log_name].enabled and enable))
                if requires_change:
                    self.guest_log_enable(context, log_name, disable)
                    gl_cache[log_name].enabled = enable
                    gl_cache[log_name].status = (
                        guest_log.LogStatus.Enabled
                        if enable
                        else guest_log.LogStatus.Disabled
                    )

            log_details = gl_cache[log_name].show()
            if discard:
                log_details = gl_cache[log_name].discard_log()
            if publish:
                log_details = gl_cache[log_name].publish_log()

            LOG.info("Details for log '%(log)s': %(det)s",
                     {'log': log_name, 'det': log_details})
            return log_details

        raise exception.NotFound("Log '%s' is not defined." % log_name)

    def guest_log_enable(self, context, log_name, disable):
        """This method can be overridden by datastore implementations to
        facilitate enabling and disabling USER type logs. If the logs
        can be enabled with simple configuration group changes, however,
        the code here will probably suffice.

        Must return whether the datastore needs to be restarted in order for
        the logging to begin.
        """
        restart_required = False
        verb = ("Disabling" if disable else "Enabling")
        if self.configuration_manager:
            LOG.debug("%(verb)s log '%(log)s'", {'verb': verb,
                                                 'log': log_name})
            gl_def = self.get_guest_log_defs()[log_name]
            enable_cfg_label = "%s_%s_log" % (self.GUEST_LOG_ENABLE_LABEL,
                                              log_name)
            disable_cfg_label = "%s_%s_log" % (self.GUEST_LOG_DISABLE_LABEL,
                                               log_name)
            restart_required = gl_def.get(self.GUEST_LOG_RESTART_LABEL,
                                          restart_required)
            if disable:
                self._apply_log_overrides(
                    context, enable_cfg_label, disable_cfg_label,
                    gl_def.get(self.GUEST_LOG_DISABLE_LABEL),
                    gl_def.get(self.GUEST_LOG_SECTION_LABEL),
                    restart_required)
            else:
                self._apply_log_overrides(
                    context, disable_cfg_label, enable_cfg_label,
                    gl_def.get(self.GUEST_LOG_ENABLE_LABEL),
                    gl_def.get(self.GUEST_LOG_SECTION_LABEL),
                    restart_required)
        else:
            log_fmt = ("%(verb)s log '%(log)s' not supported - "
                       "no configuration manager defined!")
            exc_fmt = _("%(verb)s log '%(log)s' not supported - "
                        "no configuration manager defined!")
            msg_content = {'verb': verb, 'log': log_name}
            LOG.error(log_fmt, msg_content)
            raise exception.GuestError(
                original_message=(exc_fmt % msg_content))

        return restart_required

    def _apply_log_overrides(self, context, remove_label,
                             apply_label, cfg_values, section_label,
                             restart_required):
        self.configuration_manager.remove_system_override(
            change_id=remove_label)
        if cfg_values:
            config_man_values = cfg_values
            if section_label:
                config_man_values = {section_label: cfg_values}
            self.configuration_manager.apply_system_override(
                config_man_values, change_id=apply_label, pre_user=True)
        if restart_required:
            self.status.set_status(
                service_status.ServiceStatuses.RESTART_REQUIRED)
        else:
            self.apply_overrides(context, cfg_values)

    def get_log_status(self, label):
        # Return the configured value (the original dropped it on the floor).
        return self.configuration_manager.get_value(label)

    def build_log_file_name(self, log_name, owner, datastore_dir=None):
        """Build a log file name based on the log_name and make sure the
        directories exist and are accessible by owner.
        """
        if datastore_dir is None:
            base_dir = self.GUEST_LOG_BASE_DIR
            if not operating_system.exists(base_dir, is_directory=True):
                operating_system.ensure_directory(
                    base_dir, user=owner, group=owner, force=True,
                    as_root=True)
            datastore_dir = guestagent_utils.build_file_path(
                base_dir, self.GUEST_LOG_DATASTORE_DIRNAME)

        if not operating_system.exists(datastore_dir, is_directory=True):
            operating_system.ensure_directory(
                datastore_dir, user=owner, group=owner, force=True,
                as_root=True)
        log_file_name = guestagent_utils.build_file_path(
            datastore_dir, '%s-%s.log' % (self.manager, log_name))

        return self.validate_log_file(log_file_name, owner)

    def validate_log_file(self, log_file, owner):
        """Make sure the log file exists and is accessible by owner.
        """
        if not operating_system.exists(log_file, as_root=True):
            operating_system.write_file(log_file, '', as_root=True)

        operating_system.chown(log_file, user=owner, group=owner,
                               as_root=True)
        operating_system.chmod(log_file, FileMode.ADD_USR_RW_GRP_RW_OTH_R,
                               as_root=True)
        return log_file

    ################
    # Module related
    ################
    def module_list(self, context, include_contents=False):
        LOG.info("Getting list of modules.")
        results = module_manager.ModuleManager.read_module_results(
            is_admin=context.is_admin, include_contents=include_contents)
        LOG.info("Returning list of modules: %s", results)
        return results

    def module_apply(self, context, modules=None):
        LOG.info("Applying modules.")
        results = []
        modules = [data['module'] for data in modules]
        try:
            # make sure the modules are applied in the correct order
            modules.sort(key=operator.itemgetter('apply_order'))
            modules.sort(key=operator.itemgetter('priority_apply'),
                         reverse=True)
        except KeyError:
            # If we don't have ordering info then maybe we're running
            # a version of the module feature before ordering was
            # introduced. In that case, since we don't have any
            # way to order the modules we should just continue.
            pass
        for module_data in modules:
            # Renamed from 'id' to avoid shadowing the builtin.
            module_id = module_data.get('id', None)
            module_type = module_data.get('type', None)
            name = module_data.get('name', None)
            tenant = module_data.get('tenant', self.MODULE_APPLY_TO_ALL)
            datastore = module_data.get('datastore', self.MODULE_APPLY_TO_ALL)
            ds_version = module_data.get('datastore_version',
                                         self.MODULE_APPLY_TO_ALL)
            contents = module_data.get('contents', None)
            md5 = module_data.get('md5', None)
            auto_apply = module_data.get('auto_apply', True)
            visible = module_data.get('visible', True)
            is_admin = module_data.get('is_admin', None)
            if is_admin is None:
                # fall back to the old method of checking for an admin option
                is_admin = (tenant == self.MODULE_APPLY_TO_ALL or
                            not visible or
                            auto_apply)
            if not name:
                raise AttributeError(_("Module name not specified"))
            if not contents:
                raise AttributeError(_("Module contents not specified"))
            driver = self.module_driver_manager.get_driver(module_type)
            if not driver:
                raise exception.ModuleTypeNotFound(
                    _("No driver implemented for module type '%s'") %
                    module_type)
            if (datastore and datastore != self.MODULE_APPLY_TO_ALL and
                    datastore != CONF.datastore_manager):
                reason = (_("Module not valid for datastore %s") %
                          CONF.datastore_manager)
                raise exception.ModuleInvalid(reason=reason)
            result = module_manager.ModuleManager.apply_module(
                driver, module_type, name, tenant, datastore, ds_version,
                contents, module_id, md5, auto_apply, visible, is_admin)
            results.append(result)
        LOG.info("Returning list of modules: %s", results)
        return results

    def module_remove(self, context, module=None):
        LOG.info("Removing module.")
        module = module['module']
        # Renamed from 'id' to avoid shadowing the builtin.
        module_id = module.get('id', None)
        module_type = module.get('type', None)
        name = module.get('name', None)
        datastore = module.get('datastore', None)
        ds_version = module.get('datastore_version', None)
        if not name:
            raise AttributeError(_("Module name not specified"))
        driver = self.module_driver_manager.get_driver(module_type)
        if not driver:
            raise exception.ModuleTypeNotFound(
                _("No driver implemented for module type '%s'") %
                module_type)
        module_manager.ModuleManager.remove_module(
            driver, module_type, module_id, name, datastore, ds_version)
        LOG.info("Deleted module: %s", name)

    ################
    # Backup and restore
    ################
    def create_backup(self, context, backup_info):
        """Create backup for the database.

        :param context: User context object.
        :param backup_info: a dictionary containing the db instance id of the
                            backup task, location, type, and other data.
        """
        pass

    def perform_restore(self, context, restore_location, backup_info):
        LOG.info("Starting to restore database from backup %s, "
                 "backup_info: %s", backup_info['id'], backup_info)

        if (backup_info["location"].endswith('.enc') and
                not CONF.backup_aes_cbc_key):
            self.status.set_status(service_status.ServiceStatuses.FAILED)
            raise exception.TroveError('Decryption key not configured for '
                                       'encrypted backup.')

        try:
            self.app.restore_backup(context, backup_info, restore_location)
        except Exception:
            LOG.error("Failed to restore from backup %s.", backup_info['id'])
            self.status.set_status(service_status.ServiceStatuses.FAILED)
            raise

        LOG.info("Finished restore data from backup %s", backup_info['id'])

    ################
    # Database and user management
    ################
    def create_database(self, context, databases):
        with EndNotification(context):
            return self.adm.create_databases(databases)

    def list_databases(self, context, limit=None, marker=None,
                       include_marker=False):
        return self.adm.list_databases(limit, marker, include_marker)

    def delete_database(self, context, database):
        with EndNotification(context):
            return self.adm.delete_database(database)

    def change_passwords(self, context, users):
        with EndNotification(context):
            self.adm.change_passwords(users)

    def get_root_password(self, context):
        raise exception.DatastoreOperationNotSupported(
            operation='get_root_password', datastore=self.manager)

    def enable_root(self, context):
        LOG.info("Enabling root for the database.")
        return self.adm.enable_root()

    def enable_root_on_prepare(self, context, root_password):
        self.enable_root_with_password(context, root_password)

    def enable_root_with_password(self, context, root_password=None):
        return self.adm.enable_root(root_password)

    def disable_root(self, context):
        LOG.info("Disabling root for the database.")
        return self.adm.disable_root()

    def is_root_enabled(self, context):
        return self.adm.is_root_enabled()

    def create_user(self, context, users):
        with EndNotification(context):
            self.adm.create_users(users)

    def list_users(self, context, limit=None, marker=None,
                   include_marker=False):
        return self.adm.list_users(limit, marker, include_marker)

    def delete_user(self, context, user):
        with EndNotification(context):
            self.adm.delete_user(user)

    def get_user(self, context, username, hostname):
        return self.adm.get_user(username, hostname)

    def update_attributes(self, context, username, hostname, user_attrs):
        with EndNotification(context):
            self.adm.update_attributes(username, hostname, user_attrs)

    def grant_access(self, context, username, hostname, databases):
        return self.adm.grant_access(username, hostname, databases)

    def revoke_access(self, context, username, hostname, database):
        return self.adm.revoke_access(username, hostname, database)

    def list_access(self, context, username, hostname):
        return self.adm.list_access(username, hostname)

    ################
    # Replication related
    ################
    def backup_required_for_replication(self, context):
        return self.replication.backup_required_for_replication()

    def get_replication_snapshot(self, context, snapshot_info,
                                 replica_source_config=None):
        LOG.info("Getting replication snapshot, snapshot_info: %s",
                 snapshot_info)

        self.replication.enable_as_master(self.app, replica_source_config)
        LOG.info('Enabled as replication master')

        snapshot_id, replica_conf = self.replication.snapshot_for_replication(
            context, self.app, self.adm, None, snapshot_info)

        volume_stats = self.get_filesystem_stats(context, None)

        replication_snapshot = {
            'dataset': {
                'datastore_manager': self.manager,
                'dataset_size': volume_stats.get('used', 0.0),
                'volume_size': volume_stats.get('total', 0.0),
                'snapshot_id': snapshot_id
            },
            'replication_strategy': self.replication_strategy,
            'master': self.replication.get_master_ref(self.app, snapshot_info),
            'replica_conf': replica_conf
        }
        return replication_snapshot

    def attach_replica(self, context, snapshot, slave_config, restart=False):
        raise exception.DatastoreOperationNotSupported(
            operation='attach_replication_slave', datastore=self.manager)

    def detach_replica(self, context, for_failover=False):
        """Running on replica, detach from the primary."""
        LOG.info("Detaching replica.")
        replica_info = self.replication.detach_slave(self.app, for_failover)
        return replica_info

    def get_replica_context(self, context):
        """Running on primary."""
        LOG.info("Getting replica context.")
        replica_info = self.replication.get_replica_context(self.app, self.adm)
        return replica_info

    def make_read_only(self, context, read_only):
        raise exception.DatastoreOperationNotSupported(
            operation='make_read_only', datastore=self.manager)

    def enable_as_master(self, context, replica_source_config):
        LOG.info("Enable as master")
        self.replication.enable_as_master(self.app, replica_source_config)

    def demote_replication_master(self, context):
        LOG.info("Demoting replication master.")
        self.replication.demote_master(self.app)

    def get_txn_count(self, context):
        LOG.debug("Getting transaction count.")
        raise exception.DatastoreOperationNotSupported(
            operation='get_txn_count', datastore=self.manager)

    def get_latest_txn_id(self, context):
        raise exception.DatastoreOperationNotSupported(
            operation='get_latest_txn_id', datastore=self.manager)

    def wait_for_txn(self, context, txn):
        raise exception.DatastoreOperationNotSupported(
            operation='wait_for_txn', datastore=self.manager)
# | |
'''
test_core.py: Python testing for core functions for Singularity in Python,
including defaults, utils, and shell functions.
Copyright (c) 2016-2017, Vanessa Sochat. All rights reserved.
"Singularity" Copyright (c) 2016, The Regents of the University of California,
through Lawrence Berkeley National Laboratory (subject to receipt of any
required approvals from the U.S. Dept. of Energy). All rights reserved.
This software is licensed under a customized 3-clause BSD license. Please
consult LICENSE file distributed with the sources of this project regarding
your rights to use or distribute this software.
NOTICE. This Software was developed under funding from the U.S. Department of
Energy and the U.S. Government consequently retains certain rights. As such,
the U.S. Government has been granted for itself and others acting on its
behalf a paid-up, nonexclusive, irrevocable, worldwide license in the Software
to reproduce, distribute copies to the public, prepare derivative works, and
perform publicly and display publicly, and to permit other to do so.
'''
import os
import re
import sys
import tarfile
# Make the client package (one directory up) importable for the tests below.
sys.path.append('..') # directory with client
from unittest import TestCase
import shutil
import tempfile
# Major Python version under test; echoed so test logs show which
# interpreter produced the run.
VERSION = sys.version_info[0]
print("*** PYTHON VERSION %s BASE TESTING START ***" %(VERSION))
class TestShell(TestCase):
    """Tests for shell.py uri helpers: get_image_uri, remove_image_uri,
    and parse_image_uri. Each test imports its target lazily so the test
    module can be collected even if shell.py is absent.
    """

    def setUp(self):
        # Test repo information
        self.registry = "registry"
        self.repo_name = "repo"
        self.namespace = "namespace"
        self.tag = "tag"
        # Default repo information (expected fallbacks in parse_image_uri)
        self.REGISTRY = 'index.docker.io'
        self.NAMESPACE = 'library'
        self.REPO_TAG = 'latest'
        self.tmpdir = tempfile.mkdtemp()
        os.environ['SINGULARITY_ROOTFS'] = self.tmpdir
        print("\n---START----------------------------------------")

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        print("---END------------------------------------------")

    def test_get_image_uri(self):
        '''test_get_image_uri ensures that the correct uri is returned
        for a user specified uri, registry, namespace.
        '''
        from shell import get_image_uri
        print("Case 1: No image uri should return None")
        image_uri = get_image_uri('namespace/repo:tag')
        self.assertEqual(image_uri, None)
        print("Case 2: testing return of shub://")
        image_uri = get_image_uri('shub://namespace/repo:tag')
        self.assertEqual(image_uri, 'shub://')
        print("Case 3: testing return of docker uri")
        image_uri = get_image_uri('docker://namespace/repo:tag')
        self.assertEqual(image_uri, 'docker://')
        print("Case 4: weird capitalization should return lowercase")
        image_uri = get_image_uri('DocKer://namespace/repo:tag')
        self.assertEqual(image_uri, 'docker://')

    def test_remove_image_uri(self):
        '''test_remove_image_uri removes the uri
        '''
        from shell import remove_image_uri
        print("Case 1: No image_uri should estimate first")
        image = remove_image_uri('myuri://namespace/repo:tag')
        self.assertEqual(image, "namespace/repo:tag")
        print("Case 2: Missing image uri should return image")
        image = remove_image_uri('namespace/repo:tag')
        self.assertEqual(image, "namespace/repo:tag")

    def test_parse_image_uri(self):
        '''test_parse_image_uri ensures that the correct namespace,
        repo name, and tag (or unique id) is returned.
        '''
        from shell import parse_image_uri
        print("Case 1: Empty repo_name should return error")
        with self.assertRaises(SystemExit) as cm:
            image = parse_image_uri(image="")
        self.assertEqual(cm.exception.code, 1)
        print("Case 2: Checking for correct output tags in digest...")
        image_name = "%s/%s" % (self.namespace, self.repo_name)
        digest = parse_image_uri(image=image_name)
        for tag in ['registry', 'repo_name', 'repo_tag', 'namespace']:
            self.assertTrue(tag in digest)
        print("Case 3: Specifying only an image should return defaults")
        image = parse_image_uri(image="shub://lizardleezle",
                                uri="shub://")
        self.assertTrue(isinstance(image, dict))
        self.assertEqual(image["namespace"], self.NAMESPACE)
        self.assertEqual(image["repo_tag"], self.REPO_TAG)
        self.assertEqual(image["repo_name"], 'lizardleezle')
        self.assertEqual(image["registry"], self.REGISTRY)
        # Fixed typo in the original message ("speciifed").
        print("Case 4: Tag when specified should be returned.")
        image_name = "%s/%s:%s" % (self.namespace, self.repo_name,
                                   "pusheenasaurus")
        digest = parse_image_uri(image_name)
        self.assertTrue(digest['repo_tag'] == 'pusheenasaurus')
        print("Case 5: Repo name and tag without namespace...")
        image_name = "%s:%s" % (self.repo_name, self.tag)
        digest = parse_image_uri(image_name)
        self.assertTrue(digest['repo_tag'] == self.tag)
        self.assertTrue(digest['namespace'] == 'library')
        self.assertTrue(digest['repo_name'] == self.repo_name)
        print("Case 6: Changing default namespace should not use library.")
        image_name = "meow/%s:%s" % (self.repo_name, self.tag)
        digest = parse_image_uri(image_name)
        self.assertTrue(digest['namespace'] == 'meow')
        print("Case 7: Changing default registry should not use index.docker.io.")
        image_name = "meow/mix/%s:%s" % (self.repo_name, self.tag)
        digest = parse_image_uri(image_name)
        self.assertTrue(digest['registry'] == 'meow')
        self.assertTrue(digest['namespace'] == 'mix')
        print("Case 8: Custom uri should use it.")
        image_name = "catdog://meow/mix/%s:%s" % (self.repo_name, self.tag)
        digest = parse_image_uri(image_name, uri="catdog://")
        self.assertTrue(digest['registry'] == 'meow')
        self.assertTrue(digest['namespace'] == 'mix')
        print("Case 9: Digest version should be parsed")
        image_name = ("catdog://meow/mix/%s:%s@sha:256xxxxxxxxxxxxxxx"
                      % (self.repo_name, self.tag))
        digest = parse_image_uri(image_name, uri="catdog://")
        self.assertTrue(digest['registry'] == 'meow')
        self.assertTrue(digest['namespace'] == 'mix')
        self.assertTrue(digest['version'] == 'sha:256xxxxxxxxxxxxxxx')
class TestUtils(TestCase):
    """Tests for sutils helpers: url handling, auth headers, subprocess
    wrappers, tar extraction, and file read/write utilities.
    """

    def setUp(self):
        # Scratch directory; removed in tearDown.
        self.tmpdir = tempfile.mkdtemp()
        print("\n---START----------------------------------------")

    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        print("---END------------------------------------------")

    def test_add_http(self):
        '''test_add_http ensures that http is added to a url
        '''
        from sutils import add_http
        url_http = 'http://registry.docker.io'
        url_https = 'https://registry.docker.io'
        print("Case 1: adding https to url with nothing specified...")
        # Default is https
        url = 'registry.docker.io'
        http = add_http(url)
        self.assertEqual(url_https, http)
        # http
        print("Case 2: adding http to url with nothing specified...")
        http = add_http(url, use_https=False)
        self.assertEqual(url_http, http)
        # This should not change. Note - is url is http, stays http
        print("Case 3: url already has https, should not change...")
        url = 'https://registry.docker.io'
        http = add_http(url)
        self.assertEqual(url_https, http)
        # This should not change. Note - is url is http, stays http
        print("Case 4: url already has http, should not change...")
        url = 'http://registry.docker.io'
        http = add_http(url, use_https=False)
        self.assertEqual(url_http, http)
        print("Case 5: url has http, should change to https")
        url = 'http://registry.docker.io'
        http = add_http(url)
        self.assertEqual(url_https, http)
        print("Case 6: url has https, should change to http")
        url = 'https://registry.docker.io'
        http = add_http(url, use_https=False)
        self.assertEqual(url_http, http)
        print("Case 7: url should have trailing slash stripped")
        url = 'https://registry.docker.io/'
        http = add_http(url, use_https=False)
        self.assertEqual(url_http, http)

    def test_headers(self):
        '''test_headers checks that basic_auth_header builds the expected
        base64-encoded Authorization header.
        '''
        print("Testing utils header functions...")
        from sutils import basic_auth_header
        # Basic auth header
        print("Case 4: basic_auth_header - ask for custom authentication header")
        auth = basic_auth_header(username='vanessa',
                                 password='pancakes')
        # 'dmFuZXNzYTpwYW5jYWtlcw==' is base64("vanessa:pancakes")
        self.assertEqual(auth['Authorization'],
                         'Basic dmFuZXNzYTpwYW5jYWtlcw==')

    def test_run_command(self):
        '''test_run_command tests sending a command to commandline
        using subprocess
        '''
        print("Testing utils.run_command...")
        from sutils import run_command
        # An error should return None
        print("Case 1: Command errors returns None ")
        none = run_command(['exec', 'whaaczasd'])
        self.assertEqual(none, None)
        # A success should return console output
        print("Case 2: Command success returns output")
        hello = run_command(['echo', 'hello'])
        if not isinstance(hello, str):  # python 3 support
            hello = hello.decode('utf-8')
        self.assertEqual(hello, 'hello\n')

    def test_is_number(self):
        '''test_is_number should return True for any string or
        number that turns to a number, and False for everything else
        '''
        print("Testing utils.is_number...")
        from sutils import is_number
        print("Case 1: Testing string and float numbers returns True")
        self.assertTrue(is_number("4"))
        self.assertTrue(is_number(4))
        self.assertTrue(is_number("2.0"))
        self.assertTrue(is_number(2.0))
        print("Case 2: Testing repo names, tags, commits, returns False")
        self.assertFalse(is_number("vsoch/singularity-images"))
        self.assertFalse(is_number("vsoch/singularity-images:latest"))
        self.assertFalse(is_number("44ca6e7c6c35778ab80b34c3fc940c32f1810f39"))

    def test_extract_tar(self):
        '''test_extract_tar will test extraction of a tar.gz file
        '''
        print("Testing utils.extract_tar...")
        # First create a temporary tar file
        from sutils import extract_tar
        from glob import glob
        import tarfile
        # Create and close a temporary tar.gz
        print("Case 1: Testing tar.gz...")
        creation_dir = tempfile.mkdtemp()
        archive, files = create_test_tar(creation_dir)
        # Extract to different directory
        extract_dir = tempfile.mkdtemp()
        extract_tar(archive=archive,
                    output_folder=extract_dir)
        # Members were added with absolute /tmp paths, so strip the
        # extraction prefix before comparing against the originals.
        extracted_files = [x.replace(extract_dir, '') for x in glob("%s/tmp/*" % (extract_dir))]
        [self.assertTrue(x in files) for x in extracted_files]
        # Clean up
        for dirname in [extract_dir, creation_dir]:
            shutil.rmtree(dirname)
        print("Case 2: Testing tar...")
        creation_dir = tempfile.mkdtemp()
        archive, files = create_test_tar(creation_dir, compressed=False)
        # Extract to different directory
        extract_dir = tempfile.mkdtemp()
        extract_tar(archive=archive,
                    output_folder=extract_dir)
        extracted_files = [x.replace(extract_dir, '') for x in glob("%s/tmp/*" % (extract_dir))]
        [self.assertTrue(x in files) for x in extracted_files]
        print("Case 3: Testing that extract_tar returns None on error...")
        creation_dir = tempfile.mkdtemp()
        archive, files = create_test_tar(creation_dir, compressed=False)
        extract_dir = tempfile.mkdtemp()
        # Deleting the output folder forces extraction to fail.
        shutil.rmtree(extract_dir)
        output = extract_tar(archive=archive,
                             output_folder=extract_dir)
        self.assertEqual(output, None)

    def test_write_read_files(self):
        '''test_write_read_files will test the functions write_file and read_file
        '''
        print("Testing utils.write_file...")
        from sutils import write_file
        import json
        tmpfile = tempfile.mkstemp()[1]
        os.remove(tmpfile)
        write_file(tmpfile, "hello!")
        self.assertTrue(os.path.exists(tmpfile))
        print("Testing utils.read_file...")
        from sutils import read_file
        content = read_file(tmpfile)[0]
        self.assertEqual("hello!", content)
        from sutils import write_json
        print("Testing utils.write_json...")
        print("Case 1: Providing bad json")
        # The set literal {True} is not JSON-serializable -> TypeError.
        bad_json = {"Wakkawakkawakka'}": [{True}, "2", 3]}
        tmpfile = tempfile.mkstemp()[1]
        os.remove(tmpfile)
        with self.assertRaises(TypeError) as cm:
            write_json(bad_json, tmpfile)
        print("Case 2: Providing good json")
        good_json = {"Wakkawakkawakka": [True, "2", 3]}
        tmpfile = tempfile.mkstemp()[1]
        os.remove(tmpfile)
        write_json(good_json, tmpfile)
        content = json.load(open(tmpfile, 'r'))
        self.assertTrue(isinstance(content, dict))
        self.assertTrue("Wakkawakkawakka" in content)

    def test_clean_path(self):
        '''test_clean_path will test the clean_path function
        '''
        print("Testing utils.clean_path...")
        from sutils import clean_path
        ideal_path = '/home/vanessa/Desktop/stuff'
        self.assertEqual(clean_path('/home/vanessa/Desktop/stuff/'), ideal_path)
        self.assertEqual(clean_path('/home/vanessa/Desktop/stuff//'), ideal_path)
        self.assertEqual(clean_path('/home/vanessa//Desktop/stuff/'), ideal_path)

    def test_get_fullpath(self):
        '''test_get_fullpath will test the get_fullpath function
        '''
        print("Testing utils.get_fullpath...")
        from sutils import get_fullpath
        tmpfile = tempfile.mkstemp()[1]
        print("Case 1: File exists, should return full path")
        self.assertEqual(get_fullpath(tmpfile), tmpfile)
        print("Case 2: File doesn't exist, should return error")
        os.remove(tmpfile)
        with self.assertRaises(SystemExit) as cm:
            get_fullpath(tmpfile)
        self.assertEqual(cm.exception.code, 1)
        print("Case 3: File doesn't exist, but not required, should return None")
        self.assertEqual(get_fullpath(tmpfile, required=False), None)

    def test_write_singularity_infos(self):
        '''test_write_singularity_infos will test the
        write_singularity_infos function
        '''
        print("Testing utils.write_singuarity_infos...")
        from sutils import write_singularity_infos
        base_dir = '%s/ROOTFS' % (self.tmpdir)
        prefix = 'docker'
        start_number = 0
        content = "export HELLO=MOTO"
        print("Case 1: Metadata base doesn't exist, should return error")
        with self.assertRaises(SystemExit) as cm:
            info_file = write_singularity_infos(base_dir=base_dir,
                                                prefix=prefix,
                                                start_number=start_number,
                                                content=content)
        self.assertEqual(cm.exception.code, 1)
        print("Case 2: Metadata base does exist, should return path.")
        os.mkdir(base_dir)
        info_file = write_singularity_infos(base_dir=base_dir,
                                            prefix=prefix,
                                            start_number=start_number,
                                            content=content)
        self.assertEqual(info_file, "%s/%s-%s" % (base_dir, start_number, prefix))
        print("Case 3: Adding another equivalent prefix should return next")
        # Writing again with the same prefix should bump the number suffix.
        info_file = write_singularity_infos(base_dir=base_dir,
                                            prefix=prefix,
                                            start_number=start_number,
                                            content=content)
        self.assertEqual(info_file, "%s/%s-%s" % (base_dir, start_number + 1, prefix))
        print("Case 4: Files have correct content.")
        with open(info_file, 'r') as filey:
            written_content = filey.read()
        self.assertEqual(content, written_content)
# Supporting Test Functions
def create_test_tar(tmpdir, compressed=True):
    """Create a small test archive containing three empty temp files.

    :param tmpdir: directory in which to create the archive
    :param compressed: when True (default) create toodles.tar.gz,
        otherwise an uncompressed toodles.tar
    :returns: (archive_path, list_of_member_paths)
    """
    # Choose name and mode together instead of the original's two
    # separate "compressed == False" re-checks.
    if compressed:
        archive = "%s/toodles.tar.gz" % tmpdir
        mode = "w:gz"
    else:
        archive = "%s/toodles.tar" % tmpdir
        mode = "w"
    print("Creating %s" % (archive))
    files = [tempfile.mkstemp()[1] for _ in range(3)]
    # Context manager guarantees the archive is closed (original used a
    # bare open/close pair).
    with tarfile.open(archive, mode) as tar:
        for path in files:
            tar.add(path)
    return archive, files
if __name__ == '__main__':
    # Bug fix: only TestCase was imported at module level, so the bare
    # ``unittest.main()`` raised NameError when the file was run directly.
    import unittest
    unittest.main()
| |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that update the state of the models in response to user actions."""
from __future__ import absolute_import
from __future__ import division
from datetime import datetime
from datetime import timedelta
import pickle
from google.appengine.ext import deferred
from google.appengine.ext import ndb
from recommender import config
from recommender import datastore_based_connection_trainer as connection_trainer
from recommender import feeds
from recommender import models
from recommender import time_periods
# Updates all feeds in batches of 20 at a time.
def UpdateAllFeeds():
    """Kick off a batched sweep over every feed, starting from the first key."""
    _UpdateNFeeds(None, 20, datetime.now())
def _UpdateNFeeds(min_id, batch_size, start_time):
    """Update one batch of feeds, then defer itself to handle the next batch.

    Args:
      min_id: key id of the last feed handled by the previous batch, or None
        to start from the beginning of the Feed table.
      batch_size: number of feeds to fetch per batch.
      start_time: time the sweep started; passed through to later batches
        unchanged (not otherwise read in this function).
    """
    # Feeds updated after this instant are skipped by the filter below.
    max_last_updated = datetime.now()
    query = feeds.Feed.query()
    if min_id:
        query = query.filter(feeds.Feed.key > ndb.Key(feeds.Feed, min_id))
    batch = query.fetch(batch_size)
    if not batch:
        return
    # Schedule the next batch before processing this one, keyed past the
    # last feed in the current batch.
    deferred.defer(
        _UpdateNFeeds,
        batch[-1].key.id(),
        batch_size,
        start_time,
        _queue='feed-updates')
    # Fire off all async requests in parallel so we only wait for the slowest
    # feed in the batch instead of for all serially.
    results = [(feed,
                feed.UpdateAsync(canonicalize=CanonicalizeUrl))
               for feed in batch
               if feed.last_updated < max_last_updated]
    for feed, new_items_future in results:
        new_items = new_items_future.get_result()
        if new_items:
            deferred.defer(
                _NewFeedItemsAdded,
                feed, {item.url for item in new_items},
                _queue='feed-updates')
def _NewFeedItemsAdded(feed, new_item_urls):
    """Post-update hook: decay the feed's publisher connection weight and
    warm the page-info cache for the newly seen urls.
    """
    item_count = len(new_item_urls)
    _DecayConnectionWeightToFeed(feed, item_count)
    models.PrefetchPageInfos(new_item_urls)
def UpdateFeed(feed):
    """Schedule an asynchronous update for *feed*; no-op when feed is falsy."""
    if not feed:
        return
    deferred.defer(UpdateFeedImpl, feed, _queue='feed-updates')
def UpdateFeedImpl(feed):
    """Fetch new items for *feed*; when any arrive, decay the publisher's
    connection weight and prefetch page info for the new urls.
    """
    fresh_items = feed.Update(CanonicalizeUrl)
    if fresh_items:
        deferred.defer(_DecayConnectionWeightToFeed,
                       feed,
                       len(fresh_items),
                       _queue='feed-updates')
        models.PrefetchPageInfos({item.url for item in fresh_items})
def _DecayConnectionWeightToFeed(feed, num_new_items):
    """Decay the connection weight of *feed*'s publisher source,
    proportionally to the number of new items it produced.
    """
    feed_source = models.Source(models.SOURCE_TYPE_FEED, feed.GetUrl(), None)
    trainer = connection_trainer.CreateTrainer()
    trainer.DecayConnectionWeightToPublisher(feed_source,
                                             num_items=num_new_items)
def CanonicalizeUrl(url):
    """Return the canonical form of *url* when page info is known,
    otherwise return the url unchanged.
    """
    page_info = models.GetPageInfo(url)
    return page_info.canonical_url if page_info else url
def AddRating(user, url, rating, source, category_id):
    """Record a user's rating of *url* and trigger downstream updates.

    Registers any feed associated with the page, then schedules the
    connection-state update via RatingAdded.

    Returns:
      The stats dict produced by models.AddRating, possibly annotated with
      an 'own_feed' url.
    """
    user_id = models.UserKey(user).id()
    stats = models.AddRating(
        user_id, url, rating, source, category_id)
    page_info = models.GetPageInfo(url)
    # Bug fix: GetPageInfo can return None (CanonicalizeUrl guards for it);
    # the original dereferenced page_info unconditionally.
    if page_info:
        # If the page belongs to a feed then register this feed.
        if page_info.feed_url:
            UpdateFeed(feeds.AddFeed(page_info.feed_url, page_info.feed_title))
            stats['own_feed'] = page_info.feed_url
        # If the page itself is a feed url then also register it.
        if page_info.is_feed:
            UpdateFeed(feeds.AddFeed(page_info.canonical_url, page_info.title))
            stats['own_feed'] = page_info.canonical_url
    RatingAdded(
        models.Source(models.SOURCE_TYPE_USER, user_id, category_id), url, rating)
    return stats
# Grace period before the deferred connection update runs after a rating:
# 10 seconds on the dev server, 1 minute in production.
DELAY_BEFORE_UPDATING_CONNECTIONS = (
    timedelta(seconds=10) if config.IsDev() else timedelta(minutes=1))
def RatingAdded(source, url, rating):
    """Queue the deferred connection update for a new rating, after the
    standard grace delay.
    """
    serialized_source = models.SerializeSource(source)
    countdown = DELAY_BEFORE_UPDATING_CONNECTIONS.total_seconds()
    deferred.defer(RatingAddedImpl,
                   serialized_source,
                   url,
                   rating,
                   _queue='recommendation-updates',
                   _countdown=countdown)
def RatingAddedImpl(source, url, rating):
    """Deferred task: update connection state after a rating was recorded.

    Args:
      source: serialized source (models.SerializeSource) that produced the
        rating.
      url: the rated page url.
      rating: numeric rating value.
    """
    source = models.DeserializeSource(source)
    # Consistency fix: the original tested source.source_type against
    # SOURCE_TYPE_USER twice with identical conditions; merged into one block.
    if source.source_type == models.SOURCE_TYPE_USER:
        deferred.defer(UpdatePopularPage, url, _queue='default')
        # Check if the user has changed the category or removed their vote.
        # In both cases we do not want to update the connection state.
        user_key = models.UserKey(source.source_id)
        page_rating = ndb.Key(models.PageRating, url, parent=user_key).get()
        if not page_rating:
            return
        if page_rating.category != source.CategoryKey():
            # We will let the deferred task scheduled from SetPageCategory to update
            # connections for the updated category.
            return
    connection_trainer.CreateTrainer().RecommendationAdded(source, url, rating)
    models.UpdateCachedConnectionInfo(source.source_id)
def UpdatePopularPage(url):
    """Recompute the PopularPage aggregates for *url* from all its ratings.

    Runs the same map/reduce functions used by the pipeline, but inline
    over the ratings of a single url.
    """
    start_datetime = datetime.now()
    values_by_key = dict()
    for rating in models.PageRating.query(
        models.PageRating.url == url).order(-models.PageRating.date):
        for (key, value) in PopularPagesMap(rating, start_datetime):
            # Idiom fix: setdefault replaces the original if/else grouping.
            values_by_key.setdefault(key, []).append(value)
    # NOTE: .iteritems() — this module targets Python 2 (App Engine ndb).
    for key, values in values_by_key.iteritems():
        PopularPagesReduce(key, values)
def SetPageCategory(user, url, category_id, retries_left=10):
    """Set (or clear, when category_id is None) the category on the user's
    rating of *url*, then re-run the connection update for the new category.

    Retries through the task queue while the PageRating entity is not yet
    visible (eventual consistency), up to retries_left attempts.
    """
    user_key = models.UserKey(user)
    page_rating = ndb.Key(models.PageRating, url, parent=user_key).get()
    if page_rating is None:
        # It could be that the PageRating has not become available yet so we retry
        # a few times.
        if retries_left > 0:
            deferred.defer(
                SetPageCategory,
                user,
                url,
                category_id,
                retries_left - 1,
                _queue='default')
        return
    category = None
    if category_id is not None:
        category = models.CategoryKey(category_id, user)
    if page_rating.category == category:
        # No change; nothing to persist or recompute.
        return
    page_rating.category = category
    page_rating.put()
    # Re-run the rating pipeline under the new category after the usual delay.
    deferred.defer(
        RatingAddedImpl,
        models.SerializeSource(
            models.Source(models.SOURCE_TYPE_USER, user.user_id(), category_id)),
        url,
        page_rating.rating,
        _countdown=DELAY_BEFORE_UPDATING_CONNECTIONS.total_seconds())
# This is used in pipelines.py as part of a MapReduce.
def PopularPagesMap(rating, start_datetime=None):
    """Map step: yield pickled (url, time_period) -> (score, age) pairs
    for one rating. Yields nothing for neutral or future-dated ratings.
    The time-decay of the score is applied later in PopularPagesReduce.
    """
    # This is a neutral rating.
    if rating.rating == 0:
        return
    # Do not count ratings added after the pipeline was started.
    if start_datetime is None:
        start_datetime = datetime.now()
    if rating.date > start_datetime:
        return
    time_passed = start_datetime - rating.date
    score = rating.rating
    for time_period in time_periods.TIME_PERIODS:
        # These pseudo-periods are excluded from popularity aggregation.
        if (time_period['name'] == time_periods.RECENT or
            time_period['name'] == time_periods.LAST_VISIT or
            time_period['name'] == time_periods.LAST_VISIT_RESTRICTED):
            continue
        if time_passed < time_period['timedelta']:
            # Keys/values are pickled; presumably the MapReduce shuffle
            # requires string keys — confirm against pipelines.py.
            yield [
                pickle.dumps((rating.url, time_period['name'])),
                pickle.dumps((score, time_passed))
            ]
def PopularPagesReduce(key, values):
    """Reduce step: aggregate rating values for one (url, time_period) key.

    Each rating's score decays exponentially with a half-life equal to the
    period length. Writes or updates the PopularPage entity, or deletes an
    existing one when the net score is not positive.
    """
    url, time_period = pickle.loads(key)
    popular_page = models.PopularPage(
        url=url,
        time_period=time_period,
        score=0,
        positive_ratings=0,
        negative_ratings=0)
    half_life_seconds = time_periods.Get(time_period)['timedelta'].total_seconds()
    for value in values:
        rating, time_passed = pickle.loads(value)
        if rating < 0:
            popular_page.negative_ratings += 1
        else:
            popular_page.positive_ratings += 1
        # Exponential decay: a rating's weight halves every half_life_seconds.
        popular_page.score += rating * (0.5**(time_passed.total_seconds() /
                                              half_life_seconds))
    existing = models.PopularPage.query(
        models.PopularPage.url == url,
        models.PopularPage.time_period == popular_page.time_period).get()
    # Update the existing entry or create a new one.
    if popular_page.score > 0:
        if existing is not None:
            popular_page.key = existing.key
        popular_page.put()
    # Or delete the existing entry.
    elif existing is not None:
        existing.key.delete()
| |
from __future__ import print_function, division, absolute_import, unicode_literals
from builtins import bytes, dict, object, range, map, input, str
from future.utils import itervalues, viewitems, iteritems, listvalues, listitems
from io import open
import os.path
import attr
import numpy as np
from scipy import constants
from astropy import time
import logging
logger = logging.getLogger(__name__)
logger.setLevel(20)  # 20 == logging.INFO
@attr.s
class Metadata(object):
    """ Metadata we need to translate parameters into a pipeline state.
    Called from a function defined for a given source (e.g., an sdm file).
    Built from nominally immutable attributes and properties.
    To modify metadata, use attr.assoc(inst, key=newval)
    """

    # basics
    datasource = attr.ib(default=None)  # 'vys', 'sdm', 'sim', 'vyssim'
    datasetId = attr.ib(default=None)
    filename = attr.ib(default=None)  # full path to SDM (optional)
    scan = attr.ib(default=None)  # int
    subscan = attr.ib(default=None)  # int
    bdfdir = attr.ib(default=None)
    bdfstr = attr.ib(default=None)

    # data structure and source properties
    source = attr.ib(default=None)
    radec = attr.ib(default=None)  # (radians, radians)
    inttime = attr.ib(default=None)  # seconds
    nints_ = attr.ib(default=None)  # trailing underscore: see nints property
    telescope = attr.ib(default=None)
    phasecenters = attr.ib(default=None)  # list of tuples with (startmjd, stopmjd, ra_deg, dec_deg)

    # array/antenna info
    starttime_mjd = attr.ib(default=None)  # float
    endtime_mjd_ = attr.ib(default=None)  # trailing underscore: see endtime_mjd property
    dishdiameter = attr.ib(default=None)
    intent = attr.ib(default=None)
    antids = attr.ib(default=None)  # e.g. ['ea01', 'ea02', ...]
    stationids = attr.ib(default=None)  # needed?
    xyz = attr.ib(default=None)  # in m, geocentric

    # spectral info
    spworder = attr.ib(default=None)
    quantization = attr.ib(default=None)
    spw_orig = attr.ib(default=None)  # indexes for spw
    spw_nchan = attr.ib(default=None)  # channels per spw
    spw_reffreq = attr.ib(default=None)  # reference frequency (ch0) in Hz
    spw_chansize = attr.ib(default=None)  # channel size in Hz
    pols_orig = attr.ib(default=None)

    def atdefaults(self):
        """ Is metadata still set at default values? """
        # True only when every attribute is falsy (i.e. still None).
        return not any([self.__dict__[ab] for ab in self.__dict__])

    # @property
    # def spw_chanr(self):
    #     chanr = []
    #     i0 = 0
    #     for nch in self.spw_nchan:
    #         chanr.append((i0, i0+nch))
    #         i0 = nch
    #     return chanr

    @property
    def spw_sorted_properties(self):
        """ Returns reffreq, nchan, chansize tuple
        each has spw properties in freq-sorted order.
        """
        reffreq, nchan, chansize = zip(*sorted(zip(self.spw_reffreq,
                                                   self.spw_nchan,
                                                   self.spw_chansize)))
        return reffreq, nchan, chansize

    @property
    def freq_orig(self):
        """Spacing of channel centers in GHz.
        Data is read into spw or increasing frequency order.
        Note that spworder has increasing freq order, but spw_reffreq need not.
        """
        reffreq, nchan, chansize = self.spw_sorted_properties
        # Channel centers per spw, concatenated, converted Hz -> GHz.
        return np.concatenate([np.linspace(reffreq[ii],
                                           reffreq[ii]+(nchan[ii]-1)*chansize[ii],
                                           nchan[ii])
                               for ii in range(len((self.spw_reffreq)))]).astype('float32')/1e9

    @property
    def nspw_orig(self):
        """ Number of spectral windows. """
        return len(self.spw_reffreq)

    @property
    def nchan_orig(self):
        """ Total number of channels over all spws. """
        return len(self.freq_orig)

    @property
    def nants_orig(self):
        """ Number of antennas. """
        return len(self.antids)

    @property
    def nbl_orig(self):
        """ Number of baselines (antenna pairs, no autocorrelations). """
        return int(self.nants_orig*(self.nants_orig-1)/2)

    @property
    def blarr_orig(self):
        """ Baseline array of antenna-number pairs.
        Assumes antids of the form 'ea<number>' (see mock/sdm metadata).
        """
        return np.array([[int(self.antids[i].lstrip('ea')),
                          int(self.antids[j].lstrip('ea'))]
                         for j in range(self.nants_orig)
                         for i in range(0, j)])

    @property
    def starttime_string(self):
        """ Scan start time as 'YYYY/MM/DD/hh:mm:ss...' string. """
        return time.Time(self.starttime_mjd, format='mjd').iso.replace('-', '/').replace(' ', '/')
        # return qa.time(qa.quantity(self.starttime_mjd, 'd'),
        #                form='ymd', prec=8)[0]

    @property
    def endtime_mjd(self):
        """ If nints\_ is defined (e.g., for SDM data), then endtime_mjd is calculated.
        Otherwise (e.g., for scan_config/vys data), it looks for endtime_mjd\_
        attribute
        """
        if self.endtime_mjd_:
            return self.endtime_mjd_
        elif self.nints_:
            assert self.nints_ > 0, "nints_ must be greater than zero"
            # inttime is in seconds; 24*3600 converts to days (MJD units).
            return self.starttime_mjd + (self.nints_*self.inttime)/(24*3600)
        else:
            raise AttributeError("Either endtime_mjd_ or nints_ need to be "
                                 "defined.")

    @property
    def nints(self):
        """ If endtime_mjd\_ is defined (e.g., for scan_config/vys
        data), then nints is calculated from it.
        Otherwise (e.g., for SDM data), it looks for nints\_ attribute
        """
        if self.nints_:
            return self.nints_
        elif self.endtime_mjd_:
            assert self.endtime_mjd > self.starttime_mjd, "endtime_mjd must be larger than starttime_mjd"
            return np.round((self.endtime_mjd_ - self.starttime_mjd)*(24*3600)/self.inttime).astype(int)
            # return np.ceil((self.endtime_mjd_ - self.starttime_mjd)*(24*3600)/self.inttime).astype(int)
        else:
            raise AttributeError("Either endtime_mjd_ or nints_ need to be "
                                 "defined.")

    @property
    def uvrange_orig(self):
        """ (u range, v range) at the lowest frequency, in wavelengths. """
        from rfpipe.util import calc_uvw_astropy as calc_uvw
        (ur, vr, wr) = calc_uvw(datetime=self.starttime_string,
                                radec=self.radec,
                                xyz=self.xyz,
                                telescope=self.telescope)
        # Scale meters to wavelengths at the minimum frequency (GHz -> Hz).
        u = ur * self.freq_orig.min() * (1e9/constants.c) * (-1)
        v = vr * self.freq_orig.min() * (1e9/constants.c) * (-1)
        return (u.max() - u.min(), v.max() - v.min())

    @property
    def npol_orig(self):
        """ Number of polarization products. """
        return len(self.pols_orig)

    @property
    def scanId(self):
        """ Unique scan identifier: '<datasetId>.<scan>.<subscan>'. """
        assert self.datasetId is not None
        return '{0}.{1}.{2}'.format(self.datasetId, self.scan, self.subscan)
def make_metadata(inmeta=None, config=None, sdmfile=None, sdmscan=None,
                  sdmsubscan=1, bdfdir=None):
    """ Given range of potential metadata sources, create Metadata object

    inmeta may be a Metadata instance (returned unchanged), a dict that
    overloads values read from the other sources, or None.
    sdmfile/sdmscan select SDM-based metadata; config selects scan-config
    based metadata. Values in inmeta take precedence.
    """
    if isinstance(inmeta, Metadata):
        return inmeta  # does not overload if Metadata object passed in
    else:
        inmeta_provided = inmeta is not None
        if inmeta is None:
            inmeta = {}  # passing in dict will overload other metadata sources

        # get metadata
        if (sdmfile is not None) and (sdmscan is not None) and (config is None):
            meta = sdm_metadata(sdmfile, sdmscan, sdmsubscan, bdfdir=bdfdir)
        elif config is not None and (sdmfile is None) and (sdmscan is None):
            # config datasource can be vys or simulated data
            datasource = inmeta['datasource'] if 'datasource' in inmeta else 'vys'
            meta = config_metadata(config, datasource=datasource)
        else:
            # Bug fix: the original tested "inmeta is None" here, which was
            # dead code because inmeta had already been replaced with {}.
            if not inmeta_provided:
                logger.warning("Provide either inmeta, sdmfile/sdmscan, or config object to define metadata. Empty metadata dict being created.")
            meta = {}

        # optionally overload metadata
        if isinstance(inmeta, dict):
            for key in inmeta:
                meta[key] = inmeta[key]

            for key in meta:
                # Bug fix: the original passed values as positional logging
                # args with no format placeholders; use lazy %-style args.
                logger.debug("%s %s %s", key, meta[key], type(meta[key]))
        else:
            logger.warning("inmeta not dict, Metadata, or None. Not parsed.")

        return Metadata(**meta)
def config_metadata(config, datasource='vys'):
    """ Creates dict holding metadata from evla_mcast scan config object.
    Parallel structure to sdm_metadata, so this inherits some of its
    nomenclature. datasource defines expected data source (vys expected when
    using scan config)
    spworder is required for proper indexing of vys data.
    """
    logger.info('Reading metadata from config object')

    meta = {}
    meta['datasource'] = datasource
    meta['datasetId'] = config.datasetId
    meta['scan'] = config.scanNo
    meta['subscan'] = config.subscanNo
    meta['starttime_mjd'] = config.startTime
    meta['endtime_mjd_'] = config.stopTime
    meta['source'] = str(config.source)
    meta['intent'] = str(config.scan_intent)
    meta['telescope'] = str(config.telescope)
    antennas = config.get_antennas()
    meta['antids'] = [str(ant.name) for ant in antennas]
    meta['stationids'] = config.listOfStations  # TODO: check type
    meta['xyz'] = np.array([ant.xyz for ant in antennas])
    meta['radec'] = (np.radians(config.ra_deg), np.radians(config.dec_deg))
    meta['dishdiameter'] = 25.  # ?
    # Keep only the subbands at the finest hardware time resolution.
    mininttime = min([sb.hw_time_res for sb in config.get_subbands()])
    subbands = [sb for sb in config.get_subbands() if sb.hw_time_res == mininttime]
    meta['inttime'] = subbands[0].hw_time_res  # assumes vys stream post-hw-integ
    if datasource == 'vys':  # hack to make consistent with vysmaw_reader app
        meta['pols_orig'] = ['A*A', 'B*B']
    else:
        meta['pols_orig'] = subbands[0].pp
    meta['spw_nchan'] = [sb.spectralChannels for sb in subbands]
    # bw presumably in MHz (hence the 1e6 factors here and below) — confirm.
    meta['spw_chansize'] = [1e6*sb.bw/sb.spectralChannels for sb in subbands]
    # meta['spw_orig'] = ['{0}-{1}'.format(sb.IFid, sb.sbid) for sb in subbands]  # this is probably redundant with spworder
    meta['spw_orig'] = list(range(len(subbands)))
    # Reference frequency of channel 0: center freq minus half the bandwidth.
    meta['spw_reffreq'] = [(sb.sky_center_freq-sb.bw/sb.spectralChannels*(sb.spectralChannels/2))*1e6 for sb in subbands]
    # meta['spworder'] = sorted([('{0}-{1}'.format(sb.IFid, sb.sbid),
    # spworder pairs each '<IF>-<index>' label with its reffreq, freq-sorted.
    meta['spworder'] = sorted([('{0}-{1}'.format(sb.IFid, sb.swIndex-1),
                                meta['spw_reffreq'][subbands.index(sb)])
                               for sb in subbands], key=lambda x: x[1])
    meta['quantization'] = ['{0}'.format(bbn.split('_')[-1])
                            for bbn in config.baseBandNames]

    return meta
def sdm_metadata(sdmfile, scan, subscan=1, bdfdir=None):
    """ Wraps Metadata call to provide immutable, attribute-filled class instance.

    Reads scan metadata from an SDM file into a dict suitable for
    Metadata(**meta). Falls back to xml-derived values when no BDF is found.
    """
    logger.info('Reading metadata from {0}, scan {1}'.format(sdmfile, scan))
    from rfpipe.util import getsdm
    sdm = getsdm(sdmfile, bdfdir=bdfdir)
    scanobj = sdm.scan(scan, subidx=subscan)

    meta = {}
    meta['datasource'] = 'sdm'
    meta['datasetId'] = os.path.basename(sdmfile.rstrip('/'))
    meta['filename'] = sdmfile
    meta['scan'] = int(scan)
    meta['subscan'] = int(subscan)
    meta['bdfdir'] = bdfdir
    # meta['configid'] = scanobj.configDescriptionId
    bdfstr = scanobj.bdf.fname
    # 'X1' in the name appears to mark a missing/placeholder BDF — confirm.
    if (not os.path.exists(bdfstr)) or ('X1' in bdfstr):
        meta['bdfstr'] = None
    else:
        meta['bdfstr'] = bdfstr

    meta['starttime_mjd'] = scanobj.startMJD
    meta['nints_'] = int(scanobj.numIntegration)
    try:
        inttime = scanobj.bdf.get_integration(0).interval
        meta['inttime'] = inttime
    except (AttributeError, TypeError):
        logger.warning("No BDF found. inttime not set.")

    meta['source'] = str(scanobj.source)
    meta['intent'] = ' '.join(scanobj.intents)
    meta['telescope'] = str(sdm['ExecBlock'][0]['telescopeName']).strip()
    meta['antids'] = [str(ant) for ant in scanobj.antennas]
    meta['stationids'] = [str(station) for station in scanobj.stations]
    meta['xyz'] = np.array(scanobj.positions)

    meta['radec'] = scanobj.coordinates.tolist()  # radians
    meta['dishdiameter'] = float(str(sdm['Antenna'][0].dishDiameter).strip())
    meta['spw_orig'] = list(range(len(scanobj.spws)))
    meta['spw_nchan'] = scanobj.numchans
    meta['spw_reffreq'] = scanobj.reffreqs
    meta['spw_chansize'] = scanobj.chanwidths

    # Drop higher-resolution (narrower-channel) spws, keeping only those at
    # the coarsest channel size; require that a majority survive.
    maxchansize = max(meta['spw_chansize'])
    mask = np.array(meta['spw_chansize']) == maxchansize
    if mask.sum() < len(meta['spw_reffreq']):
        logger.info('Skipping high-resolution spws {0}'.format(np.array(meta['spw_orig'])[~mask].tolist()))
    assert mask.sum() > 0.5*len(meta['spw_reffreq']), "more than 50% spws removed"
    meta['spw_reffreq'] = np.array(meta['spw_reffreq'])[mask].tolist()
    meta['spw_nchan'] = np.array(meta['spw_nchan'])[mask].tolist()
    meta['spw_chansize'] = np.array(meta['spw_chansize'])[mask].tolist()
    meta['spw_orig'] = list(range(mask.sum()))

    try:
        meta['pols_orig'] = scanobj.bdf.spws[0].pols('cross')
    except AttributeError:
        logger.warning("No BDF found. Inferring pols from xml.")
        meta['pols_orig'] = [pol for pol in (str(sdm['Polarization'][0]
                                                 .corrType)).strip().split(' ')
                             if pol in ['XX', 'YY', 'XY', 'YX',
                                        'RR', 'LL', 'RL', 'LR',
                                        'A*A', 'A*B', 'B*A', 'B*B']]

    try:
        # spworder pairs '<baseband>-<sw index>' labels with reffreq in MHz,
        # sorted by frequency.
        meta['spworder'] = sorted(zip(['{0}-{1}'.format(spw.swbb.rstrip('_8BIT'),
                                                        spw.sw-1)
                                       for spw in scanobj.bdf.spws],
                                      np.array(scanobj.reffreqs)/1e6),
                                  key=lambda x: x[1])
        meta['quantization'] = ['{0}'.format(spw.swbb.split('_')[-1])
                                for spw in scanobj.bdf.spws]
    except AttributeError:
        logger.warning("No BDF found. spworder/quantization not defined.")

    return meta
def mock_metadata(t0, t1, nants, nspw, chans, npol, inttime_micros, scan=1,
                  subscan=1, band='S', datasource='vys', datasetid=None, antconfig='B'):
    """ Wraps Metadata call to provide immutable, attribute-filled class instance.

    Parallel structure to sdm_metadata, so this inherits some of its
    nomenclature. t0, t1 are times in mjd. Supports up to nant=27, npol=4, and
    nspw=16. chans is total number of channels over all spw (equal per spw).
    datasource is expected source of data (typically vys when mocking).
    datasetid default is "test_<t0>".
    antconfig defines extent of array and can be 'D', 'C', 'B', or 'A'.
    Returns a plain dict of metadata fields.
    """

    logger.info('Generating mock metadata')

    antconfigs = ['D', 'C', 'B', 'A']
    assert antconfig in antconfigs, 'antconfig not recognized'

    meta = {}
    meta['datasource'] = datasource
    if datasetid is None:
        datasetid = 'test_{0}'.format(t0)
    meta['datasetId'] = datasetid
    meta['scan'] = scan
    meta['subscan'] = subscan
    meta['bdfdir'] = ''
    meta['starttime_mjd'] = t0
    # NOTE(review): trailing underscore key matches the project's metadata
    # attribute naming (cf. 'nints_' elsewhere) -- keep as-is.
    meta['endtime_mjd_'] = t1
    meta['inttime'] = inttime_micros/1e6
    meta['source'] = 'testsource'
    meta['intent'] = 'OBSERVE_TARGET'
    meta['telescope'] = 'VLA'
    meta['antids'] = ['ea{0:02}'.format(ant) for ant in range(1, nants+1)]
    meta['stationids'] = ['W24', 'W04', 'W28', 'E04', 'E36', 'N12', 'N16',
                          'W12', 'N28', 'E20', 'N20', 'E28', 'E08', 'N24',
                          'W20', 'N04', 'W36', 'W16', 'E16', 'E24', 'W32',
                          'E12', 'W08', 'N08', 'E32', 'N36', 'N32'][:nants]
    # ITRF antenna positions for the full 27-element VLA (B configuration).
    xyz = np.array([[-1604008.7444, -5042135.8251, 3553403.7108],
                    [-1601315.9005, -5041985.30747, 3554808.311],
                    [-1604865.6575, -5042190.032, 3552962.3635],
                    [-1601068.806, -5042051.9327, 3554824.8388],
                    [-1596127.7308, -5045193.7421, 3552652.4197],
                    [-1601110.022, -5041488.0826, 3555597.4446],
                    [-1601061.9544, -5041175.8753, 3556058.0267],
                    [-1602044.9123, -5042025.8014, 3554427.8357],
                    [-1600863.6922, -5039885.3167, 3557965.3178],
                    [-1599340.8001, -5043150.963, 3554065.2315],
                    [-1601004.6988, -5040802.801, 3556610.1493],
                    [-1597899.8959, -5044068.6847, 3553432.4502],
                    [-1600801.9314, -5042219.3826, 3554706.4294],
                    [-1600930.0836, -5040316.3864, 3557330.39],
                    [-1603249.6721, -5042091.4281, 3553797.7842],
                    [-1601173.9647, -5041902.6458, 3554987.5342],
                    [-1606841.961, -5042279.6752, 3551913.0214],
                    [-1602592.8535, -5042054.9924, 3554140.7028],
                    [-1599926.1041, -5042772.9772, 3554319.8011],
                    [-1598663.082, -5043581.3912, 3553767.0141],
                    [-1605808.6341, -5042230.084, 3552459.1978],
                    [-1600416.518, -5042462.4305, 3554536.0417],
                    [-1601614.0832, -5042001.6569, 3554652.5059],
                    [-1601147.9425, -5041733.8336, 3555235.947],
                    [-1597053.1244, -5044604.675, 3553058.9927],
                    [-1600690.6125, -5038758.7161, 3559632.0571],
                    [-1600781.0607, -5039347.4391, 3558761.5271]])[:nants]
    # Scale antenna offsets about the array center: each step between VLA
    # configurations (D->C->B->A) changes the array extent by a factor of 3.
    dxyz = xyz - xyz.mean(axis=0)
    scale = antconfigs.index(antconfig) - 2  # referenced to B config
    meta['xyz'] = dxyz * (3**scale) + xyz.mean(axis=0)

    meta['radec'] = [0., 0.]
    meta['dishdiameter'] = 25

    # set up spw
    meta['spw_orig'] = list(range(16))[:nspw]
    # Receiver band -> (low edge, high edge, channel size) in Hz.
    bandparams = {'L': (1e9, 2e9, 1e6),
                  'S': (2e9, 4e9, 2e6),
                  'C': (4e9, 8e9, 2e6),
                  'X': (8e9, 12e9, 2e6),
                  'Ku': (12e9, 18e9, 2e6),
                  'K': (18e9, 26.5e9, 2e6),
                  'Ka': (26.5e9, 30e9, 2e6),
                  'Q': (40e9, 50e9, 2e6)}
    if band not in bandparams:
        logger.warning("band ({0}) not recognized. Assuming S.".format(band))
        # Bug fix: the old fallback set only (low, high) and left chansize
        # undefined, raising NameError at the spw_chansize assignment below.
        band = 'S'
    low, high, chansize = bandparams[band]

    meta['spw_reffreq'] = np.linspace(low, high, 16)[:nspw]
    meta['spw_chansize'] = [chansize]*nspw
    chanperspw = chans//nspw
    meta['spw_nchan'] = [chanperspw]*nspw
    if datasource == 'vys':  # hack to make consistent with vysmaw_reader app
        meta['pols_orig'] = ['A*A', 'B*B']
    else:
        if npol == 4:
            meta['pols_orig'] = ['A*A', 'A*B', 'B*A', 'B*B']
        elif npol == 2:
            meta['pols_orig'] = ['A*A', 'B*B']
        else:
            # NOTE(review): 'pols_orig' stays unset on this path, which will
            # surface as a KeyError downstream -- confirm whether this should
            # raise a ValueError instead.
            logger.warning("npol must be 2 or 4 (autos or full pol)")
    meta['spworder'] = sorted([('{0}-{1}'.format('AC1', sbid),
                               meta['spw_reffreq'][sbid])
                              for sbid in range(nspw)], key=lambda x: x[1])

    return meta
def get_bdfdir(sdmfile, sdmscan):
    """ Get bdfdir by parsing sdmfile for internal or lustre bdf locations.

    Requires an SDM file path and a scan number. Falls back to the realfast
    lustre BDF area when the scan's default BDF location does not exist.
    """

    from rfpipe.util import getsdm

    sdm = getsdm(sdmfile)
    scan = sdm.scan(sdmscan)
    if not os.path.exists(scan.bdfdir):
        # NOTE(review): this assigns sdm.bdfdir, presumably so that
        # scan.bdfdir (read below) re-derives its value from the parent SDM
        # object -- confirm against sdmpy; otherwise the return value is
        # unchanged by this branch.
        sdm.bdfdir='/lustre/evla/wcbe/data/realfast'

    return scan.bdfdir
def reffreq_to_band(reffreqs, edge=5e8):
    """ Given list of reffreqs, return name of band that contains all of them.
    edge defines frequency edge (Hz) of padding around each nominal band.
    Returns None when no single band covers every reference frequency.
    """

    band_ranges = (('L', 1e9, 2e9), ('S', 2e9, 4e9),
                   ('C', 4e9, 8e9), ('X', 8e9, 12e9),
                   ('Ku', 12e9, 18e9), ('K', 18e9, 26.5e9),
                   ('Ka', 26.5e9, 30e9), ('Q', 40e9, 50e9))

    # A band matches when every reffreq lies in its edge-padded range;
    # bands are checked low-to-high and the first match wins.
    for name, low, high in band_ranges:
        if all(low - edge <= reffreq < high + edge for reffreq in reffreqs):
            return name

    return None
def sdmband(sdmfile, sdmscan, bdfdir=None):
    """ Read metadata from sdm and return band as string.
    """

    metadata = make_metadata(sdmfile=sdmfile, sdmscan=sdmscan, bdfdir=bdfdir)

    return reffreq_to_band(metadata.spw_reffreq)
def oldstate_metadata(d, scan=None, bdfdir=None):
    """ Parses old state function ("d", a dictionary) into new metadata instance.

    Note: d from a merged candidate file will have some parameters defined by
    the last scan.
    If scan is None, it is assumed d is from a single scan, not merged.
    """

    meta = {'datasource': 'sdm',
            'datasetId': d['fileroot'],
            'subscan': 1,
            'bdfdir': bdfdir}

    # Merged files index start times per scan; single-scan files do not.
    if scan is None:
        meta['starttime_mjd'] = d['starttime_mjd']
        meta['scan'] = d['scan']
    else:
        meta['starttime_mjd'] = d['starttime_mjddict'][scan]
        meta['scan'] = scan

    meta['inttime'] = d['inttime']
    meta['nints_'] = d['nints']
    meta['source'] = str(d['source'])
    meta['telescope'] = 'VLA'
    # ** test that these are the same as what we expected with rtpipe **
    meta['antids'] = ['ea' + str(ant) for ant in d['ants']]
#    meta['xyz'] = #
    meta['radec'] = d['radec']  # ** for last scan! **
    meta['dishdiameter'] = d['dishdiameter']
    for key in ('spw_orig', 'spw_nchan', 'spw_reffreq', 'spw_chansize',
                'pols_orig'):
        meta[key] = d[key]

    logger.info('Read metadata from old state dictionary for scan {0}'
                .format(scan))

    return meta
| |
#!/usr/bin/python
#----------------------------------------------------------------------------#
# #
# ozz-animation is hosted at http://github.com/guillaumeblanc/ozz-animation #
# and distributed under the MIT License (MIT). #
# #
# Copyright (c) 2015 Guillaume Blanc #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the "Software"), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL #
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
#----------------------------------------------------------------------------#
# CMake python helper script.
import subprocess
import multiprocessing
import shutil
import sys
import os
import re
from functools import partial
# Build global path variables.
root = os.path.abspath(os.path.join(os.getcwd(), '.'))  # invocation directory (project root)
build_dir = os.path.join(root, 'build')        # native out-of-source build tree
build_dir_cc = os.path.join(root, 'build-cc')  # Emscripten cross-compile build tree
cmake_cache_file = os.path.join(build_dir, 'CMakeCache.txt')  # parsed to detect the generator
config = 'Release'            # current CMake build configuration (see SelecConfig)
generators = {0: 'default'}   # menu index -> generator name (filled by FindGenerators)
generator = generators[0]     # currently selected generator
emscripten_path = os.environ.get('EMSCRIPTEN')  # None when the Emscripten SDK is absent
def ValidateCMake():
    """Return True when the 'cmake' executable can be launched."""
    try:
        # Run cmake with its output swallowed, purely to probe availability.
        probe = subprocess.Popen(['cmake'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        probe.communicate()
    except OSError:
        print("CMake is not installed or properly setup. Please visit www.cmake.org.")
        return False
    print("CMake is installed and setup properly.")
    return True
def CheckEmscripten():
    """Return True when the Emscripten toolchain ('emcc') is usable."""
    # Emscripten support is optional; skip the probe when the env var is unset.
    if emscripten_path is None:
        return False
    try:
        # Run emcc with its output swallowed, purely to probe availability.
        probe = subprocess.Popen(['emcc'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        probe.communicate()
    except OSError:
        print("Emscripten is not installed or properly setup.")
        return False
    print("Emscripten is installed and setup properly.")
    return True
def MakeBuildDir(_build_dir = build_dir):
    """Create the out-of-source build directory when it does not exist yet."""
    print('Creating out-of-source build directory: "%s".' % _build_dir)
    if not os.path.exists(_build_dir):
        os.makedirs(_build_dir)
    return True
def CleanBuildDir():
    """Delete both build trees (native and cross-compile), when present."""
    targets = ((build_dir, "out-of-source build directory"),
               (build_dir_cc, "out-of-source cross compilation build directory"))
    for directory, label in targets:
        print('Deleting %s: "%s".' % (label, directory))
        if os.path.exists(directory):
            shutil.rmtree(directory)
    return True
def Configure():
    """Run the CMake configuration step in build_dir.

    On success, refreshes the module-level 'generator' from the new cache.
    """
    print("Configuring build project.")
    command = ['cmake', '-D', 'CMAKE_BUILD_TYPE=' + config]
    global generator
    # Only force a generator when the user selected one explicitly.
    if generator != 'default':
        command += ['-G', generator]
    command.append(root)
    proc = subprocess.Popen(command, cwd=build_dir)
    proc.wait()
    if proc.returncode != 0:
        print("Configuration failed.")
        return False
    print("Configuration succeeded.")
    # Re-detect the generator now that a CMake cache exists.
    generator = DetectGenerator()
    return True
def ConfigureCC():
    """Run the CMake configuration step for the Emscripten cross-compile build.

    Returns True on success. On success, also refreshes the module-level
    'generator' from the CMake cache (matching Configure()).
    """
    print("Configuring cross compilation build project.")
    options = ['cmake']
    options += ['-D', 'CMAKE_BUILD_TYPE=' + config]
    options += ['-D', 'CMAKE_TOOLCHAIN_FILE=' + emscripten_path + '/cmake/Platform/Emscripten.cmake']
    # Point the cross build at the native tools produced by the host build.
    options += ['-D', 'dae2anim_DIR=' + build_dir]
    options += ['-D', 'dae2skel_DIR=' + build_dir]
    options += ['-G', 'MinGW Makefiles']
    options += [root]
    config_process = subprocess.Popen(options, cwd=build_dir_cc)
    config_process.wait()
    if(config_process.returncode != 0):
        print("Configuration failed.")
        return False
    print("Configuration succeeded.")
    # Updates generator once configuration is complete.
    # Bug fix: the 'global' declaration was missing here, so this assignment
    # used to create a dead local instead of updating the module-level
    # generator (Configure() declares it correctly).
    global generator
    generator = DetectGenerator()
    return True
def Build(_build_dir = build_dir):
    """Drive 'cmake --build' on the given build directory."""
    print("Building project.")
    command = ['cmake', '--build', _build_dir, '--config', config, '--use-stderr']
    # Makefile generators accept an explicit parallel job count after '--'.
    if "Unix Makefiles" in generator:
        command += ['--', '-j' + str(multiprocessing.cpu_count())]
    proc = subprocess.Popen(command, cwd=_build_dir)
    proc.wait()
    if proc.returncode != 0:
        print("Build failed.")
        return False
    print("Build succeeded.")
    return True
def Test():
    """Run ctest over the configured build; True when all tests pass."""
    print("Running unit tests.")
    command = ['ctest', '--output-on-failure',
               '-j' + str(multiprocessing.cpu_count()),
               '--build-config', config]
    proc = subprocess.Popen(command, cwd=build_dir)
    proc.wait()
    if proc.returncode != 0:
        print("Testing failed.")
        return False
    print("Testing succeeded.")
    return True
def PackSources(_type):
    """Build a source package with the given CPack generator (ZIP, TBZ2...)."""
    print("Packing sources.")
    command = ['cpack', '-G', _type, '--config', 'CPackSourceConfig.cmake']
    proc = subprocess.Popen(command, cwd=build_dir)
    proc.wait()
    if proc.returncode != 0:
        print("Packing sources of type " + _type + " failed.")
        return False
    print("Packing sources of type " + _type + " succeeded.")
    return True
def PackBinaries(_type, _build_dir = build_dir):
    """Build a binary package with the given CPack generator in _build_dir."""
    print("Packing binaries.")
    command = ['cpack', '-G', _type, '-C', config]
    proc = subprocess.Popen(command, cwd=_build_dir)
    proc.wait()
    if proc.returncode != 0:
        print("Packing binaries of type " + _type + " failed.")
        return False
    print("Packing binaries of type " + _type + " succeeded.")
    return True
def SelecConfig():
    """Prompt the user to pick the CMake build configuration.

    NOTE: this script is Python 2 ('iteritems', 'raw_input', and the
    print-statement '%' idiom below).
    """
    configs = {
        1: 'Debug',
        2: 'Release',
        3: 'RelWithDebInfo',
        4: 'MinSizeRel'}
    while True:
        print("Select build configuration:")
        # Python 2: the print statement applies '%' to the parenthesized string.
        for num, message in sorted(configs.iteritems()):
            print("%d: %s") % (num, message)

        # Get input and check validity
        try:
            answer = int(raw_input("Enter a value: "))
        except:
            # Non-numeric input: re-display the menu.
            continue
        if not answer in configs:
            continue

        # Affect global configuration variable
        global config
        config = configs[answer]
        return True
def FindGenerators():
    """Populate the global 'generators' dict by parsing cmake's usage text."""
    # Finds all generators outputted from cmake usage
    process = subprocess.Popen(['cmake'], stdout=subprocess.PIPE)
    stdout = process.communicate()[0]
    # Only parse the trailing "Generators" section of the help output;
    # each entry there looks like "  Name  = Description".
    sub_stdout = stdout[stdout.rfind('Generators'):]
    matches = re.findall(r"\s*(.+)\s*=.+", sub_stdout, re.MULTILINE)

    # Fills generators list
    global generators
    for match in matches:
        generator_name = match.strip()
        generators[len(generators)] = generator_name
        # Appends also Win64 option if generator is VS
        if "Visual Studio" in generator_name:
            generators[len(generators)] = generator_name + " Win64"
def FindInCache(_regex):
    """Search the CMake cache file for _regex.

    Returns the re match object, or None when the cache file cannot be read
    (i.e. the project has not been configured yet).
    """
    try:
        # Fixes: 'with' guarantees the file handle is closed (the original
        # leaked it), and only I/O errors are swallowed rather than every
        # exception (the bare 'except:' used to hide real bugs).
        with open(cmake_cache_file) as cache_file:
            return re.search(_regex, cache_file.read())
    except (IOError, OSError):
        return None
def DetectGenerator():
    """Return the generator recorded in the CMake cache, or 'default'."""
    match = FindInCache(r"CMAKE_GENERATOR:INTERNAL=(.*)")
    if match:
        global generators
        # NOTE(review): 'generator' is declared global here but never
        # assigned in this function -- the declaration is dead code.
        global generator
        # Only accept generators that are also present in the known list.
        for num, message in sorted(generators.iteritems()):
            if match.group(1) == message:
                return message
    return 'default'
def SelecGenerator():
    """Prompt for a CMake generator; may clean the build dir when it changes.

    NOTE: Python 2 script ('iteritems', 'raw_input', print-statement '%').
    """
    global generators
    while True:
        print("Select generator:")
        for num, message in sorted(generators.iteritems()):
            print("%d: %s") % (num, message)

        # Get input and check validity
        try:
            answer = int(raw_input("Enter a value: "))
        except:
            # Non-numeric input: re-display the menu.
            continue
        if not answer in generators:
            continue

        # Check if this is the current generator
        current_generator = DetectGenerator()
        if current_generator == 'default':
            # No cache yet: nothing to clean, just record the choice.
            global generator
            generator = generators[answer]
            return True
        if current_generator != generators[answer]:
            print("Selected generator '%s' is different from the current one '%s'.") % (generators[answer], current_generator)
            clean = raw_input("Do you want to clean build directory to apply the change? (y/n): ") == "y"
            if clean:
                # The 'global generator' declaration above applies to the
                # whole function body, so this assignment is global too.
                generator = generators[answer]
                return CleanBuildDir()
        return True
def ClearScreen():
    """Clear the console using the platform-appropriate shell command."""
    if os.name == 'nt':
        os.system('cls')
    else:
        os.system('clear')
def Exit():
    """Terminate the script immediately with a success exit code."""
    sys.exit(0)
    # Unreachable; kept so the signature matches the other menu commands.
    return True
def main():
    """Interactive menu loop: validate tooling, then dispatch build commands.

    NOTE: Python 2 only ('raw_input', 'iteritems', 'except Exception, e',
    and the print-statement '%' idiom).
    """
    # Checks CMake installation is correct.
    if not ValidateCMake():
        return

    # Emscripten is optional
    CheckEmscripten()

    # Detects available generators
    FindGenerators()

    # Update current generator
    print("DetectGenerator")
    global generator
    generator = DetectGenerator()

    # Menu entries: key -> [label, ordered list of command callables].
    # A command returning False aborts the remaining commands of the entry.
    options = {
        '1': ["Build", [MakeBuildDir, Configure, Build]],
        '2': ["Run unit tests", [MakeBuildDir, Configure, Build, Test]],
        '3': ["Execute CMake generation step (don't build)", [MakeBuildDir, Configure]],
        '4': ["Clean out-of-source build directory\n ------------------", [CleanBuildDir]],
        '5': ["Pack binaries", [MakeBuildDir, Configure, Build, partial(PackBinaries, "ZIP"), partial(PackBinaries, "TBZ2")]],
        '6': ["Pack sources\n ------------------", [MakeBuildDir, Configure, partial(PackSources, "ZIP"), partial(PackSources, "TBZ2")]],
        '7': ["Select build configuration", [SelecConfig]],
        '8': ["Select cmake generator\n ------------------", [SelecGenerator]],
        '9': ["Exit\n------------------", [Exit]]}

    # Adds emscripten
    global emscripten_path
    if emscripten_path != None:
        options['1a'] = ["Build emscripten", [MakeBuildDir, Configure, Build, partial(MakeBuildDir, build_dir_cc), ConfigureCC, partial(Build, build_dir_cc)]]
        options['5a'] = ["Pack emscripten binaries", [MakeBuildDir, Configure, Build, partial(MakeBuildDir, build_dir_cc), ConfigureCC, partial(Build, build_dir_cc), partial(PackBinaries, "ZIP", build_dir_cc)]]

    while True:
        # Displays options
        ClearScreen()
        print("ozz CMake build helper tool")
        print("")
        # Python 2: the print statement applies '%' to the parenthesized string.
        print("Selected build configuration: %s") % config
        print("Selected generator: %s") % generator
        print("")
        print("Choose an option:")
        print("------------------")
        for key, message in sorted(options.iteritems()):
            print(" %s: %s") % (key, message[0])

        # Get input and check validity
        answer = raw_input("Enter a value: ")
        if not answer in options:
            continue

        # Execute command in a try catch to avoid crashes and allow retries.
        ClearScreen()
        try:
            for command in options[answer][1]:
                if command():
                    print("\nExecution success.\n")
                else:
                    print("\nExecution failed.\n")
                    break
        except Exception, e:
            print("\nAn error occured during script execution: %s\n") % e

        raw_input("Press enter to continue...")

    return 0
# Run the interactive helper when invoked as a script.
if __name__ == '__main__':
    main()
| |
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.api.v2 import attributes as attr
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import db_base_plugin_v2
from neutron.db import portsecurity_db
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import portsecurity as psec
from neutron.manager import NeutronManager
from neutron.tests.unit import test_db_plugin
# Dotted path to the stub plugin defined below; loaded by the DB test case.
DB_PLUGIN_KLASS = ('neutron.tests.unit.test_extension_allowedaddresspairs.'
                   'AllowedAddressPairTestPlugin')
class AllowedAddressPairTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
    """Base test case; records whether the active plugin offers port-security."""

    def setUp(self, plugin=None, ext_mgr=None):
        super(AllowedAddressPairTestCase, self).setUp(plugin)

        # Tests needing the port-security extension skip themselves when the
        # plugin under test does not advertise it.
        supported = NeutronManager.get_plugin().supported_extension_aliases
        self._skip_port_security = 'port-security' not in supported
class AllowedAddressPairTestPlugin(portsecurity_db.PortSecurityDbMixin,
                                   db_base_plugin_v2.NeutronDbPluginV2,
                                   addr_pair_db.AllowedAddressPairsMixin):

    """Test plugin that implements necessary calls on create/delete port for
    associating ports with port security and allowed address pairs.
    """

    supported_extension_aliases = ["allowed-address-pairs"]

    def create_port(self, context, port):
        # Create the base port and its allowed address pairs inside one DB
        # transaction so a validation failure rolls everything back.
        p = port['port']
        with context.session.begin(subtransactions=True):
            neutron_db = super(AllowedAddressPairTestPlugin, self).create_port(
                context, port)
            p.update(neutron_db)
            if attr.is_attr_set(p.get(addr_pair.ADDRESS_PAIRS)):
                self._process_create_allowed_address_pairs(
                    context, p,
                    p[addr_pair.ADDRESS_PAIRS])
            else:
                p[addr_pair.ADDRESS_PAIRS] = None

        return port['port']

    def update_port(self, context, id, port):
        # Record what the request changes before delegating to the base class,
        # since the request body is mutated below.
        changed_fixed_ips = 'fixed_ips' in port['port']
        delete_addr_pairs = self._check_update_deletes_allowed_address_pairs(
            port)
        has_addr_pairs = self._check_update_has_allowed_address_pairs(port)

        with context.session.begin(subtransactions=True):
            ret_port = super(AllowedAddressPairTestPlugin, self).update_port(
                context, id, port)
            # copy values over - but not fixed_ips
            port['port'].pop('fixed_ips', None)
            ret_port.update(port['port'])

            if (delete_addr_pairs or has_addr_pairs):
                # delete address pairs and re-add them
                self._delete_allowed_address_pairs(context, id)
                self._process_create_allowed_address_pairs(
                    context, ret_port,
                    ret_port[addr_pair.ADDRESS_PAIRS])
            elif changed_fixed_ips:
                # Only fixed IPs changed: re-validate that they do not
                # collide with the existing address pairs.
                self._check_fixed_ips_and_address_pairs_no_overlap(context,
                                                                   ret_port)

        return ret_port
class AllowedAddressPairDBTestCase(AllowedAddressPairTestCase):
    """Runs the allowed-address-pairs suite against the stub DB plugin."""

    def setUp(self, plugin=None, ext_mgr=None):
        chosen_plugin = plugin or DB_PLUGIN_KLASS
        super(AllowedAddressPairDBTestCase,
              self).setUp(plugin=chosen_plugin, ext_mgr=ext_mgr)
class TestAllowedAddressPairs(AllowedAddressPairDBTestCase):
    """API tests for the allowed-address-pairs extension.

    Covers create/update/removal of address pairs, input validation
    failures (bad MAC/IP, duplicates, extra keys), and interaction with
    the port-security extension.
    """

    def test_create_port_allowed_address_pairs(self):
        # Pairs supplied at create time are echoed back on the port.
        with self.network() as net:
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '10.0.0.1'}]
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=(addr_pair.ADDRESS_PAIRS,),
                                    allowed_address_pairs=address_pairs)
            port = self.deserialize(self.fmt, res)
            self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS],
                             address_pairs)
            self._delete('ports', port['port']['id'])

    def test_create_port_security_true_allowed_address_pairs(self):
        # Address pairs may coexist with port security enabled.
        if self._skip_port_security:
            self.skipTest("Plugin does not implement port-security extension")

        with self.network() as net:
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '10.0.0.1'}]
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=('port_security_enabled',
                                              addr_pair.ADDRESS_PAIRS,),
                                    port_security_enabled=True,
                                    allowed_address_pairs=address_pairs)
            port = self.deserialize(self.fmt, res)
            self.assertEqual(port['port'][psec.PORTSECURITY], True)
            self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS],
                             address_pairs)
            self._delete('ports', port['port']['id'])

    def test_create_port_security_false_allowed_address_pairs(self):
        # Disabling port security while supplying pairs must conflict (409).
        if self._skip_port_security:
            self.skipTest("Plugin does not implement port-security extension")

        with self.network() as net:
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '10.0.0.1'}]
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=('port_security_enabled',
                                              addr_pair.ADDRESS_PAIRS,),
                                    port_security_enabled=False,
                                    allowed_address_pairs=address_pairs)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, 409)

    def test_create_port_bad_mac(self):
        # Malformed MAC addresses are rejected with 400.
        address_pairs = [{'mac_address': 'invalid_mac',
                          'ip_address': '10.0.0.1'}]
        self._create_port_with_address_pairs(address_pairs, 400)

    def test_create_port_bad_ip(self):
        # Malformed IP addresses are rejected with 400.
        address_pairs = [{'mac_address': '00:00:00:00:00:01',
                          'ip_address': '10.0.0.1222'}]
        self._create_port_with_address_pairs(address_pairs, 400)

    def test_create_missing_ip_field(self):
        # 'ip_address' is mandatory in every pair.
        address_pairs = [{'mac_address': '00:00:00:00:00:01'}]
        self._create_port_with_address_pairs(address_pairs, 400)

    def test_create_duplicate_mac_ip(self):
        # Identical pairs in one request are rejected.
        address_pairs = [{'mac_address': '00:00:00:00:00:01',
                          'ip_address': '10.0.0.1'},
                         {'mac_address': '00:00:00:00:00:01',
                          'ip_address': '10.0.0.1'}]
        self._create_port_with_address_pairs(address_pairs, 400)

    def test_create_port_extra_args(self):
        # Unknown keys in a pair are rejected.
        address_pairs = [{'mac_address': '00:00:00:00:00:01',
                          'ip_address': '10.0.0.1',
                          'icbb': 'agreed'}]
        self._create_port_with_address_pairs(address_pairs, 400)

    def _create_port_with_address_pairs(self, address_pairs, ret_code):
        # Helper: create a port with the given pairs, assert the HTTP status.
        with self.network() as net:
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=(addr_pair.ADDRESS_PAIRS,),
                                    allowed_address_pairs=address_pairs)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, ret_code)

    def test_update_add_address_pairs(self):
        # Pairs can be added to an existing port via update.
        with self.network() as net:
            res = self._create_port(self.fmt, net['network']['id'])
            port = self.deserialize(self.fmt, res)
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '10.0.0.1'}]
            update_port = {'port': {addr_pair.ADDRESS_PAIRS:
                                    address_pairs}}
            req = self.new_update_request('ports', update_port,
                                          port['port']['id'])
            port = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS],
                             address_pairs)
            self._delete('ports', port['port']['id'])

    def test_update_fixed_ip_to_address_pair_ip_fail(self):
        # A port's fixed IP may not duplicate one of its address-pair IPs.
        with self.network() as net:
            with self.subnet(network=net):
                address_pairs = [{'ip_address': '10.0.0.65'}]
                res = self._create_port(self.fmt, net['network']['id'],
                                        arg_list=(addr_pair.ADDRESS_PAIRS,),
                                        allowed_address_pairs=address_pairs)
                port = self.deserialize(self.fmt, res)['port']
                data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.65'}]}}
                req = self.new_update_request('ports', data, port['id'])
                res = req.get_response(self.api)
                self.assertEqual(res.status_int, 400)
                self._delete('ports', port['id'])

    def test_update_fixed_ip_to_address_pair_with_mac_fail(self):
        # A pair equal to the port's own MAC + fixed IP is rejected.
        with self.network() as net:
            with self.subnet(network=net):
                res = self._create_port(self.fmt, net['network']['id'])
                port = self.deserialize(self.fmt, res)['port']
                address_pairs = [
                    {'mac_address': port['mac_address'],
                     'ip_address': port['fixed_ips'][0]['ip_address']}]
                data = {'port': {addr_pair.ADDRESS_PAIRS: address_pairs}}
                req = self.new_update_request('ports', data, port['id'])
                res = req.get_response(self.api)
                self.assertEqual(res.status_int, 400)
                self._delete('ports', port['id'])

    def test_create_address_gets_port_mac(self):
        # A pair without a MAC inherits the port's own MAC address.
        with self.network() as net:
            address_pairs = [{'ip_address': '23.23.23.23'}]
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=('port_security_enabled',
                                              addr_pair.ADDRESS_PAIRS,),
                                    allowed_address_pairs=address_pairs)
            port = self.deserialize(self.fmt, res)['port']
            port_addr_mac = port[addr_pair.ADDRESS_PAIRS][0]['mac_address']
            self.assertEqual(port_addr_mac,
                             port['mac_address'])
            self._delete('ports', port['id'])

    def test_update_address_pair_to_match_fixed_ip_and_mac(self):
        # Updating pairs to mirror the port's MAC + fixed IP is rejected.
        with self.network() as net:
            with self.subnet(network=net):
                res = self._create_port(self.fmt, net['network']['id'])
                port = self.deserialize(self.fmt, res)['port']
                address_pairs = [{'mac_address': port['mac_address'],
                                  'ip_address':
                                  port['fixed_ips'][0]['ip_address']}]
                update_port = {'port': {addr_pair.ADDRESS_PAIRS:
                                        address_pairs}}
                req = self.new_update_request('ports', update_port,
                                              port['id'])
                res = req.get_response(self.api)
                self.assertEqual(res.status_int, 400)
                self._delete('ports', port['id'])

    def test_update_port_security_off_address_pairs(self):
        # Port security cannot be disabled while address pairs remain (409).
        if self._skip_port_security:
            self.skipTest("Plugin does not implement port-security extension")
        with self.network() as net:
            with self.subnet(network=net):
                address_pairs = [{'mac_address': '00:00:00:00:00:01',
                                  'ip_address': '10.0.0.1'}]
                res = self._create_port(self.fmt, net['network']['id'],
                                        arg_list=('port_security_enabled',
                                                  addr_pair.ADDRESS_PAIRS,),
                                        port_security_enabled=True,
                                        allowed_address_pairs=address_pairs)
                port = self.deserialize(self.fmt, res)
                update_port = {'port': {psec.PORTSECURITY: False}}
                # If plugin implements security groups we also need to remove
                # the security group on port.
                plugin_obj = NeutronManager.get_plugin()
                if 'security-groups' in plugin_obj.supported_extension_aliases:
                    update_port['port']['security_groups'] = []
                req = self.new_update_request('ports', update_port,
                                              port['port']['id'])
                res = req.get_response(self.api)
                self.assertEqual(res.status_int, 409)
                self._delete('ports', port['port']['id'])

    def test_create_port_remove_allowed_address_pairs(self):
        # Updating with an empty list clears all pairs.
        with self.network() as net:
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '10.0.0.1'}]
            res = self._create_port(self.fmt, net['network']['id'],
                                    arg_list=(addr_pair.ADDRESS_PAIRS,),
                                    allowed_address_pairs=address_pairs)
            port = self.deserialize(self.fmt, res)
            update_port = {'port': {addr_pair.ADDRESS_PAIRS: []}}
            req = self.new_update_request('ports', update_port,
                                          port['port']['id'])
            port = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS], [])
            self._delete('ports', port['port']['id'])
class TestAllowedAddressPairsXML(TestAllowedAddressPairs):
    """Re-runs the entire suite using XML request/response serialization."""
    fmt = 'xml'
| |
import unicodedata
from typing import Iterator, List, Optional, Tuple
from .types import Case, InvalidAcronymError
def get_rubstring_ranges(a_str: str, sub: str) -> Iterator[Tuple[int, int]]:  # noqa
    """Yield (start, end) index pairs for every occurrence of *sub* in *a_str*.

    Occurrences may overlap: the search resumes one character past each
    match's start, not past its end.
    """
    width = len(sub)
    pos = a_str.find(sub, 0)
    while pos != -1:
        yield (pos, pos + width)
        pos = a_str.find(sub, pos + 1)
def char_is_sep(a_char: str) -> bool:  # noqa: D103
    """Return True when *a_char* is a separator (not a cased letter or digit)."""
    return unicodedata.category(a_char) not in ("Lu", "Ll", "Nd")


def char_is_decimal(a_char: str) -> bool:  # noqa: D103
    """Return True when *a_char* is a decimal digit (Unicode category Nd)."""
    return unicodedata.category(a_char) == "Nd"


def char_is_lower(a_char: str) -> bool:  # noqa: D103
    """Return True when *a_char* is a lower-case letter (Unicode category Ll)."""
    return unicodedata.category(a_char) == "Ll"


def char_is_upper(a_char: str) -> bool:  # noqa: D103
    """Return True when *a_char* is an upper-case letter (Unicode category Lu)."""
    return unicodedata.category(a_char) == "Lu"


def is_upper(a_string: str) -> bool:  # noqa: D103
    """Return True for a string consisting of exactly one upper-case letter."""
    if len(a_string) != 1:
        return False
    return char_is_upper(a_string)


def is_valid_acronym(a_string: str) -> bool:  # noqa: D103
    """An acronym is valid when non-empty and free of separator characters."""
    return bool(a_string) and not any(char_is_sep(ch) for ch in a_string)
def determine_case(was_all_upper: bool, words: List[str], string: str) -> Case:
    """Determine case type of string.

    Arguments:
        was_all_upper (bool): Whether the original string was all upper-case
        words (list of str): Segmented input string
        string (str): Original input string

    Returns:
        Case: Determined case
    """
    if was_all_upper:
        return Case.UPPER
    if string.islower():
        return Case.LOWER
    if not words:
        return Case.UNKOWN

    # The first word decides the candidate styles; every following word must
    # be title- or upper-cased for either style to survive.
    is_camel = words[0].islower()
    is_pascal = words[0].istitle() or words[0].isupper()
    if is_camel or is_pascal:
        for word in words[1:]:
            capitalized = word.istitle() or word.isupper()
            is_camel = is_camel and capitalized
            is_pascal = is_pascal and capitalized
            if not capitalized:
                break

    if is_camel:
        return Case.CAMEL
    if is_pascal:
        return Case.PASCAL
    return Case.MIXED
def advanced_acronym_detection(
    s: int, i: int, words: List[str], acronyms: List[str]
) -> int:
    """Detect acronyms by checking against a list of acronyms.

    Arguments:
        s (int): Index of first letter in run
        i (int): Index of current word
        words (list of str): Segmented input string
        acronyms (list of str): List of acronyms

    Returns:
        int: Index of last letter in run
    """
    # Combine each letter into single string.
    acr_str = "".join(words[s:i])

    # List of ranges representing found acronyms.
    range_list: List[Tuple[int, int]] = []

    # Set of remaining letters.
    not_range = set(range(len(acr_str)))

    # Search for each acronym in acr_str.
    for acr in acronyms:
        for (start, end) in get_rubstring_ranges(acr_str, acr):
            # Make sure found acronym doesn't overlap with others.
            for r in range_list:
                if start < r[1] and end > r[0]:
                    break
            else:
                # for/else: no overlap was found, so claim this range and
                # mark its letters as consumed.
                range_list.append((start, end))
                for j in range(start, end):
                    not_range.remove(j)

    # Add remaining letters as ranges.
    for nr in not_range:
        range_list.append((nr, nr + 1))

    # No ranges will overlap, so it's safe to sort by lower bound,
    # which sort() will do by default.
    range_list.sort()

    # Remove original letters in word list.
    for _ in range(s, i):
        del words[s]

    # Replace them with new word grouping.
    for j in range(len(range_list)):
        r = range_list[j]
        words.insert(s + j, acr_str[r[0] : r[1]])

    return s + len(range_list) - 1
def simple_acronym_detection(s: int, i: int, words: List[str], *args) -> int:
    """Collapse the run of letters words[s:i] into a single acronym word.

    Arguments:
        s (int): Index of first letter in run
        i (int): Index of current word
        words (list of str): Segmented input string
        args: Placeholder to conform to signature of
            advanced_acronym_detection

    Returns:
        int: Index of the collapsed word (always s)
    """
    # Splice the joined letters back in place of the individual ones.
    merged = "".join(words[s:i])
    words[s:i] = [merged]
    return s
def sanitize_acronyms(unsafe_acronyms: List[str]) -> List[str]:
    """Normalize valid acronyms to upper-case.

    Arguments:
        unsafe_acronyms (list of str): Acronyms to be sanitized

    Returns:
        list of str: Sanitized acronyms

    Raises:
        InvalidAcronymError: Upon encountering an invalid acronym
    """
    cleaned: List[str] = []
    for candidate in unsafe_acronyms:
        # Reject on the first invalid entry, mirroring validation order.
        if not is_valid_acronym(candidate):
            raise InvalidAcronymError(candidate)
        cleaned.append(candidate.upper())
    return cleaned
def normalize_words(words: List[str], acronyms: List[str]) -> List[str]:
    """Normalize case of each word to PascalCase.

    Arguments:
        words (list of str): Words to normalize
        acronyms (list of str): Acronyms (already upper-cased) to preserve

    Returns:
        list of str: Normalized words
    """
    normalized = []
    for word in words:
        if word.upper() in acronyms:
            # Convert known acronyms to upper-case.
            normalized.append(word.upper())
        elif word.isupper():
            # Fallback behavior: preserve case on upper-case words.
            # Bug fix: these words were previously dropped from the output
            # (the branch had no append), contradicting the stated intent.
            normalized.append(word)
        else:
            normalized.append(word.capitalize())
    return normalized
def segment_string(string: str) -> Tuple[List[Optional[str]], str, bool]:
    """Segment string on separator into list of words.

    Walks the string character by character and splits at boundaries:
    an upper-case letter, or any transition between separator and
    non-separator characters (as decided by char_is_sep / char_is_upper,
    defined elsewhere in this module).

    Arguments:
        string (str): The string to process

    Returns:
        list of optional str: Words the string was minced to; ``None``
            entries mark where a separator run occurred
        str: The separator char intersecting words (first one found)
        bool: Whether the string was entirely upper-case
    """
    words: List[Optional[str]] = []
    separator = ""
    # Index of current character. Initially 1 because we don't
    # want to check if the 0th character is a boundary.
    curr_i = 1
    # Index of first character in the current sequence.
    seq_i = 0
    # Previous character.  Slicing (rather than indexing) yields ""
    # instead of raising IndexError on an empty string.
    prev_i = string[0:1]
    # Treat an all-caps string as lower-case, to prevent its
    # letters being counted as boundaries.
    was_upper = False
    if string.isupper():
        string = string.lower()
        was_upper = True
    # Iterate over each character, checking for boundaries, or places
    # where the string should be divided.
    while curr_i <= len(string):
        char = string[curr_i : curr_i + 1]
        split = False
        if curr_i < len(string):
            # Detect upper-case letter as boundary.
            if char_is_upper(char):
                split = True
            # Detect transition from separator to not separator.
            elif not char_is_sep(char) and char_is_sep(prev_i):
                split = True
            # Detect transition not separator to separator.
            elif char_is_sep(char) and not char_is_sep(prev_i):
                split = True
        else:
            # The loop goes one extra iteration so that it can
            # handle the remaining text after the last boundary.
            split = True
        if split:
            if not char_is_sep(prev_i):
                words.append(string[seq_i:curr_i])
            else:
                # string contains at least one separator.
                # Use the first one as the string's primary separator.
                if not separator:
                    separator = string[seq_i : seq_i + 1]
                # Use None to indicate a separator in the word list.
                words.append(None)
                # If separators weren't included in the list, then breaks
                # between upper-case sequences ("AAA_BBB") would be
                # disregarded; the letter-run detector would count them
                # as a single sequence ("AAABBB").
            seq_i = curr_i
        curr_i += 1
        prev_i = char
    return words, separator, was_upper
| |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
import os
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.resources.properties_file import PropertiesFile
from resource_management.libraries.resources.template_config import TemplateConfig
from resource_management.core.resources.system import Directory, Execute, File, Link
from resource_management.core.source import StaticFile, Template, InlineTemplate
from resource_management.libraries.functions import format
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions import Direction
from resource_management.core.logger import Logger
def kafka(upgrade_type=None):
    """Render all Kafka broker configuration files and directories.

    Builds server.properties from the 'kafka-broker' configuration,
    adjusting listeners, metrics-collector and rack properties, then
    writes env/log4j/JAAS/limits files and sets up the pid/log dir
    symlinks.

    Arguments:
        upgrade_type: non-None during a stack upgrade; switches the
            effective version to params.version.
    """
    import params
    ensure_base_directories()

    # Copy the (possibly read-only) broker config into a mutable dict.
    kafka_server_config = mutable_config_dict(params.config['configurations']['kafka-broker'])
    # This still has an issue of hostnames being alphabetically out-of-order for broker.id in HDP-2.2.
    # Starting in HDP 2.3, Kafka handles the generation of broker.id so Ambari doesn't have to.
    effective_version = params.stack_version_formatted if upgrade_type is None else format_stack_version(params.version)
    Logger.info(format("Effective stack version: {effective_version}"))

    # listeners and advertised.listeners are only added in 2.3.0.0 onwards.
    if effective_version is not None and effective_version != "" and \
       check_stack_feature(StackFeature.KAFKA_LISTENERS, effective_version):
        # Substitute the real hostname for any "localhost" placeholder.
        listeners = kafka_server_config['listeners'].replace("localhost", params.hostname)
        Logger.info(format("Kafka listeners: {listeners}"))
        kafka_server_config['listeners'] = listeners

        if params.security_enabled and params.kafka_kerberos_enabled:
            Logger.info("Kafka kerberos security is enabled.")
            # With Kerberos, advertise exactly the same listeners.
            kafka_server_config['advertised.listeners'] = listeners
            Logger.info(format("Kafka advertised listeners: {listeners}"))
        elif 'advertised.listeners' in kafka_server_config:
            advertised_listeners = kafka_server_config['advertised.listeners'].replace("localhost", params.hostname)
            kafka_server_config['advertised.listeners'] = advertised_listeners
            Logger.info(format("Kafka advertised listeners: {advertised_listeners}"))
    else:
        # Pre-2.3 stacks use the legacy host.name property instead.
        kafka_server_config['host.name'] = params.hostname

    # Wire up the Ambari Metrics collector, if one is present.
    if params.has_metric_collector:
        kafka_server_config['kafka.timeline.metrics.hosts'] = params.ams_collector_hosts
        kafka_server_config['kafka.timeline.metrics.port'] = params.metric_collector_port
        kafka_server_config['kafka.timeline.metrics.protocol'] = params.metric_collector_protocol
        kafka_server_config['kafka.timeline.metrics.truststore.path'] = params.metric_truststore_path
        kafka_server_config['kafka.timeline.metrics.truststore.type'] = params.metric_truststore_type
        kafka_server_config['kafka.timeline.metrics.truststore.password'] = params.metric_truststore_password

    kafka_data_dir = kafka_server_config['log.dirs']
    # Drop empty entries from the comma-separated dir list.
    kafka_data_dirs = filter(None, kafka_data_dir.split(","))

    # Resolve this host's rack from the parallel all_hosts/all_racks
    # lists (same index); fall back to the default rack.
    rack = "/default-rack"
    i = 0
    if len(params.all_racks) > 0:
        for host in params.all_hosts:
            if host == params.hostname:
                rack = params.all_racks[i]
                break
            i = i + 1

    kafka_server_config['broker.rack'] = rack

    Directory(kafka_data_dirs,
              mode=0755,
              cd_access='a',
              owner=params.kafka_user,
              group=params.user_group,
              create_parents=True,
              recursive_ownership=True,
              )

    PropertiesFile("server.properties",
                   dir=params.conf_dir,
                   properties=kafka_server_config,
                   owner=params.kafka_user,
                   group=params.user_group,
                   )

    File(format("{conf_dir}/kafka-env.sh"),
         owner=params.kafka_user,
         content=InlineTemplate(params.kafka_env_sh_template)
         )

    if (params.log4j_props != None):
        File(format("{conf_dir}/log4j.properties"),
             mode=0644,
             group=params.user_group,
             owner=params.kafka_user,
             content=InlineTemplate(params.log4j_props)
             )

    if params.security_enabled and params.kafka_kerberos_enabled:
        # Prefer a user-supplied JAAS template; otherwise fall back to
        # the stack's packaged template.
        if params.kafka_jaas_conf_template:
            File(format("{conf_dir}/kafka_jaas.conf"),
                 owner=params.kafka_user,
                 content=InlineTemplate(params.kafka_jaas_conf_template)
                 )
        else:
            TemplateConfig(format("{conf_dir}/kafka_jaas.conf"),
                           owner=params.kafka_user)

        if params.kafka_client_jaas_conf_template:
            File(format("{conf_dir}/kafka_client_jaas.conf"),
                 owner=params.kafka_user,
                 content=InlineTemplate(params.kafka_client_jaas_conf_template)
                 )
        else:
            TemplateConfig(format("{conf_dir}/kafka_client_jaas.conf"),
                           owner=params.kafka_user)

    # On some OS this folder could be not exists, so we will create it before pushing there files
    Directory(params.limits_conf_dir,
              create_parents=True,
              owner='root',
              group='root'
              )

    File(os.path.join(params.limits_conf_dir, 'kafka.conf'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("kafka.conf.j2")
         )

    File(os.path.join(params.conf_dir, 'tools-log4j.properties'),
         owner='root',
         group='root',
         mode=0644,
         content=Template("tools-log4j.properties.j2")
         )

    setup_symlink(params.kafka_managed_pid_dir, params.kafka_pid_dir)
    setup_symlink(params.kafka_managed_log_dir, params.kafka_log_dir)
def mutable_config_dict(kafka_broker_config):
    """Return a plain, mutable dict copy of the given broker config.

    The configuration object handed over by Ambari may be read-only,
    so its key/value pairs are copied into a fresh dictionary before
    the caller mutates it.

    Arguments:
        kafka_broker_config: mapping of broker property name -> value

    Returns:
        dict: shallow copy of the configuration
    """
    kafka_server_config = {}
    # items() works on both Python 2 and 3; the original iteritems()
    # call is Python-2-only and breaks under Python 3.
    for key, value in kafka_broker_config.items():
        kafka_server_config[key] = value
    return kafka_server_config
# Used to workaround the hardcoded pid/log dir used on the kafka bash process launcher
def setup_symlink(kafka_managed_dir, kafka_ambari_managed_dir):
    """Point the Kafka-managed dir at the Ambari-managed dir via symlink.

    If the two paths differ, any real directory at kafka_managed_dir is
    backed up, deleted and replaced by a symlink to the Ambari dir; the
    backup contents are then restored into the link target.  If the
    paths coincide but a stale symlink exists, it is replaced by a real
    directory.
    """
    import params
    backup_folder_path = None
    backup_folder_suffix = "_tmp"
    if kafka_ambari_managed_dir != kafka_managed_dir:
        if os.path.exists(kafka_managed_dir) and not os.path.islink(kafka_managed_dir):
            # Backup existing data before delete if config is changed
            # repeatedly to/from default location at any point in time,
            # as there may be relevant contents (historic logs).
            backup_folder_path = backup_dir_contents(kafka_managed_dir, backup_folder_suffix)

            Directory(kafka_managed_dir,
                      action="delete",
                      create_parents=True)

        elif os.path.islink(kafka_managed_dir) and os.path.realpath(kafka_managed_dir) != kafka_ambari_managed_dir:
            # Symlink exists but points somewhere else: drop it.
            Link(kafka_managed_dir,
                 action="delete")

        if not os.path.islink(kafka_managed_dir):
            Link(kafka_managed_dir,
                 to=kafka_ambari_managed_dir)

    elif os.path.islink(kafka_managed_dir):
        # If config is changed and coincides with the kafka managed dir,
        # remove the symlink and physically create the folder.
        Link(kafka_managed_dir,
             action="delete")
        Directory(kafka_managed_dir,
                  mode=0755,
                  cd_access='a',
                  owner=params.kafka_user,
                  group=params.user_group,
                  create_parents=True,
                  recursive_ownership=True,
                  )

    if backup_folder_path:
        # Restore backed up files to current relevant dirs if needed -
        # will be triggered only when changing to/from default path.
        for file in os.listdir(backup_folder_path):
            if os.path.isdir(os.path.join(backup_folder_path, file)):
                Execute(('cp', '-r', os.path.join(backup_folder_path, file), kafka_managed_dir),
                        sudo=True)
                Execute(("chown", "-R", format("{kafka_user}:{user_group}"), os.path.join(kafka_managed_dir, file)),
                        sudo=True)
            else:
                File(os.path.join(kafka_managed_dir, file),
                     owner=params.kafka_user,
                     content=StaticFile(os.path.join(backup_folder_path, file)))

        # Clean up backed up folder
        Directory(backup_folder_path,
                  action="delete",
                  create_parents=True)
# Uses agent temp dir to store backup files
def backup_dir_contents(dir_path, backup_folder_suffix):
    """Copy the top-level contents of dir_path into a backup folder.

    The backup folder lives under the agent temp dir, mirroring the
    original path plus the given suffix.  Sub-directories are copied
    recursively with sudo; plain files are re-created via StaticFile.

    Returns:
        str: path of the backup folder that was populated.
    """
    import params
    backup_destination_path = params.tmp_dir + os.path.normpath(dir_path) + backup_folder_suffix
    Directory(backup_destination_path,
              mode=0755,
              cd_access='a',
              owner=params.kafka_user,
              group=params.user_group,
              create_parents=True,
              recursive_ownership=True,
              )
    # Safely copy top-level contents to backup folder
    for file in os.listdir(dir_path):
        if os.path.isdir(os.path.join(dir_path, file)):
            Execute(('cp', '-r', os.path.join(dir_path, file), backup_destination_path),
                    sudo=True)
            Execute(("chown", "-R", format("{kafka_user}:{user_group}"), os.path.join(backup_destination_path, file)),
                    sudo=True)
        else:
            File(os.path.join(backup_destination_path, file),
                 owner=params.kafka_user,
                 content=StaticFile(os.path.join(dir_path, file)))

    return backup_destination_path
def ensure_base_directories():
    """Create the Kafka log, pid and conf directories if missing,
    recursively owned by the kafka user."""
    import params
    Directory([params.kafka_log_dir, params.kafka_pid_dir, params.conf_dir],
              mode=0755,
              cd_access='a',
              owner=params.kafka_user,
              group=params.user_group,
              create_parents=True,
              recursive_ownership=True,
              )
| |
import unittest
from typing import List, Tuple
from pyannotate_tools.annotations.infer import (
flatten_types,
infer_annotation,
merge_items,
remove_redundant_items,
)
from pyannotate_tools.annotations.types import (
AbstractType,
AnyType,
ARG_POS,
ARG_STAR,
ClassType,
TupleType,
UnionType,
NoReturnType,
)
class TestInfer(unittest.TestCase):
    """Tests for infer_annotation(): merging several collected type
    comments into one inferred (args, return) signature."""

    def test_simple(self):
        # type: () -> None
        self.assert_infer(['(int) -> str'], ([(ClassType('int'), ARG_POS)],
                                             ClassType('str')))

    def test_infer_union_arg(self):
        # type: () -> None
        self.assert_infer(['(int) -> None',
                           '(str) -> None'],
                          ([(UnionType([ClassType('int'),
                                        ClassType('str')]), ARG_POS)],
                           ClassType('None')))

    def test_infer_union_return(self):
        # type: () -> None
        self.assert_infer(['() -> int',
                           '() -> str'],
                          ([],
                           UnionType([ClassType('int'), ClassType('str')])))

    def test_star_arg(self):
        # type: () -> None
        self.assert_infer(['(int) -> None',
                           '(int, *bool) -> None'],
                          ([(ClassType('int'), ARG_POS),
                            (ClassType('bool'), ARG_STAR)],
                           ClassType('None')))

    def test_merge_unions(self):
        # type: () -> None
        self.assert_infer(['(Union[int, str]) -> None',
                           '(Union[str, None]) -> None'],
                          ([(UnionType([ClassType('int'),
                                        ClassType('str'),
                                        ClassType('None')]), ARG_POS)],
                           ClassType('None')))

    def test_remove_redundant_union_item(self):
        # type: () -> None
        # str + unicode collapses to Text.
        self.assert_infer(['(str) -> None',
                           '(unicode) -> None'],
                          ([(ClassType('Text'), ARG_POS)],
                           ClassType('None')))

    def test_remove_redundant_dict_item(self):
        # type: () -> None
        self.assert_infer(['(Dict[str, Any]) -> None',
                           '(Dict[str, str]) -> None'],
                          ([(ClassType('Dict', [ClassType('str'), AnyType()]), ARG_POS)],
                           ClassType('None')))

    def test_remove_redundant_dict_item_when_simplified(self):
        # type: () -> None
        self.assert_infer(['(Dict[str, Any]) -> None',
                           '(Dict[str, Union[str, List, Dict, int]]) -> None'],
                          ([(ClassType('Dict', [ClassType('str'), AnyType()]), ARG_POS)],
                           ClassType('None')))

    def test_simplify_list_item_types(self):
        # type: () -> None
        # bool is absorbed into int.
        self.assert_infer(['(List[Union[bool, int]]) -> None'],
                          ([(ClassType('List', [ClassType('int')]), ARG_POS)],
                           ClassType('None')))

    def test_simplify_potential_typed_dict(self):
        # type: () -> None
        # Fall back to Dict[x, Any] in case of a complex Dict type.
        self.assert_infer(['(Dict[str, Union[int, str]]) -> Any'],
                          ([(ClassType('Dict', [ClassType('str'), AnyType()]), ARG_POS)],
                           AnyType()))
        self.assert_infer(['(Dict[Text, Union[int, str]]) -> Any'],
                          ([(ClassType('Dict', [ClassType('Text'), AnyType()]), ARG_POS)],
                           AnyType()))
        # Not a potential TypedDict so ordinary simplification applies.
        self.assert_infer(['(Dict[str, Union[str, Text]]) -> Any'],
                          ([(ClassType('Dict', [ClassType('str'), ClassType('Text')]), ARG_POS)],
                           AnyType()))
        self.assert_infer(['(Dict[str, Union[int, None]]) -> Any'],
                          ([(ClassType('Dict', [ClassType('str'),
                                                UnionType([ClassType('int'),
                                                           ClassType('None')])]), ARG_POS)],
                           AnyType()))

    def test_simplify_multiple_empty_collections(self):
        # type: () -> None
        self.assert_infer(['() -> Tuple[List, List[x]]',
                           '() -> Tuple[List, List]'],
                          ([],
                           TupleType([ClassType('List'), ClassType('List', [ClassType('x')])])))

    def assert_infer(self, comments, expected):
        # type: (List[str], Tuple[List[Tuple[AbstractType, str]], AbstractType]) -> None
        # Shared helper: run inference on the raw comment strings and
        # compare against the expected (arg list, return type) pair.
        actual = infer_annotation(comments)
        assert actual == expected

    def test_infer_ignore_mock(self):
        # type: () -> None
        # Mock arguments are dropped when a real type is also seen.
        self.assert_infer(['(mock.mock.Mock) -> None',
                           '(str) -> None'],
                          ([(ClassType('str'), ARG_POS)],
                           ClassType('None')))

    def test_infer_ignore_mock_fallback_to_any(self):
        # type: () -> None
        # If only Mock was ever seen, fall back to Any.
        self.assert_infer(['(mock.mock.Mock) -> str',
                           '(mock.mock.Mock) -> int'],
                          ([(AnyType(), ARG_POS)],
                           UnionType([ClassType('str'), ClassType('int')])))

    def test_infer_none_argument(self):
        # type: () -> None
        self.assert_infer(['(None) -> None'],
                          ([(UnionType([ClassType('None'), AnyType()]), ARG_POS)],
                           ClassType('None')))
# Short alias to keep the test tables below compact.
CT = ClassType
class TestRedundantItems(unittest.TestCase):
    """Tests for remove_redundant_items(): dropping union members that
    are subsumed by another member."""

    def test_cannot_simplify(self):
        # type: () -> None
        # Unrelated types must be kept in both orders.
        for first, second in ((CT('str'), CT('int')),
                              (CT('List', [CT('int')]),
                               CT('List', [CT('str')])),
                              (CT('List'),
                               CT('Set', [CT('int')]))):
            assert remove_redundant_items([first, second]) == [first, second]
            assert remove_redundant_items([second, first]) == [second, first]

    def test_simplify_simple(self):
        # type: () -> None
        # The first type of each pair is subsumed by the second.
        for first, second in ((CT('str'), CT('Text')),
                              (CT('bool'), CT('int')),
                              (CT('int'), CT('float'))):
            assert remove_redundant_items([first, second]) == [second]
            assert remove_redundant_items([second, first]) == [second]

    def test_simplify_multiple(self):
        # type: () -> None
        assert remove_redundant_items([CT('Text'), CT('str'), CT('bool'), CT('int'),
                                       CT('X')]) == [CT('Text'), CT('int'), CT('X')]

    def test_simplify_generics(self):
        # type: () -> None
        # A bare generic is subsumed by its parameterized form.
        for first, second in ((CT('List'), CT('List', [CT('Text')])),
                              (CT('Set'), CT('Set', [CT('Text')])),
                              (CT('Dict'), CT('Dict', [CT('str'), CT('int')]))):
            assert remove_redundant_items([first, second]) == [second]
class TestMergeUnionItems(unittest.TestCase):
    """Tests for merge_items(): combining compatible union members
    (tuples of equal/different lengths, NoReturn elimination)."""

    def test_cannot_merge(self):
        # type: () -> None
        for first, second in ((CT('str'), CT('Text')),
                              (CT('List', [CT('int')]), CT('List', [CT('str')]))):
            assert merge_items([first, second]) == [first, second]
            assert merge_items([second, first]) == [second, first]
            assert merge_items([first, second, first]) == [first, second, first]

    def test_merge_union_of_same_length_tuples(self):
        # type: () -> None
        # Same-length tuples merge item-wise into unions.
        assert merge_items([TupleType([CT('str')]),
                            TupleType([CT('int')])]) == [TupleType([UnionType([CT('str'),
                                                                               CT('int')])])]
        assert merge_items([TupleType([CT('str')]),
                            TupleType([CT('Text')])]) == [TupleType([CT('Text')])]

    def test_merge_tuples_with_different_lengths(self):
        # type: () -> None
        # Homogeneous tuples of varying length collapse to Tuple[T, ...].
        assert merge_items([
            TupleType([CT('str')]),
            TupleType([CT('str'), CT('str')])]) == [CT('Tuple', [CT('str')])]
        assert merge_items([
            TupleType([]),
            TupleType([CT('str')]),
            TupleType([CT('str'), CT('str')])]) == [CT('Tuple', [CT('str')])]
        # Don't merge if types aren't identical
        assert merge_items([
            TupleType([CT('str')]),
            TupleType([CT('str'), CT('int')])]) == [TupleType([CT('str')]),
                                                    TupleType([CT('str'), CT('int')])]

    def test_merge_union_containing_no_return(self):
        # type: () -> None
        # NoReturn is absorbed by any other member.
        assert merge_items([CT('int'), NoReturnType()]) == [CT('int')]
        assert merge_items([NoReturnType(), CT('int')]) == [CT('int')]
class TestFlattenTypes(unittest.TestCase):
    """Tests for flatten_types(): recursively unnesting union members."""

    def test_nested_tuples(self):
        # type: () -> None
        assert flatten_types([UnionType([UnionType([CT('int'), CT('str')]), CT('X')])]) == [
            CT('int'), CT('str'), CT('X')]
| |
from collections import OrderedDict
import sys
from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured
from django.core.paginator import InvalidPage
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.utils import six
from django.utils.encoding import force_text
from django.utils.translation import ugettext, ugettext_lazy
from django.utils.http import urlencode
from django.contrib.admin import FieldListFilter
from django.contrib.admin.exceptions import (
DisallowedModelAdminLookup, DisallowedModelAdminToField,
)
from django.contrib.admin.options import IncorrectLookupParameters, IS_POPUP_VAR, TO_FIELD_VAR
from django.contrib.admin.utils import (quote, get_fields_from_path,
lookup_needs_distinct, prepare_lookup_value)
# Changelist settings
ALL_VAR = 'all'
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
ERROR_FLAG = 'e'
IGNORED_PARAMS = (
ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR, TO_FIELD_VAR)
# Text to display within change-list table cells if the value is blank.
EMPTY_CHANGELIST_VALUE = ugettext_lazy('(None)')
class ChangeList(object):
    """State for the admin change-list page: the filtered, ordered and
    paginated queryset plus everything parsed from the request's query
    string (page number, search query, filters, popup/to-field flags).

    NOTE(review): mirrors django.contrib.admin.views.main.ChangeList;
    all attributes are computed eagerly in __init__.
    """

    def __init__(self, request, model, list_display, list_display_links,
                 list_filter, date_hierarchy, search_fields, list_select_related,
                 list_per_page, list_max_show_all, list_editable, model_admin):
        self.model = model
        self.opts = model._meta
        self.lookup_opts = self.opts
        self.root_queryset = model_admin.get_queryset(request)
        self.list_display = list_display
        self.list_display_links = list_display_links
        self.list_filter = list_filter
        self.date_hierarchy = date_hierarchy
        self.search_fields = search_fields
        self.list_select_related = list_select_related
        self.list_per_page = list_per_page
        self.list_max_show_all = list_max_show_all
        self.model_admin = model_admin
        self.preserved_filters = model_admin.get_preserved_filters(request)

        # Get search parameters from the query string.
        try:
            self.page_num = int(request.GET.get(PAGE_VAR, 0))
        except ValueError:
            # Non-numeric page parameter: fall back to the first page.
            self.page_num = 0
        self.show_all = ALL_VAR in request.GET
        self.is_popup = IS_POPUP_VAR in request.GET
        to_field = request.GET.get(TO_FIELD_VAR)
        if to_field and not model_admin.to_field_allowed(request, to_field):
            raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
        self.to_field = to_field
        self.params = dict(request.GET.items())
        # PAGE_VAR / ERROR_FLAG are consumed here and must not leak
        # into filter lookups or generated query strings.
        if PAGE_VAR in self.params:
            del self.params[PAGE_VAR]
        if ERROR_FLAG in self.params:
            del self.params[ERROR_FLAG]

        if self.is_popup:
            # Inline editing is disabled inside popups.
            self.list_editable = ()
        else:
            self.list_editable = list_editable
        self.query = request.GET.get(SEARCH_VAR, '')
        self.queryset = self.get_queryset(request)
        self.get_results(request)
        if self.is_popup:
            title = ugettext('Select %s')
        else:
            title = ugettext('Select %s to change')
        self.title = title % force_text(self.opts.verbose_name)
        self.pk_attname = self.lookup_opts.pk.attname

    def get_filters_params(self, params=None):
        """
        Returns all params except IGNORED_PARAMS
        """
        if not params:
            params = self.params
        lookup_params = params.copy()  # a dictionary of the query string
        # Remove all the parameters that are globally and systematically
        # ignored.
        for ignored in IGNORED_PARAMS:
            if ignored in lookup_params:
                del lookup_params[ignored]
        return lookup_params

    def get_filters(self, request):
        """Build the list filter specs and validate remaining lookups.

        Returns a 4-tuple: (filter_specs, has_filters, remaining
        lookup_params, whether distinct() is needed).
        """
        lookup_params = self.get_filters_params()
        use_distinct = False

        # Reject any lookup the ModelAdmin does not explicitly allow.
        for key, value in lookup_params.items():
            if not self.model_admin.lookup_allowed(key, value):
                raise DisallowedModelAdminLookup("Filtering by %s not allowed" % key)

        filter_specs = []
        if self.list_filter:
            for list_filter in self.list_filter:
                if callable(list_filter):
                    # This is simply a custom list filter class.
                    spec = list_filter(request, lookup_params,
                                       self.model, self.model_admin)
                else:
                    field_path = None
                    if isinstance(list_filter, (tuple, list)):
                        # This is a custom FieldListFilter class for a given field.
                        field, field_list_filter_class = list_filter
                    else:
                        # This is simply a field name, so use the default
                        # FieldListFilter class that has been registered for
                        # the type of the given field.
                        field, field_list_filter_class = list_filter, FieldListFilter.create
                    if not isinstance(field, models.Field):
                        field_path = field
                        field = get_fields_from_path(self.model, field_path)[-1]

                    spec = field_list_filter_class(field, request, lookup_params,
                                                   self.model, self.model_admin, field_path=field_path)
                    # Check if we need to use distinct()
                    use_distinct = (use_distinct or
                                    lookup_needs_distinct(self.lookup_opts,
                                                          field_path))
                if spec and spec.has_output():
                    filter_specs.append(spec)

        # At this point, all the parameters used by the various ListFilters
        # have been removed from lookup_params, which now only contains other
        # parameters passed via the query string. We now loop through the
        # remaining parameters both to ensure that all the parameters are valid
        # fields and to determine if at least one of them needs distinct(). If
        # the lookup parameters aren't real fields, then bail out.
        try:
            for key, value in lookup_params.items():
                lookup_params[key] = prepare_lookup_value(key, value)
                use_distinct = (use_distinct or
                                lookup_needs_distinct(self.lookup_opts, key))
            return filter_specs, bool(filter_specs), lookup_params, use_distinct
        except FieldDoesNotExist as e:
            # Re-raise with the original traceback so the caller can
            # surface the bad lookup to the user.
            six.reraise(IncorrectLookupParameters, IncorrectLookupParameters(e), sys.exc_info()[2])

    def get_query_string(self, new_params=None, remove=None):
        """Return '?'-prefixed query string: current params overlaid with
        new_params, minus any key starting with a prefix in remove.
        A None value in new_params deletes that key."""
        if new_params is None:
            new_params = {}
        if remove is None:
            remove = []
        p = self.params.copy()
        for r in remove:
            for k in list(p):
                if k.startswith(r):
                    del p[k]
        for k, v in new_params.items():
            if v is None:
                if k in p:
                    del p[k]
            else:
                p[k] = v
        return '?%s' % urlencode(sorted(p.items()))

    def get_results(self, request):
        """Compute result counts and the page's object list, storing them
        on self (result_list, result_count, paginator, ...)."""
        paginator = self.model_admin.get_paginator(request, self.queryset, self.list_per_page)
        # Get the number of objects, with admin filters applied.
        result_count = paginator.count

        # Get the total number of objects, with no admin filters applied.
        # Perform a slight optimization:
        # full_result_count is equal to paginator.count if no filters
        # were applied
        if self.model_admin.show_full_result_count:
            if self.get_filters_params() or self.params.get(SEARCH_VAR):
                full_result_count = self.root_queryset.count()
            else:
                full_result_count = result_count
        else:
            full_result_count = None
        can_show_all = result_count <= self.list_max_show_all
        multi_page = result_count > self.list_per_page

        # Get the list of objects to display on this page.
        if (self.show_all and can_show_all) or not multi_page:
            result_list = self.queryset._clone()
        else:
            try:
                result_list = paginator.page(self.page_num + 1).object_list
            except InvalidPage:
                raise IncorrectLookupParameters

        self.result_count = result_count
        self.show_full_result_count = self.model_admin.show_full_result_count
        # Admin actions are shown if there is at least one entry
        # or if entries are not counted because show_full_result_count is disabled
        # NOTE(review): upstream Django computes
        # `not self.show_full_result_count or bool(full_result_count)`;
        # verify the missing `not` here is intentional.
        self.show_admin_actions = self.show_full_result_count or bool(full_result_count)
        self.full_result_count = full_result_count
        self.result_list = result_list
        self.can_show_all = can_show_all
        self.multi_page = multi_page
        self.paginator = paginator

    def _get_default_ordering(self):
        # ModelAdmin.ordering takes precedence over the model Meta ordering.
        ordering = []
        if self.model_admin.ordering:
            ordering = self.model_admin.ordering
        elif self.lookup_opts.ordering:
            ordering = self.lookup_opts.ordering
        return ordering

    def get_ordering_field(self, field_name):
        """
        Returns the proper model field name corresponding to the given
        field_name to use for ordering. field_name may either be the name of a
        proper model field or the name of a method (on the admin or model) or a
        callable with the 'admin_order_field' attribute. Returns None if no
        proper model field name can be matched.
        """
        try:
            field = self.lookup_opts.get_field(field_name)
            return field.name
        except models.FieldDoesNotExist:
            # See whether field_name is a name of a non-field
            # that allows sorting.
            if callable(field_name):
                attr = field_name
            elif hasattr(self.model_admin, field_name):
                attr = getattr(self.model_admin, field_name)
            else:
                attr = getattr(self.model, field_name)
            return getattr(attr, 'admin_order_field', None)

    def get_ordering(self, request, queryset):
        """
        Returns the list of ordering fields for the change list.
        First we check the get_ordering() method in model admin, then we check
        the object's default ordering. Then, any manually-specified ordering
        from the query string overrides anything. Finally, a deterministic
        order is guaranteed by ensuring the primary key is used as the last
        ordering field.
        """
        params = self.params
        ordering = list(self.model_admin.get_ordering(request)
                        or self._get_default_ordering())
        if ORDER_VAR in params:
            # Clear ordering and used params
            ordering = []
            order_params = params[ORDER_VAR].split('.')
            for p in order_params:
                try:
                    # p looks like "3" or "-3": optional '-' prefix plus
                    # an index into list_display.
                    none, pfx, idx = p.rpartition('-')
                    field_name = self.list_display[int(idx)]
                    order_field = self.get_ordering_field(field_name)
                    if not order_field:
                        continue  # No 'admin_order_field', skip it
                    # reverse order if order_field has already "-" as prefix
                    if order_field.startswith('-') and pfx == "-":
                        ordering.append(order_field[1:])
                    else:
                        ordering.append(pfx + order_field)
                except (IndexError, ValueError):
                    continue  # Invalid ordering specified, skip it.

        # Add the given query's ordering fields, if any.
        ordering.extend(queryset.query.order_by)

        # Ensure that the primary key is systematically present in the list of
        # ordering fields so we can guarantee a deterministic order across all
        # database backends.
        pk_name = self.lookup_opts.pk.name
        if not (set(ordering) & {'pk', '-pk', pk_name, '-' + pk_name}):
            # The two sets do not intersect, meaning the pk isn't present. So
            # we add it.
            ordering.append('-pk')

        return ordering

    def get_ordering_field_columns(self):
        """
        Returns an OrderedDict of ordering field column numbers and asc/desc
        """
        # We must cope with more than one column having the same underlying sort
        # field, so we base things on column numbers.
        ordering = self._get_default_ordering()
        ordering_fields = OrderedDict()
        if ORDER_VAR not in self.params:
            # for ordering specified on ModelAdmin or model Meta, we don't know
            # the right column numbers absolutely, because there might be more
            # than one column associated with that ordering, so we guess.
            for field in ordering:
                if field.startswith('-'):
                    field = field[1:]
                    order_type = 'desc'
                else:
                    order_type = 'asc'
                for index, attr in enumerate(self.list_display):
                    if self.get_ordering_field(attr) == field:
                        ordering_fields[index] = order_type
                        break
        else:
            for p in self.params[ORDER_VAR].split('.'):
                none, pfx, idx = p.rpartition('-')
                try:
                    idx = int(idx)
                except ValueError:
                    continue  # skip it
                ordering_fields[idx] = 'desc' if pfx == '-' else 'asc'
        return ordering_fields

    def get_queryset(self, request):
        """Apply filters, remaining lookups, select_related, ordering and
        search to the root queryset and return the result."""
        # First, we collect all the declared list filters.
        (self.filter_specs, self.has_filters, remaining_lookup_params,
         filters_use_distinct) = self.get_filters(request)

        # Then, we let every list filter modify the queryset to its liking.
        qs = self.root_queryset
        for filter_spec in self.filter_specs:
            new_qs = filter_spec.queryset(request, qs)
            if new_qs is not None:
                qs = new_qs

        try:
            # Finally, we apply the remaining lookup parameters from the query
            # string (i.e. those that haven't already been processed by the
            # filters).
            qs = qs.filter(**remaining_lookup_params)
        except (SuspiciousOperation, ImproperlyConfigured):
            # Allow certain types of errors to be re-raised as-is so that the
            # caller can treat them in a special way.
            raise
        except Exception as e:
            # Every other error is caught with a naked except, because we don't
            # have any other way of validating lookup parameters. They might be
            # invalid if the keyword arguments are incorrect, or if the values
            # are not in the correct type, so we might get FieldError,
            # ValueError, ValidationError, or ?.
            raise IncorrectLookupParameters(e)

        if not qs.query.select_related:
            qs = self.apply_select_related(qs)

        # Set ordering.
        ordering = self.get_ordering(request, qs)
        qs = qs.order_by(*ordering)

        # Apply search results
        qs, search_use_distinct = self.model_admin.get_search_results(
            request, qs, self.query)

        # Remove duplicates from results, if necessary
        if filters_use_distinct | search_use_distinct:
            return qs.distinct()
        else:
            return qs

    def apply_select_related(self, qs):
        # True means "select everything"; False still selects related
        # objects when a relation appears in list_display; a sequence
        # selects exactly those relations.
        if self.list_select_related is True:
            return qs.select_related()

        if self.list_select_related is False:
            if self.has_related_field_in_list_display():
                return qs.select_related()

        if self.list_select_related:
            return qs.select_related(*self.list_select_related)
        return qs

    def has_related_field_in_list_display(self):
        # True if any list_display entry is a forward foreign key.
        for field_name in self.list_display:
            try:
                field = self.lookup_opts.get_field(field_name)
            except models.FieldDoesNotExist:
                pass
            else:
                if isinstance(field.rel, models.ManyToOneRel):
                    return True
        return False

    def url_for_result(self, result):
        # Build the admin change URL for a single result row.
        pk = getattr(result, self.pk_attname)
        return reverse('admin:%s_%s_change' % (self.opts.app_label,
                                               self.opts.model_name),
                       args=(quote(pk),),
                       current_app=self.model_admin.admin_site.name)
| |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.template.defaultfilters import filesizeformat # noqa
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables # noqa
from django.http import HttpResponseRedirect
from oslo_log import log as logging
import operator
import json
import hashlib
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import validators
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.images \
import utils as image_utils
from openstack_dashboard.dashboards.admin.instances \
import utils as instance_utils
LOG = logging.getLogger(__name__)
def _image_choice_title(img):
    """Build the label shown for an image choice: name (or id) plus a
    human-readable size produced by Django's filesizeformat filter."""
    size_label = filesizeformat(img.size)
    label = img.name or img.id
    return '%s (%s)' % (label, size_label)
class RebuildInstanceForm(forms.SelfHandlingForm):
    """Rebuild a server from a newly selected image.

    Optionally takes a new root password (only offered when the
    deployment can set server passwords) and a disk-partition mode
    (only offered when the DiskConfig nova extension is available).
    """

    instance_id = forms.CharField(widget=forms.HiddenInput())

    image = forms.ChoiceField(
        label=_("Select Image"),
        widget=forms.ThemableSelectWidget(
            attrs={'class': 'image-selector'},
            data_attrs=('size', 'display-name'),
            transform=_image_choice_title))
    password = forms.RegexField(
        label=_("Rebuild Password"),
        required=False,
        widget=forms.PasswordInput(render_value=False),
        regex=validators.password_validator(),
        error_messages={'invalid': validators.password_validator_msg()})
    confirm_password = forms.CharField(
        label=_("Confirm Rebuild Password"),
        required=False,
        widget=forms.PasswordInput(render_value=False))
    disk_config = forms.ThemableChoiceField(label=_("Disk Partition"),
                                            required=False)

    def __init__(self, request, *args, **kwargs):
        super(RebuildInstanceForm, self).__init__(request, *args, **kwargs)
        instance_id = kwargs.get('initial', {}).get('instance_id')
        self.fields['instance_id'].initial = instance_id

        # Offer every image the tenant is allowed to boot from.
        images = image_utils.get_available_images(request,
                                                  request.user.tenant_id)
        choices = [(image.id, image) for image in images]
        if choices:
            choices.insert(0, ("", _("Select Image")))
        else:
            choices.insert(0, ("", _("No images available")))
        self.fields['image'].choices = choices

        # Drop the password fields entirely when passwords cannot be set.
        if not api.nova.can_set_server_password():
            del self.fields['password']
            del self.fields['confirm_password']

        try:
            if not api.nova.extension_supported("DiskConfig", request):
                del self.fields['disk_config']
            else:
                # Set our disk_config choices
                config_choices = [("AUTO", _("Automatic")),
                                  ("MANUAL", _("Manual"))]
                self.fields['disk_config'].choices = config_choices
        except Exception:
            exceptions.handle(request, _('Unable to retrieve extensions '
                                         'information.'))

    def clean(self):
        """Reject the form when the two password entries differ."""
        cleaned_data = super(RebuildInstanceForm, self).clean()
        if 'password' in cleaned_data:
            passwd = cleaned_data.get('password')
            confirm = cleaned_data.get('confirm_password')
            if passwd is not None and confirm is not None:
                if passwd != confirm:
                    raise forms.ValidationError(_("Passwords do not match."))
        return cleaned_data

    # We have to protect the entire "data" dict because it contains the
    # password and confirm_password strings.
    @sensitive_variables('data', 'password')
    def handle(self, request, data):
        """Issue the rebuild call; failures are reported and redirect."""
        instance = data.get('instance_id')
        image = data.get('image')
        # An empty password string means "do not change the password".
        password = data.get('password') or None
        disk_config = data.get('disk_config', None)
        try:
            api.nova.server_rebuild(request, instance, image, password,
                                    disk_config)
            messages.info(request, _('Rebuilding instance %s.') % instance)
        except Exception:
            redirect = reverse('horizon:admin:instances:index')
            exceptions.handle(request, _("Unable to rebuild instance."),
                              redirect=redirect)
        return True
class DecryptPasswordInstanceForm(forms.SelfHandlingForm):
    """Show an instance's encrypted password plus the key-input widgets
    needed to decrypt it.

    The actual decryption happens client-side (in the browser), so
    handle() has no server-side work to do.
    """

    instance_id = forms.CharField(widget=forms.HiddenInput())
    _keypair_name_label = _("Key Pair Name")
    _keypair_name_help = _("The Key Pair name that "
                           "was associated with the instance")
    _attrs = {'readonly': 'readonly', 'rows': 4}
    keypair_name = forms.CharField(widget=forms.widgets.TextInput(_attrs),
                                   label=_keypair_name_label,
                                   help_text=_keypair_name_help,
                                   required=False)
    _encrypted_pwd_help = _("The instance password encrypted "
                            "with your public key.")
    encrypted_password = forms.CharField(widget=forms.widgets.Textarea(_attrs),
                                         label=_("Encrypted Password"),
                                         help_text=_encrypted_pwd_help,
                                         required=False)

    def __init__(self, request, *args, **kwargs):
        super(DecryptPasswordInstanceForm, self).__init__(request,
                                                          *args,
                                                          **kwargs)
        instance_id = kwargs.get('initial', {}).get('instance_id')
        self.fields['instance_id'].initial = instance_id
        keypair_name = kwargs.get('initial', {}).get('keypair_name')
        self.fields['keypair_name'].initial = keypair_name
        try:
            result = api.nova.get_password(request, instance_id)
            if not result:
                _unavailable = _("Instance Password is not set"
                                 " or is not yet available")
                self.fields['encrypted_password'].initial = _unavailable
            else:
                self.fields['encrypted_password'].initial = result
                # Only add the key-input and decrypted-output fields when
                # an encrypted password actually exists.
                self.fields['private_key_file'] = forms.FileField(
                    label=_('Private Key File'),
                    widget=forms.FileInput())
                self.fields['private_key'] = forms.CharField(
                    widget=forms.widgets.Textarea(),
                    label=_("OR Copy/Paste your Private Key"))
                _attrs = {'readonly': 'readonly'}
                self.fields['decrypted_password'] = forms.CharField(
                    widget=forms.widgets.TextInput(_attrs),
                    label=_("Password"),
                    required=False)
        except Exception:
            redirect = reverse('horizon:admin:instances:index')
            _error = _("Unable to retrieve instance password.")
            exceptions.handle(request, _error, redirect=redirect)

    def handle(self, request, data):
        # Nothing to do server-side; decryption is done in JavaScript.
        return True
class AttachVolume(forms.SelfHandlingForm):
    """Attach an existing, unattached volume to an instance."""

    volume = forms.ChoiceField(label=_("Volume ID"),
                               help_text=_("Select a volume to attach "
                                           "to this instance."))
    device = forms.CharField(label=_("Device Name"),
                             widget=forms.HiddenInput(),
                             required=False,
                             help_text=_("Actual device name may differ due "
                                         "to hypervisor settings. If not "
                                         "specified, then hypervisor will "
                                         "select a device name."))
    instance_id = forms.CharField(widget=forms.HiddenInput())

    def __init__(self, *args, **kwargs):
        super(AttachVolume, self).__init__(*args, **kwargs)
        # Offer only volumes that are not already attached somewhere.
        candidates = kwargs.get('initial', {}).get("volume_list", [])
        choices = [(vol.id,
                    '%(name)s (%(id)s)' % {"name": vol.name, "id": vol.id})
                   for vol in candidates if not vol.attachments]
        if choices:
            choices.insert(0, ("", _("Select a volume")))
        else:
            choices.insert(0, ("", _("No volumes available")))
        self.fields['volume'].choices = choices

    def handle(self, request, data):
        instance_id = self.initial.get("instance_id", None)
        labels = dict(self.fields['volume'].choices)
        volume = labels.get(data['volume'], _("Unknown volume (None)"))
        volume_id = data.get('volume')
        device = data.get('device') or None
        try:
            attach = api.nova.instance_volume_attach(request,
                                                     volume_id,
                                                     instance_id,
                                                     device)
            messages.info(request,
                          _('Attaching volume %(vol)s to instance '
                            '%(inst)s on %(dev)s.') % {"vol": volume,
                                                       "inst": instance_id,
                                                       "dev": attach.device})
        except Exception:
            redirect = reverse('horizon:admin:instances:index')
            exceptions.handle(request,
                              _('Unable to attach volume.'),
                              redirect=redirect)
        return True
class DetachVolume(forms.SelfHandlingForm):
    """Detach one of the volumes currently attached to an instance."""

    volume = forms.ChoiceField(label=_("Volume ID"),
                               help_text=_("Select a volume to detach "
                                           "from this instance."))
    instance_id = forms.CharField(widget=forms.HiddenInput())

    def __init__(self, *args, **kwargs):
        super(DetachVolume, self).__init__(*args, **kwargs)
        # Populate instance id
        instance_id = kwargs.get('initial', {}).get("instance_id", None)
        # Populate attached volumes
        try:
            attached = api.nova.instance_volumes_list(self.request,
                                                      instance_id)
            choices = [(vol.id, '%s (%s)' % (vol.name, vol.id))
                       for vol in attached]
            if attached:
                choices.insert(0, ("", _("Select a volume")))
            else:
                choices.insert(0, ("", _("No volumes attached")))
            self.fields['volume'].choices = choices
        except Exception:
            redirect = reverse('horizon:admin:instances:index')
            exceptions.handle(self.request, _("Unable to detach volume."),
                              redirect=redirect)

    def handle(self, request, data):
        instance_id = self.initial.get("instance_id", None)
        labels = dict(self.fields['volume'].choices)
        volume = labels.get(data['volume'], _("Unknown volume (None)"))
        try:
            api.nova.instance_volume_detach(request,
                                            instance_id,
                                            data.get('volume'))
            messages.info(request,
                          _('Detaching volume %(vol)s from instance '
                            '%(inst)s.') % {"vol": volume,
                                            "inst": instance_id})
        except Exception:
            redirect = reverse('horizon:admin:instances:index')
            exceptions.handle(request,
                              _("Unable to detach volume."),
                              redirect=redirect)
        return True
class AttachInterface(forms.SelfHandlingForm):
    """Attach a new network interface (port) to an instance."""

    instance_id = forms.CharField(widget=forms.HiddenInput())
    network = forms.ThemableChoiceField(label=_("Network"))

    def __init__(self, request, *args, **kwargs):
        super(AttachInterface, self).__init__(request, *args, **kwargs)
        self.fields['network'].choices = instance_utils.network_field_data(
            request, include_empty_option=True)

    def handle(self, request, data):
        instance_id = data['instance_id']
        try:
            api.nova.interface_attach(request, instance_id,
                                      net_id=data.get('network'))
            messages.success(
                request,
                _('Attaching interface for instance %s.') % instance_id)
        except Exception:
            redirect = reverse('horizon:admin:instances:index')
            exceptions.handle(request, _("Unable to attach interface."),
                              redirect=redirect)
        return True
class DetachInterface(forms.SelfHandlingForm):
    """Detach an existing port from an instance."""

    instance_id = forms.CharField(widget=forms.HiddenInput())
    port = forms.ThemableChoiceField(label=_("Port"))

    def __init__(self, request, *args, **kwargs):
        super(DetachInterface, self).__init__(request, *args, **kwargs)
        instance_id = self.initial.get("instance_id", None)
        ports = []
        try:
            ports = api.neutron.port_list(request, device_id=instance_id)
        except Exception:
            exceptions.handle(request, _('Unable to retrieve ports '
                                         'information.'))
        # Label each port with its fixed IPs, falling back to the port id.
        choices = [(p.id,
                    ','.join(ip['ip_address'] for ip in p.fixed_ips) or p.id)
                   for p in ports]
        if choices:
            choices.insert(0, ("", _("Select Port")))
        else:
            choices.insert(0, ("", _("No Ports available")))
        self.fields['port'].choices = choices

    def handle(self, request, data):
        instance_id = data['instance_id']
        port = data.get('port')
        try:
            api.nova.interface_detach(request, instance_id, port)
            messages.success(
                request,
                _('Detached interface %(port)s for instance '
                  '%(instance)s.') % {'port': port, 'instance': instance_id})
        except Exception:
            redirect = reverse('horizon:admin:instances:index')
            exceptions.handle(request, _("Unable to detach interface."),
                              redirect=redirect)
        return True
class ReallocationInstanceForm(forms.SelfHandlingForm):
    """Reassign an instance to a different project (tenant) and user.

    ``user_data`` carries a JSON map of project id -> [(user id, name)]
    pairs so client-side script can refresh the user choices when the
    selected project changes; it is stripped from the cleaned data
    before handling.
    """

    instance_id = forms.CharField(widget=forms.HiddenInput())
    user_data = forms.CharField(widget=forms.HiddenInput())
    project = forms.ChoiceField(label=_("Project"))
    user = forms.ChoiceField(label=_("User"), required=False)

    def __init__(self, request, *args, **kwargs):
        super(ReallocationInstanceForm, self).__init__(request, *args,
                                                       **kwargs)
        initial = kwargs.get('initial', {})
        instance_id = initial.get('instance_id')
        data = self.get_user_text()
        self.fields['instance_id'] = forms.CharField(widget=forms.HiddenInput,
                                                     initial=instance_id)
        self.fields['user_data'] = forms.CharField(widget=forms.HiddenInput,
                                                   initial=data)
        self.fields['project'].choices = self.populate_project_choices(
            request, initial)
        self.fields['user'].choices = self.populate_user_choices(request,
                                                                 initial)

    def populate_project_choices(self, request, initial):
        """Return name-sorted (id, name) choices for non-service projects."""
        projects, has_more = api.keystone.tenant_list(self.request)
        # The internal 'services' project must not receive user instances.
        choices = [(project.id, project.name) for project in projects
                   if project.name != 'services']
        if choices:
            choices.insert(0, ("", _("Select a project")))
        else:
            choices.insert(0, ("", _("No project available.")))
        return sorted(choices, key=operator.itemgetter(1))

    def populate_user_choices(self, request, initial):
        """Return name-sorted (id, name) choices excluding service users."""
        users = api.keystone.user_list(self.request)
        # NOTE(review): this exclusion list differs from the one used in
        # get_user_text() ('ceilometer'/'swift' missing here) -- confirm
        # whether that asymmetry is intended.
        excluded = ['nova', 'neutron', 'cinder', 'glance', 'AdminShadow']
        choices = [(user.id, user.name) for user in users
                   if user.name not in excluded]
        if choices:
            choices.insert(0, ("", _("Select a user")))
        else:
            choices.insert(0, ("", _("No user available.")))
        return sorted(choices, key=operator.itemgetter(1))

    def get_user_text(self):
        """Build the JSON map project id -> sorted non-service users."""
        # Renamed from 'list', which shadowed the builtin.
        users_by_project = {}
        excluded = ['nova', 'neutron', 'ceilometer', 'swift', 'cinder',
                    'glance', 'AdminShadow']
        try:
            projects, has_more = api.keystone.tenant_list(self.request)
            for project in projects:
                user_list = [(u.id, u.name)
                             for u in api.keystone.user_list(self.request,
                                                             project.id)]
                # Pre-sort by id so users with equal names keep a stable,
                # id-based order after the name sort below.
                user_list.sort()
                for u in sorted(user_list, key=operator.itemgetter(1)):
                    if u[1] not in excluded:
                        users_by_project.setdefault(project.id, []).append(u)
        except Exception:
            exceptions.handle(self.request,
                              _("Unable to retrieve get user data."))
        return json.dumps(users_by_project)

    def clean(self):
        cleaned_data = super(ReallocationInstanceForm, self).clean()
        # user_data is only consumed client-side; drop it before handling.
        if cleaned_data.get("user_data", None):
            del cleaned_data['user_data']
        return cleaned_data

    def handle(self, request, data):
        """Call the reallocation API; report success or failure."""
        try:
            api.nova.reallocation(request,
                                  data["instance_id"],
                                  data['project'],
                                  data['user'])
            # Typo fix: was 'Allocation intances .'
            messages.success(request, _('Allocation instances .'))
        except Exception:
            # Bug fix: the redirect was computed but never passed on.
            redirect = reverse('horizon:admin:instances:index')
            exceptions.handle(self.request, _("Unable to allocate instance"),
                              redirect=redirect)
        return True
class CreateDevsnapshotForm(forms.SelfHandlingForm):
    """Create a named dev snapshot of an instance, then return to the
    instance's admin detail page.

    The previous no-op ``clean()`` override (it only bound an unused
    local) has been removed; the inherited ``clean()`` is sufficient.
    """

    instance_id = forms.CharField(widget=forms.HiddenInput())
    name = forms.CharField(label=_("Name"), required=False)

    def __init__(self, request, *args, **kwargs):
        super(CreateDevsnapshotForm, self).__init__(request, *args, **kwargs)
        initial = kwargs.get('initial', {})
        instance_id = initial.get('instance_id')
        self.fields['instance_id'] = forms.CharField(widget=forms.HiddenInput,
                                                     initial=instance_id)

    def handle(self, request, data):
        """POST the snapshot request; redirect back to the instance page."""
        kwargs = {'snapshot': {
            'instance_id': data['instance_id'],
            'name': data['name']}}
        try:
            api.nova.create_dev_snapshot(request, **kwargs)
            return HttpResponseRedirect('/dashboard/admin/%s'
                                        % data.get('instance_id', None))
        except Exception:
            msg = _('Failed to create dev_snapshot. ')
            redirect = reverse('horizon:admin:instances:index')
            exceptions.handle(request, msg, redirect=redirect)
class DeleteDevsnapshotForm(forms.SelfHandlingForm):
    """Delete a named dev snapshot of an instance."""

    instance_id = forms.CharField(widget=forms.HiddenInput())
    name = forms.CharField(label=_("Name"),
                           widget=forms.TextInput(attrs={'readonly': 'readonly'}))

    def __init__(self, request, *args, **kwargs):
        super(DeleteDevsnapshotForm, self).__init__(request, *args, **kwargs)
        target = kwargs.get('initial', {}).get('instance_id')
        self.fields['instance_id'] = forms.CharField(widget=forms.HiddenInput,
                                                     initial=target)

    def handle(self, request, data):
        try:
            api.nova.delete_dev_snapshot(request, data['instance_id'],
                                         data['name'])
            messages.success(request,
                             _('The snapshot is successful delete .'))
            return HttpResponseRedirect('/dashboard/admin/%s'
                                        % data.get('instance_id', None))
        except Exception:
            redirect = reverse('horizon:admin:instances:index')
            exceptions.handle(request, _('Failed to delete dev_snapshot. '),
                              redirect=redirect)
class SetDevsnapshotForm(forms.SelfHandlingForm):
    """Mark a named dev snapshot as the planned snapshot."""

    instance_id = forms.CharField(widget=forms.HiddenInput())
    name = forms.CharField(label=_("Name"),
                           widget=forms.TextInput(attrs={'readonly': 'readonly'}))

    def __init__(self, request, *args, **kwargs):
        super(SetDevsnapshotForm, self).__init__(request, *args, **kwargs)
        target = kwargs.get('initial', {}).get('instance_id')
        self.fields['instance_id'] = forms.CharField(widget=forms.HiddenInput,
                                                     initial=target)

    def handle(self, request, data):
        try:
            api.nova.set_dev_snapshot(request, data['instance_id'],
                                      data['name'])
            messages.success(request,
                             _('set this devsnapshot for plan devsnapshot .'))
            return HttpResponseRedirect('/dashboard/admin/%s'
                                        % data.get('instance_id', None))
        except Exception:
            redirect = reverse('horizon:admin:instances:index')
            exceptions.handle(request, _('Failed to set dev_snapshot. '),
                              redirect=redirect)
class RevertDevsnapshotForm(forms.SelfHandlingForm):
    """Revert an instance to a named dev snapshot."""

    instance_id = forms.CharField(widget=forms.HiddenInput())
    name = forms.CharField(label=_("Name"),
                           widget=forms.TextInput(attrs={'readonly': 'readonly'}))

    def __init__(self, request, *args, **kwargs):
        super(RevertDevsnapshotForm, self).__init__(request, *args, **kwargs)
        target = kwargs.get('initial', {}).get('instance_id')
        self.fields['instance_id'] = forms.CharField(widget=forms.HiddenInput,
                                                     initial=target)

    def handle(self, request, data):
        try:
            api.nova.revert_dev_snapshot(request, data['instance_id'],
                                         data['name'])
            messages.success(request,
                             _('The snapshot is successful revert.'))
            return HttpResponseRedirect('/dashboard/admin/%s'
                                        % data.get('instance_id', None))
        except Exception:
            redirect = reverse('horizon:admin:instances:index')
            exceptions.handle(request, _('Failed to set revert snapshot. '),
                              redirect=redirect)
class CDRomForm(forms.SelfHandlingForm):
    """Attach an ISO image to (or detach it from) an instance CDROM device.

    Selecting the special image value ``"0"`` means "detach the
    currently attached ISO" instead of attaching a new one.
    """

    instance_id = forms.CharField(label=_("Instance ID"),
                                  widget=forms.HiddenInput(),
                                  required=False)
    instance_name = forms.CharField(
        label=_("Instance name:"),
        widget=forms.TextInput(attrs={'readonly': 'readonly'}),
        required=False,
    )
    devices = forms.ChoiceField(label=_("CDROM Device"),
                                help_text=_("Choose a Device."),
                                required=False)
    images = forms.ChoiceField(label=_("Image name"),
                               help_text=_("Choose a Image to attach."),
                               required=True)

    def __init__(self, *args, **kwargs):
        super(CDRomForm, self).__init__(*args, **kwargs)
        device_list = kwargs.get('initial', {}).get('devices', [])
        iso_list = kwargs.get('initial', {}).get('isos', [])
        isos = []
        iso_map = {}
        for iso in iso_list:
            if iso.disk_format == 'iso':
                isos.append((iso.id, '%s (%s)' % (iso.name, iso.id)))
                # Devices reference images by an obfuscated sha1 of the
                # image id; map it back to the image name for display.
                # .encode() keeps hashlib happy on Python 3 as well.
                fake_iso_id = hashlib.sha1(iso.id.encode('utf-8')).hexdigest()
                iso_map[fake_iso_id] = iso.name
        if isos:
            isos.append(("0", _("Detach the iso")))
        else:
            isos = (("", _("No iso available")),)
        self.fields['images'].choices = isos

        devices = []
        for device in device_list:
            iso_name = device.image_id
            # Bug fix: dict.has_key() was removed in Python 3; use 'in'.
            if iso_name in iso_map:
                iso_name = iso_map[device.image_id]
            devices.append((device.device_name,
                            '%s (%s)' % (device.device_name, iso_name)))
        if not devices:
            devices = (("", _("No devices available")),)
        self.fields['devices'].choices = devices

    def handle(self, request, data):
        """Attach/detach the selected ISO; the instance must be SHUTOFF
        unless a CDROM device already exists."""
        try:
            # NOTE(gabriel): This API call is only to display a pretty name.
            instance = api.nova.server_get(request, data['instance_id'])
            vals = {"inst": instance.name, "status": instance.status}
            image_id = data.get('images', '')
            dev = data.get('devices', '')
            # Simplified: 'not x or len(x) == 0' was redundant.
            if vals['status'] != 'SHUTOFF' and not data['devices']:
                messages.error(request, _('Attach ISO error, '
                                          'instance "%(inst)s" status "%(status)s".It must Shutoff') % vals)
                return True
            api.nova.cdrom_attach(request, data['instance_id'], dev, image_id)
            if image_id == "0":
                msg = _('ISO detach successfully.')
            else:
                msg = _('ISO attach successfully.')
            messages.success(request, msg)
            return True
        except Exception:
            redirect = reverse("horizon:admin:instances:index")
            exceptions.handle(request,
                              _('Unable to attach ISO.'),
                              redirect=redirect)
class LiveMigrateForm(forms.SelfHandlingForm):
    """Live-migrate an instance to another compute host."""

    current_host = forms.CharField(label=_("Current Host"),
                                   required=False,
                                   widget=forms.TextInput(
                                       attrs={'readonly': 'readonly'}))
    host = forms.ChoiceField(label=_("New Host"),
                             help_text=_("Choose a Host to migrate to."))
    disk_over_commit = forms.BooleanField(label=_("Disk Over Commit"),
                                          initial=False, required=False)
    block_migration = forms.BooleanField(label=_("Block Migration"),
                                         initial=False, required=False)

    def __init__(self, request, *args, **kwargs):
        super(LiveMigrateForm, self).__init__(request, *args, **kwargs)
        initial = kwargs.get('initial', {})
        self.fields['instance_id'] = forms.CharField(
            widget=forms.HiddenInput,
            initial=initial.get('instance_id'))
        self.fields['host'].choices = self.populate_host_choices(request,
                                                                 initial)

    def populate_host_choices(self, request, initial):
        """Offer every other compute host as a migration target."""
        current_host = initial.get('current_host')
        candidates = [(h.host_name, h.host_name)
                      for h in initial.get('hosts')
                      if h.service.startswith('compute')
                      and h.host_name != current_host]
        if candidates:
            candidates.insert(0, ("", _("Select a new host")))
        else:
            candidates.insert(0, ("", _("No other hosts available.")))
        return sorted(candidates)

    def handle(self, request, data):
        try:
            api.nova.server_live_migrate(
                request,
                data['instance_id'],
                data['host'],
                block_migration=data['block_migration'],
                disk_over_commit=data['disk_over_commit'])
            messages.success(request,
                             _('The instance is preparing the live migration '
                               'to host "%s".') % data['host'])
            return True
        except Exception:
            redirect = reverse('horizon:admin:instances:index')
            exceptions.handle(request,
                              _('Failed to live migrate instance to '
                                'host "%s".') % data['host'],
                              redirect=redirect)
| |
#!/usr/bin/python3
import my_settings
import sys
import math
import numpy as np
from argparse import ArgumentParser
from chainer import functions, optimizers
import chainer.computational_graph as cg
import util.generators as gens
from util.functions import trace, fill_batch2
from util.model_file import ModelFile
from util.vocabulary import Vocabulary
#from util.chainer_cpu_wrapper import wrapper
from util.chainer_gpu_wrapper import wrapper
class AttentionalTranslationModel:
    """Encoder-decoder translation model with soft attention.

    A bidirectional LSTM encodes the source sentence; at every target
    step the decoder computes softmax-normalised weights over source
    positions and mixes the forward/backward encoder states into
    attention vectors that feed the decoder LSTM.

    All parameters live in a model object created by
    ``wrapper.make_model`` (the CPU/GPU backend is chosen by which
    wrapper module is imported at the top of the file).
    """

    def __init__(self):
        # Real construction happens in new()/load(); keeping this empty
        # lets both factory methods share a bare instance.
        pass

    def __make_model(self):
        # Allocate all trainable parameters.  Every Linear/EmbedID that
        # feeds an LSTM produces 4 * n_hidden outputs (the three gates
        # plus the candidate), as required by functions.lstm.
        self.__model = wrapper.make_model(
            # input embedding
            w_xi = functions.EmbedID(len(self.__src_vocab), self.__n_embed),
            # forward encoder
            w_ia = functions.Linear(self.__n_embed, 4 * self.__n_hidden),
            w_aa = functions.Linear(self.__n_hidden, 4 * self.__n_hidden),
            # backward encoder
            w_ib = functions.Linear(self.__n_embed, 4 * self.__n_hidden),
            w_bb = functions.Linear(self.__n_hidden, 4 * self.__n_hidden),
            # attentional weight estimator
            w_aw = functions.Linear(self.__n_hidden, self.__n_hidden),
            w_bw = functions.Linear(self.__n_hidden, self.__n_hidden),
            w_pw = functions.Linear(self.__n_hidden, self.__n_hidden),
            w_we = functions.Linear(self.__n_hidden, 1),
            # decoder
            w_ap = functions.Linear(self.__n_hidden, self.__n_hidden),
            w_bp = functions.Linear(self.__n_hidden, self.__n_hidden),
            w_yp = functions.EmbedID(len(self.__trg_vocab), 4 * self.__n_hidden),
            w_pp = functions.Linear(self.__n_hidden, 4 * self.__n_hidden),
            w_cp = functions.Linear(self.__n_hidden, 4 * self.__n_hidden),
            w_dp = functions.Linear(self.__n_hidden, 4 * self.__n_hidden),
            w_py = functions.Linear(self.__n_hidden, len(self.__trg_vocab)),
        )

    @staticmethod
    def new(src_vocab, trg_vocab, n_embed, n_hidden):
        """Factory: build a model with freshly initialised parameters."""
        self = AttentionalTranslationModel()
        self.__src_vocab = src_vocab
        self.__trg_vocab = trg_vocab
        self.__n_embed = n_embed
        self.__n_hidden = n_hidden
        self.__make_model()
        return self

    def save(self, filename):
        """Serialise vocabularies, sizes and all weights to ``filename``.

        The write order here must stay in sync with the read order in
        load().
        """
        with ModelFile(filename, 'w') as fp:
            self.__src_vocab.save(fp.get_file_pointer())
            self.__trg_vocab.save(fp.get_file_pointer())
            fp.write(self.__n_embed)
            fp.write(self.__n_hidden)
            wrapper.begin_model_access(self.__model)
            fp.write_embed(self.__model.w_xi)
            fp.write_linear(self.__model.w_ia)
            fp.write_linear(self.__model.w_aa)
            fp.write_linear(self.__model.w_ib)
            fp.write_linear(self.__model.w_bb)
            fp.write_linear(self.__model.w_aw)
            fp.write_linear(self.__model.w_bw)
            fp.write_linear(self.__model.w_pw)
            fp.write_linear(self.__model.w_we)
            fp.write_linear(self.__model.w_ap)
            fp.write_linear(self.__model.w_bp)
            fp.write_embed(self.__model.w_yp)
            fp.write_linear(self.__model.w_pp)
            fp.write_linear(self.__model.w_cp)
            fp.write_linear(self.__model.w_dp)
            fp.write_linear(self.__model.w_py)
            wrapper.end_model_access(self.__model)

    @staticmethod
    def load(filename):
        """Factory: restore a model saved by save() (same field order)."""
        self = AttentionalTranslationModel()
        with ModelFile(filename) as fp:
            self.__src_vocab = Vocabulary.load(fp.get_file_pointer())
            self.__trg_vocab = Vocabulary.load(fp.get_file_pointer())
            self.__n_embed = int(fp.read())
            self.__n_hidden = int(fp.read())
            self.__make_model()
            wrapper.begin_model_access(self.__model)
            fp.read_embed(self.__model.w_xi)
            fp.read_linear(self.__model.w_ia)
            fp.read_linear(self.__model.w_aa)
            fp.read_linear(self.__model.w_ib)
            fp.read_linear(self.__model.w_bb)
            fp.read_linear(self.__model.w_aw)
            fp.read_linear(self.__model.w_bw)
            fp.read_linear(self.__model.w_pw)
            fp.read_linear(self.__model.w_we)
            fp.read_linear(self.__model.w_ap)
            fp.read_linear(self.__model.w_bp)
            fp.read_embed(self.__model.w_yp)
            fp.read_linear(self.__model.w_pp)
            fp.read_linear(self.__model.w_cp)
            fp.read_linear(self.__model.w_dp)
            fp.read_linear(self.__model.w_py)
            wrapper.end_model_access(self.__model)
        return self

    def init_optimizer(self):
        """Create a fresh AdaGrad optimizer bound to the current weights."""
        self.__opt = optimizers.AdaGrad(lr=0.01)
        self.__opt.setup(self.__model)

    def __forward(self, is_training, src_batch, trg_batch = None, generation_limit = None):
        """Run encoder + attention + decoder over one minibatch.

        Training mode teacher-forces against trg_batch and accumulates
        the softmax cross-entropy loss; inference mode feeds back its
        own argmax predictions until all hypotheses emit '</s>' or
        generation_limit steps have been produced.

        Returns (hyp_batch, accum_loss); accum_loss is None when not
        training.  Assumes every sentence in a batch has equal length
        (batches are padded upstream by fill_batch2).
        """
        m = self.__model
        tanh = functions.tanh
        lstm = functions.lstm
        batch_size = len(src_batch)
        hidden_size = self.__n_hidden
        src_len = len(src_batch[0])
        # In training the first target token ('<s>') is input only, so
        # there are len-1 prediction steps.
        trg_len = len(trg_batch[0]) - 1 if is_training else generation_limit
        src_stoi = self.__src_vocab.stoi
        trg_stoi = self.__trg_vocab.stoi
        trg_itos = self.__trg_vocab.itos
        hidden_zeros = wrapper.zeros((batch_size, hidden_size))
        sum_e_zeros = wrapper.zeros((batch_size, 1))

        # make embedding
        list_x = []
        for l in range(src_len):
            s_x = wrapper.make_var([src_stoi(src_batch[k][l]) for k in range(batch_size)], dtype=np.int32)
            list_x.append(s_x)

        # forward encoding
        c = hidden_zeros
        s_a = hidden_zeros
        list_a = []
        for l in range(src_len):
            s_x = list_x[l]
            s_i = tanh(m.w_xi(s_x))
            c, s_a = lstm(c, m.w_ia(s_i) + m.w_aa(s_a))
            list_a.append(s_a)

        # backward encoding
        c = hidden_zeros
        s_b = hidden_zeros
        list_b = []
        for l in reversed(range(src_len)):
            s_x = list_x[l]
            s_i = tanh(m.w_xi(s_x))
            c, s_b = lstm(c, m.w_ib(s_i) + m.w_bb(s_b))
            list_b.insert(0, s_b)

        # decoding: the initial decoder state mixes the last forward and
        # first backward encoder states.
        c = hidden_zeros
        s_p = tanh(m.w_ap(list_a[-1]) + m.w_bp(list_b[0]))
        s_y = wrapper.make_var([trg_stoi('<s>') for k in range(batch_size)], dtype=np.int32)

        hyp_batch = [[] for _ in range(batch_size)]
        accum_loss = wrapper.zeros(()) if is_training else None

        #for n in range(src_len):
        #    print(src_batch[0][n], end=' ')
        #print()

        for l in range(trg_len):
            # calculate attention weights (unnormalised exp scores; the
            # division by sum_e below performs the softmax)
            list_e = []
            sum_e = sum_e_zeros
            for n in range(src_len):
                s_w = tanh(m.w_aw(list_a[n]) + m.w_bw(list_b[n]) + m.w_pw(s_p))
                r_e = functions.exp(m.w_we(s_w))
                #list_e.append(functions.concat(r_e for _ in range(self.__n_hidden)))
                list_e.append(r_e)
                sum_e += r_e
            #sum_e = functions.concat(sum_e for _ in range(self.__n_hidden))

            # make attention vector (weighted sums of fwd/bwd states)
            s_c = hidden_zeros
            s_d = hidden_zeros
            for n in range(src_len):
                s_e = list_e[n] / sum_e
                #s_c += s_e * list_a[n]
                #s_d += s_e * list_b[n]
                s_c += functions.reshape(functions.batch_matmul(list_a[n], s_e), (batch_size, hidden_size))
                s_d += functions.reshape(functions.batch_matmul(list_b[n], s_e), (batch_size, hidden_size))
                #zxcv = wrapper.get_data(s_e)[0][0]
                #if zxcv > 0.9: asdf='#'
                #elif zxcv > 0.7: asdf='*'
                #elif zxcv > 0.3: asdf='+'
                #elif zxcv > 0.1: asdf='.'
                #else: asdf=' '
                #print(asdf * len(src_batch[0][n]), end=' ')

            # generate next word
            c, s_p = lstm(c, m.w_yp(s_y) + m.w_pp(s_p) + m.w_cp(s_c) + m.w_dp(s_d))
            r_y = m.w_py(s_p)
            output = wrapper.get_data(r_y).argmax(1)
            for k in range(batch_size):
                hyp_batch[k].append(trg_itos(output[k]))

            #print(hyp_batch[0][-1])

            if is_training:
                # teacher forcing: next input is the gold token
                s_t = wrapper.make_var([trg_stoi(trg_batch[k][l + 1]) for k in range(batch_size)], dtype=np.int32)
                accum_loss += functions.softmax_cross_entropy(r_y, s_t)
                s_y = s_t
            else:
                # stop once every hypothesis in the batch has ended
                if all(hyp_batch[k][-1] == '</s>' for k in range(batch_size)): break
                s_y = wrapper.make_var(output, dtype=np.int32)

        return hyp_batch, accum_loss

    def train(self, src_batch, trg_batch):
        """One optimizer step over a minibatch; returns the hypotheses."""
        self.__opt.zero_grads()
        hyp_batch, accum_loss = self.__forward(True, src_batch, trg_batch=trg_batch)
        #g = cg.build_computational_graph([accum_loss])
        #with open('asdf', 'w') as fp: fp.write(g.dump())
        #sys.exit()
        accum_loss.backward()
        # gradient clipping keeps the LSTM updates stable
        self.__opt.clip_grads(10)
        self.__opt.update()
        return hyp_batch

    def predict(self, src_batch, generation_limit):
        """Translate a batch, producing at most generation_limit words."""
        return self.__forward(False, src_batch, generation_limit=generation_limit)[0]
def parse_args():
    """Define, parse and sanity-check the command line arguments."""
    def_vocab = 32768
    def_embed = 256
    def_hidden = 512
    def_epoch = 100
    def_minibatch = 64
    def_generation_limit = 256

    p = ArgumentParser(description='Attentional neural machine translation')

    p.add_argument('mode', help='\'train\' or \'test\'')
    p.add_argument('source', help='[in] source corpus')
    p.add_argument('target', help='[in/out] target corpus')
    p.add_argument('model', help='[in/out] model file')

    # every optional argument is an integer with the same metavar
    int_options = [
        ('--vocab', def_vocab,
         'vocabulary size (default: %d)' % def_vocab),
        ('--embed', def_embed,
         'embedding layer size (default: %d)' % def_embed),
        ('--hidden', def_hidden,
         'hidden layer size (default: %d)' % def_hidden),
        ('--epoch', def_epoch,
         'number of training epoch (default: %d)' % def_epoch),
        ('--minibatch', def_minibatch,
         'minibatch size (default: %d)' % def_minibatch),
        ('--generation-limit', def_generation_limit,
         'maximum number of words to be generated for test input'),
    ]
    for flag, default, help_text in int_options:
        p.add_argument(flag, default=default, metavar='INT', type=int,
                       help=help_text)

    args = p.parse_args()

    # check args
    checks = [
        (args.mode in ['train', 'test'],
         'you must set mode = \'train\' or \'test\''),
        (args.vocab >= 1, 'you must set --vocab >= 1'),
        (args.embed >= 1, 'you must set --embed >= 1'),
        (args.hidden >= 1, 'you must set --hidden >= 1'),
        (args.epoch >= 1, 'you must set --epoch >= 1'),
        (args.minibatch >= 1, 'you must set --minibatch >= 1'),
        (args.generation_limit >= 1,
         'you must set --generation-limit >= 1'),
    ]
    try:
        for ok, message in checks:
            if not ok:
                raise ValueError(message)
    except Exception as ex:
        p.print_usage(file=sys.stderr)
        print(ex, file=sys.stderr)
        sys.exit()

    return args
def train_model(args):
    """Train for ``args.epoch`` epochs, saving a snapshot after each."""
    trace('making vocabularies ...')
    src_vocab = Vocabulary.new(gens.word_list(args.source), args.vocab)
    trg_vocab = Vocabulary.new(gens.word_list(args.target), args.vocab)

    trace('making model ...')
    model = AttentionalTranslationModel.new(src_vocab, trg_vocab, args.embed,
                                            args.hidden)

    def _fmt(words):
        # show '*' in place of the '</s>' padding/end-of-sentence token
        return ' '.join('*' if w == '</s>' else w for w in words)

    for epoch in range(args.epoch):
        trace('epoch %d/%d: ' % (epoch + 1, args.epoch))
        trained = 0
        batches = gens.batch(
            gens.sorted_parallel(gens.word_list(args.source),
                                 gens.word_list(args.target),
                                 100 * args.minibatch, order=0),
            args.minibatch)
        model.init_optimizer()

        for src_batch, trg_batch in batches:
            src_batch = fill_batch2(src_batch)
            trg_batch = fill_batch2(trg_batch)
            batch_size = len(src_batch)
            hyp_batch = model.train(src_batch, trg_batch)

            for k in range(batch_size):
                trace('epoch %3d/%3d, sample %8d'
                      % (epoch + 1, args.epoch, trained + k + 1))
                trace(' src = ' + _fmt(src_batch[k]))
                trace(' trg = ' + _fmt(trg_batch[k]))
                trace(' hyp = ' + _fmt(hyp_batch[k]))
            trained += batch_size

        trace('saving model ...')
        model.save(args.model + '.%03d' % (epoch + 1))

    trace('finished.')
def test_model(args):
    """Translate ``args.source`` with a saved model into ``args.target``."""
    trace('loading model ...')
    model = AttentionalTranslationModel.load(args.model)

    trace('generating translation ...')
    generated = 0

    with open(args.target, 'w') as fp:
        for src_batch in gens.batch(gens.word_list(args.source),
                                    args.minibatch):
            src_batch = fill_batch2(src_batch)
            batch_size = len(src_batch)
            trace('sample %8d - %8d ...' % (generated + 1,
                                            generated + batch_size))
            hyp_batch = model.predict(src_batch, args.generation_limit)

            for hyp in hyp_batch:
                # guarantee a terminator exists, then cut from it onwards
                hyp.append('</s>')
                print(' '.join(hyp[:hyp.index('</s>')]), file=fp)

            generated += batch_size

    trace('finished.')
def main():
    """Entry point: initialise the GPU wrapper and dispatch on mode."""
    args = parse_args()
    trace('initializing ...')
    wrapper.init()
    if args.mode == 'train':
        train_model(args)
    elif args.mode == 'test':
        test_model(args)


if __name__ == '__main__':
    main()
| |
'''
Created on Aug 21, 2015
@author: David Zwicker <dzwicker@seas.harvard.edu>
'''
from __future__ import division
import collections
import functools
import logging
import sys
import unittest
import warnings
from contextlib import contextmanager
try:
collectionsAbc = collections.abc # python 3
except AttributeError:
collectionsAbc = collections # python 2
import numpy as np
import six
from six.moves import zip_longest
from .math import arrays_close
class MockLoggingHandler(logging.Handler):
    """ Mock logging handler to check for expected logs.

    Messages are available from an instance's ``messages`` dict, in order,
    indexed by a lowercase log level string (e.g., 'debug', 'info', etc.).

    Adapted from http://stackoverflow.com/a/20553331/932593
    """

    def __init__(self, *args, **kwargs):
        # one list of formatted messages per standard log level
        self.messages = {'debug': [], 'info': [], 'warning': [], 'error': [],
                         'critical': []}
        super(MockLoggingHandler, self).__init__(*args, **kwargs)

    def emit(self, record):
        """
        Store a message from ``record`` in the instance's ``messages`` dict.
        """
        # the handler lock guards concurrent emits from multiple threads
        self.acquire()
        try:
            self.messages[record.levelname.lower()].append(record.getMessage())
        finally:
            self.release()

    def reset(self):
        """ reset all messages """
        self.acquire()
        try:
            for message_list in self.messages.values():
                # slice deletion instead of list.clear(), which does not
                # exist on Python 2 -- the rest of this module explicitly
                # supports py2 (six, collections fallback)
                del message_list[:]
        finally:
            self.release()
class TestBase(unittest.TestCase):
    """ extends the basic TestCase class with some convenience functions """

    def assertAllClose(self, arr1, arr2, rtol=1e-05, atol=1e-08, msg=None):
        """ compares all the entries of the arrays a and b """
        try:
            # try to convert to numpy arrays
            converted1 = np.asanyarray(arr1)
            converted2 = np.asanyarray(arr2)
        except ValueError:
            # conversion failed (e.g. ragged input) -> compare item by item
            try:
                for item1, item2 in zip_longest(arr1, arr2):
                    self.assertAllClose(item1, item2, rtol, atol, msg)
            except TypeError:
                prefix = "" if msg is None else msg + "; "
                raise TypeError(prefix + "Don't know how to compare %s and %s"
                                % (arr1, arr2))
        else:
            full_msg = 'Values are not equal' if msg is None else msg
            full_msg += '\n%s !=\n%s)' % (converted1, converted2)
            is_close = arrays_close(converted1, converted2, rtol, atol,
                                    equal_nan=True)
            self.assertTrue(is_close, full_msg)

    def assertDictAllClose(self, a, b, rtol=1e-05, atol=1e-08, msg=None):
        """ compares all the entries of the dictionaries a and b """
        prefix = '' if msg is None else msg + '\n'
        for key, value in a.items():
            # build a per-key message if comparison fails
            submsg = prefix + ('Dictionaries differ for key `%s` (%s != %s)'
                               % (key, value, b[key]))
            # try comparing as numpy arrays and fall back if that doesn't work
            try:
                self.assertAllClose(value, b[key], rtol, atol, submsg)
            except TypeError:
                self.assertEqual(value, b[key], submsg)
class WarnAssertionsMixin(object):
    """
    Mixing that allows to test for warnings

    Code inspired by https://blog.ionelmc.ro/2013/06/26/testing-python-warnings/
    """

    @contextmanager
    def assertNoWarnings(self):
        try:
            # escalate every warning to an error so any warning aborts the
            # wrapped block
            warnings.simplefilter("error")
            yield
        finally:
            warnings.resetwarnings()

    @contextmanager
    def assertWarnings(self, messages):
        """
        Asserts that the given messages are issued in the given order.
        """
        if not messages:
            raise RuntimeError("Use assertNoWarnings instead!")

        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            # clear per-module warning registries so warnings that were
            # already emitted once are reported again
            for module in list(sys.modules.values()):
                registry = getattr(module, '__warningregistry__', None)
                if registry is not None:
                    registry.clear()
            yield

            texts = [w.message.args[0] for w in caught]
            for expected in messages:
                if not any(expected in text for text in texts):
                    self.fail('Message `%s` was not contained in warnings'
                              % expected)
def deep_getsizeof(obj, ids=None):
    """Find the memory footprint of a Python object

    This is a recursive function that drills down a Python object graph
    like a dictionary holding nested dictionaries with lists of lists
    and tuples and sets.

    The sys.getsizeof function does a shallow size of only. It counts each
    object inside a container as pointer only regardless of how big it
    really is.

    `ids` is the set of object ids that have already been counted; callers
    normally leave it at None.

    Function modified from
    https://code.tutsplus.com/tutorials/understand-how-much-memory-your-python-objects-use--cms-25609
    """
    if ids is not None:
        if id(obj) in ids:
            # already counted -> avoid double counting and infinite
            # recursion on cyclic structures
            return 0
    else:
        ids = set()

    r = sys.getsizeof(obj)
    ids.add(id(obj))

    if isinstance(obj, six.string_types) or isinstance(obj, (bytes,
                                                              bytearray)):
        # simple string or byte sequence; on Python 3, bytes/bytearray are
        # Containers and iterating them would size every byte as a boxed
        # int, grossly overcounting -> stop the recursion here
        return r

    if isinstance(obj, collectionsAbc.Mapping):
        # simple mapping
        return r + sum(deep_getsizeof(k, ids) + deep_getsizeof(v, ids)
                       for k, v in six.iteritems(obj))

    if isinstance(obj, collectionsAbc.Container):
        # collection that is neither a string nor a mapping
        return r + sum(deep_getsizeof(x, ids) for x in obj)

    if hasattr(obj, '__dict__'):
        # custom object: count its attribute dictionary, too
        return r + deep_getsizeof(obj.__dict__, ids)

    # basic object: neither of the above
    return r
def repeat(num):
    """ decorator for repeating tests several times """
    def decorator(func):
        @functools.wraps(func)
        def repeated(*args, **kwargs):
            # call the wrapped test `num` times; results are discarded
            count = 0
            while count < num:
                func(*args, **kwargs)
                count += 1
        return repeated
    return decorator
# Run this module's test suite when executed directly as a script.
if __name__ == '__main__':
    unittest.main()
| |
# Copyright 2013 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally.common import log as logging
from rally import consts
from rally import exceptions
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.cinder import utils
from rally.plugins.openstack.scenarios.glance import utils as glance_utils
from rally.plugins.openstack.scenarios.nova import utils as nova_utils
from rally.task import types
from rally.task import validation
LOG = logging.getLogger(__name__)
class CinderVolumes(utils.CinderScenario,
                    nova_utils.NovaScenario,
                    glance_utils.GlanceScenario):
    """Benchmark scenarios for Cinder Volumes."""

    @types.set(image=types.ImageResourceType)
    @validation.image_exists("image", nullable=True)
    @validation.required_services(consts.Service.CINDER)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["cinder"]})
    def create_and_list_volume(self, size, detailed=True,
                               image=None, **kwargs):
        """Create a volume and list all volumes.

        Measure the "cinder volume-list" command performance.

        If you have only 1 user in your context, you will
        add 1 volume on every iteration. So you will have more
        and more volumes and will be able to measure the
        performance of the "cinder volume-list" command depending on
        the number of images owned by users.

        :param size: volume size (integer, in GB) or
                     dictionary, must contain two values:
                         min - minimum size volumes will be created as;
                         max - maximum size volumes will be created as.
        :param detailed: determines whether the volume listing should contain
                         detailed information about all of them
        :param image: image to be used to create volume
        :param kwargs: optional args to create a volume
        """
        # an image reference makes the created volume bootable
        if image:
            kwargs["imageRef"] = image

        self._create_volume(size, **kwargs)
        self._list_volumes(detailed)

    @validation.required_services(consts.Service.CINDER)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["cinder"]})
    def list_volumes(self, detailed=True):
        """List all volumes.

        This simple scenario tests the cinder list command by listing
        all the volumes.

        :param detailed: True if detailed information about volumes
                         should be listed
        """
        self._list_volumes(detailed)

    @types.set(image=types.ImageResourceType)
    @validation.image_exists("image", nullable=True)
    @validation.required_services(consts.Service.CINDER)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["cinder"]})
    def create_and_update_volume(self, size, image=None,
                                 create_volume_kwargs=None,
                                 update_volume_kwargs=None):
        """Create a volume and update its name and description.

        :param size: volume size (integer, in GB)
        :param image: image to be used to create volume
        :param create_volume_kwargs: dict, to be used to create volume
        :param update_volume_kwargs: dict, to be used to update volume
        """
        create_volume_kwargs = create_volume_kwargs or {}
        update_volume_kwargs = update_volume_kwargs or {}
        if image:
            create_volume_kwargs["imageRef"] = image
        volume = self._create_volume(size, **create_volume_kwargs)
        self._update_volume(volume, **update_volume_kwargs)

    @types.set(image=types.ImageResourceType)
    @validation.image_exists("image", nullable=True)
    @validation.required_services(consts.Service.CINDER)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["cinder"]})
    def create_and_delete_volume(self, size, image=None,
                                 min_sleep=0, max_sleep=0,
                                 **kwargs):
        """Create and then delete a volume.

        Good for testing a maximal bandwidth of cloud. Optional 'min_sleep'
        and 'max_sleep' parameters allow the scenario to simulate a pause
        between volume creation and deletion (of random duration from
        [min_sleep, max_sleep]).

        :param size: volume size (integer, in GB) or
                     dictionary, must contain two values:
                         min - minimum size volumes will be created as;
                         max - maximum size volumes will be created as.
        :param image: image to be used to create volume
        :param min_sleep: minimum sleep time between volume creation and
                          deletion (in seconds)
        :param max_sleep: maximum sleep time between volume creation and
                          deletion (in seconds)
        :param kwargs: optional args to create a volume
        """
        if image:
            kwargs["imageRef"] = image

        volume = self._create_volume(size, **kwargs)
        self.sleep_between(min_sleep, max_sleep)
        self._delete_volume(volume)

    @types.set(image=types.ImageResourceType)
    @validation.image_exists("image", nullable=True)
    @validation.required_services(consts.Service.CINDER)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["cinder"]})
    def create_volume(self, size, image=None, **kwargs):
        """Create a volume.

        Good test to check how influence amount of active volumes on
        performance of creating new.

        :param size: volume size (integer, in GB) or
                     dictionary, must contain two values:
                         min - minimum size volumes will be created as;
                         max - maximum size volumes will be created as.
        :param image: image to be used to create volume
        :param kwargs: optional args to create a volume
        """
        if image:
            kwargs["imageRef"] = image

        self._create_volume(size, **kwargs)

    @validation.required_services(consts.Service.CINDER)
    @validation.required_openstack(users=True)
    @validation.required_contexts("volumes")
    @scenario.configure(context={"cleanup": ["cinder"]})
    def modify_volume_metadata(self, sets=10, set_size=3,
                               deletes=5, delete_size=3):
        """Modify a volume's metadata.

        This requires a volume to be created with the volumes
        context. Additionally, ``sets * set_size`` must be greater
        than or equal to ``deletes * delete_size``.

        :param sets: how many set_metadata operations to perform
        :param set_size: number of metadata keys to set in each
                         set_metadata operation
        :param deletes: how many delete_metadata operations to perform
        :param delete_size: number of metadata keys to delete in each
                            delete_metadata operation
        """
        # fail fast: we cannot delete more keys than we create
        if sets * set_size < deletes * delete_size:
            raise exceptions.InvalidArgumentsException(
                "Not enough metadata keys will be created: "
                "Setting %(num_keys)s keys, but deleting %(num_deletes)s" %
                {"num_keys": sets * set_size,
                 "num_deletes": deletes * delete_size})

        # operate on a random pre-created volume from the context
        volume = random.choice(self.context["tenant"]["volumes"])
        keys = self._set_metadata(volume["id"], sets, set_size)
        self._delete_metadata(volume["id"], keys, deletes, delete_size)

    @validation.required_services(consts.Service.CINDER)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["cinder"]})
    def create_and_extend_volume(self, size, new_size, min_sleep=0,
                                 max_sleep=0, **kwargs):
        """Create and extend a volume and then delete it.

        :param size: volume size (in GB) or
                     dictionary, must contain two values:
                         min - minimum size volumes will be created as;
                         max - maximum size volumes will be created as.
        :param new_size: volume new size (in GB) or
                         dictionary, must contain two values:
                             min - minimum size volumes will be created as;
                             max - maximum size volumes will be created as.
                         Notice: must be larger than the original size
                         to extend.
        :param min_sleep: minimum sleep time between volume extension and
                          deletion (in seconds)
        :param max_sleep: maximum sleep time between volume extension and
                          deletion (in seconds)
        :param kwargs: optional args to extend the volume
        """
        volume = self._create_volume(size, **kwargs)
        self._extend_volume(volume, new_size)
        self.sleep_between(min_sleep, max_sleep)
        self._delete_volume(volume)

    @validation.required_services(consts.Service.CINDER)
    @validation.required_contexts("volumes")
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["cinder"]})
    def create_from_volume_and_delete_volume(self, size, min_sleep=0,
                                             max_sleep=0, **kwargs):
        """Create volume from volume and then delete it.

        Scenario for testing volume clone. Optional 'min_sleep' and
        'max_sleep' parameters allow the scenario to simulate a pause between
        volume creation and deletion (of random duration from
        [min_sleep, max_sleep]).

        :param size: volume size (in GB), or
                     dictionary, must contain two values:
                         min - minimum size volumes will be created as;
                         max - maximum size volumes will be created as.
                     Should be equal or bigger source volume size
        :param min_sleep: minimum sleep time between volume creation and
                          deletion (in seconds)
        :param max_sleep: maximum sleep time between volume creation and
                          deletion (in seconds)
        :param kwargs: optional args to create a volume
        """
        # clone a random pre-created volume from the context
        source_vol = random.choice(self.context["tenant"]["volumes"])
        volume = self._create_volume(size, source_volid=source_vol["id"],
                                     **kwargs)
        self.sleep_between(min_sleep, max_sleep)
        self._delete_volume(volume)

    @validation.required_services(consts.Service.CINDER)
    @validation.required_contexts("volumes")
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["cinder"]})
    def create_and_delete_snapshot(self, force=False, min_sleep=0,
                                   max_sleep=0, **kwargs):
        """Create and then delete a volume-snapshot.

        Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
        to simulate a pause between snapshot creation and deletion
        (of random duration from [min_sleep, max_sleep]).

        :param force: when set to True, allows snapshot of a volume when
                      the volume is attached to an instance
        :param min_sleep: minimum sleep time between snapshot creation and
                          deletion (in seconds)
        :param max_sleep: maximum sleep time between snapshot creation and
                          deletion (in seconds)
        :param kwargs: optional args to create a snapshot
        """
        volume = random.choice(self.context["tenant"]["volumes"])
        snapshot = self._create_snapshot(volume["id"], force=force, **kwargs)
        self.sleep_between(min_sleep, max_sleep)
        self._delete_snapshot(snapshot)

    @types.set(image=types.ImageResourceType,
               flavor=types.FlavorResourceType)
    @validation.image_valid_on_flavor("flavor", "image")
    @validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["cinder", "nova"]})
    def create_and_attach_volume(self, size, image, flavor, **kwargs):
        """Create a VM and attach a volume to it.

        Simple test to create a VM and attach a volume, then
        detach the volume and delete volume/VM.

        :param size: volume size (integer, in GB) or
                     dictionary, must contain two values:
                         min - minimum size volumes will be created as;
                         max - maximum size volumes will be created as.
        :param image: Glance image name to use for the VM
        :param flavor: VM flavor name
        :param kwargs: optional arguments for VM creation
        """
        server = self._boot_server(image, flavor, **kwargs)
        volume = self._create_volume(size)

        self._attach_volume(server, volume)
        self._detach_volume(server, volume)

        # clean up in reverse order of creation
        self._delete_volume(volume)
        self._delete_server(server)

    @validation.volume_type_exists("volume_type")
    @validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["cinder", "nova"]})
    def create_snapshot_and_attach_volume(self, volume_type=False,
                                          size=None, **kwargs):
        """Create volume, snapshot and attach/detach volume.

        This scenario is based off of the standalone qaStressTest.py
        (https://github.com/WaltHP/cinder-stress).

        :param volume_type: Whether or not to specify volume type when creating
                            volumes.
        :param size: Volume size - dictionary, contains two values:
                        min - minimum size volumes will be created as;
                        max - maximum size volumes will be created as.
                     default values: {"min": 1, "max": 5}
        :param kwargs: Optional parameters used during volume
                       snapshot creation.
        """
        if size is None:
            size = {"min": 1, "max": 5}

        selected_type = None
        # None stays in the candidate list, so the default volume type may
        # also be picked even when volume_type is True
        volume_types = [None]

        if volume_type:
            volume_types_list = self.clients("cinder").volume_types.list()
            for s in volume_types_list:
                volume_types.append(s.name)
            selected_type = random.choice(volume_types)

        volume = self._create_volume(size, volume_type=selected_type)
        snapshot = self._create_snapshot(volume.id, False, **kwargs)

        server = self.get_random_server()

        self._attach_volume(server, volume)
        self._detach_volume(server, volume)

        self._delete_snapshot(snapshot)
        self._delete_volume(volume)

    @validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["cinder", "nova"]})
    def create_nested_snapshots_and_attach_volume(self,
                                                  size=None,
                                                  nested_level=None,
                                                  **kwargs):
        """Create a volume from snapshot and attach/detach the volume

        This scenario creates a volume and its snapshot, attaches the volume,
        then creates a new volume from the existing snapshot and so on,
        up to the given nested level; finally all volumes are detached and
        deleted.
        volume->snapshot->volume->snapshot->volume ...

        :param size: Volume size - dictionary, contains two values:
                        min - minimum size volumes will be created as;
                        max - maximum size volumes will be created as.
                     default values: {"min": 1, "max": 5}
        :param nested_level: Nested level - dictionary, contains two values:
                               min - minimum number of volumes will be created
                                     from snapshot;
                               max - maximum number of volumes will be created
                                     from snapshot.
                             default values: {"min": 5, "max": 10}
        :param kwargs: Optional parameters used during volume
                       snapshot creation.
        """
        if size is None:
            size = {"min": 1, "max": 5}
        if nested_level is None:
            nested_level = {"min": 5, "max": 10}

        # NOTE: Volume size cannot be smaller than the snapshot size, so
        #       volume with specified size should be created to avoid
        #       size mismatching between volume and snapshot due random
        #       size in _create_volume method.
        size = random.randint(size["min"], size["max"])

        nested_level = random.randint(nested_level["min"], nested_level["max"])

        # each entry records (server, volume, snapshot) for later teardown
        source_vol = self._create_volume(size)
        nes_objs = [(self.get_random_server(), source_vol,
                     self._create_snapshot(source_vol.id, False, **kwargs))]

        self._attach_volume(nes_objs[0][0], nes_objs[0][1])
        snapshot = nes_objs[0][2]

        for i in range(nested_level - 1):
            volume = self._create_volume(size, snapshot_id=snapshot.id)
            snapshot = self._create_snapshot(volume.id, False, **kwargs)
            server = self.get_random_server()
            self._attach_volume(server, volume)

            nes_objs.append((server, volume, snapshot))

        # tear down newest-first so each snapshot's dependants go first
        nes_objs.reverse()
        for server, volume, snapshot in nes_objs:
            self._detach_volume(server, volume)
            self._delete_snapshot(snapshot)
            self._delete_volume(volume)

    @validation.required_services(consts.Service.CINDER)
    @validation.required_contexts("volumes")
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["cinder"]})
    def create_and_list_snapshots(self, force=False, detailed=True, **kwargs):
        """Create and then list a volume-snapshot.

        :param force: when set to True, allows snapshot of a volume when
                      the volume is attached to an instance
        :param detailed: True if detailed information about snapshots
                         should be listed
        :param kwargs: optional args to create a snapshot
        """
        volume = random.choice(self.context["tenant"]["volumes"])
        self._create_snapshot(volume["id"], force=force, **kwargs)
        self._list_snapshots(detailed)

    @validation.required_services(consts.Service.CINDER, consts.Service.GLANCE)
    @validation.required_openstack(users=True)
    @validation.required_parameters("size")
    @scenario.configure(context={"cleanup": ["cinder", "glance"]})
    def create_and_upload_volume_to_image(self, size, force=False,
                                          container_format="bare",
                                          disk_format="raw",
                                          do_delete=True,
                                          **kwargs):
        """Create and upload a volume to image.

        :param size: volume size (integers, in GB), or
                     dictionary, must contain two values:
                         min - minimum size volumes will be created as;
                         max - maximum size volumes will be created as.
        :param force: when set to True volume that is attached to an instance
                      could be uploaded to image
        :param container_format: image container format
        :param disk_format: disk format for image
        :param do_delete: deletes image and volume after uploading if True
        :param kwargs: optional args to create a volume
        """
        volume = self._create_volume(size, **kwargs)
        image = self._upload_volume_to_image(volume, force, container_format,
                                             disk_format)

        if do_delete:
            self._delete_volume(volume)
            self._delete_image(image)

    @validation.required_cinder_services("cinder-backup")
    @validation.required_services(consts.Service.CINDER)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["cinder"]})
    def create_volume_backup(self, size, do_delete=True,
                             create_volume_kwargs=None,
                             create_backup_kwargs=None):
        """Create a volume backup.

        :param size: volume size in GB
        :param do_delete: if True, a volume and a volume backup will
                          be deleted after creation.
        :param create_volume_kwargs: optional args to create a volume
        :param create_backup_kwargs: optional args to create a volume backup
        """
        create_volume_kwargs = create_volume_kwargs or {}
        create_backup_kwargs = create_backup_kwargs or {}

        volume = self._create_volume(size, **create_volume_kwargs)
        backup = self._create_backup(volume.id, **create_backup_kwargs)

        if do_delete:
            self._delete_volume(volume)
            self._delete_backup(backup)

    @validation.required_cinder_services("cinder-backup")
    @validation.required_services(consts.Service.CINDER)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["cinder"]})
    def create_and_restore_volume_backup(self, size, do_delete=True,
                                         create_volume_kwargs=None,
                                         create_backup_kwargs=None):
        """Restore volume backup.

        :param size: volume size in GB
        :param do_delete: if True, the volume and the volume backup will
                          be deleted after creation.
        :param create_volume_kwargs: optional args to create a volume
        :param create_backup_kwargs: optional args to create a volume backup
        """
        create_volume_kwargs = create_volume_kwargs or {}
        create_backup_kwargs = create_backup_kwargs or {}

        volume = self._create_volume(size, **create_volume_kwargs)
        backup = self._create_backup(volume.id, **create_backup_kwargs)
        self._restore_backup(backup.id)

        if do_delete:
            self._delete_volume(volume)
            self._delete_backup(backup)

    @validation.required_cinder_services("cinder-backup")
    @validation.required_services(consts.Service.CINDER)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["cinder"]})
    def create_and_list_volume_backups(self, size, detailed=True,
                                       do_delete=True,
                                       create_volume_kwargs=None,
                                       create_backup_kwargs=None):
        """Create and then list a volume backup.

        :param size: volume size in GB
        :param detailed: True if detailed information about backup
                         should be listed
        :param do_delete: if True, a volume backup will be deleted
        :param create_volume_kwargs: optional args to create a volume
        :param create_backup_kwargs: optional args to create a volume backup
        """
        create_volume_kwargs = create_volume_kwargs or {}
        create_backup_kwargs = create_backup_kwargs or {}

        volume = self._create_volume(size, **create_volume_kwargs)
        backup = self._create_backup(volume.id, **create_backup_kwargs)
        self._list_backups(detailed)

        if do_delete:
            self._delete_volume(volume)
            self._delete_backup(backup)
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Add optional file-upload columns to the proposal tables."""
    # Every new column uses the same nullable FileField definition, so the
    # additions can be driven from a single table/column list.
    new_columns = [
        (u'pycon_pyconposterproposal', 'slide_deck'),
        (u'pycon_pyconlightningtalkproposal', 'slide_deck'),
        (u'pycon_pycontalkproposal', 'slide_deck'),
        (u'pycon_pycontutorialproposal', 'slide_deck'),
        (u'pycon_pycontutorialproposal', 'handout'),
    ]
    for table, column in new_columns:
        db.add_column(
            table, column,
            self.gf('django.db.models.fields.files.FileField')(
                max_length=100, null=True, blank=True),
            keep_default=False)
def backwards(self, orm):
    """Drop the file-upload columns added by forwards()."""
    dropped_columns = [
        (u'pycon_pyconposterproposal', 'slide_deck'),
        (u'pycon_pyconlightningtalkproposal', 'slide_deck'),
        (u'pycon_pycontalkproposal', 'slide_deck'),
        (u'pycon_pycontutorialproposal', 'slide_deck'),
        (u'pycon_pycontutorialproposal', 'handout'),
    ]
    for table, column in dropped_columns:
        db.delete_column(table, column)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'conference.conference': {
'Meta': {'object_name': 'Conference'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('timezones.fields.TimeZoneField', [], {'default': "'US/Eastern'", 'max_length': '100', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'conference.section': {
'Meta': {'object_name': 'Section'},
'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['conference.Conference']"}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'proposals.additionalspeaker': {
'Meta': {'unique_together': "(('speaker', 'proposalbase'),)", 'object_name': 'AdditionalSpeaker', 'db_table': "'proposals_proposalbase_additional_speakers'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proposalbase': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['proposals.ProposalBase']"}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['speakers.Speaker']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
u'proposals.proposalbase': {
'Meta': {'object_name': 'ProposalBase'},
'abstract': ('django.db.models.fields.TextField', [], {}),
'additional_notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'additional_speakers': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['speakers.Speaker']", 'symmetrical': 'False', 'through': u"orm['proposals.AdditionalSpeaker']", 'blank': 'True'}),
'cancelled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '400'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['proposals.ProposalKind']"}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'proposals'", 'to': u"orm['speakers.Speaker']"}),
'submitted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'proposals.proposalkind': {
'Meta': {'object_name': 'ProposalKind'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'proposal_kinds'", 'to': u"orm['conference.Section']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
u'pycon.pyconlightningtalkproposal': {
'Meta': {'object_name': 'PyConLightningTalkProposal'},
'additional_requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'audience_level': ('django.db.models.fields.IntegerField', [], {}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pycon.PyConProposalCategory']"}),
'damaged_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'overall_status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}),
'recording_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rejection_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slide_deck': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'pycon.pyconposterproposal': {
'Meta': {'object_name': 'PyConPosterProposal'},
'additional_requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'audience_level': ('django.db.models.fields.IntegerField', [], {}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pycon.PyConProposalCategory']"}),
'damaged_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'overall_status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}),
'recording_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rejection_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slide_deck': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'pycon.pyconproposalcategory': {
'Meta': {'object_name': 'PyConProposalCategory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
u'pycon.pyconsponsortutorialproposal': {
'Meta': {'object_name': 'PyConSponsorTutorialProposal', '_ormbases': [u'proposals.ProposalBase']},
u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'})
},
u'pycon.pycontalkproposal': {
'Meta': {'object_name': 'PyConTalkProposal'},
'additional_requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'audience': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'audience_level': ('django.db.models.fields.IntegerField', [], {}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pycon.PyConProposalCategory']"}),
'damaged_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {}),
'outline': ('django.db.models.fields.TextField', [], {}),
'overall_status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'perceived_value': ('django.db.models.fields.TextField', [], {'max_length': '500'}),
u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}),
'recording_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rejection_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slide_deck': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'pycon.pycontutorialproposal': {
'Meta': {'object_name': 'PyConTutorialProposal'},
'additional_requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'audience': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'audience_level': ('django.db.models.fields.IntegerField', [], {}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pycon.PyConProposalCategory']"}),
'damaged_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'domain_level': ('django.db.models.fields.IntegerField', [], {}),
'handout': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'more_info': ('django.db.models.fields.TextField', [], {}),
'outline': ('django.db.models.fields.TextField', [], {}),
'overall_status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'perceived_value': ('django.db.models.fields.TextField', [], {'max_length': '500'}),
u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}),
'recording_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rejection_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slide_deck': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'speakers.speaker': {
'Meta': {'object_name': 'Speaker'},
'annotation': ('django.db.models.fields.TextField', [], {}),
'biography': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invite_email': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'invite_token': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'sessions_preference': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'twitter_username': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'speaker_profile'", 'unique': 'True', 'null': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['pycon']
| |
"""
Main objects needed for sorting collections.
Collection and Content may need to be customized for the specific type of collection in use.
OVERVIEW:
============
A 'Collections' object is created with the root location and the list of acceptable collection directories available. These options are generally stored as a JSON file named 'config.json' in the root of the collections directory, but could easily be generated and passed in at run time. The main function is to load and store all of the CollectionSummary objects.
A 'CollectionSummary' will look for a 'summary.json' file in either the main root directory associated with the CollectionSummary, or in the meta subdirectory of the main root directory (meta_root). This summary data includes other drive locations where the 'Collection' and all other associated data may be found.
"""
from __future__ import print_function
from __future__ import absolute_import
from builtins import str
from builtins import range
from builtins import object
import os, re, logging, codecs
#used in Cluster.save():
import json
from .helpers import load_json, save_json, find_json
from .content import Content
from sortable.path import Path, check_ignore
from moments.timestamp import Timestamp
from moments.journal import Journal
#from medley.yapsy.PluginManager import PluginManager
class CollectionSimple(list):
    """
    A flat, ordered list of Content items with just enough metadata
    (``source`` and ``root``) to know where the contents came from.

    Heavier collection behaviors (summaries, clusters, scrapers) live in
    the ``Collection`` subclass; most item logic lives in Content itself.
    """
    def __init__(self, source='', root='', contents=None, walk=False,
                 as_dict=False, debug=False):
        """
        source should be the full path to the collection's JSON file;
        when given, ``root`` is derived from its directory.

        contents: optional iterable of initial Content items.  (``None``
        default instead of ``[]`` to avoid the shared mutable-default
        pitfall; the items are copied in, skipping duplicates.)

        walk, as_dict and debug are accepted for interface
        compatibility; only ``walk`` is recorded -- it can be useful to
        look back at how the object was loaded.
        """
        #this is a json representation of the whole Collection:
        self.source = source
        self.walk = walk
        #if we were passed a list of contents, apply them
        if contents:
            for item in contents:
                if item not in self:
                    self.append(item)
        if source:
            self.root = os.path.dirname(source)
        else:
            self.root = root

    def as_list(self, include_empty=False):
        """
        Generate a list of plain python dicts (one per Content item)
        for saving or exporting to JSON.
        """
        return [content.to_dict(include_empty) for content in self]

    def save(self, destination=None):
        """
        Serialize the whole collection to ``destination`` as JSON.
        Defaults to ``<root>/contents-saved.json`` rather than
        overwriting ``source``.
        """
        if not destination:
            #maybe destination should default to source???
            destination = os.path.join(self.root, "contents-saved.json")
        save_json(destination, self.as_list())

    def load(self, source=None, debug=False):
        """
        Load a collection from a previously generated summary JSON file
        (assembled in a previous scan).  Handles both dict and list
        layouts of the stored contents.

        Raises ValueError when no source has been configured.
        """
        if source:
            self.source = source
        # do the loading now:
        if self.source:
            if os.path.exists(self.source):
                json_contents = load_json(self.source)
                #this will depend on how json_contents was stored (list vs dict):
                if isinstance(json_contents, dict):
                    if debug:
                        print("Reading JSON as dictionary")
                    for content in list(json_contents.values()):
                        item = Content(content=content)
                        item.load()
                        if debug:
                            print(item)
                        self.append(item)
                else:
                    if debug:
                        print("Reading JSON as list")
                    # storing as a list seems most versatile:
                    # preserves ordering; a dictionary view can be
                    # created later (too many ways to index)
                    for content in json_contents:
                        if debug:
                            print(content)
                        item = Content(content=content)
                        item.load()
                        self.append(item)
            else:
                print("WARNING: couldn't find contents json path: %s" % self.source)
        else:
            raise ValueError("No source file specified: %s" % self.source)

    #aka walk()
    def rescan(self, ignores=None, debug=False):
        """
        Walk ``self.root`` and rebuild the list from the JSON files that
        describe each content item.  Those files are generated
        externally (e.g. during a scrape); parsing HTML is out of scope
        here.

        By convention the top-level directory holds only meta JSON files
        (ignored as content) -- content JSON always lives in a
        subdirectory.

        ignores: optional list of patterns (see check_ignore) to skip.
        Raises ValueError when no root is set.
        """
        if ignores is None:
            # None default avoids a shared mutable default argument
            ignores = []
        if not self.root:
            raise ValueError("Cannot rescan. No root set on collection: %s" % self.root)
        #clear out anything else
        del self[:]
        if debug:
            print("walking directory for contents: %s" % self.root)
        json_check = re.compile(r'.*\.json$')
        self_root_path = Path(self.root)
        parent = self_root_path.parent()
        if not os.path.isdir(self.root):
            print("Looking for path of root: %s" % self.root)
            print("(is the drive mounted???)")
            self.root = os.path.dirname(self.root)
            #if we still don't have a directory, something is wrong with root
            assert os.path.isdir(self.root)
        # NOTE(review): self_root_path/parent still reflect the original
        # root even when the fallback above adjusted self.root -- confirm
        # that is intended before changing it
        subdirs = self_root_path.load().directories
        for subdir in subdirs:
            if check_ignore(str(subdir), ignores):
                print("Ignoring directory: %s" % (subdir))
                continue
            for walk_root, dirs, files in os.walk(str(subdir)):
                for f in files:
                    if json_check.search(f) and not check_ignore(f, ignores):
                        json_file = os.path.join(walk_root, f)
                        relative_root = Path(walk_root).to_relative(str(parent))
                        #get rid of leading slash
                        if relative_root.startswith('/'):
                            relative_root = relative_root[1:]
                        if debug:
                            print("loading content from: %s" % json_file)
                        c = Content(json_file)
                        if debug:
                            print("setting base_dir to: %s" % relative_root)
                        #if updating one here, should update the other:
                        c.base_dir = relative_root
                        c.drive_dir = str(parent)
                        self.append(c)
        if debug:
            print("Finished loading %s contents manually" % (len(self)))

    def update(self, new_group):
        """
        Replace our contents with everything in ``new_group``, while
        preserving the Collection object's own attributes (meta data)
        rather than instantiating a new instance.
        """
        print("clearing contents: %s" % len(self))
        del self[:]
        print("should be clear (0): %s" % len(self))
        self.extend(new_group)
        print("applied new order: %s" % len(self))

    def apply_order(self, order, debug=False):
        """
        Take an ordered list of base dirs and apply that order to the
        contents.  Anything not named in ``order`` stays at the
        beginning; the ordered items are appended after them.

        Returns the number of unmatched items (useful for adjusting
        cutoffs in the caller).
        """
        new_group = []
        for base_dir in order:
            for content in self[:]:
                if content.base_dir == base_dir:
                    if debug:
                        print("adding %s to new list" % base_dir)
                    if content not in new_group:
                        new_group.append(content)
                    self.remove(content)
        found = len(self)
        #anything not in the order list should stay at the beginning,
        #which means adding new_group to the end of whatever is left
        self.extend(new_group)
        return found

    def get_order(self):
        """
        Return the list of base dirs representing all loaded contents,
        in order, without duplicates.
        """
        order = []
        for item in self:
            if item.base_dir not in order:
                order.append(item.base_dir)
        return order
class Collection(CollectionSimple):
    """
    Complete meta data for a collection of content.

    A flat list layout is used (easiest to order); dictionary or grouped
    views can be derived from it.  Collections are typically local
    mirrors of remote content -- similar to a subscription to an RSS
    feed -- so they periodically need to be checked for new content.
    That acquisition step is source specific, so it lives in a scraper
    plugin looked up via the CollectionSummary rather than cluttering
    this class (see the YAPSY notes in the repository).
    """
    #seems like the __init__ from CollectionSimple should be adequate...
    #this one often defaulted to a walk, which is not necessary here
    #(usually a custom walk is involved anyway)
    def old__init__(self, source='', root='', contents=None, walk=False,
                    as_dict=False, debug=False):
        """
        Retained for reference only -- deliberately renamed so the
        CollectionSimple initializer is used instead.

        source should be the full path to source.
        walk will force a walk, regardless of if a source is available.
        """
        #this is a json representation of the whole Collection:
        self.source = source
        #can be useful to look back at how the object was loaded.
        self.walk = walk
        #if we were passed a list of contents, apply them
        #(None default avoids a shared mutable default argument)
        if contents:
            for item in contents:
                if item not in self:
                    self.append(item)
        if source:
            self.root = os.path.dirname(source)
        else:
            self.root = root
        if not root:
            self.summary = None
        else:
            #load CollectionSummary... do we always need this???
            print("LOADING COLLECTION SUMMARY AT: %s" % self.root)
            self.summary = CollectionSummary(self.root)
            #TODO: *2012.11.09 12:15:37
            #loading the scraper here should be optional or configurable
            #-- scanning a large meta directory can take a while:
            #self.summary.load_scraper()
        if self.root and not self.source:
            meta = self.summary.latest_meta()
            if meta:
                self.source = meta
            else:
                # couldn't find anything, so better walk
                self.walk = True
        if self.walk:
            #need to know where to walk
            assert self.root, "NO ROOT SPECIFIED!"
            self.rescan()
        elif self.source:
            if debug:
                print("Loading Collection from: %s" % self.source)
            self.load(debug=debug)
        #whether or not to update a content's source json file
        #or just make the changes to the list locally --
        #e.g. when subtractively limiting a playlist to favorites, the
        #removed segments should not disappear from the content source
        self.sync_contents = True

    def reparse(self):
        """
        Similar to rescan, but regenerate each content item's JSON file
        from the original HTML source using the Collection's customized
        Scraper plugin, then rescan to pick up the regenerated files.

        NOTE(review): relies on ``self.summary`` having been set (only
        old__init__ does that) -- confirm callers configure it.
        """
        logging.debug("walking directory for reparse: %s" % self.root)
        html_check = re.compile(r'.*\.html$')
        #any directories that do not contain content should be listed here
        ignores = ["pages", "archive"]
        self_root_path = Path(self.root)
        #probably safe to assume this, but...
        if os.path.isdir(self.root):
            subdirs = self_root_path.load().directories
            for subdir in subdirs:
                if check_ignore(str(subdir), ignores):
                    continue
                for walk_root, dirs, files in os.walk(str(subdir)):
                    for f in files:
                        if html_check.search(f):
                            html_file = os.path.join(walk_root, f)
                            print()
                            print()
                            print("Starting check of: %s" % html_file)
                            #'details' rather than 'json' -- the original
                            #name shadowed the json module
                            details = self.summary.scraper.parse_details(html_file)
                            self.summary.scraper.save_details(
                                details, html_source=html_file)
                            #TODO: consider moving json saving into
                            #parse_details to avoid duplicated effort
        print("Finished parsing %s contents manually" % (len(self)))
        self.rescan()

    def sort_by_date(self, debug=False):
        """
        Reorder contents newest-first by each item's timestamp.
        """
        decorated = [[item.timestamp.compact(), item] for item in self]
        # sort on the timestamp key only: a plain sort() on [key, item]
        # pairs falls through to comparing the items on ties, which
        # raises TypeError on python 3
        decorated.sort(key=lambda pair: pair[0], reverse=True)
        new_order = []
        for pair in decorated:
            if debug:
                print(pair[0])
            new_order.append(pair[1])
        self.update(new_order)

    def load_cluster(self, cluster_file=None):
        """
        Delegate to self.summary.load_cluster().  When cluster_file is
        None the summary picks the newest available cluster file.
        """
        print("MAY WANT TO CALL LOAD CLUSTER DIRECT ON SUMMARY!")
        # forward cluster_file -- it was previously accepted but ignored
        return self.summary.load_cluster(cluster_file)
class CollectionSummary(object):
    """
    Summary of a drive or a collection.  Tracks:
      - other locations ('drives') where the collection's media may live
      - which of those locations are currently available on this system
      - the JSON meta index files found for the collection

    State is persisted as 'summary.json' in either the collection root
    or its 'meta' subdirectory.  This might be a good place for a parent
    class for plugin subclassing (YAPSY / IPlugins).
    """
    def __init__(self, root):
        """Load (or initialize) the summary stored under ``root``."""
        #if this changes, might need to rename existing files to stay
        #consistent (aka: index.json, collection-meta.json)
        self.file = 'summary.json'
        self.root = root
        #just in case meta files are not stored in the root of the collection
        self.meta_root = root
        self.name = os.path.basename(root)
        if not self.name:
            #maybe there was a trailing slash... this should fix that:
            self.name = os.path.basename(os.path.dirname(root))
        #other places to find media for this collection (aka 'drives')
        self.locations = []
        self.available = []
        # local json meta indexes -> last access info for this system
        self.metas = {}
        #data loaded from the json representation
        #(items not stored here: might take too long to load quickly)
        self.json_data = {}
        #not loaded by default:
        self.collection = None
        self.scraper = None
        self.cluster = None
        self.load()
        self.scan_metas()
        for location in self.locations:
            if os.path.exists(location):
                self.available.append(location)
        self.save()

    def __str__(self):
        return self.name

    def summary(self):
        """Print a human readable overview of this summary."""
        print("Name: %s" % (self.name))
        print("Root: %s" % (self.root))
        print("Locations: %s" % (self.locations))
        print("Available: %s" % (self.available))
        print("JSON Meta Files: %s" % (self.metas))
        print("")

    def load(self, json_file=None):
        """
        Load previously stored meta data.  Checks the collection root
        first, then falls back to the 'meta' subdirectory (updating
        self.meta_root accordingly).
        """
        if not json_file:
            json_file = os.path.join(self.root, self.file)
        if not os.path.exists(json_file):
            #check one more place...
            #common to keep these files in a 'meta' directory
            alt_json_file = os.path.join(self.root, 'meta', self.file)
            json_file = alt_json_file
            self.meta_root = os.path.join(self.root, 'meta')
            if not os.path.exists(alt_json_file):
                print("WARNING: couldn't find json on collection load: %s" % (json_file))
        #now see if we have something
        if os.path.exists(json_file):
            self.json_data = load_json(json_file)
            if 'locations' in self.json_data:
                self.locations = self.json_data['locations']
            if 'metas' in self.json_data:
                self.metas = self.json_data['metas']

    def save(self, json_file=None):
        """
        Save current data (to meta_root by default) for faster lookup
        next time.
        """
        if not json_file:
            json_file = os.path.join(self.meta_root, self.file)
        self.json_data['locations'] = self.locations
        self.json_data['metas'] = self.metas
        self.json_data['name'] = self.name
        self.json_data['root'] = self.root
        self.json_data['meta_root'] = self.meta_root
        save_json(json_file, self.json_data)

    def load_collection(self, json_file=None):
        """
        Load the corresponding Collection object.  With no explicit
        json_file, the latest meta index is used; with no metas at all,
        the root is walked instead.  The result is cached on
        self.collection and returned.
        """
        if json_file is None:
            meta = self.latest_meta()
            print("self.latest_meta() results: %s" % meta)
            # NOTE(review): meta may be None here (no metas found) and
            # os.path.join would raise -- confirm callers guarantee a
            # meta exists when meta_root is set
            if self.meta_root:
                #Collection will set root accordingly if meta has full path
                meta = os.path.join(self.meta_root, meta)
                print("after join: %s" % meta)
                collection = Collection(meta)
            else:
                collection = Collection(root=self.root, walk=True)
        else:
            collection = Collection(json_file)
        #keep track of it here, once it has been loaded
        self.collection = collection
        return collection

    def load_content(self, base_dir):
        """
        Retrieve a single Content item without loading the whole
        collection: look in all available locations and return the
        Content for the last matching JSON found, or None.
        """
        #'json_path' rather than 'json' to avoid shadowing the module
        json_path = None
        for root in self.available:
            option = os.path.join(root, base_dir)
            print("Checking path: %s" % option)
            if os.path.exists(option):
                json_path = find_json(option)
                if json_path:
                    print("FOUND: %s" % json_path)
        if json_path:
            return Content(json_path)
        else:
            return None

    def load_cluster(self, json_file=None):
        """
        Load the corresponding Cluster object.  With no explicit
        json_file, the newest groups meta is used; with none available a
        fresh empty Cluster is returned.  Cached on self.cluster.
        """
        print("LOADING CLUSTER: ")
        if json_file is None:
            meta = self.latest_groups()
            print("meta: %s (None means no clusters/.groups files found)" % meta)
            if meta:
                meta = os.path.join(self.meta_root, meta)
                cluster = Cluster(meta)
            else:
                #TODO: could generate an initial list of all group items
                #available in the collection (may be collection specific)
                cluster = Cluster()
        else:
            cluster = Cluster(json_file)
        #keep track of it here, once it has been loaded
        self.cluster = cluster
        return cluster

    def _split_meta_names(self):
        """
        Partition self.metas keys into (metas, groups) name lists:
        '*.groups*' files hold cluster data, everything else is a full
        collection index.  Rescans the meta directory when nothing is
        cached yet; returns None if no metas exist at all.
        """
        if not self.metas:
            self.scan_metas()
        #if still no metas exist, then nothing to return
        if not self.metas:
            return None
        metas = []
        groups = []
        for name in list(self.metas.keys()):
            if re.search(r'.*\.groups', name):
                groups.append(name)
            else:
                metas.append(name)
        return metas, groups

    def _newest_by_timestamp(self, names):
        """
        Return the name with the newest embedded timestamp, or None.
        Each name is split into word-character runs (separators vary);
        any run that parses as a moments Timestamp is considered.
        """
        newest_name = None
        newest_date = None
        for name in names:
            for part in re.findall(r'\w+', name):
                try:
                    ts = Timestamp(part)
                except Exception:
                    #must not be a datestring
                    continue
                if newest_date is None or ts.datetime > newest_date.datetime:
                    newest_date = ts
                    newest_name = name
        return newest_name

    #aka def latest_cluster(self):
    def latest_groups(self, debug=True):
        """
        Like latest_meta, but only considers the groups/cluster metas.
        """
        split = self._split_meta_names()
        if split is None:
            print("No meta found in scan (latest_groups())")
            return None
        metas, groups = split
        print("FOUND THE FOLLOWING GROUP OPTIONS (%s): %s" % (len(groups), groups))
        return self._newest_by_timestamp(groups)

    def latest_meta(self):
        """
        Look through all metas and return the newest full collection
        index ('.groups' cluster files are excluded).
        """
        split = self._split_meta_names()
        if split is None:
            print("No meta found in scan")
            return None
        metas, groups = split
        return self._newest_by_timestamp(metas)

    def scan_metas(self):
        """
        Refresh self.metas from meta_root: keep entries that still
        exist, add new '*.json*' files, drop stale ones.  Creates
        meta_root when missing.
        """
        if not os.path.exists(self.meta_root):
            os.makedirs(self.meta_root)
        options = os.listdir(self.meta_root)
        print("scan_metas in %s, %s options found" % (self.meta_root, len(options)))
        if self.file in options:
            options.remove(self.file)
        old_metas = list(self.metas.keys())
        ignores = ['~', ]
        #*2012.11.09 11:47:34
        #not always ending in .json anymore
        #(but should always have .json in the name)
        for option in options:
            if re.search(r'.*\.json', option) and not check_ignore(option, ignores):
                if option not in self.metas:
                    self.metas[option] = {'length': None, 'updated': None}
                if option in old_metas:
                    old_metas.remove(option)
        # anything left in old_metas no longer exists on disk
        for option in old_metas:
            del self.metas[option]
class Collections(list):
    """
    Holds multiple CollectionSummary objects with helpers for loading
    and lookups.  ``self`` contains the CollectionSummary instances;
    ``self.paths`` tracks the directories they were loaded from.
    """
    def __init__(self, root, collection_list=None):
        """
        root: directory that holds the collection directories.
        collection_list: optional list of paths; a relative path
        (starting with './') is resolved against root, anything else is
        assumed to be a full path.
        """
        self.root = root
        #keep track if we've already called load:
        self.loaded = False
        # copy rather than alias: the old mutable default ([]) was both
        # shared across instances and mutated by add(), leaking paths
        # from one Collections object to the next
        self.paths = list(collection_list) if collection_list else []
        if self.paths:
            self.load_summaries(self.paths)

    def scan(self, debug=False):
        """
        Return a list of directory options local to self.root.
        Does *NOT* update self.paths -- that is left to the caller.
        """
        options = [os.path.join(self.root, name)
                   for name in os.listdir(self.root)]
        return [option for option in options if os.path.isdir(option)]

    def add(self, path):
        """
        Add a path to self.paths and load its summary; used for loading
        externally.  Raises ValueError for non-directories.
        """
        if path not in self.paths:
            if os.path.isdir(path):
                self.paths.append(path)
                self.append(CollectionSummary(path))
            else:
                raise ValueError("Non-directory item sent to add: %s" % path)
        else:
            print("Path: %s already in collections.paths" % path)

    def scan_and_add(self):
        """
        Combine scan() and add() into one call -- an alternative to
        load_summaries, which manually specifies the viable paths.
        """
        for option in self.scan():
            self.add(option)

    def load_summaries(self, collection_list=None):
        """
        (Re)load all collection summaries.  Overwrites self.paths with
        collection_list when one is passed in.  Raises ValueError for
        any path that is not a directory.
        """
        #clear out our contents first
        del self[:]
        if collection_list:
            print("Updating Collections.paths to: %s" % collection_list)
            self.paths = list(collection_list)
        for path in self.paths:
            # assumes relative paths start with './' (two chars stripped)
            if re.match(r'^\.', path):
                path = os.path.join(self.root, path[2:])
            if os.path.isdir(path):
                self.append(CollectionSummary(path))
            else:
                raise ValueError("Non-directory item in Collections.paths: %s" % path)
        self.loaded = True

    def get_summary(self, name, debug=False):
        """
        Return the first collection summary whose name matches ``name``
        (loading summaries first if needed), or None.
        """
        if debug:
            print("Getting collection: %s from: %s" % (name, self.root))
        if not self.loaded:
            self.load_summaries()
        for collection_summary in self:
            if collection_summary.name == name:
                if debug:
                    print("%s matches: %s" % (name, collection_summary.name))
                return collection_summary
            if debug:
                print("%s doesn't match: %s" % (name, collection_summary.name))
        return None

    def setup(self):
        """
        Create the directory structure for root and all configured
        paths.  (Will probably be done elsewhere, but just in case...)
        """
        if not os.path.exists(self.root):
            os.makedirs(self.root)
        for path in self.paths:
            if re.match(r'^\.', path):
                path = os.path.join(self.root, path[2:])
            # a full path not under self.root may already exist
            if not os.path.exists(path):
                os.makedirs(path)

    def available(self):
        """
        Reload summaries and return only those with at least one
        available (mounted) location.
        """
        self.load_summaries()
        return [option for option in self if option.available]
class Cluster(list):
    """
    A Cluster is essentially a list of clouds (each cloud a list of tags).

    These are useful for grouping like items with like items.  Clusters
    were once referred to as groups, but that is too generic of a name in
    this case: these are really clusters of clouds.  They can be applied
    to media lists (Items) to generate intricate playlists based on a
    number of factors.
    """

    def __init__(self, source=None, ordered_list=None):
        """
        source: optional file to load the cluster from.
        ordered_list: optional initial contents.  Defaults to None rather
            than a mutable [] so instances never share a default list.
        """
        if ordered_list:
            self.extend(ordered_list)
        self.source = source
        if self.source:
            self.load()
        # used in from_cloud and to_cloud
        self.tag = None

    def save(self, destination=None):
        """
        Similar to save json, but custom formatting to make editing easier.
        To load, use collection.load_groups.
        """
        if destination is None:
            destination = self.source
        if not destination:
            raise ValueError("No destination specified: %s" % destination)
        # with-block closes the handle even when a write fails
        with codecs.open(destination, 'w', encoding='utf-8',
                         errors='ignore') as json_file:
            split = json.dumps(self)
            # one group per line, trailing commas, for hand editing
            split = split.replace('], ', ', ], \n')
            split = split.replace(']]', ', ]]')
            json_file.write(split)

    def load(self, source=None, create=False):
        """
        Load groups from ``source`` (defaults to ``self.source``).

        Reads the line-split format written by save(); lines starting
        with '#' are comments.  When ``create`` is True a missing file is
        created instead of raising.  Returns the loaded groups (self is
        replaced in place as well).
        """
        if source is None:
            source = self.source
        if not source:
            raise ValueError("No source specified: %s" % source)
        if not os.path.exists(source):
            if create:
                self.save(source)
            else:
                raise ValueError("Source file does not exist: %s" % source)
        groups = []
        with codecs.open(source, 'r', encoding='utf-8',
                         errors='ignore') as json_file:
            lines = json_file.readlines()
        # unsplit the items and put them back into a standard json format
        unsplit = ''
        for line in lines:
            if not re.match("#", line):
                line = line.replace(',]', ']')
                line = line.replace(', ]', ']')
                unsplit += line.strip() + ' '
        try:
            groups = json.loads(unsplit)
        # json decoding errors are ValueError subclasses (was a bare except)
        except ValueError:
            # try to pinpoint where the error is occurring:
            print(unsplit)
            # get rid of outer list:
            unsplit = unsplit[1:-1]
            parts = unsplit.split('], ')
            count = 0
            for p in parts:
                p = p + ']'
                try:
                    json.loads(p)
                except ValueError:
                    new_p = p[1:-1]
                    tags = new_p.split('", "')
                    summary = ''
                    for tag in tags:
                        summary += tag + "\n"
                    raise ValueError(
                        "Trouble loading JSON in part %s: %s" % (count,
                                                                 summary))
                count += 1
        del self[:]
        self.extend(groups)
        return groups

    def from_cloud(self, cloud_file, tag):
        """
        Sometimes clusters are stored in a cloud file; less decoration in
        that case.  ``tag`` will be used to find the latest entry with
        that tag.
        """
        self.source = cloud_file
        self.tag = tag
        clouds = Journal(cloud_file)
        if clouds.tag(tag):
            data = clouds.tags(tag)[0].data
            lines = data.splitlines()
        else:
            print("no ->%s<- tags found in %s" % (tag, cloud_file))
            exit()
        groups = []
        for line in lines:
            groups.append(line.split())
        del self[:]
        self.extend(groups)
        return groups

    def to_cloud(self, destination=None, tags=None):
        """
        Write the cluster out as a Journal cloud entry.

        ``tags`` defaults to ``[self.tag]``.  The old mutable-default
        ``tags=[]`` was appended to in place, leaking tags between calls
        (and mutating caller-supplied lists); a fresh list is built now.
        """
        if destination is None:
            print("Using previous source: %s" % self.source)
            destination = self.source
        # single check: the old duplicated isinstance(tags, str) pair was
        # a py2 str/unicode conversion artifact
        if isinstance(tags, str):
            raise ValueError("tags should be a list! (%s)" % tags)
        elif not tags:
            # covers None and [] without mutating the caller's list
            tags = [self.tag]
        else:
            # we must have a non-False value and it's not a string...
            assert isinstance(tags, list)
        if not tags:
            raise ValueError("Need a tag! (%s)" % tags)
        data = ''
        for group in self:
            data += " ".join(group) + '\n'
        clouds = Journal(destination)
        clouds.make(data=data, tags=tags)
        clouds.save(destination)
        print("Saved cloud: %s" % destination)

    def remove(self, ignores):
        """
        Go through lists and remove all matching items in ``ignores``.

        Iterates a copy of each group: the old code removed from the list
        it was iterating, which skips the element after each removal and
        could leave matches behind.
        """
        count = 0
        for group in self:
            for item in list(group):
                if item in ignores:
                    print("removing item: %s" % item)
                    group.remove(item)
                    count += 1
        print("Removed: %s items from: %s" % (count, self.source))
        print()

    def flatten(self, remove_dupes=True):
        """
        Return one flat list of every distinct item.

        When ``remove_dupes`` is True duplicates are also removed from
        their groups in place.  A copy of each group is iterated so the
        in-place removal cannot skip elements (the old code could leave
        duplicates behind).
        """
        flat = []
        for group in self:
            for item in list(group):
                if item not in flat:
                    flat.append(item)
                elif remove_dupes:
                    print("removing dupe: %s" % item)
                    group.remove(item)
                else:
                    print("keeping dupe: %s" % item)
        return flat

    def position(self, lookup):
        """
        Similar to contains, but returns the exact (group, index) position
        if it exists, else None.

        NOTE(review): the scan does not break early, so when an item is
        in several groups the *last* match wins — kept for compatibility.
        """
        match = None
        for group_num, group in enumerate(self):
            if lookup in group:
                match = (group_num, group.index(lookup))
        return match

    def contains(self, lookup):
        """
        Helper: just flattens all lists and sees if ``lookup`` is in that.
        (Note flatten() also de-dupes groups in place, as before.)
        """
        return lookup in self.flatten()

    def add_at(self, item, position):
        """
        Take the item and add it to the sub group at ``position``.
        If position is out of range for the cluster's length, add extra
        lists to pad it out.  The item is removed from any other group
        first.
        """
        while position >= len(self):
            self.append([])
        # remove from anywhere else first:
        self.remove([item])
        self[position].append(item)

    def merge_in(self, incoming, add_new=False, keep_order=False):
        """
        Take the incoming cluster and merge its items in, moving our items
        around; incoming is the authority on group membership.

        incoming should be modified before merge in if anything should not
        be merged (e.g. current unsorted items — wouldn't want those to
        undo items sorted elsewhere).  To add new items, set add_new to
        True.  keep_order keeps the existing order of items in this group
        (probably not what you want).
        """
        self_all = self.flatten()
        # flatten() de-dupes incoming's groups in place; the returned
        # value was never used, only the side effect matters
        incoming.flatten()
        # expand self to be length of incoming:
        if len(incoming) > len(self):
            size_diff = len(incoming) - len(self)
            print("EXPANDING CLUSTER BY %s" % size_diff)
            for _ in range(size_diff):
                self.append([])
        for ct in range(len(self)):
            if len(incoming) <= ct:
                print("skipping index: %s (incoming too short: %s)" %
                      (ct, len(incoming)))
            else:
                print("checking index: %s" % (ct))
                cur_self = self[ct]
                cur_incoming = incoming[ct]
                print("%s items in self. %s items in incoming" %
                      (len(cur_self), len(cur_incoming)))
                # place to keep track of the new order applied by incoming
                new_sub = []
                for item in cur_incoming:
                    if (item not in self_all) and add_new:
                        if keep_order:
                            cur_self.append(item)
                        else:
                            new_sub.append(item)
                        print("New item added: %s" % item)
                    elif item not in self_all:
                        print("Skipping new item: %s" % item)
                        print()
                    elif item not in cur_self:
                        # need to go find it in another group and remove it
                        for sub_ct in range(len(self)):
                            if sub_ct != ct and (item in self[sub_ct]):
                                self[sub_ct].remove(item)
                                if keep_order:
                                    cur_self.append(item)
                                else:
                                    new_sub.append(item)
                                if sub_ct < ct:
                                    print("down: %s (from %s to %s)" %
                                          (item, sub_ct, ct))
                                else:
                                    print("up: %s (from %s to %s)" %
                                          (item, sub_ct, ct))
                    else:
                        # must be in the current group...
                        # just need to check order preferences
                        if not keep_order:
                            new_sub.append(item)
                            cur_self.remove(item)
                if not keep_order:
                    new_sub.extend(cur_self)
                    self[ct] = new_sub
                print("%s: now %s items in destination (self)" %
                      (ct, len(cur_self)))
                print()
| |
import unittest
import numpy as np
import itertools
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator
def grad_var_name(var_name):
    """Return the framework name of the gradient variable for ``var_name``."""
    return "{0}@GRAD".format(var_name)
def create_op(scope, op_type, inputs, outputs, attrs):
    """
    Build an Operator of ``op_type``, declaring a scope variable for every
    provided input/output name (duplicable slots hold (name, value) pairs)
    and forwarding any recognized attributes.
    """
    kwargs = dict()

    def _declare(slot_descriptions, provided):
        # one pass handles both the input and the output side
        for slot_name, is_duplicable in slot_descriptions:
            if slot_name not in provided:
                continue
            kwargs[slot_name] = []
            if is_duplicable:
                for sub_name, _ in provided[slot_name]:
                    scope.new_var(sub_name)
                    kwargs[slot_name].append(sub_name)
            else:
                scope.new_var(slot_name)
                kwargs[slot_name].append(slot_name)

    _declare(Operator.get_op_inputs(op_type), inputs)
    _declare(Operator.get_op_outputs(op_type), outputs)

    for attr_name in Operator.get_op_attr_names(op_type):
        if attr_name in attrs:
            kwargs[attr_name] = attrs[attr_name]

    return Operator(op_type, **kwargs)
def set_input(scope, op, inputs, place):
    """
    Copy the host arrays in ``inputs`` into the scope tensors the op reads.

    A value may be either a bare array or an (array, lod) tuple; in the
    latter case the LoD is set after the data.
    """
    def _fill(var_name, value):
        tensor = scope.find_var(var_name).get_tensor()
        if isinstance(value, tuple):
            array, lod = value
            tensor.set_dims(array.shape)
            tensor.set(array, place)
            tensor.set_lod(lod)
        else:
            tensor.set_dims(value.shape)
            tensor.set(value, place)

    for in_name, in_dup in Operator.get_op_inputs(op.type()):
        if in_name not in inputs:
            continue
        if in_dup:
            for sub_name, sub_val in inputs[in_name]:
                _fill(sub_name, sub_val)
        else:
            _fill(in_name, inputs[in_name])
def set_output_grad(scope, op, outputs, place):
    """
    Create an @GRAD variable for every provided op output and fill it with
    ones of the matching shape (the seed gradient for the backward pass).
    """
    def _ones_grad(var_name):
        out_tensor = scope.find_var(var_name).get_tensor()
        grad_tensor = scope.new_var(grad_var_name(var_name)).get_tensor()
        grad_tensor.set_dims(out_tensor.shape())
        grad_tensor.set(np.ones(out_tensor.shape(), dtype=np.float32), place)

    for out_name, out_dup in Operator.get_op_outputs(op.type()):
        if out_name not in outputs:
            continue
        if out_dup:
            for sub_name, _ in outputs[out_name]:
                _ones_grad(sub_name)
        else:
            _ones_grad(out_name)
def get_numeric_gradient(scope,
                         op,
                         inputs,
                         input_to_check,
                         output_names,
                         delta=0.005,
                         in_place=False):
    """
    Estimate d(sum of outputs)/d(input_to_check) with central differences.

    Perturbs one element of the input tensor at a time by +/- delta, runs
    the op, and records (y_pos - y_neg) / (2 * delta).  When ``in_place``
    is True the inputs are re-set before every run (for ops that clobber
    their inputs).  Returns an ndarray shaped like the checked tensor.
    """
    set_input(scope, op, inputs, core.CPUPlace())
    op.infer_shape(scope)

    def product(dim):
        # plain loop instead of the py2-only bare `reduce`
        result = 1
        for extent in dim:
            result *= extent
        return result

    ctx = core.DeviceContext.create(core.CPUPlace())

    def get_output():
        # `total`, not `sum`, to avoid shadowing the builtin
        total = 0.0
        for output_name in output_names:
            op.run(scope, ctx)
            total += np.array(scope.find_var(output_name).get_tensor()).sum()
        return total

    tensor_to_check = scope.find_var(input_to_check).get_tensor()
    tensor_size = product(tensor_to_check.get_dims())
    gradient_flat = np.zeros(shape=(tensor_size, ), dtype='float32')
    # we only compute gradient of one element each time.
    # we use a for loop to compute the gradient of every element.
    for i in range(tensor_size):  # range, not py2-only xrange
        if in_place:
            set_input(scope, op, inputs, core.CPUPlace())
        # get one input element through its index i.
        origin = tensor_to_check.get_float_element(i)
        # add delta to it, run op and then get the sum of the result tensor.
        x_pos = origin + delta
        tensor_to_check.set_float_element(i, x_pos)
        y_pos = get_output()

        if in_place:
            set_input(scope, op, inputs, core.CPUPlace())
        x_neg = origin - delta
        tensor_to_check.set_float_element(i, x_neg)
        y_neg = get_output()

        # restore the original value before moving to the next element
        tensor_to_check.set_float_element(i, origin)
        gradient_flat[i] = (y_pos - y_neg) / delta / 2
    return gradient_flat.reshape(tensor_to_check.get_dims())
def get_backward_op(scope, op, no_grad_set):
    """
    Build the backward operator for ``op`` and pre-create tensors in the
    scope for every variable the backward op touches.
    """
    backward_op = core.Operator.backward(op, no_grad_set)
    for in_var in backward_op.input_vars():
        scope.new_var(in_var).get_tensor()
    for out_var in backward_op.output_vars():
        scope.new_var(out_var).get_tensor()
    return backward_op
def get_gradient(scope, op, inputs, outputs, grad_name, place,
                 no_grad_set=None):
    """
    Run ``op`` forward then its backward op, and return the tensor named
    ``grad_name`` as a numpy array.
    """
    ctx = core.DeviceContext.create(place)

    set_input(scope, op, inputs, place)
    op.infer_shape(scope)
    op.run(scope, ctx)

    backward_op = get_backward_op(scope, op,
                                  set() if no_grad_set is None
                                  else no_grad_set)
    set_output_grad(scope, op, outputs, place)
    backward_op.infer_shape(scope)
    backward_op.run(scope, ctx)

    return np.array(scope.find_var(grad_name).get_tensor())
class OpTest(unittest.TestCase):
    """
    Base test case that checks an operator's forward outputs against
    ``self.outputs`` and its analytic gradients against numeric ones.
    Subclasses provide ``op_type``, ``inputs``, ``outputs`` (and
    optionally ``attrs``).
    """

    def check_output_with_place(self, place):
        """Run the op on ``place`` and compare every output tensor."""
        self.scope = core.Scope()
        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
        self.op = create_op(self.scope, self.op_type, op_inputs, self.outputs,
                            op_attrs)
        # silently skip GPU places for CPU-only ops
        if isinstance(place, core.GPUPlace) and not self.op.support_gpu():
            return
        set_input(self.scope, self.op, self.inputs, place)
        self.op.infer_shape(self.scope)
        ctx = core.DeviceContext.create(place)
        self.op.run(self.scope, ctx)

        for out_name, out_dup in Operator.get_op_outputs(self.op.type()):
            if out_dup:
                # duplicable outputs are a list of (sub_name, expected)
                # pairs — same convention as set_input/set_output_grad.
                # The old code indexed the list with the name string.
                sub_out = self.outputs[out_name]
                for sub_out_name, sub_out_val in sub_out:
                    actual = np.array(
                        self.scope.find_var(sub_out_name).get_tensor())
                    expect = sub_out_val
                    self.assertTrue(
                        np.allclose(
                            actual, expect, atol=1e-05),
                        "output name: " + out_name + " has diff")
            else:
                actual = np.array(self.scope.find_var(out_name).get_tensor())
                expect = self.outputs[out_name]
                self.assertTrue(
                    np.allclose(
                        actual, expect, atol=1e-05),
                    "output name: " + out_name + " has diff")

    def check_output(self):
        """Check forward outputs on CPU and, when compiled with it, GPU."""
        places = [core.CPUPlace()]
        if core.is_compile_gpu():
            places.append(core.GPUPlace(0))
        for place in places:
            self.check_output_with_place(place)

    def __assert_is_close(self, numeric_grads, analytic_grads, names,
                          max_relative_error, msg_prefix):
        """Assert each numeric/analytic gradient pair is within tolerance."""
        # zip, not the py2-only itertools.izip, so this also runs on py3
        for a, b, name in zip(numeric_grads, analytic_grads, names):
            abs_a = np.abs(a)
            # avoid dividing by ~0: treat tiny reference values as 1
            abs_a[abs_a < 1e-3] = 1

            diff_mat = np.abs(a - b) / abs_a
            max_diff = np.max(diff_mat)

            def err_msg():
                offset = np.argmax(diff_mat > max_relative_error)
                return "%s Variable %s max gradient diff %f over limit %f, the first " \
                       "error element is %d" % (
                           msg_prefix, name, max_diff, max_relative_error,
                           offset)

            self.assertLessEqual(max_diff, max_relative_error, err_msg())

    def check_grad(self,
                   inputs_to_check,
                   output_names,
                   no_grad_set=None,
                   in_place=False,
                   max_relative_error=0.005):
        """
        Compare numeric gradients of ``inputs_to_check`` against analytic
        gradients on CPU (and GPU when available); also cross-checks CPU
        vs GPU analytic gradients.
        """
        self.scope = core.Scope()
        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
        self.op = create_op(self.scope, self.op_type, op_inputs, self.outputs,
                            op_attrs)
        if no_grad_set is None:
            no_grad_set = set()

        if not isinstance(output_names, list):
            output_names = [output_names]

        numeric_grads = [
            get_numeric_gradient(
                self.scope,
                self.op,
                self.inputs,
                input_to_check,
                output_names,
                in_place=in_place) for input_to_check in inputs_to_check
        ]
        grad_names = [
            grad_var_name(input_to_check) for input_to_check in inputs_to_check
        ]

        cpu_place = core.CPUPlace()
        cpu_analytic_grads = [
            get_gradient(self.scope, self.op, self.inputs, self.outputs,
                         grad_name, cpu_place, no_grad_set)
            for grad_name in grad_names
        ]

        self.__assert_is_close(numeric_grads, cpu_analytic_grads, grad_names,
                               max_relative_error,
                               "Gradient Check On %s" % str(cpu_place))

        if core.is_compile_gpu() and self.op.support_gpu():
            gpu_place = core.GPUPlace(0)
            gpu_analytic_grads = [
                get_gradient(self.scope, self.op, self.inputs, self.outputs,
                             grad_name, gpu_place, no_grad_set)
                for grad_name in grad_names
            ]

            self.__assert_is_close(numeric_grads, gpu_analytic_grads,
                                   grad_names, max_relative_error,
                                   "Gradient Check On %s" % str(gpu_place))

            # CPU and GPU analytic gradients must also agree with each other
            for c_grad, g_grad, name in zip(
                    cpu_analytic_grads, gpu_analytic_grads, grad_names):
                self.assertTrue(
                    np.allclose(
                        c_grad, g_grad, atol=1e-4),
                    "output name: " + name + " has diff")
| |
# coding: utf-8
""" Test cases for Series.plot """
from datetime import datetime
from itertools import chain
import numpy as np
from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Series, date_range
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
import pandas.util.testing as tm
import pandas.plotting as plotting
@td.skip_if_no_mpl
class TestSeriesPlots(TestPlotBase):
def setup_method(self, method):
    """Build the fixture series (time, string, period) used by the tests."""
    TestPlotBase.setup_method(self, method)
    import matplotlib as mpl
    # reset global state so earlier tests can't leak rcParams changes
    mpl.rcdefaults()

    self.ts = tm.makeTimeSeries()
    self.ts.name = 'ts'

    self.series = tm.makeStringSeries()
    self.series.name = 'series'

    self.iseries = tm.makePeriodSeries()
    self.iseries.name = 'iseries'
@pytest.mark.slow
def test_plot(self):
    """Smoke-test Series.plot across kinds, log scales and subplot layouts."""
    _check_plot_works(self.ts.plot, label='foo')
    _check_plot_works(self.ts.plot, use_index=False)
    axes = _check_plot_works(self.ts.plot, rot=0)
    self._check_ticks_props(axes, xrot=0)

    # log-scale variants
    ax = _check_plot_works(self.ts.plot, style='.', logy=True)
    self._check_ax_scales(ax, yaxis='log')
    ax = _check_plot_works(self.ts.plot, style='.', logx=True)
    self._check_ax_scales(ax, xaxis='log')
    ax = _check_plot_works(self.ts.plot, style='.', loglog=True)
    self._check_ax_scales(ax, xaxis='log', yaxis='log')

    _check_plot_works(self.ts[:10].plot.bar)
    _check_plot_works(self.ts.plot.area, stacked=False)
    _check_plot_works(self.iseries.plot)

    for kind in ['line', 'bar', 'barh', 'kde', 'hist', 'box']:
        _check_plot_works(self.series[:5].plot, kind=kind)

    _check_plot_works(self.series[:10].plot.barh)
    ax = _check_plot_works(Series(randn(10)).plot.bar, color='black')
    self._check_colors([ax.patches[0]], facecolors=['black'])

    # GH 6951: subplots=True on a single Series always yields a 1x1
    # layout, even when the requested layout is partially unspecified
    ax = _check_plot_works(self.ts.plot, subplots=True)
    self._check_axes_shape(ax, axes_num=1, layout=(1, 1))

    ax = _check_plot_works(self.ts.plot, subplots=True, layout=(-1, 1))
    self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
    ax = _check_plot_works(self.ts.plot, subplots=True, layout=(1, -1))
    self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_plot_figsize_and_title(self):
    """The figsize and title kwargs must propagate to the resulting axes."""
    _, ax = self.plt.subplots()
    ax = self.series.plot(figsize=(16, 8), title='Test', ax=ax)
    self._check_text_labels(ax.title, 'Test')
    self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8))
def test_dont_modify_rcParams(self):
    """Plotting must not mutate matplotlib's global rcParams (GH 8242)."""
    key = 'axes.prop_cycle'
    before = self.plt.rcParams[key]
    _, ax = self.plt.subplots()
    Series([1, 2, 3]).plot(ax=ax)
    after = self.plt.rcParams[key]
    assert before == after
def test_ts_line_lim(self):
    """x-limits of a time-series line plot must span the plotted data."""
    fig, ax = self.plt.subplots()
    ax = self.ts.plot(ax=ax)
    xmin, xmax = ax.get_xlim()
    lines = ax.get_lines()
    assert xmin <= lines[0].get_data(orig=False)[0][0]
    assert xmax >= lines[0].get_data(orig=False)[0][-1]
    tm.close()

    # same invariant when plotting on the secondary y-axis
    ax = self.ts.plot(secondary_y=True, ax=ax)
    xmin, xmax = ax.get_xlim()
    lines = ax.get_lines()
    assert xmin <= lines[0].get_data(orig=False)[0][0]
    assert xmax >= lines[0].get_data(orig=False)[0][-1]
def test_ts_area_lim(self):
    """x-limits of area plots span the data, incl. x_compat/tz/secondary_y."""
    _, ax = self.plt.subplots()
    ax = self.ts.plot.area(stacked=False, ax=ax)
    xmin, xmax = ax.get_xlim()
    line = ax.get_lines()[0].get_data(orig=False)[0]
    assert xmin <= line[0]
    assert xmax >= line[-1]
    tm.close()

    # GH 7471
    _, ax = self.plt.subplots()
    ax = self.ts.plot.area(stacked=False, x_compat=True, ax=ax)
    xmin, xmax = ax.get_xlim()
    line = ax.get_lines()[0].get_data(orig=False)[0]
    assert xmin <= line[0]
    assert xmax >= line[-1]
    tm.close()

    # timezone-aware index
    tz_ts = self.ts.copy()
    tz_ts.index = tz_ts.tz_localize('GMT').tz_convert('CET')
    _, ax = self.plt.subplots()
    ax = tz_ts.plot.area(stacked=False, x_compat=True, ax=ax)
    xmin, xmax = ax.get_xlim()
    line = ax.get_lines()[0].get_data(orig=False)[0]
    assert xmin <= line[0]
    assert xmax >= line[-1]
    tm.close()

    _, ax = self.plt.subplots()
    ax = tz_ts.plot.area(stacked=False, secondary_y=True, ax=ax)
    xmin, xmax = ax.get_xlim()
    line = ax.get_lines()[0].get_data(orig=False)[0]
    assert xmin <= line[0]
    assert xmax >= line[-1]
def test_label(self):
    """Legend label precedence: explicit label > series name > 'None'."""
    s = Series([1, 2])
    _, ax = self.plt.subplots()
    ax = s.plot(label='LABEL', legend=True, ax=ax)
    self._check_legend_labels(ax, labels=['LABEL'])
    self.plt.close()

    _, ax = self.plt.subplots()
    ax = s.plot(legend=True, ax=ax)
    self._check_legend_labels(ax, labels=['None'])
    self.plt.close()

    # get name from index
    s.name = 'NAME'
    _, ax = self.plt.subplots()
    ax = s.plot(legend=True, ax=ax)
    self._check_legend_labels(ax, labels=['NAME'])
    self.plt.close()

    # override the default
    _, ax = self.plt.subplots()
    ax = s.plot(legend=True, label='LABEL', ax=ax)
    self._check_legend_labels(ax, labels=['LABEL'])
    self.plt.close()

    # Add label info, but don't draw
    _, ax = self.plt.subplots()
    ax = s.plot(legend=False, label='LABEL', ax=ax)
    assert ax.get_legend() is None  # Hasn't been drawn
    ax.legend()  # draw it
    self._check_legend_labels(ax, labels=['LABEL'])
def test_line_area_nan_series(self):
    """NaNs are masked in line plots and filled with 0 when stacking."""
    values = [1, 2, np.nan, 3]
    s = Series(values)
    ts = Series(values, index=tm.makeDateIndex(k=4))
    for d in [s, ts]:
        ax = _check_plot_works(d.plot)
        masked = ax.lines[0].get_ydata()
        # remove nan for comparison purpose
        exp = np.array([1, 2, 3], dtype=np.float64)
        tm.assert_numpy_array_equal(np.delete(masked.data, 2), exp)
        tm.assert_numpy_array_equal(
            masked.mask, np.array([False, False, True, False]))

        # stacked / area variants replace NaN with 0 instead of masking
        expected = np.array([1, 2, 0, 3], dtype=np.float64)
        ax = _check_plot_works(d.plot, stacked=True)
        tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
        ax = _check_plot_works(d.plot.area)
        tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
        ax = _check_plot_works(d.plot.area, stacked=False)
        tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
def test_line_use_index_false(self):
    """With use_index=False the x-axis label (the index name) is dropped."""
    s = Series([1, 2, 3], index=['a', 'b', 'c'])
    s.index.name = 'The Index'
    _, ax = self.plt.subplots()
    line_ax = s.plot(use_index=False, ax=ax)
    assert line_ax.get_xlabel() == ''
    _, ax = self.plt.subplots()
    bar_ax = s.plot.bar(use_index=False, ax=ax)
    assert bar_ax.get_xlabel() == ''
@pytest.mark.slow
def test_bar_log(self):
    """log=True on bar/barh produces the expected log-scale tick locations."""
    expected = np.array([1e-1, 1e0, 1e1, 1e2, 1e3, 1e4])

    _, ax = self.plt.subplots()
    ax = Series([200, 500]).plot.bar(log=True, ax=ax)
    tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
    tm.close()

    _, ax = self.plt.subplots()
    ax = Series([200, 500]).plot.barh(log=True, ax=ax)
    tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
    tm.close()

    # GH 9905: sub-1 values still get sensible log limits and ticks
    expected = np.array([1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1])

    _, ax = self.plt.subplots()
    ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='bar', ax=ax)
    # hard-coded matplotlib autoscale results for this data
    ymin = 0.0007943282347242822
    ymax = 0.12589254117941673
    res = ax.get_ylim()
    tm.assert_almost_equal(res[0], ymin)
    tm.assert_almost_equal(res[1], ymax)
    tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
    tm.close()

    _, ax = self.plt.subplots()
    ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='barh', ax=ax)
    res = ax.get_xlim()
    tm.assert_almost_equal(res[0], ymin)
    tm.assert_almost_equal(res[1], ymax)
    tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
@pytest.mark.slow
def test_bar_ignore_index(self):
    """use_index=False on a bar plot falls back to positional tick labels."""
    ser = Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
    _, ax = self.plt.subplots()
    ax = ser.plot.bar(use_index=False, ax=ax)
    self._check_text_labels(ax.get_xticklabels(), ['0', '1', '2', '3'])
def test_bar_user_colors(self):
    """Explicit per-bar colors are applied to the patches in order."""
    s = Series([1, 2, 3, 4])
    ax = s.plot.bar(color=['red', 'blue', 'blue', 'red'])
    red = (1., 0., 0., 1.)
    blue = (0., 0., 1., 1.)
    result = [patch.get_facecolor() for patch in ax.patches]
    assert result == [red, blue, blue, red]
def test_rotation(self):
    """Default tick rotation is 0; an explicit rot kwarg is honored."""
    df = DataFrame(randn(5, 5))
    # Default rot 0
    _, ax = self.plt.subplots()
    axes = df.plot(ax=ax)
    self._check_ticks_props(axes, xrot=0)

    _, ax = self.plt.subplots()
    axes = df.plot(rot=30, ax=ax)
    self._check_ticks_props(axes, xrot=30)
def test_irregular_datetime(self):
    """set_xlim accepts date strings on an irregular datetime index."""
    rng = date_range('1/1/2000', '3/1/2000')
    # drop rows to make the index irregular
    rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
    ser = Series(randn(len(rng)), rng)
    _, ax = self.plt.subplots()
    ax = ser.plot(ax=ax)
    xp = datetime(1999, 1, 1).toordinal()
    ax.set_xlim('1/1/1999', '1/1/2001')
    # the string limit is converted to the ordinal of that date
    assert xp == ax.get_xlim()[0]
def test_unsorted_index_xlim(self):
    """x-limits cover the data even for an unsorted index containing NaN."""
    ser = Series([0., 1., np.nan, 3., 4., 5., 6.],
                 index=[1., 0., 3., 2., np.nan, 3., 2.])
    _, ax = self.plt.subplots()
    ax = ser.plot(ax=ax)
    xmin, xmax = ax.get_xlim()
    lines = ax.get_lines()
    assert xmin <= np.nanmin(lines[0].get_data(orig=False)[0])
    assert xmax >= np.nanmax(lines[0].get_data(orig=False)[0])
@pytest.mark.slow
def test_pie_series(self):
    """Series.plot.pie: labels, color cycling, autopct and NaN handling."""
    # if sum of values is less than 1.0, pie handle them as rate and draw
    # semicircle.
    series = Series(np.random.randint(1, 5),
                    index=['a', 'b', 'c', 'd', 'e'], name='YLABEL')
    ax = _check_plot_works(series.plot.pie)
    self._check_text_labels(ax.texts, series.index)
    assert ax.get_ylabel() == 'YLABEL'

    # without wedge labels
    ax = _check_plot_works(series.plot.pie, labels=None)
    self._check_text_labels(ax.texts, [''] * 5)

    # with less colors than elements: the color list cycles
    color_args = ['r', 'g', 'b']
    ax = _check_plot_works(series.plot.pie, colors=color_args)
    color_expected = ['r', 'g', 'b', 'r', 'g']
    self._check_colors(ax.patches, facecolors=color_expected)

    # with labels and colors
    labels = ['A', 'B', 'C', 'D', 'E']
    color_args = ['r', 'g', 'b', 'c', 'm']
    ax = _check_plot_works(series.plot.pie, labels=labels,
                           colors=color_args)
    self._check_text_labels(ax.texts, labels)
    self._check_colors(ax.patches, facecolors=color_args)

    # with autopct and fontsize: percentage texts interleave the labels
    ax = _check_plot_works(series.plot.pie, colors=color_args,
                           autopct='%.2f', fontsize=7)
    pcts = ['{0:.2f}'.format(s * 100)
            for s in series.values / float(series.sum())]
    expected_texts = list(chain.from_iterable(zip(series.index, pcts)))
    self._check_text_labels(ax.texts, expected_texts)
    for t in ax.texts:
        assert t.get_fontsize() == 7

    # includes negative value
    with pytest.raises(ValueError):
        series = Series([1, 2, 0, 4, -1], index=['a', 'b', 'c', 'd', 'e'])
        series.plot.pie()

    # includes nan: the NaN wedge gets an empty label
    series = Series([1, 2, np.nan, 4], index=['a', 'b', 'c', 'd'],
                    name='YLABEL')
    ax = _check_plot_works(series.plot.pie)
    self._check_text_labels(ax.texts, ['a', 'b', '', 'd'])
def test_pie_nan(self):
    """NaN wedges get an empty label; the rest keep their index position."""
    ser = Series([1, np.nan, 1, 1])
    _, ax = self.plt.subplots()
    ax = ser.plot.pie(legend=True, ax=ax)
    labels = [text.get_text() for text in ax.texts]
    assert labels == ['0', '', '2', '3']
@pytest.mark.slow
def test_hist_df_kwargs(self):
    """The bins kwarg is forwarded: 2 columns x 5 bins -> 10 patches."""
    frame = DataFrame(np.random.randn(10, 2))
    _, ax = self.plt.subplots()
    ax = frame.plot.hist(bins=5, ax=ax)
    assert len(ax.patches) == 10
@pytest.mark.slow
def test_hist_df_with_nonnumerics(self):
    """Non-numeric columns are ignored by DataFrame.plot.hist."""
    # GH 9853
    with tm.RNGContext(1):
        df = DataFrame(
            np.random.randn(10, 4), columns=['A', 'B', 'C', 'D'])
    df['E'] = ['x', 'y'] * 5
    _, ax = self.plt.subplots()
    ax = df.plot.hist(bins=5, ax=ax)
    # 4 numeric columns x 5 bins; column E contributes nothing
    assert len(ax.patches) == 20

    _, ax = self.plt.subplots()
    ax = df.plot.hist(ax=ax)  # bins=10
    assert len(ax.patches) == 40
@pytest.mark.slow
def test_hist_legacy(self):
    """Series.hist accepts grid/figsize/by/bins/ax/figure combinations."""
    _check_plot_works(self.ts.hist)
    _check_plot_works(self.ts.hist, grid=False)
    _check_plot_works(self.ts.hist, figsize=(8, 10))
    # _check_plot_works adds an ax so catch warning. see GH #13188
    with tm.assert_produces_warning(UserWarning):
        _check_plot_works(self.ts.hist,
                          by=self.ts.index.month)
    with tm.assert_produces_warning(UserWarning):
        _check_plot_works(self.ts.hist,
                          by=self.ts.index.month, bins=5)

    fig, ax = self.plt.subplots(1, 1)
    _check_plot_works(self.ts.hist, ax=ax)
    _check_plot_works(self.ts.hist, ax=ax, figure=fig)
    _check_plot_works(self.ts.hist, figure=fig)
    tm.close()

    fig, (ax1, ax2) = self.plt.subplots(1, 2)
    _check_plot_works(self.ts.hist, figure=fig, ax=ax1)
    _check_plot_works(self.ts.hist, figure=fig, ax=ax2)

    # by= and a pre-existing ax are incompatible
    with pytest.raises(ValueError):
        self.ts.hist(by=self.ts.index, figure=fig)
@pytest.mark.slow
def test_hist_bins_legacy(self):
    """DataFrame.hist honors the bins argument."""
    frame = DataFrame(np.random.randn(10, 2))
    first_ax = frame.hist(bins=2)[0][0]
    assert len(first_ax.patches) == 2
@pytest.mark.slow
def test_hist_layout(self):
    """A layout (tuple or list) is rejected for a single, ungrouped hist."""
    df = self.hist_df
    for layout in [(1, 1), [1, 1]]:
        with pytest.raises(ValueError):
            df.height.hist(layout=layout)
@pytest.mark.slow
def test_hist_layout_with_by(self):
    """hist(by=...) resolves explicit and -1 (inferred) layouts correctly."""
    df = self.hist_df

    # _check_plot_works adds an ax so catch warning. see GH #13188
    with tm.assert_produces_warning(UserWarning):
        axes = _check_plot_works(df.height.hist,
                                 by=df.gender, layout=(2, 1))
    self._check_axes_shape(axes, axes_num=2, layout=(2, 1))

    with tm.assert_produces_warning(UserWarning):
        axes = _check_plot_works(df.height.hist,
                                 by=df.gender, layout=(3, -1))
    self._check_axes_shape(axes, axes_num=2, layout=(3, 1))

    with tm.assert_produces_warning(UserWarning):
        axes = _check_plot_works(df.height.hist,
                                 by=df.category, layout=(4, 1))
    self._check_axes_shape(axes, axes_num=4, layout=(4, 1))

    with tm.assert_produces_warning(UserWarning):
        axes = _check_plot_works(df.height.hist,
                                 by=df.category, layout=(2, -1))
    self._check_axes_shape(axes, axes_num=4, layout=(2, 2))

    with tm.assert_produces_warning(UserWarning):
        axes = _check_plot_works(df.height.hist,
                                 by=df.category, layout=(3, -1))
    self._check_axes_shape(axes, axes_num=4, layout=(3, 2))

    with tm.assert_produces_warning(UserWarning):
        axes = _check_plot_works(df.height.hist,
                                 by=df.category, layout=(-1, 4))
    self._check_axes_shape(axes, axes_num=4, layout=(1, 4))

    with tm.assert_produces_warning(UserWarning):
        axes = _check_plot_works(df.height.hist,
                                 by=df.classroom, layout=(2, 2))
    self._check_axes_shape(axes, axes_num=3, layout=(2, 2))

    axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
    self._check_axes_shape(axes, axes_num=4, layout=(4, 2),
                           figsize=(12, 7))
@pytest.mark.slow
def test_hist_no_overlap(self):
    """Histograms on separate subplots end up on two distinct axes."""
    from matplotlib.pyplot import subplot, gcf
    first = Series(randn(2))
    second = Series(randn(2))
    subplot(121)
    first.hist()
    subplot(122)
    second.hist()
    assert len(gcf().axes) == 2
@pytest.mark.slow
def test_hist_secondary_legend(self):
    """Legend merging and axis visibility across primary/secondary hists."""
    # GH 9610
    df = DataFrame(np.random.randn(30, 4), columns=list('abcd'))

    # primary -> secondary
    _, ax = self.plt.subplots()
    ax = df['a'].plot.hist(legend=True, ax=ax)
    df['b'].plot.hist(ax=ax, legend=True, secondary_y=True)
    # both legends are drawn on left ax
    # left and right axis must be visible
    self._check_legend_labels(ax, labels=['a', 'b (right)'])
    assert ax.get_yaxis().get_visible()
    assert ax.right_ax.get_yaxis().get_visible()
    tm.close()

    # secondary -> secondary
    _, ax = self.plt.subplots()
    ax = df['a'].plot.hist(legend=True, secondary_y=True, ax=ax)
    df['b'].plot.hist(ax=ax, legend=True, secondary_y=True)
    # both legends are drawn on left ax
    # left axis must be invisible, right axis must be visible
    self._check_legend_labels(ax.left_ax,
                              labels=['a (right)', 'b (right)'])
    assert not ax.left_ax.get_yaxis().get_visible()
    assert ax.get_yaxis().get_visible()
    tm.close()

    # secondary -> primary
    _, ax = self.plt.subplots()
    ax = df['a'].plot.hist(legend=True, secondary_y=True, ax=ax)
    # right axes is returned
    df['b'].plot.hist(ax=ax, legend=True)
    # both legends are drawn on left ax
    # left and right axis must be visible
    self._check_legend_labels(ax.left_ax, labels=['a (right)', 'b'])
    assert ax.left_ax.get_yaxis().get_visible()
    assert ax.get_yaxis().get_visible()
    tm.close()
@pytest.mark.slow
def test_df_series_secondary_legend(self):
    """Legend labels and '(right)' marking when mixing DataFrame + Series
    plots on primary/secondary axes."""
    # GH 9779
    df = DataFrame(np.random.randn(30, 3), columns=list('abc'))
    s = Series(np.random.randn(30), name='x')

    # primary -> secondary (without passing ax)
    _, ax = self.plt.subplots()
    ax = df.plot(ax=ax)
    s.plot(legend=True, secondary_y=True, ax=ax)
    # both legends are drawn on left ax
    # left and right axis must be visible
    self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
    assert ax.get_yaxis().get_visible()
    assert ax.right_ax.get_yaxis().get_visible()
    tm.close()

    # primary -> secondary (with passing ax)
    _, ax = self.plt.subplots()
    ax = df.plot(ax=ax)
    s.plot(ax=ax, legend=True, secondary_y=True)
    # both legends are drawn on left ax
    # left and right axis must be visible
    self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
    assert ax.get_yaxis().get_visible()
    assert ax.right_ax.get_yaxis().get_visible()
    tm.close()

    # secondary -> secondary (without passing ax)
    _, ax = self.plt.subplots()
    ax = df.plot(secondary_y=True, ax=ax)
    s.plot(legend=True, secondary_y=True, ax=ax)
    # both legends are drawn on left ax
    # left axis must be invisible and right axis must be visible
    expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
    self._check_legend_labels(ax.left_ax, labels=expected)
    assert not ax.left_ax.get_yaxis().get_visible()
    assert ax.get_yaxis().get_visible()
    tm.close()

    # secondary -> secondary (with passing ax)
    _, ax = self.plt.subplots()
    ax = df.plot(secondary_y=True, ax=ax)
    s.plot(ax=ax, legend=True, secondary_y=True)
    # both legends are drawn on left ax
    # left axis must be invisible and right axis must be visible
    expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
    self._check_legend_labels(ax.left_ax, expected)
    assert not ax.left_ax.get_yaxis().get_visible()
    assert ax.get_yaxis().get_visible()
    tm.close()

    # secondary -> secondary (with passing ax); mark_right=False drops
    # the '(right)' suffix for the DataFrame columns
    _, ax = self.plt.subplots()
    ax = df.plot(secondary_y=True, mark_right=False, ax=ax)
    s.plot(ax=ax, legend=True, secondary_y=True)
    # both legends are drawn on left ax
    # left axis must be invisible and right axis must be visible
    expected = ['a', 'b', 'c', 'x (right)']
    self._check_legend_labels(ax.left_ax, expected)
    assert not ax.left_ax.get_yaxis().get_visible()
    assert ax.get_yaxis().get_visible()
    tm.close()
@pytest.mark.slow
@pytest.mark.parametrize("input_logy, expected_scale", [
    (True, 'log'),
    ('sym', 'symlog')
])
def test_secondary_logy(self, input_logy, expected_scale):
    """logy=True/'sym' sets log/symlog scale on primary and secondary axes."""
    # GH 25545
    s1 = Series(np.random.randn(30))
    s2 = Series(np.random.randn(30))

    # GH 24980
    ax1 = s1.plot(logy=input_logy)
    ax2 = s2.plot(secondary_y=True, logy=input_logy)

    assert ax1.get_yscale() == expected_scale
    assert ax2.get_yscale() == expected_scale
    @pytest.mark.slow
    def test_plot_fails_with_dupe_color_and_style(self):
        """Passing a color both via ``style`` ('k--') and ``color`` raises."""
        x = Series(randn(2))
        with pytest.raises(ValueError):
            _, ax = self.plt.subplots()
            x.plot(style='k--', color='k', ax=ax)
    @pytest.mark.slow
    @td.skip_if_no_scipy
    def test_hist_kde(self):
        """hist and kde plots honour logy=True and draw blank tick labels."""
        _, ax = self.plt.subplots()
        ax = self.ts.plot.hist(logy=True, ax=ax)
        self._check_ax_scales(ax, yaxis='log')
        xlabels = ax.get_xticklabels()
        # ticks are values, thus ticklabels are blank
        self._check_text_labels(xlabels, [''] * len(xlabels))
        ylabels = ax.get_yticklabels()
        self._check_text_labels(ylabels, [''] * len(ylabels))
        # kde and its alias density must both plot without error
        _check_plot_works(self.ts.plot.kde)
        _check_plot_works(self.ts.plot.density)
        _, ax = self.plt.subplots()
        ax = self.ts.plot.kde(logy=True, ax=ax)
        self._check_ax_scales(ax, yaxis='log')
        xlabels = ax.get_xticklabels()
        self._check_text_labels(xlabels, [''] * len(xlabels))
        ylabels = ax.get_yticklabels()
        self._check_text_labels(ylabels, [''] * len(ylabels))
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_kwargs(self):
sample_points = np.linspace(-100, 100, 20)
_check_plot_works(self.ts.plot.kde, bw_method='scott', ind=20)
_check_plot_works(self.ts.plot.kde, bw_method=None, ind=20)
_check_plot_works(self.ts.plot.kde, bw_method=None, ind=np.int(20))
_check_plot_works(self.ts.plot.kde, bw_method=.5, ind=sample_points)
_check_plot_works(self.ts.plot.density, bw_method=.5,
ind=sample_points)
_, ax = self.plt.subplots()
ax = self.ts.plot.kde(logy=True, bw_method=.5, ind=sample_points,
ax=ax)
self._check_ax_scales(ax, yaxis='log')
self._check_text_labels(ax.yaxis.get_label(), 'Density')
    @pytest.mark.slow
    @td.skip_if_no_scipy
    def test_kde_missing_vals(self):
        """kde plot drops NaNs instead of failing (gh-14821)."""
        s = Series(np.random.uniform(size=50))
        s[0] = np.nan
        axes = _check_plot_works(s.plot.kde)
        # gh-14821: check if the values have any missing values
        assert any(~np.isnan(axes.lines[0].get_xdata()))
    @pytest.mark.slow
    def test_hist_kwargs(self):
        """hist forwards matplotlib kwargs: bins, orientation, align/stacked."""
        _, ax = self.plt.subplots()
        ax = self.ts.plot.hist(bins=5, ax=ax)
        # bins=5 must produce exactly 5 bar patches
        assert len(ax.patches) == 5
        self._check_text_labels(ax.yaxis.get_label(), 'Frequency')
        tm.close()
        _, ax = self.plt.subplots()
        ax = self.ts.plot.hist(orientation='horizontal', ax=ax)
        # horizontal orientation moves the Frequency label to the x axis
        self._check_text_labels(ax.xaxis.get_label(), 'Frequency')
        tm.close()
        _, ax = self.plt.subplots()
        ax = self.ts.plot.hist(align='left', stacked=True, ax=ax)
        tm.close()
    @pytest.mark.slow
    @td.skip_if_no_scipy
    def test_hist_kde_color(self):
        """color= is applied to hist patches and to the kde line."""
        _, ax = self.plt.subplots()
        ax = self.ts.plot.hist(logy=True, bins=10, color='b', ax=ax)
        self._check_ax_scales(ax, yaxis='log')
        assert len(ax.patches) == 10
        self._check_colors(ax.patches, facecolors=['b'] * 10)
        _, ax = self.plt.subplots()
        ax = self.ts.plot.kde(logy=True, color='r', ax=ax)
        self._check_ax_scales(ax, yaxis='log')
        lines = ax.get_lines()
        # a kde plot draws exactly one density curve
        assert len(lines) == 1
        self._check_colors(lines, ['r'])
    @pytest.mark.slow
    def test_boxplot_series(self):
        """Series.plot.box: log y-scale, series name as x label, blank y labels."""
        _, ax = self.plt.subplots()
        ax = self.ts.plot.box(logy=True, ax=ax)
        self._check_ax_scales(ax, yaxis='log')
        xlabels = ax.get_xticklabels()
        self._check_text_labels(xlabels, [self.ts.name])
        ylabels = ax.get_yticklabels()
        self._check_text_labels(ylabels, [''] * len(ylabels))
    @pytest.mark.slow
    def test_kind_both_ways(self):
        """Every kind works via plot(kind=...) and via the s.plot.<kind>() accessor."""
        s = Series(range(3))
        kinds = (plotting._core._common_kinds +
                 plotting._core._series_kinds)
        _, ax = self.plt.subplots()
        for kind in kinds:
            s.plot(kind=kind, ax=ax)
            getattr(s.plot, kind)()
    @pytest.mark.slow
    def test_invalid_plot_data(self):
        """Plotting an all-object (string) Series raises TypeError for every kind."""
        s = Series(list('abcd'))
        _, ax = self.plt.subplots()
        for kind in plotting._core._common_kinds:
            msg = "no numeric data to plot"
            with pytest.raises(TypeError, match=msg):
                s.plot(kind=kind, ax=ax)
    @pytest.mark.slow
    def test_valid_object_plot(self):
        """Object-dtype Series with numeric values still plots for every kind."""
        s = Series(range(10), dtype=object)
        for kind in plotting._core._common_kinds:
            _check_plot_works(s.plot, kind=kind)
    def test_partially_invalid_plot_data(self):
        """A Series mixing strings and numbers raises TypeError for every kind."""
        s = Series(['a', 'b', 1.0, 2])
        _, ax = self.plt.subplots()
        for kind in plotting._core._common_kinds:
            msg = "no numeric data to plot"
            with pytest.raises(TypeError, match=msg):
                s.plot(kind=kind, ax=ax)
def test_invalid_kind(self):
s = Series([1, 2])
with pytest.raises(ValueError):
s.plot(kind='aasdf')
    @pytest.mark.slow
    def test_dup_datetime_index_plot(self):
        """A Series with duplicate datetime index entries still plots."""
        dr1 = date_range('1/1/2009', periods=4)
        dr2 = date_range('1/2/2009', periods=4)
        # overlapping ranges -> duplicated timestamps in the index
        index = dr1.append(dr2)
        values = randn(index.size)
        s = Series(values, index=index)
        _check_plot_works(s.plot)
    @pytest.mark.slow
    def test_errorbar_plot(self):
        """xerr/yerr accept Series, ndarray, list, DataFrame and scalars,
        for line/bar plots and for time-series plots; bad lengths/types raise."""
        s = Series(np.arange(10), name='x')
        s_err = np.random.randn(10)
        d_err = DataFrame(randn(10, 2), index=s.index, columns=['x', 'y'])
        # test line and bar plots
        kinds = ['line', 'bar']
        for kind in kinds:
            ax = _check_plot_works(s.plot, yerr=Series(s_err), kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
            ax = _check_plot_works(s.plot, yerr=s_err, kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
            ax = _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
            # DataFrame yerr: the column matching the series name ('x') is used
            ax = _check_plot_works(s.plot, yerr=d_err, kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
            # scalar errors broadcast to every point
            ax = _check_plot_works(s.plot, xerr=0.2, yerr=0.2, kind=kind)
            self._check_has_errorbars(ax, xerr=1, yerr=1)
        ax = _check_plot_works(s.plot, xerr=s_err)
        self._check_has_errorbars(ax, xerr=1, yerr=0)
        # test time series plotting
        ix = date_range('1/1/2000', '1/1/2001', freq='M')
        ts = Series(np.arange(12), index=ix, name='x')
        ts_err = Series(np.random.randn(12), index=ix)
        td_err = DataFrame(randn(12, 2), index=ix, columns=['x', 'y'])
        ax = _check_plot_works(ts.plot, yerr=ts_err)
        self._check_has_errorbars(ax, xerr=0, yerr=1)
        ax = _check_plot_works(ts.plot, yerr=td_err)
        self._check_has_errorbars(ax, xerr=0, yerr=1)
        # check incorrect lengths and types
        with pytest.raises(ValueError):
            s.plot(yerr=np.arange(11))
        s_err = ['zzz'] * 10
        with pytest.raises(TypeError):
            s.plot(yerr=s_err)
    def test_table(self):
        """table=True and table=<data> both render a table beneath the plot."""
        _check_plot_works(self.series.plot, table=True)
        _check_plot_works(self.series.plot, table=self.series)
    @pytest.mark.slow
    def test_series_grid_settings(self):
        # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
        self._check_grid_settings(Series([1, 2, 3]),
                                  plotting._core._series_kinds +
                                  plotting._core._common_kinds)
@pytest.mark.slow
def test_standard_colors(self):
from pandas.plotting._matplotlib.style import _get_standard_colors
for c in ['r', 'red', 'green', '#FF0000']:
result = _get_standard_colors(1, color=c)
assert result == [c]
result = _get_standard_colors(1, color=[c])
assert result == [c]
result = _get_standard_colors(3, color=c)
assert result == [c] * 3
result = _get_standard_colors(3, color=[c])
assert result == [c] * 3
@pytest.mark.slow
def test_standard_colors_all(self):
import matplotlib.colors as colors
from pandas.plotting._matplotlib.style import _get_standard_colors
# multiple colors like mediumaquamarine
for c in colors.cnames:
result = _get_standard_colors(num_colors=1, color=c)
assert result == [c]
result = _get_standard_colors(num_colors=1, color=[c])
assert result == [c]
result = _get_standard_colors(num_colors=3, color=c)
assert result == [c] * 3
result = _get_standard_colors(num_colors=3, color=[c])
assert result == [c] * 3
# single letter colors like k
for c in colors.ColorConverter.colors:
result = _get_standard_colors(num_colors=1, color=c)
assert result == [c]
result = _get_standard_colors(num_colors=1, color=[c])
assert result == [c]
result = _get_standard_colors(num_colors=3, color=c)
assert result == [c] * 3
result = _get_standard_colors(num_colors=3, color=[c])
assert result == [c] * 3
    def test_series_plot_color_kwargs(self):
        # GH1890 - color= keyword reaches the line artist
        _, ax = self.plt.subplots()
        ax = Series(np.arange(12) + 1).plot(color='green', ax=ax)
        self._check_colors(ax.get_lines(), linecolors=['green'])
    def test_time_series_plot_color_kwargs(self):
        # #1890 - color= also works on the datetime-index plotting path
        _, ax = self.plt.subplots()
        ax = Series(np.arange(12) + 1, index=date_range(
            '1/1/2000', periods=12)).plot(color='green', ax=ax)
        self._check_colors(ax.get_lines(), linecolors=['green'])
    def test_time_series_plot_color_with_empty_kwargs(self):
        """Without color kwargs, successive plots cycle through rcParams colors."""
        import matplotlib as mpl

        def_colors = self._unpack_cycler(mpl.rcParams)
        index = date_range('1/1/2000', periods=12)
        s = Series(np.arange(1, 13), index=index)

        ncolors = 3

        _, ax = self.plt.subplots()
        # plot the same series three times on one Axes; each line should
        # pick the next color from the default cycler
        for i in range(ncolors):
            ax = s.plot(ax=ax)
        self._check_colors(ax.get_lines(), linecolors=def_colors[:ncolors])
    def test_xticklabels(self):
        # GH11529 - explicit xticks positions select the matching index labels
        s = Series(np.arange(10), index=['P%02d' % i for i in range(10)])
        _, ax = self.plt.subplots()
        ax = s.plot(xticks=[0, 3, 5, 9], ax=ax)
        exp = ['P%02d' % i for i in [0, 3, 5, 9]]
        self._check_text_labels(ax.get_xticklabels(), exp)
    def test_custom_business_day_freq(self):
        # GH7222 - a CustomBusinessDay frequency (with holidays) must plot
        from pandas.tseries.offsets import CustomBusinessDay
        s = Series(range(100, 121), index=pd.bdate_range(
            start='2014-05-01', end='2014-06-01',
            freq=CustomBusinessDay(holidays=['2014-05-26'])))
        _check_plot_works(s.plot)
    @pytest.mark.xfail
    def test_plot_accessor_updates_on_inplace(self):
        """Tick locations should match before/after an inplace drop (xfail).

        NOTE(review): the second subplots() call creates a fresh, empty Axes,
        so 'after' is read from an axis nothing was plotted on - presumably
        why this is marked xfail; confirm intent before changing.
        """
        s = Series([1, 2, 3, 4])
        _, ax = self.plt.subplots()
        ax = s.plot(ax=ax)
        before = ax.xaxis.get_ticklocs()

        s.drop([0, 1], inplace=True)
        _, ax = self.plt.subplots()
        after = ax.xaxis.get_ticklocs()
        tm.assert_numpy_array_equal(before, after)
| |
"""
opcode module - potentially shared between dis and other modules which
operate on bytecodes (e.g. peephole optimizers).
"""
__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs",
"haslocal", "hascompare", "hasfree", "opname", "opmap",
"HAVE_ARGUMENT", "EXTENDED_ARG", "hasnargs"]
# It's a chicken-and-egg I'm afraid:
# We're imported before _opcode's made.
# With exception unheeded
# (stack_effect is not needed)
# Both our chickens and eggs are allayed.
# --Larry Hastings, 2013/11/23
try:
from _opcode import stack_effect
__all__.append('stack_effect')
except ImportError:
pass
cmp_op = ('<', '<=', '==', '!=', '>', '>=')
hasconst = []
hasname = []
hasjrel = []
hasjabs = []
haslocal = []
hascompare = []
hasfree = []
hasnargs = [] # unused
opmap = {}
opname = ['<%r>' % (op,) for op in range(256)]
def def_op(name, op):
    """Register *name* for opcode number *op* in both lookup tables."""
    opmap[name] = op
    opname[op] = name
def name_op(name, op):
    """Register an opcode whose argument indexes the co_names list."""
    hasname.append(op)
    def_op(name, op)
def jrel_op(name, op):
    """Register an opcode whose argument is a relative jump offset."""
    hasjrel.append(op)
    def_op(name, op)
def jabs_op(name, op):
    """Register an opcode whose argument is an absolute jump target."""
    hasjabs.append(op)
    def_op(name, op)
# Instruction opcodes for compiled code
# Blank lines correspond to available opcodes

# --- Opcodes below HAVE_ARGUMENT (90) take no argument ---

def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('DUP_TOP_TWO', 5)
def_op('ROT_FOUR', 6)

def_op('NOP', 9)
def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)

def_op('UNARY_INVERT', 15)

def_op('BINARY_MATRIX_MULTIPLY', 16)
def_op('INPLACE_MATRIX_MULTIPLY', 17)

def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)

def_op('BINARY_MODULO', 22)
def_op('BINARY_ADD', 23)
def_op('BINARY_SUBTRACT', 24)
def_op('BINARY_SUBSCR', 25)
def_op('BINARY_FLOOR_DIVIDE', 26)
def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
# Structural pattern matching support (PEP 634)
def_op('GET_LEN', 30)
def_op('MATCH_MAPPING', 31)
def_op('MATCH_SEQUENCE', 32)
def_op('MATCH_KEYS', 33)
def_op('COPY_DICT_WITHOUT_KEYS', 34)

def_op('WITH_EXCEPT_START', 49)
def_op('GET_AITER', 50)
def_op('GET_ANEXT', 51)
def_op('BEFORE_ASYNC_WITH', 52)

def_op('END_ASYNC_FOR', 54)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)

def_op('INPLACE_MODULO', 59)
def_op('STORE_SUBSCR', 60)
def_op('DELETE_SUBSCR', 61)
def_op('BINARY_LSHIFT', 62)
def_op('BINARY_RSHIFT', 63)
def_op('BINARY_AND', 64)
def_op('BINARY_XOR', 65)
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
def_op('GET_YIELD_FROM_ITER', 69)

def_op('PRINT_EXPR', 70)
def_op('LOAD_BUILD_CLASS', 71)
def_op('YIELD_FROM', 72)
def_op('GET_AWAITABLE', 73)
def_op('LOAD_ASSERTION_ERROR', 74)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)

def_op('LIST_TO_TUPLE', 82)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('SETUP_ANNOTATIONS', 85)
def_op('YIELD_VALUE', 86)
def_op('POP_BLOCK', 87)

def_op('POP_EXCEPT', 89)

HAVE_ARGUMENT = 90              # Opcodes from here have an argument:

name_op('STORE_NAME', 90)       # Index in name list
name_op('DELETE_NAME', 91)      # ""
def_op('UNPACK_SEQUENCE', 92)   # Number of tuple items
jrel_op('FOR_ITER', 93)
def_op('UNPACK_EX', 94)
name_op('STORE_ATTR', 95)       # Index in name list
name_op('DELETE_ATTR', 96)      # ""
name_op('STORE_GLOBAL', 97)     # ""
name_op('DELETE_GLOBAL', 98)    # ""
def_op('ROT_N', 99)
def_op('LOAD_CONST', 100)       # Index in const list
hasconst.append(100)
name_op('LOAD_NAME', 101)       # Index in name list
def_op('BUILD_TUPLE', 102)      # Number of tuple items
def_op('BUILD_LIST', 103)       # Number of list items
def_op('BUILD_SET', 104)        # Number of set items
def_op('BUILD_MAP', 105)        # Number of dict entries
name_op('LOAD_ATTR', 106)       # Index in name list
def_op('COMPARE_OP', 107)       # Comparison operator
hascompare.append(107)
name_op('IMPORT_NAME', 108)     # Index in name list
name_op('IMPORT_FROM', 109)     # Index in name list

jrel_op('JUMP_FORWARD', 110)    # Number of bytes to skip

jabs_op('JUMP_IF_FALSE_OR_POP', 111) # Target byte offset from beginning of code
jabs_op('JUMP_IF_TRUE_OR_POP', 112)  # ""
jabs_op('JUMP_ABSOLUTE', 113)        # ""
jabs_op('POP_JUMP_IF_FALSE', 114)    # ""
jabs_op('POP_JUMP_IF_TRUE', 115)     # ""

name_op('LOAD_GLOBAL', 116)     # Index in name list

def_op('IS_OP', 117)
def_op('CONTAINS_OP', 118)
def_op('RERAISE', 119)

jabs_op('JUMP_IF_NOT_EXC_MATCH', 121)
jrel_op('SETUP_FINALLY', 122)   # Distance to target address

def_op('LOAD_FAST', 124)        # Local variable number
haslocal.append(124)
def_op('STORE_FAST', 125)       # Local variable number
haslocal.append(125)
def_op('DELETE_FAST', 126)      # Local variable number
haslocal.append(126)

def_op('GEN_START', 129)        # Kind of generator/coroutine
def_op('RAISE_VARARGS', 130)    # Number of raise arguments (1, 2, or 3)
def_op('CALL_FUNCTION', 131)    # #args
def_op('MAKE_FUNCTION', 132)    # Flags
def_op('BUILD_SLICE', 133)      # Number of items

# Closure/cell variable access opcodes
def_op('LOAD_CLOSURE', 135)
hasfree.append(135)
def_op('LOAD_DEREF', 136)
hasfree.append(136)
def_op('STORE_DEREF', 137)
hasfree.append(137)
def_op('DELETE_DEREF', 138)
hasfree.append(138)

def_op('CALL_FUNCTION_KW', 141)  # #args + #kwargs
def_op('CALL_FUNCTION_EX', 142)  # Flags

jrel_op('SETUP_WITH', 143)

def_op('EXTENDED_ARG', 144)
EXTENDED_ARG = 144

def_op('LIST_APPEND', 145)
def_op('SET_ADD', 146)
def_op('MAP_ADD', 147)

def_op('LOAD_CLASSDEREF', 148)
hasfree.append(148)

def_op('MATCH_CLASS', 152)

jrel_op('SETUP_ASYNC_WITH', 154)

def_op('FORMAT_VALUE', 155)
def_op('BUILD_CONST_KEY_MAP', 156)
def_op('BUILD_STRING', 157)

name_op('LOAD_METHOD', 160)
def_op('CALL_METHOD', 161)
def_op('LIST_EXTEND', 162)
def_op('SET_UPDATE', 163)
def_op('DICT_MERGE', 164)
def_op('DICT_UPDATE', 165)

# The helpers are module-building scaffolding only; drop them from the API.
del def_op, name_op, jrel_op, jabs_op
| |
# Module 'ntpath' -- common operations on WinNT/Win95 pathnames
"""Common pathname manipulations, WindowsNT/95 version.

Instead of importing this module directly, import os and refer to this
module as os.path.
"""

# strings representing various path-related bits and pieces
# These are primarily for export; internally, they are hardcoded.
# Should be set before imports for resolving cyclic dependency.
curdir = '.'
pardir = '..'
extsep = '.'
sep = '\\'
pathsep = ';'
altsep = '/'
defpath = '.;C:\\bin'
devnull = 'nul'

import os
import sys
import stat
import genericpath
from genericpath import *

__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
           "basename","dirname","commonprefix","getsize","getmtime",
           "getatime","getctime", "islink","exists","lexists","isdir","isfile",
           "ismount", "expanduser","expandvars","normpath","abspath",
           "curdir","pardir","sep","pathsep","defpath","altsep",
           "extsep","devnull","realpath","supports_unicode_filenames","relpath",
           "samefile", "sameopenfile", "samestat", "commonpath"]
def _get_bothseps(path):
if isinstance(path, bytes):
return b'\\/'
else:
return '\\/'
# Normalize the case of a pathname and map slashes to backslashes.
# Other normalizations (such as optimizing '../' away) are not done
# (this is done by normpath).

def normcase(s):
    """Normalize case of pathname.

    Makes all characters lowercase and all slashes into backslashes."""
    s = os.fspath(s)
    slash, backslash = (b'/', b'\\') if isinstance(s, bytes) else ('/', '\\')
    return s.replace(slash, backslash).lower()
# Return whether a path is absolute.
# Trivial in Posix, harder on Windows.
# For Windows it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume-letter-and-colon or UNC-resource
# starts with a slash or backslash.

def isabs(s):
    """Test whether a path is absolute"""
    s = os.fspath(s)
    # \\?\-prefixed ("extended-length") paths are always absolute, even
    # though they do not necessarily contain a drive letter.
    if isinstance(s, bytes):
        prefix, slash, backslash = b'\\\\?\\', b'/', b'\\'
    else:
        prefix, slash, backslash = '\\\\?\\', '/', '\\'
    if s.replace(slash, backslash).startswith(prefix):
        return True
    rest = splitdrive(s)[1]
    return len(rest) > 0 and rest[0] in _get_bothseps(rest)
# Join two (or more) paths.
def join(path, *paths):
    """Join two or more pathname components, inserting "\\" as needed.

    An absolute component, or one carrying a different drive letter,
    discards everything accumulated before it.
    """
    path = os.fspath(path)
    if isinstance(path, bytes):
        sep = b'\\'
        seps = b'\\/'
        colon = b':'
    else:
        sep = '\\'
        seps = '\\/'
        colon = ':'
    try:
        if not paths:
            path[:0] + sep  #23780: Ensure compatible data type even if p is null.
        result_drive, result_path = splitdrive(path)
        for p in map(os.fspath, paths):
            p_drive, p_path = splitdrive(p)
            if p_path and p_path[0] in seps:
                # Second path is absolute
                if p_drive or not result_drive:
                    result_drive = p_drive
                result_path = p_path
                continue
            elif p_drive and p_drive != result_drive:
                if p_drive.lower() != result_drive.lower():
                    # Different drives => ignore the first path entirely
                    result_drive = p_drive
                    result_path = p_path
                    continue
                # Same drive in different case
                result_drive = p_drive
            # Second path is relative to the first
            if result_path and result_path[-1] not in seps:
                result_path = result_path + sep
            result_path = result_path + p_path
    ## add separator between UNC and non-absolute path
        if (result_path and result_path[0] not in seps and
            result_drive and result_drive[-1:] != colon):
            return result_drive + sep + result_path
        return result_drive + result_path
    except (TypeError, AttributeError, BytesWarning):
        # Mixed str/bytes arguments: raise a consistent TypeError
        genericpath._check_arg_types('join', path, *paths)
        raise
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
    """Split a pathname into drive/UNC sharepoint and relative path specifiers.

    Returns a 2-tuple (drive_or_unc, path); either part may be empty, and
    result[0] + result[1] == p always holds.  A drive is everything up to
    and including the colon (splitdrive("c:/dir") -> ("c:", "/dir")); a UNC
    share point is host name and share up to but not including the fourth
    separator (splitdrive("//host/computer/dir") -> ("//host/computer",
    "/dir")).  Paths cannot contain both a drive letter and a UNC path.
    """
    p = os.fspath(p)
    if len(p) < 2:
        # Too short to carry a drive or UNC prefix.
        return p[:0], p
    if isinstance(p, bytes):
        bslash, fslash, colon = b'\\', b'/', b':'
    else:
        bslash, fslash, colon = '\\', '/', ':'
    norm = p.replace(fslash, bslash)
    if norm[0:2] == bslash * 2 and norm[2:3] != bslash:
        # UNC path:  \\machine\mountpoint\directory\...
        first = norm.find(bslash, 2)
        if first == -1:
            return p[:0], p
        second = norm.find(bslash, first + 1)
        # A UNC path can't have two separators in a row (after the
        # initial two), so this is not a valid share point.
        if second == first + 1:
            return p[:0], p
        if second == -1:
            second = len(p)
        return p[:second], p[second:]
    if norm[1:2] == colon:
        # Drive-letter path, e.g. "c:/dir".
        return p[:2], p[2:]
    return p[:0], p
# Split a path in head (everything up to the last '/') and tail (the
# rest).  After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.

def split(p):
    """Split a pathname.

    Return tuple (head, tail) where tail is everything after the final slash.
    Either part may be empty."""
    p = os.fspath(p)
    seps = _get_bothseps(p)
    drive, p = splitdrive(p)
    # Find the index just past the last separator in p.
    cut = len(p)
    while cut and p[cut - 1] not in seps:
        cut -= 1
    head = p[:cut]
    tail = p[cut:]  # tail contains no slashes
    # Remove trailing slashes from head, unless head is all slashes.
    head = head.rstrip(seps) or head
    return drive + head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.

def splitext(p):
    p = os.fspath(p)
    if isinstance(p, bytes):
        sep_args = (b'\\', b'/', b'.')
    else:
        sep_args = ('\\', '/', '.')
    # Delegate to the shared generic implementation with NT separators.
    return genericpath._splitext(p, *sep_args)
splitext.__doc__ = genericpath._splitext.__doc__
# Return the tail (basename) part of a path.

def basename(p):
    """Returns the final component of a pathname"""
    _, tail = split(p)
    return tail
# Return the head (dirname) part of a path.

def dirname(p):
    """Returns the directory component of a pathname"""
    head, _ = split(p)
    return head
# Is a path a symbolic link?
# This will always return false on systems where os.lstat doesn't exist.

def islink(path):
    """Test whether a path is a symbolic link.
    This will always return false for Windows prior to 6.0.
    """
    try:
        mode = os.lstat(path).st_mode
    except (OSError, ValueError, AttributeError):
        # missing file, bad argument, or no os.lstat at all
        return False
    return stat.S_ISLNK(mode)
# Being true for dangling symbolic links is also useful.

def lexists(path):
    """Test whether a path exists.  Returns True for broken symbolic links"""
    try:
        os.lstat(path)
    except (OSError, ValueError):
        return False
    return True
# Is a path a mount point?
# Any drive letter root (eg c:\)
# Any share UNC (eg \\server\share)
# Any volume mounted on a filesystem folder
#
# No one method detects all three situations. Historically we've lexically
# detected drive letter roots and share UNCs. The canonical approach to
# detecting mounted volumes (querying the reparse tag) fails for the most
# common case: drive letter roots. The alternative which uses GetVolumePathName
# fails if the drive letter is the result of a SUBST.
try:
    # Only available on Windows; used for the mounted-volume case below.
    from nt import _getvolumepathname
except ImportError:
    _getvolumepathname = None
def ismount(path):
    """Test whether a path is a mount point (a drive root, the root of a
    share, or a mounted volume)"""
    path = os.fspath(path)
    seps = _get_bothseps(path)
    path = abspath(path)
    root, rest = splitdrive(path)
    if root and root[0] in seps:
        # UNC share: a mount point iff nothing (or only a separator)
        # follows the share name.
        return (not rest) or (rest in seps)
    if rest in seps:
        # Drive root such as "c:\".
        return True

    if _getvolumepathname:
        # A mounted volume is its own volume path name.
        return path.rstrip(seps) == _getvolumepathname(path).rstrip(seps)
    else:
        return False
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)

def expanduser(path):
    """Expand ~ and ~user constructs.

    If user or $HOME is unknown, do nothing."""
    path = os.fspath(path)
    if isinstance(path, bytes):
        tilde = b'~'
    else:
        tilde = '~'
    if not path.startswith(tilde):
        return path
    # i indexes the end of the optional user name after '~'.
    i, n = 1, len(path)
    while i < n and path[i] not in _get_bothseps(path):
        i += 1

    # Resolution order: %USERPROFILE%, then %HOMEDRIVE%%HOMEPATH%;
    # give up (return unchanged) if neither is set.
    if 'USERPROFILE' in os.environ:
        userhome = os.environ['USERPROFILE']
    elif not 'HOMEPATH' in os.environ:
        return path
    else:
        try:
            drive = os.environ['HOMEDRIVE']
        except KeyError:
            drive = ''
        userhome = join(drive, os.environ['HOMEPATH'])

    if isinstance(path, bytes):
        userhome = os.fsencode(userhome)

    if i != 1: #~user
        # Replace the last component of the current user's home with 'user';
        # assumes all user homes share one parent directory.
        userhome = join(dirname(userhome), path[1:i])

    return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
#       - no expansion within single quotes
#       - '$$' is translated into '$'
#       - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
#       - ${varname} is accepted.
#       - $varname is accepted.
#       - %varname% is accepted.
#       - varnames can be made out of letters, digits and the characters '_-'
#         (though is not verified in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.

def expandvars(path):
    """Expand shell variables of the forms $var, ${var} and %var%.

    Unknown variables are left unchanged."""
    path = os.fspath(path)
    # Select str or bytes versions of every delimiter; environ is the
    # matching mapping (os.environb for bytes, if the platform has it).
    if isinstance(path, bytes):
        if b'$' not in path and b'%' not in path:
            return path
        import string
        varchars = bytes(string.ascii_letters + string.digits + '_-', 'ascii')
        quote = b'\''
        percent = b'%'
        brace = b'{'
        rbrace = b'}'
        dollar = b'$'
        environ = getattr(os, 'environb', None)
    else:
        if '$' not in path and '%' not in path:
            return path
        import string
        varchars = string.ascii_letters + string.digits + '_-'
        quote = '\''
        percent = '%'
        brace = '{'
        rbrace = '}'
        dollar = '$'
        environ = os.environ
    # res accumulates the output; path/index are repeatedly re-sliced as
    # each quoted span or variable reference is consumed.
    res = path[:0]
    index = 0
    pathlen = len(path)
    while index < pathlen:
        c = path[index:index+1]
        if c == quote:   # no expansion within single quotes
            path = path[index + 1:]
            pathlen = len(path)
            try:
                index = path.index(c)
                res += c + path[:index + 1]
            except ValueError:
                # unterminated quote: copy the rest verbatim
                res += c + path
                index = pathlen - 1
        elif c == percent:  # variable or '%'
            if path[index + 1:index + 2] == percent:
                # '%%' -> literal '%'
                res += c
                index += 1
            else:
                path = path[index+1:]
                pathlen = len(path)
                try:
                    index = path.index(percent)
                except ValueError:
                    # no closing '%': leave the text unchanged
                    res += percent + path
                    index = pathlen - 1
                else:
                    var = path[:index]
                    try:
                        if environ is None:
                            # no os.environb: go through str environ
                            value = os.fsencode(os.environ[os.fsdecode(var)])
                        else:
                            value = environ[var]
                    except KeyError:
                        # unknown variable: keep the %var% text
                        value = percent + var + percent
                    res += value
        elif c == dollar:  # variable or '$$'
            if path[index + 1:index + 2] == dollar:
                # '$$' -> literal '$'
                res += c
                index += 1
            elif path[index + 1:index + 2] == brace:
                # ${varname}
                path = path[index+2:]
                pathlen = len(path)
                try:
                    index = path.index(rbrace)
                except ValueError:
                    # no closing '}': leave the text unchanged
                    res += dollar + brace + path
                    index = pathlen - 1
                else:
                    var = path[:index]
                    try:
                        if environ is None:
                            value = os.fsencode(os.environ[os.fsdecode(var)])
                        else:
                            value = environ[var]
                    except KeyError:
                        value = dollar + brace + var + rbrace
                    res += value
            else:
                # bare $varname: collect the longest run of varchars
                var = path[:0]
                index += 1
                c = path[index:index + 1]
                while c and c in varchars:
                    var += c
                    index += 1
                    c = path[index:index + 1]
                try:
                    if environ is None:
                        value = os.fsencode(os.environ[os.fsdecode(var)])
                    else:
                        value = environ[var]
                except KeyError:
                    value = dollar + var
                res += value
                if c:
                    # back up so the terminating char is reprocessed
                    index -= 1
        else:
            res += c
        index += 1
    return res
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
# Previously, this function also truncated pathnames to 8+3 format,
# but as this module is called "ntpath", that's obviously wrong!

def normpath(path):
    """Normalize path, eliminating double slashes, etc."""
    path = os.fspath(path)
    if isinstance(path, bytes):
        sep = b'\\'
        altsep = b'/'
        curdir = b'.'
        pardir = b'..'
        special_prefixes = (b'\\\\.\\', b'\\\\?\\')
    else:
        sep = '\\'
        altsep = '/'
        curdir = '.'
        pardir = '..'
        special_prefixes = ('\\\\.\\', '\\\\?\\')
    if path.startswith(special_prefixes):
        # in the case of paths with these prefixes:
        # \\.\ -> device names
        # \\?\ -> literal paths
        # do not do any normalization, but return the path
        # unchanged apart from the call to os.fspath()
        return path
    path = path.replace(altsep, sep)
    prefix, path = splitdrive(path)

    # collapse initial backslashes: the root marker moves into prefix
    if path.startswith(sep):
        prefix += sep
        path = path.lstrip(sep)

    comps = path.split(sep)
    i = 0
    while i < len(comps):
        if not comps[i] or comps[i] == curdir:
            # drop empty components ('//') and '.' components
            del comps[i]
        elif comps[i] == pardir:
            if i > 0 and comps[i-1] != pardir:
                # 'x/..' cancels out
                del comps[i-1:i+1]
                i -= 1
            elif i == 0 and prefix.endswith(sep):
                # '..' at an absolute root is a no-op
                del comps[i]
            else:
                # leading '..' on a relative path must be kept
                i += 1
        else:
            i += 1
    # If the path is now empty, substitute '.'
    if not prefix and not comps:
        comps.append(curdir)
    return prefix + sep.join(comps)
def _abspath_fallback(path):
    """Return the absolute version of a path as a fallback function in case
    `nt._getfullpathname` is not available or raises OSError. See bpo-31047 for
    more.
    """
    path = os.fspath(path)
    if isabs(path):
        return normpath(path)
    # Relative path: anchor it at the current working directory,
    # matching the str/bytes flavour of the input.
    cwd = os.getcwdb() if isinstance(path, bytes) else os.getcwd()
    return normpath(join(cwd, path))
# Return an absolute path.
try:
    from nt import _getfullpathname

except ImportError: # not running on Windows - mock up something sensible
    abspath = _abspath_fallback

else:  # use native Windows method on Windows
    def abspath(path):
        """Return the absolute version of a path."""

        try:
            return normpath(_getfullpathname(path))
        except (OSError, ValueError):
            # e.g. embedded NUL or an invalid drive: fall back to the
            # pure-Python resolution (see bpo-31047)
            return _abspath_fallback(path)
try:
from nt import _getfinalpathname, readlink as _nt_readlink
except ImportError:
# realpath is a no-op on systems without _getfinalpathname support.
realpath = abspath
else:
def _readlink_deep(path):
# These error codes indicate that we should stop reading links and
# return the path we currently have.
# 1: ERROR_INVALID_FUNCTION
# 2: ERROR_FILE_NOT_FOUND
# 3: ERROR_DIRECTORY_NOT_FOUND
# 5: ERROR_ACCESS_DENIED
# 21: ERROR_NOT_READY (implies drive with no media)
# 32: ERROR_SHARING_VIOLATION (probably an NTFS paging file)
# 50: ERROR_NOT_SUPPORTED (implies no support for reparse points)
# 67: ERROR_BAD_NET_NAME (implies remote server unavailable)
# 87: ERROR_INVALID_PARAMETER
# 4390: ERROR_NOT_A_REPARSE_POINT
# 4392: ERROR_INVALID_REPARSE_DATA
# 4393: ERROR_REPARSE_TAG_INVALID
allowed_winerror = 1, 2, 3, 5, 21, 32, 50, 67, 87, 4390, 4392, 4393
seen = set()
while normcase(path) not in seen:
seen.add(normcase(path))
try:
old_path = path
path = _nt_readlink(path)
# Links may be relative, so resolve them against their
# own location
if not isabs(path):
# If it's something other than a symlink, we don't know
# what it's actually going to be resolved against, so
# just return the old path.
if not islink(old_path):
path = old_path
break
path = normpath(join(dirname(old_path), path))
except OSError as ex:
if ex.winerror in allowed_winerror:
break
raise
except ValueError:
# Stop on reparse points that are not symlinks
break
return path
def _getfinalpathname_nonstrict(path):
# These error codes indicate that we should stop resolving the path
# and return the value we currently have.
# 1: ERROR_INVALID_FUNCTION
# 2: ERROR_FILE_NOT_FOUND
# 3: ERROR_DIRECTORY_NOT_FOUND
# 5: ERROR_ACCESS_DENIED
# 21: ERROR_NOT_READY (implies drive with no media)
# 32: ERROR_SHARING_VIOLATION (probably an NTFS paging file)
# 50: ERROR_NOT_SUPPORTED
# 67: ERROR_BAD_NET_NAME (implies remote server unavailable)
# 87: ERROR_INVALID_PARAMETER
# 123: ERROR_INVALID_NAME
# 1920: ERROR_CANT_ACCESS_FILE
# 1921: ERROR_CANT_RESOLVE_FILENAME (implies unfollowable symlink)
allowed_winerror = 1, 2, 3, 5, 21, 32, 50, 67, 87, 123, 1920, 1921
# Non-strict algorithm is to find as much of the target directory
# as we can and join the rest.
tail = ''
while path:
try:
path = _getfinalpathname(path)
return join(path, tail) if tail else path
except OSError as ex:
if ex.winerror not in allowed_winerror:
raise
try:
# The OS could not resolve this path fully, so we attempt
# to follow the link ourselves. If we succeed, join the tail
# and return.
new_path = _readlink_deep(path)
if new_path != path:
return join(new_path, tail) if tail else new_path
except OSError:
# If we fail to readlink(), let's keep traversing
pass
path, name = split(path)
# TODO (bpo-38186): Request the real file name from the directory
# entry using FindFirstFileW. For now, we will return the path
# as best we have it
if path and not name:
return path + tail
tail = join(name, tail) if tail else name
return tail
def realpath(path):
    """Return the canonical form of *path*, resolving links via the OS where
    possible; falls back to best-effort resolution when strict lookup fails.
    """
    path = normpath(path)
    if isinstance(path, bytes):
        # \\?\ is the extended-length path prefix; \\?\UNC\ its network form.
        prefix = b'\\\\?\\'
        unc_prefix = b'\\\\?\\UNC\\'
        new_unc_prefix = b'\\\\'
        cwd = os.getcwdb()
        # bpo-38081: Special case for realpath(b'nul')
        if normcase(path) == normcase(os.fsencode(devnull)):
            return b'\\\\.\\NUL'
    else:
        prefix = '\\\\?\\'
        unc_prefix = '\\\\?\\UNC\\'
        new_unc_prefix = '\\\\'
        cwd = os.getcwd()
        # bpo-38081: Special case for realpath('nul')
        if normcase(path) == normcase(devnull):
            return '\\\\.\\NUL'
    had_prefix = path.startswith(prefix)
    if not had_prefix and not isabs(path):
        # Relative input: anchor it to the current working directory.
        path = join(cwd, path)
    try:
        # Strict OS resolution first; on failure fall back to the
        # component-by-component non-strict walk.
        path = _getfinalpathname(path)
        initial_winerror = 0
    except OSError as ex:
        initial_winerror = ex.winerror
        path = _getfinalpathname_nonstrict(path)
    # The path returned by _getfinalpathname will always start with \\?\ -
    # strip off that prefix unless it was already provided on the original
    # path.
    if not had_prefix and path.startswith(prefix):
        # For UNC paths, the prefix will actually be \\?\UNC\
        # Handle that case as well.
        if path.startswith(unc_prefix):
            spath = new_unc_prefix + path[len(unc_prefix):]
        else:
            spath = path[len(prefix):]
        # Ensure that the non-prefixed path resolves to the same path
        try:
            if _getfinalpathname(spath) == path:
                path = spath
        except OSError as ex:
            # If the path does not exist and originally did not exist, then
            # strip the prefix anyway.
            if ex.winerror == initial_winerror:
                path = spath
    return path
# Win9x family and earlier have no Unicode filename support.
# getwindowsversion()[3] is the `platform` field; 2 is VER_PLATFORM_WIN32_NT,
# i.e. the NT family. The hasattr() guard keeps this importable off Windows.
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
                              sys.getwindowsversion()[3] >= 2)
def relpath(path, start=None):
    """Return a relative version of a path"""
    path = os.fspath(path)
    # Pick str or bytes constants to match the input type.
    if isinstance(path, bytes):
        sep = b'\\'
        curdir = b'.'
        pardir = b'..'
    else:
        sep = '\\'
        curdir = '.'
        pardir = '..'
    if start is None:
        start = curdir
    if not path:
        raise ValueError("no path specified")
    start = os.fspath(start)
    try:
        # Normalize both endpoints to absolute form before comparing.
        abs_start = abspath(normpath(start))
        abs_path = abspath(normpath(path))
        drive_start, rest_start = splitdrive(abs_start)
        drive_path, rest_path = splitdrive(abs_path)
        if normcase(drive_start) != normcase(drive_path):
            # No relative path exists across different drives/mounts.
            raise ValueError("path is on mount %r, start on mount %r" % (
                drive_path, drive_start))
        parts_start = [p for p in rest_start.split(sep) if p]
        parts_path = [p for p in rest_path.split(sep) if p]
        # Count the leading components shared by both (case-insensitively).
        shared = 0
        for comp_s, comp_p in zip(parts_start, parts_path):
            if normcase(comp_s) != normcase(comp_p):
                break
            shared += 1
        # Climb out of the unshared part of `start`, then descend into `path`.
        hops = [pardir] * (len(parts_start) - shared) + parts_path[shared:]
        if not hops:
            return curdir
        return join(*hops)
    except (TypeError, ValueError, AttributeError, BytesWarning, DeprecationWarning):
        genericpath._check_arg_types('relpath', path, start)
        raise
# Return the longest common sub-path of the sequence of paths given as input.
# The function is case-insensitive and 'separator-insensitive', i.e. if the
# only difference between two paths is the use of '\' versus '/' as separator,
# they are deemed to be equal.
#
# However, the returned path will have the standard '\' separator (even if the
# given paths had the alternative '/' separator) and will have the case of the
# first path given in the sequence. Additionally, any trailing separator is
# stripped from the returned path.
def commonpath(paths):
    """Given a sequence of path names, returns the longest common sub-path."""
    if not paths:
        raise ValueError('commonpath() arg is an empty sequence')
    paths = tuple(map(os.fspath, paths))
    if isinstance(paths[0], bytes):
        sep = b'\\'
        altsep = b'/'
        curdir = b'.'
    else:
        sep = '\\'
        altsep = '/'
        curdir = '.'
    try:
        # Compare drives/components case-insensitively, with '/' treated the
        # same as '\\'.
        drivesplits = [splitdrive(p.replace(altsep, sep).lower()) for p in paths]
        split_paths = [p.split(sep) for d, p in drivesplits]
        try:
            # Unpacking a one-element set: raises ValueError when the inputs
            # mix absolute and relative forms. NOTE: this local deliberately
            # shadows the module-level isabs().
            isabs, = set(p[:1] == sep for d, p in drivesplits)
        except ValueError:
            raise ValueError("Can't mix absolute and relative paths") from None
        # Check that all drive letters or UNC paths match. The check is made only
        # now otherwise type errors for mixing strings and bytes would not be
        # caught.
        if len(set(d for d, p in drivesplits)) != 1:
            raise ValueError("Paths don't have the same drive")
        # Build the result from the first path so its original casing and
        # separators are preserved.
        drive, path = splitdrive(paths[0].replace(altsep, sep))
        common = path.split(sep)
        common = [c for c in common if c and c != curdir]
        split_paths = [[c for c in s if c and c != curdir] for s in split_paths]
        # Comparing only the lexicographic min and max is sufficient: any
        # divergence among the paths shows up between these two.
        s1 = min(split_paths)
        s2 = max(split_paths)
        for i, c in enumerate(s1):
            if c != s2[i]:
                common = common[:i]
                break
        else:
            common = common[:len(s1)]
        prefix = drive + sep if isabs else drive
        return prefix + sep.join(common)
    except (TypeError, AttributeError):
        genericpath._check_arg_types('commonpath', *paths)
        raise
try:
# The genericpath.isdir implementation uses os.stat and checks the mode
# attribute to tell whether or not the path is a directory.
# This is overkill on Windows - just pass the path to GetFileAttributes
# and check the attribute from there.
from nt import _isdir as isdir
except ImportError:
# Use genericpath.isdir as imported above.
pass
| |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import patch, Mock
from autorelease import trigger
@patch("autorelease.trigger.trigger_kokoro_build_for_pull_request")
@patch("autorelease.github.GitHub.list_org_issues")
@patch("autorelease.kokoro.make_authorized_session")
def test_no_issues(
    make_authorized_session, list_org_issues, trigger_kokoro_build_for_pull_request
):
    """With no tagged release issues, a session is made but nothing is built."""
    list_org_issues.return_value = []
    trigger.main("github-token", "kokoro-credentials")
    # A Kokoro session is still established exactly once ...
    make_authorized_session.assert_called_once()
    # ... but with nothing to release, no build may be triggered.
    trigger_kokoro_build_for_pull_request.assert_not_called()
@patch("autorelease.trigger.trigger_kokoro_build_for_pull_request")
@patch("autorelease.github.GitHub.list_org_issues")
@patch("autorelease.kokoro.make_adc_session")
def test_adc(make_adc_session, list_org_issues, trigger_kokoro_build_for_pull_request):
    """Without explicit Kokoro credentials, an ADC session is used instead."""
    list_org_issues.return_value = []
    # Passing None as the credentials selects the ADC session path.
    trigger.main("github-token", None)
    make_adc_session.assert_called_once()
    trigger_kokoro_build_for_pull_request.assert_not_called()
@patch("autorelease.trigger.trigger_kokoro_build_for_pull_request")
@patch("autorelease.github.GitHub.list_org_issues")
@patch("autorelease.kokoro.make_authorized_session")
def test_processes_issues(
    make_authorized_session, list_org_issues, trigger_kokoro_build_for_pull_request
):
    """Every tagged release issue found should trigger one Kokoro build."""
    java_issue = {
        "base": {"ref": "abc123", "repo": {"full_name": "googleapis/java-asset"}},
        "pull_request": {"html_url": "https://github.com/googleapis/java-asset"},
        "title": "chore: release 1.2.3",
    }
    node_issue = {
        "base": {"ref": "def456", "repo": {"full_name": "googleapis/nodejs-container"}},
        "pull_request": {"html_url": "https://github.com/nodejs/java-container"},
        "title": "chore: release 1.0.0",
    }
    list_org_issues.side_effect = [[java_issue, node_issue]]
    trigger.main("github-token", "kokoro-credentials")
    # Both GitHub orgs must be queried for tagged, closed release issues.
    common_query = {
        "state": "closed",
        "labels": "autorelease: tagged",
        "created_after": "2021-04-01",
    }
    list_org_issues.assert_any_call(org="googleapis", **common_query)
    list_org_issues.assert_any_call(org="GoogleCloudPlatform", **common_query)
    # One build per release pull request.
    assert trigger_kokoro_build_for_pull_request.call_count == 2
@patch("autorelease.kokoro.trigger_build")
def test_trigger_kokoro_build_for_pull_request_skips_non_merged(trigger_build):
    """A pull request that was never merged must not trigger a build."""
    gh = Mock()
    gh.update_pull_labels = Mock()
    # The PR lookup reports the PR as closed without a merge.
    gh.get_url.return_value = {
        "merged_at": None,
        "base": {"repo": {"full_name": "googleapis/java-asset"}},
    }
    issue = {
        "pull_request": {"url": "https://api.github.com/googleapis/java-asset/pull/5"}
    }
    trigger.trigger_kokoro_build_for_pull_request(Mock(), gh, issue, Mock())
    # Labels are still updated so the issue is not re-examined ...
    gh.update_pull_labels.assert_called_once()
    # ... but no release build is started.
    trigger_build.assert_not_called()
@patch("autorelease.trigger.LANGUAGE_ALLOWLIST", ["java"])
@patch("autorelease.kokoro.trigger_build")
def test_trigger_kokoro_build_for_pull_request_triggers_kokoro(trigger_build):
    """A merged PR in an allowlisted language triggers a Kokoro build."""
    gh = Mock()
    # A merged Java release PR with a merge commit to build from.
    gh.get_url.return_value = {
        "merged_at": "2021-01-01T09:00:00.000Z",
        "merge_commit_sha": "abcd1234",
        "base": {"repo": {"full_name": "googleapis/java-asset"}},
        "html_url": "https://github.com/googleapis/java-asset/pulls/5",
    }
    issue = {
        "pull_request": {"url": "https://api.github.com/googleapis/java-asset/pull/5"},
        "merged_at": "2021-01-01T09:00:00.000Z",
    }
    trigger.trigger_kokoro_build_for_pull_request(Mock(), gh, issue, Mock())
    # The build is started and the PR labels are updated once.
    trigger_build.assert_called_once()
    gh.update_pull_labels.assert_called_once()
@patch("autorelease.trigger.LANGUAGE_ALLOWLIST", [])
@patch("autorelease.kokoro.trigger_build")
def test_trigger_kokoro_build_for_pull_request_skips_kokoro_if_not_in_allowlist(
    trigger_build,
):
    """With an empty language allowlist no build may be triggered."""
    gh = Mock()
    # A fully mergeable PR — the only blocker is the (empty) allowlist.
    gh.get_url.return_value = {
        "merged_at": "2021-01-01T09:00:00.000Z",
        "merge_commit_sha": "abcd1234",
        "base": {"repo": {"full_name": "googleapis/java-asset"}},
        "html_url": "https://github.com/googleapis/java-asset/pulls/5",
    }
    issue = {
        "pull_request": {"url": "https://api.github.com/googleapis/java-asset/pull/5"},
        "merged_at": "2021-01-01T09:00:00.000Z",
    }
    trigger.trigger_kokoro_build_for_pull_request(Mock(), gh, issue, Mock())
    trigger_build.assert_not_called()
@patch("autorelease.trigger.LANGUAGE_ALLOWLIST", ["php"])
@patch("autorelease.kokoro.trigger_build")
def test_trigger_kokoro_build_for_pull_request_skips_kokoro_if_no_job_name(
    trigger_build,
):
    """A repo for which no Kokoro job name can be derived must be skipped."""
    gh = Mock()
    # Merged PR in a repo without a release job mapping (per the test name).
    gh.get_url.return_value = {
        "merged_at": "2021-01-01T09:00:00.000Z",
        "base": {"repo": {"full_name": "googleapis/google-cloud-php"}},
        "html_url": "https://github.com/googleapis/google-cloud-php/pulls/5",
    }
    issue = {
        "pull_request": {
            "url": "https://api.github.com/googleapis/google-cloud-php/pull/5"
        },
        "merged_at": "2021-01-01T09:00:00.000Z",
    }
    trigger.trigger_kokoro_build_for_pull_request(Mock(), gh, issue, Mock())
    trigger_build.assert_not_called()
@patch("autorelease.trigger.LANGUAGE_ALLOWLIST", ["php"])
@patch("autorelease.kokoro.trigger_build")
def test_trigger_kokoro_build_for_pull_request_skips_kokoro_if_already_triggered(
    trigger_build,
):
    """A PR already carrying the 'triggered' label is not built again."""
    gh = Mock()
    # The 'autorelease: triggered' label marks this PR as already processed.
    gh.get_url.return_value = {
        "merged_at": "2021-01-01T09:00:00.000Z",
        "base": {"repo": {"full_name": "googleapis/google-cloud-php"}},
        "html_url": "https://github.com/googleapis/google-cloud-php/pulls/5",
        "labels": [{"id": 12345, "name": "autorelease: triggered"}],
    }
    issue = {
        "pull_request": {
            "url": "https://api.github.com/googleapis/google-cloud-php/pull/5"
        },
        "merged_at": "2021-01-01T09:00:00.000Z",
    }
    trigger.trigger_kokoro_build_for_pull_request(Mock(), gh, issue, Mock())
    trigger_build.assert_not_called()
@patch("autorelease.trigger.LANGUAGE_ALLOWLIST", ["java"])
@patch("autorelease.kokoro.make_authorized_session")
@patch("autorelease.github.GitHub.get_issue")
@patch("autorelease.github.GitHub.get_url")
@patch("autorelease.github.GitHub.update_pull_labels")
@patch("autorelease.kokoro.trigger_build")
def test_trigger_single(
    trigger_build, update_pull_labels, get_url, get_issue, make_authorized_session
):
    """trigger_single() starts one build for an explicitly named PR."""
    session = Mock()
    make_authorized_session.return_value = session
    get_issue.return_value = {
        "title": "chore: release 1.2.3",
        "pull_request": {
            "html_url": "https://github.com/googleapis/java-trace/pull/1234",
            "url": "https://api.github.com/repos/googleapis/java-trace/pulls/1234",
        },
    }
    # A merged, tagged (but not yet triggered) release PR.
    get_url.return_value = {
        "merged_at": "2021-07-20T09:00:00.123Z",
        "base": {"repo": {"full_name": "googleapis/java-trace"}},
        "html_url": "https://github.com/googleapis/java-trace/pull/1234",
        "merge_commit_sha": "abcd1234",
        "labels": [{"id": 12345, "name": "autorelease: tagged"}],
    }
    pull_request_url = "https://github.com/googleapis/java-trace/pull/1234"
    reporter = trigger.trigger_single(
        "fake-github-token", "fake-kokoro-credentials", pull_request_url
    )
    # Exactly one PR was processed ...
    assert len(reporter.results) == 1
    # ... and its release/stage job was started at the merge commit.
    trigger_build.assert_called_with(
        session,
        job_name="cloud-devrel/client-libraries/java/java-trace/release/stage",
        sha="abcd1234",
        env_vars={
            "AUTORELEASE_PR": "https://github.com/googleapis/java-trace/pull/1234"
        },
    )
    # Single-PR mode leaves the labels alone.
    update_pull_labels.assert_not_called()
@patch("autorelease.kokoro.make_authorized_session")
@patch("autorelease.kokoro.trigger_build")
def test_trigger_single_bad_url(trigger_build, make_authorized_session):
    """A URL that is not a pull-request URL is reported but not built."""
    make_authorized_session.return_value = Mock()
    # An issues/ URL cannot identify a pull request.
    bad_url = "https://github.com/googleapis/java-trace/issues/1234"
    reporter = trigger.trigger_single(
        "fake-github-token", "fake-kokoro-credentials", bad_url
    )
    # The failure is still recorded as a single result.
    assert len(reporter.results) == 1
    trigger_build.assert_not_called()
@patch("autorelease.kokoro.make_authorized_session")
@patch("autorelease.github.GitHub.get_issue")
@patch("autorelease.github.GitHub.get_url")
@patch("autorelease.github.GitHub.update_pull_labels")
@patch("autorelease.kokoro.trigger_build")
def test_trigger_single_skips_already_triggered(
    trigger_build, update_pull_labels, get_url, get_issue, make_authorized_session
):
    """trigger_single() refuses to rebuild an already-triggered PR."""
    make_authorized_session.return_value = Mock()
    get_issue.return_value = {
        "title": "chore: release 1.2.3",
        "pull_request": {
            "html_url": "https://github.com/googleapis/java-trace/pull/1234",
            "url": "https://api.github.com/repos/googleapis/java-trace/pulls/1234",
        },
    }
    # The PR carries both the tagged and the triggered labels.
    get_url.return_value = {
        "merged_at": "2021-07-20T09:00:00.123Z",
        "base": {"repo": {"full_name": "googleapis/java-trace"}},
        "html_url": "https://github.com/googleapis/java-trace/pull/1234",
        "merge_commit_sha": "abcd1234",
        "labels": [
            {"id": 12345, "name": "autorelease: tagged"},
            {"id": 12346, "name": "autorelease: triggered"},
        ],
    }
    reporter = trigger.trigger_single(
        "fake-github-token",
        "fake-kokoro-credentials",
        "https://github.com/googleapis/java-trace/pull/1234",
    )
    assert len(reporter.results) == 1
    trigger_build.assert_not_called()
| |
# -*- coding: utf-8 -*-
"""
Unit tests for the OBSolve class.
Thomas Ogden <t@ogden.eu>
"""
import os
import unittest
import numpy as np
from maxwellbloch import ob_solve, t_funcs
# Absolute path of tests/json directory, so that tests can be called from
# different directories.
JSON_DIR = os.path.abspath(os.path.join(__file__, '../', 'json'))
JSON_STR_02 = (
'{'
' "atom": {'
' "decays": ['
' { "channels": [[0,1], [1,2]], '
' "rate": 1.0'
' }'
' ],'
' "energies": [],'
' "fields": ['
' {'
' "coupled_levels": ['
' [0, 1]'
' ],'
' "detuning": 0.0,'
' "detuning_positive": true,'
' "label": "probe",'
' "rabi_freq": 5.0,'
' "rabi_freq_t_args": {},'
' "rabi_freq_t_func": null'
' },'
' {'
' "coupled_levels": ['
' [1, 2]'
' ],'
' "detuning": 0.0,'
' "detuning_positive": false,'
' "label": "coupling",'
' "rabi_freq": 10.0,'
' "rabi_freq_t_args": {},'
' "rabi_freq_t_func": null'
' }'
' ],'
' "num_states": 3'
' },'
' "t_min": 0.0,'
' "t_max": 1.0,'
' "t_steps": 100,'
' "method": "mesolve",'
' "opts": {}'
'}'
)
class TestSetFieldRabiTFunc(unittest.TestCase):
    """ Test setting custom Rabi frequency time functions. """
    def test_set_field_rabi_t_func_1(self):
        """ Test that a custom double pulse Rabi freq time functions can be
            set.
        """
        solver = ob_solve.OBSolve().from_json_str(JSON_STR_02)
        # Two-pulse profile: the sum of two Gaussians with independent args.
        def two_pulse(t, args):
            return t_funcs.gaussian(0)(t, args) + t_funcs.gaussian(1)(t, args)
        two_pulse_args = {"ampl_0": 1.0, "centre_0": 0.0, "fwhm_0": 0.1,
                          "ampl_1": 2.0, "centre_1": 0.5, "fwhm_1": 0.1, }
        solver.set_field_rabi_freq_t_func(0, two_pulse)
        solver.set_field_rabi_freq_t_args(0, two_pulse_args)
        probe = solver.atom.fields[0]
        # Peaks of 1.0 and 2.0 at the two pulse centres; negligible at t=1.
        for t, expected in [(0.0, 1.0), (0.5, 2.0), (1.0, 0.0)]:
            self.assertAlmostEqual(
                probe.rabi_freq_t_func(t, probe.rabi_freq_t_args), expected)
class TestSolve(unittest.TestCase):
    """Solver tests checking OBSolve results against known analytic
    solutions and regression cases."""
    def test_two_level_rabi_oscillations(self):
        """ Solve the optical Bloch equations for the two-level atom.
        Notes:
            See https://en.wikipedia.org/wiki/Rabi_cycle
        """
        RABI_FREQ = 5.0
        atom_dict = {"fields": [{"coupled_levels": [[0, 1]],
                     "rabi_freq": RABI_FREQ}], "num_states": 2}
        obs = ob_solve.OBSolve(atom=atom_dict, t_min=0.0, t_max=1.0,
                               t_steps=100)
        obs.solve()
        # Get the populations (diagonal density-matrix elements).
        pop_0 = np.absolute(obs.states_t()[:, 0, 0])
        pop_1 = np.absolute(obs.states_t()[:, 1, 1])
        # The solution is known, we should have Rabi cycling at the frequency.
        known_0 = np.cos(2.0*np.pi*RABI_FREQ*obs.tlist/2.0)**2
        known_1 = np.sin(2.0*np.pi*RABI_FREQ*obs.tlist/2.0)**2
        self.assertTrue(np.allclose(pop_0, known_0, rtol=1.e-5, atol=1.e-5))
        self.assertTrue(np.allclose(pop_1, known_1, rtol=1.e-5, atol=1.e-5))
        # If you want to take a look
        # import matplotlib.pyplot as plt
        # plt.plot(obs.tlist, pop_0)
        # plt.plot(obs.tlist, known_0, ls='dashed')
        # plt.plot(obs.tlist, pop_1)
        # plt.plot(obs.tlist, known_1, ls='dashed')
        # plt.show()
    def test_two_level_with_opts(self):
        """ Same as test_two_level_rabi_oscillations() but with opts set such
            that the tolerances are lower. The results will be less
            accurate.
        """
        RABI_FREQ = 5.0
        atom_dict = {"fields": [{"coupled_levels": [[0, 1]],
                     "rabi_freq": RABI_FREQ}], "num_states": 2,
                     "initial_state": [1., 0.]}
        # Looser solver tolerances than the defaults.
        obs = ob_solve.OBSolve(atom=atom_dict, t_min=0.0, t_max=1.0,
                               t_steps=100, opts={'atol': 1e-6, 'rtol': 1e-4})
        obs.solve()
        # Get the populations
        pop_0 = np.absolute(obs.states_t()[:, 0, 0])
        pop_1 = np.absolute(obs.states_t()[:, 1, 1])
        # The solution is known, we should have Rabi cycling at the frequency.
        known_0 = np.cos(2.0 * np.pi * RABI_FREQ * obs.tlist / 2.0)**2
        known_1 = np.sin(2.0 * np.pi * RABI_FREQ * obs.tlist / 2.0)**2
        # Compared with test_two_level_rabi_oscillations() we can only assert
        # a lower tolerance to the known solution.
        self.assertTrue(np.allclose(pop_0, known_0, rtol=1.e-3, atol=1.e-3))
        self.assertTrue(np.allclose(pop_1, known_1, rtol=1.e-3, atol=1.e-3))
        # If you want to take a look
        # import matplotlib.pyplot as plt
        # plt.plot(obs.tlist, pop_0)
        # plt.plot(obs.tlist, known_0, ls='dashed')
        # plt.plot(obs.tlist, pop_1)
        # plt.plot(obs.tlist, known_1, ls='dashed')
        # plt.show()
    def test_two_level_with_inital_state(self):
        """ Same as test_two_level_rabi_oscillations() but with the initial
            state set so that the population starts in the upper level.
        """
        RABI_FREQ = 5.0
        atom_dict = {"fields": [{"coupled_levels": [[0, 1]],
                     "rabi_freq": RABI_FREQ}], "num_states": 2,
                     "initial_state": [0., 1.]}
        obs = ob_solve.OBSolve(atom=atom_dict, t_min=0.0, t_max=1.0,
                               t_steps=100)
        obs.solve()
        # Get the populations
        pop_0 = np.absolute(obs.states_t()[:, 0, 0])
        pop_1 = np.absolute(obs.states_t()[:, 1, 1])
        # The solution is as test_two_level_rabi_oscillations() but swapped
        known_0 = np.sin(2.0*np.pi*RABI_FREQ*obs.tlist/2.0)**2
        known_1 = np.cos(2.0*np.pi*RABI_FREQ*obs.tlist/2.0)**2
        self.assertTrue(np.allclose(pop_0, known_0, rtol=1.e-5, atol=1.e-5))
        self.assertTrue(np.allclose(pop_1, known_1, rtol=1.e-5, atol=1.e-5))
    def test_two_two_fields(self):
        """ Test a two-level atom addressed by multiple fields.
        Notes:
            - Test for bug in #159, where multiple fields coupling the same
            levels isn't working.
            - The first square pi-pulse drives all the population to the excited
            state, the second square pi-pulse drives all pop back to ground.
            - Before fix, only the second field is driving the atoms.
        """
        json_path = os.path.join(JSON_DIR, "obs-two-two-fields.json")
        obs = ob_solve.OBSolve().from_json(json_path)
        obs.solve()
        # Get the populations
        pop_0 = np.absolute(obs.states_t()[:, 0, 0])
        pop_1 = np.absolute(obs.states_t()[:, 1, 1])
        # All population should start in the ground state
        self.assertAlmostEqual(pop_0[0], 1.0, places=5)
        self.assertAlmostEqual(pop_1[0], 0.0, places=5)
        # The first pi-pulse between t = 0.2 and t = 0.3 should drive all the
        # population to the exited state
        self.assertAlmostEqual(pop_0[len(pop_0)//2], 0.0, places=5)
        self.assertAlmostEqual(pop_1[len(pop_0)//2], 1.0, places=5)
        # The second pi-pulse between t = 0.6 and t = 0.7 should drive all the
        # population back to the ground state
        self.assertAlmostEqual(pop_0[-1], 1.0, places=5)
        self.assertAlmostEqual(pop_1[-1], 0.0, places=5)
        # If you want to take a look
        # import matplotlib.pyplot as plt
        # plt.plot(obs.tlist, pop_0)
        # plt.plot(obs.tlist, pop_1)
        # plt.show()
    def test_vee_cw_weak_sech_2pi(self):
        """ Test a three-level vee config atom where the probe transition is
            addressed by a weak cw and the drive is a sech pulse.
        Notes:
            - Test for bug in #222, where the `t_args can contain ampl or n_pi,
            not both` exception is raised even though those args are in
            different fields.
            - Bug is due to field_idxs not being set correctly, see also #159.
        """
        json_path = os.path.join(JSON_DIR, "obs-vee-cw-weak-sech-2pi.json")
        obs = ob_solve.OBSolve().from_json(json_path)
        # Test that solve does not throw any exceptions.
        obs.solve()
class TestJSON(unittest.TestCase):
    """Round-trip tests for OBSolve JSON (de)serialization."""
    def test_to_from_json_str_00(self):
        """A default OBSolve survives a to/from JSON string round trip."""
        ob_solve_00 = ob_solve.OBSolve()
        ob_solve_01 = ob_solve.OBSolve.from_json_str(ob_solve_00.to_json_str())
        self.assertEqual(ob_solve_00.to_json_str(), ob_solve_01.to_json_str())
    def test_from_json_str(self):
        """Scalar solver settings are restored from a JSON string."""
        ob_solve_02 = ob_solve.OBSolve().from_json_str(JSON_STR_02)
        self.assertEqual(ob_solve_02.t_min, 0.0)
        self.assertEqual(ob_solve_02.t_max, 1.0)
        self.assertEqual(ob_solve_02.t_steps, 100)
        self.assertEqual(ob_solve_02.method, "mesolve")
    def test_to_from_json_str_03(self):
        """A solver loaded from file survives a JSON string round trip."""
        json_path = os.path.join(JSON_DIR, "ob_solve_03.json")
        obs = ob_solve.OBSolve().from_json(json_path)
        obs_test = ob_solve.OBSolve.from_json_str(obs.to_json_str())
        self.assertEqual(obs.to_json_str(), obs_test.to_json_str())
    def test_to_from_json(self):
        """A solver written to a JSON file can be read back unchanged."""
        # Fix: dropped the redundant function-local `import os` (the module
        # already imports os at top level) and clean up the temp file even
        # if loading it raises.
        filepath = "test_ob_solve_02.json"
        ob_solve_02 = ob_solve.OBSolve().from_json_str(JSON_STR_02)
        ob_solve_02.to_json(filepath)
        try:
            ob_solve_03 = ob_solve.OBSolve().from_json(filepath)
        finally:
            os.remove(filepath)
        self.assertEqual(ob_solve_02.to_json_str(),
                         ob_solve_03.to_json_str())
class TestSaveLoad(unittest.TestCase):
    """ Tests for the OBSolve save and load methods."""
    def test_save_load_01(self):
        """ Solve a basic OBSolve problem. Save the results to file. Set the
            results in the OBSolve object to null. Load the results from file
            and check that they match the original values.
        """
        json_path = os.path.join(JSON_DIR, "ob_solve_02.json")
        solver = ob_solve.OBSolve().from_json(json_path)
        # First call computes (and saves) the result; the second call with
        # recalc=False must load identical states back from disk.
        computed = solver.solve()
        loaded = solver.solve(recalc=False)
        self.assertTrue((computed == loaded).all())
| |
import logging
import collections
from typing import Any, Dict, Optional
from enum import Enum
import ray
from ray.rllib.utils.replay_buffers.replay_buffer import _ALL_POLICIES
from ray.rllib.policy.rnn_sequencing import timeslice_along_seq_lens_with_overlap
from ray.rllib.policy.sample_batch import MultiAgentBatch
from ray.rllib.utils.annotations import override, ExperimentalAPI
from ray.rllib.utils.replay_buffers.replay_buffer import ReplayBuffer
from ray.rllib.utils.timer import TimerStat
from ray.rllib.utils.typing import PolicyID, SampleBatchType
from ray.rllib.utils.replay_buffers.replay_buffer import StorageUnit
from ray.rllib.utils.from_config import from_config
from ray.util.debug import log_once
logger = logging.getLogger(__name__)
@ExperimentalAPI
class ReplayMode(Enum):
    """How the multi-agent buffer replays experiences: LOCKSTEP samples one
    MultiAgentBatch for all policies at once; INDEPENDENT samples each
    policy's buffer separately."""
    LOCKSTEP = "lockstep"
    INDEPENDENT = "independent"
@ExperimentalAPI
def merge_dicts_with_warning(args_on_init, args_on_call):
    """Merge argument dicts, overwriting args_on_init with warning.

    The MultiAgentReplayBuffer supports setting standard arguments for calls
    of methods of the underlying buffers. These arguments can be
    overwritten. Such overwrites trigger a warning to the user.

    Args:
        args_on_init: Default arguments fixed at buffer construction time.
        args_on_call: Arguments supplied for the current call; these take
            precedence over `args_on_init`.

    Returns:
        A new dict containing both inputs, with `args_on_call` entries
        overriding any same-named `args_on_init` entries.
    """
    for arg_name, arg_value in args_on_call.items():
        if arg_name in args_on_init:
            # Warn only once per argument name to avoid log spam.
            if log_once("overwrite_argument_{}".format(str(arg_name))):
                # Lazy %-style args: the message is only formatted if emitted.
                logger.warning(
                    "Replay Buffer was initialized to have "
                    "underlying buffers methods called with "
                    "argument `%s=%s`, but was subsequently called "
                    "with `%s=%s`.",
                    arg_name,
                    args_on_init[arg_name],
                    arg_name,
                    arg_value,
                )
    return {**args_on_init, **args_on_call}
@ExperimentalAPI
class MultiAgentReplayBuffer(ReplayBuffer):
    """A replay buffer shard for multiagent setups.
    This buffer is meant to be run in parallel to distribute experiences
    across `num_shards` shards. Unlike simpler buffers, it holds a set of
    buffers - one for each policy ID.
    """
    def __init__(
        self,
        capacity: int = 10000,
        storage_unit: str = "timesteps",
        num_shards: int = 1,
        replay_batch_size: int = 1,
        learning_starts: int = 1000,
        replay_mode: str = "independent",
        replay_sequence_length: int = 1,
        replay_burn_in: int = 0,
        replay_zero_init_states: bool = True,
        underlying_buffer_config: Optional[dict] = None,
        **kwargs
    ):
        """Initializes a MultiAgentReplayBuffer instance.
        Args:
            num_shards: The number of buffer shards that exist in total
                (including this one).
            storage_unit: Either 'timesteps', 'sequences' or
                'episodes'. Specifies how experiences are stored. If they
                are stored in episodes, replay_sequence_length is ignored.
            learning_starts: Number of timesteps after which a call to
                `replay()` will yield samples (before that, `replay()` will
                return None).
            capacity: Max number of total timesteps in all policy buffers.
                After reaching this number, older samples will be
                dropped to make space for new ones.
            replay_batch_size: The batch size to be sampled (in timesteps).
                Note that if `replay_sequence_length` > 1,
                `self.replay_batch_size` will be set to the number of
                sequences sampled (B).
            replay_mode: One of "independent" or "lockstep". Determines,
                whether batches are sampled independently or to an equal
                amount.
            replay_sequence_length: The sequence length (T) of a single
                sample. If > 1, we will sample B x T from this buffer. This
                only has an effect if storage_unit is 'timesteps'.
            replay_burn_in: The burn-in length in case
                `replay_sequence_length` > 0. This is the number of timesteps
                each sequence overlaps with the previous one to generate a
                better internal state (=state after the burn-in), instead of
                starting from 0.0 each RNN rollout. This only has an effect
                if storage_unit is 'timesteps'.
            replay_zero_init_states: Whether the initial states in the
                buffer (if replay_sequence_length > 0) are always 0.0 or
                should be updated with the previous train_batch state outputs.
            underlying_buffer_config: A config that contains all necessary
                constructor arguments and arguments for methods to call on
                the underlying buffers.
            **kwargs: Forward compatibility kwargs.

        Raises:
            ValueError: If `replay_mode` is unknown, or if lockstep mode is
                combined with `episodes`/`sequences` storage.
        """
        shard_capacity = capacity // num_shards
        ReplayBuffer.__init__(self, capacity, storage_unit)
        # If the user provides an underlying buffer config, we use it to
        # instantiate and interact with underlying buffers
        self.underlying_buffer_config = underlying_buffer_config
        if self.underlying_buffer_config is not None:
            self.underlying_buffer_call_args = self.underlying_buffer_config
        else:
            self.underlying_buffer_call_args = {}
        self.replay_batch_size = replay_batch_size
        # Learning-start threshold is split evenly across shards.
        self.replay_starts = learning_starts // num_shards
        self.replay_mode = replay_mode
        self.replay_sequence_length = replay_sequence_length
        self.replay_burn_in = replay_burn_in
        self.replay_zero_init_states = replay_zero_init_states
        # Normalize `replay_mode` (string or enum) to a ReplayMode member.
        if replay_mode in ["lockstep", ReplayMode.LOCKSTEP]:
            self.replay_mode = ReplayMode.LOCKSTEP
            if self._storage_unit in [StorageUnit.EPISODES, StorageUnit.SEQUENCES]:
                raise ValueError(
                    "MultiAgentReplayBuffer does not support "
                    "lockstep mode with storage unit `episodes`"
                    "or `sequences`."
                )
        elif replay_mode in ["independent", ReplayMode.INDEPENDENT]:
            self.replay_mode = ReplayMode.INDEPENDENT
        else:
            raise ValueError("Unsupported replay mode: {}".format(replay_mode))
        if self.underlying_buffer_config:
            # User config wins over our defaults for capacity/storage_unit.
            ctor_args = {
                **{"capacity": shard_capacity, "storage_unit": storage_unit},
                **self.underlying_buffer_config,
            }
            def new_buffer():
                # Build the user-specified buffer type from its config.
                return from_config(self.underlying_buffer_config["type"], ctor_args)
        else:
            # Default case
            def new_buffer():
                # NOTE(review): this default path uses the full `self.capacity`
                # while the config path above uses `shard_capacity` — confirm
                # whether this asymmetry is intended.
                self.underlying_buffer_call_args = {}
                return ReplayBuffer(
                    self.capacity,
                    storage_unit=storage_unit,
                )
        # One underlying buffer per policy ID, created lazily on first access.
        self.replay_buffers = collections.defaultdict(new_buffer)
        # Metrics.
        self.add_batch_timer = TimerStat()
        self.replay_timer = TimerStat()
        # Total timesteps added across all policies; gates sampling via
        # `self.replay_starts`.
        self._num_added = 0
    def __len__(self) -> int:
        """Returns the number of items currently stored in this buffer."""
        return sum(len(buffer._storage) for buffer in self.replay_buffers.values())
    @ExperimentalAPI
    @override(ReplayBuffer)
    def add(self, batch: SampleBatchType, **kwargs) -> None:
        """Adds a batch to the appropriate policy's replay buffer.
        Turns the batch into a MultiAgentBatch of the DEFAULT_POLICY_ID if
        it is not a MultiAgentBatch. Subsequently, adds the individual policy
        batches to the storage.
        Args:
            batch: The batch to be added.
            **kwargs: Forward compatibility kwargs.
        """
        # Make a copy so the replay buffer doesn't pin plasma memory.
        batch = batch.copy()
        # Handle everything as if multi-agent.
        batch = batch.as_multi_agent()
        with self.add_batch_timer:
            if self.replay_mode == ReplayMode.LOCKSTEP:
                # Lockstep mode: Store under _ALL_POLICIES key (we will always
                # only sample from all policies at the same time).
                # This means storing a MultiAgentBatch to the underlying buffer
                self._add_to_underlying_buffer(_ALL_POLICIES, batch, **kwargs)
            else:
                # Store independent SampleBatches
                for policy_id, sample_batch in batch.policy_batches.items():
                    self._add_to_underlying_buffer(policy_id, sample_batch, **kwargs)
        self._num_added += batch.count
    @ExperimentalAPI
    def _add_to_underlying_buffer(
        self, policy_id: PolicyID, batch: SampleBatchType, **kwargs
    ) -> None:
        """Add a batch of experiences to the underlying buffer of a policy.
        If the storage unit is `timesteps`, cut the batch into timeslices
        before adding them to the appropriate buffer. Otherwise, let the
        underlying buffer decide how to slice batches.
        Args:
            policy_id: ID of the policy that corresponds to the underlying
                buffer
            batch: SampleBatch to add to the underlying buffer
            **kwargs: Forward compatibility kwargs.
        """
        # Merge kwargs, overwriting standard call arguments
        kwargs = merge_dicts_with_warning(self.underlying_buffer_call_args, kwargs)
        # For the storage unit `timesteps`, the underlying buffer will
        # simply store the samples how they arrive. For sequences and
        # episodes, the underlying buffer may split them itself.
        if self._storage_unit is StorageUnit.TIMESTEPS:
            if self.replay_sequence_length == 1:
                timeslices = batch.timeslices(1)
            else:
                # Slice into (possibly overlapping, zero-padded) sequences.
                timeslices = timeslice_along_seq_lens_with_overlap(
                    sample_batch=batch,
                    zero_pad_max_seq_len=self.replay_sequence_length,
                    pre_overlap=self.replay_burn_in,
                    zero_init_states=self.replay_zero_init_states,
                )
            for time_slice in timeslices:
                self.replay_buffers[policy_id].add(time_slice, **kwargs)
        else:
            self.replay_buffers[policy_id].add(batch, **kwargs)
    @ExperimentalAPI
    @override(ReplayBuffer)
    def sample(
        self, num_items: int, policy_id: Optional[PolicyID] = None, **kwargs
    ) -> Optional[SampleBatchType]:
        """Samples a MultiAgentBatch of `num_items` per one policy's buffer.
        If less than `num_items` records are in the policy's buffer,
        some samples in the results may be repeated to fulfil the batch size
        `num_items` request. Returns an empty batch if there are no items in
        the buffer.
        Args:
            num_items: Number of items to sample from a policy's buffer.
            policy_id: ID of the policy that created the experiences we sample.
                If none is given, sample from all policies.
            **kwargs: Forward compatibility kwargs.
        Returns:
            Concatenated MultiAgentBatch of items, or None while fewer than
            `learning_starts` timesteps have been added.
        """
        # Merge kwargs, overwriting standard call arguments
        kwargs = merge_dicts_with_warning(self.underlying_buffer_call_args, kwargs)
        # Not enough data collected yet to start replaying.
        if self._num_added < self.replay_starts:
            return None
        with self.replay_timer:
            # Lockstep mode: Sample from all policies at the same time an
            # equal amount of steps.
            if self.replay_mode == ReplayMode.LOCKSTEP:
                assert (
                    policy_id is None
                ), "`policy_id` specifier not allowed in `lockstep` mode!"
                # In lockstep mode we sample MultiAgentBatches
                return self.replay_buffers[_ALL_POLICIES].sample(num_items, **kwargs)
            elif policy_id is not None:
                # Sample from one specific policy's buffer only.
                sample = self.replay_buffers[policy_id].sample(num_items, **kwargs)
                return MultiAgentBatch({policy_id: sample}, sample.count)
            else:
                # Sample `num_items` from every policy's buffer.
                samples = {}
                for policy_id, replay_buffer in self.replay_buffers.items():
                    samples[policy_id] = replay_buffer.sample(num_items, **kwargs)
                return MultiAgentBatch(samples, sum(s.count for s in samples.values()))
    @ExperimentalAPI
    @override(ReplayBuffer)
    def stats(self, debug: bool = False) -> Dict:
        """Returns the stats of this buffer and all underlying buffers.
        Args:
            debug: If True, stats of underlying replay buffers will
                be fetched with debug=True.
        Returns:
            stat: Dictionary of buffer stats.
        """
        stat = {
            "add_batch_time_ms": round(1000 * self.add_batch_timer.mean, 3),
            "replay_time_ms": round(1000 * self.replay_timer.mean, 3),
        }
        # Nest each per-policy buffer's stats under a "policy_<id>" key.
        for policy_id, replay_buffer in self.replay_buffers.items():
            stat.update(
                {"policy_{}".format(policy_id): replay_buffer.stats(debug=debug)}
            )
        return stat
    @ExperimentalAPI
    @override(ReplayBuffer)
    def get_state(self) -> Dict[str, Any]:
        """Returns all local state.
        Returns:
            The serializable local state.
        """
        state = {"num_added": self._num_added, "replay_buffers": {}}
        for policy_id, replay_buffer in self.replay_buffers.items():
            state["replay_buffers"][policy_id] = replay_buffer.get_state()
        return state
    @ExperimentalAPI
    @override(ReplayBuffer)
    def set_state(self, state: Dict[str, Any]) -> None:
        """Restores all local state to the provided `state`.
        Args:
            state: The new state to set this buffer. Can be obtained by
                calling `self.get_state()`.
        """
        self._num_added = state["num_added"]
        buffer_states = state["replay_buffers"]
        # Missing per-policy buffers are created on demand by the defaultdict.
        for policy_id in buffer_states.keys():
            self.replay_buffers[policy_id].set_state(buffer_states[policy_id])
# Ray actor wrapper (num_cpus=0) so the replay buffer can be hosted remotely.
ReplayActor = ray.remote(num_cpus=0)(MultiAgentReplayBuffer)
| |
import bs4
import re
import requests
from six.moves.urllib import parse
from seo_report.stop_words import ENGLISH_STOP_WORDS
from seo_report.warnings import BADGES
from seo_report.warnings import WARNINGS
# Unicode-aware tokenizer: matches runs of 2+ word characters.
TOKEN_REGEX = re.compile(r'(?u)\b\w\w+\b')
# Outbound links to these hosts are exempt from the nofollow check below.
SOCIAL_WEBSITES = [
    "www.facebook.com",
    "twitter.com",
    "plus.google.com",
    "www.instagram.com",
    "www.pinterest.com"
]
class Webpage(object):
    """SEO analysis of a single page.

    Each `_analyze_*` method appends findings to `self.issues` (via `warn`)
    or `self.achieved` (via `earned`); `report()` runs them all and renders
    a result dict.
    """
    # Class-level defaults; all shadowed by instance attributes in __init__.
    url = None
    title = None
    description = None
    website_titles = {}
    website_descriptions = {}
    def __init__(self, page_url, html, website_titles, website_descriptions):
        # website_titles / website_descriptions map previously-seen title and
        # description text -> URL; shared across pages to detect duplicates.
        self.url = page_url
        self.netloc = parse.urlparse(page_url).netloc
        self.html = html
        self.title = None
        self.description = None
        self.keywords = {}
        self.issues = []
        self.achieved = []
        self.website_titles = website_titles
        self.website_descriptions = website_descriptions
    def report(self):
        '''
        Analyze the Page and return the rendered result dict.
        '''
        soup = bs4.BeautifulSoup(self.html, "html.parser")
        # per page analysis
        self._analyze_title(soup)
        self._analyze_description(soup)
        self._analyze_url_structure(soup)
        self._analyze_content(soup)
        self._analyze_anchors(soup)
        self._analyze_images(soup)
        self._analyze_headings(soup)
        self._analyze_keywords(soup)
        self._analyze_wordcount(soup)
        self._analyze_backlinks(soup)
        self._analyze_social(soup)
        self._analyze_pagespeed(soup)
        self._analyze_sentiment(soup)
        # return the rendered results
        return self._render()
    def _analyze_title(self, doc):
        """
        Validate the title: length bounds (10-70 chars), vague wording,
        keyword stuffing (any token repeated more than 3 times), and
        duplication across the site.
        """
        self.title = t = u""
        if doc.title:
            self.title = t = doc.title.text
        # Avoid using extremely lengthy titles that are unhelpful to users
        length = len(t)
        if length == 0:
            self.warn(WARNINGS["TITLE_MISSING"], self.title)
            return
        elif length < 10:
            self.warn(WARNINGS["TITLE_TOO_SHORT"], self.title)
        elif length > 70:
            self.warn(WARNINGS["TITLE_TOO_LONG"], self.title)
        else:
            self.earned(BADGES["TITLE_LENGTH"], self.title)
        # Avoid using default or vague titles like "Untitled" or "New Page 1"
        if any(vague_words in t.lower()
               for vague_words in ['untitled', 'page']):
            self.warn(WARNINGS["TITLE_TOO_GENERIC"], self.title)
        else:
            self.earned(BADGES["TITLE_INFORMATIVE"], self.title)
        # Avoid stuffing unneeded keywords in your title tags
        title_words = self.grouped(self.tokenize(t))
        for word, count in title_words:
            if count > 3:
                self.warn(
                    WARNINGS["TITLE_KEYWORD_STUFFED"],
                    self.title)
        # Avoid choosing a title that has no relation to the content on the
        # page
        # TODO
        # Avoid using a single title tag across all of your site's pages or a
        # large group of pages
        if t in self.website_titles:
            self.warn(
                WARNINGS["TITLE_DUPLICATED"],
                u'"{0}" previously used on pages: {1}'.format(
                    t, self.website_titles[t]))
        else:
            self.earned(BADGES["TITLE_UNIQUE"], self.title)
            self.website_titles[t] = self.url
    def _analyze_description(self, doc):
        """
        Validate the meta description: length bounds (140-255 chars),
        generic wording, keyword stuffing, and duplication across the site.
        """
        desc = doc.findAll('meta', attrs={'name': 'description'})
        self.description = d = u""
        if len(desc) > 0:
            self.description = d = desc[0].get('content', '')
        # calculate the length of the description once
        length = len(d)
        if length == 0:
            self.warn(WARNINGS["DESCRIPTION_MISSING"])
            return
        elif length < 140:
            self.warn(WARNINGS["DESCRIPTION_TOO_SHORT"], self.description)
        elif length > 255:
            self.warn(WARNINGS["DESCRIPTION_TOO_LONG"], self.description)
        else:
            self.earned(BADGES["DESCRIPTION_LENGTH"], self.description)
        # Avoid using generic descriptions like "This is a web page" or "Page
        # about baseball cards"
        if any(vague_words in d.lower()
               for vague_words in ['web page', 'page about']
               ):
            self.warn(WARNINGS["DESCRIPTION_TOO_GENERIC"], self.description)
        else:
            self.earned(BADGES["DESCRIPTION_INFORMATIVE"], self.description)
        # Avoid filling the description with only keywords
        desc_words = self.grouped(self.tokenize(d))
        for word, count in desc_words:
            if count > 3:
                self.warn(
                    WARNINGS["DESCRIPTION_KEYWORD_STUFFED"], self.description)
        # Avoid copying and pasting the entire content
        # of the document into the description meta tag
        # TODO
        # Avoid using a single description meta tag across all of your site's
        # pages or a large group of pages
        if d in self.website_descriptions:
            self.warn(WARNINGS["DESCRIPTION_DUPLICATED"],
                      u'"{0}" previously used on pages: {1}'.format(
                          d, self.website_descriptions[d]))
        else:
            # NOTE(review): unlike titles, no "unique" badge is earned here.
            self.website_descriptions[d] = self.url
    def _analyze_url_structure(self, doc):
        """
        Analyze URL Structure: length, generic names, keyword repetition,
        nesting depth, canonical link, and capitalization.
        """
        parsed_url = parse.urlparse(self.url)
        path = parsed_url.path.split("/")
        # Avoid using lengthy URLs with unnecessary parameters and session IDs
        if len(self.url) > 100:
            self.warn(WARNINGS["URL_TOO_LONG"], self.url)
        # Avoid choosing generic page names like "page1.html"
        if any(vague_words in self.url.lower() for vague_words in ['page']):
            self.warn(WARNINGS["URL_TOO_GENERIC"], self.url)
        # Avoid using excessive keywords
        # like "baseball-cards-baseball-cards-baseballcards.htm"
        url_words = self.grouped(self.tokenize(path[-1]))
        for word, count in url_words:
            if count >= 2:
                self.warn(WARNINGS["URL_KEYWORD_STUFFED"], self.url)
        # Avoid having deep nesting of subdirectories like ".../dir1/dir2/dir3
        # /dir4/dir5/dir6/page.html"
        if len(path) > 3:
            self.warn(WARNINGS["URL_TOO_DEEP"], self.url)
        # Avoid using directory names that have no relation to the content in
        # them
        # Avoid having pages from subdomains and the root directory
        # access the same content
        # if this is not the canonical page, then ignore it
        # and only look at the canonical url
        canonical = doc.find("link", rel="canonical")
        if canonical:
            canonical_url = canonical['href']
            if canonical_url != self.url:
                # ignore this page, but ensure the canonical url is in our list
                self.warn(WARNINGS["URL_NOT_CANONICAL"], canonical_url)
            else:
                self.earned(BADGES["URL_CANONICAL"], self.url)
        # Avoid using odd capitalization of URLs
        if any(x.isupper() for x in self.url):
            self.warn(WARNINGS["URL_CAPITALIZED"], self.url)
        else:
            # Achievement: many users expect lower-case URLs and remember them
            # better
            self.earned(BADGES["URL_CORRECTLY_CASED"], self.url)
        # Avoid creating complex webs of navigation links, e.g. linking every
        # page on your site to every other page
        # Avoid going overboard with slicing and dicing your content (so that
        # it takes twenty clicks)
        # Avoid having a navigation based entirely on drop-down menus, images,
        # or animations
        # Avoid letting your HTML site map page become out of date with broken
        # links
        # Avoid allowing your 404 pages to be indexed in search engines
        # Avoid providing only a vague message like "Not found", "404", or no
        # 404 page at all
    def _analyze_content(self, doc):
        """Placeholder: content-quality checks are not implemented yet."""
        # Avoid bad spelling and bad grammar
        # Avoid dumping large amounts of text on varying topics onto a page
        # without paragraph, subheading, or layout separation
        # Avoid inserting numerous unnecessary keywords aimed at
        # search engines but are annoying or nonsensical to users
        # (check percentage of keywords to content)
        pass
    def _analyze_anchors(self, doc):
        """
        Analyze Anchor Tags: alt text on image links, anchor text quality,
        nofollow on external links, and broken-link detection via HEAD
        requests (each target checked at most once).
        """
        anchors = doc.find_all('a', href=True)
        verified_pages = []
        for tag in anchors:
            tag_href = tag['href']
            tag_text = tag.text.lower().strip()
            image_link = tag.find('img')
            if image_link is not None:
                # Ensure the image uses an Alt tag
                if len(image_link.get('alt', '')) == 0:
                    self.warn(WARNINGS["IMAGE_LINK_ALT_MISSING"], tag_href)
                else:
                    self.earned(BADGES["IMAGE_LINK_ALT"],
                                image_link.get('alt', ''))
            else:
                # Ensure title tags or the text are used in Anchors
                # Avoid writing long anchor text, such as a lengthy sentence or
                # short paragraph of text
                if len(tag.get('title', '')) == 0 and len(tag_text) == 0:
                    self.warn(WARNINGS["ANCHOR_TEXT_MISSING"], tag_href)
                elif len(tag_text) < 3:
                    self.warn(WARNINGS["ANCHOR_TEXT_TOO_SHORT"], tag_text)
                elif len(tag_text) > 100:
                    self.warn(WARNINGS["ANCHOR_TEXT_TOO_LONG"], tag_text)
                # Avoid writing generic anchor text like "page", "article", or
                # "click here"
                if any(vague_words in tag_text.lower()
                       for vague_words in ['click here', 'page', 'article']):
                    self.warn(WARNINGS["ANCHOR_TEXT_TOO_GENERIC"], tag_text)
                if len(tag_href) > 100:
                    self.warn(WARNINGS["ANCHOR_HREF_TOO_LONG"], tag_href)
                # Avoid using text that is off-topic or has no relation to the
                # content of the page linked to
                # Avoid using the page's URL as the anchor text in most cases
                if tag_text == tag_href:
                    self.warn(WARNINGS["ANCHOR_HREF_EQUALS_TEXT"], tag_text)
            # Avoid comment spam to external websites
            if len(parse.urlparse(tag_href).netloc) > 0:
                if self.netloc not in tag_href:
                    if not(any(social_site in tag_href
                               for social_site in SOCIAL_WEBSITES)):
                        if tag.get('rel') is None \
                                or 'nofollow' not in tag.get('rel'):
                            self.warn(WARNINGS["ANCHOR_NO_FOLLOW"], tag_href)
                        else:
                            self.earned(BADGES["ANCHOR_NO_FOLLOW"], tag_href)
            # Avoid linking to broken webpages
            if not tag_href.startswith("mailto:"):
                referenced_href = tag_href
                if len(parse.urlparse(tag_href).netloc) == 0:
                    # relative link: resolve against the current page
                    referenced_href = parse.urljoin(self.url, tag_href)
                if referenced_href not in verified_pages:
                    # NOTE(review): requests.head can raise (timeouts, DNS
                    # failures) — confirm callers handle network exceptions.
                    resp = requests.head(referenced_href)
                    if resp.status_code == requests.codes.not_found:
                        self.warn(WARNINGS["BROKEN_LINK"], referenced_href)
                    verified_pages.append(referenced_href)
    def _analyze_images(self, doc):
        """
        Verifies that each img has an alt and title
        """
        images = doc.find_all('img')
        for image in images:
            src = image.get('src', image.get('data-src', ''))
            if len(src) == 0:
                self.warn(WARNINGS["IMAGE_SRC_MISSING"], str(image))
            else:
                if len(image.get('alt', '')) == 0:
                    self.warn(WARNINGS["IMAGE_ALT_MISSING"], str(image))
                # Avoid using generic filenames like
                # "image1.jpg", "pic.gif", "1.jpg" when possible.
                # Some sites with thousands of images might consider
                # automating the naming of images
                # TODO
                # Avoid writing extremely lengthy filenames for
                # relative images on the site
                if len(parse.urlparse(src).netloc) == 0 \
                        or self.netloc in src:
                    if len(src) > 15:
                        self.warn(WARNINGS["IMAGE_SRC_TOO_LONG"], src)
            # Avoid writing excessively long alt text that would be
            # considered spammy
            if len(image.get('alt', '')) > 40:
                self.warn(WARNINGS["IMAGE_ALT_TOO_LONG"],
                          image.get('alt', ''))
        # Avoid using only image links for your site's navigation
        # TODO
    def _analyze_headings(self, doc):
        """
        Make sure each page has exactly one h1 tag of reasonable length.
        Side effect: populates self.headers with the h1 texts.
        """
        h1tags = doc.find_all('h1')
        self.headers = []
        for h in h1tags:
            self.headers.append(h.text)
            if len(h.text) < 3:
                self.warn(WARNINGS["H1_TOO_SHORT"], h.text)
            else:
                self.earned(BADGES["H1_LENGTH"], h.text)
        if len(h1tags) != 1:
            self.warn(WARNINGS["H1_ONE_PER_PAGE"], self.headers)
        else:
            self.earned(BADGES["H1_ONE_PER_PAGE"], self.headers)
        # Avoid placing text in heading tags that wouldn't be helpful
        # in defining the structure of the page
        # TODO
        # Avoid using heading tags where other tags like <em> and <strong>
        # may be more appropriate
        # TODO
        # Avoid erratically moving from one heading tag size to another
        # TODO
        # Avoid excessively using heading tags throughout the page
        # TODO
        # Avoid putting all of the page's text into a heading tag
        # TODO
        # Avoid using heading tags only for styling text
        # and not presenting structure
        # TODO
    def _analyze_keywords(self, doc):
        """Flag a keywords metatag and collect the page's top 5 keywords."""
        # The Keywords Metatag should be avoided as they are a spam indicator
        # and no longer used by Search Engines
        kw_meta = doc.findAll('meta', attrs={'name': 'keywords'})
        if len(kw_meta) > 0:
            self.warn(WARNINGS["KEYWORDS_META"], kw_meta)
        # Detect the most popular keywords used on the page
        self.keywords = self._get_keywords(doc)
        # we only care about the top 5 keywords
        # (self.keywords is a list of (word, count) pairs sorted by count)
        del self.keywords[5:]
    def _analyze_wordcount(self, doc):
        """Warn when the page body has fewer than 1140 non-stop-words."""
        # Word Count: We have known for some time that Google shows a
        # preference for longer, more comprehensive content. According to the
        # report, the average word count for top-ranking content is in the
        # range of 1,140-1,285 words. This is up from 902 words in 2014. When
        # creating content, focus on providing comprehensive coverage of your
        # topic, rather than writing shorter content that only brushes the
        # surface of your topic
        page_content = self._get_keywords(doc)
        # calculate total number of words used (ignoring stop words)
        count = 0
        for word, freq in page_content:
            count += freq
        if count < 1140:
            self.warn(WARNINGS["WORDCOUNT_TOO_SHORT"],
                      u"You have {0} words.".format(count))
        else:
            self.earned(BADGES["WORDCOUNT"],
                        u"You have {0} words.".format(count))
    def _analyze_backlinks(self, doc):
        """Placeholder: backlink analysis is not implemented yet."""
        pass
    def _analyze_social(self, doc):
        """Placeholder: social-presence analysis is not implemented yet."""
        # Facebook, Twitter, Pinterest, GooglePlus, Instagram
        pass
    def _analyze_pagespeed(self, doc):
        """Placeholder: page-speed analysis is not implemented yet."""
        # Page load times should be < X seconds
        # Are you using a CDN
        # Are your CSS and JS files minimized
        pass
    def _analyze_sentiment(self, doc):
        """Placeholder: sentiment analysis is not implemented yet."""
        # Analyze the sentiment of the text used on the page
        # http://textblob.readthedocs.io/en/dev/
        # Avoid negative emotion when conveying your product and services
        pass
    def _render(self):
        '''
        Render the result dict: top keywords (with title/description/header
        membership flags), issues, achievements, title, and description.
        '''
        # iterate the keywords for returning
        keywords_result = []
        for word, count in self.keywords:
            kw = {
                "keyword": word,
                "frequency": count,
                "in_title": word in self.title.lower(),
                "in_description": word in self.description.lower(),
                # NOTE(review): self.headers is a list of full h1 strings, so
                # this is an exact-match test, not a substring test — confirm
                # that is intended.
                "in_header": word in self.headers
            }
            keywords_result.append(kw)
        result = {
            "url": self.url,
            "keywords": keywords_result,
            "issues": self.issues,
            "achieved": self.achieved,
            "title": self.title,
            "description": self.description
        }
        return result
    def warn(self, message, value=None):
        """Record an issue with an optional offending value."""
        self.issues.append(
            {
                "warning": message,
                "value": value
            }
        )
    def earned(self, message, value=None):
        """Record an achievement with an optional supporting value."""
        self.achieved.append(
            {
                "achievement": message,
                "value": value
            }
        )
    def visible_tags(self, element):
        """Return False for text nodes that are not rendered (script/style/
        head/comments), True otherwise."""
        non_visible_elements = [
            'style', 'script', '[document]',
            'head', 'title', 'meta']
        if element.parent.name in non_visible_elements:
            return False
        elif isinstance(element, bs4.element.Comment):
            return False
        return True
    def tokenize(self, rawtext):
        """Lower-case and split text into word tokens, dropping stop words."""
        return [
            word
            for word in TOKEN_REGEX.findall(rawtext.lower())
            if word not in ENGLISH_STOP_WORDS
        ]
    def grouped(self, token_list):
        """Return (token, count) pairs sorted by descending frequency."""
        grouped_list = {}
        for word in token_list:
            if word in grouped_list:
                grouped_list[word] += 1
            else:
                grouped_list[word] = 1
        grouped_list = sorted(grouped_list.items(),
                              key=lambda x: x[1], reverse=True)
        return grouped_list
    def _get_keywords(self, doc):
        """Collect visible page text and return its grouped keyword counts."""
        keywords = {}
        text_elements = filter(self.visible_tags, doc.findAll(text=True))
        page_text = ''
        for element in text_elements:
            page_text += element.lower() + ' '
        tokens = self.tokenize(page_text)
        keywords = self.grouped(tokens)
        return keywords
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from abc import abstractmethod, abstractproperty
from hashlib import sha1
from six import string_types
from twitter.common.dirutil.fileset import Fileset
from pants.base.build_environment import get_buildroot
from pants.util.dirutil import fast_relpath, fast_relpath_optional
from pants.util.memo import memoized_property
from pants.util.meta import AbstractClass
class FilesetWithSpec(AbstractClass):
    """A set of files that keeps track of how we got it."""

    @staticmethod
    def _no_content(path):
        # Content lookups on an empty fileset are a programming error.
        raise AssertionError('An empty FilesetWithSpec should never have file content requested.')

    @staticmethod
    def empty(rel_root):
        """Creates an empty FilesetWithSpec object for the given rel_root."""
        return EagerFilesetWithSpec(rel_root, {'globs': []}, tuple(), '<empty>')

    @abstractmethod
    def matches(self, path_from_buildroot):
        """
        Takes in any relative path from build root, and return whether it belongs to this filespec

        :param path_from_buildroot: path relative to build root
        :return: True if the path matches, else False.
        """

    def __init__(self, rel_root, filespec):
        """
        :param rel_root: The root for the given filespec, relative to the buildroot.
        :param filespec: A filespec as generated by `FilesetRelPathWrapper`, which represents
                         what globs or file list it came from. Must be relative to the buildroot.
        """
        self.rel_root = rel_root
        self.filespec = filespec
        self._validate_globs_in_filespec(filespec, rel_root)

    def _validate_globs_in_filespec(self, filespec, rel_root):
        # Every glob (including those nested in excludes) must be rooted
        # under rel_root.
        for glob_spec in filespec['globs']:
            if not glob_spec.startswith(rel_root):
                raise ValueError('expected glob filespec: {!r}'
                                 ' to start with its root path: {!r}!'.format(glob_spec, rel_root))
        for exclude_filespec in filespec.get('exclude') or ():
            self._validate_globs_in_filespec(exclude_filespec, rel_root)

    @abstractproperty
    def files(self):
        """Return the concrete set of files matched by this FilesetWithSpec, relative to `self.rel_root`."""

    @abstractproperty
    def files_hash(self):
        """Return a unique hash for this set of files."""

    def __iter__(self):
        return iter(self.files)

    def __getitem__(self, index):
        return self.files[index]

    def paths_from_buildroot_iter(self):
        """An alternative `__iter__` that joins files with the relative root."""
        return (os.path.join(self.rel_root, rel_file) for rel_file in self)
class EagerFilesetWithSpec(FilesetWithSpec):
    """A FilesetWithSpec whose matched files and fingerprint were computed up front."""

    def __init__(self, rel_root, filespec, files, files_hash):
        """
        :param rel_root: The root for the given filespec, relative to the buildroot.
        :param filespec: A filespec as generated by `FilesetRelPathWrapper`, which represents
                         what globs or file list it came from. Must be relative to buildroot.
        :param files: A list of matched files, with declared order and duplicates preserved.
        :param files_hash: A string fingerprint for all files in the fileset.
        """
        super(EagerFilesetWithSpec, self).__init__(rel_root, filespec)
        self._files = files
        self._files_hash = files_hash

    @property
    def files(self):
        return self._files

    @property
    def files_hash(self):
        return self._files_hash

    def __repr__(self):
        return 'EagerFilesetWithSpec(rel_root={!r}, files={!r})'.format(self.rel_root, self.files)

    def matches(self, path_from_buildroot):
        # A path matches iff it lives under rel_root and is in the file list.
        relative_path = fast_relpath_optional(path_from_buildroot, self.rel_root)
        if relative_path is None:
            return False
        return relative_path in self._files
class LazyFilesetWithSpec(FilesetWithSpec):
    """A FilesetWithSpec that defers file matching until first access."""

    def __init__(self, rel_root, filespec, files_calculator):
        """
        :param rel_root: The root for the given filespec, relative to the buildroot.
        :param filespec: A filespec as generated by `FilesetRelPathWrapper`, which represents
                         what globs or file list it came from.
        :param files_calculator: A no-arg function that will lazily compute the file paths for
                                 this filespec.
        """
        super(LazyFilesetWithSpec, self).__init__(rel_root, filespec)
        self._files_calculator = files_calculator

    @memoized_property
    def files(self):
        # Computed once on first access, then cached by memoized_property.
        return self._files_calculator()

    @property
    def files_hash(self):
        # Fingerprint both the sorted relative paths and each file's content.
        hasher = sha1()
        for path in sorted(self.files):
            hasher.update(path)
            abs_path = os.path.join(get_buildroot(), self.rel_root, path)
            with open(abs_path, 'rb') as f:
                hasher.update(f.read())
        return hasher.digest()

    def matches(self, path_from_buildroot):
        for path_in_spec in self.paths_from_buildroot_iter():
            if path_from_buildroot == path_in_spec:
                return True
        return False
class FilesetRelPathWrapper(AbstractClass):
    """Base factory for FilesetWithSpec objects rooted at a BUILD file's directory.

    Subclasses supply `wrapped_fn` (the raw glob implementation) and
    `validate_files` (whether matched paths must exist as files).
    """
    # The only keyword arguments accepted by create_fileset_with_spec.
    KNOWN_PARAMETERS = frozenset(['exclude', 'follow_links'])
    @abstractproperty
    def wrapped_fn(cls):
        """The wrapped file calculation function."""
    @abstractproperty
    def validate_files(cls):
        """True to validate the existence of files returned by wrapped_fn."""
    def __init__(self, parse_context):
        """
        :param parse_context: The BUILD file parse context.
        """
        self._parse_context = parse_context
    def __call__(self, *patterns, **kwargs):
        # Called from BUILD files; roots the globs at the BUILD file's dir.
        return self.create_fileset_with_spec(self._parse_context.rel_path, *patterns, **kwargs)
    @classmethod
    def create_fileset_with_spec(cls, rel_path, *patterns, **kwargs):
        """Validate patterns/kwargs and return a LazyFilesetWithSpec.

        :param rel_path: The relative path to create a FilesetWithSpec for.
        :param patterns: glob patterns to apply.
        :param exclude: A list of {,r,z}globs objects, strings, or lists of strings to exclude.
        """
        for pattern in patterns:
            if not isinstance(pattern, string_types):
                raise ValueError("Expected string patterns for {}: got {}".format(cls.__name__, patterns))
        raw_exclude = kwargs.pop('exclude', [])
        buildroot = get_buildroot()
        root = os.path.normpath(os.path.join(buildroot, rel_path))
        # making sure there are no unknown arguments.
        unknown_args = set(kwargs.keys()) - cls.KNOWN_PARAMETERS
        if unknown_args:
            raise ValueError('Unexpected arguments while parsing globs: {}'.format(
                ', '.join(unknown_args)))
        # Globs may not escape the BUILD file's directory.
        for glob in patterns:
            if cls._is_glob_dir_outside_root(glob, root):
                raise ValueError('Invalid glob {}, points outside BUILD file root {}'.format(glob, root))
        exclude = cls.process_raw_exclude(raw_exclude)
        files_calculator = cls._file_calculator(root, patterns, kwargs, exclude)
        rel_root = fast_relpath(root, buildroot)
        if rel_root == '.':
            rel_root = ''
        filespec = cls.to_filespec(patterns, root=rel_root, exclude=exclude)
        return LazyFilesetWithSpec(rel_root, filespec, files_calculator)
    @classmethod
    def _file_calculator(cls, root, patterns, kwargs, exclude):
        # Returns a no-arg closure so the (possibly expensive) glob expansion
        # only runs when LazyFilesetWithSpec.files is first accessed.
        def files_calculator():
            result = cls.wrapped_fn(root=root, *patterns, **kwargs)
            for ex in exclude:
                result -= ex
            # BUILD file's filesets should contain only files, not folders.
            return [path for path in result
                    if not cls.validate_files or os.path.isfile(os.path.join(root, path))]
        return files_calculator
    @staticmethod
    def _is_glob_dir_outside_root(glob, root):
        # The assumption is that a correct glob starts with the root,
        # even after normalizing.
        glob_path = os.path.normpath(os.path.join(root, glob))
        # Check if the glob path has the correct root.
        return os.path.commonprefix([root, glob_path]) != root
    @staticmethod
    def process_raw_exclude(raw_exclude):
        """Normalize the `exclude=` argument to a list of lists/filesets."""
        if isinstance(raw_exclude, string_types):
            raise ValueError("Expected exclude parameter to be a list of globs, lists, or strings,"
                             " but was a string: {}".format(raw_exclude))
        # You can't subtract raw strings from globs
        def ensure_string_wrapped_in_list(element):
            if isinstance(element, string_types):
                return [element]
            else:
                return element
        return [ensure_string_wrapped_in_list(exclude) for exclude in raw_exclude]
    @classmethod
    def to_filespec(cls, args, root='', exclude=None):
        """Return a dict representation of this glob list, relative to the buildroot.

        The format of the dict is {'globs': [ 'list', 'of' , 'strings' ]
                                   (optional) 'exclude' : [{'globs' : ... }, ...] }
        The globs are in zglobs format.
        """
        result = {'globs': [os.path.join(root, arg) for arg in args]}
        if exclude:
            result['exclude'] = []
            # NOTE(review): the loop variable shadows the `exclude` parameter;
            # it works because the iterator is bound before the rebinding.
            for exclude in exclude:
                if hasattr(exclude, 'filespec'):
                    result['exclude'].append(exclude.filespec)
                else:
                    result['exclude'].append({'globs': [os.path.join(root, x) for x in exclude]})
        return result
class Files(FilesetRelPathWrapper):
    """Matches literal files, _without_ confirming that they exist.

    TODO: This exists as-is for historical reasons: we should add optional validation of the
    existence of matched files at some point.
    """
    @staticmethod
    def _literal_files(*args, **kwargs):
        # The only supported (and required) kwarg is `root`. Compare as a set:
        # the old `kwargs.keys() != ['root']` compared a keys view to a list,
        # which is *always* unequal on Python 3 and so rejected valid calls.
        if set(kwargs) != {'root'}:
            raise ValueError('Literal file globs do not support kwargs other than `root`: {}'.format(kwargs))
        return args
    wrapped_fn = _literal_files
    validate_files = False
class Globs(FilesetRelPathWrapper):
    """Matches files in the BUILD file's directory.

    E.g., - ``sources = globs('*java'),`` to get .java files in this directory.
          - ``globs('*',exclude=[globs('*.java'), 'foo.py'])`` to get all files in this directory
            except ``.java`` files and ``foo.py``.
    """
    # Delegates directly to the non-recursive globs implementation.
    wrapped_fn = Fileset.globs
    validate_files = True
class RGlobs(FilesetRelPathWrapper):
    """Matches files recursively under the BUILD file's directory.

    E.g., ``bundle(fileset=rglobs('config/*'))`` to bundle up all files in the config,
    config/foo, config/foo/bar directories.
    """
    @staticmethod
    def rglobs_following_symlinked_dirs_by_default(*globspecs, **kw):
        # Recursive globs follow symlinked directories unless the caller
        # explicitly opts out.
        if 'follow_links' not in kw:
            kw['follow_links'] = True
        return Fileset.rglobs(*globspecs, **kw)
    wrapped_fn = rglobs_following_symlinked_dirs_by_default
    validate_files = True
    @classmethod
    def to_filespec(cls, args, root='', exclude=None):
        """Translate rglobs patterns into zglobs-format filespecs.

        In rglobs, * at the beginning of a path component means "any
        number of directories, including 0". So every time we see ^*,
        we need to output "**/*whatever".
        """
        rglobs = []
        for arg in args:
            components = arg.split(os.path.sep)
            out = []
            for component in components:
                if component == '**':
                    # Collapse consecutive '**' components into one.
                    if out and out[-1].startswith("**"):
                        continue
                    out.append(component)
                elif component.startswith('*'):
                    # startswith (rather than component[0]) also tolerates
                    # empty components from doubled or trailing separators,
                    # which previously raised IndexError.
                    if out and out[-1].startswith("**"):
                        # We want to translate *.py to **/*.py, not **/**/*.py
                        out.append(component)
                    else:
                        out.append('**/' + component)
                else:
                    out.append(component)
            rglobs.append(os.path.join(*out))
        return super(RGlobs, cls).to_filespec(rglobs, root=root, exclude=exclude)
class ZGlobs(FilesetRelPathWrapper):
    """Matches files in the BUILD file's dir using zsh-style globs, including ``**/`` to recurse."""

    @staticmethod
    def zglobs_following_symlinked_dirs_by_default(*globspecs, **kw):
        # Follow symlinked directories by default; callers can still opt out.
        kw.setdefault('follow_links', True)
        return Fileset.zglobs(*globspecs, **kw)

    wrapped_fn = zglobs_following_symlinked_dirs_by_default
    validate_files = True
| |
# Copyright Dave Trollope 2015
# This source code is not to be distributed without agreement from
# D. Trollope
#
# This example demonstrates a basic subscriber which receives data
# from a publisher. It supports TCP and UDP (Raw, Unicast and Multicast) data flows.
#
# It creates a name subscription on the name server to subscribe to name registrations
# which contain the connection info required to listen to the data flow. The name subscription
# is maintained so multiple publishers can be joined, or if they restart.
#
# The publisher registers the name and connectivity info.
#
# This example uses several methods defined in stk_examples.py to simplify understanding and
# keep focus on the most important details.
# System imports
import getopt, sys, time, random
import stk_examples
# Sequence Toolkit imports - stk_env must be first
from stk_env import *
from stk_options import *
from stk_service_group import *
from stk_service import *
from stk_tcp_client import *
from stk_udp_listener import *
from stk_rawudp import *
from stk_data_flow import *
from stk_sg_automation import *
from stk_name_service import *
# Add a handler for CTRL-C to gracefully exit
import signal
import sys
ending = 0
def signal_handler(signal, frame):
    """First CTRL-C requests a graceful stop; a second one exits at once."""
    global ending
    # Already shutting down: force an immediate exit.
    if ending:
        sys.exit(0)
    ending = ending + 1
    stkbase.stop_dispatcher()
# Install the CTRL-C handler defined above.
signal.signal(signal.SIGINT, signal_handler)
# Class to collect command line options
# command line options provided - set in process_cmdline()
class cmdopts:
    """Defaults for command line options; overwritten by process_cmdline()."""
    callbacks = 2
    quiet = False                       # -q: suppress per-sequence output
    seqs = 100                          # -s: number of sequences
    name_server_ip = "127.0.0.1"        # -R: name server IP
    name_server_port = "20002"          # -R: name server port
    name_server_protocol = "tcp"        # -R: name server protocol
    subscriber_name = None              # positional arg: name to subscribe to
    server_ip = None
    server_port = 0
    bind_ip = None                      # -B: local bind address
# Module-wide options instance, populated by process_cmdline().
opts = cmdopts()
# Process command line options
def process_cmdline():
    """Parse sys.argv into the global `opts`.

    Flags: -h/--help, -q/--quiet, -B ip[:port], -s <sequences>,
    -R <[protocol:]ip[:port]>. The single positional argument is the
    name to subscribe to. Exits the process on usage errors.
    """
    try:
        gopts, args = getopt.getopt(sys.argv[1:], "hqB:s:R:", ["help", "quiet"])
    except getopt.GetoptError:
        # print help information and exit:
        usage()
        sys.exit(2)
    for o, a in gopts:
        if o in ("-q", "--quiet"):
            opts.quiet = True
        elif o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o == "-B":
            bind_ip = a.split(':')
            opts.bind_ip = bind_ip[0]
            # BUG FIX: the original tested `bind_ip.count == 2`, comparing the
            # bound list method itself to 2 (always False), so an explicit
            # ":port" was silently ignored. Compare the length instead.
            if len(bind_ip) == 2:
                opts.bind_port = bind_ip[1]
        elif o == "-s":
            opts.seqs = int(a)
        elif o == "-R":
            # Parse [protocol:]ip[:port], overriding only the parts supplied.
            p = stk_protocol_def_t()
            a = stk_data_flow_parse_protocol_str(p, a)
            if p.ip != '':
                opts.name_server_ip = p.ip
            if p.port != '':
                opts.name_server_port = p.port
            if p.protocol != '':
                opts.name_server_protocol = p.protocol
    # The subscription name is a required positional argument.
    if len(args) == 0:
        usage()
        sys.exit(5)
    opts.subscriber_name = args[0]
def usage():
    """Print the command line help for subscribe.py."""
    for line in (
        "Usage: subscribe.py [options] name",
        " -h : This help!",
        " -q : Quiet",
        " -B ip[:port] : IP and port to be bound (default: 0.0.0.0:29312)",
        " -s <sequences> : # of sequences",
        " -R <[protocol:]ip[:port]> : IP and port of name server",
    ):
        print(line)
# Process command line options
# Parse the command line into `opts` before any STK setup happens.
process_cmdline()
# Globals shared between the callbacks below and cleanup().
df_opts = None
dispatcher_cbs = None
svc = None
svc_opts = None
seq = None
stkbase = None
envopts = None
# One entry per joined publisher: dicts with ip/port/data_flow/options.
data_connections=[]
seqs_rcvd = 0
def cleanup():
    """Best-effort teardown of data flows, environment and options.

    Order matters: data flows first, then the environment, then the
    dispatcher callbacks and the nested option structures.
    """
    try:
        # destroy the data flow, sequence, service group and environment
        for h in data_connections:
            if h['data_flow']:
                h['data_flow'].close()
            if h['data_flow_options']:
                h['data_flow_options'].remove_dispatcher_fd_cbs()
                h['data_flow_options'].close()
        # And get rid of the environment, we are done!
        if stkbase:
            stkbase.close()
        if dispatcher_cbs:
            #dispatcher_cbs.caller().close()
            dispatcher_cbs.close()
        if envopts:
            # Now free the options that were built
            # Because there was nested options, we must free each nest individually
            # because there is no stored indication which options are nested
            envopts.remove_dispatcher_wakeup_cb()
            nsopts = envopts.find_option("name_server_options")
            dfopts = nsopts.find_option("name_server_data_flow_options")
            dfopts.remove_dispatcher_fd_cbs()
            stk_env.remove_name_server_dispatcher_cbs(nsopts,"name_server_data_flow")
            nsopts.free_sub_option("name_server_data_flow_options")
            envopts.free_sub_option("name_server_options")
            envopts.close()
    except Exception, e:
        print "Exception occurred during cleanup: " + str(e)
# Create the STK environment - can't do anything without one
# Nested options describing how to reach the name server.
opts_dict = {
    "name_server_options": {
        "name_server_data_flow_protocol": opts.name_server_protocol,
        "name_server_data_flow_options": {
            "data_flow_name": """%(protocol)s name server socket for subscribe""" % {"protocol": opts.name_server_protocol},
            "data_flow_id": 10000,
            "destination_address": opts.name_server_ip,
            "destination_port": opts.name_server_port
        }
    }
}
envopts = stk_options(opts_dict)
class data:
    """Counters shared by the name-server callbacks."""
    cbs_rcvd = 0   # number of name-info callbacks received
    expired = 0    # set to 1 when the name request expires
# Class containing callbacks for services (added, removed and changing state)
class name_service_cb(stk_callback):
    """Callbacks from the name server: on each name registration received,
    create a matching subscriber data flow (TCP, UDP, raw UDP or multicast).
    """
    def __init__(self):
        stk_callback.__init__(self)
    def close(self):
        stk_callback.close(self)
    def print_meta_data(self,seq,data,user_type,clientd):
        # Iteration callback for a name's meta data elements.
        print "Meta data type " + str(user_type) + " sz " + str(len(data))
    def name_info_cb(self,name_info,server_info,app_info,cb_type):
        # Called for each name registration (or expiry) from the name server.
        data.cbs_rcvd += 1
        if cb_type == STK_NS_REQUEST_EXPIRED:
            print "Request expired on name " + name_info.name()
            data.expired = 1
            return
        ip = name_info.ip(0)
        print "Received info on name " + name_info.name() + ", IP " + ip.ipstr + " Port " + ip.portstr + " Protocol " + ip.protocol
        global stkbase
        meta_data = name_info.meta_data(stkbase)
        if meta_data != None:
            meta_data.iterate(self.print_meta_data,None)
        try:
            self.create_data_flow(stkbase,ip.ipstr,ip.portstr,ip.protocol)
        except Exception, e:
            print str(e)
    def create_data_flow(self,stkbase,ip,port,protocol):
        """Create a subscriber data flow to the publisher advertised by the
        name registration and remember it in data_connections."""
        # Create the options for the client data flow
        print "Creating subscriber data flow"
        if protocol == "udp" or protocol == "rawudp" or protocol == "multicast":
            if port == None:
                port = "29312"
            if protocol == "multicast":
                # Multicast binds locally (default 0.0.0.0 or -B override).
                if opts.bind_ip:
                    bind_ip = opts.bind_ip
                else:
                    bind_ip = "0.0.0.0"
            else:
                bind_ip = ip
            df_opts_dict = {
                "bind_address": bind_ip,
                "bind_port": port,
                "receive_buffer_size": 16000000,
                "reuseaddr": 1
            }
            if protocol == "multicast":
                df_opts_dict["multicast_address"] = ip
            df_opts = stk_options(df_opts_dict)
            df_opts.append_dispatcher_fd_cbs(None)
            if protocol == "udp" or protocol == "multicast":
                df = stk_udp_subscriber(stkbase,"udp subscriber data flow", 29090, df_opts)
            else:
                df = stk_rawudp_subscriber(stkbase,"rawudp subscriber data flow", 29090, df_opts)
            if df == None:
                print "Failed to create udp/rawudp subscriber data flow"
                cleanup()
                exit(5)
        elif protocol == "tcp":
            df_opts_dict = {
                "destination_address": ip,
                "destination_port": port,
                "receive_buffer_size": 16000000,
                "nodelay": 1
            }
            df_opts = stk_options(df_opts_dict)
            df_opts.append_dispatcher_fd_cbs(None)
            # Create the TCP client data flow to the server
            df = stk_tcp_subscriber(stkbase,"tcp subscriber data flow", 29090, df_opts)
            if df == None:
                print "Failed to create the subscriber data flow"
                cleanup()
                exit(5)
        else:
            print "Unrecognized protocol " + protocol
            return
        print "Subscriber data flow created"
        global data_connections;
        subscription = { 'subscription_ip': ip, 'subscription_port': port, 'data_flow': df, 'data_flow_options': df_opts }
        data_connections.append(subscription)
# Class containing callbacks for the dispatcher - this is how we receive data
class dispatcher_cb(stk_callback):
    """Dispatcher callbacks: deliver data sequences and name-server responses."""
    def __init__(self):
        stk_callback.__init__(self)

    def close(self):
        stk_callback.close(self)

    def process_seq_segment(self,seq,data,user_type,clientd):
        # Per-element iteration callback: log the segment and, when large
        # enough, show its first and last four bytes.
        if opts.quiet == False:
            print "Sequence " + str(seq.id()) + " Received " + str(len(data)) + " bytes of type " + str(user_type)
            if len(data) >= 4:
                sz = len(data)
                print 'Bytes: %02x %02x %02x %02x ... %02x %02x %02x %02x' % (ord(data[0]),ord(data[1]),ord(data[2]),ord(data[3]),ord(data[sz - 4]),ord(data[sz - 3]),ord(data[sz - 2]),ord(data[sz - 1]))

    def process_data(self,rcvchannel,rcv_seq): # Callback to receive data
        try:
            global seqs_rcvd
            # Call process_seq_segment() on each element in the sequence
            rcv_seq.iterate(self.process_seq_segment,None)
            seqs_rcvd += 1
            if seqs_rcvd == opts.seqs:
                # Received everything we asked for; stop dispatching.
                stkbase.stop_dispatcher()
        except Exception, e:
            print str(e)

    def process_name_response(self,rcvchannel,rcv_seq): # Callback to receive name info
        try:
            # Hand name-server responses to the name service for decoding;
            # presumably it then invokes name_service_cb.name_info_cb().
            stk_name_service.invoke(rcv_seq)
        except Exception, e:
            print "Exception occurred processing received data: " + str(e)
# Wire everything together and run the receive loop.
dispatcher_cbs = dispatcher_cb()
# Let the environment automatically add and remove fds for these data flows to the dispatcher
stk_env.append_name_server_dispatcher_cbs(envopts,"name_server_data_flow")
# Create an STK environment.
stkbase = stk_env(envopts)
# Create name service callbacks object and request info on name
name_info_cb = name_service_cb()
stk_examples.name_lookup_and_dispatch(stkbase,opts.subscriber_name,name_info_cb,opts,data,dispatcher_cbs,False,cleanup)
# Run the client dispatcher to receive data until we get all the responses
while seqs_rcvd < opts.seqs:
    try:
        print "received " + str(seqs_rcvd) + " sequences"
        # 1000ms timed dispatch so the loop can report progress periodically.
        stkbase.client_dispatcher_timed(dispatcher_cbs,1000);
    except Exception, e:
        print "Exception occurred waiting for data to arrive: " + str(e)
print "Done " + str(seqs_rcvd) + " sequences"
# Grace period so in-flight data can drain before teardown.
print "Waiting 5 seconds before closing"
stkbase.client_dispatcher_timed(dispatcher_cbs,5000);
stkbase.terminate_dispatcher()
# The dispatcher returned, cleanup everything
cleanup()
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for tensor_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class TensorUtilTest(test.TestCase):
  """Round-trip tests for make_tensor_proto / MakeNdarray.

  Expected `tensor_content` bytes differ by host endianness, hence the
  sys.byteorder branches. Uses the modern unittest spellings
  (assertEqual / assertRaisesRegex) and builtin `object` instead of the
  removed `np.object` alias.
  """

  def testFloat(self):
    value = 10.0
    t = tensor_util.make_tensor_proto(value)
    self.assertProtoEquals("""
      dtype: DT_FLOAT
      tensor_shape {}
      float_val: %.1f
      """ % value, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.float32, a.dtype)
    self.assertAllClose(np.array(value, dtype=np.float32), a)

  def testFloatN(self):
    t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0])
    if sys.byteorder == "big":
      self.assertProtoEquals(r"""
        dtype: DT_FLOAT
        tensor_shape { dim { size: 3 } }
        tensor_content: "A \000\000A\240\000\000A\360\000\000"
        """, t)
    else:
      self.assertProtoEquals(r"""
        dtype: DT_FLOAT
        tensor_shape { dim { size: 3 } }
        tensor_content: "\000\000 A\000\000\240A\000\000\360A"
        """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.float32, a.dtype)
    self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)

  def testFloatTyped(self):
    t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], dtype=dtypes.float32)
    if sys.byteorder == "big":
      self.assertProtoEquals(r"""
        dtype: DT_FLOAT
        tensor_shape { dim { size: 3 } }
        tensor_content: "A \000\000A\240\000\000A\360\000\000"
        """, t)
    else:
      self.assertProtoEquals(r"""
        dtype: DT_FLOAT
        tensor_shape { dim { size: 3 } }
        tensor_content: "\000\000 A\000\000\240A\000\000\360A"
        """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.float32, a.dtype)
    self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)

  def testFloatTypeCoerce(self):
    # Ints should be coerced to the requested float dtype.
    t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtypes.float32)
    if sys.byteorder == "big":
      self.assertProtoEquals(r"""
        dtype: DT_FLOAT
        tensor_shape { dim { size: 3 } }
        tensor_content: "A \000\000A\240\000\000A\360\000\000"
        """, t)
    else:
      self.assertProtoEquals(r"""
        dtype: DT_FLOAT
        tensor_shape { dim { size: 3 } }
        tensor_content: "\000\000 A\000\000\240A\000\000\360A"
        """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.float32, a.dtype)
    self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)

  def testFloatTypeCoerceNdarray(self):
    arr = np.asarray([10, 20, 30], dtype="int")
    t = tensor_util.make_tensor_proto(arr, dtype=dtypes.float32)
    if sys.byteorder == "big":
      self.assertProtoEquals(r"""
        dtype: DT_FLOAT
        tensor_shape { dim { size: 3 } }
        tensor_content: "A \000\000A\240\000\000A\360\000\000"
        """, t)
    else:
      self.assertProtoEquals(r"""
        dtype: DT_FLOAT
        tensor_shape { dim { size: 3 } }
        tensor_content: "\000\000 A\000\000\240A\000\000\360A"
        """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.float32, a.dtype)
    self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)

  def testFloatSizes(self):
    t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[1, 3])
    if sys.byteorder == "big":
      self.assertProtoEquals(r"""
        dtype: DT_FLOAT
        tensor_shape { dim { size: 1 } dim { size: 3 } }
        tensor_content: "A \000\000A\240\000\000A\360\000\000"
        """, t)
    else:
      self.assertProtoEquals(r"""
        dtype: DT_FLOAT
        tensor_shape { dim { size: 1 } dim { size: 3 } }
        tensor_content: "\000\000 A\000\000\240A\000\000\360A"
        """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.float32, a.dtype)
    self.assertAllClose(np.array([[10.0, 20.0, 30.0]], dtype=np.float32), a)

  def testFloatSizes2(self):
    t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[3, 1])
    if sys.byteorder == "big":
      self.assertProtoEquals(r"""
        dtype: DT_FLOAT
        tensor_shape { dim { size: 3 } dim { size: 1 } }
        tensor_content: "A \000\000A\240\000\000A\360\000\000"
        """, t)
    else:
      self.assertProtoEquals(r"""
        dtype: DT_FLOAT
        tensor_shape { dim { size: 3 } dim { size: 1 } }
        tensor_content: "\000\000 A\000\000\240A\000\000\360A"
        """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.float32, a.dtype)
    self.assertAllClose(np.array([[10.0], [20.0], [30.0]], dtype=np.float32), a)

  def testFloatSizesLessValues(self):
    # A scalar with a larger shape is stored as a single repeated value.
    t = tensor_util.make_tensor_proto(10.0, shape=[1, 3])
    self.assertProtoEquals("""
      dtype: DT_FLOAT
      tensor_shape { dim { size: 1 } dim { size: 3 } }
      float_val: 10.0
      """, t)
    # No conversion to Ndarray for this one: not enough values.

  def testFloatNpArrayFloat64(self):
    t = tensor_util.make_tensor_proto(
        np.array([[10.0, 20.0, 30.0]], dtype=np.float64))
    if sys.byteorder == "big":
      self.assertProtoEquals(r"""
        dtype: DT_DOUBLE
        tensor_shape { dim { size: 1 } dim { size: 3 } }
        tensor_content: "@$\000\000\000\000\000\000@4\000\000\000\000\000\000@>\000\000\000\000\000\000"
        """, t)
    else:
      self.assertProtoEquals(r"""
        dtype: DT_DOUBLE
        tensor_shape { dim { size: 1 } dim { size: 3 } }
        tensor_content: "\000\000\000\000\000\000$@\000\000\000\000\000\0004@\000\000\000\000\000\000>@"
        """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.float64, a.dtype)
    self.assertAllClose(
        np.array([[10.0, 20.0, 30.0]], dtype=np.float64),
        tensor_util.MakeNdarray(t))

  def testFloatTypesWithImplicitRepeat(self):
    # A single value fills the whole requested shape.
    for dtype, nptype in [(dtypes.float32, np.float32),
                          (dtypes.float64, np.float64)]:
      t = tensor_util.make_tensor_proto([10.0], shape=[3, 4], dtype=dtype)
      a = tensor_util.MakeNdarray(t)
      self.assertAllClose(
          np.array(
              [[10.0, 10.0, 10.0, 10.0],
               [10.0, 10.0, 10.0, 10.0],
               [10.0, 10.0, 10.0, 10.0]],
              dtype=nptype),
          a)

  def testFloatMutateArray(self):
    # Mutating the ndarray returned by MakeNdarray must not change the proto.
    t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], dtype=dtypes.float32)
    a = tensor_util.MakeNdarray(t)
    a[0] = 5.0
    self.assertEqual(np.float32, a.dtype)
    self.assertAllClose(np.array([5.0, 20.0, 30.0], dtype=np.float32), a)
    if sys.byteorder == "big":
      self.assertProtoEquals(r"""
        dtype: DT_FLOAT
        tensor_shape { dim { size: 3 } }
        tensor_content: "A \000\000A\240\000\000A\360\000\000"
        """, t)
    else:
      self.assertProtoEquals(r"""
        dtype: DT_FLOAT
        tensor_shape { dim { size: 3 } }
        tensor_content: "\000\000 A\000\000\240A\000\000\360A"
        """, t)

  def testHalf(self):
    t = tensor_util.make_tensor_proto(np.array([10.0, 20.0], dtype=np.float16))
    self.assertProtoEquals("""
      dtype: DT_HALF
      tensor_shape {
        dim {
          size: 2
        }
      }
      half_val: 18688
      half_val: 19712
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.float16, a.dtype)
    self.assertAllClose(np.array([10.0, 20.0], dtype=np.float16), a)

  def testBfloat16(self):
    test_type = dtypes.bfloat16.as_numpy_dtype
    t = tensor_util.make_tensor_proto(np.array([10.0, 20.0], dtype=test_type))
    # 10.0: 16672 = 010000010(130) 0100000: (1+0/2+1/4) * 2^(130-127)
    # 20.0: 16800 = 010000011(131) 0100000: (1+0/2+1/4) * 2^(131-127)
    self.assertProtoEquals("""
      dtype: DT_BFLOAT16
      tensor_shape {
        dim {
          size: 2
        }
      }
      half_val: 16672
      half_val: 16800
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(test_type, a.dtype)
    self.assertAllClose(np.array([10.0, 20.0], dtype=test_type), a)

  def testInt(self):
    t = tensor_util.make_tensor_proto(10)
    self.assertProtoEquals("""
      dtype: DT_INT32
      tensor_shape {}
      int_val: 10
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.int32, a.dtype)
    self.assertAllClose(np.array(10, dtype=np.int32), a)

  def testLargeInt(self):
    value = np.iinfo(np.int64).max
    t = tensor_util.make_tensor_proto(value)
    self.assertProtoEquals("""
      dtype: DT_INT64
      tensor_shape {}
      int64_val: %d
      """ % value, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.int64, a.dtype)
    self.assertAllClose(np.array(value, dtype=np.int64), a)

  def testLargeNegativeInt(self):
    # We don't use the min np.int64 value here
    # because it breaks np.abs().
    #
    # np.iinfo(np.int64).min = -9223372036854775808
    # np.iinfo(np.int64).max = 9223372036854775807
    # np.abs(-9223372036854775808) = -9223372036854775808
    value = np.iinfo(np.int64).min + 1
    t = tensor_util.make_tensor_proto(value)
    self.assertProtoEquals("""
      dtype: DT_INT64
      tensor_shape {}
      int64_val: %d
      """ % value, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.int64, a.dtype)
    self.assertAllClose(np.array(value, dtype=np.int64), a)

  def testIntNDefaultType(self):
    t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
    if sys.byteorder == "big":
      self.assertProtoEquals(r"""
        dtype: DT_INT32
        tensor_shape { dim { size: 2 } dim { size: 2 } }
        tensor_content: "\000\000\000\n\000\000\000\024\000\000\000\036\000\000\000("
        """, t)
    else:
      self.assertProtoEquals(r"""
        dtype: DT_INT32
        tensor_shape { dim { size: 2 } dim { size: 2 } }
        tensor_content: "\n\000\000\000\024\000\000\000\036\000\000\000(\000\000\000"
        """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.int32, a.dtype)
    self.assertAllClose(np.array([[10, 20], [30, 40]], dtype=np.int32), a)

  def testIntTypes(self):
    for dtype, nptype in [(dtypes.int32, np.int32),
                          (dtypes.uint8, np.uint8),
                          (dtypes.uint16, np.uint16),
                          (dtypes.int16, np.int16),
                          (dtypes.int8, np.int8)]:
      # Test with array.
      t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtype)
      self.assertEqual(dtype, t.dtype)
      self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
      a = tensor_util.MakeNdarray(t)
      self.assertEqual(nptype, a.dtype)
      self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
      # Test with ndarray.
      t = tensor_util.make_tensor_proto(np.array([10, 20, 30], dtype=nptype))
      self.assertEqual(dtype, t.dtype)
      self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
      a = tensor_util.MakeNdarray(t)
      self.assertEqual(nptype, a.dtype)
      self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)

  def testIntTypesWithImplicitRepeat(self):
    # The last supplied value is repeated to fill the shape.
    for dtype, nptype in [(dtypes.int64, np.int64), (dtypes.int32, np.int32),
                          (dtypes.uint8, np.uint8), (dtypes.uint16, np.uint16),
                          (dtypes.int16, np.int16), (dtypes.int8, np.int8)]:
      self.assertAllEqual(
          np.array([[10, 11, 12, 12], [12, 12, 12, 12], [12, 12, 12, 12]],
                   dtype=nptype),
          tensor_util.MakeNdarray(
              tensor_util.make_tensor_proto([10, 11, 12],
                                            shape=[3, 4],
                                            dtype=dtype)))

  def testIntMixedWithDimension(self):
    # Github issue: 11974
    dtype = dtypes.int32
    nptype = np.int32
    t = tensor_util.make_tensor_proto(
        [10, tensor_shape.Dimension(20), 30], dtype=dtype)
    self.assertEqual(dtype, t.dtype)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(nptype, a.dtype)
    self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)

  def testLong(self):
    t = tensor_util.make_tensor_proto(10, dtype=dtypes.int64)
    self.assertProtoEquals("""
      dtype: DT_INT64
      tensor_shape {}
      int64_val: 10
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.int64, a.dtype)
    self.assertAllClose(np.array(10, dtype=np.int64), a)

  def testLongN(self):
    t = tensor_util.make_tensor_proto(
        [10, 20, 30], shape=[1, 3], dtype=dtypes.int64)
    if sys.byteorder == "big":
      self.assertProtoEquals(r"""
        dtype: DT_INT64
        tensor_shape { dim { size: 1 } dim { size: 3 } }
        tensor_content: "\000\000\000\000\000\000\000\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036"
        """, t)
    else:
      self.assertProtoEquals(r"""
        dtype: DT_INT64
        tensor_shape { dim { size: 1 } dim { size: 3 } }
        tensor_content: "\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
        """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.int64, a.dtype)
    self.assertAllClose(np.array([[10, 20, 30]], dtype=np.int64), a)

  def testLongNpArray(self):
    t = tensor_util.make_tensor_proto(np.array([10, 20, 30]))
    if sys.byteorder == "big":
      self.assertProtoEquals(r"""
        dtype: DT_INT64
        tensor_shape { dim { size: 3 } }
        tensor_content: "\000\000\000\000\000\000\000\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036"
        """, t)
    else:
      self.assertProtoEquals(r"""
        dtype: DT_INT64
        tensor_shape { dim { size: 3 } }
        tensor_content: "\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
        """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.int64, a.dtype)
    self.assertAllClose(np.array([10, 20, 30], dtype=np.int64), a)

  def testQuantizedTypes(self):
    # Test with array.
    data = [(21,), (22,), (23,)]
    t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint32)
    if sys.byteorder == "big":
      self.assertProtoEquals(r"""
        dtype: DT_QINT32
        tensor_shape { dim { size: 3 } }
        tensor_content: "\000\000\000\025\000\000\000\026\000\000\000\027"
        """, t)
    else:
      self.assertProtoEquals(r"""
        dtype: DT_QINT32
        tensor_shape { dim { size: 3 } }
        tensor_content: "\025\000\000\000\026\000\000\000\027\000\000\000"
        """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(dtypes.qint32.as_numpy_dtype, a.dtype)
    self.assertAllEqual(np.array(data, dtype=a.dtype), a)
    t = tensor_util.make_tensor_proto(data, dtype=dtypes.quint8)
    self.assertProtoEquals(r"""
      dtype: DT_QUINT8
      tensor_shape { dim { size: 3 } }
      tensor_content: "\025\026\027"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(dtypes.quint8.as_numpy_dtype, a.dtype)
    self.assertAllEqual(np.array(data, dtype=a.dtype), a)
    t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint8)
    self.assertProtoEquals(r"""
      dtype: DT_QINT8
      tensor_shape { dim { size: 3 } }
      tensor_content: "\025\026\027"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(dtypes.qint8.as_numpy_dtype, a.dtype)
    self.assertAllEqual(np.array(data, dtype=a.dtype), a)
    t = tensor_util.make_tensor_proto(data, dtype=dtypes.quint16)
    if sys.byteorder == "big":
      self.assertProtoEquals(r"""
        dtype: DT_QUINT16
        tensor_shape { dim { size: 3 } }
        tensor_content: "\000\025\000\026\000\027"
        """, t)
    else:
      self.assertProtoEquals(r"""
        dtype: DT_QUINT16
        tensor_shape { dim { size: 3 } }
        tensor_content: "\025\000\026\000\027\000"
        """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(dtypes.quint16.as_numpy_dtype, a.dtype)
    self.assertAllEqual(np.array(data, dtype=a.dtype), a)
    t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint16)
    if sys.byteorder == "big":
      self.assertProtoEquals(r"""
        dtype: DT_QINT16
        tensor_shape { dim { size: 3 } }
        tensor_content: "\000\025\000\026\000\027"
        """, t)
    else:
      self.assertProtoEquals(r"""
        dtype: DT_QINT16
        tensor_shape { dim { size: 3 } }
        tensor_content: "\025\000\026\000\027\000"
        """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(dtypes.qint16.as_numpy_dtype, a.dtype)
    self.assertAllEqual(np.array(data, dtype=a.dtype), a)

  def testString(self):
    t = tensor_util.make_tensor_proto("foo")
    self.assertProtoEquals("""
      dtype: DT_STRING
      tensor_shape {}
      string_val: "foo"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(object, a.dtype)
    self.assertEqual([b"foo"], a)

  def testStringWithImplicitRepeat(self):
    t = tensor_util.make_tensor_proto(["f", "g"], shape=[3, 4])
    a = tensor_util.MakeNdarray(t)
    self.assertAllEqual(
        np.array([[b"f", b"g", b"g", b"g"], [b"g", b"g", b"g", b"g"],
                  [b"g", b"g", b"g", b"g"]],
                 dtype=object), a)

  def testStringN(self):
    t = tensor_util.make_tensor_proto([b"foo", b"bar", b"baz"], shape=[1, 3])
    self.assertProtoEquals("""
      dtype: DT_STRING
      tensor_shape { dim { size: 1 } dim { size: 3 } }
      string_val: "foo"
      string_val: "bar"
      string_val: "baz"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(object, a.dtype)
    self.assertAllEqual(np.array([[b"foo", b"bar", b"baz"]]), a)

  def testStringNpArray(self):
    t = tensor_util.make_tensor_proto(
        np.array([[b"a", b"ab"], [b"abc", b"abcd"]]))
    self.assertProtoEquals("""
      dtype: DT_STRING
      tensor_shape { dim { size: 2 } dim { size: 2 } }
      string_val: "a"
      string_val: "ab"
      string_val: "abc"
      string_val: "abcd"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(object, a.dtype)
    self.assertAllEqual(np.array([[b"a", b"ab"], [b"abc", b"abcd"]]), a)

  def testArrayMethod(self):
    # Objects exposing __array__ are converted via np.array().

    class Wrapper(object):

      def __array__(self):
        return np.array([b"foo", b"bar", b"baz"])

    t = tensor_util.make_tensor_proto(Wrapper(), shape=[1, 3])
    self.assertProtoEquals("""
      dtype: DT_STRING
      tensor_shape { dim { size: 1 } dim { size: 3 } }
      string_val: "foo"
      string_val: "bar"
      string_val: "baz"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(object, a.dtype)
    self.assertAllEqual(np.array([[b"foo", b"bar", b"baz"]]), a)

  def testArrayInterface(self):
    # Objects exposing __array_interface__ are accepted as well.

    class Wrapper(object):

      @property
      def __array_interface__(self):
        return np.array([b"foo", b"bar", b"baz"]).__array_interface__

    t = tensor_util.make_tensor_proto(Wrapper(), shape=[1, 3])
    self.assertProtoEquals("""
      dtype: DT_STRING
      tensor_shape { dim { size: 1 } dim { size: 3 } }
      string_val: "foo"
      string_val: "bar"
      string_val: "baz"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(object, a.dtype)
    self.assertAllEqual(np.array([[b"foo", b"bar", b"baz"]]), a)

  def testStringTuple(self):
    t = tensor_util.make_tensor_proto((b"a", b"ab", b"abc", b"abcd"))
    self.assertProtoEquals("""
      dtype: DT_STRING
      tensor_shape { dim { size: 4 } }
      string_val: "a"
      string_val: "ab"
      string_val: "abc"
      string_val: "abcd"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(object, a.dtype)
    self.assertAllEqual(np.array((b"a", b"ab", b"abc", b"abcd")), a)

  def testStringNestedTuple(self):
    t = tensor_util.make_tensor_proto(((b"a", b"ab"), (b"abc", b"abcd")))
    self.assertProtoEquals("""
      dtype: DT_STRING
      tensor_shape { dim { size: 2 } dim { size: 2 } }
      string_val: "a"
      string_val: "ab"
      string_val: "abc"
      string_val: "abcd"
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(object, a.dtype)
    self.assertAllEqual(np.array(((b"a", b"ab"), (b"abc", b"abcd"))), a)

  def testComplex64(self):
    t = tensor_util.make_tensor_proto((1 + 2j), dtype=dtypes.complex64)
    self.assertProtoEquals("""
      dtype: DT_COMPLEX64
      tensor_shape {}
      scomplex_val: 1
      scomplex_val: 2
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.complex64, a.dtype)
    self.assertAllEqual(np.array(1 + 2j), a)

  def testComplex128(self):
    t = tensor_util.make_tensor_proto((1 + 2j), dtype=dtypes.complex128)
    self.assertProtoEquals("""
      dtype: DT_COMPLEX128
      tensor_shape {}
      dcomplex_val: 1
      dcomplex_val: 2
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.complex128, a.dtype)
    self.assertAllEqual(np.array(1 + 2j), a)

  def testComplexWithImplicitRepeat(self):
    for dtype, np_dtype in [(dtypes.complex64, np.complex64),
                            (dtypes.complex128, np.complex128)]:
      t = tensor_util.make_tensor_proto((1 + 1j), shape=[3, 4], dtype=dtype)
      a = tensor_util.MakeNdarray(t)
      self.assertAllClose(
          np.array(
              [[(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)],
               [(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)],
               [(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)]],
              dtype=np_dtype),
          a)

  def testComplex64N(self):
    t = tensor_util.make_tensor_proto(
        [(1 + 2j), (3 + 4j), (5 + 6j)], shape=[1, 3], dtype=dtypes.complex64)
    self.assertProtoEquals("""
      dtype: DT_COMPLEX64
      tensor_shape { dim { size: 1 } dim { size: 3 } }
      scomplex_val: 1
      scomplex_val: 2
      scomplex_val: 3
      scomplex_val: 4
      scomplex_val: 5
      scomplex_val: 6
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.complex64, a.dtype)
    self.assertAllEqual(np.array([[(1 + 2j), (3 + 4j), (5 + 6j)]]), a)

  def testComplex128N(self):
    t = tensor_util.make_tensor_proto(
        [(1 + 2j), (3 + 4j), (5 + 6j)], shape=[1, 3], dtype=dtypes.complex128)
    self.assertProtoEquals("""
      dtype: DT_COMPLEX128
      tensor_shape { dim { size: 1 } dim { size: 3 } }
      dcomplex_val: 1
      dcomplex_val: 2
      dcomplex_val: 3
      dcomplex_val: 4
      dcomplex_val: 5
      dcomplex_val: 6
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.complex128, a.dtype)
    self.assertAllEqual(np.array([[(1 + 2j), (3 + 4j), (5 + 6j)]]), a)

  def testComplex64NpArray(self):
    t = tensor_util.make_tensor_proto(
        np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]),
        dtype=dtypes.complex64)
    # scomplex_val are real_0, imag_0, real_1, imag_1, ...
    self.assertProtoEquals("""
      dtype: DT_COMPLEX64
      tensor_shape { dim { size: 2 } dim { size: 2 } }
      scomplex_val: 1
      scomplex_val: 2
      scomplex_val: 3
      scomplex_val: 4
      scomplex_val: 5
      scomplex_val: 6
      scomplex_val: 7
      scomplex_val: 8
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.complex64, a.dtype)
    self.assertAllEqual(
        np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]), a)

  def testComplex128NpArray(self):
    t = tensor_util.make_tensor_proto(
        np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]),
        dtype=dtypes.complex128)
    # scomplex_val are real_0, imag_0, real_1, imag_1, ...
    self.assertProtoEquals("""
      dtype: DT_COMPLEX128
      tensor_shape { dim { size: 2 } dim { size: 2 } }
      dcomplex_val: 1
      dcomplex_val: 2
      dcomplex_val: 3
      dcomplex_val: 4
      dcomplex_val: 5
      dcomplex_val: 6
      dcomplex_val: 7
      dcomplex_val: 8
      """, t)
    a = tensor_util.MakeNdarray(t)
    self.assertEqual(np.complex128, a.dtype)
    self.assertAllEqual(
        np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]), a)

  def testUnsupportedDTypes(self):
    with self.assertRaises(TypeError):
      tensor_util.make_tensor_proto(np.array([1]), 0)
    with self.assertRaises(TypeError):
      tensor_util.make_tensor_proto(3, dtype=dtypes.qint8)
    with self.assertRaises(TypeError):
      tensor_util.make_tensor_proto([3], dtype=dtypes.qint8)
    # Validate the helpful error message when trying to convert an
    # unconvertible list as strings.
    with self.assertRaisesRegex(TypeError, "Failed to convert object"):
      tensor_util.make_tensor_proto([tensor_shape.Dimension(1)])

  def testTensorShapeVerification(self):
    array = np.array([[1], [2]])
    correct_shape = (2, 1)
    incorrect_shape = (1, 2)
    tensor_util.make_tensor_proto(array, shape=correct_shape, verify_shape=True)
    with self.assertRaises(TypeError):
      tensor_util.make_tensor_proto(
          array, shape=incorrect_shape, verify_shape=True)

  def testShapeTooLarge(self):
    with self.assertRaises(ValueError):
      tensor_util.make_tensor_proto(np.array([1, 2]), shape=[1])

  def testLowRankSupported(self):
    t = tensor_util.make_tensor_proto(np.array(7))
    self.assertProtoEquals("""
      dtype: DT_INT64
      tensor_shape {}
      int64_val: 7
      """, t)

  def testShapeEquals(self):
    t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
    self.assertTrue(tensor_util.ShapeEquals(t, [2, 2]))
    self.assertTrue(tensor_util.ShapeEquals(t, (2, 2)))
    self.assertTrue(
        tensor_util.ShapeEquals(t, tensor_shape.as_shape([2, 2]).as_proto()))
    self.assertFalse(tensor_util.ShapeEquals(t, [5, 3]))
    self.assertFalse(tensor_util.ShapeEquals(t, [1, 4]))
    self.assertFalse(tensor_util.ShapeEquals(t, [4]))

  @test_util.run_deprecated_v1
  def testMockArray(self):

    class MockArray(object):

      def __init__(self, array):
        self.array = array

      def __array__(self, dtype=None):
        return np.asarray(self.array, dtype)

    with self.cached_session() as sess:
      ma = MockArray(np.array([10, 20, 30]))
      t = ops.convert_to_tensor(ma)
      a = self.evaluate(t)
      self.assertEqual(np.int64, a.dtype)
      self.assertAllClose(np.array([10, 20, 30], dtype=np.int64), a)
class IsTensorTest(test.TestCase):
  """Checks that is_tensor discriminates Tensors from plain ndarrays."""

  @test_util.run_in_graph_and_eager_modes
  def testConstantTensor(self):
    raw = np.random.rand(3).astype(np.int32)
    wrapped = constant_op.constant(raw)
    self.assertTrue(tensor_util.is_tensor(wrapped))
    self.assertFalse(tensor_util.is_tensor(raw))
class ConstantValueTest(test.TestCase):
def testConstant(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = constant_op.constant(np_val)
self.assertAllClose(np_val, tensor_util.constant_value(tf_val))
np_val = np.random.rand(3, 0, 7).astype(np.float32)
tf_val = constant_op.constant(np_val)
self.assertAllClose(np_val, tensor_util.constant_value(tf_val))
@test_util.run_deprecated_v1
def testUnknown(self):
tf_val = gen_state_ops.variable(
shape=[3, 4, 7],
dtype=dtypes.float32,
name="tf_val",
container="",
shared_name="")
self.assertIs(None, tensor_util.constant_value(tf_val))
def testShape(self):
np_val = np.array([1, 2, 3], dtype=np.int32)
tf_val = array_ops.shape(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(np_val, c_val)
self.assertEqual(np.int32, c_val.dtype)
def testFill(self):
np_val = np.array([-1, -1, -1], dtype=np.float32)
tf_val = array_ops.fill([3], constant_op.constant(-1.0))
c_val = tensor_util.constant_value(tf_val)
self.assertAllEqual(np_val, c_val)
self.assertEqual(np.float32, c_val.dtype)
def testSize(self):
tf_val = array_ops.size(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(6, c_val)
@test_util.run_deprecated_v1
def testSizeOfScalar(self):
tf_val = array_ops.size(constant_op.constant(0.0))
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(1, c_val)
self.assertEqual(np.ndarray, type(c_val))
@test_util.run_deprecated_v1
def testRank(self):
tf_val = array_ops.rank(constant_op.constant(0.0, shape=[1, 2, 3]))
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(np.ndarray, type(c_val))
self.assertEqual((), c_val.shape)
self.assertEqual(3, c_val)
# Repeat test using array_ops.rank_internal to avoid the optimization that
# happens in the rank function.
tf_val = array_ops.rank_internal(
constant_op.constant(
0.0, shape=[1, 2, 3]), optimize=False)
c_val = tensor_util.constant_value(tf_val)
self.assertEqual(np.ndarray, type(c_val))
self.assertEqual((), c_val.shape)
self.assertEqual(3, c_val)
self.assertEqual([3], c_val)
def testCast(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = math_ops.cast(constant_op.constant(np_val), dtypes.float64)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val.astype(np.float64), c_val)
np_val = np.random.rand(3, 0, 7).astype(np.float32)
tf_val = math_ops.cast(constant_op.constant(np_val), dtypes.float64)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val.astype(np.float64), c_val)
@test_util.run_deprecated_v1
def testConcat(self):
np_val = np.random.rand(3, 4, 7).astype(np.float32)
tf_val = array_ops.concat(
[np_val[0:1, :, :], np_val[1:2, :, :], np_val[2:3, :, :]], 0)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val, c_val)
tf_val = array_ops.concat(
[np_val[0, :, :], np_val[1, :, :], np_val[2, :, :]],
array_ops.placeholder(dtypes.int32))
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
tf_val = array_ops.concat([
np_val[0, :, :], array_ops.placeholder(dtypes.float32), np_val[2, :, :]
], 1)
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
@test_util.run_deprecated_v1
def testPack_Axis0(self):
inputs = [np.random.rand(4, 7) for _ in range(3)]
np_val = np.array(inputs)
tf_val = array_ops.stack(inputs)
c_val = tensor_util.constant_value(tf_val)
self.assertAllClose(np_val, c_val)
tf_val = array_ops.stack(
[inputs[0], array_ops.placeholder(dtypes.float32), inputs[2]])
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
@test_util.run_deprecated_v1
def testPack_Axis1(self):
inputs = [np.random.rand(4, 7) for _ in range(3)]
tf_val = array_ops.stack(inputs, axis=1)
c_val = tensor_util.constant_value(tf_val)
self.assertIsNone(c_val)
tf_val = array_ops.stack(
[inputs[0], array_ops.placeholder(dtypes.float32), inputs[2]], axis=1)
c_val = tensor_util.constant_value(tf_val)
self.assertIs(None, c_val)
@test_util.run_deprecated_v1
def testPack_Partial_Axis0(self):
input_ = np.random.rand(4, 7)
tf_val = array_ops.stack([input_, array_ops.placeholder(dtypes.float32)])
c_val = tensor_util.constant_value(tf_val, partial=True)
self.assertAllClose(input_, c_val[0])
self.assertIsNone(c_val[1])
@test_util.run_deprecated_v1
def testPack_Partial_Axis1(self):
input_ = np.random.rand(4, 7)
tf_val = array_ops.stack([input_, array_ops.placeholder(dtypes.float32)],
axis=1)
c_val = tensor_util.constant_value(tf_val, partial=True)
self.assertIsNone(c_val)
def testEqual(self):
    """constant_value folds math_ops.equal, including broadcasting."""
    # Scalar inputs.
    same = math_ops.equal(constant_op.constant(1), constant_op.constant(1))
    self.assertEqual(tensor_util.constant_value(same), True)
    differing = math_ops.equal(constant_op.constant(1), constant_op.constant(0))
    self.assertEqual(tensor_util.constant_value(differing), False)

    # (1, 2) vs (2, 1) inputs broadcast to a (2, 2) result.
    folded = tensor_util.constant_value(
        math_ops.equal(constant_op.constant([[0, 1]]),
                       constant_op.constant([[0], [1]])))
    self.assertAllEqual(folded, [[True, False], [False, True]])
def testNotEqual(self):
    """constant_value folds math_ops.not_equal, including broadcasting."""
    # Scalar inputs.
    same = math_ops.not_equal(constant_op.constant(1),
                              constant_op.constant(1))
    self.assertEqual(tensor_util.constant_value(same), False)
    differing = math_ops.not_equal(constant_op.constant(1),
                                   constant_op.constant(0))
    self.assertEqual(tensor_util.constant_value(differing), True)

    # (1, 2) vs (2, 1) inputs broadcast to a (2, 2) result.
    folded = tensor_util.constant_value(
        math_ops.not_equal(constant_op.constant([[0, 1]]),
                           constant_op.constant([[0], [1]])))
    self.assertAllEqual(folded, [[False, True], [True, False]])
class ConstantValueAsShapeTest(test.TestCase):
    """Tests for tensor_util.constant_value_as_shape.

    constant_value_as_shape statically evaluates a rank-1 integer tensor into
    a TensorShape; -1 entries and statically-unknown elements become None
    dimensions.
    """

    @test_util.run_in_graph_and_eager_modes
    def testConstant(self):
        """A constant vector maps directly to a fully-known TensorShape."""
        np_val = np.random.rand(3).astype(np.int32)
        tf_val = constant_op.constant(np_val)
        self.assertEqual(
            tensor_shape.TensorShape(np_val),
            tensor_util.constant_value_as_shape(tf_val))

        # An empty vector yields the rank-0 shape.
        tf_val = constant_op.constant([], dtype=dtypes.int32)
        self.assertEqual(
            tensor_shape.TensorShape([]),
            tensor_util.constant_value_as_shape(tf_val))

    @test_util.run_in_graph_and_eager_modes
    def testShape(self):
        """The output of array_ops.shape is recognized symbolically."""
        tf_val = array_ops.shape(constant_op.constant(0.0, shape=[1, 2, 3]))
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual(tensor_shape.TensorShape([1, 2, 3]), c_val)

    @test_util.run_in_graph_and_eager_modes
    def testMinusOneBecomesNone(self):
        """-1 entries denote unknown dimensions (None)."""
        tf_val = constant_op.constant([-1, 1, -1], shape=[3])
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([None, 1, None], c_val.as_list())

    @test_util.run_deprecated_v1
    def testPack(self):
        """stack() of scalars: a placeholder element becomes a None dim."""
        tf_val = array_ops.stack(
            [constant_op.constant(16), 37, array_ops.placeholder(dtypes.int32)])
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([16, 37, None], c_val.as_list())

    @test_util.run_deprecated_v1
    def testConcat(self):
        """concat() merges known prefixes with unknown (placeholder) parts."""
        tf_val = array_ops.concat(
            [[16, 37], array_ops.placeholder(
                dtypes.int32, shape=(2,))], 0)
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([16, 37, None, None], c_val.as_list())

        tf_val = array_ops.concat(
            [[16, 37], array_ops.placeholder(
                dtypes.int32, shape=(1,)), [48]], 0)
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([16, 37, None, 48], c_val.as_list())

    @test_util.run_deprecated_v1
    def testSlice(self):
        """Slicing a shape vector slices the inferred TensorShape too."""
        tf_val = array_ops.placeholder(dtypes.int32, shape=(4,))[0:2]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([None, None], c_val.as_list())

        # begin:end
        tf_val = constant_op.constant([10, 20, 30])[1:3]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([20, 30], c_val.as_list())

        # begin:end:stride
        tf_val = array_ops.strided_slice(
            constant_op.constant([10, 20, 30]), [1], [3], strides=[2])
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([20], c_val.as_list())

        # Base vector used below; statically it is [1, 2, 16, 37, None, 48].
        tf_val_orig = array_ops.concat(
            [[1, 2, 16, 37], array_ops.placeholder(
                dtypes.int32, shape=(1,)), [48]], 0)

        # begin: no end
        tf_val = tf_val_orig[2:]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([16, 37, None, 48], c_val.as_list())

        # begin::negative slice
        tf_val = tf_val_orig[2::-1]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([16, 2, 1], c_val.as_list())

        # :end:negative slice
        tf_val = tf_val_orig[:1:-2]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([48, 37], c_val.as_list())

        # begin:end:negative slice
        tf_val = tf_val_orig[3:1:-1]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([37, 16], c_val.as_list())

        # begin:negative end:slice
        tf_val = tf_val_orig[1:-3:1]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([2, 16], c_val.as_list())

        # negative begin::slice
        tf_val = tf_val_orig[-3::1]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([37, None, 48], c_val.as_list())

        # negative begin::negative slice
        tf_val = tf_val_orig[-3::-1]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([37, 16, 2, 1], c_val.as_list())

        # negative begin:negative end:negative slice
        tf_val = tf_val_orig[-3:-5:-1]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([37, 16], c_val.as_list())

        # Do not support shape inference for additional arguments
        tf_val = constant_op.constant([10, 20, 30])[...]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual([None, None, None], c_val.as_list())

        # Do not support shape inference for tensor slices.
        tf_val = constant_op.constant([10, 20, 30])[
            array_ops.placeholder(dtypes.int32, shape=()):]
        c_val = tensor_util.constant_value_as_shape(tf_val)
        self.assertEqual(tensor_shape.unknown_shape(), c_val)

        # Do not support shape inference for higher rank
        with self.assertRaises(ValueError):
            tf_val = constant_op.constant([[10], [20], [30]])[:, 0:]
            c_val = tensor_util.constant_value_as_shape(tf_val)
# Standard test entry point: run every test case in this module.
if __name__ == "__main__":
    test.main()
| |
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging.handlers
import ssl
import socket
import time
#
# Some constants ...
#
SYSLOG_UDP_PORT = 514
class SysLogHandler(logging.handlers.SysLogHandler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Extends the default SysLogHandler with SSL support and a lazy
    connection that is (re)established with exponential backoff.

    .. code-block:: python

        address = ('localhost', 514)
        [...]
        syslog = SysLogHandler(address=address,
                               socktype=socket.SOCK_STREAM,
                               use_ssl=True,
                               keyfile="client.key",
                               certfile="client.crt",
                               ca_certs="ca.crt",
                               cert_reqs=ssl.CERT_REQUIRED)
        [...]
        logger.addHandler(syslog)
    """

    # ================================
    #  Priorities (these are ordered)
    # ================================
    # system is unusable
    LOG_EMERG = logging.handlers.SysLogHandler.LOG_EMERG
    # action must be taken immediately
    LOG_ALERT = logging.handlers.SysLogHandler.LOG_ALERT
    # critical conditions
    LOG_CRIT = logging.handlers.SysLogHandler.LOG_CRIT
    # error conditions
    LOG_ERR = logging.handlers.SysLogHandler.LOG_ERR
    # warning conditions
    LOG_WARNING = logging.handlers.SysLogHandler.LOG_WARNING
    # normal but significant condition
    LOG_NOTICE = logging.handlers.SysLogHandler.LOG_NOTICE
    # informational
    LOG_INFO = logging.handlers.SysLogHandler.LOG_INFO
    # debug-level messages
    LOG_DEBUG = logging.handlers.SysLogHandler.LOG_DEBUG

    # ================
    #  Facility codes
    # ================
    # kernel messages
    LOG_KERN = logging.handlers.SysLogHandler.LOG_KERN
    # random user-level messages
    LOG_USER = logging.handlers.SysLogHandler.LOG_USER
    # mail system
    LOG_MAIL = logging.handlers.SysLogHandler.LOG_MAIL
    # system daemons
    LOG_DAEMON = logging.handlers.SysLogHandler.LOG_DAEMON
    # security/authorization messages
    LOG_AUTH = logging.handlers.SysLogHandler.LOG_AUTH
    # messages generated internally by syslogd
    LOG_SYSLOG = logging.handlers.SysLogHandler.LOG_SYSLOG
    # line printer subsystem
    LOG_LPR = logging.handlers.SysLogHandler.LOG_LPR
    # network news subsystem
    LOG_NEWS = logging.handlers.SysLogHandler.LOG_NEWS
    # UUCP subsystem
    LOG_UUCP = logging.handlers.SysLogHandler.LOG_UUCP
    # clock daemon
    LOG_CRON = logging.handlers.SysLogHandler.LOG_CRON
    # security/authorization messages (private)
    LOG_AUTHPRIV = logging.handlers.SysLogHandler.LOG_AUTHPRIV
    # FTP daemon
    LOG_FTP = logging.handlers.SysLogHandler.LOG_FTP

    # ========================
    #  Reserved for local use
    # ========================
    LOG_LOCAL0 = logging.handlers.SysLogHandler.LOG_LOCAL0
    LOG_LOCAL1 = logging.handlers.SysLogHandler.LOG_LOCAL1
    LOG_LOCAL2 = logging.handlers.SysLogHandler.LOG_LOCAL2
    LOG_LOCAL3 = logging.handlers.SysLogHandler.LOG_LOCAL3
    LOG_LOCAL4 = logging.handlers.SysLogHandler.LOG_LOCAL4
    LOG_LOCAL5 = logging.handlers.SysLogHandler.LOG_LOCAL5
    LOG_LOCAL6 = logging.handlers.SysLogHandler.LOG_LOCAL6
    LOG_LOCAL7 = logging.handlers.SysLogHandler.LOG_LOCAL7

    # ================================================
    #  Other codes through 15 reserved for system use
    # ================================================
    priority_names = {
        "alert": LOG_ALERT,
        "crit": LOG_CRIT,
        "critical": LOG_CRIT,
        "debug": LOG_DEBUG,
        "emerg": LOG_EMERG,
        "err": LOG_ERR,
        "error": LOG_ERR,  # DEPRECATED
        "info": LOG_INFO,
        "notice": LOG_NOTICE,
        "panic": LOG_EMERG,  # DEPRECATED
        "warn": LOG_WARNING,  # DEPRECATED
        "warning": LOG_WARNING,
    }

    facility_names = {
        "auth": LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron": LOG_CRON,
        "daemon": LOG_DAEMON,
        "ftp": LOG_FTP,
        "kern": LOG_KERN,
        "lpr": LOG_LPR,
        "mail": LOG_MAIL,
        "news": LOG_NEWS,
        "security": LOG_AUTH,  # DEPRECATED
        "syslog": LOG_SYSLOG,
        "user": LOG_USER,
        "uucp": LOG_UUCP,
        "local0": LOG_LOCAL0,
        "local1": LOG_LOCAL1,
        "local2": LOG_LOCAL2,
        "local3": LOG_LOCAL3,
        "local4": LOG_LOCAL4,
        "local5": LOG_LOCAL5,
        "local6": LOG_LOCAL6,
        "local7": LOG_LOCAL7,
    }

    # The map below appears to be trivially lowercasing the key. However,
    # there's more to it than meets the eye - in some locales, lowercasing
    # gives unexpected results. See SF #1524081: in the Turkish locale,
    # "INFO".lower() != "info"
    priority_map = {
        "DEBUG": "debug",
        "INFO": "info",
        "WARNING": "warning",
        "ERROR": "error",
        "CRITICAL": "critical"
    }

    def __init__(self, address=('localhost', 514),  # 514 == SYSLOG_UDP_PORT
                 facility=LOG_USER, socktype=socket.SOCK_DGRAM, **kwargs):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used.

        Extra keyword arguments:
            use_ssl: wrap stream sockets with SSL (default False).
            timeout: socket timeout in seconds (default 1).
        Any remaining keyword arguments are forwarded to ssl.wrap_socket().
        """
        # Deliberately NOT calling SysLogHandler.__init__: the stdlib base
        # connects eagerly, while this handler connects lazily on first send.
        logging.Handler.__init__(self)

        self.socket = None
        self.unixsocket = None
        self.address = address
        self.facility = facility
        self.socktype = socktype
        self.use_ssl = kwargs.pop("use_ssl", False)
        self.timeout = kwargs.pop("timeout", 1)
        self.kwargs = kwargs

        self.closeOnError = 0
        self.retryTime = None
        # Exponential backoff parameters for reconnection attempts.
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self):
        """Create and connect the underlying (unix/inet, dgram/stream) socket."""
        if isinstance(self.address, str):
            # A string address denotes a unix-domain syslog socket.
            self.unixsocket = 1
            self._connect_unixsocket(self.address)
        else:
            self.unixsocket = 0
            self.socket = socket.socket(socket.AF_INET, self.socktype)
            if self.socktype == socket.SOCK_STREAM:
                if self.use_ssl:
                    # NOTE(review): ssl.wrap_socket was deprecated in 3.7 and
                    # removed in 3.12; migrate to SSLContext.wrap_socket.
                    self.socket = ssl.wrap_socket(self.socket, **self.kwargs)
                self.socket.connect(self.address)

    def _connect_unixsocket(self, address):
        """Connect to a unix-domain syslog socket, trying DGRAM then STREAM."""
        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        # syslog may require either DGRAM or STREAM sockets
        try:
            if hasattr(self.socket, 'settimeout'):
                self.socket.settimeout(self.timeout)
            self.socket.connect(address)
        except socket.error:
            self.socket.close()
            # DGRAM refused; retry with a STREAM socket (optionally SSL).
            self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            try:
                if self.use_ssl:
                    self.socket = ssl.wrap_socket(self.socket, **self.kwargs)
                if hasattr(self.socket, 'settimeout'):
                    self.socket.settimeout(self.timeout)
                self.socket.connect(address)
            except socket.error:
                self.socket.close()
                raise

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None, in which case this
        # is the first time back after a disconnect, or
        # we've waited long enough.
        if self.retryTime is None:
            attempt = 1
        else:
            attempt = (now >= self.retryTime)
        if attempt:
            try:
                self.makeSocket()
                self.retryTime = None  # next time, no delay before trying
            except socket.error:
                # Creation failed, so set the retry time and return.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod

    def send(self, msg):
        """
        Send msg (bytes) to the peer, connecting lazily if needed.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.socket is None:
            self.createSocket()
        # self.socket can be None either because we haven't reached the retry
        # time yet, or because we have reached the retry time and retried,
        # but are still unable to connect.
        if self.socket:
            try:
                if hasattr(self.socket, "sendall"):
                    try:
                        self.socket.sendall(msg)
                    except socket.error:
                        # One reconnect attempt before giving up.
                        self.closeSocket()
                        self.createSocket()
                        self.socket.sendall(msg)
                else:
                    sentsofar = 0
                    left = len(msg)
                    while left > 0:
                        sent = 0
                        try:
                            _msg = msg[sentsofar:]
                            if self.unixsocket:
                                sent = self.socket.send(_msg)
                            elif self.socktype == socket.SOCK_DGRAM:
                                sent = self.socket.sendto(_msg, self.address)
                        except Exception:
                            self.closeSocket()
                            self.createSocket()
                            if self.unixsocket:
                                sent = self.socket.send(_msg)
                            elif self.socktype == socket.SOCK_DGRAM:
                                sent = self.socket.sendto(_msg, self.address)
                        sentsofar = sentsofar + sent
                        left = left - sent
            except socket.error:
                self.closeSocket()
                self.socket = None  # so we can call createSocket next time

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        # when talking to the unix-domain '/dev/log' socket, a zero-terminator
        # seems to be required; TCP/UDP peers expect a newline instead.
        log_tail = '\000' if self.unixsocket else '\n'
        msg = self.format(record) + log_tail
        # Record level names are upper-case while syslog priority names are
        # lower-case, hence the mapPriority() translation.
        prio = '<%d>' % self.encodePriority(self.facility,
                                            self.mapPriority(record.levelname))
        # Message is a string. Convert to bytes as required by RFC 5424
        if type(msg) is str:
            msg = msg.encode('utf-8')
        # BUG FIX: the priority prefix must be encoded as well -- on Python 3
        # 'str + bytes' raises TypeError, so every emit() used to fail.
        msg = prio.encode('utf-8') + msg
        try:
            self.send(msg)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            self.handleError(record)

    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.socket:
            self.close()
            self.socket = None  # try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def closeSocket(self):
        """Close the socket, unwrapping the SSL layer first when present."""
        if not self.socket:
            return
        if self.unixsocket:
            self.socket.close()
        if isinstance(self.socket, ssl.SSLSocket):
            # Send the SSL close_notify and drop back to the raw socket.
            self.socket = self.socket.unwrap()
        self.socket.close()

    def close(self):
        """Release the socket and detach the handler from the logging system."""
        self.acquire()
        try:
            self.closeSocket()
        finally:
            self.release()
        logging.Handler.close(self)
| |
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
"""helper.py: Functions to handle specific code block or callback."""
import inspect
import os
import shutil
import app
import config
import server
def console_command(args, command_signal):
    """Dispatch a console command: args = [prog, signal, command, params...]."""
    if len(args) < 3:
        return False
    received_signal, command = args[1], args[2]
    params = args[3:]
    kwargs = get_kwargs(params, ':')
    if received_signal != command_signal:
        return
    if command == 'run':
        server.run(**kwargs)
    elif command == 'create':
        for name in params:
            create_app(name)
    elif command == 'rename':
        # A rename needs both the old and the new name.
        if len(params) >= 2:
            rename_app(params[0], params[1])
    elif command == 'delete':
        for name in params:
            delete_app(name)
def create_app(dir_name):
    """Scaffold a new application: python package files under
    config.DIR_APPS/<dir_name>, view templates under
    config.DIR_VIEWS/<dir_name>, and a header written into each python file.
    """
    # Files every generated app package starts with.
    app_file_names = [
        '/controllers.py',
        '/forms.py',
        '/__init__.py',
        '/models.py',
        '/serializers.py',
        '/views.py'
    ]
    view_file_names = ['/index.html']
    # Header template; {file_name} is interpolated by write_tree via
    # message.format(**locals()).
    app_file_content = (
        '#!/usr/bin/python2.7\n'
        '# -*- coding: utf-8 -*-\n\n'
        '"""{file_name}: File description."""\n\n'
    )
    create_tree(app_file_names, dir_name, config.DIR_APPS)
    create_tree(view_file_names, dir_name, config.DIR_VIEWS)
    # write_tree only touches files that exist -- created just above.
    write_tree(app_file_names, dir_name, config.DIR_APPS, app_file_content)
def create_tree(file_names, dir_name, root):
    """Create directory root/dir_name and empty files file_names inside it.

    file_names may be a single name or a list. Existing directories and
    files are left untouched (a notice is printed instead).
    """
    if not isinstance(file_names, list):
        file_names = [file_names]
    dir_name = dir_name.strip('/')
    dir_path = os.path.join(root, dir_name)
    # os.path.isdir(p) implies os.path.exists(p), so a single exists() check
    # is equivalent to the original "isdir or exists" test.
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    else:
        # Parenthesized single-argument print: valid on Python 2 and 3.
        print('{} already exists.'.format(dir_path))
    for file_name in file_names:
        file_name = file_name.strip('/')
        file_path = os.path.join(dir_path, file_name)
        if not os.path.isfile(file_path):
            # 'a' creates the file without truncating anything already there.
            with open(file_path, 'a'):
                pass  # the with-statement closes the file for us
        else:
            print('{} already exists.'.format(file_path))
def custom_import(name):
    """Import a dotted module path and return the leaf module object."""
    leaf = __import__(name)  # __import__ returns the top-level package
    # Walk the remaining dotted components down to the requested leaf.
    for part in name.split('.')[1:]:
        leaf = getattr(leaf, part)
    return leaf
def delete_app(dir_name):
    """Remove an app's directory from both the apps tree and the views tree."""
    for tree_root in (config.DIR_APPS, config.DIR_VIEWS):
        delete_tree(dir_name, tree_root)
def delete_tree(dir_name, root):
    """Recursively delete root/dir_name; print a notice if it is absent."""
    dir_name = dir_name.strip('/')
    dir_path = os.path.join(root, dir_name)
    # isdir() implies exists(), so the original "isdir and exists" reduces
    # to a single isdir() check.
    if os.path.isdir(dir_path):
        shutil.rmtree(dir_path)
    else:
        # Parenthesized single-argument print: valid on Python 2 and 3.
        print('{} does not exist.'.format(dir_path))
def get_kwargs(args, separator):
    """Parse 'key<separator>value' strings into a keyword-argument dict.

    Values are coerced to int when possible, then to bool for the literal
    strings 'True'/'False'; anything else stays a string. Items without the
    separator are ignored.
    """
    kwargs = {}
    for arg in args:
        if separator not in arg:
            continue
        # Split only on the FIRST separator so values may themselves contain
        # it (e.g. 'url:http://host'); the original unpacking crashed there.
        key, value = arg.split(separator, 1)
        try:
            value = int(value)
        except ValueError:  # was a bare 'except'; int(str) only raises this
            if value in ('True', 'False'):
                value = value == 'True'
        kwargs[key] = value
    return kwargs
def get_methods(class_):
    """Return the attribute names of class_ that are not dunder names."""
    return [name for name in dir(class_)
            if not (name.startswith('__') or name.endswith('__'))]
def get_modules(path):
    """Return the names of the immediate subdirectories of path."""
    return [entry for entry in os.listdir(path)
            if os.path.isdir(os.path.join(path, entry))]
def is_primitive(object_):
    """Return True if object_'s exact type is a primitive builtin.

    Primitives: str, int, float, dict, list, tuple, bool and None.
    """
    # BUG FIX: the original listed the *value* None instead of its type, so
    # is_primitive(None) was always False (type(None) is NoneType, which was
    # never in the list). Exact type() comparison is kept on purpose so
    # subclasses are not treated as primitives.
    return type(object_) in (str, int, float, dict, list, tuple, bool, type(None))
def rename_app(old_path, new_path):
    """Rename an app directory in both the apps tree and the views tree.

    Each rename is skipped (with a printed notice) when the source is
    missing or the destination already exists.
    """
    old_path = old_path.strip('/')
    new_path = new_path.strip('/')
    renames = [
        (os.path.join(config.DIR_APPS, old_path),
         os.path.join(config.DIR_APPS, new_path)),
        (os.path.join(config.DIR_VIEWS, old_path),
         os.path.join(config.DIR_VIEWS, new_path)),
    ]
    # Loop-local names: the original reused old_path/new_path here, shadowing
    # the (already consumed) parameters -- harmless but confusing.
    for src, dst in renames:
        if not (os.path.isdir(src) or os.path.exists(src)):
            # Parenthesized single-argument print: valid on Python 2 and 3.
            print('{} does not exist'.format(src))
        elif os.path.isdir(dst) or os.path.exists(dst):
            print('{} already exists'.format(dst))
        else:
            os.rename(src, dst)
def to_func(callback, method):
    """Return callback as a callable: the object itself if it is not a
    class, otherwise the named bound method of a fresh instance."""
    if inspect.isclass(callback):
        return getattr(callback(), method)
    return callback
def to_json(data, key=None):
    """Return data as a JSON-serializable dict, or a JSONP string when the
    current request carries a `callback` query parameter.

    Non-dict input is wrapped as {key or 'data': data}. None values become
    '' and non-primitive values are stringified.
    """
    key = key or 'data'
    if not isinstance(data, dict):
        data = {key: data}
    for k, v in data.items():
        # BUG FIX: the original tested `type(v) is None`, which is never true
        # (type() always returns a type object), so None leaked through as
        # the string 'None'. Check the value itself first.
        if v is None:
            data[k] = ''
        elif not is_primitive(v):
            data[k] = str(v)
    # JSONP support: wrap the payload in the requested callback function.
    jsonp_callback = app.request.query.callback
    if jsonp_callback:
        app.response.content_type = 'application/javascript'
        return '{}({})'.format(jsonp_callback, data)
    return data
def write_tree(file_names, dir_name, root, message):
    """Write `message` into each existing file under root/dir_name.

    Empty files are written from scratch; non-empty files are appended to.
    The message is expanded with message.format(**locals()), so templates
    may reference any local name in this function -- in practice
    {file_name}, {dir_name} and {root}.
    """
    dir_name = dir_name.strip('/')
    dir_path = os.path.join(root, dir_name)
    for file_name in file_names:
        file_name = file_name.strip('/')
        file_path = os.path.join(dir_path, file_name)
        # Only touch files that already exist (create_tree makes them).
        if os.path.isfile(file_path) and os.path.exists(file_path):
            mode = 'w'
            if os.stat(file_path).st_size:
                mode = 'a'  # never clobber existing content
            with open(file_path, mode) as f:
                # NOTE: renaming any local variable here would change the
                # template namespace -- format(**locals()) couples behavior
                # to these exact names.
                f.write(message.format(**locals()))
| |
from collections import namedtuple, ChainMap
import random
import json
import struct
import pulp_db
#---
# DATA
#---
# Sample corpus: three "batman" records (msg_3 lacks an "age" field, which
# exercises partially-indexed messages) plus two other characters.
msg_1 = {"name": "batman",
         "age": 25,
         "string": "I am batman",
         }
msg_2 = {"name": "batman",
         "age": 5,
         "string": "I am also batman",
         }
msg_3 = {"name": "batman",
         "string": "I am also batman?",
         }
msg_4 = {"name": "robin",
         "age": 25,
         "string": "I am not batman",
         }
msg_5 = {"name": "superman",
         "age": 2000,
         "string": "I am superman",
         }
# Insertion order matters: the asserts below compare query results against
# DATA verbatim.
DATA = [msg_1, msg_2, msg_3, msg_4, msg_5]
#---
# Utils
#---
# Need a few serialisers.
def dump_json(msg):
    """Serialize msg to UTF-8 encoded JSON bytes."""
    return bytes(json.dumps(msg), 'utf-8')
def load_json(raw_msg):
    """Parse UTF-8 JSON bytes back into a Python object."""
    return json.loads(raw_msg.decode('utf-8'))
def dump_int(i):
    """Pack i as a native-endian unsigned 32-bit integer."""
    return struct.Struct("I").pack(i)
def load_int(raw_i):
    """Unpack a native-endian unsigned 32-bit integer."""
    (value,) = struct.unpack("I", raw_i)
    return value
def dump_str_to_bytes(string):
    """Encode a str to UTF-8 bytes."""
    return string.encode('utf-8')
def load_str_to_bytes(raw):
    """Decode UTF-8 bytes back to str."""
    return raw.decode('utf-8')
# Serializers for the message payload and for each indexed field.
MSG_DUMPER = dump_json
MSG_LOADER = load_json
IDX_DUMPERS = {"name": dump_str_to_bytes,
               "age": dump_int,
               }
IDX_LOADERS = {"name": load_str_to_bytes,
               "age": load_int,
               }

#---
# Write the DB
#---
db_name = "data/pulp/query/json_ds"
with pulp_db.open(db_name, "w", msg_dumper=MSG_DUMPER, idx_dumpers=IDX_DUMPERS) as db:
    for msg in DATA:
        # What to index -- "age" only when the message actually has one.
        index = {"name": msg["name"]}
        if "age" in msg:
            index["age"] = msg["age"]
        # Put the data into the database.
        # db.append(b'the_bin_json_data', {"name": "foo", "age": 5})
        db.append(msg, index)

#---
# Read the DB
#---
with pulp_db.open(db_name,
                  mode="r",
                  msg_dumper=MSG_DUMPER,
                  msg_loader=MSG_LOADER,
                  idx_dumpers=IDX_DUMPERS,
                  idx_loaders=IDX_LOADERS,
                  ) as DB:
    # Random access: the DB behaves like a sequence of message objects.
    num_messages = len(DB)
    first_msg = DB[0]  #<-- This is a message object.
    first_msg.msg  #<--- This is the data_blob
    first_msg.id  #<--- This is the position of the data_blob. 0 = first message of the day
    last_msg = DB[-1]  #<-- This is a message object.
    last_msg_json = last_msg.msg  #<--- This is the data_blob
    last_msg_id = last_msg.id  #<--- This is the position of the data_blob. 0 = first message of the day
    # Slicing returns lists of message objects.
    first_two_messages = DB[:2]
    assert [x.msg for x in first_two_messages] == DATA[:2]
    last_two_messages = DB[-2:]
    assert [x.msg for x in last_two_messages] == DATA[-2:]
    actual_all_msgs = [x.msg for x in DB]
    assert actual_all_msgs == DATA

    # Forward Iterator (The same as above next == __next__)
    actual_msgs = []
    db_stream = DB.stream
    while True:
        try:
            msg = db_stream.next()
        except StopIteration:
            break
        else:
            actual_msgs.append(msg.msg)
    expected_messages = DATA
    assert actual_msgs == expected_messages

    # Backward Iterator: prev() on a fresh stream walks from the end ------
    actual_msgs = []
    db_stream = DB.stream
    while True:
        try:
            msg = db_stream.prev()
        except StopIteration:
            break
        else:
            actual_msgs.append(msg.msg)
    expected_messages = DATA[::-1]
    assert actual_msgs == expected_messages

    # test_jump_then_forwards_backwards: indexing a stream repositions it --
    db_stream = DB.stream
    actual_msg_3 = db_stream[3]
    assert actual_msg_3.msg == DATA[3]
    expected_msg_4 = db_stream.next()
    assert expected_msg_4.msg == DATA[4]
    expected_msg_3 = db_stream.prev()
    assert expected_msg_3.msg == DATA[3]
    expected_msg_2 = db_stream.prev()
    assert expected_msg_2.msg == DATA[2]

    # test_all_indexed_fields ----------------------------
    indexed_fields = DB.idx.fields()
    assert set(indexed_fields) == {"name", "age"}

    # test_all_names ----------------------------
    actual_names = set(DB.idx["name"].keys())
    expected_names = {msg["name"] for msg in DATA}
    assert actual_names == expected_names

    # test_all_ages ----------------------------
    actual_ages = set(DB.idx["age"].keys())
    expected_ages = {msg["age"] for msg in DATA if "age" in msg}
    assert actual_ages == expected_ages
    actual_num_uniq_ages = len(DB.idx["age"])
    expected_num_uniq_ages = len(set(x['age'] for x in DATA if "age" in x))
    assert actual_num_uniq_ages == expected_num_uniq_ages

    # test_all_strings ----------------------------
    actual_strings = [x.msg['string'] for x in DB]
    expected_strings = [msg["string"] for msg in DATA]
    assert actual_strings == expected_strings

    # test_called_batman: exact-value lookup on an indexed field ----------
    all_batmans = DB.idx['name']['batman']
    all_batman_msgs = [x.msg for x in all_batmans]
    expected_batmans = [msg for msg in DATA if msg['name']=="batman"]
    assert all_batman_msgs == expected_batmans

    # test_age_25 ----------------------------
    all_age_25 = DB.idx['age'][25]
    all_age_25_msgs = [x.msg for x in all_age_25]
    expected_age_25 = [msg for msg in DATA if 'age' in msg and msg['age']==25]
    assert all_age_25_msgs == expected_age_25

    # test_age_ge_6_and_slice: predicate queries over an index ------------
    def age_ge_6(age):
        # Predicate applied to the indexed "age" values.
        return age >= 6
    #all_age_ge_6 = DB.idx['age'](age_ge_6)
    # Or
    all_age_ge_6 = DB.idx(age=age_ge_6)
    all_age_ge_6_msgs = [x.msg for x in all_age_ge_6]
    expected_ge_6 = [msg for msg in DATA if 'age' in msg and msg['age']>=6]
    assert all_age_ge_6_msgs == expected_ge_6

    # test_batman_in_string: full-scan predicate over whole messages ------
    def batman_in_str(msg):
        return "batman" in msg['string']
    all_with_batman_str = DB.stream(batman_in_str)
    all_with_batman_str_msgs = [x.msg for x in all_with_batman_str]
    expected_batman_str_msgs = [msg for msg in DATA if "batman" in msg['string']]
    assert all_with_batman_str_msgs == expected_batman_str_msgs

    # test_batman_in_string_and_age_25___or_name_superman ----------------
    # Several equivalent ways to combine queries with & (and) and | (or).
    expected_data = [msg for msg in DATA if (("age" in msg and msg['age'] == 25) and "batman" in msg["string"]) or msg["name"] == "superman" ]
    def batman_in_str(msg):
        return "batman" in msg['string']
    def msg_age_eq_25(msg):
        return 'age' in msg and msg['age'] == 25

    # Method 1
    all_with_batman_str_and_age_25 = DB.stream(batman_in_str)(msg_age_eq_25) #<--- Not using index
    name_supername = DB.idx['name']['superman']
    actual_msgs = [x.msg for x in all_with_batman_str_and_age_25 | name_supername]
    assert actual_msgs == expected_data
    print("Method 1: passed")

    # Method 2
    age_25 = DB.idx['age'][25] #<--- Using index
    with_batman_str = DB.stream(batman_in_str) #<--- Not using index
    name_supername = DB.idx['name']['superman']
    actual_msgs = [x.msg for x in (age_25 & with_batman_str) | name_supername]
    assert actual_msgs == expected_data
    print("Method 2: passed")

    # Method 3
    age_25_with_batman_str = DB.idx['age'][25](batman_in_str) #<--- Only looking at messages that satisfy age. Faster
    name_supername = DB.idx['name']['superman']
    actual_msgs = [x.msg for x in age_25_with_batman_str | name_supername]
    assert actual_msgs == expected_data
    print("Method 3: passed")

    #Method 4
    age_25_with_batman_str = DB.stream(msg_age_eq_25)(batman_in_str) #<--- Not using index but at least using one to filter the other
    name_supername = DB.idx['name']['superman']
    actual_msgs = [x.msg for x in age_25_with_batman_str | name_supername]
    assert actual_msgs == expected_data
    print("Method 4: passed")

    # Method 5
    def age_eq_25(age):
        return age == 25
    age_25_with_batman_str = DB.idx['age'](age_eq_25)(batman_in_str)
    name_supername = DB.idx['name']['superman']
    actual_msgs = [x.msg for x in age_25_with_batman_str | name_supername]
    assert actual_msgs == expected_data
    print("Method 5: passed")

    # Method 6
    age_25_with_batman_str = DB.idx(age=age_eq_25)(batman_in_str)
    name_supername = DB.idx['name']['superman']
    actual_msgs = [x.msg for x in age_25_with_batman_str | name_supername]
    assert actual_msgs == expected_data
    print("Method 6: passed")

    # Method 7
    def msg_name_superman(msg):
        return msg['name'] == 'superman'
    everything = DB.idx(age=age_eq_25)(batman_in_str) | DB.stream(msg_name_superman)
    actual_msgs = [x.msg for x in everything]
    assert actual_msgs == expected_data
    print("Method 7: passed")

    # test_batman_in_string_minus_age25: set difference with '-' ----------
    expected_data = [msg for msg in DATA if "batman" in msg["string"] and not ("age" in msg and msg["age"]==25) ]
    def batman_in_str(msg):
        return "batman" in msg['string']
    everything = DB.stream(batman_in_str) - DB.idx['age'][25]
    actual_msgs = [x.msg for x in everything]
    assert actual_msgs == expected_data
| |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test code for the Face layer of RPC Framework."""
from __future__ import division
import abc
import contextlib
import itertools
import threading
import unittest
from concurrent import futures
import six
# test_interfaces is referenced from specification in this module.
from grpc.framework.foundation import logging_pool
from grpc.framework.interfaces.face import face
from tests.unit.framework.common import test_constants
from tests.unit.framework.common import test_control
from tests.unit.framework.common import test_coverage
from tests.unit.framework.interfaces.face import _3069_test_constant
from tests.unit.framework.interfaces.face import _digest
from tests.unit.framework.interfaces.face import _stock_service
from tests.unit.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
class _PauseableIterator(object):
    """Wraps an iterator so that consumption can be temporarily blocked.

    next() blocks while a pause() context is active; leaving the context
    wakes all blocked consumers.
    """

    def __init__(self, upstream):
        # upstream: the iterator actually supplying values.
        self._upstream = upstream
        self._condition = threading.Condition()
        self._paused = False

    @contextlib.contextmanager
    def pause(self):
        """Context manager: while active, next() callers block."""
        with self._condition:
            self._paused = True
        yield
        # Deliberately no try/finally: an exception raised inside the
        # pause() body leaves the iterator paused.
        with self._condition:
            self._paused = False
            self._condition.notify_all()

    def __iter__(self):
        return self

    def __next__(self):
        # Python 3 protocol entry point; delegates to the py2-style next().
        return self.next()

    def next(self):
        # Wait until unpaused, then forward to the wrapped iterator.
        with self._condition:
            while self._paused:
                self._condition.wait()
            return next(self._upstream)
class _Callback(object):
def __init__(self):
self._condition = threading.Condition()
self._called = False
self._passed_future = None
self._passed_other_stuff = None
def __call__(self, *args, **kwargs):
with self._condition:
self._called = True
if args:
self._passed_future = args[0]
if 1 < len(args) or kwargs:
self._passed_other_stuff = tuple(args[1:]), dict(kwargs)
self._condition.notify_all()
def future(self):
with self._condition:
while True:
if self._passed_other_stuff is not None:
raise ValueError(
'Test callback passed unexpected values: %s',
self._passed_other_stuff)
elif self._called:
return self._passed_future
else:
self._condition.wait()
class TestCase(six.with_metaclass(abc.ABCMeta, test_coverage.Coverage, unittest.TestCase)):
"""A test of the Face layer of RPC Framework.
Concrete subclasses must have an "implementation" attribute of type
test_interfaces.Implementation and an "invoker_constructor" attribute of type
_invocation.InvokerConstructor.
"""
NAME = 'FutureInvocationAsynchronousEventServiceTest'
def setUp(self):
    """See unittest.TestCase.setUp for full specification.

    Overriding implementations must call this implementation.
    """
    # Control object that lets individual tests pause or fail the service.
    self._control = test_control.PauseFailControl()
    self._digest_pool = logging_pool.pool(test_constants.POOL_SIZE)
    # Digest the stock service into the method/message fixtures used below.
    self._digest = _digest.digest(
        _stock_service.STOCK_TEST_SERVICE, self._control, self._digest_pool)

    # implementation and invoker_constructor are supplied by concrete
    # subclasses (see the class docstring).
    generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
        self._digest.methods, self._digest.event_method_implementations, None)
    self._invoker = self.invoker_constructor.construct_invoker(
        generic_stub, dynamic_stubs, self._digest.methods)
def tearDown(self):
"""See unittest.TestCase.tearDown for full specification.
Overriding implementations must call this implementation.
"""
self._invoker = None
self.implementation.destantiate(self._memo)
self._digest_pool.shutdown(wait=True)
def testSuccessfulUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
response = response_future.result()
test_messages.verify(request, response, self)
self.assertIs(callback.future(), response_future)
def testSuccessfulUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_stream_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
response_iterator = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
responses = list(response_iterator)
test_messages.verify(request, responses, self)
def testSuccessfulStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
request_iterator = _PauseableIterator(iter(requests))
callback = _Callback()
# Use of a paused iterator of requests allows us to test that control is
# returned to calling code before the iterator yields any requests.
with request_iterator.pause():
response_future = self._invoker.future(group, method)(
request_iterator, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
future_passed_to_callback = callback.future()
response = future_passed_to_callback.result()
test_messages.verify(requests, response, self)
self.assertIs(future_passed_to_callback, response_future)
def testSuccessfulStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_stream_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
request_iterator = _PauseableIterator(iter(requests))
# Use of a paused iterator of requests allows us to test that control is
# returned to calling code before the iterator yields any requests.
with request_iterator.pause():
response_iterator = self._invoker.future(group, method)(
request_iterator, test_constants.LONG_TIMEOUT)
responses = list(response_iterator)
test_messages.verify(requests, responses, self)
def testSequentialInvocations(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
first_request = test_messages.request()
second_request = test_messages.request()
first_response_future = self._invoker.future(group, method)(
first_request, test_constants.LONG_TIMEOUT)
first_response = first_response_future.result()
test_messages.verify(first_request, first_response, self)
second_response_future = self._invoker.future(group, method)(
second_request, test_constants.LONG_TIMEOUT)
second_response = second_response_future.result()
test_messages.verify(second_request, second_response, self)
def testParallelInvocations(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
first_request = test_messages.request()
second_request = test_messages.request()
first_response_future = self._invoker.future(group, method)(
first_request, test_constants.LONG_TIMEOUT)
second_response_future = self._invoker.future(group, method)(
second_request, test_constants.LONG_TIMEOUT)
first_response = first_response_future.result()
second_response = second_response_future.result()
test_messages.verify(first_request, first_response, self)
test_messages.verify(second_request, second_response, self)
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = []
response_futures = []
for _ in range(test_constants.PARALLELISM):
request = test_messages.request()
response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
requests.append(request)
response_futures.append(response_future)
responses = [
response_future.result() for response_future in response_futures]
for request, response in zip(requests, responses):
test_messages.verify(request, response, self)
def testWaitingForSomeButNotAllParallelInvocations(self):
pool = logging_pool.pool(test_constants.PARALLELISM)
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = []
response_futures_to_indices = {}
for index in range(test_constants.PARALLELISM):
request = test_messages.request()
inner_response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
outer_response_future = pool.submit(inner_response_future.result)
requests.append(request)
response_futures_to_indices[outer_response_future] = index
some_completed_response_futures_iterator = itertools.islice(
futures.as_completed(response_futures_to_indices),
test_constants.PARALLELISM // 2)
for response_future in some_completed_response_futures_iterator:
index = response_futures_to_indices[response_future]
test_messages.verify(requests[index], response_future.result(), self)
pool.shutdown(wait=True)
def testCancelledUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
cancel_method_return_value = response_future.cancel()
self.assertIs(callback.future(), response_future)
self.assertFalse(cancel_method_return_value)
self.assertTrue(response_future.cancelled())
def testCancelledUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_stream_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
request, test_constants.LONG_TIMEOUT)
response_iterator.cancel()
with self.assertRaises(face.CancellationError):
next(response_iterator)
def testCancelledStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
response_future.add_done_callback(callback)
cancel_method_return_value = response_future.cancel()
self.assertIs(callback.future(), response_future)
self.assertFalse(cancel_method_return_value)
self.assertTrue(response_future.cancelled())
def testCancelledStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_stream_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
response_iterator.cancel()
with self.assertRaises(face.CancellationError):
next(response_iterator)
def testExpiredUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(
group, method)(request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
self.assertIsInstance(
response_future.exception(), face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testExpiredUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_stream_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
list(response_iterator)
def testExpiredStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
with self._control.pause():
response_future = self._invoker.future(group, method)(
iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
self.assertIsInstance(
response_future.exception(), face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testExpiredStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_stream_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.pause():
response_iterator = self._invoker.future(group, method)(
iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
list(response_iterator)
def testFailedUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
callback = _Callback()
with self._control.fail():
response_future = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
# Because the servicer fails outside of the thread from which the
# servicer-side runtime called into it its failure is
# indistinguishable from simply not having called its
# response_callback before the expiration of the RPC.
self.assertIsInstance(
response_future.exception(), face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testFailedUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.unary_stream_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
# Because the servicer fails outside of the thread from which the
# servicer-side runtime called into it its failure is indistinguishable
# from simply not having called its response_consumer before the
# expiration of the RPC.
with self._control.fail(), self.assertRaises(face.ExpirationError):
response_iterator = self._invoker.future(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
list(response_iterator)
def testFailedStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
callback = _Callback()
with self._control.fail():
response_future = self._invoker.future(group, method)(
iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
response_future.add_done_callback(callback)
self.assertIs(callback.future(), response_future)
# Because the servicer fails outside of the thread from which the
# servicer-side runtime called into it its failure is
# indistinguishable from simply not having called its
# response_callback before the expiration of the RPC.
self.assertIsInstance(
response_future.exception(), face.ExpirationError)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testFailedStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (
six.iteritems(self._digest.stream_stream_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
# Because the servicer fails outside of the thread from which the
# servicer-side runtime called into it its failure is indistinguishable
# from simply not having called its response_consumer before the
# expiration of the RPC.
with self._control.fail(), self.assertRaises(face.ExpirationError):
response_iterator = self._invoker.future(group, method)(
iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT)
list(response_iterator)
| |
#!/usr/bin/python
# mininet tests for FAUCET
#
# * must be run as root
# * you can run a specific test case only, by adding the class name of the test
# case to the command. Eg ./faucet_mininet_test.py FaucetUntaggedIPv4RouteTest
#
# REQUIRES:
#
# * mininet 2.2.0 or later (Ubuntu 14 ships with 2.1.0, which is not supported)
# use the "install from source" option from
# https://github.com/mininet/mininet/blob/master/INSTALL.
# suggest ./util/install.sh -n
# * OVS 2.3.1 or later (Ubuntu 14 ships with 2.0.2, which is not supported)
# * VLAN utils (vconfig, et al - on Ubuntu, apt-get install vlan)
# * fuser
# * net-tools
# * iputils-ping
# * netcat-openbsd
# * tcpdump
# * exabgp
# * pylint
# * curl
import glob
import inspect
import os
import sys
import random
import re
import shutil
import socket
import subprocess
import tempfile
import time
import unittest
import json
import ipaddr
import requests
import yaml
from concurrencytest import ConcurrentTestSuite, fork_for_tests
from mininet.net import Mininet
from mininet.node import Controller
from mininet.node import Host
from mininet.node import Intf
from mininet.node import OVSSwitch
from mininet.topo import Topo
from mininet.util import dumpNodeConnections, pmonitor
from mininet.clean import Cleanup
from ryu.ofproto import ofproto_v1_3 as ofp
# list of required external dependencies
# external binary, argument to get version,
# RE to check present RE to get version, minimum required version.
EXTERNAL_DEPENDENCIES = (
    ('ryu-manager', ['--version'],
     'ryu-manager', r'ryu-manager (\d+\.\d+)\n', float(4.4)),
    ('ovs-vsctl', ['--version'], 'Open vSwitch',
     r'ovs-vsctl\s+\(Open vSwitch\)\s+(\d+\.\d+)\.\d+\n', float(2.3)),
    ('tcpdump', ['-h'], 'tcpdump',
     r'tcpdump\s+version\s+(\d+\.\d+)\.\d+\n', float(4.5)),
    ('nc', [], 'nc from the netcat-openbsd', '', 0),
    ('vconfig', [], 'the VLAN you are talking about', '', 0),
    ('fuser', ['-V'], r'fuser \(PSmisc\)',
     r'fuser \(PSmisc\) (\d+\.\d+)\n', float(22.0)),
    ('mn', ['--version'], r'\d+\.\d+.\d+',
     r'(\d+\.\d+).\d+', float(2.2)),
    ('exabgp', ['--version'], 'ExaBGP',
     r'ExaBGP : (\d+\.\d+).\d+', float(3.4)),
    ('pip', ['show', 'influxdb'], 'influxdb',
     r'Version:\s+(\d+\.\d+)\.\d+', float(3.0)),
    ('pylint', ['--version'], 'pylint',
     r'pylint (\d+\.\d+).\d+,', float(1.6)),
    ('curl', ['--version'], 'libcurl',
     r'curl (\d+\.\d+).\d+', float(7.3)),
)

# Location of the FAUCET sources; overridable via the environment.
FAUCET_DIR = os.getenv('FAUCET_DIR', '../src/ryu_faucet/org/onfsdn/faucet')

# Must pass with 0 lint errors
FAUCET_LINT_SRCS = glob.glob(os.path.join(FAUCET_DIR, '*py'))

# Maximum number of parallel tests to run at once
MAX_PARALLEL_TESTS = 20

# DPID and hardware description used when bridging in a physical switch.
DPID = '1'
HARDWARE = 'Open vSwitch'

# see hw_switch_config.yaml for how to bridge in an external hardware switch.
HW_SWITCH_CONFIG_FILE = 'hw_switch_config.yaml'
REQUIRED_TEST_PORTS = 4
# Logical test port name -> OpenFlow port number.
PORT_MAP = {'port_1': 1, 'port_2': 2, 'port_3': 3, 'port_4': 4}
# Populated from HW_SWITCH_CONFIG_FILE when testing a hardware switch;
# empty means software (OVS) only.
SWITCH_MAP = {}
def str_int_dpid(hex_dpid):
    """Return the decimal string form of a hex DPID string."""
    numeric_dpid = int(hex_dpid, 16)
    return str(numeric_dpid)
# TODO: applications should retry if port not really free
def find_free_port():
    """Return a TCP port number that was free at probe time.

    Ports 5001 and 5002 are reserved by the tests and never returned.
    """
    reserved_ports = (5001, 5002)
    while True:
        probe_socket = socket.socket()
        probe_socket.bind(('', 0))
        candidate_port = probe_socket.getsockname()[1]
        probe_socket.close()
        if candidate_port not in reserved_ports:
            return candidate_port
class FaucetSwitch(OVSSwitch):
    """An OVS switch forced onto the kernel datapath."""

    def __init__(self, name, **params):
        OVSSwitch.__init__(self, name=name, datapath='kernel', **params)
class VLANHost(Host):
    """A Mininet host whose default interface is moved onto a tagged VLAN."""

    def config(self, vlan=100, **params):
        """Configure VLANHost according to (optional) parameters:
        vlan: VLAN ID for default interface"""
        super_config = super(VLANHost, self).config(**params)
        intf = self.defaultIntf()
        # Remove the IP from the physical interface, create the VLAN
        # subinterface, and assign the IP there instead.
        self.cmd('ifconfig %s inet 0' % intf)
        self.cmd('vconfig add %s %d' % (intf, vlan))
        self.cmd('ifconfig %s.%d inet %s' % (intf, vlan, params['ip']))
        # Re-point Mininet's interface bookkeeping at the VLAN subinterface.
        vlan_intf_name = '%s.%d' % (intf, vlan)
        intf.name = vlan_intf_name
        self.nameToIntf[vlan_intf_name] = intf
        return super_config
class FAUCET(Controller):
    """Mininet Controller that launches ryu-manager running faucet.py.

    Also starts the ofctl REST app on its own free port (ofctl_port) so
    tests can query and modify the switch via HTTP.
    """

    def __init__(self, name, cdir=FAUCET_DIR,
                 command='ryu-manager ryu.app.ofctl_rest faucet.py',
                 cargs='--ofp-tcp-listen-port=%s --verbose --use-stderr',
                 **kwargs):
        # The passed name is deliberately replaced with a per-process unique
        # one so parallel test runs do not collide.
        name = 'faucet-%u' % os.getpid()
        port = find_free_port()
        self.ofctl_port = find_free_port()
        cargs = '--wsapi-port=%u %s' % (self.ofctl_port, cargs)
        Controller.__init__(
            self, name, cdir=cdir, command=command, port=port, cargs=cargs, **kwargs)
class Gauge(Controller):
    """Mininet Controller that launches ryu-manager running gauge.py."""

    def __init__(self, name, cdir=FAUCET_DIR,
                 command='ryu-manager gauge.py',
                 cargs='--ofp-tcp-listen-port=%s --verbose --use-stderr',
                 **kwargs):
        # Per-process unique name, same rationale as FAUCET above.
        name = 'gauge-%u' % os.getpid()
        port = find_free_port()
        Controller.__init__(
            self, name, cdir=cdir, command=command, port=port, cargs=cargs, **kwargs)
class FaucetSwitchTopo(Topo):
    """A single FAUCET switch with n_tagged VLAN hosts and n_untagged hosts."""

    def build(self, dpid=0, n_tagged=0, tagged_vid=100, n_untagged=0):
        # Host names embed the (truncated) PID so parallel runs don't clash.
        pid = os.getpid()
        for host_n in range(n_tagged):
            host = self.addHost('t%x%s' % (pid % 0xff, host_n + 1),
                                cls=VLANHost, vlan=tagged_vid)
        for host_n in range(n_untagged):
            host = self.addHost('u%x%s' % (pid % 0xff, host_n + 1))
        if SWITCH_MAP:
            # Hardware test: the mapping OVS switch takes DPID+1 so it does
            # not clash with the physical switch's DPID.
            dpid = int(dpid, 16) + 1
            print 'mapped switch will use DPID %s' % dpid
        switch = self.addSwitch(
            's1%x' % pid, cls=FaucetSwitch, listenPort=find_free_port(), dpid=dpid)
        for host in self.hosts():
            self.addLink(host, switch)
class FaucetTest(unittest.TestCase):
    """Base class for FAUCET Mininet tests.

    Handles per-test config generation, Mininet/controller lifecycle, and
    provides helpers for ping verification, flow-table inspection via the
    ofctl REST API, IPv4/IPv6 routing checks and exabgp management.
    """

    # Expected output fragment from a successful single-packet ping.
    ONE_GOOD_PING = '1 packets transmitted, 1 received, 0% packet loss'
    # FAUCET interfaces/vlans YAML fragment; filled in by subclasses.
    CONFIG = ''
    CONTROLLER_IPV4 = '10.0.0.254'
    CONTROLLER_IPV6 = 'fc00::1:254'
    OFCTL = 'ovs-ofctl -OOpenFlow13'

    def setUp(self):
        """Create a tmpdir and write FAUCET/Gauge configs and log paths."""
        self.tmpdir = tempfile.mkdtemp()
        os.environ['FAUCET_CONFIG'] = os.path.join(
            self.tmpdir, 'faucet.yaml')
        os.environ['GAUGE_CONFIG'] = os.path.join(
            self.tmpdir, 'gauge.conf')
        os.environ['FAUCET_LOG'] = os.path.join(
            self.tmpdir, 'faucet.log')
        os.environ['FAUCET_EXCEPTION_LOG'] = os.path.join(
            self.tmpdir, 'faucet-exception.log')
        os.environ['GAUGE_LOG'] = os.path.join(
            self.tmpdir, 'gauge.log')
        os.environ['GAUGE_EXCEPTION_LOG'] = os.path.join(
            self.tmpdir, 'gauge-exception.log')
        self.debug_log_path = os.path.join(
            self.tmpdir, 'ofchannel.log')
        self.monitor_ports_file = os.path.join(
            self.tmpdir, 'ports.txt')
        self.monitor_flow_table_file = os.path.join(
            self.tmpdir, 'flow.txt')
        if SWITCH_MAP:
            # Hardware switch: DPID fixed by global config.
            self.dpid = DPID
        else:
            # Software switch: random DPID so parallel tests don't collide.
            self.dpid = str(random.randint(1, 2**32))
        self.CONFIG = '\n'.join((
            self.get_config_header(self.dpid, HARDWARE),
            self.CONFIG % PORT_MAP,
            'ofchannel_log: "%s"' % self.debug_log_path))
        open(os.environ['FAUCET_CONFIG'], 'w').write(self.CONFIG)
        self.GAUGE_CONFIG = self.get_gauge_config(
            self.dpid,
            os.environ['FAUCET_CONFIG'],
            self.monitor_ports_file,
            self.monitor_flow_table_file
        )
        open(os.environ['GAUGE_CONFIG'], 'w').write(self.GAUGE_CONFIG)
        self.net = None
        self.topo = None

    def get_gauge_config(self, dp_id, faucet_config_file,
                         monitor_ports_file, monitor_flow_table_file):
        """Render the Gauge YAML config watching port stats and flow table.

        NOTE(review): dp_id is passed to format() as {1} but the template
        never references it — confirm whether that is intentional.
        """
        return '''
faucet_configs:
    - {0}
watchers:
    port_stats:
        dps: ['faucet-1']
        type: 'port_stats'
        interval: 5
        db: 'ps_file'
    flow_table:
        dps: ['faucet-1']
        type: 'flow_table'
        interval: 5
        db: 'ft_file'
dbs:
    ps_file:
        type: 'text'
        file: {2}
    ft_file:
        type: 'text'
        file: {3}
'''.format(
            faucet_config_file,
            dp_id,
            monitor_ports_file,
            monitor_flow_table_file
        )

    def get_config_header(self, dpid, hardware):
        """Return the common YAML header for a FAUCET config."""
        return '''
---
dp_id: %s
name: "faucet-1"
hardware: "%s"
''' % (str_int_dpid(dpid), hardware)

    def attach_physical_switch(self):
        """Bridge the hardware switch ports into OVS and await connection."""
        switch = self.net.switches[0]
        hosts_count = len(self.net.hosts)
        for i, test_host_port in enumerate(sorted(SWITCH_MAP)):
            port_i = i + 1
            mapped_port_i = port_i + hosts_count
            phys_port = Intf(SWITCH_MAP[test_host_port], node=switch)
            switch.cmd('ifconfig %s up' % phys_port)
            switch.cmd('ovs-vsctl add-port %s %s' % (switch.name, phys_port.name))
            # Patch traffic both ways between the host-facing port and the
            # port cabled to the physical switch.
            for port_pair in ((port_i, mapped_port_i), (mapped_port_i, port_i)):
                port_x, port_y = port_pair
                switch.cmd('%s add-flow %s in_port=%u,actions=output:%u' % (
                    self.OFCTL, switch.name, port_x, port_y))
        # A non-empty ofchannel log indicates FAUCET saw the switch connect.
        for _ in range(20):
            if (os.path.exists(self.debug_log_path) and
                    os.path.getsize(self.debug_log_path) > 0):
                return
            time.sleep(1)
        print 'physical switch could not connect to controller'
        sys.exit(-1)

    def start_net(self):
        """Start Mininet with FAUCET (and Gauge when software-only)."""
        self.net = Mininet(self.topo, controller=FAUCET)
        # TODO: when running software only, also test gauge.
        if not SWITCH_MAP:
            self.net.addController(controller=Gauge)
        self.net.start()
        if SWITCH_MAP:
            self.attach_physical_switch()
        else:
            for controller in self.net.controllers:
                controller.isAvailable()
            self.net.waitConnected()
            self.wait_until_matching_flow('OUTPUT:CONTROLLER')
        dumpNodeConnections(self.net.hosts)

    def tearDown(self):
        """Stop Mininet and remove the per-test tmpdir."""
        if self.net is not None:
            self.net.stop()
            # Mininet takes a long time to actually shutdown.
            # TODO: detect and block when Mininet isn't done.
            time.sleep(5)
        shutil.rmtree(self.tmpdir)

    def add_host_ipv6_address(self, host, ip_v6):
        """Add an IPv6 address to a host's default interface."""
        host.cmd('ip -6 addr add %s dev %s' % (ip_v6, host.intf()))

    def add_host_ipv6_route(self, host, ip_dst, ip_gw):
        """Add an IPv6 route to a host."""
        host.cmd('ip -6 route add %s via %s' % (ip_dst.masked(), ip_gw))

    def one_ipv4_ping(self, host, dst):
        """Send one IPv4 ping from host to dst and assert it succeeded."""
        self.require_host_learned(host)
        ping_result = host.cmd('ping -c1 %s' % dst)
        self.assertTrue(re.search(self.ONE_GOOD_PING, ping_result))

    def one_ipv4_controller_ping(self, host):
        """Ping the controller's IPv4 gateway address once from host."""
        self.one_ipv4_ping(host, self.CONTROLLER_IPV4)

    def one_ipv6_ping(self, host, dst, timeout=2):
        """Send one IPv6 ping from host to dst, retrying up to timeout times."""
        self.require_host_learned(host)
        # TODO: retry our one ping. We should not have to retry.
        for _ in range(timeout):
            ping_result = host.cmd('ping6 -c1 %s' % dst)
            if re.search(self.ONE_GOOD_PING, ping_result):
                return
        self.assertTrue(re.search(self.ONE_GOOD_PING, ping_result))

    def one_ipv6_controller_ping(self, host):
        """Ping the controller's IPv6 gateway address once from host."""
        self.one_ipv6_ping(host, self.CONTROLLER_IPV6)

    def hup_faucet(self):
        """Send SIGHUP to FAUCET (via fuser on its listen port) to reload config."""
        controller = self.net.controllers[0]
        tcp_pattern = '%s/tcp' % controller.port
        fuser_out = controller.cmd('fuser %s -k -1' % tcp_pattern)
        self.assertTrue(re.search(r'%s:\s+\d+' % tcp_pattern, fuser_out))

    def force_faucet_reload(self):
        # Force FAUCET to reload by adding new line to config file.
        open(os.environ['FAUCET_CONFIG'], 'a').write('\n')
        self.hup_faucet()

    def tcpdump_helper(self, tcpdump_host, tcpdump_filter, funcs=[],
                       timeout=10, packets=2):
        """Run tcpdump on a host, invoking funcs once capture starts.

        Returns the captured tcpdump text (asserted non-empty).
        NOTE(review): mutable default funcs=[] — harmless as it is never
        mutated, but worth fixing if this helper is ever extended.
        """
        tcpdump_out = tcpdump_host.popen(
            'timeout %us tcpdump -e -n -U -v -c %u %s' % (
                timeout, packets, tcpdump_filter),
            stderr=subprocess.STDOUT)
        popens = {tcpdump_host: tcpdump_out}
        tcpdump_started = False
        tcpdump_txt = ''
        for host, line in pmonitor(popens):
            if host == tcpdump_host:
                if tcpdump_started:
                    tcpdump_txt += line.strip()
                else:
                    # when we see tcpdump start, then call provided functions.
                    if re.search('tcpdump: listening on ', line):
                        tcpdump_started = True
                        for func in funcs:
                            func()
        self.assertFalse(tcpdump_txt == '')
        return tcpdump_txt

    def ofctl_rest_url(self):
        """Return the base URL of FAUCET's ofctl REST API."""
        return 'http://127.0.0.1:%u' % self.net.controllers[0].ofctl_port

    def matching_flow_present(self, exp_flow, timeout=10):
        """Return True if a flow matching regex exp_flow is installed."""
        int_dpid = str_int_dpid(self.dpid)
        for _ in range(timeout):
            try:
                ofctl_result = json.loads(requests.get(
                    '%s/stats/flow/%s' % (self.ofctl_rest_url(), int_dpid)).text)
            except (ValueError, requests.exceptions.ConnectionError):
                # Didn't get valid JSON, try again
                time.sleep(1)
                continue
            dump_flows = ofctl_result[int_dpid]
            for flow in dump_flows:
                # Re-transform the dictionary into str to re-use
                # the verify_ipv*_routing methods
                flow_str = json.dumps(flow)
                if re.search(exp_flow, flow_str):
                    return True
            time.sleep(1)
        return False

    def wait_until_matching_flow(self, exp_flow, timeout=10):
        """Fail the test unless exp_flow appears within timeout seconds."""
        if not self.matching_flow_present(exp_flow, timeout):
            # NOTE(review): the trailing ', exp_flow' is a no-op tuple
            # expression; the regex is not reported in the failure message.
            self.assertTrue(False), exp_flow

    def host_learned(self, host):
        """Return True if FAUCET has learned host's MAC (table 2 entry)."""
        return self.matching_flow_present(
            '"table_id": 2,.+"dl_src": "%s"' % host.MAC())

    def require_host_learned(self, host):
        """Fail the test unless host has been learned."""
        if not self.host_learned(host):
            # NOTE(review): same no-op tuple pattern as above.
            self.assertTrue(False), host

    def ping_all_when_learned(self):
        """Ping all pairs; assert zero loss once all hosts are learned."""
        # Cause hosts to send traffic that FAUCET can use to learn them.
        self.net.pingAll()
        # we should have learned all hosts now, so should have no loss.
        for host in self.net.hosts:
            self.require_host_learned(host)
        self.assertEquals(0, self.net.pingAll())

    def wait_until_matching_route_as_flow(self, nexthop, prefix, timeout=5):
        """Wait for a routed flow (eth_dst rewrite + dst prefix match)."""
        if prefix.version == 6:
            exp_prefix = '/'.join(
                (str(prefix.masked().ip), str(prefix.netmask)))
            nw_dst_match = '"ipv6_dst": "%s"' % exp_prefix
        else:
            exp_prefix = prefix.masked().with_netmask
            nw_dst_match = '"nw_dst": "%s"' % exp_prefix
        self.wait_until_matching_flow(
            'SET_FIELD: {eth_dst:%s}.+%s' % (nexthop, nw_dst_match), timeout)

    def curl_portmod(self, int_dpid, port_no, config, mask):
        """Return a curl command string that port-mods the given port."""
        # TODO: avoid dependency on varying 'requests' library.
        curl_format = ' '.join((
            'curl -X POST -d'
            '\'{"dpid": %s, "port_no": %u, "config": %u, "mask": %u}\'',
            '%s/stats/portdesc/modify'))
        return curl_format % (
            int_dpid, port_no, config, mask, self.ofctl_rest_url())

    def flap_all_switch_ports(self, flap_time=1):
        """Bring every switch port down then up again (software switch only)."""
        # TODO: for hardware switches also
        if not SWITCH_MAP:
            switch = self.net.switches[0]
            int_dpid = str_int_dpid(self.dpid)
            for port_no in sorted(switch.ports.itervalues()):
                if port_no > 0:
                    os.system(self.curl_portmod(
                        int_dpid, port_no,
                        ofp.OFPPC_PORT_DOWN, ofp.OFPPC_PORT_DOWN))
                    time.sleep(flap_time)
                    os.system(self.curl_portmod(
                        int_dpid, port_no,
                        0, ofp.OFPPC_PORT_DOWN))

    def swap_host_macs(self, first_host, second_host):
        """Exchange the MAC addresses of two hosts."""
        first_host_mac = first_host.MAC()
        second_host_mac = second_host.MAC()
        first_host.setMAC(second_host_mac)
        second_host.setMAC(first_host_mac)

    def add_host_ipv4_route(self, host, ip_dst, ip_gw):
        """Add an IPv4 route to a host."""
        host.cmd('route add -net %s gw %s' % (ip_dst.masked(), ip_gw))

    def verify_ipv4_routing(self, first_host, first_host_routed_ip,
                            second_host, second_host_routed_ip):
        """Verify FAUCET routes IPv4 between two hosts' secondary subnets."""
        first_host.cmd(('ifconfig %s:0 %s netmask 255.255.255.0 up' % (
            first_host.intf(), first_host_routed_ip.ip)))
        second_host.cmd(('ifconfig %s:0 %s netmask 255.255.255.0 up' % (
            second_host.intf(), second_host_routed_ip.ip)))
        self.add_host_ipv4_route(
            first_host, second_host_routed_ip, self.CONTROLLER_IPV4)
        self.add_host_ipv4_route(
            second_host, first_host_routed_ip, self.CONTROLLER_IPV4)
        # Prime learning, then wait for the routed flows to be installed.
        self.net.ping(hosts=(first_host, second_host))
        self.wait_until_matching_route_as_flow(
            first_host.MAC(), first_host_routed_ip)
        self.wait_until_matching_route_as_flow(
            second_host.MAC(), second_host_routed_ip)
        self.one_ipv4_ping(first_host, second_host_routed_ip.ip)
        self.one_ipv4_ping(second_host, first_host_routed_ip.ip)

    def verify_ipv4_routing_mesh(self):
        """Verify IPv4 routing across multiple subnets and after a MAC swap."""
        host_pair = self.net.hosts[:2]
        first_host, second_host = host_pair
        first_host_routed_ip = ipaddr.IPv4Network('10.0.1.1/24')
        second_host_routed_ip = ipaddr.IPv4Network('10.0.2.1/24')
        second_host_routed_ip2 = ipaddr.IPv4Network('10.0.3.1/24')
        self.verify_ipv4_routing(
            first_host, first_host_routed_ip,
            second_host, second_host_routed_ip)
        self.verify_ipv4_routing(
            first_host, first_host_routed_ip,
            second_host, second_host_routed_ip2)
        self.swap_host_macs(first_host, second_host)
        self.verify_ipv4_routing(
            first_host, first_host_routed_ip,
            second_host, second_host_routed_ip)
        self.verify_ipv4_routing(
            first_host, first_host_routed_ip,
            second_host, second_host_routed_ip2)

    def setup_ipv6_hosts_addresses(self, first_host, first_host_ip,
                                   first_host_routed_ip, second_host,
                                   second_host_ip, second_host_routed_ip):
        """Flush and (re)assign primary and routed IPv6 addresses."""
        for host in first_host, second_host:
            host.cmd('ip addr flush dev %s' % host.intf())
        self.add_host_ipv6_address(first_host, first_host_ip)
        self.add_host_ipv6_address(second_host, second_host_ip)
        self.add_host_ipv6_address(first_host, first_host_routed_ip)
        self.add_host_ipv6_address(second_host, second_host_routed_ip)

    def verify_ipv6_routing(self, first_host, first_host_ip,
                            first_host_routed_ip, second_host,
                            second_host_ip, second_host_routed_ip):
        """Verify FAUCET routes IPv6 between two hosts' routed prefixes."""
        self.one_ipv6_ping(first_host, second_host_ip.ip)
        self.one_ipv6_ping(second_host, first_host_ip.ip)
        self.add_host_ipv6_route(
            first_host, second_host_routed_ip, self.CONTROLLER_IPV6)
        self.add_host_ipv6_route(
            second_host, first_host_routed_ip, self.CONTROLLER_IPV6)
        self.wait_until_matching_route_as_flow(
            first_host.MAC(), first_host_routed_ip)
        self.wait_until_matching_route_as_flow(
            second_host.MAC(), second_host_routed_ip)
        self.one_ipv6_controller_ping(first_host)
        self.one_ipv6_controller_ping(second_host)
        self.one_ipv6_ping(first_host, second_host_routed_ip.ip)
        self.one_ipv6_ping(second_host, first_host_routed_ip.ip)

    def verify_ipv6_routing_pair(self, first_host, first_host_ip,
                                 first_host_routed_ip, second_host,
                                 second_host_ip, second_host_routed_ip):
        """Set up addresses then verify IPv6 routing for one host pair."""
        self.setup_ipv6_hosts_addresses(
            first_host, first_host_ip, first_host_routed_ip,
            second_host, second_host_ip, second_host_routed_ip)
        self.verify_ipv6_routing(
            first_host, first_host_ip, first_host_routed_ip,
            second_host, second_host_ip, second_host_routed_ip)

    def verify_ipv6_routing_mesh(self):
        """Verify IPv6 routing across multiple prefixes and after a MAC swap."""
        host_pair = self.net.hosts[:2]
        first_host, second_host = host_pair
        first_host_ip = ipaddr.IPv6Network('fc00::1:1/112')
        second_host_ip = ipaddr.IPv6Network('fc00::1:2/112')
        first_host_routed_ip = ipaddr.IPv6Network('fc00::10:1/112')
        second_host_routed_ip = ipaddr.IPv6Network('fc00::20:1/112')
        second_host_routed_ip2 = ipaddr.IPv6Network('fc00::30:1/112')
        self.verify_ipv6_routing_pair(
            first_host, first_host_ip, first_host_routed_ip,
            second_host, second_host_ip, second_host_routed_ip)
        self.verify_ipv6_routing_pair(
            first_host, first_host_ip, first_host_routed_ip,
            second_host, second_host_ip, second_host_routed_ip2)
        self.swap_host_macs(first_host, second_host)
        self.verify_ipv6_routing_pair(
            first_host, first_host_ip, first_host_routed_ip,
            second_host, second_host_ip, second_host_routed_ip)
        self.verify_ipv6_routing_pair(
            first_host, first_host_ip, first_host_routed_ip,
            second_host, second_host_ip, second_host_routed_ip2)

    def stop_exabgp(self, port=179):
        """Kill any exabgp listening on the given TCP port."""
        controller = self.net.controllers[0]
        controller.cmd('fuser %s/tcp -k -9' % port)

    def start_exabgp(self, exabgp_conf, listen_address='127.0.0.1', port=179):
        """Start exabgp with the given config; wait for an ESTABlished session.

        Returns the path to exabgp's log file; fails the test on timeout.
        """
        self.stop_exabgp(port)
        exabgp_conf_file = os.path.join(self.tmpdir, 'exabgp.conf')
        exabgp_log = os.path.join(self.tmpdir, 'exabgp.log')
        exabgp_err = os.path.join(self.tmpdir, 'exabgp.err')
        open(exabgp_conf_file, 'w').write(exabgp_conf)
        controller = self.net.controllers[0]
        controller.cmd(
            'env exabgp.tcp.bind="%s" exabgp.tcp.port=%u '
            'timeout -s9 180s stdbuf -o0 -e0 exabgp %s -d 2> %s > %s &' % (
                listen_address, port, exabgp_conf_file, exabgp_err, exabgp_log))
        for _ in range(60):
            netstat = controller.cmd('netstat -an|grep %s:%s|grep ESTAB' % (
                listen_address, port))
            if netstat.find('ESTAB') > -1:
                return exabgp_log
            time.sleep(1)
        self.assertTrue(False)

    def exabgp_updates(self, exabgp_log):
        """Return BGP UPDATE lines from exabgp's log; fail on timeout."""
        controller = self.net.controllers[0]
        # exabgp should have received our BGP updates
        for _ in range(60):
            updates = controller.cmd(
                r'grep UPDATE %s |grep -Eo "\S+ next-hop \S+"' % exabgp_log)
            if updates:
                return updates
            time.sleep(1)
        self.assertTrue(False)
class FaucetUntaggedTest(FaucetTest):
    """Four untagged hosts on one VLAN: basic learning and ping test."""

    CONFIG = """
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
vlans:
    100:
        description: "untagged"
"""

    def setUp(self):
        super(FaucetUntaggedTest, self).setUp()
        self.topo = FaucetSwitchTopo(dpid=self.dpid, n_untagged=4)
        self.start_net()

    def test_untagged(self):
        """All hosts can ping; Gauge monitor files accumulate data."""
        self.ping_all_when_learned()
        # TODO: a smoke test only - are flow/port stats accumulating
        if not SWITCH_MAP:
            for _ in range(5):
                if (os.path.exists(self.monitor_ports_file) and
                        os.path.exists(self.monitor_flow_table_file)):
                    break
                time.sleep(1)
            assert os.stat(self.monitor_ports_file).st_size > 0
            assert os.stat(self.monitor_flow_table_file).st_size > 0
class FaucetTaggedAndUntaggedVlanTest(FaucetTest):
    """One VLAN carrying both a tagged port and untagged ports must still switch."""

    CONFIG = """
interfaces:
    %(port_1)d:
        tagged_vlans: [100]
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
vlans:
    100:
        description: "mixed"
"""

    def setUp(self):
        super(FaucetTaggedAndUntaggedVlanTest, self).setUp()
        self.topo = FaucetSwitchTopo(dpid=self.dpid, n_tagged=1, n_untagged=3)
        self.start_net()

    def test_untagged(self):
        # Connectivity must survive a port flap and relearn.
        self.ping_all_when_learned()
        self.flap_all_switch_ports()
        self.ping_all_when_learned()
class FaucetUntaggedMaxHostsTest(FaucetUntaggedTest):
    """With max_hosts: 2 on the VLAN, exactly two hosts may be learned."""

    CONFIG = """
timeout: 60
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
vlans:
    100:
        description: "untagged"
        max_hosts: 2
"""

    def test_untagged(self):
        self.force_faucet_reload()
        self.net.pingAll()
        learned_hosts = [
            host for host in self.net.hosts if self.host_learned(host)]
        self.assertEquals(2, len(learned_hosts))
class FaucetUntaggedHUPTest(FaucetUntaggedTest):
    """HUPing FAUCET with an unchanged config must not break forwarding."""

    def get_configure_count(self):
        """Return the number of 'configuration is unchanged' log lines as an int.

        Bug fix: previously returned the raw string from cmd(), so integer
        comparisons against it in test_untagged() could never match.
        """
        controller = self.net.controllers[0]
        configure_count = controller.cmd(
            'grep -c "configuration is unchanged" %s' % os.environ['FAUCET_LOG'])
        return int(configure_count)

    def test_untagged(self):
        controller = self.net.controllers[0]
        switch = self.net.switches[0]
        for i in range(0, 3):
            configure_count = self.get_configure_count()
            self.assertEquals(i, configure_count)
            self.hup_faucet()
            time.sleep(1)
            # Retry: the log line may take a moment to appear after the HUP.
            for _ in range(3):
                configure_count = self.get_configure_count()
                if configure_count == i + 1:
                    break
                time.sleep(1)
            # Bug fix: was assertTrue(i + 1, configure_count), which treats
            # the second argument as a message and can never fail.
            self.assertEquals(i + 1, configure_count)
            self.assertTrue(switch.connected())
            self.wait_until_matching_flow('OUTPUT:CONTROLLER')
            self.ping_all_when_learned()
class FaucetSingleUntaggedBGPIPv4RouteTest(FaucetUntaggedTest):
    """FAUCET must install IPv4 routes learned from an exabgp BGP peer."""

    CONFIG = """
arp_neighbor_timeout: 2
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
vlans:
    100:
        description: "untagged"
        controller_ips: ["10.0.0.254/24"]
        bgp_port: 9179
        bgp_as: 1
        bgp_routerid: "1.1.1.1"
        bgp_neighbor_address: "127.0.0.1"
        bgp_neighbor_as: 2
        routes:
            - route:
                ip_dst: 10.99.99.0/24
                ip_gw: 10.0.0.1
"""

    def test_untagged(self):
        # exabgp statically advertises three routes to FAUCET.
        exabgp_conf = """
group test {
  router-id 2.2.2.2;
  neighbor 127.0.0.1 {
    passive;
    local-address 127.0.0.1;
    peer-as 1;
    local-as 2;
    static {
      route 10.0.1.0/24 next-hop 10.0.0.1 local-preference 100;
      route 10.0.2.0/24 next-hop 10.0.0.2 local-preference 100;
      route 10.0.3.0/24 next-hop 10.0.0.2 local-preference 100;
    }
  }
}
"""
        first_host, second_host = self.net.hosts[:2]
        # wait until 10.0.0.1 has been resolved
        self.wait_until_matching_route_as_flow(
            first_host.MAC(), ipaddr.IPv4Network('10.99.99.0/24'))
        self.start_exabgp(exabgp_conf)
        # Wait for the last BGP-learned route to be installed as a flow.
        self.wait_until_matching_route_as_flow(
            second_host.MAC(), ipaddr.IPv4Network('10.0.3.0/24'), timeout=30)
        self.verify_ipv4_routing_mesh()
        self.flap_all_switch_ports()
        self.verify_ipv4_routing_mesh()
        self.stop_exabgp()
class FaucetSingleUntaggedIPv4RouteTest(FaucetUntaggedTest):
    """FAUCET must advertise its static IPv4 routes to a BGP peer (exabgp)."""

    CONFIG = """
arp_neighbor_timeout: 2
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
vlans:
    100:
        description: "untagged"
        controller_ips: ["10.0.0.254/24"]
        bgp_port: 9179
        bgp_as: 1
        bgp_routerid: "1.1.1.1"
        bgp_neighbor_address: "127.0.0.1"
        bgp_neighbor_as: 2
        routes:
            - route:
                ip_dst: "10.0.1.0/24"
                ip_gw: "10.0.0.1"
            - route:
                ip_dst: "10.0.2.0/24"
                ip_gw: "10.0.0.2"
            - route:
                ip_dst: "10.0.3.0/24"
                ip_gw: "10.0.0.2"
"""

    def test_untagged(self):
        # exabgp logs received routes via the JSON process API (/bin/cat).
        exabgp_conf = """
group test {
  process test {
    encoder json;
    neighbor-changes;
    receive-routes;
    run /bin/cat;
  }
  router-id 2.2.2.2;
  neighbor 127.0.0.1 {
    passive;
    local-address 127.0.0.1;
    peer-as 1;
    local-as 2;
  }
}
"""
        exabgp_log = self.start_exabgp(exabgp_conf)
        self.verify_ipv4_routing_mesh()
        self.flap_all_switch_ports()
        self.verify_ipv4_routing_mesh()
        # exabgp should have received our BGP updates
        updates = self.exabgp_updates(exabgp_log)
        self.stop_exabgp()
        assert re.search('10.0.0.0/24 next-hop 10.0.0.254', updates)
        assert re.search('10.0.1.0/24 next-hop 10.0.0.1', updates)
        assert re.search('10.0.2.0/24 next-hop 10.0.0.2', updates)
        # Bug fix: the 10.0.2.0/24 assertion was duplicated; the configured
        # 10.0.3.0/24 route was never verified (cf. the IPv6 variant of
        # this test, which checks all three static routes).
        assert re.search('10.0.3.0/24 next-hop 10.0.0.2', updates)
class FaucetUntaggedNoVLanUnicastFloodTest(FaucetUntaggedTest):
    """With unicast_flood: False, learned hosts must still have connectivity."""

    CONFIG = """
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
vlans:
    100:
        description: "untagged"
        unicast_flood: False
"""

    def test_untagged(self):
        self.ping_all_when_learned()
class FaucetUntaggedHostMoveTest(FaucetUntaggedTest):
    """A MAC address moving between ports must be relearned transparently."""

    def test_untagged(self):
        host_a, host_b = self.net.hosts[0:2]
        self.assertEqual(0, self.net.ping((host_a, host_b)))
        self.swap_host_macs(host_a, host_b)
        # First ping after the move may be lossy; it triggers relearning.
        self.net.ping((host_a, host_b))
        self.require_host_learned(host_a)
        self.require_host_learned(host_b)
        self.assertEquals(0, self.net.ping((host_a, host_b)))
class FaucetUntaggedHostPermanentLearnTest(FaucetUntaggedTest):
    """A permanent_learn port's MAC must not be stolen by another port."""

    CONFIG = """
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
        permanent_learn: True
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
vlans:
    100:
        description: "untagged"
"""

    def test_untagged(self):
        self.ping_all_when_learned()
        first_host, second_host, third_host = self.net.hosts[0:3]
        # 3rd host impersonates 1st, 3rd host breaks but 1st host still OK
        original_third_host_mac = third_host.MAC()
        third_host.setMAC(first_host.MAC())
        self.assertEqual(100.0, self.net.ping((second_host, third_host)))
        self.assertEqual(0, self.net.ping((first_host, second_host)))
        # 3rd host stops impersonating, now everything fine again.
        third_host.setMAC(original_third_host_mac)
        self.ping_all_when_learned()
class FaucetUntaggedControlPlaneTest(FaucetUntaggedTest):
    """Hosts must be able to ping FAUCET's controller IPs over IPv4 and IPv6."""

    CONFIG = """
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
vlans:
    100:
        description: "untagged"
        controller_ips: ["10.0.0.254/24", "fc00::1:254/112"]
"""

    def test_ping_controller(self):
        first_host, second_host = self.net.hosts[0:2]
        self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
        self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
        # Two iterations: before and after flapping all switch ports.
        for _ in range(2):
            # Verify IPv4 and IPv6 connectivity between first two hosts.
            self.one_ipv4_ping(first_host, second_host.IP())
            self.one_ipv6_ping(first_host, 'fc00::1:2')
            # Verify first two hosts can ping controller over both IPv4 and IPv6
            for host in first_host, second_host:
                self.one_ipv4_controller_ping(host)
                self.one_ipv6_controller_ping(host)
            self.flap_all_switch_ports()
class FaucetTaggedAndUntaggedTest(FaucetTest):
    """Separate tagged and untagged VLANs must be isolated from each other."""

    CONFIG = """
interfaces:
    %(port_1)d:
        tagged_vlans: [100]
        description: "b1"
    %(port_2)d:
        tagged_vlans: [100]
        description: "b2"
    %(port_3)d:
        native_vlan: 101
        description: "b3"
    %(port_4)d:
        native_vlan: 101
        description: "b4"
vlans:
    100:
        description: "tagged"
    101:
        description: "untagged"
"""

    def setUp(self):
        super(FaucetTaggedAndUntaggedTest, self).setUp()
        self.topo = FaucetSwitchTopo(dpid=self.dpid, n_tagged=2, n_untagged=2)
        self.start_net()

    def test_seperate_untagged_tagged(self):
        # Bug fix: slices were [0:1] and [2:3], selecting only ONE host per
        # "pair", so the intra-VLAN pings never exercised connectivity.
        tagged_host_pair = self.net.hosts[0:2]
        untagged_host_pair = self.net.hosts[2:4]
        # hosts within VLANs can ping each other
        self.assertEquals(0, self.net.ping(tagged_host_pair))
        self.assertEquals(0, self.net.ping(untagged_host_pair))
        # hosts cannot ping hosts in other VLANs
        self.assertEquals(
            100, self.net.ping([tagged_host_pair[0], untagged_host_pair[0]]))
class FaucetUntaggedACLTest(FaucetUntaggedTest):
    """Port ACL must block TCP/5001 while allowing TCP/5002 and other traffic."""

    CONFIG = """
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
        acl_in: 1
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
vlans:
    100:
        description: "untagged"
acls:
    %(port_1)d:
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5001
            actions:
                allow: 0
        - rule:
            dl_type: 0x800
            nw_proto: 6
            tp_dst: 5002
            actions:
                allow: 1
        - rule:
            actions:
                allow: 1
"""

    def test_port5001_blocked(self):
        self.ping_all_when_learned()
        first_host, second_host = self.net.hosts[0:2]
        second_host.cmd('timeout 10s echo hello | nc -l 5001 &')
        # The ACL drops the SYN, so the client reads nothing.
        self.assertEquals(
            '', first_host.cmd('timeout 10s nc %s 5001' % second_host.IP()))
        self.wait_until_matching_flow(r'"packet_count": [1-9]+.+"tp_dst": 5001')

    def test_port5002_unblocked(self):
        self.ping_all_when_learned()
        first_host, second_host = self.net.hosts[0:2]
        second_host.cmd('timeout 10s echo hello | nc -l %s 5002 &' % second_host.IP())
        time.sleep(1)
        self.assertEquals(
            'hello\r\n',
            first_host.cmd('nc -w 5 %s 5002' % second_host.IP()))
        self.wait_until_matching_flow(r'"packet_count": [1-9]+.+"tp_dst": 5002')
class FaucetUntaggedACLMirrorTest(FaucetUntaggedTest):
    """An ACL mirror action must copy traffic to the mirror port."""

    CONFIG = """
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
        acl_in: 1
    %(port_2)d:
        native_vlan: 100
        description: "b2"
        acl_in: 1
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
vlans:
    100:
        description: "untagged"
        unicast_flood: False
acls:
    %(port_1)d:
        - rule:
            actions:
                allow: 1
                mirror: %(port_3)d
"""

    def test_untagged(self):
        first_host, second_host, mirror_host = self.net.hosts[0:3]
        mirror_mac = mirror_host.MAC()
        # Exclude the mirror host's own traffic from the capture.
        tcpdump_filter = 'not ether src %s and icmp' % mirror_mac
        tcpdump_txt = self.tcpdump_helper(
            mirror_host, tcpdump_filter, [
                lambda: first_host.cmd('ping -c1 %s' % second_host.IP())])
        # Both directions of the ping must appear on the mirror port.
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
        self.assertTrue(re.search(
            '%s: ICMP echo reply' % first_host.IP(), tcpdump_txt))
class FaucetUntaggedOutputTest(FaucetUntaggedTest):
    """An ACL output action must rewrite dl_dst and push a VLAN tag."""

    CONFIG = """
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
        acl_in: 1
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
vlans:
    100:
        description: "untagged"
        unicast_flood: False
acls:
    %(port_1)d:
        - rule:
            dl_dst: "01:02:03:04:05:06"
            actions:
                output:
                    dl_dst: "06:06:06:06:06:06"
                    vlan_vid: 123
                    port: %(port_2)d
"""

    def test_untagged(self):
        first_host, second_host = self.net.hosts[0:2]
        # we expected to see the rewritten address and VLAN
        tcpdump_filter = ('icmp and ether dst 06:06:06:06:06:06')
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                # Static ARP entry forces the ping to the ACL-matched MAC.
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd('ping -c1 %s' % second_host.IP())])
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
        self.assertTrue(re.search(
            'vlan 123', tcpdump_txt))
class FaucetUntaggedMirrorTest(FaucetUntaggedTest):
    """A port-level mirror must copy another port's traffic to the mirror port."""

    CONFIG = """
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
        mirror: %(port_1)d
    %(port_4)d:
        native_vlan: 100
        description: "b4"
vlans:
    100:
        description: "untagged"
        unicast_flood: False
"""

    def test_untagged(self):
        first_host, second_host, mirror_host = self.net.hosts[0:3]
        mirror_mac = mirror_host.MAC()
        # Exclude the mirror host's own traffic from the capture.
        tcpdump_filter = 'not ether src %s and icmp' % mirror_mac
        tcpdump_txt = self.tcpdump_helper(
            mirror_host, tcpdump_filter, [
                lambda: first_host.cmd('ping -c1 %s' % second_host.IP())])
        # Both directions of the ping must appear on the mirror port.
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
        self.assertTrue(re.search(
            '%s: ICMP echo reply' % first_host.IP(), tcpdump_txt))
class FaucetTaggedTest(FaucetTest):
    """Baseline tagged test: four tagged hosts on one VLAN have connectivity."""

    CONFIG = """
interfaces:
    %(port_1)d:
        tagged_vlans: [100]
        description: "b1"
    %(port_2)d:
        tagged_vlans: [100]
        description: "b2"
    %(port_3)d:
        tagged_vlans: [100]
        description: "b3"
    %(port_4)d:
        tagged_vlans: [100]
        description: "b4"
vlans:
    100:
        description: "tagged"
"""

    def setUp(self):
        super(FaucetTaggedTest, self).setUp()
        self.topo = FaucetSwitchTopo(dpid=self.dpid, n_tagged=4)
        self.start_net()

    def test_tagged(self):
        self.ping_all_when_learned()
class FaucetTaggedControlPlaneTest(FaucetTaggedTest):
    """Tagged hosts must be able to ping FAUCET's controller IPs (v4 and v6)."""

    CONFIG = """
interfaces:
    %(port_1)d:
        tagged_vlans: [100]
        description: "b1"
    %(port_2)d:
        tagged_vlans: [100]
        description: "b2"
    %(port_3)d:
        tagged_vlans: [100]
        description: "b3"
    %(port_4)d:
        tagged_vlans: [100]
        description: "b4"
vlans:
    100:
        description: "tagged"
        controller_ips: ["10.0.0.254/24", "fc00::1:254/112"]
"""

    def test_ping_controller(self):
        first_host, second_host = self.net.hosts[0:2]
        self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
        self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
        # Verify IPv4 and IPv6 connectivity between first two hosts.
        self.one_ipv4_ping(first_host, second_host.IP())
        self.one_ipv6_ping(first_host, 'fc00::1:2')
        # Verify first two hosts can ping controller over both IPv4 and IPv6
        for host in first_host, second_host:
            self.one_ipv4_controller_ping(host)
            self.one_ipv6_controller_ping(host)
class FaucetSingleTaggedIPv4RouteTest(FaucetTaggedTest):
    """Static IPv4 routes must work on a tagged VLAN, surviving a MAC swap."""

    CONFIG = """
arp_neighbor_timeout: 2
interfaces:
    %(port_1)d:
        tagged_vlans: [100]
        description: "b1"
    %(port_2)d:
        tagged_vlans: [100]
        description: "b2"
    %(port_3)d:
        tagged_vlans: [100]
        description: "b3"
    %(port_4)d:
        tagged_vlans: [100]
        description: "b4"
vlans:
    100:
        description: "tagged"
        controller_ips: ["10.0.0.254/24"]
        routes:
            - route:
                ip_dst: "10.0.1.0/24"
                ip_gw: "10.0.0.1"
            - route:
                ip_dst: "10.0.2.0/24"
                ip_gw: "10.0.0.2"
            - route:
                ip_dst: "10.0.3.0/24"
                ip_gw: "10.0.0.2"
"""

    def test_tagged(self):
        host_pair = self.net.hosts[:2]
        first_host, second_host = host_pair
        first_host_routed_ip = ipaddr.IPv4Network('10.0.1.1/24')
        second_host_routed_ip = ipaddr.IPv4Network('10.0.2.1/24')
        self.verify_ipv4_routing(
            first_host, first_host_routed_ip,
            second_host, second_host_routed_ip)
        # Routing must be re-established after the gateway MACs move.
        self.swap_host_macs(first_host, second_host)
        self.verify_ipv4_routing(
            first_host, first_host_routed_ip,
            second_host, second_host_routed_ip)
class FaucetSingleUntaggedBGPIPv6RouteTest(FaucetUntaggedTest):
    """FAUCET must install IPv6 routes learned from an exabgp BGP peer."""

    CONFIG = """
arp_neighbor_timeout: 2
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
vlans:
    100:
        description: "untagged"
        controller_ips: ["fc00::1:254/112"]
        bgp_port: 9179
        bgp_as: 1
        bgp_routerid: "1.1.1.1"
        bgp_neighbor_address: "::1"
        bgp_neighbor_as: 2
"""

    def test_untagged(self):
        # exabgp statically advertises three IPv6 routes to FAUCET.
        exabgp_conf = """
group test {
  router-id 2.2.2.2;
  neighbor ::1 {
    passive;
    local-address ::1;
    peer-as 1;
    local-as 2;
    static {
      route fc00::10:1/112 next-hop fc00::1:1 local-preference 100;
      route fc00::20:1/112 next-hop fc00::1:2 local-preference 100;
      route fc00::30:1/112 next-hop fc00::1:2 local-preference 100;
    }
  }
}
"""
        self.start_exabgp(exabgp_conf, '::1')
        self.verify_ipv6_routing_mesh()
        self.flap_all_switch_ports()
        self.verify_ipv6_routing_mesh()
        self.stop_exabgp()
class FaucetUntaggedSameVlanIPv6RouteTest(FaucetUntaggedTest):
    """FAUCET must route between two IPv6 subnets on the same VLAN."""

    CONFIG = """
arp_neighbor_timeout: 2
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
vlans:
    100:
        description: "untagged"
        controller_ips: ["fc00::10:1/112", "fc00::20:1/112"]
        routes:
            - route:
                ip_dst: "fc00::10:0/112"
                ip_gw: "fc00::10:2"
            - route:
                ip_dst: "fc00::20:0/112"
                ip_gw: "fc00::20:2"
"""

    def test_untagged(self):
        first_host, second_host = self.net.hosts[:2]
        # Each host lives in its own /112 with FAUCET as the gateway.
        first_host_ip = ipaddr.IPv6Network('fc00::10:2/112')
        first_host_ctrl_ip = ipaddr.IPv6Address('fc00::10:1')
        second_host_ip = ipaddr.IPv6Network('fc00::20:2/112')
        second_host_ctrl_ip = ipaddr.IPv6Address('fc00::20:1')
        self.add_host_ipv6_address(first_host, first_host_ip)
        self.add_host_ipv6_address(second_host, second_host_ip)
        self.add_host_ipv6_route(
            first_host, second_host_ip, first_host_ctrl_ip)
        self.add_host_ipv6_route(
            second_host, first_host_ip, second_host_ctrl_ip)
        self.wait_until_matching_route_as_flow(
            first_host.MAC(), first_host_ip)
        self.wait_until_matching_route_as_flow(
            second_host.MAC(), second_host_ip)
        # Host-to-host (routed) and host-to-gateway pings in both directions.
        self.one_ipv6_ping(first_host, second_host_ip.ip)
        self.one_ipv6_ping(first_host, second_host_ctrl_ip)
        self.one_ipv6_ping(second_host, first_host_ip.ip)
        self.one_ipv6_ping(second_host, first_host_ctrl_ip)
class FaucetSingleUntaggedIPv6RouteTest(FaucetUntaggedTest):
    """FAUCET must advertise its static IPv6 routes to a BGP peer (exabgp)."""

    CONFIG = """
arp_neighbor_timeout: 2
interfaces:
    %(port_1)d:
        native_vlan: 100
        description: "b1"
    %(port_2)d:
        native_vlan: 100
        description: "b2"
    %(port_3)d:
        native_vlan: 100
        description: "b3"
    %(port_4)d:
        native_vlan: 100
        description: "b4"
vlans:
    100:
        description: "untagged"
        controller_ips: ["fc00::1:254/112"]
        bgp_port: 9179
        bgp_as: 1
        bgp_routerid: "1.1.1.1"
        bgp_neighbor_address: "::1"
        bgp_neighbor_as: 2
        routes:
            - route:
                ip_dst: "fc00::10:0/112"
                ip_gw: "fc00::1:1"
            - route:
                ip_dst: "fc00::20:0/112"
                ip_gw: "fc00::1:2"
            - route:
                ip_dst: "fc00::30:0/112"
                ip_gw: "fc00::1:2"
"""

    def test_untagged(self):
        # exabgp logs received routes via the JSON process API (/bin/cat).
        exabgp_conf = """
group test {
  process test {
    encoder json;
    neighbor-changes;
    receive-routes;
    run /bin/cat;
  }
  router-id 2.2.2.2;
  neighbor ::1 {
    passive;
    local-address ::1;
    peer-as 1;
    local-as 2;
  }
}
"""
        exabgp_log = self.start_exabgp(exabgp_conf, '::1')
        self.verify_ipv6_routing_mesh()
        second_host = self.net.hosts[1]
        self.flap_all_switch_ports()
        # Routes must be reinstalled as flows after the port flap.
        self.wait_until_matching_route_as_flow(
            second_host.MAC(), ipaddr.IPv6Network('fc00::30:0/112'))
        self.verify_ipv6_routing_mesh()
        updates = self.exabgp_updates(exabgp_log)
        self.stop_exabgp()
        # FAUCET must have advertised the connected and all static routes.
        assert re.search('fc00::1:0/112 next-hop fc00::1:254', updates)
        assert re.search('fc00::10:0/112 next-hop fc00::1:1', updates)
        assert re.search('fc00::20:0/112 next-hop fc00::1:2', updates)
        assert re.search('fc00::30:0/112 next-hop fc00::1:2', updates)
class FaucetSingleTaggedIPv6RouteTest(FaucetTaggedTest):
    """Static IPv6 routes must work on a tagged VLAN, surviving a MAC swap."""

    CONFIG = """
arp_neighbor_timeout: 2
interfaces:
    %(port_1)d:
        tagged_vlans: [100]
        description: "b1"
    %(port_2)d:
        tagged_vlans: [100]
        description: "b2"
    %(port_3)d:
        tagged_vlans: [100]
        description: "b3"
    %(port_4)d:
        tagged_vlans: [100]
        description: "b4"
vlans:
    100:
        description: "tagged"
        controller_ips: ["fc00::1:254/112"]
        routes:
            - route:
                ip_dst: "fc00::10:0/112"
                ip_gw: "fc00::1:1"
            - route:
                ip_dst: "fc00::20:0/112"
                ip_gw: "fc00::1:2"
"""

    def test_tagged(self):
        host_pair = self.net.hosts[:2]
        first_host, second_host = host_pair
        first_host_ip = ipaddr.IPv6Network('fc00::1:1/112')
        second_host_ip = ipaddr.IPv6Network('fc00::1:2/112')
        first_host_routed_ip = ipaddr.IPv6Network('fc00::10:1/112')
        second_host_routed_ip = ipaddr.IPv6Network('fc00::20:1/112')
        self.verify_ipv6_routing_pair(
            first_host, first_host_ip, first_host_routed_ip,
            second_host, second_host_ip, second_host_routed_ip)
        # Routing must be re-established after the gateway MACs move.
        self.swap_host_macs(first_host, second_host)
        self.verify_ipv6_routing_pair(
            first_host, first_host_ip, first_host_routed_ip,
            second_host, second_host_ip, second_host_routed_ip)
class FaucetMultipleDPSwitchTopo(Topo):
    """Mininet topology of several Faucet switches connected in a string."""

    def build(self, dpids, n_tagged=0, tagged_vid=100, n_untagged=0):
        '''
        * s switches
        * (n_tagged + n_untagged) hosts per switch
        * (n_tagged + n_untagged + 1) links on switches 0 and s-1, with final link
          being inter-switch
        * (n_tagged + n_untagged + 2) links on switches 0 < n < s-1, with final two links
          being inter-switch
        '''
        # pid makes host/switch names unique across concurrent test runs.
        pid = os.getpid()
        switches = []
        for i, dpid in enumerate(dpids):
            hosts = []
            for host_n in range(n_tagged):
                host = self.addHost('t%xs%ih%s' % (pid % 0xff, i + 1, host_n + 1),
                                    cls=VLANHost, vlan=tagged_vid)
                hosts.append(host)
            for host_n in range(n_untagged):
                host = self.addHost('u%xs%ih%s' % (pid % 0xff, i + 1, host_n + 1))
                hosts.append(host)
            switch = self.addSwitch(
                's%i%x' % (i + 1, pid), cls=FaucetSwitch, listenPort=find_free_port(), dpid=dpid)
            for host in hosts:
                self.addLink(host, switch)
            # Add a switch-to-switch link with the previous switch,
            # if this isn't the first switch in the topology.
            if switches:
                self.addLink(switches[i - 1], switch)
            switches.append(switch)
class FaucetMultipleDPTest(FaucetTest):
    """Base class for tests that run several Faucet datapaths in a string topology."""

    def build_net(self, n_dps=1, n_tagged=0, tagged_vid=100, n_untagged=0,
                  untagged_vid=100, include=None, include_optional=None,
                  acls=None, acl_in_dp=None):
        '''
        Set up Mininet and Faucet for the given topology.
        '''
        # Bug fix: mutable default arguments ([], {}) replaced with None
        # sentinels -- shared defaults could leak state between tests.
        self.dpids = [str(random.randint(1, 2**32)) for _ in range(n_dps)]
        self.topo = FaucetMultipleDPSwitchTopo(
            dpids=self.dpids,
            n_tagged=n_tagged,
            tagged_vid=tagged_vid,
            n_untagged=n_untagged,
        )
        self.CONFIG = self.get_config(
            self.dpids,
            HARDWARE,
            self.monitor_ports_file,
            self.monitor_flow_table_file,
            self.debug_log_path,
            n_tagged,
            tagged_vid,
            n_untagged,
            untagged_vid,
            include,
            include_optional,
            acls,
            acl_in_dp,
        )
        # Bug fix: use a context manager so the config file is flushed and
        # closed before FAUCET reads it (original leaked the handle).
        with open(os.environ['FAUCET_CONFIG'], 'w') as config_file:
            config_file.write(self.CONFIG)

    def get_config(self, dpids=None, hardware=None, monitor_ports_files=None,
                   monitor_flow_table_file=None, ofchannel_log=None,
                   n_tagged=0, tagged_vid=0, n_untagged=0, untagged_vid=0,
                   include=None, include_optional=None, acls=None, acl_in_dp=None):
        '''
        Build a complete Faucet configuration for each datapath, using the given topology.
        Returns the configuration serialized as YAML.
        '''
        if dpids is None:
            dpids = []
        if acl_in_dp is None:
            acl_in_dp = {}
        config = {'version': 2}
        # Includes.
        if include:
            config['include'] = list(include)
        if include_optional:
            config['include-optional'] = list(include_optional)
        # Datapaths.
        if dpids:
            num_switch_links = None
            config['dps'] = {}
            for i, dpid in enumerate(dpids):
                p = 1  # next free port number on this switch
                name = 'faucet-%i' % (i + 1)
                config['dps'][name] = {
                    'dp_id': int(str_int_dpid(dpid)),
                    'hardware': hardware,
                    'monitor_ports': True,
                    'monitor_ports_interval': 5,
                    'monitor_ports_file': monitor_ports_files,
                    'monitor_flow_table': True,
                    # Bug fix: key was misspelled 'monitor_flow_tablet_interval',
                    # so the interval was never applied.
                    'monitor_flow_table_interval': 5,
                    'monitor_flow_table_file': monitor_flow_table_file,
                    'ofchannel_log': ofchannel_log,
                    'interfaces': {},
                }
                for _ in range(n_tagged):
                    config['dps'][name]['interfaces'][p] = {
                        'tagged_vlans': [tagged_vid],
                        'description': 'b%i' % p,
                    }
                    if name in acl_in_dp and p in acl_in_dp[name]:
                        config['dps'][name]['interfaces'][p]['acl_in'] = acl_in_dp[name][p]
                    p += 1
                for _ in range(n_untagged):
                    config['dps'][name]['interfaces'][p] = {
                        'native_vlan': untagged_vid,
                        'description': 'b%i' % p,
                    }
                    if name in acl_in_dp and p in acl_in_dp[name]:
                        config['dps'][name]['interfaces'][p]['acl_in'] = acl_in_dp[name][p]
                    p += 1
                # Add configuration for the switch-to-switch links
                # (0 for a single switch, 1 for an end switch, 2 for middle switches).
                if len(dpids) > 1:
                    num_switch_links = 2 if i > 0 and i != len(dpids)-1 else 1
                else:
                    num_switch_links = 0
                for _ in range(num_switch_links):
                    tagged_vlans = None
                    config['dps'][name]['interfaces'][p] = {
                        'description': 'b%i' % p,
                    }
                    # Bug fix: trunk condition compared host COUNTS
                    # (n_tagged != n_untagged); the parallel vlans condition
                    # below compares the VIDs, which is clearly the intent.
                    if n_tagged and n_untagged and tagged_vid != untagged_vid:
                        tagged_vlans = [tagged_vid, untagged_vid]
                    elif ((n_tagged and not n_untagged) or
                          (n_tagged and n_untagged and tagged_vid == untagged_vid)):
                        tagged_vlans = [tagged_vid]
                    elif n_untagged and not n_tagged:
                        tagged_vlans = [untagged_vid]
                    if tagged_vlans:
                        config['dps'][name]['interfaces'][p]['tagged_vlans'] = tagged_vlans
                    if name in acl_in_dp and p in acl_in_dp[name]:
                        config['dps'][name]['interfaces'][p]['acl_in'] = acl_in_dp[name][p]
                    # Used as the port number for the current switch.
                    p += 1
        # VLANs.
        config['vlans'] = {}
        if n_untagged:
            config['vlans'][untagged_vid] = {
                'description': 'untagged',
            }
        if ((n_tagged and not n_untagged) or
                (n_tagged and n_untagged and tagged_vid != untagged_vid)):
            config['vlans'][tagged_vid] = {
                'description': 'tagged',
            }
        # ACLs.
        if acls:
            config['acls'] = acls.copy()
        return yaml.dump(config, default_flow_style=False)

    def matching_flow_present(self, exp_flow, timeout=10):
        '''
        Override matching_flow_present with a version that (kind of) supports multiple DPs.
        Returns True if exp_flow matches a flow on any DP within the timeout.
        '''
        for dpid in self.dpids:
            int_dpid = str_int_dpid(dpid)
            for _ in range(timeout):
                try:
                    ofctl_result = json.loads(requests.get(
                        '%s/stats/flow/%s' % (self.ofctl_rest_url(), int_dpid)).text)
                except (ValueError, requests.exceptions.ConnectionError):
                    # Didn't get valid JSON, try again
                    time.sleep(1)
                    continue
                dump_flows = ofctl_result[int_dpid]
                for flow in dump_flows:
                    # Re-transform the dictionary into str to re-use
                    # the verify_ipv*_routing methods
                    flow_str = json.dumps(flow)
                    if re.search(exp_flow, flow_str):
                        return True
                time.sleep(1)
        return False
class FaucetMultipleDPUntaggedTest(FaucetMultipleDPTest):
    """All untagged hosts must have connectivity across 3 chained datapaths."""

    NUM_DPS = 3
    NUM_HOSTS = 4
    VID = 100

    def setUp(self):
        super(FaucetMultipleDPUntaggedTest, self).setUp()
        self.build_net(n_dps=self.NUM_DPS, n_untagged=self.NUM_HOSTS, untagged_vid=self.VID)
        self.start_net()

    def test_untagged(self):
        self.assertEquals(0, self.net.pingAll())
class FaucetMultipleDPTaggedTest(FaucetMultipleDPTest):
    """All tagged hosts must have connectivity across 3 chained datapaths."""

    NUM_DPS = 3
    NUM_HOSTS = 4
    VID = 100

    def setUp(self):
        super(FaucetMultipleDPTaggedTest, self).setUp()
        self.build_net(n_dps=self.NUM_DPS, n_tagged=self.NUM_HOSTS, tagged_vid=self.VID)
        self.start_net()

    def test_tagged(self):
        self.assertEquals(0, self.net.pingAll())
class FaucetACLOverrideTest(FaucetMultipleDPTest):
    """Reloading an include-optional ACL file must override the base ACLs."""

    NUM_DPS = 1
    NUM_HOSTS = 2
    VID = 100

    # ACL rules which will get overridden.
    ACLS = {
        1: [
            {'rule': {
                'dl_type': int('0x800', 16),
                'nw_proto': 6,
                'tp_dst': 5001,
                'actions': {
                    'allow': 1,
                },
            }},
            {'rule': {
                'dl_type': int('0x800', 16),
                'nw_proto': 6,
                'tp_dst': 5002,
                'actions': {
                    'allow': 0,
                },
            }},
            {'rule': {
                'actions': {
                    'allow': 1,
                },
            }},
        ],
    }

    # ACL rules which get put into an include-optional
    # file, then reloaded into FAUCET.
    ACLS_OVERRIDE = {
        1: [
            {'rule': {
                'dl_type': int('0x800', 16),
                'nw_proto': 6,
                'tp_dst': 5001,
                'actions': {
                    'allow': 0,
                },
            }},
            {'rule': {
                'dl_type': int('0x800', 16),
                'nw_proto': 6,
                'tp_dst': 5002,
                'actions': {
                    'allow': 1,
                },
            }},
            {'rule': {
                'actions': {
                    'allow': 1,
                },
            }},
        ],
    }

    # DP-to-acl_in port mapping.
    ACL_IN_DP = {
        'faucet-1': {
            # Port 1, acl_in = 1
            1: 1,
        },
    }

    def setUp(self):
        super(FaucetACLOverrideTest, self).setUp()
        self.acls_config = os.path.join(self.tmpdir, 'acls.yaml')
        self.build_net(
            n_dps=self.NUM_DPS,
            n_untagged=self.NUM_HOSTS,
            untagged_vid=self.VID,
            include_optional=[self.acls_config],
            acls=self.ACLS,
            acl_in_dp=self.ACL_IN_DP,
        )
        self.start_net()

    def assert_blocked(self, port):
        # A blocked port: the nc client reads nothing from the listener.
        first_host, second_host = self.net.hosts[0:2]
        second_host.cmd('timeout 10s echo hello | nc -l %i &' % port)
        self.assertEquals(
            '', first_host.cmd('timeout 10s nc %s %i' % (second_host.IP(), port)))
        self.wait_until_matching_flow(r'"packet_count": [1-9]+.+"tp_dst": %i' % port)

    def assert_unblocked(self, port):
        # An unblocked port: the nc client receives the greeting.
        first_host, second_host = self.net.hosts[0:2]
        second_host.cmd('timeout 10s echo hello | nc -l %s %i &' % (second_host.IP(), port))
        time.sleep(1)
        self.assertEquals(
            'hello\r\n',
            first_host.cmd('nc -w 5 %s %i' % (second_host.IP(), port)))
        self.wait_until_matching_flow(r'"packet_count": [1-9]+.+"tp_dst": %i' % port)

    def test_port5001_blocked(self):
        # 5001 is allowed by the base ACLs, blocked after the override.
        self.ping_all_when_learned()
        self.assert_unblocked(5001)
        open(self.acls_config, 'w').write(self.get_config(acls=self.ACLS_OVERRIDE))
        self.hup_faucet()
        time.sleep(1)
        self.assert_blocked(5001)

    def test_port5002_unblocked(self):
        # 5002 is blocked by the base ACLs, allowed after the override.
        self.ping_all_when_learned()
        self.assert_blocked(5002)
        open(self.acls_config, 'w').write(self.get_config(acls=self.ACLS_OVERRIDE))
        self.hup_faucet()
        time.sleep(1)
        self.assert_unblocked(5002)
def import_config():
try:
with open(HW_SWITCH_CONFIG_FILE, 'r') as config_file:
config = yaml.load(config_file)
except:
print 'Could not load YAML config data from %s' % HW_SWITCH_CONFIG_FILE
sys.exit(-1)
if 'hw_switch' in config and config['hw_switch']:
required_config = ['dp_ports']
for required_key in required_config:
if required_key not in config:
print '%s must be specified in %s to use HW switch.' % (
required_key, HW_SWITCH_CONFIG_FILE)
sys.exit(-1)
dp_ports = config['dp_ports']
if len(dp_ports) != REQUIRED_TEST_PORTS:
print ('Exactly %u dataplane ports are required, '
'%d are provided in %s.' %
(REQUIRED_TEST_PORTS, len(dp_ports), HW_SWITCH_CONFIG_FILE))
for i, switch_port in enumerate(dp_ports):
test_port_name = 'port_%u' % (i + 1)
global PORT_MAP
PORT_MAP[test_port_name] = switch_port
global SWITCH_MAP
SWITCH_MAP[test_port_name] = dp_ports[switch_port]
if 'dpid' in config:
global DPID
DPID = config['dpid']
if 'hardware' in config:
global HARDWARE
HARDWARE = config['hardware']
def check_dependencies():
for (binary, binary_get_version, binary_present_re,
binary_version_re, binary_minversion) in EXTERNAL_DEPENDENCIES:
binary_args = [binary] + binary_get_version
required_binary = 'required binary/library %s' % (
' '.join(binary_args))
try:
proc = subprocess.Popen(
binary_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
proc_out, proc_err = proc.communicate()
binary_output = proc_out
if proc_err is not None:
binary_output += proc_err
except subprocess.CalledProcessError:
# Might have run successfully, need to parse output
pass
except OSError:
print 'could not run %s' % required_binary
return False
present_match = re.search(binary_present_re, binary_output)
if not present_match:
print '%s not present or did not return expected string %s' % (
required_binary, binary_present_re)
return False
if binary_version_re:
version_match = re.search(binary_version_re, binary_output)
if version_match is None:
print 'could not get version from %s (%s)' % (
required_binary, binary_output)
return False
try:
binary_version = float(version_match.group(1))
except ValueError:
print 'cannot parse version %s for %s' % (
version_match, required_binary)
return False
if binary_version < binary_minversion:
print '%s version %.1f is less than required version %.1f' % (
required_binary, binary_version, binary_minversion)
return False
print '%s version is %.1f' % (required_binary, binary_version)
else:
print '%s present (%s)' % (required_binary, binary_present_re)
return True
def lint_check():
for faucet_src in FAUCET_LINT_SRCS:
ret = subprocess.call(['pylint', '-E', faucet_src])
if ret:
print 'lint of %s returns an error' % faucet_src
return False
return True
def make_suite(tc_class):
testloader = unittest.TestLoader()
testnames = testloader.getTestCaseNames(tc_class)
suite = unittest.TestSuite()
for name in testnames:
suite.addTest(tc_class(name))
return suite
def run_tests():
requested_test_classes = sys.argv[1:]
single_tests = unittest.TestSuite()
parallel_tests = unittest.TestSuite()
for name, obj in inspect.getmembers(sys.modules[__name__]):
if not inspect.isclass(obj):
continue
if requested_test_classes and name not in requested_test_classes:
continue
if name.endswith('Test') and name.startswith('Faucet'):
print 'adding test %s' % name
if SWITCH_MAP or name.startswith('FaucetSingle'):
single_tests.addTest(make_suite(obj))
else:
parallel_tests.addTest(make_suite(obj))
print 'running %u tests in parallel and %u tests serial' % (
parallel_tests.countTestCases(), single_tests.countTestCases())
results = []
if parallel_tests.countTestCases():
max_parallel_tests = max(parallel_tests.countTestCases(), MAX_PARALLEL_TESTS)
parallel_runner = unittest.TextTestRunner()
parallel_suite = ConcurrentTestSuite(
parallel_tests, fork_for_tests(max_parallel_tests))
results.append(parallel_runner.run(parallel_suite))
# TODO: Tests that are serialized generally depend on hardcoded ports.
# Make them use dynamic ports.
if single_tests.countTestCases():
single_runner = unittest.TextTestRunner()
results.append(single_runner.run(single_tests))
for result in results:
if not result.wasSuccessful():
print result.printErrors()
if __name__ == '__main__':
    # -c/--clean removes state left over from previous (possibly crashed) runs.
    if '-c' in sys.argv[1:] or '--clean' in sys.argv[1:]:
        print 'Cleaning up test interfaces, processes and openvswitch configuration from previous test runs'
        Cleanup.cleanup()
        sys.exit(0)
    # Refuse to run without the external tool dependencies.
    if not check_dependencies():
        print ('dependency check failed. check required library/binary '
               'list in header of this script')
        sys.exit(-1)
    # Refuse to run if FAUCET sources do not pass pylint -E.
    if not lint_check():
        print 'pylint must pass with no errors'
        sys.exit(-1)
    import_config()
    run_tests()
| |
from totalimpactwebapp import json_sqlalchemy
from util import commit
from util import cached_property
from util import dict_from_dir
from totalimpactwebapp import db
from birdy.twitter import AppClient, TwitterApiError, TwitterRateLimitError, TwitterClientError
from collections import defaultdict
import os
import re
import datetime
import logging
import dateutil.parser
logger = logging.getLogger('ti.tweeter')
# from https://github.com/inueni/birdy/issues/7
# to override JSONObject
class AppDictClient(AppClient):
    """AppClient subclass whose JSON hook returns the parsed data unchanged.

    birdy normally wraps parsed JSON responses in its JSONObject type;
    overriding the hook to the identity keeps responses as plain dicts/lists.
    """
    @staticmethod
    def get_json_object_hook(data):
        # identity hook: keep whatever the JSON parser produced
        return data
def handle_all_user_lookups(user_dicts_from_twitter, tweeters):
    """Refresh each Tweeter row from the Twitter users/lookup response.

    Tweeters present in the response get their attributes updated; tweeters
    absent from it are marked deleted. Every tweeter is merged into the
    current db session. Returns True.
    """
    # Plain dict keyed by lowercased screen_name. The original used
    # defaultdict(str), which served no purpose since membership was tested
    # explicitly, and `in d.keys()` is an O(n) scan under Python 2.
    dicts_by_screen_name = {}
    for user_dict in user_dicts_from_twitter:
        dicts_by_screen_name[user_dict["screen_name"].lower()] = user_dict
    for tweeter in tweeters:
        user_dict = dicts_by_screen_name.get(tweeter.screen_name.lower())
        if user_dict is not None:
            tweeter.set_attributes_from_twitter_data(user_dict)
        else:
            # not returned by the lookup: the account no longer exists
            tweeter.set_as_deleted()
        db.session.merge(tweeter)
    return True
def get_and_save_tweeter_followers(tweeters):
    """Look up up-to-date profile data for the given tweeters and persist it.

    Twitter errors are logged and swallowed (best-effort refresh); the db
    session is committed either way.
    """
    client = AppDictClient(
        os.getenv("TWITTER_CONSUMER_KEY"),
        os.getenv("TWITTER_CONSUMER_SECRET"),
        access_token=os.getenv("TWITTER_ACCESS_TOKEN")
    )
    logger.info(u"Length of tweeters {num}".format(
        num=len(tweeters)))
    screen_names_string = ",".join([tweeter.screen_name for tweeter in tweeters])
    try:
        response = client.api.users.lookup.post(screen_name=screen_names_string)
        handle_all_user_lookups(response.data, tweeters)
    # NOTE: TwitterRateLimitError subclasses TwitterApiError in birdy, so it
    # must be caught *before* TwitterApiError or its handler is unreachable.
    except TwitterRateLimitError:
        logger.exception("TwitterRateLimitError error, skipping")
        # not totally sure what else I should do here. retry somehow, or catch on cleanup run?
    except TwitterApiError:
        logger.exception("TwitterApiError error, skipping")
    except TwitterClientError:
        logger.exception("TwitterClientError error, skipping")
    commit(db)
    return
# example payload from twitter: https://dev.twitter.com/rest/reference/get/users/lookup
class Tweeter(db.Model):
    """A Twitter account we track, refreshed periodically from the Twitter API."""
    screen_name = db.Column(db.Text, primary_key=True)
    twitter_id = db.Column(db.Integer)  # alter table tweeter add twitter_id int4
    followers = db.Column(db.Integer)
    name = db.Column(db.Text)
    description = db.Column(db.Text)
    location = db.Column(db.Text)  # alter table tweeter add location text
    image_url = db.Column(db.Text)
    profile_url = db.Column(db.Text)  # alter table tweeter add profile_url text
    num_statuses = db.Column(db.Integer)  # alter table tweeter add num_statuses int4
    num_follows = db.Column(db.Integer)  # alter table tweeter add num_follows int4
    last_collected_date = db.Column(db.DateTime())  # alter table tweeter add last_collected_date timestamp
    is_deleted = db.Column(db.Boolean)  # alter table tweeter add is_deleted bool
    def __init__(self, **kwargs):
        # default last_collected_date to "now" unless the caller supplied one
        if "last_collected_date" not in kwargs:
            self.last_collected_date = datetime.datetime.utcnow()
        super(Tweeter, self).__init__(**kwargs)
    @cached_property
    def display_image_url(self):
        """Return an https profile-image URL at 'reasonably_small' size ('' if none)."""
        if not self.image_url:
            return ""
        ret = self.image_url.replace("http://", "https://")
        ret = ret.replace("_normal", "_reasonably_small")
        return ret
    def set_attributes_from_altmetric_post(self, post):
        """Populate the subset of fields available in an altmetric post payload."""
        self.followers = post["author"].get("followers", 0)
        self.name = post["author"].get("name", self.screen_name)
        self.description = post["author"].get("description", "")
        self.image_url = post["author"].get("image", None)
        # don't update last_collected date, because altmetric data is from old tweet
        return self
    def set_attributes_from_twitter_data(self, data):
        """Populate fields from a Twitter users/lookup user dict and stamp collection time."""
        self.followers = data.get("followers_count", 0)
        self.name = data.get("name", self.screen_name)
        self.description = data.get("description", "")
        self.image_url = data.get("profile_image_url", None)
        self.profile_url = data.get("url", None)
        self.location = data.get("location", None)
        self.num_statuses = data.get("statuses_count", None)
        self.num_follows = data.get("friends_count", None)
        self.twitter_id = data.get("id", None)
        self.last_collected_date = datetime.datetime.utcnow()
        return self
    def set_as_deleted(self):
        """Mark the account as gone from Twitter and stamp collection time."""
        self.is_deleted = True
        self.last_collected_date = datetime.datetime.utcnow()
        return self
    def __repr__(self):
        return u'<Tweeter {screen_name} {followers}>'.format(
            screen_name=self.screen_name,
            followers=self.followers)
    def to_dict(self):
        # serialize every public attribute except the back-reference to tweet
        attributes_to_ignore = [
            "tweet"
        ]
        ret = dict_from_dir(self, attributes_to_ignore)
        return ret
# example
# [
# {
# "name": "Twitter API",
# "profile_sidebar_fill_color": "DDEEF6",
# "profile_background_tile": false,
# "profile_sidebar_border_color": "C0DEED",
# "profile_image_url": "http://a0.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3_normal.png",
# "location": "San Francisco, CA",
# "created_at": "Wed May 23 06:01:13 +0000 2007",
# "follow_request_sent": false,
# "id_str": "6253282",
# "profile_link_color": "0084B4",
# "is_translator": false,
# "default_profile": true,
# "favourites_count": 24,
# "contributors_enabled": true,
# "url": "http://dev.twitter.com",
# "profile_image_url_https": "https://si0.twimg.com/profile_images/2284174872/7df3h38zabcvjylnyfe3_normal.png",
# "utc_offset": -28800,
# "id": 6253282,
# "profile_use_background_image": true,
# "listed_count": 10713,
# "profile_text_color": "333333",
# "lang": "en",
# "followers_count": 1198334,
# "protected": false,
# "profile_background_image_url_https": "https://si0.twimg.com/images/themes/theme1/bg.png",
# "geo_enabled": true,
# "description": "The Real Twitter API. I tweet about API changes, service issues and happily answer questions about Twitter and our API. Don't get an answer? It's on my website.",
# "profile_background_color": "C0DEED",
# "verified": true,
# "notifications": false,
# "time_zone": "Pacific Time (US & Canada)",
# "statuses_count": 3331,
# "status": {
# "coordinates": null,
# "created_at": "Fri Aug 24 16:15:49 +0000 2012",
# "favorited": false,
# "truncated": false,
# "id_str": "239033279343382529",
# "in_reply_to_user_id_str": "134727529",
# "text": "@gregclermont no, there is not. ^TS",
# "contributors": null,
# "retweet_count": 0,
# "id": 239033279343382529,
# "in_reply_to_status_id_str": "238933943146131456",
# "geo": null,
# "retweeted": false,
# "in_reply_to_user_id": 134727529,
# "place": null,
# "source": "<a href="//sites.google.com/site/yorufukurou/\"" rel="\"nofollow\"">YoruFukurou</a>",
# "in_reply_to_screen_name": "gregclermont",
# "in_reply_to_status_id": 238933943146131456
# },
# "profile_background_image_url": "http://a0.twimg.com/images/themes/theme1/bg.png",
# "default_profile_image": false,
# "friends_count": 31,
# "screen_name": "twitterapi",
# "following": true,
# "show_all_inline_media": false
# },
# {
# "name": "Twitter",
# "profile_sidebar_fill_color": "F6F6F6",
# "profile_background_tile": true,
# "profile_sidebar_border_color": "EEEEEE",
# "profile_image_url": "http://a0.twimg.com/profile_images/2284174758/v65oai7fxn47qv9nectx_normal.png",
# "location": "San Francisco, CA",
# "created_at": "Tue Feb 20 14:35:54 +0000 2007",
# "follow_request_sent": false,
# "id_str": "783214",
# "profile_link_color": "038543",
# "is_translator": false,
# "default_profile": false,
# "favourites_count": 17,
# "contributors_enabled": true,
# "url": "http://blog.twitter.com/",
# "profile_image_url_https": "https://si0.twimg.com/profile_images/2284174758/v65oai7fxn47qv9nectx_normal.png",
# "utc_offset": -28800,
# "id": 783214,
# "profile_banner_url": "https://si0.twimg.com/brand_banners/twitter/1323368512/live",
# "profile_use_background_image": true,
# "listed_count": 72534,
# "profile_text_color": "333333",
# "lang": "en",
# "followers_count": 12788713,
# "protected": false,
# "profile_background_image_url_https": "https://si0.twimg.com/profile_background_images/378245879/Twitter_1544x2000.png",
# "geo_enabled": true,
# "description": "Always wondering what's happening. ",
# "profile_background_color": "ACDED6",
# "verified": true,
# "notifications": false,
# "time_zone": "Pacific Time (US & Canada)",
# "statuses_count": 1379,
# "profile_background_image_url": "http://a0.twimg.com/profile_background_images/378245879/Twitter_1544x2000.png",
# "default_profile_image": false,
# "friends_count": 1195,
# "screen_name": "twitter",
# "following": true,
# "show_all_inline_media": true
# }
# ]
| |
from __future__ import print_function, division
from sympy.core.assumptions import StdFactKB
from sympy.core.compatibility import string_types, range
from .basic import Basic
from .sympify import sympify
from .singleton import S
from .expr import Expr, AtomicExpr
from .cache import cacheit
from .function import FunctionClass
from sympy.core.logic import fuzzy_bool
from sympy.logic.boolalg import Boolean
from sympy.utilities.iterables import cartes
import string
import re as _re
class Symbol(AtomicExpr, Boolean):
    """
    Assumptions:
       commutative = True
    You can override the default assumptions in the constructor:
    >>> from sympy import symbols
    >>> A,B = symbols('A,B', commutative = False)
    >>> bool(A*B != B*A)
    True
    >>> bool(A*B*2 == 2*A*B) == True # multiplication by scalars is commutative
    True
    """
    is_comparable = False
    __slots__ = ['name']
    is_Symbol = True
    is_symbol = True
    @property
    def _diff_wrt(self):
        """Allow derivatives wrt Symbols.
        Examples
        ========
        >>> from sympy import Symbol
        >>> x = Symbol('x')
        >>> x._diff_wrt
        True
        """
        return True
    @staticmethod
    def _sanitize(assumptions, obj=None):
        """Remove None, convert values to bool, check commutativity *in place*.
        """
        # be strict about commutativity: cannot be None
        is_commutative = fuzzy_bool(assumptions.get('commutative', True))
        if is_commutative is None:
            whose = '%s ' % obj.__name__ if obj else ''
            raise ValueError(
                '%scommutativity must be True or False.' % whose)
        # sanitize other assumptions so 1 -> True and 0 -> False.
        # The deprecated-name map is built once here; the original rebuilt a
        # defaultdict (and re-ran two imports) on every loop iteration.
        keymap = {'bounded': 'finite', 'unbounded': 'infinite',
                  'infinitesimal': 'zero'}
        for key in list(assumptions.keys()):
            if key in keymap:
                # local import kept deliberately lazy: only needed when a
                # deprecated assumption name is actually used
                from sympy.utilities.exceptions import SymPyDeprecationWarning
                SymPyDeprecationWarning(
                    feature="%s assumption" % key,
                    useinstead="%s" % keymap[key],
                    issue=8071,
                    deprecated_since_version="0.7.6").warn()
                assumptions[keymap[key]] = assumptions[key]
                assumptions.pop(key)
                key = keymap[key]
            v = assumptions[key]
            if v is None:
                assumptions.pop(key)
                continue
            assumptions[key] = bool(v)
    def __new__(cls, name, **assumptions):
        """Symbols are identified by name and assumptions::
        >>> from sympy import Symbol
        >>> Symbol("x") == Symbol("x")
        True
        >>> Symbol("x", real=True) == Symbol("x", real=False)
        False
        """
        cls._sanitize(assumptions, cls)
        return Symbol.__xnew_cached_(cls, name, **assumptions)
    def __new_stage2__(cls, name, **assumptions):
        if not isinstance(name, string_types):
            raise TypeError("name should be a string, not %s" % repr(type(name)))
        obj = Expr.__new__(cls)
        obj.name = name
        # TODO: Issue #8873: Forcing the commutative assumption here means
        # later code such as ``srepr()`` cannot tell whether the user
        # specified ``commutative=True`` or omitted it. To workaround this,
        # we keep a copy of the assumptions dict, then create the StdFactKB,
        # and finally overwrite its ``._generator`` with the dict copy. This
        # is a bit of a hack because we assume StdFactKB merely copies the
        # given dict as ``._generator``, but future modification might, e.g.,
        # compute a minimal equivalent assumption set.
        tmp_asm_copy = assumptions.copy()
        # be strict about commutativity
        is_commutative = fuzzy_bool(assumptions.get('commutative', True))
        assumptions['commutative'] = is_commutative
        obj._assumptions = StdFactKB(assumptions)
        obj._assumptions._generator = tmp_asm_copy  # Issue #8873
        return obj
    __xnew__ = staticmethod(
        __new_stage2__)  # never cached (e.g. dummy)
    __xnew_cached_ = staticmethod(
        cacheit(__new_stage2__))  # symbols are always cached
    def __getnewargs__(self):
        return (self.name,)
    def __getstate__(self):
        return {'_assumptions': self._assumptions}
    def _hashable_content(self):
        # Note: user-specified assumptions not hashed, just derived ones
        return (self.name,) + tuple(sorted(self.assumptions0.items()))
    @property
    def assumptions0(self):
        # only the assumptions with a definite (non-None) value
        return dict((key, value) for key, value
                    in self._assumptions.items() if value is not None)
    @cacheit
    def sort_key(self, order=None):
        return self.class_key(), (1, (str(self),)), S.One.sort_key(), S.One
    def as_dummy(self):
        """Return a Dummy having the same name and same assumptions as self."""
        return Dummy(self.name, **self._assumptions.generator)
    def __call__(self, *args):
        from .function import Function
        return Function(self.name)(*args)
    def as_real_imag(self, deep=True, **hints):
        from sympy import im, re
        if hints.get('ignore') == self:
            return None
        else:
            return (re(self), im(self))
    def _sage_(self):
        import sage.all as sage
        return sage.var(self.name)
    def is_constant(self, *wrt, **flags):
        # a Symbol is constant wrt some symbols only if it is not one of them
        if not wrt:
            return False
        return self not in wrt
    @property
    def free_symbols(self):
        return {self}
class Dummy(Symbol):
    """Dummy symbols are each unique, identified by an internal count index:
    >>> from sympy import Dummy
    >>> bool(Dummy("x") == Dummy("x")) == True
    False
    If a name is not supplied then a string value of the count index will be
    used. This is useful when a temporary variable is needed and the name
    of the variable used in the expression is not important.
    >>> Dummy() #doctest: +SKIP
    _Dummy_10
    """
    # class-wide counter: used both to auto-name dummies and as the
    # per-instance uniquifying index
    _count = 0
    __slots__ = ['dummy_index']
    is_Dummy = True
    def __new__(cls, name=None, **assumptions):
        # NOTE: the auto-generated name uses the pre-increment count while
        # dummy_index stores the post-increment value -- statement order here
        # is load-bearing.
        if name is None:
            name = "Dummy_" + str(Dummy._count)
        cls._sanitize(assumptions, cls)
        obj = Symbol.__xnew__(cls, name, **assumptions)
        Dummy._count += 1
        obj.dummy_index = Dummy._count
        return obj
    def __getstate__(self):
        # dummy_index must survive pickling so equality/hashing stay stable
        return {'_assumptions': self._assumptions, 'dummy_index': self.dummy_index}
    @cacheit
    def sort_key(self, order=None):
        # leading 2 (Symbol uses 1) and the dummy_index distinguish Dummy keys
        return self.class_key(), (
            2, (str(self), self.dummy_index)), S.One.sort_key(), S.One
    def _hashable_content(self):
        # include dummy_index so two same-named Dummies never compare equal
        return Symbol._hashable_content(self) + (self.dummy_index,)
class Wild(Symbol):
    """
    A Wild symbol matches anything, or anything
    without whatever is explicitly excluded.
    Examples
    ========
    >>> from sympy import Wild, WildFunction, cos, pi
    >>> from sympy.abc import x, y, z
    >>> a = Wild('a')
    >>> x.match(a)
    {a_: x}
    >>> pi.match(a)
    {a_: pi}
    >>> (3*x**2).match(a*x)
    {a_: 3*x}
    >>> cos(x).match(a)
    {a_: cos(x)}
    >>> b = Wild('b', exclude=[x])
    >>> (3*x**2).match(b*x)
    >>> b.match(a)
    {a_: b_}
    >>> A = WildFunction('A')
    >>> A.match(a)
    {a_: A_}
    Tips
    ====
    When using Wild, be sure to use the exclude
    keyword to make the pattern more precise.
    Without the exclude pattern, you may get matches
    that are technically correct, but not what you
    wanted. For example, using the above without
    exclude:
    >>> from sympy import symbols
    >>> a, b = symbols('a b', cls=Wild)
    >>> (2 + 3*y).match(a*x + b*y)
    {a_: 2/x, b_: 3}
    This is technically correct, because
    (2/x)*x + 3*y == 2 + 3*y, but you probably
    wanted it to not match at all. The issue is that
    you really didn't want a and b to include x and y,
    and the exclude parameter lets you specify exactly
    this. With the exclude parameter, the pattern will
    not match.
    >>> a = Wild('a', exclude=[x, y])
    >>> b = Wild('b', exclude=[x, y])
    >>> (2 + 3*y).match(a*x + b*y)
    Exclude also helps remove ambiguity from matches.
    >>> E = 2*x**3*y*z
    >>> a, b = symbols('a b', cls=Wild)
    >>> E.match(a*b)
    {a_: 2*y*z, b_: x**3}
    >>> a = Wild('a', exclude=[x, y])
    >>> E.match(a*b)
    {a_: z, b_: 2*x**3*y}
    >>> a = Wild('a', exclude=[x, y, z])
    >>> E.match(a*b)
    {a_: 2, b_: x**3*y*z}
    """
    is_Wild = True
    __slots__ = ['exclude', 'properties']
    def __new__(cls, name, exclude=(), properties=(), **assumptions):
        exclude = tuple([sympify(x) for x in exclude])
        properties = tuple(properties)
        cls._sanitize(assumptions, cls)
        return Wild.__xnew__(cls, name, exclude, properties, **assumptions)
    def __getnewargs__(self):
        return (self.name, self.exclude, self.properties)
    @staticmethod
    @cacheit
    def __xnew__(cls, name, exclude, properties, **assumptions):
        obj = Symbol.__xnew__(cls, name, **assumptions)
        obj.exclude = exclude
        obj.properties = properties
        return obj
    def _hashable_content(self):
        return super(Wild, self)._hashable_content() + (self.exclude, self.properties)
    # TODO add check against another Wild
    def matches(self, expr, repl_dict=None, old=False):
        """Return repl_dict extended with {self: expr}, or None when expr is
        excluded or fails a property check.

        The None sentinel replaces the original mutable default ``{}``
        (shared-default pitfall); passing a dict is still supported and it is
        copied, never mutated.
        """
        if any(expr.has(x) for x in self.exclude):
            return None
        if any(not f(expr) for f in self.properties):
            return None
        repl_dict = {} if repl_dict is None else repl_dict.copy()
        repl_dict[self] = expr
        return repl_dict
    def __call__(self, *args, **kwargs):
        raise TypeError("'%s' object is not callable" % type(self).__name__)
_range = _re.compile('([0-9]*:[0-9]+|[a-zA-Z]?:[a-zA-Z])')
def symbols(names, **args):
    """
    Transform strings into instances of :class:`Symbol` class.
    :func:`symbols` function returns a sequence of symbols with names taken
    from ``names`` argument, which can be a comma or whitespace delimited
    string, or a sequence of strings::
    >>> from sympy import symbols, Function
    >>> x, y, z = symbols('x,y,z')
    >>> a, b, c = symbols('a b c')
    The type of output is dependent on the properties of input arguments::
    >>> symbols('x')
    x
    >>> symbols('x,')
    (x,)
    >>> symbols('x,y')
    (x, y)
    >>> symbols(('a', 'b', 'c'))
    (a, b, c)
    >>> symbols(['a', 'b', 'c'])
    [a, b, c]
    >>> symbols(set(['a', 'b', 'c']))
    set([a, b, c])
    If an iterable container is needed for a single symbol, set the ``seq``
    argument to ``True`` or terminate the symbol name with a comma::
    >>> symbols('x', seq=True)
    (x,)
    To reduce typing, range syntax is supported to create indexed symbols.
    Ranges are indicated by a colon and the type of range is determined by
    the character to the right of the colon. If the character is a digit
    then all contiguous digits to the left are taken as the nonnegative
    starting value (or 0 if there is no digit left of the colon) and all
    contiguous digits to the right are taken as 1 greater than the ending
    value::
    >>> symbols('x:10')
    (x0, x1, x2, x3, x4, x5, x6, x7, x8, x9)
    >>> symbols('x5:10')
    (x5, x6, x7, x8, x9)
    >>> symbols('x5(:2)')
    (x50, x51)
    >>> symbols('x5:10,y:5')
    (x5, x6, x7, x8, x9, y0, y1, y2, y3, y4)
    >>> symbols(('x5:10', 'y:5'))
    ((x5, x6, x7, x8, x9), (y0, y1, y2, y3, y4))
    If the character to the right of the colon is a letter, then the single
    letter to the left (or 'a' if there is none) is taken as the start
    and all characters in the lexicographic range *through* the letter to
    the right are used as the range::
    >>> symbols('x:z')
    (x, y, z)
    >>> symbols('x:c') # null range
    ()
    >>> symbols('x(:c)')
    (xa, xb, xc)
    >>> symbols(':c')
    (a, b, c)
    >>> symbols('a:d, x:z')
    (a, b, c, d, x, y, z)
    >>> symbols(('a:d', 'x:z'))
    ((a, b, c, d), (x, y, z))
    Multiple ranges are supported; contiguous numerical ranges should be
    separated by parentheses to disambiguate the ending number of one
    range from the starting number of the next::
    >>> symbols('x:2(1:3)')
    (x01, x02, x11, x12)
    >>> symbols(':3:2') # parsing is from left to right
    (00, 01, 10, 11, 20, 21)
    Only one pair of parentheses surrounding ranges are removed, so to
    include parentheses around ranges, double them. And to include spaces,
    commas, or colons, escape them with a backslash::
    >>> symbols('x((a:b))')
    (x(a), x(b))
    >>> symbols('x(:1\,:2)') # or 'x((:1)\,(:2))'
    (x(0,0), x(0,1))
    All newly created symbols have assumptions set according to ``args``::
    >>> a = symbols('a', integer=True)
    >>> a.is_integer
    True
    >>> x, y, z = symbols('x,y,z', real=True)
    >>> x.is_real and y.is_real and z.is_real
    True
    Despite its name, :func:`symbols` can create symbol-like objects like
    instances of Function or Wild classes. To achieve this, set ``cls``
    keyword argument to the desired type::
    >>> symbols('f,g,h', cls=Function)
    (f, g, h)
    >>> type(_[0])
    <class 'sympy.core.function.UndefinedFunction'>
    """
    result = []
    if isinstance(names, string_types):
        marker = 0
        literals = ['\,', '\:', '\ ']
        # Replace each backslash-escaped delimiter with an otherwise-unused
        # control character so the comma/space/colon splits below leave it
        # intact; literal() restores the real character at creation time.
        for i in range(len(literals)):
            lit = literals.pop(0)
            if lit in names:
                while chr(marker) in names:
                    marker += 1
                lit_char = chr(marker)
                marker += 1
                names = names.replace(lit, lit_char)
                literals.append((lit_char, lit[1:]))
        def literal(s):
            # undo the placeholder substitution performed above
            if literals:
                for c, l in literals:
                    s = s.replace(c, l)
            return s
        names = names.strip()
        as_seq = names.endswith(',')
        if as_seq:
            names = names[:-1].rstrip()
        if not names:
            raise ValueError('no symbols given')
        # split on commas
        names = [n.strip() for n in names.split(',')]
        if not all(n for n in names):
            raise ValueError('missing symbol between commas')
        # split on spaces (iterate backwards so the in-place slice
        # assignment does not disturb indices still to be visited)
        for i in range(len(names) - 1, -1, -1):
            names[i: i + 1] = names[i].split()
        cls = args.pop('cls', Symbol)
        seq = args.pop('seq', as_seq)
        for name in names:
            if not name:
                raise ValueError('missing symbol')
            if ':' not in name:
                # plain name, no range expansion needed
                symbol = cls(literal(name), **args)
                result.append(symbol)
                continue
            split = _range.split(name)
            # remove 1 layer of bounding parentheses around ranges
            for i in range(len(split) - 1):
                if i and ':' in split[i] and split[i] != ':' and \
                        split[i - 1].endswith('(') and \
                        split[i + 1].startswith(')'):
                    split[i - 1] = split[i - 1][:-1]
                    split[i + 1] = split[i + 1][1:]
            # expand each range piece into the list of names it denotes:
            # digits -> half-open numeric range, letters -> inclusive
            # lexicographic range
            for i, s in enumerate(split):
                if ':' in s:
                    if s[-1].endswith(':'):
                        raise ValueError('missing end range')
                    a, b = s.split(':')
                    if b[-1] in string.digits:
                        a = 0 if not a else int(a)
                        b = int(b)
                        split[i] = [str(c) for c in range(a, b)]
                    else:
                        a = a or 'a'
                        split[i] = [string.ascii_letters[c] for c in range(
                            string.ascii_letters.index(a),
                            string.ascii_letters.index(b) + 1)]  # inclusive
                    if not split[i]:
                        break
                else:
                    split[i] = [s]
            else:
                # no null range encountered: range output is always a sequence
                seq = True
            if len(split) == 1:
                names = split[0]
            else:
                # cartesian product of all the expanded pieces
                names = [''.join(s) for s in cartes(*split)]
            if literals:
                result.extend([cls(literal(s), **args) for s in names])
            else:
                result.extend([cls(s, **args) for s in names])
        if not seq and len(result) <= 1:
            if not result:
                return ()
            return result[0]
        return tuple(result)
    else:
        # non-string input: recurse per element and preserve the container type
        for name in names:
            result.append(symbols(name, **args))
        return type(names)(result)
def var(names, **args):
    """
    Create symbols and inject them into the global namespace.
    This calls :func:`symbols` with the same arguments and puts the results
    into the *global* namespace. It's recommended not to use :func:`var` in
    library code, where :func:`symbols` has to be used::
    Examples
    ========
    >>> from sympy import var
    >>> var('x')
    x
    >>> x
    x
    >>> var('a,ab,abc')
    (a, ab, abc)
    >>> abc
    abc
    >>> var('x,y', real=True)
    (x, y)
    >>> x.is_real and y.is_real
    True
    See :func:`symbol` documentation for more details on what kinds of
    arguments can be passed to :func:`var`.
    """
    def traverse(symbols, frame):
        """Recursively inject symbols to the global namespace. """
        for symbol in symbols:
            if isinstance(symbol, Basic):
                frame.f_globals[symbol.name] = symbol
            elif isinstance(symbol, FunctionClass):
                frame.f_globals[symbol.__name__] = symbol
            else:
                # nested container (e.g. tuple of tuples): recurse into it
                traverse(symbol, frame)
    from inspect import currentframe
    # inject into the *caller's* globals, not this module's
    frame = currentframe().f_back
    try:
        syms = symbols(names, **args)
        if syms is not None:
            if isinstance(syms, Basic):
                frame.f_globals[syms.name] = syms
            elif isinstance(syms, FunctionClass):
                frame.f_globals[syms.__name__] = syms
            else:
                traverse(syms, frame)
    finally:
        del frame  # break cyclic dependencies as stated in inspect docs
    return syms
| |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 23 11:33:24 2018
@author: stuart
"""
#group tools for nanowire-plugin
############################
### Group specific tools ###
############################
from minio import Minio
import subprocess
import os
import tarfile
import traceback
import logging
import json
from os import environ
import inspect
import requests
import time
#import time
import sys
import datetime
import shutil
#import hashlib
from nanowire_plugin import send
#import the relevant version of urllib depending on the version of python we are running
import urllib
#set up the logger globally
logger = logging.getLogger("nanowire-plugin")
logging.getLogger("urllib").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
#This is the worker that does the uploading
class GroupWorker(object):
    """Polls the controller for GROUP tasks, runs the user-supplied function
    over the cached group files, and ships results to minio / the next plugin.
    """
    def __init__(self, function, minio_client, monitor_url, debug_mode):
        self.function = function
        self.minio_client = minio_client
        self.monitor_url = monitor_url
        self.debug_mode = debug_mode
        self.connect_max_retries=5
        # self.new marks the very first successful contact with the controller
        self.new = True
        # current retry delay in seconds; 0 means recently connected
        self.backoff = 0
    def run(self):
        # Main poll loop -- never returns.
        while True:
            try:
                if 'REQUEST_TIMEOUT' in os.environ.keys():
                    time_out = int(os.environ['REQUEST_TIMEOUT'])
                else:
                    time_out = 5
                message = requests.get(os.environ['CONTROLLER_BASE_URI'] + '/v1/tasks/?pluginId=' + os.environ['PLUGIN_ID'] + '&pluginInstance=' + os.environ['POD_NAME'], timeout = time_out)
                code = message.status_code
            except:
                # any connection/env failure is treated like a server error
                code = 500
            if code == 200:
                payload = json.loads(message.text)
                meta = payload['metadata']
                try:
                    tar_url = meta['task']['metadata']['cacheURL']
                    found_tarball=True
                except:
                    tar_url = None
                if meta['job']['workflow']['type'] != 'GROUP':
                    logger.warning("GROUP PLUGIN WAS SENT A NONE GROUP JOB")
                else:
                    if tar_url != None:
                        try:
                            pull_and_extract_tarball(tar_url, '/cache')
                        except Exception as exp:
                            # only a missing tarball flips the flag; any other
                            # extraction error is silently ignored here
                            if "COULD NOT FIND TARBALL AT" in str(exp):
                                found_tarball = False
                        if found_tarball:
                            read_obj = reader()
                            write_obj = writer(meta)
                            #************** There needs to be some way of getting the url before we hit this
                            try:
                                #result = self.function(meta, jsonld, url)
                                result = run_group_function(self.function, read_obj, write_obj, meta)
                            except Exception as exp:
                                # in debug mode report the full traceback,
                                # otherwise just the exception message
                                if self.debug_mode > 0:
                                    result = str(traceback.format_exc())
                                    logger.info("THERE WAS A PROBLEM RUNNING THE MAIN FUNCTION: %s"%str(result))
                                else:
                                    result = str(exp)
                                    logger.info("THERE WAS A PROBLEM RUNNING THE MAIN FUNCTION: %s"%str(result))
                        else:
                            result = "GROUP TARBALL IS MISSING"
                    else:
                        result = "GROUP TARBALL IS MISSING"
                    #send the result to minio and close everything down
                    minio_sender = Minio_tool(self.minio_client)
                    # a str result signals failure, so only non-str results
                    # get their output file uploaded
                    if not isinstance(result, str):
                        try:
                            new_payloads = minio_sender.send_file("/output/results.json", meta)
                            if isinstance(result, dict):
                                result['storePayloads'] = new_payloads
                            elif isinstance(result, str):
                                pass
                            else:
                                result = {'storePayloads':new_payloads}
                            #out_dict = {'storePayloads':new_payloads}
                        except Exception as exp:
                            logger.info("FAILED TO SEND RESULT: %s"%str(exp))
                            result = str(exp)
                    #send our results to the next plugin in the queue
                    logger.info("FINISHED RUNNING USER CODE AT %s"%str(datetime.datetime.now()))
                    logger.info("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
                    #logger.info(json.dumps(result))
                    #logger.info("++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
                    send(meta, result, self.minio_client, self.debug_mode)
                    self.backoff = 0
            elif code == 404:
                # 404 means no work queued: idle briefly and poll again
                time.sleep(2)
                if self.new:
                    self.new = False
                    self.backoff = 0
                    logger.warning("CONNECTED TO CONTROLER")
            #this controls the backoff if we can't connect
            else:
                if self.backoff == 0:
                    logger.warning("FAILED TO CONNECT TO CONTROLLER, STARTING BACKOFF")
                else:
                    logger.warning("TRIED TO CONNECT AGAIN AND FAILED, retrying in {0}s".format(min(self.backoff + 1, 30)))
                # linear backoff, capped at 30 seconds
                self.backoff = min(self.backoff + 1, 30)
                time.sleep(self.backoff)
                self.new = False
#check the meta to see if we're working with groups
def check_for_group(meta):
    """Return True when meta describes a GROUP workflow job, else False.

    Raises Exception when meta is not a dict (mirrors this module's other
    argument guards).
    """
    if not isinstance(meta, dict):
        raise Exception('meta should be a dictionary, is actually: %s, a %s'%(str(meta), str(type(meta))))
    try:
        # narrowed from a bare except: only missing keys / wrong nesting are
        # expected failure modes here
        return meta['job']['workflow']['type'] == 'GROUP'
    except (KeyError, TypeError):
        return False
#download a tarball from a url
def pull_tarball_url(meta):
    """Return the group tarball cacheURL from meta, or None when absent.

    Raises Exception when meta is not a dict.
    """
    if not isinstance(meta, dict):
        raise Exception('meta should be a dictionary, is actually: %s, a %s'%(str(meta), str(type(meta))))
    try:
        # narrowed from a bare except: absence of the key path is the only
        # expected failure mode
        return meta['task']['metadata']['cacheURL']
    except (KeyError, TypeError):
        return None
#This function handles all the tarball stuff
def pull_and_extract_tarball(tar_url, cache_folder_name):
logger.info("DRAWING TARBALL FROM URL")
if not isinstance(cache_folder_name, str):
raise Exception("The cache folder should be a creatable path, is actually: %s, a %s"%(str(cache_folder_name), str(type(cache_folder_name))))
try:
#different libraries for interacting with urls in python 2 and python 3
if sys.version_info.major >= 3:
file_tmp = urllib.request.urlretrieve(tar_url, filename=None)[0]
else:
file_tmp = urllib.urlretrieve(tar_url, filename=None)[0]
except Exception as exp:
raise Exception("COULD NOT FIND TARBALL AT: %s, due to %s"%(tar_url, str(exp)))
#base_name = os.path.basename(tar_url)
#except Exception as e:
# logger.info("COULD NOT PULL TARBALL BECAUSE: %s"%str(e))
tar = tarfile.open(file_tmp)
logger.info("EXTRACTING TARBALL")
tar.extractall(cache_folder_name)
def read_jsonld(filename):
    """Load and return the JSON(-LD) document stored at filename.

    Raises Exception when filename is not a string; json errors propagate.
    """
    if not isinstance(filename, str):
        raise Exception("Filename must be a string, is actually %s, a %s"%(str(filename), str(type(filename))))
    # context manager guarantees the handle is closed even if parsing fails
    # (the original leaked it on a read/parse error)
    with open(filename, 'r') as f:
        return json.loads(f.read())
class writer():
    """Accumulates group-job results as Elasticsearch bulk-update line pairs
    (an action line followed by a document line) in /output/results.json.
    """
    def __init__(self, meta):
        if not isinstance(meta, dict):
            raise Exception("metadata should be in a dictionary, is actually: %s, a %s"%(str(meta), str(type(meta))))
        self.meta = meta
        self.out_folder = '/output'
        self.output_filename = 'results.json'
        self.out_file = os.path.join(self.out_folder, self.output_filename)
        self.initialise_output_file()
    def initialise_output_file(self):
        # start from an empty file so a rerun never appends to stale results
        if not os.path.isdir(self.out_folder):
            os.mkdir(self.out_folder)
        logger.info("Creating output file")
        f = open(os.path.join(self.out_folder, self.output_filename), "w")
        f.write('')
        f.close()
    #create a jsonld for the whole group job
    def add_group_jsonld(self, group_jsonld):
        if group_jsonld != {}:
            #the group jsonld needs to be labeled with a line thats a json formatted like this
            group_line_dict = {}
            group_line_dict['update'] = {}
            group_line_dict['update']['_id'] = self.meta['task']['_id']
            group_line_dict['update']['_type'] = 'groupResults'
            group_line_dict['update']['_index'] = 'group'
            group_store_dict = {}
            group_store_dict['doc_as_upsert'] = True
            group_store_dict['doc'] = {}
            group_store_dict['doc']['meta'] = {}
            group_store_dict['doc']['meta']['userId'] = self.meta['task']['userId']
            group_store_dict['doc']['meta']['projectId'] = self.meta['task']['projectId']
            group_store_dict['doc']['meta']['jobId'] = self.meta['task']['jobId']
            group_store_dict['doc']['meta']['taskId'] = self.meta['task']['_id']
            group_store_dict['doc']['meta']['storedAt'] = datetime.datetime.utcnow().isoformat()
            group_store_dict['doc']['jsonLD'] = group_jsonld
            # the group-level pair must come first in the file: rewrite with
            # the two new lines prepended to whatever was already written
            with open(self.out_file, "r+") as f:
                lines = f.readlines()
                f.seek(0)
                f.write(json.dumps(group_line_dict) + '\n')
                f.write(json.dumps(group_store_dict) + '\n')
                f.writelines(lines)
    #create a jsonld for the info added to a given single file. This might be the
    #topic modeling results for this specific single file for example
    def append_task(self, single_file):
        if sys.version_info.major >= 3:
            if 'nanowire_plugin.group_tools.single_file' not in str(type(single_file)):
                raise Exception("You can only write a nanowire plugin single_file object to the output using the append task command. You have tried to send an invalid %s object"%str(type(single_file)))
        else:
            #logger.info(str(single_file))
            # NOTE(review): the python 2 branch checks str(single_file), not
            # its type -- it relies on the object's repr; confirm intended.
            if 'nanowire_plugin.group_tools.single_file' not in str(single_file):
                raise Exception("You can only write a nanowire plugin single_file object to the output using the append task command. You have tried to send an invalid %s object"%str(type(single_file)))
        #we only save a single file result if there have been changes
        if single_file.change_dict != {}:
            #Each single file jsonld needs to be labeled like this
            task_line = {}
            task_line['update'] = {}
            #label the jsonld
            task_line['update']['_id'] = single_file.filename.split("/")[-1] + ':' + self.meta['task']['_id']
            task_line['update']['_type'] = 'taskResults'
            task_line['update']['_index'] = 'group'
            task_line['update']['_parent'] = self.meta['task']['_id']
            #This is how we store the individual jsonlds
            task_store_line = {}
            task_store_line['doc'] = single_file.change_dict
            task_store_line['doc_as_upsert'] = True
            # per-file pairs are appended after the group pair
            with open(self.out_file, 'a') as f:
                f.write(json.dumps(task_line) + '\n')
                f.write(json.dumps(task_store_line) + '\n')
class reader():
    """Iterates over the jsonld files cached for the current group job."""
    def __init__(self):
        self.file_cache = '/cache/jsonlds'
        self.files = os.listdir(self.file_cache)
    #a function to create a generator to pull data
    def file_generator(self):
        """Yield a single_file wrapper for each cached jsonld."""
        for entry in self.files:
            yield single_file(os.path.join(self.file_cache, entry))
class Minio_tool():
    """Thin wrapper around a minio client used to ship the group output file.

    Fixes over the original: the uploaded file handle is now closed via a
    context manager (it previously leaked a descriptor per upload), and the
    bare ``except:`` on the storePayloads lookup is narrowed to the two
    exceptions a missing/odd-shaped metadata dict can actually raise.
    """
    def __init__(self, minio_client):
        # Fail fast if we were handed something that is not a minio client.
        if "minio.api.Minio" not in str(type(minio_client)):
            raise Exception("Minio_tool requires a minio client to initialise, has actually been given: %s, a %s"%(str(minio_client), str(type(minio_client))))
        self.minioClient = minio_client

    def send_file(self, filename, meta):
        """Upload *filename* to <bucket>/<job>/<task>/group.bin and return the
        task's updated storePayloads list.

        Raises if the metadata lacks job/task ids or the file is missing.
        """
        try:
            if 'MINIO_BUCKET' in os.environ.keys():
                bucket_name = os.environ['MINIO_BUCKET']
            else:
                bucket_name = meta['job']['_id']
            job_id = meta['job']['_id']
            task_id = meta['task']['_id']
        except Exception as e:
            logger.warning(str(e))
            raise Exception("Key information missing from metadta either job_id or task_id. metadata is: %s"%json.dumps(meta))
        logger.info("ENSURING EXISTANCE OF BUCKET: %s"%bucket_name)
        #first check the bucket we want to save to exists
        if not self.minioClient.bucket_exists(bucket_name):
            self.minioClient.make_bucket(bucket_name)
        save_name = '%s/%s/group.bin'%(job_id, task_id)
        #read the outputted jsonld for storage
        logger.info("READING SAVE FILE")
        if not os.path.exists(filename):
            raise Exception("Tried to send non-existant file: %s"%filename)
        file_stat = os.stat(filename)
        #send the outputted jsonld to minio
        logger.info("PUTTING OBJECT")
        # Context manager so the handle is closed even if put_object raises
        # (the original opened the file and never closed it).
        with open(filename, 'rb') as file_data:
            self.minioClient.put_object(bucket_name, save_name, file_data, file_stat.st_size)
        #remove the cache from the pod
        logger.info("CLEANING UP")
        self.clean_up_after_sending()
        #add the minio storage point to the metadata
        try:
            new_store_payloads = meta['task']['metadata']['storePayloads']
            new_store_payloads.append(save_name)
        except (KeyError, TypeError):
            # No storePayloads list in the task metadata yet - start one.
            new_store_payloads = [save_name]
        return new_store_payloads

    def clean_up_after_sending(self):
        """Remove the on-pod cache and output directories after a send."""
        if os.path.exists('/cache'):
            shutil.rmtree('/cache')
        if os.path.exists('/output'):
            shutil.rmtree('/output')
#use this class to make sure all the data from each file stays together
class single_file():
    """Bundles one cached file's jsonld together with any plugin changes to it."""
    def __init__(self, filename):
        cache_root = '/cache'
        full_path = os.path.join(cache_root, filename)
        if not os.path.exists(full_path):
            raise Exception("File to be loaded does not exist: %s"%filename)
        self.filename = filename
        self.jsonld = read_jsonld(full_path)
        # Plugins record their per-file updates here; empty means "unchanged".
        self.change_dict = {}
def run_group_function(function, read_tool, write_tool, meta):
    """Call *function* with the reader/writer/meta tools bound to whichever
    parameter order its signature declares.

    Accepts any permutation of (reader, writer, meta) or (reader, writer),
    optionally preceded by ``self`` - exactly the combinations the original
    if/elif ladder enumerated.

    Fix: ``inspect.getargspec`` is deprecated and was removed in Python 3.11;
    ``getfullargspec`` is used when available.
    """
    try:
        arguments = inspect.getfullargspec(function)[0]
    except AttributeError:
        # Very old interpreters without getfullargspec (Python 2).
        arguments = inspect.getargspec(function)[0]

    # Ignore a leading 'self' so unbound-method style callables work too.
    call_args = arguments[1:] if arguments[:1] == ['self'] else arguments

    named = {'reader': read_tool, 'writer': write_tool, 'meta': meta}
    # Any permutation of the three tools, or of reader/writer alone, is valid.
    if sorted(call_args) in (['meta', 'reader', 'writer'], ['reader', 'writer']):
        return function(*[named[a] for a in call_args])

    raise Exception("FUNCTION MUST ACCEPT VALID ARGUMENTS, CURRENT ARGUMENTS ARE %s"%str(arguments))
def group_bind(function, version="1.0.0", debug_mode=0):
    """binds a function to the input message queue"""
    # Verbose logging so start-up problems show in the pod logs.
    logger.setLevel(logging.DEBUG)
    logger.info("initialising nanowire lib")
    # Build the minio client from the environment the orchestrator injects.
    use_tls = environ["MINIO_SCHEME"] == "https"
    minio_client = Minio(
        environ["MINIO_HOST"] + ":" + environ["MINIO_PORT"],
        access_key=environ["MINIO_ACCESS"],
        secret_key=environ["MINIO_SECRET"],
        secure=use_tls)
    #minio_client.set_app_info(name, version)
    # Hand everything to the worker, which consumes until stopped.
    group_worker = GroupWorker(function, minio_client, environ["MONITOR_URL"], debug_mode)
    logger.info("Starting consuming")
    group_worker.run()
def validate_group_function(function):
    """Reject any group-analysis function whose signature is not built solely
    from self/meta/reader/writer, or which omits reader or writer."""
    if sys.version_info.major == 3:
        arguments = list(inspect.signature(function).parameters)
    elif sys.version_info.major == 2:
        arguments = inspect.getargspec(function)[0]

    allowed = ['self', 'meta', 'reader', 'writer']
    seen = set()
    for name in arguments:
        # Duplicates are rejected before the allowed-name check, as before.
        if name in seen:
            raise Exception("ARGUMENTS MAY NOT BE REPEATED")
        seen.add(name)
        if name not in allowed:
            raise Exception("FUNCTION MAY ONLY USE ALLOWED ARGUMENTS, ALLOWED ARGUMENTS ARE: reader, writer, meta YOU HAVE USED THE ARGUMENT %s"%name)

    if 'reader' not in arguments:
        raise Exception("GROUP ANALYSIS FUNCTION MUST TAKE reader AS AN ARGUMENT. THIS IS A CLASS FOR READING DATA")
    if 'writer' not in arguments:
        raise Exception("GROUP ANALYSIS FUNCTION MUST TAKE writer AS AN ARGUMENT. THIS IS A CLASS FOR WRITING RESULTS")
| |
from sunshine.models import Committee, Candidate, Officer, Candidacy, \
D2Report, FiledDoc, Receipt, Expenditure, Investment
import os
from datetime import date, datetime
from hashlib import md5
import sqlalchemy as sa
import csv
from csvkit.cleanup import RowChecker
from csvkit.sql import make_table, make_create_table_statement
from csvkit.table import Table
from collections import OrderedDict
from typeinferer import TypeInferer
import psycopg2
import logging
logger = logging.getLogger(__name__)
# Optionally wire Python logging into Sentry: when the raven client is
# installed and app_config provides a SENTRY_DSN, attach a SentryHandler to
# the root logging config; otherwise run with local logging only.
try:
    from raven.conf import setup_logging
    from raven.handlers.logging import SentryHandler
    from sunshine.app_config import SENTRY_DSN
    if SENTRY_DSN:
        handler = SentryHandler(SENTRY_DSN)
        setup_logging(handler)
except ImportError:
    # raven (Sentry client) not installed - Sentry reporting disabled.
    pass
except KeyError:
    # No SENTRY_DSN configured - treat as "Sentry disabled".
    pass
class SunshineTransformLoad(object):
    """Base ETL class for loading Illinois State Board of Elections dump files.

    Pipeline: bulk COPY the tab-delimited dump into a ``raw_<table>`` staging
    table, diff it against the clean ``<table>`` to find new rows, then insert
    (and optionally update) the clean table in chunks.

    Subclasses must define ``table_name``, ``header`` (clean column names) and
    ``filename`` (the dump file under ``file_path``).

    Fix over the original: ``insertNewRecords`` compared chunk length with
    ``is 0`` (identity, not equality) - now ``== 0``.
    """

    def __init__(self,
                 connection,
                 metadata=None,
                 chunk_size=50000,
                 file_path='downloads'):
        self.connection = connection
        self.chunk_size = chunk_size

        # Only create tables/types when SQLAlchemy metadata is supplied.
        if metadata:
            self.metadata = metadata
            self.initializeDB()

        self.file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                      file_path,
                                      self.filename)

    def executeTransaction(self, query, raise_exc=False, *args, **kwargs):
        """Run *query* in its own transaction.

        On ProgrammingError the transaction is rolled back; the error is
        logged and swallowed unless ``raise_exc`` is True.
        """
        trans = self.connection.begin()

        try:
            if kwargs:
                self.connection.execute(query, **kwargs)
            else:
                self.connection.execute(query, *args)
            trans.commit()
        except sa.exc.ProgrammingError as e:
            logger.error(e, exc_info=True)
            trans.rollback()
            print(e)
            if raise_exc:
                raise e

    def executeOutsideTransaction(self, query):
        """Run *query* in autocommit mode (needed for statements that cannot
        run inside a transaction); ProgrammingErrors are ignored."""
        self.connection.connection.set_isolation_level(0)
        curs = self.connection.connection.cursor()

        try:
            curs.execute(query)
        except psycopg2.ProgrammingError:
            pass

    def addNameColumn(self):
        """Add and populate a ``search_name`` tsvector column, GIN index and
        keep-up-to-date trigger (first/last name variant), if missing."""
        sql_table = sa.Table(self.table_name, sa.MetaData(),
                             autoload=True, autoload_with=self.connection.engine)

        if not 'search_name' in sql_table.columns.keys():
            add_name_col = '''
                ALTER TABLE {0} ADD COLUMN search_name tsvector
            '''.format(self.table_name)

            self.executeTransaction(add_name_col, raise_exc=True)

            add_names = '''
                UPDATE {0} SET
                  search_name = to_tsvector('english', COALESCE(first_name, '') || ' ' ||
                      COALESCE(REPLACE(last_name, '&', ''), ''))
            '''.format(self.table_name)

            self.executeTransaction(add_names)

            add_index = '''
                CREATE INDEX {0}_search_name_index ON {0}
                USING gin(search_name)
            '''.format(self.table_name)

            self.executeTransaction(add_index)

            trigger = '''
                CREATE TRIGGER {0}_search_update
                BEFORE INSERT OR UPDATE ON {0}
                FOR EACH ROW EXECUTE PROCEDURE
                tsvector_update_trigger(search_name,
                                        'pg_catalog.english',
                                        first_name,
                                        last_name)
            '''.format(self.table_name)

            self.executeTransaction(trigger)

    def addDateColumn(self, date_col):
        """Add an indexed ``search_date`` timestamp column derived from
        *date_col*, populating only rows where it is still NULL."""
        sql_table = sa.Table(self.table_name, sa.MetaData(),
                             autoload=True, autoload_with=self.connection.engine)

        if not 'search_date' in sql_table.columns.keys():
            add_date_col = '''
                ALTER TABLE {0} ADD COLUMN search_date TIMESTAMP
            '''.format(self.table_name)

            self.executeTransaction(add_date_col)

            add_index = '''
                CREATE INDEX {0}_search_date_index ON {0} (search_date)
            '''.format(self.table_name)

            self.executeTransaction(add_index)

        add_dates = '''
            UPDATE {0} SET
              search_date = subq.search_date
            FROM (
                SELECT
                  {1}::timestamp AS search_date,
                  id
                FROM {0}
            ) AS subq
            WHERE {0}.id = subq.id
              AND {0}.search_date IS NULL
        '''.format(self.table_name, date_col)

        self.executeTransaction(add_dates)

    def initializeDB(self):
        """Create the committee_position enum (best effort) and all tables."""
        enum = '''
            CREATE TYPE committee_position AS ENUM (
              'support',
              'oppose'
            )
        '''
        self.executeTransaction(enum)

        self.metadata.create_all(bind=self.connection.engine)

    def makeRawTable(self):
        """Create raw_<table> with column types inferred from the dump file,
        unless the table already exists."""
        try:
            sql_table = sa.Table('raw_{0}'.format(self.table_name),
                                 sa.MetaData(),
                                 autoload=True,
                                 autoload_with=self.connection.engine)
        except sa.exc.NoSuchTableError:
            inferer = TypeInferer(self.file_path)
            inferer.infer()

            sql_table = sa.Table('raw_{0}'.format(self.table_name),
                                 sa.MetaData())

            for column_name, column_type in inferer.types.items():
                sql_table.append_column(sa.Column(column_name, column_type()))

            dialect = sa.dialects.postgresql.dialect()
            create_table = str(sa.schema.CreateTable(sql_table)\
                               .compile(dialect=dialect)).strip(';')

            self.executeTransaction('DROP TABLE IF EXISTS raw_{0}'.format(self.table_name))
            self.executeTransaction(create_table)

    def writeRawToDisk(self):
        """Re-write the tab-delimited dump as a cleaned CSV next to it."""
        with open(self.file_path, 'r', encoding='latin-1') as inp:
            reader = csv.reader(inp, delimiter='\t', quoting=csv.QUOTE_NONE)
            self.raw_header = next(reader)

            checker = RowChecker(reader)

            with open('%s_raw.csv' % self.file_path, 'w') as outp:
                writer = csv.writer(outp)
                writer.writerow(self.raw_header)
                for row in checker.checked_rows():
                    writer.writerow(row)

    def bulkLoadRawData(self):
        """COPY the cleaned CSV into raw_<table>, then remove the CSV."""
        import psycopg2
        from sunshine.app_config import DB_USER, DB_PW, DB_HOST, \
            DB_PORT, DB_NAME

        # NOTE(review): DB_PW is imported but not used in the connection
        # string - presumably peer/trust auth; confirm before changing.
        DB_CONN_STR = 'host={0} dbname={1} user={2} port={3}'\
            .format(DB_HOST, DB_NAME, DB_USER, DB_PORT)

        copy_st = '''
            COPY raw_{0} FROM STDIN WITH CSV HEADER DELIMITER ','
        '''.format(self.table_name)

        with open('%s_raw.csv' % self.file_path, 'r') as f:
            next(f)
            with psycopg2.connect(DB_CONN_STR) as conn:
                with conn.cursor() as curs:
                    try:
                        curs.copy_expert(copy_st, f)
                    except psycopg2.IntegrityError as e:
                        logger.error(e, exc_info=True)
                        print(e)
                        conn.rollback()

        os.remove('%s_raw.csv' % self.file_path)

    def findNewRecords(self):
        """Stage the IDs present in raw_<table> but absent from <table>."""
        create_new_record_table = '''
            CREATE TABLE new_{0} AS (
              SELECT raw."ID"
              FROM raw_{0} AS raw
              LEFT JOIN {0} AS dat
                ON raw."ID" = dat.id
              WHERE dat.id IS NULL
            )
        '''.format(self.table_name)

        self.executeTransaction('DROP TABLE IF EXISTS new_{0}'.format(self.table_name))
        self.executeTransaction(create_new_record_table)

    def iterIncomingData(self):
        """Yield raw rows whose IDs were staged by findNewRecords()."""
        incoming = '''
            SELECT raw.*
            FROM raw_{0} AS raw
            JOIN new_{0} AS new
              USING("ID")
        '''.format(self.table_name)

        for record in self.connection.engine.execute(incoming):
            yield record

    def transform(self):
        """Yield incoming rows as OrderedDicts keyed by the clean header,
        converting whitespace-only strings to NULL."""
        for row in self.iterIncomingData():
            values = []
            for value in row.values():
                if isinstance(value, str):
                    if value.strip() == '':
                        values.append(None)
                    else:
                        values.append(value)
                else:
                    values.append(value)
            yield OrderedDict(zip(self.header, values))

    @property
    def insert(self):
        """Parameterised INSERT statement for the clean table."""
        return '''
            INSERT INTO {0} ({1}) VALUES ({2})
        '''.format(self.table_name,
                   ','.join(self.header),
                   ','.join([':%s' % h for h in self.header]))

    def load(self, update_existing=False):
        """Run the full pipeline; optionally refresh already-loaded rows."""
        self.makeRawTable()
        self.writeRawToDisk()
        self.bulkLoadRawData()
        self.findNewRecords()
        self.insertNewRecords()
        if update_existing:
            self.updateExistingRecords()

    def updateExistingRecords(self):
        """Overwrite every clean row from its raw counterpart by ID."""
        fields = ','.join(['{0}=s."{1}"'.format(clean, raw) \
                           for clean, raw in zip(self.header, self.raw_header)])

        update = '''
            UPDATE {table_name} SET
              {fields}
            FROM (
                SELECT * FROM raw_{table_name}
            ) AS s
            WHERE {table_name}.id = s."ID"
        '''.format(table_name=self.table_name,
                   fields=fields)

        self.executeTransaction(update)

    def insertNewRecords(self):
        """Insert the rows yielded by transform() in chunk_size batches."""
        rows = []
        i = 0
        for row in self.transform():
            rows.append(row)

            # Flush a full chunk. Was `is 0` - an identity test that only
            # happened to work via CPython's small-int cache.
            if len(rows) % self.chunk_size == 0:
                self.executeTransaction(sa.text(self.insert), *rows)

                print('Inserted %s %s' % (i, self.table_name))
                rows = []

            i += 1

        if rows:
            # A single dict must go through as keyword params so the kwargs
            # branch of executeTransaction is taken.
            if len(rows) == 1:
                self.executeTransaction(sa.text(self.insert), **rows[0])
            else:
                self.executeTransaction(sa.text(self.insert), *rows)

        logger.info('inserted %s %s' % (i, self.table_name))
class SunshineCommittees(SunshineTransformLoad):
    """Loader for the committees table (Committees.txt)."""
    table_name = 'committees'
    header = Committee.__table__.columns.keys()
    filename = 'Committees.txt'

    def addNameColumn(self):
        """Committee search indexes the single ``name`` column (overrides the
        base-class first/last-name variant)."""
        sql_table = sa.Table(self.table_name, sa.MetaData(),
                             autoload=True, autoload_with=self.connection.engine)

        if not 'search_name' in sql_table.columns.keys():
            add_name_col = '''
                ALTER TABLE {0} ADD COLUMN search_name tsvector
            '''.format(self.table_name)

            # Bail out entirely if the column cannot be added (e.g. it was
            # created concurrently) - the later statements would then fail.
            try:
                self.executeTransaction(add_name_col, raise_exc=True)
            except sa.exc.ProgrammingError:
                return

            add_names = '''
                UPDATE {0} SET
                  search_name = to_tsvector('english', REPLACE(name, '&', ''))
            '''.format(self.table_name)

            self.executeTransaction(add_names)

            add_index = '''
                CREATE INDEX {0}_search_name_index ON {0}
                USING gin(search_name)
            '''.format(self.table_name)

            self.executeTransaction(add_index)

            trigger = '''
                CREATE TRIGGER {0}_search_update
                BEFORE INSERT OR UPDATE ON {0}
                FOR EACH ROW EXECUTE PROCEDURE
                tsvector_update_trigger(search_name,
                                        'pg_catalog.english',
                                        name)
            '''.format(self.table_name)

            self.executeTransaction(trigger)

    def transform(self):
        """Normalise raw committee rows: Status -> boolean, support/oppose
        codes -> enum labels, independent-expenditure committees -> 'Super PAC'."""
        for row in self.iterIncomingData():
            row = OrderedDict(zip(row.keys(), row.values()))

            # Replace status value
            if row['Status'] != 'A':
                row['Status'] = False
            else:
                row['Status'] = True

            # Replace position values
            for idx in ['CanSuppOpp', 'PolicySuppOpp']:
                if row[idx] == 'O':
                    row[idx] = 'oppose'
                elif row[idx] == 'S':
                    row[idx] = 'support'
                else:
                    row[idx] = None

            if row.get('TypeOfCommittee'):
                if 'Independent Expenditure' in row['TypeOfCommittee']:
                    row['TypeOfCommittee'] = 'Super PAC'

            yield OrderedDict(zip(self.header, list(row.values())))
class SunshineCandidates(SunshineTransformLoad):
    """Loader for the candidates table (Candidates.txt)."""
    table_name = 'candidates'
    # date_added, last_update and ocd_id are maintained outside this loader.
    header = [col for col in Candidate.__table__.columns.keys()
              if col not in ('date_added', 'last_update', 'ocd_id')]
    filename = 'Candidates.txt'
class SunshineOfficers(SunshineTransformLoad):
    """Loader for current committee officers (Officers.txt).

    Fix over the original: a leftover debug ``print(row)`` dumped every raw
    row to stdout during loads; removed. Dead commented-out committee_id
    handling removed as well.
    """
    table_name = 'officers'
    header = Officer.__table__.columns.keys()
    filename = 'Officers.txt'
    current = True

    def transform(self):
        """Pad raw officer rows out to the officers schema and flag current."""
        for row in self.iterIncomingData():
            row_list = list(row.values())

            # Add empty committee_id
            row_list.insert(1, None)

            # Add empty resign date
            row_list.insert(11, None)

            # Add current flag
            row_list.append(self.current)

            yield OrderedDict(zip(self.header, row_list))

    def updateExistingRecords(self):
        """Refresh existing officer rows from the raw table, leaving the
        columns this dump does not carry (committee_id, resign_date, current)
        untouched."""
        ignore_fields = ['committee_id', 'resign_date', 'current']

        header = [f for f in self.header if f not in ignore_fields]

        fields = ','.join(['{0}=s."{1}"'.format(clean, raw) \
                           for clean, raw in zip(header, self.raw_header)])

        update = '''
            UPDATE {table_name} SET
              {fields}
            FROM (
                SELECT * FROM raw_{table_name}
            ) AS s
            WHERE {table_name}.id = s."ID"
        '''.format(table_name=self.table_name,
                   fields=fields)

        self.executeTransaction(update)

    def bulkLoadRawData(self):
        """COPY the cleaned officers CSV into raw_officers, then remove it."""
        import psycopg2
        from sunshine.app_config import DB_USER, DB_PW, DB_HOST, \
            DB_PORT, DB_NAME

        DB_CONN_STR = 'host={0} dbname={1} user={2} port={3}'\
            .format(DB_HOST, DB_NAME, DB_USER, DB_PORT)

        copy_st = '''
            COPY raw_{0} FROM STDIN WITH CSV HEADER DELIMITER ','
        '''.format(self.table_name)

        with open('%s_raw.csv' % self.file_path, 'r') as f:
            next(f)
            with psycopg2.connect(DB_CONN_STR) as conn:
                with conn.cursor() as curs:
                    try:
                        curs.copy_expert(copy_st, f)
                    except psycopg2.IntegrityError as e:
                        logger.error(e, exc_info=True)
                        print(e)
                        conn.rollback()

        os.remove('%s_raw.csv' % self.file_path)
class SunshinePrevOfficers(SunshineOfficers):
    """Loader for former committee officers (PrevOfficers.txt); rows are
    flagged not-current."""
    table_name = 'officers'
    header = Officer.__table__.columns.keys()
    filename = 'PrevOfficers.txt'
    current = False

    def transform(self):
        """Pad raw prev-officer rows out to the officers schema (this dump has
        no phone column) and flag them as not current."""
        for row in self.iterIncomingData():
            row_list = list(row.values())

            # Add empty phone
            row_list.insert(10, None)

            # Add current flag
            row_list.append(self.current)

            yield OrderedDict(zip(self.header, row_list))

    def updateExistingRecords(self):
        """Refresh officers from the raw table, but only rows that actually
        resigned (ResignDate set); phone is not carried by this dump."""
        header = [f for f in self.header if f != 'phone']

        fields = ','.join(['{0}=s."{1}"'.format(clean, raw) \
                           for clean, raw in zip(header, self.raw_header)])

        update = '''
            UPDATE {table_name} SET
              {fields}
            FROM (
                SELECT * FROM raw_{table_name}
                WHERE "ResignDate" IS NOT NULL
            ) AS s
            WHERE {table_name}.id = s."ID"
        '''.format(table_name=self.table_name,
                   fields=fields)

        self.executeTransaction(update)
class SunshineCandidacy(SunshineTransformLoad):
    """Loader for candidate election runs (CanElections.txt)."""
    table_name = 'candidacies'
    header = Candidacy.__table__.columns.keys()
    filename = 'CanElections.txt'

    # Raw election-type codes -> human-readable names.
    election_types = {
        'CE': 'Consolidated Election',
        'GP': 'General Primary',
        'GE': 'General Election',
        'CP': 'Consolidated Primary',
        'NE': None,
        'SE': 'Special Election'
    }

    # Raw race-type codes -> normalised labels.
    race_types = {
        'Inc': 'incumbent',
        'Open': 'open seat',
        'Chal': 'challenger',
        'Ret': 'retired',
    }

    def transform(self):
        """Normalise election/race/outcome codes on each incoming row."""
        for raw_row in self.iterIncomingData():
            row = OrderedDict(zip(raw_row.keys(), raw_row.values()))

            # Get election type
            row['ElectionType'] = self.election_types.get(row['ElectionType'].strip())

            # Get race type
            if row.get('IncChallOpen'):
                row['IncChallOpen'] = self.race_types.get(row['IncChallOpen'].strip())

            # Get outcome: anything other than Won/Lost becomes NULL.
            outcome_map = {'Won': 'won', 'Lost': 'lost'}
            row['WonLost'] = outcome_map.get(row['WonLost'])

            yield OrderedDict(zip(self.header, row.values()))
class SunshineCandidateCommittees(SunshineTransformLoad):
    """Loader for the committee <-> candidate link table (CmteCandidateLinks.txt).

    The link table has a composite key, so the ID-based staging queries from
    the base class are overridden to match on both columns.
    """
    table_name = 'candidate_committees'
    header = ['committee_id', 'candidate_id']
    filename = 'CmteCandidateLinks.txt'

    def findNewRecords(self):
        """Stage (CommitteeID, CandidateID) pairs not yet in the clean table."""
        create_new_record_table = '''
            CREATE TABLE new_{0} AS (
              SELECT
                raw."CommitteeID",
                raw."CandidateID"
              FROM raw_{0} AS raw
              LEFT JOIN {0} AS dat
                ON raw."CommitteeID" = dat.committee_id
                AND raw."CandidateID" = dat.candidate_id
              WHERE dat.committee_id IS NULL
                AND dat.candidate_id IS NULL
            )
        '''.format(self.table_name)

        self.executeTransaction('DROP TABLE IF EXISTS new_{0}'.format(self.table_name))
        self.executeTransaction(create_new_record_table)

    def iterIncomingData(self):
        """Yield raw rows whose composite key was staged by findNewRecords()."""
        incoming = '''
            SELECT raw.*
            FROM raw_{0} AS raw
            JOIN new_{0} AS new
              ON raw."CommitteeID" = new."CommitteeID"
              AND raw."CandidateID" = new."CandidateID"
        '''.format(self.table_name)

        for record in self.connection.engine.execute(incoming):
            yield record

    def transform(self):
        """Map each raw link row onto the two-column clean header."""
        for row in self.iterIncomingData():
            row = [row['CommitteeID'], row['CandidateID']]
            yield OrderedDict(zip(self.header, row))
class SunshineOfficerCommittees(SunshineTransformLoad):
    """Loader for the committee <-> officer link table (CmteOfficerLinks.txt).

    Fix over the original: ``findNewRecords`` called
    ``executeTransaction(..., rase_exc=True)`` - the misspelled keyword fell
    into ``**kwargs`` and was forwarded to ``connection.execute()`` as a bind
    parameter while ``raise_exc`` silently stayed False. Corrected to
    ``raise_exc=True``.
    """
    table_name = 'officer_committees'
    header = ['committee_id', 'officer_id']
    filename = 'CmteOfficerLinks.txt'

    def findNewRecords(self):
        """Stage (CommitteeID, OfficerID) pairs not yet in the clean table."""
        create_new_record_table = '''
            CREATE TABLE new_{0} AS (
              SELECT
                raw."CommitteeID",
                raw."OfficerID"
              FROM raw_{0} AS raw
              LEFT JOIN {0} AS dat
                ON raw."CommitteeID" = dat.committee_id
                AND raw."OfficerID" = dat.officer_id
              WHERE dat.committee_id IS NULL
                AND dat.officer_id IS NULL
            )
        '''.format(self.table_name)

        self.executeTransaction('DROP TABLE IF EXISTS new_{0}'.format(self.table_name))
        self.executeTransaction(create_new_record_table, raise_exc=True)

    def iterIncomingData(self):
        """Yield raw rows whose composite key was staged by findNewRecords()."""
        incoming = '''
            SELECT raw.*
            FROM raw_{0} AS raw
            JOIN new_{0} AS new
              ON raw."CommitteeID" = new."CommitteeID"
              AND raw."OfficerID" = new."OfficerID"
        '''.format(self.table_name)

        for record in self.connection.engine.execute(incoming):
            yield record

    def transform(self):
        """Map each raw link row onto the two-column clean header."""
        for row in self.iterIncomingData():
            row = [row['CommitteeID'], row['OfficerID']]
            yield OrderedDict(zip(self.header, row))

    def updateExistingRecords(self):
        """Backfill officers.committee_id from the raw link table."""
        update = '''
            UPDATE officers SET
              committee_id=s."CommitteeID"
            FROM (
                SELECT * FROM raw_{table_name}
            ) AS s
            WHERE officers.id = s."OfficerID"
        '''.format(table_name=self.table_name)

        self.executeTransaction(update)
class SunshineD2Reports(SunshineTransformLoad):
    """Loader for D-2 report totals (D2Totals.txt); base-class behavior only."""
    table_name = 'd2_reports'
    header = D2Report.__table__.columns.keys()
    filename = 'D2Totals.txt'
class SunshineFiledDocs(SunshineTransformLoad):
    """Loader for filed documents (FiledDocs.txt); base-class behavior only."""
    table_name = 'filed_docs'
    header = FiledDoc.__table__.columns.keys()
    filename = 'FiledDocs.txt'
class SunshineReceipts(SunshineTransformLoad):
    """Loader for receipts (Receipts.txt); base-class behavior only."""
    table_name = 'receipts'
    header = Receipt.__table__.columns.keys()
    filename = 'Receipts.txt'
class SunshineExpenditures(SunshineTransformLoad):
    """Loader for expenditures (Expenditures.txt); base-class behavior only."""
    table_name = 'expenditures'
    header = Expenditure.__table__.columns.keys()
    filename = 'Expenditures.txt'
class SunshineInvestments(SunshineTransformLoad):
    """Loader for investments (Investments.txt); base-class behavior only."""
    table_name = 'investments'
    header = Investment.__table__.columns.keys()
    filename = 'Investments.txt'
class SunshineViews(object):
def __init__(self, connection):
self.connection = connection
def executeTransaction(self, query, **kwargs):
trans = self.connection.begin()
try:
self.connection.execute(query, **kwargs)
trans.commit()
except (sa.exc.ProgrammingError, psycopg2.ProgrammingError) as e:
# TODO: this line seems to break when creating views for the first time.
# logger.error(e, exc_info=True)
trans.rollback()
raise e
def executeOutsideTransaction(self, query):
self.connection.connection.set_isolation_level(0)
curs = self.connection.connection.cursor()
try:
curs.execute(query)
except psycopg2.ProgrammingError:
pass
def dropViews(self):
self.executeTransaction('DROP MATERIALIZED VIEW IF EXISTS receipts_by_month')
self.executeTransaction('DROP MATERIALIZED VIEW IF EXISTS committee_receipts_by_week')
self.executeTransaction('DROP MATERIALIZED VIEW IF EXISTS incumbent_candidates')
self.executeTransaction('DROP MATERIALIZED VIEW IF EXISTS most_recent_filings CASCADE')
self.executeTransaction('DROP MATERIALIZED VIEW IF EXISTS expenditures_by_candidate')
def makeAllViews(self):
self.incumbentCandidates()
self.mostRecentFilings()
self.condensedReceipts()
self.condensedExpenditures()
self.expendituresByCandidate() # relies on condensed_expenditures
self.receiptsAggregates() # relies on condensedReceipts
self.committeeReceiptAggregates() # relies on condensedReceipts
self.committeeMoney() # relies on mostRecentFilings
self.candidateMoney() # relies on committeeMoney and mostRecentFilings
def condensedExpenditures(self):
try:
self.executeTransaction('REFRESH MATERIALIZED VIEW CONCURRENTLY condensed_expenditures')
except sa.exc.ProgrammingError:
rec = '''
CREATE MATERIALIZED VIEW condensed_expenditures AS (
(
SELECT
e.*
FROM expenditures AS e
JOIN most_recent_filings AS m
USING(committee_id)
WHERE e.expended_date > COALESCE(m.reporting_period_end, '1900-01-01')
) UNION (
SELECT
e.*
FROM expenditures AS e
JOIN (
SELECT DISTINCT ON (
reporting_period_begin,
reporting_period_end,
committee_id
)
id AS filed_doc_id
FROM filed_docs
WHERE doc_name != 'Pre-election'
ORDER BY reporting_period_begin,
reporting_period_end,
committee_id,
received_datetime DESC
) AS f
USING(filed_doc_id)
)
)
'''
self.executeTransaction(rec)
self.condensedExpendituresIndex()
def condensedReceipts(self):
try:
self.executeTransaction('REFRESH MATERIALIZED VIEW CONCURRENTLY condensed_receipts')
except sa.exc.ProgrammingError:
rec = '''
CREATE MATERIALIZED VIEW condensed_receipts AS (
(
SELECT
r.*
FROM receipts AS r
LEFT JOIN most_recent_filings AS m
USING(committee_id)
WHERE r.received_date > COALESCE(m.reporting_period_end, '1900-01-01')
) UNION (
SELECT
r.*
FROM receipts AS r
JOIN (
SELECT DISTINCT ON (
reporting_period_begin,
reporting_period_end,
committee_id
)
id AS filed_doc_id
FROM filed_docs
WHERE doc_name != 'Pre-election'
ORDER BY reporting_period_begin,
reporting_period_end,
committee_id,
received_datetime DESC
) AS f
USING(filed_doc_id)
)
)
'''
self.executeTransaction(rec)
self.condensedReceiptsIndex()
def expendituresByCandidate(self):
try:
self.executeTransaction('REFRESH MATERIALIZED VIEW CONCURRENTLY expenditures_by_candidate')
except sa.exc.ProgrammingError:
exp = '''
CREATE MATERIALIZED VIEW expenditures_by_candidate AS (
SELECT
candidate_name,
office,
committee_id,
MAX(committee_name) AS committee_name,
MAX(committee_type) AS committee_type,
bool_or(supporting) AS supporting,
bool_or(opposing) AS opposing,
SUM(supporting_amount) AS supporting_amount,
SUM(opposing_amount) AS opposing_amount,
MIN(support_min_date) AS support_min_date,
MAX(support_max_date) AS support_max_date,
MIN(oppose_min_date) AS oppose_min_date,
MAX(oppose_max_date) AS oppose_max_date
FROM (
SELECT
e.candidate_name,
e.office,
cm.id AS committee_id,
MAX(cm.name) AS committee_name,
MAX(cm.type) AS committee_type,
bool_or(e.supporting) AS supporting,
bool_or(e.opposing) AS opposing,
SUM(e.amount) AS supporting_amount,
0 AS opposing_amount,
MIN(e.expended_date) AS support_min_date,
MAX(e.expended_date) AS support_max_date,
NULL::timestamp AS oppose_min_date,
NULL::timestamp AS oppose_max_date
FROM condensed_expenditures AS e
JOIN committees AS cm
ON e.committee_id = cm.id
WHERE supporting = TRUE AND opposing = FALSE
GROUP BY e.candidate_name, e.office, cm.id
UNION
SELECT
e.candidate_name,
e.office,
cm.id AS committee_id,
MAX(cm.name) AS committee_name,
MAX(cm.type) AS committee_type,
bool_or(e.supporting) AS supporting,
bool_or(e.opposing) AS opposing,
0 AS supporting_amount,
SUM(e.amount) AS opposing_amount,
NULL::timestamp AS support_min_date,
NULL::timestamp AS support_max_date,
MIN(e.expended_date) AS oppose_min_date,
MAX(e.expended_date) AS oppose_max_date
FROM condensed_expenditures AS e
JOIN committees AS cm
ON e.committee_id = cm.id
WHERE opposing = TRUE AND supporting = FALSE
GROUP BY e.candidate_name, e.office, cm.id
) AS subq
GROUP BY candidate_name, office, committee_id
)
'''
self.executeTransaction(exp)
self.expendituresByCandidateIndex()
def receiptsAggregates(self):
try:
self.executeTransaction('REFRESH MATERIALIZED VIEW CONCURRENTLY receipts_by_month')
except sa.exc.ProgrammingError:
weeks = '''
CREATE MATERIALIZED VIEW receipts_by_month AS (
SELECT
date_trunc('month', received_date) AS month,
SUM(amount) AS total_amount,
COUNT(id) AS donation_count,
AVG(amount) AS average_donation
FROM condensed_receipts
GROUP BY date_trunc('month', received_date)
ORDER BY month
)
'''
self.executeTransaction(weeks)
self.receiptsByWeekIndex()
def committeeReceiptAggregates(self):
try:
self.executeTransaction('REFRESH MATERIALIZED VIEW CONCURRENTLY committee_receipts_by_week')
except sa.exc.ProgrammingError:
weeks = '''
CREATE MATERIALIZED VIEW committee_receipts_by_week AS (
SELECT
committee_id,
date_trunc('week', received_date) AS week,
SUM(amount) AS total_amount,
COUNT(id) AS donation_count,
AVG(amount) AS average_donation
FROM receipts
GROUP BY committee_id,
date_trunc('week', received_date)
ORDER BY week
)
'''
self.executeTransaction(weeks)
self.committeeReceiptsByWeekIndex()
def incumbentCandidates(self):
try:
self.executeTransaction('REFRESH MATERIALIZED VIEW CONCURRENTLY incumbent_candidates')
except (sa.exc.ProgrammingError, psycopg2.ProgrammingError):
incumbents = '''
CREATE MATERIALIZED VIEW incumbent_candidates AS (
SELECT DISTINCT ON (cd.district, cd.office)
cd.*,
cs.election_year AS last_election_year,
cs.election_type AS last_election_type,
cs.race_type AS last_race_type
FROM candidates AS cd
JOIN candidacies AS cs
ON cd.id = cs.candidate_id
WHERE cs.outcome = :outcome
AND cs.election_year >= :year
ORDER BY cd.district, cd.office, cs.id DESC
)
'''
last_year = datetime.now().year - 1
self.executeTransaction(sa.text(incumbents),
outcome='won',
year=last_year)
self.incumbentCandidatesIndex()
def mostRecentFilings(self):
try:
self.executeTransaction('REFRESH MATERIALIZED VIEW CONCURRENTLY most_recent_filings')
except sa.exc.ProgrammingError:
create = '''
CREATE MATERIALIZED VIEW most_recent_filings AS (
SELECT
COALESCE(d2.end_funds_available, 0) AS end_funds_available,
COALESCE(d2.total_investments, 0) AS total_investments,
COALESCE(d2.total_debts, 0) AS total_debts,
COALESCE((d2.inkind_itemized + d2.inkind_non_itemized), 0) AS total_inkind,
cm.name AS committee_name,
cm.id AS committee_id,
cm.type AS committee_type,
cm.active AS committee_active,
fd.id AS filed_doc_id,
fd.doc_name,
fd.reporting_period_end,
fd.reporting_period_begin,
fd.received_datetime
FROM committees AS cm
LEFT JOIN (
SELECT DISTINCT ON (committee_id)
f.*
FROM (
SELECT DISTINCT ON (committee_id, reporting_period_end)
id,
committee_id,
doc_name,
reporting_period_end,
reporting_period_begin,
received_datetime
FROM filed_docs
WHERE doc_name NOT IN (
'A-1',
'Statement of Organization',
'Letter/Correspondence',
'B-1',
'Nonparticipation'
)
ORDER BY committee_id,
reporting_period_end DESC,
received_datetime DESC
) AS f
ORDER BY f.committee_id,
f.reporting_period_end DESC
) AS fd
ON fd.committee_id = cm.id
LEFT JOIN d2_reports AS d2
ON fd.id = d2.filed_doc_id
)
'''
self.executeTransaction(create)
self.mostRecentFilingsIndex()
def committeeMoney(self):
try:
self.executeTransaction('REFRESH MATERIALIZED VIEW CONCURRENTLY committee_money')
except sa.exc.ProgrammingError:
create = '''
CREATE MATERIALIZED VIEW committee_money AS (
SELECT
MAX(filings.end_funds_available) AS end_funds_available,
MAX(filings.total_inkind) AS total_inkind,
MAX(filings.committee_name) AS committee_name,
MAX(filings.committee_id) AS committee_id,
MAX(filings.committee_type) AS committee_type,
bool_and(filings.committee_active) AS committee_active,
MAX(filings.doc_name) AS doc_name,
MAX(filings.reporting_period_end) AS reporting_period_end,
MAX(filings.reporting_period_begin) AS reporting_period_begin,
(SUM(COALESCE(receipts.amount, 0)) +
MAX(COALESCE(filings.end_funds_available, 0)) +
MAX(COALESCE(filings.total_investments, 0)) -
MAX(COALESCE(filings.total_debts, 0))) AS total,
MAX(receipts.received_date) AS last_receipt_date
FROM most_recent_filings AS filings
LEFT JOIN receipts
ON receipts.committee_id = filings.committee_id
AND receipts.received_date > filings.reporting_period_end
GROUP BY filings.committee_id
ORDER BY total DESC NULLS LAST
)
'''
self.executeTransaction(create)
self.committeeMoneyIndex()
def candidateMoney(self):
try:
self.executeTransaction('REFRESH MATERIALIZED VIEW CONCURRENTLY candidate_money')
except sa.exc.ProgrammingError:
create = '''
CREATE MATERIALIZED VIEW candidate_money AS (
SELECT
cd.id AS candidate_id,
cd.first_name AS candidate_first_name,
cd.last_name AS candidate_last_name,
cd.office AS candidate_office,
cm.id AS committee_id,
cm.name AS committee_name,
cm.type AS committee_type,
m.total,
m.last_receipt_date
FROM candidates AS cd
JOIN candidate_committees AS cc
ON cd.id = cc.candidate_id
JOIN committees AS cm
ON cc.committee_id = cm.id
JOIN committee_money AS m
ON cm.id = m.committee_id
ORDER BY m.total DESC NULLS LAST
)
'''
self.executeTransaction(create)
self.candidateMoneyIndex()
def makeUniqueIndexes(self):
'''
Need a unique index on materialized views so that can be refreshed concurrently
'''
self.condensedExpendituresIndex()
self.condensedReceiptsIndex()
self.condensedReceiptsDateIndex()
self.condensedExpendituresDateIndex()
self.expendituresByCandidateIndex()
self.receiptsByWeekIndex()
self.committeeReceiptsByWeekIndex()
self.incumbentCandidatesIndex()
self.candidateMoneyIndex()
self.committeeMoneyIndex()
self.mostRecentFilingsIndex()
def condensedExpendituresIndex(self):
index = '''
CREATE UNIQUE INDEX CONCURRENTLY condensed_expenditures_id_idx
ON condensed_expenditures(id)
'''
self.executeOutsideTransaction(index)
def condensedReceiptsIndex(self):
index = '''
CREATE UNIQUE INDEX CONCURRENTLY condensed_receipts_id_idx
ON condensed_receipts(id)
'''
self.executeOutsideTransaction(index)
def condensedExpendituresDateIndex(self):
    # Non-unique date index for range queries on expenditures.
    self.executeOutsideTransaction('''
        CREATE INDEX CONCURRENTLY condensed_expenditures_date_idx
        ON condensed_expenditures(expended_date)
    ''')
def condensedReceiptsDateIndex(self):
    # Non-unique date index for range queries on receipts.
    self.executeOutsideTransaction('''
        CREATE INDEX CONCURRENTLY condensed_receipts_date_idx
        ON condensed_receipts(date)
    ''')
def expendituresByCandidateIndex(self):
    # Composite key uniquely identifies a row of the view, enabling
    # CONCURRENTLY refreshes.
    self.executeOutsideTransaction('''
        CREATE UNIQUE INDEX CONCURRENTLY expenditures_by_candidate_idx
        ON expenditures_by_candidate(candidate_name, office, committee_id, supporting)
    ''')
def receiptsByWeekIndex(self):
    # NOTE(review): despite the method name, this indexes the
    # receipts_by_month view -- presumably a historical rename; confirm.
    self.executeOutsideTransaction('''
        CREATE UNIQUE INDEX CONCURRENTLY receipts_by_month_idx
        ON receipts_by_month(month)
    ''')
def committeeReceiptsByWeekIndex(self):
    # (committee_id, week) is the view's natural key.
    self.executeOutsideTransaction('''
        CREATE UNIQUE INDEX CONCURRENTLY committee_receipts_by_week_idx
        ON committee_receipts_by_week(committee_id, week)
    ''')
def incumbentCandidatesIndex(self):
    # Unique id index enables CONCURRENTLY refresh of incumbent_candidates.
    self.executeOutsideTransaction('''
        CREATE UNIQUE INDEX CONCURRENTLY incumbent_candidates_idx
        ON incumbent_candidates(id)
    ''')
def candidateMoneyIndex(self):
    # One row per (candidate, committee) pair in candidate_money.
    self.executeOutsideTransaction('''
        CREATE UNIQUE INDEX CONCURRENTLY candidate_money_idx
        ON candidate_money(candidate_id, committee_id)
    ''')
def committeeMoneyIndex(self):
    # One row per committee in committee_money.
    self.executeOutsideTransaction('''
        CREATE UNIQUE INDEX CONCURRENTLY committee_money_idx
        ON committee_money(committee_id)
    ''')
def mostRecentFilingsIndex(self):
    # One row per (committee, reporting-period-end) in most_recent_filings.
    self.executeOutsideTransaction('''
        CREATE UNIQUE INDEX CONCURRENTLY most_recent_filings_idx
        ON most_recent_filings(committee_id, reporting_period_end)
    ''')
class SunshineIndexes(object):
    """Builds plain B-tree / GIN indexes on the core tables.

    All index creation uses CREATE INDEX CONCURRENTLY, which cannot run
    inside a transaction, hence executeOutsideTransaction().
    """

    def __init__(self, connection):
        self.connection = connection

    def executeTransaction(self, query):
        """Run one statement in its own transaction; roll back on
        ProgrammingError (e.g. the object already exists)."""
        trans = self.connection.begin()
        try:
            self.connection.execute(query)
            trans.commit()
        except sa.exc.ProgrammingError:
            trans.rollback()

    def executeOutsideTransaction(self, query):
        """Run one statement with autocommit isolation, swallowing
        ProgrammingError so re-runs are harmless."""
        # isolation level 0 == autocommit on the raw psycopg2 connection
        self.connection.connection.set_isolation_level(0)
        cursor = self.connection.connection.cursor()
        try:
            cursor.execute(query)
        except psycopg2.ProgrammingError:
            # Index most likely exists already.
            pass

    def makeAllIndexes(self):
        """Create every table index, in order."""
        for build in (self.receiptsDate,
                      self.receiptsCommittee,
                      self.receiptsFiledDocs,
                      self.candidaciesCandidate,
                      self.candidateCommittees,
                      self.officersCommittee,
                      self.filedDocsCommittee,
                      self.receiptsName,
                      self.expendituresName):
            build()

    def receiptsDate(self):
        """Index received_date on receipts."""
        self.executeOutsideTransaction('''
            CREATE INDEX CONCURRENTLY received_date_idx ON receipts (received_date)
        ''')

    def receiptsCommittee(self):
        """Index committee_id on receipts."""
        self.executeOutsideTransaction('''
            CREATE INDEX CONCURRENTLY receipts_committee_idx ON receipts (committee_id)
        ''')

    def receiptsFiledDocs(self):
        """Index filed_doc_id on receipts."""
        self.executeOutsideTransaction('''
            CREATE INDEX CONCURRENTLY receipts_filed_docs_idx ON receipts (filed_doc_id)
        ''')

    def candidaciesCandidate(self):
        """Index candidate_id on candidacies."""
        self.executeOutsideTransaction('''
            CREATE INDEX CONCURRENTLY candidacies_candidate_id_index
            ON candidacies (candidate_id)
        ''')

    def candidateCommittees(self):
        """Index both sides of the candidate<->committee crosswalk."""
        self.executeOutsideTransaction('''
            CREATE INDEX CONCURRENTLY cand_comm_candidate_id_index
            ON candidate_committees (candidate_id)
        ''')
        self.executeOutsideTransaction('''
            CREATE INDEX CONCURRENTLY cand_comm_committee_id_index
            ON candidate_committees (committee_id)
        ''')

    def filedDocsCommittee(self):
        """Index committee_id on filed_docs."""
        self.executeOutsideTransaction('''
            CREATE INDEX CONCURRENTLY filed_docs_committee_idx ON filed_docs (committee_id)
        ''')

    def officersCommittee(self):
        """Index committee_id on officers."""
        self.executeOutsideTransaction('''
            CREATE INDEX CONCURRENTLY officers_committee_id_index
            ON officers (committee_id)
        ''')

    def receiptsName(self):
        """GIN full-text index on condensed_receipts.search_name."""
        self.executeOutsideTransaction('''
            CREATE INDEX CONCURRENTLY condensed_receipts_search_index ON condensed_receipts
            USING gin(search_name)
        ''')

    def expendituresName(self):
        """GIN full-text index on condensed_expenditures.search_name."""
        self.executeOutsideTransaction('''
            CREATE INDEX CONCURRENTLY condensed_expenditures_search_index ON condensed_expenditures
            USING gin(search_name)
        ''')
def downloadUnzip():
    """Download the latest IL campaign disclosure dump and unpack it.

    Fetches IL_Campaign_Disclosure_latest.zip from S3 into the
    ``downloads/`` directory next to this file, extracts the archive,
    then moves the extracted members up out of their date-stamped
    subdirectory so later loaders find them at a fixed path.
    """
    # BUG FIX: `import urllib` alone does not expose `urllib.request`;
    # the submodule must be imported explicitly.
    import urllib.request
    import zipfile

    latest_filename = 'IL_Campaign_Disclosure_latest.zip'
    download_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                 'downloads'))
    download_location = os.path.join(download_path, latest_filename)
    download_url = 'http://il-elections.s3.amazonaws.com/%s' % latest_filename
    filename, _ = urllib.request.urlretrieve(download_url,
                                             filename=download_location)
    with zipfile.ZipFile(filename, 'r') as zf:
        # The archive stores everything under one date-stamped directory.
        date_prefix = zf.namelist()[0].split('/')[0]
        zf.extractall(path=download_path)
    # Flatten: move every extracted file up next to the zip.
    for member in os.listdir(os.path.join(download_path, date_prefix)):
        move_from = os.path.join(download_path, date_prefix, member)
        move_to = os.path.join(download_path, member)
        os.rename(move_from, move_to)
def alterSearchDictionary():
    """Point the english_stem text-search dictionary at the configured
    stop-word list (connects as the postgres superuser to do so)."""
    from sunshine.app_config import DB_HOST, DB_PORT, DB_NAME, STOP_WORD_LIST

    alter = '''
        ALTER TEXT SEARCH DICTIONARY english_stem (StopWords = '{0}');
    '''.format(STOP_WORD_LIST)

    # ALTER TEXT SEARCH DICTIONARY needs superuser rights, so use a
    # dedicated postgres connection rather than the app's.
    DB_USER = 'postgres'
    DB_PW = ''
    conn_str = 'postgresql+psycopg2://{0}:{1}@{2}:{3}/{4}'.format(
        DB_USER, DB_PW, DB_HOST, DB_PORT, DB_NAME)
    engine = sa.create_engine(conn_str,
                              convert_unicode=True,
                              server_side_cursors=True)
    with engine.begin() as conn:
        conn.execute(alter)
if __name__ == "__main__":
    import sys
    import argparse

    from sunshine.app_config import STOP_WORD_LIST
    from sunshine.database import engine, Base

    # CLI: each stage (download / load / view rebuild) is opt-in.
    parser = argparse.ArgumentParser(description='Download and import campaign disclosure data from the IL State Board of Elections.')
    parser.add_argument('--download', action='store_true',
                        help='Downloading fresh data')
    parser.add_argument('--load_data', action='store_true',
                        help='Load data into database')
    parser.add_argument('--recreate_views', action='store_true',
                        help='Recreate database views')
    parser.add_argument('--chunk_size', help='Adjust the size of each insert when loading data',
                        type=int)
    parser.add_argument('--init', action='store_true',
                        help='Initialize a new database')
    args = parser.parse_args()

    connection = engine.connect()

    # Stage 1: fetch a fresh dump from S3.
    # NOTE(review): `logger`, `datetime` and the Sunshine* loader classes
    # are presumably defined earlier in this module -- confirm.
    if args.download:
        logger.info("download start %s ..." % datetime.now().isoformat())
        downloadUnzip()
        logger.info("download finish %s ..." % datetime.now().isoformat())
    else:
        print("skipping download")

    # Stage 2: load raw tables from the downloaded CSVs.
    if args.load_data:
        print("loading data start %s ..." % datetime.now().isoformat())

        # Full-text dictionary must match the configured stop-word list.
        if STOP_WORD_LIST != 'english':
            alterSearchDictionary()

        chunk_size = 50000
        if args.chunk_size:
            chunk_size = args.chunk_size

        # Tables are only created from scratch when --init is given.
        metadata = None
        if args.init:
            metadata = Base.metadata

        # Loaders run one at a time and are deleted afterwards to free
        # memory between tables.
        committees = SunshineCommittees(connection,
                                        metadata=metadata,
                                        chunk_size=chunk_size)
        committees.load()
        committees.addNameColumn()
        committees.addDateColumn('NULL')
        del committees
        del Base.metadata

        candidates = SunshineCandidates(connection, chunk_size=chunk_size)
        candidates.load(update_existing=True)
        candidates.addNameColumn()
        candidates.addDateColumn('NULL')
        del candidates

        off_cmte_xwalk = SunshineOfficerCommittees(connection, chunk_size=chunk_size)
        off_cmte_xwalk.load(update_existing=True)
        del off_cmte_xwalk

        officers = SunshineOfficers(connection, chunk_size=chunk_size)
        officers.load(update_existing=True)
        officers.addNameColumn()
        officers.addDateColumn('NULL')
        del officers

        prev_off = SunshinePrevOfficers(connection, chunk_size=chunk_size)
        prev_off.load(update_existing=True)
        del prev_off

        candidacy = SunshineCandidacy(connection, chunk_size=chunk_size)
        candidacy.load()
        del candidacy

        can_cmte_xwalk = SunshineCandidateCommittees(connection, chunk_size=chunk_size)
        can_cmte_xwalk.load()
        del can_cmte_xwalk

        filed_docs = SunshineFiledDocs(connection, chunk_size=chunk_size)
        filed_docs.load()
        del filed_docs

        d2_reports = SunshineD2Reports(connection, chunk_size=chunk_size)
        d2_reports.load()
        del d2_reports

        receipts = SunshineReceipts(connection, chunk_size=chunk_size)
        receipts.load()
        receipts.addNameColumn()
        receipts.addDateColumn('received_date')
        del receipts

        expenditures = SunshineExpenditures(connection, chunk_size=chunk_size)
        expenditures.load()
        expenditures.addNameColumn()
        expenditures.addDateColumn('expended_date')
        del expenditures

        investments = SunshineInvestments(connection, chunk_size=chunk_size)
        investments.load()
        investments.addNameColumn()
        investments.addDateColumn('purchase_date')
        del investments

        print("loading data end %s ..." % datetime.now().isoformat())
    else:
        print("skipping load")

    # Stage 3: (re)build materialized views and all indexes.
    views = SunshineViews(connection)

    if args.recreate_views:
        print("dropping views")
        views.dropViews()

    logger.info("creating views %s..." % datetime.now().isoformat())
    views.makeAllViews()
    views.makeUniqueIndexes()

    logger.info("creating indexes %s ..." % datetime.now().isoformat())
    indexes = SunshineIndexes(connection)
    indexes.makeAllIndexes()

    connection.close()
| |
import io
import os
import tempfile
import numpy as np
import pyvips
from imageio import imwrite
from PIL import Image
from histomicstk.annotations_and_masks.annotation_and_mask_utils import (
get_image_from_htk_response, get_scale_factor_and_appendStr)
from histomicstk.annotations_and_masks.annotations_to_masks_handler import \
_visualize_annotations_on_rgb
from histomicstk.annotations_and_masks.annotations_to_object_mask_handler import \
get_all_rois_from_slide_v2
from histomicstk.annotations_and_masks.masks_to_annotations_handler import \
get_annotation_documents_from_contours
from histomicstk.workflows.workflow_runner import (Slide_iterator,
Workflow_runner)
# %============================================================================
# CONSTANTS
# source: https://libvips.github.io/libvips/API/current/Examples.md.html
# source 2: https://libvips.github.io/libvips/API/current/Examples.md.html
# source 3: https://github.com/libvips/pyvips/issues/109
# source 4: https://github.com/libvips/libvips/issues/1254
# map np dtypes to vips
# map np dtypes to vips band formats
DTYPE_TO_FORMAT = {
    'uint8': 'uchar',
    'int8': 'char',
    'uint16': 'ushort',
    'int16': 'short',
    'uint32': 'uint',
    'int32': 'int',
    'float32': 'float',
    'float64': 'double',
    'complex64': 'complex',
    'complex128': 'dpcomplex',
}

# map vips band formats to np dtypes -- the exact inverse of the table
# above, with the numpy scalar types (not their names) as values
FORMAT_TO_DTYPE = {
    vips_format: getattr(np, dtype_name)
    for dtype_name, vips_format in DTYPE_TO_FORMAT.items()
}
# %============================================================================
def get_all_rois_from_folder_v2(
        gc, folderid, get_all_rois_kwargs, monitor=''):
    """Get all rois in a girder folder using get_all_rois_from_slide_v2().

    Parameters
    ----------
    gc : girder_client.Girder_Client
        authenticated girder client
    folderid : str
        girder id of folder
    get_all_rois_kwargs : dict
        kwargs to pass to get_all_rois_from_slide_v2()
    monitor : str
        monitor prefix

    Returns
    -------
    None

    """
    def _pull_slide_rois(slide_id, monitorPrefix, **kwargs):
        item = gc.get('/item/%s' % slide_id)
        # guarantee there is a '.' so the name can be split on it
        if "." not in item['name']:
            item['name'] += "."
        base_name = item['name'][:item['name'].find('.')].replace('/', '_#_')
        return get_all_rois_from_slide_v2(
            slide_id=slide_id, monitorprefix=monitorPrefix,
            # encoding slide id makes things easier later
            slide_name="%s_id-%s" % (base_name, slide_id),
            **kwargs)

    # the girder client is injected into the per-slide kwargs
    get_all_rois_kwargs['gc'] = gc

    # pull annotations for each slide in the folder
    Workflow_runner(
        slide_iterator=Slide_iterator(
            gc, source_folder_id=folderid,
            keep_slides=None,
        ),
        workflow=_pull_slide_rois,
        workflow_kwargs=get_all_rois_kwargs,
        monitorPrefix=monitor
    ).run()
# %============================================================================
def _get_visualization_zoomout(
        gc, slide_id, bounds, MPP, MAG, zoomout=4):
    """Get a zoomed out visualization of ROI RGB and annotation overlay.

    Parameters
    ----------
    gc : girder_client.Girder_Client
        authenticated girder client
    slide_id : str
        girder ID of slide
    bounds : dict
        bounds of the region of interest. Must contain the keys
        XMIN, XMAX, YMIN, YMAX
    MPP : float
        Microns per pixel.
    MAG : float
        Magnification. MPP overrides this.
    zoomout : float
        how much to zoom out

    Returns
    -------
    np.array
        Zoomed out visualization. Output from _visualize_annotations_on_rgb().

    """
    # Request a coarser resolution: (zoomout + 1) times lower than the ROI.
    if MPP is not None:
        getsf_kwargs = {'MPP': MPP * (zoomout + 1), 'MAG': None}
    elif MAG is not None:
        getsf_kwargs = {'MPP': None, 'MAG': MAG / (zoomout + 1)}
    else:
        getsf_kwargs = {'MPP': None, 'MAG': None}
    sf, appendStr = get_scale_factor_and_appendStr(
        gc=gc, slide_id=slide_id, **getsf_kwargs)

    # Surrounding field: pad the ROI by zoomout/2 of its size on each side,
    # clipped to the slide origin.
    roi_width = bounds['XMAX'] - bounds['XMIN']
    roi_height = bounds['YMAX'] - bounds['YMIN']
    x_margin = roi_width * zoomout / 2
    y_margin = roi_height * zoomout / 2
    region_request = (
        "/item/%s/tiles/region?left=%d&right=%d&top=%d&bottom=%d"
        % (slide_id,
           max(0, bounds['XMIN'] - x_margin),
           bounds['XMAX'] + x_margin,
           max(0, bounds['YMIN'] - y_margin),
           bounds['YMAX'] + y_margin)
    ) + appendStr
    resp = gc.get(region_request, jsonResp=False)
    rgb_zoomout = get_image_from_htk_response(resp)

    # Yellow bounding box marking where the ROI sits in the zoomed-out field.
    xmin = x_margin * sf
    ymin = y_margin * sf
    xmax = xmin + roi_width * sf
    ymax = ymin + roi_height * sf
    xmin, xmax, ymin, ymax = [str(int(c)) for c in (xmin, xmax, ymin, ymax)]
    roi_box = {
        'color': 'rgb(255,255,0)',
        'coords_x': ",".join([xmin, xmax, xmax, xmin, xmin]),
        'coords_y': ",".join([ymin, ymin, ymax, ymax, ymin]),
    }
    return _visualize_annotations_on_rgb(rgb_zoomout, [roi_box])
def _get_review_visualization(rgb, vis, vis_zoomout):
    """Get a visualization of rgb and annotations for rapid review.

    Parameters
    ----------
    rgb : np.array
        mxnx3 rgb image
    vis : np.array
        visualization of rgb with overlayed annotations
    vis_zoomout
        same as vis, but at a lower magnification.

    Returns
    -------
    np.array
        visualization to be used for gallery

    """
    import matplotlib.pyplot as plt

    panel_width = max(vis.shape[1], vis_zoomout.shape[1])
    panel_height = max(vis.shape[0], vis_zoomout.shape[0])
    fig, axes = plt.subplots(
        1, 3, dpi=100,
        figsize=(3 * panel_width / 1000, panel_height / 1000),
        gridspec_kw={'wspace': 0.01, 'hspace': 0}
    )
    # three panels: overlay, raw rgb, zoomed-out context
    for axis, panel in zip(axes, (vis, rgb, vis_zoomout)):
        axis.imshow(panel)
        axis.axis('off')
    fig.subplots_adjust(bottom=0, top=1, left=0, right=1)

    # rasterize the figure into an in-memory png and crop the alpha channel
    buf = io.BytesIO()
    plt.savefig(buf, format='png', pad_inches=0, dpi=1000)
    buf.seek(0)
    combined_vis = np.uint8(Image.open(buf))[..., :3]
    plt.close()
    return combined_vis
def _plot_rapid_review_vis(
        roi_out, gc, slide_id, slide_name, MPP, MAG,
        combinedvis_savepath, zoomout=4,
        verbose=False, monitorprefix=''):
    """Plot a visualization for rapid review of ROI.

    This is a callback to be called inside get_all_rois_from_slide_v2().

    Parameters
    ----------
    roi_out : dict
        output from annotations_to_contours_no_mask()
    gc : girder_client.Girder_Client
        authenticated girder client
    slide_id : str
        girder slide id
    slide_name : str
        name of the slide
    MPP : float
        microns per pixel
    MAG : float
        magnification. superceded by MPP.
    combinedvis_savepath : str
        path to save the combined visualization
    zoomout : float
        how much to zoom out to get the gallery visualization
    verbose : bool
        print statements to screen
    monitorprefix : str
        text to prepend to printed statements

    Returns
    -------
    dict
        roi_out parameter whether or not it is modified

    """
    bounds = roi_out['bounds']

    # zoomed-out context field for the same ROI
    vis_zoomout = _get_visualization_zoomout(
        gc=gc, slide_id=slide_id, bounds=bounds,
        MPP=MPP, MAG=MAG, zoomout=zoomout)

    # file name encodes slide and ROI bounds for later parsing
    roi_name = "%s_left-%d_top-%d_bottom-%d_right-%d" % (
        slide_name,
        bounds['XMIN'], bounds['YMIN'],
        bounds['YMAX'], bounds['XMAX'])
    savename = os.path.join(combinedvis_savepath, roi_name + ".png")

    combined = _get_review_visualization(
        rgb=roi_out['rgb'], vis=roi_out['visualization'],
        vis_zoomout=vis_zoomout)

    # save visualization for later use
    if verbose:
        print("%s: Saving %s" % (monitorprefix, savename))
    imwrite(im=combined, uri=savename)

    return roi_out
# %============================================================================
def create_review_galleries(
        tilepath_base, upload_results=True, gc=None,
        gallery_savepath=None, gallery_folderid=None,
        padding=25, tiles_per_row=2, tiles_per_column=5,
        annprops=None, url=None, nameprefix=''):
    """Create and or post review galleries for rapid review.

    Parameters
    ----------
    tilepath_base : str
        directory where combined visualization.
    upload_results : bool
        upload results to DSA?
    gc : girder_client.Girder_Client
        authenticated girder client. Only needed upload_results.
    gallery_savepath : str
        directory to save gallery. Only if upload_results.
    gallery_folderid : str
        girder ID of folder to post galleries. Only if upload_result.
    padding : int
        padding in pixels between tiles in same gallery.
    tiles_per_row : int
        how many visualization tiles per row in gallery.
    tiles_per_column : int
        how many visualization tiles per column in gallery.
    annprops : dict
        properties of the annotations to be posted to DSA. Passed directly
        as annprops to get_annotation_documents_from_contours()
    url : str
        url of the Digital Slide Archive Instance. For example:
        http://candygram.neurology.emory.edu:8080/
    nameprefix : str
        prefix to prepend to gallery name

    Returns
    -------
    list
        each entry is a dict representing the response of the server
        post request to upload the gallery to DSA.

    """
    from pandas import DataFrame

    if upload_results:
        # NOTE(review): locals() lookup works because these names are
        # parameters of this function; fragile if they are ever renamed.
        for par in ('gc', 'gallery_folderid', 'url'):
            if locals()[par] is None:
                raise Exception(
                    "%s cannot be None if upload_results!" % par)

    if gallery_savepath is None:
        gallery_savepath = tempfile.mkdtemp(prefix='gallery-')

    savepaths = []
    resps = []

    # all combined-visualization tiles previously saved as png
    tile_paths = [
        os.path.join(tilepath_base, j) for j in
        os.listdir(tilepath_base) if j.endswith('.png')]
    tile_paths.sort()

    def _parse_tilepath(tpath):
        # Recover slide name, girder id, and ROI bounds that were encoded
        # into the tile file name by _plot_rapid_review_vis().
        basename = os.path.basename(tpath)
        basename = basename[:basename.rfind('.')]
        tileinfo = {'slide_name': basename.split('_')[0]}
        for attrib in ['id', 'left', 'top', 'bottom', 'right']:
            tileinfo[attrib] = basename.split(
                attrib + '-')[1].split('_')[0]
        # add URL in histomicsTK
        tileinfo['URL'] = url + \
            "histomicstk#?image=%s&bounds=%s%%2C%s%%2C%s%%2C%s%%2C0" % (
                tileinfo['id'],
                tileinfo['left'], tileinfo['top'],
                tileinfo['right'], tileinfo['bottom'])
        return tileinfo

    n_tiles = len(tile_paths)
    n_galleries = int(np.ceil(n_tiles / (tiles_per_row * tiles_per_column)))

    tileidx = 0

    for galno in range(n_galleries):

        # this makes a 8-bit, mono image (initializes as 1x1x3 matrix)
        im = pyvips.Image.black(1, 1, bands=3)

        # this will store the roi contours
        contours = []

        for row in range(tiles_per_column):

            # y offset of this row within the gallery so far
            rowpos = im.height + padding

            # initialize "row" strip image
            row_im = pyvips.Image.black(1, 1, bands=3)

            for col in range(tiles_per_row):

                if tileidx == n_tiles:
                    break

                tilepath = tile_paths[tileidx]

                print("Inserting tile %d of %d: %s" % (
                    tileidx, n_tiles, tilepath))
                tileidx += 1

                # # get tile from file
                tile = pyvips.Image.new_from_file(
                    tilepath, access="sequential")

                # insert tile into mosaic row (first 3 bands only)
                colpos = row_im.width + padding
                row_im = row_im.insert(
                    tile[:3], colpos, 0, expand=True, background=255)

                if upload_results:
                    tileinfo = _parse_tilepath(tilepath)
                    # tile footprint within the final gallery image
                    xmin = colpos
                    ymin = rowpos
                    xmax = xmin + tile.width
                    ymax = ymin + tile.height
                    xmin, xmax, ymin, ymax = [
                        str(j) for j in (xmin, xmax, ymin, ymax)]
                    contours.append({
                        'group': tileinfo['slide_name'],
                        'label': tileinfo['URL'],
                        'color': 'rgb(0,0,0)',
                        'coords_x': ",".join([xmin, xmax, xmax, xmin, xmin]),
                        'coords_y': ",".join([ymin, ymin, ymax, ymax, ymin]),
                    })
                    # Add a small contour so that when the pathologist
                    # changes the label to approve or disapprove of the
                    # FOV, the URL in THIS contour (a link to the original
                    # FOV) can be used. We place it in the top right corner.
                    boxsize = 25
                    xmin = str(int(xmax) - boxsize)
                    ymax = str(int(ymin) + boxsize)
                    contours.append({
                        'group': tileinfo['slide_name'],
                        'label': tileinfo['URL'],
                        'color': 'rgb(0,0,0)',
                        'coords_x': ",".join([xmin, xmax, xmax, xmin, xmin]),
                        'coords_y': ",".join([ymin, ymin, ymax, ymax, ymin]),
                    })

            # insert row into main gallery
            im = im.insert(row_im, 0, rowpos, expand=True, background=255)

        filename = '%s_gallery-%d' % (nameprefix, galno + 1)
        savepath = os.path.join(gallery_savepath, filename + '.tiff')
        print("Saving gallery %d of %d to %s" % (
            galno + 1, n_galleries, savepath))

        # save temporarily to disk to be uploaded as a pyramidal tiff
        im.tiffsave(
            savepath, tile=True, tile_width=256, tile_height=256, pyramid=True)

        if upload_results:
            # upload the gallery to DSA
            resps.append(gc.uploadFileToFolder(
                folderId=gallery_folderid, filepath=savepath,
                filename=filename))
            os.remove(savepath)

            # get and post FOV location annotations
            annotation_docs = get_annotation_documents_from_contours(
                DataFrame(contours), separate_docs_by_group=True,
                annprops=annprops)
            for doc in annotation_docs:
                _ = gc.post(
                    "/annotation?itemId=" + resps[-1]['itemId'], json=doc)
        else:
            savepaths.append(savepath)

    return resps if upload_results else savepaths
# %============================================================================
| |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import binascii
import os
import pytest
from cryptography.hazmat.backends.interfaces import CipherBackend
from cryptography.hazmat.primitives.ciphers import algorithms, base, modes
from .utils import generate_aead_test, generate_encrypt_test
from ...utils import load_nist_vectors
@pytest.mark.supported(
    only_if=lambda backend: backend.cipher_supported(
        algorithms.AES("\x00" * 16), modes.CBC("\x00" * 16)
    ),
    skip_message="Does not support AES CBC",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestAESModeCBC(object):
    # NIST vector families: one .rsp file per (vector type, key size).
    test_CBC = generate_encrypt_test(
        load_nist_vectors,
        os.path.join("ciphers", "AES", "CBC"),
        [
            "CBC%s%d.rsp" % (vector_type, key_size)
            for vector_type in ("GFSbox", "KeySbox", "VarKey", "VarTxt", "MMT")
            for key_size in (128, 192, 256)
        ],
        lambda key, **kwargs: algorithms.AES(binascii.unhexlify(key)),
        lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv)),
    )
@pytest.mark.supported(
    only_if=lambda backend: backend.cipher_supported(
        algorithms.AES("\x00" * 16), modes.ECB()
    ),
    skip_message="Does not support AES ECB",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestAESModeECB(object):
    # NIST vector families: one .rsp file per (vector type, key size).
    test_ECB = generate_encrypt_test(
        load_nist_vectors,
        os.path.join("ciphers", "AES", "ECB"),
        [
            "ECB%s%d.rsp" % (vector_type, key_size)
            for vector_type in ("GFSbox", "KeySbox", "VarKey", "VarTxt", "MMT")
            for key_size in (128, 192, 256)
        ],
        lambda key, **kwargs: algorithms.AES(binascii.unhexlify(key)),
        lambda **kwargs: modes.ECB(),
    )
@pytest.mark.supported(
    only_if=lambda backend: backend.cipher_supported(
        algorithms.AES("\x00" * 16), modes.OFB("\x00" * 16)
    ),
    skip_message="Does not support AES OFB",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestAESModeOFB(object):
    # NIST vector families: one .rsp file per (vector type, key size).
    test_OFB = generate_encrypt_test(
        load_nist_vectors,
        os.path.join("ciphers", "AES", "OFB"),
        [
            "OFB%s%d.rsp" % (vector_type, key_size)
            for vector_type in ("GFSbox", "KeySbox", "VarKey", "VarTxt", "MMT")
            for key_size in (128, 192, 256)
        ],
        lambda key, **kwargs: algorithms.AES(binascii.unhexlify(key)),
        lambda iv, **kwargs: modes.OFB(binascii.unhexlify(iv)),
    )
@pytest.mark.supported(
    only_if=lambda backend: backend.cipher_supported(
        algorithms.AES("\x00" * 16), modes.CFB("\x00" * 16)
    ),
    skip_message="Does not support AES CFB",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestAESModeCFB(object):
    # Full-block (128-bit segment) CFB vector files.
    test_CFB = generate_encrypt_test(
        load_nist_vectors,
        os.path.join("ciphers", "AES", "CFB"),
        [
            "CFB128%s%d.rsp" % (vector_type, key_size)
            for vector_type in ("GFSbox", "KeySbox", "VarKey", "VarTxt", "MMT")
            for key_size in (128, 192, 256)
        ],
        lambda key, **kwargs: algorithms.AES(binascii.unhexlify(key)),
        lambda iv, **kwargs: modes.CFB(binascii.unhexlify(iv)),
    )
@pytest.mark.supported(
    only_if=lambda backend: backend.cipher_supported(
        algorithms.AES("\x00" * 16), modes.CFB8("\x00" * 16)
    ),
    skip_message="Does not support AES CFB8",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestAESModeCFB8(object):
    # 8-bit segment CFB vector files.
    test_CFB8 = generate_encrypt_test(
        load_nist_vectors,
        os.path.join("ciphers", "AES", "CFB"),
        [
            "CFB8%s%d.rsp" % (vector_type, key_size)
            for vector_type in ("GFSbox", "KeySbox", "VarKey", "VarTxt", "MMT")
            for key_size in (128, 192, 256)
        ],
        lambda key, **kwargs: algorithms.AES(binascii.unhexlify(key)),
        lambda iv, **kwargs: modes.CFB8(binascii.unhexlify(iv)),
    )
@pytest.mark.supported(
    only_if=lambda backend: backend.cipher_supported(
        algorithms.AES("\x00" * 16), modes.CTR("\x00" * 16)
    ),
    skip_message="Does not support AES CTR",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestAESModeCTR(object):
    # OpenSSL-style CTR vectors, one file per key size.
    test_CTR = generate_encrypt_test(
        load_nist_vectors,
        os.path.join("ciphers", "AES", "CTR"),
        ["aes-%d-ctr.txt" % key_size for key_size in (128, 192, 256)],
        lambda key, **kwargs: algorithms.AES(binascii.unhexlify(key)),
        lambda iv, **kwargs: modes.CTR(binascii.unhexlify(iv)),
    )
@pytest.mark.supported(
    only_if=lambda backend: backend.cipher_supported(
        algorithms.AES("\x00" * 16), modes.GCM("\x00" * 12)
    ),
    skip_message="Does not support AES GCM",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestAESModeGCM(object):
    # NIST GCM vectors: decrypt and encrypt (external IV) per key size.
    test_GCM = generate_aead_test(
        load_nist_vectors,
        os.path.join("ciphers", "AES", "GCM"),
        [
            "gcm%s%d.rsp" % (operation, key_size)
            for operation in ("Decrypt", "EncryptExtIV")
            for key_size in (128, 192, 256)
        ],
        algorithms.AES,
        modes.GCM,
    )

    def test_gcm_tag_with_only_aad(self, backend):
        # The tag must be computable when only AAD (no plaintext) is fed in.
        key = binascii.unhexlify(b"5211242698bed4774a090620a6ca56f3")
        iv = binascii.unhexlify(b"b1e1349120b6e832ef976f5d")
        aad = binascii.unhexlify(b"b6d729aab8e6416d7002b9faa794c410d8d2f193")
        expected_tag = binascii.unhexlify(b"0f247e7f9c2505de374006738018493b")

        enc = base.Cipher(
            algorithms.AES(key), modes.GCM(iv), backend=backend
        ).encryptor()
        enc.authenticate_additional_data(aad)
        enc.finalize()
        assert enc.tag == expected_tag

    def test_gcm_ciphertext_with_no_aad(self, backend):
        # Encryption without any AAD must still produce the reference
        # ciphertext and tag.
        key = binascii.unhexlify(b"e98b72a9881a84ca6b76e0f43e68647a")
        iv = binascii.unhexlify(b"8b23299fde174053f3d652ba")
        expected_ct = binascii.unhexlify(b"5a3c1cf1985dbb8bed818036fdd5ab42")
        expected_tag = binascii.unhexlify(b"23c7ab0f952b7091cd324835043b5eb5")
        pt = binascii.unhexlify(b"28286a321293253c3e0aa2704a278032")

        enc = base.Cipher(
            algorithms.AES(key), modes.GCM(iv), backend=backend
        ).encryptor()
        assert enc.update(pt) + enc.finalize() == expected_ct
        assert enc.tag == expected_tag
| |
import ast
import json

import flask

from indexd.errors import AuthError, AuthzError
from indexd.errors import IndexdUnexpectedError
from indexd.errors import UserError
from indexd.index.errors import NoRecordFound as IndexNoRecordFound
# Flask blueprint implementing the GA4GH DRS v1 endpoints.
blueprint = flask.Blueprint("drs", __name__)

# Populated externally (by the indexd app factory) before requests are served.
blueprint.config = dict()
blueprint.index_driver = None
@blueprint.route("/ga4gh/drs/v1/objects/<path:object_id>", methods=["GET"])
def get_drs_object(object_id):
    """
    Returns a specific DRSobject with object_id
    """
    # only the literal query value "true" enables expansion
    expand = flask.request.args.get("expand") == "true"
    record = blueprint.index_driver.get_with_nonstrict_prefix(object_id)
    return flask.jsonify(indexd_to_drs(record, expand=expand)), 200
@blueprint.route("/ga4gh/drs/v1/objects", methods=["GET"])
def list_drs_records():
    """List DRS objects, bundles, or both, with optional paging."""
    query = flask.request.args
    start = query.get("start")
    form = query.get("form")

    raw_limit = query.get("limit")
    try:
        limit = 100 if raw_limit is None else int(raw_limit)
    except ValueError:
        raise UserError("limit must be an integer")
    if limit < 0 or limit > 1024:
        raise UserError("limit must be between 0 and 1024")

    page = query.get("page")
    if page is not None:
        try:
            page = int(page)
        except ValueError:
            raise UserError("page must be an integer")

    # select the record source based on the requested form
    if form == "bundle":
        records = blueprint.index_driver.get_bundle_list(
            start=start, limit=limit, page=page
        )
    elif form == "object":
        records = blueprint.index_driver.ids(start=start, limit=limit, page=page)
    else:
        records = blueprint.index_driver.get_bundle_and_object_list(
            start=start, limit=limit, page=page
        )
    payload = {
        "drs_objects": [indexd_to_drs(record, True) for record in records],
    }
    return flask.jsonify(payload), 200
@blueprint.route(
    "/ga4gh/drs/v1/objects/<path:object_id>/access",
    defaults={"access_id": None},
    methods=["GET"],
)
@blueprint.route(
    "/ga4gh/drs/v1/objects/<path:object_id>/access/<path:access_id>", methods=["GET"]
)
def get_signed_url(object_id, access_id):
    """Return a signed URL for the object via the fence client."""
    if not access_id:
        raise UserError("Access ID/Protocol is required.")
    signed_url = flask.current_app.fence_client.get_signed_url_for_object(
        object_id=object_id, access_id=access_id
    )
    if not signed_url:
        raise IndexNoRecordFound("No signed url found")
    return signed_url, 200
def indexd_to_drs(record, expand=False):
    """
    Convert record to ga4gh-compliant format.

    Args:
        record(dict): json object record (an indexd object or bundle row)
        expand(bool): show contents of the descendants
    """
    # Records from different tables use different id column names.
    did = (
        record["id"]
        if "id" in record
        else record["did"]
        if "did" in record
        else record["bundle_id"]
    )
    self_uri = "drs://" + flask.current_app.hostname + "/" + did
    name = record["file_name"] if "file_name" in record else record["name"]
    created_time = (
        record["created_date"] if "created_date" in record else record["created_time"]
    )
    version = (
        record["rev"]
        if "rev" in record
        else record["version"]
        if "version" in record
        else ""
    )
    updated_date = (
        record["updated_date"] if "updated_date" in record else record["updated_time"]
    )
    form = record["form"] if "form" in record else "bundle"
    description = record["description"] if "description" in record else None
    # SECURITY FIX: "aliases" is stored as a Python-literal string; parse it
    # with ast.literal_eval instead of eval() so database contents can never
    # execute arbitrary code.
    alias = (
        record["alias"]
        if "alias" in record
        else ast.literal_eval(record["aliases"])
        if "aliases" in record
        else []
    )
    drs_object = {
        "id": did,
        "mime_type": "application/json",
        "name": name,
        "created_time": created_time,
        "updated_time": updated_date,
        "size": record["size"],
        "aliases": alias,
        "contents": [],
        "self_uri": self_uri,
        "version": version,
        "form": form,
        "checksums": [],
        # FIX: the original dict literal set "description" twice; only the
        # last value ever took effect, so the duplicate was removed.
        "description": description,
    }
    # Bundles carry their children in bundle_data; expand controls whether
    # grandchildren are included.
    for bundle in record.get("bundle_data", []):
        bundle_object = bundle_to_drs(bundle, expand=expand, is_content=True)
        if not expand:
            bundle_object.pop("contents", None)
        drs_object["contents"].append(bundle_object)

    # access_methods mapping: one entry per storage URL, typed by scheme
    # (s3, gs, ftp, gsiftp, globus, htsget, https, file)
    if "urls" in record:
        drs_object["access_methods"] = []
        for location in record["urls"]:
            location_type = location.split(":")[0]
            drs_object["access_methods"].append(
                {
                    "type": location_type,
                    "access_url": {"url": location},
                    "access_id": location_type,
                    "region": "",
                }
            )

    # parse out checksums
    drs_object["checksums"] = parse_checksums(record, drs_object)
    return drs_object
def bundle_to_drs(record, expand=False, is_content=False):
    """
    Convert a bundle record to its DRS representation.

    Args:
        record (dict): json object record
        expand (bool): show contents of the descendants
        is_content (bool): is an expanded content in a bundle

    Returns:
        dict: DRS object for the bundle
    """
    import ast

    if "id" in record:
        did = record["id"]
    elif "did" in record:
        did = record["did"]
    else:
        did = record["bundle_id"]
    drs_uri = "drs://" + flask.current_app.hostname + "/" + did
    name = record["file_name"] if "file_name" in record else record["name"]
    drs_object = {
        "id": did,
        "name": name,
        "drs_uri": drs_uri,
        "contents": [],
    }
    if "contents" in record:
        contents = record["contents"]
    elif "bundle_data" in record:
        contents = record["bundle_data"]
    else:
        contents = []
    if not expand and isinstance(contents, list):
        # Collapsed view: strip nested contents from immediate children.
        for content in contents:
            if isinstance(content, dict):
                content.pop("contents", None)
    drs_object["contents"] = contents
    if not is_content:
        # Show these only if its the leading bundle
        description = record["description"] if "description" in record else ""
        if "alias" in record:
            aliases = record["alias"]
        elif "aliases" in record:
            # ast.literal_eval instead of eval: "aliases" is stored as a
            # Python literal string and eval would execute arbitrary code.
            aliases = ast.literal_eval(record["aliases"])
        else:
            aliases = []
        version = record["version"] if "version" in record else ""
        drs_object["checksums"] = parse_checksums(record, drs_object)
        created_time = (
            record["created_date"]
            if "created_date" in record
            else record.get("created_time")
        )
        updated_time = (
            record["updated_date"]
            if "updated_date" in record
            else record.get("updated_time")
        )
        if created_time:
            drs_object["created_time"] = created_time
        if updated_time:
            drs_object["updated_time"] = updated_time
        drs_object["size"] = record["size"]
        drs_object["aliases"] = aliases
        drs_object["description"] = description
        drs_object["version"] = version
    return drs_object
def parse_checksums(record, drs_object):
    """
    Create valid checksums format from a DB object -
    either a record ("hashes") or a bundle ("checksum")
    """
    results = []
    if "hashes" in record:
        # Record form: {"hashes": {algorithm: digest, ...}}
        for algorithm, digest in record["hashes"].items():
            results.append({"checksum": digest, "type": algorithm})
    elif "checksum" in record:
        # Bundle form: JSON-encoded list of {"checksum", "type"} dicts.
        try:
            parsed = json.loads(record["checksum"])
        except json.decoder.JSONDecodeError:
            # TODO: Remove the code after fixing the record["checksum"] format
            parsed = [{"checksum": record["checksum"], "type": "md5"}]
        results.extend(
            {"checksum": entry["checksum"], "type": entry["type"]}
            for entry in parsed
        )
    return results
@blueprint.errorhandler(UserError)
def handle_user_error(err):
    """Map UserError to a 400 (bad request) JSON response."""
    return flask.jsonify({"msg": str(err), "status_code": 400}), 400
@blueprint.errorhandler(AuthzError)
def handle_authz_error(err):
    """Map AuthzError to a 401 (unauthorized) JSON response."""
    return flask.jsonify({"msg": str(err), "status_code": 401}), 401
@blueprint.errorhandler(AuthError)
def handle_requester_auth_error(err):
    """Map AuthError to a 403 (forbidden) JSON response."""
    return flask.jsonify({"msg": str(err), "status_code": 403}), 403
@blueprint.errorhandler(IndexNoRecordFound)
def handle_no_index_record_error(err):
    """Map IndexNoRecordFound to a 404 (not found) JSON response."""
    return flask.jsonify({"msg": str(err), "status_code": 404}), 404
@blueprint.errorhandler(IndexdUnexpectedError)
def handle_unexpected_error(err):
    """Map IndexdUnexpectedError to the status code it carries."""
    status = err.code
    return flask.jsonify({"msg": err.message, "status_code": status}), status
@blueprint.record
def get_config(setup_state):
    """Capture the index driver from app config at blueprint registration."""
    blueprint.index_driver = setup_state.app.config["INDEX"]["driver"]
| |
#!/usr/bin/env python
import os, os.path
# Generates resource file from each po file.
# And also other configuration stuff too.
desc_h = """<?xml version='1.0' encoding='UTF-8'?>
<description xmlns="http://openoffice.org/extensions/description/2006"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:d="http://openoffice.org/extensions/description/2006">
<identifier value="mytools.calc.WatchWindow" />
<version value="{VERSION}" />
<dependencies>
<d:OpenOffice.org-minimal-version value="3.4" d:name="OpenOffice.org 3.4" />
<d:OpenOffice.org-maximal-version value="4.0" d:name="OpenOffice.org 4.0" />
</dependencies>
<registration>
<simple-license accept-by="admin" default-license-id="this" suppress-on-update="true" suppress-if-required="true">
<license-text xlink:href="LICENSE" lang="en" license-id="this" />
</simple-license>
</registration>
<display-name>
{NAMES}
</display-name>
<extension-description>
{DESCRIPTIONS}
</extension-description>
<update-information>
<src xlink:href="https://raw.github.com/hanya/WatchingWindow/master/files/WatchingWindow.update.xml"/>
</update-information>
</description>"""
update_feed = """<?xml version="1.0" encoding="UTF-8"?>
<description xmlns="http://openoffice.org/extensions/update/2006"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:d="http://openoffice.org/extensions/description/2006">
<identifier value="mytools.calc.WatchWindow" />
<version value="{VERSION}" />
<dependencies>
<d:OpenOffice.org-minimal-version value="4.0" d:name="OpenOffice.org 4.0" />
</dependencies>
<update-download>
<src xlink:href="https://raw.github.com/hanya/WatchingWindow/master/files/WatchingWindow-{VERSION}.oxt"/>
</update-download>
</description>
"""
def genereate_description(d):
    """Render description.xml content from the per-locale string tables.

    Also writes the descriptions/desc_<lang>.txt files referenced by the
    generated XML.  (Python 2: values are unicode and must be encoded.)
    """
    version = read_version()
    name_tags = []
    desc_tags = []
    for lang, strings in d.iteritems():
        label = strings["id.label.ww"]
        name_tags.append(
            "<name lang=\"{LANG}\">{NAME}</name>".format(
                LANG=lang, NAME=label.encode("utf-8")))
    for lang, strings in d.iteritems():
        text = strings["id.extension.description"]
        with open("descriptions/desc_{LANG}.txt".format(LANG=lang), "w") as f:
            f.write(text.encode("utf-8"))
        desc_tags.append(
            "<src lang=\"{LANG}\" xlink:href=\"descriptions/desc_{LANG}.txt\"/>".format(
                LANG=lang))
    return desc_h.format(
        VERSION=version,
        NAMES="\n".join(name_tags),
        DESCRIPTIONS="\n".join(desc_tags))
def read_version():
    """Return the extension version string stored in the VERSION file."""
    with open("VERSION") as f:
        return f.read().strip()
config_h = """<?xml version='1.0' encoding='UTF-8'?>
<oor:component-data
xmlns:oor="http://openoffice.org/2001/registry"
xmlns:xs="http://www.w3.org/2001/XMLSchema"
oor:package="{PACKAGE}"
oor:name="{NAME}">"""
config_f = "</oor:component-data>"
class XCUData(object):
    """Accumulates lines of an .xcu configuration file and renders it.

    Subclasses set PACKAGE/NAME and implement _generate(d) to emit the
    component body between the shared header and footer.
    """

    PACKAGE = ""
    NAME = ""

    def __init__(self):
        self.lines = []

    def append(self, line):
        self.lines.append(line)

    def add_node(self, name, op=None):
        if op:
            self.append("<node oor:name=\"%s\" oor:op=\"%s\">" % (name, op))
        else:
            self.append("<node oor:name=\"%s\">" % (name,))

    def close_node(self):
        self.append("</node>")

    def add_prop(self, name, value):
        # A complete <prop> with a single unlocalized <value>.
        self.open_prop(name)
        self.append("<value>%s</value>" % (value,))
        self.close_prop()

    def open_prop(self, name):
        self.append("<prop oor:name=\"%s\">" % (name,))

    def close_prop(self):
        self.append("</prop>")

    def add_value(self, v, locale=None):
        # Python 2: v is unicode and must be encoded for the output.
        encoded = v.encode("utf-8")
        if locale:
            self.append("<value xml:lang=\"%s\">%s</value>" % (locale, encoded))
        else:
            self.append("<value>%s</value>" % (encoded,))

    def add_value_for_localse(self, name, k, d):
        # One localized <value> per locale, in sorted locale order.
        self.open_prop(name)
        for lang in sorted(d.iterkeys()):
            self.add_value(d[lang][k], lang)
        self.close_prop()

    def generate(self, d):
        """Return the full .xcu document as one string."""
        self.lines.append(config_h.format(PACKAGE=self.PACKAGE, NAME=self.NAME))
        self._generate(d)
        self.lines.append(config_f)
        return "\n".join(self.lines)
class CalcWindowStateXCU(XCUData):
    """Generates CalcWindowState.xcu registering the Watch Window panel."""

    PACKAGE = "org.openoffice.Office.UI"
    NAME = "CalcWindowState"

    def _generate(self, d):
        # UIElements/States/<tool panel resource URL>
        self.add_node("UIElements")
        self.add_node("States")
        self.add_node(
            "private:resource/toolpanel/mytools.calc/WatchWindow", "replace")
        self.add_value_for_localse("UIName", "id.label.ww", d)
        self.add_prop(
            "ImageURL",
            "vnd.sun.star.extension://mytools.calc.WatchWindow/icons/ww_24.png")
        for _ in range(3):
            self.close_node()
def extract(d, locale, lines):
    """Parse .po file lines and store translations into d, keyed by the
    "#:" reference comment (not the msgid).

    Args:
        d (dict): destination mapping, updated in place
        locale (str): locale code (unused; kept for interface compatibility)
        lines (list): lines of a .po file
    """
    msgid = msgstr = entry_id = ""
    for l in lines:
        if l[0:2] == "#,":
            pass
        elif l[0:2] == "#:":
            # "#: some.id" reference comment names the resource key.
            entry_id = l[2:].strip()
        if l[0] == "#":
            continue
        elif l.startswith("msgid"):
            msgid = l[5:]
        elif l.startswith("msgstr"):
            msgstr = l[6:].strip()
        if msgstr and entry_id:
            # Strip the surrounding quotes and unescape \" sequences.
            value = msgstr[1:-1]
            if isinstance(value, bytes):
                # Python 2 str from readlines(); decode to unicode.
                value = value.decode("utf-8")
            d[entry_id] = value.replace('\\"', '"')
def as_resource(d):
    """Render mapping d as a sorted Java .properties body.

    Non-ASCII characters are escaped as \\uXXXX so the output stays ASCII.
    (Uses items() instead of iteritems() so the helper also works under
    Python 3; behavior under Python 2 is unchanged.)
    """
    entries = []
    for key, value in d.items():
        escaped = []
        for ch in value:
            code = ord(ch)
            if code > 0x7f:
                escaped.append("\\u%04x" % code)
            else:
                escaped.append(ch)
        entries.append("%s=%s" % (key, "".join(escaped)))
    entries.sort()
    return "\n".join(entries)
def write_resource(res_path, d):
    """Write the locale mapping d to res_path as a .properties file."""
    text = as_resource(d)
    if not isinstance(text, str):
        # Python 2: as_resource returns unicode; encode before writing.
        # Under Python 3 the text is already ASCII-safe str.
        text = text.encode("utf-8")
    with open(res_path, "w") as f:
        f.write("# comment\n")
        f.write(text)
def write_update_feed():
    """Write the extension update-feed XML with the current version."""
    version = read_version()
    content = update_feed.format(VERSION=version)
    if not isinstance(content, str):
        # Python 2 unicode -> bytes; already str under Python 3.
        content = content.encode("utf-8")
    with open("./files/WatchingWindow.update.xml", "w") as f:
        f.write(content)
def main():
    """Generate .properties resources, CalcWindowState.xcu, description.xml
    and the update feed from the .po files under ./po."""
    prefix = "strings_"
    res_dir = "resources"
    locales = {}
    po_dir = os.path.join(".", "po")
    for po in os.listdir(po_dir):
        if po.endswith(".po"):
            locale = po[:-3]
            try:
                lines = open(os.path.join(po_dir, po)).readlines()
            except IOError:
                # Original printed a format string with no operand and then
                # fell through to use an undefined 'lines'; report and skip.
                print("%s can not be opened" % po)
                continue
            d = {}
            extract(d, locale, lines)
            locales[locale] = d
    resources_dir = os.path.join(".", res_dir)
    for locale, d in locales.items():
        write_resource(os.path.join(resources_dir,
            "%s%s.properties" % (prefix, locale.replace("-", "_"))), d)
    s = CalcWindowStateXCU().generate(locales)
    with open("CalcWindowState.xcu", "w") as f:
        f.write(s)
    s = genereate_description(locales)
    with open("description.xml", "w") as f:
        f.write(s)
    write_update_feed()
# Script entry point.
if __name__ == "__main__":
    main()
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys, zipfile, _jcc
# NOTE: lexicographic comparison of dotted versions is fragile in general
# (e.g. '2.10' < '2.4'), but adequate for the Python 2.x range targeted.
python_ver = '%d.%d.%d' %(sys.version_info[0:3])
if python_ver < '2.4':
    # Python 2.3 compatibility: no builtin set, no str.rsplit, and
    # list.sort() only accepts a comparison function.
    from sets import Set as set

    def split_pkg(string, sep):
        # Split off the last sep-separated component: (package, name).
        parts = string.split(sep)
        if len(parts) > 1:
            return sep.join(parts[:-1]), parts[-1]
        return parts

    def sort(list, fn=None, key=None):
        # Emulate sort(key=...) by wrapping the key in a cmp function.
        if fn:
            list.sort(fn)
        elif key:
            def fn(x, y):
                return cmp(key(x), key(y))
            list.sort(fn)
        else:
            list.sort()

else:
    def split_pkg(string, sep):
        # (package, name) via str.rsplit, available from Python 2.4.
        return string.rsplit(sep, 1)

    def sort(list, fn=None, key=None):
        # In-place sort accepting either a cmp function or a key function.
        if fn:
            list.sort(cmp=fn)
        elif key:
            list.sort(key=key)
        else:
            list.sort()
class JavaError(Exception):
    """Python-side wrapper for an exception thrown by Java code."""

    def getJavaException(self):
        # The wrapped Java throwable is the first constructor argument.
        return self.args[0]

    def __str__(self):
        buf = StringWriter()
        self.getJavaException().printStackTrace(PrintWriter(buf))
        parts = (super(JavaError, self).__str__(),
                 "Java stacktrace:",
                 str(buf))
        return '\n'.join(parts)
class InvalidArgsError(Exception):
    # Registered with the _jcc extension via _set_exception_types() below;
    # presumably raised when call arguments match no wrapped Java
    # signature — the C extension decides when (TODO confirm).
    pass
# Register the exception types with the C extension so it can raise them.
_jcc._set_exception_types(JavaError, InvalidArgsError)

# Keep the raw lookup under a private name; findClass() below wraps it.
from _jcc import findClass as _findClass
from _jcc import *
def findClass(className):
    """Look up a Java class by JNI name (e.g. 'java/lang/Object').

    Re-raises any lookup error after reporting the class name, and raises
    ValueError when the class is simply not found.
    """
    try:
        cls = _findClass(className)
    except:
        # Add context before re-raising whatever the extension threw.
        print >>sys.stderr, "While loading", className
        raise
    if cls is None:
        raise ValueError, className
    return cls
INDENT = ' '
HALF_INDENT = ' '
PRIMITIVES = { 'boolean': 'Z',
'byte': 'B',
'char': 'C',
'double': 'D',
'float': 'F',
'int': 'I',
'long': 'J',
'short': 'S',
'void': 'V' }
RESERVED = set(['delete', 'and', 'or', 'not', 'xor', 'union', 'register',
'const', 'bool', 'operator', 'typeof', 'asm', 'mutable',
'NULL', 'DOMAIN', 'IGNORE'])
RENAME_METHOD_SUFFIX = '_'
RENAME_FIELD_SUFFIX = '__'
def cppname(name):
    """Return name, suffixed with '$' when it collides with a C++ keyword."""
    return name + '$' if name in RESERVED else name
def cppnames(names):
    """Apply cppname() to every name in the sequence, returning a list."""
    result = []
    for name in names:
        result.append(cppname(name))
    return result
def absname(names):
    """Join name parts into a fully qualified C++ name ('::a::b')."""
    if not names:
        return ''
    return "::" + "::".join(names)
def typename(cls, current, const):
    """Return the C++ spelling for Java class cls.

    'current' is the class being generated (referenced unqualified);
    'const' selects a 'const T &' spelling for non-primitive types.
    """
    if cls.isArray():
        inner = typename(cls.getComponentType(), current, False)
        name = 'JArray< %s >' % (inner,)
    elif cls.isPrimitive():
        name = cls.getName()
        if name != 'void':
            name = 'j' + name
        # Primitives are always passed by value.
        const = False
    elif cls == current:
        name = cppname(cls.getName().split('.')[-1])
    else:
        name = absname(cppnames(cls.getName().split('.')))
    if const:
        return "const %s &" % (name,)
    return name
def argnames(params, cls):
    """Return (parameter-declaration string, call-argument string)
    for a Java parameter list, naming arguments a0, a1, ..."""
    if not params:
        return '', ''
    decl_parts = []
    call_parts = []
    for i in xrange(len(params)):
        decl_parts.append("%s a%d" % (typename(params[i], cls, True), i))
        # Non-primitive wrappers pass the underlying jobject (.this$).
        suffix = not params[i].isPrimitive() and '.this$' or ''
        call_parts.append('a%d%s' % (i, suffix))
    return ', '.join(decl_parts), ', ' + ', '.join(call_parts)
def line(out, indent=0, string='', *args):
    """Write one %-formatted line to out, indented by 'indent' levels."""
    text = (INDENT * indent) + (string % args) + '\n'
    out.write(text)
def known(cls, typeset, declares, packages, excludes, generics):
    """Decide whether cls is (or becomes) part of the wrapped type set.

    Returns True when the type is primitive, already in typeset, or
    belongs to a requested package — in the latter cases cls (and its
    superclasses) are added to typeset/declares as a side effect.
    """
    if generics:
        if Class.instance_(cls):
            cls = Class.cast_(cls)
        elif ParameterizedType.instance_(cls):
            pt = ParameterizedType.cast_(cls)
            # A parameterized type is known only if its raw type and all
            # non-variable type arguments are known.
            if not known(pt.getRawType(), typeset, declares, packages, excludes,
                         True):
                return False
            for ta in pt.getActualTypeArguments():
                if TypeVariable.instance_(ta):
                    continue
                if not known(ta, typeset, declares, packages, excludes, True):
                    return False
            return True
        elif WildcardType.instance_(cls):
            wc = WildcardType.cast_(cls)
            for ub in wc.getUpperBounds():
                if not known(ub, typeset, declares, packages, excludes, True):
                    return False
            return True
        elif TypeVariable.instance_(cls):
            for bounds in TypeVariable.cast_(cls).getBounds():
                if not known(bounds, typeset, declares, packages, excludes,
                             True):
                    return False
            return True
        elif GenericArrayType.instance_(cls):
            return known(GenericArrayType.cast_(cls).getGenericComponentType(),
                         typeset, declares, packages, excludes, True)
        else:
            raise TypeError, (cls, cls.getClass())
    # Arrays are known iff their ultimate component type is known.
    while cls.isArray():
        cls = cls.getComponentType()
    className = cls.getName()
    if className.split('$', 1)[0] in excludes or className in excludes:
        return False
    if cls.isPrimitive():
        return True
    if cls in typeset:
        declares.add(cls)
        return True
    if split_pkg(className, '.')[0] in packages:
        typeset.add(cls)
        declares.add(cls)
        # Pull in the whole superclass chain as well.
        cls = cls.getSuperclass()
        while cls and cls not in typeset:
            typeset.add(cls)
            cls = cls.getSuperclass()
        return True
    return False
def addRequiredTypes(cls, typeset, generics):
    """Recursively add cls, its superclasses and (with generics) its type
    arguments to typeset so wrappers get generated for all of them."""
    if generics:
        if Class.instance_(cls):
            cls = Class.cast_(cls)
            if not (cls.isPrimitive() or cls in typeset):
                if cls.isArray():
                    addRequiredTypes(cls.getComponentType(), typeset, True)
                else:
                    typeset.add(cls)
                    cls = cls.getGenericSuperclass()
                    if cls is not None:
                        addRequiredTypes(cls, typeset, True)
        elif ParameterizedType.instance_(cls):
            pt = ParameterizedType.cast_(cls)
            addRequiredTypes(pt.getRawType(), typeset, True)
            for ta in pt.getActualTypeArguments():
                addRequiredTypes(ta, typeset, True)
        elif GenericArrayType.instance_(cls):
            gat = GenericArrayType.cast_(cls)
            addRequiredTypes(gat.getGenericComponentType(), typeset, True)
        elif not (TypeVariable.instance_(cls) or WildcardType.instance_(cls)):
            # Type variables and wildcards are skipped; anything else is
            # an unexpected reflection type.
            raise NotImplementedError, repr(cls)
    else:
        if cls not in typeset:
            typeset.add(cls)
            cls = cls.getSuperclass()
            if cls is not None:
                addRequiredTypes(cls, typeset, False)
def find_method(cls, name, params):
    """Find a concrete, callable method matching (name, params).

    Tries getMethod() first, then getDeclaredMethod(), then repeats on
    each superclass.  Returns None when the method is missing, abstract
    or private.
    """
    declared = False
    while True:
        try:
            if declared:
                method = cls.getDeclaredMethod(name, params)
            else:
                method = cls.getMethod(name, params)
            break
        except JavaError, e:
            if (e.getJavaException().getClass().getName() == 'java.lang.NoSuchMethodException'):
                if not declared:
                    # Retry the same class with getDeclaredMethod().
                    declared = True
                else:
                    # Exhausted this class; move up the hierarchy.
                    cls = cls.getSuperclass()
                    if not cls:
                        return None
                continue
            raise
    modifiers = method.getModifiers()
    if Modifier.isAbstract(modifiers):
        return None
    if Modifier.isPrivate(modifiers):
        return None
    return method
def signature(fn, argsOnly=False):
    """Return the JNI signature string for a Constructor, Method or Field.

    With argsOnly=True only the parenthesized parameter part is returned.
    """
    def jni(cls):
        # Array dimensions become leading '[' characters.
        prefix = ''
        while cls.isArray():
            prefix += '['
            cls = cls.getComponentType()
        if cls.isPrimitive():
            return prefix + PRIMITIVES[cls.getName()]
        return '%sL%s;' % (prefix, cls.getName().replace('.', '/'))
    if isinstance(fn, Constructor):
        returnType = 'V'
    elif isinstance(fn, Method):
        returnType = jni(fn.getReturnType())
    elif isinstance(fn, Field):
        # Fields have no parameter list, just their type.
        return jni(fn.getType())
    args = ''.join([jni(param) for param in fn.getParameterTypes()])
    if argsOnly:
        return '(%s)' % (args,)
    return '(%s)%s' % (args, returnType)
def forward(out, namespace, indent):
    """Emit C++ forward declarations for a nested namespace/class tree.

    A value of True marks a class leaf; any other value is a nested
    namespace dict.
    """
    for name, entries in namespace.iteritems():
        if entries is not True:
            line(out, indent, 'namespace %s {', cppname(name))
            forward(out, entries, indent + 1)
            line(out, indent, '}')
        else:
            line(out, indent, 'class %s;', cppname(name))
def expandjar(path):
    """Yield the dotted class names of all .class entries in a jar file.

    The jar is closed even when the generator is abandoned before
    exhaustion (the original leaked the handle in that case).
    """
    jar = zipfile.ZipFile(path, 'r')
    try:
        for member in jar.infolist():
            f = member.filename
            if f.endswith('.class'):
                # Strip the '.class' suffix explicitly; the original
                # split('.')[0] truncated paths containing other dots.
                yield f[:-6].replace('/', '.')
    finally:
        jar.close()
def jcc(args):
    """Command-line entry point: parse args, start the JVM, then generate
    (and optionally build) the C++/Python wrappers for the requested Java
    classes.

    NOTE: Python 2 only (raise-with-comma, tuple parameters, file(),
    print statements).
    """
    # --- accumulators for command-line options -------------------------
    classNames = set()
    listedClassNames = set()
    packages = set()
    jars = []
    classpath = [_jcc.CLASSPATH]
    libpath = []
    vmargs = ['-Djava.awt.headless=true']
    moduleName = None
    modules = []
    build = False
    install = False
    recompile = False
    egg_info = False
    output = 'build'
    debug = False
    excludes = []
    version = ''
    mappings = {}
    sequences = {}
    renames = {}
    env = None
    wrapperFiles = 1
    prefix = None
    root = None
    install_dir = None
    home_dir = None
    use_distutils = False
    shared = False
    dist = False
    wininst = False
    find_jvm_dll = False
    compiler = None
    generics = hasattr(_jcc, "Type")
    arch = []
    resources = []
    imports = {}
    extra_setup_args = []
    initvm_args = {}
    # --- option parsing (bare arguments are class names) ---------------
    i = 1
    while i < len(args):
        arg = args[i]
        if arg.startswith('-'):
            if arg == '--jar':
                i += 1
                classpath.append(args[i])
                classNames.update(expandjar(args[i]))
                jars.append(args[i])
            elif arg == '--include':
                i += 1
                classpath.append(args[i])
                jars.append(args[i])
            elif arg == '--package':
                i += 1
                packages.add(args[i])
            elif arg == '--classpath':
                i += 1
                classpath.append(args[i])
            elif arg == '--libpath':
                i += 1
                libpath.append(args[i])
            elif arg == '--vmarg':
                i += 1
                vmargs.append(args[i])
            elif arg == '--maxheap':
                i += 1
                initvm_args['maxheap'] = args[i]
            elif arg == '--python':
                from python import python, module
                i += 1
                moduleName = args[i]
            elif arg == '--module':
                i += 1
                modules.append(args[i])
            elif arg == '--build':
                from python import compile
                build = True
            elif arg == '--install':
                from python import compile
                install = True
            elif arg == '--compile':
                from python import compile
                recompile = True
            elif arg == '--egg-info':
                from python import compile
                egg_info = True
            elif arg == '--extra-setup-arg':
                i += 1
                extra_setup_args.append(args[i])
            elif arg == '--output':
                i += 1
                output = args[i]
            elif arg == '--debug':
                debug = True
            elif arg == '--exclude':
                i += 1
                excludes.append(args[i])
            elif arg == '--version':
                i += 1
                version = args[i]
            elif arg == '--mapping':
                mappings[args[i + 1]] = args[i + 2]
                i += 2
            elif arg == '--sequence':
                sequences[args[i + 1]] = (args[i + 2], args[i + 3])
                i += 3
            elif arg == '--rename':
                i += 1
                renames.update(dict([arg.split('=')
                                     for arg in args[i].split(',')]))
            elif arg == '--files':
                i += 1
                wrapperFiles = args[i]
                if wrapperFiles != 'separate':
                    wrapperFiles = int(wrapperFiles)
            elif arg == '--prefix':
                i += 1
                prefix = args[i]
            elif arg == '--root':
                i += 1
                root = args[i]
            elif arg == '--install-dir':
                i += 1
                install_dir = args[i]
            elif arg == '--home':
                i += 1
                home_dir = args[i]
            elif arg == '--use-distutils':
                use_distutils = True
            elif arg == '--shared':
                shared = True
            elif arg == '--bdist':
                from python import compile
                dist = True
            elif arg == '--wininst':
                from python import compile
                wininst = True
                dist = True
            elif arg == '--compiler':
                i += 1
                compiler = args[i]
            elif arg == '--reserved':
                i += 1
                RESERVED.update(args[i].split(','))
            elif arg == '--arch':
                i += 1
                arch.append(args[i])
            elif arg == '--no-generics':
                generics = False
            elif arg == '--find-jvm-dll':
                find_jvm_dll = True
            elif arg == '--resources':
                i += 1
                resources.append(args[i])
            elif arg == '--import':
                i += 1
                imports[args[i]] = ()
            else:
                raise ValueError, "Invalid argument: %s" %(arg)
        else:
            classNames.add(arg)
            listedClassNames.add(arg)
        i += 1
    # --- start the JVM -------------------------------------------------
    if libpath:
        vmargs.append('-Djava.library.path=' + os.pathsep.join(libpath))
    initvm_args['maxstack'] = '512k'
    initvm_args['vmargs'] = vmargs
    env = initVM(os.pathsep.join(classpath) or None, **initvm_args)
    typeset = set()
    excludes = set(excludes)
    # --import requires --shared; imported modules become {module: set()}.
    if imports:
        if shared:
            loadClassNamesFromImportedModule(imports, renames)
            imports = dict((__import__(import_), set()) for import_ in imports)
        else:
            raise ValueError, "--shared must be used when using --import"
    if recompile or not build and (install or dist or egg_info):
        # Recompile/install an already generated tree without regenerating.
        if moduleName is None:
            raise ValueError, 'module name not specified (use --python)'
        else:
            compile(env, os.path.dirname(args[0]), output, moduleName,
                    install, dist, debug, jars, version,
                    prefix, root, install_dir, home_dir, use_distutils,
                    shared, compiler, modules, wininst, find_jvm_dll,
                    arch, generics, resources, imports, egg_info,
                    extra_setup_args)
    else:
        # --- collect the classes already wrapped by imported modules ---
        if imports:
            def walk((include, importset), dirname, names):
                for name in names:
                    if name.endswith('.h'):
                        className = os.path.join(dirname[len(include) + 1:],
                                                 name[:-2])
                        if os.path.sep != '/':
                            className = className.replace(os.path.sep, '/')
                        importset.add(findClass(className))
            for import_, importset in imports.iteritems():
                env._addClassPath(import_.CLASSPATH)
                include = os.path.join(import_.__dir__, 'include')
                os.path.walk(include, walk, (include, importset))
                typeset.update(importset)
        # --- seed the type set with always-needed core classes ---------
        typeset.add(findClass('java/lang/Object'))
        typeset.add(findClass('java/lang/Class'))
        typeset.add(findClass('java/lang/String'))
        typeset.add(findClass('java/lang/Throwable'))
        typeset.add(findClass('java/lang/Exception'))
        typeset.add(findClass('java/lang/RuntimeException'))
        if moduleName:
            typeset.add(findClass('java/lang/Number'))
            typeset.add(findClass('java/lang/Boolean'))
            typeset.add(findClass('java/lang/Byte'))
            typeset.add(findClass('java/lang/Character'))
            typeset.add(findClass('java/lang/Double'))
            typeset.add(findClass('java/lang/Float'))
            typeset.add(findClass('java/lang/Integer'))
            typeset.add(findClass('java/lang/Long'))
            typeset.add(findClass('java/lang/Short'))
            typeset.add(findClass('java/util/Iterator'))
            typeset.add(findClass('java/util/Enumeration'))
            typeset.add(findClass('java/io/StringWriter'))
            typeset.add(findClass('java/io/PrintWriter'))
            typeset.add(findClass('java/io/Writer'))
        packages.add('java.lang')
        # Expand the requested classes into the full closure of types.
        for className in classNames:
            if className.split('$', 1)[0] in excludes or className in excludes:
                continue
            cls = findClass(className.replace('.', '/'))
            if (Modifier.isPublic(cls.getModifiers()) or
                className in listedClassNames):
                addRequiredTypes(cls, typeset, generics)
        # --- decide output layout --------------------------------------
        _dll_export = ''
        if moduleName:
            cppdir = os.path.join(output, '_%s' %(moduleName))
            if shared and sys.platform == 'win32':
                _dll_export = "_dll_%s " %(moduleName)
        else:
            cppdir = output
        allInOne = wrapperFiles != 'separate'
        if allInOne:
            if not os.path.isdir(cppdir):
                os.makedirs(cppdir)
            if wrapperFiles <= 1:
                out_cpp = file(os.path.join(cppdir, '__wrap__.cpp'), 'w')
            else:
                fileCount = 1
                fileName = '__wrap%02d__.cpp' %(fileCount)
                out_cpp = file(os.path.join(cppdir, fileName), 'w')
        done = set()
        pythonNames = {}
        for importset in imports.itervalues():
            done.update(importset)
            if moduleName:
                for cls in importset:
                    name = getPythonicClassName(cls.getName(), renames, pythonNames)
                    #if name == 'SKIP':
                    #    pythonNames[name] = cls
                    #    continue
                    #elif name in pythonNames:
                    #    raise ValueError, ('Python class name \'%s\' already in use by: %s;\n use: --rename %s=<someNewName> %s' %
                    #                       (name, pythonNames[name].getName(), cls.getName(), get_substitution_help(cls.getName())))
                    #else:
                    #    pythonNames[name] = cls
        # --- generate wrappers until the type set reaches a fixpoint ---
        todo = typeset - done
        if allInOne and wrapperFiles > 1:
            classesPerFile = max(1, len(todo) / wrapperFiles)
        classCount = 0
        while todo:
            for cls in todo:
                classCount += 1
                className = cls.getName()
                names = className.split('.')
                dir_ = os.path.join(cppdir, *names[:-1])
                if not os.path.isdir(dir_):
                    os.makedirs(dir_)
                fileName = os.path.join(dir_, names[-1])
                out_h = file(fileName + '.h', "w")
                line(out_h, 0, '#ifndef %s_H', '_'.join(names))
                line(out_h, 0, '#define %s_H', '_'.join(names))
                (superCls, constructors, methods, protectedMethods,
                 methodNames, fields, instanceFields, declares) = \
                    header(env, out_h, cls, typeset, packages, excludes,
                           generics, _dll_export)
                if not allInOne:
                    out_cpp = file(fileName + '.cpp', 'w')
                names, superNames = code(env, out_cpp,
                                         cls, superCls, constructors,
                                         methods, protectedMethods,
                                         methodNames, fields, instanceFields,
                                         declares, typeset)
                if moduleName:
                    name = getPythonicClassName(className, renames, pythonNames)
                    if name == 'SKIP':
                        print 'skipping', cls
                        continue
                    elif name in pythonNames:
                        raise ValueError, ('Python class name \'%s\' already in use by: %s;\n use: --rename %s=<someNewName> %s'
                                           % (name, pythonNames[name].getName(), cls.getName(), get_substitution_help(className)))
                    else:
                        pythonNames[name] = cls
                    python(env, out_h, out_cpp,
                           cls, superCls, names, superNames,
                           constructors, methods, protectedMethods,
                           methodNames, fields, instanceFields,
                           mappings.get(className), sequences.get(className),
                           renames.get(className),
                           declares, typeset, moduleName, generics,
                           _dll_export)
                line(out_h)
                line(out_h, 0, '#endif')
                out_h.close()
                if not allInOne:
                    out_cpp.close()
                elif wrapperFiles > 1:
                    # Rotate to the next __wrapNN__.cpp when full.
                    if classCount >= classesPerFile:
                        out_cpp.close()
                        fileCount += 1
                        fileName = '__wrap%02d__.cpp' %(fileCount)
                        out_cpp = file(os.path.join(cppdir, fileName), 'w')
                        classCount = 0
            done.update(todo)
            todo = typeset - done
        if allInOne:
            out_cpp.close()
        if moduleName:
            out = file(os.path.join(cppdir, moduleName) + '.cpp', 'w')
            module(out, allInOne, done, imports, cppdir, moduleName,
                   shared, generics)
            out.close()
        if build or install or dist or egg_info:
            compile(env, os.path.dirname(args[0]), output, moduleName,
                    install, dist, debug, jars, version,
                    prefix, root, install_dir, home_dir, use_distutils,
                    shared, compiler, modules, wininst, find_jvm_dll,
                    arch, generics, resources, imports, egg_info,
                    extra_setup_args)
def header(env, out, cls, typeset, packages, excludes, generics, _dll_export):
    """Write the C++ header for Java class cls to out.

    Returns (superCls, constructors, methods, protectedMethods,
    methodNames, fields, instanceFields, declares) for use by code().
    """
    names = cls.getName().split('.')
    superCls = cls.getSuperclass()
    declares = set([cls.getClass()])
    # --- collect implemented interfaces not already covered by super ---
    interfaces = []
    if generics:
        for interface in cls.getGenericInterfaces():
            if Class.instance_(interface):
                pt = None
                interface = Class.cast_(interface)
            elif ParameterizedType.instance_(interface):
                pt = ParameterizedType.cast_(interface)
                interface = Class.cast_(pt.getRawType())
            else:
                raise NotImplementedError, repr(interface)
            if superCls and interface.isAssignableFrom(superCls):
                continue
            if known(interface, typeset, declares, packages, excludes, False):
                interfaces.append(interface)
                if pt is not None:
                    for ta in pt.getActualTypeArguments():
                        addRequiredTypes(ta, typeset, True)
    else:
        for interface in cls.getInterfaces():
            if superCls and interface.isAssignableFrom(superCls):
                continue
            if known(interface, typeset, declares, packages, excludes, False):
                interfaces.append(interface)
    # Interfaces map to classes in C++; the first one plays superclass.
    if cls.isInterface():
        if interfaces:
            superCls = interfaces.pop(0)
        else:
            superCls = findClass('java/lang/Object')
        superClsName = superCls.getName()
    elif superCls:
        superClsName = superCls.getName()
    else:
        superClsName = 'JObject'
    # --- public constructors whose parameter types are all known -------
    constructors = []
    for constructor in cls.getDeclaredConstructors():
        if Modifier.isPublic(constructor.getModifiers()):
            if generics:
                genericParams = constructor.getGenericParameterTypes()
                params = constructor.getParameterTypes()
                # It appears that the implicit instance-of-the-declaring-class
                # parameter of a non-static inner class is missing from
                # getGenericParameterTypes()
                if len(params) == len(genericParams) + 1:
                    params[1:] = genericParams
                else:
                    params = genericParams
                # Skip copy-constructor-like signatures (single self param).
                if len(params) == 1:
                    if params[0] == cls:
                        continue
                    if ParameterizedType.instance_(params[0]):
                        param = ParameterizedType.cast_(params[0])
                        if param.getRawType() == cls:
                            continue
            else:
                params = constructor.getParameterTypes()
                if len(params) == 1 and params[0] == cls:
                    continue
            for param in params:
                if not known(param, typeset, declares, packages, excludes,
                             generics):
                    break
            else:
                constructors.append(constructor)
    sort(constructors, key=lambda x: len(x.getParameterTypes()))
    # --- public methods, deduplicated by name+argument signature -------
    methods = {}
    protectedMethods = []
    for method in cls.getDeclaredMethods():
        modifiers = method.getModifiers()
        if Modifier.isPublic(modifiers):
            if generics:
                returnType = method.getGenericReturnType()
            else:
                returnType = method.getReturnType()
            if not known(returnType, typeset, declares, packages, excludes,
                         generics):
                continue
            sig = "%s:%s" %(method.getName(), signature(method, True))
            if sig in methods and returnType != cls:
                continue
            if generics:
                params = method.getGenericParameterTypes()
            else:
                params = method.getParameterTypes()
            for param in params:
                if not known(param, typeset, declares, packages, excludes,
                             generics):
                    break
            else:
                methods[sig] = method
        elif Modifier.isProtected(modifiers):
            protectedMethods.append(method)
    def _compare(m0, m1):
        # Order by name, then by arity, for stable generated output.
        value = cmp(m0.getName(), m1.getName())
        if value == 0:
            value = len(m0.getParameterTypes()) - len(m1.getParameterTypes())
        return value
    methods = methods.values()
    sort(methods, fn=_compare)
    methodNames = set([cppname(method.getName()) for method in methods])
    # Declared exception types also need wrapping (side effect of known()).
    for constructor in constructors:
        if generics:
            exceptions = constructor.getGenericExceptionTypes()
        else:
            exceptions = constructor.getExceptionTypes()
        for exception in exceptions:
            known(exception, typeset, declares, packages, excludes, generics)
    for method in methods:
        if generics:
            exceptions = method.getGenericExceptionTypes()
        else:
            exceptions = method.getExceptionTypes()
        for exception in exceptions:
            known(exception, typeset, declares, packages, excludes, generics)
    # --- public fields, split into static and instance -----------------
    fields = []
    instanceFields = []
    for field in cls.getDeclaredFields():
        modifiers = field.getModifiers()
        if Modifier.isPublic(modifiers):
            if generics:
                fieldType = field.getGenericType()
            else:
                fieldType = field.getType()
            if not known(fieldType, typeset, declares, packages, excludes,
                         generics):
                continue
            if Modifier.isStatic(modifiers):
                fields.append(field)
            else:
                instanceFields.append(field)
    sort(fields, key=lambda x: x.getName())
    sort(instanceFields, key=lambda x: x.getName())
    # --- emit the header text ------------------------------------------
    line(out)
    superNames = superClsName.split('.')
    line(out, 0, '#include "%s.h"', '/'.join(superNames))
    line(out, 0)
    # Forward-declare every referenced type inside its namespace tree.
    namespaces = {}
    for declare in declares:
        namespace = namespaces
        if declare not in (cls, superCls):
            declareNames = declare.getName().split('.')
            for declareName in declareNames[:-1]:
                namespace = namespace.setdefault(declareName, {})
            namespace[declareNames[-1]] = True
    forward(out, namespaces, 0)
    line(out, 0, 'template<class T> class JArray;')
    indent = 0;
    line(out)
    for name in names[:-1]:
        line(out, indent, 'namespace %s {', cppname(name))
        indent += 1
    line(out)
    if superClsName == 'JObject':
        line(out, indent, 'class %s%s : public JObject {',
             _dll_export, cppname(names[-1]))
    else:
        line(out, indent, 'class %s%s : public %s {',
             _dll_export, cppname(names[-1]), absname(cppnames(superNames)))
    line(out, indent, 'public:')
    indent += 1
    # Enum of method-id slots used by the generated .cpp to cache jmethodIDs.
    if methods or protectedMethods or constructors:
        line(out, indent, 'enum {')
        for constructor in constructors:
            line(out, indent + 1, 'mid_init$_%s,',
                 env.strhash(signature(constructor)))
        for method in methods:
            line(out, indent + 1, 'mid_%s_%s,', method.getName(),
                 env.strhash(signature(method)))
        for method in protectedMethods:
            line(out, indent + 1, 'mid_%s_%s,', method.getName(),
                 env.strhash(signature(method)))
        line(out, indent + 1, 'max_mid')
        line(out, indent, '};')
    if instanceFields:
        line(out)
        line(out, indent, 'enum {')
        for field in instanceFields:
            line(out, indent + 1, 'fid_%s,', field.getName())
        line(out, indent + 1, 'max_fid')
        line(out, indent, '};')
    line(out)
    line(out, indent, 'static ::java::lang::Class *class$;');
    line(out, indent, 'static jmethodID *mids$;');
    if instanceFields:
        line(out, indent, 'static jfieldID *fids$;');
    line(out, indent, 'static bool live$;');
    line(out, indent, 'static jclass initializeClass(bool);');
    line(out)
    # Wrapping and copy constructors.
    line(out, indent, 'explicit %s(jobject obj) : %s(obj) {',
         cppname(names[-1]), absname(cppnames(superNames)))
    line(out, indent + 1, 'if (obj != NULL)');
    line(out, indent + 2, 'env->getClass(initializeClass);')
    line(out, indent, '}')
    line(out, indent, '%s(const %s& obj) : %s(obj) {}',
         cppname(names[-1]), cppname(names[-1]),
         absname(cppnames(superNames)))
    if fields:
        line(out)
        for field in fields:
            fieldType = field.getType()
            fieldName = cppname(field.getName())
            if fieldName in methodNames:
                # A method with the same name would shadow the variable.
                print >>sys.stderr, "  Warning: renaming static variable '%s' on class %s to '%s%s' since it is shadowed by a method of same name." %(fieldName, '.'.join(names), fieldName, RENAME_FIELD_SUFFIX)
                fieldName += RENAME_FIELD_SUFFIX
            if fieldType.isPrimitive():
                line(out, indent, 'static %s %s;',
                     typename(fieldType, cls, False), fieldName)
            else:
                line(out, indent, 'static %s *%s;',
                     typename(fieldType, cls, False), fieldName)
    if instanceFields:
        line(out)
        for field in instanceFields:
            fieldType = field.getType()
            fieldName = field.getName()
            modifiers = field.getModifiers()
            line(out, indent, '%s _get_%s() const;',
                 typename(fieldType, cls, False), fieldName)
            if not Modifier.isFinal(modifiers):
                line(out, indent, 'void _set_%s(%s) const;',
                     fieldName, typename(fieldType, cls, True))
    if constructors:
        line(out)
        for constructor in constructors:
            params = [typename(param, cls, True)
                      for param in constructor.getParameterTypes()]
            line(out, indent, '%s(%s);', cppname(names[-1]), ', '.join(params))
    if methods:
        line(out)
        for method in methods:
            modifiers = method.getModifiers()
            if Modifier.isStatic(modifiers):
                prefix = 'static '
                const = ''
            else:
                prefix = ''
                const = ' const'
            params = [typename(param, cls, True)
                      for param in method.getParameterTypes()]
            methodName = cppname(method.getName())
            line(out, indent, '%s%s %s(%s)%s;',
                 prefix, typename(method.getReturnType(), cls, False),
                 methodName, ', '.join(params), const)
    indent -= 1
    line(out, indent, '};')
    # Close every opened namespace.
    while indent:
        indent -= 1
        line(out, indent, '}')
    return (superCls, constructors, methods, protectedMethods,
            methodNames, fields, instanceFields, declares)
def code(env, out, cls, superCls, constructors, methods, protectedMethods,
         methodNames, fields, instanceFields, declares, typeset):
    """Emit the C++ implementation (.cpp body) for one wrapped Java class.

    Writes to *out*: the #include block, nested namespace openers, the
    static member definitions (class$, mids$, fids$, live$ and static
    fields), initializeClass(), constructor wrappers, method wrappers and
    instance-field accessors, then closes all namespaces.

    Returns (names, superNames): the dotted-name components of the class
    and of its superclass ('JObject' when there is none).
    """
    className = cls.getName()
    names = className.split('.')

    if superCls:
        superClsName = superCls.getName()
    else:
        # Root of the wrapper hierarchy when the Java class has no super.
        superClsName = 'JObject'
    superNames = superClsName.split('.')

    # --- include block ---
    line(out, 0, '#include <jni.h>')
    line(out, 0, '#include "JCCEnv.h"')
    line(out, 0, '#include "%s.h"', className.replace('.', '/'))
    for declare in declares:
        if declare not in (cls, superCls):
            line(out, 0, '#include "%s.h"', declare.getName().replace('.', '/'))
    line(out, 0, '#include "JArray.h"')

    # --- open one C++ namespace per Java package component ---
    indent = 0
    line(out)
    for name in names[:-1]:
        line(out, indent, 'namespace %s {', cppname(name))
        indent += 1

    # --- static member definitions ---
    line(out)
    line(out, indent, '::java::lang::Class *%s::class$ = NULL;',
         cppname(names[-1]))
    line(out, indent, 'jmethodID *%s::mids$ = NULL;', cppname(names[-1]))
    if instanceFields:
        line(out, indent, 'jfieldID *%s::fids$ = NULL;', cppname(names[-1]))
    line(out, indent, 'bool %s::live$ = false;', cppname(names[-1]))

    for field in fields:
        fieldType = field.getType()
        fieldName = cppname(field.getName())
        if fieldName in methodNames:
            # Same renaming applied in the header when a static field is
            # shadowed by a method of the same name.
            fieldName += RENAME_FIELD_SUFFIX
        typeName = typename(fieldType, cls, False)
        if fieldType.isPrimitive():
            line(out, indent, '%s %s::%s = (%s) 0;',
                 typeName, cppname(names[-1]), fieldName, typeName)
        else:
            line(out, indent, '%s *%s::%s = NULL;',
                 typeName, cppname(names[-1]), fieldName)

    # --- initializeClass(): lazily resolves the jclass, method/field IDs
    # and static field values; with getOnly=true it only reports state ---
    line(out)
    line(out, indent, 'jclass %s::initializeClass(bool getOnly)',
         cppname(names[-1]))
    line(out, indent, '{')
    line(out, indent + 1, 'if (getOnly)')
    line(out, indent + 2, 'return (jclass) (live$ ? class$->this$ : NULL);')
    line(out, indent + 1, 'if (class$ == NULL)')
    line(out, indent + 1, '{')
    line(out, indent + 2, 'jclass cls = (jclass) env->findClass("%s");',
         className.replace('.', '/'))

    if methods or protectedMethods or constructors:
        line(out)
        line(out, indent + 2, 'mids$ = new jmethodID[max_mid];')
        for constructor in constructors:
            sig = signature(constructor)
            line(out, indent + 2,
                 'mids$[mid_init$_%s] = env->getMethodID(cls, "<init>", "%s");',
                 env.strhash(sig), sig)
        # NOTE(review): isExtension is only bound inside this branch; the
        # later loop that reads it only runs when `methods` is non-empty,
        # which implies this branch was taken — so no NameError in practice.
        isExtension = False
        for method in methods:
            methodName = method.getName()
            if methodName == 'pythonExtension':
                # Marker method meaning this class is a Python extension point.
                isExtension = True
            sig = signature(method)
            line(out, indent + 2,
                 'mids$[mid_%s_%s] = env->get%sMethodID(cls, "%s", "%s");',
                 methodName, env.strhash(sig),
                 Modifier.isStatic(method.getModifiers()) and 'Static' or '',
                 methodName, sig)
        for method in protectedMethods:
            methodName = method.getName()
            sig = signature(method)
            line(out, indent + 2,
                 'mids$[mid_%s_%s] = env->get%sMethodID(cls, "%s", "%s");',
                 methodName, env.strhash(sig),
                 Modifier.isStatic(method.getModifiers()) and 'Static' or '',
                 methodName, sig)

    if instanceFields:
        line(out)
        line(out, indent + 2, 'fids$ = new jfieldID[max_fid];')
        for field in instanceFields:
            fieldName = field.getName()
            line(out, indent + 2,
                 'fids$[fid_%s] = env->getFieldID(cls, "%s", "%s");',
                 fieldName, fieldName, signature(field))

    line(out)
    line(out, indent + 2, 'class$ = (::java::lang::Class *) new JObject(cls);')

    if fields:
        # Re-fetch the (now globally referenced) jclass before reading
        # static field values.
        line(out, indent + 2, 'cls = (jclass) class$->this$;')
        line(out)
        for field in fields:
            fieldType = field.getType()
            fieldName = field.getName()
            cppFieldName = cppname(fieldName)
            if cppFieldName in methodNames:
                cppFieldName += RENAME_FIELD_SUFFIX
            if fieldType.isPrimitive():
                line(out, indent + 2,
                     '%s = env->getStatic%sField(cls, "%s");',
                     cppFieldName, fieldType.getName().capitalize(),
                     fieldName)
            else:
                line(out, indent + 2,
                     '%s = new %s(env->getStaticObjectField(cls, "%s", "%s"));',
                     cppFieldName, typename(fieldType, cls, False),
                     fieldName, signature(field))

    line(out, indent + 2, "live$ = true;")
    line(out, indent + 1, '}')
    line(out, indent + 1, 'return (jclass) class$->this$;')
    line(out, indent, '}')

    # --- constructor wrappers: delegate to env->newObject ---
    for constructor in constructors:
        line(out)
        sig = signature(constructor)
        decls, args = argnames(constructor.getParameterTypes(), cls)
        line(out, indent, "%s::%s(%s) : %s(env->newObject(initializeClass, &mids$, mid_init$_%s%s)) {}",
             cppname(names[-1]), cppname(names[-1]), decls,
             absname(cppnames(superNames)),
             env.strhash(sig), args)

    # --- method wrappers ---
    for method in methods:
        modifiers = method.getModifiers()
        returnType = method.getReturnType()
        params = method.getParameterTypes()
        methodName = method.getName()
        superMethod = None
        isStatic = Modifier.isStatic(modifiers)
        if (isExtension and not isStatic and superCls and
            Modifier.isNative(modifiers)):
            # Native methods of a Python extension class are implemented in
            # Python; emit a wrapper only if the superclass has a concrete
            # version we can call non-virtually.
            superMethod = find_method(superCls, methodName, params)
            if superMethod is None:
                continue
        if isStatic:
            qualifier = 'Static'
            this = 'cls'
            midns = ''
            const = ''
        else:
            # NOTE(review): this assignment is redundant (isStatic is
            # already False in this branch).
            isStatic = False
            if superMethod is not None:
                qualifier = 'Nonvirtual'
                this = 'this$, (jclass) %s::class$->this$' %(absname(cppnames(superNames)))
                declaringClass = superMethod.getDeclaringClass()
                midns = '%s::' %(typename(declaringClass, cls, False))
            else:
                qualifier = ''
                this = 'this$'
                midns = ''
            const = ' const'
        sig = signature(method)
        decls, args = argnames(params, cls)
        line(out)
        line(out, indent, '%s %s::%s(%s)%s',
             typename(returnType, cls, False), cppname(names[-1]),
             cppname(methodName), decls, const)
        line(out, indent, '{')
        if isStatic:
            line(out, indent + 1,
                 'jclass cls = env->getClass(initializeClass);');
        if returnType.isPrimitive():
            # void is handled by omitting the 'return' keyword.
            line(out, indent + 1,
                 '%senv->call%s%sMethod(%s, %smids$[%smid_%s_%s]%s);',
                 not returnType.getName() == 'void' and 'return ' or '',
                 qualifier, returnType.getName().capitalize(), this,
                 midns, midns, methodName, env.strhash(sig), args)
        else:
            line(out, indent + 1,
                 'return %s(env->call%sObjectMethod(%s, %smids$[%smid_%s_%s]%s));',
                 typename(returnType, cls, False), qualifier, this,
                 midns, midns, methodName, env.strhash(sig), args)
        line(out, indent, '}')

    # --- instance field accessors (_get_x / _set_x) ---
    if instanceFields:
        for field in instanceFields:
            fieldType = field.getType()
            fieldName = field.getName()
            line(out)
            line(out, indent, '%s %s::_get_%s() const',
                 typename(fieldType, cls, False), cppname(names[-1]), fieldName)
            line(out, indent, '{')
            if fieldType.isPrimitive():
                line(out, indent + 1,
                     'return env->get%sField(this$, fids$[fid_%s]);',
                     fieldType.getName().capitalize(), fieldName)
            else:
                line(out, indent + 1,
                     'return %s(env->getObjectField(this$, fids$[fid_%s]));',
                     typename(fieldType, cls, False), fieldName)
            line(out, indent, '}')
            if not Modifier.isFinal(field.getModifiers()):
                # Only non-final fields get a setter.
                line(out)
                line(out, indent, 'void %s::_set_%s(%s a0) const',
                     cppname(names[-1]), fieldName,
                     typename(fieldType, cls, True))
                line(out, indent, '{')
                if fieldType.isPrimitive():
                    line(out, indent + 1,
                         'env->set%sField(this$, fids$[fid_%s], a0);',
                         fieldType.getName().capitalize(), fieldName)
                else:
                    line(out, indent + 1,
                         'env->setObjectField(this$, fids$[fid_%s], a0.this$);',
                         fieldName)
                line(out, indent, '}')

    # --- close every namespace opened above ---
    while indent:
        indent -= 1
        line(out, indent, '}')

    return names, superNames
def getPythonicClassName(clsName, renames, alreadyRegistered):
    """Resolve the Python-side name for the Java class *clsName*.

    Lookup order: an exact match in *renames*, then any template key
    (containing '%(') that expands to *clsName*, then a series of
    progressively more general patterns. A pattern whose expansion is
    already in *alreadyRegistered* is skipped so two classes never claim
    the same Python name. Falls back to the simple (last) name.
    """
    elements = clsName.split('.')
    lastName = elements[-1]
    codes = getSubstitutionCodes(elements)

    if clsName in renames:
        return renames[clsName] % codes

    # match for foo.bar%(name)s
    for k, v in renames.items():
        if '%(' in k and (k % codes == clsName):
            return v % codes

    # also try the various allowed combinations (in decreasing order)
    # BUG FIX: the original tuple was missing a comma between
    # '%(package_short)s%(name)s' and '%(name)s', so the two literals were
    # concatenated into the nonsensical pattern
    # '%(package_short)s%(name)s%(name)s' and neither intended pattern was
    # ever tried.
    for pattern in (lastName, '#' + clsName, '%(package_short)s' + lastName,
                    '%(package)s' + lastName, '%(package_short)s%(name)s',
                    '%(name)s', '%(fullname)s', ):
        if pattern in renames:
            resolved = renames[pattern] % codes
            # there could be various strategies for recovery
            if resolved in alreadyRegistered:
                continue
            return resolved

    # and if nothing succeeded, then always return the last name
    return lastName
def getSubstitutionCodes(elements):
    """Build the --rename substitution codes for a dotted class name.

    *elements* is the class name already split on '.'; the last element is
    the simple class name, the rest are the package components.
    """
    package_parts = elements[:-1]
    return {
        'clsname': '.'.join(elements),
        'fullname': ''.join(elements),
        'package': ''.join(package_parts),
        'package_short': ''.join(part[0] for part in package_parts),
        'name': elements[-1],
        'java_pkg': '.'.join(package_parts),
    }
def get_substitution_help(clsName):
    """Return help text describing the --rename substitution codes, with
    every example expanded for *clsName*.

    The text is itself a %-format template: '%%' renders as a literal '%'
    so the examples show exactly what a user would type.
    """
    codes = getSubstitutionCodes(clsName.split('.'))
    # BUG FIX: the third example previously read
    # '--rename %%(fullname)s=%%(package_short)%%(name)s' — missing the
    # 's' conversion after %(package_short), i.e. it documented an invalid
    # substitution code that would raise ValueError if actually used.
    return '''
    You can also use substitution codes, examples:
       --rename %(java_pkg)s%%(name)s=foo%%(name)s
              # => foo%(name)s
       --rename %%(java_pkg)s%(name)s=bar%%(name)s
              # => bar%(name)s
       --rename %%(fullname)s=%%(package_short)s%%(name)s
              # => %(package_short)s%(name)s
       --rename %%(package)s%%(name)s=%%(package_short)sXYZ
              # => %(package_short)sXYZ
       --rename %%(fullname)s=py%%(name)s
              # => py%(name)s

    codes:
       %%(clsname)s = %(clsname)s
       %%(java_pkg)s = %(java_pkg)s
       %%(fullname)s = %(fullname)s
       %%(package)s = %(package)s
       %%(package_short)s = %(package_short)s
       %%(name)s = %(name)s
    ''' % codes
def loadClassNamesFromImportedModule(modules, renames):
import tempfile
temp_file = os.path.join(tempfile.gettempdir(), 'jcc-renames-%s.tmp' % '-'.join(modules))
imports = ';'.join(['import %s;mods.append(%s);cp.append(%s.CLASSPATH)' % (x,x,x) for x in modules])
updates = ';'.join(["cls.update(['%%s\\t%%s' %% (x.class_.getName(), x.class_.getSimpleName()) for x in filter(lambda x: hasattr(x, 'class_'), %s.__dict__.values())])" % x for x in modules])
cmd = """%(executable)s -c \"import os,sys;sys.path=%(syspath)s;
cp=[];mods=[];cls=set();%(imports)s;
mods[0].initVM(os.pathsep.join(cp));
%(updates)s;
open('%(tmpfile)s', 'w').write('\\n'.join(list(cls)))\"
""" % {'executable': sys.executable, 'syspath':sys.path, 'imports':imports, 'tmpfile':temp_file,
'updates': updates}
print cmd
cmd = ' '.join(cmd.split())
os.system(cmd)
fi = open(temp_file, 'r')
for line in fi:
cls, name = line.strip().split('\t')
if (cls in renames):
raise Exception("Error while importing module %s. The class name %s is already reserved for %s" % (module_name, name, cls))
else:
renames['#%s' % cls] = 'SKIP'
fi.close()
# Script entry point: hand the raw command-line arguments to the jcc()
# driver (defined earlier in this file).
if __name__ == '__main__':
    jcc(sys.argv)
| |
"""
sentry.manager
~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import datetime
import hashlib
import logging
import time
import warnings
import uuid
from celery.signals import task_postrun
from django.conf import settings
from django.contrib.auth.models import UserManager
from django.core.signals import request_finished
from django.db import models, transaction, IntegrityError
from django.db.models import Sum
from django.utils import timezone
from django.utils.datastructures import SortedDict
from raven.utils.encoding import to_string
from sentry import app
from sentry.constants import (
STATUS_RESOLVED, STATUS_UNRESOLVED, MINUTE_NORMALIZATION,
LOG_LEVELS, DEFAULT_LOGGER_NAME, MAX_CULPRIT_LENGTH)
from sentry.db.models import BaseManager
from sentry.processors.base import send_group_processors
from sentry.signals import regression_signal
from sentry.tasks.index import index_event
from sentry.utils.cache import cache, memoize
from sentry.utils.dates import get_sql_date_trunc, normalize_datetime
from sentry.utils.db import get_db_engine, has_charts, attach_foreignkey
from sentry.utils.safe import safe_execute, trim, trim_dict, trim_frames
from sentry.utils.strings import strip
# Dedicated logger for Sentry's internal processing errors.
logger = logging.getLogger('sentry.errors')

# NOTE(review): UNSAVED is not referenced anywhere in this chunk —
# presumably a sentinel meaning "value not persisted yet"; confirm before
# removing.
UNSAVED = dict()
# Tag values longer than this are silently dropped (see GroupManager.add_tags).
MAX_TAG_LENGTH = 200
def get_checksum_from_event(event):
    """Derive the md5 checksum used to group *event* with similar events.

    The first interface that produces a composite hash wins; its parts are
    folded into one md5 digest. If no interface yields anything, fall back
    to hashing the raw event message.
    """
    interfaces = event.interfaces
    for interface in interfaces.itervalues():
        composite = interface.get_composite_hash(interfaces=event.interfaces)
        if not composite:
            continue
        digest = hashlib.md5()
        for part in composite:
            digest.update(to_string(part))
        return digest.hexdigest()
    return hashlib.md5(to_string(event.message)).hexdigest()
class ScoreClause(object):
    """SQL expression object that yields a group's sort score.

    Passed as an update value so the score is computed inside the database
    where the backend supports it; otherwise it degrades to a Python-side
    computation via ``group.get_score()``.
    """

    def __init__(self, group):
        self.group = group

    def prepare_database_save(self, unused):
        # Django hands the value back to us untouched at save time.
        return self

    def prepare(self, evaluator, query, allow_joins):
        return

    def evaluate(self, node, qn, connection):
        alias = getattr(connection, 'alias', 'default')
        engine = get_db_engine(alias)
        if engine.startswith('postgresql'):
            return ('log(times_seen) * 600 + last_seen::abstime::int', [])
        if engine.startswith('mysql'):
            return ('log(times_seen) * 600 + unix_timestamp(last_seen)', [])
        # XXX: if we cant do it atomically let's do it the best we can
        return (self.group.get_score(), [])
def count_limit(count):
    """Return the sampling rate for a group already seen *count* times.

    Walks SENTRY_SAMPLE_RATES (ascending thresholds) and returns the rate
    of the first bucket the count fits in, capping at
    SENTRY_MAX_SAMPLE_RATE.
    """
    # TODO: could we do something like num_to_store = max(math.sqrt(100*count)+59, 200) ?
    # ~ 150 * ((log(n) - 1.5) ^ 2 - 0.25)
    for threshold, sample_rate in settings.SENTRY_SAMPLE_RATES:
        if count <= threshold:
            return sample_rate
    return settings.SENTRY_MAX_SAMPLE_RATE
def time_limit(silence):  # ~ 3600 per hour
    """Return the sampling rate based on *silence*, the seconds elapsed
    since the group was last seen, capping at SENTRY_MAX_SAMPLE_TIME."""
    for threshold, sample_rate in settings.SENTRY_SAMPLE_TIMES:
        if silence >= threshold:
            return sample_rate
    return settings.SENTRY_MAX_SAMPLE_TIME
class UserManager(BaseManager, UserManager):
    # Combines Sentry's BaseManager behaviour with Django's stock user
    # manager. NOTE: the class name deliberately shadows the imported
    # django.contrib.auth UserManager — the base-class list is evaluated
    # before the new name is bound, so the second base here still resolves
    # to Django's manager.
    pass
class ChartMixin(object):
    """Mixin for managers that can produce per-minute/hour/day event-count
    chart series from a ``groupcountbyminute`` relation."""

    def get_chart_data_for_group(self, instances, max_days=90, key=None):
        """Chart data aggregated across several *instances* at once.

        Returns [] (or {} when *key* is given) for an empty instance list.
        """
        if not instances:
            if key is None:
                return []
            return {}

        # Route the query to the same database the instances came from.
        if hasattr(instances[0], '_state'):
            db = instances[0]._state.db or 'default'
        else:
            db = 'default'

        field = self.model.groupcountbyminute_set.related
        column = field.field.name
        queryset = field.model.objects.filter(**{
            '%s__in' % column: instances,
        })

        return self._get_chart_data(queryset, max_days, db, key=key)

    def get_chart_data(self, instance, max_days=90, key=None):
        """Chart data for a single *instance*."""
        if hasattr(instance, '_state'):
            db = instance._state.db or 'default'
        else:
            db = 'default'

        queryset = instance.groupcountbyminute_set

        return self._get_chart_data(queryset, max_days, db, key=key)

    def _get_chart_data(self, queryset, max_days=90, db='default', key=None):
        """Aggregate *queryset* into time-series points.

        Returns a list of (epoch_millis, count) pairs, or — when *key* is
        given — a dict mapping each key value to such a list. Returns
        empty when the backend cannot chart (no date_trunc support).
        """
        if not has_charts(db):
            if key is None:
                return []
            return {}

        today = timezone.now().replace(microsecond=0, second=0)

        # the last interval is not accurate, so we exclude it
        # TODO: it'd be ideal to normalize the last datapoint so that we can include it
        # and not have ~inaccurate data for up to MINUTE_NORMALIZATION
        today -= datetime.timedelta(minutes=MINUTE_NORMALIZATION)

        # Pick the SQL grouping granularity from the requested window.
        if max_days >= 30:
            g_type = 'date'
            d_type = 'days'
            points = max_days
            modifier = 1
            today = today.replace(hour=0)
        elif max_days >= 1:
            g_type = 'hour'
            d_type = 'hours'
            points = max_days * 24
            modifier = 1
            today = today.replace(minute=0)
        else:
            g_type = 'minute'
            d_type = 'minutes'
            modifier = MINUTE_NORMALIZATION
            # NOTE(review): relies on Python 2 integer division for
            # (60 / modifier); revisit if ever ported to Python 3.
            points = max_days * 24 * (60 / modifier)

        min_date = today - datetime.timedelta(days=max_days)

        method = get_sql_date_trunc('date', db, grouper=g_type)

        chart_qs = queryset.filter(
            date__gte=min_date,
        ).extra(
            select={'grouper': method},
        )
        if key:
            chart_qs = chart_qs.values('grouper', key)
        else:
            chart_qs = chart_qs.values('grouper')

        chart_qs = chart_qs.annotate(
            num=Sum('times_seen'),
        )
        if key:
            chart_qs = chart_qs.values_list(key, 'grouper', 'num').order_by(key, 'grouper')
        else:
            chart_qs = chart_qs.values_list('grouper', 'num').order_by('grouper')

        # Pivot the rows into {key_value: {bucket_datetime: count}}.
        if key is None:
            rows = {None: dict(chart_qs)}
        else:
            rows = {}
            for item, grouper, num in chart_qs:
                if item not in rows:
                    rows[item] = {}
                rows[item][grouper] = num

        # Walk backwards from `today`, emitting a point per bucket and
        # filling gaps with zero counts.
        results = {}
        for item, tsdata in rows.iteritems():
            results[item] = []
            for point in xrange(points, -1, -1):
                dt = today - datetime.timedelta(**{d_type: point * modifier})
                results[item].append((int(time.mktime((dt).timetuple())) * 1000, tsdata.get(dt, 0)))

        if key is None:
            return results[None]
        return results
class GroupManager(BaseManager, ChartMixin):
    """Manager for Group: normalizes raw event payloads, persists events,
    creates/updates their aggregation groups and maintains the per-minute
    counters and tags."""

    use_for_related_fields = True

    def normalize_event_data(self, data):
        """Coerce a raw event payload dict into canonical form in place.

        Fills defaults (level, logger, timestamp, event_id, ...), trims
        oversized values, normalizes tags, expands the Exception interface
        and folds a bare Stacktrace into it. Returns the mutated *data*.
        """
        # TODO(dcramer): store http.env.REMOTE_ADDR as user.ip
        # First we pull out our top-level (non-data attr) kwargs
        if not data.get('level') or data['level'] not in LOG_LEVELS:
            data['level'] = logging.ERROR

        if not data.get('logger'):
            data['logger'] = DEFAULT_LOGGER_NAME
        else:
            data['logger'] = trim(data['logger'], 64)

        timestamp = data.get('timestamp')
        if not timestamp:
            timestamp = timezone.now()

        # We must convert date to local time so Django doesn't mess it up
        # based on TIME_ZONE
        if settings.TIME_ZONE:
            if not timezone.is_aware(timestamp):
                timestamp = timestamp.replace(tzinfo=timezone.utc)
        elif timezone.is_aware(timestamp):
            timestamp = timestamp.replace(tzinfo=None)
        data['timestamp'] = timestamp

        if not data.get('event_id'):
            data['event_id'] = uuid.uuid4().hex

        data.setdefault('message', None)
        data.setdefault('culprit', None)
        data.setdefault('time_spent', None)
        data.setdefault('server_name', None)
        data.setdefault('site', None)
        data.setdefault('checksum', None)
        data.setdefault('platform', None)
        data.setdefault('extra', {})

        tags = data.get('tags')
        if not tags:
            tags = []
        # full support for dict syntax
        elif isinstance(tags, dict):
            tags = tags.items()
        # prevent [tag, tag, tag] (invalid) syntax
        elif not all(len(t) == 2 for t in tags):
            tags = []
        else:
            tags = list(tags)

        data['tags'] = tags
        data['message'] = strip(data['message'])
        data['culprit'] = strip(data['culprit'])

        if not isinstance(data['extra'], dict):
            # throw it away
            data['extra'] = {}

        trim_dict(
            data['extra'], max_size=settings.SENTRY_MAX_EXTRA_VARIABLE_SIZE)

        if 'sentry.interfaces.Exception' in data:
            # Normalize single-exception payloads to the multi-value form.
            if 'values' not in data['sentry.interfaces.Exception']:
                data['sentry.interfaces.Exception'] = {
                    'values': [data['sentry.interfaces.Exception']]
                }

            # convert stacktrace + exception into expanded exception
            if 'sentry.interfaces.Stacktrace' in data:
                data['sentry.interfaces.Exception']['values'][0]['stacktrace'] = data.pop('sentry.interfaces.Stacktrace')

            for exc_data in data['sentry.interfaces.Exception']['values']:
                for key in ('type', 'module', 'value'):
                    value = exc_data.get(key)
                    if value:
                        exc_data[key] = trim(value)

                if exc_data.get('stacktrace'):
                    trim_frames(exc_data['stacktrace'])
                    for frame in exc_data['stacktrace']['frames']:
                        stack_vars = frame.get('vars', {})
                        trim_dict(stack_vars)

        if 'sentry.interfaces.Stacktrace' in data:
            trim_frames(data['sentry.interfaces.Stacktrace'])
            for frame in data['sentry.interfaces.Stacktrace']['frames']:
                stack_vars = frame.get('vars', {})
                trim_dict(stack_vars)

        if 'sentry.interfaces.Message' in data:
            msg_data = data['sentry.interfaces.Message']
            # NOTE(review): trim() result is discarded here (unlike the
            # 'params' case below) — possibly intended
            # ``msg_data['message'] = trim(...)``; confirm.
            trim(msg_data['message'], 1024)
            if msg_data.get('params'):
                msg_data['params'] = trim(msg_data['params'])

        if 'sentry.interfaces.Http' in data:
            http_data = data['sentry.interfaces.Http']
            for key in ('cookies', 'querystring', 'headers', 'env', 'url'):
                value = http_data.get(key)
                if not value:
                    continue

                if type(value) == dict:
                    trim_dict(value)
                else:
                    http_data[key] = trim(value)

            value = http_data.get('data')
            if value:
                http_data['data'] = trim(value, 2048)

            # default the culprit to the url
            if not data['culprit']:
                data['culprit'] = trim(strip(http_data.get('url')), MAX_CULPRIT_LENGTH)

        return data

    def from_kwargs(self, project, **kwargs):
        """Normalize **kwargs as an event payload and persist it."""
        data = self.normalize_event_data(kwargs)
        return self.save_data(project, data)

    def save_data(self, project, data, raw=False):
        """Persist a normalized event payload for *project*.

        Creates/updates the aggregation group, saves the Event (unless it
        is sampled away), records the event-id mapping, and fires the
        post-processing hooks when *raw* is False. Returns the Event, or
        None if group creation failed.
        """
        # TODO: this function is way too damn long and needs refactored
        # the inner imports also suck so let's try to move it away from
        # the objects manager

        # TODO: culprit should default to "most recent" frame in stacktraces when
        # it's not provided.
        from sentry.plugins import plugins
        from sentry.models import Event, Project, EventMapping

        with transaction.commit_on_success():
            project = Project.objects.get_from_cache(id=project)

            # First we pull out our top-level (non-data attr) kwargs
            event_id = data.pop('event_id')
            message = data.pop('message')
            culprit = data.pop('culprit')
            level = data.pop('level')
            time_spent = data.pop('time_spent')
            logger_name = data.pop('logger')
            server_name = data.pop('server_name')
            site = data.pop('site')
            date = data.pop('timestamp')
            checksum = data.pop('checksum')
            platform = data.pop('platform')

            if 'sentry.interfaces.Exception' in data:
                if 'values' not in data['sentry.interfaces.Exception']:
                    data['sentry.interfaces.Exception'] = {'values': [data['sentry.interfaces.Exception']]}

                # convert stacktrace + exception into expanded exception
                if 'sentry.interfaces.Stacktrace' in data:
                    data['sentry.interfaces.Exception']['values'][0]['stacktrace'] = data.pop('sentry.interfaces.Stacktrace')

            kwargs = {
                'level': level,
                'message': message,
                'platform': platform,
                'culprit': culprit or '',
                'logger': logger_name,
            }

            event = Event(
                project=project,
                event_id=event_id,
                data=data,
                server_name=server_name,
                site=site,
                time_spent=time_spent,
                datetime=date,
                **kwargs
            )

            # Calculate the checksum from the first highest scoring interface
            if not checksum:
                checksum = get_checksum_from_event(event)

            event.checksum = checksum

            group_kwargs = kwargs.copy()
            group_kwargs.update({
                'last_seen': date,
                'first_seen': date,
                'time_spent_total': time_spent or 0,
                'time_spent_count': time_spent and 1 or 0,
            })

            tags = data['tags']
            tags.append(('level', LOG_LEVELS[level]))
            # NOTE(review): ``logger`` here is the module-level
            # logging.Logger (always truthy), so the 'logger' tag is
            # appended unconditionally — this almost certainly meant to
            # test ``logger_name``.
            if logger:
                tags.append(('logger', logger_name))
            if server_name:
                tags.append(('server_name', server_name))
            if site:
                tags.append(('site', site))

            for plugin in plugins.for_project(project):
                added_tags = safe_execute(plugin.get_tags, event)
                if added_tags:
                    tags.extend(added_tags)

            try:
                group, is_new, is_sample = self._create_group(
                    event=event,
                    tags=data['tags'],
                    **group_kwargs
                )
            except Exception as exc:
                # TODO: should we mail admins when there are failures?
                try:
                    logger.exception(u'Unable to process log entry: %s', exc)
                except Exception, exc:
                    # NOTE(review): warnings.warn() does not %-format its
                    # message; the second argument is interpreted as a
                    # warning category — likely a bug.
                    warnings.warn(u'Unable to process log entry: %s', exc)
                return

            using = group._state.db

            event.group = group

            # save the event unless its been sampled
            if not is_sample:
                sid = transaction.savepoint(using=using)
                try:
                    event.save()
                except IntegrityError:
                    # Duplicate event_id: another worker already saved it.
                    transaction.savepoint_rollback(sid, using=using)
                    return event
                transaction.savepoint_commit(sid, using=using)

            sid = transaction.savepoint(using=using)
            try:
                EventMapping.objects.create(
                    project=project, group=group, event_id=event_id)
            except IntegrityError:
                transaction.savepoint_rollback(sid, using=using)
                return event
            transaction.savepoint_commit(sid, using=using)
            transaction.commit_unless_managed(using=using)

            if not raw:
                send_group_processors(
                    group=group,
                    event=event,
                    is_new=is_new,
                    is_sample=is_sample
                )

        # TODO: move this to the queue
        if is_new and not raw:
            regression_signal.send_robust(sender=self.model, instance=group)

        if getattr(settings, 'SENTRY_INDEX_SEARCH', settings.SENTRY_USE_SEARCH):
            index_event.delay(event)

        return event

    def should_sample(self, group, event):
        """Return True when this event should be counted but not stored.

        Combines a frequency-based (times_seen) and a recency-based
        (seconds of silence) modulo check.
        """
        if not settings.SENTRY_SAMPLE_DATA:
            return False

        silence_timedelta = event.datetime - group.last_seen
        silence = silence_timedelta.days * 86400 + silence_timedelta.seconds

        if group.times_seen % count_limit(group.times_seen):
            return False

        if group.times_seen % time_limit(silence):
            return False

        return True

    def _create_group(self, event, tags=None, **kwargs):
        """Get-or-create the aggregation group for *event* and buffer the
        counter updates. Returns (group, is_new, is_sample)."""
        from sentry.models import ProjectCountByMinute, GroupCountByMinute

        date = event.datetime
        time_spent = event.time_spent
        project = event.project

        group, is_new = self.get_or_create(
            project=project,
            checksum=event.checksum,
            defaults=kwargs
        )
        if is_new:
            transaction.commit_unless_managed(using=group._state.db)

        update_kwargs = {
            'times_seen': 1,
        }
        if time_spent:
            update_kwargs.update({
                'time_spent_total': time_spent,
                'time_spent_count': 1,
            })

        if not is_new:
            extra = {
                'last_seen': max(event.datetime, group.last_seen),
                'score': ScoreClause(group),
            }
            if event.message and event.message != group.message:
                extra['message'] = event.message
            if group.level != event.level:
                extra['level'] = event.level

            if group.status == STATUS_RESOLVED or group.is_over_resolve_age():
                # Making things atomic
                is_new = bool(self.filter(
                    id=group.id,
                    status=STATUS_RESOLVED,
                ).exclude(
                    active_at__gte=date,
                ).update(active_at=date, status=STATUS_UNRESOLVED))

                transaction.commit_unless_managed(using=group._state.db)

                group.active_at = date
                group.status = STATUS_UNRESOLVED

            group.last_seen = extra['last_seen']

            app.buffer.incr(self.model, update_kwargs, {
                'id': group.id,
            }, extra)
        else:
            # TODO: this update should actually happen as part of create
            group.update(score=ScoreClause(group))

            # We need to commit because the queue can run too fast and hit
            # an issue with the group not existing before the buffers run
            transaction.commit_unless_managed(using=group._state.db)

        # Determine if we've sampled enough data to store this event
        if is_new:
            is_sample = False
        elif not self.should_sample(group, event):
            is_sample = False
        else:
            is_sample = True

        # Rounded down to the nearest interval
        normalized_datetime = normalize_datetime(date)

        app.buffer.incr(GroupCountByMinute, update_kwargs, {
            'group': group,
            'project': project,
            'date': normalized_datetime,
        })

        app.buffer.incr(ProjectCountByMinute, update_kwargs, {
            'project': project,
            'date': normalized_datetime,
        })

        # Tag recording is best-effort; never fail group creation over it.
        try:
            self.add_tags(group, tags)
        except Exception, e:
            logger.exception('Unable to record tags: %s' % (e,))

        return group, is_new, is_sample

    def add_tags(self, group, tags):
        """Buffer counter increments for each (key, value[, data]) tag.

        Empty values and values longer than MAX_TAG_LENGTH are skipped.
        """
        from sentry.models import TagValue, GroupTag

        project = group.project
        date = group.last_seen

        for tag_item in tags:
            # Tags arrive as 2-tuples (key, value) or 3-tuples with data.
            if len(tag_item) == 2:
                (key, value), data = tag_item, None
            else:
                key, value, data = tag_item

            if not value:
                continue

            value = unicode(value)
            if len(value) > MAX_TAG_LENGTH:
                continue

            app.buffer.incr(TagValue, {
                'times_seen': 1,
            }, {
                'project': project,
                'key': key,
                'value': value,
            }, {
                'last_seen': date,
                'data': data,
            })

            app.buffer.incr(GroupTag, {
                'times_seen': 1,
            }, {
                'group': group,
                'project': project,
                'key': key,
                'value': value,
            }, {
                'last_seen': date,
            })

    def get_by_natural_key(self, project, logger, culprit, checksum):
        """Serialization support: look a group up by its natural key."""
        return self.get(project=project, logger=logger, view=culprit, checksum=checksum)

    @memoize
    def model_fields_clause(self):
        """Comma-separated, quoted column list of the Group model, for use
        in the hand-written SQL of get_accelerated()."""
        return ', '.join('sentry_groupedmessage."%s"' % (f.column,) for f in self.model._meta.fields)

    def get_accelerated(self, project_ids, queryset=None, minutes=15):
        """Return groups ordered by acceleration: current event rate over
        the last *minutes* relative to their historical average rate.

        Builds raw SQL (mysql/postgresql only) and wraps it in a
        RawQuerySet so callers can slice it.
        """
        if not project_ids:
            return self.none()

        if queryset is None:
            queryset = self.filter(
                project__in=project_ids,
                status=STATUS_UNRESOLVED,
            )
        else:
            queryset = queryset._clone()
            queryset.query.select_related = False

        normalization = float(MINUTE_NORMALIZATION)

        assert minutes >= normalization

        intervals = 8

        engine = get_db_engine(queryset.db)
        # We technically only support mysql and postgresql, since there seems to be no standard
        # way to get the epoch from a datetime/interval
        if engine.startswith('mysql'):
            minute_clause = "interval %s minute"
            epoch_clause = "unix_timestamp(utc_timestamp()) - unix_timestamp(mcbm.date)"
            now_clause = 'utc_timestamp()'
        else:
            minute_clause = "interval '%s minutes'"
            epoch_clause = "extract(epoch from now()) - extract(epoch from mcbm.date)"
            now_clause = 'now()'

        sql, params = queryset.query.get_compiler(queryset.db).as_sql()
        # NOTE(review): before_select is never used afterwards.
        before_select, after_select = str(sql).split('SELECT ', 1)
        after_where = after_select.split(' WHERE ', 1)[1]

        # Ensure we remove any ordering clause
        after_where = after_where.split(' ORDER BY ')[0]

        query = """
        SELECT ((mcbm.times_seen + 1) / ((%(epoch_clause)s) / 60)) / (COALESCE(z.rate, 0) + 1) as sort_value,
               %(fields)s
        FROM sentry_groupedmessage
        INNER JOIN sentry_messagecountbyminute as mcbm
            ON (sentry_groupedmessage.id = mcbm.group_id)
        LEFT JOIN (SELECT a.group_id, (SUM(a.times_seen)) / COUNT(a.times_seen) / %(norm)f as rate
            FROM sentry_messagecountbyminute as a
            WHERE a.date >= %(now)s - %(max_time)s
            AND a.date < %(now)s - %(min_time)s
            AND a.project_id IN (%(project_ids)s)
            GROUP BY a.group_id) as z
        ON z.group_id = mcbm.group_id
        WHERE mcbm.date >= %(now)s - %(min_time)s
        AND mcbm.date < %(now)s - %(offset_time)s
        AND mcbm.times_seen > 0
        AND ((mcbm.times_seen + 1) / ((%(epoch_clause)s) / 60)) > (COALESCE(z.rate, 0) + 1)
        AND %(after_where)s
        GROUP BY z.rate, mcbm.times_seen, mcbm.date, %(fields)s
        ORDER BY sort_value DESC
        """ % dict(
            fields=self.model_fields_clause,
            after_where=after_where,
            offset_time=minute_clause % (1,),
            min_time=minute_clause % (minutes + 1,),
            max_time=minute_clause % (minutes * intervals + 1,),
            norm=normalization,
            epoch_clause=epoch_clause,
            now=now_clause,
            project_ids=', '.join((str(int(x)) for x in project_ids)),
        )
        return RawQuerySet(self, query, params)
class RawQuerySet(object):
    """Wraps a raw SQL string plus its params so callers can slice it like
    a queryset; slicing appends LIMIT/OFFSET and executes via raw()."""

    def __init__(self, queryset, query, params):
        self.queryset = queryset
        self.query = query
        self.params = params

    def __getitem__(self, k):
        # Translate a slice into LIMIT/OFFSET; an open start defaults to 0.
        offset = k.start or 0
        limit = k.stop - offset
        sql = '%s LIMIT %d OFFSET %d' % (self.query, limit, offset)
        return self.queryset.raw(sql, self.params)
class ProjectManager(BaseManager, ChartMixin):
    def get_for_user(self, user=None, access=None, hidden=False, team=None,
                     superuser=True):
        """
        Returns a list (sorted by lowercased name) of all projects the user
        has some level of access to; [] for anonymous users.
        """
        from sentry.models import Team

        if not (user and user.is_authenticated()):
            return []

        # TODO: the result of this function should be cached
        # NOTE(review): always True here given the early return above.
        is_authenticated = (user and user.is_authenticated())

        base_qs = self
        if not hidden:
            # status 0 == visible project
            base_qs = base_qs.filter(status=0)
        if team:
            base_qs = base_qs.filter(team=team)

        if team and user.is_superuser and superuser:
            # Superusers see every (matching) project of the team.
            projects = set(base_qs)
        else:
            projects_qs = base_qs
            if not settings.SENTRY_PUBLIC:
                # If the user is authenticated, include their memberships
                teams = Team.objects.get_for_user(
                    user, access, access_groups=False).values()
                if not teams:
                    projects_qs = self.none()
                if team and team not in teams:
                    projects_qs = self.none()
                elif not team:
                    projects_qs = projects_qs.filter(team__in=teams)

            projects = set(projects_qs)

            if is_authenticated:
                # Access-group membership grants project access too.
                projects |= set(base_qs.filter(accessgroup__members=user))

        attach_foreignkey(projects, self.model.team)

        return sorted(projects, key=lambda x: x.name.lower())
class MetaManager(BaseManager):
    """Manager for global key/value options with an in-process cache.

    The cache (``__metadata``) is invalidated at the end of every Celery
    task and every request, and is excluded from pickling.
    """

    # Sentinel distinguishing "no default supplied" from a None default.
    NOTSET = object()

    def __init__(self, *args, **kwargs):
        super(MetaManager, self).__init__(*args, **kwargs)
        task_postrun.connect(self.clear_cache)
        request_finished.connect(self.clear_cache)
        self.__metadata = {}

    def __getstate__(self):
        d = self.__dict__.copy()
        # we cant serialize weakrefs
        d.pop('_MetaManager__metadata', None)
        return d

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.__metadata = {}

    def get_value(self, key, default=NOTSET):
        """Return the value for *key*; raises KeyError when missing and no
        default was supplied."""
        result = self.get_all_values()
        if default is self.NOTSET:
            return result[key]
        return result.get(key, default)

    def unset_value(self, key):
        """Delete *key* from the database and the cache."""
        self.filter(key=key).delete()
        self.__metadata.pop(key, None)

    def set_value(self, key, value):
        """Create or update *key* and refresh the cache entry."""
        # BUG FIX: removed a stray Python 2 debug statement
        # (``print key, value``) that leaked every option write to stdout.
        inst, _ = self.get_or_create(
            key=key,
            defaults={
                'value': value,
            }
        )
        if inst.value != value:
            inst.update(value=value)
        self.__metadata[key] = value

    def get_all_values(self):
        """Return (and lazily populate) the full key -> value cache."""
        if not hasattr(self, '_MetaManager__metadata'):
            self.__metadata = dict(self.values_list('key', 'value'))
        return self.__metadata

    def clear_cache(self, **kwargs):
        # Signal receiver; drops the per-process cache.
        self.__metadata = {}
class InstanceMetaManager(BaseManager):
    """Manager for per-instance key/value options (rows are scoped by the
    foreign key named *field_name*). Uses a two-level cache: an in-process
    dict plus the shared cache backend."""

    # Sentinel distinguishing "no default supplied" from a None default.
    NOTSET = object()

    def __init__(self, field_name, *args, **kwargs):
        super(InstanceMetaManager, self).__init__(*args, **kwargs)
        # Name of the FK field that scopes each option row.
        self.field_name = field_name
        task_postrun.connect(self.clear_cache)
        request_finished.connect(self.clear_cache)
        self.__metadata = {}

    def __getstate__(self):
        d = self.__dict__.copy()
        # we cant serialize weakrefs
        d.pop('_InstanceMetaManager__metadata', None)
        return d

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.__metadata = {}

    def _make_key(self, instance):
        """Shared-cache key for *instance* (a model instance or raw pk)."""
        if isinstance(instance, models.Model):
            instance_id = instance.pk
        else:
            instance_id = instance
        return '%s:%s' % (self.model._meta.db_table, instance_id)

    def get_value_bulk(self, instances, key):
        """Uncached bulk fetch: map each instance to its value for *key*."""
        return dict(self.filter(**{
            '%s__in' % self.field_name: instances,
            'key': key,
        }).values_list(self.field_name, 'value'))

    def get_value(self, instance, key, default=NOTSET):
        """Return *instance*'s value for *key*; raises KeyError when
        missing and no default was supplied."""
        result = self.get_all_values(instance)
        if default is self.NOTSET:
            return result[key]
        return result.get(key, default)

    def unset_value(self, instance, key):
        """Delete the option and keep both cache levels consistent."""
        self.filter(**{self.field_name: instance, 'key': key}).delete()
        if instance.pk not in self.__metadata:
            # Not locally cached: just drop the shared-cache entry.
            cache.delete(self._make_key(instance))
            return
        self.__metadata[instance.pk].pop(key, None)
        cache.set(self._make_key(instance), self.__metadata[instance.pk])

    def set_value(self, instance, key, value):
        """Create or update the option and keep both cache levels
        consistent."""
        inst, created = self.get_or_create(**{
            self.field_name: instance,
            'key': key,
            'defaults': {
                'value': value,
            }
        })
        if not created and inst.value != value:
            inst.update(value=value)
        if instance.pk not in self.__metadata:
            cache.delete(self._make_key(instance))
            return
        self.__metadata[instance.pk][key] = value
        cache.set(self._make_key(instance), self.__metadata[instance.pk])

    def get_all_values(self, instance):
        """Return the key -> value dict for *instance*, populating the
        process cache from the shared cache or the database as needed."""
        if isinstance(instance, models.Model):
            instance_id = instance.pk
        else:
            instance_id = instance

        if instance_id not in self.__metadata:
            cache_key = self._make_key(instance)
            result = cache.get(cache_key)
            if result is None:
                result = dict(
                    (i.key, i.value) for i in
                    self.filter(**{
                        self.field_name: instance_id,
                    })
                )
                cache.set(cache_key, result)
            self.__metadata[instance_id] = result
        return self.__metadata.get(instance_id, {})

    def clear_cache(self, **kwargs):
        # Signal receiver; drops the per-process cache.
        self.__metadata = {}
class UserOptionManager(BaseManager):
    """Manager for per-user (optionally per-project) options.

    Mirrors InstanceMetaManager, but keys the request-local cache on the
    ``(user.pk, project.pk or None)`` pair.  The in-memory cache lives in
    ``self.__metadata`` and is flushed after every task/request.
    """

    # sentinel distinguishing "no default supplied" from an explicit None
    NOTSET = object()

    def __init__(self, *args, **kwargs):
        super(UserOptionManager, self).__init__(*args, **kwargs)
        # clear the request-local cache when a task or request finishes
        task_postrun.connect(self.clear_cache)
        request_finished.connect(self.clear_cache)
        self.__metadata = {}

    def __getstate__(self):
        d = self.__dict__.copy()
        # we can't serialize weakrefs; drop the request-local cache
        d.pop('_UserOptionManager__metadata', None)
        return d

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.__metadata = {}

    def get_value(self, user, project, key, default=NOTSET):
        """Return the option value; raise KeyError when unset and no
        default was given."""
        result = self.get_all_values(user, project)
        if default is self.NOTSET:
            return result[key]
        return result.get(key, default)

    def unset_value(self, user, project, key):
        """Delete the option row and evict it from the in-memory cache."""
        self.filter(user=user, project=project, key=key).delete()
        # BUG FIX: the original checked hasattr(self, '_metadata'), which never
        # exists -- the attribute is name-mangled to
        # '_UserOptionManager__metadata' -- so the stale value was never
        # removed from the request-local cache.
        if not hasattr(self, '_UserOptionManager__metadata'):
            return
        if project:
            metakey = (user.pk, project.pk)
        else:
            metakey = (user.pk, None)
        if metakey not in self.__metadata:
            return
        self.__metadata[metakey].pop(key, None)

    def set_value(self, user, project, key, value):
        """Create or update the option row and refresh the in-memory cache."""
        inst, created = self.get_or_create(
            user=user,
            project=project,
            key=key,
            defaults={
                'value': value,
            },
        )
        if not created and inst.value != value:
            inst.update(value=value)
        if project:
            metakey = (user.pk, project.pk)
        else:
            metakey = (user.pk, None)
        if metakey not in self.__metadata:
            # not cached for this user/project yet; nothing to refresh
            return
        self.__metadata[metakey][key] = value

    def get_all_values(self, user, project):
        """Return a dict of all options for (user, project), caching the
        result for the remainder of the request/task."""
        if project:
            metakey = (user.pk, project.pk)
        else:
            metakey = (user.pk, None)
        if metakey not in self.__metadata:
            result = dict(
                (i.key, i.value) for i in
                self.filter(
                    user=user,
                    project=project,
                )
            )
            self.__metadata[metakey] = result
        return self.__metadata.get(metakey, {})

    def clear_cache(self, **kwargs):
        """Drop the request-local option cache (signal receiver)."""
        self.__metadata = {}
class TagKeyManager(BaseManager):
    """Manager for tag keys with a short-lived (60 s) per-project cache."""

    def _get_cache_key(self, project_id):
        """Cache key for the full key list of one project."""
        return 'filterkey:all:%s' % project_id

    def all_keys(self, project):
        """Return all tag keys for *project*, served from cache when fresh."""
        # TODO: cache invalidation via post_save/post_delete signals much like BaseManager
        cache_key = self._get_cache_key(project.id)
        keys = cache.get(cache_key)
        if keys is None:
            keys = list(self.filter(project=project).values_list('key', flat=True))
            cache.set(cache_key, keys, 60)
        return keys
class TeamManager(BaseManager):
    # NOTE(review): Python 2 era code (`is_authenticated()` called as a
    # method, `itervalues`, Django's SortedDict) -- documented as-is.

    def get_for_user(self, user, access=None, access_groups=True, with_projects=False):
        """
        Returns a SortedDict of all teams a user has some level of access to.

        Each <Team> returned has a ``membership`` attribute which holds the
        <TeamMember> instance.
        """
        # NOTE(review): nothing in this method actually sets a `membership`
        # attribute on the returned teams -- verify the docstring claim.
        from sentry.models import TeamMember, AccessGroup, Project

        results = SortedDict()

        # anonymous users see nothing
        if not user.is_authenticated():
            return results

        if settings.SENTRY_PUBLIC and access is None:
            # public install: every team is visible, ordered by name
            for team in sorted(self.iterator(), key=lambda x: x.name.lower()):
                results[team.slug] = team
        else:
            all_teams = set()
            # direct team memberships
            qs = TeamMember.objects.filter(
                user=user,
            ).select_related('team')
            if access is not None:
                qs = qs.filter(type__lte=access)
            for tm in qs:
                all_teams.add(tm.team)
            if access_groups:
                # teams reachable through access-group membership
                qs = AccessGroup.objects.filter(
                    members=user,
                ).select_related('team')
                if access is not None:
                    qs = qs.filter(type__lte=access)
                for group in qs:
                    all_teams.add(group.team)
            for team in sorted(all_teams, key=lambda x: x.name.lower()):
                results[team.slug] = team

        if with_projects:
            # these kinds of queries make people sad :(
            # (one extra query per team to attach its project list)
            new_results = SortedDict()
            for team in results.itervalues():
                project_list = list(Project.objects.get_for_user(
                    user, team=team))
                new_results[team.slug] = (team, project_list)
            results = new_results

        return results
| |
"""This module contains utility functions for working with Pastas models."""
import logging
from datetime import datetime, timedelta
from logging import handlers
import numpy as np
from pandas import Series, Timedelta, Timestamp, date_range, to_datetime
from pandas.tseries.frequencies import to_offset
from scipy import interpolate
logger = logging.getLogger(__name__)
def frequency_is_supported(freq):
    """Return the canonical frequency string if *freq* is usable in a model.

    Parameters
    ----------
    freq: str
        Frequency string (e.g. "D", "7D", "12H").

    Returns
    -------
    str
        Canonical simulation frequency string.

    Raises
    ------
    KeyError
        If *freq* does not resolve to an equidistant (fixed-length) offset.

    Notes
    -----
    Possible frequency-offsets are listed in:
    http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
    Because the result is used in convolution, only offsets with a fixed
    length (pandas Tick offsets, which expose a ``delta``) are allowed.
    Monthly ('M'), yearly ('Y') and weekly ('W') frequencies are therefore
    rejected; use e.g. '7D' for a weekly simulation.

    TODO: Rename to get_frequency_string and change Returns-documentation
    """
    offset = to_offset(freq)
    if not hasattr(offset, 'delta'):
        msg = "Frequency {} not supported.".format(freq)
        logger.error(msg)
        raise KeyError(msg)
    # normalize: drop an explicit multiplier of one ("1D" -> "D")
    if offset.n == 1:
        return offset.name
    return str(offset.n) + offset.name
def _get_stress_dt(freq):
"""Internal method to obtain a timestep in days from a frequency string.
Parameters
----------
freq: str
Returns
-------
dt: float
Approximate timestep in number of days.
Notes
-----
Used for comparison to determine if a time series needs to be up or
downsampled.
See http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
for the offset_aliases supported by Pandas.
"""
# Get the frequency string and multiplier
offset = to_offset(freq)
if hasattr(offset, 'delta'):
dt = offset.delta / Timedelta(1, "D")
else:
num = offset.n
freq = offset.name
if freq in ['A', 'Y', 'AS', 'YS', 'BA', 'BY', 'BAS', 'BYS']:
# year
dt = num * 365
elif freq in ['BQ', 'BQS', 'Q', 'QS']:
# quarter
dt = num * 90
elif freq in ['BM', 'BMS', 'CBM', 'CBMS', 'M', 'MS']:
# month
dt = num * 30
elif freq in ['SM', 'SMS']:
# semi-month
dt = num * 15
elif freq in ['W']:
# week
dt = num * 7
elif freq in ['B', 'C']:
# day
dt = num
elif freq in ['BH', 'CBH']:
# hour
dt = num * 1 / 24
else:
raise (ValueError('freq of {} not supported'.format(freq)))
return dt
def _get_dt(freq):
"""Internal method to obtain a timestep in DAYS from a frequency string.
Parameters
----------
freq: str
Returns
-------
dt: float
Number of days
"""
# Get the frequency string and multiplier
dt = to_offset(freq).delta / Timedelta(1, "D")
return dt
def _get_time_offset(t, freq):
"""Internal method to calculate the time offset of a TimeStamp.
Parameters
----------
t: pandas.Timestamp
Timestamp to calculate the offset from the desired freq for.
freq: str
String with the desired frequency.
Returns
-------
offset: pandas.Timedelta
Timedelta with the offset for the timestamp t.
"""
if freq is None:
raise TypeError("frequency is None")
return t - t.floor(freq)
def get_sample(tindex, ref_tindex):
    """Sample *tindex* so its frequency is not higher than *ref_tindex*'s.

    Parameters
    ----------
    tindex: pandas.index
        Pandas index object
    ref_tindex: pandas.index
        Pandas index object

    Returns
    -------
    series: pandas.index

    Notes
    -----
    For every timestamp in *ref_tindex* the nearest timestamp of *tindex* is
    located; the unique set of those positions is returned as a selection of
    *tindex*.
    """
    if len(tindex) == 1:
        return tindex
    positions = np.arange(0, tindex.size)
    nearest = interpolate.interp1d(tindex.asi8, positions,
                                   kind='nearest', bounds_error=False,
                                   fill_value='extrapolate')
    picks = np.unique(nearest(ref_tindex.asi8).astype(int))
    return tindex[picks]
def timestep_weighted_resample(series0, tindex):
    """Resample a timeseries to a new tindex, using an overlapping period
    weighted average.

    The original series and the new tindex do not have to be equidistant.
    Also, the timestep-edges of the new tindex do not have to overlap with
    the original series.

    It is assumed the series consists of measurements that describe an
    intensity at the end of the period for which they apply.  Therefore,
    when upsampling, the values are uniformly spread over the new timestep
    (like bfill).

    Compared to the resample methods in Pandas, this method is more accurate
    for non-equidistant series.  It is much slower however.

    Parameters
    ----------
    series0 : pandas.Series
        The original series to be resampled
    tindex : pandas.index
        The index to which to resample the series

    Returns
    -------
    series : pandas.Series
        The resampled series; timesteps of *tindex* that do not overlap the
        original series at all are NaN.
    """
    # determine edges of the input timesteps; the first step is assumed to
    # have the same width as the second (its own diff is prepended)
    t0e = np.array(series0.index)
    dt0 = np.diff(t0e)
    dt0 = np.hstack((dt0[0], dt0))
    t0s = t0e - dt0
    v0 = series0.values

    # determine edges of the output timesteps, same convention
    t1e = np.array(tindex)
    dt1 = np.diff(t1e)
    dt1 = np.hstack((dt1[0], dt1))
    t1s = t1e - dt1

    v1 = []
    for t1si, t1ei in zip(t1s, t1e):
        # original periods that overlap the new timestep (t1si, t1ei]
        mask = (t0e > t1si) & (t0s < t1ei)
        if np.any(mask):
            # clip the overlapping periods to the new timestep edges
            ts = t0s[mask]
            te = t0e[mask]
            ts[ts < t1si] = t1si
            te[te > t1ei] = t1ei
            # overlap duration of each original period
            dt = (te - ts).astype(float)
            # duration-weighted average value
            v1.append(np.sum(dt * v0[mask]) / np.sum(dt))
        else:
            # BUG FIX: previously nothing was appended when a new timestep
            # had no overlap with the original series, which made the
            # Series() constructor below fail on a length mismatch.
            v1.append(np.nan)
    # replace all values in the series
    return Series(v1, index=tindex)
def timestep_weighted_resample_fast(series0, freq):
    """Resample a time series to a new frequency, using an overlapping period
    weighted average.

    The original series does not have to be equidistant.

    It is assumed the series consists of measurements that describe an
    intensity at the end of the period for which they apply.  Therefore, when
    upsampling, the values are uniformly spread over the new timestep (like
    bfill).

    Compared to the resample methods in Pandas, this method is more accurate
    for non-equidistant series.  It is slower than Pandas (but faster than
    the original timestep_weighted_resample).

    Parameters
    ----------
    series0 : pandas.Series
        original series to be resampled
    freq : str
        a Pandas frequency string (single unit, e.g. "D" or "H")

    Returns
    -------
    series : pandas.Series
        resampled series
    """
    series = series0.copy()
    # first multiply by the timestep in the unit of freq, so the values
    # become amounts per period instead of intensities
    dt = np.diff(series0.index) / Timedelta(1, freq)
    series[1:] = series[1:] * dt
    # get a new equidistant index
    index = date_range(series.index[0].floor(freq), series.index[-1],
                       freq=freq)
    # calculate the cumulative sum
    series = series.cumsum()
    # add NaNs at non-existing values in series at index
    # BUG FIX: np.NaN was removed in NumPy 2.0; use np.nan
    series = series.combine_first(Series(np.nan, index=index))
    # interpolate these NaN's, only keep values at index
    series = series.interpolate('time')[index]
    # calculate the diff again (inverse of cumsum)
    series[1:] = series.diff()[1:]
    # drop nan's at the beginning
    series = series[series.first_valid_index():]
    return series
def get_equidistant_series(series, freq, minimize_data_loss=False):
    """Get equidistant timeseries using nearest reindexing.

    This method will shift observations to the nearest equidistant timestep to
    create an equidistant timeseries, if necessary. Each observation is
    guaranteed to only be used once in the equidistant timeseries.

    Parameters
    ----------
    series : pandas.Series
        original (non-equidistant) timeseries
    freq : str
        frequency of the new equidistant timeseries
        (i.e. "H", "D", "7D", etc.)
    minimize_data_loss : bool, optional
        if set to True, method will attempt use any unsampled
        points from original timeseries to fill some remaining
        NaNs in the new equidistant timeseries. Default is False.
        This only happens in rare cases.

    Returns
    -------
    s : pandas.Series
        equidistant timeseries

    Notes
    -----
    This method creates an equidistant timeseries with specified freq
    using nearest sampling (meaning observations can be shifted in time),
    with additional filling logic that ensures each original measurement
    is only included once in the new timeseries. Values are filled as close
    as possible to their original timestamp in the new equidistant timeseries.
    """
    # build new equidistant index spanning the original series
    idx = date_range(series.index[0].floor(freq),
                     series.index[-1].ceil(freq),
                     freq=freq)
    # get linear interpolated (fractional) index from original series
    fl = interpolate.interp1d(series.index.asi8,
                              np.arange(0, series.index.size),
                              kind='linear', bounds_error=False,
                              fill_value='extrapolate')
    ind_linear = fl(idx.asi8)
    # get nearest index from original series
    f = interpolate.interp1d(series.index.asi8,
                             np.arange(0, series.index.size),
                             kind='nearest', bounds_error=False,
                             fill_value='extrapolate')
    ind = f(idx.asi8).astype(int)
    # create a new equidistant series
    s = Series(index=idx, data=np.nan)
    # fill in nearest value for each timestamp in equidistant series
    s.loc[idx] = series.values[ind]
    # remove duplicates, each observation can only be used once
    mask = Series(ind).duplicated(keep=False).values
    # mask all duplicates and set to NaN
    s.loc[mask] = np.nan
    # look through duplicates which equidistant timestamp is closest
    # then fill value from original series for closest timestamp
    for i in np.unique(ind[mask]):
        # mask duplicates
        dupe_mask = ind == i
        # get location of first duplicate
        first_dupe = np.nonzero(dupe_mask)[0][0]
        # get index for closest equidistant timestamp
        # NOTE(review): argmin runs over the dupe subset, so the offset is
        # relative to first_dupe; this assumes the duplicate positions are
        # contiguous in `ind` (they are, since nearest sampling of a sorted
        # index yields a monotonic `ind`).
        i_nearest = np.argmin(np.abs(ind_linear - ind)[dupe_mask])
        # fill value
        s.iloc[first_dupe + i_nearest] = series.values[i]

    # This next part is an ugly bit of code to fill up any
    # nans if there are unused values in the original timeseries
    # that lie close enough to our missing datapoint in the new equidistant
    # series.
    if minimize_data_loss:
        # find remaining nans
        nanmask = s.isna()
        if nanmask.sum() > 0:
            # get unused (not sampled) timestamps from original series
            unused = set(range(series.index.size)) - set(ind)
            if len(unused) > 0:
                # dropna: do not consider unused nans
                missing_ts = series.iloc[list(unused)].dropna().index
                # loop through nan timestamps in new series
                for t in s.loc[nanmask].index:
                    # find closest unused value
                    closest = np.argmin(np.abs(missing_ts - t))
                    # check if value is not farther away that freq to avoid
                    # weird behavior
                    if np.abs(missing_ts[closest] - t) <= Timedelta(freq):
                        # fill value
                        s.loc[t] = series.loc[missing_ts[closest]]
    return s
def to_daily_unit(series, method=True):
    """Experimental method, use with caution!

    Recalculate a timeseries of a stress with a non-daily unit (e.g.
    m3/month) to a daily unit (e.g. m3/day).  This method just changes
    the values of the timeseries, and does not alter the frequency.

    Parameters
    ----------
    series: pandas.Series
        stress series with a DatetimeIndex
    method: bool or str, optional
        True (default) or "divide" rescales the values; any other value
        returns the series unchanged.

    Returns
    -------
    pandas.Series
    """
    if method is True or method == "divide":
        # width of each timestep in days, shifted one step back so the value
        # at t[i] is divided by the width of the interval that follows it
        # (preserving the original author's convention).
        # BUG FIX: the original used label-aligned setitem (dt[:-1] = dt[1:]),
        # which is a no-op on a Series, and positional dt[-1] = NaN, which
        # appends a new row (or raises) on a DatetimeIndex in modern pandas.
        dt = series.index.to_series().diff() / Timedelta(1, 'D')
        dt = dt.shift(-1)
        # daily data (all widths 1 or NaN) needs no rescaling
        if not ((dt == 1.0) | dt.isna()).all():
            series = series / dt
    return series
def excel2datetime(tindex, freq="D"):
    """Convert an Excel serial date number to a pandas Timestamp.

    Parameters
    ----------
    tindex: float or int
        Excel serial date value (days since the Excel epoch).
    freq: str
        Unit of *tindex*, "D" by default.

    Returns
    -------
    datetimes: pandas.Timestamp
    """
    # Excel's epoch is 1899-12-30 (this accounts for Excel's leap-year bug)
    excel_epoch = to_datetime('1899-12-30')
    return excel_epoch + Timedelta(tindex, freq)
def datenum_to_datetime(datenum):
    """Convert Matlab datenum into Python datetime.

    Parameters
    ----------
    datenum: float
        date in datenum format

    Returns
    -------
    datetime :
        Datetime object corresponding to datenum.
    """
    whole_days = int(datenum)
    frac_days = datenum % 1.
    # Matlab's day 1 is year 0000-01-01, 366 days before Python's day 1
    matlab_offset = timedelta(days=366)
    return datetime.fromordinal(whole_days) + timedelta(days=frac_days) - matlab_offset
def datetime2matlab(tindex):
    """Convert a pandas Timestamp into a Matlab datenum (float).

    Parameters
    ----------
    tindex: pandas.Timestamp

    Returns
    -------
    float
        Matlab serial date number (whole days + day fraction).
    """
    # Matlab's day 1 is 0000-01-01, 366 days before Python's ordinal day 1
    shifted = tindex + Timedelta(days=366)
    # day fraction from the preceding midnight (floor is equivalent to the
    # original round: Timedelta.seconds is always the non-negative component)
    day_fraction = (tindex - tindex.floor("D")).seconds / (24.0 * 60.0 * 60.0)
    return shifted.toordinal() + day_fraction
def get_stress_tmin_tmax(ml):
    """Get the minimum and maximum time that all of the stresses have data.

    Parameters
    ----------
    ml: pastas.Model

    Returns
    -------
    tuple of (pandas.Timestamp, pandas.Timestamp)
        Latest start and earliest end over all stresses (the common span).

    Raises
    ------
    TypeError
        When *ml* is not a Model instance.
    """
    from .model import Model
    if not isinstance(ml, Model):
        raise TypeError('Unknown type {}'.format(type(ml)))
    tmin = Timestamp.min
    tmax = Timestamp.max
    for name in ml.stressmodels:
        for stress in ml.stressmodels[name].stress:
            tmin = max(tmin, stress.series_original.index.min())
            tmax = min(tmax, stress.series_original.index.max())
    return tmin, tmax
def initialize_logger(logger=None, level=logging.INFO):
    """Internal method to create a logger instance to log program output.

    Parameters
    ----------
    logger : logging.Logger, optional
        A Logger-instance. Use ps.logger to initialise the Logging instance
        that handles all logging throughout pastas, including all sub modules
        and packages. Defaults to the 'pastas' logger.
    level : int, optional
        Log level, logging.INFO by default.
    """
    logger = logging.getLogger('pastas') if logger is None else logger
    logger.setLevel(level)
    # start from a clean state: console only, no file handlers
    remove_file_handlers(logger)
    set_console_handler(logger)
    # add_file_handlers(logger)
def set_console_handler(logger=None, level=logging.INFO,
                        fmt="%(levelname)s: %(message)s"):
    """Method to add a console handler to the logger of Pastas.

    Parameters
    ----------
    logger : logging.Logger, optional
        A Logger-instance. Use ps.logger to initialise the Logging instance
        that handles all logging throughout pastas, including all sub modules
        and packages. Defaults to the 'pastas' logger.
    level : int, optional
        Log level for the console handler.
    fmt : str, optional
        Log message format.
    """
    if logger is None:
        logger = logging.getLogger('pastas')
    # replace any existing console handler to avoid duplicate output
    remove_console_handler(logger)
    console = logging.StreamHandler()
    console.setLevel(level)
    console.setFormatter(logging.Formatter(fmt=fmt))
    logger.addHandler(console)
def set_log_level(level):
    """Set the log-level of the console. This method is just a wrapper around
    set_console_handler.

    Parameters
    ----------
    level: str
        String with the level to log messages to the screen for. Options
        are: "INFO", "WARNING", and "ERROR".

    Examples
    --------
    >>> import pastas as ps
    >>> ps.set_log_level("ERROR")
    """
    set_console_handler(level=level)
def remove_console_handler(logger=None):
    """Method to remove the console handler from the logger of Pastas.

    Parameters
    ----------
    logger : logging.Logger, optional
        A Logger-instance. Use ps.logger to initialise the Logging instance
        that handles all logging throughout pastas, including all sub modules
        and packages. Defaults to the 'pastas' logger.
    """
    if logger is None:
        logger = logging.getLogger('pastas')
    # BUG FIX: iterate over a copy -- removeHandler mutates logger.handlers,
    # and removing while iterating the live list skips handlers.
    for handler in list(logger.handlers):
        if isinstance(handler, logging.StreamHandler):
            logger.removeHandler(handler)
def add_file_handlers(logger=None, filenames=('info.log', 'errors.log'),
                      levels=(logging.INFO, logging.ERROR), maxBytes=10485760,
                      backupCount=20, encoding='utf8',
                      fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                      datefmt='%y-%m-%d %H:%M'):
    """Method to add file handlers in the logger of Pastas.

    Parameters
    ----------
    logger : logging.Logger, optional
        A Logger-instance. Use ps.logger to initialise the Logging instance
        that handles all logging throughout pastas, including all sub modules
        and packages. Defaults to the 'pastas' logger.
    filenames, levels : tuple
        Paired log file names and their levels; one rotating file handler is
        created per pair.
    """
    if logger is None:
        logger = logging.getLogger('pastas')
    # one shared formatter for all file handlers
    formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
    # create file handlers, set the level & formatter, and add to the logger
    for path, lvl in zip(filenames, levels):
        file_handler = handlers.RotatingFileHandler(path, maxBytes=maxBytes,
                                                    backupCount=backupCount,
                                                    encoding=encoding)
        file_handler.setLevel(lvl)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
def remove_file_handlers(logger=None):
    """Method to remove any file handlers in the logger of Pastas.

    Parameters
    ----------
    logger : logging.Logger, optional
        A Logger-instance. Use ps.logger to initialise the Logging instance
        that handles all logging throughout pastas, including all sub modules
        and packages. Defaults to the 'pastas' logger.
    """
    if logger is None:
        logger = logging.getLogger('pastas')
    # BUG FIX: iterate over a copy -- removeHandler mutates logger.handlers,
    # and removing while iterating the live list skips handlers.
    for handler in list(logger.handlers):
        if isinstance(handler, handlers.RotatingFileHandler):
            logger.removeHandler(handler)
def validate_name(name):
    """Method to check user-provided names and log a warning if wrong.

    Parameters
    ----------
    name: str
        String with the name to check for 'illegal' characters.

    Returns
    -------
    name: str
        Unchanged name string (coerced to ``str``).

    Notes
    -----
    Forbidden characters are the backslash, the forward slash and the space.
    A warning is logged for each one found; the name is returned unchanged.
    """
    name = str(name)  # Make sure it is a string
    illegal_chars = ("\\", "/", " ")
    for char in illegal_chars:
        if char in name:
            logger.warning("User-provided name '%s' contains illegal "
                           "character %s", name, char)
    return name
def show_versions(lmfit=False, numba=False):
    """Method to print the version of dependencies.

    Parameters
    ----------
    lmfit: bool, optional
        Print the version of lmfit. Needs to be installed.
    numba: bool, optional
        Print the version of numba. Needs to be installed.
    """
    from sys import version as os_version
    from matplotlib import __version__ as mpl_version
    from numpy import __version__ as np_version
    from pandas import __version__ as pd_version
    from scipy import __version__ as sc_version
    from pastas import __version__ as ps_version

    lines = [
        f"Python version: {os_version}",
        f"Numpy version: {np_version}",
        f"Scipy version: {sc_version}",
        f"Pandas version: {pd_version}",
        f"Pastas version: {ps_version}",
        f"Matplotlib version: {mpl_version}",
    ]
    if lmfit:
        from lmfit import __version__ as lm_version
        lines.append(f"lmfit version: {lm_version}")
    if numba:
        from numba import __version__ as nb_version
        lines.append(f"numba version: {nb_version}")
    return print("\n".join(lines))
def check_numba():
    """Log a warning when Numba is not installed.

    The ``njit`` import is only used as an availability probe; the imported
    name itself is discarded.
    """
    try:
        from numba import njit
    except ImportError:
        logger.warning("Numba is not installed. Installing Numba is "
                       "recommended for significant speed-ups.")
| |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 15 09:36:48 2018
@author: mbarbier
"""
""" Clear all variables """
from IPython import get_ipython
get_ipython().magic('reset -sf')
import os
import argparse
from module_train import train
from module_utilities import writeDictToCsv
import time
from keras import losses
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
def multi_slice_viewer(volume):
    """Show a 3-D array slice-by-slice; the j/i keys step through slices.

    Parameters
    ----------
    volume : ndarray, shape (rows, cols, slices)
        Volume to browse; it is transposed so the slice axis comes first.
    """
    # move the slice axis to the front: (rows, cols, slices) -> (slices, rows, cols)
    volume = np.transpose(volume, (2, 0, 1))
    fig, ax = plt.subplots()
    # stash browsing state on the axes so the key handler can reach it
    ax.volume = volume
    ax.index = 0
    ax.imshow(volume[ax.index])
    fig.canvas.mpl_connect('key_press_event', process_key)
def process_key(event):
    """Key handler for the slice viewer: 'j' steps back, 'i' steps forward."""
    fig = event.canvas.figure
    ax = fig.axes[0]
    actions = {'j': previous_slice, 'i': next_slice}
    action = actions.get(event.key)
    if action is not None:
        action(ax)
    fig.canvas.draw()
def previous_slice(ax):
    """Step the viewer one slice back, wrapping around at the first slice."""
    stack = ax.volume
    ax.index = (ax.index - 1) % stack.shape[0]  # wrap around using %
    ax.images[0].set_array(stack[ax.index])
def next_slice(ax):
    """Step the viewer one slice forward, wrapping around at the last slice."""
    stack = ax.volume
    ax.index = (ax.index + 1) % stack.shape[0]
    ax.images[0].set_array(stack[ax.index])
def imageShow(y):
    """Display a 2-D float array as an up-scaled grayscale image.

    Parameters
    ----------
    y : ndarray
        2-D array; values are normalized to 0-255 by the array maximum.
    """
    img = Image.fromarray(255 * y / y.max(), "F")
    # BUG FIX: Image.resize returns a new image (it is not in-place); the
    # original discarded the resized result and displayed the small image.
    img = img.resize((100, 100), resample=Image.NEAREST)
    plt.imshow(img, cmap="Greys")
    #plt.colorbar()
def splitLabels(yint, nClasses):
    """One-hot encode an integer label mask.

    Parameters
    ----------
    yint : ndarray, shape (rows, cols)
        Integer label image; 0 is background.
    nClasses : int
        Number of classes, background included.

    Returns
    -------
    ndarray, shape (rows, cols, nClasses)
        Channel 0 holds the background mask; channel r marks label r.
    """
    rows = yint.shape[0]
    cols = yint.shape[1]
    encoded = np.zeros((rows, cols, nClasses))
    # start with everything marked as background
    encoded[:, :, 0] = np.ones((rows, cols))
    for label in range(1, nClasses):
        # binary mask of this label
        layer = np.zeros((rows, cols))
        layer[yint == label] = 1
        encoded[:, :, label] = layer
        # clear the background wherever this label is present
        encoded[:, :, 0] = (1 - layer) * encoded[:, :, 0]
    return encoded
def compare(flag):
    """Build two small multi-label masks and browse them as one-hot stacks."""
    print('-'*30)
    print('Masks as single channel with multiple 1, .. , n labels')
    print('-'*30)
    # ground-truth mask
    y = np.zeros((3, 3), np.float32)
    y[1, 1] = 2
    y[1, 2] = 2
    y[0, 2] = 1
    # predicted mask
    y_pred = np.zeros((3, 3), np.float32)
    y_pred[1, 1] = 1
    y_pred[2, 2] = 2
    y_pred[1, 2] = 2
    # classes are 0..max, background included
    nClasses = int(y.max()) + 1
    y_cat = splitLabels(y, nClasses)
    y_pred_cat = splitLabels(y_pred, nClasses)
    multi_slice_viewer(y_cat)
    plt.figure()
    multi_slice_viewer(y_pred_cat)
def init():
    """Parse command-line options and return the parsed argument namespace."""
    regionList = ["1", "2", "3"]
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_dir", help="output directory", default="./output_loss")
    parser.add_argument("--loss_metric", help="Metric used as loss, if None, the network default metric is used", default=None)
    parser.add_argument("--region_list", help="List of ROIs", default=regionList)
    parser.add_argument("--image_format", help="Format of input and output images", default="png")
    return parser.parse_args()
def run(flag):
    """Ensure the output directories exist, then run the comparison."""
    makeDirs(flag)
    compare(flag)
def makeDirs(flag):
    """Ensure ``flag.output_dir`` exists.

    Uses os.makedirs so missing parent directories are created too, and
    passes exist_ok to avoid racing a concurrent creation (the original
    isdir-then-mkdir check was both race-prone and failed on missing
    parents).
    """
    os.makedirs(flag.output_dir, exist_ok=True)
def main():
    """Script entry point: parse CLI flags and run the comparison."""
    run(init())


if __name__ == '__main__':
    main()
"""
print('-'*30)
print('Fuse masks to single multi-label image')
print('-'*30)
regionList = flag.region_list
regionIdList = range(1, len(regionList)+1)
nClasses = len(regionList)+1
#masks = masks_all[regionList[0]]
s = masks_all[regionList[0]].shape
nImages = s[0]
masks = np.zeros( (nImages, img_rows, img_cols, nClasses) )
masks_reshape = np.zeros( (nImages, img_rows * img_cols, nClasses) )
#one_temp = np.ones( (s[0], img_rows, img_cols) )
masks[:,:,:,0] = np.ones( (nImages, img_rows, img_cols) )
for regionIndex in range(len(regionList)):
# put the mask layer of the region index to 1
masks_region = masks_all[regionList[regionIndex]]
masks_region = preprocess(masks_region, img_rows, img_cols )
temp = masks_region[:,:,:,0]
masks[:,:,:,regionIndex+1] = temp
# and the background layer to 0, for every region pixel
masks[:,:,:,0] = (1 - temp) * masks[:,:,:,0]
# Reshape (we don't need/use this???)
temp = temp.reshape((nImages,img_rows * img_cols))
masks_reshape[:,:,regionIndex+1] = temp
print('-'*30)
print('Save masks as images')
print('-'*30)
for imageIndex in range( nImages ):
fileName = 'mask_sample_%d.tif' % ( imageIndex )
filePath = os.path.join( flag.output_masks_feed_dir, fileName )
maskData = np.transpose( masks[imageIndex, ...], (2, 0, 1) )
maskData = np.expand_dims( maskData, axis = 3 )
writeData( filePath, maskData )
print('-'*30)
print('Save (preprocessed) images as images')
print('-'*30)
for imageIndex in range( nImages ):
fileName = 'image_sample_%d.tif' % ( imageIndex )
filePath = os.path.join( flag.output_images_feed_dir, fileName )
imageData = np.transpose( images[imageIndex, ...], (2, 0, 1) )
imageData = np.expand_dims( imageData, axis = 3 )
writeData( filePath, imageData )
#masks = np.expand_dims( masks, axis = 3 )
print('-'*30)
print('Load the model')
print('-'*30)
#model = get_unet( img_rows, img_cols )
metrics = getMetricFunctions()
model = getModel( flag.network, nClasses, flag.optimizer, flag.activation, flag.loss_metric, metrics, flag.learning_rate, flag.image_size )
#model = unet( nClasses = nClasses, optimizer = None, img_rows = img_rows, img_cols = img_cols )
#model = segnet( nClasses = nClasses, optimizer = None, img_rows = img_rows, img_cols = img_cols )
model_checkpoint = ModelCheckpoint( os.path.join( flag.output_run_dir, 'weights.{epoch:03d}.h5'), period=n_epochs//10+1)
show_pred_masks = trainCheck(flag)
if flag.data_augmentation:
#steps_per_epoch = len(train_generator) / flag.batch_size
steps_per_epoch = 10
images, imagesValidation = splitDataSet( images, 0.8 )
masks, masksValidation = splitDataSet( masks, 0.8)
train_generator = dataAugmentation( flag, images, masks )
history = model.fit_generator( train_generator,
validation_data = (imagesValidation, masksValidation),
steps_per_epoch = steps_per_epoch,
epochs=n_epochs, verbose=1, shuffle=True,
callbacks=[model_checkpoint,show_pred_masks])
# for e in range(flag.epochs):
# print('Epoch', e)
# batches = 0
# for x_batch, y_batch in train_generator:
# history = model.fit( x_batch[0], y_batch[0], validation_data = (imagesValidation, masksValidation), callbacks=[show_pred_masks])
# print('batch ' + str(batches) + ' : do nothing, save something?')
# #model.fit(x_batch, y_batch)
# batches += 1
# if batches >= len(images) / 32:
# # we need to break the loop by hand because
# # the generator loops indefinitely
# break
else:
#categorical_labels = to_categorical(int_labels, num_classes=None)
# if model.loss is "categorical_crossentropy":
# masks_cat = to_categorical(masks, num_classes=None)
history = model.fit( images, masks, batch_size=flag.batch_size, epochs=n_epochs, verbose=1, shuffle=True,
validation_split=0.2, callbacks=[model_checkpoint,show_pred_masks])
# list all data in history
#print(history.history.keys())
plotVarList = getMetrics( history )
showTrainingHistory( flag, history, plotVarList )
#showTrainingHistoryMultiClass(history)
"""
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
#
################################################################################
import copy
import functools
import numpy as np
from federatedml.feature.instance import Instance
from federatedml.feature.sparse_vector import SparseVector
from federatedml.model_base import ModelBase
from federatedml.protobuf.generated.data_transform_meta_pb2 import DataTransformMeta
from federatedml.protobuf.generated.data_transform_meta_pb2 import DataTransformImputerMeta
from federatedml.protobuf.generated.data_transform_meta_pb2 import DataTransformOutlierMeta
from federatedml.protobuf.generated.data_transform_param_pb2 import DataTransformParam
from federatedml.protobuf.generated.data_transform_param_pb2 import DataTransformImputerParam
from federatedml.protobuf.generated.data_transform_param_pb2 import DataTransformOutlierParam
from federatedml.statistic import data_overview
from federatedml.util import abnormal_detection
from federatedml.util import consts
from federatedml.util import LOGGER
from federatedml.util.io_check import assert_io_num_rows_equal
# =============================================================================
# DenseFeatureTransformer
# =============================================================================
class DenseFeatureTransformer(object):
def __init__(self, data_transform_param):
    """Cache the dense-transform configuration from *data_transform_param*.

    The remaining attributes are fit-state, populated later by
    generate_header()/fit().
    """
    # parsing configuration
    self.delimitor = data_transform_param.delimitor
    self.data_type = data_transform_param.data_type
    self.exclusive_data_type = data_transform_param.exclusive_data_type
    # missing-value handling
    self.missing_fill = data_transform_param.missing_fill
    self.default_value = data_transform_param.default_value
    self.missing_fill_method = data_transform_param.missing_fill_method
    self.missing_impute = data_transform_param.missing_impute
    # outlier handling
    self.outlier_replace = data_transform_param.outlier_replace
    self.outlier_replace_method = data_transform_param.outlier_replace_method
    self.outlier_impute = data_transform_param.outlier_impute
    self.outlier_replace_value = data_transform_param.outlier_replace_value
    # label configuration; the name is lower-cased to match the header
    self.with_label = data_transform_param.with_label
    self.label_name = data_transform_param.label_name.lower() if self.with_label else None
    self.label_type = data_transform_param.label_type if self.with_label else None
    self.output_format = data_transform_param.output_format
    # fit-state, filled in during fit/transform
    self.missing_impute_rate = None
    self.outlier_replace_rate = None
    self.label_idx = None
    self.header = None
    self.sid_name = None
    self.exclusive_data_type_fid_map = {}
    self.match_id_name = None
    self.with_match_id = data_transform_param.with_match_id
def generate_header(self, input_data, mode="fit"):
header = input_data.schema["header"].lower()
sid_name = input_data.schema["sid"].lower()
LOGGER.debug("header is {}".format(header))
LOGGER.debug("sid_name is {}".format(sid_name))
if not header and not sid_name:
raise ValueError("dense input-format should have header schema")
header_gen = None
if self.with_match_id:
self.match_id_name = header.split(self.delimitor, -1)[0]
if self.with_label and self.label_name == self.match_id_name:
raise ValueError("Match id column name equals to label column name")
if self.with_label:
if mode == "fit":
if not header:
raise ValueError("dense input-format for fit stage should not be None if with_label is true")
self.label_idx = header.split(self.delimitor, -1).index(self.label_name)
header_list = header.split(self.delimitor, -1)
header_gen = header_list[:self.label_idx] + header_list[self.label_idx + 1:]
elif header:
header_list = header.split(self.delimitor, -1)
if self.label_name in header_list:
self.label_idx = header_list.index(self.label_name)
header_gen = header_list[:self.label_idx] + header_list[self.label_idx + 1:]
else:
self.label_idx = None
header_gen = header_list
elif header:
header_gen = header.split(self.delimitor, -1)
if self.with_match_id:
header_gen = header_gen[1:]
self.header = header_gen
self.sid_name = sid_name
if header_gen:
for i in range(len(header_gen)):
col_name = header_gen[i]
if self.exclusive_data_type is not None and col_name in self.exclusive_data_type:
self.exclusive_data_type_fid_map[i] = self.exclusive_data_type[col_name]
def get_schema(self):
    """Build and return the schema dict for the transformer's current state."""
    return make_schema(self.header,
                       self.sid_name,
                       self.label_name,
                       self.match_id_name)
def extract_feature_value(self, value):
    """Split one raw row and return only its feature columns.

    Strips the leading match-id column (when with_match_id) and the label
    column (when self.label_idx is set). Returns [] when no feature header
    was generated or the row holds only match id/label columns.

    Raises ValueError when the row is too short to even contain the
    expected match id / label columns.
    """
    value = value.split(self.delimitor, -1)
    if not self.header:
        return []
    elif self.with_match_id and self.label_idx is not None:
        # column 0 is the match id, self.label_idx points at the label
        if len(value) == 2:
            return []
        elif len(value) < 2:
            # BUGFIX: the placeholder was never filled before
            raise ValueError(
                "Only {} column is found, can not extract match_id and label".format(len(value)))
        else:
            return value[1: self.label_idx] + value[self.label_idx + 1:]
    elif self.with_match_id:
        if len(value) < 1:
            raise ValueError("Only 0 column is found, can not extract match_id")
        elif len(value) == 1:
            return []
        else:
            return value[1:]
    elif self.label_idx is not None:
        if len(value) < 1:
            raise ValueError("Only 0 column is found, can not extract label")
        elif len(value) == 1:
            return []
        else:
            return value[:self.label_idx] + value[self.label_idx + 1:]
    else:
        return value
def read_data(self, input_data, mode="fit"):
"""Convert a dense input table into a table of Instance objects.

mode -- "fit" (re)derives the header and learns imputation state;
"transform" reuses the fitted state and re-aligns the output columns
to the header remembered from fit time.
"""
LOGGER.info("start to read dense data and change data to instance")
abnormal_detection.empty_table_detection(input_data)
input_data_labels = None
input_data_match_id = None
fit_header = None
if mode == "transform":
# remember the fit-time header so output columns can be re-aligned
fit_header = self.header
self.generate_header(input_data, mode=mode)
input_data_features = input_data.mapValues(self.extract_feature_value)
if self.label_idx is not None:
data_shape = data_overview.get_data_shape(input_data)
if not data_shape or self.label_idx >= data_shape:
raise ValueError("input data's value is empty or it does not contain a label")
input_data_labels = input_data.mapValues(lambda value: value.split(self.delimitor, -1)[self.label_idx])
if self.with_match_id:
# match id is always the first raw column
input_data_match_id = input_data.mapValues(
lambda value: value.split(self.delimitor, -1)[0])
if mode == "fit":
data_instance = self.fit(input_data, input_data_features, input_data_labels, input_data_match_id)
else:
data_instance = self.transform(input_data_features, input_data_labels, input_data_match_id)
data_instance = data_overview.header_alignment(data_instance, fit_header)
return data_instance
def fit(self, input_data, input_data_features, input_data_labels, input_data_match_id):
    """Learn imputation/outlier statistics, then build Instance records.

    The schema is attached both before imputation (the Imputer reads it)
    and to the final instance table.
    """
    schema = self.get_schema()
    set_schema(input_data_features, schema)
    features = self.fill_missing_value(input_data_features, "fit")
    features = self.replace_outlier_value(features, "fit")
    instances = self.gen_data_instance(features, input_data_labels, input_data_match_id)
    set_schema(instances, schema)
    return instances
@assert_io_num_rows_equal
def transform(self, input_data_features, input_data_labels, input_data_match_id):
"""Apply the fitted imputation/outlier replacement and build Instances.

NOTE(review): the schema built here omits match_id_name, unlike fit()
which uses get_schema() -- confirm this asymmetry is intended.
"""
schema = make_schema(self.header, self.sid_name, self.label_name)
set_schema(input_data_features, schema)
input_data_features = self.fill_missing_value(input_data_features, "transform")
input_data_features = self.replace_outlier_value(input_data_features, "transform")
data_instance = self.gen_data_instance(input_data_features, input_data_labels, input_data_match_id)
set_schema(data_instance, schema)
return data_instance
def fill_missing_value(self, input_data_features, mode="fit"):
"""Impute missing feature values via federatedml's Imputer (no-op when
self.missing_fill is disabled).

"fit" learns and stores the replacement values in self.default_value;
"transform" reuses them. The imputed-value list and the per-column
impute rate are recorded on self for later summaries/serialization.
"""
if self.missing_fill:
from federatedml.feature.imputer import Imputer
imputer_processor = Imputer(self.missing_impute)
if mode == "fit":
input_data_features, self.default_value = imputer_processor.fit(input_data_features,
replace_method=self.missing_fill_method,
replace_value=self.default_value)
if self.missing_impute is None:
self.missing_impute = imputer_processor.get_missing_value_list()
else:
input_data_features = imputer_processor.transform(input_data_features,
transform_value=self.default_value)
if self.missing_impute is None:
self.missing_impute = imputer_processor.get_missing_value_list()
self.missing_impute_rate = imputer_processor.get_impute_rate(mode)
return input_data_features
def replace_outlier_value(self, input_data_features, mode="fit"):
"""Replace outlier values via federatedml's Imputer (no-op when
self.outlier_replace is disabled).

Mirrors fill_missing_value(): "fit" learns self.outlier_replace_value,
"transform" reuses it; the replace rate is recorded on self.
"""
if self.outlier_replace:
from federatedml.feature.imputer import Imputer
imputer_processor = Imputer(self.outlier_impute)
if mode == "fit":
input_data_features, self.outlier_replace_value = \
imputer_processor.fit(input_data_features,
replace_method=self.outlier_replace_method,
replace_value=self.outlier_replace_value)
if self.outlier_impute is None:
self.outlier_impute = imputer_processor.get_missing_value_list()
else:
input_data_features = imputer_processor.transform(input_data_features,
transform_value=self.outlier_replace_value)
self.outlier_replace_rate = imputer_processor.get_impute_rate(mode)
return input_data_features
def gen_data_instance(self, input_data_features, input_data_labels, input_data_match_id):
"""Join features with labels (when present) into Instance records, then
attach match ids (when present)."""
if self.label_idx is not None:
data_instance = input_data_features.join(input_data_labels,
lambda features, label: self.to_instance(features, label))
else:
data_instance = input_data_features.mapValues(lambda features: self.to_instance(features))
if self.with_match_id:
# match ids are attached after the instances are built
data_instance = data_instance.join(input_data_match_id, self.append_match_id)
return data_instance
def append_match_id(self, inst, match_id):
    """Attach the match id to an Instance and hand the same object back."""
    setattr(inst, "inst_id", match_id)
    return inst
def to_instance(self, features, label=None):
    """Convert one row's feature list (plus optional raw label) into an
    Instance.

    Raises ValueError when the row width disagrees with the fitted header.
    The label is cast to self.label_type when a label column exists.
    """
    if self.header is None and len(features) != 0:
        raise ValueError("features shape {} not equal to header shape 0".format(len(features)))
    elif self.header is not None and len(self.header) != len(features):
        raise ValueError("features shape {} not equal to header shape {}".format(len(features), len(self.header)))
    if self.label_idx is not None:
        # cast the raw label text to the configured label type
        if self.label_type == 'int':
            label = int(label)
        elif self.label_type in ["float", "float64"]:
            label = float(label)
    # feature formatting is identical with or without a label, so the
    # previously duplicated call is collapsed into one
    format_features = DenseFeatureTransformer.gen_output_format(features, self.data_type,
                                                                self.exclusive_data_type_fid_map,
                                                                self.output_format,
                                                                missing_impute=self.missing_impute)
    return Instance(inst_id=None,
                    features=format_features,
                    label=label)
@staticmethod
def gen_output_format(features, data_type='float', exclusive_data_type_fid_map=None,
                      output_format='dense', missing_impute=None):
    """Format one row of raw feature strings as a dense ndarray or a
    SparseVector.

    Missing tokens (either those in missing_impute, or the built-in set
    '', 'NULL', 'null', 'NA') become NaN. With exclusive_data_type_fid_map
    the dense output keeps per-column numpy types (dtype=object); in the
    sparse output, zero-valued numeric entries are dropped.
    """
    if output_format not in ["dense", "sparse"]:
        raise ValueError("output format {} is not define".format(output_format))

    default_missing_tokens = ['', 'NULL', 'null', "NA"]

    def _is_missing(raw):
        if missing_impute is not None:
            return raw in missing_impute
        return raw in default_missing_tokens

    if output_format == "dense":
        format_features = copy.deepcopy(features)
        if data_type in ["int", "int64", "long", "float", "float64", "double"]:
            for pos, raw in enumerate(features):
                if _is_missing(raw):
                    format_features[pos] = np.nan
        if exclusive_data_type_fid_map:
            for fid in range(len(features)):
                dtype = exclusive_data_type_fid_map.get(fid, data_type)
                format_features[fid] = getattr(np, dtype)(features[fid])
            return np.asarray(format_features, dtype=object)
        return np.asarray(format_features, dtype=data_type)

    indices = []
    data = []
    column_shape = len(features)
    for pos in range(column_shape):
        raw = features[pos]
        if _is_missing(raw):
            indices.append(pos)
            data.append(np.nan)
        elif data_type in ['float', 'float64', "double"]:
            val = float(raw)
            if np.fabs(val) < consts.FLOAT_ZERO:
                continue
            indices.append(pos)
            data.append(val)
        elif data_type in ['int', "int64", "long"]:
            val = int(raw)
            if val == 0:
                continue
            indices.append(pos)
            data.append(val)
        else:
            indices.append(pos)
            data.append(raw)
    return SparseVector(indices, data, column_shape)
def get_summary(self):
"""Summarize missing-fill and outlier-replacement statistics.

Returns {} when neither feature is enabled.
"""
if not self.missing_fill and not self.outlier_replace:
return {}
summary_buf = {}
if self.missing_fill:
missing_summary = dict()
missing_summary["missing_value"] = list(self.missing_impute)
missing_summary["missing_impute_value"] = dict(zip(self.header, self.default_value))
missing_summary["missing_impute_rate"] = dict(zip(self.header, self.missing_impute_rate))
summary_buf["missing_fill_info"] = missing_summary
if self.outlier_replace:
outlier_replace_summary = dict()
outlier_replace_summary["outlier_value"] = list(self.outlier_impute)
outlier_replace_summary["outlier_replace_value"] = dict(zip(self.header, self.outlier_replace_value))
outlier_replace_summary["outlier_replace_rate"] = dict(zip(self.header, self.outlier_replace_rate))
# NOTE(review): the top-level key "outlier_replace_rate" is inconsistent
# with "missing_fill_info" above ("outlier_replace_info" was likely
# intended); confirm no consumer relies on it before renaming.
summary_buf["outlier_replace_rate"] = outlier_replace_summary
return summary_buf
def save_model(self):
"""Serialize transformer settings plus imputer and outlier state into
DataTransformMeta/Param protobufs."""
transform_meta, transform_param = save_data_transform_model(input_format="dense",
delimitor=self.delimitor,
data_type=self.data_type,
exclusive_data_type=self.exclusive_data_type,
with_label=self.with_label,
label_type=self.label_type,
output_format=self.output_format,
header=self.header,
sid_name=self.sid_name,
label_name=self.label_name,
with_match_id=self.with_match_id,
model_name="DenseFeatureTransformer")
missing_imputer_meta, missing_imputer_param = save_missing_imputer_model(self.missing_fill,
self.missing_fill_method,
self.missing_impute,
self.default_value,
self.missing_impute_rate,
self.header,
"Imputer")
transform_meta.imputer_meta.CopyFrom(missing_imputer_meta)
transform_param.imputer_param.CopyFrom(missing_imputer_param)
outlier_meta, outlier_param = save_outlier_model(self.outlier_replace,
self.outlier_replace_method,
self.outlier_impute,
self.outlier_replace_value,
self.outlier_replace_rate,
self.header,
"Outlier")
transform_meta.outlier_meta.CopyFrom(outlier_meta)
transform_param.outlier_param.CopyFrom(outlier_param)
return {"DataTransformMeta": transform_meta,
"DataTransformParam": transform_param
}
def load_model(self, model_meta, model_param):
"""Restore transformer, imputer and outlier state from protobufs.

_1/_2 discard the tag-format fields that do not apply to dense input.
"""
self.delimitor, self.data_type, self.exclusive_data_type, _1, _2, self.with_label, \
self.label_type, self.output_format, self.header, self.sid_name, self.label_name, self.with_match_id = \
load_data_transform_model("DenseFeatureTransformer", model_meta, model_param)
self.missing_fill, self.missing_fill_method, \
self.missing_impute, self.default_value = load_missing_imputer_model(self.header,
"Imputer",
model_meta.imputer_meta,
model_param.imputer_param)
self.outlier_replace, self.outlier_replace_method, \
self.outlier_impute, self.outlier_replace_value = load_outlier_model(self.header,
"Outlier",
model_meta.outlier_meta,
model_param.outlier_param)
# =============================================================================
# SparseFeatureTransformer: mainly for libsvm input format
# =============================================================================
class SparseFeatureTransformer(object):
"""Transform libsvm-style sparse rows ("[match_id] [label] fid:value ...")
into Instance records.

Feature ids are 0-based; the header is the stringified ids
0..max_feature discovered at fit time.
"""
def __init__(self, data_transform_param):
self.delimitor = data_transform_param.delimitor
self.data_type = data_transform_param.data_type
self.label_type = data_transform_param.label_type
self.output_format = data_transform_param.output_format
self.header = None
self.sid_name = "sid"
self.with_match_id = data_transform_param.with_match_id
self.match_id_name = "match_id" if self.with_match_id else None
self.with_label = data_transform_param.with_label
self.label_name = data_transform_param.label_name if self.with_label else None
def get_max_feature_index(self, line, delimitor=' '):
"""Return the largest feature id on one raw line, or -1 when the line
holds no feature columns (only match id / label)."""
if line.strip() == '':
raise ValueError("find an empty line, please check!!!")
cols = line.split(delimitor, -1)
offset = 0
if self.with_match_id:
offset += 1
if self.with_label:
offset += 1
if len(cols) <= offset:
return -1
return max([int(fid_value.split(":", -1)[0]) for fid_value in cols[offset:]])
def generate_header(self, max_feature):
"""The header is simply the stringified feature ids 0..max_feature."""
self.header = [str(i) for i in range(max_feature + 1)]
def read_data(self, input_data, mode="fit"):
"""Convert a sparse input table into a table of Instance objects."""
LOGGER.info("start to read sparse data and change data to instance")
abnormal_detection.empty_table_detection(input_data)
if not data_overview.get_data_shape(input_data):
raise ValueError("input data's value is empty, it does not contain a label")
if mode == "fit":
data_instance = self.fit(input_data)
else:
data_instance = self.transform(input_data)
schema = make_schema(self.header, self.sid_name, self.label_name, self.match_id_name)
set_schema(data_instance, schema)
return data_instance
def fit(self, input_data):
"""Scan the data for the max feature id, build the header, then convert."""
get_max_fid = functools.partial(self.get_max_feature_index, delimitor=self.delimitor)
max_feature = input_data.mapValues(get_max_fid).reduce(lambda max_fid1, max_fid2: max(max_fid1, max_fid2))
if max_feature == -1:
raise ValueError("no feature value in input data, please check!")
self.generate_header(max_feature)
data_instance = self.gen_data_instance(input_data, max_feature)
return data_instance
def transform(self, input_data):
"""Convert using the feature space fixed at fit time."""
max_feature = len(self.header) - 1
data_instance = self.gen_data_instance(input_data, max_feature)
return data_instance
def gen_data_instance(self, input_data, max_feature):
# bundle the per-row parsing config into a list for the mapValues callable
params = [self.delimitor, self.data_type,
self.label_type, self.with_match_id,
self.output_format, self.with_label, max_feature]
to_instance_with_param = functools.partial(self.to_instance, params)
data_instance = input_data.mapValues(to_instance_with_param)
return data_instance
@staticmethod
def to_instance(param_list, value):
"""Parse one raw sparse row into an Instance (dense or sparse features)."""
delimitor = param_list[0]
data_type = param_list[1]
label_type = param_list[2]
with_match_id = param_list[3]
output_format = param_list[4]
with_label = param_list[5]
max_fid = param_list[6]
if output_format not in ["dense", "sparse"]:
raise ValueError("output format {} is not define".format(output_format))
cols = value.split(delimitor, -1)
next_idx = 0
if with_match_id:
match_id = cols[0]
next_idx = 1
else:
match_id = None
label = None
if with_label:
label = cols[next_idx]
if label_type == 'int':
label = int(label)
elif label_type in ["float", "float64"]:
label = float(label)
next_idx += 1
fid_value = []
for i in range(next_idx, len(cols)):
fid, val = cols[i].split(":", -1)
fid = int(fid)
if data_type in ["float", "float64"]:
val = float(val)
elif data_type in ["int", "int64"]:
val = int(val)
fid_value.append((fid, val))
if output_format == "dense":
# absent feature ids default to 0 in the dense representation
features = [0 for i in range(max_fid + 1)]
for fid, val in fid_value:
features[fid] = val
features = np.asarray(features, dtype=data_type)
else:
indices = []
data = []
for fid, val in fid_value:
indices.append(fid)
data.append(val)
features = SparseVector(indices, data, max_fid + 1)
return Instance(inst_id=match_id,
features=features,
label=label)
def save_model(self):
"""Serialize transformer settings plus disabled imputer/outlier stubs."""
transform_meta, transform_param = save_data_transform_model(input_format="sparse",
delimitor=self.delimitor,
data_type=self.data_type,
label_type=self.label_type,
output_format=self.output_format,
header=self.header,
sid_name=self.sid_name,
label_name=self.label_name,
with_match_id=self.with_match_id,
with_label=self.with_label,
model_name="SparseFeatureTransformer")
missing_imputer_meta, missing_imputer_param = save_missing_imputer_model(missing_fill=False,
model_name="Imputer")
transform_meta.imputer_meta.CopyFrom(missing_imputer_meta)
transform_param.imputer_param.CopyFrom(missing_imputer_param)
outlier_meta, outlier_param = save_outlier_model(outlier_replace=False,
model_name="Outlier")
transform_meta.outlier_meta.CopyFrom(outlier_meta)
transform_param.outlier_param.CopyFrom(outlier_param)
return {"DataTransformMeta": transform_meta,
"DataTransformParam": transform_param
}
def load_model(self, model_meta, model_param):
"""Restore transformer settings from protobuf meta/param (unused
tag/exclusive-type fields are discarded)."""
self.delimitor, self.data_type, _0, _1, _2, self.with_label, \
self.label_type, self.output_format, self.header, self.sid_name, self.label_name, self.with_match_id = \
load_data_transform_model(
"SparseFeatureTransformer",
model_meta,
model_param)
# =============================================================================
# SparseTagTransformer: mainly for tag data
# =============================================================================
class SparseTagTransformer(object):
"""Transform tag-format rows ("[match_id] [label] tag[:value] ...") into
Instance records; the header is the sorted set of tags seen at fit time."""
def __init__(self, data_transform_param):
self.delimitor = data_transform_param.delimitor
self.data_type = data_transform_param.data_type
self.tag_with_value = data_transform_param.tag_with_value
self.tag_value_delimitor = data_transform_param.tag_value_delimitor
self.with_label = data_transform_param.with_label
self.label_type = data_transform_param.label_type if self.with_label else None
self.output_format = data_transform_param.output_format
self.header = None
self.sid_name = "sid"
# NOTE(review): duplicated chained assignment -- harmless, but one
# "self.label_name =" should be removed.
self.label_name = self.label_name = data_transform_param.label_name
self.missing_fill = data_transform_param.missing_fill
self.missing_fill_method = data_transform_param.missing_fill_method
self.default_value = data_transform_param.default_value
self.with_match_id = data_transform_param.with_match_id
self.match_id_name = "match_id" if self.with_match_id else None
self.missing_impute_rate = None
self.missing_impute = None
@staticmethod
def agg_tag(kvs, delimitor=' ', with_label=True, with_match_id=False, tag_with_value=False,
tag_value_delimitor=":"):
"""Collect the set of distinct tags appearing in one partition."""
tags_set = set()
offset = 1 if with_match_id else 0
for key, value in kvs:
if with_label:
cols = value.split(delimitor, -1)[1 + offset:]
else:
cols = value.split(delimitor, -1)[0 + offset:]
if tag_with_value is False:
tags = cols
else:
tags = [fea_value.split(tag_value_delimitor, -1)[0] for fea_value in cols]
tags_set |= set(tags)
return tags_set
def generate_header(self, tags):
# the sorted tag list becomes the feature header
self.header = tags
def read_data(self, input_data, mode="fit"):
"""Convert a tag-format input table into a table of Instance objects."""
LOGGER.info("start to read sparse data and change data to instance")
abnormal_detection.empty_table_detection(input_data)
if mode == "fit":
data_instance = self.fit(input_data)
if self.with_label:
self.label_name = "label"
else:
data_instance = self.transform(input_data)
schema = make_schema(self.header, self.sid_name, self.label_name, self.match_id_name)
set_schema(data_instance, schema)
return data_instance
@staticmethod
def change_tag_to_str(value, tags_dict=None, delimitor=",", with_label=False, with_match_id=False,
tag_value_delimitor=":"):
"""Expand one tag:value row into a dense list of value strings ordered
by tags_dict ('' where a tag is absent), so the Imputer can run on it."""
vals = value.split(delimitor, -1)
ret = [''] * len(tags_dict)
offset = 0
if with_label:
offset += 1
if with_match_id:
offset += 1
# NOTE(review): slicing by the constant 2 instead of `offset` looks
# inconsistent with the offset bookkeeping above -- verify the intended
# behavior for the label-only and match-id-only cases.
vals = vals[2:]
for i in range(len(vals)):
tag, value = vals[i].split(tag_value_delimitor, -1)
idx = tags_dict.get(tag, None)
if idx is not None:
ret[idx] = value
return ret
@staticmethod
def change_str_to_tag(value, tags_dict=None, delimitor=",", tag_value_delimitor=":"):
"""Inverse of change_tag_to_str: re-join dense values as tag:value text."""
ret = [None] * len(tags_dict)
tags = sorted(list(tags_dict.keys()))
for i in range(len(value)):
tag, val = tags[i], value[i]
ret[i] = tag_value_delimitor.join([tag, val])
return delimitor.join(ret)
def fill_missing_value(self, input_data, tags_dict, mode="fit"):
"""Run the Imputer over tag values: densify, impute, then re-tagify."""
str_trans_method = functools.partial(self.change_tag_to_str,
tags_dict=tags_dict,
delimitor=self.delimitor,
with_label=self.with_label,
with_match_id=self.with_match_id,
tag_value_delimitor=self.tag_value_delimitor)
input_data = input_data.mapValues(str_trans_method)
schema = make_schema(self.header, self.sid_name, self.label_name, self.match_id_name)
set_schema(input_data, schema)
from federatedml.feature.imputer import Imputer
imputer_processor = Imputer()
if mode == "fit":
data, self.default_value = imputer_processor.fit(input_data,
replace_method=self.missing_fill_method,
replace_value=self.default_value)
LOGGER.debug("self.default_value is {}".format(self.default_value))
else:
data = imputer_processor.transform(input_data,
transform_value=self.default_value)
if self.missing_impute is None:
self.missing_impute = imputer_processor.get_missing_value_list()
LOGGER.debug("self.missing_impute is {}".format(self.missing_impute))
self.missing_impute_rate = imputer_processor.get_impute_rate(mode)
str_trans_tag_method = functools.partial(self.change_str_to_tag,
tags_dict=tags_dict,
delimitor=self.delimitor,
tag_value_delimitor=self.tag_value_delimitor)
data = data.mapValues(str_trans_tag_method)
return data
def fit(self, input_data):
"""Learn the tag vocabulary (and imputation values) and convert."""
tag_aggregator = functools.partial(SparseTagTransformer.agg_tag,
delimitor=self.delimitor,
with_label=self.with_label,
with_match_id=self.with_match_id,
tag_with_value=self.tag_with_value,
tag_value_delimitor=self.tag_value_delimitor)
tags_set_list = list(input_data.applyPartitions(tag_aggregator).collect())
tags_set = set()
for _, _tags_set in tags_set_list:
tags_set |= _tags_set
tags = list(tags_set)
# sorted so the column order is deterministic
tags = sorted(tags)
tags_dict = dict(zip(tags, range(len(tags))))
self.generate_header(tags)
if self.tag_with_value and self.missing_fill:
input_data = self.fill_missing_value(input_data, tags_dict, mode="fit")
data_instance = self.gen_data_instance(input_data, tags_dict)
return data_instance
def transform(self, input_data):
"""Convert using the tag vocabulary fixed at fit time."""
tags_dict = dict(zip(self.header, range(len(self.header))))
if self.tag_with_value and self.missing_fill:
input_data = self.fill_missing_value(input_data, tags_dict, mode="transform")
data_instance = self.gen_data_instance(input_data, tags_dict)
return data_instance
def gen_data_instance(self, input_data, tags_dict):
# bundle the per-row parsing config into a list for the mapValues callable
params = [self.delimitor,
self.data_type,
self.tag_with_value,
self.tag_value_delimitor,
self.with_label,
self.with_match_id,
self.label_type,
self.output_format,
tags_dict]
to_instance_with_param = functools.partial(self.to_instance, params)
data_instance = input_data.mapValues(to_instance_with_param)
return data_instance
def get_summary(self):
"""Summarize missing-fill statistics ({} when disabled)."""
if not self.missing_fill:
return {}
missing_summary = dict()
missing_summary["missing_value"] = list(self.missing_impute)
missing_summary["missing_impute_value"] = dict(zip(self.header, self.default_value))
missing_summary["missing_impute_rate"] = dict(zip(self.header, self.missing_impute_rate))
summary_buf = {"missing_fill_info": missing_summary}
return summary_buf
@staticmethod
def to_instance(param_list, value):
"""Parse one raw tag row into an Instance (dense or sparse features)."""
delimitor = param_list[0]
data_type = param_list[1]
tag_with_value = param_list[2]
tag_value_delimitor = param_list[3]
with_label = param_list[4]
with_match_id = param_list[5]
label_type = param_list[6]
output_format = param_list[7]
tags_dict = param_list[8]
if output_format not in ["dense", "sparse"]:
raise ValueError("output format {} is not define".format(output_format))
cols = value.split(delimitor, -1)
offset = 0
label = None
match_id = None
if with_match_id:
offset += 1
match_id = cols[0]
if with_label:
label = cols[offset]
offset += 1
if label_type == 'int':
label = int(label)
elif label_type in ["float", "float64"]:
label = float(label)
if output_format == "dense":
features = [0 for i in range(len(tags_dict))]
for fea in cols[offset:]:
if tag_with_value:
_tag, _val = fea.split(tag_value_delimitor, -1)
if _tag in tags_dict:
features[tags_dict.get(_tag)] = _val
else:
# bare tags are marked with 1 (0 elsewhere)
if fea in tags_dict:
features[tags_dict.get(fea)] = 1
features = np.asarray(features, dtype=data_type)
else:
indices = []
data = []
for fea in cols[offset:]:
if tag_with_value:
_tag, _val = fea.split(tag_value_delimitor, -1)
else:
_tag = fea
_val = 1
# tags unseen at fit time are silently dropped
if _tag not in tags_dict:
continue
indices.append(tags_dict.get(_tag))
if data_type in ["float", "float64"]:
_val = float(_val)
elif data_type in ["int", "int64", "long"]:
_val = int(_val)
elif data_type == "str":
_val = str(_val)
data.append(_val)
features = SparseVector(indices, data, len(tags_dict))
return Instance(inst_id=match_id,
features=features,
label=label)
def save_model(self):
"""Serialize transformer settings plus imputer state (no outlier step)."""
transform_meta, transform_param = save_data_transform_model(input_format="tag",
delimitor=self.delimitor,
data_type=self.data_type,
tag_with_value=self.tag_with_value,
tag_value_delimitor=self.tag_value_delimitor,
with_label=self.with_label,
label_type=self.label_type,
with_match_id=self.with_match_id,
output_format=self.output_format,
header=self.header,
sid_name=self.sid_name,
label_name=self.label_name,
model_name="Transformer")
missing_imputer_meta, missing_imputer_param = save_missing_imputer_model(self.missing_fill,
self.missing_fill_method,
self.missing_impute,
self.default_value,
self.missing_impute_rate,
self.header,
"Imputer")
transform_meta.imputer_meta.CopyFrom(missing_imputer_meta)
transform_param.imputer_param.CopyFrom(missing_imputer_param)
outlier_meta, outlier_param = save_outlier_model(outlier_replace=False,
model_name="Outlier")
transform_meta.outlier_meta.CopyFrom(outlier_meta)
transform_param.outlier_param.CopyFrom(outlier_param)
return {"DataTransformMeta": transform_meta,
"DataTransformParam": transform_param
}
def load_model(self, model_meta, model_param):
"""Restore transformer settings and imputer state from protobufs."""
self.delimitor, self.data_type, _0, self.tag_with_value, self.tag_value_delimitor, self.with_label, \
self.label_type, self.output_format, self.header, self.sid_name, self.label_name, self.with_match_id = load_data_transform_model(
"SparseTagTransformer",
model_meta,
model_param)
self.missing_fill, self.missing_fill_method, \
self.missing_impute, self.default_value = load_missing_imputer_model(self.header,
"Imputer",
model_meta.imputer_meta,
model_param.imputer_param)
class DataTransform(ModelBase):
    """Facade component that dispatches to a concrete transformer
    (dense / sparse / tag) selected by the input_format parameter."""

    def __init__(self):
        super(DataTransform, self).__init__()
        # concrete transformer, created in _init_model() / load_model()
        self.transformer = None
        from federatedml.param.data_transform_param import DataTransformParam
        self.model_param = DataTransformParam()

    def _init_model(self, model_param):
        """Instantiate the transformer matching model_param.input_format."""
        # use the module logger rather than print() so the message goes to
        # the configured log sinks instead of stdout
        LOGGER.debug("model_param is {}".format(model_param))
        if model_param.input_format == "dense":
            self.transformer = DenseFeatureTransformer(self.model_param)
        elif model_param.input_format == "sparse":
            self.transformer = SparseFeatureTransformer(self.model_param)
        elif model_param.input_format == "tag":
            self.transformer = SparseTagTransformer(self.model_param)
        self.model_param = model_param

    def load_model(self, model_dict):
        """Re-create the right transformer from a saved model and restore it."""
        input_model_param = None
        input_model_meta = None
        for _, value in model_dict["model"].items():
            for model in value:
                if model.endswith("Meta"):
                    input_model_meta = value[model]
                if model.endswith("Param"):
                    input_model_param = value[model]
        if input_model_meta.input_format == "dense":
            self.transformer = DenseFeatureTransformer(self.model_param)
        elif input_model_meta.input_format == "sparse":
            self.transformer = SparseFeatureTransformer(self.model_param)
        elif input_model_meta.input_format == "tag":
            self.transformer = SparseTagTransformer(self.model_param)
        self.transformer.load_model(input_model_meta, input_model_param)

    def fit(self, data_inst):
        """Fit the underlying transformer and publish its summary (if any)."""
        data_inst = self.transformer.read_data(data_inst, "fit")
        if isinstance(self.transformer, (DenseFeatureTransformer, SparseTagTransformer)):
            summary_buf = self.transformer.get_summary()
            if summary_buf:
                self.set_summary(summary_buf)
        return data_inst

    def transform(self, data_inst):
        """Transform with the already-fitted transformer."""
        return self.transformer.read_data(data_inst, "transform")

    def export_model(self):
        """Export the underlying transformer's protobufs, tagging need_run."""
        model_dict = self.transformer.save_model()
        model_dict["DataTransformMeta"].need_run = self.need_run
        return model_dict
def make_schema(header=None, sid_name=None, label_name=None, match_id_name=None):
    """Assemble a schema dict, writing only the fields that are truthy,
    then validate it via ModelBase.check_schema_content."""
    schema = {}
    for key, value in (("header", header),
                       ("sid_name", sid_name),
                       ("label_name", label_name),
                       ("match_id_name", match_id_name)):
        if value:
            schema[key] = value
    ModelBase.check_schema_content(schema)
    return schema
def set_schema(data_instance, schema):
    """Attach *schema* to *data_instance* in place."""
    setattr(data_instance, "schema", schema)
def save_data_transform_model(input_format="dense",
delimitor=",",
data_type="str",
exclusive_data_type=None,
tag_with_value=False,
tag_value_delimitor=":",
with_label=False,
label_name='',
label_type="int",
output_format="dense",
header=None,
sid_name=None,
with_match_id=False,
model_name="DataTransform"):
"""Pack transformer settings into DataTransformMeta/Param protobufs.

Optional fields (header, sid_name, label_name, exclusive_data_type) are
only written when provided; label metadata is only written when
with_label is true.
"""
model_meta = DataTransformMeta()
model_param = DataTransformParam()
model_meta.input_format = input_format
model_meta.delimitor = delimitor
model_meta.data_type = data_type
model_meta.tag_with_value = tag_with_value
model_meta.tag_value_delimitor = tag_value_delimitor
model_meta.with_label = with_label
if with_label:
model_meta.label_name = label_name
model_meta.label_type = label_type
model_meta.output_format = output_format
model_meta.with_match_id = with_match_id
if header is not None:
model_param.header.extend(header)
if sid_name:
model_param.sid_name = sid_name
if label_name:
model_param.label_name = label_name
if exclusive_data_type is not None:
model_meta.exclusive_data_type.update(exclusive_data_type)
return model_meta, model_param
def load_data_transform_model(model_name="DataTransform",
model_meta=None,
model_param=None):
"""Unpack DataTransformMeta/Param protobufs into a 12-tuple of settings.

Returns (delimitor, data_type, exclusive_data_type, tag_with_value,
tag_value_delimitor, with_label, label_type, output_format, header,
sid_name, label_name, with_match_id); label fields are None when
with_label is false.
"""
delimitor = model_meta.delimitor
data_type = model_meta.data_type
tag_with_value = model_meta.tag_with_value
tag_value_delimitor = model_meta.tag_value_delimitor
with_label = model_meta.with_label
label_name = model_meta.label_name if with_label else None
label_type = model_meta.label_type if with_label else None
with_match_id = model_meta.with_match_id
output_format = model_meta.output_format
# an empty repeated field means "no header was saved"
header = list(model_param.header) or None
sid_name = None
if model_param.sid_name:
sid_name = model_param.sid_name
exclusive_data_type = None
if model_meta.exclusive_data_type:
exclusive_data_type = {}
for col_name in model_meta.exclusive_data_type:
exclusive_data_type[col_name] = model_meta.exclusive_data_type.get(col_name)
return delimitor, data_type, exclusive_data_type, tag_with_value, tag_value_delimitor, with_label, \
label_type, output_format, header, sid_name, label_name, with_match_id
def save_missing_imputer_model(missing_fill=False,
missing_replace_method=None,
missing_impute=None,
missing_fill_value=None,
missing_replace_rate=None,
header=None,
model_name="Imputer"):
"""Pack imputer state into DataTransformImputerMeta/Param protobufs.

Values are stringified for the protobuf; detail fields are only written
when missing_fill is enabled.
"""
model_meta = DataTransformImputerMeta()
model_param = DataTransformImputerParam()
model_meta.is_imputer = missing_fill
if missing_fill:
if missing_replace_method:
model_meta.strategy = str(missing_replace_method)
if missing_impute is not None:
model_meta.missing_value.extend(map(str, missing_impute))
if missing_fill_value is not None:
feature_value_dict = dict(zip(header, map(str, missing_fill_value)))
model_param.missing_replace_value.update(feature_value_dict)
if missing_replace_rate is not None:
missing_replace_rate_dict = dict(zip(header, missing_replace_rate))
model_param.missing_value_ratio.update(missing_replace_rate_dict)
return model_meta, model_param
def load_missing_imputer_model(header=None,
                               model_name="Imputer",
                               model_meta=None,
                               model_param=None):
    """Unpack imputer meta/param protobufs into plain Python values.

    Returns (missing_fill, replace_method, missing_value_list,
    per-column fill values ordered by *header*); the last three are None
    when imputation is disabled or the corresponding field is empty.
    """
    missing_fill = model_meta.is_imputer
    if not missing_fill:
        return missing_fill, None, None, None

    replace_method = model_meta.strategy or None
    raw_values = model_meta.missing_value
    missing_value = list(raw_values) if raw_values else None
    replace_map = model_param.missing_replace_value
    if replace_map:
        missing_fill_value = [replace_map.get(head) for head in header]
    else:
        missing_fill_value = None
    return missing_fill, replace_method, missing_value, missing_fill_value
def save_outlier_model(outlier_replace=False,
outlier_replace_method=None,
outlier_impute=None,
outlier_replace_value=None,
outlier_replace_rate=None,
header=None,
model_name="Outlier"):
"""Pack outlier-replacement state into DataTransformOutlierMeta/Param
protobufs; detail fields are only written when outlier_replace is on."""
model_meta = DataTransformOutlierMeta()
model_param = DataTransformOutlierParam()
model_meta.is_outlier = outlier_replace
if outlier_replace:
if outlier_replace_method:
model_meta.strategy = str(outlier_replace_method)
if outlier_impute:
model_meta.outlier_value.extend(map(str, outlier_impute))
if outlier_replace_value:
outlier_value_dict = dict(zip(header, map(str, outlier_replace_value)))
model_param.outlier_replace_value.update(outlier_value_dict)
if outlier_replace_rate:
outlier_value_ratio_dict = dict(zip(header, outlier_replace_rate))
model_param.outlier_value_ratio.update(outlier_value_ratio_dict)
return model_meta, model_param
def load_outlier_model(header=None,
                       model_name="Outlier",
                       model_meta=None,
                       model_param=None):
    """Unpack outlier meta/param protobufs into plain Python values.

    Returns (outlier_replace, replace_method, outlier_value_list,
    per-column replacement values ordered by *header*); the last three
    are None when outlier replacement is disabled or the field is empty.
    """
    outlier_replace = model_meta.is_outlier
    if not outlier_replace:
        return outlier_replace, None, None, None

    replace_method = model_meta.strategy or None
    raw_values = model_meta.outlier_value
    outlier_value = list(raw_values) if raw_values else None
    replace_map = model_param.outlier_replace_value
    if replace_map:
        outlier_replace_value = [replace_map.get(head) for head in header]
    else:
        outlier_replace_value = None
    return outlier_replace, replace_method, outlier_value, outlier_replace_value
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Quang-Cuong Pham <cuong.pham@normalesup.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Child class of MintimeProblem which considers the case of a
manipulator under torque limits
"""
from openravepy import *
from numpy import *
from pylab import *
import time
import MintimeProblemGeneric
class MintimeProblemTorque(MintimeProblemGeneric.MintimeProblemGeneric):
    """Minimum-time path parameterization under joint-torque limits.

    Along the path s -> q(s), the torque of each joint is modeled as
        tau = a(s)*sddot + b(s)*sdot^2 + c(s)
    where the coefficient vectors (a, b, c) are sampled by sample_dynamics().
    """

    ############################# Initialization ############################

    def __init__(self,robot,traj):
        MintimeProblemGeneric.MintimeProblemGeneric.__init__(self,robot,traj)

    def set_dynamics_limits(self,limits):
        # limits -- pair (tau_min, tau_max) of per-joint torque bound vectors
        self.tau_min=limits[0]
        self.tau_max=limits[1]
        self.isset_dynamics_limits=True

    ################################ Dynamics ################################

    def sample_dynamics(self):
        """Sample the dynamics coefficients along the trajectory"""
        a_vect=zeros((self.dim,self.n_steps))
        b_vect=zeros((self.dim,self.n_steps))
        c_vect=zeros((self.dim,self.n_steps))
        for i in range(self.n_steps):
            q=self.q_vect[:,i]
            qd=self.qd_vect[:,i]
            qdd=self.qdd_vect[:,i]
            with self.robot:
                self.robot.SetDOFValues(q)
                self.robot.SetDOFVelocities(qd)
                # Inverse dynamics split into inertia (tm), Coriolis (tc)
                # and gravity (tg) components.
                tm,tc,tg = self.robot.ComputeInverseDynamics(qdd,None,returncomponents=True)
                # Passing qd as the *acceleration* argument and subtracting
                # tc and tg isolates M(q)*qd (the Coriolis/gravity parts do
                # not depend on the acceleration argument).
                # NOTE(review): qd is the path derivative here, so this looks
                # intentional, not a qd/qdd typo -- confirm against the
                # derivation accompanying this solver.
                to = self.robot.ComputeInverseDynamics(qd) - tc - tg
            a_vect[:,i]=to
            b_vect[:,i]=tm+tc
            c_vect[:,i]=tg
        self.a_vect=a_vect
        self.b_vect=b_vect
        self.c_vect=c_vect

    def dynamics_coefficients(self,s):
        """Compute the dynamics coefficients at a given point by interpolation

        s -- point on the trajectory
        Returns [a, b, c] interpolated from the sampled coefficient arrays.
        """
        return self.linear_interpolate_multi(s,[self.a_vect,self.b_vect,self.c_vect])

    ############################ Accel limits ################################

    def accel_limits(self,s,sdot):
        """Compute the acceleration limits caused by torque limits

        (s,sdot) -- point of the phase plane
        Returns [alpha, beta, ialpha, ibeta]: greatest lower bound, least
        upper bound on sddot, and the joint indices that saturate each.
        """
        tau_min=self.tau_min
        tau_max=self.tau_max
        [a,b,c]=self.dynamics_coefficients(s)
        alpha=-1e15
        beta=1e15
        ialpha=0
        ibeta=0
        for i in range(self.dim):
            # When a[i] < 0, dividing the torque inequality by a[i] flips it,
            # so the roles of tau_min and tau_max swap.
            if a[i]>0:
                tau_min_i=tau_min[i]
                tau_max_i=tau_max[i]
            else:
                tau_min_i=tau_max[i]
                tau_max_i=tau_min[i]
            alpha_i=(tau_min_i-b[i]*sdot**2-c[i])/a[i]
            beta_i=(tau_max_i-b[i]*sdot**2-c[i])/a[i]
            if alpha_i>alpha:
                alpha=alpha_i
                ialpha=i
            if beta_i<beta:
                beta=beta_i
                ibeta=i
        return [alpha,beta,ialpha,ibeta]

    def maxvel_accel(self,s):
        """Compute the maximum velocity caused by torque limits

        s -- point on the trajectory
        Returns the largest sdot for which a feasible sddot exists,
        i.e. alpha(s, sdot) <= beta(s, sdot).
        """
        tau_min=self.tau_min
        tau_max=self.tau_max
        [a,b,c]=self.dynamics_coefficients(s)
        tau_alpha=zeros(self.dim)
        tau_beta=zeros(self.dim)
        for i in range(self.dim):
            if a[i]>0:
                tau_alpha[i]=tau_min[i]
                tau_beta[i]=tau_max[i]
            else:
                tau_alpha[i]=tau_max[i]
                tau_beta[i]=tau_min[i]
        # Infeasible already at sdot = 0: the max velocity is 0.
        [alpha,beta,ialpha,ibeta]=self.accel_limits(s,0)
        if alpha>beta:
            return 0
        sdot_min=1e15
        # For each joint pair (k, m), solve alpha_k = beta_m for sdot^2 and
        # keep the smallest nonnegative root over both orderings.
        for k in range(self.dim):
            for m in range(k+1,self.dim):
                r=(a[k]*(tau_alpha[m]-c[m])-a[m]*(tau_beta[k]-c[k]))/(a[k]*b[m]-a[m]*b[k])
                if r>=0:
                    sdot=sqrt(r)
                    if sdot<sdot_min:
                        sdot_min=sdot
                r=(a[m]*(tau_alpha[k]-c[k])-a[k]*(tau_beta[m]-c[m]))/(a[m]*b[k]-a[k]*b[m])
                if r>=0:
                    sdot=sqrt(r)
                    if sdot<sdot_min:
                        sdot_min=sdot
        return(sdot_min)

    ########################## Zero-inertia points ############################

    def find_zero_inertia_points(self):
        """Find all zero-inertia points and assign to the list self.sw_zero_inertia

        A zero-inertia point is a sample where some coefficient a[j] changes
        sign between consecutive samples (crosses zero).
        """
        if self.n_steps<1:
            self.sw_zero_inertia=[]
            return
        s=self.t_vect[0]
        [ap,bp,cp]=self.dynamics_coefficients(s)
        i_list=[]
        for i in range(1,self.n_steps-1):
            s=self.t_vect[i]
            [a,b,c]=self.dynamics_coefficients(s)
            for j in range(self.dim):
                aj=a[j]
                ajp=ap[j]
                # Sign change of a[j] between samples i-1 and i.
                if aj*ajp<0:
                    # Only record it when the torque (accel) curve, not the
                    # velocity limit, is the active constraint there.
                    if (not self.isset_velocity_limits) or (self.maxvel_accel_curve[i]<self.maxvel_velocity_curve[i]):
                        # Keep the sample closer to the actual zero crossing,
                        # plus its neighbor when the velocity curve dips there.
                        if abs(aj)<abs(ajp):
                            if not (i in i_list):
                                i_list.append(i)
                            if self.maxvel_curve[i-1]< self.maxvel_curve[i]:
                                if not ((i-1) in i_list):
                                    i_list.append(i-1)
                        else:
                            if not ((i-1) in i_list):
                                i_list.append(i-1)
                            if self.maxvel_curve[i]< self.maxvel_curve[i-1]:
                                if not (i in i_list):
                                    i_list.append(i)
            ap=a
        self.sw_zero_inertia=i_list

    def correct_accel_zi(self,s):
        """Compute the correct acceleration at a zero-inertia point

        s -- a zero-inertia point on the trajectory
        """
        sdot=self.maxvel_interp(s)
        [a,b,c]=self.dynamics_coefficients(s)
        # Saturated joint = the one whose inertia coefficient is nearest zero.
        a_abs=list(abs(a))
        i_sat=a_abs.index(min(a_abs))
        # Backward finite differences: sampling at s - delta, so
        # -(f(s-delta) - f(s))/delta approximates df/ds at s.
        delta=0.01
        [a2,b2,c2]=self.dynamics_coefficients(s-delta)
        a2=a2[i_sat]
        b2=b2[i_sat]
        c2=c2[i_sat]
        a=a[i_sat]
        b=b[i_sat]
        c=c[i_sat]
        ap=-(a2-a)/delta
        bp=-(b2-b)/delta
        cp=-(c2-c)/delta
        return ((-bp*sdot*sdot-cp)/(2*b*sdot+ap*sdot))*sdot
################## Trajectory utilities ################################
def ComputeTorques(robot, traj, grav):
    """Compute inverse-dynamics torques along a sampled trajectory.

    robot -- OpenRAVE robot
    traj -- trajectory with fields n_steps, q_vect, qd_vect, qdd_vect
    grav -- gravity vector installed into the physics engine
    Returns a (dof x n_steps) array of joint torques, with the first and
    last two columns smoothed by copying the nearest interior sample.
    """
    robot.GetEnv().GetPhysicsEngine().SetGravity(grav)
    n_steps = traj.n_steps
    tau_vect = zeros(shape(traj.q_vect))
    for i in range(n_steps):
        with robot:
            robot.SetDOFValues(traj.q_vect[:, i])
            robot.SetDOFVelocities(traj.qd_vect[:, i])
            tau_vect[:, i] = robot.ComputeInverseDynamics(
                traj.qdd_vect[:, i], None, returncomponents=False)
    if n_steps > 2:
        # Smooth out boundary artifacts at both ends of the trajectory.
        tau_vect[:, 0] = tau_vect[:, 2]
        tau_vect[:, 1] = tau_vect[:, 2]
        tau_vect[:, n_steps - 1] = tau_vect[:, n_steps - 3]
        tau_vect[:, n_steps - 2] = tau_vect[:, n_steps - 3]
    return tau_vect
def PlotTorques(t_vect,tau,tau_min,tau_max,offset=0,reverse=False):
    """Plot joint torques against time, with the bounds drawn dashed.

    t_vect -- time vector
    tau -- (dof x n_steps) torque array
    tau_min, tau_max -- torque bounds, drawn as dashed lines
    offset -- shift applied to the time axis
    reverse -- if True, plot torques in reversed time order
               (NOTE(review): relies on a `reverse_array` helper that is
               defined elsewhere in this module -- not visible here, confirm)
    """
    T=t_vect[-1]
    hold('on')
    plot([offset,T+offset],array([tau_min,tau_min]),'--',linewidth=2)
    plot([offset,T+offset],array([tau_max,tau_max]),'--',linewidth=2)
    if not reverse:
        plot(t_vect+offset,transpose(tau),linewidth=2)
    else:
        plot(t_vect+offset,transpose(reverse_array(tau)),linewidth=2)
    # Enlarge tick labels for readability in figures.
    for k in gca().get_xticklabels():
        k.set_fontsize(18)
    for k in gca().get_yticklabels():
        k.set_fontsize(18)
    xlabel('Time (s)',fontsize=20)
    ylabel('Torque (Nm)',fontsize=20)
    grid('on')
def PlotRobots(env,traj,stepsize=1,start=0):
    """Spawn a faded clone of the 2-DOF robot at every `stepsize`-th sample
    of the trajectory, visualizing the motion as a trail.

    env -- OpenRAVE environment to populate
    traj -- trajectory with fields n_steps and q_vect
    stepsize -- sample stride between clones
    start -- first sample index to draw
    Returns the list of robots created (one per drawn sample).
    """
    robot_list=[]
    j=0
    for i in range(start,traj.n_steps,stepsize):
        j=j+1
        # Fresh robot XML per sample; each clone needs a unique name ("2DOF<j>").
        robotxml="""<Robot name="2DOF"""+str(j)+"""">
<KinBody>
<Mass type="mimicgeom">
<density>100000</density>
</Mass>
<!-- Create the base body, it should never move-->
<!-- Note that all translations and rotations are with respect to this base-->
<!-- For example, the robot at the identity transformation is equivalent to the identity transformation of the first body.-->
<Body name="Base" type="dynamic">
<Translation>0.0 0.0 0.0</Translation>
<Geom type="cylinder">
<rotationaxis>1 0 0 90</rotationaxis>
<radius>0.03</radius>
<height>0.02</height>
<diffuseColor>0.05 0.05 0.05</diffuseColor>
</Geom>
</Body>
<!-- the first movable link-->
<Body name="Arm0" type="dynamic">
<!-- Offset from is a fast way of specifying that the translation and rotation of this-->
<!-- body should be relative to another link-->
<offsetfrom>Base</offsetfrom>
<!-- Translation relative to Base-->
<Translation>0 0 0</Translation>
<Geom type="box">
<Translation>0.1 0 0</Translation>
<Extents>0.1 0.01 0.01</Extents>
</Geom>
</Body>
<!-- declare a circular hinge joint (circular joints have no limits) -->
<Joint circular="true" name="Arm0" type="hinge">
<Body>Base</Body>
<Body>Arm0</Body>
<offsetfrom>Arm0</offsetfrom>
<weight>4</weight>
<limitsdeg>-180 180</limitsdeg>
<axis>0 0 1</axis>
<maxvel>3</maxvel>
<resolution>1</resolution>
</Joint>
<!-- the second movable link-->
<Body name="Arm1" type="dynamic">
<offsetfrom>Arm0</offsetfrom>
<Translation>0.2 0 0</Translation>
<Geom type="box">
<Translation>0.1 0 0</Translation>
<Extents>0.1 0.01 0.01</Extents>
</Geom>
</Body>
<!-- declare a circular hinge joint (circular joints have no limits) -->
<Joint circular="true" name="Arm1" type="hinge">
<Body>Arm0</Body>
<Body>Arm1</Body>
<offsetfrom>Arm1</offsetfrom>
<weight>3</weight>
<limitsdeg>-180 180</limitsdeg>
<axis>0 0 1</axis>
<maxvel>4</maxvel>
<resolution>1</resolution>
</Joint>
</KinBody>
</Robot>"""
        # Later samples are drawn more opaque (transparency decreases with i).
        trans=0.9-0.9*float(i)/traj.n_steps
        r=env.ReadRobotData(robotxml)
        env.Add(r)
        # Color the links: base black, Arm0 red, Arm1 blue, arms faded by `trans`.
        k=r.GetLinks()[0]
        g=k.GetGeometries()[0]
        g.SetAmbientColor([0.0,0,0])
        g.SetDiffuseColor([0.0,0,0])
        k=r.GetLinks()[1]
        g=k.GetGeometries()[0]
        g.SetAmbientColor([0.6,0,0])
        g.SetDiffuseColor([0.6,0,0])
        g.SetTransparency(trans)
        k=r.GetLinks()[2]
        g=k.GetGeometries()[0]
        g.SetAmbientColor([0,0,0.6])
        g.SetDiffuseColor([0,0,0.6])
        g.SetTransparency(trans)
        r.SetTransform(array([[0,0,1,0],[0,1,0,0],[-1,0,0,0.3],[0,0,0,1]]))
        robot_list.append(r)
        # NOTE(review): indexing GetRobots() by j assumes the environment held
        # exactly one robot before this call -- confirm with callers.
        robot=env.GetRobots()[j]
        robot.SetDOFValues(traj.q_vect[:,i])
    return robot_list
def Execute(robot, traj, t_sleep, stepsize=1):
    """Replay the trajectory on the robot, pausing t_sleep between samples.

    robot -- OpenRAVE robot
    traj -- trajectory with fields n_steps and q_vect
    t_sleep -- delay (seconds) between consecutive configurations
    stepsize -- sample stride
    """
    idx = 0
    while idx < traj.n_steps:
        robot.SetDOFValues(traj.q_vect[:, idx])
        time.sleep(t_sleep)
        idx += stepsize
def CheckCollisionTraj(robot, traj):
    """Check every sample of the trajectory for collision.

    Returns [True, 'env', i] on environment collision at sample i,
    [True, 'self', i] on self-collision, or [False, None, None] if the
    whole trajectory is collision-free. The robot state is restored.
    """
    for sample in range(traj.n_steps):
        with robot:
            robot.SetDOFValues(traj.q_vect[:, sample])
            if robot.GetEnv().CheckCollision(robot):
                return [True, 'env', sample]
            if robot.CheckSelfCollision():
                return [True, 'self', sample]
    return [False, None, None]
def PlotVelocities(t_vect,qd_vect,qd_max):
    """Plot joint velocities against time with the +/- qd_max bounds dashed.

    t_vect -- time vector
    qd_vect -- (dof x n_steps) velocity array
    qd_max -- per-joint velocity bound (array); also sets the y-axis range
    """
    T=t_vect[-1]
    clf()
    hold('on')
    plot(t_vect,transpose(qd_vect),linewidth=2)
    plot([0,T],array([-qd_max,-qd_max]),'k--',linewidth=2)
    plot([0,T],array([qd_max,qd_max]),'k--',linewidth=2)
    # Enlarge tick labels for readability in figures.
    for k in gca().get_xticklabels():
        k.set_fontsize(18)
    for k in gca().get_yticklabels():
        k.set_fontsize(18)
    xlabel('Time (s)',fontsize=20)
    ylabel('Velocity (rad.s-1)',fontsize=20)
    axis([0,T,-max(qd_max)*1.2,max(qd_max)*1.2])
    grid('on')
| |
"""
A bunch of classes for each of BigCommerce's resources, individually and
as collections.
Supports filter options as dictionaries, e.g.
someresourceset.get(options={'limit' : 5, 'page' : 2})
See the BigCommerce resources index documentation for available filter fields.
http://developer.bigcommerce.com/docs/api/v2/resources
"""
import json
from connection import Connection
class ResourceSet(object):
    """
    Base class representing a collection of BigCommerce resources.

    Subclasses set `res_name` (the URL path segment, e.g. "brands") and
    `resource_class` (the Resource subclass used to wrap responses).
    """
    client = Connection
    resource_class = None
    res_name = ""  # this needs to be, e.g., "brands" for brand resources

    @classmethod
    def count(cls):
        """Return the total number of resources of this type in the store."""
        path = '/{}/count.json'.format(cls.res_name)
        return cls.client.get(path)['count']

    @classmethod
    def get(cls, **options):
        """
        Returns list of resources (or None when the store has none).
        """
        raw = cls.client.get('/{}.json'.format(cls.res_name), **options)
        if not raw:
            return None
        return [cls.resource_class(item) for item in raw]

    @classmethod
    def get_by_id(cls, id, **options):
        """Returns an individual resource by given ID"""
        raw = cls.client.get('/{}/{}.json'.format(cls.res_name, id), **options)
        if not raw:
            return None
        return cls.resource_class(raw)

    @classmethod
    def create(cls, fields, **options):
        """
        Creates a new resource, returning its corresponding object.
        Don't include the id field.
        Fails (raises an exception) if mandatory fields are missing. See
        resource documentation for which fields are required.
        """
        payload = json.dumps(fields)
        raw = cls.client.post('/{}.json'.format(cls.res_name), payload, **options)
        if not raw:
            return None
        return cls.resource_class(raw)

    @classmethod
    def delete_from_id(cls, id, **options):
        """Delete the resource with the given ID."""
        cls.client.delete('/{}/{}.json'.format(cls.res_name, id), **options)
class Resource(object):
    """
    Base class for an individual resource.

    Field values fetched from the store live in `_fields`; local attribute
    assignments are staged in `__dict__` and pushed to the store (then
    merged back into `_fields`) by update().
    """
    client = Connection
    res_name = ""  # this needs to be, e.g., "brands" for brand resources

    def __init__(self, fields=None):
        """
        This constructor should only be used with a dict of fields
        retrieved from a store.
        If you want to create a new resource, use
        the corresponding ResourceSet.create method.
        """
        self._fields = fields or {}  # internal dict for fields
        # __dict__ is used as a staging area for local changes
        # it gets cleared and moved to _fields upon update()

    @classmethod
    def get_time(cls):  # TODO: format? or at least note how to
        """Return the store's current time (raw API response)."""
        return cls.client.get('/time')

    def __getattr__(self, attr):
        # if __dict__ doesn't have it, try _fields
        try:
            return self._fields[attr]
        except KeyError:
            raise AttributeError("No attribute {}".format(attr))

    def delete(self, **options):
        """Deletes this object"""
        self.client.delete('/{}/{}.json'.format(self.res_name, self.id), **options)

    def update(self, **options):
        """Updates local changes to the object."""
        body = self._copy_dict()
        body = json.dumps(body)
        new_fields = self.client.put('/{}/{}.json'.format(self.res_name, self.id), body, **options)
        # commit changes locally
        self._replace_fields(new_fields)

    def _copy_dict(self):
        """Return the staged local changes, minus bookkeeping keys.

        The immutable `id` must never be sent back to the store.
        """
        copy_d = self.__dict__.copy()
        # FIX: was `copy_d.has_key('id')` -- dict.has_key is Python-2-only
        # and deprecated even there; `in` works on both 2 and 3.
        if 'id' in copy_d:
            del copy_d['id']
        del copy_d['_fields']
        return copy_d

    def _replace_fields(self, new_fields):
        """Drop staged changes and adopt `new_fields` as canonical state."""
        self._fields = new_fields if new_fields else {}
        self.__dict__ = {'_fields': self._fields}
class ParentResource(Resource):
    """
    A Resource class that has subresources.
    Implements subresource related operations that do not
    require a specific instance of a ParentResource.
    Contains a SubResourceManager for operations that do.
    """
    # in future, should allow the get methods to take names of the
    # subresources, rather than just class; also, should move some of these
    # methods to mixins, or otherwise restrict them for resources that do
    # not support some methods.

    def __init__(self, fields=None):
        super(ParentResource, self).__init__(fields)
        self.subresources = SubResourceManager(self)

    @classmethod
    def count_all(cls, sres_class):
        """
        Number of all subresources of type sres_class.
        GET /resource/subresource/count
        """
        req_str = '/{}/{}/count.json'
        # FIX: was `sres_class.resname` (missing underscore), which raised
        # AttributeError; the attribute used everywhere else is `res_name`.
        return cls.client.get(req_str.format(cls.res_name,
                                             sres_class.res_name))['count']

    @classmethod
    def get_sres_by_id(cls, sres_class, id, **options):
        """
        Returns an individual subresource by given ID.
        Equivalent to GET /resource/subresource/sres_id
        """
        sres_name = sres_class.res_name
        # FIX: this is a classmethod, so there is no `self`; the original
        # referenced self.client / self.res_name and raised NameError.
        resource = cls.client.get('/{}/{}/{}.json'.format(cls.res_name, sres_name, id),
                                  **options)
        return sres_class(resource) if resource else None

    @classmethod
    def get_all_sres(cls, sres_class, **options):
        """
        List of subresources of type sres_class, up to default limit (can be specified in options).
        GET /resource/subresource
        """
        resource_list = cls.client.get('/{}/{}.json'.format(cls.res_name, sres_class.res_name), **options)
        return [sres_class(res) for res in resource_list] if resource_list else None

    def _copy_dict(self):
        """Staged changes minus the non-field `subresources` manager."""
        copy_d = super(ParentResource, self)._copy_dict()
        del copy_d['subresources']
        return copy_d

    def _replace_fields(self, new_fields):
        """Adopt `new_fields`, keeping the subresource manager attached."""
        # Normalize falsy responses to {}, consistent with Resource.
        self._fields = new_fields if new_fields else {}
        self.__dict__ = {'_fields': self._fields,
                         'subresources': self.subresources}
class SubResourceManager(object):
    """
    Handles the subresources of a specific instance of a ParentResource.

    Mirrors the ResourceSet interface, but every method takes a subresource
    class (or instance) as its first argument. Not every operation is
    supported by every resource/subresource pair -- consult the BigCommerce
    resources documentation; unsupported calls may come back as 400 or 501.
    """

    def __init__(self, parent_res):
        self._res = parent_res

    @property
    def id(self):
        """ID of the parent resource."""
        return self._res.id

    @property
    def res_name(self):
        """URL segment of the parent resource."""
        return self._res.res_name

    @property
    def client(self):
        """Connection object shared with the parent resource."""
        return self._res.client

    def _base(self, sres_name):
        # URL prefix shared by every subresource request of this parent.
        return '/{}/{}/{}'.format(self.res_name, self.id, sres_name)

    def create(self, sres_class, fields, **options):
        """
        Creates a new subresource (omit the id field) and returns its
        wrapped object. POST /resource/res_id/subresource
        """
        payload = json.dumps(fields)
        data = self.client.post(self._base(sres_class.res_name) + '.json',
                                payload, **options)
        return sres_class(data) if data else None

    def count(self, sres):
        """
        Number of subresources of `sres`'s type under this resource.
        """
        return self.client.get(self._base(sres.res_name) + '/count.json')['count']

    def get(self, sres_class, **options):
        """
        List of subresources related to this object (up to limit, default
        or specified). GET /resource/res_id/subresource
        """
        raw = self.client.get(self._base(sres_class.res_name) + '.json', **options)
        return [sres_class(item) for item in raw] if raw else None

    def get_by_id(self, sres_class, id, **options):
        """
        Individual subresource of this object by given ID.
        GET /resource/res_id/subresource/sres_id
        """
        data = self.client.get('{}/{}.json'.format(self._base(sres_class.res_name), id),
                               **options)
        return sres_class(data) if data else None

    def delete_all(self, sres_class, **options):
        """
        DELETE /resource/res_id/subresource
        """
        self.client.delete(self._base(sres_class.res_name) + '.json', **options)

    def delete(self, sres, **options):
        """
        DELETE /resource/res_id/subresource/sres_id
        """
        self.client.delete('{}/{}.json'.format(self._base(sres.res_name), sres.id),
                           **options)

    def update(self, sres, **options):
        """
        Push the given subresource's local changes to the store.
        PUT /resource/res_id/subresource/sres_id
        """
        body = json.dumps(sres._copy_dict())
        new_fields = self.client.put('{}/{}.json'.format(self._base(sres.res_name), sres.id),
                                     body, **options)
        # commit changes locally
        sres._replace_fields(new_fields)
# Resources and ResourceSets
#
# One (Resource, ResourceSet) subclass pair per BigCommerce endpoint;
# `res_name` is the URL path segment. Endpoints with subresources derive
# from ParentResource instead of Resource.

class Brand(Resource):
    res_name = "brands"

class Brands(ResourceSet):
    res_name = "brands"
    resource_class = Brand

class Category(Resource):
    res_name = "categories"

class Categories(ResourceSet):
    res_name = "categories"
    resource_class = Category

class OrderStatus(Resource):
    res_name = "orderstatuses"

class OrderStatuses(ResourceSet):
    res_name = "orderstatuses"
    resource_class = OrderStatus

class CustomerGroup(Resource):
    res_name = "customer_groups"

class CustomerGroups(ResourceSet):
    res_name = "customer_groups"
    resource_class = CustomerGroup

class Coupon(Resource):
    res_name = "coupons"

class Coupons(ResourceSet):
    res_name = "coupons"
    resource_class = Coupon

class Store(Resource):
    res_name = "store"

class Stores(ResourceSet):
    """Only supports GET /store.json, according to documentation."""
    res_name = "store"
    resource_class = Store

# Endpoints with subresources:

class Country(ParentResource):
    res_name = "countries"

class Countries(ResourceSet):
    res_name = "countries"
    resource_class = Country

class Customer(ParentResource):
    res_name = "customers"

class Customers(ResourceSet):
    res_name = "customers"
    resource_class = Customer

class Option(ParentResource):
    res_name = "options"

class Options(ResourceSet):
    res_name = "options"
    resource_class = Option

class OptionSet(ParentResource):
    res_name = "optionsets"

class OptionSets(ResourceSet):
    res_name = "optionsets"
    resource_class = OptionSet

class Order(ParentResource):
    res_name = "orders"

class Orders(ResourceSet):
    res_name = "orders"
    resource_class = Order

class Product(ParentResource):
    res_name = "products"

class Products(ResourceSet):
    res_name = "products"
    resource_class = Product

class Redirect(Resource):
    res_name = "redirects"

class Redirects(ResourceSet):
    res_name = "redirects"
    resource_class = Redirect

class Shipping(ParentResource):
    """Only GET"""
    res_name = "shipping"

# class Shippings(ResourceSet):
#     """
#     An actual "Shipping" resource does not appear to exist.
#     The resource is actually shipping methods - use Shipping.get_all_sres
#     and other methods to interact.
#     """
#     res_name = "shipping"
#     resource_class = Shipping
| |
import logging
import os
from nose.tools import assert_raises
from openpromela import logic
# Module-level logger for this test module.
logger = logging.getLogger(__name__)
# bit blasting log: route omega's bitvector messages to a separate file.
BIT_LOG = 'bitblaster.txt'
bit_log = logging.getLogger('omega.logic.bitvector')
h = logging.FileHandler(BIT_LOG, mode='w')
h.setLevel(logging.ERROR)
bit_log.addHandler(h)
# Silence noisy third-party loggers during the tests.
logging.getLogger('omega').setLevel('ERROR')
logging.getLogger('openpromela').setLevel('ERROR')
logging.getLogger('promela.ast').setLevel('ERROR')
logging.getLogger('promela.yacc').setLevel('ERROR')
logging.getLogger('astutils').setLevel('ERROR')
logging.getLogger('openpromela.logic').setLevel('ERROR')
logging.getLogger('openpromela.slugs').setLevel('ERROR')
class Parser(logic.Parser):
    """Parser restricted to a single expression (`full_expr` start symbol)."""
    start = 'full_expr'
    tabmodule = 'expr_parsetab'

    def build(self):
        # silence warnings about unreachable rules
        # above `full_expr`
        super(Parser, self).build(errorlog=logger, write_tables=True)


# Shared expression parser used by the tests below.
expr_parser = Parser()
def test_conj():
    """`logic.conj` builds a balanced conjunction and simplifies constants."""
    f = logic.conj
    assert f(['a', 'b']) == '(a) & (b)'
    assert f(['a', 'b', 'c', 'd']) == '((a) & (b)) & ((c) & (d))'
    # constant folding
    assert f(['a', 'True']) == 'a'
    assert f(['a', 'False']) == 'False'
    assert f(['True', 'True', 'b', 'c']) == '(b) & (c)'
    # empty conjunction is the neutral element
    assert f([]) == 'True'
    # generators are accepted too (xrange: this module targets Python 2)
    assert f(str(x) for x in xrange(1)) == '0'
    assert f(str(x) for x in []) == 'True'
    # empty strings are ignored
    assert f(['', 'a', 'b']) == '(a) & (b)'
def test_disj():
    """`logic.disj` builds a balanced disjunction and simplifies constants."""
    f = logic.disj
    assert f(['a', 'b']) == '(a) | (b)'
    assert f(['a', 'b', 'c', 'd']) == '((a) | (b)) | ((c) | (d))'
    # constant folding
    assert f(['a', 'False']) == 'a'
    assert f(['a', 'True']) == 'True'
    assert f(['False', 'False', 'b', 'c']) == '(b) | (c)'
    # empty disjunction is the neutral element
    assert f([]) == 'False'
    # generators are accepted too (xrange: this module targets Python 2)
    assert f(str(x) for x in xrange(1)) == '0'
    assert f(str(x) for x in []) == 'False'
    # empty strings are ignored
    assert f(['', 'a', 'b']) == '(a) | (b)'
def test_trivial_unrealizable():
    """If realizable, then the assumption is False."""
    c = '''
    assert ltl { []<> false }
    '''
    r = logic.synthesize(c)
    assert not r, r


def test_false_assumption():
    # NOTE(review): this test is effectively disabled -- the call and
    # assertion below are commented out, so it only documents intent.
    c = '''
    assume ltl { false }
    assert ltl { []<> false }
    '''
    # slugs returns empty strategy if assumption is False
    # and `tulip.synth.strategy2mealy` raises Exception
    # with assert_raises(Exception):
    #     logic.synthesize(c)


def test_trivial_realizable():
    """`[]<> true` is trivially realizable."""
    c = '''
    assert ltl { []<> true }
    '''
    r = logic.synthesize(c)
    assert r, r
parser = logic.Parser()
code = dict()
code['terminate'] = '''
active proctype main(){
byte x;
x = 1;
true
}
'''
code['stutter'] = '''
active proctype main(){
byte x;
x = 1;
x = x + 3;
do
:: true
od
}
'''
code['model check triv'] = '''
assert active env proctype main(){
do
:: true
od
}
'''
code['model check triv 2'] = '''
assert active env proctype main(){
do
:: true
:: true
od
}
'''
code['model check loose'] = '''
assert active env proctype main(){
do
:: true
:: true; false
od
}
'''
code['sys again'] = '''
assert active sys proctype main(){
do
:: true
:: true; false
od
}
'''
def test_assume_assert_realizability():
    """Synthesize each snippet in `code`; compare with the expected verdict."""
    realizable = {
        'stutter', 'model check triv', 'model check triv 2',
        'sys again'}
    win = {k: True for k in realizable}
    win.update({k: False for k in code if k not in realizable})
    # Python 2 idioms below (iteritems, print statement).
    for k, v in code.iteritems():
        print k
        r = logic.synthesize(v)
        assert win[k] == r, (k, r)
        # print mealy
        # mealy.dump()


def run_single():
    """Manual helper (not a test): synthesize one snippet and show result."""
    mealy = logic.synthesize(code['sys again'])
    print(mealy)
def test_bdd_filename():
    """Synthesis dumps the strategy BDD to the requested file."""
    c = '''
    proctype foo(){
        int(0, 50) x;
        do
        :: x' == x + 1
        :: x = x - 1
        od
    }
    '''
    fname = 'mybdd.p'
    try:
        assert logic.synthesize(c, filename=fname)
        assert os.path.isfile(fname)
    finally:
        # FIX: clean up the dump file so repeated test runs (and the
        # working directory) are not polluted by artifacts.
        if os.path.isfile(fname):
            os.remove(fname)
def test_executability():
    """Translate statements to guards across contexts, owners, and negation."""
    # test primed var owner vs context
    p = '''
    env bool x;
    sys bool y;
    ltl { x && y && x' && y' }
    '''
    guard, primed = to_guard(p, 'sys')
    assert guard == '(((pid0_x && pid0_y) && (X pid0_x)) && (X True))', guard
    # raise Exception if primed sys variable in assumption
    with assert_raises(AssertionError):
        to_guard(p, 'env')
    # test primed var owner vs context
    p = '''
    env bool x;
    sys bool y;
    ltl { x && y && x' }
    '''
    guard, primed = to_guard(p, 'env')
    assert guard == '((pid0_x && pid0_y) && (X True))', guard
    # test negation context
    p = '''
    env bool x;
    sys bool y;
    ltl { ! x && ! y && x' && ! y' }
    '''
    guard, primed = to_guard(p, 'sys')
    assert guard == (
        '((((! pid0_x) && (! pid0_y)) && '
        '(X pid0_x)) && (! (X False)))'), guard
    # test double negation
    p = '''
    env bool x;
    sys bool y;
    ltl { ! ! x && ! y && x' && ! ! y' }
    '''
    guard, primed = to_guard(p, 'sys')
    assert guard == (
        '((((! (! pid0_x)) && (! pid0_y)) && '
        '(X pid0_x)) && (! (! (X True))))'), guard
    # test positive arithmetic context
    p = '''
    env int(0, 10) x;
    sys int(0, 5) y;
    ltl { (x == 1) && (y > 0) | (y' <= 2 + x) }
    '''
    guard, primed = to_guard(p, 'sys')
    assert guard == (
        '((pid0_x = 1) && ((pid0_y > 0) | True))'), guard
    # test negative arithmetic context
    p = '''
    env int(0, 10) x;
    sys int(0, 5) y;
    ltl { (x == 1) && (y > 0) | ! (y' <= 2 + x') }
    '''
    guard, primed = to_guard(p, 'sys')
    assert guard == (
        '((pid0_x = 1) && ((pid0_y > 0) | (! False)))'), guard
    # test primed sys var in assumption arithmetic context
    with assert_raises(AssertionError):
        to_guard(p, 'env')
    # test synthesis of whole programs
    c = '''
    free env bool x;
    assert active sys proctype main(){
        bool y;
        do
        :: x && y' /* x */
        od
    }
    '''
    assert not logic.synthesize(c)
    # guard of: `x || y' = true`
    # but init to false, and is imperative var
    c = '''
    free env bool x;
    assert active sys proctype main(){
        bool y;
        do
        :: x || y /* true */
        od
    }
    '''
    assert not logic.synthesize(c)
    # y is primed, so deconstrained
    c = '''
    free env bool x;
    assert active sys proctype main(){
        free bool y;
        do
        :: x || y' /* true */
        od
    }
    '''
    assert logic.synthesize(c)
    # y is free, but initially `false`
    c = '''
    free env bool x;
    assert active sys proctype main(){
        free bool y = false;
        do
        :: x || y /* true */
        od
    }
    '''
    assert not logic.synthesize(c)
    # y is free
    c = '''
    free env bool x;
    assert active sys proctype main(){
        free bool y;
        do
        :: x || y /* true */
        od
    }
    '''
    assert logic.synthesize(c)
    c = '''
    free env bool x;
    assert active sys proctype main(){
        bool y;
        do
        :: y && y' /* y */
        od
    }
    '''
    assert not logic.synthesize(c)
def to_guard(p, assume):
    """Parse program `p` and translate its single LTL block to a guard.

    p -- open Promela source containing exactly one ltl block
    assume -- context, 'env' or 'sys'
    Returns the pair (guard, primed) for pid 0.
    """
    program = parser.parse(p)
    global_defs, products, ltl_blocks = program.to_table()
    (ltl,) = ltl_blocks
    f = ltl.formula
    t = logic.Table()
    logic.add_variables_to_table(
        t, global_defs, pid=0, assume_context='sys')
    guard, primed = f.to_guard(
        t, pid=0, assume=assume, primed=False, negated=False)
    return guard, primed
def test_assume_assert():
    """Defaulting of `assume`/`owner` for ltl blocks and proctypes."""
    # unconditioned ltl block
    c = "ltl { []<> x }"
    program = parser.parse(c)
    global_defs, products, ltl_blocks = program.to_table()
    (ltl,) = ltl_blocks
    assert hasattr(ltl, 'assume'), ltl
    assert ltl.assume == 'assert', ltl.assume
    # asserted ltl block
    c = "assert ltl { []<> x }"
    program = parser.parse(c)
    global_defs, products, ltl_blocks = program.to_table()
    (ltl,) = ltl_blocks
    assert hasattr(ltl, 'assume'), ltl
    assert ltl.assume == 'assert', ltl.assume
    # assumed ltl block
    c = "assume ltl { []<> x }"
    program = parser.parse(c)
    global_defs, products, ltl_blocks = program.to_table()
    (ltl,) = ltl_blocks
    assert hasattr(ltl, 'assume'), ltl
    assert ltl.assume == 'assume', ltl.assume
    # unconditioned: default to assertion owned by sys
    c = '''
    proctype foo(){
        bool x;
    }
    '''
    check_assume_owner(c, 'sys', 'sys')
    # env proctype
    c = '''
    env proctype foo(){
        bool x;
    }
    '''
    check_assume_owner(c, 'env', 'env')
    # assumption
    c = '''
    assume proctype foo(){
        bool x;
    }
    '''
    check_assume_owner(c, 'env', 'env')
    # assume sys
    c = '''
    assume sys proctype foo(){
        bool x;
    }
    '''
    check_assume_owner(c, 'env', 'sys')
    # sys pc
    c = '''
    sys proctype foo(){
        bool x;
    }
    '''
    check_assume_owner(c, 'sys', 'sys')
    # assertion
    c = '''
    assert proctype foo(){
        bool x;
    }
    '''
    check_assume_owner(c, 'sys', 'sys')
    # assertion env pc
    c = '''
    assert env proctype foo(){
        bool x;
    }
    '''
    check_assume_owner(c, 'sys', 'env')


def check_assume_owner(c, assume, owner):
    """Parse `c` (one proctype) and assert its assume/owner attributes."""
    program = parser.parse(c)
    global_defs, products, ltl_blocks = program.to_table()
    (proc,) = products
    assert hasattr(proc, 'assume'), proc
    assert hasattr(proc, 'owner'), proc
    assert proc.assume == assume, proc.assume
    assert proc.owner == owner, proc.owner
def test_assume_sys():
    """Assumption processes controlled by the system player."""
    c = '''
    env bool x;
    assume sys proctype foo(){
        do
        :: x = ! x
        od
    }
    assert ltl { []<> x }
    '''
    assert logic.synthesize(c)
    # sys must help env
    c = '''
    env bool x = false;
    assume sys proctype foo(){
        do
        :: x = ! x
        :: skip
        od
    }
    assert ltl { [] ! x }
    '''
    assert logic.synthesize(c)
    # must not be trivially realizable
    c += ' assert ltl { []<> false }'
    assert not logic.synthesize(c)
    # sys cannot avoid "[] x"
    c = '''
    env bool x = false;
    assume sys proctype foo(){
        do
        :: x = true
        od
    }
    assert ltl { [] ! x}
    '''
    assert not logic.synthesize(c)
    # sys has to alternate
    c = '''
    env bool x = false;
    assume sys proctype foo(){
        do
        :: x = true
        :: x = false
        od
    }
    assert ltl { []<> x && []<> !x }
    '''
    assert logic.synthesize(c)
    # not trivially
    c += ' assert ltl { []<> false }'
    assert not logic.synthesize(c)
    # a larger graph
    c = '''
    env int(0, 5) x = 0;
    assume sys proctype foo(){
        do
        :: (x < 5); x = x + 1
        :: (x > 0); x = x - 1
        od
    }
    '''
    assert logic.synthesize(c)
    c += ' assert ltl { []<> false }'
    assert not logic.synthesize(c)
    # env deadlocked at init
    c = '''
    env bit x = 0;
    assume env proctype foo(){
        do
        :: x = 0
        od
    }
    assume sys proctype frozen(){
        do
        :: false; x = 1
        od
    }
    assert ltl { [](x == 0) }
    '''
    assert logic.synthesize(c)
    c += 'assert ltl { []<> false }'
    assert not logic.synthesize(c)
def test_env_sys_key():
    """Keys are named by conditioning and owner.

    This avoids the key used for assumption processes
    controlled by env to coincide with the key used for
    assertion processes controlled by sys.
    """
    c = '''
    env bit x;
    env bit y;
    bit z;
    /* env top async product = ps0 */
    assume ltl { []<>(x == 1) && []<>(y == 1) }
    assume active env proctype producer_0(){
        bit x;
        do
        :: x = 0; x = 1
        od
    }
    assume active env proctype producer_1(){
        bit y;
        do
        :: y = 0; y = 1
        od
    }
    assert active env proctype consumer(){
        do
        :: ((x == 0) && (y == 0)); z = 1; z = 0;
        :: !((x == 0) && (y == 0))
        od
    }
    assert ltl { []<>(z == 1) }
    '''
    assert logic.synthesize(c)
def test_atomic_sys_sys():
    """Atomic block shields a sys process from a sys spoiler."""
    c = '''
    bool x;
    sys proctype foo(){
        do
        :: atomic{ !x; x = true; x; x = false }
        od
    }
    sys proctype spoiler(){
        do
        :: x = false; x
        od
    }
    assert ltl { []<> x }
    '''
    assert logic.synthesize(c, strict_atomic=False)


def test_atomic_sys_env():
    """Atomic block lets sys react to the primed env variable."""
    c = '''
    env bool y;
    bool x;
    assume active env proctype one(){
        do
        :: y = true
        :: y = false
        od
    }
    assert active sys proctype two(){
        do
        :: atomic{ y'; x = true }
        :: atomic{ !y'; x = false }
        od
    }
    ltl { [] (x <-> y) }
    '''
    assert logic.synthesize(c, strict_atomic=True)
def test_async_inside_sync():
    """Nested async product inside a sync product parses."""
    # parsing only -- no synthesis here.
    # NOTE(review): `foo_3` is declared twice; presumably this only
    # exercises the parser -- confirm whether duplicates are intended.
    c = '''
    bit x;
    sync{
        async{
            proctype foo_0(){
                x = 0
            }
            proctype foo_1(){
                x = 1
            }
        }
        proctype foo_2(){
            x = 1
        }
        proctype foo_3(){
            x = 1
        }
    }
    proctype foo_3(){
        x = 1
    }
    '''
    p = logic._parser.parse(c)
    print(p)
def test_array():
    """Array declarations, element references, and synthesis with arrays."""
    # single array parsed
    c = '''sys int(0, 3) x[3];'''
    program = parser.parse(c)
    (x,), _, ltlblocks = program.to_table()
    assert x.length == 3, x.length
    # single array inserted to table
    t = logic.Table()
    x.insert_logic_var(t, assume_context='sys', pid='global')
    assert 'x' in t.scopes['global'], t
    d = t.scopes['global']['x']
    assert d['length'] == 3, d
    # array ref with constant index
    c = '''
    sys int(0, 3) x[4];
    ltl { x[2] == 0}
    '''
    program = parser.parse(c)
    vardefs, _, ltlblocks = program.to_table()
    t = logic.Table()
    logic.add_variables_to_table(
        t, vardefs, pid='global', assume_context='sys')
    ltl = next(iter(ltlblocks))
    f = ltl.formula
    s, context = f.to_logic(t, pid='global')
    assert context == 'bool', context
    assert s == '(pidglobal_x2 = 0)', s
    s, pr = f.to_guard(
        t, pid='global', assume='sys', primed=False, negated=False)
    assert not pr
    assert s == '(pidglobal_x2 = 0)', s
    s, pr = f.to_guard(
        t, pid='global', assume='sys', primed=True, negated=False)
    assert pr
    assert s == 'True', s
    s, pr = f.to_guard(
        t, pid='global', assume='sys', primed=True, negated=True)
    assert pr
    assert s == 'False', s
    # array ref with index an expr containing a var
    c = '''
    sys int(0, 3) x[3];
    sys int(0, 2) y;
    ltl { x[y] == 0 }
    '''
    program = parser.parse(c)
    vardefs, groups, ltlblocks = program.to_table()
    t = logic.Table()
    logic.add_variables_to_table(
        t, vardefs, pid='global', assume_context='sys')
    ltl = next(iter(ltlblocks))
    f = ltl.formula
    s, context = f.to_logic(t, pid='global')
    assert context == 'bool', context
    # variable index compiles to a nested if-then-else over the elements
    correct = '(ite( {y} = 2, {x}2, ite( {y} = 1, {x}1, {x}0)) = 0)'.format(
        x='pidglobal_x', y='pidglobal_y')
    assert s == correct, s
    s, pr = f.to_guard(
        t, pid='global', assume='sys', primed=False, negated=False)
    assert not pr
    assert s == correct
    s, pr = f.to_guard(
        t, pid='global', assume='sys', primed=True, negated=False)
    assert pr
    assert s == 'True'
    s, pr = f.to_guard(
        t, pid='global', assume='sys', primed=True, negated=True)
    assert pr
    assert s == 'False'
    # array ref with index a primed controlled var
    # raise exception if primed controlled index
    c = '''
    sys int(0, 3) x[3];
    sys int(0, 2) y;
    ltl { x[y'] == 0 }
    '''
    program = parser.parse(c)
    vardefs, groups, ltlblocks = program.to_table()
    t = logic.Table()
    logic.add_variables_to_table(
        t, vardefs, pid='global', assume_context='sys')
    ltl = next(iter(ltlblocks))
    f = ltl.formula
    s, context = f.to_logic(t, pid='global')
    correct = (
        '(ite( (X {y}) = 2, {x}2, ite( (X {y}) = 1, {x}1, {x}0)) = 0)').format(
            x='pidglobal_x', y='pidglobal_y')
    assert s == correct, s
    f.to_guard(t, pid='global', assume='sys', primed=True, negated=True)
    with assert_raises(AssertionError):
        f.to_guard(t, pid='global', assume='sys', primed=False, negated=True)
    t.scopes['global']['x']['owner'] = 'env'
    with assert_raises(AssertionError):
        f.to_guard(t, pid='global', assume='sys', primed=False, negated=True)
    # realizability test
    c = '''
    active sys proctype foo(){
        int(0, 3) x[5] = 3;
        int(0, 4) y;
        x[3] = 1;
        do
        :: x[3] == 1
        od
    }
    '''
    r = logic.synthesize(c)
    assert r
    c = '''
    active sys proctype foo(){
        int(0, 3) x[5] = 3;
        int(0, 4) y;
        x[3] = 1;
        do
        :: x[4] == 3
        od
    }
    '''
    r = logic.synthesize(c)
    assert r
    c = '''
    active sys proctype foo(){
        int(0, 3) x[5] = 3;
        int(0, 4) y;
        x[3] = 1;
        do
        :: x[4] == 2
        od
    }
    '''
    r = logic.synthesize(c)
    assert not r
def test_else():
c = '''
bit x;
bit y;
active sys proctype foo(){
do
:: x == 0
:: y == 1
:: else
od
}
'''
program = parser.parse(c)
vardefs, groups, ltlblocks = program.to_table()
t = logic.Table()
logic.add_variables_to_table(
t, vardefs, pid='global', assume_context='sys')
(proc,) = groups
g = proc.to_pg()
for u, v, d in g.edges_iter(data=True):
c = d['stmt']
if not isinstance(c, logic.AST.Else):
continue
print c.to_logic(t, pid=0, assume='sys')
print c.to_guard(t, pid=0, assume='sys')
# realizability
c = '''
bit x;
bit y;
active sys proctype foo(){
do
:: true;
:: else; false
od
}
'''
r = logic.synthesize(c)
assert r
c = '''
bit x;
bit y;
active sys proctype foo(){
do
:: true; false
:: else;
od
}
'''
r = logic.synthesize(c)
assert not r
c = '''
bit x;
bit y;
active sys proctype foo(){
do
:: false;
:: else;
od
}
'''
r = logic.synthesize(c)
assert r
c = '''
bit x;
bit y;
active sys proctype foo(){
do
:: false;
:: else; false
od
}
'''
r = logic.synthesize(c)
assert not r
def test_else_bug():
    """Regression test: `else` inside an `if` nested in `do` is realizable."""
    spec = '''
sys proctype foo(){
do
::
if
:: false
:: else
fi
od
}
'''
    assert logic.synthesize(spec)
def test_sync():
    """A sync product whose processes cannot all progress is unrealizable."""
    spec = '''
sync{
assert active sys proctype maintain_lock(){
do
:: true; false
:: false
od
}
assert active sys proctype count_burst(){
do
:: false
od
}
}
'''
    result = logic.synthesize(spec)
    assert not result
def test_collect_primed_vars():
    """Primed variables are detected for both `'` and prefix `X` notation."""
    pid = 'global'
    player = 'sys'
    table = logic.Table()
    table.add_var(pid, 'y', 'y', 'bool', 'bool', True, player)
    # the postfix-prime and the "next" prefix operator must behave alike
    for text in ("y' < 2", "(X y) < 2"):
        parsed = expr_parser.parse(text)
        (found,) = logic.collect_primed_vars(
            parsed.expr, table, pid, player)
        scope, node = found
        assert scope == 'global', scope
        assert str(node) == 'y', node
def test_constrain_global_declarative_vars():
    """Free env-owned vars get frozen; sys-owned vars are left alone."""
    table = logic.Table()
    # declare y, z owned by env and w owned by sys (all free, in order)
    for name, owner in (('y', 'env'), ('z', 'env'), ('w', 'sys')):
        vardef = logic.AST.VarDef(name, 'bool', owner=owner, free=True)
        vardef.insert_logic_var(table, 'sys', 'global')
    frozen = logic.freeze_declarative_vars(table, 'env')
    expected = (
        '(((X pidglobal_y) <-> pidglobal_y)) &'
        ' (((X pidglobal_z) <-> pidglobal_z))')
    assert frozen == expected, frozen
    # env must freeze its free variable for the spec to be realizable
    spec = '''
free env bit x;
proctype foo(){
do
:: atomic{ skip; x' == x }
od
}
'''
    assert logic.synthesize(spec)
def test_remote_ref():
    """A remote label reference translates to a program-counter equality."""
    spec = '''
proctype foo(){
bar @ critical
}
proctype bar(){
bit x;
critical:
if
:: x = x + 1
fi
}
'''
    program = logic._parser.parse(spec)
    global_defs, products, ltl = program.to_table()
    table = logic.products_to_logic(products, global_defs)[0]
    assert len(table.proctypes) == 2, table.proctypes
    graph = table.proctypes['foo']['program_graph']
    # `foo` has exactly one edge: the remote reference statement
    ((u, v, attrs),) = graph.edges(data=True)
    stmt = attrs['stmt']
    assert isinstance(stmt, logic.AST.Expression), stmt
    formula, _ = stmt.to_logic(t=table, pid=0)
    assert formula == '(pc1 = 1)', (formula, table.pids)
def scaffold():
    """Scratchpad: dump a parse tree and probe primed-variable detection."""
    tree = expr_parser.parse("(x == y)'").expr
    print(repr(tree))
    table = logic.Table()
    table.add_var(pid=0, name='x', flatname='pid0_x',
                  dom='bool', free=False, owner='sys')
    table.add_var(pid=0, name='y', flatname='pid0_y',
                  dom='bool', free=False, owner='env')
    primed = logic.collect_primed_vars(tree, table, pid=0, player='sys')
    if primed:
        print('has next var')
    else:
        print('does not have next var')
if __name__ == '__main__':
    # Run a single test directly for quick manual debugging
    # (test_trivial_realizable is presumably defined earlier in this module).
    test_trivial_realizable()
| |
#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import pbr.version
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../playbooks/inventory/'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'openstackdocstheme',
    'sphinx.ext.autodoc',
    'sphinx.ext.extlinks',
    'sphinxmark'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
author = 'OpenStack-Ansible Contributors'
category = 'Miscellaneous'
copyright = '2014-2017, OpenStack-Ansible Contributors'
description = 'OpenStack-Ansible deploys OpenStack environments using Ansible.'
project = 'OpenStack-Ansible'
target_name = 'openstack-ansible'
title = 'OpenStack-Ansible Documentation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Version information derived by pbr from the package/git metadata.
version_info = pbr.version.VersionInfo(target_name)
# The full version, including alpha/beta/rc tags.
release = version_info.version_string_with_vcs()
# The short X.Y version.
version = version_info.canonical_version_string()
# openstackdocstheme options
repository_name = 'openstack/' + target_name
bug_project = project.lower()
bug_tag = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = target_name + '-docs'
# If true, the reST sources are copied into the HTML output directory.
html_copy_source = False
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, target_name + '.tex',
     title, author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, target_name,
     title, [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (master_doc, target_name,
     title, author, project,
     description, category),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for PDF output --------------------------------------------------
pdf_documents = [
    (master_doc, target_name,
     title, author)
]
# Used for the developer documentation
latest_tag = os.popen('git describe --abbrev=0 --tags').read().strip('\n')
# Used for the upgrade documentation
previous_release_branch_name = 'pike'
current_release_branch_name = 'queens'
# dev docs have no branch specified on master; for stable branches it's "/branch/"
# NOTE(review): os.popen runs through the shell. Acceptable for a trusted
# docs build with fixed commands; subprocess.run would be safer if this
# ever incorporates external input.
watermark = os.popen("git branch --contains $(git rev-parse HEAD) | awk -F/ '/stable/ {print $2}'").read().strip(' \n\t').capitalize()
if watermark == "":
    # HEAD is not contained in any stable/* branch: building from master.
    watermark = "Pre-release"
    deploy_branch_link_name = "latest"
    dev_branch_link_name = ""
    current_release_git_branch_name = "master"
else:
    deploy_branch_link_name = current_release_branch_name
    dev_branch_link_name = "{}/".format(current_release_branch_name)
    current_release_git_branch_name = 'stable/' + current_release_branch_name
previous_release_capital_name = previous_release_branch_name.upper()
previous_release_formal_name = previous_release_branch_name.capitalize()
current_release_capital_name = current_release_branch_name.upper()
current_release_formal_name = current_release_branch_name.capitalize()
upgrade_backup_dir = "``/etc/openstack_deploy."+previous_release_capital_name+"``"
# Used to reference the deploy guide
deploy_guide_prefix = "http://docs.openstack.org/project-deploy-guide/openstack-ansible/{}/%s".format(deploy_branch_link_name)
dev_docs_prefix = "http://docs.openstack.org/openstack-ansible/{}%s".format(dev_branch_link_name)
# reST substitutions made available to every document via rst_epilog.
rst_epilog = """
.. |previous_release_branch_name| replace:: %s
.. |current_release_branch_name| replace:: %s
.. |current_release_git_branch_name| replace:: %s
.. |previous_release_capital_name| replace:: %s
.. |previous_release_formal_name| replace:: %s
.. |current_release_capital_name| replace:: %s
.. |current_release_formal_name| replace:: %s
.. |upgrade_backup_dir| replace:: %s
.. |latest_tag| replace:: %s
""" % (previous_release_branch_name,
       current_release_branch_name,
       current_release_git_branch_name,
       previous_release_capital_name,
       previous_release_formal_name,
       current_release_capital_name,
       current_release_formal_name,
       upgrade_backup_dir,
       latest_tag)
extlinks = {'deploy_guide': (deploy_guide_prefix, ''),
            'dev_docs': (dev_docs_prefix, '')
            }
# -- Options for sphinxmark -----------------------------------------------
sphinxmark_enable = True
sphinxmark_div = 'docs-body'
sphinxmark_image = 'text'
sphinxmark_text = watermark
sphinxmark_text_color = (128, 128, 128)
sphinxmark_text_size = 70
| |
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from os_win import exceptions as os_win_exc
from os_brick import exception
from os_brick.initiator.windows import fibre_channel as fc
from os_brick.tests.windows import test_base
@ddt.ddt
class WindowsFCConnectorTestCase(test_base.WindowsConnectorTestBase):
    """Unit tests for the Windows Fibre Channel connector.

    All os-win utilities are mocked; the tests verify the connector's
    control flow and the arguments passed to the disk/FC utilities.
    """

    def setUp(self):
        super(WindowsFCConnectorTestCase, self).setUp()
        # Connector under test; the rescan interval is a sentinel so the
        # sleep calls can be asserted on exactly.
        self._connector = fc.WindowsFCConnector(
            device_scan_interval=mock.sentinel.rescan_interval)
        self._diskutils = self._connector._diskutils
        self._fc_utils = self._connector._fc_utils

    @ddt.data(True, False)
    @mock.patch.object(fc.utilsfactory, 'get_fc_utils')
    def test_get_volume_connector_props(self, valid_fc_hba_ports,
                                        mock_get_fc_utils):
        """Props contain wwpns/wwnns only when HBA ports are reported."""
        fake_fc_hba_ports = [{'node_name': mock.sentinel.node_name,
                              'port_name': mock.sentinel.port_name},
                             {'node_name': mock.sentinel.second_node_name,
                              'port_name': mock.sentinel.second_port_name}]
        self._fc_utils = mock_get_fc_utils.return_value
        self._fc_utils.get_fc_hba_ports.return_value = (
            fake_fc_hba_ports if valid_fc_hba_ports else [])
        props = self._connector.get_connector_properties()
        self._fc_utils.refresh_hba_configuration.assert_called_once_with()
        self._fc_utils.get_fc_hba_ports.assert_called_once_with()
        if valid_fc_hba_ports:
            expected_props = {
                'wwpns': [mock.sentinel.port_name,
                          mock.sentinel.second_port_name],
                'wwnns': [mock.sentinel.node_name,
                          mock.sentinel.second_node_name]
            }
        else:
            expected_props = {}
        self.assertCountEqual(expected_props, props)

    @mock.patch.object(fc.WindowsFCConnector, '_get_scsi_wwn')
    @mock.patch.object(fc.WindowsFCConnector, 'get_volume_paths')
    def test_connect_volume(self, mock_get_vol_paths,
                            mock_get_scsi_wwn):
        """connect_volume returns the device info of the first found path."""
        mock_get_vol_paths.return_value = [mock.sentinel.dev_name]
        mock_get_dev_num = self._diskutils.get_device_number_from_device_name
        mock_get_dev_num.return_value = mock.sentinel.dev_num
        expected_device_info = dict(type='block',
                                    path=mock.sentinel.dev_name,
                                    number=mock.sentinel.dev_num,
                                    scsi_wwn=mock_get_scsi_wwn.return_value)
        device_info = self._connector.connect_volume(mock.sentinel.conn_props)
        self.assertEqual(expected_device_info, device_info)
        mock_get_vol_paths.assert_called_once_with(mock.sentinel.conn_props)
        mock_get_dev_num.assert_called_once_with(mock.sentinel.dev_name)
        mock_get_scsi_wwn.assert_called_once_with(mock.sentinel.dev_num)

    @mock.patch.object(fc.WindowsFCConnector, 'get_volume_paths')
    def test_connect_volume_not_found(self, mock_get_vol_paths):
        """connect_volume raises when no volume path is found."""
        mock_get_vol_paths.return_value = []
        self.assertRaises(exception.NoFibreChannelVolumeDeviceFound,
                          self._connector.connect_volume,
                          mock.sentinel.conn_props)

    @ddt.data({'volume_mappings': [], 'expected_paths': []},
              {'volume_mappings': [dict(device_name='',
                                        fcp_lun=mock.sentinel.fcp_lun)] * 3,
               'scsi_id_side_eff': os_win_exc.OSWinException,
               'expected_paths': []},
              {'volume_mappings': [dict(device_name='',
                                        fcp_lun=mock.sentinel.fcp_lun),
                                   dict(device_name=mock.sentinel.disk_path)],
               'expected_paths': [mock.sentinel.disk_path]},
              {'volume_mappings': [dict(device_name='',
                                        fcp_lun=mock.sentinel.fcp_lun)],
               'scsi_id_side_eff': [[mock.sentinel.disk_path]],
               'expected_paths': [mock.sentinel.disk_path]},
              {'volume_mappings': [dict(device_name=mock.sentinel.disk_path)],
               'use_multipath': True,
               'is_mpio_disk': True,
               'expected_paths': [mock.sentinel.disk_path]},
              {'volume_mappings': [dict(device_name=mock.sentinel.disk_path)],
               'use_multipath': True,
               'is_mpio_disk': False,
               'expected_paths': []})
    @ddt.unpack
    @mock.patch('time.sleep')
    @mock.patch.object(fc.WindowsFCConnector, '_get_fc_volume_mappings')
    @mock.patch.object(fc.WindowsFCConnector, '_get_disk_paths_by_scsi_id')
    def test_get_volume_paths(self, mock_get_disk_paths_by_scsi_id,
                              mock_get_fc_mappings,
                              mock_sleep,
                              volume_mappings, expected_paths,
                              scsi_id_side_eff=None,
                              use_multipath=False,
                              is_mpio_disk=False):
        """Exercise the rescan/retry loop of get_volume_paths."""
        mock_get_dev_num = self._diskutils.get_device_number_from_device_name
        mock_get_fc_mappings.return_value = volume_mappings
        mock_get_disk_paths_by_scsi_id.side_effect = scsi_id_side_eff
        self._diskutils.is_mpio_disk.return_value = is_mpio_disk
        self._connector.use_multipath = use_multipath
        vol_paths = self._connector.get_volume_paths(mock.sentinel.conn_props)
        self.assertEqual(expected_paths, vol_paths)
        # In this test case, either the volume is found after the first
        # attempt, or it's not found at all, in which case we'd expect
        # the number of retries to be the requested maximum number of rescans.
        expected_try_count = (1 if expected_paths
                              else self._connector.device_scan_attempts)
        self._diskutils.rescan_disks.assert_has_calls(
            [mock.call()] * expected_try_count)
        mock_get_fc_mappings.assert_has_calls(
            [mock.call(mock.sentinel.conn_props)] * expected_try_count)
        # the connector sleeps between attempts, but not after the last one
        mock_sleep.assert_has_calls(
            [mock.call(mock.sentinel.rescan_interval)] *
            (expected_try_count - 1))
        dev_names = [mapping['device_name']
                     for mapping in volume_mappings if mapping['device_name']]
        if volume_mappings and not dev_names:
            # no mapping carried a device name: the SCSI id fallback is used
            mock_get_disk_paths_by_scsi_id.assert_any_call(
                mock.sentinel.conn_props,
                volume_mappings[0]['fcp_lun'])
        if expected_paths and use_multipath:
            mock_get_dev_num.assert_called_once_with(expected_paths[0])
            self._diskutils.is_mpio_disk.assert_any_call(
                mock_get_dev_num.return_value)

    @mock.patch.object(fc.WindowsFCConnector, '_get_fc_hba_mappings')
    def test_get_fc_volume_mappings(self, mock_get_fc_hba_mappings):
        """Only mappings matching both target wwpn and lun are returned."""
        fake_target_wwpn = 'FAKE_TARGET_WWPN'
        fake_conn_props = dict(target_lun=mock.sentinel.target_lun,
                               target_wwn=[fake_target_wwpn])
        mock_hba_mappings = {mock.sentinel.node_name: mock.sentinel.hba_ports}
        mock_get_fc_hba_mappings.return_value = mock_hba_mappings
        all_target_mappings = [{'device_name': mock.sentinel.dev_name,
                                'port_name': fake_target_wwpn,
                                'lun': mock.sentinel.target_lun},
                               {'device_name': mock.sentinel.dev_name_1,
                                'port_name': mock.sentinel.target_port_name_1,
                                'lun': mock.sentinel.target_lun},
                               {'device_name': mock.sentinel.dev_name,
                                'port_name': mock.sentinel.target_port_name,
                                'lun': mock.sentinel.target_lun_1}]
        # only the first mapping matches both the wwpn and the lun
        expected_mappings = [all_target_mappings[0]]
        self._fc_utils.get_fc_target_mappings.return_value = (
            all_target_mappings)
        volume_mappings = self._connector._get_fc_volume_mappings(
            fake_conn_props)
        self.assertEqual(expected_mappings, volume_mappings)

    def test_get_fc_hba_mappings(self):
        """HBA ports are grouped by node name."""
        fake_fc_hba_ports = [{'node_name': mock.sentinel.node_name,
                              'port_name': mock.sentinel.port_name}]
        self._fc_utils.get_fc_hba_ports.return_value = fake_fc_hba_ports
        resulted_mappings = self._connector._get_fc_hba_mappings()
        expected_mappings = {
            mock.sentinel.node_name: [mock.sentinel.port_name]}
        self.assertEqual(expected_mappings, resulted_mappings)

    @mock.patch.object(fc.WindowsFCConnector, '_get_dev_nums_by_scsi_id')
    def test_get_disk_paths_by_scsi_id(self, mock_get_dev_nums):
        """Lookup failures on one initiator/target pair are tolerated."""
        remote_wwpns = [mock.sentinel.remote_wwpn_0,
                        mock.sentinel.remote_wwpn_1]
        fake_init_target_map = {mock.sentinel.local_wwpn: remote_wwpns}
        conn_props = dict(initiator_target_map=fake_init_target_map)
        # first pair raises; the second pair yields a device number
        mock_get_dev_nums.side_effect = [os_win_exc.FCException,
                                         [mock.sentinel.dev_num]]
        mock_get_dev_name = self._diskutils.get_device_name_by_device_number
        mock_get_dev_name.return_value = mock.sentinel.dev_name
        disk_paths = self._connector._get_disk_paths_by_scsi_id(
            conn_props, mock.sentinel.fcp_lun)
        self.assertEqual([mock.sentinel.dev_name], disk_paths)
        mock_get_dev_nums.assert_has_calls([
            mock.call(mock.sentinel.local_wwpn,
                      remote_wwpn,
                      mock.sentinel.fcp_lun)
            for remote_wwpn in remote_wwpns])
        mock_get_dev_name.assert_called_once_with(mock.sentinel.dev_num)

    @mock.patch.object(fc.WindowsFCConnector, '_get_fc_hba_wwn_for_port')
    def test_get_dev_nums_by_scsi_id(self, mock_get_fc_hba_wwn):
        """Device numbers are fetched by the identifier of the SCSI device."""
        fake_identifier = dict(id=mock.sentinel.id,
                               type=mock.sentinel.type)
        mock_get_fc_hba_wwn.return_value = mock.sentinel.local_wwnn
        self._fc_utils.get_scsi_device_identifiers.return_value = [
            fake_identifier]
        self._diskutils.get_disk_numbers_by_unique_id.return_value = (
            mock.sentinel.dev_nums)
        dev_nums = self._connector._get_dev_nums_by_scsi_id(
            mock.sentinel.local_wwpn,
            mock.sentinel.remote_wwpn,
            mock.sentinel.fcp_lun)
        self.assertEqual(mock.sentinel.dev_nums, dev_nums)
        mock_get_fc_hba_wwn.assert_called_once_with(mock.sentinel.local_wwpn)
        self._fc_utils.get_scsi_device_identifiers.assert_called_once_with(
            mock.sentinel.local_wwnn, mock.sentinel.local_wwpn,
            mock.sentinel.remote_wwpn, mock.sentinel.fcp_lun)
        self._diskutils.get_disk_numbers_by_unique_id.assert_called_once_with(
            unique_id=mock.sentinel.id,
            unique_id_format=mock.sentinel.type)
| |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import ScenarioTest, record_only
@record_only()
class AzureManagementGroupsScenarioTest(ScenarioTest):
def test_list_managementgroups(self):
    """List management groups and sanity-check the first entry."""
    groups = self.cmd(
        'account management-group list').get_output_in_json()
    self.assertIsNotNone(groups)
    self.assertTrue(len(groups) > 0)
    first = groups[0]
    self.assertIsNotNone(first["displayName"])
    self.assertTrue(first["id"].startswith(
        "/providers/Microsoft.Management/managementGroups/"))
    self.assertIsNotNone(first["name"])
    self.assertIsNotNone(first["tenantId"])
    self.assertEqual(
        first["type"],
        "/providers/Microsoft.Management/managementGroups")
def test_show_managementgroup(self):
    """Show a child management group and verify its parent details."""
    self.cmd('account management-group create --name testcligetgroup1')
    self.cmd('account management-group create --name testcligetgroup2 --parent /providers/Microsoft.Management/managementGroups/testcligetgroup1')
    managementgroup_get = self.cmd(
        'account management-group show --name testcligetgroup2').get_output_in_json()
    # groups are deleted before the assertions run (child first)
    self.cmd('account management-group delete --name testcligetgroup2')
    self.cmd('account management-group delete --name testcligetgroup1')
    self.assertIsNotNone(managementgroup_get)
    # without --expand, children are not populated
    self.assertIsNone(managementgroup_get["children"])
    self.assertIsNotNone(managementgroup_get["details"])
    self.assertEqual(
        managementgroup_get["id"],
        "/providers/Microsoft.Management/managementGroups/testcligetgroup2")
    self.assertEqual(managementgroup_get["name"], "testcligetgroup2")
    self.assertEqual(
        managementgroup_get["displayName"],
        "testcligetgroup2")
    self.assertEqual(
        managementgroup_get["details"]["parent"]["displayName"],
        "testcligetgroup1")
    self.assertEqual(
        managementgroup_get["details"]["parent"]["id"],
        "/providers/Microsoft.Management/managementGroups/testcligetgroup1")
    self.assertEqual(
        managementgroup_get["details"]["parent"]["name"],
        "testcligetgroup1")
    self.assertIsNotNone(managementgroup_get["tenantId"])
    self.assertEqual(
        managementgroup_get["type"],
        "/providers/Microsoft.Management/managementGroups")
def test_show_managementgroup_with_expand(self):
    """Show with --expand: direct children must be populated."""
    self.cmd('account management-group create --name testcligetgroup1')
    self.cmd('account management-group create --name testcligetgroup2 --parent testcligetgroup1')
    self.cmd('account management-group create --name testcligetgroup3 --parent /providers/Microsoft.Management/managementGroups/testcligetgroup2')
    managementgroup_get = self.cmd(
        'account management-group show --name testcligetgroup2 --expand').get_output_in_json()
    # groups are deleted before the assertions run (deepest first)
    self.cmd('account management-group delete --name testcligetgroup3')
    self.cmd('account management-group delete --name testcligetgroup2')
    self.cmd('account management-group delete --name testcligetgroup1')
    self.assertIsNotNone(managementgroup_get)
    self.assertIsNotNone(managementgroup_get["children"])
    self.assertIsNotNone(managementgroup_get["details"])
    self.assertEqual(
        managementgroup_get["id"],
        "/providers/Microsoft.Management/managementGroups/testcligetgroup2")
    self.assertEqual(managementgroup_get["name"], "testcligetgroup2")
    self.assertEqual(
        managementgroup_get["displayName"],
        "testcligetgroup2")
    self.assertEqual(
        managementgroup_get["details"]["parent"]["displayName"],
        "testcligetgroup1")
    self.assertEqual(
        managementgroup_get["details"]["parent"]["id"],
        "/providers/Microsoft.Management/managementGroups/testcligetgroup1")
    self.assertEqual(
        managementgroup_get["details"]["parent"]["name"],
        "testcligetgroup1")
    self.assertIsNotNone(managementgroup_get["tenantId"])
    self.assertEqual(
        managementgroup_get["type"],
        "/providers/Microsoft.Management/managementGroups")
    # direct child (testcligetgroup3) is listed under "children"
    self.assertEqual(
        managementgroup_get["children"][0]["id"],
        "/providers/Microsoft.Management/managementGroups/testcligetgroup3")
    self.assertEqual(
        managementgroup_get["children"][0]["type"],
        "/providers/Microsoft.Management/managementGroups")
    self.assertEqual(
        managementgroup_get["children"][0]["displayName"],
        "testcligetgroup3")
    self.assertEqual(
        managementgroup_get["children"][0]["name"],
        "testcligetgroup3")
def test_show_managementgroup_with_expand_and_recurse(self):
    """Show with --expand --recurse: grandchildren must also appear."""
    self.cmd('account management-group create --name testcligetgroup1')
    self.cmd('account management-group create --name testcligetgroup2 --parent /providers/Microsoft.Management/managementGroups/testcligetgroup1')
    self.cmd('account management-group create --name testcligetgroup3 --parent testcligetgroup2')
    self.cmd('account management-group create --name testcligetgroup4 --parent /providers/Microsoft.Management/managementGroups/testcligetgroup3')
    managementgroup_get = self.cmd(
        'account management-group show --name testcligetgroup2 --expand --recurse').get_output_in_json()
    # groups are deleted before the assertions run (deepest first)
    self.cmd('account management-group delete --name testcligetgroup4')
    self.cmd('account management-group delete --name testcligetgroup3')
    self.cmd('account management-group delete --name testcligetgroup2')
    self.cmd('account management-group delete --name testcligetgroup1')
    self.assertIsNotNone(managementgroup_get)
    self.assertIsNotNone(managementgroup_get["children"])
    self.assertIsNotNone(managementgroup_get["details"])
    self.assertEqual(
        managementgroup_get["id"],
        "/providers/Microsoft.Management/managementGroups/testcligetgroup2")
    self.assertEqual(managementgroup_get["name"], "testcligetgroup2")
    self.assertEqual(
        managementgroup_get["displayName"],
        "testcligetgroup2")
    self.assertEqual(
        managementgroup_get["details"]["parent"]["displayName"],
        "testcligetgroup1")
    self.assertEqual(
        managementgroup_get["details"]["parent"]["id"],
        "/providers/Microsoft.Management/managementGroups/testcligetgroup1")
    self.assertEqual(
        managementgroup_get["details"]["parent"]["name"],
        "testcligetgroup1")
    self.assertIsNotNone(managementgroup_get["tenantId"])
    self.assertEqual(
        managementgroup_get["type"],
        "/providers/Microsoft.Management/managementGroups")
    # direct child (testcligetgroup3)
    self.assertEqual(
        managementgroup_get["children"][0]["id"],
        "/providers/Microsoft.Management/managementGroups/testcligetgroup3")
    self.assertEqual(
        managementgroup_get["children"][0]["type"],
        "/providers/Microsoft.Management/managementGroups")
    self.assertEqual(
        managementgroup_get["children"][0]["displayName"],
        "testcligetgroup3")
    self.assertEqual(
        managementgroup_get["children"][0]["name"],
        "testcligetgroup3")
    # grandchild (testcligetgroup4), only present thanks to --recurse
    self.assertEqual(
        managementgroup_get["children"][0]["children"][0]["id"],
        "/providers/Microsoft.Management/managementGroups/testcligetgroup4")
    self.assertEqual(
        managementgroup_get["children"][0]["children"][0]["type"],
        "/providers/Microsoft.Management/managementGroups")
    self.assertEqual(
        managementgroup_get["children"][0]["children"][0]["displayName"],
        "testcligetgroup4")
    self.assertEqual(
        managementgroup_get["children"][0]["children"][0]["name"],
        "testcligetgroup4")
def test_create_managementgroup(self):
name = "testcligroup"
displayName = "testcligroup"
managementgroup_create = self.cmd(
'account management-group create --name ' +
name).get_output_in_json()
self.cmd('account management-group delete --name ' + name)
self.assertIsNotNone(managementgroup_create)
self.assertIsNotNone(managementgroup_create["properties"]["details"])
self.assertEqual(
managementgroup_create["id"],
"/providers/Microsoft.Management/managementGroups/" + name)
self.assertEqual(managementgroup_create["name"], name)
self.assertEqual(
managementgroup_create["properties"]["displayName"],
displayName)
self.assertEqual(
managementgroup_create["properties"]["details"]["parent"]["displayName"],
"Tenant Root Group")
self.assertEqual(
managementgroup_create["properties"]["details"]["parent"]["id"],
"/providers/Microsoft.Management/managementGroups/" +
managementgroup_create["properties"]["tenantId"])
self.assertEqual(
managementgroup_create["properties"]["details"]["parent"]["name"],
managementgroup_create["properties"]["tenantId"])
self.assertIsNotNone(managementgroup_create["properties"]["tenantId"])
self.assertEqual(
managementgroup_create["type"],
"/providers/Microsoft.Management/managementGroups")
def test_create_managementgroup_with_displayname(self):
name = "testcligroup"
displayName = "TestCliDisplayName"
managementgroup_create = self.cmd(
'account management-group create --name ' +
name +
' --display-name ' +
displayName).get_output_in_json()
self.cmd('account management-group delete --name ' + name)
self.assertIsNotNone(managementgroup_create)
self.assertIsNotNone(managementgroup_create["properties"]["details"])
self.assertEqual(
managementgroup_create["id"],
"/providers/Microsoft.Management/managementGroups/" + name)
self.assertEqual(managementgroup_create["name"], name)
self.assertEqual(
managementgroup_create["properties"]["displayName"],
displayName)
self.assertEqual(
managementgroup_create["properties"]["details"]["parent"]["displayName"],
"Tenant Root Group")
self.assertEqual(
managementgroup_create["properties"]["details"]["parent"]["id"],
"/providers/Microsoft.Management/managementGroups/" +
managementgroup_create["properties"]["tenantId"])
self.assertEqual(
managementgroup_create["properties"]["details"]["parent"]["name"],
managementgroup_create["properties"]["tenantId"])
self.assertIsNotNone(managementgroup_create["properties"]["tenantId"])
self.assertEqual(
managementgroup_create["type"],
"/providers/Microsoft.Management/managementGroups")
def test_create_managementgroup_with_parentid(self):
name = "testcligroupchild"
displayName = "testcligroupchild"
parentId = "/providers/Microsoft.Management/managementGroups/testcligroup"
parentName = "testcligroup"
self.cmd('account management-group create --name ' + parentName)
managementgroup_create = self.cmd(
'account management-group create --name ' +
name +
' --parent ' +
parentId).get_output_in_json()
self.cmd('account management-group delete --name ' + name)
self.cmd('account management-group delete --name ' + parentName)
self.assertIsNotNone(managementgroup_create)
self.assertIsNotNone(managementgroup_create["properties"]["details"])
self.assertEqual(
managementgroup_create["id"],
"/providers/Microsoft.Management/managementGroups/" + name)
self.assertEqual(managementgroup_create["name"], name)
self.assertEqual(
managementgroup_create["properties"]["displayName"],
displayName)
self.assertEqual(
managementgroup_create["properties"]["details"]["parent"]["displayName"],
parentName)
self.assertEqual(
managementgroup_create["properties"]["details"]["parent"]["id"],
parentId)
self.assertEqual(
managementgroup_create["properties"]["details"]["parent"]["name"],
parentName)
self.assertIsNotNone(managementgroup_create["properties"]["tenantId"])
self.assertEqual(
managementgroup_create["type"],
"/providers/Microsoft.Management/managementGroups")
def test_create_managementgroup_with_displayname_and_parentid(self):
name = "testcligroupchild"
displayName = "testcligroupchildDisplayName"
parentId = "/providers/Microsoft.Management/managementGroups/testcligroup"
parentName = "testcligroup"
self.cmd('account management-group create --name ' + parentName)
managementgroup_create = self.cmd(
'account management-group create --name ' +
name +
' --display-name ' +
displayName +
' --parent ' +
parentName).get_output_in_json()
self.cmd('account management-group delete --name ' + name)
self.cmd('account management-group delete --name ' + parentName)
self.assertIsNotNone(managementgroup_create)
self.assertIsNotNone(managementgroup_create["properties"]["details"])
self.assertEqual(
managementgroup_create["id"],
"/providers/Microsoft.Management/managementGroups/" + name)
self.assertEqual(managementgroup_create["name"], name)
self.assertEqual(
managementgroup_create["properties"]["displayName"],
displayName)
self.assertEqual(
managementgroup_create["properties"]["details"]["parent"]["displayName"],
parentName)
self.assertEqual(
managementgroup_create["properties"]["details"]["parent"]["id"],
parentId)
self.assertEqual(
managementgroup_create["properties"]["details"]["parent"]["name"],
parentName)
self.assertIsNotNone(managementgroup_create["properties"]["tenantId"])
self.assertEqual(
managementgroup_create["type"],
"/providers/Microsoft.Management/managementGroups")
def test_update_managementgroup_with_displayname(self):
name = "testcligroup"
displayName = "testcligroupDisplayName"
self.cmd('account management-group create --name ' + name)
managementgroup_update = self.cmd(
'account management-group update --name ' +
name +
' --display-name ' +
displayName).get_output_in_json()
self.cmd('account management-group delete --name ' + name)
self.assertIsNotNone(managementgroup_update)
self.assertIsNotNone(managementgroup_update["details"])
self.assertEqual(
managementgroup_update["id"],
"/providers/Microsoft.Management/managementGroups/" + name)
self.assertEqual(managementgroup_update["name"], name)
self.assertEqual(managementgroup_update["displayName"], displayName)
self.assertEqual(
managementgroup_update["details"]["parent"]["displayName"],
"Tenant Root Group")
self.assertEqual(
managementgroup_update["details"]["parent"]["id"],
"/providers/Microsoft.Management/managementGroups/" +
managementgroup_update["tenantId"])
self.assertEqual(
managementgroup_update["details"]["parent"]["name"],
managementgroup_update["tenantId"])
self.assertIsNotNone(managementgroup_update["tenantId"])
self.assertEqual(
managementgroup_update["type"],
"/providers/Microsoft.Management/managementGroups")
def test_update_managementgroup_with_parentid(self):
name = "testcligroupchild"
displayName = "testcligroupchild"
parentId = "/providers/Microsoft.Management/managementGroups/testcligroup"
parentName = "testcligroup"
self.cmd('account management-group create --name ' + parentName)
self.cmd('account management-group create --name ' + name)
managementgroup_update = self.cmd(
'account management-group update --name ' +
name +
' --parent ' +
parentId).get_output_in_json()
self.cmd('account management-group delete --name ' + name)
self.cmd('account management-group delete --name ' + parentName)
self.assertIsNotNone(managementgroup_update)
self.assertIsNotNone(managementgroup_update["details"])
self.assertEqual(
managementgroup_update["id"],
"/providers/Microsoft.Management/managementGroups/" + name)
self.assertEqual(managementgroup_update["name"], name)
self.assertEqual(managementgroup_update["displayName"], displayName)
self.assertEqual(
managementgroup_update["details"]["parent"]["displayName"],
parentName)
self.assertEqual(
managementgroup_update["details"]["parent"]["id"],
parentId)
self.assertEqual(
managementgroup_update["details"]["parent"]["name"],
parentName)
self.assertIsNotNone(managementgroup_update["tenantId"])
self.assertEqual(
managementgroup_update["type"],
"/providers/Microsoft.Management/managementGroups")
def test_update_managementgroup_with_displayname_and_parentid(self):
name = "testcligroupchild"
displayName = "testcligroupchild"
parentId = "/providers/Microsoft.Management/managementGroups/testcligroup"
parentName = "testcligroup"
self.cmd('account management-group create --name ' + parentName)
self.cmd('account management-group create --name ' + name)
managementgroup_update = self.cmd(
'account management-group update --name ' +
name +
' --display-name ' +
displayName +
' --parent ' +
parentName).get_output_in_json()
self.cmd('account management-group delete --name ' + name)
self.cmd('account management-group delete --name ' + parentName)
self.assertIsNotNone(managementgroup_update)
self.assertIsNotNone(managementgroup_update["details"])
self.assertEqual(
managementgroup_update["id"],
"/providers/Microsoft.Management/managementGroups/" + name)
self.assertEqual(managementgroup_update["name"], name)
self.assertEqual(managementgroup_update["displayName"], displayName)
self.assertEqual(
managementgroup_update["details"]["parent"]["displayName"],
parentName)
self.assertEqual(
managementgroup_update["details"]["parent"]["id"],
parentId)
self.assertEqual(
managementgroup_update["details"]["parent"]["name"],
parentName)
self.assertIsNotNone(managementgroup_update["tenantId"])
self.assertEqual(
managementgroup_update["type"],
"/providers/Microsoft.Management/managementGroups")
def test_create_delete_group_managementgroup(self):
self.cmd('account management-group create --name testcligroup')
self.cmd('account management-group delete --name testcligroup')
| |
# -*-coding:utf-8 -*
# Copyright (c) 2011-2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Integer parameter type testcases - INT16_Max
List of tested functions :
--------------------------
- [setParameter] function
- [getParameter] function
Initial Settings :
------------------
INT16_Max :
- size = 16
- range : [-32768, 32767]
Test cases :
------------
- INT16_Max parameter min value = -32768
- INT16_Max parameter min value out of bounds = -32769
- INT16_Max parameter max value = 32767
- INT16_Max parameter max value out of bounds = 32768
- INT16_Max parameter in nominal case = 50
"""
import commands
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log=ACTLogging.Logger()
# Test of type INT16_Max - range [-32768, 32767]
class TestCases(PfwTestCase):
    """Exercises setParameter/getParameter on the INT16_Max parameter.

    Each test sets a value, then verifies it on the blackboard (via
    getParameter) and on the filesystem image ($PFW_RESULT/INT16_Max).
    The common set/verify sequences are factored into private helpers;
    the test docstrings are kept verbatim because log.D() prints them.
    """

    def setUp(self):
        # Tuning mode must be on for setParameter to be accepted.
        self.param_name = "/Test/Test/TEST_DIR/INT16_Max"
        self.pfw.sendCmd("setTuningMode", "on")

    def tearDown(self):
        self.pfw.sendCmd("setTuningMode", "off")

    def _filesystem_value(self):
        """Return the parameter's current value as stored on the filesystem."""
        return commands.getoutput('cat $PFW_RESULT/INT16_Max')

    def _check_set_in_bounds(self, value, hex_value):
        """Set the parameter to in-range `value` and verify it everywhere.

        `hex_value` is the representation expected in the filesystem image.
        """
        out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
        assert err is None, log.E("when setting parameter %s : %s"
                                  % (self.param_name, err))
        assert out == "Done", log.F("when setting parameter %s : %s"
                                    % (self.param_name, out))
        # Check parameter value on blackboard
        out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
        assert err is None, log.E("when setting parameter %s : %s"
                                  % (self.param_name, err))
        assert out == value, log.F(
            "BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
            % (self.param_name, value, out))
        # Check parameter value on filesystem
        assert self._filesystem_value() == hex_value, log.F(
            "FILESYSTEM : parameter update error")

    def _check_set_out_of_bounds(self, value):
        """Try to set out-of-range `value`; verify rejection and no change."""
        param_check = self._filesystem_value()
        out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
        assert err is None, log.E("when setting parameter %s : %s"
                                  % (self.param_name, err))
        assert out != "Done", log.F(
            "PFW : Error not detected when setting parameter %s out of bounds"
            % (self.param_name))
        # Check parameter value on filesystem
        assert self._filesystem_value() == param_check, log.F(
            "FILESYSTEM : Forbiden parameter change")

    def test_Nominal_Case(self):
        """
        Testing INT16_Max in nominal case = 50
        --------------------------------------
            Test case description :
            ~~~~~~~~~~~~~~~~~~~~~~~
                - set INT16_Max parameter in nominal case = 50
            Tested commands :
            ~~~~~~~~~~~~~~~~~
                - [setParameter] function
            Used commands :
            ~~~~~~~~~~~~~~~
                - [getParameter] function
            Expected result :
            ~~~~~~~~~~~~~~~~~
                - INT16_Max parameter set to 50
                - Blackboard and filesystem values checked
        """
        log.D(self.test_Nominal_Case.__doc__)
        log.I("INT16_Max parameter in nominal case = 50")
        self._check_set_in_bounds("50", "0x32")
        log.I("test OK")

    def test_TypeMin(self):
        """
        Testing INT16_Max minimal value = -32768
        ----------------------------------------
            Test case description :
            ~~~~~~~~~~~~~~~~~~~~~~~
                - set INT16_Max parameter min value = -32768
            Tested commands :
            ~~~~~~~~~~~~~~~~~
                - [setParameter] function
            Used commands :
            ~~~~~~~~~~~~~~~
                - [getParameter] function
            Expected result :
            ~~~~~~~~~~~~~~~~~
                - INT16_Max parameter set to -32768
                - Blackboard and filesystem values checked
        """
        log.D(self.test_TypeMin.__doc__)
        log.I("INT16_Max parameter min value = -32768")
        self._check_set_in_bounds("-32768", "0x8000")
        log.I("test OK")

    def test_TypeMin_Overflow(self):
        """
        Testing INT16_Max parameter value out of negative range
        -------------------------------------------------------
            Test case description :
            ~~~~~~~~~~~~~~~~~~~~~~~
                - set INT16_Max to -32769
            Tested commands :
            ~~~~~~~~~~~~~~~~~
                - [setParameter] function
            Used commands :
            ~~~~~~~~~~~~~~~
                - [getParameter] function
            Expected result :
            ~~~~~~~~~~~~~~~~~
                - error detected
                - INT16_Max parameter not updated
                - Blackboard and filesystem values checked
        """
        log.D(self.test_TypeMin_Overflow.__doc__)
        log.I("INT16_Max parameter min value out of bounds = -32769")
        self._check_set_out_of_bounds("-32769")
        log.I("test OK")

    def test_TypeMax(self):
        """
        Testing INT16_Max parameter maximum value
        -----------------------------------------
            Test case description :
            ~~~~~~~~~~~~~~~~~~~~~~~
                - set INT16_Max to 32767
            Tested commands :
            ~~~~~~~~~~~~~~~~~
                - [setParameter] function
            Used commands :
            ~~~~~~~~~~~~~~~
                - [getParameter] function
            Expected result :
            ~~~~~~~~~~~~~~~~~
                - INT16_Max parameter set to 32767
                - Blackboard and filesystem values checked
        """
        log.D(self.test_TypeMax.__doc__)
        log.I("INT16_Max parameter max value = 32767")
        self._check_set_in_bounds("32767", "0x7fff")
        log.I("test OK")

    def test_TypeMax_Overflow(self):
        """
        Testing INT16_Max parameter value out of positive range
        -------------------------------------------------------
            Test case description :
            ~~~~~~~~~~~~~~~~~~~~~~~
                - set INT16_Max to 32768
            Tested commands :
            ~~~~~~~~~~~~~~~~~
                - [setParameter] function
            Used commands :
            ~~~~~~~~~~~~~~~
                - [getParameter] function
            Expected result :
            ~~~~~~~~~~~~~~~~~
                - error detected
                - INT16_Max parameter not updated
                - Blackboard and filesystem values checked
        """
        log.D(self.test_TypeMax_Overflow.__doc__)
        log.I("INT16_Max parameter max value out of bounds = 32768")
        self._check_set_out_of_bounds("32768")
        log.I("test OK")
| |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The grammar class."""
import collections
import itertools
from GroundedScan import grammar
from GroundedScan import world
# Represents the non-terminal symbol `NT` with linked index i.
# `name` defaults to None and `index` to 0 (namedtuple defaults align right).
NTParent = collections.namedtuple(
    "Nonterminal", ("name", "index"), defaults=[None, 0])
# A terminal symbol is identified by its surface word alone.
TParent = collections.namedtuple("Terminal", "name")
# Define sub-class to override __str__ and __repr__ for easier debugging.
class Nonterminal(NTParent):
  """Nonterminal symbol; renders as "NAME (index)"."""

  def __str__(self):
    return f"{self.name} ({self.index})"

  __repr__ = __str__

  # Report the base grammar's type so isinstance checks elsewhere keep working.
  @property
  def __class__(self):
    return grammar.Nonterminal
# Define sub-class to override __str__ and __repr__ for easier debugging.
class Terminal(TParent):
  """Terminal symbol; renders as the quoted surface word."""

  def __str__(self):
    return f"'{self.name}'"

  __repr__ = __str__

  # Report the base grammar's type so isinstance checks elsewhere keep working.
  @property
  def __class__(self):
    return grammar.Terminal
# Nonterminal symbols shared by all grammar rules below.
ROOT = Nonterminal("ROOT")
VP = Nonterminal("VP")
VV_intransitive = Nonterminal("VV_intransitive")
VV_transitive = Nonterminal("VV_transitive")
RB = Nonterminal("RB")
DP = Nonterminal("DP")
NP = Nonterminal("NP")
NN = Nonterminal("NN")
JJ = Nonterminal("JJ")
PP = Nonterminal("PP")
LOC = Nonterminal("LOC")
# Semantic type for location prepositions (extends the gSCAN world types).
LOCATION = world.SemType("location")
# Lexical specification slots; any slot not supplied stays None.
fields = ("action", "is_transitive", "manner", "adjective_type", "noun",
          "location")
Weights = collections.namedtuple(
    "Weights", fields, defaults=[None] * len(fields))
class LogicalForm(world.LogicalForm):
  """Logical form class that supports an object (spatial) relation."""

  def split_terms_on_location(self):
    """Split terms into target-object terms vs. reference-object terms.

    The reference part is every term up to and including the first location
    term; the target part is everything after it. Without a location term
    all terms describe the target.
    """
    # bool(...) replaces the redundant `True if ... else False` form.
    is_loc = [bool(term.specs.location) for term in self.terms]
    split_index = is_loc.index(True) if True in is_loc else -1
    target_terms = self.terms[split_index + 1:]
    ref_terms = self.terms[:split_index + 1]
    return target_terms, ref_terms

  def to_predicate(self, return_ref_predicate=False):
    """Similar to the parent's function but allow returning ref predicate.

    Returns `(object_str, target_predicate)` and, when requested, the
    reference object's predicate as a third element.
    """
    assert len(self.variables) == 1
    target_predicate = {"noun": "", "size": "", "color": ""}
    ref_predicate = {"noun": "", "size": "", "color": "", "location": ""}
    target_terms, ref_terms = self.split_terms_on_location()
    for term in target_terms:
      term.to_predicate(target_predicate)
    for term in ref_terms:
      term.to_predicate(ref_predicate)
    # Human-readable target description, e.g. "red circle".
    object_str = ""
    if target_predicate["color"]:
      object_str += " " + target_predicate["color"]
    object_str += " " + target_predicate["noun"]
    object_str = object_str.strip()
    if return_ref_predicate:
      return object_str, target_predicate, ref_predicate
    else:
      return object_str, target_predicate
class Term(world.Term):
  """Term class that supports location in predicate."""

  def replace(self, var_to_find, replace_by_var):
    """Find a variable `var_to_find` in the arguments and replace it by `replace_by_var`."""
    # NOTE(review): constructed with `args=` but read via `self.arguments`;
    # presumably world.Term maps one onto the other — confirm in world.Term.
    return Term(
        function=self.function,
        args=tuple(replace_by_var if variable == var_to_find else variable
                   for variable in self.arguments),
        specs=self.specs,
        meta=self.meta)

  def to_predicate(self, predicate):
    # Location terms fill the dedicated "location" slot of the predicate;
    # all other terms are handled by the parent implementation.
    output = self.function
    if self.specs.location:
      predicate["location"] = output
    else:
      super().to_predicate(predicate)
class Rule(object):
  """Rule of form LHS -> RHS whose `instantiate` method defines its meaning.

  Similar to the original gSCAN grammar rule but supports indexing. See
  https://github.com/LauraRuis/groundedSCAN/blob/master/GroundedScan/grammar.py
  for more details.
  """

  def __init__(self, lhs, rhs, max_recursion=2):
    self.lhs = lhs
    self.rhs = rhs
    self.sem_type = None
    self.max_recursion = max_recursion

  def instantiate(self, *args, **kwargs):
    # Subclasses must supply the semantics of applying this rule.
    raise NotImplementedError()

  def __repr__(self):
    return "{} -> {}".format(self.lhs, " ".join(map(str, self.rhs)))
class LexicalRule(Rule):
  """Rule of form Non-Terminal -> Terminal (a single word of the lexicon)."""

  def __init__(self, lhs, word, specs, sem_type):
    super().__init__(lhs=lhs, rhs=[Terminal(word)], max_recursion=1)
    self.name = word
    self.sem_type = sem_type
    self.specs = specs

  def instantiate(self, meta=None, **kwargs):
    # The word's meaning: a single term applied to a fresh variable.
    variable = grammar.free_var(self.sem_type)
    term = Term(self.name, (variable,), specs=self.specs, meta=meta)
    return LogicalForm(variables=(variable,), terms=(term,))

  def __repr__(self):
    return f"{self.lhs.name} -> {self.rhs[0].name}"
class Root(Rule):
  """Start rule ROOT -> VP; the child's logical form passes through."""

  def __init__(self):
    super().__init__(lhs=ROOT, rhs=[VP])

  def instantiate(self, child, **kwargs):
    # Nothing to add at the root.
    return child
class VpWrapper(Rule):
  """VP -> VP RB: attaches an adverb to a verb phrase."""

  def __init__(self, max_recursion=0):
    super().__init__(lhs=VP, rhs=[VP, RB], max_recursion=max_recursion)

  def instantiate(self, rb, vp, unused_meta, **kwargs):
    # Bind the adverb to the verb phrase's head variable and merge.
    adverb = rb.bind(vp.head)
    assert adverb.variables[0] == vp.head
    return LogicalForm(
        variables=vp.variables + adverb.variables[1:],
        terms=vp.terms + adverb.terms)
class VpIntransitive(Rule):
  """VP -> VV_intransitive "to" DP."""

  def __init__(self):
    super().__init__(lhs=VP, rhs=[VV_intransitive, Terminal("to"), DP])

  def instantiate(self, vv, dp, meta, **kwargs):
    # The DP is linked to the event as its patient argument.
    patient = Term("patient", (vv.head, dp.head))
    meta["arguments"].append(dp)
    return LogicalForm(
        variables=vv.variables + dp.variables,
        terms=vv.terms + dp.terms + (patient,))
class VpTransitive(Rule):
  """VP -> VV_transitive DP."""

  def __init__(self):
    super().__init__(lhs=VP, rhs=[VV_transitive, DP])

  def instantiate(self, vv, dp, meta, **kwargs):
    # Same semantics as the intransitive case: DP is the event's patient.
    patient = Term("patient", (vv.head, dp.head))
    meta["arguments"].append(dp)
    return LogicalForm(
        variables=vv.variables + dp.variables,
        terms=vv.terms + dp.terms + (patient,))
class Dp(Rule):
  """DP -> "a" NP: the determiner contributes no semantics of its own."""

  def __init__(self, l_ind=0, r_inds=(0, 0)):
    # Only the NP index (r_inds[1]) is used on the right-hand side.
    super().__init__(
        lhs=Nonterminal("DP", l_ind),
        rhs=[Terminal("a"), Nonterminal("NP", r_inds[1])])

  def instantiate(self, noun_p, **kwargs):
    return noun_p
class NpWrapper(Rule):
  """NP -> JJ NP: attaches an adjective to a noun phrase."""

  def __init__(self, max_recursion=0, l_ind=0, r_inds=(0, 0)):
    super().__init__(
        lhs=Nonterminal("NP", l_ind),
        rhs=[Nonterminal("JJ", r_inds[0]),
             Nonterminal("NP", r_inds[1])],
        max_recursion=max_recursion)

  def instantiate(self, jj, noun_p, unused_meta=None, **kwargs):
    # Bind the adjective to the noun phrase's head variable and merge.
    adjective = jj.bind(noun_p.head)
    assert adjective.variables[0] == noun_p.head
    return LogicalForm(
        variables=noun_p.variables + adjective.variables[1:],
        terms=noun_p.terms + adjective.terms)
class Np(Rule):
  """NP -> NN: a bare noun forms a noun phrase."""

  def __init__(self, l_ind=0, r_inds=(0,)):
    super().__init__(
        lhs=Nonterminal("NP", l_ind), rhs=[Nonterminal("NN", r_inds[0])])

  def instantiate(self, nn, **kwargs):
    # The noun's logical form is the whole phrase's logical form.
    return nn
class NpPpWrapper(Rule):
  """NP -> NP PP: attaches a spatial-relation PP to a noun phrase."""

  def __init__(self, max_recursion=0):
    super().__init__(lhs=NP, rhs=[NP, PP], max_recursion=max_recursion)

  def instantiate(self, noun_p, pp, unused_meta=None, **kwargs):
    # Bind the noun phrase to the PP's head variable and merge.
    bound_np = noun_p.bind(pp.head)
    assert bound_np.variables[0] == pp.head
    return LogicalForm(
        variables=pp.variables + bound_np.variables[1:],
        terms=pp.terms + bound_np.terms)
class PpWrapper(Rule):
  """PP -> LOC DP: a location preposition plus its object DP."""

  def __init__(self, max_recursion=0, l_ind=0, r_inds=(0, 0)):
    super().__init__(
        lhs=Nonterminal("PP", l_ind),
        rhs=[Nonterminal("LOC", r_inds[0]),
             Nonterminal("DP", r_inds[1])],
        max_recursion=max_recursion)

  def instantiate(self, loc, dp, unused_meta=None, **kwargs):
    # Bind the preposition to the DP's head variable and merge.
    bound_loc = loc.bind(dp.head)
    assert bound_loc.variables[0] == dp.head
    return LogicalForm(
        variables=dp.variables + bound_loc.variables[1:],
        terms=dp.terms + bound_loc.terms)
class Derivation(grammar.Derivation):
  """Holds a constituency tree that makes up a sentence."""

  # Report the base grammar's type so isinstance checks elsewhere keep working.
  @property
  def __class__(self):
    return grammar.Derivation

  @classmethod
  def from_rules(cls, rules, symbol=ROOT, lexicon=None):
    """Recursively form a derivation from a rule list.

    `rules` supplies non-lexical rules (consumed from the end); `lexicon`
    maps each symbol to its remaining lexical rules.
    """
    # If the current symbol is a Terminal, close current branch and return.
    if isinstance(symbol, grammar.Terminal):
      return symbol
    # Direct membership test instead of `symbol not in lexicon.keys()`.
    if symbol not in lexicon:
      next_rule = rules.pop()
    else:
      next_rule = lexicon[symbol].pop()
    return Derivation(
        next_rule,
        tuple(
            cls.from_rules(rules, symbol=next_symbol, lexicon=lexicon)
            for next_symbol in next_rule.rhs))

  def to_rules(self, rules, lexicon):
    """In-order traversal of the tree, collecting rules and the lexicon.

    Inverse of `from_rules`: prepends non-lexical rules to `rules` and
    prepends lexical rules to the per-symbol lists in `lexicon`.
    """
    if isinstance(self.rule, LexicalRule):
      if self.rule.lhs not in lexicon:
        lexicon[self.rule.lhs] = [self.rule]
      else:
        lexicon[self.rule.lhs] = [self.rule] + lexicon[self.rule.lhs]
    else:
      rules.insert(0, self.rule)
    for child in self.children:
      if isinstance(child, Derivation):
        child.to_rules(rules, lexicon)
      else:
        # Terminal leaf: record it directly.
        lexicon[child] = [child]
class RelationGrammar(grammar.Grammar):
  """The grammar class that supports new rules."""

  # Structural rules shared by every grammar variant.
  BASE_RULES = [Root(), Dp(), Np()]
  # Extra rules introducing spatial-relation PPs; indexed NT copies (l_ind /
  # r_inds) keep the relation NP distinct from the plain NP.
  RELATION_RULES = [
      NpPpWrapper(max_recursion=1),
      PpWrapper(r_inds=[0, 1]),
      Dp(l_ind=1, r_inds=[0, 1]),
      NpWrapper(max_recursion=2, l_ind=1, r_inds=[0, 1]),
      Np(l_ind=1, r_inds=[0])
  ]
  # Named rule sets selectable via the `type_grammar` constructor argument.
  RULES = {}
  RULES["simple_trans"] = BASE_RULES.copy() + [
      VpTransitive(), NpWrapper(max_recursion=1)
  ]
  RULES["simple_intrans"] = BASE_RULES.copy() + [
      VpIntransitive(), NpWrapper(max_recursion=1)
  ]
  RULES["normal"] = BASE_RULES.copy() + [
      VpIntransitive(),
      VpTransitive(),
      NpWrapper(max_recursion=2)
  ]
  RULES["adverb"] = RULES["normal"].copy() + [VpWrapper()]
  # Add rules support spatial relations.
  for rule_name in set(RULES):
    RULES[f"relation_{rule_name}"] = RULES[rule_name].copy() + RELATION_RULES
  def lexical_rules(self, verbs_intrans, verbs_trans, adverbs, nouns,
                    color_adjectives, size_adjectives, location_preps):
    """Instantiate the lexical rules using new LexicalRule class.

    Builds one LexicalRule per word. Which lists are used depends on
    `self.type_grammar`: transitive verbs are skipped for "simple",
    adverbs only added for "*adverb"/"full", and location prepositions
    only for "relation*" grammars.
    """
    assert size_adjectives or color_adjectives, (
        "Please specify words for at least one of size_adjectives or "
        "color_adjectives.")
    all_rules = []
    for verb in verbs_intrans:
      vv_intrans_rule = LexicalRule(
          lhs=VV_intransitive,
          word=verb,
          sem_type=world.EVENT,
          specs=Weights(action=verb, is_transitive=False))
      all_rules.append(vv_intrans_rule)
    if self.type_grammar != "simple":
      for verb in verbs_trans:
        vv_trans_rule = LexicalRule(
            lhs=VV_transitive,
            word=verb,
            sem_type=world.EVENT,
            specs=Weights(action=verb, is_transitive=True))
        all_rules.append(vv_trans_rule)
    if self.type_grammar.endswith("adverb") or self.type_grammar == "full":
      for word in adverbs:
        rb_rule = LexicalRule(
            lhs=RB, word=word, sem_type=world.EVENT, specs=Weights(manner=word))
        all_rules.append(rb_rule)
    for word in nouns:
      nn_rule = LexicalRule(
          lhs=NN, word=word, sem_type=world.ENTITY, specs=Weights(noun=word))
      all_rules.append(nn_rule)
    if color_adjectives:
      for word in color_adjectives:
        # NOTE(review): adjective specs record only the type; the word itself
        # is carried by the rule's `word=`/name.
        jj_rule = LexicalRule(
            lhs=JJ,
            word=word,
            sem_type=world.ENTITY,
            specs=Weights(adjective_type=world.COLOR))
        all_rules.append(jj_rule)
    if size_adjectives:
      for word in size_adjectives:
        jj_rule = LexicalRule(
            lhs=JJ,
            word=word,
            sem_type=world.ENTITY,
            specs=Weights(adjective_type=world.SIZE))
        all_rules.append(jj_rule)
    if self.type_grammar.startswith("relation"):
      for word in location_preps:
        loc_rule = LexicalRule(
            lhs=LOC, word=word, sem_type=LOCATION, specs=Weights(location=word))
        all_rules.append(loc_rule)
    return all_rules
  def __init__(self, vocabulary, max_recursion=1, type_grammar="normal"):
    """Defines a grammar of NT -> NT rules and NT -> T rules.

    Args:
      vocabulary: supplies the word lists (verbs, nouns, adjectives, ...).
      max_recursion: recursion bound used during template generation.
      type_grammar: key into RULES selecting the structural rule set.

    Raises:
      ValueError: for an unknown `type_grammar` or a missing verb list.
    """
    if type_grammar not in self.RULES:
      raise ValueError(f"Specified unsupported type grammar {type_grammar}")
    self.type_grammar = type_grammar
    if (type_grammar == "simple_intrans" and
        not vocabulary.get_intransitive_verbs()):
      raise ValueError("Please specify intransitive verbs.")
    elif (type_grammar == "simple_trans" and
          not vocabulary.get_transitive_verbs()):
      raise ValueError("Please specify transitive verbs.")
    # Full rule list: structural rules for this grammar type + lexical rules.
    self.rule_list = self.RULES[type_grammar] + self.lexical_rules(
        vocabulary.get_intransitive_verbs(), vocabulary.get_transitive_verbs(),
        vocabulary.get_adverbs(), vocabulary.get_nouns(),
        vocabulary.get_color_adjectives(), vocabulary.get_size_adjectives(),
        vocabulary.get_location_preps())
    nonterminals = {rule.lhs for rule in self.rule_list}
    # Index rules by their left-hand side for expansion during generation.
    self.rules = {nonterminal: [] for nonterminal in nonterminals}
    self.nonterminals = {nt.name: nt for nt in nonterminals}
    self.terminals = {}
    self.vocabulary = vocabulary
    self.rule_str_to_rules = {}
    for rule in self.rule_list:
      self.rules[rule.lhs].append(rule)
      self.rule_str_to_rules[str(rule)] = rule
    # Symbols that have at least one non-lexical expansion.
    self.expandables = set(
        rule.lhs
        for rule in self.rule_list
        if not isinstance(rule, LexicalRule))
    # Word categories, used e.g. to split adjacent identical symbols.
    self.categories = {
        "manner": set(vocabulary.get_adverbs()),
        "shape": set(vocabulary.get_nouns()),
        "color": set(vocabulary.get_color_adjectives()),
        "size": set(vocabulary.get_size_adjectives()),
        "location": set(vocabulary.get_location_preps()),
    }
    self.word_to_category = {}
    for category, words in self.categories.items():
      for word in words:
        self.word_to_category[word] = category
    self.max_recursion = max_recursion
    self.all_templates = []
    self.all_derivations = {}
    self.command_statistics = self.empty_command_statistics()
@staticmethod
def empty_command_statistics():
return {
VV_intransitive: {},
VV_transitive: {},
NN: {},
JJ: {},
RB: {},
LOC: {}
}
  def generate_all_commands(self, exclude_templates=None):
    """Generate all commands but allow excluding unused templates.

    Fills `self.all_templates` with (template, rules) pairs and
    `self.all_derivations` with the derivations produced per template.
    """
    # Generate all possible templates from the grammar.
    initial_template = grammar.Template()
    initial_template.add_value(value=ROOT, expandable=True)
    self.generate_all(
        current_template=initial_template,
        all_templates=self.all_templates,
        rule_use_counter={})
    # Remove duplicate templates due to ambiguous PP attachment.
    self.remove_duplicate_templates()
    if exclude_templates:
      self.remove_exclude_templates(exclude_templates)
    # For each template, form all possible commands
    # by combining it with the lexicon.
    for i, (derivation_template,
            derivation_rules) in enumerate(self.all_templates):
      derivations = self.form_commands_from_template(derivation_template,
                                                     derivation_rules)
      self.all_derivations[i] = derivations
def form_commands_from_template(self, derivation_template, derivation_rules):
    """Similar to parent's function but use new Derivation class.

    Expands a single template into every concrete command it licenses and
    returns the corresponding Derivation objects. Also updates
    self.command_statistics with per-word usage counts.
    """
    # Replace each lexical rule with the possible words from the lexicon.
    replaced_template = []
    previous_symbol = None
    lexicon = {}
    for symbol in derivation_template:
        if isinstance(symbol, grammar.Nonterminal):
            # pytype: disable=attribute-error
            possible_words = [s.name for s in self.rules[symbol]]
            for rule in self.rules[symbol]:
                lexicon[rule.name] = rule
            if previous_symbol == symbol:
                # Two adjacent occurrences of the same nonterminal: split the
                # word set by semantic category so the same word cannot fill
                # both slots (e.g. two different adjectives).
                previous_words = replaced_template.pop()
                first_words, second_words = self.split_on_category(previous_words)
                replaced_template.append(first_words)
                replaced_template.append(second_words)
            else:
                replaced_template.append(possible_words)
        else:
            # Terminal symbol: exactly one word choice.
            lexicon[symbol.name] = symbol
            replaced_template.append([symbol.name])
        previous_symbol = symbol

    # Generate all possible commands from the templates.
    all_commands = list(itertools.product(*replaced_template))
    all_derivations = []
    for command in all_commands:
        # Map each template symbol to the word(s) chosen for it; prepending
        # keeps the word order aligned with repeated symbols.
        command_lexicon = {}
        for word, symbol in zip(command, derivation_template):
            if symbol not in command_lexicon:
                command_lexicon[symbol] = [lexicon[word]]
            else:
                command_lexicon[symbol] = [lexicon[word]] + command_lexicon[symbol]
            if isinstance(symbol, grammar.Nonterminal):
                # Track how often each word fills this category.
                if word not in self.command_statistics[symbol].keys():
                    self.command_statistics[symbol][word] = 1
                else:
                    self.command_statistics[symbol][word] += 1
        derivation = Derivation.from_rules(
            derivation_rules.copy(), symbol=ROOT, lexicon=command_lexicon)
        # Sanity check: the derivation must linearize back to the command.
        if " ".join(derivation.words()) != " ".join(command):
            raise ValueError("Derivation and command not the same.")
        # pytype: enable=attribute-error
        all_derivations.append(derivation)
    return all_derivations
def remove_duplicate_templates(self):
    """Remove duplicate templates from the grammar."""
    # Templates may be unhashable, so membership is tracked in a list.
    seen = []
    deduplicated = []
    for template, rules in self.all_templates:
        if template in seen:
            continue
        seen.append(template)
        deduplicated.append((template, rules))
    self.all_templates = deduplicated
def remove_exclude_templates(self, exclude_templates):
    """Remove specified exclude templates from the grammar.

    :param exclude_templates: iterable of (template, rules) pairs to drop.
    """
    # Compare rules by their string form so unhashable rule lists can match.
    excluded = [(pair[0], str(pair[1])) for pair in exclude_templates]
    self.all_templates = [
        (template, rules)
        for template, rules in self.all_templates
        if (template, str(rules)) not in excluded
    ]
| |
from functools import reduce, partial
import inspect
import operator
__all__ = ('identity', 'thread_first', 'thread_last', 'memoize', 'compose',
'pipe', 'complement', 'juxt', 'do', 'curry')
def identity(x):
    """ Return the argument unchanged (the identity function) """
    return x
def thread_first(val, *forms):
    """ Thread ``val`` through ``forms``, inserting it as the first argument

    Each form is either a callable, applied directly, or a ``(func, *args)``
    tuple, called as ``func(current, *args)``.

    >>> def double(x): return 2*x
    >>> def inc(x): return x + 1
    >>> thread_first(1, inc, double)
    4

    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_first(1, (add, 4), (pow, 2))  # pow(add(1, 4), 2)
    25

    So in general
        thread_first(x, f, (g, y, z))
    expands to
        g(f(x), y, z)

    See Also:
        thread_last
    """
    current = val
    for form in forms:
        if callable(form):
            current = form(current)
        elif isinstance(form, tuple):
            func = form[0]
            current = func(current, *form[1:])
        else:
            # Mirror the original behavior: an unrecognized form yields None.
            current = None
    return current
def thread_last(val, *forms):
    """ Thread ``val`` through ``forms``, inserting it as the last argument

    Each form is either a callable, applied directly, or a ``(func, *args)``
    tuple, called as ``func(*args, current)``.

    >>> def double(x): return 2*x
    >>> def inc(x): return x + 1
    >>> thread_last(1, inc, double)
    4

    >>> def add(x, y): return x + y
    >>> def pow(x, y): return x**y
    >>> thread_last(1, (add, 4), (pow, 2))  # pow(2, add(4, 1))
    32

    So in general
        thread_last(x, f, (g, y, z))
    expands to
        g(y, z, f(x))

    >>> def iseven(x):
    ...     return x % 2 == 0
    >>> list(thread_last([1, 2, 3], (map, inc), (filter, iseven)))
    [2, 4]

    See Also:
        thread_first
    """
    current = val
    for form in forms:
        if callable(form):
            current = form(current)
        elif isinstance(form, tuple):
            func, extra = form[0], form[1:]
            current = func(*(extra + (current,)))
        else:
            # Mirror the original behavior: an unrecognized form yields None.
            current = None
    return current
def _num_required_args(func):
""" Number of args for func
>>> def foo(a, b, c=None):
... return a + b + c
>>> _num_required_args(foo)
2
>>> def bar(*args):
... return sum(args)
>>> print(_num_required_args(bar))
None
"""
try:
spec = inspect.getargspec(func)
if spec.varargs:
return None
num_defaults = len(spec.defaults) if spec.defaults else 0
return len(spec.args) - num_defaults
except TypeError:
return None
class curry(object):
    """ Curry a callable function

    Enables partial application of arguments through calling a function with an
    incomplete set of arguments.

    >>> def mul(x, y):
    ...     return x * y
    >>> mul = curry(mul)

    >>> double = mul(2)
    >>> double(10)
    20

    Also supports keyword arguments

    >>> @curry                  # Can use curry as a decorator
    ... def f(x, y, a=10):
    ...     return a * (x + y)

    >>> add = f(a=1)
    >>> add(2, 3)
    5

    See Also:
        toolz.curried - namespace of curried functions
        http://toolz.readthedocs.org/en/latest/curry.html
    """
    def __init__(self, func, *args, **kwargs):
        # Accumulate partially-applied positional and keyword arguments on
        # the instance; __call__ may add more and re-curry.
        if not callable(func):
            raise TypeError("Input must be callable")

        self.func = func
        self.args = args
        # None rather than {} when no keywords were supplied.
        self.keywords = kwargs if kwargs else None
        self.__doc__ = self.func.__doc__
        try:
            # NOTE(review): ``func_name`` is the Python 2 attribute spelling;
            # on Python 3 this branch always takes the AttributeError path —
            # presumably kept for 2/3 compatibility, confirm before removing.
            self.func_name = self.func.func_name
        except AttributeError:
            pass

    def __str__(self):
        return str(self.func)

    def __repr__(self):
        return repr(self.func)

    def __call__(self, *args, **_kwargs):
        # Merge stored arguments with the new ones for this attempt.
        args = self.args + args
        if _kwargs:
            kwargs = {}
            if self.keywords:
                kwargs.update(self.keywords)
            kwargs.update(_kwargs)
        elif self.keywords:
            kwargs = self.keywords
        else:
            kwargs = {}

        try:
            return self.func(*args, **kwargs)
        except TypeError:
            # The call may have failed because arguments are still missing;
            # use the arity to distinguish that from a genuine TypeError
            # raised inside func.
            required_args = _num_required_args(self.func)

            # If there was a genuine TypeError
            if required_args is not None and len(args) >= required_args:
                raise

            # If we only need one more argument
            if (required_args is not None and required_args - len(args) == 1):
                if kwargs:
                    return partial(self.func, *args, **kwargs)
                else:
                    return partial(self.func, *args)

            # Otherwise re-curry with everything accumulated so far.
            return curry(self.func, *args, **kwargs)
@curry
def memoize(func, cache=None, key=None):
    """ Cache a function's result for speedy future evaluation

    Considerations:
        Trades memory for speed.
        Only use on pure functions.

    >>> def add(x, y):  return x + y
    >>> add = memoize(add)

    Or use as a decorator

    >>> @memoize
    ... def add(x, y):
    ...     return x + y

    Use the ``cache`` keyword to provide a dict-like object as an initial cache

    >>> @memoize(cache={(1, 2): 3})
    ... def add(x, y):
    ...     return x + y

    Note that the above works as a decorator because ``memoize`` is curried.

    It is also possible to provide a ``key(args, kwargs)`` function that
    calculates keys used for the cache, which receives an ``args`` tuple and
    ``kwargs`` dict as input, and must return a hashable value.  However,
    the default key function should be sufficient most of the time.

    >>> # Use key function that ignores extraneous keyword arguments
    >>> @memoize(key=lambda args, kwargs: args)
    ... def add(x, y, verbose=False):
    ...     if verbose:
    ...         print('Calculating %s + %s' % (x, y))
    ...     return x + y
    """
    if cache is None:
        cache = {}

    try:
        # inspect.getargspec was removed in Python 3.11; getfullargspec is
        # the supported replacement (its ``varkw`` field replaces the old
        # ``keywords`` field).  It also tolerates keyword-only arguments,
        # which getargspec rejected with a ValueError.
        spec = inspect.getfullargspec(func)
        may_have_kwargs = bool(not spec or spec.varkw or spec.defaults
                               or spec.kwonlyargs)
        # Is unary function (single arg, no variadic argument or keywords)?
        is_unary = (spec and spec.varargs is None and not may_have_kwargs
                    and len(spec.args) == 1)
    except TypeError:
        # Not introspectable: be conservative and key on args and kwargs.
        may_have_kwargs = True
        is_unary = False

    def memof(*args, **kwargs):
        try:
            if key is not None:
                k = key(args, kwargs)
            elif is_unary:
                # Fast path: the single positional argument is the key.
                k = args[0]
            elif may_have_kwargs:
                k = (args or None,
                     frozenset(kwargs.items()) if kwargs else None)
            else:
                k = args

            in_cache = k in cache
        except TypeError:
            raise TypeError("Arguments to memoized function must be hashable")

        if in_cache:
            return cache[k]
        else:
            result = func(*args, **kwargs)
            cache[k] = result
            return result

    try:
        memof.__name__ = func.__name__
    except AttributeError:
        pass
    memof.__doc__ = func.__doc__
    return memof
class Compose(object):
    """ A composition of functions

    Applies its functions right to left: the last function receives the
    call arguments, and each remaining function consumes the previous
    result.

    See Also:
        compose
    """
    __slots__ = ['funcs']

    def __init__(self, *funcs):
        self.funcs = funcs

    def __call__(self, *args, **kwargs):
        result = self.funcs[-1](*args, **kwargs)
        for func in reversed(self.funcs[:-1]):
            result = func(result)
        return result

    def __getstate__(self):
        # Support pickling despite __slots__.
        return self.funcs

    def __setstate__(self, state):
        self.funcs = tuple(state)
def compose(*funcs):
    """ Compose functions to operate in series.

    Returns a function that applies other functions in sequence.

    Functions are applied from right to left so that
    ``compose(f, g, h)(x, y)`` is the same as ``f(g(h(x, y)))``.

    If no arguments are provided, the identity function (f(x) = x) is returned.

    >>> inc = lambda i: i + 1
    >>> compose(str, inc)(3)
    '4'

    See Also:
        pipe
    """
    count = len(funcs)
    if count == 0:
        return identity
    if count == 1:
        # A single function composes to itself.
        return funcs[0]
    return Compose(*funcs)
def pipe(data, *funcs):
    """ Pipe a value through a sequence of functions

    I.e. ``pipe(data, f, g, h)`` is equivalent to ``h(g(f(data)))``

    We think of the value as progressing through a pipe of several
    transformations, much like pipes in UNIX

    ``$ cat data | f | g | h``

    >>> double = lambda i: 2 * i
    >>> pipe(3, double, str)
    '6'

    See Also:
        compose
        thread_first
        thread_last
    """
    # Fold the functions over the data, left to right.
    return reduce(lambda acc, func: func(acc), funcs, data)
def complement(func):
    """ Convert a predicate function to its logical complement.

    In other words, return a function that, for inputs that normally
    yield True, yields False, and vice-versa.

    >>> def iseven(n): return n % 2 == 0
    >>> isodd = complement(iseven)
    >>> iseven(2)
    True
    >>> isodd(2)
    False
    """
    # operator.not_ negates truthiness, so the composed function always
    # returns a plain bool even when func returns a non-bool truthy value.
    return compose(operator.not_, func)
def juxt(*funcs):
    """
    Creates a function that calls several functions with the same arguments.

    Takes several functions and returns a function that applies its arguments
    to each of those functions then returns a sequence of the results.

    Name comes from juxtaposition: the fact of two things being seen or placed
    close together with contrasting effect.

    >>> inc = lambda x: x + 1
    >>> double = lambda x: x * 2
    >>> list(juxt(inc, double)(10))
    [11, 20]
    >>> list(juxt([inc, double])(10))
    [11, 20]
    """
    # A single non-callable argument is treated as an iterable of functions.
    if len(funcs) == 1 and not callable(funcs[0]):
        funcs = tuple(funcs[0])

    def juxt_inner(*args, **kwargs):
        # Lazily yield each function's result, matching the original
        # generator-expression behavior.
        for func in funcs:
            yield func(*args, **kwargs)

    return juxt_inner
def do(func, x):
    """ Runs ``func`` on ``x``, returns ``x``

    Because the results of ``func`` are not returned, only the side
    effects of ``func`` are relevant.

    Logging functions can be made by composing ``do`` with a storage function
    like ``list.append`` or ``file.write``

    >>> from toolz import compose
    >>> from toolz.curried import do

    >>> log = []
    >>> inc = lambda x: x + 1
    >>> inc = compose(inc, do(log.append))
    >>> inc(1)
    2
    >>> inc(11)
    12
    >>> log
    [1, 11]
    """
    # Call purely for its side effect, then pass the value through untouched.
    func(x)
    return x
| |
"""Functions for builtin CherryPy tools."""
import logging
import re
import cherrypy
from cherrypy._cpcompat import basestring, md5, set, unicodestr
from cherrypy.lib import httputil as _httputil
from cherrypy.lib import is_iterator
# Conditional HTTP request support #
def validate_etags(autotags=False, debug=False):
    """Validate the current ETag against If-Match, If-None-Match headers.

    If autotags is True, an ETag response-header value will be provided
    from an MD5 hash of the response body (unless some other code has
    already provided an ETag header). If False (the default), the ETag
    will not be automatic.

    WARNING: the autotags feature is not designed for URL's which allow
    methods other than GET. For example, if a POST to the same URL returns
    no content, the automatic ETag will be incorrect, breaking a fundamental
    use for entity tags in a possibly destructive fashion. Likewise, if you
    raise 304 Not Modified, the response body will be empty, the ETag hash
    will be incorrect, and your application will break.

    See :rfc:`2616` Section 14.24.
    """
    response = cherrypy.serving.response

    # Guard against being run twice.
    if hasattr(response, "ETag"):
        return

    status, reason, msg = _httputil.valid_status(response.status)

    etag = response.headers.get('ETag')

    # Automatic ETag generation. See warning in docstring.
    if etag:
        if debug:
            cherrypy.log('ETag already set: %s' % etag, 'TOOLS.ETAGS')
    elif not autotags:
        if debug:
            cherrypy.log('Autotags off', 'TOOLS.ETAGS')
    elif status != 200:
        # Only autotag successful 200 responses; other statuses carry
        # transient or empty bodies.
        if debug:
            cherrypy.log('Status not 200', 'TOOLS.ETAGS')
    else:
        # NOTE(review): collapse_body() buffers the entire response body in
        # order to hash it — assumes the body is not a one-shot stream;
        # confirm for streaming handlers.
        etag = response.collapse_body()
        etag = '"%s"' % md5(etag).hexdigest()
        if debug:
            cherrypy.log('Setting ETag: %s' % etag, 'TOOLS.ETAGS')
        response.headers['ETag'] = etag

    # Mark the response so a second pass through this tool is a no-op.
    response.ETag = etag

    # "If the request would, without the If-Match header field, result in
    # anything other than a 2xx or 412 status, then the If-Match header
    # MUST be ignored."
    if debug:
        cherrypy.log('Status: %s' % status, 'TOOLS.ETAGS')
    if status >= 200 and status <= 299:
        request = cherrypy.serving.request

        conditions = request.headers.elements('If-Match') or []
        conditions = [str(x) for x in conditions]
        if debug:
            cherrypy.log('If-Match conditions: %s' % repr(conditions),
                         'TOOLS.ETAGS')
        if conditions and not (conditions == ["*"] or etag in conditions):
            raise cherrypy.HTTPError(412, "If-Match failed: ETag %r did "
                                     "not match %r" % (etag, conditions))

        conditions = request.headers.elements('If-None-Match') or []
        conditions = [str(x) for x in conditions]
        if debug:
            cherrypy.log('If-None-Match conditions: %s' % repr(conditions),
                         'TOOLS.ETAGS')
        if conditions == ["*"] or etag in conditions:
            if debug:
                cherrypy.log('request.method: %s' %
                             request.method, 'TOOLS.ETAGS')
            if request.method in ("GET", "HEAD"):
                # Safe methods get 304 Not Modified per RFC 2616 14.26.
                raise cherrypy.HTTPRedirect([], 304)
            else:
                raise cherrypy.HTTPError(412, "If-None-Match failed: ETag %r "
                                         "matched %r" % (etag, conditions))
def validate_since():
    """Validate the current Last-Modified against If-Modified-Since headers.

    If no code has set the Last-Modified response header, then no validation
    will be performed.
    """
    response = cherrypy.serving.response
    lastmod = response.headers.get('Last-Modified')
    if not lastmod:
        # Nothing to validate against.
        return

    status, reason, msg = _httputil.valid_status(response.status)
    request = cherrypy.serving.request

    unmodified_since = request.headers.get('If-Unmodified-Since')
    if unmodified_since and unmodified_since != lastmod:
        if (200 <= status <= 299) or status == 412:
            raise cherrypy.HTTPError(412)

    modified_since = request.headers.get('If-Modified-Since')
    if modified_since and modified_since == lastmod:
        if (200 <= status <= 299) or status == 304:
            if request.method in ("GET", "HEAD"):
                raise cherrypy.HTTPRedirect([], 304)
            else:
                raise cherrypy.HTTPError(412)
# Tool code #
def allow(methods=None, debug=False):
    """Raise 405 if request.method not in methods (default ['GET', 'HEAD']).

    The given methods are case-insensitive, and may be in any order.
    If only one method is allowed, you may supply a single string;
    if more than one, supply a list of strings.

    Regardless of whether the current method is allowed or not, this
    also emits an 'Allow' response header, containing the given methods.
    """
    if not isinstance(methods, (tuple, list)):
        methods = [methods]
    methods = [m.upper() for m in methods if m]
    if not methods:
        methods = ['GET', 'HEAD']
    elif 'GET' in methods and 'HEAD' not in methods:
        # HEAD must be permitted whenever GET is.
        methods.append('HEAD')

    cherrypy.response.headers['Allow'] = ', '.join(methods)
    allowed = cherrypy.request.method in methods
    if debug:
        message = ('request.method %r in methods %r' if allowed
                   else 'request.method %r not in methods %r')
        cherrypy.log(message % (cherrypy.request.method, methods),
                     'TOOLS.ALLOW')
    if not allowed:
        raise cherrypy.HTTPError(405)
def proxy(base=None, local='X-Forwarded-Host', remote='X-Forwarded-For',
          scheme='X-Forwarded-Proto', debug=False):
    """Change the base URL (scheme://host[:port][/path]).

    For running a CP server behind Apache, lighttpd, or other HTTP server.

    For Apache and lighttpd, you should leave the 'local' argument at the
    default value of 'X-Forwarded-Host'. For Squid, you probably want to set
    tools.proxy.local = 'Origin'.

    If you want the new request.base to include path info (not just the host),
    you must explicitly set base to the full base path, and ALSO set 'local'
    to '', so that the X-Forwarded-Host request header (which never includes
    path info) does not override it. Regardless, the value for 'base' MUST
    NOT end in a slash.

    cherrypy.request.remote.ip (the IP address of the client) will be
    rewritten if the header specified by the 'remote' arg is valid.
    By default, 'remote' is set to 'X-Forwarded-For'. If you do not
    want to rewrite remote.ip, set the 'remote' arg to an empty string.
    """
    request = cherrypy.serving.request

    if scheme:
        # Recover the original scheme from the configured forwarded header.
        s = request.headers.get(scheme, None)
        if debug:
            cherrypy.log('Testing scheme %r:%r' % (scheme, s), 'TOOLS.PROXY')
        if s == 'on' and 'ssl' in scheme.lower():
            # This handles e.g. webfaction's 'X-Forwarded-Ssl: on' header
            scheme = 'https'
        else:
            # This is for lighttpd/pound/Mongrel's 'X-Forwarded-Proto: https'
            scheme = s
    if not scheme:
        # No forwarded scheme; keep the scheme of the current request.base.
        scheme = request.base[:request.base.find("://")]

    if local:
        lbase = request.headers.get(local, None)
        if debug:
            cherrypy.log('Testing local %r:%r' % (local, lbase), 'TOOLS.PROXY')
        if lbase is not None:
            # Chained proxies append comma-separated hosts; use the first.
            base = lbase.split(',')[0]
    if not base:
        # No forwarded host available; synthesize one from the local port.
        port = request.local.port
        if port == 80:
            base = '127.0.0.1'
        else:
            base = '127.0.0.1:%s' % port

    if base.find("://") == -1:
        # add http:// or https:// if needed
        base = scheme + "://" + base

    request.base = base

    if remote:
        xff = request.headers.get(remote)
        if debug:
            cherrypy.log('Testing remote %r:%r' % (remote, xff), 'TOOLS.PROXY')
        if xff:
            if remote == 'X-Forwarded-For':
                # Bug #1268: keep only the first (client) address in the chain.
                xff = xff.split(',')[0].strip()
            request.remote.ip = xff
def ignore_headers(headers=('Range',), debug=False):
    """Delete request headers whose field names are included in 'headers'.

    This is a useful tool for working behind certain HTTP servers;
    for example, Apache duplicates the work that CP does for 'Range'
    headers, and will doubly-truncate the response.
    """
    request = cherrypy.serving.request
    for header_name in headers:
        if header_name not in request.headers:
            continue
        if debug:
            cherrypy.log('Ignoring request header %r' % header_name,
                         'TOOLS.IGNORE_HEADERS')
        del request.headers[header_name]
def response_headers(headers=None, debug=False):
    """Set headers on the response.

    :param headers: iterable of (name, value) pairs; None means no-op.
    """
    if debug:
        cherrypy.log('Setting response headers: %s' % repr(headers),
                     'TOOLS.RESPONSE_HEADERS')
    resp_headers = cherrypy.serving.response.headers
    for name, value in (headers or []):
        resp_headers[name] = value

# Run even when an earlier hook raised, so error responses get headers too.
response_headers.failsafe = True
def referer(pattern, accept=True, accept_missing=False, error=403,
            message='Forbidden Referer header.', debug=False):
    """Raise HTTPError if Referer header does/does not match the given pattern.

    pattern
        A regular expression pattern to test against the Referer.

    accept
        If True, the Referer must match the pattern; if False,
        the Referer must NOT match the pattern.

    accept_missing
        If True, permit requests with no Referer header.

    error
        The HTTP error code to return to the client on failure.

    message
        A string to include in the response body on failure.
    """
    try:
        ref = cherrypy.serving.request.headers['Referer']
    except KeyError:
        if debug:
            cherrypy.log('No Referer header', 'TOOLS.REFERER')
        if accept_missing:
            return
    else:
        match = bool(re.match(pattern, ref))
        if debug:
            cherrypy.log('Referer %r matches %r' % (ref, pattern),
                         'TOOLS.REFERER')
        # Pass when the match outcome agrees with the 'accept' policy.
        if accept == match:
            return

    raise cherrypy.HTTPError(error, message)
class SessionAuth(object):
    """Assert that the user is logged in."""

    # Session key under which the authenticated username is stored.
    session_key = "username"
    # Set to True to emit TOOLS.SESSAUTH log messages.
    debug = False

    def check_username_and_password(self, username, password):
        """Validate credentials; return an error-message string on failure.

        The default implementation accepts any credentials (returns None).
        Override in subclasses.
        """
        pass

    def anonymous(self):
        """Provide a temporary user name for anonymous users."""
        pass

    def on_login(self, username):
        """Hook invoked after a successful login. Override as needed."""
        pass

    def on_logout(self, username):
        """Hook invoked after a logout. Override as needed."""
        pass

    def on_check(self, username):
        """Hook invoked after each successful session check. Override."""
        pass

    def login_screen(self, from_page='..', username='', error_msg='',
                     **kwargs):
        """Return the login form as UTF-8 encoded HTML bytes."""
        return (unicodestr("""<html><body>
Message: %(error_msg)s
<form method="post" action="do_login">
    Login: <input type="text" name="username" value="%(username)s" size="10" />
    <br />
    Password: <input type="password" name="password" size="10" />
    <br />
    <input type="hidden" name="from_page" value="%(from_page)s" />
    <br />
    <input type="submit" />
</form>
</body></html>""") % vars()).encode("utf-8")

    def do_login(self, username, password, from_page='..', **kwargs):
        """Login. May raise redirect, or return True if request handled."""
        response = cherrypy.serving.response
        error_msg = self.check_username_and_password(username, password)
        if error_msg:
            # Re-render the login screen with the error message.
            body = self.login_screen(from_page, username, error_msg)
            response.body = body
            if "Content-Length" in response.headers:
                # Delete Content-Length header so finalize() recalcs it.
                del response.headers["Content-Length"]
            return True
        else:
            cherrypy.serving.request.login = username
            cherrypy.session[self.session_key] = username
            self.on_login(username)
            raise cherrypy.HTTPRedirect(from_page or "/")

    def do_logout(self, from_page='..', **kwargs):
        """Logout. May raise redirect, or return True if request handled."""
        sess = cherrypy.session
        username = sess.get(self.session_key)
        sess[self.session_key] = None
        if username:
            cherrypy.serving.request.login = None
            self.on_logout(username)
        raise cherrypy.HTTPRedirect(from_page)

    def do_check(self):
        """Assert username. Raise redirect, or return True if request handled.
        """
        sess = cherrypy.session
        request = cherrypy.serving.request
        response = cherrypy.serving.response

        username = sess.get(self.session_key)
        if not username:
            # No logged-in user; try to assign an anonymous identity.
            sess[self.session_key] = username = self.anonymous()
            if self.debug:
                cherrypy.log(
                    'No session[username], trying anonymous', 'TOOLS.SESSAUTH')
        if not username:
            # Still no identity: show the login screen for the current URL.
            url = cherrypy.url(qs=request.query_string)
            if self.debug:
                cherrypy.log('No username, routing to login_screen with '
                             'from_page %r' % url, 'TOOLS.SESSAUTH')
            response.body = self.login_screen(url)
            if "Content-Length" in response.headers:
                # Delete Content-Length header so finalize() recalcs it.
                del response.headers["Content-Length"]
            return True
        if self.debug:
            cherrypy.log('Setting request.login to %r' %
                         username, 'TOOLS.SESSAUTH')
        request.login = username
        self.on_check(username)

    def run(self):
        """Dispatch by path suffix to login/logout/check handlers."""
        request = cherrypy.serving.request
        response = cherrypy.serving.response

        path = request.path_info
        if path.endswith('login_screen'):
            if self.debug:
                cherrypy.log('routing %r to login_screen' %
                             path, 'TOOLS.SESSAUTH')
            return self.login_screen(**request.params)
        elif path.endswith('do_login'):
            if request.method != 'POST':
                # Credentials must not travel in a query string.
                response.headers['Allow'] = "POST"
                if self.debug:
                    cherrypy.log('do_login requires POST', 'TOOLS.SESSAUTH')
                raise cherrypy.HTTPError(405)
            if self.debug:
                cherrypy.log('routing %r to do_login' % path, 'TOOLS.SESSAUTH')
            return self.do_login(**request.params)
        elif path.endswith('do_logout'):
            if request.method != 'POST':
                response.headers['Allow'] = "POST"
                raise cherrypy.HTTPError(405)
            if self.debug:
                cherrypy.log('routing %r to do_logout' %
                             path, 'TOOLS.SESSAUTH')
            return self.do_logout(**request.params)
        else:
            if self.debug:
                cherrypy.log('No special path, running do_check',
                             'TOOLS.SESSAUTH')
            return self.do_check()
def session_auth(**kwargs):
    """Instantiate a SessionAuth handler, apply overrides, and run it."""
    handler = SessionAuth()
    for attr_name, attr_value in kwargs.items():
        setattr(handler, attr_name, attr_value)
    return handler.run()

session_auth.__doc__ = """Session authentication hook.

Any attribute of the SessionAuth class may be overridden via a keyword arg
to this function:

""" + "\n".join(["%s: %s" % (k, type(getattr(SessionAuth, k)).__name__)
                 for k in dir(SessionAuth) if not k.startswith("__")])
def log_traceback(severity=logging.ERROR, debug=False):
    """Write the last error's traceback to the cherrypy error log."""
    # 'debug' is accepted for tool-signature uniformity but is unused here;
    # traceback=True makes cherrypy.log append the active traceback.
    cherrypy.log("", "HTTP", severity=severity, traceback=True)
def log_request_headers(debug=False):
    """Write request headers to the cherrypy error log."""
    lines = []
    for name, value in cherrypy.serving.request.header_list:
        lines.append(" %s: %s" % (name, value))
    cherrypy.log('\nRequest Headers:\n' + '\n'.join(lines), "HTTP")
def log_hooks(debug=False):
    """Write request.hooks to the cherrypy error log."""
    request = cherrypy.serving.request
    msg = []

    # Sort by the standard points if possible.
    from cherrypy import _cprequest
    points = _cprequest.hookpoints
    for point in request.hooks.keys():
        if point not in points:
            points.append(point)

    for point in points:
        msg.append(" %s:" % point)
        hooks = request.hooks.get(point, [])
        hooks.sort()
        for hook in hooks:
            msg.append(" %r" % hook)
    cherrypy.log('\nRequest Hooks for ' + cherrypy.url() +
                 ':\n' + '\n'.join(msg), "HTTP")
def redirect(url='', internal=True, debug=False):
    """Raise InternalRedirect or HTTPRedirect to the given url."""
    if debug:
        prefix = {True: 'internal ', False: ''}[internal]
        cherrypy.log('Redirecting %sto: %s' % (prefix, url), 'TOOLS.REDIRECT')
    if internal:
        raise cherrypy.InternalRedirect(url)
    raise cherrypy.HTTPRedirect(url)
def trailing_slash(missing=True, extra=False, status=None, debug=False):
    """Redirect if path_info has (missing|extra) trailing slash."""
    request = cherrypy.serving.request
    path = request.path_info
    if debug:
        cherrypy.log('is_index: %r, missing: %r, extra: %r, path_info: %r' %
                     (request.is_index, missing, extra, path),
                     'TOOLS.TRAILING_SLASH')

    if request.is_index is True and missing and not path.endswith('/'):
        # An index page should end with a slash; add one.
        new_url = cherrypy.url(path + '/', request.query_string)
        raise cherrypy.HTTPRedirect(new_url, status=status or 301)

    if (request.is_index is False and extra
            and path.endswith('/') and path != '/'):
        # If pi == '/', don't redirect to ''!
        new_url = cherrypy.url(path[:-1], request.query_string)
        raise cherrypy.HTTPRedirect(new_url, status=status or 301)
def flatten(debug=False):
    """Wrap response.body in a generator that recursively iterates over body.

    This allows cherrypy.response.body to consist of 'nested generators';
    that is, a set of generators that yield generators.
    """
    def flattener(input):
        numchunks = 0
        for item in input:
            if is_iterator(item):
                # Recurse into nested iterators, yielding leaf chunks.
                for chunk in flattener(item):
                    numchunks += 1
                    yield chunk
            else:
                numchunks += 1
                yield item
        if debug:
            cherrypy.log('Flattened %d chunks' % numchunks, 'TOOLS.FLATTEN')

    response = cherrypy.serving.response
    response.body = flattener(response.body)
def accept(media=None, debug=False):
    """Return the client's preferred media-type (from the given Content-Types).

    If 'media' is None (the default), no test will be performed.

    If 'media' is provided, it should be the Content-Type value (as a string)
    or values (as a list or tuple of strings) which the current resource
    can emit. The client's acceptable media ranges (as declared in the
    Accept request header) will be matched in order to these Content-Type
    values; the first such string is returned. That is, the return value
    will always be one of the strings provided in the 'media' arg (or None
    if 'media' is None).

    If no match is found, then HTTPError 406 (Not Acceptable) is raised.
    Note that most web browsers send */* as a (low-quality) acceptable
    media range, which should match any Content-Type. In addition, "...if
    no Accept header field is present, then it is assumed that the client
    accepts all media types."

    Matching types are checked in order of client preference first,
    and then in the order of the given 'media' values.

    Note that this function does not honor accept-params (other than "q").
    """
    if not media:
        return
    # A single Content-Type string is shorthand for a one-element list.
    if isinstance(media, basestring):
        media = [media]
    request = cherrypy.serving.request

    # Parse the Accept request header, and try to match one
    # of the requested media-ranges (in order of preference).
    ranges = request.headers.elements('Accept')
    if not ranges:
        # Any media type is acceptable.
        if debug:
            cherrypy.log('No Accept header elements', 'TOOLS.ACCEPT')
        return media[0]
    else:
        # Note that 'ranges' is sorted in order of preference
        for element in ranges:
            # qvalue 0 means "explicitly not acceptable"; skip it.
            if element.qvalue > 0:
                if element.value == "*/*":
                    # Matches any type or subtype
                    if debug:
                        cherrypy.log('Match due to */*', 'TOOLS.ACCEPT')
                    return media[0]
                elif element.value.endswith("/*"):
                    # Matches any subtype
                    mtype = element.value[:-1]  # Keep the slash
                    for m in media:
                        if m.startswith(mtype):
                            if debug:
                                cherrypy.log('Match due to %s' % element.value,
                                             'TOOLS.ACCEPT')
                            return m
                else:
                    # Matches exact value
                    if element.value in media:
                        if debug:
                            cherrypy.log('Match due to %s' % element.value,
                                         'TOOLS.ACCEPT')
                        return element.value

    # No suitable media-range found.
    ah = request.headers.get('Accept')
    if ah is None:
        msg = "Your client did not send an Accept header."
    else:
        msg = "Your client sent this Accept header: %s." % ah
    msg += (" But this resource only emits these media types: %s." %
            ", ".join(media))
    raise cherrypy.HTTPError(406, msg)
class MonitoredHeaderMap(_httputil.HeaderMap):
    """A HeaderMap that records which header field names were looked up.

    Used by autovary() to build the Vary response header from the set of
    request headers the handler actually inspected.
    """

    def __init__(self):
        # Every header name accessed through this map.
        # NOTE(review): the parent HeaderMap.__init__ is not called here —
        # presumably it only sets up empty dict state; confirm that skipping
        # it is intentional.
        self.accessed_headers = set()

    def __getitem__(self, key):
        self.accessed_headers.add(key)
        return _httputil.HeaderMap.__getitem__(self, key)

    def __contains__(self, key):
        self.accessed_headers.add(key)
        return _httputil.HeaderMap.__contains__(self, key)

    def get(self, key, default=None):
        self.accessed_headers.add(key)
        return _httputil.HeaderMap.get(self, key, default=default)

    if hasattr({}, 'has_key'):
        # Python 2 only: dicts expose has_key, so mirror it here too.
        def has_key(self, key):
            self.accessed_headers.add(key)
            return _httputil.HeaderMap.has_key(self, key)
def autovary(ignore=None, debug=False):
    """Auto-populate the Vary response header based on request.header access.
    """
    request = cherrypy.serving.request

    # Swap the request headers for a monitored map that records which
    # header fields the handler reads during this request.
    req_h = request.headers
    request.headers = MonitoredHeaderMap()
    request.headers.update(req_h)
    if ignore is None:
        # Entity headers that do not affect content negotiation.
        ignore = set(['Content-Disposition', 'Content-Length', 'Content-Type'])

    def set_response_header():
        # Merge the accessed header names into any pre-existing Vary values,
        # drop the ignored ones, and emit a sorted, comma-separated header.
        resp_h = cherrypy.serving.response.headers
        v = set([e.value for e in resp_h.elements('Vary')])
        if debug:
            cherrypy.log(
                'Accessed headers: %s' % request.headers.accessed_headers,
                'TOOLS.AUTOVARY')
        v = v.union(request.headers.accessed_headers)
        v = v.difference(ignore)
        v = list(v)
        v.sort()
        resp_h['Vary'] = ', '.join(v)

    # Run late (priority 95) so all handler header access is captured first.
    request.hooks.attach('before_finalize', set_response_header, 95)
| |
"""Definitions for OptionLists.
Most parts of the game shouldn't need to worry about creating and managing
tkinter.Button instances for options; as such, this module defines an API for
creating an 'OptionList' which does not require an active instance of tkinter
and can create a new tkinter.Frame with the appropriate buttons when needed.
"""
import inspect
import tkinter
from rpg import event
from rpg.ui import widgets
import typing
if typing.TYPE_CHECKING:
from rpg import app
from typing import List, Optional, Tuple
OptionWidget = typing.Union[tkinter.Label, widgets.Button]
OptionDefinition = typing.Tuple[str, event.GameEvent]
class Option(object):
    """The definition of an option on the GameView.

    An option encodes a name, an event, and if it is enabled.
    """

    def __init__(self, name, event_instance: 'event.GameEvent',
                 visible: bool = True) -> None:
        """Initialize the option.

        :param name: The name of the option, used for the text on the button
        :param event_instance: A GameEvent instance which can be applied on the
                               button press event
        :param visible: If the option is visible
        """
        self.name = name
        self.event = event_instance
        self.visible = visible
        # Guard against the common mistake of passing the event class
        # itself instead of an instance of it.
        if inspect.isclass(self.event):
            raise Exception(
                "Class given when instance expected for option '{}'".format(
                    self.name
                )
            )

    def generate(self, game: 'app.Game',
                 root: 'tkinter.Frame') -> 'OptionWidget':
        """Create a button or label, dependent on the self.visible parameter.

        :param game: The app.Game instance
        :param root: The tkinter.Frame to place this tkinter widget into
        :return: A tkinter.Button or tkinter.Label for this option.
        """
        # Hidden options become an empty label placeholder.
        if not self.visible:
            return tkinter.Label(root)

        def _on_press():
            self.event.apply(game)

        return widgets.Button(root, self.name, _on_press)
class OptionFrame(tkinter.Frame):
    """A tkinter.Frame which implements a custom layout for options.

    This widget does not need to be used by anything outside of the
    rpg.ui.options package.
    """

    def __init__(self, parent: tkinter.Frame, game: 'app.Game',
                 opts: 'List[List[Optional[Option]]]', **kwargs) -> None:
        """Initialize this OptionFrame instance with the list of options.

        :param parent: The root tkinter.Frame to place this OptionFrame in
        :param game: The app.Game instance
        :param opts: A list of options to place in this frame
        :param kwargs: Any additional keyword arguments for this frame
        """
        tkinter.Frame.__init__(self, parent, **kwargs)
        # TODO: Figure out why the frame doesn't auto-resize
        self.configure(width=800, height=OptionList.RowSize*OptionList.MaxRows)
        self.bind("<Configure>", self._on_resize)
        # Each entry is (widget, row, col); the grid position is replayed
        # by _do_place() on every resize.
        self._children = list()  # type: List[Tuple[widgets.Button, int, int]]
        for row in range(len(opts)):
            option_list = opts[row]
            for col in range(len(option_list)):
                opt = option_list[col]
                # None marks an empty grid cell; no widget is created for it.
                if opt is not None:
                    self._children.append((opt.generate(game, self), row, col))

    def _do_place(self, width, height) -> None:
        """Place every child widget using relative coordinates.

        NOTE(review): the arithmetic below appears to assume an 800x600
        design size, horizontally centering the 800px option area when the
        window is wider — confirm the intended geometry.
        """
        full_width = max(width, 800)
        space_width = (full_width - 800) / 2
        part_width = full_width - space_width
        portion = 1 / OptionList.MaxColumns
        rel_size = (part_width / full_width) * portion
        offset = (portion * 0.5) * (space_width / full_width)
        new_height = 80 + (max(600, height) - 600) * 0.5
        self.configure(width=full_width, height=new_height)
        for opt, row, col in self._children:
            opt.place(
                relheight=(1 / OptionList.MaxRows), relwidth=rel_size,
                relx=(col / OptionList.MaxColumns + offset),
                rely=(row / OptionList.MaxRows)
            )

    def _on_resize(self, tk_event) -> None:
        """Forget and re-place all children when the frame is resized."""
        for opt, _, _ in self._children:
            opt.place_forget()
        self._do_place(tk_event.width, tk_event.height)
class OptionList(object):
    """A 3x8 grid of options which can generate an OptionFrame at runtime.

    This is the class which the GameView expects for the set_options() method.
    Do not change the OptionList.MaxRows or OptionList.MaxColumns variables;
    these should be treated as constants. The OptionList.RowSize and
    OptionList.ColumnSize variables represent the minimum sizes that the
    OptionFrame should assume work.
    """
    MaxRows = 3
    MaxColumns = 8
    RowSize = 30
    ColumnSize = 100

    @staticmethod
    def generate_paged_list(options: 'List[OptionDefinition]') -> 'OptionList':
        """Generate a set of OptionList objects which can be paged through.

        This static method will create a number of OptionList objects which
        link to each other through automatically generated 'Next' and
        'Previous' options, then return the first such OptionList.

        :param options: A listing of names and GameEvents to convert into an
            OptionList group
        :return: The first OptionList object in the set generated
        """
        opts_per_page = OptionList.MaxRows * OptionList.MaxColumns
        num_opts = len(options)
        # FIX: use ceiling division. The previous `n // per + 1` produced a
        # spurious empty trailing page whenever num_opts was an exact
        # multiple of opts_per_page. Keep at least one page so an empty
        # options list still yields a page holding the Cancel button.
        num_pages = max(1, -(-num_opts // opts_per_page))
        # Initialize the List of OptionList instances
        opt_lists = list()  # type: List[OptionList]
        for i in range(num_pages):
            opts = OptionList()
            base = i * opts_per_page
            for j in range(min(num_opts - base, opts_per_page)):
                option = options[base + j]
                row = j // OptionList.MaxColumns
                # FIX: wrap the column index at MaxColumns, not MaxRows.
                # The old `j % MaxRows` squeezed every option into the
                # first three columns and silently overwrote earlier
                # options placed in the same slot.
                col = j % OptionList.MaxColumns
                opts.set(Option(option[0], option[1]), row, col)
            opt_lists.append(opts)
        # Add the next/prev/cancel buttons in the last column
        next_row = 0
        prev_row = next_row + 1
        cancel_row = OptionList.MaxRows - 1
        last_col = OptionList.MaxColumns - 1
        if num_pages > 1:
            opt_lists[0].set(
                Option(
                    "Next",
                    event.UpdateOptionsEvent(opt_lists[1])
                ), next_row, last_col
            )
            opt_lists[0].set(
                Option(
                    "Cancel", event.OptionListReturnEvent()
                ), cancel_row, last_col
            )
            for i in range(1, num_pages-1):
                opt_lists[i].set(
                    Option(
                        "Next", event.UpdateOptionsEvent(opt_lists[i+1])
                    ), next_row, last_col
                )
                opt_lists[i].set(
                    Option(
                        "Prev", event.UpdateOptionsEvent(opt_lists[i-1])
                    ), prev_row, last_col
                )
                opt_lists[i].set(
                    Option(
                        "Cancel", event.OptionListReturnEvent()
                    ), cancel_row, last_col
                )
            opt_lists[-1].set(
                Option(
                    "Prev", event.UpdateOptionsEvent(opt_lists[-2])
                ), prev_row, last_col
            )
            opt_lists[-1].set(
                Option(
                    "Cancel", event.OptionListReturnEvent()
                ), cancel_row, last_col
            )
        else:
            opt_lists[0].set(
                Option(
                    "Cancel", event.OptionListReturnEvent()
                ), cancel_row, last_col
            )
        return opt_lists[0]

    def __init__(self, *options: 'Tuple[Option, int, int]') -> None:
        """Initialize the OptionList with the given options.

        If no options are given, each slot in this OptionList is filled with
        None instead of an option.

        :param options: Optional listing of Option instances and the
            (row, column) that the option is located at
        """
        r = OptionList.MaxRows
        c = OptionList.MaxColumns
        self._options = [[None for _ in range(c)] for _ in range(r)]
        for opt, row, col in options:
            self.set(opt, row, col)

    def clear(self) -> None:
        """Set all options of this OptionList to None."""
        for row in range(OptionList.MaxRows):
            for col in range(OptionList.MaxColumns):
                self._options[row][col] = None

    def set(self, option: 'Optional[Option]', row: int, column: int) -> None:
        """Set the option at (row, column) to the given option.

        :param option: The option to set the slot at (row, column) to
        :param row: The row of the option slot to set
        :param column: The column of the option slot to set
        """
        self._options[row][column] = option

    def get(self, row: int, column: int) -> 'Optional[Option]':
        """Get the option at (row, column), or None if no option is there.

        :param row: The row of the option slot to query
        :param column: The column of the option slot to query
        :return: The option at the given (row, column) slot, or None
        """
        return self._options[row][column]

    def generate(self, game: 'app.Game',
                 parent: 'tkinter.Frame') -> 'tkinter.Frame':
        """Create a new tkinter.Frame object holding all the options set.

        This method is a wrapper for the OptionFrame constructor.

        :param game: The app.Game instance
        :param parent: The root frame to place the generated frame into
        :return: A tkinter.Frame with buttons for the options in this list
        """
        return OptionFrame(parent, game, self._options)
| |
import numpy as np
import matplotlib.pyplot as plt
import glob
import h5py
from SIP import SIP, eval_freq
from gatspy.periodic import LombScargle
import sys
from fit_all_the_light_curves import load_lc, reconstruct_fake_lc
import time
# Shared matplotlib styling for every plot produced by this script.
# NOTE(review): 'text.fontsize' was removed in newer matplotlib releases
# ('font.size' is the replacement) -- confirm against the pinned
# matplotlib version before upgrading.
plotpar = {'axes.labelsize': 20,
           'text.fontsize': 20,
           'legend.fontsize': 20,
           'xtick.labelsize': 20,
           'ytick.labelsize': 20,
           'text.usetex': True}
plt.rcParams.update(plotpar)
def peak_detect(x, y):
    """Return the (x, y) position of the tallest local maximum of ``y``.

    A local maximum is any interior sample strictly greater than both of
    its neighbours; the first and last samples are never peaks. Ties are
    broken in favour of the earliest peak, matching the previous
    implementation.

    :param x: 1d array of positions (e.g. frequencies)
    :param y: 1d array of values (e.g. periodogram power), same length as x
    :return: tuple (x_peak, y_peak) of the highest interior peak
    :raises ValueError: if ``y`` contains no local maximum (previously this
        failed with an opaque indexing error)
    """
    x, y = np.asarray(x), np.asarray(y)
    interior = np.arange(1, len(y) - 1)
    # vectorised version of the old Python-level comprehension
    peaks = interior[(y[interior] > y[interior - 1]) &
                     (y[interior] > y[interior + 1])]
    if peaks.size == 0:
        raise ValueError("no local maxima found in y")
    best = peaks[np.argmax(y[peaks])]
    return x[best], y[best]
# grid over amplitudes (the K2 pgram step takes the time)
def grid_over_amps(basis, flux, raw_x, raw_y, truth, fs, amps, true_a,
                   flag, n, plot=False, raw=False, random_amps=True):
    """Inject ``flux`` at a grid of amplitudes and test period recovery.

    For each amplitude the simulated signal (plus white noise) is added to
    the real light curve, then both SIP and a plain Lomb-Scargle
    periodogram are asked to recover the injected period.

    FIX: removed a leftover debugging block (timing prints followed by
    ``assert 0``) that made this function abort unconditionally after the
    first Lomb-Scargle fit, plus the duplicated model fit and dead
    commented-out code around it.

    :param basis: ELC basis vectors used by SIP
    :param flux: the simulated (injected) light curve, unit amplitude
    :param raw_x: time array of the real light curve
    :param raw_y: flux array of the real light curve
    :param truth: injected period (days)
    :param fs: frequency grid to search
    :param amps: amplitude grid (placeholder when random_amps is True)
    :param true_a: injected amplitude (currently unused bookkeeping value)
    :param flag: "r" (rotation) or "a" (asteroseismology)
    :param n: index of this period, used in output file names
    :param plot: if True, save a diagnostic plot per amplitude
    :param raw: unused; kept for call compatibility
    :param random_amps: draw log-uniform random amplitudes instead of amps
    :return: arrays (K2a, K2P, rawa, rawP, alla, allp, all_results) of
        recovered amplitudes/periods and all attempted values
    """
    # find the threshold level from the un-injected light curve
    _, initial_pgram, _ = SIP(raw_x, raw_y, basis, fs)
    mx, threshold = peak_detect(fs, initial_pgram)
    K2P, rawP, K2a, rawa = [], [], [], []
    alla, allp = [], []
    all_results = []
    for i, a in enumerate(amps):
        if random_amps and flag in ("r", "a"):
            # draw a log-uniform amplitude instead of the grid value
            a = 10**(np.random.uniform(np.log10(1e-5), np.log10(1e-3)))
        tf = 1./truth
        print("period = ", truth)
        # inject: scaled signal plus white noise on top of the real flux
        noise = np.random.randn(len(flux))*50*13**.5*1e-6
        fx = flux * a + noise
        y = fx + raw_y
        if flag in ("r", "a"):
            # fixed 10% tolerance on the recovered frequency
            threshold = .1
        # calculate SIP
        amp2s, s2n, w = SIP(raw_x, y, basis, fs)
        pgram = s2n
        best_f, best_pgram = peak_detect(fs, pgram)  # find peaks
        print("recovered period", 1./best_f)
        s = 0  # success indicator
        alla.append(a)
        allp.append(truth)
        all_results.append(best_f)
        print(tf-threshold*tf, best_f, tf+threshold*tf)
        if tf-threshold*tf < best_f < tf+threshold*tf:
            K2P.append(truth)
            K2a.append(a)
            print("success!", "\n")
            s = 1
        # periodogram of the raw light curve (LombScargle needs float64)
        y = np.array([_y.astype("float64") for _y in y])
        raw_x = np.array([_raw_x.astype("float64") for _raw_x in raw_x])
        model = LombScargle().fit(raw_x, y, np.ones_like(y)*1e-5)
        period = 1. / fs
        pg = model.periodogram(period)
        best_f2, best_pg2 = peak_detect(fs, pg)
        if tf-threshold*tf < best_f2 < tf+threshold*tf:
            rawP.append(truth)
            rawa.append(a)
        if plot:
            plt.clf()
            plt.subplot(2, 1, 1)
            plt.plot(raw_x, y, "k.")
            plt.plot(raw_x, fx, color="g")
            plt.title("$\mathrm{Amp = %s, P = %.3f}$" % (a, (1./tf)))
            plt.subplot(2, 1, 2)
            plt.axvline(best_f, color="r", linestyle="-")
            plt.axvline(tf, color="k", linestyle="--")
            print("best f = ", best_f)
            print("true f = ", tf)
            print(tf-threshold*tf, tf+threshold*tf)
            c = "m" if s == 1 else "b"
            # FIX: label was "$\mathrm{SIP$}" with the closing brace
            # outside math mode, which breaks usetex rendering
            plt.plot(fs, pgram, color=c, label="$\mathrm{SIP}$")
            plt.savefig("../injections/sine/%s_%s_result_%s"
                        % (str(n).zfill(2), str(i).zfill(2), flag))
            # n is the period index, i is the amplitude index
            print("%s_%s_result_%s" % (str(n).zfill(2), str(i).zfill(2),
                                       flag))
    return np.array(K2a), np.array(K2P), np.array(rawa), np.array(rawP), \
        np.array(alla), np.array(allp), np.array(all_results)
# add simulated to real light curves and grid over periods
# add simulated to real light curves and grid over periods
def grid_over_periods(basis, raw_x, raw_y, true_p, fs, true_a, fnames, flag):
    """Run the amplitude grid for every injected light curve, saving the
    recovery results to HDF5 files.

    NOTE: relies on the module-level globals ``amps``, ``start`` and
    ``stop`` (set in the __main__ block) for the amplitude grid and the
    output file names.

    :param basis: ELC basis used by SIP
    :param raw_x: time array of the real (background) light curve
    :param raw_y: flux array of the real (background) light curve
    :param true_p: array of injected periods, one per file in fnames
    :param fs: frequency grid searched by the periodograms
    :param true_a: array of injected amplitudes, one per file in fnames
    :param fnames: files containing the simulated light curves
    :param flag: "r" (rotation) or "a" (asteroseismology)
    :return: (K2_amps, K2_Ps, raw_amps, raw_Ps) flattened recovery arrays
    """
    K2_amps, K2_Ps, raw_amps, raw_Ps = [], [], [], []
    ar = []
    allas, allps = [], []
    for i, fname in enumerate(fnames):
        print(fname)
        print(true_p[i])
        # FIX: renamed the unpacked time column so it no longer shadows
        # the imported `time` module.
        sim_t, flux = np.genfromtxt(fname).T
        K2a, K2P, rawa, rawP, alla, allp, all_results = \
            grid_over_amps(basis, flux, raw_x, raw_y, true_p[i], fs,
                           amps, true_a[i], flag, i, plot=False)
        K2_amps.append(K2a)
        raw_amps.append(rawa)
        K2_Ps.append(K2P)
        raw_Ps.append(rawP)
        allas.append(alla)
        allps.append(allp)
        ar.append(all_results)
    # flatten the per-file result lists into single 1d arrays
    K2_amps = np.array([v for arr in K2_amps for v in arr])
    K2_Ps = np.array([v for arr in K2_Ps for v in arr])
    raw_amps = np.array([v for arr in raw_amps for v in arr])
    raw_Ps = np.array([v for arr in raw_Ps for v in arr])
    allas = np.array([v for arr in allas for v in arr])
    allps = np.array([v for arr in allps for v in arr])
    ar = np.array([v for arr in ar for v in arr])
    # successful recoveries: (amplitude, period) pairs
    f = h5py.File("../injections/sine/histogram_%s_%s_%s.h5" % (start, stop,
                                                                flag), "w")
    K2data = f.create_dataset("K2", (len(K2_amps), 2))
    K2data[:, 0] = K2_amps
    K2data[:, 1] = K2_Ps
    rawdata = f.create_dataset("raw", (len(raw_amps), 2))
    rawdata[:, 0] = raw_amps
    rawdata[:, 1] = raw_Ps
    f.close()
    # every attempt: (amplitude, true period, recovered frequency)
    f = h5py.File("../injections/sine/truths_%s_%s_%s.h5" % (start, stop,
                                                             flag), "w")
    K2data = f.create_dataset("K2", (len(allas), 3))
    K2data[:, 0] = allas
    K2data[:, 1] = allps
    K2data[:, 2] = ar
    f.close()
    return K2_amps, K2_Ps, raw_amps, raw_Ps
if __name__ == "__main__":
    # Usage: python <script> <flag> <start> <stop>
    #   flag  -- "r" (rotation) or "a" (asteroseismology)
    #   start -- first injection file index to process
    #   stop  -- one past the last injection file index
    # load example star
    path = "/export/bbq2/dfm/k2/web/lightcurves/c1/201100000/21000"
    fname = "ktwo201121245-c01_lpd-lc.fits"
    raw_x, y, l = load_lc("%s/%s" % (path, fname))
    raw_y = reconstruct_fake_lc()[l]
    # load basis (first 150 ELC basis vectors, masked like the light curve)
    with h5py.File("../data/c1.h5", "r") as f:
        basis = f["basis"][:150, l]
    # load injections and truths
    sine = True
    flag = str(sys.argv[1])  # r for rotation or a for asteroseismology
    if sine:
        fnames = glob.glob("../injections/sine/????_lc_%s.txt" % flag)
        fnames = np.sort(fnames)
        name, true_p = np.genfromtxt("../injections/sine/truth_%s.txt"
                                     % flag).T
        true_a = np.ones_like(true_p)
    else:
        fnames = glob.glob("../injections/*_lc.txt")
        name, true_p, true_a = np.genfromtxt("truth.txt").T
    # The sip grid
    # NOTE(review): if flag is neither "r" nor "a", `fs` is never assigned
    # and grid_over_periods raises NameError -- confirm callers always
    # pass one of the two flags.
    if flag == "r":
        ps = np.linspace(.4, 50., 1000)
        fs = 1./ps
        # fs = np.linspace(1/50., 1/.4, 1000)
    elif flag == "a":
        fs = np.linspace(2./4., 26., 5000)
    # this is just a place holder, amps are random
    amps = 10**(np.linspace(np.log10(1e-5), np.log10(1e-3), 20))
    # for parallelisation, provide the starting and stopping indices
    start = int(sys.argv[2])
    stop = int(sys.argv[3])
    fnames = fnames[start:stop]
    true_p = true_p[start:stop]
    true_a = true_a[start:stop]
    # calculate the 2d histogram of completeness over period and amplitude
    K2_amps, K2_Ps, raw_amps, raw_Ps = grid_over_periods(basis, raw_x,
                                                         raw_y, true_p, fs,
                                                         true_a, fnames,
                                                         flag)
| |
import logging
from itertools import islice
from operator import methodcaller
from ..exceptions import ElasticsearchException, TransportError
from ..compat import map
# Module-level logger shared by the bulk/scan helpers below.
logger = logging.getLogger('elasticsearch.helpers')
class BulkIndexError(ElasticsearchException):
    """Raised when one or more documents of a bulk chunk fail to index.

    Constructed as ``BulkIndexError(message, error_list)``; the per-item
    error dicts are exposed through :attr:`errors`.
    """

    @property
    def errors(self):
        """List of errors from execution of the last chunk."""
        return self.args[1]
class ScanError(ElasticsearchException):
    """Raised by :func:`scan` when a scroll request reports failed shards."""
    pass
def expand_action(data):
    """
    From one document or action definition passed in by the user extract the
    action/data lines needed for elasticsearch's
    :meth:`~elasticsearch.Elasticsearch.bulk` api.
    """
    # work on a copy so the caller's dict is left untouched
    data = data.copy()
    op_type = data.pop('_op_type', 'index')
    action = {op_type: {}}
    meta_fields = ('_index', '_parent', '_percolate', '_routing',
                   '_timestamp', '_ttl', '_type', '_version',
                   '_version_type', '_id', '_retry_on_conflict')
    for field in meta_fields:
        if field in data:
            action[op_type][field] = data.pop(field)
    # delete actions carry no document payload
    if op_type == 'delete':
        return action, None
    # prefer an explicit _source; otherwise the remaining fields are the doc
    return action, data.get('_source', data)
def streaming_bulk(client, actions, chunk_size=500, raise_on_error=True,
        expand_action_callback=expand_action, raise_on_exception=True,
        **kwargs):
    """
    Streaming bulk consumes actions from the iterable passed in and yields
    results per action. For non-streaming usecases use
    :func:`~elasticsearch.helpers.bulk` which is a wrapper around streaming
    bulk that returns summary information about the bulk operation once the
    entire input is consumed and sent.

    This function expects the action to be in the format as returned by
    :meth:`~elasticsearch.Elasticsearch.search`, for example::

        {
            '_index': 'index-name',
            '_type': 'document',
            '_id': 42,
            '_parent': 5,
            '_ttl': '1d',
            '_source': {
                ...
            }
        }

    Alternatively, if `_source` is not present, it will pop all metadata fields
    from the doc and use the rest as the document data.

    If you wish to perform other operations, like `delete` or `update` use the
    `_op_type` field in your actions (`_op_type` defaults to `index`)::

        {
            '_op_type': 'delete',
            '_index': 'index-name',
            '_type': 'document',
            '_id': 42,
        }
        {
            '_op_type': 'update',
            '_index': 'index-name',
            '_type': 'document',
            '_id': 42,
            'doc': {'question': 'The life, universe and everything.'}
        }

    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
    :arg actions: iterable containing the actions to be executed
    :arg chunk_size: number of docs in one chunk sent to es (default: 500)
    :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
        from the execution of the last chunk when some occur. By default we raise.
    :arg raise_on_exception: if ``False`` then don't propagate exceptions from
        call to ``bulk`` and just report the items that failed as failed.
    :arg expand_action_callback: callback executed on each action passed in,
        should return a tuple containing the action line and the data line
        (`None` if data line should be omitted).
    """
    # lazily expand each incoming action into (action_line, data_line)
    actions = map(expand_action_callback, actions)

    # if raise on error is set, we need to collect errors per chunk before raising them
    errors = []

    while True:
        # islice shares the underlying iterator, so each pass consumes the
        # next chunk_size expanded actions
        chunk = islice(actions, chunk_size)

        # raise on exception means we might need to iterate on chunk twice
        if not raise_on_exception:
            chunk = list(chunk)
        bulk_actions = []
        for action, data in chunk:
            bulk_actions.append(action)
            if data is not None:
                bulk_actions.append(data)

        if not bulk_actions:
            # iterator exhausted -- all actions sent
            return

        try:
            # send the actual request
            resp = client.bulk(bulk_actions, **kwargs)
        except TransportError as e:
            # default behavior - just propagate exception
            if raise_on_exception:
                raise e

            # if we are not propagating, mark all actions in current chunk as failed
            err_message = str(e)
            exc_errors = []

            # chunk was materialized as a list above, so re-iteration is safe
            for action, data in chunk:
                info = {"error": err_message, "status": e.status_code, "exception": e, "data": data}
                op_type, action = action.popitem()
                info.update(action)
                exc_errors.append({op_type: info})

            # emulate standard behavior for failed actions
            if raise_on_error:
                raise BulkIndexError('%i document(s) failed to index.' % len(exc_errors), exc_errors)
            else:
                for err in exc_errors:
                    yield False, err
                continue

        # go through request-reponse pairs and detect failures
        for op_type, item in map(methodcaller('popitem'), resp['items']):
            ok = 200 <= item.get('status', 500) < 300
            if not ok and raise_on_error:
                errors.append({op_type: item})

            if not errors:
                # if we are not just recording all errors to be able to raise
                # them all at once, yield items individually
                yield ok, {op_type: item}

        if errors:
            raise BulkIndexError('%i document(s) failed to index.' % len(errors), errors)
def bulk(client, actions, stats_only=False, **kwargs):
    """
    Helper for the :meth:`~elasticsearch.Elasticsearch.bulk` api that provides
    a more human friendly interface - it consumes an iterator of actions and
    sends them to elasticsearch in chunks. It returns a tuple with summary
    information - number of successfully executed actions and either list of
    errors or number of errors if `stats_only` is set to `True`.

    See :func:`~elasticsearch.helpers.streaming_bulk` for more information
    and accepted formats.

    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
    :arg actions: iterator containing the actions
    :arg stats_only: if `True` only report the number of successful/failed
        operations instead of the number of successful plus a list of error
        responses

    Any additional keyword arguments will be passed to
    :func:`~elasticsearch.helpers.streaming_bulk` which is used to execute
    the operation.
    """
    success, failed = 0, 0
    # errors are only accumulated when stats_only is False
    errors = []

    for ok, item in streaming_bulk(client, actions, **kwargs):
        if ok:
            success += 1
        else:
            if not stats_only:
                errors.append(item)
            failed += 1

    return success, failed if stats_only else errors


# preserve the name for backwards compatibility
bulk_index = bulk
def scan(client, query=None, scroll='5m', raise_on_error=True, preserve_order=False, **kwargs):
    """
    Simple abstraction on top of the
    :meth:`~elasticsearch.Elasticsearch.scroll` api - a simple iterator that
    yields all hits as returned by underlining scroll requests.

    By default scan does not return results in any pre-determined order. To
    have a standard order in the returned documents (either by score or
    explicit sort definition) when scrolling, use ``preserve_order=True``. This
    may be an expensive operation and will negate the performance benefits of
    using ``scan``.

    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
    :arg query: body for the :meth:`~elasticsearch.Elasticsearch.search` api
    :arg scroll: Specify how long a consistent view of the index should be
        maintained for scrolled search
    :arg raise_on_error: raises an exception (``ScanError``) if an error is
        encountered (some shards fail to execute). By default we raise.
    :arg preserve_order: don't set the ``search_type`` to ``scan`` - this will
        cause the scroll to paginate with preserving the order. Note that this
        can be an extremely expensive operation and can easily lead to
        unpredictable results, use with caution.

    Any additional keyword arguments will be passed to the initial
    :meth:`~elasticsearch.Elasticsearch.search` call::

        scan(es,
            query={"match": {"title": "python"}},
            index="orders-*",
            doc_type="books"
        )
    """
    if not preserve_order:
        kwargs['search_type'] = 'scan'
    # initial search
    resp = client.search(body=query, scroll=scroll, **kwargs)

    scroll_id = resp.get('_scroll_id')
    if scroll_id is None:
        return

    first_run = True
    while True:
        # if we didn't set search_type to scan initial search contains data
        if preserve_order and first_run:
            first_run = False
        else:
            resp = client.scroll(scroll_id, scroll=scroll)

        for hit in resp['hits']['hits']:
            yield hit

        # check if we have any errors
        if resp["_shards"]["failed"]:
            # FIX: message typo 'Scrol' -> 'Scroll'
            logger.warning(
                'Scroll request has failed on %d shards out of %d.',
                resp['_shards']['failed'], resp['_shards']['total']
            )
            if raise_on_error:
                raise ScanError(
                    'Scroll request has failed on %d shards out of %d.',
                    resp['_shards']['failed'], resp['_shards']['total']
                )

        scroll_id = resp.get('_scroll_id')
        # end of scroll
        if scroll_id is None or not resp['hits']['hits']:
            break
def reindex(client, source_index, target_index, query=None, target_client=None,
        chunk_size=500, scroll='5m', scan_kwargs=None, bulk_kwargs=None):
    """
    Reindex all documents from one index that satisfy a given query
    to another, potentially (if `target_client` is specified) on a different cluster.
    If you don't specify the query you will reindex all the documents.

    .. note::

        This helper doesn't transfer mappings, just the data.

    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use (for
        read if `target_client` is specified as well)
    :arg source_index: index (or list of indices) to read documents from
    :arg target_index: name of the index in the target cluster to populate
    :arg query: body for the :meth:`~elasticsearch.Elasticsearch.search` api
    :arg target_client: optional, if specified will be used for writing (thus
        enabling reindex between clusters)
    :arg chunk_size: number of docs in one chunk sent to es (default: 500)
    :arg scroll: Specify how long a consistent view of the index should be
        maintained for scrolled search
    :arg scan_kwargs: additional kwargs to be passed to
        :func:`~elasticsearch.helpers.scan`
    :arg bulk_kwargs: additional kwargs to be passed to
        :func:`~elasticsearch.helpers.bulk`
    """
    # FIX: use None sentinels instead of mutable {} default arguments
    # (shared dict instances across calls); passing {} still works.
    scan_kwargs = {} if scan_kwargs is None else scan_kwargs
    bulk_kwargs = {} if bulk_kwargs is None else bulk_kwargs
    target_client = client if target_client is None else target_client

    docs = scan(client, query=query, index=source_index, scroll=scroll, **scan_kwargs)

    def _change_doc_index(hits, index):
        # rewrite each hit's _index in-flight so it lands in target_index
        for h in hits:
            h['_index'] = index
            yield h

    kwargs = {
        'stats_only': True,
    }
    kwargs.update(bulk_kwargs)
    return bulk(target_client, _change_doc_index(docs, target_index),
        chunk_size=chunk_size, **kwargs)
| |
import subprocess
import sys
import os
import re
import contextlib
# check if pip is installed. If not, raise an ImportError
# (pip is needed below to auto-install missing build dependencies)
PIP_INSTALLED = True

try:
    import pip
except ImportError:
    PIP_INSTALLED = False

if not PIP_INSTALLED:
    raise ImportError('pip is not installed.')
def install_and_import(package):
    """Import *package*, pip-installing it first if the import fails.

    Either way, the imported module object is bound into this module's
    globals under the package name so later code can use it directly.
    """
    import importlib
    try:
        importlib.import_module(package)
    except ImportError:
        # NOTE(review): pip.main() was removed from pip's public API in
        # pip >= 10 -- confirm the supported pip versions for this build.
        pip.main(['install', package])
    finally:
        # re-import (now guaranteed present) and expose at module level
        globals()[package] = importlib.import_module(package)
# check if setuptools is installed. If not, install setuptools
# automatically using pip. Runs at import time, before the setuptools
# imports below.
install_and_import('setuptools')
from setuptools.command.build_ext import build_ext as _build_ext
from distutils import ccompiler, msvccompiler
from distutils.sysconfig import get_python_inc
## fix compiler and build options
# Per-compiler extra flags; 'other' covers gcc/clang and any unknown type.
COMPILE_OPTIONS = {
    # FIX: '/Ox' (full optimization). The previous value '/0x' -- with a
    # digit zero -- is not a valid MSVC option.
    'msvc': ['/Ox', '/EHsc'],
    'mingw32': ['-O3', '-ffast-math', '-march=native'],
    'other': ['-O3', '-ffast-math', '-march=native']
}

LINK_OPTIONS = {
    'msvc': [],
    'mingw32': [],
    'other': []
}
class build_ext_options:
    """Mixin appending per-compiler compile/link flags to every extension."""

    def build_options(self):
        compiler_type = self.compiler.compiler_type
        extra_compile = COMPILE_OPTIONS.get(compiler_type,
                                            COMPILE_OPTIONS['other'])
        extra_link = LINK_OPTIONS.get(compiler_type, LINK_OPTIONS['other'])
        for ext in self.extensions:
            ext.extra_compile_args += extra_compile
        for ext in self.extensions:
            ext.extra_link_args += extra_link
class build_ext(_build_ext, build_ext_options):
    # Thin subclass that injects the per-compiler flags immediately before
    # setuptools compiles the extension modules.
    def build_extensions(self):
        build_ext_options.build_options(self)
        _build_ext.build_extensions(self)
def generate_cython():
    """Run build_tools/cythonize.py over the py_stringsimjoin package.

    :raises RuntimeError: if the cythonize subprocess exits non-zero
    """
    cwd = os.path.abspath(os.path.dirname(__file__))
    print("Cythonizing sources")
    script = os.path.join(cwd, 'build_tools', 'cythonize.py')
    status = subprocess.call([sys.executable, script, 'py_stringsimjoin'],
                             cwd=cwd)
    if status != 0:
        raise RuntimeError("Running cythonize failed!")
# Cython extension modules to build, keyed by dotted module name. Each
# entry maps to its .pyx sources and any extra compiler arguments (the
# join modules need the index package on the include path).
MODULES = {
    "py_stringsimjoin.index.inverted_index_cy": {
        'sources': ["py_stringsimjoin/index/inverted_index_cy.pyx"],
        'comargs': [],
    },
    "py_stringsimjoin.index.position_index_cy": {
        'sources': ["py_stringsimjoin/index/position_index_cy.pyx"],
        'comargs': [],
    },
    "py_stringsimjoin.similarity_measure.edit_distance": {
        'sources': ["py_stringsimjoin/similarity_measure/edit_distance.pyx"],
        'comargs': [],
    },
    "py_stringsimjoin.similarity_measure.cosine": {
        'sources': ["py_stringsimjoin/similarity_measure/cosine.pyx"],
        'comargs': [],
    },
    "py_stringsimjoin.similarity_measure.dice": {
        'sources': ["py_stringsimjoin/similarity_measure/dice.pyx"],
        'comargs': [],
    },
    "py_stringsimjoin.similarity_measure.jaccard": {
        'sources': ["py_stringsimjoin/similarity_measure/jaccard.pyx"],
        'comargs': [],
    },
    "py_stringsimjoin.join.edit_distance_join_cy": {
        'sources': ["py_stringsimjoin/join/edit_distance_join_cy.pyx"],
        'comargs': ["-I./py_stringsimjoin/index/"],
    },
    "py_stringsimjoin.join.disk_edit_distance_join_cy": {
        'sources': ["py_stringsimjoin/join/disk_edit_distance_join_cy.pyx"],
        'comargs': ["-I./py_stringsimjoin/index/"],
    },
    "py_stringsimjoin.join.overlap_coefficient_join_cy": {
        'sources': ["py_stringsimjoin/join/overlap_coefficient_join_cy.pyx"],
        'comargs': ["-I./py_stringsimjoin/index/"],
    },
    "py_stringsimjoin.join.overlap_join_cy": {
        'sources': ["py_stringsimjoin/join/overlap_join_cy.pyx"],
        'comargs': ["-I./py_stringsimjoin/index/"],
    },
    "py_stringsimjoin.join.cosine_join_cy": {
        'sources': ["py_stringsimjoin/join/cosine_join_cy.pyx"],
        'comargs': ["-I./py_stringsimjoin/index/"],
    },
    "py_stringsimjoin.join.dice_join_cy": {
        'sources': ["py_stringsimjoin/join/dice_join_cy.pyx"],
        'comargs': ["-I./py_stringsimjoin/index/"],
    },
    "py_stringsimjoin.join.jaccard_join_cy": {
        'sources': ["py_stringsimjoin/join/jaccard_join_cy.pyx"],
        'comargs': ["-I./py_stringsimjoin/index/"],
    },
    "py_stringsimjoin.join.set_sim_join_cy": {
        'sources': ["py_stringsimjoin/join/set_sim_join_cy.pyx"],
        'comargs': ["-I./py_stringsimjoin/index/"],
    },
    "py_stringsimjoin.utils.cython_utils": {
        'sources': ["py_stringsimjoin/utils/cython_utils.pyx"],
        'comargs': ["-I./py_stringsimjoin/index/"],
    },
}
def is_source_release(path):
    """Return True when *path* looks like an sdist (contains PKG-INFO)."""
    marker = os.path.join(path, 'PKG-INFO')
    return os.path.exists(marker)
def purge(dir, pattern):
    """Delete every file in *dir* whose name matches the regex *pattern*."""
    for entry in os.listdir(dir):
        if not re.search(pattern, entry):
            continue
        os.remove(os.path.join(dir, entry))
def clean(path):
    """Remove built .so files and cythonized .c/.cpp sources for every
    extension module listed in MODULES.

    :param path: repository root containing the py_stringsimjoin package
    """
    for mod_name in list(MODULES.keys()):
        # e.g. "py_stringsimjoin.join.dice_join_cy" -> "py_stringsimjoin/join"
        package_dir = '/'.join(mod_name.split('.')[0:-1])
        purge(package_dir, r".*\.so$")
        rel_path = mod_name.replace('.', '/')
        for ext in ['.cpp', '.c']:
            generated = os.path.join(path, rel_path + ext)
            if os.path.exists(generated):
                os.unlink(generated)
@contextlib.contextmanager
def chdir(new_dir):
    """Temporarily cd into *new_dir* and prepend it to sys.path.

    The previous working directory and sys.path head are restored on exit.
    """
    previous_dir = os.getcwd()
    try:
        os.chdir(new_dir)
        sys.path.insert(0, new_dir)
        yield
    finally:
        del sys.path[0]
        os.chdir(previous_dir)
def setup_package():
    """Entry point for building/installing py_stringsimjoin.

    Handles the custom 'clean' command, regenerates Cython sources when
    building from a checkout (rather than an sdist), and runs
    setuptools.setup with the compiled extension modules.
    """
    root = os.path.abspath(os.path.dirname(__file__))

    if len(sys.argv) > 1 and sys.argv[1] == 'clean':
        return clean(root)

    if len(sys.argv) > 1 and sys.argv[1] == 'touch':
        # NOTE(review): touch() is not defined anywhere in this file, so
        # this command currently raises NameError -- port the helper from
        # the project this setup.py was adapted from, or drop the command.
        return touch(root)

    with chdir(root):
        include_dirs = [get_python_inc(plat_specific=True)]
        # FIX: get_build_version is a function; the previous code compared
        # the function object itself to 9 (always False), so the MSVC 9
        # compatibility headers were never added.
        if (ccompiler.new_compiler().compiler_type == 'msvc'
                and msvccompiler.get_build_version() == 9):
            include_dirs.append(os.path.join(root, 'include', 'msvc9'))

        # sdists ship pre-generated .c/.cpp files; only cythonize checkouts
        if not is_source_release(root):
            generate_cython()

        extensions = []
        for name in list(MODULES.keys()):
            curr_mod = MODULES[name]
            e = setuptools.Extension(name, sources=curr_mod['sources'],
                    extra_compile_args=curr_mod['comargs'], language='c++')
            extensions.append(e)

        packages = setuptools.find_packages()
        with open('README.rst') as f:
            LONG_DESCRIPTION = f.read()
        cmdclass = {"build_ext": build_ext}
        setuptools.setup(
            name='py_stringsimjoin',
            version='0.3.2',
            description='Python library for performing string similarity joins.',
            long_description=LONG_DESCRIPTION,
            url='https://sites.google.com/site/anhaidgroup/projects/magellan/py_stringsimjoin',
            author='UW Magellan Team',
            author_email='uwmagellan@gmail.com',
            license='BSD',
            classifiers=[
                'Development Status :: 4 - Beta',
                'Environment :: Console',
                'Intended Audience :: Developers',
                'Intended Audience :: Science/Research',
                'Intended Audience :: Education',
                'License :: OSI Approved :: BSD License',
                'Operating System :: POSIX',
                'Operating System :: Unix',
                'Operating System :: MacOS',
                'Operating System :: Microsoft :: Windows',
                'Programming Language :: Python',
                'Programming Language :: Python :: 3',
                'Programming Language :: Python :: 3.6',
                'Programming Language :: Python :: 3.7',
                'Programming Language :: Python :: 3.8',
                'Programming Language :: Python :: 3.9',
                'Topic :: Scientific/Engineering',
                'Topic :: Utilities',
                'Topic :: Software Development :: Libraries',
            ],
            packages=packages,
            ext_modules=extensions,
            cmdclass=cmdclass,
            install_requires=[
                'joblib',
                'pandas >= 0.16.0',
                'PyPrind >= 2.9.3',
                'py_stringmatching >= 0.2.1',
                'six'
            ],
            include_package_data=True,
            zip_safe=False,
        )


if __name__ == '__main__':
    setup_package()
| |
"""
Copied+modified from rest_framework.decorators, which is licensed under the BSD license:
*******************************************************************************
Copyright (c) 2011-2016, Tom Christie
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
The most important decorator in this module is `@api_view`, which is used
for writing function-based views with REST framework.
There are also various decorators for setting the API policies on function
based views, as well as the `@detail_route` and `@list_route` decorators, which are
used to annotate methods on viewsets that should be included by routers.
"""
from __future__ import unicode_literals
import types
from time import time, sleep
from logging import getLogger
from functools import wraps, partial
from django.conf import settings
from django.http import Http404
from django.utils import six
from django.utils.decorators import available_attrs
from django.core.exceptions import PermissionDenied
from api.views import View
from api.exceptions import ServiceUnavailable
from api.utils.request import is_request
from api.dc.utils import get_dc
from que.lock import TaskLock
from vms.models import Dc, DefaultDc, DummyDc
# Module-level logger for the API decorator helpers below.
logger = getLogger(__name__)
def api_view(http_method_names=None):
    """
    Decorator that converts a function-based view into an APIView subclass.

    Takes a list of allowed methods for the view as an argument.
    """
    http_method_names = ['GET'] if (http_method_names is None) else http_method_names

    def decorator(func):
        # Create the subclass dynamically so the wrapped function's
        # docstring becomes the view class's docstring. Equivalent to:
        #
        #     class WrappedAPIView(View):
        #         pass
        #     WrappedAPIView.__doc__ = func.__doc__   <- not possible directly
        #
        # noinspection PyPep8Naming
        WrappedAPIView = type(
            six.PY3 and 'WrappedAPIView' or b'WrappedAPIView',
            (View,),
            {'__doc__': func.__doc__}
        )

        # api_view applied without (method_names)
        assert not isinstance(http_method_names, types.FunctionType), \
            '@api_view missing list of allowed HTTP methods'

        # api_view applied with eg. string instead of list of strings
        assert isinstance(http_method_names, (list, tuple)), \
            '@api_view expected a list of strings, received %s' % type(http_method_names).__name__

        allowed_methods = set(http_method_names) | {'options'}
        WrappedAPIView.http_method_names = [m.lower() for m in allowed_methods]

        # noinspection PyUnusedLocal
        def handler(self, *args, **kwargs):
            return func(*args, **kwargs)

        for method in http_method_names:
            setattr(WrappedAPIView, method.lower(), handler)

        WrappedAPIView.__name__ = func.__name__

        # Carry over any policy attributes previously stamped onto the
        # function by the companion decorators, falling back to the
        # View defaults.
        for attr in ('renderer_classes', 'parser_classes',
                     'authentication_classes', 'throttle_classes',
                     'permission_classes'):
            setattr(WrappedAPIView, attr,
                    getattr(func, attr, getattr(View, attr)))

        # noinspection PyUnresolvedReferences
        return WrappedAPIView.as_view()
    return decorator
def renderer_classes(_renderer_classes):
    """Store a custom renderer class list on a function-based view.

    ``api_view`` copies the attribute onto the generated APIView subclass.
    """
    def attach(view_func):
        view_func.renderer_classes = _renderer_classes
        return view_func
    return attach
def parser_classes(_parser_classes):
    """Store a custom parser class list on a function-based view.

    ``api_view`` copies the attribute onto the generated APIView subclass.
    """
    def attach(view_func):
        view_func.parser_classes = _parser_classes
        return view_func
    return attach
def authentication_classes(_authentication_classes):
    """Store a custom authentication class list on a function-based view.

    ``api_view`` copies the attribute onto the generated APIView subclass.
    """
    def attach(view_func):
        view_func.authentication_classes = _authentication_classes
        return view_func
    return attach
def throttle_classes(_throttle_classes):
    """Store a custom throttle class list on a function-based view.

    ``api_view`` copies the attribute onto the generated APIView subclass.
    """
    def attach(view_func):
        view_func.throttle_classes = _throttle_classes
        return view_func
    return attach
def permission_classes(_permission_classes):
    """Store a custom permission class list on a function-based view.

    ``api_view`` copies the attribute onto the generated APIView subclass.
    """
    def attach(view_func):
        view_func.permission_classes = _permission_classes
        return view_func
    return attach
def _check_system_update(request):
    """Only SuperAdmins can access the API during system update.

    :param request: Current request; ``request.user.is_staff`` marks SuperAdmins.
    :raises ServiceUnavailable: When an update task is running and the user is not staff.
    """
    from api.system.update.api_views import UpdateView  # local import avoids a circular dependency

    if UpdateView.is_task_running() and not request.user.is_staff:
        raise ServiceUnavailable('System update in progress')
def request_data(catch_dc=True, force_dc=None, permissions=()):
    """Build the ``request_data_decorator`` with the given DC/permission policy."""
    def request_data_decorator(fun):
        """
        API view decorator. Updates "data" keyword argument with request.DATA if necessary.
        Also sets the request.dc attribute to current Datacenter if specified.
        And optionally checks additional permissions which cannot be checked via permission_classes,
        because they are related to current datacenter.
        """
        def wrap(request, *args, **kwargs):
            # Fill the "data" kwarg from the request body / query string unless
            # the caller already supplied it explicitly.
            if kwargs.get('data', None) is None:  # data parameter must exist in view function
                if request.method == 'GET':
                    data_key = 'query_params'
                else:
                    data_key = 'data'

                # noinspection PyBroadException
                try:
                    kwargs['data'] = getattr(request, data_key, {})
                except Exception:
                    # Body parsing can raise; fall back to an empty payload.
                    kwargs['data'] = {}

            dc = getattr(request, 'dc', DummyDc())

            if catch_dc:
                # URL-embedded dc name (only for /api/dc/... endpoints) serves
                # as a fallback for the "dc" key in the request data.
                if '/api/dc/' in request.path:
                    try:
                        _dc_name = kwargs.pop('dc')
                    except KeyError:
                        _dc_name = None
                else:
                    _dc_name = None

                dc_name = kwargs['data'].get('dc', _dc_name)

                # Override request.dc set in DcMiddleware
                if dc_name and dc_name != dc.name:
                    request.dc = get_dc(request, dc_name)

            if force_dc and (force_dc != request.dc.id or dc.is_dummy):
                # Override request.dc set in DcMiddleware and by catch_dc
                # WARNING: Make sure that the user has rights to access this DC
                request.dc = Dc.objects.get_by_id(force_dc)

            # Whenever we set a DC we have to set request.dc_user_permissions right after request.dc is available
            request.dc_user_permissions = request.dc.get_user_permissions(request.user)
            # request.dcs is used by some DC-mixed views - can be overridden by DcPermission
            request.dcs = Dc.objects.none()
            logger.debug('"%s %s (%s)" user="%s" dc="%s" permissions=%s', request.method, fun.__name__, request.path,
                         request.user.username, request.dc.name, request.dc_user_permissions)

            # Run permission checks
            for perm in permissions:
                if not perm(request, fun, args, kwargs):
                    logger.error('Request by user "%s" to access API call "%s %s(%s, %s)" was denied by %s permission '
                                 'in DC "%s"!',
                                 request.user, request.method, fun.__name__, args, kwargs, perm.__name__, request.dc)
                    raise PermissionDenied

            # Only SuperAdmins can access the API during system update
            _check_system_update(request)

            return fun(request, *args, **kwargs)

        wrap.__name__ = fun.__name__
        wrap.__doc__ = fun.__doc__
        return wrap
    return request_data_decorator
# Shortcut: request_data() without datacenter detection from the request.
request_data_nodc = partial(request_data, catch_dc=False)
# Shortcut: request_data() forced to the default datacenter.
request_data_defaultdc = partial(request_data, catch_dc=False, force_dc=settings.VMS_DC_DEFAULT)
def setting_required(setting_name, dc_bound=True, default_dc=False, check_settings=True):
    """
    API / GUI decorator for checking DC settings.

    Raises Http404 when the named setting evaluates to a falsy value in the
    selected settings source (global, default DC, or the request's DC).
    """
    def setting_required_decorator(fun):
        def wrap(request, *args, **kwargs):
            enabled = getattr(settings, setting_name) if check_settings else True

            if default_dc:
                enabled = getattr(DefaultDc().settings, setting_name) and enabled
            elif dc_bound:
                try:
                    enabled = getattr(request.dc.settings, setting_name) and enabled
                except AttributeError:
                    # request.dc (or its settings) may be missing - keep the
                    # global value in that case.
                    pass

            if not enabled:
                raise Http404

            return fun(request, *args, **kwargs)

        wrap.__name__ = fun.__name__
        wrap.__doc__ = fun.__doc__
        return wrap
    return setting_required_decorator
def catch_exception(fun):
    """
    Used as decorator to catch all exceptions and log them without breaking the inner function.
    Can be disabled by using the fail_silently keyword argument, which won't be passed to inner function.
    """
    @wraps(fun, assigned=available_attrs(fun))
    def wrap(*args, **kwargs):
        fail_silently = kwargs.pop('fail_silently', True)

        if not fail_silently:
            return fun(*args, **kwargs)

        try:
            return fun(*args, **kwargs)
        except Exception as exc:
            logger.exception(exc)
            logger.error('Got exception when running %s(%s, %s): %s.', fun.__name__, args, kwargs, exc)
    return wrap
def catch_api_exception(fun):
    """
    Like catch_exception above, but the caught exception is additionally saved
    into the task log (when a request object is found among the positional
    arguments) and fail_silently cannot be disabled.
    """
    from api.task.utils import task_log_exception  # circular imports

    @wraps(fun, assigned=available_attrs(fun))
    def wrap(*args, **kwargs):
        try:
            return fun(*args, **kwargs)
        except Exception as e:
            logger.exception(e)
            logger.error('Got exception when running %s(%s, %s): %s.', fun.__name__, args, kwargs, e)

            # Find a request object among the arguments so the exception can
            # be recorded in the task log; failures of the logging itself must
            # not propagate.
            for arg in args:
                if is_request(arg):
                    try:
                        task_log_exception(arg, e, task_id=getattr(e, 'task_id', None))
                    except Exception as exc:
                        logger.exception(exc)

                    break
            else:
                logger.warning('API exception could not be logged into task log')
    return wrap
def lock(timeout=settings.API_LOCK_TIMEOUT, key_args=(), key_kwargs=(), wait_for_release=False, bound=False,
         base_name=None):
    """
    Ensure that the decorated function does not run in parallel with the same function and arguments.

    A TaskLock keyed by the function name (or base_name) plus selected arguments
    is acquired before the call and always released in a finally block.

    :param timeout: Lock timeout in seconds; also the maximum time to wait when
                    ``wait_for_release`` is True.
    :param key_args: Indexes of positional arguments included in the lock key.
    :param key_kwargs: Names of keyword arguments included in the lock key.
    :param wait_for_release: Poll once per second for the lock instead of
                             giving up immediately.
    :param bound: Set to True for bound methods (skips the "self" argument).
    :param base_name: Lock key prefix; defaults to the function's name.
    """
    def wrap(fun):
        @wraps(fun, assigned=available_attrs(fun))
        def inner(*args, **kwargs):
            if bound:
                params = args[1:]  # The first parameter is a "self" object
            else:
                params = args

            if base_name:
                fun_name = base_name
            else:
                fun_name = fun.__name__

            # Build the lock key from the function name and selected arguments.
            lock_keys = [fun_name]
            lock_keys.extend(str(params[i]) for i in key_args)
            lock_keys.extend(str(kwargs[i]) for i in key_kwargs)
            task_lock = TaskLock(':'.join(lock_keys), desc='Function %s' % fun_name)

            def acquire_lock():
                return task_lock.acquire(time(), timeout=timeout, save_reverse=False)

            if not acquire_lock():
                # FIX: logger.warn() is a deprecated alias - use logger.warning().
                if wait_for_release:
                    logger.warning('Function %s(%s, %s) must wait (%s), because another function is already running',
                                   fun_name, args, kwargs, timeout or 'forever')
                    wait = 0

                    while wait < timeout:
                        sleep(1)
                        wait += 1

                        if acquire_lock():
                            break
                    else:
                        # Loop exhausted without acquiring the lock.
                        logger.warning('Function %s(%s, %s) will not run, because another function is still running '
                                       'and we have waited for too long (%s)', fun_name, args, kwargs, wait)
                        return
                else:
                    logger.warning('Function %s(%s, %s) will not run, because another function is already running',
                                   fun_name, args, kwargs)
                    return

            try:
                return fun(*args, **kwargs)
            finally:
                # Always release, even if fun() raised.
                task_lock.delete(fail_silently=True, delete_reverse=False)

        return inner
    return wrap
| |
#-------------------------------------------------------------------------------
# Name: searchLargestHiatusLens.py
# Purpose: Search for a specific attribute through ZMX files. In this program
# we are interested in finding the lens (design) that has the largest
# (or smallest) hiatus also called nodal space, Null space, or the
# interstitium (i.e. the distance between the two principal planes)
#
# Note [from Zemax Manual]:
# Object space positions are measured with respect to surface 1.
# Image space positions are measured with respect to the image surface.
# The index in both the object space and image space is considered.
#
# Assumptions:
# 1. The last surface is the image surface
# 2. Search only sequential zemax files. The prescription file output
# for Non-sequential Zemax analysis is different from sequential.
# 3. File-names within the search directory are unique.
#
# Note:
# 1. If Zemax is unable to open a certain .zmx file, it pops-up an
# error msg, which the user needs to click. So, in such scenarios
# this program execution would be stalled until the user has clicked
# on the message. That particular file is then excluded from the
# analysis.
#
# Copyright: (c) Indranil Sinharoy, Southern Methodist University, 2013 - 2014
# Licence: MIT License
#-------------------------------------------------------------------------------
from __future__ import division
from __future__ import print_function
import os
#import glob,
import sys
import fnmatch
from operator import itemgetter
import datetime
import Tkinter, tkFileDialog, Tkconstants
# The current Tkinter implementation is not working as expected in Python 3.x
#try: # Python 2
# import Tkinter, tkFileDialog, Tkconstants
#except: # Python 3
# import tkinter as Tkinter
# import tkinter.filedialog as tkFileDialog
# import tkinter.constants as Tkconstants
# Put both the "Examples" and the "PyZDDE" directory in the python search path.
exampleDirectory = os.path.dirname(os.path.realpath(__file__))
ind = exampleDirectory.find('Examples')
pyzddedirectory = exampleDirectory[0:ind-1]
if exampleDirectory not in sys.path:
sys.path.append(exampleDirectory)
if pyzddedirectory not in sys.path:
sys.path.append(pyzddedirectory)
import pyzdde.zdde as pyzdde
#Program control parameters (initial defaults; the Tk dialog below may override them)
ORDERED_HIATUS_DATA_IN_FILE = True # Sorted output in a file ? [May take a little longer time]
SCALE_LENSES = True # Scale lenses/Normalize all lenses to
NORMALIZATION_EFL = 500.00 # Focal length to use for Normalization
ORDERING = 'large2small' # 'large2small' or 'small2large'
HIATUS_UPPER_LIMIT = 20000.00 # Ignore lenses for which hiatus is greater than some value
fDBG_PRINT = False # Turn off/on the debug prints
# ZEMAX file DIRECTORY to search (can have sub-directories)
# NOTE: Windows-style path separator - this script drives Zemax over DDE (Windows only).
zmxfp = pyzddedirectory + "\\ZMXFILES"
#A simple Tkinter GUI prompting for directory
root = Tkinter.Tk()
class TkFileDialog(Tkinter.Frame):
    """Small Tk dialog collecting the run options for the hiatus search.

    Reads and writes the module-level globals (zmxfp, SCALE_LENSES,
    NORMALIZATION_EFL, HIATUS_UPPER_LIMIT, ORDERED_HIATUS_DATA_IN_FILE) and
    terminates the Tk main loop when the user presses "Find".
    """
    def __init__(self, root):
        Tkinter.Frame.__init__(self, root, borderwidth=20,height=32,width=42)
        #Top-level label
        self.label0 = Tkinter.Label(self,text = "Find eXtreme Hiatus",
                                    font=("Helvetica",16),fg='blue',justify=Tkinter.LEFT)
        self.label0.pack()
        # options for buttons
        button_opt = {'fill': Tkconstants.BOTH, 'padx': 5, 'pady': 5}
        checkBox_opt = {'fill': Tkconstants.BOTH, 'padx': 5, 'pady': 5}
        # define first button
        self.b1 = Tkinter.Button(self, text='Select Directory', command=self.askdirectory)
        self.b1.pack(**button_opt)
        #Add a checkbox button (for lens scaling option)
        self.lensScaleOptVar = Tkinter.IntVar(value=0)
        self.c1 = Tkinter.Checkbutton(self,text="Enable Lens scaling ?",
                                      variable=self.lensScaleOptVar,command=self.cb1,onvalue=1)
        self.c1.pack(**checkBox_opt)
        self.c1.select() #The check-box is checked initially
        #Add a label to indicate/enter normalization EFL
        self.label1 = Tkinter.Label(self,text = "Normalization EFL", justify=Tkinter.LEFT)
        self.label1.pack()
        #Add Entry Widget to enter default normalization EFL
        self.normEFLVar = Tkinter.StringVar()
        self.normEFLentry = Tkinter.Entry(self,text="test",textvariable=self.normEFLVar)
        self.normEFLentry.pack()
        self.normEFLentry.insert(0, str(NORMALIZATION_EFL))
        #Add another label
        self.label2 = Tkinter.Label(self,text = "Ignore values above:", justify=Tkinter.LEFT)
        self.label2.pack()
        #Add an Entry Widget to enter value for upper level hiatus (string)
        self.maxHiatusVar = Tkinter.StringVar()
        self.maxHiatusEntry = Tkinter.Entry(self,text="test",textvariable=self.maxHiatusVar)
        self.maxHiatusEntry.pack()
        self.maxHiatusEntry.insert(0, str(HIATUS_UPPER_LIMIT))
        # checkbox button 2 (For text dump option)
        self.txtFileDumpVar = Tkinter.IntVar(value=0)
        self.c2 = Tkinter.Checkbutton(self,text="Save to a TXT file?",
                                      variable=self.txtFileDumpVar,command=self.cb2,onvalue=1)
        self.c2.pack(**checkBox_opt)
        self.c2.select() #The check-box is checked initially
        #Add a "Find" button
        self.b2 = Tkinter.Button(self,text='Find',fg="red",command=self.find)
        self.b2.pack(**button_opt)

    def askdirectory(self):
        """Returns a selected directoryname."""
        global zmxfp
        zmxfp = tkFileDialog.askdirectory(parent=root,initialdir=zmxfp,
                                          title='Please navigate to a directory')
        return

    def cb1(self):
        """Checkbox callback: sync the SCALE_LENSES global with the widget."""
        global SCALE_LENSES
        SCALE_LENSES = bool(self.lensScaleOptVar.get())
        # BUGFIX: the original used "if ~SCALE_LENSES:". "~" is bitwise NOT
        # (~False == -1, ~True == -2), so the test was ALWAYS truthy; logical
        # "not" is what was intended. The branch is currently a no-op kept as
        # a placeholder for disabling the EFL entry widget.
        if not SCALE_LENSES:
            #self.normEFLentry.
            pass
        return

    def cb2(self):
        """Checkbox callback: sync ORDERED_HIATUS_DATA_IN_FILE with the widget."""
        global ORDERED_HIATUS_DATA_IN_FILE
        ORDERED_HIATUS_DATA_IN_FILE = bool(self.txtFileDumpVar.get())
        return

    def find(self):
        """Read the entry fields into the globals and end the GUI loop."""
        global HIATUS_UPPER_LIMIT
        global NORMALIZATION_EFL
        self.normEFLentry.focus_set()
        NORMALIZATION_EFL = float(self.normEFLentry.get())
        self.maxHiatusEntry.focus_set()
        HIATUS_UPPER_LIMIT = float(self.maxHiatusEntry.get())
        root.quit()
        root.destroy()
TkFileDialog(root).pack()
root.mainloop()  # blocks until find() calls root.quit()/root.destroy()
#end of Tkinter GUI code
# Create a DDE channel object
ln = pyzdde.PyZDDE()
#Initialize the DDE link
stat = ln.zDDEInit()
#Get all the zemax files in the directories recursively
pattern = "*.zmx"
filenames = [os.path.join(dirpath,f)
             for dirpath, subFolders, files in os.walk(zmxfp)
             for f in fnmatch.filter(files,pattern)]
# Name of the searched directory; used in the output file name below.
parentFolder = str(os.path.split(zmxfp)[1])
###To just use one file FOR DEBUGGING PURPOSE -- comment out this section
##oneFile = []
##oneFile.append(filenames[1])
##filenames = oneFile
###end of "just use one file to test"
print("SCALE_LENSES: ", SCALE_LENSES)
print("NORMALIZATION_EFL: ", NORMALIZATION_EFL)
# Timestamp used for the report file name.
now = datetime.datetime.now()
# ###################
#   MAIN CODE LOGIC
# ###################
#Create a dictionary to store the filenames and hiatus
hiatusData = dict()
scaleFactorData = dict()
largestHiatusValue = 0.0 #init the variables for largest hiatus
largestHiatusLensFile = "None"
lensFileCount = 0
totalNumLensFiles = len(filenames)
totalFilesNotLoaded = 0 #File count of files that couldn't be loaded by Zemax
filesNotLoaded = [] #List of files that couldn't be loaded by Zemax
# Loop through all the files in filenames, load the zemax files, get the data
for lens_file in filenames:
    if fDBG_PRINT:
        print("Lens file: ",lens_file)
    #Load the lens in to the Zemax DDE server
    ret = ln.zLoadFile(lens_file)
    if ret != 0:
        # Zemax could not load this design; record it and move on.
        print(ret, lens_file, " Couldn't open!")
        filesNotLoaded.append(lens_file)
        totalFilesNotLoaded +=1
        continue
    #assert ret == 0
    #In order to maintain the units, set the units to mm for all lenses. Also
    #ensure that the global reference surface for all lenses is set to surface 1,
    #all other system settings should remain same.
    recSystemData_g = ln.zGetSystem() #Get the current system parameters
    numSurf = recSystemData_g[0]
    unitCode = recSystemData_g[1] # lens units code (0,1,2,or 3 for mm, cm, in, or M)
    stopSurf = recSystemData_g[2]
    nonAxialFlag = recSystemData_g[3]
    rayAimingType = recSystemData_g[4]
    adjust_index = recSystemData_g[5]
    temp = recSystemData_g[6]
    pressure = recSystemData_g[7]
    globalRefSurf = recSystemData_g[8]
    #Set the system parameters: units=0 (mm), global reference surface=1;
    #stop surface, ray aiming, temperature and pressure are kept unchanged.
    recSystemData_s = ln.zSetSystem(0,stopSurf,rayAimingType,0,temp,pressure,1)
    #Scale lens to a normalized EFFL
    scaleFactor = 1.00
    if SCALE_LENSES:
        #Get first order EFL
        efl = ln.zGetFirst()[0]
        #Determine scale factor
        scaleFactor = abs(NORMALIZATION_EFL/efl)
        if fDBG_PRINT:
            print("EFFL: ",efl," Scale Factor: ", scaleFactor)
        #Scale Lens
        ret_ls = ln.zLensScale(scaleFactor)
        if ret_ls == -1: # Lens scale failure, don't bother to calculate hiatus
            print("Lens scaling failed for: ",lens_file)
            continue
    #Update the lens
    #ret = ln.zGetUpdate() ... I don't think the designs should be updated...
    #as we don't need to re-optimize, etc.
    #assert ret == 0
    # Temporary prescription dump used by zGetHiatus (deleted afterwards).
    textFileName = exampleDirectory + '\\' + "searchSpecAttr_Prescription.txt"
    #Get the Hiatus for the lens design
    hiatus = ln.zGetHiatus(textFileName, keepFile=False)
    if hiatus > HIATUS_UPPER_LIMIT:
        # Out-of-range designs are excluded from both count and report.
        continue
    lensFileCount +=1 #Increment the lens files count
    if hiatus > largestHiatusValue:
        largestHiatusValue = hiatus
        largestHiatusLensFile = os.path.basename(lens_file)
    #Add to the dictionary (assumes file base-names are unique, see header)
    hiatusData[os.path.basename(lens_file)] = hiatus
    scaleFactorData[os.path.basename(lens_file)] = scaleFactor
#Close the DDE channel before processing the dictionary.
ln.zDDEClose()
if fDBG_PRINT:
    print("Hiatus data dictionary:\n", hiatusData)
if ORDERED_HIATUS_DATA_IN_FILE:
    #Sort the "dictionary" in 'large2small' or 'small2large' order
    #The output (hiatusData_sorted) is a list of tuples
    if ORDERING == 'small2large':
        hiatusData_sorted = sorted(hiatusData.items(),key=itemgetter(1))
    else:
        hiatusData_sorted = sorted(hiatusData.items(),key=itemgetter(1),reverse=True)
    #Open a file for writing the data (timestamped so runs never overwrite)
    dtStamp = "_%d_%d_%d_%dh_%dm_%ds" %(now.year,now.month,now.day,now.hour,now.minute,now.second)
    fileref_hds = open("searchLargestHiatusLens_"+parentFolder+dtStamp+".txt",'w')
    fileref_hds.write("LENS HIATUS MEASUREMENT:\n\n")
    fileref_hds.write("Date and time: " + now.strftime("%Y-%m-%d %H:%M"))
    fileref_hds.write("\nUnits: mm")
    if SCALE_LENSES:
        fileref_hds.write("\nLens Scaling for normalization: ON. Normalization EFL = %1.2f"%(NORMALIZATION_EFL))
    else:
        fileref_hds.write("\nLens Scaling for normalization: OFF")
    fileref_hds.write("\nDirectory: "+ zmxfp)
    fileref_hds.write("\n%s Lenses analyzed out of %s lenses!"%(lensFileCount,
                                                               totalNumLensFiles))
    fileref_hds.write("\nLens files not loaded by Zemax: %s (See list below)"%(totalFilesNotLoaded))
    fileref_hds.write("\nLenses with hiatus above %s have been ignored.\n\n"%(HIATUS_UPPER_LIMIT))
    fileref_hds.write("\nThe sorted list is:\n\n")
    for i in hiatusData_sorted:
        fileref_hds.write("%s\t\t%1.2f\t(scale factor = %1.2f)\n"%(i[0],i[1],scaleFactorData[i[0]]))
    fileref_hds.write("\n\nLens files that Zemax couldn't open for analysis:\n\n")
    for fl in filesNotLoaded:
        fileref_hds.write("%s\n"%fl)
    fileref_hds.close()
#Print the largest lens having the largest hiatus and the hiatus value
print(lensFileCount, "lenses analyzed for largest hiatus (in mm) out of", totalNumLensFiles, "lenses.")
print("Largest Hiatus Lens:", largestHiatusLensFile)
print("Hiatus:", largestHiatusValue)
| |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import jsonschema
from mongoengine import ValidationError
from st2common import log as logging
from st2common.exceptions.apivalidation import ValueValidationException
from st2common.exceptions.triggers import TriggerDoesNotExistException
from st2api.controllers import resource
from st2api.controllers.controller_transforms import transform_to_bool
from st2api.controllers.v1.ruleviews import RuleViewController
from st2common.models.api.rule import RuleAPI
from st2common.persistence.rule import Rule
from st2common.rbac.types import PermissionType
from st2common.rbac import utils as rbac_utils
from st2common.rbac.utils import assert_user_has_rule_trigger_and_action_permission
from st2common.router import exc
from st2common.router import abort
from st2common.router import Response
from st2common.services.triggers import cleanup_trigger_db_for_rule, increment_trigger_ref_count
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
class RuleController(resource.ContentPackResourceController):
    """
    Implements the RESTful web endpoint that handles
    the lifecycle of Rules in the system.
    """
    views = RuleViewController()

    model = RuleAPI
    access = Rule
    supported_filters = {
        'name': 'name',
        'pack': 'pack',
        'action': 'action.ref',
        'trigger': 'trigger',
        'enabled': 'enabled'
    }
    filter_transform_functions = {
        'enabled': transform_to_bool
    }
    query_options = {
        'sort': ['pack', 'name']
    }
    include_reference = True

    def get_all(self, sort=None, offset=0, limit=None, **raw_filters):
        """Return all rules. Handles GET /rules/ requests."""
        # A rule may reference a trigger which no longer exists - don't fail the listing.
        from_model_kwargs = {'ignore_missing_trigger': True}
        return super(RuleController, self)._get_all(from_model_kwargs=from_model_kwargs,
                                                    sort=sort,
                                                    offset=offset,
                                                    limit=limit,
                                                    raw_filters=raw_filters)

    def get_one(self, ref_or_id, requester_user):
        """Return a single rule by reference or id. Handles GET /rules/<ref_or_id>."""
        from_model_kwargs = {'ignore_missing_trigger': True}
        return super(RuleController, self)._get_one(ref_or_id, from_model_kwargs=from_model_kwargs,
                                                    requester_user=requester_user,
                                                    permission_type=PermissionType.RULE_VIEW)

    def post(self, rule, requester_user):
        """
        Create a new rule.

        Handles requests:
            POST /rules/
        """
        permission_type = PermissionType.RULE_CREATE
        rbac_utils.assert_user_has_resource_api_permission(user_db=requester_user,
                                                           resource_api=rule,
                                                           permission_type=permission_type)

        try:
            rule_db = RuleAPI.to_model(rule)
            LOG.debug('/rules/ POST verified RuleAPI and formulated RuleDB=%s', rule_db)

            # Check referenced trigger and action permissions
            # Note: This needs to happen after "to_model" call since to_model performs some
            # validation (trigger exists, etc.)
            assert_user_has_rule_trigger_and_action_permission(user_db=requester_user,
                                                               rule_api=rule)

            rule_db = Rule.add_or_update(rule_db)
            # After the rule has been added modify the ref_count. This way a failure to add
            # the rule due to violated constraints will have no impact on ref_count.
            increment_trigger_ref_count(rule_api=rule)
        except (ValidationError, ValueError) as e:
            LOG.exception('Validation failed for rule data=%s.', rule)
            abort(http_client.BAD_REQUEST, str(e))
            return
        except (ValueValidationException, jsonschema.ValidationError) as e:
            LOG.exception('Validation failed for rule data=%s.', rule)
            abort(http_client.BAD_REQUEST, str(e))
            return
        except TriggerDoesNotExistException:
            # The exception instance carries no useful detail - build the
            # message from the rule payload instead.
            msg = ('Trigger "%s" defined in the rule does not exist in system or it\'s missing '
                   'required "parameters" attribute' % (rule.trigger['type']))
            LOG.exception(msg)
            abort(http_client.BAD_REQUEST, msg)
            return

        extra = {'rule_db': rule_db}
        LOG.audit('Rule created. Rule.id=%s' % (rule_db.id), extra=extra)

        rule_api = RuleAPI.from_model(rule_db)
        return Response(json=rule_api, status=exc.HTTPCreated.code)

    def put(self, rule, rule_ref_or_id, requester_user):
        """Update an existing rule. Handles PUT /rules/<rule_ref_or_id>."""
        rule_db = self._get_by_ref_or_id(rule_ref_or_id)

        permission_type = PermissionType.RULE_MODIFY
        # NOTE(review): this passes the API object as "resource_db" while
        # delete() below passes the DB object (rule_db) - confirm whether
        # rule_db was intended here.
        rbac_utils.assert_user_has_resource_db_permission(user_db=requester_user,
                                                          resource_db=rule,
                                                          permission_type=permission_type)

        LOG.debug('PUT /rules/ lookup with id=%s found object: %s', rule_ref_or_id, rule_db)

        try:
            # BUGFIX: "rule.id is not ''" compared identity with a string
            # literal (implementation-defined, SyntaxWarning on Python >= 3.8);
            # use equality instead.
            if rule.id is not None and rule.id != '' and rule.id != rule_ref_or_id:
                LOG.warning('Discarding mismatched id=%s found in payload and using uri_id=%s.',
                            rule.id, rule_ref_or_id)

            old_rule_db = rule_db
            rule_db = RuleAPI.to_model(rule)

            # Check referenced trigger and action permissions
            # Note: This needs to happen after "to_model" call since to_model performs some
            # validation (trigger exists, etc.)
            assert_user_has_rule_trigger_and_action_permission(user_db=requester_user,
                                                               rule_api=rule)

            rule_db.id = rule_ref_or_id
            rule_db = Rule.add_or_update(rule_db)
            # After the rule has been added modify the ref_count. This way a failure to add
            # the rule due to violated constraints will have no impact on ref_count.
            increment_trigger_ref_count(rule_api=rule)
        except (ValueValidationException, jsonschema.ValidationError, ValueError) as e:
            LOG.exception('Validation failed for rule data=%s', rule)
            abort(http_client.BAD_REQUEST, str(e))
            return

        # use old_rule_db for cleanup.
        cleanup_trigger_db_for_rule(old_rule_db)

        extra = {'old_rule_db': old_rule_db, 'new_rule_db': rule_db}
        LOG.audit('Rule updated. Rule.id=%s.' % (rule_db.id), extra=extra)

        rule_api = RuleAPI.from_model(rule_db)
        return rule_api

    def delete(self, rule_ref_or_id, requester_user):
        """
        Delete a rule.

        Handles requests:
            DELETE /rules/1
        """
        rule_db = self._get_by_ref_or_id(ref_or_id=rule_ref_or_id)

        permission_type = PermissionType.RULE_DELETE
        rbac_utils.assert_user_has_resource_db_permission(user_db=requester_user,
                                                          resource_db=rule_db,
                                                          permission_type=permission_type)

        LOG.debug('DELETE /rules/ lookup with id=%s found object: %s', rule_ref_or_id, rule_db)

        try:
            Rule.delete(rule_db)
        except Exception as e:
            LOG.exception('Database delete encountered exception during delete of id="%s".',
                          rule_ref_or_id)
            abort(http_client.INTERNAL_SERVER_ERROR, str(e))
            return

        # use old_rule_db for cleanup.
        cleanup_trigger_db_for_rule(rule_db)

        extra = {'rule_db': rule_db}
        LOG.audit('Rule deleted. Rule.id=%s.' % (rule_db.id), extra=extra)

        return Response(status=http_client.NO_CONTENT)
rule_controller = RuleController()
| |
# -*- coding: utf-8 -*-
# imageio is distributed under the terms of the (new) BSD License.
"""Read/Write video using FFMPEG
Backend Library: https://github.com/imageio/imageio-ffmpeg
.. note::
To use this plugin you have to install its backend::
pip install imageio[ffmpeg]
The ffmpeg format provides reading and writing for a wide range of movie formats
such as .avi, .mpeg, .mp4, etc. as well as the ability to read streams from
webcams and USB cameras. It is based on ffmpeg and is inspired by/based `moviepy
<https://github.com/Zulko/moviepy/>`_ by Zulko.
Parameters for reading
----------------------
fps : scalar
The number of frames per second to read the data at. Default None (i.e.
read at the file's own fps). One can use this for files with a
variable fps, or in cases where imageio is unable to correctly detect
the fps. In case of trouble opening camera streams, it may help to set an
explicit fps value matching a framerate supported by the camera.
loop : bool
If True, the video will rewind as soon as a frame is requested
beyond the last frame. Otherwise, IndexError is raised. Default False.
Setting this to True will internally call ``count_frames()``,
and set the reader's length to that value instead of inf.
size : str | tuple
The frame size (i.e. resolution) to read the images, e.g.
(100, 100) or "640x480". For camera streams, this allows setting
the capture resolution. For normal video data, ffmpeg will
rescale the data.
dtype : str | type
The dtype for the output arrays. Determines the bit-depth that
is requested from ffmpeg. Supported dtypes: uint8, uint16.
Default: uint8.
pixelformat : str
The pixel format for the camera to use (e.g. "yuyv422" or
"gray"). The camera needs to support the format in order for
this to take effect. Note that the images produced by this
reader are always RGB.
input_params : list
List additional arguments to ffmpeg for input file options.
(Can also be provided as ``ffmpeg_params`` for backwards compatibility)
Example ffmpeg arguments to use aggressive error handling:
['-err_detect', 'aggressive']
output_params : list
List additional arguments to ffmpeg for output file options (i.e. the
stream being read by imageio).
print_info : bool
Print information about the video file as reported by ffmpeg.
Parameters for writing
----------------------
fps : scalar
The number of frames per second. Default 10.
codec : str
    the video codec to use. Default 'libx264', which represents the
    widely available mpeg4. Except when saving .wmv files, then the
    default is 'msmpeg4', which is more commonly supported on Windows.
quality : float | None
Video output quality. Default is 5. Uses variable bit rate. Highest
quality is 10, lowest is 0. Set to None to prevent variable bitrate
flags to FFMPEG so you can manually specify them using output_params
instead. Specifying a fixed bitrate using 'bitrate' disables this
parameter.
bitrate : int | None
Set a constant bitrate for the video encoding. Default is None causing
'quality' parameter to be used instead. Better quality videos with
smaller file sizes will result from using the 'quality' variable
    bitrate parameter rather than specifying a fixed bitrate with this
parameter.
pixelformat: str
    The output video pixel format. Default is 'yuv420p' which is the most
    widely supported by video players.
input_params : list
List additional arguments to ffmpeg for input file options (i.e. the
stream that imageio provides).
output_params : list
List additional arguments to ffmpeg for output file options.
(Can also be provided as ``ffmpeg_params`` for backwards compatibility)
Example ffmpeg arguments to use only intra frames and set aspect ratio:
['-intra', '-aspect', '16:9']
ffmpeg_log_level: str
Sets ffmpeg output log level. Default is "warning".
Values can be "quiet", "panic", "fatal", "error", "warning", "info"
"verbose", or "debug". Also prints the FFMPEG command being used by
imageio if "info", "verbose", or "debug".
macro_block_size: int
Size constraint for video. Width and height, must be divisible by this
number. If not divisible by this number imageio will tell ffmpeg to
scale the image up to the next closest size
divisible by this number. Most codecs are compatible with a macroblock
size of 16 (default), some can go smaller (4, 8). To disable this
automatic feature set it to None or 1, however be warned many players
can't decode videos that are odd in size and some codecs will produce
poor results or fail. See https://en.wikipedia.org/wiki/Macroblock.
Notes
-----
If you are using anaconda and ``anaconda/ffmpeg`` you will not be able to
encode/decode H.264 (likely due to licensing concerns). If you need this
format on anaconda install ``conda-forge/ffmpeg`` instead.
You can use the ``IMAGEIO_FFMPEG_EXE`` environment variable to force using a
specific ffmpeg executable.
To get the number of frames before having read them all, you can use the
``reader.count_frames()`` method (the reader will then use
``imageio_ffmpeg.count_frames_and_secs()`` to get the exact number of frames,
note that this operation can take a few seconds on large files). Alternatively,
the number of frames can be estimated from the fps and duration in the meta data
(though these values themselves are not always present/reliable).
"""
import re
import sys
import time
import logging
import platform
import threading
import subprocess as sp
import numpy as np
from ..core import Format, image_as_uint
logger = logging.getLogger(__name__)
# Get camera format
# ffmpeg needs a platform-specific capture backend ("input format") to read
# from camera devices; the value is passed as the "-f" argument elsewhere.
if sys.platform.startswith("win"):
    CAM_FORMAT = "dshow"  # dshow or vfwcap
elif sys.platform.startswith("linux"):
    CAM_FORMAT = "video4linux2"
elif sys.platform.startswith("darwin"):
    CAM_FORMAT = "avfoundation"
else:  # pragma: no cover
    CAM_FORMAT = "unknown-cam-format"
def download(directory=None, force_download=False):  # pragma: no cover
    """Raise an error: downloading ffmpeg through imageio is no longer supported.

    Kept only so old call sites fail with a helpful message; the ffmpeg binary
    now ships with the ``imageio-ffmpeg`` package. Both parameters are ignored.
    """
    # FIX: removed the stray trailing apostrophe from the error message.
    raise RuntimeError(
        "imageio.ffmpeg.download() has been deprecated. "
        "Use 'pip install imageio-ffmpeg' instead."
    )
# For backwards compatibility - we dont use this ourselves
def get_exe():  # pragma: no cover
    """Wrapper for imageio_ffmpeg.get_ffmpeg_exe().

    Returns the path to the ffmpeg executable provided by the imageio-ffmpeg
    package (raises ImportError if that package is not installed).
    """
    import imageio_ffmpeg

    return imageio_ffmpeg.get_ffmpeg_exe()
_ffmpeg_api = None
def _get_ffmpeg_api():
    """Import the ``imageio_ffmpeg`` backend once and cache it module-wide.

    Raises ImportError with install instructions when the backend is missing.
    """
    global _ffmpeg_api

    if _ffmpeg_api is not None:
        return _ffmpeg_api

    try:
        import imageio_ffmpeg
    except ImportError:
        raise ImportError(
            "To use the imageio ffmpeg plugin you need to "
            "'pip install imageio-ffmpeg'"
        )

    _ffmpeg_api = imageio_ffmpeg
    return _ffmpeg_api
class FfmpegFormat(Format):
"""Read/Write ImageResources using FFMPEG.
See :mod:`imageio.plugins.ffmpeg`
"""
def _can_read(self, request):
if request.mode[1] not in "I?":
return False
# Read from video stream?
# Note that we could write the _video flag here, but a user might
# select this format explicitly (and this code is not run)
if re.match(r"<video(\d+)>", request.filename):
return True
# Read from file that we know?
if request.extension in self.extensions:
return True
def _can_write(self, request):
if request.mode[1] in (self.modes + "?"):
if request.extension in self.extensions:
return True
# --
class Reader(Format.Reader):
_frame_catcher = None
_read_gen = None
        def _get_cam_inputname(self, index):
            """Translate a ``<videoN>`` index into the platform-specific
            input name that ffmpeg expects for webcam capture."""
            if sys.platform.startswith("linux"):
                # e.g. <video0> -> /dev/video0
                return "/dev/" + self.request._video[1:-1]
            elif sys.platform.startswith("win"):
                # Ask ffmpeg for list of dshow device names
                ffmpeg_api = _get_ffmpeg_api()
                cmd = [
                    ffmpeg_api.get_ffmpeg_exe(),
                    "-list_devices",
                    "true",
                    "-f",
                    CAM_FORMAT,
                    "-i",
                    "dummy",
                ]
                # Set `shell=True` in sp.run to prevent popup of a command
                # line window in frozen applications. Note: this would be a
                # security vulnerability if user-input goes into the cmd.
                # Note that the ffmpeg process returns with exit code 1 when
                # using `-list_devices` (or `-list_options`), even if the
                # command is successful, so we set `check=False` explicitly.
                completed_process = sp.run(
                    cmd,
                    stdout=sp.PIPE,
                    stderr=sp.PIPE,
                    encoding="utf-8",
                    shell=True,
                    check=False,
                )
                # Return device name at index (the listing goes to stderr)
                try:
                    name = parse_device_names(completed_process.stderr)[index]
                except IndexError:
                    raise IndexError("No ffdshow camera at index %i." % index)
                return "video=%s" % name
            elif sys.platform.startswith("darwin"):
                # Appears that newer ffmpeg builds don't support -list-devices
                # on OS X. But you can directly open the camera by index.
                name = str(index)
                return name
            else:  # pragma: no cover
                return "??"
        def _open(
            self,
            loop=False,
            size=None,
            dtype=None,
            pixelformat=None,
            print_info=False,
            ffmpeg_params=None,
            input_params=None,
            output_params=None,
            fps=None,
        ):
            """Open the file or camera stream and boot the ffmpeg subprocess.

            Parameters
            ----------
            loop : bool
                If True the video restarts at the end; requires an exact
                frame count, which is computed up front (can be slow).
            size : tuple | str | None
                Output size as ``(W, H)`` or ``"WxH"``; None keeps source size.
            dtype : numpy dtype spec | None
                Output dtype, 'uint8' (default) or 'uint16'.
            pixelformat : str | None
                Input pixel format, passed to ffmpeg for camera streams.
            print_info : bool
                Unused here; kept for backwards compatibility of the signature.
            ffmpeg_params, input_params, output_params : list | None
                Extra ffmpeg command-line parameters; ``ffmpeg_params`` is a
                backward-compat alias appended to the input parameters.
            fps : float | None
                Not read here; ``_initialize`` re-reads it from
                ``self.request.kwargs``.
            """
            # Get generator functions
            self._ffmpeg_api = _get_ffmpeg_api()
            # Process input args
            self._arg_loop = bool(loop)
            if size is None:
                self._arg_size = None
            elif isinstance(size, tuple):
                self._arg_size = "%ix%i" % size
            elif isinstance(size, str) and "x" in size:
                self._arg_size = size
            else:
                raise ValueError('FFMPEG size must be tuple of "NxM"')
            if pixelformat is None:
                pass
            elif not isinstance(pixelformat, str):
                raise ValueError("FFMPEG pixelformat must be str")
            if dtype is None:
                self._dtype = np.dtype("uint8")
            else:
                self._dtype = np.dtype(dtype)
                allowed_dtypes = ["uint8", "uint16"]
                if self._dtype.name not in allowed_dtypes:
                    raise ValueError(
                        "dtype must be one of: {}".format(", ".join(allowed_dtypes))
                    )
            self._arg_pixelformat = pixelformat
            self._arg_input_params = input_params or []
            self._arg_output_params = output_params or []
            self._arg_input_params += ffmpeg_params or []  # backward compat
            # Write "_video"_arg - indicating webcam support
            self.request._video = None
            regex_match = re.match(r"<video(\d+)>", self.request.filename)
            if regex_match:
                self.request._video = self.request.filename
            # Get local filename
            if self.request._video:
                index = int(regex_match.group(1))
                self._filename = self._get_cam_inputname(index)
            else:
                self._filename = self.request.get_local_filename()
                # When passed to ffmpeg on command line, carets need to be escaped.
                self._filename = self._filename.replace("^", "^^")
            # Determine pixel format and depth
            self._depth = 3
            if self._dtype.name == "uint8":
                self._pix_fmt = "rgb24"
                self._bytes_per_channel = 1
            else:
                self._pix_fmt = "rgb48le"
                self._bytes_per_channel = 2
            # Initialize parameters
            self._pos = -1
            self._meta = {"plugin": "ffmpeg"}
            self._lastread = None
            # Calculating this from fps and duration is not accurate,
            # and calculating it exactly with ffmpeg_api.count_frames_and_secs
            # takes too long to do for each video. But we need it for looping.
            self._nframes = float("inf")
            if self._arg_loop and not self.request._video:
                self._nframes = self.count_frames()
            self._meta["nframes"] = self._nframes
            # Specify input framerate? (only on macOS)
            # Ideally we'd get the supported framerate from the metadata, but we get the
            # metadata when we boot ffmpeg ... maybe we could refactor this so we can
            # get the metadata beforehand, but for now we'll just give it 2 tries on MacOS,
            # one with fps 30 and one with fps 15.
            need_fps = (
                self.request._video
                and platform.system().lower() == "darwin"
                and "-framerate" not in str(self._arg_input_params)
            )
            if need_fps:
                self._arg_input_params.extend(["-framerate", str(float(30))])
            # Start ffmpeg subprocess and get meta information
            try:
                self._initialize()
            except IndexError:
                # Specify input framerate again, this time different.
                if need_fps:
                    self._arg_input_params[-1] = str(float(15))
                    self._initialize()
                else:
                    raise
            # For cameras, create thread that keeps reading the images
            if self.request._video:
                self._frame_catcher = FrameCatcher(self._read_gen)
            # For reference - but disabled, because it is inaccurate
            # if self._meta["nframes"] == float("inf"):
            #     if self._meta.get("fps", 0) > 0:
            #         if self._meta.get("duration", 0) > 0:
            #             n = round(self._meta["duration"] * self._meta["fps"])
            #             self._meta["nframes"] = int(n)
        def _close(self):
            """Stop the camera frame-catcher thread (if any), then close the
            read generator, which terminates the ffmpeg subprocess."""
            # First close the frame catcher, because we cannot close the gen
            # if the frame catcher thread is using it
            if self._frame_catcher is not None:
                self._frame_catcher.stop_me()
                self._frame_catcher = None
            if self._read_gen is not None:
                self._read_gen.close()
                self._read_gen = None
def count_frames(self):
"""Count the number of frames. Note that this can take a few
seconds for large files. Also note that it counts the number
of frames in the original video and does not take a given fps
into account.
"""
# This would have been nice, but this does not work :(
# oargs = []
# if self.request.kwargs.get("fps", None):
# fps = float(self.request.kwargs["fps"])
# oargs += ["-r", "%.02f" % fps]
cf = self._ffmpeg_api.count_frames_and_secs
return cf(self._filename)[0]
        def _get_length(self):
            """Return the frame count: ``inf`` unless ``loop=True`` caused an
            exact count to be taken in ``_open``."""
            return self._nframes  # only not inf if loop is True
        def _get_data(self, index):
            """Reads a frame at index. Note for coders: getting an
            arbitrary frame in the video with ffmpeg can be painfully
            slow if some decoding has to be done. This function tries
            to avoid fetching arbitrary frames whenever possible, by
            moving between adjacent frames."""
            # Modulo index (for looping)
            if self._arg_loop and self._nframes < float("inf"):
                index %= self._nframes
            if index == self._pos:
                # Same frame as last time: serve the cached image
                return self._lastread, dict(new=False)
            elif index < 0:
                raise IndexError("Frame index must be >= 0")
            elif index >= self._nframes:
                raise IndexError("Reached end of video")
            else:
                # Seeking backwards or far ahead restarts ffmpeg at the target
                # position; small forward jumps just read and discard frames.
                if (index < self._pos) or (index > self._pos + 100):
                    self._initialize(index)
                else:
                    self._skip_frames(index - self._pos - 1)
                result, is_new = self._read_frame()
                self._pos = index
                return result, dict(new=is_new)
        def _get_meta_data(self, index):
            """Return the movie-level meta data dict (same for every index)."""
            return self._meta
        def _initialize(self, index=0):
            """(Re)start the ffmpeg subprocess, optionally seeking to the
            frame at ``index``, and read the stream meta data."""
            # Close the current generator, and thereby terminate its subprocess
            if self._read_gen is not None:
                self._read_gen.close()
            iargs = []
            oargs = []
            # Create input args
            iargs += self._arg_input_params
            if self.request._video:
                iargs += ["-f", CAM_FORMAT]
                if self._arg_pixelformat:
                    iargs += ["-pix_fmt", self._arg_pixelformat]
                if self._arg_size:
                    iargs += ["-s", self._arg_size]
            elif index > 0:  # re-initialize / seek
                # Note: only works if we initialized earlier, and now have meta
                # Some info here: https://trac.ffmpeg.org/wiki/Seeking
                # There are two ways to seek, one before -i (input_params) and
                # after (output_params). The former is fast, because it uses
                # keyframes, the latter is slow but accurate. According to
                # the article above, the fast method should also be accurate
                # from ffmpeg version 2.1, however in version 4.1 our tests
                # start failing again. Not sure why, but we can solve this
                # by combining slow and fast. Seek the long stretch using
                # the fast method, and seek the last 10s the slow way.
                starttime = index / self._meta["fps"]
                seek_slow = min(10, starttime)
                seek_fast = starttime - seek_slow
                # We used to have this epsilon earlier, when we did not use
                # the slow seek. I don't think we need it anymore.
                # epsilon = -1 / self._meta["fps"] * 0.1
                iargs += ["-ss", "%.06f" % (seek_fast)]
                oargs += ["-ss", "%.06f" % (seek_slow)]
            # Output args, for writing to pipe
            if self._arg_size:
                oargs += ["-s", self._arg_size]
            if self.request.kwargs.get("fps", None):
                fps = float(self.request.kwargs["fps"])
                oargs += ["-r", "%.02f" % fps]
            oargs += self._arg_output_params
            # Get pixelformat and bytes per pixel
            pix_fmt = self._pix_fmt
            bpp = self._depth * self._bytes_per_channel
            # Create generator
            rf = self._ffmpeg_api.read_frames
            self._read_gen = rf(
                self._filename, pix_fmt, bpp, input_params=iargs, output_params=oargs
            )
            # Read meta data. This start the generator (and ffmpeg subprocess)
            if self.request._video:
                # With cameras, catch error and turn into IndexError
                try:
                    meta = self._read_gen.__next__()
                except IOError as err:
                    err_text = str(err)
                    if "darwin" in sys.platform:
                        if "Unknown input format: 'avfoundation'" in err_text:
                            err_text += (
                                "Try installing FFMPEG using "
                                "home brew to get a version with "
                                "support for cameras."
                            )
                    raise IndexError(
                        "No (working) camera at {}.\n\n{}".format(
                            self.request._video, err_text
                        )
                    )
                else:
                    self._meta.update(meta)
            elif index == 0:
                self._meta.update(self._read_gen.__next__())
            else:
                self._read_gen.__next__()  # we already have meta data
def _skip_frames(self, n=1):
"""Reads and throws away n frames"""
for i in range(n):
self._read_gen.__next__()
self._pos += n
        def _read_frame(self):
            """Read the next frame's raw bytes (from ffmpeg, or from the
            frame-catcher thread for cameras) and return ``(image, is_new)``."""
            # Read and convert to numpy array
            w, h = self._meta["size"]
            framesize = w * h * self._depth * self._bytes_per_channel
            # t0 = time.time()
            # Read frame
            if self._frame_catcher:  # pragma: no cover - camera thing
                s, is_new = self._frame_catcher.get_frame()
            else:
                s = self._read_gen.__next__()
                is_new = True
            # Check that ffmpeg delivered a complete frame
            if len(s) != framesize:
                raise RuntimeError(
                    "Frame is %i bytes, but expected %i." % (len(s), framesize)
                )
            # Copy so the returned array does not alias ffmpeg's buffer
            result = np.frombuffer(s, dtype=self._dtype).copy()
            result = result.reshape((h, w, self._depth))
            # t1 = time.time()
            # print('etime', t1-t0)
            # Store and return
            self._lastread = result
            return result, is_new
# --
class Writer(Format.Writer):
_write_gen = None
        def _open(
            self,
            fps=10,
            codec="libx264",
            bitrate=None,
            pixelformat="yuv420p",
            ffmpeg_params=None,
            input_params=None,
            output_params=None,
            ffmpeg_log_level="quiet",
            quality=5,
            macro_block_size=16,
        ):
            """Prepare for writing. The keyword arguments are not read here:
            ``_initialize`` re-reads them from ``self.request.kwargs`` when
            the first frame arrives; the signature documents the supported
            options and their defaults."""
            self._ffmpeg_api = _get_ffmpeg_api()
            self._filename = self.request.get_local_filename()
            # Frame properties, determined from the first appended image
            self._pix_fmt = None
            self._depth = None
            self._size = None
def _close(self):
if self._write_gen is not None:
self._write_gen.close()
self._write_gen = None
def _append_data(self, im, meta):
# Get props of image
h, w = im.shape[:2]
size = w, h
depth = 1 if im.ndim == 2 else im.shape[2]
# Ensure that image is in uint8
im = image_as_uint(im, bitdepth=8)
# To be written efficiently, ie. without creating an immutable
# buffer, by calling im.tobytes() the array must be contiguous.
if not im.flags.c_contiguous:
# checkign the flag is a micro optimization.
# the image will be a numpy subclass. See discussion
# https://github.com/numpy/numpy/issues/11804
im = np.ascontiguousarray(im)
# Set size and initialize if not initialized yet
if self._size is None:
map = {1: "gray", 2: "gray8a", 3: "rgb24", 4: "rgba"}
self._pix_fmt = map.get(depth, None)
if self._pix_fmt is None:
raise ValueError("Image must have 1, 2, 3 or 4 channels")
self._size = size
self._depth = depth
self._initialize()
# Check size of image
if size != self._size:
raise ValueError("All images in a movie should have same size")
if depth != self._depth:
raise ValueError(
"All images in a movie should have same " "number of channels"
)
assert self._write_gen is not None # Check status
# Write. Yes, we can send the data in as a numpy array
self._write_gen.send(im)
def set_meta_data(self, meta):
raise RuntimeError(
"The ffmpeg format does not support setting " "meta data."
)
        def _initialize(self):
            """Boot the ffmpeg write subprocess using the options from
            ``self.request.kwargs`` and the frame properties (size, depth,
            pixel format) determined by ``_append_data``."""
            # Close existing generator
            if self._write_gen is not None:
                self._write_gen.close()
            # Get parameters
            # Use None to let imageio-ffmpeg (or ffmpeg) select good results
            fps = self.request.kwargs.get("fps", 10)
            codec = self.request.kwargs.get("codec", None)
            bitrate = self.request.kwargs.get("bitrate", None)
            quality = self.request.kwargs.get("quality", None)
            input_params = self.request.kwargs.get("input_params") or []
            output_params = self.request.kwargs.get("output_params") or []
            output_params += self.request.kwargs.get("ffmpeg_params") or []
            pixelformat = self.request.kwargs.get("pixelformat", None)
            macro_block_size = self.request.kwargs.get("macro_block_size", 16)
            ffmpeg_log_level = self.request.kwargs.get("ffmpeg_log_level", None)
            macro_block_size = macro_block_size or 1  # None -> 1
            # Create generator
            self._write_gen = self._ffmpeg_api.write_frames(
                self._filename,
                self._size,
                pix_fmt_in=self._pix_fmt,
                pix_fmt_out=pixelformat,
                fps=fps,
                quality=quality,
                bitrate=bitrate,
                codec=codec,
                macro_block_size=macro_block_size,
                ffmpeg_log_level=ffmpeg_log_level,
                input_params=input_params,
                output_params=output_params,
            )
            # Seed the generator (this is where the ffmpeg subprocess starts)
            self._write_gen.send(None)
class FrameCatcher(threading.Thread):
    """Thread to keep reading the frame data from stdout. This is
    useful when streaming from a webcam. Otherwise, if the user code
    does not grab frames fast enough, the buffer will fill up, leading
    to lag, and ffmpeg can also stall (experienced on Linux). The
    get_frame() method always returns the last available image.
    """

    def __init__(self, gen):
        # gen: the ffmpeg read generator that yields raw frame bytes
        self._gen = gen
        self._frame = None
        self._frame_is_new = False
        self._lock = threading.RLock()
        threading.Thread.__init__(self)
        self.daemon = True  # do not let this thread hold up Python shutdown
        self._should_stop = False
        self.start()

    def stop_me(self):
        """Ask the reader loop to stop and wait until the thread exits."""
        self._should_stop = True
        while self.is_alive():
            time.sleep(0.001)

    def get_frame(self):
        """Return ``(frame, is_new)`` for the most recently read frame.

        Blocks until the first frame arrives. ``is_new`` is reset after
        each call, so it reports whether the frame changed since the
        previous call.
        """
        while self._frame is None:  # pragma: no cover - an init thing
            time.sleep(0.001)
        with self._lock:
            is_new = self._frame_is_new
            self._frame_is_new = False  # reset
            return self._frame, is_new

    def run(self):
        """Worker loop: pull frames until the stream ends or stop_me()."""
        # This runs in the worker thread
        try:
            while not self._should_stop:
                time.sleep(0)  # give control to other threads
                frame = self._gen.__next__()
                with self._lock:
                    self._frame = frame
                    self._frame_is_new = True
        except (StopIteration, EOFError):
            # Stream exhausted or ffmpeg pipe closed: just exit the thread
            pass
def parse_device_names(ffmpeg_output):
    """Parse the output of the ffmpeg -list-devices command"""
    # Pass 1: collect [friendly_name, alternative_name] for every entry in
    # the "video devices" section of the listing.
    pairs = []
    in_video_section = False
    for line in ffmpeg_output.splitlines():
        if line.startswith("[dshow"):
            logger.debug(line)
            line = line.split("]", 1)[1].strip()
        if in_video_section and line.startswith('"'):
            # A quoted friendly name; alt name may follow on the next line
            pairs.append([line[1:-1], ""])
        elif in_video_section and line.lower().startswith("alternative name"):
            alt = line.split(" name ", 1)[1].strip()[1:-1]
            # Escape ampersands so the name survives the shell invocation
            if sys.platform.startswith("win"):
                alt = alt.replace("&", "^&")  # Tested to work
            else:
                alt = alt.replace("&", "\\&")  # Does this work?
            pairs[-1][-1] = alt
        elif "video devices" in line:
            in_video_section = True
        elif "devices" in line:
            # set False for subsequent "devices" sections
            in_video_section = False
    # Pass 2 (see #441): prefer friendly names, but use the alternative name
    # when two cameras share the same friendly name.
    names = []
    for friendly, alt in pairs:
        if friendly not in names:
            names.append(friendly)
        elif alt:
            names.append(alt)
        else:
            names.append(friendly)  # duplicate, but not much we can do
    return names
| |
from django.utils import timezone
from transitions import Machine
from api.providers.workflows import Workflows
from framework.auth import Auth
from osf.exceptions import InvalidTransitionError
from osf.models.action import ReviewAction, NodeRequestAction, PreprintRequestAction
from osf.models.preprintlog import PreprintLog
from osf.utils import permissions
from osf.utils.workflows import DefaultStates, DefaultTriggers, ReviewStates, DEFAULT_TRANSITIONS, REVIEWABLE_TRANSITIONS
from website.mails import mails
from website.reviews import signals as reviews_signals
from website.settings import DOMAIN, OSF_SUPPORT_EMAIL, OSF_CONTACT_EMAIL
class BaseMachine(Machine):
    """Base state machine wrapping a model (the "machineable") whose state
    is stored in one of the model's attributes.

    Subclasses supply the states/transitions and an ActionClass used to
    record every transition as an Action row.
    """

    # Populated per-transition by initialize_machine / save_action
    action = None
    from_state = None

    def __init__(self, machineable, state_attr, **kwargs):
        # machineable: the model driven by this machine
        # state_attr: name of the model attribute holding the state value
        self.machineable = machineable
        self.__state_attr = state_attr
        states = kwargs.get('states', [s.value for s in DefaultStates])
        transitions = kwargs.get('transitions', DEFAULT_TRANSITIONS)
        self._validate_transitions(transitions)
        super(BaseMachine, self).__init__(
            states=states,
            transitions=transitions,
            initial=self.state,
            send_event=True,
            prepare_event=['initialize_machine'],
            ignore_invalid_triggers=True,
        )

    @property
    def state(self):
        # The machine's state lives on the wrapped model, not on the machine
        return getattr(self.machineable, self.__state_attr)

    @state.setter
    def state(self, value):
        setattr(self.machineable, self.__state_attr, value)

    @property
    def ActionClass(self):
        # Subclasses must name the Action model used to log transitions
        raise NotImplementedError()

    def _validate_transitions(self, transitions):
        # Every 'after' callback named in the transition table must exist on self
        for transition in set(sum([t['after'] for t in transitions], [])):
            if not hasattr(self, transition):
                raise InvalidTransitionError(self, transition)

    def initialize_machine(self, ev):
        # prepare_event hook: remember the origin state of this transition
        self.action = None
        self.from_state = ev.state

    def save_action(self, ev):
        # Record the transition as an ActionClass row targeting the model
        user = ev.kwargs.get('user')
        self.action = self.ActionClass.objects.create(
            target=self.machineable,
            creator=user,
            trigger=ev.event.name,
            from_state=self.from_state.name,
            to_state=ev.state.name,
            comment=ev.kwargs.get('comment', ''),
            auto=ev.kwargs.get('auto', False),
        )

    def update_last_transitioned(self, ev):
        # Keep the model's transition timestamp aligned with the action log
        now = self.action.created if self.action is not None else timezone.now()
        self.machineable.date_last_transitioned = now
class ReviewsMachine(BaseMachine):
    """State machine for the preprint moderation (review) workflow.

    The machineable is a preprint; transitions publish/unpublish it and
    send the corresponding notification emails.
    """

    ActionClass = ReviewAction

    def __init__(self, *args, **kwargs):
        # Default to the review-specific states/transitions
        kwargs['transitions'] = kwargs.get('transitions', REVIEWABLE_TRANSITIONS)
        kwargs['states'] = kwargs.get('states', [s.value for s in ReviewStates])
        super(ReviewsMachine, self).__init__(*args, **kwargs)

    def save_changes(self, ev):
        # Publish/unpublish the preprint to match its new review state
        now = self.action.created if self.action is not None else timezone.now()
        should_publish = self.machineable.in_public_reviews_state
        if self.machineable.is_retracted:
            pass  # Do not alter published state
        elif should_publish and not self.machineable.is_published:
            # Validate the preprint before first publication
            if not (self.machineable.primary_file and self.machineable.primary_file.target == self.machineable):
                raise ValueError('Preprint is not a valid preprint; cannot publish.')
            if not self.machineable.provider:
                raise ValueError('Preprint provider not specified; cannot publish.')
            if not self.machineable.subjects.exists():
                raise ValueError('Preprint must have at least one subject to be published.')
            self.machineable.date_published = now
            self.machineable.is_published = True
            self.machineable.ever_public = True
        elif not should_publish and self.machineable.is_published:
            self.machineable.is_published = False
        self.machineable.save()

    def resubmission_allowed(self, ev):
        # Only pre-moderation providers allow resubmission after a decision
        return self.machineable.provider.reviews_workflow == Workflows.PRE_MODERATION.value

    def withdrawal_submitter_is_moderator_or_admin(self, submitter):
        # Returns True if the submitter of the request is a moderator or admin for the provider.
        provider = self.machineable.provider
        return provider.get_group('moderator').user_set.filter(id=submitter.id).exists() or \
            provider.get_group(permissions.ADMIN).user_set.filter(id=submitter.id).exists()

    def perform_withdraw(self, ev):
        # Stamp the withdrawal date and record the (optional) justification
        self.machineable.date_withdrawn = self.action.created if self.action is not None else timezone.now()
        self.machineable.withdrawal_justification = ev.kwargs.get('comment', '')

    def notify_submit(self, ev):
        # Log the submission and email contributors plus moderators
        context = self.get_context()
        context['referrer'] = ev.kwargs.get('user')
        user = ev.kwargs.get('user')
        auth = Auth(user)
        self.machineable.add_log(
            action=PreprintLog.PUBLISHED,
            params={
                'preprint': self.machineable._id
            },
            auth=auth,
            save=False,
        )
        recipients = list(self.machineable.contributors)
        reviews_signals.reviews_email_submit.send(context=context, recipients=recipients)
        reviews_signals.reviews_email_submit_moderators_notifications.send(timestamp=timezone.now(), context=context)

    def notify_resubmit(self, ev):
        # Confirmation email to the resubmitting user
        context = self.get_context()
        reviews_signals.reviews_email.send(creator=ev.kwargs.get('user'), context=context,
                                           template='reviews_resubmission_confirmation',
                                           action=self.action)

    def notify_accept_reject(self, ev):
        # Email the creator about the moderator's accept/reject decision
        context = self.get_context()
        context['notify_comment'] = not self.machineable.provider.reviews_comments_private and self.action.comment
        context['comment'] = self.action.comment
        context['is_rejected'] = self.action.to_state == DefaultStates.REJECTED.value
        context['was_pending'] = self.action.from_state == DefaultStates.PENDING.value
        reviews_signals.reviews_email.send(creator=ev.kwargs.get('user'), context=context,
                                           template='reviews_submission_status',
                                           action=self.action)

    def notify_edit_comment(self, ev):
        # Notify only when the provider allows review comments to be shown
        context = self.get_context()
        context['comment'] = self.action.comment
        if not self.machineable.provider.reviews_comments_private and self.action.comment:
            reviews_signals.reviews_email.send(creator=ev.kwargs.get('user'), context=context,
                                               template='reviews_update_comment',
                                               action=self.action)

    def notify_withdraw(self, ev):
        # Email every contributor that the withdrawal (request) was granted
        context = self.get_context()
        context['ever_public'] = self.machineable.ever_public
        try:
            preprint_request_action = PreprintRequestAction.objects.get(target__target__id=self.machineable.id,
                                                                        from_state='pending',
                                                                        to_state='accepted',
                                                                        trigger='accept')
            context['requester'] = preprint_request_action.target.creator
        except PreprintRequestAction.DoesNotExist:
            # If there is no preprint request action, it means the withdrawal is directly initiated by admin/moderator
            context['withdrawal_submitter_is_moderator_or_admin'] = True
        for contributor in self.machineable.contributors.all():
            context['contributor'] = contributor
            if context.get('requester', None):
                context['is_requester'] = context['requester'].username == contributor.username
            mails.send_mail(
                contributor.username,
                mails.PREPRINT_WITHDRAWAL_REQUEST_GRANTED,
                mimetype='html',
                **context
            )

    def get_context(self):
        # Common template context for all review notification emails
        return {
            'domain': DOMAIN,
            'reviewable': self.machineable,
            'workflow': self.machineable.provider.reviews_workflow,
            'provider_url': self.machineable.provider.domain or '{domain}preprints/{provider_id}'.format(domain=DOMAIN, provider_id=self.machineable.provider._id),
            'provider_contact_email': self.machineable.provider.email_contact or OSF_CONTACT_EMAIL,
            'provider_support_email': self.machineable.provider.email_support or OSF_SUPPORT_EMAIL,
        }
class NodeRequestMachine(BaseMachine):
    """State machine for requests for access to a node/project."""

    ActionClass = NodeRequestAction

    def save_changes(self, ev):
        """ Handles contributorship changes and state transitions
        """
        if ev.event.name == DefaultTriggers.EDIT_COMMENT.value and self.action is not None:
            self.machineable.comment = self.action.comment
        self.machineable.save()
        if ev.event.name == DefaultTriggers.ACCEPT.value:
            # Accepting the request adds the requester as a contributor
            if not self.machineable.target.is_contributor(self.machineable.creator):
                contributor_permissions = ev.kwargs.get('permissions', permissions.READ)
                self.machineable.target.add_contributor(
                    self.machineable.creator,
                    auth=Auth(ev.kwargs['user']),
                    permissions=contributor_permissions,
                    visible=ev.kwargs.get('visible', True),
                    send_email='{}_request'.format(self.machineable.request_type))

    def resubmission_allowed(self, ev):
        # TODO: [PRODUCT-395]
        return False

    def notify_submit(self, ev):
        """ Notify admins that someone is requesting access
        """
        context = self.get_context()
        context['contributors_url'] = '{}contributors/'.format(self.machineable.target.absolute_url)
        context['project_settings_url'] = '{}settings/'.format(self.machineable.target.absolute_url)
        for admin in self.machineable.target.get_users_with_perm(permissions.ADMIN):
            mails.send_mail(
                admin.username,
                mails.ACCESS_REQUEST_SUBMITTED,
                admin=admin,
                mimetype='html',
                osf_contact_email=OSF_CONTACT_EMAIL,
                **context
            )

    def notify_resubmit(self, ev):
        """ Notify admins that someone is requesting access again
        """
        # TODO: [PRODUCT-395]
        raise NotImplementedError()

    def notify_accept_reject(self, ev):
        """ Notify requester that admins have approved/denied
        """
        if ev.event.name == DefaultTriggers.REJECT.value:
            context = self.get_context()
            mails.send_mail(
                self.machineable.creator.username,
                mails.ACCESS_REQUEST_DENIED,
                mimetype='html',
                osf_contact_email=OSF_CONTACT_EMAIL,
                **context
            )
        else:
            # add_contributor sends approval notification email
            pass

    def notify_edit_comment(self, ev):
        """ Not presently required to notify for this event
        """
        pass

    def get_context(self):
        # Base template context for node-request emails
        return {
            'node': self.machineable.target,
            'requester': self.machineable.creator
        }
class PreprintRequestMachine(BaseMachine):
    """State machine for preprint withdrawal requests."""

    ActionClass = PreprintRequestAction

    def save_changes(self, ev):
        """ Handles preprint status changes and state transitions
        """
        if ev.event.name == DefaultTriggers.EDIT_COMMENT.value and self.action is not None:
            self.machineable.comment = self.action.comment
        elif ev.event.name == DefaultTriggers.SUBMIT.value:
            # If the provider is pre-moderated and target has not been through moderation, auto approve withdrawal
            if self.auto_approval_allowed():
                self.machineable.run_accept(user=self.machineable.creator, comment=self.machineable.comment, auto=True)
        elif ev.event.name == DefaultTriggers.ACCEPT.value:
            # If moderator accepts the withdrawal request
            self.machineable.target.run_withdraw(user=self.action.creator, comment=self.action.comment)
        self.machineable.save()

    def auto_approval_allowed(self):
        # Returns True if the provider is pre-moderated and the preprint is never public.
        return self.machineable.target.provider.reviews_workflow == Workflows.PRE_MODERATION.value and not self.machineable.target.ever_public

    def notify_submit(self, ev):
        # Moderators need notifying only when the request is not auto-approved
        context = self.get_context()
        if not self.auto_approval_allowed():
            reviews_signals.reviews_email_withdrawal_requests.send(timestamp=timezone.now(), context=context)

    def notify_accept_reject(self, ev):
        # Email the requester only when the withdrawal was declined
        if ev.event.name == DefaultTriggers.REJECT.value:
            context = self.get_context()
            mails.send_mail(
                self.machineable.creator.username,
                mails.PREPRINT_WITHDRAWAL_REQUEST_DECLINED,
                mimetype='html',
                **context
            )
        else:
            pass

    def notify_edit_comment(self, ev):
        """ Not presently required to notify for this event
        """
        pass

    def notify_resubmit(self, ev):
        """ Notify moderators that someone is requesting withdrawal again
        Not presently required to notify for this event
        """
        # TODO
        pass

    def get_context(self):
        # Base template context for withdrawal-request emails
        return {
            'reviewable': self.machineable.target,
            'requester': self.machineable.creator,
            'is_request_email': True,
        }
| |
# This program rearranges raw Edgeryders data and builds two lists of dicts, userlist and commentslist, containing
# the data needed to build graphs. These objects are then saved into files.
import os, sys
import json
import csv
from datetime import datetime
import time
import networkx as nx
import logging
import edgesense.utils as eu
from edgesense.utils.logger_initializer import initialize_logger
from edgesense.network.utils import extract_edges, build_network
from edgesense.metrics import compute_all_metrics
from edgesense.utils.extract import calculate_timestamp_range
def load_files(users_resource, nodes_resource, comments_resource, username, password, extraction_method, dumpto, generated):
    """Load the users, nodes and comments resources and extract them.

    If `dumpto` is set, the raw JSON of each resource is also dumped to a
    timestamped directory under it.  Returns a tuple
    (allusers, allnodes, allcomments).
    """
    if dumpto:
        base_dump_dir = os.path.join(dumpto, generated.strftime('%Y-%m-%d-%H-%M-%S'))
        eu.resource.mkdir(base_dump_dir)

    def _load(kind, resource):
        # Single load/extract step; was previously copy-pasted three times
        if dumpto:
            dump_to = os.path.join(base_dump_dir, kind + '.json')
        else:
            dump_to = None
        raw = eu.resource.load(resource, username=username, password=password, dump_to=dump_to)
        return eu.extract.extract(extraction_method, kind, raw)

    allusers = _load('users', users_resource)
    allnodes = _load('nodes', nodes_resource)
    allcomments = _load('comments', comments_resource)
    logging.info("file loaded")
    return (allusers, allnodes, allcomments)
def parse_options(argv):
import getopt
users_resource = 'users.json'
nodes_resource = 'nodes.json'
comments_resource = 'comments.json'
node_title_field = 'uid'
timestep_size = 60*60*24*7
timestep_window = 1
timestep_count = None
username = None
password = None
extraction_method = 'nested'
admin_roles = set()
exclude_isolated = False
dumpto = None
basepath = os.path.dirname(__file__)
destination_path = os.path.abspath(os.path.join(basepath, "..", "static", "json"))
log_path = './log'
create_datapackage = False
datapackage_title = None
license_type = None
license_url = None
site_url = None
data = {}
try:
with open(argv[0], 'r') as datafile:
data = json.load(datafile)
except:
print 'Error reading the parameters file'
sys.exit(2)
if not(data):
print 'edgesense_drupal <path to the parameters file>'
sys.exit()
if data.has_key('users') and data['users']:
users_resource = data['users']
if data.has_key('nodes') and data['nodes']:
nodes_resource = data['nodes']
if data.has_key('comments') and data['comments']:
comments_resource = data['comments']
if data.has_key('node_title') and data['node_title']:
node_title_field = data['node_title']
if data.has_key('timestep_size') and data['timestep_size']:
timestep_size = int(data['timestep_size'])
if data.has_key('count_window') and data['count_window']:
timestep_window = int(data['count_window'])
if data.has_key('timestep_count') and data['timestep_count']:
timestep_count = int(data['timestep_count'])
if data.has_key('auth'):
try:
username = data['auth']['username']
except:
username = None
try:
password = data['auth']['password']
except:
password = None
if data.has_key('extraction_method') and data['extraction_method']:
extraction_method = data['extraction_method']
if data.has_key('moderator_roles') and data['moderator_roles']:
admin_roles = set([e.strip() for e in arg.split(",") if e.strip()])
if data.has_key('exclude_isolated') and data['exclude_isolated']:
exclude_isolated = True
if data.has_key('dumpto') and data['dumpto']:
dumpto = data['extraction_method']
if data.has_key('destination_path') and data['destination_path']:
destination_path = data['destination_path']
if data.has_key('log_path') and data['log_path']:
log_path = os.path.join(data['log_path'])
if data.has_key('datapackage'):
try:
license_type = data['datapackage']['license_type']
license_url = data['datapackage']['license_url']
if data['datapackage'].has_key('title'):
datapackage_title = data['datapackage']['title']
site_url = data['datapackage']['site_url']
create_datapackage = True
except:
license_type = None
license_url = None
site_url = None
create_datapackage = True
# set up logging to file (edgesense.log in the same dir as the parameters file)
initialize_logger(log_path, file_level=logging.DEBUG, console_level=logging.DEBUG, file_mode='w')
logging.info("parsing files %(u)s %(n)s %(c)s" % {'u': users_resource, 'n': nodes_resource, 'c': comments_resource})
return (users_resource,
nodes_resource,
comments_resource,
node_title_field,
timestep_size,
timestep_window,
timestep_count,
username, password,
extraction_method,
admin_roles,
exclude_isolated,
dumpto,
destination_path,
create_datapackage,
datapackage_title,
license_type,
license_url,
site_url)
def main():
    """Command-line entry point.

    Parses options, loads the source data, builds the interaction network,
    computes its metrics, and dumps the results (plus, optionally, a
    datapackage) under ``destination_path``.
    """
    # parse_options returns one long positional tuple; unpack it in order.
    users_resource, \
    nodes_resource, \
    comments_resource, \
    node_title_field, \
    timestep_size, \
    timestep_window, \
    timestep_count, \
    username, \
    password, \
    extraction_method, \
    admin_roles, \
    exclude_isolated, \
    dumpto, \
    destination_path, \
    create_datapackage, \
    datapackage_title, \
    license_type, \
    license_url, \
    site_url = parse_options(sys.argv[1:])
    # Single timestamp used both for metadata and for the output directory tag.
    generated = datetime.now()
    logging.info("Network processing - started")
    # Load the files
    allusers, allnodes, allcomments = load_files(users_resource, nodes_resource, comments_resource, username, password, extraction_method, dumpto, generated)
    # extract a normalized set of data
    nodes_map, posts_map, comments_map = eu.extract.normalized_data(allusers, allnodes, allcomments, node_title_field, admin_roles, exclude_isolated)
    # this is the network object
    # going forward it should be read from a serialized format to handle caching
    network = {}
    # Add some file metadata
    network['meta'] = {}
    # Timestamp of the file generation (to show in the dashboard)
    # NOTE(review): strftime("%s") is a non-portable platform extension — TODO confirm
    network['meta']['generated'] = int(generated.strftime("%s"))
    network['edges'] = extract_edges(nodes_map, comments_map)
    # filter out nodes that have not participated in the conversations
    inactive_nodes = [ v for v in nodes_map.values() if not v['active'] ]
    logging.info("inactive nodes: %(n)i" % {'n':len(inactive_nodes)})
    network['nodes'] = [ v for v in nodes_map.values() if v['active'] ]
    # Parameters
    timestep, timesteps_range = calculate_timestamp_range(network, timestep_size, timestep_window, timestep_count)
    # build the whole network to use for metrics
    directed_multiedge_network=build_network(network)
    logging.info("network built")
    # calculate the metrics
    network['metrics'] = compute_all_metrics(nodes_map, posts_map, comments_map, directed_multiedge_network, timesteps_range, timestep, timestep_window)
    logging.info("network metrics done")
    tag = generated.strftime('%Y-%m-%d-%H-%M-%S')
    tagged_dir = os.path.join(destination_path, 'data', tag)
    # dump the network to a json file, minified
    eu.resource.save(network, 'network.min.json', tagged_dir)
    logging.info("network dumped")
    # create the datapackage
    if create_datapackage:
        try:
            # load the datapackage template (lives next to this script)
            basepath = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
            with open(os.path.join(basepath, "datapackage_template.json"), 'r') as datafile:
                datapackage = json.load(datafile)
            datapackage['license'] = {'type': license_type, 'url': license_url}
            if datapackage_title:
                datapackage['title'] = datapackage_title
            datapackage['last_updated'] = generated.strftime('%Y-%m-%dT%H:%M:%S')
            datapackage['resources'][0]['url'] = site_url
            datapackage['resources'][0]['path'] = os.path.join('data', tag, 'network.gexf')
            # dump the gexf file
            gexf_file = os.path.join(tagged_dir, 'network.gexf')
            eu.gexf.save_gexf(directed_multiedge_network, gexf_file)
            # dump the datapackage
            eu.resource.save(datapackage, 'datapackage.json', destination_path, True)
            logging.info("datapackage saved")
        except:
            # NOTE(review): bare except hides the real failure, and is also
            # triggered by errors raised *after* the template was read, in
            # which case this message is misleading.
            logging.error("Error reading the datapackage template")
            create_datapackage = False
    # Record the latest run so the dashboard knows which tag to load.
    eu.resource.save({'last': tag, 'datapackage': create_datapackage}, 'last.json', destination_path)
    logging.info("Completed")
# Script entry point when run directly (not on import).
if __name__ == "__main__":
    main()
| |
"""
byceps.services.news.service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from __future__ import annotations
import dataclasses
from datetime import datetime
from functools import partial
from typing import Optional, Sequence
from ...database import db, paginate, Pagination, Query
from ...events.news import NewsItemPublished
from ...typing import UserID
from ..user import service as user_service
from .channel_service import _db_entity_to_channel
from . import html_service
from .dbmodels.channel import Channel as DbChannel
from .dbmodels.item import (
CurrentVersionAssociation as DbCurrentVersionAssociation,
Item as DbItem,
ItemVersion as DbItemVersion,
)
from . import image_service
from .transfer.models import (
ChannelID,
Headline,
ImageID,
Item,
ItemID,
ItemVersionID,
)
def create_item(
    channel_id: ChannelID,
    slug: str,
    creator_id: UserID,
    title: str,
    body: str,
    *,
    image_url_path: Optional[str] = None,
) -> Item:
    """Create a news item, a version, and set the version as the item's
    current one.
    """
    db_item = DbItem(channel_id, slug)
    db.session.add(db_item)

    db_version = _create_version(
        db_item, creator_id, title, body, image_url_path=image_url_path
    )
    db.session.add(db_version)

    # Mark the freshly created version as the item's current one.
    db.session.add(DbCurrentVersionAssociation(db_item, db_version))

    db.session.commit()

    return _db_entity_to_item(db_item)
def update_item(
    item_id: ItemID,
    slug: str,
    creator_id: UserID,
    title: str,
    body: str,
    *,
    image_url_path: Optional[str] = None,
) -> Item:
    """Update a news item by creating a new version of it and setting
    the new version as the current one.
    """
    db_item = _get_db_item(item_id)
    db_item.slug = slug

    new_version = _create_version(
        db_item, creator_id, title, body, image_url_path=image_url_path
    )
    db.session.add(new_version)
    db_item.current_version = new_version

    db.session.commit()

    return _db_entity_to_item(db_item)
def _create_version(
    item: DbItem,
    creator_id: UserID,
    title: str,
    body: str,
    *,
    image_url_path: Optional[str] = None,
) -> DbItemVersion:
    """Build (but do not persist) a new version row for the item."""
    db_version = DbItemVersion(item, creator_id, title, body)

    # Only set the attribute when a non-empty path was supplied.
    if image_url_path:
        db_version.image_url_path = image_url_path

    return db_version
def set_featured_image(item_id: ItemID, image_id: ImageID) -> None:
    """Set an image as featured image."""
    item = _get_db_item(item_id)

    item.featured_image_id = image_id
    db.session.commit()
def publish_item(
    item_id: ItemID,
    *,
    publish_at: Optional[datetime] = None,
    initiator_id: Optional[UserID] = None,
) -> NewsItemPublished:
    """Publish a news item."""
    db_item = _get_db_item(item_id)

    if db_item.published:
        raise ValueError('News item has already been published')

    now = datetime.utcnow()
    # Default the publication time to "now" when none was given.
    effective_publish_at = now if publish_at is None else publish_at

    initiator = (
        user_service.get_user(initiator_id)
        if initiator_id is not None
        else None
    )

    db_item.published_at = effective_publish_at
    db.session.commit()

    item = _db_entity_to_item(db_item)

    return NewsItemPublished(
        occurred_at=now,
        initiator_id=initiator.id if initiator else None,
        initiator_screen_name=initiator.screen_name if initiator else None,
        item_id=item.id,
        channel_id=item.channel.id,
        published_at=item.published_at,
        title=item.title,
        external_url=item.external_url,
    )
def delete_item(item_id: ItemID) -> None:
    """Delete a news item and its versions."""
    # Delete dependent rows first; the order matters for referential
    # integrity (association and versions reference the item).
    deletions = (
        (DbCurrentVersionAssociation, {'item_id': item_id}),
        (DbItemVersion, {'item_id': item_id}),
        (DbItem, {'id': item_id}),
    )
    for model, criteria in deletions:
        db.session.query(model).filter_by(**criteria).delete()

    db.session.commit()
def find_item(item_id: ItemID) -> Optional[Item]:
    """Return the item with that id, or `None` if not found."""
    db_item = _find_db_item(item_id)
    return _db_entity_to_item(db_item) if db_item is not None else None
def _find_db_item(item_id: ItemID) -> Optional[DbItem]:
    """Return the item with that id, or `None` if not found."""
    # Eager-load channel and images to avoid per-attribute lazy queries.
    query = db.session.query(DbItem).options(
        db.joinedload(DbItem.channel),
        db.joinedload(DbItem.images),
    )
    return query.get(item_id)
def _get_db_item(item_id: ItemID) -> DbItem:
    """Return the item with that id, or raise an exception."""
    db_item = _find_db_item(item_id)

    if db_item is None:
        raise ValueError(f'Unknown news item ID "{item_id}".')

    return db_item
def find_aggregated_item_by_slug(
    channel_ids: set[ChannelID], slug: str, *, published_only: bool = False
) -> Optional[Item]:
    """Return the news item identified by that slug in one of the given
    channels, or `None` if not found.
    """
    query = (
        db.session.query(DbItem)
        .filter(DbItem.channel_id.in_(channel_ids))
        .options(
            db.joinedload(DbItem.channel),
            db.joinedload(DbItem.current_version_association)
                .joinedload(DbCurrentVersionAssociation.version),
            db.joinedload(DbItem.images),
        )
        .filter_by(slug=slug)
    )

    if published_only:
        # Exclude items scheduled for a future publication date.
        query = query.filter(DbItem.published_at <= datetime.utcnow())

    db_item = query.one_or_none()
    if db_item is None:
        return None

    return _db_entity_to_item(db_item, render_body=True)
def get_aggregated_items_paginated(
    channel_ids: set[ChannelID],
    page: int,
    items_per_page: int,
    *,
    published_only: bool = False,
) -> Pagination:
    """Return the news items to show on the specified page."""
    items_query = _get_items_query(channel_ids)

    if published_only:
        items_query = items_query.filter(
            DbItem.published_at <= datetime.utcnow()
        )

    # Render the body of every item on the page.
    to_item = partial(_db_entity_to_item, render_body=True)
    return paginate(items_query, page, items_per_page, item_mapper=to_item)
def get_items_paginated(
    channel_ids: set[ChannelID], page: int, items_per_page: int
) -> Pagination:
    """Return the news items to show on the specified page."""
    items_query = _get_items_query(channel_ids)
    return items_query.paginate(page, items_per_page)
def get_recent_headlines(
    channel_ids: set[ChannelID], limit: int
) -> list[Headline]:
    """Return the most recent headlines."""
    db_items = (
        db.session.query(DbItem)
        .filter(DbItem.channel_id.in_(channel_ids))
        .options(
            db.joinedload(DbItem.current_version_association)
                .joinedload(DbCurrentVersionAssociation.version)
        )
        # Only already-published items, newest first.
        .filter(DbItem.published_at <= datetime.utcnow())
        .order_by(DbItem.published_at.desc())
        .limit(limit)
        .all()
    )

    headlines = []
    for db_item in db_items:
        headlines.append(
            Headline(
                slug=db_item.slug,
                published_at=db_item.published_at,
                title=db_item.current_version.title,
            )
        )
    return headlines
def _get_items_query(channel_ids: set[ChannelID]) -> Query:
    """Build the base query for items in the given channels, newest first."""
    return (
        db.session.query(DbItem)
        .filter(DbItem.channel_id.in_(channel_ids))
        .options(
            db.joinedload(DbItem.channel),
            db.joinedload(DbItem.current_version_association)
                .joinedload(DbCurrentVersionAssociation.version),
            db.joinedload(DbItem.images),
        )
        .order_by(DbItem.published_at.desc())
    )
def get_item_versions(item_id: ItemID) -> Sequence[DbItemVersion]:
    """Return all item versions, sorted from most recent to oldest."""
    versions_query = db.session.query(DbItemVersion).filter_by(item_id=item_id)
    return versions_query.order_by(DbItemVersion.created_at.desc()).all()
def get_current_item_version(item_id: ItemID) -> DbItemVersion:
    """Return the item's current version."""
    return _get_db_item(item_id).current_version
def find_item_version(version_id: ItemVersionID) -> Optional[DbItemVersion]:
    """Return the item version with that ID, or `None` if not found."""
    # `Query.get` returns `None` for unknown IDs, hence the optional return
    # type (the original annotation omitted `Optional`, contradicting the
    # docstring).
    return db.session.query(DbItemVersion).get(version_id)
def has_channel_items(channel_id: ChannelID) -> bool:
    """Return `True` if the channel contains items."""
    # Build an EXISTS subquery and evaluate it as a scalar.
    items_exist = (
        db.session.query(DbItem)
        .join(DbChannel)
        .filter(DbChannel.id == channel_id)
        .exists()
    )
    return db.session.query(items_exist).scalar()
def get_item_count_by_channel_id() -> dict[ChannelID, int]:
    """Return news item count (including 0) per channel, indexed by
    channel ID.
    """
    # The outer join keeps channels without any items (count 0).
    rows = (
        db.session.query(DbChannel.id, db.func.count(DbItem.id))
        .outerjoin(DbItem)
        .group_by(DbChannel.id)
        .all()
    )
    return {channel_id: item_count for channel_id, item_count in rows}
def _db_entity_to_item(
    db_item: DbItem, *, render_body: Optional[bool] = False
) -> Item:
    """Convert a database item into its transfer object, optionally
    rendering its body to HTML.
    """
    channel = _db_entity_to_channel(db_item.channel)
    current_version = db_item.current_version

    images = [
        image_service._db_entity_to_image(db_image, channel.id)
        for db_image in db_item.images
    ]

    item = Item(
        id=db_item.id,
        channel=channel,
        slug=db_item.slug,
        published_at=db_item.published_at,
        # An item counts as published once a publication date is set.
        published=db_item.published_at is not None,
        title=current_version.title,
        body=current_version.body,
        external_url=channel.url_prefix + db_item.slug,
        image_url_path=_assemble_image_url_path(db_item),
        images=images,
        featured_image_id=db_item.featured_image_id,
    )

    if render_body:
        # Items are frozen dataclasses; replace instead of mutating.
        item = dataclasses.replace(item, body=_render_body(item))

    return item
def _assemble_image_url_path(item: DbItem) -> Optional[str]:
url_path = item.current_version.image_url_path
if not url_path:
return None
return f'/data/global/news_channels/{item.channel_id}/{url_path}'
def _render_body(item: Item) -> Optional[str]:
"""Render body text to HTML."""
try:
return html_service.render_body(item, item.body)
except Exception as e:
return None # Not the best error indicator.
| |
# -*- test-case-name: twisted.words.test.test_service -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A module that needs a better name.
Implements new cred things for words.
How does this thing work?
- Network connection on some port expecting to speak some protocol
- Protocol-specific authentication, resulting in some kind of credentials object
- twisted.cred.portal login using those credentials for the interface
IUser and with something implementing IChatClient as the mind
- successful login results in an IUser avatar the protocol can call
methods on, and state added to the realm such that the mind will have
methods called on it as is necessary
- protocol specific actions lead to calls onto the avatar; remote events
lead to calls onto the mind
- protocol specific hangup, realm is notified, user is removed from active
play, the end.
"""
from time import time, ctime
from zope.interface import implements
from twisted.words import iwords, ewords
from twisted.python.components import registerAdapter
from twisted.cred import portal, credentials, error as ecred
from twisted.spread import pb
from twisted.words.protocols import irc
from twisted.internet import defer, protocol
from twisted.python import log, failure, reflect
from twisted import copyright
class Group(object):
    """A chat group that fans events out to its member clients.

    Members are IChatClient minds keyed by user name. Every broadcast
    collects per-member deferreds into a DeferredList; members whose
    delivery fails are ejected from the group (see _cbUserCall).
    """
    implements(iwords.IGroup)

    def __init__(self, name):
        self.name = name
        # user name -> IChatClient mind
        self.users = {}
        self.meta = {
            "topic": "",
            "topic_author": "",
            }

    def _ebUserCall(self, err, p):
        # Wrap the failure so _cbUserCall can recover both the member and
        # the underlying error from the DeferredList result.
        return failure.Failure(Exception(p, err))

    def _cbUserCall(self, results):
        for (success, result) in results:
            if not success:
                # Delivery to this member failed; eject them with the
                # error message as the reason.
                user, err = result.value # XXX
                self.remove(user, err.getErrorMessage())

    def add(self, user):
        """Add a member and notify every other member of the join."""
        assert iwords.IChatClient.providedBy(user), "%r is not a chat client" % (user,)
        if user.name not in self.users:
            additions = []
            # Register first, then broadcast to everyone else.
            self.users[user.name] = user
            for p in self.users.itervalues():
                if p is not user:
                    d = defer.maybeDeferred(p.userJoined, self, user)
                    d.addErrback(self._ebUserCall, p=p)
                    additions.append(d)
            defer.DeferredList(additions).addCallback(self._cbUserCall)
        return defer.succeed(None)

    def remove(self, user, reason=None):
        """Remove a member (if present) and notify the remaining members."""
        assert reason is None or isinstance(reason, unicode)
        try:
            del self.users[user.name]
        except KeyError:
            # Not a member; nothing to do.
            pass
        else:
            removals = []
            for p in self.users.itervalues():
                if p is not user:
                    d = defer.maybeDeferred(p.userLeft, self, user, reason)
                    d.addErrback(self._ebUserCall, p=p)
                    removals.append(d)
            defer.DeferredList(removals).addCallback(self._cbUserCall)
        return defer.succeed(None)

    def size(self):
        """Return the member count, wrapped in a Deferred."""
        return defer.succeed(len(self.users))

    def receive(self, sender, recipient, message):
        """Deliver a message to every member except the sender."""
        assert recipient is self
        receives = []
        for p in self.users.itervalues():
            if p is not sender:
                d = defer.maybeDeferred(p.receive, sender, self, message)
                d.addErrback(self._ebUserCall, p=p)
                receives.append(d)
        defer.DeferredList(receives).addCallback(self._cbUserCall)
        return defer.succeed(None)

    def setMetadata(self, meta):
        """Replace the group metadata and broadcast the update to members."""
        self.meta = meta
        sets = []
        for p in self.users.itervalues():
            d = defer.maybeDeferred(p.groupMetaUpdate, self, meta)
            d.addErrback(self._ebUserCall, p=p)
            sets.append(d)
        defer.DeferredList(sets).addCallback(self._cbUserCall)
        return defer.succeed(None)

    def iterusers(self):
        # XXX Deferred?
        return iter(self.users.values())
class User(object):
    """A logged-in chat user: tracks group memberships and activity times."""
    implements(iwords.IUser)

    realm = None
    mind = None

    def __init__(self, name):
        self.name = name
        self.groups = []
        self.lastMessage = time()

    def loggedIn(self, realm, mind):
        self.realm = realm
        self.mind = mind
        self.signOn = time()

    def join(self, group):
        # Record membership only after the group accepted us.
        def recordMembership(result):
            self.groups.append(group)
            return result
        d = group.add(self.mind)
        return d.addCallback(recordMembership)

    def leave(self, group, reason=None):
        # Forget membership only after the group removed us.
        def forgetMembership(result):
            self.groups.remove(group)
            return result
        d = group.remove(self.mind, reason)
        return d.addCallback(forgetMembership)

    def send(self, recipient, message):
        self.lastMessage = time()
        return recipient.receive(self.mind, recipient, message)

    def itergroups(self):
        return iter(self.groups)

    def logout(self):
        # Iterate a copy: leave() mutates self.groups.
        for joinedGroup in self.groups[:]:
            self.leave(joinedGroup)
# Synthetic hostmask used as the sender of server-side service messages.
NICKSERV = 'NickServ!NickServ@services'
class IRCUser(irc.IRC):
"""
Protocol instance representing an IRC user connected to the server.
"""
implements(iwords.IChatClient)
# A list of IGroups in which I am participating
groups = None
# A no-argument callable I should invoke when I go away
logout = None
# An IUser we use to interact with the chat service
avatar = None
# To whence I belong
realm = None
# How to handle unicode (TODO: Make this customizable on a per-user basis)
encoding = 'utf-8'
# Twisted callbacks
def connectionMade(self):
self.irc_PRIVMSG = self.irc_NICKSERV_PRIVMSG
self.realm = self.factory.realm
self.hostname = self.realm.name
def connectionLost(self, reason):
if self.logout is not None:
self.logout()
self.avatar = None
# Make sendMessage a bit more useful to us
def sendMessage(self, command, *parameter_list, **kw):
if not kw.has_key('prefix'):
kw['prefix'] = self.hostname
if not kw.has_key('to'):
kw['to'] = self.name.encode(self.encoding)
arglist = [self, command, kw['to']] + list(parameter_list)
irc.IRC.sendMessage(*arglist, **kw)
# IChatClient implementation
def userJoined(self, group, user):
self.join(
"%s!%s@%s" % (user.name, user.name, self.hostname),
'#' + group.name)
def userLeft(self, group, user, reason=None):
assert reason is None or isinstance(reason, unicode)
self.part(
"%s!%s@%s" % (user.name, user.name, self.hostname),
'#' + group.name,
(reason or u"leaving").encode(self.encoding, 'replace'))
def receive(self, sender, recipient, message):
#>> :glyph!glyph@adsl-64-123-27-108.dsl.austtx.swbell.net PRIVMSG glyph_ :hello
# omg???????????
if iwords.IGroup.providedBy(recipient):
recipientName = '#' + recipient.name
else:
recipientName = recipient.name
text = message.get('text', '<an unrepresentable message>')
for L in text.splitlines():
self.privmsg(
'%s!%s@%s' % (sender.name, sender.name, self.hostname),
recipientName,
L)
def groupMetaUpdate(self, group, meta):
if 'topic' in meta:
topic = meta['topic']
author = meta.get('topic_author', '')
self.topic(
self.name,
'#' + group.name,
topic,
'%s!%s@%s' % (author, author, self.hostname)
)
# irc.IRC callbacks - starting with login related stuff.
nickname = None
password = None
def irc_PASS(self, prefix, params):
"""Password message -- Register a password.
Parameters: <password>
[REQUIRED]
Note that IRC requires the client send this *before* NICK
and USER.
"""
self.password = params[-1]
def irc_NICK(self, prefix, params):
"""Nick message -- Set your nickname.
Parameters: <nickname>
[REQUIRED]
"""
try:
nickname = params[0].decode(self.encoding)
except UnicodeDecodeError:
self.privmsg(
NICKSERV,
nickname,
'Your nickname is cannot be decoded. Please use ASCII or UTF-8.')
self.transport.loseConnection()
return
self.nickname = nickname
self.name = nickname
for code, text in self._motdMessages:
self.sendMessage(code, text % self.factory._serverInfo)
if self.password is None:
self.privmsg(
NICKSERV,
nickname,
'Password?')
else:
password = self.password
self.password = None
self.logInAs(nickname, password)
def irc_USER(self, prefix, params):
"""User message -- Set your realname.
Parameters: <user> <mode> <unused> <realname>
"""
# Note: who gives a crap about this? The IUser has the real
# information we care about. Save it anyway, I guess, just
# for fun.
self.realname = params[-1]
def irc_NICKSERV_PRIVMSG(self, prefix, params):
"""Send a (private) message.
Parameters: <msgtarget> <text to be sent>
"""
target = params[0]
password = params[-1]
if self.nickname is None:
# XXX Send an error response here
self.transport.loseConnection()
elif target.lower() != "nickserv":
self.privmsg(
NICKSERV,
self.nickname,
"Denied. Please send me (NickServ) your password.")
else:
nickname = self.nickname
self.nickname = None
self.logInAs(nickname, password)
def logInAs(self, nickname, password):
d = self.factory.portal.login(
credentials.UsernamePassword(nickname, password),
self,
iwords.IUser)
d.addCallbacks(self._cbLogin, self._ebLogin, errbackArgs=(nickname,))
_welcomeMessages = [
(irc.RPL_WELCOME,
":connected to Twisted IRC"),
(irc.RPL_YOURHOST,
":Your host is %(serviceName)s, running version %(serviceVersion)s"),
(irc.RPL_CREATED,
":This server was created on %(creationDate)s"),
# "Bummer. This server returned a worthless 004 numeric.
# I'll have to guess at all the values"
# -- epic
(irc.RPL_MYINFO,
# w and n are the currently supported channel and user modes
# -- specify this better
"%(serviceName)s %(serviceVersion)s w n")
]
_motdMessages = [
(irc.RPL_MOTDSTART,
":- %(serviceName)s Message of the Day - "),
(irc.RPL_ENDOFMOTD,
":End of /MOTD command.")
]
def _cbLogin(self, (iface, avatar, logout)):
assert iface is iwords.IUser, "Realm is buggy, got %r" % (iface,)
# Let them send messages to the world
del self.irc_PRIVMSG
self.avatar = avatar
self.logout = logout
for code, text in self._welcomeMessages:
self.sendMessage(code, text % self.factory._serverInfo)
def _ebLogin(self, err, nickname):
if err.check(ewords.AlreadyLoggedIn):
self.privmsg(
NICKSERV,
nickname,
"Already logged in. No pod people allowed!")
elif err.check(ecred.UnauthorizedLogin):
self.privmsg(
NICKSERV,
nickname,
"Login failed. Goodbye.")
else:
log.msg("Unhandled error during login:")
log.err(err)
self.privmsg(
NICKSERV,
nickname,
"Server error during login. Sorry.")
self.transport.loseConnection()
# Great, now that's out of the way, here's some of the interesting
# bits
def irc_PING(self, prefix, params):
"""Ping message
Parameters: <server1> [ <server2> ]
"""
if self.realm is not None:
self.sendMessage('PONG', self.hostname)
def irc_QUIT(self, prefix, params):
"""Quit
Parameters: [ <Quit Message> ]
"""
self.transport.loseConnection()
def _channelMode(self, group, modes=None, *args):
if modes:
self.sendMessage(
irc.ERR_UNKNOWNMODE,
":Unknown MODE flag.")
else:
self.channelMode(self.name, '#' + group.name, '+')
def _userMode(self, user, modes=None):
if modes:
self.sendMessage(
irc.ERR_UNKNOWNMODE,
":Unknown MODE flag.")
elif user is self.avatar:
self.sendMessage(
irc.RPL_UMODEIS,
"+")
else:
self.sendMessage(
irc.ERR_USERSDONTMATCH,
":You can't look at someone else's modes.")
def irc_MODE(self, prefix, params):
"""User mode message
Parameters: <nickname>
*( ( "+" / "-" ) *( "i" / "w" / "o" / "O" / "r" ) )
"""
try:
channelOrUser = params[0].decode(self.encoding)
except UnicodeDecodeError:
self.sendMessage(
irc.ERR_NOSUCHNICK, params[0],
":No such nickname (could not decode your unicode!)")
return
if channelOrUser.startswith('#'):
def ebGroup(err):
err.trap(ewords.NoSuchGroup)
self.sendMessage(
irc.ERR_NOSUCHCHANNEL, params[0],
":That channel doesn't exist.")
d = self.realm.lookupGroup(channelOrUser[1:])
d.addCallbacks(
self._channelMode,
ebGroup,
callbackArgs=tuple(params[1:]))
else:
def ebUser(err):
self.sendMessage(
irc.ERR_NOSUCHNICK,
":No such nickname.")
d = self.realm.lookupUser(channelOrUser)
d.addCallbacks(
self._userMode,
ebUser,
callbackArgs=tuple(params[1:]))
def irc_USERHOST(self, prefix, params):
"""Userhost message
Parameters: <nickname> *( SPACE <nickname> )
[Optional]
"""
pass
def irc_PRIVMSG(self, prefix, params):
"""Send a (private) message.
Parameters: <msgtarget> <text to be sent>
"""
try:
targetName = params[0].decode(self.encoding)
except UnicodeDecodeError:
self.sendMessage(
irc.ERR_NOSUCHNICK, targetName,
":No such nick/channel (could not decode your unicode!)")
return
messageText = params[-1]
if targetName.startswith('#'):
target = self.realm.lookupGroup(targetName[1:])
else:
target = self.realm.lookupUser(targetName).addCallback(lambda user: user.mind)
def cbTarget(targ):
if targ is not None:
return self.avatar.send(targ, {"text": messageText})
def ebTarget(err):
self.sendMessage(
irc.ERR_NOSUCHNICK, targetName,
":No such nick/channel.")
target.addCallbacks(cbTarget, ebTarget)
def irc_JOIN(self, prefix, params):
"""Join message
Parameters: ( <channel> *( "," <channel> ) [ <key> *( "," <key> ) ] )
"""
try:
groupName = params[0].decode(self.encoding)
except UnicodeDecodeError:
self.sendMessage(
irc.IRC_NOSUCHCHANNEL, params[0],
":No such channel (could not decode your unicode!)")
return
if groupName.startswith('#'):
groupName = groupName[1:]
def cbGroup(group):
def cbJoin(ign):
self.userJoined(group, self)
self.names(
self.name,
'#' + group.name,
[user.name for user in group.iterusers()])
self._sendTopic(group)
return self.avatar.join(group).addCallback(cbJoin)
def ebGroup(err):
self.sendMessage(
irc.ERR_NOSUCHCHANNEL, '#' + groupName,
":No such channel.")
self.realm.getGroup(groupName).addCallbacks(cbGroup, ebGroup)
def irc_PART(self, prefix, params):
"""Part message
Parameters: <channel> *( "," <channel> ) [ <Part Message> ]
"""
try:
groupName = params[0].decode(self.encoding)
except UnicodeDecodeError:
self.sendMessage(
irc.ERR_NOTONCHANNEL, params[0],
":Could not decode your unicode!")
return
if groupName.startswith('#'):
groupName = groupName[1:]
if len(params) > 1:
reason = params[1].decode('utf-8')
else:
reason = None
def cbGroup(group):
def cbLeave(result):
self.userLeft(group, self, reason)
return self.avatar.leave(group, reason).addCallback(cbLeave)
def ebGroup(err):
err.trap(ewords.NoSuchGroup)
self.sendMessage(
irc.ERR_NOTONCHANNEL,
'#' + groupName,
":" + err.getErrorMessage())
self.realm.lookupGroup(groupName).addCallbacks(cbGroup, ebGroup)
def irc_NAMES(self, prefix, params):
"""Names message
Parameters: [ <channel> *( "," <channel> ) [ <target> ] ]
"""
#<< NAMES #python
#>> :benford.openprojects.net 353 glyph = #python :Orban ... @glyph ... Zymurgy skreech
#>> :benford.openprojects.net 366 glyph #python :End of /NAMES list.
try:
channel = params[-1].decode(self.encoding)
except UnicodeDecodeError:
self.sendMessage(
irc.ERR_NOSUCHCHANNEL, params[-1],
":No such channel (could not decode your unicode!)")
return
if channel.startswith('#'):
channel = channel[1:]
def cbGroup(group):
self.names(
self.name,
'#' + group.name,
[user.name for user in group.iterusers()])
def ebGroup(err):
err.trap(ewords.NoSuchGroup)
# No group? Fine, no names!
self.names(
self.name,
'#' + channel,
[])
self.realm.lookupGroup(channel).addCallbacks(cbGroup, ebGroup)
def irc_TOPIC(self, prefix, params):
"""Topic message
Parameters: <channel> [ <topic> ]
"""
try:
channel = params[0].decode(self.encoding)
except UnicodeDecodeError:
self.sendMessage(
irc.ERR_NOSUCHCHANNEL,
":That channel doesn't exist (could not decode your unicode!)")
return
if channel.startswith('#'):
channel = channel[1:]
if len(params) > 1:
self._setTopic(channel, params[1])
else:
self._getTopic(channel)
def _sendTopic(self, group):
"""
Send the topic of the given group to this user, if it has one.
"""
topic = group.meta.get("topic")
if topic:
author = group.meta.get("topic_author") or "<noone>"
date = group.meta.get("topic_date", 0)
self.topic(self.name, '#' + group.name, topic)
self.topicAuthor(self.name, '#' + group.name, author, date)
def _getTopic(self, channel):
#<< TOPIC #python
#>> :benford.openprojects.net 332 glyph #python :<churchr> I really did. I sprained all my toes.
#>> :benford.openprojects.net 333 glyph #python itamar|nyc 994713482
def ebGroup(err):
err.trap(ewords.NoSuchGroup)
self.sendMessage(
irc.ERR_NOSUCHCHANNEL, '=', channel,
":That channel doesn't exist.")
self.realm.lookupGroup(channel).addCallbacks(self._sendTopic, ebGroup)
def _setTopic(self, channel, topic):
#<< TOPIC #divunal :foo
#>> :glyph!glyph@adsl-64-123-27-108.dsl.austtx.swbell.net TOPIC #divunal :foo
def cbGroup(group):
newMeta = group.meta.copy()
newMeta['topic'] = topic
newMeta['topic_author'] = self.name
newMeta['topic_date'] = int(time())
def ebSet(err):
self.sendMessage(
irc.ERR_CHANOPRIVSNEEDED,
"#" + group.name,
":You need to be a channel operator to do that.")
return group.setMetadata(newMeta).addErrback(ebSet)
def ebGroup(err):
err.trap(ewords.NoSuchGroup)
self.sendMessage(
irc.ERR_NOSUCHCHANNEL, '=', channel,
":That channel doesn't exist.")
self.realm.lookupGroup(channel).addCallbacks(cbGroup, ebGroup)
def list(self, channels):
"""Send a group of LIST response lines
@type channel: C{list} of C{(str, int, str)}
@param channel: Information about the channels being sent:
their name, the number of participants, and their topic.
"""
for (name, size, topic) in channels:
self.sendMessage(irc.RPL_LIST, name, str(size), ":" + topic)
self.sendMessage(irc.RPL_LISTEND, ":End of /LIST")
def irc_LIST(self, prefix, params):
"""List query
Return information about the indicated channels, or about all
channels if none are specified.
Parameters: [ <channel> *( "," <channel> ) [ <target> ] ]
"""
#<< list #python
#>> :orwell.freenode.net 321 exarkun Channel :Users Name
#>> :orwell.freenode.net 322 exarkun #python 358 :The Python programming language
#>> :orwell.freenode.net 323 exarkun :End of /LIST
if params:
# Return information about indicated channels
try:
channels = params[0].decode(self.encoding).split(',')
except UnicodeDecodeError:
self.sendMessage(
irc.ERR_NOSUCHCHANNEL, params[0],
":No such channel (could not decode your unicode!)")
return
groups = []
for ch in channels:
if ch.startswith('#'):
ch = ch[1:]
groups.append(self.realm.lookupGroup(ch))
groups = defer.DeferredList(groups, consumeErrors=True)
groups.addCallback(lambda gs: [r for (s, r) in gs if s])
else:
# Return information about all channels
groups = self.realm.itergroups()
def cbGroups(groups):
def gotSize(size, group):
return group.name, size, group.meta.get('topic')
d = defer.DeferredList([
group.size().addCallback(gotSize, group) for group in groups])
d.addCallback(lambda results: self.list([r for (s, r) in results if s]))
return d
groups.addCallback(cbGroups)
def _channelWho(self, group):
self.who(self.name, '#' + group.name,
[(m.name, self.hostname, self.realm.name, m.name, "H", 0, m.name) for m in group.iterusers()])
def _userWho(self, user):
self.sendMessage(irc.RPL_ENDOFWHO,
":User /WHO not implemented")
def irc_WHO(self, prefix, params):
"""Who query
Parameters: [ <mask> [ "o" ] ]
"""
#<< who #python
#>> :x.opn 352 glyph #python aquarius pc-62-31-193-114-du.blueyonder.co.uk y.opn Aquarius H :3 Aquarius
# ...
#>> :x.opn 352 glyph #python foobar europa.tranquility.net z.opn skreech H :0 skreech
#>> :x.opn 315 glyph #python :End of /WHO list.
### also
#<< who glyph
#>> :x.opn 352 glyph #python glyph adsl-64-123-27-108.dsl.austtx.swbell.net x.opn glyph H :0 glyph
#>> :x.opn 315 glyph glyph :End of /WHO list.
if not params:
self.sendMessage(irc.RPL_ENDOFWHO, ":/WHO not supported.")
return
try:
channelOrUser = params[0].decode(self.encoding)
except UnicodeDecodeError:
self.sendMessage(
irc.RPL_ENDOFWHO, params[0],
":End of /WHO list (could not decode your unicode!)")
return
if channelOrUser.startswith('#'):
def ebGroup(err):
err.trap(ewords.NoSuchGroup)
self.sendMessage(
irc.RPL_ENDOFWHO, channelOrUser,
":End of /WHO list.")
d = self.realm.lookupGroup(channelOrUser[1:])
d.addCallbacks(self._channelWho, ebGroup)
else:
def ebUser(err):
err.trap(ewords.NoSuchUser)
self.sendMessage(
irc.RPL_ENDOFWHO, channelOrUser,
":End of /WHO list.")
d = self.realm.lookupUser(channelOrUser)
d.addCallbacks(self._userWho, ebUser)
def irc_WHOIS(self, prefix, params):
"""Whois query
Parameters: [ <target> ] <mask> *( "," <mask> )
"""
def cbUser(user):
self.whois(
self.name,
user.name, user.name, self.realm.name,
user.name, self.realm.name, 'Hi mom!', False,
int(time() - user.lastMessage), user.signOn,
['#' + group.name for group in user.itergroups()])
def ebUser(err):
err.trap(ewords.NoSuchUser)
self.sendMessage(
irc.ERR_NOSUCHNICK,
params[0],
":No such nick/channel")
try:
user = params[0].decode(self.encoding)
except UnicodeDecodeError:
self.sendMessage(
irc.ERR_NOSUCHNICK,
params[0],
":No such nick/channel")
return
self.realm.lookupUser(user).addCallbacks(cbUser, ebUser)
# Unsupported commands, here for legacy compatibility
def irc_OPER(self, prefix, params):
"""Oper message
Parameters: <name> <password>
"""
self.sendMessage(irc.ERR_NOOPERHOST, ":O-lines not applicable")
class IRCFactory(protocol.ServerFactory):
    """
    IRC server factory that creates one L{IRCUser} protocol per connection.

    @ivar _serverInfo: A dictionary mapping:
        "serviceName" to the name of the server,
        "serviceVersion" to the copyright version,
        "creationDate" to the time that the server was started.
    """
    protocol = IRCUser

    def __init__(self, realm, portal):
        self.realm = realm
        self.portal = portal
        # Static server metadata captured once at factory creation time.
        self._serverInfo = dict(
            serviceName=realm.name,
            serviceVersion=copyright.version,
            creationDate=ctime(),
        )
class PBMind(pb.Referenceable):
    """Client-side "mind" stub: a remotely-referenceable object whose
    notification callbacks are all no-ops.  Subclass and override the
    remote_* methods to react to chat events."""
    def __init__(self):
        pass
    def jellyFor(self, jellier):
        # Serialize as a (qualified class name, remote reference) pair so
        # the peer unjellies it into a PBMindReference.
        return reflect.qual(PBMind), jellier.invoker.registerReference(self)
    def remote_userJoined(self, user, group):
        pass
    def remote_userLeft(self, user, group, reason):
        pass
    def remote_receive(self, sender, recipient, message):
        pass
    def remote_groupMetaUpdate(self, group, meta):
        pass
class PBMindReference(pb.RemoteReference):
    """Server-side proxy for a PBMind: forwards chat-service events to the
    remote client over PB, wrapping model objects in PB-safe stubs."""
    implements(iwords.IChatClient)
    def receive(self, sender, recipient, message):
        # Wrap the recipient as a group or user stub depending on its
        # interface before shipping it across the wire.
        if iwords.IGroup.providedBy(recipient):
            rec = PBGroup(self.realm, self.avatar, recipient)
        else:
            rec = PBUser(self.realm, self.avatar, recipient)
        return self.callRemote(
            'receive',
            PBUser(self.realm, self.avatar, sender),
            rec,
            message)
    def groupMetaUpdate(self, group, meta):
        return self.callRemote(
            'groupMetaUpdate',
            PBGroup(self.realm, self.avatar, group),
            meta)
    def userJoined(self, group, user):
        return self.callRemote(
            'userJoined',
            PBGroup(self.realm, self.avatar, group),
            PBUser(self.realm, self.avatar, user))
    def userLeft(self, group, user, reason=None):
        # reason travels as text (or not at all); never an arbitrary object.
        assert reason is None or isinstance(reason, unicode)
        return self.callRemote(
            'userLeft',
            PBGroup(self.realm, self.avatar, group),
            PBUser(self.realm, self.avatar, user),
            reason)
# Unjelly PBMind payloads from the peer into PBMindReference proxies.
pb.setUnjellyableForClass(PBMind, PBMindReference)
class PBGroup(pb.Referenceable):
    """PB-safe stub for a chat group, scoped to one avatar's session."""
    def __init__(self, realm, avatar, group):
        self.realm = realm
        self.avatar = avatar
        self.group = group
    def processUniqueID(self):
        # Identity within a PB session: same (realm, avatar, group) triple
        # serializes to the same reference.
        return hash((self.realm.name, self.avatar.name, self.group.name))
    def jellyFor(self, jellier):
        # Ship the group name (utf-8 bytes) alongside the reference so the
        # peer can reconstruct it without a round trip.
        return reflect.qual(self.__class__), self.group.name.encode('utf-8'), jellier.invoker.registerReference(self)
    def remote_leave(self, reason=None):
        return self.avatar.leave(self.group, reason)
    def remote_send(self, message):
        return self.avatar.send(self.group, message)
class PBGroupReference(pb.RemoteReference):
    """Client-side proxy for a PBGroup; leave/send are forwarded remotely."""
    implements(iwords.IGroup)
    def unjellyFor(self, unjellier, unjellyList):
        # The jelly payload is (class name, utf-8 group name, reference);
        # pull the name out locally and let PB consume the rest.
        clsName, name, ref = unjellyList
        self.name = name.decode('utf-8')
        return pb.RemoteReference.unjellyFor(self, unjellier, [clsName, ref])
    def leave(self, reason=None):
        return self.callRemote("leave", reason)
    def send(self, message):
        return self.callRemote("send", message)
# Unjelly PBGroup payloads from the peer into PBGroupReference proxies.
pb.setUnjellyableForClass(PBGroup, PBGroupReference)
class PBUser(pb.Referenceable):
    """PB-safe stub for a chat user, scoped to one avatar's session."""
    def __init__(self, realm, avatar, user):
        self.realm = realm
        self.avatar = avatar
        self.user = user
    def processUniqueID(self):
        # Same (realm, avatar, user) triple serializes to the same reference.
        return hash((self.realm.name, self.avatar.name, self.user.name))
class ChatAvatar(pb.Referenceable):
    """PB perspective adapter wrapping an IUser avatar so remote clients
    can join groups."""
    implements(iwords.IChatClient)
    def __init__(self, avatar):
        self.avatar = avatar
    def jellyFor(self, jellier):
        return reflect.qual(self.__class__), jellier.invoker.registerReference(self)
    def remote_join(self, groupName):
        # Group names cross the wire as text only.
        assert isinstance(groupName, unicode)
        def cbGroup(group):
            def cbJoin(ignored):
                # Hand the client a stub it can send/leave through.
                return PBGroup(self.avatar.realm, self.avatar, group)
            d = self.avatar.join(group)
            d.addCallback(cbJoin)
            return d
        d = self.avatar.realm.getGroup(groupName)
        d.addCallback(cbGroup)
        return d
# Let PB adapt any IUser avatar into a perspective automatically.
registerAdapter(ChatAvatar, iwords.IUser, pb.IPerspective)
class AvatarReference(pb.RemoteReference):
    """Client-side proxy for a ChatAvatar."""
    def join(self, groupName):
        return self.callRemote('join', groupName)
    def quit(self):
        # There is no remote "quit"; dropping the transport logs us out.
        # The Deferred fires once the broker notices the disconnect.
        d = defer.Deferred()
        self.broker.notifyOnDisconnect(lambda: d.callback(None))
        self.broker.transport.loseConnection()
        return d
# Unjelly ChatAvatar payloads from the peer into AvatarReference proxies.
pb.setUnjellyableForClass(ChatAvatar, AvatarReference)
class WordsRealm(object):
    """Abstract chat realm: user/group storage plus the cred IRealm hook.

    Subclasses supply persistent lookup/add; this base implements the
    create-on-request policies and avatar login bookkeeping.
    """
    implements(portal.IRealm, iwords.IChatService)
    # Encoding used when a byte-string avatarId arrives from cred.
    _encoding = 'utf-8'
    def __init__(self, name):
        self.name = name
    def userFactory(self, name):
        # Hook point: how new users are constructed.
        return User(name)
    def groupFactory(self, name):
        # Hook point: how new groups are constructed.
        return Group(name)
    def logoutFactory(self, avatar, facet):
        # Returns the logout callable handed back to cred; detaches the
        # avatar from this realm when invoked.
        def logout():
            # XXX Deferred support here
            getattr(facet, 'logout', lambda: None)()
            avatar.realm = avatar.mind = None
        return logout
    def requestAvatar(self, avatarId, mind, *interfaces):
        """IRealm hook: resolve avatarId to a logged-in avatar facet."""
        if isinstance(avatarId, str):
            avatarId = avatarId.decode(self._encoding)
        def gotAvatar(avatar):
            # Refuse a second concurrent login for the same avatar.
            if avatar.realm is not None:
                raise ewords.AlreadyLoggedIn()
            for iface in interfaces:
                facet = iface(avatar, None)
                if facet is not None:
                    avatar.loggedIn(self, mind)
                    mind.name = avatarId
                    mind.realm = self
                    mind.avatar = avatar
                    return iface, facet, self.logoutFactory(avatar, facet)
            raise NotImplementedError(self, interfaces)
        return self.getUser(avatarId).addCallback(gotAvatar)
    # IChatService, mostly.
    # Policies: whether unknown names are created on demand.
    createGroupOnRequest = False
    createUserOnRequest = True
    def lookupUser(self, name):
        # Subclass responsibility: Deferred firing with the user or
        # failing with ewords.NoSuchUser.
        raise NotImplementedError
    def lookupGroup(self, group):
        # Subclass responsibility: Deferred firing with the group or
        # failing with ewords.NoSuchGroup.
        raise NotImplementedError
    def addUser(self, user):
        """Add the given user to this service.
        This is an internal method intended to be overridden by
        L{WordsRealm} subclasses, not called by external code.
        @type user: L{IUser}
        @rtype: L{twisted.internet.defer.Deferred}
        @return: A Deferred which fires with C{None} when the user is
        added, or which fails with
        L{twisted.words.ewords.DuplicateUser} if a user with the
        same name exists already.
        """
        raise NotImplementedError
    def addGroup(self, group):
        """Add the given group to this service.
        @type group: L{IGroup}
        @rtype: L{twisted.internet.defer.Deferred}
        @return: A Deferred which fires with C{None} when the group is
        added, or which fails with
        L{twisted.words.ewords.DuplicateGroup} if a group with the
        same name exists already.
        """
        raise NotImplementedError
    def getGroup(self, name):
        """Look up a group, creating it first if policy allows."""
        assert isinstance(name, unicode)
        if self.createGroupOnRequest:
            def ebGroup(err):
                # Lost a creation race: fall back to plain lookup.
                err.trap(ewords.DuplicateGroup)
                return self.lookupGroup(name)
            return self.createGroup(name).addErrback(ebGroup)
        return self.lookupGroup(name)
    def getUser(self, name):
        """Look up a user, creating it first if policy allows."""
        assert isinstance(name, unicode)
        if self.createUserOnRequest:
            def ebUser(err):
                # Lost a creation race: fall back to plain lookup.
                err.trap(ewords.DuplicateUser)
                return self.lookupUser(name)
            return self.createUser(name).addErrback(ebUser)
        return self.lookupUser(name)
    def createUser(self, name):
        """Create and register a new user; fails with DuplicateUser if the
        (case-folded) name is taken."""
        assert isinstance(name, unicode)
        def cbLookup(user):
            return failure.Failure(ewords.DuplicateUser(name))
        def ebLookup(err):
            err.trap(ewords.NoSuchUser)
            return self.userFactory(name)
        name = name.lower()
        d = self.lookupUser(name)
        d.addCallbacks(cbLookup, ebLookup)
        d.addCallback(self.addUser)
        return d
    def createGroup(self, name):
        """Create and register a new group; fails with DuplicateGroup if
        the (case-folded) name is taken."""
        assert isinstance(name, unicode)
        def cbLookup(group):
            return failure.Failure(ewords.DuplicateGroup(name))
        def ebLookup(err):
            err.trap(ewords.NoSuchGroup)
            return self.groupFactory(name)
        name = name.lower()
        d = self.lookupGroup(name)
        d.addCallbacks(cbLookup, ebLookup)
        d.addCallback(self.addGroup)
        return d
class InMemoryWordsRealm(WordsRealm):
    """WordsRealm keeping all users and groups in plain dicts.

    Nothing is persisted; keys are the lower-cased names (see the
    createUser/createGroup base-class methods, which lowercase before
    calling addUser/addGroup).
    """
    def __init__(self, *a, **kw):
        super(InMemoryWordsRealm, self).__init__(*a, **kw)
        # name -> User / name -> Group
        self.users = {}
        self.groups = {}
    def itergroups(self):
        return defer.succeed(self.groups.itervalues())
    def addUser(self, user):
        if user.name in self.users:
            return defer.fail(failure.Failure(ewords.DuplicateUser()))
        self.users[user.name] = user
        return defer.succeed(user)
    def addGroup(self, group):
        if group.name in self.groups:
            return defer.fail(failure.Failure(ewords.DuplicateGroup()))
        self.groups[group.name] = group
        return defer.succeed(group)
    def lookupUser(self, name):
        assert isinstance(name, unicode)
        name = name.lower()
        try:
            user = self.users[name]
        except KeyError:
            return defer.fail(failure.Failure(ewords.NoSuchUser(name)))
        else:
            return defer.succeed(user)
    def lookupGroup(self, name):
        assert isinstance(name, unicode)
        name = name.lower()
        try:
            group = self.groups[name]
        except KeyError:
            return defer.fail(failure.Failure(ewords.NoSuchGroup(name)))
        else:
            return defer.succeed(group)
__all__ = [
    'Group', 'User',
    'WordsRealm', 'InMemoryWordsRealm',
    ]
| |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Algorithm portfolio data set module.
The algorithm portfolio consists of running multiple algorithms in
parallel.
Current limitation: the portfolio data set must come from data sets that
are identical (same number of repetitions on the same instances of the
functions).
**Example:**
.. plot::
:width: 75%
import urllib
import tarfile
import glob
from pylab import *
import pickle
import bbob_pproc as bb
import bbob_pproc.compall.pprldmany
import bbob_pproc.algportfolio
# Collect and unarchive data
dsets = {}
for alg in ('BIPOP-CMA-ES', 'NEWUOA'):
dataurl = 'http://coco.lri.fr/BBOB2009/pythondata/' + alg + '.tar.gz'
filename, headers = urllib.urlretrieve(dataurl)
archivefile = tarfile.open(filename)
archivefile.extractall() # write to disc
dsets[alg] = bb.load(glob.glob('BBOB2009pythondata/' + alg + '/ppdata_f0*_20.pickle'))
# Generate the algorithm portfolio
dspf = bb.algportfolio.build(dsets)
dsets['Portfolio'] = dspf # store the portfolio in dsets
# plot the run lengths distribution functions
figure()
for algname, ds in dsets.iteritems():
bb.compall.pprldmany.plot(ds, label=algname)
bb.compall.pprldmany.beautify()
legend(loc='best') # Display legend
"""
# TODO: generalize behaviour for data sets that have different instances...
from __future__ import absolute_import
import os
import sys
import glob
import getopt
import pickle
from pdb import set_trace
import warnings
import numpy as np
from . import pproc as pp
from . import readalign as ra
#CLASS DEFINITIONS
class Usage(Exception):
    """Fatal usage error carrying a printable message in ``msg``."""

    def __init__(self, msg):
        # Callers read err.msg directly (see build() below in this file),
        # so the message is stored as an attribute.
        self.msg = msg
class DataSet(pp.DataSet):
    """Unit element of algorithm portfolio data set.

    Simulates running all member algorithms in lockstep: for each trial
    the portfolio's evaluation count is the total spent by every member
    up to the point where the fastest member reaches the target.

    Modified class attributes:
      - comment -- comments for the setting (tuple, one per algorithm)
      - algId -- algorithm name (tuple, one per algorithm)
    """
    def __init__(self, dslist):
        """Instantiate one algorithm portfolio data set.

        :param dict dslist: list of :py:class:`pproc.DataSetList`
                            instances, all for the same function and
                            dimension
        :raises Usage: if *dslist* mixes functions or dimensions
        """
        def _conv_evals(evals, algnb, maxevals):
            # Convert a single-algorithm evaluation count to the
            # portfolio cost: every other algorithm burns evaluations in
            # parallel (capped at its own budget maxevals[k]).
            if evals > maxevals[algnb]:
                return np.nan
            res = 0.
            mevals = np.asarray(maxevals)
            if evals > len(maxevals) or not isinstance(evals, int):
                smevals = np.sort(mevals)
                for i in smevals:
                    res += min(evals - 1, i)
            else:
                for i in range(1, evals):
                    res += np.sum(i <= mevals)
            res += np.sum(evals <= mevals[:algnb+1])
            return res
        # Checking procedure: all member data sets must share exactly one
        # function id and one dimension.
        d = set()
        f = set()
        trials = []
        for i in dslist:
            d.add(i.dim)
            f.add(i.funcId)
            trials.append(i.createDictInstanceCount())
        if len(f) > 1 or len(d) > 1:
            raise Usage('%s: Expect the data of algorithms for only one '
                        'function and one dimension.' % (dslist))
        elif trials[1:] != trials[:-1]:
            # Differing instance sets are tolerated with a warning; this
            # check may become superfluous if all instances are treated
            # as equivalent.
            warnings.warn('portfolio will be generated from algorithm with different instances')
        self.dim = d.pop()
        self.funcId = f.pop()
        algId = []
        comment = []
        for i in dslist:
            algId.append(i.algId)
            comment.append(i.comment)
        self.algId = tuple(algId)
        self.comment = tuple(comment)
        # Data handling
        nbruns = dslist[0].nbRuns() # all data sets have the same #runs
        # Bug fix: build one fresh list per algorithm. The previous
        # `[[]] * len(dslist)` aliased a single list across all entries,
        # so any extend() corrupted every slot.
        corresp = [[] for _ in dslist]
        if False:
            # find correspondence with respect to first element in dslist
            dictref = dslist[0].createDictInstance()
            for i, ds in enumerate(dslist):
                tmpdict = ds.createDictInstance()
                for j in sorted(dictref):
                    corresp[i].extend(tmpdict[j])
        else:
            for i in range(len(dslist)):
                corresp[i] = range(nbruns)
        self.instancenumbers = trials.pop()
        maxevals = []
        finalfunvals = []
        evals = []
        funvals = []
        for i in range(nbruns):
            tmpmaxevals = []
            tmpfinalfunvals = []
            tmpevals = []
            tmpfunvals = []
            for j, ds in enumerate(dslist):
                tmpmaxevals.append(ds.maxevals[corresp[j][i]])
                tmpfinalfunvals.append(ds.finalfunvals[corresp[j][i]])
                tmpevals.append(ds.evals[:, np.r_[0, corresp[j][i]+1]])
                tmpfunvals.append(ds.funvals[:, np.r_[0, corresp[j][i]+1]].copy())
            maxevals.append(np.sum(tmpmaxevals))
            finalfunvals.append(min(tmpfinalfunvals))
            # Bug fix: isBiobjective() is a method of the individual data
            # sets, not of the plain list `dslist` (consistent with the
            # self.evals assignment below).
            tmpevals = ra.alignArrayData(ra.HArrayMultiReader(tmpevals, dslist[0].isBiobjective()))
            tmpres = []
            for j in tmpevals:
                tmp = []
                for k, e in enumerate(j[1:]):
                    tmp.append(_conv_evals(e, k, tmpmaxevals))
                tmpres.append(min(tmp))
            evals.append(np.column_stack((tmpevals[:, 0], tmpres)))
            # Rescale each member's function-value history onto the
            # portfolio evaluation axis, then keep the best value.
            for j, a in enumerate(tmpfunvals):
                for k in range(len(a[:, 0])):
                    a[k, 0] = _conv_evals(a[k, 0], j, tmpmaxevals)
            tmpfunvals = ra.alignArrayData(ra.VArrayMultiReader(tmpfunvals))
            tmpres = []
            for j in tmpfunvals:
                tmpres.append(min(j[1:]))
            funvals.append(np.column_stack((tmpfunvals[:, 0], tmpres)))
        self.maxevals = np.array(maxevals)
        self.finalfunvals = np.array(finalfunvals)
        self.evals = ra.alignArrayData(ra.HArrayMultiReader(evals, dslist[0].isBiobjective()))
        self.funvals = ra.alignArrayData(ra.VArrayMultiReader(funvals))
        self.computeERTfromEvals()
#FUNCTION DEFINITIONS
def build(dictAlg, sortedAlg=None):
    """Merge datasets in an algorithm portfolio.

    :param dict dictAlg: dictionary of data sets with algorithm name for
                         keys, see ``pproc.DataSetList.dictByAlg``
    :param seq sortedAlg: sequence for sorting the entries of
                          :py:data:`dictAlg`; if not provided,
                          dictAlg.keys() will be used instead
    :returns: an instance of :py:class:`DataSetList` with the portfolio
              data sets
    """
    if not sortedAlg:
        sortedAlg = dictAlg.keys()
    tmpres = []
    # One portfolio DataSet per (function, dimension) pair.
    for f, i in pp.dictAlgByFun(dictAlg).iteritems():
        for d, j in pp.dictAlgByDim(i).iteritems():
            tmp = []
            if sortedAlg:
                tmplist = list(j[k] for k in sortedAlg)
            else:
                tmplist = j.values()
            for k in tmplist:
                assert len(k) == 1 # one element list
                tmp.append(k[0])
            try:
                tmpres.append(DataSet(tmp))
            except Usage, err:
                # Mixed function/dimension input: report and keep going.
                print >>sys.stderr, err.msg
    res = pp.DataSetList()
    res.extend(tmpres)
    return res
| |
import sqlalchemy as sa
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy_utils.functions import get_primary_keys
from .comparators import TranslationComparator
from .exc import ImproperlyConfigured
from .expressions import current_locale
from .utils import get_fallback_locale, option
class HybridPropertyBuilder(object):
    """Generate hybrid properties on a translatable parent model.

    For every translated (non-primary-key, non-excluded) column of the
    translation model, a same-named hybrid property is attached to the
    parent class: reads come from the current translation with a
    fallback-locale fallback, writes go to the current translation.
    """
    def __init__(self, manager, translation_model):
        self.manager = manager
        self.translation_model = translation_model
        self.model = self.translation_model.__parent_class__

    def getter_factory(self, property_name):
        """
        Return an instance-level getter for given property name.

        Falls back to the fallback locale whenever the current
        translation holds a falsy value (e.g. None or empty string).
        """
        def attribute_getter(obj):
            value = getattr(obj.current_translation, property_name)
            if value:
                return value
            locale = get_fallback_locale(obj)
            return getattr(
                obj.translations[locale],
                property_name
            )
        return attribute_getter

    def setter_factory(self, property_name):
        """
        Return a hybrid property setter for given property name.

        :param property_name: Name of the property to generate a setter for
        """
        return (
            lambda obj, value:
            setattr(obj.current_translation, property_name, value)
        )

    def generate_hybrid(self, property_name):
        """
        Generate a SQLAlchemy hybrid property for given translation model
        property.

        :param property_name:
            Name of the translation model property to generate hybrid property
            for.
        """
        setattr(
            self.model,
            property_name,
            hybrid_property(
                fget=self.getter_factory(property_name),
                fset=self.setter_factory(property_name),
                # Class-level expression delegates to the translation class.
                expr=lambda cls: getattr(
                    cls.__translatable__['class'], property_name
                )
            )
        )

    def detect_collisions(self, property_name):
        """
        Detect possible naming collisions for given property name.

        :raises sqlalchemy_i18n.exc.ImproperlyConfigured: if the model already
            has a property with given name
        """
        mapper = sa.inspect(self.model)
        if mapper.has_property(property_name):
            # Bug fix: the message previously read "with the same already
            # exists" — the word "name" was missing.
            raise ImproperlyConfigured(
                "Attribute name collision detected. Could not create "
                "hybrid property for translated attribute '%s'. "
                "An attribute with the same name already exists in parent "
                "class '%s'." % (
                    property_name,
                    self.model.__name__
                )
            )

    def __call__(self):
        """Generate hybrids for all eligible translated columns."""
        mapper = sa.orm.class_mapper(self.translation_model)
        for column in mapper.local_table.c:
            exclude = self.manager.option(
                self.model, 'exclude_hybrid_properties'
            )
            if column.key in exclude or column.primary_key:
                continue
            self.detect_collisions(column.key)
            self.generate_hybrid(column.key)
class RelationshipBuilder(object):
    """Wire up ORM relationships between a translatable parent class and
    its translation class (per-locale, current, fallback, all, parent)."""
    def __init__(self, translation_cls):
        self.translation_cls = translation_cls
        self.parent_cls = self.translation_cls.__parent_class__
    @property
    def primary_key_conditions(self):
        # Join conditions equating each parent primary-key column with
        # the same-named column on the translation class.
        conditions = []
        for key in get_primary_keys(self.parent_cls).keys():
            conditions.append(
                getattr(self.parent_cls, key) ==
                getattr(self.translation_cls, key)
            )
        return conditions
    def assign_single_translations(self):
        """Add one viewonly relationship per configured locale, named
        '_translation_<locale>'."""
        mapper = sa.orm.class_mapper(self.parent_cls)
        for locale in option(self.parent_cls, 'locales'):
            key = '_translation_%s' % locale
            if mapper.has_property(key):
                continue
            conditions = self.primary_key_conditions
            conditions.append(self.translation_cls.locale == locale)
            mapper.add_property(key, sa.orm.relationship(
                self.translation_cls,
                primaryjoin=sa.and_(*conditions),
                foreign_keys=list(
                    get_primary_keys(self.parent_cls).values()
                ),
                uselist=False,
                viewonly=True
            ))
    def assign_fallback_translation(self):
        """
        Assign the fallback translation relationship for translatable parent
        class.
        """
        mapper = sa.orm.class_mapper(self.parent_cls)
        if not mapper.has_property('_fallback_translation'):
            conditions = self.primary_key_conditions
            conditions.append(
                self.translation_cls.locale ==
                get_fallback_locale(self.parent_cls)
            )
            mapper.add_property('_fallback_translation', sa.orm.relationship(
                self.translation_cls,
                primaryjoin=sa.and_(*conditions),
                foreign_keys=list(
                    get_primary_keys(self.parent_cls).values()
                ),
                viewonly=True,
                uselist=False
            ))
    def assign_current_translation(self):
        """
        Assign the current translation relationship for translatable parent
        class.
        """
        mapper = sa.orm.class_mapper(self.parent_cls)
        if not mapper.has_property('_current_translation'):
            conditions = self.primary_key_conditions
            # current_locale() is evaluated per query, not at mapping time.
            conditions.append(
                self.translation_cls.locale == current_locale()
            )
            mapper.add_property('_current_translation', sa.orm.relationship(
                self.translation_cls,
                primaryjoin=sa.and_(*conditions),
                foreign_keys=list(
                    get_primary_keys(self.parent_cls).values()
                ),
                viewonly=True,
                uselist=False
            ))
    def assign_translations(self):
        """
        Assigns translations relationship for translatable model. The assigned
        attribute is a relationship to all translation locales, keyed by
        locale.
        """
        mapper = sa.orm.class_mapper(self.parent_cls)
        if not mapper.has_property('_translations'):
            foreign_keys = [
                getattr(self.translation_cls, column_key)
                for column_key in get_primary_keys(self.parent_cls).keys()
            ]
            # Writable (cascading) collection, unlike the viewonly
            # relationships above.
            mapper.add_property('_translations', sa.orm.relationship(
                self.translation_cls,
                primaryjoin=sa.and_(*self.primary_key_conditions),
                foreign_keys=foreign_keys,
                collection_class=attribute_mapped_collection('locale'),
                comparator_factory=TranslationComparator,
                cascade='all, delete-orphan',
                passive_deletes=True,
            ))
    def assign_translation_parent(self):
        # Back-reference from a translation row to its parent entity.
        mapper = sa.orm.class_mapper(self.translation_cls)
        if not mapper.has_property('translation_parent'):
            mapper.add_property('translation_parent', sa.orm.relationship(
                self.parent_cls,
                uselist=False,
                viewonly=True
            ))
    def __call__(self):
        """Install all relationships (idempotent: each step checks first)."""
        self.assign_single_translations()
        self.assign_current_translation()
        self.assign_fallback_translation()
        self.assign_translations()
        self.assign_translation_parent()
| |
"""Interface to the compiler's internal symbol tables"""
import _symtable
from _symtable import USE, DEF_GLOBAL, DEF_LOCAL, DEF_PARAM, \
DEF_STAR, DEF_DOUBLESTAR, DEF_INTUPLE, DEF_FREE, \
DEF_FREE_GLOBAL, DEF_FREE_CLASS, DEF_IMPORT, DEF_BOUND, \
OPT_IMPORT_STAR, OPT_EXEC, OPT_BARE_EXEC
import weakref
__all__ = ["symtable", "SymbolTable", "newSymbolTable", "Class",
"Function", "Symbol"]
def symtable(code, filename, compile_type):
    """Compile *code* and return a SymbolTable for its top-level scope.

    *compile_type* matches the *mode* argument of compile(): 'exec',
    'eval' or 'single'.
    """
    raw = _symtable.symtable(code, filename, compile_type)
    return newSymbolTable(raw[0], filename)
class SymbolTableFactory:
    """Memoizing factory mapping raw symtable entries to wrapper objects.

    Wrappers are cached per (raw table, filename) with weak values, so a
    wrapper lives only as long as some client still references it.
    """
    def __init__(self):
        self.__memo = weakref.WeakValueDictionary()
    def new(self, table, filename):
        """Build a fresh wrapper of the class matching *table*'s type."""
        if table.type == _symtable.TYPE_FUNCTION:
            return Function(table, filename)
        elif table.type == _symtable.TYPE_CLASS:
            return Class(table, filename)
        else:
            return SymbolTable(table, filename)
    def __call__(self, table, filename):
        """Return the cached wrapper for (table, filename), creating it on
        demand."""
        key = table, filename
        cached = self.__memo.get(key)
        if cached is not None:
            return cached
        cached = self.__memo[key] = self.new(table, filename)
        return cached
newSymbolTable = SymbolTableFactory()
def bool(x):
    """Helper to force boolean result to 1 or 0.

    Deliberately shadows the builtin: this module predates the builtin
    bool type and callers expect plain integers.
    """
    return 1 if x else 0
def is_free(flags):
    """Return 1 when *flags* describe a free (closed-over) name, else 0."""
    used_or_free = flags & (USE | DEF_FREE)
    bound_here = flags & (DEF_LOCAL | DEF_PARAM | DEF_GLOBAL)
    if used_or_free and bound_here:
        return 1
    return 1 if flags & DEF_FREE_CLASS else 0
class SymbolTable:
    """Wrapper for one raw compiler symbol table (one lexical scope)."""
    def __init__(self, raw_table, filename):
        self._table = raw_table
        self._filename = filename
        # Symbol wrappers are created lazily by lookup().
        self._symbols = {}
    def __repr__(self):
        if self.__class__ == SymbolTable:
            kind = ""
        else:
            kind = "%s " % self.__class__.__name__
        if self._table.name == "global":
            return "<%sSymbolTable for module %s>" % (kind, self._filename)
        else:
            return "<%sSymbolTable for %s in %s>" % (kind, self._table.name,
                                                     self._filename)
    def get_type(self):
        """Return 'module', 'function' or 'class'."""
        if self._table.type == _symtable.TYPE_MODULE:
            return "module"
        if self._table.type == _symtable.TYPE_FUNCTION:
            return "function"
        if self._table.type == _symtable.TYPE_CLASS:
            return "class"
        # Reached only for an unknown type; the assert then reports it.
        assert self._table.type in (1, 2, 3), \
               "unexpected type: %s" % self._table.type
    def get_id(self):
        return self._table.id
    def get_name(self):
        return self._table.name
    def get_lineno(self):
        return self._table.lineno
    def is_optimized(self):
        # NOTE(review): the `not` here looks inverted relative to the
        # method name — confirm against the raw table's `optimized` flag
        # semantics before relying on this.
        return bool(self._table.type == _symtable.TYPE_FUNCTION
                    and not self._table.optimized)
    def is_nested(self):
        return bool(self._table.nested)
    def has_children(self):
        return bool(self._table.children)
    def has_exec(self):
        """Return true if the scope uses exec"""
        return bool(self._table.optimized & (OPT_EXEC | OPT_BARE_EXEC))
    def has_import_star(self):
        """Return true if the scope uses import *"""
        return bool(self._table.optimized & OPT_IMPORT_STAR)
    def get_identifiers(self):
        return self._table.symbols.keys()
    def lookup(self, name):
        """Return (and cache) the Symbol wrapper for *name*.

        Raises KeyError if the scope has no such identifier.
        """
        sym = self._symbols.get(name)
        if sym is None:
            flags = self._table.symbols[name]
            namespaces = self.__check_children(name)
            sym = self._symbols[name] = Symbol(name, flags, namespaces)
        return sym
    def get_symbols(self):
        return [self.lookup(ident) for ident in self.get_identifiers()]
    def __check_children(self, name):
        # Child tables whose scope is introduced by binding *name*
        # (i.e. nested function/class definitions named *name*).
        return [newSymbolTable(st, self._filename)
                for st in self._table.children
                if st.name == name]
    def get_children(self):
        return [newSymbolTable(st, self._filename)
                for st in self._table.children]
class Function(SymbolTable):
    """Symbol table for a function scope."""
    # Default values for instance variables (lazily-computed caches).
    __params = None
    __locals = None
    __frees = None
    __globals = None
    def __idents_matching(self, test_func):
        # Tuple of identifiers whose raw flag word satisfies test_func.
        return tuple([ident for ident in self.get_identifiers()
                      if test_func(self._table.symbols[ident])])
    def get_parameters(self):
        """Return a tuple of the function's parameter names."""
        if self.__params is None:
            self.__params = self.__idents_matching(lambda x:x & DEF_PARAM)
        return self.__params
    def get_locals(self):
        """Return a tuple of names bound locally in this function."""
        if self.__locals is None:
            self.__locals = self.__idents_matching(lambda x:x & DEF_BOUND)
        return self.__locals
    def get_globals(self):
        """Return a tuple of names referring to the global scope."""
        if self.__globals is None:
            glob = DEF_GLOBAL | DEF_FREE_GLOBAL
            self.__globals = self.__idents_matching(lambda x:x & glob)
        return self.__globals
    def get_frees(self):
        """Return a tuple of free (closed-over) variable names."""
        if self.__frees is None:
            self.__frees = self.__idents_matching(is_free)
        return self.__frees
class Class(SymbolTable):
    """Symbol table for a class scope."""
    # Lazily-computed cache of nested-scope names.
    __methods = None
    def get_methods(self):
        """Return a tuple with the names of all scopes nested directly
        inside the class body (historically called "methods")."""
        if self.__methods is None:
            seen = {}
            for child in self._table.children:
                seen[child.name] = 1
            self.__methods = tuple(seen.keys())
        return self.__methods
class Symbol:
    """One identifier in a scope, wrapping its raw flag bits."""
    def __init__(self, name, flags, namespaces=None):
        self.__name = name
        self.__flags = flags
        # Child symbol tables this name binds (function/class defs).
        self.__namespaces = namespaces or ()
    def __repr__(self):
        return "<symbol '%s'>" % self.__name
    def get_name(self):
        return self.__name
    def is_referenced(self):
        """True if the name is used (read) in its scope."""
        return bool(self.__flags & _symtable.USE)
    def is_parameter(self):
        return bool(self.__flags & DEF_PARAM)
    def is_global(self):
        return bool((self.__flags & DEF_GLOBAL)
                    or (self.__flags & DEF_FREE_GLOBAL))
    def is_vararg(self):
        """True for a *args-style parameter."""
        return bool(self.__flags & DEF_STAR)
    def is_keywordarg(self):
        """True for a **kwargs-style parameter."""
        return bool(self.__flags & DEF_DOUBLESTAR)
    def is_local(self):
        return bool(self.__flags & DEF_BOUND)
    def is_free(self):
        # Same logic as the module-level is_free() helper.
        if (self.__flags & (USE | DEF_FREE)) \
            and (self.__flags & (DEF_LOCAL | DEF_PARAM | DEF_GLOBAL)):
            return 1
        if self.__flags & DEF_FREE_CLASS:
            return 1
        return 0
    def is_imported(self):
        return bool(self.__flags & DEF_IMPORT)
    def is_assigned(self):
        return bool(self.__flags & DEF_LOCAL)
    def is_in_tuple(self):
        """True for a name bound inside a tuple parameter (Python 2)."""
        return bool(self.__flags & DEF_INTUPLE)
    def is_namespace(self):
        """Returns true if name binding introduces new namespace.
        If the name is used as the target of a function or class
        statement, this will be true.
        Note that a single name can be bound to multiple objects. If
        is_namespace() is true, the name may also be bound to other
        objects, like an int or list, that does not introduce a new
        namespace.
        """
        return bool(self.__namespaces)
    def get_namespaces(self):
        """Return a list of namespaces bound to this name"""
        return self.__namespaces
    def get_namespace(self):
        """Returns the single namespace bound to this name.
        Raises ValueError if the name is bound to multiple namespaces.
        """
        if len(self.__namespaces) != 1:
            raise ValueError, "name is bound to multiple namespaces"
        return self.__namespaces[0]
if __name__ == "__main__":
    # Self-test: build the symbol table of this very file and dump basic
    # info for each top-level identifier (Python 2 print statement).
    import os, sys
    src = open(sys.argv[0]).read()
    mod = symtable(src, os.path.split(sys.argv[0])[1], "exec")
    for ident in mod.get_identifiers():
        info = mod.lookup(ident)
        print info, info.is_local(), info.is_namespace()
| |
#!/usr/bin/env python
# Author:
# Rudiger Birkner (Networked Systems Group ETH Zurich)
# Arpit Gupta (Princeton)
import argparse
from collections import namedtuple
import json
from multiprocessing.connection import Listener, Client
from netaddr import IPNetwork, IPAddress
import os
import socket
import struct
import sys
from threading import Thread, Lock
np = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if np not in sys.path:
sys.path.append(np)
import util.log
from utils import parse_packet, craft_arp_packet, craft_eth_frame, craft_garp_response
logger = util.log.getLogger('arp')
# Broadcast MAC and the EtherType value for ARP frames.
ETH_BROADCAST = 'ff:ff:ff:ff:ff:ff'
ETH_TYPE_ARP = 0x0806
# Immutable runtime configuration, produced by parse_config().
Config = namedtuple('Config', 'vnhs garp_socket interface')
# Module-level singletons, assigned in main().
arpListener = None
config = None
# Participant port MAC -> PctrlClient; guarded by participantsLock.
participantsLock = Lock()
portmac2Participant = {}
# Connection bookkeeping for participant controllers; guarded by
# clientPoolLock.  Dead threads are parked for a later join().
clientPoolLock = Lock()
clientActivePool = dict()
clientDeadPool = set()
class PctrlClient(object):
    """One connection from a participant's SDN controller.

    Receives JSON messages ('hello' registers MACs, 'garp' relays an ARP
    reply/gratuitous ARP onto the wire) and relays ARP requests back.
    """
    def __init__(self, conn, addr):
        self.conn = conn
        self.addr = addr
    def start(self):
        """Receive loop; runs in its own thread until the peer hangs up
        or a handler asks to close."""
        logger.info('ARP Pctrl Client started for client ip %s.', self.addr)
        while True:
            try:
                rv = self.conn.recv()
            except EOFError as ee:
                # Peer disconnected: fall through and close.
                rv = None
            if not (rv and self.process_message(**json.loads(rv))):
                self.close()
                break
    def process_message(self, msgType=None, **data):
        """Dispatch one decoded message; returns False to close the
        connection."""
        if msgType == 'hello':
            rv = self.process_hello_message(**data)
        elif msgType == 'garp':
            rv = self.process_garp_message(**data)
        else:
            # Unknown messages are logged but tolerated.
            logger.warn("Unrecognized or absent msgType: %s. Message ignored.", msgType)
            rv = True
        return rv
    def process_hello_message(self, macs=None):
        """Register this client as the owner of the given port MACs."""
        if isinstance(macs, list):
            with participantsLock:
                for mac in macs:
                    portmac2Participant[mac] = self
        else:
            logger.warn("hello message from %s is missing MAC list. 'macs' has value: %s. Closing connection.", self.addr, macs)
            return False
        return True
    def process_garp_message(self, **data):
        """
        Process the incoming ARP data from the Participant Controller:
        -Format ARP Reply:
            eth_src = VMAC, eth_dst = requester_mac,
            SHA = VMAC, SPA = vnhip,
            THA = requester_mac, TPA = requester_ip
        -Format Gratuitous ARP:
            eth_src = VMAC, eth_dst = 00..00<part_id>,
            SHA = VMAC, SPA = vnhip,
            THA = VMAC, TPA = vnhip
        """
        if data["THA"] == data["eth_dst"]:
            logger.debug("ARP Reply relayed: "+str(data))
        else:
            logger.debug("Gratuitous ARP relayed: "+str(data))
        # Craft the raw frame and push it out on the ARP raw socket.
        garp_message = craft_garp_response(**data)
        arpListener.send(garp_message)
        return True
    def send(self, srcmac, ip):
        # ARP request is sent by participant with its own SDN controller
        logger.debug("relay ARP-REQUEST to participant %s", self.addr)
        data = {}
        data['arp'] = [srcmac, ip]
        self.conn.send(json.dumps(data))
    def close(self):
        """Deregister from all shared tables and close the connection."""
        with clientPoolLock:
            s, t = clientActivePool[self.conn]
            del clientActivePool[self.conn]
            # we can't join() inside the thread,
            # so move to a list and remove later.
            clientDeadPool.add(t)
        self.conn.close()
        with participantsLock:
            macs = [mac for mac,pctl in portmac2Participant.items() if pctl == self]
            for mac in macs:
                del portmac2Participant[mac]
class PctrlListener(object):
    """Accepts participant-controller connections on the GARP socket and
    spawns one PctrlClient thread per connection."""
    def __init__(self):
        # "Set listener for ARP replies from the participants' controller"
        logger.info("Starting the PctrlListener")
        self.listener_garp = Listener(config.garp_socket, authkey=None, backlog=100)
    def start(self):
        """Accept loop; also reaps threads of closed clients."""
        logger.info("ARP Response Handler started")
        while True:
            conn = self.listener_garp.accept()
            pc = PctrlClient(conn, self.listener_garp.last_accepted)
            t = Thread(target=pc.start)
            with clientPoolLock:
                clientActivePool[conn] = (pc, t)
                # while here, join dead threads.
                while clientDeadPool:
                    clientDeadPool.pop().join()
            t.start()
class ArpListener(object):
    """Raw-socket listener for ARP frames on the configured interface.

    ARP requests for virtual next-hop IPs are forwarded to the owning
    participant controller; replies come back via send().
    """
    def __init__(self):
        # info about non-sdn participants
        # TODO: Create a mapping between actual interface IP addresses
        # and the corresponding MAC addresses for all the non-SDN participants
        # In case of MDS, it is actual mac adresses of these interfaces, in case
        # of the superset scheme it is : 1XXXX-nexthop_id
        # self.nonSDN_nhip_2_nhmac = {}
        try:
            # AF_PACKET raw socket filtered to the ARP EtherType.
            self.sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(ETH_TYPE_ARP))
            self.sock.bind((config.interface, 0))
        except socket.error as msg:
            # NOTE(review): indexing msg[0]/msg[1] is Python-2 style
            # socket.error access — confirm the target interpreter.
            logger.error("Can't open socket %s", str(config.interface))
            logger.exception('Failed to create socket. Error Code : ' + str(msg[0]) + ' Message ' + msg[1])
            raise
    def start(self):
        """Receive loop: relay ARP requests for VNH addresses."""
        while True:
            # receive arp requests
            packet, addr = self.sock.recvfrom(65565)
            eth_frame, arp_packet = parse_packet(packet)
            # ARP opcode: 1 = request, 2 = reply.
            arp_type = struct.unpack("!h", arp_packet["oper"])[0]
            logger.debug("Received ARP-" + ("REQUEST" if (arp_type == 1) else "REPLY") +" SRC: "+eth_frame["src_mac"]+" / "+arp_packet["src_ip"]+" "+"DST: "+eth_frame["dst_mac"]+" / "+arp_packet["dst_ip"])
            if arp_type == 1:
                # check if the arp request stems from one of the participants
                requester_srcmac = eth_frame["src_mac"]
                requested_ip = arp_packet["dst_ip"]
                # Send the ARP request message to respective controller and forget about it
                if IPAddress(requested_ip) in config.vnhs:
                    self.send_arp_request(requester_srcmac, requested_ip)
                # TODO: If the requested IP address belongs to a non-SDN participant
                # then refer the structure `self.nonSDN_nhip_2_nhmac` and
                # send an immediate ARP response.
                """
                response_vmac = self.get_vmac_default(requester_srcmac, requested_ip)
                if response_vmac != "":
                    logger.debug("ARP-PROXY: reply with VMAC "+response_vmac)
                    data = self.craft_arp_packet(arp_packet, response_vmac)
                    eth_packet = self.craft_eth_frame(eth_frame, response_vmac, data)
                    self.sock.send(''.join(eth_packet))
                """
    def send_arp_request(self, requester_srcmac, requested_ip):
        "Send the arp request to the corresponding pctrl"
        with participantsLock:
            try:
                pctrlClient = portmac2Participant[requester_srcmac]
            except KeyError:
                # Requester's MAC is not registered: silently drop.
                pctrlClient = None
        if pctrlClient:
            pctrlClient.send(requester_srcmac, requested_ip)
    def send(self, data):
        # Raw frame out on the ARP socket (used by PctrlClient for GARPs).
        self.sock.send(data)
def parse_config(config_file):
    """Read the SDX global JSON config and return a Config namedtuple
    with the VNH network, the GARP listener address and the interface."""
    with open(config_file, 'r') as f:
        raw = json.load(f)
    host, port = raw["ARP Proxy"]["GARP_SOCKET"]
    return Config(
        vnhs=IPNetwork(raw["VNHs"]),
        garp_socket=(host, int(port)),
        interface=raw["ARP Proxy"]["Interface"],
    )
def main():
    """Entry point: load config, start the ARP listener thread and run
    the participant-controller listener in the foreground."""
    global arpListener, config
    parser = argparse.ArgumentParser()
    parser.add_argument('dir', help='the directory of the example')
    args = parser.parse_args()
    # locate config file
    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),"..","examples",args.dir,"config","sdx_global.cfg")
    logger.info("Reading config file %s", config_file)
    config = parse_config(config_file)
    logger.info("Starting ARP Listener")
    arpListener = ArpListener()
    ap_thread = Thread(target=arpListener.start)
    ap_thread.start()
    # start pctrl listener in foreground
    logger.info("Starting PCTRL Listener")
    pctrlListener = PctrlListener()
    pctrlListener.start()
if __name__ == '__main__':
    main()
| |
# Author: Miguel Martinez Lopez
try:
from Tkinter import Frame, N, S, E, W
except ImportError:
from tkinter import Frame, N, S, E, W
class Animation(object):
    """Tick-based animation helper driven by a Tk widget's ``after`` loop.

    Calls ``config_function(value)`` once per tick, where ``value`` is
    interpolated from ``start_value`` to ``end_value`` according to
    ``easing_function`` (linear/identity by default).  After the final
    tick, ``callback`` (if given) is scheduled once.
    """

    def __init__(self, w, ticks, config_function, duration=1,
                 interval_time=None, easing_function=None,
                 start_value=0, end_value=1, callback=None):
        """
        w -- a Tk widget providing after(ms, func) for scheduling
        ticks -- number of animation steps
        config_function -- called with the interpolated value each tick
        duration -- total animation time in seconds (used when
            interval_time is not given)
        interval_time -- seconds between ticks; overrides duration
        easing_function -- maps progress t in [0,1] to eased progress
        start_value / end_value -- interpolation endpoints
        callback -- scheduled once after the last tick
        """
        self._w = w
        self._tick = 0
        self._total_ticks = float(ticks)
        if easing_function is None:
            self._easing_function = lambda x: x
        else:
            # BUG FIX: a caller-supplied easing function was previously
            # ignored (never assigned), crashing _animate() with an
            # AttributeError.
            self._easing_function = easing_function
        self._duration = duration
        if interval_time:
            self._interval_time = int(interval_time * 1000)
        else:
            self._interval_time = int(duration * 1000 / self._total_ticks)
        self._start_value = start_value
        self._end_value = end_value
        self._interval_value = end_value - start_value
        self._config_function = config_function
        self._callback = callback

    def start_animation(self, after=0):
        """Begin animating, optionally delayed by *after* seconds."""
        if after != 0:
            # BUG FIX: schedule on the widget; Animation itself has no
            # after() method, so self.after(...) raised AttributeError.
            self._w.after(int(after * 1000), self._animate)
        else:
            self._animate()

    def _animate(self):
        # Progress fraction completed so far (0.0 .. 1.0).
        t = self._tick / self._total_ticks
        value = self._start_value + self._interval_value * self._easing_function(t)
        self._config_function(value)
        self._tick += 1
        if self._tick <= self._total_ticks:
            self._w.after(self._interval_time, self._animate)
        else:
            if self._callback is not None:
                self._w.after(self._interval_time, self._callback)
class Stacked_Frame(Frame):
    """A Frame that stacks child widgets and displays one at a time.

    Widgets are kept in an internal list; set_current_index()/next()/
    previous() switch between them, optionally with a slide animation
    toward the given direction (one of the Tk anchor constants E, W, N, S).
    """

    def __init__(self, master, animate=False, animate_direction=W, **kw):
        Frame.__init__(self, master, **kw)
        self._list_of_widgets = []   # stacked widgets in insertion order
        self._current_index = None   # index of the visible widget, or None
        self._current_widget = None  # the visible widget, or None
        self._animate = animate
        if animate:
            if animate_direction not in (E, W, N, S):
                raise ValueError("Invalid animate_direction value: %s"%animate_direction)
            self._animate_direction = animate_direction
        # Blocks widget switches while a slide animation is running.
        self._is_animating = False

    def add_widget(self, widget):
        """Append *widget* to the stack and return its index.

        The first widget ever added becomes the visible one.
        """
        self._list_of_widgets.append(widget)
        if self._current_index is None:
            self._current_index = 0
            self._show_widget(widget)
        index = len(self._list_of_widgets) - 1
        return index

    def remove_widget(self, widget):
        """Remove *widget* from the stack (does not unmap it)."""
        self._list_of_widgets.remove(widget)

    def insert_widget(self, index, widget):
        """Insert *widget* at *index*; it is shown if the stack was empty."""
        self._list_of_widgets.insert(index, widget)
        if self._current_index is None:
            self._current_index = 0
            self._show_widget(widget)

    def count(self):
        """Return the number of stacked widgets."""
        return len(self._list_of_widgets)

    def current_index(self):
        """Return the index of the visible widget (None when empty)."""
        return self._current_index

    def index_of(self, widget):
        """Return the stack index of *widget* (raises ValueError if absent)."""
        return self._list_of_widgets.index(widget)

    def set_current_index(self, index):
        """Show the widget stored at *index* (no-op while animating)."""
        if self._is_animating:
            return
        if index == self._current_index: return
        widget = self._list_of_widgets[index]
        self._current_index = index
        self._show_widget(widget)

    def set_current_widget(self, widget):
        """Show *widget* (no-op while animating)."""
        if self._is_animating:
            return
        index = self._list_of_widgets.index(widget)
        self._current_index = index
        self._show_widget(widget)

    def widget(self, index):
        """Return the widget stored at *index*."""
        return self._list_of_widgets[index]

    def next(self):
        """Advance to the next widget, if any (no-op while animating)."""
        if self._current_index == len(self._list_of_widgets) - 1:
            return
        if self._is_animating:
            return
        self._current_index += 1
        widget = self._list_of_widgets[self._current_index]
        self._show_widget(widget)

    def previous(self):
        """Go back to the previous widget, if any (no-op while animating)."""
        # NOTE: 'not self._current_index' also returns when the index is 0
        # (already at the first widget) or None (empty stack).
        if not self._current_index:
            return
        if self._is_animating:
            return
        self._current_index -= 1
        widget = self._list_of_widgets[self._current_index]
        self._show_widget(widget)

    def _show_widget(self, widget):
        # First widget ever shown: just place it filling the frame.
        if self._current_widget is None:
            self._current_widget = widget
            widget.place(x=0, y=0, relwidth=1, relheight=1)
        else:
            if self._animate:
                old_widget = self._current_widget
                widget.place(relwidth=1, relheight=1)
                # Each branch slides both widgets together by animating a
                # single shared coordinate; the anchor constants keep the
                # incoming widget edge-adjacent to the outgoing one.
                if self._animate_direction == W:
                    start_value = 0
                    end_value = self.winfo_width()

                    def config_function(position):
                        widget.place(x=position, y=0, anchor=N+E)
                        old_widget.place(x=position, y=0, anchor=N+W)
                elif self._animate_direction == E:
                    start_value = self.winfo_width()
                    end_value = 0

                    def config_function(position):
                        widget.place(x=position, y=0, anchor=N+W)
                        old_widget.place(x=position, y=0, anchor=N+E)
                elif self._animate_direction == S:
                    start_value = 0
                    end_value = self.winfo_height()

                    def config_function(position):
                        widget.place(x=0, y=position, anchor=S+W)
                        old_widget.place(x=0, y=position, anchor=N+W)
                elif self._animate_direction == N:
                    start_value = self.winfo_height()
                    end_value = 0

                    def config_function(position):
                        widget.place(x=0, y=position, anchor=N+W)
                        old_widget.place(x=0, y=position, anchor=S+W)

                # 20 ticks at 50 ms each: a one-second slide.
                animation = Animation(
                    self,
                    ticks=20,
                    interval_time=0.05,
                    start_value=start_value,
                    end_value=end_value,
                    config_function=config_function,
                    callback=lambda widget=widget: self._on_finnish_animation(widget))

                animation.start_animation()
                self._is_animating = True
            else:
                self._current_widget.place_forget()
                self._current_widget = widget
                widget.place(x=0, y=0, relwidth=1, relheight=1)

    def _on_finnish_animation(self, widget):
        # Slide finished: hide the old widget and make the new one current.
        self._current_widget.place_forget()
        self._current_widget = widget
        self._is_animating = False
if __name__ == "__main__":
    # Demo: four colored frames in an animated stack with prev/next buttons.
    try:
        from Tkinter import Tk, Button, Label
    except ImportError:
        from tkinter import Tk, Button, Label

    root = Tk()

    stack = Stacked_Frame(root, width=300, height=400, animate=True, animate_direction=S)
    stack.pack(padx=5)

    frame1 = Frame(stack, background="red")
    Label(frame1, text="this is frame1").pack(expand=True)
    frame2 = Frame(stack, background="white")
    Label(frame2, text="this is frame2").pack(expand=True)
    frame3 = Frame(stack, background="yellow")
    Label(frame3, text="this is frame3").pack(expand=True)
    frame4 = Frame(stack, background="green")
    Label(frame4, text="this is frame4").pack(expand=True)

    stack.add_widget(frame1)
    stack.add_widget(frame2)
    stack.add_widget(frame3)
    stack.add_widget(frame4)

    row = Frame(root)
    row.pack(fill="x", pady=10, padx=5)
    Button(row, text="previous", command= lambda: stack.previous()).pack(side="left")
    Button(row, text="next", command= lambda: stack.next()).pack(side="left", padx=(8,0))

    root.mainloop()
| |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import time
import eventlet
import mock
from test.debug_logger import debug_logger
from test.unit import FakeMemcache
from swift.common.middleware import ratelimit
from swift.proxy.controllers.base import get_cache_key, \
headers_to_container_info
from swift.common.swob import Request
from swift.common import registry
threading = eventlet.patcher.original('threading')
class FakeApp(object):
    """Minimal WSGI app that checks the ratelimit middleware marked the
    request as handled before passing it through."""

    # Tests that bypass the middleware set this to True on the instance.
    skip_handled_check = False

    def __call__(self, env, start_response):
        if not self.skip_handled_check:
            assert env.get('swift.ratelimit.handled')
        start_response('200 OK', [])
        return [b'Some Content']
class FakeReq(object):
    """Bare-bones stand-in for a swob Request: just a method and environ."""

    def __init__(self, method, env=None):
        self.method = method
        # A falsy env (None or {}) is replaced with a fresh dict, matching
        # the original 'env or {}' semantics.
        if env:
            self.environ = env
        else:
            self.environ = {}
def start_response(*args):
    """No-op WSGI start_response stub shared by the fake apps above."""
    return None
# Fake-clock state shared by mock_sleep()/mock_time() below.
time_ticker = 0
# Optional queue of canned time.time() return values (see mock_time()).
time_override = []
def mock_sleep(x):
    """Advance the fake clock by *x* seconds instead of really sleeping."""
    global time_ticker
    time_ticker += x
def mock_time():
    """Fake time.time() backed by the module-level tick counter.

    If time_override holds canned values, the next one is popped and
    returned; a None entry re-bases the remaining canned values on the
    current tick count and falls back to time_ticker for this call.
    """
    global time_override
    global time_ticker
    if time_override:
        cur_time = time_override.pop(0)
        if cur_time is None:
            # Shift remaining canned values so they are relative to "now".
            time_override = [None if i is None else i + time_ticker
                             for i in time_override]
            return time_ticker
        return cur_time
    return time_ticker
class TestRateLimit(unittest.TestCase):
    """Exercises the ratelimit middleware against a fake clock.

    setUp() monkey-patches eventlet.sleep and time.time so that "sleeping"
    only advances the module-level time_ticker, letting tests assert
    exactly how long the middleware would have slept.
    """

    def _reset_time(self):
        # Rewind the fake clock between scenarios.
        global time_ticker
        time_ticker = 0

    def setUp(self):
        self.was_sleep = eventlet.sleep
        eventlet.sleep = mock_sleep
        self.was_time = time.time
        time.time = mock_time
        self._reset_time()

    def tearDown(self):
        # Restore the real sleep/clock.
        eventlet.sleep = self.was_sleep
        time.time = self.was_time

    def _run(self, callable_func, num, rate, check_time=True):
        """Call callable_func num times; optionally assert the total fake
        sleep matches what the configured rate implies."""
        global time_ticker
        begin = time.time()
        for x in range(num):
            callable_func()
        end = time.time()
        total_time = float(num) / rate - 1.0 / rate  # 1st request not limited
        # Allow for one second of variation in the total time.
        time_diff = abs(total_time - (end - begin))
        if check_time:
            self.assertEqual(round(total_time, 1), round(time_ticker, 1))
        return time_diff

    def test_get_maxrate(self):
        """Per-container rates are interpolated between configured tiers."""
        conf_dict = {'container_ratelimit_10': 200,
                     'container_ratelimit_50': 100,
                     'container_ratelimit_75': 30}
        test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        test_ratelimit.logger = debug_logger()
        self.assertIsNone(ratelimit.get_maxrate(
            test_ratelimit.container_ratelimits, 0))
        self.assertIsNone(ratelimit.get_maxrate(
            test_ratelimit.container_ratelimits, 5))
        self.assertEqual(ratelimit.get_maxrate(
            test_ratelimit.container_ratelimits, 10), 200)
        self.assertEqual(ratelimit.get_maxrate(
            test_ratelimit.container_ratelimits, 60), 72)
        self.assertEqual(ratelimit.get_maxrate(
            test_ratelimit.container_ratelimits, 160), 30)

    def test_get_ratelimitable_key_tuples(self):
        """Which (key, rate) tuples apply per method/account/container."""
        current_rate = 13
        conf_dict = {'account_ratelimit': current_rate,
                     'container_ratelimit_3': 200}
        fake_memcache = FakeMemcache()
        fake_memcache.store[get_cache_key('a', 'c')] = \
            {'object_count': '5'}
        the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
        the_app.memcache_client = fake_memcache
        environ = {'swift.cache': fake_memcache, 'PATH_INFO': '/v1/a/c/o'}
        with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                        lambda *args, **kwargs: {}):
            self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
                FakeReq('DELETE', environ), 'a', None, None)), 0)
            self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
                FakeReq('PUT', environ), 'a', 'c', None)), 1)
            self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
                FakeReq('DELETE', environ), 'a', 'c', None)), 1)
            self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
                FakeReq('GET', environ), 'a', 'c', 'o')), 0)
            self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
                FakeReq('PUT', environ), 'a', 'c', 'o')), 1)

            self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
                FakeReq('PUT', environ), 'a', 'c', None, global_ratelimit=10)), 2)
            self.assertEqual(the_app.get_ratelimitable_key_tuples(
                FakeReq('PUT', environ), 'a', 'c', None, global_ratelimit=10)[1],
                ('ratelimit/global-write/a', 10))

            # A non-numeric global_ratelimit is ignored.
            self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
                FakeReq('PUT', environ), 'a', 'c', None,
                global_ratelimit='notafloat')), 1)

    def test_memcached_container_info_dict(self):
        """headers_to_container_info exposes the object count as a string."""
        mdict = headers_to_container_info({'x-container-object-count': '45'})
        self.assertEqual(mdict['object_count'], '45')

    def test_ratelimit_old_memcache_format(self):
        """Legacy 'container_size' cache entries still drive the limit."""
        current_rate = 13
        conf_dict = {'account_ratelimit': current_rate,
                     'container_ratelimit_3': 200}
        fake_memcache = FakeMemcache()
        fake_memcache.store[get_cache_key('a', 'c')] = \
            {'container_size': 5}
        the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
        the_app.memcache_client = fake_memcache
        req = FakeReq('PUT', {
            'PATH_INFO': '/v1/a/c/o', 'swift.cache': fake_memcache})
        with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                        lambda *args, **kwargs: {}):
            tuples = the_app.get_ratelimitable_key_tuples(req, 'a', 'c', 'o')
        self.assertEqual(tuples, [('ratelimit/a/c', 200.0)])

    def test_account_ratelimit(self):
        """Writes (PUT/DELETE) are limited; reads (GET/POST) are not."""
        current_rate = 5
        num_calls = 50
        conf_dict = {'account_ratelimit': current_rate}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        with mock.patch('swift.common.middleware.ratelimit.get_container_info',
                        lambda *args, **kwargs: {}):
            with mock.patch(
                    'swift.common.middleware.ratelimit.get_account_info',
                    lambda *args, **kwargs: {}):
                for meth, exp_time in [('DELETE', 9.8), ('GET', 0),
                                       ('POST', 0), ('PUT', 9.8)]:
                    req = Request.blank('/v1/a%s/c' % meth)
                    req.method = meth
                    req.environ['swift.cache'] = FakeMemcache()
                    make_app_call = lambda: self.test_ratelimit(
                        req.environ.copy(), start_response)
                    begin = time.time()
                    self._run(make_app_call, num_calls, current_rate,
                              check_time=bool(exp_time))
                    self.assertEqual(round(time.time() - begin, 1), exp_time)
                    self._reset_time()

    def test_ratelimit_set_incr(self):
        """A memcache incr that starts negative still rate-limits."""
        current_rate = 5
        num_calls = 50
        conf_dict = {'account_ratelimit': current_rate}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        req = Request.blank('/v1/a/c')
        req.method = 'PUT'
        req.environ['swift.cache'] = FakeMemcache()
        req.environ['swift.cache'].init_incr_return_neg = True
        make_app_call = lambda: self.test_ratelimit(req.environ.copy(),
                                                    start_response)
        begin = time.time()
        with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                        lambda *args, **kwargs: {}):
            self._run(make_app_call, num_calls, current_rate, check_time=False)
        self.assertEqual(round(time.time() - begin, 1), 9.8)

    def test_ratelimit_old_white_black_list(self):
        """Legacy account_whitelist/account_blacklist config options."""
        global time_ticker
        current_rate = 2
        conf_dict = {'account_ratelimit': current_rate,
                     'max_sleep_time_seconds': 2,
                     'account_whitelist': 'a',
                     'account_blacklist': 'b'}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        with mock.patch.object(self.test_ratelimit,
                               'memcache_client', FakeMemcache()):
            self.assertEqual(
                self.test_ratelimit.handle_ratelimit(
                    Request.blank('/'), 'a', 'c', 'o'),
                None)
            self.assertEqual(
                self.test_ratelimit.handle_ratelimit(
                    Request.blank('/'), 'b', 'c', 'o').status_int,
                497)

    def test_ratelimit_whitelist_sysmeta(self):
        """Accounts whitelisted via sysmeta are never slowed down."""
        global time_ticker
        current_rate = 2
        conf_dict = {'account_ratelimit': current_rate,
                     'max_sleep_time_seconds': 2,
                     'account_whitelist': 'a',
                     'account_blacklist': 'b'}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        req = Request.blank('/v1/a/c')
        req.environ['swift.cache'] = FakeMemcache()

        class rate_caller(threading.Thread):

            def __init__(self, parent):
                threading.Thread.__init__(self)
                self.parent = parent

            def run(self):
                self.result = self.parent.test_ratelimit(req.environ,
                                                         start_response)

        def get_fake_ratelimit(*args, **kwargs):
            return {'sysmeta': {'global-write-ratelimit': 'WHITELIST'}}

        with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                        get_fake_ratelimit):
            nt = 5
            threads = []
            for i in range(nt):
                rc = rate_caller(self)
                rc.start()
                threads.append(rc)
            for thread in threads:
                thread.join()

            the_498s = [
                t for t in threads
                if b''.join(t.result).startswith(b'Slow down')]
            self.assertEqual(len(the_498s), 0)
            self.assertEqual(time_ticker, 0)

    def test_ratelimit_blacklist(self):
        """Accounts blacklisted via sysmeta get 497 with no sleeping."""
        global time_ticker
        current_rate = 2
        conf_dict = {'account_ratelimit': current_rate,
                     'max_sleep_time_seconds': 2,
                     'account_whitelist': 'a',
                     'account_blacklist': 'b'}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        self.test_ratelimit.logger = debug_logger()
        self.test_ratelimit.BLACK_LIST_SLEEP = 0
        req = Request.blank('/v1/b/c')
        req.environ['swift.cache'] = FakeMemcache()

        class rate_caller(threading.Thread):

            def __init__(self, parent):
                threading.Thread.__init__(self)
                self.parent = parent

            def run(self):
                self.result = self.parent.test_ratelimit(req.environ.copy(),
                                                         start_response)

        def get_fake_ratelimit(*args, **kwargs):
            return {'sysmeta': {'global-write-ratelimit': 'BLACKLIST'}}

        with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                        get_fake_ratelimit):
            nt = 5
            threads = []
            for i in range(nt):
                rc = rate_caller(self)
                rc.start()
                threads.append(rc)
            for thread in threads:
                thread.join()

            the_497s = [
                t for t in threads
                if b''.join(t.result).startswith(b'Your account')]
            self.assertEqual(len(the_497s), 5)
            self.assertEqual(time_ticker, 0)

    def test_ratelimit_max_rate_double(self):
        """Requests needing more than max_sleep_time_seconds get 498."""
        global time_ticker
        global time_override
        current_rate = 2
        conf_dict = {'account_ratelimit': current_rate,
                     'clock_accuracy': 100,
                     'max_sleep_time_seconds': 1}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        self.test_ratelimit.log_sleep_time_seconds = .00001
        req = Request.blank('/v1/a/c')
        req.method = 'PUT'
        req.environ['swift.cache'] = FakeMemcache()

        time_override = [0, 0, 0, 0, None]
        # simulates 4 requests coming in at same time, then sleeping
        with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                        lambda *args, **kwargs: {}):
            r = self.test_ratelimit(req.environ.copy(), start_response)
            mock_sleep(.1)
            r = self.test_ratelimit(req.environ.copy(), start_response)
            mock_sleep(.1)
            r = self.test_ratelimit(req.environ.copy(), start_response)
            self.assertEqual(r[0], b'Slow down')
            mock_sleep(.1)
            r = self.test_ratelimit(req.environ.copy(), start_response)
            self.assertEqual(r[0], b'Slow down')
            mock_sleep(.1)
            r = self.test_ratelimit(req.environ.copy(), start_response)
            self.assertEqual(r[0], b'Some Content')

    def test_ratelimit_max_rate_double_container(self):
        """Same as above, but for per-container write limits."""
        global time_ticker
        global time_override
        current_rate = 2
        conf_dict = {'container_ratelimit_0': current_rate,
                     'clock_accuracy': 100,
                     'max_sleep_time_seconds': 1}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        self.test_ratelimit.log_sleep_time_seconds = .00001
        req = Request.blank('/v1/a/c/o')
        req.method = 'PUT'
        req.environ['swift.cache'] = FakeMemcache()
        req.environ['swift.cache'].set(
            get_cache_key('a', 'c'),
            {'object_count': 1})

        time_override = [0, 0, 0, 0, None]
        # simulates 4 requests coming in at same time, then sleeping
        with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                        lambda *args, **kwargs: {}):
            r = self.test_ratelimit(req.environ.copy(), start_response)
            mock_sleep(.1)
            r = self.test_ratelimit(req.environ.copy(), start_response)
            mock_sleep(.1)
            r = self.test_ratelimit(req.environ.copy(), start_response)
            self.assertEqual(r[0], b'Slow down')
            mock_sleep(.1)
            r = self.test_ratelimit(req.environ.copy(), start_response)
            self.assertEqual(r[0], b'Slow down')
            mock_sleep(.1)
            r = self.test_ratelimit(req.environ.copy(), start_response)
            self.assertEqual(r[0], b'Some Content')

    def test_ratelimit_max_rate_double_container_listing(self):
        """Same as above, but for per-container listing (GET) limits."""
        global time_ticker
        global time_override
        current_rate = 2
        conf_dict = {'container_listing_ratelimit_0': current_rate,
                     'clock_accuracy': 100,
                     'max_sleep_time_seconds': 1}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        self.test_ratelimit.log_sleep_time_seconds = .00001
        req = Request.blank('/v1/a/c')
        req.method = 'GET'
        req.environ['swift.cache'] = FakeMemcache()
        req.environ['swift.cache'].set(
            get_cache_key('a', 'c'),
            {'object_count': 1})
        with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                        lambda *args, **kwargs: {}):
            time_override = [0, 0, 0, 0, None]
            # simulates 4 requests coming in at same time, then sleeping
            r = self.test_ratelimit(req.environ.copy(), start_response)
            mock_sleep(.1)
            r = self.test_ratelimit(req.environ.copy(), start_response)
            mock_sleep(.1)
            r = self.test_ratelimit(req.environ.copy(), start_response)
            self.assertEqual(r[0], b'Slow down')
            mock_sleep(.1)
            r = self.test_ratelimit(req.environ.copy(), start_response)
            self.assertEqual(r[0], b'Slow down')
            mock_sleep(.1)
            r = self.test_ratelimit(req.environ.copy(), start_response)
            self.assertEqual(r[0], b'Some Content')
            # Without a memcache client the limiter is a no-op.
            mc = self.test_ratelimit.memcache_client
            try:
                self.test_ratelimit.memcache_client = None
                self.assertIsNone(
                    self.test_ratelimit.handle_ratelimit(req, 'n', 'c', None))
            finally:
                self.test_ratelimit.memcache_client = mc

    def test_ratelimit_max_rate_multiple_acc(self):
        """Concurrent callers against many accounts each sleep correctly."""
        num_calls = 4
        current_rate = 2
        conf_dict = {'account_ratelimit': current_rate,
                     'max_sleep_time_seconds': 2}
        fake_memcache = FakeMemcache()

        the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
        the_app.memcache_client = fake_memcache

        class rate_caller(threading.Thread):

            def __init__(self, name):
                self.myname = name
                threading.Thread.__init__(self)

            def run(self):
                for j in range(num_calls):
                    self.result = the_app.handle_ratelimit(
                        FakeReq('PUT'), self.myname, 'c', None)

        with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                        lambda *args, **kwargs: {}):
            nt = 15
            begin = time.time()
            threads = []
            for i in range(nt):
                rc = rate_caller('a%s' % i)
                rc.start()
                threads.append(rc)
            for thread in threads:
                thread.join()

            time_took = time.time() - begin
            self.assertEqual(1.5, round(time_took, 1))

    def test_call_invalid_path(self):
        """A malformed path bypasses rate limiting entirely."""
        env = {'REQUEST_METHOD': 'GET',
               'SCRIPT_NAME': '',
               'PATH_INFO': '//v1/AUTH_1234567890',
               'SERVER_NAME': '127.0.0.1',
               'SERVER_PORT': '80',
               'swift.cache': FakeMemcache(),
               'SERVER_PROTOCOL': 'HTTP/1.0'}

        app = lambda *args, **kwargs: ['fake_app']
        rate_mid = ratelimit.filter_factory({})(app)

        class a_callable(object):

            def __call__(self, *args, **kwargs):
                pass
        resp = rate_mid.__call__(env, a_callable())
        self.assertEqual('fake_app', resp[0])

    def test_call_non_swift_api_path(self):
        """Paths outside /v1/... never even look up account info."""
        env = {'REQUEST_METHOD': 'GET',
               'SCRIPT_NAME': '',
               'PATH_INFO': '/ive/got/a/lovely/bunch/of/coconuts',
               'SERVER_NAME': '127.0.0.1',
               'SERVER_PORT': '80',
               'swift.cache': FakeMemcache(),
               'SERVER_PROTOCOL': 'HTTP/1.0'}

        app = lambda *args, **kwargs: ['some response']
        rate_mid = ratelimit.filter_factory({})(app)

        class a_callable(object):

            def __call__(self, *args, **kwargs):
                pass
        with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                        side_effect=Exception("you shouldn't call this")):
            resp = rate_mid(env, a_callable())
        self.assertEqual(resp[0], 'some response')

    def test_no_memcache(self):
        """No memcache client configured means no limiting at all."""
        current_rate = 13
        num_calls = 5
        conf_dict = {'account_ratelimit': current_rate}
        fake_app = FakeApp()
        fake_app.skip_handled_check = True
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(fake_app)
        req = Request.blank('/v1/a')
        req.environ['swift.cache'] = None
        make_app_call = lambda: self.test_ratelimit(req.environ,
                                                    start_response)
        begin = time.time()
        self._run(make_app_call, num_calls, current_rate, check_time=False)
        time_took = time.time() - begin
        self.assertEqual(round(time_took, 1), 0)  # no memcache, no limiting

    def test_already_handled(self):
        """A request already marked handled is not limited again."""
        current_rate = 13
        num_calls = 5
        conf_dict = {'container_listing_ratelimit_0': current_rate}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        fake_cache = FakeMemcache()
        fake_cache.set(
            get_cache_key('a', 'c'),
            {'object_count': 1})
        req = Request.blank('/v1/a/c', environ={'swift.cache': fake_cache})
        req.environ['swift.ratelimit.handled'] = True
        make_app_call = lambda: self.test_ratelimit(req.environ,
                                                    start_response)
        begin = time.time()
        self._run(make_app_call, num_calls, current_rate, check_time=False)
        time_took = time.time() - begin
        self.assertEqual(round(time_took, 1), 0)  # no memcache, no limiting

    def test_restarting_memcache(self):
        """Memcache incr errors disable limiting instead of failing."""
        current_rate = 2
        num_calls = 5
        conf_dict = {'account_ratelimit': current_rate}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        req = Request.blank('/v1/a/c')
        req.method = 'PUT'
        req.environ['swift.cache'] = FakeMemcache()
        req.environ['swift.cache'].error_on_incr = True
        make_app_call = lambda: self.test_ratelimit(req.environ,
                                                    start_response)
        begin = time.time()
        with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                        lambda *args, **kwargs: {}):
            self._run(make_app_call, num_calls, current_rate, check_time=False)
            time_took = time.time() - begin
            self.assertEqual(round(time_took, 1), 0)  # no memcache, no limit
class TestSwiftInfo(unittest.TestCase):
    """Verify what the ratelimit filter registers in the /info registry."""

    def setUp(self):
        # Start each test with a clean process-global registry.
        registry._swift_info = {}
        registry._swift_admin_info = {}

    def test_registered_defaults(self):
        """Configured limits appear in swift_info; private knobs do not."""
        def check_key_is_absent(key):
            # Asserts that looking up `key` raises a KeyError naming it.
            try:
                swift_info[key]
            except KeyError as err:
                if key not in str(err):
                    raise
        test_limits = {'account_ratelimit': 1,
                       'max_sleep_time_seconds': 60,
                       'container_ratelimit_0': 0,
                       'container_ratelimit_10': 10,
                       'container_ratelimit_50': 50,
                       'container_listing_ratelimit_0': 0,
                       'container_listing_ratelimit_10': 10,
                       'container_listing_ratelimit_50': 50}

        ratelimit.filter_factory(test_limits)('have to pass in an app')
        swift_info = registry.get_swift_info()
        self.assertIn('ratelimit', swift_info)
        self.assertEqual(swift_info['ratelimit']
                         ['account_ratelimit'], 1.0)
        self.assertEqual(swift_info['ratelimit']
                         ['max_sleep_time_seconds'], 60.0)
        self.assertEqual(swift_info['ratelimit']
                         ['container_ratelimits'][0][0], 0)
        self.assertEqual(swift_info['ratelimit']
                         ['container_ratelimits'][0][1], 0.0)
        self.assertEqual(swift_info['ratelimit']
                         ['container_ratelimits'][1][0], 10)
        self.assertEqual(swift_info['ratelimit']
                         ['container_ratelimits'][1][1], 10.0)
        self.assertEqual(swift_info['ratelimit']
                         ['container_ratelimits'][2][0], 50)
        self.assertEqual(swift_info['ratelimit']
                         ['container_ratelimits'][2][1], 50.0)
        self.assertEqual(swift_info['ratelimit']
                         ['container_listing_ratelimits'][0][0], 0)
        self.assertEqual(swift_info['ratelimit']
                         ['container_listing_ratelimits'][0][1], 0.0)
        self.assertEqual(swift_info['ratelimit']
                         ['container_listing_ratelimits'][1][0], 10)
        self.assertEqual(swift_info['ratelimit']
                         ['container_listing_ratelimits'][1][1], 10.0)
        self.assertEqual(swift_info['ratelimit']
                         ['container_listing_ratelimits'][2][0], 50)
        self.assertEqual(swift_info['ratelimit']
                         ['container_listing_ratelimits'][2][1], 50.0)
        # these were left out on purpose
        # NOTE(review): 'ratelimit_whitelis' is missing its final 't' —
        # confirm whether the truncation is intentional before changing.
        for key in ['log_sleep_time_seconds', 'clock_accuracy',
                    'rate_buffer_seconds', 'ratelimit_whitelis',
                    'ratelimit_blacklist']:
            check_key_is_absent(key)
if __name__ == '__main__':
unittest.main()
| |
__all__ = ['kd_tree']
from math import sqrt
from heapq import heappush,heappop
class kd_tree:
    """kD-Tree spatial data structure (see __init__ for usage examples)."""

    class node:
        """A single tree node: a point, its value, and the split axis."""

        def point_distance(self, point):
            """Euclidean distance from *point* to this node's point."""
            return sqrt(sum([(a - b) ** 2 for (a, b) in zip(point, self.point)]))

        def separator_distance(self, point):
            """Signed distance from *point* to this node's splitting plane."""
            return point[self.axis] - self.point[self.axis]

    def __repr__(self):
        return "kd_tree< %s points in %s-dimensions >" % (self.num_points, self.k)

    def __init__(self, points, values=None):
        """kD-Tree spatial data structure

        Parameters
        ----------
        points : array-like
            An N-by-K array of N point coordinates in K dimensions

        Optional Parameters
        -------------------
        values : array-like
            A sequence of N elements associated with the points.
            By default, the integers [0,1,...N-1] are used.

        Examples
        --------
        >>> points = [[0,0],[1,0],[0,1],[1,1]]
        >>> values = ['A','B','C','D']
        >>> kd = kd_tree(points, values)
        >>> kd
        kd_tree< 4 points in 2-dimensions >
        >>> kd.nearest([2,0])
        'B'
        >>> kd.nearest_n([2,0],2)
        ['B', 'D']
        >>> kd.in_sphere([0.1,0.2], 1.1)
        ['A', 'C', 'B']
        """
        lengths = [len(p) for p in points]
        # Support an empty point set (queries then return no results)
        # instead of crashing on min()/max() of an empty sequence.
        if lengths:
            min_dim, max_dim = min(lengths), max(lengths)
            if min_dim != max_dim:
                raise ValueError('points must all have the same dimension')
            self.k = min_dim
        else:
            self.k = 0
        if values is None:
            values = range(len(points))
        if len(points) != len(values):
            raise ValueError('points and values must have the same lengths')
        self.num_points = len(points)
        # list() is required on Python 3, where zip() returns a lazy
        # iterator that cannot be sorted or sliced by __build().
        self.root = self.__build(list(zip(points, values)), depth=0)

    def __build(self, pv_pairs, depth):
        # Recursively build the tree: sort by the cycling axis, put the
        # median at this node, and send each half into a subtree.
        if not pv_pairs:
            return None
        axis = depth % self.k  # cycle axis
        pv_pairs.sort(key=lambda pv: pv[0][axis])
        mid = len(pv_pairs) // 2  # floor division: an int index on Python 3
        node = self.node()
        node.axis = axis
        node.point = pv_pairs[mid][0]
        node.value = pv_pairs[mid][1]
        node.left_child = self.__build(pv_pairs[:mid], depth + 1)
        node.right_child = self.__build(pv_pairs[mid + 1:], depth + 1)
        return node

    def nearest(self, point, max_dist=float('inf')):
        """Returns the value associated with the nearest points to a given location

        Parameters
        ----------
        point : array-like
            Location in space, e.g. [1.5, 2.0]

        Optional Parameters
        -------------------
        max_dist : float
            Ignore points farther than max_dist away from the query point.

        Returns
        -------
        value : single element
            The value associated with the point nearest to the query point.
            Returns None if no points lie within max_dist of the query point
            or the tree is empty.
        """
        x = self.nearest_n(point, n=1, max_dist=max_dist)  # list with 0 or 1 elements
        if len(x) == 0:
            return None
        else:
            return x[0]

    def in_sphere(self, point, radius, max_points=None):
        """Returns the values of all points in a given sphere

        Parameters
        ----------
        point : array-like
            Center of the sphere, e.g. [1.5, 2.0]
        radius : float
            Radius of the sphere, e.g. 0.3

        Optional Parameters
        -------------------
        max_points : integer
            An upper-bound on the number of points to return.

        Returns
        -------
        values : list
            List of values associated with all points in the sphere
            defined by point and radius.
        """
        if max_points is None:
            max_points = float('inf')
        return self.nearest_n(point, n=max_points, max_dist=radius)

    def nearest_n(self, point, n, max_dist=float('inf')):
        """Returns the values of the nearest n points to a given location

        Parameters
        ----------
        point : array-like
            Location in space, e.g. [1.5, 2.0]
        n : integer
            (Maximum) Number of values to return. Will return
            fewer than n values if the kd_tree contains fewer
            than n points.

        Optional Parameters
        -------------------
        max_dist : float
            Ignore points farther than max_dist away from the query point.

        Returns
        -------
        values : list
            List of values associated with the n nearest points to
            the query location, nearest first.
        """
        heap = []
        self.__nearest_n(point, n, max_dist, self.root, heap)
        # Heap entries are (-distance, id, node): sorting ascending puts
        # the farthest first, so reverse to get nearest-first values.
        heap.sort()
        return [entry[2].value for entry in reversed(heap)]

    def __nearest_n(self, point, n, max_dist, current, heap):
        # Depth-first search keeping a max-heap (via negated distances) of
        # the n best candidates; max_dist shrinks as the heap fills, which
        # prunes whole subtrees via the separator-distance test.
        if current is None:
            return max_dist
        pt_dist = current.point_distance(point)  # distance to this node's point
        sep_dist = current.separator_distance(point)  # signed distance to this node's separating plane
        if pt_dist < max_dist:
            # BUG FIX (Python 3): id(current) breaks distance ties so that
            # heapq never falls back to comparing node objects, which would
            # raise TypeError.
            heappush(heap, (-pt_dist, id(current), current))
            if len(heap) > n:
                heappop(heap)
            if len(heap) == n:
                max_dist = min(-heap[0][0], max_dist)
        # Descend into the subtree on the query point's side first.
        if sep_dist < 0:
            max_dist = self.__nearest_n(point, n, max_dist, current.left_child, heap)
        else:
            max_dist = self.__nearest_n(point, n, max_dist, current.right_child, heap)
        if abs(sep_dist) < max_dist:
            # The search sphere crosses the separator: explore other subtree.
            if sep_dist < 0:
                return self.__nearest_n(point, n, max_dist, current.right_child, heap)
            else:
                return self.__nearest_n(point, n, max_dist, current.left_child, heap)
        else:
            return max_dist
##def inorder(x):
## if x is not None:
## return inorder(x.left_child) + [x.value] + inorder(x.right_child)
## else:
## return []
| |
"""
freshbooks.py - Python interface to the FreshBooks API (http://developers.freshbooks.com)
Library Maintainer:
Matt Culbreth
mattculbreth@gmail.com
http://mattculbreth.com
#####################################################################
This work is distributed under an MIT License:
http://www.opensource.org/licenses/mit-license.php
The MIT License
Copyright (c) 2008 Matt Culbreth (http://mattculbreth.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
#####################################################################
Hello, this is an open source Python library that serves as an interface to FreshBooks.
The code is heavily based on the existing Ruby implementation
by Ben Vinegar of the same interface:
http://freshbooks.rubyforge.org/
USAGE:
import freshbooks
# get data
freshbooks.setup('YOU.freshbooks.com', '<YOUR AUTH TOKEN>')
clients = freshbooks.Client.list()
client_1 = freshbooks.Client.get(<client_id>)
# update data
changed_client = freshbooks.Client()
changed_client.client_id = client_1.client_id
changed_client.first_name = u'Jane'
r = freshbooks.call_api('client.update', changed_client)
assert(r.success)
"""
import datetime
import urllib2
import xml.dom.minidom as xml_lib
# module level constants
VERSION = '0.5'  # Library version
API_VERSION = '2.1'  # FreshBooks API version
SERVICE_URL = "/api/%s/xml-in" % API_VERSION  # POST path on the account host

# module level variables (populated by setup() and call_api())
account_url = None      # full account URL as given to setup()
account_name = None     # subdomain extracted from account_url
auth_token = None       # FreshBooks auth token
user_agent = None       # User-Agent string sent with requests
request_headers = None  # extra HTTP headers dict
last_response = None    # most recent Response from call_api()
def setup(url, token, user_agent_name=None, headers={}):
'''
This funtion sets the high level variables for use in the interface.
'''
global account_url, account_name, auth_token, user_agent, request_headers
account_url = url
if url.find('//') == -1:
account_name = url[:(url.find('freshbooks.com') - 1)]
else:
account_name = url[(url.find('//') + 2):(url.find('freshbooks.com') - 1)]
auth_token = token
user_agent = user_agent_name
request_headers = headers
if 'user-agent' not in [x.lower() for x in request_headers.keys()]:
if not user_agent:
user_agent = 'Python:%s' % account_name
request_headers['User-Agent'] = user_agent
# Typed exceptions raised by call_api() based on the API's error message.
class InternalError(Exception):
    '''Raised when the API reports the request was not formatted correctly.'''
    pass
class AuthenticationError(Exception):
    '''Raised when the API reports an authentication failure.'''
    pass
class UnknownSystemError(Exception):
    '''Raised when the API reports the target system is unknown.'''
    pass
class InvalidParameterError(Exception):
    '''Raised when the API reports an invalid parameter.'''
    pass
def call_api(method, elems=None):
    '''
    Call into the FreshBooks API and return the Response.

    method -- the API method name, e.g. 'client.update'
    elems  -- either a BaseObject instance to serialize into the request,
              or a dict of element name -> value pairs (may be omitted)

    Raises a typed exception (InternalError, AuthenticationError,
    UnknownSystemError, InvalidParameterError) when the error message is
    recognized, or a plain Exception otherwise.
    '''
    global last_response
    # Build the request, which is an XML document.
    doc = xml_lib.Document()
    request = doc.createElement('request')
    request.setAttribute('method', method)
    if isinstance(elems, BaseObject):
        request.appendChild(elems.to_xml(doc))
    elif elems:
        for key, value in elems.items():
            e = doc.createElement(key)
            e.appendChild(doc.createTextNode(str(value)))
            request.appendChild(e)
    doc.appendChild(request)
    # Send it.
    result = post(doc.toxml('utf-8'))
    last_response = Response(result)
    # Check for failure and raise the most specific exception we can.
    if not last_response.success:
        msg = last_response.error_message
        if not msg:
            raise Exception("Error in response: %s" % last_response.doc.toxml())
        if 'not formatted correctly' in msg:
            raise InternalError(msg)
        # Deliberately matches both 'Authentication' and 'authentication'.
        elif 'uthentication failed' in msg:
            raise AuthenticationError(msg)
        # Fixed typo: the API message says 'does not exist'; the previous
        # 'does not exit' pattern meant this branch never matched.
        elif 'does not exist' in msg:
            raise UnknownSystemError(msg)
        elif 'Invalid parameter' in msg:
            raise InvalidParameterError(msg)
        else:
            raise Exception(msg)
    return last_response
def post(body):
    '''
    Send the request body to the FreshBooks service over HTTP basic
    authentication and return the raw response content.
    '''
    # Build the full service URL, prepending a scheme when the account
    # URL does not already carry one.
    prefix = "https://" if account_url.find('//') == -1 else ""
    url = prefix + account_url + SERVICE_URL
    # The auth token acts as the basic-auth username; password is blank.
    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    password_mgr.add_password(None, url, auth_token, '')
    auth_handler = urllib2.HTTPBasicAuthHandler(password_mgr)
    urllib2.install_opener(urllib2.build_opener(auth_handler))
    # Issue the request and hand back the response body.
    req = urllib2.Request(url, body, request_headers)
    return urllib2.urlopen(req).read()
class Response(object):
    '''
    Wraps the XML document returned by a FreshBooks API call.
    '''
    def __init__(self, xml_raw):
        '''Parse the raw XML string into a DOM document.'''
        self._doc = xml_lib.parseString(xml_raw)

    def __repr__(self):
        '''Show the success/error status along with the XML document.'''
        summary = "Response: success: %s, error_message: %s" % (
            self.success, self.error_message)
        return summary + "\nResponse Document: \n%s" % self.doc.toxml()

    @property
    def doc(self):
        '''The parsed DOM document.'''
        return self._doc

    @property
    def elements(self):
        '''The document's top-level child nodes.'''
        return self._doc.childNodes

    @property
    def success(self):
        '''True when the root element's status attribute is 'ok'.'''
        status_attr = self._doc.firstChild.attributes['status']
        return status_attr.firstChild.nodeValue == 'ok'

    @property
    def error_message(self):
        '''The text of the <error> element, or None when there is none.'''
        errors = self._doc.getElementsByTagName('error')
        if not errors:
            return None
        return errors[0].childNodes[0].nodeValue
class BaseObject(object):
    '''
    The base class for all FreshBooks objects.

    Subclasses are expected to provide:
      object_name   -- the API noun (e.g. 'client') used to build method
                       names ('client.get') and locate response elements
      attrs         -- tuple of attribute names created on each instance
      TYPE_MAPPINGS -- optional {attribute name: type key} map used to
                       coerce the string values parsed from XML
    '''
    # Typing help for attributes parsed from XML, e.g. client_id is an int.
    TYPE_MAPPINGS = {}
    # Conversion functions keyed by the type names used in TYPE_MAPPINGS.
    MAPPING_FUNCTIONS = {
        'int': lambda val: int(val),
        'float': lambda val: float(val),
        # Booleans arrive as '0'/'1'; anything else passes through unchanged.
        'bool': lambda val: bool(int(val)) if val in ('0', '1') else val,
        # Accepts 'YYYY-MM-DD HH:MM:SS' (except the all-zero placeholder)
        # or 'YYYY-MM-DD'; anything else passes through unchanged.
        'datetime': lambda val: (
            datetime.datetime.strptime(val, '%Y-%m-%d %H:%M:%S')
            if (val != '0000-00-00 00:00:00' and len(val) == 19)
            else datetime.datetime.strptime(val, '%Y-%m-%d')
            if len(val) == 10 else val),
    }

    def __init__(self):
        '''Create every attribute named in self.attrs, set to None.'''
        for att in self.attrs:
            setattr(self, att, None)

    def __dict__(self):
        '''
        Dump the object's declared attributes as a dictionary.

        NOTE(review): a method named __dict__ shadows the normal
        instance-dictionary descriptor, so this must be *called*
        (self.__dict__()) rather than read as a plain attribute.
        '''
        this_dict = {}
        for att in self.attrs:
            this_dict.update({att: getattr(self, att, None)})
        return this_dict

    def __unicode__(self):
        '''Return the display attribute (str_attr) as unicode (Python 2).'''
        return unicode(getattr(self, self.str_attr))

    def sync(self, model):
        '''
        Get-or-create a Django-style model row keyed on this object's
        primary key, seeding it with the remaining attributes.
        '''
        defaults = self.__dict__()
        defaults.pop(self.pk_attr)
        obj, created = model.objects.get_or_create(
            id=getattr(self, self.pk_attr),
            defaults=defaults
        )
        if created:
            print('Created %s %s' % (self.object_name, self.__unicode__()))
        else:
            print('Updated %s %s' % (self.object_name, self.__unicode__()))

    @classmethod
    def _new_from_xml(cls, element):
        '''
        Build a new FreshBooks object from an XML element, applying any
        TYPE_MAPPINGS coercions to the parsed text values.
        '''
        obj = cls()
        # Walk the element's children, creating one attribute per node.
        for elem in [node for node in element.childNodes
                     if node.nodeType == node.ELEMENT_NODE]:
            val = None
            if elem.firstChild:
                val = elem.firstChild.nodeValue
            # HACK: find another way to detect arrays, probably based on
            # a list of elements instead of a textnode.
            if elem.nodeName == 'lines':
                val = []
                for item in [node for node in elem.childNodes
                             if node.nodeType == node.ELEMENT_NODE]:
                    # Look the class up by capitalized tag name.  This
                    # was eval(), which would execute arbitrary tag text
                    # from the (untrusted) response; a globals lookup is
                    # equivalent for known tags and safe for unknown ones.
                    c = globals().get(item.nodeName.capitalize())
                    if c:
                        val.append(c._new_from_xml(item))
            # If there is typing information supplied by the child class
            # then use it to coerce the value.
            elif elem.nodeName in cls.TYPE_MAPPINGS:
                val = cls.MAPPING_FUNCTIONS[
                    cls.TYPE_MAPPINGS[elem.nodeName]](val)
            setattr(obj, elem.nodeName, val)
        return obj

    @classmethod
    def get(cls, object_id, element_name=None):
        '''
        Fetch a single object by id via '<object_name>.get'.  Returns
        None when the response carries no matching element.
        '''
        resp = call_api('%s.get' % cls.object_name,
                        {'%s_id' % cls.object_name: object_id})
        if resp.success:
            items = resp.doc.getElementsByTagName(
                element_name or cls.object_name)
            if items:
                return cls._new_from_xml(items[0])
        return None

    @classmethod
    def list(cls, options=None, element_name=None, get_all=False):
        '''
        Get a summary list of this object.

        When get_all is True, pages through the results (100 per page)
        until a short page signals the end.

        options is copied so that neither the caller's dict nor a shared
        default is ever mutated by the paging keys added below (the old
        {} default accumulated 'per_page'/'page' across calls).
        '''
        options = dict(options) if options else {}
        result = None
        if get_all:
            options['per_page'] = 100
            options['page'] = 1
            objects = []
            while True:
                resp = call_api('%s.list' % cls.object_name, options)
                if not resp.success:
                    return result
                new_objects = resp.doc.getElementsByTagName(
                    element_name or cls.object_name)
                objects.extend(new_objects)
                if len(new_objects) < options['per_page']:
                    break
                options['page'] += 1
            result = [cls._new_from_xml(elem) for elem in objects]
        else:
            resp = call_api('%s.list' % cls.object_name, options)
            if resp.success:
                result = [cls._new_from_xml(elem) for elem in
                          resp.doc.getElementsByTagName(
                              element_name or cls.object_name)]
        return result

    def to_xml(self, doc, element_name=None):
        '''
        Create an XML representation of the object for use in sending
        to FreshBooks.
        '''
        # The root element is the class name, downcased.
        element_name = element_name or self.object_name.lower()
        root = doc.createElement(element_name)
        # self.__dict__ is the method above (it shadows the instance
        # dict descriptor), so it must be called to get the attributes.
        for key, value in self.__dict__().items():
            if isinstance(value, list):
                array = doc.createElement(key)
                for item in value:
                    item_name = 'line' if key == 'lines' else key[:-1]
                    array_item = doc.createElement(item_name)
                    array_item.appendChild(doc.createTextNode(str(item)))
                    # Fixed: array_item was created but never attached.
                    array.appendChild(array_item)
                # Fixed: was root.append(array) (no such DOM method) and
                # sat inside the item loop.
                root.appendChild(array)
            elif value:
                elem = doc.createElement(key)
                elem.appendChild(doc.createTextNode(str(value)))
                root.appendChild(elem)
        return root
#-----------------------------------------------#
#   Client
#-----------------------------------------------#
class Client(BaseObject):
    '''
    The Client object.

    The fields listed in attrs are created (as None) on each instance
    by BaseObject.__init__.
    '''
    object_name = 'client'
    # Instance fields created by BaseObject.__init__.
    attrs = ('client_id', 'first_name', 'last_name', 'organization', 'email',
        'username', 'password', 'work_phone', 'home_phone', 'mobile',
        'fax', 'notes', 'p_street1', 'p_street2', 'p_city', 'p_state',
        'p_country', 'p_code', 's_street1', 's_street2', 's_city',
        's_state', 's_country', 's_code', 'url'
        )
    TYPE_MAPPINGS = {'client_id' : 'int'}
    # Attribute holding the primary key, used by BaseObject.sync().
    pk_attr = 'client_id'
    # Attribute used for display by BaseObject.__unicode__().
    str_attr = 'organization'
#-----------------------------------------------#
#   Invoice
#-----------------------------------------------#
class Invoice(BaseObject):
    '''
    The Invoice object.  Instance fields come from attrs; 'lines' and
    'links' are additionally defaulted to empty lists.
    '''
    object_name = 'invoice'
    attrs = ('invoice_id', 'client_id', 'number', 'date', 'po_number',
             'terms', 'first_name', 'last_name', 'organization',
             'p_street1', 'p_street2', 'p_city', 'p_state', 'p_country',
             'p_code', 'amount', 'amount_outstanding', 'paid', 'lines',
             'discount', 'status', 'notes', 'url')
    TYPE_MAPPINGS = {
        'invoice_id': 'int',
        'client_id': 'int',
        'discount': 'float',
        'amount': 'float',
        'date': 'datetime',
        'amount_outstanding': 'float',
        'paid': 'float',
    }
    pk_attr = 'invoice_id'
    str_attr = 'number'

    def __init__(self):
        '''Create the declared attributes, then default the list fields.'''
        super(Invoice, self).__init__()
        self.lines = []
        self.links = []

    def sync(self, model):
        '''
        Get-or-create a model row for this invoice; both the primary key
        and 'lines' are excluded from the defaults.
        '''
        values = self.__dict__()
        values.pop(self.pk_attr)
        values.pop('lines')
        obj, created = model.objects.get_or_create(
            id=getattr(self, self.pk_attr),
            defaults=values
        )
        if created:
            print('Created %s %s' % (self.object_name, self.__unicode__()))
        else:
            print('Updated %s %s' % (self.object_name, self.__unicode__()))
    #TODO: Unpack lines
#-----------------------------------------------#
#   Line--really just a part of Invoice
#-----------------------------------------------#
class Line(BaseObject):
    '''
    A single invoice line.  Lines only ever appear embedded in an
    Invoice, so the standalone get/list operations are unsupported.
    '''
    TYPE_MAPPINGS = {'unit_cost': 'float', 'quantity': 'float',
                     'tax1_percent': 'float', 'tax2_percent': 'float',
                     'amount': 'float'}

    def __init__(self):
        '''
        Create the line's attributes directly (Line declares no attrs
        tuple, so BaseObject.__init__ is not used).
        '''
        for att in ('name', 'description', 'unit_cost', 'quantity',
                    'tax1_name', 'tax2_name', 'tax1_percent',
                    'tax2_percent', 'amount'):
            setattr(self, att, None)

    @classmethod
    def get(cls, object_id, element_name=None):
        '''Unsupported: lines cannot be fetched individually.'''
        raise NotImplementedError("the Line doesn't support this")

    @classmethod
    def list(cls, options=None, element_name=None, get_all=False):
        '''
        Unsupported: lines cannot be listed.  Signature matches
        BaseObject.list (get_all added, mutable {} default removed) so
        the override is a proper substitute for the base method.
        '''
        raise NotImplementedError("the Line doesn't support this")
#-----------------------------------------------#
#   Item
#-----------------------------------------------#
class Item(BaseObject):
    '''
    The Item object.
    '''
    object_name = 'item'
    TYPE_MAPPINGS = {'item_id': 'int', 'unit_cost': 'float',
                     'quantity': 'int', 'inventory': 'int'}

    def __init__(self):
        '''Create each item attribute, initialized to None.'''
        for field in ('item_id', 'name', 'description', 'unit_cost',
                      'quantity', 'inventory'):
            setattr(self, field, None)
#-----------------------------------------------#
#   Payment
#-----------------------------------------------#
class Payment(BaseObject):
    '''
    The Payment object.
    '''
    object_name = 'payment'
    TYPE_MAPPINGS = {'client_id': 'int', 'invoice_id': 'int',
                     'amount': 'float', 'date': 'datetime'}

    def __init__(self):
        '''Create each payment attribute, initialized to None.'''
        for field in ('payment_id', 'client_id', 'invoice_id', 'date',
                      'amount', 'type', 'notes'):
            setattr(self, field, None)
#-----------------------------------------------#
#   Recurring
#-----------------------------------------------#
class Recurring(BaseObject):
    '''
    The Recurring object.  'lines' is additionally defaulted to an
    empty list after the attributes are created.
    '''
    object_name = 'recurring'
    TYPE_MAPPINGS = {'recurring_id': 'int', 'client_id': 'int',
                     'po_number': 'int', 'discount': 'float',
                     'amount': 'float', 'occurrences': 'int',
                     'date': 'datetime'}

    def __init__(self):
        '''Create each attribute (None), then default lines to a list.'''
        fields = ('recurring_id', 'client_id', 'date', 'po_number', 'terms',
                  'first_name', 'last_name', 'organization', 'p_street1',
                  'p_street2', 'p_city', 'p_state', 'p_country', 'p_code',
                  'amount', 'lines', 'discount', 'status', 'notes',
                  'occurrences', 'frequency', 'stopped', 'send_email',
                  'send_snail_mail')
        for field in fields:
            setattr(self, field, None)
        self.lines = []
#-----------------------------------------------#
#   Project
#-----------------------------------------------#
class Project(BaseObject):
    '''
    The Project object.  'tasks' is additionally defaulted to an empty
    list after the attributes are created.
    '''
    object_name = 'project'
    TYPE_MAPPINGS = {'project_id': 'int', 'client_id': 'int',
                     'rate': 'float'}

    def __init__(self):
        '''Create each attribute (None), then default tasks to a list.'''
        for field in ('project_id', 'client_id', 'name', 'bill_method',
                      'rate', 'description', 'tasks'):
            setattr(self, field, None)
        self.tasks = []
#-----------------------------------------------#
#   Task
#-----------------------------------------------#
class Task(BaseObject):
    '''
    The Task object.
    '''
    object_name = 'task'
    TYPE_MAPPINGS = {'task_id': 'int', 'rate': 'float', 'billable': 'bool'}

    def __init__(self):
        '''Create each task attribute, initialized to None.'''
        for field in ('task_id', 'name', 'billable', 'rate', 'description'):
            setattr(self, field, None)
#-----------------------------------------------#
#   TimeEntry
#-----------------------------------------------#
class TimeEntry(BaseObject):
    '''
    The TimeEntry object.
    '''
    object_name = 'time_entry'
    TYPE_MAPPINGS = {'time_entry_id': 'int', 'project_id': 'int',
                     'task_id': 'int', 'hours': 'float',
                     'date': 'datetime'}

    def __init__(self):
        '''Create each time-entry attribute, initialized to None.'''
        for field in ('time_entry_id', 'project_id', 'task_id', 'hours',
                      'notes', 'date'):
            setattr(self, field, None)
#-----------------------------------------------#
#   Estimate
#-----------------------------------------------#
class Estimate(BaseObject):
    '''
    The Estimate object.  'lines' is additionally defaulted to an empty
    list after the attributes are created.
    '''
    object_name = 'estimate'
    TYPE_MAPPINGS = {'estimate_id': 'int', 'client_id': 'int',
                     'po_number': 'int', 'discount': 'float',
                     'amount': 'float', 'date': 'datetime'}

    def __init__(self):
        '''Create each attribute (None), then default lines to a list.'''
        fields = ('estimate_id', 'client_id', 'status', 'date', 'po_number',
                  'terms', 'first_name', 'last_name', 'organization',
                  'p_street1', 'p_street2', 'p_city', 'p_state',
                  'p_country', 'p_code', 'lines', 'discount', 'amount',
                  'notes')
        for field in fields:
            setattr(self, field, None)
        self.lines = []
#-----------------------------------------------#
#   Expense
#-----------------------------------------------#
class Expense(BaseObject):
    '''
    The Expense object.
    '''
    object_name = 'expense'
    TYPE_MAPPINGS = {'expense_id': 'int', 'staff_id': 'int',
                     'client_id': 'int', 'category_id': 'int',
                     'project_id': 'int', 'amount': 'float',
                     'date': 'datetime'}

    def __init__(self):
        '''Create each expense attribute, initialized to None.'''
        for field in ('expense_id', 'staff_id', 'category_id', 'client_id',
                      'project_id', 'date', 'amount', 'notes', 'status'):
            setattr(self, field, None)
#-----------------------------------------------#
#   Category
#-----------------------------------------------#
class Category(BaseObject):
    '''
    The Category object.
    '''
    object_name = 'category'
    TYPE_MAPPINGS = {'category_id': 'int', 'tax1': 'float',
                     'tax2': 'float'}

    def __init__(self):
        '''Create each category attribute, initialized to None.'''
        for field in ('category_id', 'name', 'tax1', 'tax2'):
            setattr(self, field, None)
#-----------------------------------------------#
#   Staff
#-----------------------------------------------#
class Staff(BaseObject):
    '''
    The Staff object.
    '''
    object_name = 'staff'
    TYPE_MAPPINGS = {'staff_id': 'int', 'rate': 'float',
                     'last_login': 'datetime',
                     'signup_date': 'datetime'}

    def __init__(self):
        '''Create each staff attribute, initialized to None.'''
        for att in ('staff_id', 'username', 'first_name', 'last_name',
                    'email', 'business_phone', 'mobile_phone', 'rate',
                    'last_login', 'number_of_logins', 'signup_date',
                    'street1', 'street2', 'city', 'state', 'country',
                    'code'):
            setattr(self, att, None)

    @classmethod
    def list(cls, options=None, get_all=False):
        '''
        Return a list of staff members; the response wraps each record
        in a <member> element, so that name is passed to the base
        implementation.

        options defaults to None instead of a shared {} literal: the
        base list() mutates the dict it receives when get_all is True,
        so the old default leaked paging keys between calls.
        '''
        return super(Staff, cls).list(options or {}, element_name='member',
                                      get_all=get_all)
| |
# Copyright (C) 2013-2014 Computer Sciences Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def constant(f):
    '''
    Decorator turning a zero-argument function into a read-only class
    property: reading the attribute returns f(); assigning to it raises.
    '''
    def _reject(self, value):
        # NOTE(review): SyntaxError is an unusual exception for a write
        # guard, but it is preserved since callers may catch it.
        raise SyntaxError('Unable to change constants')
    def _read(self):
        return f()
    return property(_read, _reject)
class __EzBakePropertyConstants(object):
# [Accumulo Constants]
@constant # (type: string) Property which represents the name of the accumulo instance
def ACCUMULO_INSTANCE_NAME():
return "accumulo.instance.name"
@constant # (type: string) Property which represents the namespace in accumulo we are working in
def ACCUMULO_NAMESPACE():
return "accumulo.namespace"
@constant # (type: string) Encrypted Property which represents the password for the user to connect to the database with
def ACCUMULO_PASSWORD():
return "accumulo.password"
@constant # (type: boolean) Property used to indicate whether our connector is in mock mode
def ACCUMULO_USE_MOCK():
return "accumulo.use.mock"
@constant # (type: string) Property which is the username we connect to the database with
def ACCUMULO_USERNAME():
return "accumulo.username"
@constant # (type: string) Property which is a CSV of zookeeper connection strings (host:port) which are the zookeeper servers that accumulo users
def ACCUMULO_ZOOKEEPERS():
return "accumulo.zookeepers"
@constant # (type: int) Property which specifies the port of the accumulo proxy
def ACCUMULO_PROXY_PORT():
return "accumulo.proxy.port"
@constant # (type: string) Property which specifies the hostname of the accumulo proxy
def ACCUMULO_PROXY_HOST():
return "accumulo.proxy.host"
@constant # (type: boolean) Property which specifies if accumulo clients should use SSL
def ACCUMULO_USE_SSL():
return "accumulo.use.ssl"
@constant # (type: string) Property which specifies the path to the accumulo truststore
def ACCUMULO_SSL_TRUSTSTORE_PATH():
return "accumulo.ssl.truststore.path"
@constant # (type: string) Property which specifies the type of the accumulo truststore
def ACCUMULO_SSL_TRUSTSTORE_TYPE():
return "accumulo.ssl.truststore.type"
@constant # (type: string) Property which specifies the password for the accumulo truststore
def ACCUMULO_SSL_TRUSTSTORE_PASSWORD():
return "accumulo.ssl.truststore.password"
# [Application Constants]
@constant # (type: string) Property which represents the name of the application
def EZBAKE_APPLICATION_NAME():
return "application.name"
@constant # (type: string) Property which represents the version of the application
def EZBAKE_APPLICATION_VERSION():
return "ezbake.application.version"
# [Azkaban Constants]
@constant # (type: string) Property which represents url for azkaban
def AZKABAN_URL():
return "azkaban.url"
@constant # (type: string) Property which represents azkaban password
def AZKABAN_PASSWORD():
return "azkaban.password"
@constant # (type: string) Property which represents azkaban username
def AZKABAN_USERNAME():
return "azkaban.username"
# [Common Services Constants]
@constant # (type: string) Property which represents capco service
def CAPCO():
return "CapcoService"
@constant # (type: string) Property which represents datawarehouse service
def DATAWAREHOUSE():
return "warehaus"
@constant # (type: string) Property which represents document extraction service
def DOCUMENT_EXTRACTION():
return "docextract"
@constant # (type: string) Property which represents entity extraction service
def ENTITY_EXTRACTION():
return "entityextract"
@constant # (type: string) Property which represents ezdeployer service
def EZDEPLOYER():
return "ezdeployer"
@constant # (type: string) Property which represents ezsecurity service
def EZSECURITY():
return "EzbakeSecurityService"
@constant # (type: string) Property which represents ezsecurity registration service
def EZSECURITY_REGISTRATION():
return "EzSecurityRegistration"
@constant # (type: string) Property which represents geospatial extraction service
def GEOSPATIAL_EXTRACTION():
return "geosvc"
@constant # (type: string) Property which represents image indexer service
def IMAGE_INDEXER():
return "imageindexingservice"
@constant # (type: string) Property which represents image metadata extraction service
def IMAGE_METADATA_EXTRACTION():
return "imagemetadataextractionservice"
@constant # (type: string) Property which represents internal name service
def INTERNAL_NAME_SERVICE():
return "ins"
@constant # (type: string) Property which represents selector extraction service
def SELECTOR_EXTRACTION():
return "selext"
@constant # (type: string) Property which represents ssr service
def SSR():
return "ssrService"
@constant # (type: string) Property which represents temporal normalizer service
def TEMPORAL_NORMALIZER():
return "temporalsvc"
# [Elastic Search Constants]
@constant # (type: string) Property which represents elastic search cluster name key in ezconfig
def ELASTICSEARCH_CLUSTER_NAME():
return "elastic.cluster.name"
@constant # (type: string) Property which represents elastic search force refresh key in ezconfig
def ELASTICSEARCH_FORCE_REFRESH_ON_PUT():
return "elastic.force.refresh"
@constant # (type: string) Property which represents elastic search host name key in ezconfig
def ELASTICSEARCH_HOST():
return "elastic.host.name"
@constant # (type: string) Property which represents elastic search port key in ezconfig
def ELASTICSEARCH_PORT():
return "elastic.port"
# [Hadoop Constants]
@constant # (type: string) Property which represents ezconfig string to get default filesystem name
def HADOOP_FILESYSTEM_NAME():
return "fs.default.name"
@constant # (type: string) Property which represents ezconfig string to get hdfs implementation
def HADOOP_FILESYSTEM_IMPL():
return "fs.hdfs.impl"
@constant # (type: string) Property which represents ezconfig string to get filesystem use local value
def HADOOP_FILESYSTEM_USE_LOCAL():
return "fs.use.local"
# [Flume Constants]
@constant # (type: string) Property which represents flume key for agent type
def FLUME_AGENT_TYPE():
return "flume.agent.type"
@constant # (type: string) Property which represents flume key for backoff
def FLUME_BACK_OFF():
return "flume.backoff"
@constant # (type: string) Property which represents flume key for batch size
def FLUME_BATCH_SIZE():
return "flume.batch.size"
@constant # (type: string) Property which represents flume key for connect attempts
def FLUME_CONNECT_ATTEMPTS():
return "flume.connect.attempts"
@constant # (type: string) Property which represents flume key for connect timeout
def FLUME_CONNECT_TIMEOUT():
return "flume.connect.timeout"
@constant # (type: string) Property which represents flume key for headers
def FLUME_HEADERS():
return "flume.headers"
@constant # (type: string) Property which represents flume key for host selector
def FLUME_HOST_SELECTOR():
return "flume.host.selector"
@constant # (type: string) Property which represents flume key for hosts
def FLUME_HOSTS():
return "flume.hosts"
@constant # (type: string) Property which represents flume key for max attempts
def FLUME_MAX_ATTEMPTS():
return "flume.max.attempts"
@constant # (type: string) Property which represents flume key for max backoff
def FLUME_MAX_BACKOFF():
return "flume.max.backoff"
@constant # (type: string) Property which represents flume key for max events
def FLUME_MAX_EVENTS():
return "flume.max.events"
@constant # (type: string) Property which represents flume key for request timeout
def FLUME_REQUEST_TIMEOUT():
return "flume.request.timeout"
@constant # (type: string) Property which represents flume key for run interval
def FLUME_RUN_INTERVAL():
return "flume.run.interval"
@constant # (type: string) Property which represents flume key for sleep interval
def FLUME_SLEEP_INTERVAL():
return "flume.sleep.interval"
# [Kafka Constants]
@constant # (type: string) Property which represents kafka zookeeper connection string
def KAFKA_ZOOKEEPER():
return "kafka.zookeeper.connect"
@constant # (type: string) Property which represents kafka broker list ezconfig property
def KAFKA_BROKER_LIST():
return "kafka.metadata.broker.list"
@constant # (type: int) Property which represents the time that messages stay in memory before flushed to Kafka if using an async producer (in milliseconds)
def KAFKA_QUEUE_TIME():
return "kafka.queue.time"
@constant # (type: string) Property which represents the amount of messages that are queued in memory before flushing to Kafka if using an async producer
def KAFKA_QUEUE_SIZE():
return "kafka.queue.size"
@constant # (type: string) Property which represents the type of producer (sync or async) used by Kafka
def KAFKA_PRODUCER_TYPE():
return "kafka.producer.type"
@constant # (type: int) Property which represents the zookeeper timeout for Kafka consumers
def KAFKA_ZOOKEEPER_SESSION_TIMEOUT():
return "kafka.zk.sessiontimeout.ms"
# [Mongo Configuration Constants]
@constant # (type: string) Property which represents mongo db host name ezconfig key
def MONGODB_HOST_NAME():
return "mongodb.host.name"
@constant # (type: int) Property which represents mongo db port number key
def MONGODB_PORT():
return "mongodb.port"
@constant # (type: string) Property which represents mongo db database name ezconfig key
def MONGODB_DB_NAME():
return "mongodb.database.name"
@constant # (type: string) Property which represents mongo db user name ezconfig key
def MONGODB_USER_NAME():
return "mongodb.user.name"
@constant # (type: string) Property which represents mongo db password ezconfig key
def MONGODB_PASSWORD():
return "mongodb.password"
@constant # (type: string) Property which represents mongo db use ssl ezconfig key
def MONGODB_USE_SSL():
return "mongodb.use.ssl"
@constant # (type: string) Property which represents the connection string that can be used to access mongo
def MONGODB_CONNECTION_STRING():
return "mongodb.connection.string"
# [Postgres Constants]
@constant # (type: string) Property which represents postgres db ezconfig key
def POSTGRES_DB():
return "postgres.db"
@constant # (type: string) Property which represents postgres host ezconfig key
def POSTGRES_HOST():
return "postgres.host"
@constant # (type: string) Property which represents postgres password ezconfig key
def POSTGRES_PASSWORD():
return "postgres.password"
@constant # (type: string) Property which represents postgres port ezconfig key
def POSTGRES_PORT():
return "postgres.port"
@constant # (type: string) Property which represents postgres username ezconfig key
def POSTGRES_USERNAME():
return "postgres.username"
@constant # (type: string) Property which represents whether postgres connection uses ssl ezconfig key
def POSTGRES_USE_SSL():
return "postgres.use.ssl"
# [Redis Constants]
@constant # (type: int) Property which represents redis host ezconfig key
def REDIS_HOST():
return "redis.host"
@constant # (type: string) Property which represents redis post ezconfig key
def REDIS_PORT():
return "redis.port"
@constant # (type: int) Property which represents redis db index ezconfig key
def REDIS_DB_INDEX():
return "redis.db.index"
# [Security Constants]
@constant # (type: string) Property which represents the security id
def EZBAKE_SECURITY_ID():
return "ezbake.security.app.id"
@constant # (type: string) Property which represents cache type ezconfig key
def EZBAKE_USER_CACHE_TYPE():
return "ezbake.security.cache.type"
@constant # (type: string) Property which represents cache ttl ezconfig key
def EZBAKE_USER_CACHE_TTL():
return "ezbake.security.cache.ttl"
@constant # (type: string) Property which represents cache size ezconfig key
def EZBAKE_USER_CACHE_SIZE():
return "ezbake.security.cache.size"
@constant # (type: string) Property which represents request expiration ezconfig key
def EZBAKE_REQUEST_EXPIRATION():
return "ezbake.security.request.expiration"
@constant # (type: string) Property which represents token expiration ezconfig key
def EZBAKE_TOKEN_EXPIRATION():
return "ezbake.security.token.ttl"
@constant # (type: int) How long after being issued a proxy token should be valid
def EZBAKE_SECURITY_PROXYTOKEN_TTL():
return "ezbake.security.proxytoken.ttl"
@constant # (type: int) How long after expiration a token can be re-issued
def EZBAKE_SECURITY_TOKEN_REFRESH_LIMIT():
return "ezbake.security.token.refresh.limit"
@constant # (type: string) Property which represents app registration implementation ezconfig key
def EZBAKE_APP_REGISTRATION_IMPL():
return "ezbake.security.app.service.impl"
@constant # (type: string) Property which represents admins file ezconfig key
def EZBAKE_ADMINS_FILE():
return "ezbake.security.admins.file"
@constant # (type: string) Property which represents service implementation ezconfig key
def EZBAKE_USER_SERVICE_IMPL():
return "ezbake.security.user.service.impl"
@constant # (type: string) Property which represents mock server ezconfig key
def EZBAKE_SECURITY_SERVICE_MOCK_SERVER():
return "ezbake.security.server.mock"
@constant # (type: string) Property which represents use forward proxy ezconfig key
def EZBAKE_USE_FORWARD_PROXY():
return "ezbake.frontend.use.forward.proxy"
@constant # (type: string) Property which represents ssl protocol ezconfig key
def EZBAKE_SSL_PROTOCOL_KEY():
return "ezbake.ssl.protocol"
@constant # (type: string) Property which represents ssl ciphers ezconfig key
def EZBAKE_SSL_CIPHERS_KEY():
return "ezbake.ssl.ciphers"
@constant # (type: string) Property which represents peer validation ezconfig key
def EZBAKE_SSL_PEER_AUTH_REQUIRED():
return "ezbake.ssl.peer.validation"
@constant # (type: boolean) Property which tells us if we are using the default ssl key
def EZBAKE_SSL_USE_DEFAULT_SSL_KEY():
return "ezbake.security.default.ssl"
@constant # (type: string) Property which represents the trusted certificates file
def EZBAKE_APPLICATION_TRUSTED_CERT():
return "ezbake.ssl.trustedcert.file"
@constant # (type: string) Property which represents the private key file
def EZBAKE_APPLICATION_PRIVATE_KEY_FILE():
return "ezbake.ssl.privatekey.file"
@constant # (type: string) Property which represents the certificates file
def EZBAKE_APPLICATION_CERT_FILE():
return "ezbake.ssl.certificate.file"
@constant # (type: string) Property which represents the public key file for a service
def EZBAKE_APPLICATION_PUBLIC_KEY_FILE():
return "ezbake.ssl.servicekey.file"
# [SSL Constants]
@constant # (type: string) Property which represents the path to the system keystore
def SYSTEM_KEYSTORE_PATH():
return "system.keystore.path"
@constant # (type: string) Property which represents the type of the system keystore
def SYSTEM_KEYSTORE_TYPE():
return "system.keystore.type"
@constant # (type: string) Property which represents the password for the system keystore
def SYSTEM_KEYSTORE_PASSWORD():
return "system.keystore.password"
@constant # (type: string) Property which represents the path to the system truststore
def SYSTEM_TRUSTSTORE_PATH():
return "system.truststore.path"
@constant # (type: string) Property which represents the type of the system truststore
def SYSTEM_TRUSTSTORE_TYPE():
return "system.truststore.type"
@constant # (type: string) Property which represents the password for the system truststore
def SYSTEM_TRUSTSTORE_PASSWORD():
return "system.truststore.password"
@constant # (type: string) Property which represents keystore file ezconfig key
def EZBAKE_APPLICATION_KEYSTORE_FILE():
return "ezbake.ssl.keystore.file"
@constant # (type: string) Property which represents keystore type ezconfig key
def EZBAKE_APPLICATION_KEYSTORE_TYPE():
return "ezbake.ssl.keystore.type"
@constant # (type: string) Property which represents keystore password ezconfig key
def EZBAKE_APPLICATION_KEYSTORE_PASS():
return "ezbake.ssl.keystore.pass"
@constant # (type: string) Property which represents truststore file ezconfig key
def EZBAKE_APPLICATION_TRUSTSTORE_FILE():
return "ezbake.ssl.truststore.file"
@constant # (type: string) Property which represents truststore type ezconfig key
def EZBAKE_APPLICATION_TRUSTSTORE_TYPE():
return "ezbake.ssl.truststore.type"
@constant # (type: string) Property which represents truststore password ezconfig key
def EZBAKE_APPLICATION_TRUSTSTORE_PASS():
return "ezbake.ssl.truststore.pass"
# [Service Constants]
@constant # (type: string) Property representing the location to the certificates directory
def EZBAKE_CERTIFICATES_DIRECTORY():
return "ezbake.security.ssl.dir"
@constant # (type: string) Property which represents the name of the service
def EZBAKE_SERVICE_NAME():
return "service.name"
# [Storm Constants]
@constant # (type: string) Property representing the nimbus host
def STORM_NIMBUS_HOST():
return "storm.nimbus.host"
@constant # (type: int) Property representing the nimbus port
def STORM_NIMBUS_THRIFT_PORT():
return "storm.nimbus.thrift.port"
# [System Constants]
@constant # (type: string) Property which represents ezbake admin application deployment ezconfig key
def EZBAKE_ADMIN_APPLICATION_DEPLOYMENT():
return "ezbake.system.admin.application.deployment"
@constant # (type: string) Property which represents ezbake log directory ezconfig key
def EZBAKE_LOG_DIRECTORY():
return "ezbake.log.directory"
@constant # (type: string) Property which represents ezbake log standard out ezconfig key
def EZBAKE_LOG_TO_STDOUT():
return "ezbake.log.stdout"
@constant # (type: string) Property which represents the environment variable for the shared secret
def EZBAKE_SHARED_SECRET_ENVIRONMENT_VARIABLE():
return "ezbake.shared.secret.environment.variable"
# [System Services Constants]
@constant # (type: string) Property which represents frontend service
def FRONTEND():
return "ezfrontend"
# [Thrift Constants]
@constant # (type: string) Property which represents thrifts max idle clients ezconfig key
def THRIFT_MAX_IDLE_CLIENTS():
return "thrift.max.idle.clients"
@constant # (type: string) Property which represents thrifts max pool clients ezconfig key
def THRIFT_MAX_POOL_CLIENTS():
return "thrift.max.pool.clients"
@constant # (type: string) Property which represents thrifts milliseconds between client eviction checks ezconfig key
def THRIFT_MILLIS_BETWEEN_CLIENT_EVICTION_CHECKS():
return "thrift.millis.between.client.eviction.checks"
@constant # (type: string) Property which represents thrifts milliseconds before client eviction ezconfig key
def THRIFT_MILLIS_IDLE_BEFORE_EVICTION():
return "thrift.millis.idle.before.eviction"
@constant # (type: string) Property which represents thrifts server mode ezconfig key
def THRIFT_SERVER_MODE():
return "thrift.server.mode"
@constant # (type: string) Property which represents thrifts test pool on borrow ezconfig key
def THRIFT_TEST_ON_BORROW():
return "thrift.test.pool.on.borrow"
@constant # (type: string) Property which represents thrifts test while idle ezconfig key
def THRIFT_TEST_WHILE_IDLE():
return "thrift.test.pool.while.idle"
@constant # (type: string) Property which represents thrifts use ssl ezconfig key
def THRIFT_USE_SSL():
return "thrift.use.ssl"
@constant # (type: boolean) Property which represents if the client pool should block on exhaustion or throw an exception
def THRIFT_BLOCK_WHEN_EXHAUSTED():
return "thrift.block.when.exhausted"
@constant # (type: boolean) Property which tells us to actually pool clients or not
def THRIFT_ACTUALLY_POOL_CLIENTS():
return "thrift.actually.pool.clients"
@constant # (type: boolean) Log a stack trace whenever an object is abandonded from the pool
def THRIFT_LOG_ABANDONDED():
return "thrift.pool.log.abandoned"
@constant # (type: boolean) Whether to abandon objects if they exceed the abandon timeout when borrow is called
def THRIFT_ABANDON_ON_BORROW():
return "thrift.pool.abandon.on.borrow"
@constant # (type: boolean) Whether to abandon objects if they exceed the abandon timeout when the evictor runs
def THRIFT_ABANDON_ON_MAINTENANCE():
return "thrift.pool.abandon.on.maintenance"
@constant # (type: string) Timeout in seconds before an abandoned object is removed
def THRIFT_ABANDON_TIMEOUT():
return "thrift.pool.abandon.timeout"
# [Web Application Constants]
@constant # (type: string) Property which represents web application external domain ezconfig key
def EZBAKE_WEB_APPLICATION_EXTERNAL_DOMAIN():
return "web.application.external.domain"
@constant # (type: string) Property which represents web application metrics endpoint ezconfig key
def EZBAKE_WEB_APPLICATION_METRICS_ENDPOINT():
return "web.application.metrics.endpoint"
@constant # (type: string) Property which represents web application metrics siteid ezconfig key
def EZBAKE_WEB_APPLICATION_METRICS_SITEID():
return "web.application.metrics.siteid"
@constant # (type: string) Property which represents security description banner: text
def EZBAKE_WEB_APPLICATION_BANNER_TEXT():
return "web.application.security.banner.text"
@constant # (type: string) Property which represents security description banner: background color
def EZBAKE_WEB_APPLICATION_BANNER_BGCOLOR():
return "web.application.security.banner.background.color"
@constant # (type: string) Property which represents security description banner: text color
def EZBAKE_WEB_APPLICATION_BANNER_TEXTCOLOR():
return "web.application.security.banner.text.color"
# [Zookeeper Constants]
@constant # (type: string) Property which is a CSV of zookeeper servers (host:port)
def ZOOKEEPER_CONNECTION_STRING():
return "zookeeper.connection.string"
# [MonetDB Constants]
@constant # (type: string) The MonetDB Username
def MONETDB_USERNAME():
return "monetdb.username"
@constant # (type: string) The MonetDB Password
def MONETDB_PASSWORD():
return "monetdb.password"
@constant # (type: string) The hostname of the MonetDB server
def MONETDB_HOSTNAME():
return "monetdb.hostname"
@constant # (type: int) The port number that MonetDB is running on
def MONETDB_PORT():
return "monetdb.port"
EzBakePropertyConstants = __EzBakePropertyConstants()
| |
from django.contrib.auth.models import User
from rest_framework.reverse import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from workflows.models import Workflow, Widget, Input
TEST_USERNAME = 'testuser'
TEST_PASSWORD = '123'
# Test workflow ids
TEST_WORKFLOW_USERS_PK = 2
TEST_WORKFLOW_OTHER_USER_PRIVATE_PK = 4
TEST_WORKFLOW_OTHER_USER_PUBLIC_PK = 6
TEST_OUTPUT_PK = 9
# Test widget ids
TEST_WIDGET_USERS_PK = 6
TEST_WIDGET_OTHER_USER_PRIVATE_PK = 33
TEST_WIDGET_OTHER_USER_PUBLIC_PK = 34
# Test widget parameters
TEST_PARAMETER_USERS_PK = 10
TEST_PARAMETER_OTHER_USER_PRIVATE_PK = 98
TEST_PARAMETER_OTHER_USER_PUBLIC_PK = 99
class BaseAPITestCase(APITestCase):
fixtures = ['test_data_api', ]
def _login(self):
self.client.login(username=TEST_USERNAME, password=TEST_PASSWORD)
def _logout(self):
self.client.logout()
def _test_multiple_response_codes(self, verb, urls, codes, data=None):
for url, code in zip(urls, codes):
response = verb(url, data) if data else verb(url)
self.assertEqual(response.status_code, code)
class SupportingAPITests(BaseAPITestCase):
def test_register(self):
url = reverse('user-create')
response = self.client.post(url, {
'username': 'testuser3',
'password': '123',
'email': 'testuser3@testdomain.com'
})
self.assertEqual(response.status_code, 200)
self.assertEqual(User.objects.filter(username='testuser3').count(), 1)
def test_login(self):
url = reverse('token-create')
response = self.client.post(url, {
'username': 'testuser',
'password': '123'
})
self.assertEqual(response.status_code, 200)
def test_logout(self):
url = reverse('token-destroy')
self._login()
response = self.client.post(url) # HTTP_AUTHORIZATION="Token %s" % auth_token)
self.assertEqual(response.status_code, 204)
def test_widget_library(self):
url = reverse('widget-library-list')
# Test without authentication - this should fail
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self._login()
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self._logout()
class WorkflowAPITests(BaseAPITestCase):
def test_create(self):
url = reverse('workflow-list')
workflow_data = {
'name': 'Untitled workflow',
'is_public': False,
'description': '',
'widget': None,
'template_parent': None
}
# Test without authentication - this should not be allowed
response = self.client.post(url, workflow_data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self._login()
response = self.client.post(url, workflow_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self._logout()
def test_patch(self):
url = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_USERS_PK})
url_other_user_private = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PRIVATE_PK})
url_other_user_public = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PUBLIC_PK})
workflowData = {
'name': 'Test workflow',
'is_public': True,
'description': 'Test description'
}
# Test without authentication - this should not be allowed
response = self.client.patch(url, workflowData)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self._login()
response = self.client.patch(url, workflowData)
updated_workflow = response.data
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(updated_workflow['name'], 'Test workflow')
self.assertEqual(updated_workflow['is_public'], True)
self.assertEqual(updated_workflow['description'], 'Test description')
# Try to patch
self._test_multiple_response_codes(
self.client.patch,
[url_other_user_private, url_other_user_public],
[status.HTTP_403_FORBIDDEN, status.HTTP_403_FORBIDDEN],
data=workflowData
)
self._logout()
def test_delete(self):
url = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_USERS_PK})
url_other_user_private = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PRIVATE_PK})
url_other_user_public = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PUBLIC_PK})
# Test without authentication - this should not be allowed
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self._login()
self._test_multiple_response_codes(
self.client.delete,
[url, url_other_user_private, url_other_user_public],
[status.HTTP_204_NO_CONTENT, status.HTTP_403_FORBIDDEN, status.HTTP_403_FORBIDDEN]
)
self._logout()
def test_reset(self):
url = reverse('workflow-reset', kwargs={'pk': TEST_WORKFLOW_USERS_PK})
url_other_user_private = reverse('workflow-reset', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PRIVATE_PK})
url_other_user_public = reverse('workflow-reset', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PUBLIC_PK})
# Test without authentication - this should not be allowed
response = self.client.post(url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self._login()
response = self.client.post(url, format="json")
data = response.json()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(data['status'], 'ok')
workflow = Workflow.objects.get(pk=TEST_WORKFLOW_USERS_PK)
for widget in workflow.widgets.all():
self.assertEqual(widget.finished, False)
self.assertEqual(widget.error, False)
self.assertEqual(widget.running, False)
self._test_multiple_response_codes(
self.client.post,
[url_other_user_private, url_other_user_public],
[status.HTTP_403_FORBIDDEN, status.HTTP_403_FORBIDDEN]
)
self._logout()
def test_run(self):
url = reverse('workflow-run', kwargs={'pk': TEST_WORKFLOW_USERS_PK})
url_other_user_private = reverse('workflow-run', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PRIVATE_PK})
url_other_user_public = reverse('workflow-run', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PUBLIC_PK})
# Test without authentication - this should not be allowed
response = self.client.post(url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self._login()
response = self.client.post(url, format="json")
# data = response.json()
self.assertEqual(response.status_code, status.HTTP_200_OK)
# self.assertEqual(data['status'], 'ok')
workflow = Workflow.objects.get(pk=TEST_WORKFLOW_USERS_PK)
for widget in workflow.widgets.all():
self.assertEqual(widget.finished, True)
self._test_multiple_response_codes(
self.client.post,
[url_other_user_private, url_other_user_public],
[status.HTTP_403_FORBIDDEN, status.HTTP_403_FORBIDDEN]
)
self._logout()
def test_subprocess(self):
url = reverse('workflow-subprocess', kwargs={'pk': TEST_WORKFLOW_USERS_PK})
url_other_user_private = reverse('workflow-subprocess', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PRIVATE_PK})
url_other_user_public = reverse('workflow-subprocess', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PUBLIC_PK})
# Test without authentication - this should not be allowed
response = self.client.post(url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self._login()
response = self.client.post(url, format="json")
widget = response.json()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(widget['type'], 'subprocess')
self._test_multiple_response_codes(
self.client.post,
[url_other_user_private, url_other_user_public],
[status.HTTP_403_FORBIDDEN, status.HTTP_403_FORBIDDEN]
)
# Get subprocess workflow object
subprocess_workflow = Widget.objects.get(pk=widget['id']).workflow_link
# Test adding input
url = reverse('workflow-subprocess-input', kwargs={'pk': subprocess_workflow.pk})
response = self.client.post(url)
widget = response.json()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(widget['type'], 'input')
# Test adding output
url = reverse('workflow-subprocess-output', kwargs={'pk': subprocess_workflow.pk})
response = self.client.post(url)
widget = response.json()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(widget['type'], 'output')
self._logout()
def test_subprocess_forloop(self):
url = reverse('workflow-subprocess', kwargs={'pk': TEST_WORKFLOW_USERS_PK})
self._login()
# First add a subprocess
response = self.client.post(url)
widget = response.json()
subprocess_workflow = Widget.objects.get(pk=widget['id']).workflow_link
# Test adding for loop widgets
url = reverse('workflow-subprocess-forloop', kwargs={'pk': subprocess_workflow.pk})
response = self.client.post(url)
data = response.json()
self.assertNotIn('status', data)
widget_types = {w['type'] for w in data}
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertSetEqual(widget_types, {'for_input', 'for_output'})
self._logout()
def test_subprocess_xvalidation(self):
url = reverse('workflow-subprocess', kwargs={'pk': TEST_WORKFLOW_USERS_PK})
self._login()
# First add a subprocess
response = self.client.post(url)
data = response.json()
self.assertNotIn('status', data)
subprocess_workflow = Widget.objects.get(pk=data['id']).workflow_link
# Test adding cross validation widgets
url = reverse('workflow-subprocess-xvalidation', kwargs={'pk': subprocess_workflow.pk})
response = self.client.post(url)
widgets = response.json()
widget_types = {w['type'] for w in widgets}
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertSetEqual(widget_types, {'cv_input', 'cv_output'})
self._logout()
class WidgetAPITests(BaseAPITestCase):
def test_fetch_value(self):
url = reverse('output-value', kwargs={'pk': TEST_OUTPUT_PK})
self._login()
response = self.client.get(url)
data = response.json()
self.assertEqual(data['value'], '5')
def test_create(self):
url = reverse('widget-list')
workflow_url = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_USERS_PK})
workflow_url_private = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PRIVATE_PK})
workflow_url_public = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PUBLIC_PK})
widget_data = {
'workflow': workflow_url,
'x': 50,
'y': 50,
'name': 'Test widget',
'abstract_widget': 3, # Multiply integers abstract widget
'finished': False,
'error': False,
'running': False,
'interaction_waiting': False,
'type': 'regular',
'progress': 0
}
# Test without authentication - this should not be allowed
response = self.client.post(url, widget_data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self._login()
response = self.client.post(url, widget_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
# Test on other user's workflows
widget_data['workflow'] = workflow_url_private
response = self.client.post(url, widget_data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
widget_data['workflow'] = workflow_url_public
response = self.client.post(url, widget_data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self._logout()
def test_patch(self):
widget_url = reverse('widget-detail', kwargs={'pk': TEST_WIDGET_USERS_PK})
widget_url_private = reverse('widget-detail', kwargs={'pk': TEST_WIDGET_OTHER_USER_PRIVATE_PK})
widget_url_public = reverse('widget-detail', kwargs={'pk': TEST_WIDGET_OTHER_USER_PUBLIC_PK})
widget_data = {
'x': 12,
'y': 34,
'name': 'Test name'
}
# Test without authentication - this should not be allowed
response = self.client.patch(widget_url, widget_data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self._login()
response = self.client.patch(widget_url, widget_data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
widget = Widget.objects.get(pk=TEST_WIDGET_USERS_PK)
self.assertEqual(widget.x, widget_data['x'])
self.assertEqual(widget.y, widget_data['y'])
self.assertEqual(widget.name, widget_data['name'])
# Test on other user's widgets
response = self.client.patch(widget_url_private, widget_data)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
response = self.client.patch(widget_url_public, widget_data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self._logout()
def test_reset(self):
widget_url = reverse('widget-reset', kwargs={'pk': TEST_WIDGET_USERS_PK})
widget_url_private = reverse('widget-reset', kwargs={'pk': TEST_WIDGET_OTHER_USER_PRIVATE_PK})
widget_url_public = reverse('widget-reset', kwargs={'pk': TEST_WIDGET_OTHER_USER_PUBLIC_PK})
# Test without authentication - this should not be allowed
response = self.client.post(widget_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self._login()
response = self.client.post(widget_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
widget = Widget.objects.get(pk=TEST_WIDGET_USERS_PK)
self.assertEqual(widget.finished, False)
# Test on other user's widgets
response = self.client.post(widget_url_private)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
response = self.client.post(widget_url_public)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self._logout()
def test_run(self):
widget_url = reverse('widget-run', kwargs={'pk': TEST_WIDGET_USERS_PK})
widget_reset_url = reverse('widget-reset', kwargs={'pk': TEST_WIDGET_USERS_PK})
widget_url_private = reverse('widget-run', kwargs={'pk': TEST_WIDGET_OTHER_USER_PRIVATE_PK})
widget_url_public = reverse('widget-run', kwargs={'pk': TEST_WIDGET_OTHER_USER_PUBLIC_PK})
# Test without authentication - this should not be allowed
response = self.client.post(widget_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self._login()
# First reset the widget
response = self.client.post(widget_reset_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
widget = Widget.objects.get(pk=TEST_WIDGET_USERS_PK)
self.assertEqual(widget.finished, False)
# .. then run
response = self.client.post(widget_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
widget = Widget.objects.get(pk=TEST_WIDGET_USERS_PK)
self.assertEqual(widget.finished, True)
# Test on other user's widgets
response = self.client.post(widget_url_private)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
response = self.client.post(widget_url_public)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self._logout()
def test_delete(self):
widget_url = reverse('widget-detail', kwargs={'pk': TEST_WIDGET_USERS_PK})
widget_url_private = reverse('widget-detail', kwargs={'pk': TEST_WIDGET_OTHER_USER_PRIVATE_PK})
widget_url_public = reverse('widget-detail', kwargs={'pk': TEST_WIDGET_OTHER_USER_PUBLIC_PK})
# Test without authentication - this should not be allowed
response = self.client.delete(widget_url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self._login()
response = self.client.delete(widget_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
widget_count = Widget.objects.filter(pk=TEST_WIDGET_USERS_PK).count()
self.assertEqual(widget_count, 0)
# Test on other user's widgets
response = self.client.delete(widget_url_private)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
response = self.client.delete(widget_url_public)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self._logout()
def test_save_parameters(self):
widget_url = reverse('widget-save-parameters', kwargs={'pk': TEST_WIDGET_USERS_PK})
widget_url_private = reverse('widget-save-parameters', kwargs={'pk': TEST_WIDGET_OTHER_USER_PRIVATE_PK})
widget_url_public = reverse('widget-save-parameters', kwargs={'pk': TEST_WIDGET_OTHER_USER_PUBLIC_PK})
parameters = [{
'id': TEST_PARAMETER_USERS_PK,
'value': '42'
}]
# Test without authentication - this should not be allowed
response = self.client.patch(widget_url, parameters)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self._login()
response = self.client.patch(widget_url, parameters)
self.assertEqual(response.status_code, status.HTTP_200_OK)
parameter = Input.objects.get(pk=TEST_PARAMETER_USERS_PK)
self.assertEqual(parameter.value, '42')
# Test on other user's widgets
parameters[0]['id'] = TEST_PARAMETER_OTHER_USER_PRIVATE_PK
response = self.client.patch(widget_url_private, parameters)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
parameters[0]['id'] = TEST_PARAMETER_OTHER_USER_PUBLIC_PK
response = self.client.patch(widget_url_public, parameters)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self._logout()
| |
# vim: fdm=indent
# author: Fabio Zanini
# date: 16/08/17
# content: Dataset functions to plot gene expression and phenotypes
# Modules
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib import cm
from .plugins import Plugin
from ..config import config
try:
import seaborn as sns
except (ImportError, RuntimeError):
if 'seaborn_import' not in config['_once_warnings']:
warnings.warn('Unable to import seaborn: plotting will not work')
config['_once_warnings'].append('seaborn_import')
sns = None
try:
import matplotlib.pyplot as plt
except (ImportError, RuntimeError):
if 'pyplot_import' not in config['_once_warnings']:
warnings.warn('Unable to import matplotlib.pyplot: plotting will not work')
config['_once_warnings'].append('pyplot_import')
plt = None
# Classes / functions
class Plot(Plugin):
'''Plot gene expression and phenotype in single cells'''
@staticmethod
def _update_properties(kwargs, defaults):
Plot._sanitize_plot_properties(kwargs)
Plot._sanitize_plot_properties(defaults)
for key, val in defaults.items():
if key not in kwargs:
kwargs[key] = val
@staticmethod
def _sanitize_plot_properties(kwargs):
aliases = {
'linewidth': 'lw',
'antialiased': 'aa',
'color': 'c',
'linestyle': 'ls',
'markeredgecolor': 'mec',
'markeredgewidth': 'mew',
'markerfacecolor': 'mfc',
'markerfacecoloralt': 'mfcalt',
'markersize': 'ms',
}
for key, alias in aliases.items():
if alias in kwargs:
kwargs[key] = kwargs.pop(alias)
def plot_coverage(
self,
features='total',
kind='cumulative',
ax=None,
tight_layout=True,
legend=False,
**kwargs):
'''Plot number of reads for each sample
Args:
features (list or string): Features to sum over. The string \
'total' means all features including spikeins and other, \
'mapped' means all features excluding spikeins and other, \
'spikeins' means only spikeins, and 'other' means only \
'other' features.
kind (string): Kind of plot (default: cumulative distribution).
ax (matplotlib.axes.Axes): The axes to plot into. If None \
(default), a new figure with one axes is created. ax must \
not strictly be a matplotlib class, but it must have \
common methods such as 'plot' and 'set'.
tight_layout (bool or dict): Whether to call \
matplotlib.pyplot.tight_layout at the end of the \
plotting. If it is a dict, pass it unpacked to that \
function.
legend (bool or dict): If True, call ax.legend(). If a dict, \
pass as **kwargs to ax.legend.
**kwargs: named arguments passed to the plot function.
Returns:
matplotlib.axes.Axes with the axes contaiing the plot.
'''
if ax is None:
new_axes = True
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(13, 8))
else:
new_axes = False
defaults = {
'linewidth': 2,
'color': 'darkgrey',
}
Plot._update_properties(kwargs, defaults)
counts = self.dataset.counts
if features == 'total':
pass
elif features == 'mapped':
counts = counts.exclude_features(spikeins=True, other=True)
elif features == 'spikeins':
counts = counts.get_spikeins()
elif features == 'other':
counts = counts.get_other_features()
else:
counts = counts.loc[features]
if kind == 'cumulative':
x = counts.values.sum(axis=0)
x.sort()
y = 1.0 - np.linspace(0, 1, len(x))
ax.plot(x, y, **kwargs)
ax_props = {
'ylim': (-0.05, 1.05),
'ylabel': 'Cumulative distribution'}
else:
raise ValueError('Plot kind not understood')
if not counts._normalized:
ax_props['xlabel'] = 'Number of reads'
elif counts._normalized != 'custom':
ax_props['xlabel'] = counts._normalized.capitalize().replace('_', ' ')
if new_axes:
xmin = 0.5
xmax = 1.05 * x.max()
ax_props['xlim'] = (xmin, xmax)
ax_props['xscale'] = 'log'
ax.grid(True)
ax.set(**ax_props)
if legend:
if np.isscalar(legend):
ax.legend()
else:
ax.legend(**legend)
if tight_layout:
if isinstance(tight_layout, dict):
plt.tight_layout(**tight_layout)
else:
plt.tight_layout()
return ax
def scatter_statistics(
self,
features='mapped',
x='mean',
y='cv',
ax=None,
tight_layout=True,
legend=False,
grid=None,
**kwargs):
'''Scatter plot statistics of features.
Args:
features (list or string): List of features to plot. The string \
'mapped' means everything excluding spikeins and other, \
'all' means everything including spikeins and other.
x (string): Statistics to plot on the x axis.
y (string): Statistics to plot on the y axis.
ax (matplotlib.axes.Axes): The axes to plot into. If None \
(default), a new figure with one axes is created. ax must \
not strictly be a matplotlib class, but it must have \
common methods such as 'plot' and 'set'.
tight_layout (bool or dict): Whether to call \
matplotlib.pyplot.tight_layout at the end of the \
plotting. If it is a dict, pass it unpacked to that \
function.
legend (bool or dict): If True, call ax.legend(). If a dict, \
pass as **kwargs to ax.legend.
grid (bool or None): Whether to add a grid to the plot. None \
defaults to your existing settings.
**kwargs: named arguments passed to the plot function.
Returns:
matplotlib.axes.Axes with the axes contaiing the plot.
'''
if ax is None:
new_axes = True
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(13, 8))
else:
new_axes = False
defaults = {
's': 10,
'color': 'darkgrey',
}
Plot._update_properties(kwargs, defaults)
counts = self.dataset.counts
if features == 'total':
if not counts._otherfeatures.isin(counts.index).all():
raise ValueError('Other features not found in counts')
if not counts._spikeins.isin(counts.index).all():
raise ValueError('Spike-ins not found in counts')
pass
elif features == 'mapped':
counts = counts.exclude_features(
spikeins=True, other=True,
errors='ignore')
else:
counts = counts.loc[features]
stats = counts.get_statistics(metrics=(x, y))
ax_props = {'xlabel': x, 'ylabel': y}
x = stats.loc[:, x]
y = stats.loc[:, y]
ax.scatter(x, y, **kwargs)
if ax_props['xlabel'] == 'mean':
xmin = 0.5
xmax = 1.05 * x.max()
ax_props['xlim'] = (xmin, xmax)
ax_props['xscale'] = 'log'
elif ax_props['ylabel'] == 'mean':
ymin = 0.5
ymax = 1.05 * y.max()
ax_props['ylim'] = (ymin, ymax)
ax_props['yscale'] = 'log'
if ax_props['xlabel'] == 'cv':
xmin = 0
xmax = 1.05 * x.max()
ax_props['xlim'] = (xmin, xmax)
elif ax_props['ylabel'] == 'cv':
ymin = 0
ymax = 1.05 * y.max()
ax_props['ylim'] = (ymin, ymax)
if grid is not None:
ax.grid(grid)
ax.set(**ax_props)
if legend:
if np.isscalar(legend):
ax.legend()
else:
ax.legend(**legend)
if tight_layout:
if isinstance(tight_layout, dict):
plt.tight_layout(**tight_layout)
else:
plt.tight_layout()
def plot_distributions(
self,
features,
kind='violin',
ax=None,
tight_layout=True,
legend=False,
orientation='vertical',
sort=False,
bottom=0,
grid=None,
**kwargs):
'''Plot distribution of spike-in controls
Args:
features (list or string): List of features to plot. If it is the \
string 'spikeins', plot all spikeins, if the string \
'other', plot other features.
kind (string): Kind of plot, one of 'violin' (default), 'box', \
'swarm'.
ax (matplotlib.axes.Axes): Axes to plot into. If None (default), \
create a new figure and axes.
tight_layout (bool or dict): Whether to call \
matplotlib.pyplot.tight_layout at the end of the \
plotting. If it is a dict, pass it unpacked to that \
function.
legend (bool or dict): If True, call ax.legend(). If a dict, \
pass as **kwargs to ax.legend. Notice that legend has a \
special meaning in these kinds of seaborn plots.
orientation (string): 'horizontal' or 'vertical'.
sort (bool or string): True or 'ascending' sorts the features by \
median, 'descending' uses the reverse order.
bottom (float or string): The value of zero-count features. If \
you are using a log axis, you may want to set this to \
0.1 or any other small positive number. If a string, it \
must be 'pseudocount', then the CountsTable.pseudocount \
will be used.
grid (bool or None): Whether to add a grid to the plot. None \
defaults to your existing settings.
**kwargs: named arguments passed to the plot function.
Return:
matplotlib.axes.Axes: The axes with the plot.
'''
if ax is None:
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(18, 8))
counts = self.dataset.counts
if features == 'spikeins':
counts = counts.get_spikeins()
elif features == 'other':
counts = counts.get_other_features()
else:
counts = counts.loc[features]
if sort:
asc = sort != 'descending'
ind = counts.median(axis=1).sort_values(ascending=asc).index
counts = counts.loc[ind]
if bottom == 'pseudocount':
bottom = counts.pseudocount
counts = np.maximum(counts, bottom)
ax_props = {}
if kind == 'violin':
defaults = {
'scale': 'width',
'inner': 'stick',
}
Plot._update_properties(kwargs, defaults)
sns.violinplot(
data=counts.T,
orient=orientation,
ax=ax,
**kwargs)
elif kind == 'box':
defaults = {}
Plot._update_properties(kwargs, defaults)
sns.boxplot(
data=counts.T,
orient=orientation,
ax=ax,
**kwargs)
elif kind == 'swarm':
defaults = {}
Plot._update_properties(kwargs, defaults)
sns.swarmplot(
data=counts.T,
orient=orientation,
ax=ax,
**kwargs)
else:
raise ValueError('Plot kind not understood')
if orientation == 'vertical':
ax_props['ylim'] = (0.9 * bottom, 1.1 * counts.values.max())
if not counts._normalized:
ax_props['ylabel'] = 'Number of reads'
elif counts._normalized != 'custom':
ax_props['ylabel'] = counts._normalized.capitalize().replace('_', ' ')
for label in ax.get_xmajorticklabels():
label.set_rotation(90)
label.set_horizontalalignment("center")
ax.grid(True, 'y')
elif orientation == 'horizontal':
ax_props['xlim'] = (0.9 * bottom, 1.1 * counts.values.max())
if not counts._normalized:
ax_props['xlabel'] = 'Number of reads'
elif counts._normalized != 'custom':
ax_props['xlabel'] = counts._normalized.capitalize().replace('_', ' ')
ax.grid(True, axis='x')
ax.set(**ax_props)
if grid is not None:
ax.grid(grid)
if legend:
if np.isscalar(legend):
ax.legend()
else:
ax.legend(**legend)
if tight_layout:
if isinstance(tight_layout, dict):
plt.tight_layout(**tight_layout)
else:
plt.tight_layout()
return ax
def scatter_reduced(
        self,
        vectors_reduced,
        color_by=None,
        color_log=None,
        cmap='viridis',
        default_color='darkgrey',
        ax=None,
        tight_layout=True,
        high_on_top=False,
        **kwargs):
    '''Scatter samples or features after dimensionality reduction.

    Args:
        vectors_reduced (tuple of str or pandas.Dataframe): if a tuple of
            str, the names of the columns with the coordinates in the
            samplesheet or featuresheet. If a pandas.Dataframe, the matrix
            of coordinates of the samples/features in low dimensions. Rows
            are samples/features, columns (typically 2 or 3) are the
            component in the low-dimensional embedding.
        color_by (string or None): color sample dots by phenotype or
            expression of a certain feature.
        color_log (bool or None): use log of phenotype/expression in the
            colormap. Default None only logs expression, but not
            phenotypes.
        cmap (string or matplotlib colormap): color map to use for the
            sample dots. For categorical coloring, a palette with the
            right number of colors or more can be passed.
        default_color (str or matplotlib color): default color for missing
            categories, NaNs, and no coloring at all.
        ax (matplotlib.axes.Axes): The axes to plot into. If None
            (default), a new figure with one axes is created. ax must
            not strictly be a matplotlib class, but it must have
            common methods such as 'plot' and 'set'.
        tight_layout (bool or dict): Whether to call
            matplotlib.pyplot.tight_layout at the end of the
            plotting. If it is a dict, pass it unpacked to that
            function.
        high_on_top (bool): Plot high expression/phenotype values on top.
            This argument is ignored for categorical phenotypes.
        **kwargs: named arguments passed to the plot function.

    Returns:
        matplotlib.axes.Axes with the axes containing the plot.

    NOTE: if a categorical colormap is used, the mapping of category to
    color is stored into ax._singlet_cmap.
    '''
    # Figure out whether the coordinates refer to samples or features by
    # matching column names (tuple input) or the index (DataFrame input).
    if isinstance(vectors_reduced, tuple):
        if pd.Index(vectors_reduced).isin(self.dataset.samplesheet.columns).all():
            vectors_reduced = self.dataset.samplesheet[list(vectors_reduced)]
            data = self.dataset.counts
            metadata = self.dataset.samplesheet
        elif pd.Index(vectors_reduced).isin(self.dataset.featuresheet.columns).all():
            vectors_reduced = self.dataset.featuresheet[list(vectors_reduced)]
            data = self.dataset.counts.T
            metadata = self.dataset.featuresheet
        else:
            raise ValueError('reduced_vectors is not consistent with samples nor features')
    else:
        if (vectors_reduced.index == self.dataset.samplesheet.index).all():
            data = self.dataset.counts
            metadata = self.dataset.samplesheet
        elif (vectors_reduced.index == self.dataset.featuresheet.index).all():
            data = self.dataset.counts.T
            metadata = self.dataset.featuresheet
        else:
            raise ValueError('reduced_vectors is not consistent with samples nor features')
    if ax is None:
        fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(13, 8))
    defaults = {
        's': 90,
    }
    Plot._update_properties(kwargs, defaults)
    # "tiers" drive the z-ordering: by default everything is in one tier;
    # high_on_top with numeric coloring splits points into quartile tiers.
    tiers = np.ones(vectors_reduced.shape[0])
    if color_by is None:
        kwargs['color'] = default_color
    else:
        if isinstance(cmap, str):
            cmap = cm.get_cmap(cmap)
        # color_by may name a metadata column (phenotype) or a row of the
        # counts matrix (feature expression).
        if color_by in metadata.columns:
            color_data = metadata.loc[:, color_by]
            if hasattr(color_data, 'cat'):
                color_is_numeric = False
            else:
                color_is_numeric = np.issubdtype(color_data.dtype, np.number)
            color_by_phenotype = True
        elif color_by in data.index:
            color_data = data.loc[color_by]
            color_is_numeric = True
            color_by_phenotype = False
        else:
            raise ValueError(
                'The label '+color_by+' is neither a phenotype nor a feature')
        # Categorical columns get just a list or a dict of colors
        if (hasattr(color_data, 'cat')) or (not color_is_numeric):
            cd_unique = list(np.unique(color_data.values))
            if callable(cmap):
                # Spread categories evenly over the colormap range
                c_unique = cmap(np.linspace(0, 1, len(cd_unique)))
            elif isinstance(cmap, dict):
                c_unique = np.asarray([cmap.get(x, default_color) for x in cd_unique])
            else:
                c_unique = np.asarray(cmap)
            # Assign the actual colors to categories. Missing ones default
            c = c_unique[[cd_unique.index(x) for x in color_data.values]]
            # For categories, we have to tell the user about the mapping
            if not hasattr(ax, '_singlet_cmap'):
                ax._singlet_cmap = {}
            ax._singlet_cmap.update(dict(zip(cd_unique, c_unique)))
        # Non-categorical numeric types are more tricky: check for NaNs
        else:
            if np.isnan(color_data.values).any():
                unmask = ~np.isnan(color_data.values)
            else:
                unmask = np.ones(len(color_data), bool)
            cd_min = color_data.values[unmask].min()
            cd_max = color_data.values[unmask].max()
            if color_log is None:
                # Default: log expression values, but not phenotypes
                color_log = not color_by_phenotype
            if color_log:
                if color_by_phenotype:
                    # No dataset pseudocount for phenotypes: use 10% of min
                    pc = 0.1 * cd_min
                else:
                    pc = self.dataset.counts.pseudocount
                color_data = np.log10(color_data + pc)
                cd_min = np.log10(cd_min + pc)
                cd_max = np.log10(cd_max + pc)
            # Normalize to [0, 1] for the colormap (NaNs excluded)
            cd_norm = (color_data.values - cd_min) / (cd_max - cd_min)
            if high_on_top:
                # Quartile tiers; later tiers are plotted later (on top)
                tiers = pd.qcut(
                    cd_norm, np.linspace(0, 1, 5),
                    retbins=False, labels=False,
                    duplicates='drop',
                )
            c = np.zeros((len(color_data), 4), float)
            c[unmask] = cmap(cd_norm[unmask])
            # Grey-ish semitransparency for NaNs
            c[~unmask] = mpl.colors.to_rgba(default_color, alpha=0.3)
        kwargs['c'] = c
    # Plot one scatter call per tier, lowest tier first.
    tiers_unique = np.sort(np.unique(tiers))
    for t in tiers_unique:
        ind = tiers == t
        vec = vectors_reduced.loc[ind]
        kw = dict(kwargs)
        if 'c' in kw:
            # NOTE(review): this subsets the color array only when its
            # length equals the tier subset; with multiple tiers the
            # full-length array seems to be passed unsliced — confirm
            # whether the condition should compare against the full length.
            if (not isinstance(kw['c'], str)) and (len(kw['c']) == len(vec)):
                kw['c'] = kw['c'][ind]
        if not np.isscalar(kw['s']):
            kw['s'] = kw['s'][ind]
        vec.plot(
            x=vec.columns[0],
            y=vec.columns[1],
            kind='scatter',
            ax=ax,
            **kw)
    ax.grid(True)
    if tight_layout:
        if isinstance(tight_layout, dict):
            plt.tight_layout(**tight_layout)
        else:
            plt.tight_layout()
    return ax
def scatter_reduced_samples(
        self,
        vectors_reduced,
        color_by=None,
        color_log=None,
        cmap='viridis',
        ax=None,
        tight_layout=True,
        **kwargs):
    '''Scatter samples after dimensionality reduction.

    Thin convenience wrapper around scatter_reduced, which auto-detects
    from the coordinate index that the points are samples.

    Args:
        vectors_reduced (pandas.Dataframe): matrix of sample coordinates
            after dimensionality reduction. Rows are samples, columns
            (typically 2 or 3) are the components of the low-dimensional
            embedding.
        color_by (string or None): color sample dots by phenotype or
            expression of a certain feature.
        color_log (bool or None): use log of phenotype/expression in the
            colormap. Default None only logs expression, but not
            phenotypes.
        cmap (string or matplotlib colormap): color map to use for the
            sample dots. For categorical coloring, a palette with at
            least as many colors as categories can be passed.
        ax (matplotlib.axes.Axes): The axes to plot into. If None
            (default), a new figure with one axes is created. ax must
            not strictly be a matplotlib class, but it must have
            common methods such as 'plot' and 'set'.
        tight_layout (bool or dict): Whether to call
            matplotlib.pyplot.tight_layout at the end of the plotting.
            If it is a dict, pass it unpacked to that function.
        **kwargs: named arguments passed to the plot function.

    Returns:
        matplotlib.axes.Axes with the axes containing the plot.
    '''
    return self.scatter_reduced(
        vectors_reduced,
        color_by=color_by,
        color_log=color_log,
        cmap=cmap,
        ax=ax,
        tight_layout=tight_layout,
        **kwargs)
def _clustermap_annotation_colors(self, annotations, resolve):
    '''Build annotation colors and colorbar descriptors for clustermap.

    Args:
        annotations (dict): maps annotation name to a palette/colormap or
            the name of one (qualitative annotations take palettes,
            sequential ones take colormaps).
        resolve (callable): given an annotation key, returns a pair
            (color_data, cmap_type) where color_data is a pandas Series of
            values to color by and cmap_type is 'qualitative' or
            'sequential'.

    Returns:
        (colors, cbars): colors is a pandas DataFrame of colors (rows are
        samples/features, columns are annotation names) suitable for
        seaborn.clustermap row_colors/col_colors; cbars is a list of dicts
        describing one colorbar per annotation.
    '''
    cbars = []
    cols = []
    index = None
    for key, val in annotations.items():
        color_data, cmap_type = resolve(key)
        # All annotations share the same index; remember it for the frame
        index = color_data.index
        if cmap_type == 'qualitative':
            cd_unique = list(np.unique(color_data.values))
            n_colors = len(cd_unique)
            if isinstance(val, str):
                palette = sns.color_palette(val, n_colors=n_colors)
            else:
                # BUGFIX: bind the palette BEFORE checking its length (the
                # original checked a stale/undefined "palette" variable).
                palette = val
                if len(palette) < n_colors:
                    raise ValueError(
                        'Palettes must have as many colors as there are categories')
            c = [palette[cd_unique.index(x)] for x in color_data.values]
            # BUGFIX: always use the 'ticklabels' key; the colorbar drawing
            # code reads cbi['ticklabels'] (the original stored 'ticks' for
            # explicit palettes, causing a KeyError).
            cbi = {'name': key, 'palette': list(palette)[:n_colors],
                   'ticklabels': cd_unique,
                   'type': 'qualitative',
                   'n_colors': n_colors}
        else:
            if isinstance(val, str):
                cmap = cm.get_cmap(val)
            else:
                cmap = val
            vmax = np.nanmax(color_data.values)
            vmin = np.nanmin(color_data.values)
            # Normalize values to [0, 1] before mapping through the colormap
            cval = (color_data.values - vmin) / (vmax - vmin)
            c = cmap(cval)
            cbi = {'name': key, 'cmap': cmap,
                   'vmin': vmin, 'vmax': vmax,
                   'type': 'sequential'}
        cols.append(c)
        cbars.append(cbi)
    colors = pd.DataFrame(
        data=[list(a) for a in cols],
        columns=index,
        index=annotations.keys()).T
    return colors, cbars

def _clustermap_draw_colorbars(self, g, cbars, orientation_cb):
    '''Add one small colorbar per annotation to a ClusterGrid figure.

    Args:
        g: the seaborn ClusterGrid to draw into.
        cbars (list of dict): colorbar descriptors as produced by
            _clustermap_annotation_colors.
        orientation_cb (str): 'horizontal' (bars along the top) or
            'vertical' (bars along the left side).

    Side effect: the list of colorbar axes is stored on
    g.ax_cbars_columns (horizontal) or g.ax_cbars_rows (vertical).
    '''
    n_cbars = len(cbars)
    caxs = []
    # Lay the bars out from the top-right corner (horizontal) or the
    # top-left corner going down (vertical), 0.05 figure units apart.
    if orientation_cb == 'horizontal':
        wcb = min(0.3, 0.4 / n_cbars)
        xcb = 0.98 - wcb * n_cbars - 0.05 * (n_cbars - 1)
    else:
        hcb = min(0.3, 0.4 / n_cbars)
        ycb = 0.98 - hcb
    for cbi in cbars:
        if orientation_cb == 'horizontal':
            cax = g.fig.add_axes((xcb, 0.955, wcb, 0.025))
        else:
            cax = g.fig.add_axes((0.01, ycb, 0.02, hcb))
        caxs.append(cax)
        kw = {}
        if cbi['type'] == 'sequential':
            kw['norm'] = mpl.colors.Normalize(
                vmin=cbi['vmin'], vmax=cbi['vmax'])
            cb = mpl.colorbar.ColorbarBase(
                cax,
                cmap=cbi['cmap'],
                orientation=orientation_cb,
                **kw)
        else:
            n_colors = cbi['n_colors']
            # Equal-width color patches with one tick centered in each
            bounds = [1.0 * i / n_colors for i in range(n_colors + 1)]
            ticks = [(2.0 * i + 1) / (n_colors * 2) for i in range(n_colors)]
            kw['norm'] = mpl.colors.Normalize(vmin=0, vmax=1)
            cmap = mpl.colors.ListedColormap(cbi['palette'])
            cb = mpl.colorbar.ColorbarBase(
                cax,
                cmap=cmap,
                boundaries=bounds,
                ticks=ticks,
                orientation=orientation_cb,
                **kw)
            if orientation_cb == 'horizontal':
                cb.ax.set_xticklabels([str(x) for x in cbi['ticklabels']])
            else:
                cb.ax.set_yticklabels([str(x) for x in cbi['ticklabels']])
        cb.set_label(cbi['name'])
        if orientation_cb == 'horizontal':
            xcb += wcb + 0.05
        else:
            ycb -= hcb + 0.05
    if orientation_cb == 'horizontal':
        g.ax_cbars_columns = caxs
    else:
        g.ax_cbars_rows = caxs

def clustermap(
        self,
        cluster_samples=False,
        cluster_features=False,
        phenotypes_cluster_samples=(),
        phenotypes_cluster_features=(),
        annotate_samples=False,
        annotate_features=False,
        labels_samples=True,
        labels_features=True,
        orientation='horizontal',
        colorbars=False,
        **kwargs):
    '''Samples versus features / phenotypes.

    Args:
        cluster_samples (bool or linkage): Whether to cluster samples and
            show the dendrogram. Can be either, False, True, or a
            linkage from scipy.cluster.hierarchy.linkage.
        cluster_features (bool or linkage): Whether to cluster features
            and show the dendrogram. Can be either, False, True, or a
            linkage from scipy.cluster.hierarchy.linkage.
        phenotypes_cluster_samples (iterable of strings): Phenotypes to
            add to the features for joint clustering of the samples.
            If the clustering has been precomputed including phenotypes
            and the linkage matrix is explicitely set as cluster_samples,
            the *same* phenotypes must be specified here, in the same
            order.
        phenotypes_cluster_features (iterable of strings): Phenotypes to
            add to the features for joint clustering of the features and
            phenotypes. If the clustering has been precomputed including
            phenotypes and the linkage matrix is explicitely set as
            cluster_features, the *same* phenotypes must be specified
            here, in the same order.
        annotate_samples (dict, or False): Whether and how to annotate
            the samples with separate colorbars. The dictionary must have
            phenotypes or features as keys. For qualitative phenotypes,
            the values can be palette names or palettes (with at least as
            many colors as there are categories). For quantitative
            phenotypes and features, they can be colormap names or
            colormaps.
        annotate_features (dict, or False): Whether and how to annotate
            the features with separate colorbars. The dictionary must
            have features metadata as keys. For qualitative annotations,
            the values can be palette names or palettes (with at least as
            many colors as there are categories). For quantitative
            annotations, the values can be colormap names or colormaps.
            Keys must be columns of the Dataset.featuresheet, except for
            the key 'mean expression' which is interpreted to mean the
            average of the counts for that feature.
        labels_samples (bool): Whether to show the sample labels. If you
            have hundreds or more samples, you may want to turn this off
            to make the plot tidier.
        labels_features (bool): Whether to show the feature labels. If
            you have hundreds or more features, you may want to turn this
            off to make the plot tidier.
        orientation (string): Whether the samples are on the abscissa
            ('horizontal') or on the ordinate ('vertical').
        colorbars (bool): Whether to add colorbars. One colorbar refers
            to the heatmap. Moreover, if annotations for samples or
            features are shown, a colorbar for each of them will be shown
            as well.
        **kwargs: named arguments passed to seaborn.clustermap.

    Returns:
        A seaborn ClusterGrid instance.
    '''
    data = self.dataset.counts.copy()
    # Phenotypes co-clustered with the features are appended as rows
    for pheno in phenotypes_cluster_features:
        data.loc[pheno] = self.dataset.samplesheet.loc[:, pheno]
    # FIXME: what to do with NaN?
    if cluster_samples is True:
        cluster_samples = self.dataset.cluster.hierarchical(
            axis='samples',
            phenotypes=phenotypes_cluster_samples,
        )
        linkage_samples = cluster_samples['linkage']
    elif cluster_samples is False:
        linkage_samples = None
    else:
        # A precomputed linkage matrix was passed in
        linkage_samples = cluster_samples
    if cluster_features is True:
        cluster_features = self.dataset.cluster.hierarchical(
            axis='features',
            phenotypes=phenotypes_cluster_features,
        )
        linkage_features = cluster_features['linkage']
    elif cluster_features is False:
        linkage_features = None
    else:
        linkage_features = cluster_features
    if annotate_samples:
        def resolve_sample(key):
            # Sample annotations: a phenotype column (possibly
            # categorical) or a feature's expression (always sequential)
            if key in self.dataset.samplesheet.columns:
                color_data = self.dataset.samplesheet.loc[:, key]
                is_numeric = np.issubdtype(color_data.dtype, np.number)
                if (color_data.dtype.name == 'category') or (not is_numeric):
                    return color_data, 'qualitative'
                return color_data, 'sequential'
            return self.dataset.counts.loc[key], 'sequential'
        col_samples, cbars_samples = self._clustermap_annotation_colors(
            annotate_samples, resolve_sample)
    else:
        col_samples = None
    if annotate_features:
        def resolve_feature(key):
            # Feature annotations: featuresheet columns, or the special
            # key 'mean expression' (always numeric/sequential)
            if key == 'mean expression':
                color_data = self.dataset.counts.mean(axis=1)
            else:
                color_data = self.dataset.featuresheet.loc[:, key]
            is_numeric = np.issubdtype(color_data.dtype, np.number)
            if (color_data.dtype.name == 'category') or (not is_numeric):
                return color_data, 'qualitative'
            return color_data, 'sequential'
        col_features, cbars_features = self._clustermap_annotation_colors(
            annotate_features, resolve_feature)
    else:
        col_features = None
    # Map samples/features onto heatmap rows/columns per orientation
    if orientation == 'horizontal':
        row_linkage = linkage_features
        col_linkage = linkage_samples
        row_colors = col_features
        col_colors = col_samples
        row_labels = labels_features
        col_labels = labels_samples
        if not row_labels:
            ylabel = 'features'
        if not col_labels:
            xlabel = 'samples'
    elif orientation == 'vertical':
        data = data.T
        row_linkage = linkage_samples
        col_linkage = linkage_features
        row_colors = col_samples
        col_colors = col_features
        row_labels = labels_samples
        col_labels = labels_features
        if not row_labels:
            ylabel = 'samples'
        if not col_labels:
            xlabel = 'features'
    else:
        raise ValueError('Orientation must be "horizontal" or "vertical".')
    defaults = {
        'yticklabels': row_labels,
        'xticklabels': col_labels,
        'row_colors': row_colors,
        'col_colors': col_colors}
    if row_linkage is not None:
        defaults.update({
            'row_cluster': True,
            'row_linkage': row_linkage})
    else:
        defaults.update({'row_cluster': False})
    if col_linkage is not None:
        defaults.update({
            'col_cluster': True,
            'col_linkage': col_linkage})
    else:
        defaults.update({'col_cluster': False})
    Plot._update_properties(kwargs, defaults)
    g = sns.clustermap(
        data=data,
        **kwargs)
    ax = g.ax_heatmap
    for label in ax.get_xmajorticklabels():
        label.set_rotation(90)
        label.set_horizontalalignment("center")
    for label in ax.get_ymajorticklabels():
        label.set_rotation(0)
        label.set_verticalalignment("center")
    if not row_labels:
        ax.set_ylabel(ylabel)
    if not col_labels:
        ax.set_xlabel(xlabel)
    if colorbars:
        # The colorbar for the heatmap is shown anyway; add one small
        # colorbar per sample/feature annotation.
        if col_samples is not None:
            self._clustermap_draw_colorbars(g, cbars_samples, orientation)
        if col_features is not None:
            # Feature colorbars go on the opposite side of the sample ones
            if orientation == 'horizontal':
                orientation_cb = 'vertical'
            else:
                orientation_cb = 'horizontal'
            self._clustermap_draw_colorbars(g, cbars_features, orientation_cb)
    else:
        # Remove colorbar
        g.fig.get_axes()[-1].remove()
    # TODO: reimplement some heuristic tight_layout
    return g
def dot_plot(
        self,
        group_axis='samples',
        group_by=None,
        group_order=None,
        plot_list=None,
        color_log=None,
        vmin='min',
        vmax='max',
        threshold=10,
        min_size=2,
        layout='horizontal',
        cmap='plasma',
        ax=None,
        tight_layout=True,
        **kwargs):
    '''Group samples and plot fraction and levels of counts.

    For every group, the size of the dot indicates the fraction of samples
    in that group with counts above threshold, while the color indicates
    the average counts within the group.

    Args:
        group_axis (str): It must be 'samples' or 'features'. The former
            looks at feature counts within sample groups, the latter at
            sample counts within feature groups.
        group_by (string or None): group samples/features by metadata.
        group_order (list or None): an optional order of the groups. If
            None, an automatic order will be used.
        plot_list (list of str): the features/samples to plot.
        color_log (bool or None): use log of phenotype/expression in the
            colormap. Default None only logs expression, but not
            phenotypes.
        vmin (str or float): minimum value to scale the coloring
            with. If this is a string, it must be one of 'min' (minimum
            across plot_list), 'min_single' (minimum for each element of
            plot_list). If a float, it is used as the minimum.
        vmax (str or float): maximum value to scale the coloring
            with. If this is a string, it must be one of 'max' (maximum
            across plot_list), 'max_single' (maximum for each element of
            plot_list). If a float, it is used as the maximum.
        threshold (float): a feature/sample is considered expressed if
            its value is >= this threshold.
        min_size (float): the minimal size of a dot in the plot.
        layout (str): 'horizontal' or 'vertical'. The former has groups as
            rows, the latter as columns.
        cmap (string or matplotlib colormap): color map to use for the
            sample dots. For categorical coloring, a palette with the
            right number of colors or more can be passed.
        ax (matplotlib.axes.Axes): The axes to plot into. If None
            (default), a new figure with one axes is created. ax must
            not strictly be a matplotlib class, but it must have
            common methods such as 'plot' and 'set'.
        tight_layout (bool or dict): Whether to call
            matplotlib.pyplot.tight_layout at the end of the
            plotting. If it is a dict, pass it unpacked to that
            function.
        **kwargs: named arguments passed to the plot function.

    Returns:
        matplotlib.axes.Axes with the axes containing the plot.

    NOTE: the mappings of fraction to size and count level to color are
    stored into ax._singlet_dotmap.
    '''
    if ax is None:
        # NOTE(review): new_axes is set but never used below
        new_axes = True
        fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 8))
    else:
        new_axes = False
    defaults = {}
    Plot._update_properties(kwargs, defaults)
    # Map a fraction in [0, 1] to a dot area, with a floor of min_size
    def size_fun(fraction):
        return min_size + (fraction * 11)**2
    # Assemble a table with one row per sample (or feature), one column
    # per plot_list entry, plus the grouping column. plot_listc holds the
    # entries that are counts (as opposed to metadata columns).
    if group_axis == 'samples':
        countnames = self.dataset.featurenames
        plot_listc = [p for p in plot_list if p in countnames]
        data = self.dataset.counts.loc[plot_listc].fillna(0).T
        for count in plot_list:
            if count not in data.columns:
                data[count] = self.dataset.samplesheet[count]
        data = data.loc[:, plot_list]
        data[group_by] = self.dataset.samplesheet[group_by]
    else:
        countnames = self.dataset.samplenames
        plot_listc = [p for p in plot_list if p in countnames]
        data = self.dataset.counts.loc[:, plot_listc].fillna(0)
        for count in plot_list:
            if count not in data.columns:
                data[count] = self.dataset.featuresheet[count]
        data = data.loc[:, plot_list]
        data[group_by] = self.dataset.featuresheet[group_by]
    if group_order is None:
        groups = list(set(data[group_by]))
    else:
        groups = list(group_order)
    # One point per (plot_list entry, group) pair
    points = []
    for ig, count in enumerate(plot_list):
        gexist = data[group_by].unique()
        gby = data[[count, group_by]].groupby(group_by)
        # Log only counts by default; metadata columns stay linear
        clog = color_log or ((color_log is None) and (count in plot_listc))
        for ct in groups:
            if ct not in gexist:
                continue
            dfi = gby.get_group(ct)
            # Fraction of group members at/above the expression threshold
            frac_exp = (dfi[count] >= threshold).mean()
            if clog:
                mean_exp = np.log10(dfi[count].values + self.dataset.counts.pseudocount).mean()
            else:
                mean_exp = dfi[count].values.mean()
            point = {
                'fraction': frac_exp,
                'level': mean_exp,
                'group': ct,
                'count': count,
            }
            if layout == 'horizontal':
                point['x'] = ig
                point['y'] = groups.index(ct)
            elif layout == 'vertical':
                point['x'] = groups.index(ct)
                point['y'] = ig
            else:
                raise ValueError(
                    'Layout must be "horizontal" or "vertical"')
            points.append(point)
    points = pd.DataFrame(points)
    points.set_index(['count', 'group'], inplace=True, drop=False)
    # Set size and color based on fraction and level
    points['s'] = 0.0
    points['c'] = 0.0
    for count in plot_list:
        # Color scaling bounds: global, per-entry, or explicit floats
        if vmin == 'min':
            vm = points['level'].values.min()
        elif vmin == 'min_single':
            vm = points.loc[points['count'] == count, 'level'].values.min()
        else:
            vm = vmin
        if vmax == 'max':
            vM = points['level'].values.max()
        elif vmax == 'max_single':
            vM = points.loc[points['count'] == count, 'level'].values.max()
        else:
            vM = vmax
        for gr in groups:
            # NOTE(review): gexist is reused from the last iteration of
            # the loop above; all iterations compute the same set, so
            # this is equivalent, but it relies on loop-variable leakage
            if gr not in gexist:
                continue
            size = size_fun(points.at[(count, gr), 'fraction'])
            shade = (points.at[(count, gr), 'level'] - vm) / (vM - vm)
            points.at[(count, gr), 's'] = size
            points.at[(count, gr), 'c'] = shade
    if isinstance(cmap, str):
        cmap = cm.get_cmap(cmap)
    ax.scatter(
        points['x'].values,
        points['y'].values,
        s=points['s'].values,
        c=cmap(points['c'].values),
    )
    # Ticks: one per plot_list entry on one axis, one per group on the other
    if layout == 'horizontal':
        ax.set_xticks(np.arange(len(plot_list)))
        ax.set_xticklabels(plot_list)
        ax.set_yticks(np.arange(len(groups)))
        ax.set_yticklabels(groups)
        ax.set_xlim(-0.5, len(plot_list) - 0.5)
        ax.set_ylim(-0.5, len(groups) - 0.5)
    else:
        ax.set_yticks(np.arange(len(plot_list)))
        ax.set_yticklabels(plot_list)
        ax.set_xticks(np.arange(len(groups)))
        ax.set_xticklabels(groups)
        ax.set_ylim(-0.5, len(plot_list) - 0.5)
        ax.set_xlim(-0.5, len(groups) - 0.5)
    if tight_layout:
        if isinstance(tight_layout, dict):
            plt.tight_layout(**tight_layout)
        else:
            plt.tight_layout()
    # Expose the size/color mappings so callers can build a legend
    if not hasattr(ax, '_singlet_dotmap'):
        ax._singlet_dotmap = {
            'fraction_size_map': size_fun,
            'level_color_map': cmap,
        }
    return ax
def plot_group_abundance_changes(
        self,
        groupby,
        along,
        kind='number',
        group_order=None,
        along_order=None,
        scatter=True,
        interpolate=False,
        cmap=None,
        scatter_kwargs=None,
        interpolate_kwargs=None,
        ax=None,
        log=False,
        ymin=0,
        tight_layout=True,
        legend=False,
        ):
    '''Plot changes in sample abundance groups (e.g. in time).

    Args:
        groupby (string): column of the SampleSheet to group samples by
        along (string): column of the SampleSheet to plot abundances along
        kind (string): 'number', 'fraction', or 'percent' based on what
            kind of normalization across groups is requested
        group_order (sequence or None): optional sequence of values found
            within the "groupby" column to decide the order of the legend
        along_order (sequence or None): optional sequence of values found
            within the "along" column to decide the order of the dots
        scatter (bool): whether to show the scatter plot
        interpolate (bool): whether to show a monotonic spline
            interpolation between subsequent values in the "along" column
        cmap (dict, list, or None): a dictionary or a list of colors to
            plot the different groups. If a list, colors are paired to
            groups in the same order (see "group_order" argument)
        scatter_kwargs (dict or None): additional keyword arguments for the
            scatter plot
        interpolate_kwargs (dict or None): additional keyword arguments for
            the line plot of the interpolation
        ax (matplotlib.axes.Axes): The axes to plot into. If None
            (default), a new figure with one axes is created. ax must
            not strictly be a matplotlib class, but it must have
            common methods such as 'plot' and 'set'.
        log (False or float): whether to log the abundances. If not False,
            sets the base of the logarithm
        ymin (float): pseudocount to enable logarithmic plots of abundance
            as opposed to the default 0
        tight_layout (bool or dict): Whether to call
            matplotlib.pyplot.tight_layout at the end of the
            plotting. If it is a dict, pass it unpacked to that function.
        legend (bool or dict): If True, call ax.legend(). If a dict, pass
            as **kwargs to ax.legend.

    Returns:
        matplotlib.pyplot axes with the abundance changes

    Raises:
        ValueError: if kind is not one of 'number', 'fraction', 'percent'.
    '''
    # BUGFIX: the original did "from scipy import interpolate" here,
    # which clobbered the boolean "interpolate" parameter and forced the
    # interpolation branch on regardless of the argument (then crashed
    # updating the None interpolate_kwargs). scipy is now imported under
    # a different name inside the branch that needs it.

    # BUGFIX: default the optional kwarg dicts so dict.update never
    # receives None.
    if scatter_kwargs is None:
        scatter_kwargs = {}
    if interpolate_kwargs is None:
        interpolate_kwargs = {}

    # Count samples per (group, along) combination; absent pairs get 0
    data = self.dataset.samplesheet[[groupby, along]].copy()
    data['__count__'] = 1
    data = (data.groupby([groupby, along])
                .count()
                .loc[:, '__count__']
                .unstack(fill_value=0))
    if kind == 'fraction':
        data = 1.0 * data / data.sum(axis=0)
    elif kind == 'percent':
        data = 100.0 * data / data.sum(axis=0)
    elif kind != 'number':
        raise ValueError('kind not supported')
    # Clip from below (pseudocount) so log plots do not hit zero
    data = np.maximum(data, ymin)
    if log:
        # Change of base: log_b(x) = ln(x) / ln(b)
        data = np.log(data) / np.log(log)
    if ax is None:
        fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(13, 8))
    if group_order is not None:
        data = data.loc[group_order]
    if along_order is not None:
        data = data.loc[:, along_order]
    x = np.arange(data.shape[1])
    xorder = data.columns
    gorder = data.index
    for ig, go in enumerate(gorder):
        y = data.loc[go]
        kwargs = {}
        if isinstance(cmap, dict):
            kwargs['color'] = cmap[go]
        elif cmap is not None:
            kwargs['color'] = cmap[ig]
        # BUGFIX: honor the documented "scatter" switch (the original
        # always scattered).
        if scatter:
            sc_kwargs = kwargs.copy()
            sc_kwargs.update(scatter_kwargs)
            ax.scatter(
                x, y,
                label=go,
                **sc_kwargs,
            )
        if interpolate:
            # Local import under a distinct name: must not shadow the
            # "interpolate" boolean parameter
            from scipy.interpolate import pchip_interpolate
            outx = np.linspace(x[0], x[-1], 100)
            outy = pchip_interpolate(x, y, outx)
            in_kwargs = kwargs.copy()
            in_kwargs.update(interpolate_kwargs)
            ax.plot(outx, outy,
                    **in_kwargs,
                    )
    ax.set_xticks(x)
    ax.set_xticklabels(xorder)
    ax.set_xlabel(along)
    ax.set_ylabel('{:} of samples'.format(kind.capitalize()))
    if legend:
        if np.isscalar(legend):
            ax.legend()
        else:
            ax.legend(**legend)
    if tight_layout:
        if isinstance(tight_layout, dict):
            plt.tight_layout(**tight_layout)
        else:
            plt.tight_layout()
    return ax
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import logging
from pymatgen.core import Molecule
from pymatgen.io.qchem_io.inputs import QCInput
from pymatgen.io.qchem_io.utils import lower_and_check_unique
# Classes for building/manipulating/writing QChem input files.
__author__ = "Samuel Blau, Brandon Wood, Shyam Dwaraknath"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
logger = logging.getLogger(__name__)
class QChemDictSet(QCInput):
    """
    Build a QCInput given all the various input parameters. Can be extended by standard implementations below.
    """

    def __init__(self,
                 molecule,
                 job_type,
                 basis_set,
                 scf_algorithm,
                 dft_rung=4,
                 pcm_dielectric=None,
                 max_scf_cycles=200,
                 geom_opt_max_cycles=200,
                 overwrite_inputs=None):
        """
        Args:
            molecule (Pymatgen molecule object)
            job_type (str)
            basis_set (str)
            scf_algorithm (str)
            dft_rung (int): 1-5, selects the DFT functional (see below).
            pcm_dielectric (str): dielectric constant; if set, activates
                the PCM implicit solvent model.
            max_scf_cycles (int)
            geom_opt_max_cycles (int): only used for job_type "opt".
            overwrite_inputs (dict): This is dictionary of QChem input sections to add or overwrite variables,
                the available sections are currently rem, pcm, and solvent. So the accepted keys are rem, pcm, or solvent
                and the value is a dictionary of key value pairs relevant to the section. An example would be adding a
                new variable to the rem section that sets symmetry to false.
                ex. overwrite_inputs = {"rem": {"symmetry": "false"}}
                ***It should be noted that if something like basis is added to the rem dict it will overwrite
                the default basis.***

        Raises:
            ValueError: if dft_rung is not between 1 and 5.
        """
        self.molecule = molecule
        self.job_type = job_type
        self.basis_set = basis_set
        self.scf_algorithm = scf_algorithm
        self.dft_rung = dft_rung
        self.pcm_dielectric = pcm_dielectric
        self.max_scf_cycles = max_scf_cycles
        self.geom_opt_max_cycles = geom_opt_max_cycles
        self.overwrite_inputs = overwrite_inputs
        pcm_defaults = {
            "heavypoints": "194",
            "hpoints": "194",
            "radii": "uff",
            "theory": "cpcm",
            "vdwscale": "1.1"
        }
        mypcm = {}
        mysolvent = {}
        myrem = {}
        myrem["job_type"] = job_type
        myrem["basis"] = self.basis_set
        myrem["max_scf_cycles"] = self.max_scf_cycles
        myrem["gen_scfman"] = "true"
        myrem["scf_algorithm"] = self.scf_algorithm
        # Map the requested "rung" onto a concrete functional choice
        if self.dft_rung == 1:
            myrem["exchange"] = "B3LYP"
        elif self.dft_rung == 2:
            myrem["method"] = "B97-D3"
            myrem["dft_D"] = "D3_BJ"
        elif self.dft_rung == 3:
            myrem["method"] = "B97M-rV"
        elif self.dft_rung == 4:
            myrem["method"] = "wb97xd"
        elif self.dft_rung == 5:
            myrem["method"] = "wB97M-V"
        else:
            raise ValueError("dft_rung should be between 1 and 5!")
        if self.job_type.lower() == "opt":
            myrem["geom_opt_max_cycles"] = self.geom_opt_max_cycles
        # BUGFIX: identity comparison with None ("!= None" is unidiomatic
        # and can misbehave with overloaded __ne__); also copy the
        # defaults dict so overwrite_inputs cannot mutate pcm_defaults.
        if self.pcm_dielectric is not None:
            mypcm = dict(pcm_defaults)
            mysolvent["dielectric"] = self.pcm_dielectric
            myrem["solvent_method"] = 'pcm'
        if self.overwrite_inputs:
            for sec, sec_dict in self.overwrite_inputs.items():
                if sec == "rem":
                    temp_rem = lower_and_check_unique(sec_dict)
                    for k, v in temp_rem.items():
                        myrem[k] = v
                if sec == "pcm":
                    temp_pcm = lower_and_check_unique(sec_dict)
                    for k, v in temp_pcm.items():
                        mypcm[k] = v
                if sec == "solvent":
                    temp_solvent = lower_and_check_unique(sec_dict)
                    for k, v in temp_solvent.items():
                        mysolvent[k] = v
        super(QChemDictSet, self).__init__(
            self.molecule, rem=myrem, pcm=mypcm, solvent=mysolvent)
class OptSet(QChemDictSet):
    """
    QChemDictSet for a geometry optimization.
    """

    def __init__(self,
                 molecule,
                 dft_rung=4,
                 basis_set="6-311++G*",
                 pcm_dielectric=None,
                 scf_algorithm="diis",
                 max_scf_cycles=200,
                 geom_opt_max_cycles=200,
                 overwrite_inputs=None):
        # Record the set-specific defaults, then delegate to the generic
        # constructor with the job type pinned to "opt".
        self.basis_set = basis_set
        self.scf_algorithm = scf_algorithm
        self.max_scf_cycles = max_scf_cycles
        self.geom_opt_max_cycles = geom_opt_max_cycles
        super(OptSet, self).__init__(
            molecule=molecule,
            job_type="opt",
            basis_set=basis_set,
            scf_algorithm=scf_algorithm,
            dft_rung=dft_rung,
            pcm_dielectric=pcm_dielectric,
            max_scf_cycles=max_scf_cycles,
            geom_opt_max_cycles=geom_opt_max_cycles,
            overwrite_inputs=overwrite_inputs)
class SinglePointSet(QChemDictSet):
    """Q-Chem input set for a single point calculation (``job_type = sp``)."""

    def __init__(self,
                 molecule,
                 dft_rung=4,
                 basis_set="6-311++G*",
                 pcm_dielectric=None,
                 scf_algorithm="diis",
                 max_scf_cycles=200,
                 overwrite_inputs=None):
        # Record the user-facing settings, then delegate all input-file
        # assembly to the QChemDictSet base class.
        self.basis_set = basis_set
        self.scf_algorithm = scf_algorithm
        self.max_scf_cycles = max_scf_cycles
        super(SinglePointSet, self).__init__(
            molecule=molecule,
            job_type="sp",
            dft_rung=dft_rung,
            pcm_dielectric=pcm_dielectric,
            basis_set=basis_set,
            scf_algorithm=scf_algorithm,
            max_scf_cycles=max_scf_cycles,
            overwrite_inputs=overwrite_inputs)
class FreqSet(QChemDictSet):
    """
    QChemDictSet for a frequency calculation.

    (The previous docstring said "single point calculation", which was a
    copy/paste error: this set submits a ``freq`` job.)
    """

    def __init__(self,
                 molecule,
                 dft_rung=4,
                 basis_set="6-311++G*",
                 pcm_dielectric=None,
                 scf_algorithm="diis",
                 max_scf_cycles=200,
                 overwrite_inputs=None):
        # Record the user-facing settings, then delegate all input-file
        # assembly to the QChemDictSet base class.
        self.basis_set = basis_set
        self.scf_algorithm = scf_algorithm
        self.max_scf_cycles = max_scf_cycles
        super(FreqSet, self).__init__(
            molecule=molecule,
            job_type="freq",
            dft_rung=dft_rung,
            pcm_dielectric=pcm_dielectric,
            basis_set=self.basis_set,
            scf_algorithm=self.scf_algorithm,
            max_scf_cycles=self.max_scf_cycles,
            overwrite_inputs=overwrite_inputs)
| |
"""Process the IDOT RWIS Data files"""
# stdlib
import datetime
import json
import os
import sys
import subprocess
# third party
import requests
import pandas as pd
import numpy as np
from pyiem.tracker import TrackerEngine
from pyiem.network import Table as NetworkTable
from pyiem.observation import Observation
from pyiem import util
# Module-level singletons.  NOTE: opening two database connections at import
# time is a deliberate side effect -- this script runs as a one-shot job.
LOG = util.logger()
NT = NetworkTable("IA_RWIS")
IEM = util.get_dbconn("iem")
PORTFOLIO = util.get_dbconn("portfolio")
# Map of IDOT RWIS remote_id (zero-padded two-digit string) to the
# METAR-style identifier used when gen_metars() is called with convids=True.
RWIS2METAR = {
    "00": "XADA",
    "01": "XALG",
    "02": "XATN",
    "03": "XALT",
    "04": "XAME",
    "05": "XANK",
    "06": "XAVO",
    "07": "XBUR",
    "08": "XCAR",
    "09": "XCDR",
    "10": "XCID",
    "11": "XCEN",
    "12": "XCOU",
    "13": "XCRE",
    "14": "XDAV",
    "15": "XDEC",
    "16": "XDSM",
    "17": "XDES",
    "18": "XDST",
    "19": "XDEW",
    "20": "XDUB",
    "21": "XFOD",
    "22": "XGRI",
    "23": "XIAC",
    "24": "XIOW",
    "25": "XJEF",
    "26": "XLEO",
    "27": "XMAN",
    "28": "XMAQ",
    "29": "XMAR",
    "30": "XMCW",
    "31": "XMIS",
    "32": "XMOU",
    "33": "XNEW",
    "34": "XONA",
    "35": "XOSC",
    "36": "XOTT",
    "37": "XPEL",
    "38": "XRED",
    "39": "XSID",
    "40": "XSIG",
    "41": "XSIO",
    "42": "XSPE",
    "43": "XSTO",
    "44": "XTIP",
    "45": "XURB",
    "46": "XWAT",
    "47": "XWIL",
    "48": "XWBG",
    "49": "XHAN",
    "50": "XSBI",
    "51": "XIGI",
    "52": "XCRI",
    "53": "XCFI",
    "54": "XSYI",
    "55": "XBFI",
    "56": "XDYI",
    "57": "XTMI",
    "58": "XPFI",
    "59": "XCTI",
    "60": "XDNI",
    "61": "XQCI",
    "62": "XSMI",
    "63": "XRWI",
    "64": "XETI",
    "65": "XCCI",
    "66": "XKSI",
    "67": "XKNI",
    "68": "XCMI",
    "69": "XRGI",
    "70": "XKYI",
    # NOTE(review): "72" maps to the same id as "59" (XCTI) and "71" is
    # missing entirely -- looks like a possible typo upstream; confirm.
    "72": "XCTI",
}
# ArcGIS REST endpoints; the where=STATUS%3D1 clause filters to active sites.
ATMOS_URI = (
    "https://services.arcgis.com/8lRhdTsQyJpO52F1/arcgis/rest/services/"
    "RWIS_Atmospheric_Data_View/FeatureServer/0/query?where=STATUS%3D1"
    "&f=json&outFields=DATA_LAST_UPDATED,AIR_TEMP,RELATIVE_HUMIDITY,DEW_POINT,"
    "VISIBILITY,AVG_WINDSPEED_KNOTS,MAX_WINDSPEED_KNOTS,WIND_DIRECTION_DEG,"
    "PRECIPITATION_RATE,PRECIPITATION_ACCUMULATION,NWS_ID"
)
SURFACE_URI = (
    "https://services.arcgis.com/8lRhdTsQyJpO52F1/arcgis/rest/services/"
    "RWIS_Surface_Data_View/FeatureServer/0/query?where=STATUS%3D1&f=json&"
    "outFields=NWS_ID,SURFACE_CONDITION,SURFACE_TEMP,ICE_PERCENTAGE,"
    "FREEZE_TEMP,SENSOR_ID,FrictionIndex,DATA_LAST_UPDATED"
)
def merge(atmos, surface):
    """Merge the surface data into the atmospheric one, return a dict.

    Args:
        atmos (DataFrame): atmospherics
        surface (DataFrame): surface data

    Returns:
        dictionary of per-station values keyed by NWSLI
    """
    atmos = atmos.set_index("NWS_ID")
    # pivot: one row per station, one column per (variable, sensor id) pair
    surface["SENSOR_ID"] = surface["SENSOR_ID"].astype(int)
    surface = surface.pivot(
        index="NWS_ID",
        columns="SENSOR_ID",
        values=[
            "valid",
            "SURFACE_CONDITION",
            "SURFACE_TEMP",
            "ICE_PERCENTAGE",
            "FREEZE_TEMP",
            "FrictionIndex",
        ],
    )
    # Collapse the pivot's MultiIndex columns into ("VAR", sensor_id) tuples
    # so they don't collide with atmos column names after the join.
    surface.columns = surface.columns.to_flat_index()
    df = atmos.join(surface)
    LOG.debug("We have %s rows of data", len(df.index))
    data = {}
    for nwsli, row in df.iterrows():
        if nwsli not in NT.sts:
            LOG.debug("station %s is unknown to us, skipping", nwsli)
            continue
        # row["valid"] is the atmospheric timestamp; the surface timestamps
        # are tuple-keyed ("valid", sensor_id) after the pivot above.
        data[nwsli] = {
            "valid": row["valid"].to_pydatetime(),
            "tmpf": row["AIR_TEMP"],
            "dwpf": row["DEW_POINT"],
            "relh": row["RELATIVE_HUMIDITY"],
            "sknt": row["AVG_WINDSPEED_KNOTS"],
            "gust": row["MAX_WINDSPEED_KNOTS"],
            "drct": row["WIND_DIRECTION_DEG"],
            "pday": row["PRECIPITATION_ACCUMULATION"],
        }
        # assumes pavement sensor ids are 0-3 -- TODO confirm against feed
        for sid in range(4):
            try:
                data[nwsli][f"scond{sid}"] = row[("SURFACE_CONDITION", sid)]
                data[nwsli][f"tsf{sid}"] = row[("SURFACE_TEMP", sid)]
            except KeyError as exp:
                LOG.info("KeyError raised for nwsli: '%s' %s", nwsli, exp)
    return data
def do_iemtracker(obs):
    """Run IEM Tracker bookkeeping (outage emails) for these observations."""
    cutoff = util.utc() - datetime.timedelta(hours=3)
    engine = TrackerEngine(IEM.cursor(), PORTFOLIO.cursor())
    engine.process_network(obs, "iarwis", NT, cutoff)
    engine.send_emails()
    IEM.commit()
    PORTFOLIO.commit()
def METARtemp(val):
    """Encode a temperature value as METAR body and T-group strings.

    Returns a tuple ``(m_temp, t_temp)``: the whole-degree body encoding
    (``M`` prefix for sub-zero) and the tenths-precision T-group digits
    (leading ``1`` for sub-zero, ``0`` otherwise).
    """
    as_float = float(val)
    whole = int(round(as_float, 0))
    tenths = int(round(as_float * 10.0, 0))
    if whole < 0:
        m_temp = "M%02i" % (-whole,)
    else:
        m_temp = "%02i" % (whole,)
    if tenths < 0:
        t_temp = "1%03i" % (-tenths,)
    else:
        t_temp = "0%03i" % (tenths,)
    return m_temp, t_temp
def METARwind(sknt, drct, gust):
    """Encode wind speed/direction (and optional gust) as a METAR group."""
    direction = drct
    # RWIS directions can end in 5; round down to a tens-of-degrees value.
    if str(direction)[-1] == "5":
        direction -= 5
    parts = ["%03.0f%02.0f" % (direction, sknt)]
    if gust is not None:
        parts.append("G%02.0f" % (gust,))
    parts.append("KT")
    return "".join(parts)
def gen_metars(obs, filename, convids=False):
    """Create METAR Data files.

    Args:
        obs (dict): mapping of NWSLI -> observation dict
        filename (str): filename to write data to
        convids (bool): should we use special logic for ID conversion
    """
    mtime = util.utc().strftime("%d%H%M")
    thres = util.utc() - datetime.timedelta(hours=3)
    with open(filename, "w", encoding="utf-8") as fp:
        fp.write("\001\015\015\012001\n")
        fp.write(f"SAUS43 KDMX {mtime}\015\015\012METAR\015\015\012")
        for sid in obs:
            ob = obs[sid]
            # Skip stale obs and stations excluded from METAR output.
            if ob["valid"] < thres:
                continue
            if sid in ["RIOI4", "ROSI4", "RSMI4", "RMCI4"]:
                continue
            metarid = sid[:4]
            remoteid = NT.sts[sid]["remote_id"]
            if remoteid is None:
                LOG.info("nwsli: %s is unknown remote_id", sid)
                continue
            if convids:
                metarid = RWIS2METAR.get("%02i" % (remoteid,), "XXXX")
            temptxt = ""
            t_temptxt = ""
            windtxt = ""
            if ob.get("sknt") is not None and ob.get("drct") is not None:
                windtxt = METARwind(ob["sknt"], ob["drct"], ob.get("gust"))
            # BUG FIX: these lookups previously read from the outer ``obs``
            # dict (which is keyed by station id, so .get("tmpf") was always
            # None) -- the temperature group was never emitted.  Read the
            # per-station ``ob`` dict instead.
            if ob.get("tmpf") is not None and ob.get("dwpf") is not None:
                m_tmpc, t_tmpc = METARtemp(
                    util.convert_value(ob["tmpf"], "degF", "degC")
                )
                m_dwpc, t_dwpc = METARtemp(
                    util.convert_value(ob["dwpf"], "degF", "degC")
                )
                temptxt = "%s/%s" % (m_tmpc, m_dwpc)
                t_temptxt = "T%s%s " % (t_tmpc, t_dwpc)
            fp.write(
                ("%s %s %s %s RMK AO2 %s%s\015\015\012" "")
                % (
                    metarid,
                    ob["valid"].strftime("%d%H%MZ"),
                    windtxt,
                    temptxt,
                    t_temptxt,
                    "=",
                )
            )
        # Product trailer (ETX).
        fp.write("\015\015\012\003")
def update_iemaccess(obs):
    """Persist the observations into the IEMAccess database."""
    icursor = IEM.cursor()
    for sid, ob in obs.items():
        iemob = Observation(sid, "IA_RWIS", ob["valid"])
        for varname, value in ob.items():
            if varname == "valid":
                continue
            # Don't insert missing values into iemaccess.
            if value is None:
                continue
            if isinstance(value, str):
                # strings would fail the isnan check below
                iemob.data[varname] = value
            elif not np.isnan(value):
                iemob.data[varname] = value
        iemob.save(icursor)
    icursor.close()
    IEM.commit()
def process_features(features):
    """Build a DataFrame from a list of ArcGIS feature attribute dicts.

    ``DATA_LAST_UPDATED`` (epoch milliseconds) becomes a tz-aware ``valid``
    timestamp, and the feed's 9999 sentinel is replaced with NaN.
    """
    epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
    rows = []
    for feat in features:
        attrs = feat["attributes"]
        attrs["valid"] = epoch + datetime.timedelta(
            seconds=attrs["DATA_LAST_UPDATED"] / 1000.0
        )
        rows.append(attrs)
    return pd.DataFrame(rows).replace({9999: np.nan})
def fetch(uri):
    """Download the files we need.

    Exits the process (``sys.exit``) when the request fails outright or the
    payload has no ``features`` key, since nothing downstream can proceed
    without data.
    """
    res = util.exponential_backoff(requests.get, uri, timeout=30)
    if res is None:
        LOG.info("failed to fetch %s", uri)
        sys.exit()
    data = res.json()
    if "features" not in data:
        # Log the full payload so the upstream service problem is diagnosable.
        LOG.info(
            "Got status_code: %s for %s, invalid result of: %s",
            res.status_code,
            uri,
            json.dumps(data, sort_keys=True, indent=4, separators=(",", ": ")),
        )
        sys.exit()
    return process_features(data["features"])
def ldm_insert_metars(fn1, fn2):
    """Insert the two generated METAR files into LDM, then remove them.

    Args:
        fn1 (str): first temporary filename to insert
        fn2 (str): second temporary filename to insert
    """
    for fn in [fn1, fn2]:
        # Pass an argument list (no shell) so the filename reaches pqinsert
        # verbatim; subprocess.run also waits for completion, replacing the
        # previous Popen + os.waitpid pair.
        try:
            subprocess.run(
                ["pqinsert", "-p", fn.replace("/tmp/", ""), fn],
                check=False,
                stderr=subprocess.PIPE,
                stdout=subprocess.PIPE,
            )
        except FileNotFoundError:
            # Preserve the old best-effort behavior when pqinsert is absent.
            LOG.info("pqinsert not found, unable to insert %s", fn)
        os.unlink(fn)
def main():
    """Go Main Go"""
    atmos = fetch(ATMOS_URI)
    surface = fetch(SURFACE_URI)
    # Both feeds are required; bail early if either came back empty.
    if atmos.empty or surface.empty:
        LOG.info(
            "FAIL, empty dataframe atmos sz:%s, surface sz:%s",
            len(atmos.index),
            len(surface.index),
        )
        return
    obs = merge(atmos, surface)
    do_iemtracker(obs)
    # Write the METAR product twice: raw ids and converted (convids) ids.
    ts = util.utc().strftime("%d%H%M")
    fn1 = f"/tmp/IArwis{ts}.sao"
    fn2 = f"/tmp/IA.rwis{ts}.sao"
    gen_metars(obs, fn1, False)
    gen_metars(obs, fn2, True)
    ldm_insert_metars(fn1, fn2)
    update_iemaccess(obs)
if __name__ == "__main__":
    main()
    # Final safety commits for any cursor work left open by the helpers.
    IEM.commit()
    PORTFOLIO.commit()
| |
# -*- encoding: utf-8 -*-
'''
Hubble Nova plugin for running arbitrary commands and checking the output of
those commands
This module is deprecated, and must be explicitly enabled in pillar/minion
config via the hubblestack:nova:enable_command_module (should be set to True
to enable this module). This allows nova to run arbitrary commands via yaml
profiles.
:maintainer: HubbleStack / basepi
:maturity: 2016.7.0
:platform: All
:requires: SaltStack
Sample YAML data, with inline comments:
# Top level key lets the module know it should look at this data
command:
# Unique ID for this set of audits
nodev:
data:
# 'osfinger' grain, for multiplatform support
'Red Hat Enterprise Linux Server-6':
# tag is required
tag: CIS-1.1.10
# `commands` is a list of commands with individual flags
commands:
# Command to be run
- 'grep "[[:space:]]/home[[:space:]]" /etc/fstab':
# Check the output for this pattern
# If match_output not provided, any output will be a match
match_output: nodev
# Use regex when matching the output (default False)
match_output_regex: False
# Invert the success criteria. If True, a match will cause failure (default False)
fail_if_matched: False
- 'mount | grep /home':
match_output: nodev
match_output_regex: False
# Match each line of the output against our pattern
# Any that don't match will make the audit fail (default False)
match_output_by_line: True
- ?
|
echo 'this is a multi-line'
echo 'bash script'
echo 'note the special ? syntax'
:
# Shell through which the script will be run, must be abs path
shell: /bin/bash
match_output: this
# Aggregation strategy for multiple commands. Defaults to 'and', other option is 'or'
aggregation: 'and'
# Catch-all, if no other osfinger match was found
'*':
tag: generic_tag
commands:
- 'grep "[[:space:]]/home[[:space:]]" /etc/fstab':
match_output: nodev
match_output_regex: False
fail_if_matched: False
- 'mount | grep /home':
match_output: nodev
match_output_regex: False
match_output_by_line: True
aggregation: 'and'
# Description will be output with the results
description: '/home should be nodev'
'''
from __future__ import absolute_import
import logging
import fnmatch
import yaml
import os
import copy
import re
import salt.utils
log = logging.getLogger(__name__)
def __virtual__():
    """Only load this audit module on non-Windows platforms."""
    if not salt.utils.is_windows():
        return True
    return False, 'This audit module only runs on linux'
def audit(data_list, tags, verbose=False, show_profile=False, debug=False):
    '''
    Run the command audits contained in the data_list

    Args:
        data_list: list of (profile, data) pairs of loaded yaml profiles
        tags: fnmatch glob selecting which tags to run
        verbose: include full tag data in the results instead of summaries
        show_profile: record the source profile name on each entry
        debug: log the merged data and tag map
    '''
    # Merge every profile's 'command' entries into one dict.
    __data__ = {}
    for profile, data in data_list:
        if show_profile:
            _merge_yaml(__data__, data, profile)
        else:
            _merge_yaml(__data__, data)
    __tags__ = _get_tags(__data__)
    if debug:
        log.debug('command audit __data__:')
        log.debug(__data__)
        log.debug('command audit __tags__:')
        log.debug(__tags__)
    ret = {'Success': [], 'Failure': [], 'Controlled': []}
    # Safety gate: this module runs arbitrary commands, so it must be
    # explicitly enabled in config before anything is executed.
    if __tags__ and not __salt__['config.get']('hubblestack:nova:enable_command_module',
                                               False):
        ret['Error'] = ['command module has not been explicitly enabled in '
                        'config. Please set hubblestack:nova:enable_command_module '
                        'to True in pillar or minion config to allow this module.']
        return ret
    for tag in __tags__:
        if fnmatch.fnmatch(tag, tags):
            for tag_data in __tags__[tag]:
                # 'control' entries are counted but never executed.
                if 'control' in tag_data:
                    ret['Controlled'].append(tag_data)
                    continue
                if 'commands' not in tag_data:
                    continue
                command_results = []
                for command_data in tag_data['commands']:
                    # .iteritems(): this module targets Python 2 (Salt-era).
                    for command, command_args in command_data.iteritems():
                        if 'shell' in command_args:
                            cmd_ret = __salt__['cmd.run'](command,
                                                          python_shell=True,
                                                          shell=command_args['shell'])
                        else:
                            cmd_ret = __salt__['cmd.run'](command,
                                                          python_shell=True)
                        # With no match_output, any output at all is a match.
                        found = False
                        if cmd_ret:
                            found = True
                        if 'match_output' in command_args:
                            if command_args.get('match_output_by_line'):
                                cmd_ret_lines = cmd_ret.splitlines()
                            else:
                                cmd_ret_lines = [cmd_ret]
                            # Every line must match, or the whole check fails.
                            for line in cmd_ret_lines:
                                if command_args.get('match_output_regex'):
                                    if not re.match(command_args['match_output'], line):
                                        found = False
                                else:  # match without regex
                                    if command_args['match_output'] not in line:
                                        found = False
                        if command_args.get('fail_if_matched'):
                            found = not found
                        command_results.append(found)
                # Combine per-command results: 'or' = any, default 'and' = all.
                aggregation = tag_data.get('aggregation', 'and')
                if aggregation.lower() == 'or':
                    if any(command_results):
                        ret['Success'].append(tag_data)
                    else:
                        ret['Failure'].append(tag_data)
                else:  # assume 'and' if it's not 'or'
                    if all(command_results):
                        ret['Success'].append(tag_data)
                    else:
                        ret['Failure'].append(tag_data)
    failure = []
    success = []
    controlled = []
    if not verbose:
        # Pull out just the tag and description
        tags_descriptions = set()
        for tag_data in ret['Failure']:
            tag = tag_data['tag']
            description = tag_data.get('description')
            if (tag, description) not in tags_descriptions:
                failure.append({tag: description})
                tags_descriptions.add((tag, description))
        tags_descriptions = set()
        for tag_data in ret['Success']:
            tag = tag_data['tag']
            description = tag_data.get('description')
            if (tag, description) not in tags_descriptions:
                success.append({tag: description})
                tags_descriptions.add((tag, description))
        control_reasons = set()
        for tag_data in ret['Controlled']:
            tag = tag_data['tag']
            control_reason = tag_data.get('control', '')
            description = tag_data.get('description')
            if (tag, description, control_reason) not in control_reasons:
                tag_dict = {'description': description,
                            'control': control_reason}
                controlled.append({tag: tag_dict})
                control_reasons.add((tag, description, control_reason))
    else:
        # Format verbose output as single-key dictionaries with tag as key
        for tag_data in ret['Failure']:
            tag = tag_data['tag']
            failure.append({tag: tag_data})
        for tag_data in ret['Success']:
            tag = tag_data['tag']
            success.append({tag: tag_data})
        for tag_data in ret['Controlled']:
            tag = tag_data['tag']
            controlled.append({tag: tag_data})
    ret['Controlled'] = controlled
    ret['Success'] = success
    ret['Failure'] = failure
    # Omit the Controlled key entirely when nothing was controlled.
    if not ret['Controlled']:
        ret.pop('Controlled')
    return ret
def _merge_yaml(ret, data, profile=None):
'''
Merge two yaml dicts together at the command level
'''
if 'command' not in ret:
ret['command'] = []
if 'command' in data:
for key, val in data['command'].iteritems():
if profile and isinstance(val, dict):
val['nova_profile'] = profile
ret['command'].append({key: val})
return ret
def _get_tags(data):
    '''
    Retrieve all the tags for this distro from the yaml

    Walks the merged 'command' profile data and returns a dict mapping each
    tag to a list of formatted audit dicts, with the osfinger-specific
    values folded in for the current distro.
    '''
    ret = {}
    distro = __grains__.get('osfinger')
    for audit_dict in data.get('command', []):
        # command:0
        # .items() instead of the Python-2-only .iteritems(): identical
        # iteration behavior on py2, and makes the module py3-compatible.
        for audit_id, audit_data in audit_dict.items():
            # command:0:nodev
            tags_dict = audit_data.get('data', {})
            # command:0:nodev:data
            tags = None
            # First look for an explicit osfinger match (comma-separated
            # globs are supported per key).
            for osfinger in tags_dict:
                if osfinger == '*':
                    continue
                osfinger_list = [finger.strip() for finger in osfinger.split(',')]
                for osfinger_glob in osfinger_list:
                    if fnmatch.fnmatch(distro, osfinger_glob):
                        tags = tags_dict.get(osfinger)
                        break
                if tags is not None:
                    break
            # If we didn't find a match, check for a '*'
            if tags is None:
                tags = tags_dict.get('*', {})
            # command:0:nodev:data:Debian-8
            # NOTE(review): this writes into the profile data dict in place;
            # harmless since __data__ is rebuilt on each audit() call.
            if 'tag' not in tags:
                tags['tag'] = ''
            tag = tags['tag']
            if tag not in ret:
                ret[tag] = []
            formatted_data = {'tag': tag,
                              'module': 'command'}
            formatted_data.update(audit_data)
            formatted_data.update(tags)
            formatted_data.pop('data')
            ret[tag].append(formatted_data)
    return ret
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 1.2.1.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class StorageAccountsOperations(object):
    """StorageAccountsOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client Api Version. Constant value: "2015-06-15".
    """

    def __init__(self, client, config, serializer, deserializer):
        # Bind the generated operations to the shared client plumbing; all
        # request construction happens in the operation methods below.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Pinned service API version for every request in this class.
        self.api_version = "2015-06-15"

        self.config = config
def check_name_availability(
        self, account_name, custom_headers=None, raw=False, **operation_config):
    """Checks that the storage account name is valid and not already in use.

    :param account_name: The name of the storage account within the
     specified resource group. Storage account names must be between 3 and
     24 characters in length and use numbers and lower-case letters only.
    :type account_name:
     :class:`StorageAccountCheckNameAvailabilityParameters
     <storage.models.StorageAccountCheckNameAvailabilityParameters>`
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the deserialized
     response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: :class:`CheckNameAvailabilityResult
     <storage.models.CheckNameAvailabilityResult>` or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Build the request URL (subscription-scoped, no resource group).
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability',
        subscriptionId=self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, 'str'))

    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # Standard headers plus optional per-call and config-driven extras.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')

    body_content = self._serialize.body(
        account_name, 'StorageAccountCheckNameAvailabilityParameters')

    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, **operation_config)

    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('CheckNameAvailabilityResult', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def create(
        self, resource_group_name, account_name, parameters, custom_headers=None, raw=False, **operation_config):
    """Asynchronously creates a new storage account with the specified
    parameters. Existing accounts cannot be updated with this API and
    should instead use the Update Storage Account API. If an account is
    already created and subsequent PUT request is issued with exact same
    set of properties, then HTTP 200 would be returned. .

    :param resource_group_name: The name of the resource group within the
     user's subscription.
    :type resource_group_name: str
    :param account_name: The name of the storage account within the
     specified resource group. Storage account names must be between 3 and
     24 characters in length and use numbers and lower-case letters only.
    :type account_name: str
    :param parameters: The parameters to provide for the created account.
    :type parameters: :class:`StorageAccountCreateParameters
     <storage.models.StorageAccountCreateParameters>`
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :return:
     :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
     instance that returns :class:`StorageAccount
     <storage.models.StorageAccount>` or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(parameters, 'StorageAccountCreateParameters')

    # Construct and send request.  The three closures below drive the
    # long-running-operation poller: initial PUT, status polling, and
    # final deserialization.
    def long_running_send():
        # Initial PUT that kicks off the asynchronous creation.
        request = self._client.put(url, query_parameters)
        return self._client.send(
            request, header_parameters, body_content, **operation_config)

    def get_long_running_status(status_link, headers=None):
        # Poll the operation-status link returned by the service.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        return self._client.send(
            request, header_parameters, **operation_config)

    def get_long_running_output(response):
        # 200 = created, 202 = accepted (still provisioning).
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('StorageAccount', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized

    # raw=True bypasses the poller: a single send, result returned as-is.
    if raw:
        response = long_running_send()
        return get_long_running_output(response)

    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
def delete(
        self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
    """Deletes a storage account in Microsoft Azure.

    :param resource_group_name: The name of the resource group within the
     user's subscription.
    :type resource_group_name: str
    :param account_name: The name of the storage account within the
     specified resource group. Storage account names must be between 3 and
     24 characters in length and use numbers and lower-case letters only.
    :type account_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the deserialized
     response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Build the account-scoped request URL.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}',
        resourceGroupName=self._serialize.url(
            "resource_group_name", resource_group_name, 'str'),
        accountName=self._serialize.url(
            "account_name", account_name, 'str', max_length=24, min_length=3),
        subscriptionId=self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, 'str'))

    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # Standard headers plus optional per-call and config-driven extras.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')

    request = self._client.delete(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200, 204]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    if raw:
        return ClientRawResponse(None, response)
def get_properties(
        self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
    """Returns the properties for the specified storage account including but
    not limited to name, account type, location, and account status. The
    ListKeys operation should be used to retrieve storage keys.

    :param resource_group_name: The name of the resource group within the
     user's subscription.
    :type resource_group_name: str
    :param account_name: The name of the storage account within the
     specified resource group. Storage account names must be between 3 and
     24 characters in length and use numbers and lower-case letters only.
    :type account_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the deserialized
     response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: :class:`StorageAccount <storage.models.StorageAccount>` or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Build the account-scoped request URL.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}',
        resourceGroupName=self._serialize.url(
            "resource_group_name", resource_group_name, 'str'),
        accountName=self._serialize.url(
            "account_name", account_name, 'str', max_length=24, min_length=3),
        subscriptionId=self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, 'str'))

    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }

    # Standard headers plus optional per-call and config-driven extras.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')

    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('StorageAccount', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def update(
        self, resource_group_name, account_name, parameters, custom_headers=None, raw=False, **operation_config):
    """Updates the account type or tags for a storage account. It can also be
    used to add a custom domain (note that custom domains cannot be added
    via the Create operation). Only one custom domain is supported per
    storage account. In order to replace a custom domain, the old value
    must be cleared before a new value may be set. To clear a custom
    domain, simply update the custom domain with empty string. Then call
    update again with the new custom domain name. The update API can only
    be used to update one of tags, accountType, or customDomain per call.
    To update multiple of these properties, call the API multiple times
    with one change per call. This call does not change the storage keys
    for the account. If you want to change storage account keys, use the
    RegenerateKey operation. The location and name of the storage account
    cannot be changed after creation.

    :param resource_group_name: The name of the resource group within the
     user's subscription.
    :type resource_group_name: str
    :param account_name: The name of the storage account within the
     specified resource group. Storage account names must be between 3 and
     24 characters in length and use numbers and lower-case letters only.
    :type account_name: str
    :param parameters: The parameters to update on the account. Note that
     only one property can be changed at a time using this API.
    :type parameters: :class:`StorageAccountUpdateParameters
     <storage.models.StorageAccountUpdateParameters>`
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: :class:`StorageAccount <storage.models.StorageAccount>` or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
     raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(parameters, 'StorageAccountUpdateParameters')

    # Construct and send request (PATCH: partial update, one property/call)
    request = self._client.patch(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('StorageAccount', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def list_keys(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Lists the access keys for the specified storage account.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param account_name: The name of the storage account.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`StorageAccountKeys
<storage.models.StorageAccountKeys>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`StorageAccountKeys
<storage.models.StorageAccountKeys>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccountKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """Lists all the storage accounts available under the subscription. Note
        that storage keys are not returned; use the ListKeys operation for
        this.
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of :class:`StorageAccount
         <storage.models.StorageAccount>`
        :rtype: :class:`StorageAccountPaged
         <storage.models.StorageAccountPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Fetch one page of results. The first page is built from the
            # path template; later pages reuse the server-supplied next_link
            # verbatim (it already embeds its own query string, so none is
            # added in that case).
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            # Any status other than 200 is surfaced as a CloudError.
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response: the paged collection calls internal_paging
        # lazily as the caller iterates.
        deserialized = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            # NOTE(review): header_dict is passed empty and never populated
            # here — presumably filled by the Paged implementation; confirm.
            header_dict = {}
            client_raw_response = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    def list_by_resource_group(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Lists all the storage accounts available under the given resource
        group. Note that storage keys are not returned; use the ListKeys
        operation for this.
        :param resource_group_name: The name of the resource group within the
         user's subscription.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of :class:`StorageAccount
         <storage.models.StorageAccount>`
        :rtype: :class:`StorageAccountPaged
         <storage.models.StorageAccountPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Fetch one page of results. The first page is built from the
            # path template; later pages reuse the server-supplied next_link
            # verbatim (it already embeds its own query string).
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            # Any status other than 200 is surfaced as a CloudError.
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response: the paged collection calls internal_paging
        # lazily as the caller iterates.
        deserialized = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
def regenerate_key(
self, resource_group_name, account_name, regenerate_key, custom_headers=None, raw=False, **operation_config):
"""Regenerates the access keys for the specified storage account.
:param resource_group_name: The name of the resource group within the
user's subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
:type account_name: str
:param regenerate_key: Specifies name of the key which should be
regenerated. key1 or key2 for the default keys
:type regenerate_key: :class:`StorageAccountRegenerateKeyParameters
<storage.models.StorageAccountRegenerateKeyParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`StorageAccountKeys
<storage.models.StorageAccountKeys>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`StorageAccountKeys
<storage.models.StorageAccountKeys>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(regenerate_key, 'StorageAccountRegenerateKeyParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccountKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
| |
""" This file contains view functions for Flask-User forms.
:copyright: (c) 2013 by Ling Thio
:author: Ling Thio (ling.thio@gmail.com)
:license: Simplified BSD License, see LICENSE.txt for more details."""
from datetime import datetime
from flask import current_app, flash, redirect, render_template, request, url_for
from flask_login import current_user, login_user, logout_user
try: # Handle Python 2.x and Python 3.x
from urllib.parse import quote # Python 3.x
except ImportError:
from urllib import quote # Python 2.x
from .decorators import confirm_email_required, login_required
from . import emails
from . import signals
from .translations import gettext as _
def confirm_email(token):
    """ Verify the email confirmation token and activate the user account.

    Sets confirmed_at on the User (or UserEmail) the token points at,
    re-activates the account, then auto-logs-in or redirects to login.
    """
    # Verify token
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    is_valid, has_expired, object_id = user_manager.verify_token(
        token,
        user_manager.confirm_email_expiration)
    if has_expired:
        flash(_('Your confirmation token has expired.'), 'error')
        return redirect(url_for('user.login'))
    if not is_valid:
        flash(_('Invalid confirmation token.'), 'error')
        return redirect(url_for('user.login'))
    # Confirm email by setting User.confirmed_at=utcnow() or UserEmail.confirmed_at=utcnow()
    user = None
    if db_adapter.UserEmailClass:
        # Multiple-emails-per-user setup: the token identifies a UserEmail
        user_email = user_manager.get_user_email_by_id(object_id)
        if user_email:
            user_email.confirmed_at = datetime.utcnow()
            user = user_email.user
    else:
        # Single-email setup: the token identifies the User itself
        user_email = None
        user = user_manager.get_user_by_id(object_id)
        if user:
            user.confirmed_at = datetime.utcnow()
    if user:
        user.set_active(True)
        db_adapter.commit()
    else:  # pragma: no cover
        flash(_('Invalid confirmation token.'), 'error')
        return redirect(url_for('user.login'))
    # Send email_confirmed signal
    signals.user_confirmed_email.send(current_app._get_current_object(), user=user)
    # Prepare one-time system message
    flash(_('Your email has been confirmed.'), 'success')
    # Auto-login after confirm or redirect to login page
    next = request.args.get('next', _endpoint_url(user_manager.after_confirm_endpoint))
    if user_manager.auto_login_after_confirm:
        return _do_login_user(user, next)  # auto-login
    else:
        # BUGFIX: percent-encode 'next' so characters like '&', '=' and '?'
        # inside it survive being embedded in the redirect query string
        # ('quote' is imported at module top for exactly this purpose).
        return redirect(url_for('user.login')+'?next='+quote(next))  # redirect to login page
@login_required
@confirm_email_required
def change_password():
    """ Prompt for old password and new password and change the user's password.

    GET (or invalid POST) renders the change-password form; a valid POST
    stores the new password hash and redirects to the 'next' URL.
    """
    user_manager = current_app.user_manager
    # (removed unused local 'db_adapter' — password storage goes through
    # user_manager.update_password)

    # Initialize form; place the ?next query param in the hidden next form field
    form = user_manager.change_password_form(request.form)
    form.next.data = request.args.get('next', _endpoint_url(user_manager.after_change_password_endpoint))

    # Process valid POST (old-password verification is presumably performed
    # by the form's validators — confirm in change_password_form)
    if request.method == 'POST' and form.validate():
        # Hash the new password and store it on the current user
        hashed_password = user_manager.hash_password(form.new_password.data)
        user_manager.update_password(current_user, hashed_password)

        # Send 'password_changed' email when that feature is enabled
        if user_manager.enable_email and user_manager.send_password_changed_email:
            emails.send_password_changed_email(current_user)

        # Send password_changed signal
        signals.user_changed_password.send(current_app._get_current_object(), user=current_user)

        # Prepare one-time system message
        flash(_('Your password has been changed successfully.'), 'success')

        # Redirect to 'next' URL
        return redirect(form.next.data)

    # Process GET or invalid POST
    return render_template(user_manager.change_password_template, form=form)
@login_required
@confirm_email_required
def change_username():
    """ Prompt for new username and old password and change the user's username."""
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter

    # Bind the form and seed its hidden 'next' field from the query string.
    form = user_manager.change_username_form(request.form)
    form.next.data = request.args.get(
        'next', _endpoint_url(user_manager.after_change_username_endpoint))

    if request.method == 'POST' and form.validate():
        new_username = form.new_username.data

        # The username column lives on UserAuth when that class is configured.
        if db_adapter.UserAuthClass and hasattr(current_user, 'user_auth'):
            target = current_user.user_auth
        else:
            target = current_user
        db_adapter.update_object(target, username=new_username)
        db_adapter.commit()

        # Optional notification email.
        if user_manager.enable_email and user_manager.send_username_changed_email:
            emails.send_username_changed_email(current_user)

        # Announce the change to any listeners.
        signals.user_changed_username.send(
            current_app._get_current_object(), user=current_user)

        # One-time system message, then honor the redirect target.
        flash(_("Your username has been changed to '%(username)s'.", username=new_username), 'success')
        return redirect(form.next.data)

    # GET or invalid POST: render the form.
    return render_template(user_manager.change_username_template, form=form)
@login_required
@confirm_email_required
def email_action(id, action):
    """ Perform action 'action' ('delete', 'make-primary' or 'confirm')
    on the UserEmail record with primary key 'id'."""
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter

    # Look the record up and make sure it belongs to the signed-in user.
    user_email = db_adapter.find_first_object(db_adapter.UserEmailClass, id=id)
    if not user_email or user_email.user_id != int(current_user.get_id()):
        return unauthorized()

    if action == 'delete':
        # The primary address can never be removed.
        if user_email.is_primary:
            return unauthorized()
        db_adapter.delete_object(user_email)
        db_adapter.commit()
    elif action == 'make-primary':
        # Demote whichever addresses were primary, then promote this one.
        for other in db_adapter.find_all_objects(
                db_adapter.UserEmailClass, user_id=int(current_user.get_id())):
            if other.is_primary:
                other.is_primary = False
        user_email.is_primary = True
        db_adapter.commit()
    elif action == 'confirm':
        _send_confirm_email(user_email.user, user_email)
    else:
        # Unknown action name.
        return unauthorized()

    return redirect(url_for('user.manage_emails'))
def forgot_password():
    """Prompt for email and send reset password email.

    Always flashes the same success message whether or not the address is
    registered, so the form cannot be used to probe for accounts.
    """
    user_manager = current_app.user_manager
    # (removed unused local 'db_adapter' — lookups go through user_manager)

    # Initialize form
    form = user_manager.forgot_password_form(request.form)

    # Process valid POST
    if request.method == 'POST' and form.validate():
        email = form.email.data
        # Only send the email when an account actually exists
        user, user_email = user_manager.find_user_by_email(email)
        if user:
            user_manager.send_reset_password_email(email)

        # Prepare one-time system message
        flash(_("A reset password email has been sent to '%(email)s'. Open that email and follow the instructions to reset your password.", email=email), 'success')

        # Redirect to the login page
        return redirect(_endpoint_url(user_manager.after_forgot_password_endpoint))

    # Process GET or invalid POST
    return render_template(user_manager.forgot_password_template, form=form)
def login():
    """ Prompt for username/email and password and sign the user in.

    Renders both a login form and a register form (for templates that show
    the two side by side) and honors ?next / ?reg_next redirect targets.
    """
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    # Resolve post-login / post-register redirect targets from the query string
    next = request.args.get('next', _endpoint_url(user_manager.after_login_endpoint))
    reg_next = request.args.get('reg_next', _endpoint_url(user_manager.after_register_endpoint))
    # Immediately redirect already logged in users
    if current_user.is_authenticated() and user_manager.auto_login_at_login:
        return redirect(next)
    # Initialize form
    login_form = user_manager.login_form(request.form) # for login.html
    register_form = user_manager.register_form() # for login_or_register.html
    if request.method!='POST':
        # On GET, seed the hidden redirect fields of both forms
        login_form.next.data = register_form.next.data = next
        login_form.reg_next.data = register_form.reg_next.data = reg_next
    # Process valid POST
    if request.method=='POST' and login_form.validate():
        # Retrieve User
        user = None
        user_email = None
        if user_manager.enable_username:
            # Find user record by username
            user = user_manager.find_user_by_username(login_form.username.data)
            user_email = None
            # Find primary user_email record
            if user and db_adapter.UserEmailClass:
                user_email = db_adapter.find_first_object(db_adapter.UserEmailClass,
                        user_id=int(user.get_id()),
                        is_primary=True,
                        )
            # Find user record by email (with form.username)
            if not user and user_manager.enable_email:
                user, user_email = user_manager.find_user_by_email(login_form.username.data)
        else:
            # Find user by email (with form.email)
            user, user_email = user_manager.find_user_by_email(login_form.email.data)
        if user:
            # Log user in
            # NOTE(review): no explicit password check here — presumably the
            # password is verified inside login_form.validate(); confirm.
            return _do_login_user(user, login_form.next.data, login_form.remember_me.data)
    # Process GET or invalid POST
    return render_template(user_manager.login_template,
            form=login_form,
            login_form=login_form,
            register_form=register_form,
            support_cas=user_manager.support_cas,
            cas_server=user_manager.cas_server,
            cas_service=user_manager.cas_service,
            next=next)
def cas():
    """ Validate a CAS ticket and sign the user in, creating a local
    account on first login.

    Redirects to the ?next URL on success; falls back to the regular
    login() view when CAS is disabled or ticket validation fails.
    """
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    next = request.args.get('next', _endpoint_url(user_manager.after_login_endpoint))
    if user_manager.support_cas:
        ticket = request.args.get('ticket', '')
        import urllib
        import ssl
        # Ask the CAS server to validate the ticket for our service URL
        url = user_manager.cas_server + "/serviceValidate?ticket=" + ticket + "&service=" + user_manager.cas_service + "%2Fuser%2Fcas%3Fnext=" + next
        cas_data = None
        # FIXME: the certificate should be checked, unfortunately most of the
        # time the CAS certificate is self-signed
        try:
            ctx = ssl.create_default_context()
            ctx.check_hostname = False
            ctx.verify_mode = ssl.CERT_NONE
            cas_data = urllib.request.urlopen(url, context=ctx).read()
        # for python2: urllib has no 'request' submodule
        except AttributeError:
            try:
                cas_data = urllib.urlopen(url).read()
            except IOError:
                ctx = ssl.create_default_context()
                ctx.check_hostname = False
                ctx.verify_mode = ssl.CERT_NONE
                cas_data = urllib.urlopen(url, context=ctx).read()
        import xml.etree.ElementTree as ET
        tr = ET.fromstring(cas_data)
        print("URL requested to the cas:")
        print(url)
        print("DATA got from the cas:")
        print(cas_data)
        user_response = tr.findall("{http://www.yale.edu/tp/cas}authenticationSuccess/{http://www.yale.edu/tp/cas}user")
        flask_user = None
        username = None
        if len(user_response) == 1:
            [username] = user_response
            flask_user = user_manager.find_user_by_username(username.text)
            assert(type(username.text) == str)
            assert(len(username.text) > 1)
        # Immediately redirect already logged in users
        if current_user.is_authenticated() and user_manager.auto_login_at_login:
            return redirect(next)
        # BUGFIX: only auto-provision when CAS actually returned a username.
        # Previously 'username' was referenced below even when validation
        # failed, raising NameError instead of falling through to login().
        if not flask_user and username is not None:
            print("User " + str(username.text) + " not found in our own db, creating a new account.")
            # Create a User object using fields that have a corresponding User field
            User = db_adapter.UserClass
            user_class_fields = User.__dict__
            user_fields = {}
            # BUGFIX: bind UserEmail and its field maps, mirroring register().
            # Previously user_email_class_fields / user_email_fields / UserEmail
            # were referenced without ever being defined here, raising NameError
            # whenever UserEmailClass is configured.
            if db_adapter.UserEmailClass:
                UserEmail = db_adapter.UserEmailClass
                user_email_class_fields = UserEmail.__dict__
                user_email_fields = {}
            # Create a UserAuth object using fields that have a corresponding UserAuth field
            if db_adapter.UserAuthClass:
                UserAuth = db_adapter.UserAuthClass
                user_auth_class_fields = UserAuth.__dict__
                user_auth_fields = {}
            # Enable user account
            # (assumes UserAuthClass is configured whenever UserProfileClass
            # is — same assumption register() makes; confirm.)
            if db_adapter.UserProfileClass:
                if hasattr(db_adapter.UserProfileClass, 'active'):
                    user_auth_fields['active'] = True
                elif hasattr(db_adapter.UserProfileClass, 'is_enabled'):
                    user_auth_fields['is_enabled'] = True
                else:
                    user_auth_fields['is_active'] = True
            else:
                if hasattr(db_adapter.UserClass, 'active'):
                    user_fields['active'] = True
                elif hasattr(db_adapter.UserClass, 'is_enabled'):
                    user_fields['is_enabled'] = True
                else:
                    user_fields['is_active'] = True
            # Route the CAS username into whichever record classes define it
            for field_name, field_value in {"username": username.text}.items():
                if field_name in user_class_fields:
                    user_fields[field_name] = field_value
                if db_adapter.UserEmailClass:
                    if field_name in user_email_class_fields:
                        user_email_fields[field_name] = field_value
                if db_adapter.UserAuthClass:
                    if field_name in user_auth_class_fields:
                        user_auth_fields[field_name] = field_value
            # Add User record using named arguments 'user_fields'
            user = db_adapter.add_object(User, **user_fields)
            # Add UserEmail record using named arguments 'user_email_fields'
            if db_adapter.UserEmailClass:
                user_email = db_adapter.add_object(UserEmail,
                        user=user,
                        is_primary=True,
                        **user_email_fields)
            else:
                user_email = None
            # Add UserAuth record using named arguments 'user_auth_fields'
            if db_adapter.UserAuthClass:
                user_auth = db_adapter.add_object(UserAuth, **user_auth_fields)
                if db_adapter.UserProfileClass:
                    user = user_auth
                else:
                    user.user_auth = user_auth
            flask_user = user
            db_adapter.commit()
        if flask_user:
            # Log user in (no "remember me")
            return _do_login_user(flask_user, next, False)
    return login()
def logout():
    """ Sign the user out."""
    user_manager = current_app.user_manager

    # Announce the sign-out before the session is torn down, so listeners
    # still see the authenticated user.
    signals.user_logged_out.send(current_app._get_current_object(), user=current_user)

    # Flask-Login clears the session.
    logout_user()

    # One-time system message.
    flash(_('You have signed out successfully.'), 'success')

    # Honor ?next=..., falling back to the configured after-logout endpoint.
    destination = request.args.get('next', _endpoint_url(user_manager.after_logout_endpoint))
    return redirect(destination)
@login_required
@confirm_email_required
def manage_emails():
    """ List the current user's email addresses and let them add one."""
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter

    # All addresses belonging to the signed-in user.
    user_emails = db_adapter.find_all_objects(
        db_adapter.UserEmailClass, user_id=int(current_user.get_id()))
    form = user_manager.add_email_form()

    # Valid POST: attach the submitted address to the current user and
    # reload the page.
    if request.method == "POST" and form.validate():
        user_emails = db_adapter.add_object(
            db_adapter.UserEmailClass,
            user_id=int(current_user.get_id()),
            email=form.email.data)
        db_adapter.commit()
        return redirect(url_for('user.manage_emails'))

    # GET or invalid POST: render the management page.
    return render_template(
        user_manager.manage_emails_template,
        user_emails=user_emails,
        form=form,
    )
def register():
    """ Display registration form and create new User.

    On a valid POST this provisions User / UserEmail / UserAuth records
    (depending on which classes the adapter configures), sends the
    'registered' email, and either auto-logs-in or redirects to login.
    Registration may be restricted to holders of an invitation token.
    """
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    # Redirect targets from the query string
    next = request.args.get('next', _endpoint_url(user_manager.after_login_endpoint))
    reg_next = request.args.get('reg_next', _endpoint_url(user_manager.after_register_endpoint))
    # Initialize form
    login_form = user_manager.login_form() # for login_or_register.html
    register_form = user_manager.register_form(request.form) # for register.html
    # invite token used to determine validity of registeree
    invite_token = request.values.get("token")
    # require invite without a token should disallow the user from registering
    if user_manager.require_invitation and not invite_token:
        flash("Registration is invite only", "error")
        return redirect(url_for('user.login'))
    user_invite = None
    if invite_token and db_adapter.UserInvitationClass:
        user_invite = db_adapter.find_first_object(db_adapter.UserInvitationClass, token=invite_token)
        if user_invite:
            register_form.invite_token.data = invite_token
        else:
            flash("Invalid invitation token", "error")
            return redirect(url_for('user.login'))
    if request.method!='POST':
        # On GET, seed the hidden redirect fields and pre-fill the invited email
        login_form.next.data = register_form.next.data = next
        login_form.reg_next.data = register_form.reg_next.data = reg_next
        if user_invite:
            register_form.email.data = user_invite.email
    # Process valid POST
    if request.method=='POST' and register_form.validate():
        # Create a User object using Form fields that have a corresponding User field
        User = db_adapter.UserClass
        user_class_fields = User.__dict__
        user_fields = {}
        # Create a UserEmail object using Form fields that have a corresponding UserEmail field
        if db_adapter.UserEmailClass:
            UserEmail = db_adapter.UserEmailClass
            user_email_class_fields = UserEmail.__dict__
            user_email_fields = {}
        # Create a UserAuth object using Form fields that have a corresponding UserAuth field
        if db_adapter.UserAuthClass:
            UserAuth = db_adapter.UserAuthClass
            user_auth_class_fields = UserAuth.__dict__
            user_auth_fields = {}
        # Enable user account, picking whichever 'active' attribute the
        # configured model actually defines
        if db_adapter.UserProfileClass:
            if hasattr(db_adapter.UserProfileClass, 'active'):
                user_auth_fields['active'] = True
            elif hasattr(db_adapter.UserProfileClass, 'is_enabled'):
                user_auth_fields['is_enabled'] = True
            else:
                user_auth_fields['is_active'] = True
        else:
            if hasattr(db_adapter.UserClass, 'active'):
                user_fields['active'] = True
            elif hasattr(db_adapter.UserClass, 'is_enabled'):
                user_fields['is_enabled'] = True
            else:
                user_fields['is_active'] = True
        # For all form fields: route each value to every model class that
        # declares an attribute of the same name
        for field_name, field_value in register_form.data.items():
            # Hash password field
            if field_name=='password':
                hashed_password = user_manager.hash_password(field_value)
                if db_adapter.UserAuthClass:
                    user_auth_fields['password'] = hashed_password
                else:
                    user_fields['password'] = hashed_password
            # Store corresponding Form fields into the User object and/or UserProfile object
            else:
                if field_name in user_class_fields:
                    user_fields[field_name] = field_value
                if db_adapter.UserEmailClass:
                    if field_name in user_email_class_fields:
                        user_email_fields[field_name] = field_value
                if db_adapter.UserAuthClass:
                    if field_name in user_auth_class_fields:
                        user_auth_fields[field_name] = field_value
        # Add User record using named arguments 'user_fields'
        user = db_adapter.add_object(User, **user_fields)
        if db_adapter.UserProfileClass:
            user_profile = user
        # Add UserEmail record using named arguments 'user_email_fields'
        if db_adapter.UserEmailClass:
            user_email = db_adapter.add_object(UserEmail,
                    user=user,
                    is_primary=True,
                    **user_email_fields)
        else:
            user_email = None
        # Add UserAuth record using named arguments 'user_auth_fields'
        if db_adapter.UserAuthClass:
            user_auth = db_adapter.add_object(UserAuth, **user_auth_fields)
            if db_adapter.UserProfileClass:
                user = user_auth
            else:
                user.user_auth = user_auth
        # An invitation whose email matches the registration skips confirmation
        require_email_confirmation = True
        if user_invite:
            if user_invite.email == register_form.email.data:
                require_email_confirmation = False
                db_adapter.update_object(user, confirmed_at=datetime.utcnow())
        db_adapter.commit()
        # Send 'registered' email and delete new User object if send fails
        if user_manager.send_registered_email:
            try:
                # Send 'registered' email
                _send_registered_email(user, user_email, require_email_confirmation)
            except Exception as e:
                # delete new User object if send fails
                db_adapter.delete_object(user)
                db_adapter.commit()
                raise
        # Send user_registered signal
        signals.user_registered.send(current_app._get_current_object(),
                                     user=user,
                                     user_invite=user_invite)
        # Redirect if USER_ENABLE_CONFIRM_EMAIL is set
        if user_manager.enable_confirm_email and require_email_confirmation:
            next = request.args.get('next', _endpoint_url(user_manager.after_register_endpoint))
            return redirect(next)
        # Auto-login after register or redirect to login page
        next = request.args.get('next', _endpoint_url(user_manager.after_confirm_endpoint))
        if user_manager.auto_login_after_register:
            return _do_login_user(user, reg_next) # auto-login
        else:
            return redirect(url_for('user.login')+'?next='+reg_next) # redirect to login page
    # Process GET or invalid POST
    return render_template(user_manager.register_template,
                           form=register_form,
                           login_form=login_form,
                           register_form=register_form)
@login_required
def invite():
    """ Allows users to send invitations to register an account.

    Creates a UserInvitation record, emails the signed accept-link to the
    invitee, and rolls the record back if the email cannot be sent.
    """
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    next = request.args.get('next',
                            _endpoint_url(user_manager.after_invite_endpoint))
    invite_form = user_manager.invite_form(request.form)

    # Process valid POST
    if request.method == 'POST' and invite_form.validate():
        email = invite_form.email.data
        # (removed dead locals User / user_class_fields / user_fields — they
        # were built here but never used)

        # Refuse to invite an address that already has an account
        user, user_email = user_manager.find_user_by_email(email)
        if user:
            flash("User with that email has already registered", "error")
            return redirect(url_for('user.invite'))
        else:
            # Record the invitation and derive its signed token
            user_invite = db_adapter \
                .add_object(db_adapter.UserInvitationClass, **{
                    "email": email,
                    "invited_by_user_id": current_user.id
                })
            db_adapter.commit()
            token = user_manager.generate_token(user_invite.id)
            accept_invite_link = url_for('user.register',
                                         token=token,
                                         _external=True)
            # Store token (only when the model has a column for it)
            if hasattr(db_adapter.UserInvitationClass, 'token'):
                user_invite.token = token
                db_adapter.commit()
            try:
                # Send 'invite' email
                emails.send_invite_email(user_invite, accept_invite_link)
            except Exception:
                # Delete the invitation record if the email could not be sent
                db_adapter.delete_object(user_invite)
                db_adapter.commit()
                raise
            signals \
                .user_sent_invitation \
                .send(current_app._get_current_object(), user_invite=user_invite,
                      form=invite_form)
            flash(_('Invitation has been sent.'), 'success')
            return redirect(next)

    # Process GET or invalid POST
    return render_template(user_manager.invite_template, form=invite_form)
def resend_confirm_email():
    """Prompt for an email address and re-send the confirmation email."""
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter

    # Bind the form to the posted data.
    form = user_manager.resend_confirm_email_form(request.form)

    if request.method == 'POST' and form.validate():
        # Re-send the confirmation email when the address matches an account.
        address = form.email.data
        user, user_email = user_manager.find_user_by_email(address)
        if user:
            _send_confirm_email(user, user_email)
        # Back to the configured post-resend endpoint either way.
        return redirect(_endpoint_url(user_manager.after_resend_confirm_email_endpoint))

    # GET or invalid POST: show the form again.
    return render_template(user_manager.resend_confirm_email_template, form=form)
def reset_password(token):
    """Verify the password-reset token, prompt for a new password, and set
    the user's password.

    Flow: sign out any current session, verify the token (expiry and
    signature), re-check it against the stored ``reset_password_token`` if
    the user model has one, then on a valid POST clear the stored token,
    hash and store the new password, and optionally auto-login.
    """
    # Verify token
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    # A signed-in user following a reset link is signed out first.
    if current_user.is_authenticated():
        logout_user()
    is_valid, has_expired, user_id = user_manager.verify_token(
        token,
        user_manager.reset_password_expiration)
    if has_expired:
        flash(_('Your reset password token has expired.'), 'error')
        return redirect(url_for('user.login'))
    if not is_valid:
        flash(_('Your reset password token is invalid.'), 'error')
        return redirect(url_for('user.login'))
    user = user_manager.get_user_by_id(user_id)
    if user:
        # Avoid re-using old tokens: when the user model stores the issued
        # token, the presented one must match it exactly.
        if hasattr(user, 'reset_password_token'):
            verified = user.reset_password_token == token
        else:
            verified = True
    # `verified` is only unbound when `user` is falsy, in which case the
    # short-circuit on `not user` prevents a NameError.
    if not user or not verified:
        flash(_('Your reset password token is invalid.'), 'error')
        return redirect(_endpoint_url(user_manager.login_endpoint))
    # Mark email as confirmed — a valid reset link proves the user controls
    # the mailbox.  NOTE(review): this runs on GET too and is only persisted
    # by a later commit() on the POST path — confirm this is intentional.
    user_email = emails.get_primary_user_email(user)
    user_email.confirmed_at = datetime.utcnow()
    # Initialize form
    form = user_manager.reset_password_form(request.form)
    # Process valid POST
    if request.method=='POST' and form.validate():
        # Invalidate the token by clearing the stored token
        if hasattr(user, 'reset_password_token'):
            db_adapter.update_object(user, reset_password_token='')
        # Change password
        hashed_password = user_manager.hash_password(form.new_password.data)
        # Passwords may live on a separate UserAuth object when configured.
        user_auth = user.user_auth if db_adapter.UserAuthClass and hasattr(user, 'user_auth') else user
        db_adapter.update_object(user_auth, password=hashed_password)
        db_adapter.commit()
        # Send 'password_changed' email
        if user_manager.enable_email and user_manager.send_password_changed_email:
            emails.send_password_changed_email(user)
        # Prepare one-time system message
        flash(_("Your password has been reset successfully."), 'success')
        # Auto-login after reset password or redirect to login page
        next = request.args.get('next', _endpoint_url(user_manager.after_reset_password_endpoint))
        if user_manager.auto_login_after_reset_password:
            return _do_login_user(user, next)    # auto-login
        else:
            return redirect(url_for('user.login')+'?next='+next)    # redirect to login page
    # Process GET or invalid POST
    return render_template(user_manager.reset_password_template, form=form)
def unconfirmed():
    """Flash an 'email not confirmed' message and redirect to the
    configured USER_UNCONFIRMED_ENDPOINT."""
    requested_url = request.script_root + request.path
    flash(_("You must confirm your email to access '%(url)s'.", url=requested_url), 'error')
    destination = _endpoint_url(current_app.user_manager.unconfirmed_email_endpoint)
    return redirect(destination)
def unauthenticated():
    """Flash a 'sign in required' message and redirect to the configured
    USER_UNAUTHENTICATED_ENDPOINT, carrying the original URL in ?next=."""
    original_url = request.url
    flash(_("You must be signed in to access '%(url)s'.", url=original_url), 'error')
    # URL-quote the fully qualified URL so it survives as a query parameter.
    next_param = quote(original_url)
    base = _endpoint_url(current_app.user_manager.unauthenticated_endpoint)
    return redirect(base + '?next=' + next_param)
def unauthorized():
    """Flash a 'permission denied' message and redirect to the configured
    USER_UNAUTHORIZED_ENDPOINT."""
    attempted_url = request.script_root + request.path
    flash(_("You do not have permission to access '%(url)s'.", url=attempted_url), 'error')
    destination = _endpoint_url(current_app.user_manager.unauthorized_endpoint)
    return redirect(destination)
@login_required
@confirm_email_required
def user_profile():
    """Render the user-profile page for the signed-in, email-confirmed user."""
    user_manager = current_app.user_manager
    return render_template(user_manager.user_profile_template)
def _send_registered_email(user, user_email, require_email_confirmation=True):
    """Send the 'registered'/'confirm email' email and flash a message.

    When email sending and email confirmation are enabled, generate a signed
    confirmation token and email the confirmation link; the flashed message
    then tells the user to check their inbox, otherwise it reports a plain
    successful registration.  (Removed an unused `db_adapter` local.)
    """
    user_manager = current_app.user_manager
    # Send 'confirm_email' or 'registered' email
    if user_manager.enable_email and user_manager.enable_confirm_email:
        # Generate confirm email link; the token encodes the email row id
        # when a separate email object exists, else the user id.
        object_id = user_email.id if user_email else int(user.get_id())
        token = user_manager.generate_token(object_id)
        confirm_email_link = url_for('user.confirm_email', token=token, _external=True)
        # Send email
        emails.send_registered_email(user, user_email, confirm_email_link)
    # Prepare one-time system message
    if user_manager.enable_confirm_email and require_email_confirmation:
        email = user_email.email if user_email else user.email
        flash(_('A confirmation email has been sent to %(email)s with instructions to complete your registration.', email=email), 'success')
    else:
        flash(_('You have registered successfully.'), 'success')
def _send_confirm_email(user, user_email):
    """(Re-)send the email-confirmation email and flash a message.

    Only sends when both email sending and email confirmation are enabled.
    (Removed an unused `db_adapter` local.)
    """
    user_manager = current_app.user_manager
    # Send 'confirm_email' or 'registered' email
    if user_manager.enable_email and user_manager.enable_confirm_email:
        # Generate confirm email link; the token encodes the email row id
        # when a separate email object exists, else the user id.
        object_id = user_email.id if user_email else int(user.get_id())
        token = user_manager.generate_token(object_id)
        confirm_email_link = url_for('user.confirm_email', token=token, _external=True)
        # Send email
        emails.send_confirm_email_email(user, user_email, confirm_email_link)
    # Prepare one-time system message
    email = user_email.email if user_email else user.email
    flash(_('A confirmation email has been sent to %(email)s with instructions to complete your registration.', email=email), 'success')
def _do_login_user(user, next, remember_me=False):
    """Sign *user* in via Flask-Login and redirect to *next*.

    Guard clauses reject a missing user, a disabled account, and — when
    email confirmation is required by configuration — an unconfirmed email
    address.
    """
    # No authenticated user: delegate to the unauthenticated handler.
    if not user:
        return unauthenticated()

    # Disabled accounts may not sign in.
    if not user.is_active():
        flash(_('Your account has not been enabled.'), 'error')
        return redirect(url_for('user.login'))

    # Enforce email confirmation when the configuration requires it.
    user_manager = current_app.user_manager
    if (user_manager.enable_email and user_manager.enable_confirm_email
            and not current_app.user_manager.enable_login_without_confirm_email
            and not user.has_confirmed_email()):
        url = url_for('user.resend_confirm_email')
        flash(_('Your email address has not yet been confirmed. Check your email Inbox and Spam folders for the confirmation email or <a href="%(url)s">Re-send confirmation email</a>.', url=url), 'error')
        return redirect(url_for('user.login'))

    # Use Flask-Login to sign in the user.
    login_user(user, remember=remember_me)

    # Notify listeners, flash a one-time message, and redirect.
    signals.user_logged_in.send(current_app._get_current_object(), user=user)
    flash(_('You have signed in successfully.'), 'success')
    return redirect(next)
def _endpoint_url(endpoint):
    """Return the URL for *endpoint*, or '/' when no endpoint is configured."""
    if not endpoint:
        return '/'
    return url_for(endpoint)
| |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2007 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''DDS texture loader.
Reference: http://msdn2.microsoft.com/en-us/library/bb172993.aspx
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: dds.py 878 2007-06-09 04:58:51Z Alex.Holkner $'
from ctypes import *
import struct
from pyglet.gl import *
from pyglet.gl import gl_info
from pyglet.image import CompressedImageData
from pyglet.image import codecs
from pyglet.image.codecs import s3tc
class DDSException(codecs.ImageDecodeException):
pass
# Bit masks from the DDS file format (see the MSDN reference in the module
# docstring).  Names and values mirror the Microsoft headers.

# dwFlags of DDSURFACEDESC2
DDSD_CAPS = 0x00000001
DDSD_HEIGHT = 0x00000002
DDSD_WIDTH = 0x00000004
DDSD_PITCH = 0x00000008
DDSD_PIXELFORMAT = 0x00001000
DDSD_MIPMAPCOUNT = 0x00020000
DDSD_LINEARSIZE = 0x00080000
DDSD_DEPTH = 0x00800000

# ddpfPixelFormat of DDSURFACEDESC2
DDPF_ALPHAPIXELS = 0x00000001
DDPF_FOURCC = 0x00000004
DDPF_RGB = 0x00000040

# dwCaps1 of DDSCAPS2
DDSCAPS_COMPLEX = 0x00000008
DDSCAPS_TEXTURE = 0x00001000
DDSCAPS_MIPMAP = 0x00400000

# dwCaps2 of DDSCAPS2 (cube-map faces and volume textures)
DDSCAPS2_CUBEMAP = 0x00000200
DDSCAPS2_CUBEMAP_POSITIVEX = 0x00000400
DDSCAPS2_CUBEMAP_NEGATIVEX = 0x00000800
DDSCAPS2_CUBEMAP_POSITIVEY = 0x00001000
DDSCAPS2_CUBEMAP_NEGATIVEY = 0x00002000
DDSCAPS2_CUBEMAP_POSITIVEZ = 0x00004000
DDSCAPS2_CUBEMAP_NEGATIVEZ = 0x00008000
DDSCAPS2_VOLUME = 0x00200000
class _filestruct(object):
    """Base class for fixed-layout binary headers.

    Subclasses define ``_fields`` as a list of ``(name, struct_code)``
    pairs; the constructor unpacks *data* (little-endian) and sets one
    attribute per field.  Raises DDSException when *data* is too short.
    """

    def __init__(self, data):
        if len(data) < self.get_size():
            raise DDSException('Not a DDS file')
        items = struct.unpack(self.get_format(), data)
        # zip() pairs each field with its unpacked value.  The counts always
        # match because get_format() is derived from _fields.  (The original
        # used the Python-2-only ``map(None, ...)`` idiom, which raises a
        # TypeError on Python 3; zip behaves identically here on both.)
        for field, value in zip(self._fields, items):
            setattr(self, field[0], value)

    def __repr__(self):
        # Multi-line repr: one "name = value" per field, aligned under the
        # class name.
        name = self.__class__.__name__
        return '%s(%s)' % \
            (name, (', \n%s' % (' ' * (len(name) + 1))).join( \
                ['%s = %s' % (field[0], repr(getattr(self, field[0]))) \
                    for field in self._fields]))

    @classmethod
    def get_format(cls):
        """Return the little-endian struct format string for ``_fields``."""
        return '<' + ''.join([f[1] for f in cls._fields])

    @classmethod
    def get_size(cls):
        """Return the packed byte size of ``_fields``."""
        return struct.calcsize(cls.get_format())
class DDSURFACEDESC2(_filestruct):
    """The DDSURFACEDESC2 header at the start of a .dds file, including the
    4-byte 'DDS ' magic (see the MSDN reference in the module docstring)."""
    _fields = [
        ('dwMagic', '4s'),
        ('dwSize', 'I'),
        ('dwFlags', 'I'),
        ('dwHeight', 'I'),
        ('dwWidth', 'I'),
        ('dwPitchOrLinearSize', 'I'),
        ('dwDepth', 'I'),
        ('dwMipMapCount', 'I'),
        ('dwReserved1', '44s'),
        ('ddpfPixelFormat', '32s'),
        ('dwCaps1', 'I'),
        ('dwCaps2', 'I'),
        ('dwCapsReserved', '8s'),
        ('dwReserved2', 'I')
    ]

    def __init__(self, data):
        super(DDSURFACEDESC2, self).__init__(data)
        # The embedded 32-byte pixel-format blob is parsed into its own
        # structure, replacing the raw bytes unpacked by the base class.
        self.ddpfPixelFormat = DDPIXELFORMAT(self.ddpfPixelFormat)
class DDPIXELFORMAT(_filestruct):
    """The DDPIXELFORMAT sub-structure embedded in DDSURFACEDESC2."""
    _fields = [
        ('dwSize', 'I'),
        ('dwFlags', 'I'),
        ('dwFourCC', '4s'),
        ('dwRGBBitCount', 'I'),
        ('dwRBitMask', 'I'),
        ('dwGBitMask', 'I'),
        ('dwBBitMask', 'I'),
        ('dwRGBAlphaBitMask', 'I')
    ]
# Maps (FourCC, has_alpha) to (GL internal format, software decoder
# fallback).  DXT3/DXT5 use the same entry for both alpha variants.
_compression_formats = {
    ('DXT1', False): (GL_COMPRESSED_RGB_S3TC_DXT1_EXT, s3tc.decode_dxt1_rgb),
    ('DXT1', True): (GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, s3tc.decode_dxt1_rgba),
    ('DXT3', False): (GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, s3tc.decode_dxt3),
    ('DXT3', True): (GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, s3tc.decode_dxt3),
    ('DXT5', False): (GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, s3tc.decode_dxt5),
    ('DXT5', True): (GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, s3tc.decode_dxt5),
}
def _check_error():
    """Print any pending OpenGL error code (debug helper; does not raise)."""
    e = glGetError()
    if e != 0:
        # Single-argument print() behaves identically on Python 2 and 3;
        # the original used the Python-2-only print statement.
        print('GL error %d' % e)
class DDSImageDecoder(codecs.ImageDecoder):
    """Decoder for DXT1/DXT3/DXT5-compressed .dds texture files."""

    def get_file_extensions(self):
        return ['.dds']

    def decode(self, file, filename):
        """Decode *file* into a CompressedImageData (with mipmaps, if any).

        Raises DDSException for malformed headers and for the unsupported
        volume / cube-map / uncompressed / unknown-FourCC cases.
        """
        header = file.read(DDSURFACEDESC2.get_size())
        desc = DDSURFACEDESC2(header)
        if desc.dwMagic != 'DDS ' or desc.dwSize != 124:
            raise DDSException('Invalid DDS file (incorrect header).')

        width = desc.dwWidth
        height = desc.dwHeight
        compressed = False
        mipmaps = 1

        if desc.dwFlags & DDSD_PITCH:
            pitch = desc.dwPitchOrLinearSize
        elif desc.dwFlags & DDSD_LINEARSIZE:
            image_size = desc.dwPitchOrLinearSize
            compressed = True

        # Volume (3D) textures are not supported.  (The original also had
        # two unreachable statements after this raise; removed.)
        if desc.dwFlags & DDSD_DEPTH:
            raise DDSException('Volume DDS files unsupported')

        if desc.dwFlags & DDSD_MIPMAPCOUNT:
            mipmaps = desc.dwMipMapCount

        if desc.ddpfPixelFormat.dwSize != 32:
            raise DDSException('Invalid DDS file (incorrect pixel format).')

        if desc.dwCaps2 & DDSCAPS2_CUBEMAP:
            raise DDSException('Cubemap DDS files unsupported')

        if not desc.ddpfPixelFormat.dwFlags & DDPF_FOURCC:
            raise DDSException('Uncompressed DDS textures not supported.')

        has_alpha = desc.ddpfPixelFormat.dwRGBAlphaBitMask != 0

        # BUG FIX: the original did
        #   format, decoder = _compression_formats.get(key, None)
        # which raised TypeError (cannot unpack None) for an unknown FourCC
        # instead of the DDSException intended below.  Look up first, then
        # unpack.
        entry = _compression_formats.get(
            (desc.ddpfPixelFormat.dwFourCC, has_alpha))
        if entry is None:
            raise DDSException('Unsupported texture compression %s' % \
                desc.ddpfPixelFormat.dwFourCC)
        format, decoder = entry

        # DXT1 packs a 4x4 texel block into 8 bytes; DXT3/DXT5 use 16.
        if format == GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
            block_size = 8
        else:
            block_size = 16

        datas = []
        w, h = width, height
        for i in range(mipmaps):
            if not w and not h:
                break
            # Each dimension is clamped to at least 1 texel per mip level.
            if not w:
                w = 1
            if not h:
                h = 1
            # Floor division keeps the byte count an int on both Python 2
            # and 3 (true division would pass a float to file.read).
            size = ((w + 3) // 4) * ((h + 3) // 4) * block_size
            data = file.read(size)
            datas.append(data)
            w >>= 1
            h >>= 1

        image = CompressedImageData(width, height, format, datas[0],
            'GL_EXT_texture_compression_s3tc', decoder)

        # Remaining data chunks are successively smaller mipmap levels.
        level = 0
        for data in datas[1:]:
            level += 1
            image.set_mipmap_data(level, data)

        return image
def get_decoders():
    """Return this module's decoder instances."""
    return [DDSImageDecoder()]
def get_encoders():
    """Return this module's encoder instances; DDS writing is not
    implemented, so there are none."""
    encoders = []
    return encoders
| |
"""The Bravia TV component."""
from __future__ import annotations
import asyncio
from collections.abc import Iterable
from datetime import timedelta
import logging
from typing import Final
from bravia_tv import BraviaRC
from bravia_tv.braviarc import NoIPControl
from homeassistant.components.media_player import DOMAIN as MEDIA_PLAYER_DOMAIN
from homeassistant.components.remote import DOMAIN as REMOTE_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_MAC, CONF_PIN
from homeassistant.core import HomeAssistant
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import CLIENTID_PREFIX, CONF_IGNORED_SOURCES, DOMAIN, NICKNAME
_LOGGER = logging.getLogger(__name__)

# Platforms set up for each config entry.
PLATFORMS: Final[list[str]] = [MEDIA_PLAYER_DOMAIN, REMOTE_DOMAIN]
# Polling interval used by the data update coordinator.
SCAN_INTERVAL: Final = timedelta(seconds=10)
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
    """Set up a config entry."""
    entry_data = config_entry.data
    ignored = config_entry.options.get(CONF_IGNORED_SOURCES, [])
    coordinator = BraviaTVCoordinator(
        hass,
        entry_data[CONF_HOST],
        entry_data[CONF_MAC],
        entry_data[CONF_PIN],
        ignored,
    )
    # Reload the entry whenever its options change.
    config_entry.async_on_unload(config_entry.add_update_listener(update_listener))

    await coordinator.async_config_entry_first_refresh()

    hass.data.setdefault(DOMAIN, {})[config_entry.entry_id] = coordinator

    hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
    return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    if not await hass.config_entries.async_unload_platforms(
        config_entry, PLATFORMS
    ):
        return False
    # All platforms unloaded: drop the coordinator for this entry.
    hass.data[DOMAIN].pop(config_entry.entry_id)
    return True
async def update_listener(hass: HomeAssistant, config_entry: ConfigEntry) -> None:
    """Handle options update."""
    # Reload the entry so changed options (e.g. ignored sources) take effect.
    await hass.config_entries.async_reload(config_entry.entry_id)
class BraviaTVCoordinator(DataUpdateCoordinator[None]):
    """Representation of a Bravia TV Coordinator.

    An instance is used per device to share the same power state between
    several platforms.
    """

    def __init__(
        self,
        hass: HomeAssistant,
        host: str,
        mac: str,
        pin: str,
        ignored_sources: list[str],
    ) -> None:
        """Initialize Bravia TV Client."""
        self.braviarc = BraviaRC(host, mac)
        self.pin = pin
        self.ignored_sources = ignored_sources
        # State below is refreshed by _async_update_data() polling.
        self.muted: bool = False
        self.channel_name: str | None = None
        self.media_title: str | None = None
        self.source: str | None = None
        self.source_list: list[str] = []
        self.original_content_list: list[str] = []
        # Maps source title -> content URI as reported by the TV.
        self.content_mapping: dict[str, str] = {}
        self.duration: int | None = None
        self.content_uri: str | None = None
        self.program_media_type: str | None = None
        self.audio_output: str | None = None
        self.min_volume: int | None = None
        self.max_volume: int | None = None
        self.volume_level: float | None = None
        self.is_on = False
        # Assume that the TV is in Play mode
        self.playing = True
        # Held while commands run; polling is skipped while locked.
        self.state_lock = asyncio.Lock()

        super().__init__(
            hass,
            _LOGGER,
            name=DOMAIN,
            update_interval=SCAN_INTERVAL,
            request_refresh_debouncer=Debouncer(
                hass, _LOGGER, cooldown=1.0, immediate=False
            ),
        )

    def _send_command(self, command: Iterable[str], repeats: int = 1) -> None:
        """Send a command to the TV.

        Each element of *command* is sent individually, *repeats* times.
        (Annotation corrected from ``str``: async_send_command() passes an
        Iterable[str].)
        """
        for _ in range(repeats):
            for cmd in command:
                self.braviarc.send_command(cmd)

    def _get_source(self) -> str | None:
        """Return the name of the source matching the current content URI."""
        for key, value in self.content_mapping.items():
            if value == self.content_uri:
                return key
        return None

    def _refresh_volume(self) -> bool:
        """Refresh volume information; return True on success."""
        volume_info = self.braviarc.get_volume_info(self.audio_output)
        if volume_info is not None:
            volume = volume_info.get("volume")
            # TV reports 0-100; entity state uses 0.0-1.0.
            self.volume_level = volume / 100 if volume is not None else None
            self.audio_output = volume_info.get("target")
            self.min_volume = volume_info.get("minVolume")
            self.max_volume = volume_info.get("maxVolume")
            self.muted = volume_info.get("mute", False)
            return True
        return False

    def _refresh_channels(self) -> bool:
        """Refresh source and channels list; return True on success."""
        # Only loaded once: the list is rebuilt only while it is empty.
        if not self.source_list:
            self.content_mapping = self.braviarc.load_source_list()
            self.source_list = []
            if not self.content_mapping:
                return False
            for key in self.content_mapping:
                if key not in self.ignored_sources:
                    self.source_list.append(key)
        return True

    def _refresh_playing_info(self) -> None:
        """Refresh playing information."""
        playing_info = self.braviarc.get_playing_info()
        program_name = playing_info.get("programTitle")
        self.channel_name = playing_info.get("title")
        self.program_media_type = playing_info.get("programMediaType")
        self.content_uri = playing_info.get("uri")
        self.source = self._get_source()
        self.duration = playing_info.get("durationSec")
        # An empty response is treated as an app being in the foreground.
        if not playing_info:
            self.channel_name = "App"
        if self.channel_name is not None:
            self.media_title = self.channel_name
            if program_name is not None:
                self.media_title = f"{self.media_title}: {program_name}"
        else:
            self.media_title = None

    def _update_tv_data(self) -> None:
        """Connect and update TV info (runs in the executor)."""
        power_status = self.braviarc.get_power_status()

        if power_status != "off":
            connected = self.braviarc.is_connected()
            if not connected:
                try:
                    connected = self.braviarc.connect(
                        self.pin, CLIENTID_PREFIX, NICKNAME
                    )
                except NoIPControl:
                    _LOGGER.error("IP Control is disabled in the TV settings")
            # A TV we cannot connect to is reported as off.
            if not connected:
                power_status = "off"

        if power_status == "active":
            self.is_on = True
            if self._refresh_volume() and self._refresh_channels():
                self._refresh_playing_info()
                return

        self.is_on = False

    async def _async_update_data(self) -> None:
        """Fetch the latest data."""
        # Skip polling while a command holds the lock.
        if self.state_lock.locked():
            return

        await self.hass.async_add_executor_job(self._update_tv_data)

    async def async_turn_on(self) -> None:
        """Turn the device on."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(self.braviarc.turn_on)
            await self.async_request_refresh()

    async def async_turn_off(self) -> None:
        """Turn off device."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(self.braviarc.turn_off)
            await self.async_request_refresh()

    async def async_set_volume_level(self, volume: float) -> None:
        """Set volume level, range 0..1."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(
                self.braviarc.set_volume_level, volume, self.audio_output
            )
            await self.async_request_refresh()

    async def async_volume_up(self) -> None:
        """Send volume up command to device."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(
                self.braviarc.volume_up, self.audio_output
            )
            await self.async_request_refresh()

    async def async_volume_down(self) -> None:
        """Send volume down command to device."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(
                self.braviarc.volume_down, self.audio_output
            )
            await self.async_request_refresh()

    async def async_volume_mute(self, mute: bool) -> None:
        """Send mute command to device."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(self.braviarc.mute_volume, mute)
            await self.async_request_refresh()

    async def async_media_play(self) -> None:
        """Send play command to device."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(self.braviarc.media_play)
            self.playing = True
            await self.async_request_refresh()

    async def async_media_pause(self) -> None:
        """Send pause command to device."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(self.braviarc.media_pause)
            self.playing = False
            await self.async_request_refresh()

    async def async_media_stop(self) -> None:
        """Send stop command to device."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(self.braviarc.media_stop)
            self.playing = False
            await self.async_request_refresh()

    async def async_media_next_track(self) -> None:
        """Send next track command."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(self.braviarc.media_next_track)
            await self.async_request_refresh()

    async def async_media_previous_track(self) -> None:
        """Send previous track command."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(self.braviarc.media_previous_track)
            await self.async_request_refresh()

    async def async_select_source(self, source: str) -> None:
        """Set the input source."""
        # Unknown source names are silently ignored.
        if source in self.content_mapping:
            uri = self.content_mapping[source]
            async with self.state_lock:
                await self.hass.async_add_executor_job(self.braviarc.play_content, uri)
                await self.async_request_refresh()

    async def async_send_command(self, command: Iterable[str], repeats: int) -> None:
        """Send command to device."""
        async with self.state_lock:
            await self.hass.async_add_executor_job(self._send_command, command, repeats)
            await self.async_request_refresh()
| |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.core.resources.system import Directory, Execute, File
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.resources.template_config import TemplateConfig
from resource_management.core.resources.service import ServiceConfig
from resource_management.core.source import InlineTemplate, Template
from resource_management.libraries.functions.format import format
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons.str_utils import compress_backslashes
import glob
import os
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def ams(name=None):
  """Set up the named Ambari Metrics Service component on Windows.

  name: 'collector' or 'monitor'.  Registers the Windows service on first
  run, lays down configuration directories/files, and re-registers the
  service(s) to run as the AMS user.

  NOTE(review): `check_windows_service_exists` and `Script` are not in this
  file's visible import block — presumably provided by Ambari's script
  runtime; confirm.  The bare `0644` mode literals are Python 2 octal
  syntax, consistent with the rest of this script.
  """
  import params

  if name == 'collector':
    # One-time service registration for the collector.
    if not check_windows_service_exists(params.ams_collector_win_service_name):
      Execute(format("cmd /C cd {ams_collector_home_dir} & ambari-metrics-collector.cmd setup"))

    Directory(params.ams_collector_conf_dir,
              owner=params.ams_user,
              create_parents = True
    )

    Directory(params.ams_checkpoint_dir,
              owner=params.ams_user,
              create_parents = True
    )

    XmlConfig("ams-site.xml",
              conf_dir=params.ams_collector_conf_dir,
              configurations=params.config['configurations']['ams-site'],
              configuration_attributes=params.config['configuration_attributes']['ams-site'],
              owner=params.ams_user,
    )

    # Overlay the security settings on ams-hbase-site when Kerberos is on.
    merged_ams_hbase_site = {}
    merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-site'])
    if params.security_enabled:
      merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-security-site'])

    XmlConfig( "hbase-site.xml",
               conf_dir = params.ams_collector_conf_dir,
               configurations = merged_ams_hbase_site,
               configuration_attributes=params.config['configuration_attributes']['ams-hbase-site'],
               owner = params.ams_user,
    )

    if (params.log4j_props != None):
      File(os.path.join(params.ams_collector_conf_dir, "log4j.properties"),
           owner=params.ams_user,
           content=params.log4j_props
      )

    File(os.path.join(params.ams_collector_conf_dir, "ams-env.cmd"),
         owner=params.ams_user,
         content=InlineTemplate(params.ams_env_sh_template)
    )

    # Run the collector service as the AMS user.
    ServiceConfig(params.ams_collector_win_service_name,
                  action="change_user",
                  username = params.ams_user,
                  password = Script.get_password(params.ams_user))

    if not params.is_local_fs_rootdir:
      # Configuration needed to support NN HA
      XmlConfig("hdfs-site.xml",
            conf_dir=params.ams_collector_conf_dir,
            configurations=params.config['configurations']['hdfs-site'],
            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
            owner=params.ams_user,
            group=params.user_group,
            mode=0644
      )
      XmlConfig("hdfs-site.xml",
            conf_dir=params.hbase_conf_dir,
            configurations=params.config['configurations']['hdfs-site'],
            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
            owner=params.ams_user,
            group=params.user_group,
            mode=0644
      )
      XmlConfig("core-site.xml",
                conf_dir=params.ams_collector_conf_dir,
                configurations=params.config['configurations']['core-site'],
                configuration_attributes=params.config['configuration_attributes']['core-site'],
                owner=params.ams_user,
                group=params.user_group,
                mode=0644
      )
      XmlConfig("core-site.xml",
                conf_dir=params.hbase_conf_dir,
                configurations=params.config['configurations']['core-site'],
                configuration_attributes=params.config['configuration_attributes']['core-site'],
                owner=params.ams_user,
                group=params.user_group,
                mode=0644
      )
    else:
      # Embedded HBase also runs as the AMS user on a local filesystem root.
      ServiceConfig(params.ams_embedded_hbase_win_service_name,
                    action="change_user",
                    username = params.ams_user,
                    password = Script.get_password(params.ams_user))
      # creating symbolic links on ams jars to make them available to services
      links_pairs = [
        ("%COLLECTOR_HOME%\\hbase\\lib\\ambari-metrics-hadoop-sink-with-common.jar",
         "%SINK_HOME%\\hadoop-sink\\ambari-metrics-hadoop-sink-with-common-*.jar"),
      ]
      for link_pair in links_pairs:
        link, target = link_pair
        real_link = os.path.expandvars(link)
        target = compress_backslashes(glob.glob(os.path.expandvars(target))[0])
        if not os.path.exists(real_link):
          #TODO check the symlink destination too. Broken in Python 2.x on Windows.
          Execute('cmd /c mklink "{0}" "{1}"'.format(real_link, target))
    pass
  elif name == 'monitor':
    # One-time service registration for the monitor.
    if not check_windows_service_exists(params.ams_monitor_win_service_name):
      Execute(format("cmd /C cd {ams_monitor_home_dir} & ambari-metrics-monitor.cmd setup"))

    # creating symbolic links on ams jars to make them available to services
    links_pairs = [
      ("%HADOOP_HOME%\\share\\hadoop\\common\\lib\\ambari-metrics-hadoop-sink-with-common.jar",
       "%SINK_HOME%\\hadoop-sink\\ambari-metrics-hadoop-sink-with-common-*.jar"),
      ("%HBASE_HOME%\\lib\\ambari-metrics-hadoop-sink-with-common.jar",
       "%SINK_HOME%\\hadoop-sink\\ambari-metrics-hadoop-sink-with-common-*.jar"),
    ]
    for link_pair in links_pairs:
      link, target = link_pair
      real_link = os.path.expandvars(link)
      target = compress_backslashes(glob.glob(os.path.expandvars(target))[0])
      if not os.path.exists(real_link):
        #TODO check the symlink destination too. Broken in Python 2.x on Windows.
        Execute('cmd /c mklink "{0}" "{1}"'.format(real_link, target))

    Directory(params.ams_monitor_conf_dir,
              owner=params.ams_user,
              create_parents = True
    )

    if params.host_in_memory_aggregation and params.log4j_props is not None:
      File(os.path.join(params.ams_monitor_conf_dir, "log4j.properties"),
           owner=params.ams_user,
           content=params.log4j_props
           )

      XmlConfig("ams-site.xml",
                conf_dir=params.ams_monitor_conf_dir,
                configurations=params.config['configurations']['ams-site'],
                configuration_attributes=params.config['configuration_attributes']['ams-site'],
                owner=params.ams_user,
                group=params.user_group
                )

    TemplateConfig(
      os.path.join(params.ams_monitor_conf_dir, "metric_monitor.ini"),
      owner=params.ams_user,
      template_tag=None
    )

    TemplateConfig(
      os.path.join(params.ams_monitor_conf_dir, "metric_groups.conf"),
      owner=params.ams_user,
      template_tag=None
    )

    # Run the monitor service as the AMS user.
    ServiceConfig(params.ams_monitor_win_service_name,
                  action="change_user",
                  username = params.ams_user,
                  password = Script.get_password(params.ams_user))
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def ams(name=None, action=None):
import params
if name == 'collector':
Directory(params.ams_collector_conf_dir,
owner=params.ams_user,
group=params.user_group,
create_parents = True,
recursive_ownership = True,
)
Directory(params.ams_checkpoint_dir,
owner=params.ams_user,
group=params.user_group,
cd_access="a",
create_parents = True,
recursive_ownership = True
)
XmlConfig("ams-site.xml",
conf_dir=params.ams_collector_conf_dir,
configurations=params.config['configurations']['ams-site'],
configuration_attributes=params.config['configuration_attributes']['ams-site'],
owner=params.ams_user,
group=params.user_group
)
XmlConfig("ssl-server.xml",
conf_dir=params.ams_collector_conf_dir,
configurations=params.config['configurations']['ams-ssl-server'],
configuration_attributes=params.config['configuration_attributes']['ams-ssl-server'],
owner=params.ams_user,
group=params.user_group
)
merged_ams_hbase_site = {}
merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-site'])
if params.security_enabled:
merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-security-site'])
# Add phoenix client side overrides
merged_ams_hbase_site['phoenix.query.maxGlobalMemoryPercentage'] = str(params.phoenix_max_global_mem_percent)
merged_ams_hbase_site['phoenix.spool.directory'] = params.phoenix_client_spool_dir
XmlConfig( "hbase-site.xml",
conf_dir = params.ams_collector_conf_dir,
configurations = merged_ams_hbase_site,
configuration_attributes=params.config['configuration_attributes']['ams-hbase-site'],
owner = params.ams_user,
group = params.user_group
)
if params.security_enabled:
TemplateConfig(os.path.join(params.hbase_conf_dir, "ams_collector_jaas.conf"),
owner = params.ams_user,
template_tag = None)
if (params.log4j_props != None):
File(format("{params.ams_collector_conf_dir}/log4j.properties"),
mode=0644,
group=params.user_group,
owner=params.ams_user,
content=InlineTemplate(params.log4j_props)
)
File(format("{ams_collector_conf_dir}/ams-env.sh"),
owner=params.ams_user,
content=InlineTemplate(params.ams_env_sh_template)
)
Directory(params.ams_collector_log_dir,
owner=params.ams_user,
group=params.user_group,
cd_access="a",
create_parents = True,
mode=0755,
)
Directory(params.ams_collector_pid_dir,
owner=params.ams_user,
group=params.user_group,
cd_access="a",
create_parents = True,
mode=0755,
)
# Hack to allow native HBase libs to be included for embedded hbase
File(os.path.join(params.ams_hbase_home_dir, "bin", "hadoop"),
owner=params.ams_user,
mode=0755
)
# On some OS this folder could be not exists, so we will create it before pushing there files
Directory(params.limits_conf_dir,
create_parents = True,
owner='root',
group='root'
)
# Setting up security limits
File(os.path.join(params.limits_conf_dir, 'ams.conf'),
owner='root',
group='root',
mode=0644,
content=Template("ams.conf.j2")
)
# Phoenix spool file dir if not /tmp
if not os.path.exists(params.phoenix_client_spool_dir):
Directory(params.phoenix_client_spool_dir,
owner=params.ams_user,
mode = 0755,
group=params.user_group,
cd_access="a",
create_parents = True
)
pass
if not params.is_local_fs_rootdir and params.is_ams_distributed:
# Configuration needed to support NN HA
XmlConfig("hdfs-site.xml",
conf_dir=params.ams_collector_conf_dir,
configurations=params.config['configurations']['hdfs-site'],
configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
owner=params.ams_user,
group=params.user_group,
mode=0644
)
XmlConfig("hdfs-site.xml",
conf_dir=params.hbase_conf_dir,
configurations=params.config['configurations']['hdfs-site'],
configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
owner=params.ams_user,
group=params.user_group,
mode=0644
)
# Remove spnego configs from core-site if platform does not have python-kerberos library
truncated_core_site = {}
truncated_core_site.update(params.config['configurations']['core-site'])
if is_spnego_enabled(params) and is_redhat_centos_6_plus() == False:
truncated_core_site.pop('hadoop.http.authentication.type')
truncated_core_site.pop('hadoop.http.filter.initializers')
XmlConfig("core-site.xml",
conf_dir=params.ams_collector_conf_dir,
configurations=truncated_core_site,
configuration_attributes=params.config['configuration_attributes']['core-site'],
owner=params.ams_user,
group=params.user_group,
mode=0644
)
XmlConfig("core-site.xml",
conf_dir=params.hbase_conf_dir,
configurations=truncated_core_site,
configuration_attributes=params.config['configuration_attributes']['core-site'],
owner=params.ams_user,
group=params.user_group,
mode=0644
)
if params.metric_collector_https_enabled:
export_ca_certs(params.ams_collector_conf_dir)
pass
elif name == 'monitor':
if is_spnego_enabled(params) and is_redhat_centos_6_plus():
try:
import kerberos
except ImportError:
raise ImportError("python-kerberos package need to be installed to run AMS in SPNEGO mode")
Directory(params.ams_monitor_conf_dir,
owner=params.ams_user,
group=params.user_group,
create_parents = True
)
Directory(params.ams_monitor_log_dir,
owner=params.ams_user,
group=params.user_group,
mode=0755,
create_parents = True
)
if params.host_in_memory_aggregation and params.log4j_props is not None:
File(format("{params.ams_monitor_conf_dir}/log4j.properties"),
mode=0644,
group=params.user_group,
owner=params.ams_user,
content=InlineTemplate(params.log4j_props)
)
XmlConfig("ams-site.xml",
conf_dir=params.ams_monitor_conf_dir,
configurations=params.config['configurations']['ams-site'],
configuration_attributes=params.config['configuration_attributes']['ams-site'],
owner=params.ams_user,
group=params.user_group
)
Execute(format("{sudo} chown -R {ams_user}:{user_group} {ams_monitor_log_dir}")
)
Directory(params.ams_monitor_pid_dir,
owner=params.ams_user,
group=params.user_group,
cd_access="a",
mode=0755,
create_parents = True
)
Directory(format("{ams_monitor_dir}/psutil/build"),
owner=params.ams_user,
group=params.user_group,
cd_access="a",
create_parents = True)
Execute(format("{sudo} chown -R {ams_user}:{user_group} {ams_monitor_dir}")
)
TemplateConfig(
format("{ams_monitor_conf_dir}/metric_monitor.ini"),
owner=params.ams_user,
group=params.user_group,
template_tag=None
)
TemplateConfig(
format("{ams_monitor_conf_dir}/metric_groups.conf"),
owner=params.ams_user,
group=params.user_group,
template_tag=None
)
File(format("{ams_monitor_conf_dir}/ams-env.sh"),
owner=params.ams_user,
content=InlineTemplate(params.ams_env_sh_template)
)
if params.metric_collector_https_enabled:
export_ca_certs(params.ams_monitor_conf_dir)
pass
elif name == 'grafana':
ams_grafana_directories = [
params.ams_grafana_conf_dir,
params.ams_grafana_log_dir,
params.ams_grafana_data_dir,
params.ams_grafana_pid_dir
]
for ams_grafana_directory in ams_grafana_directories:
Directory(ams_grafana_directory,
owner=params.ams_user,
group=params.user_group,
mode=0755,
create_parents = True,
recursive_ownership = True
)
File(format("{ams_grafana_conf_dir}/ams-grafana-env.sh"),
owner=params.ams_user,
group=params.user_group,
content=InlineTemplate(params.ams_grafana_env_sh_template)
)
File(format("{ams_grafana_conf_dir}/ams-grafana.ini"),
owner=params.ams_user,
group=params.user_group,
content=InlineTemplate(params.ams_grafana_ini_template),
mode=0600
)
if action != 'stop':
for dir in ams_grafana_directories:
Execute(('chown', '-R', params.ams_user, dir),
sudo=True
)
if params.metric_collector_https_enabled:
export_ca_certs(params.ams_grafana_conf_dir)
pass
def is_spnego_enabled(params):
  """Return True when core-site enables SPNEGO for the Hadoop HTTP endpoints.

  SPNEGO is considered enabled only when BOTH of these core-site properties
  are present and set to the exact expected values:
    hadoop.http.authentication.type  == "kerberos"
    hadoop.http.filter.initializers  == the AuthenticationFilterInitializer class
  """
  core_site = params.config['configurations'].get('core-site')
  if core_site is None:
    return False
  auth_type = core_site.get('hadoop.http.authentication.type')
  filter_initializers = core_site.get('hadoop.http.filter.initializers')
  return (auth_type == "kerberos"
          and filter_initializers == "org.apache.hadoop.security.AuthenticationFilterInitializer")
def is_redhat_centos_6_plus(dist_info=None):
  """Return True when running on RedHat/CentOS newer than release 6.0.

  :param dist_info: optional (distname, version, id) tuple in the shape
      returned by platform.dist(); when None the live platform is probed.
      Exposed as a parameter (backward-compatible default) for testability.
  """
  if dist_info is None:
    import platform
    dist_info = platform.dist()
  if dist_info[0] not in ('redhat', 'centos'):
    return False
  # Compare numerically. The previous string comparison ("> '6.0'") would
  # misclassify double-digit major releases: '10.0' > '6.0' is False.
  try:
    version = tuple(int(part) for part in dist_info[1].split('.')[:2])
  except ValueError:
    # Unparseable version string: be conservative and report not-6-plus.
    return False
  return version > (6, 0)
def export_ca_certs(dir_path):
  """Export the CA certificates from the metrics truststore into dir_path.

  Runs on every restart so changed truststore content is picked up.
  The shell commands interpolate variables from the `params` module via
  Ambari's format(); Execute runs them as Ambari shell resources.
  """
  # export ca certificates on every restart to handle changed truststore content
  import params
  import tempfile
  ca_certs_path = os.path.join(dir_path, params.metric_truststore_ca_certs)
  truststore = params.metric_truststore_path
  # Work in a private temp dir so intermediate keystores never land in conf.
  tmpdir = tempfile.mkdtemp()
  truststore_p12 = os.path.join(tmpdir,'truststore.p12')
  if (params.metric_truststore_type.lower() == 'jks'):
    # Convert truststore from JKS to PKCS12 (openssl below only reads PKCS12)
    cmd = format("{sudo} {java64_home}/bin/keytool -importkeystore -srckeystore {metric_truststore_path} -destkeystore {truststore_p12} -srcalias {metric_truststore_alias} -deststoretype PKCS12 -srcstorepass {metric_truststore_password} -deststorepass {metric_truststore_password}")
    Execute(cmd,
    )
    truststore = truststore_p12
  # Export all CA certificates from the truststore to the conf directory
  cmd = format("{sudo} openssl pkcs12 -in {truststore} -out {ca_certs_path} -cacerts -nokeys -passin pass:{metric_truststore_password}")
  Execute(cmd,
  )
  # Hand the exported certs to the AMS user and make them world-readable.
  Execute(('chown', format('{ams_user}:{user_group}'), ca_certs_path),
          sudo=True
  )
  Execute(('chmod', '644', ca_certs_path),
          sudo = True,
  )
  # Clean up the temp dir including any intermediate PKCS12 keystore.
  Execute(format('{sudo} rm -rf {tmpdir}')
  )
  pass
| |
__all__ = ['DataTable']
import FileIO
from ..common import requires
from warnings import warn
import numpy as np
__author__ = "Charles R Schmidt <schmidtc@gmail.com>"
class DataTable(FileIO.FileIO):
    """DataTable provides additional functionality to FileIO for data table files.

    FileIO handlers that provide data tables should subclass this instead of
    FileIO. Subclasses must implement ``__len__`` (and the usual FileIO
    ``seek``/``tell``/``next``/``read`` protocol) to get 2D slicing and
    column access for free.
    """
    class _By_Col:
        # Lightweight proxy giving dict-style, column-wise access to the
        # parent table: table.by_col['NAME'] or table.by_col('NAME').
        def __init__(self, parent):
            self.p = parent
        def __repr__(self):
            return "keys: " + self.p.header.__repr__()
        def __getitem__(self, key):
            return self.p._get_col(key)
        def __setitem__(self, key, val):
            # Delegates to the handler's cast(); presumably registers a type
            # caster for the column — behavior depends on the FileIO
            # subclass. TODO(review): confirm against FileIO.cast.
            self.p.cast(key, val)
        def __call__(self, key):
            return self.p._get_col(key)
    def __init__(self, *args, **kwargs):
        FileIO.FileIO.__init__(self, *args, **kwargs)
    def __repr__(self):
        return 'DataTable: % s' % self.dataPath
    def __len__(self):
        """ __len__ should be implemented by DataTable Subclasses """
        raise NotImplementedError
    @property
    def by_col(self):
        # A fresh proxy per access; cheap since it only holds a back-reference.
        return self._By_Col(self)
    def _get_col(self, key):
        """Return the column vector named *key*; raises AttributeError when
        the header is unset or the field is unknown.
        """
        if not self.header:
            raise AttributeError('Please set the header')
        if key in self.header:
            return self[:, self.header.index(key)]
        else:
            raise AttributeError('Field: % s does not exist in header' % key)
    def by_col_array(self, *args):
        """
        Return columns of table as a numpy array.
        Parameters
        ----------
        *args: any number of strings of length k
               names of variables to extract
        Returns
        -------
        implicit: numpy array of shape (n,k)
        Notes
        -----
        If the variables are not all of the same data type, then numpy rules
        for casting will result in a uniform type applied to all variables.
        If only strings are passed to the function, then an array with those
        columns will be constructed.
        If only one list of strings is passed, the output is identical to those
        strings being passed.
        If at least one list is passed and other strings or lists are passed,
        this returns a tuple containing arrays constructed from each positional
        argument.
        Examples
        --------
        >>> import pysal as ps
        >>> dbf = ps.open(ps.examples.get_path('NAT.dbf'))
        >>> hr = dbf.by_col_array('HR70', 'HR80')
        >>> hr[0:5]
        array([[  0.        ,   8.85582713],
               [  0.        ,  17.20874204],
               [  1.91515848,   3.4507747 ],
               [  1.28864319,   3.26381409],
               [  0.        ,   7.77000777]])
        >>> hr = dbf.by_col_array(['HR80', 'HR70'])
        >>> hr[0:5]
        array([[  8.85582713,   0.        ],
               [ 17.20874204,   0.        ],
               [  3.4507747 ,   1.91515848],
               [  3.26381409,   1.28864319],
               [  7.77000777,   0.        ]])
        >>> hr = dbf.by_col_array(['HR80'])
        >>> hr[0:5]
        array([[  8.85582713],
               [ 17.20874204],
               [  3.4507747 ],
               [  3.26381409],
               [  7.77000777]])
        Numpy only supports homogeneous arrays. See Notes above.
        >>> hr = dbf.by_col_array('STATE_NAME', 'HR80')
        >>> hr[0:5]
        array([['Minnesota', '8.8558271343'],
               ['Washington', '17.208742041'],
               ['Washington', '3.4507746989'],
               ['Washington', '3.2638140931'],
               ['Washington', '7.77000777']],
              dtype='|S20')
        >>> y, X = dbf.by_col_array('STATE_NAME', ['HR80', 'HR70'])
        >>> y[0:5]
        array([['Minnesota'],
               ['Washington'],
               ['Washington'],
               ['Washington'],
               ['Washington']],
              dtype='|S20')
        >>> X[0:5]
        array([[  8.85582713,   0.        ],
               [ 17.20874204,   0.        ],
               [  3.4507747 ,   1.91515848],
               [  3.26381409,   1.28864319],
               [  7.77000777,   0.        ]])
        """
        if any([isinstance(arg, list) for arg in args]):
            results = []
            for namelist in args:
                if isinstance(namelist, str):
                    results.append([self._get_col(namelist)])
                else:
                    results.append([self._get_col(vbl) for vbl in namelist])
            # One positional argument yields a single array; several yield a
            # tuple of arrays, one per argument.
            if len(results) == 1:
                return np.array(results[0]).T
            else:
                return tuple(np.array(lst).T for lst in results)
        else:
            # All-string arguments build one (n, k) array, columns in order.
            return np.array([self._get_col(name) for name in args]).T
    def __getitem__(self, key):
        """ DataTables fully support slicing in 2D,
        To provide slicing, handlers must provide __len__
        Slicing accepts up to two arguments.
        Syntax,
        table[row]
        table[row, col]
        table[row_start:row_stop]
        table[row_start:row_stop:row_step]
        table[:, col]
        table[:, col_start:col_stop]
        etc.
        ALL indices are Zero-Offsets,
        i.e.
        #>>> assert index in range(0, len(table))
        """
        # Remember the cursor so slicing does not disturb sequential reads.
        prevPos = self.tell()
        if issubclass(type(key), basestring):
            raise TypeError("index should be int or slice")
        if issubclass(type(key), int) or isinstance(key, slice):
            rows = key
            cols = None
        elif len(key) > 2:
            raise TypeError("DataTables support two dimmensional slicing, % d slices provided" % len(key))
        elif len(key) == 2:
            rows, cols = key
        else:
            raise TypeError("Key: % r, is confusing me. I don't know what to do" % key)
        if isinstance(rows, slice):
            row_start, row_stop, row_step = rows.indices(len(self))
            self.seek(row_start)
            data = [self.next() for i in range(row_start, row_stop, row_step)]
        else:
            # slice(rows).indices(len(self))[1] clamps a single row index
            # into [0, len(self)] before seeking.
            self.seek(slice(rows).indices(len(self))[1])
            data = [self.next()]
        if cols is not None:
            if isinstance(cols, slice):
                col_start, col_stop, col_step = cols.indices(len(data[0]))
                data = [r[col_start:col_stop:col_step] for r in data]
            else:
                #col_start, col_stop, col_step = cols, cols+1, 1
                data = [r[cols] for r in data]
        # Restore the original cursor position (see prevPos above).
        self.seek(prevPos)
        return data
    @requires('pandas')
    def to_df(self, n=-1, read_shp=None, **df_kws):
        """Read the table into a pandas DataFrame.

        n: number of records to read (-1 reads all).
        read_shp: when not False, try to attach a 'geometry' column read from
            the sibling .shp file; a failed read emits a warning instead of
            raising. **df_kws are forwarded to the DataFrame constructor.
        """
        import pandas as pd
        self.seek(0)
        header = self.header
        records = self.read(n)
        df = pd.DataFrame(records, columns=header, **df_kws)
        if read_shp is not False:
            if read_shp is True or self.dataPath.endswith('.dbf'):
                read_shp = self.dataPath[:-3] + 'shp'
            try:
                import pysal.contrib.pdio.shp as shp
                df['geometry'] = shp.shp2series(self.dataPath[:-3] + 'shp')
            except IOError as e:
                warn('Encountered the following error in attempting to read'
                     ' the shapefile {}. Proceeding with read, but the error'
                     ' will be reproduced below:\n'
                     ' {}'.format(self.dataPath[:-3]+'shp', e))
        return df
def _test():
    """Execute this module's doctests with verbose reporting."""
    from doctest import testmod
    testmod(verbose=True)
if __name__ == '__main__':
    # Running this module directly executes its doctests.
    _test()
| |
"""Support for XBee Zigbee devices."""
from binascii import hexlify, unhexlify
import logging
from serial import Serial, SerialException
import voluptuous as vol
from xbee_helper import ZigBee
import xbee_helper.const as xb_const
from xbee_helper.device import convert_adc
from xbee_helper.exceptions import ZigBeeException, ZigBeeTxFailure
from homeassistant.const import (
CONF_ADDRESS,
CONF_DEVICE,
CONF_NAME,
CONF_PIN,
EVENT_HOMEASSISTANT_STOP,
PERCENTAGE,
)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect, dispatcher_send
from homeassistant.helpers.entity import Entity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
# Dispatcher signal used to fan received XBee frames out to the entities.
SIGNAL_XBEE_FRAME_RECEIVED = "xbee_frame_received"
CONF_BAUD = "baud"
# Defaults used when the user omits the serial settings.
DEFAULT_DEVICE = "/dev/ttyUSB0"
DEFAULT_BAUD = 9600
# Voltage at which an ADC reading reports its highest value, unless the
# entity config overrides it via "max_volts".
DEFAULT_ADC_MAX_VOLTS = 1.2
ATTR_FRAME = "frame"
# Component-level configuration: serial device path and baud rate.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Optional(CONF_BAUD, default=DEFAULT_BAUD): cv.string,
                vol.Optional(CONF_DEVICE, default=DEFAULT_DEVICE): cv.string,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
# Per-platform entity configuration: required name, optional pin/address.
PLATFORM_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_NAME): cv.string,
        vol.Optional(CONF_PIN): cv.positive_int,
        vol.Optional(CONF_ADDRESS): cv.string,
    },
    extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
    """Set up the connection to the XBee Zigbee device."""
    device_path = config[DOMAIN].get(CONF_DEVICE, DEFAULT_DEVICE)
    baud_rate = int(config[DOMAIN].get(CONF_BAUD, DEFAULT_BAUD))
    try:
        serial_port = Serial(device_path, baud_rate)
    except SerialException as exc:
        _LOGGER.exception("Unable to open serial port for XBee: %s", exc)
        return False

    zigbee_device = ZigBee(serial_port)

    def close_serial_port(*args):
        """Close the serial port we're using to communicate with the XBee."""
        zigbee_device.zb.serial.close()

    def _frame_received(frame):
        """Relay a frame received from the XBee onto the dispatcher signal."""
        dispatcher_send(hass, SIGNAL_XBEE_FRAME_RECEIVED, frame)

    # Share the device with the entity platforms and make sure the serial
    # port is released on shutdown.
    hass.data[DOMAIN] = zigbee_device
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, close_serial_port)
    zigbee_device.add_frame_rx_handler(_frame_received)
    return True
def frame_is_relevant(entity, frame):
    """Return True when *frame* came from *entity*'s device and has samples."""
    from_this_device = frame.get("source_addr_long") == entity.config.address
    return from_this_device and "samples" in frame
class XBeeConfig:
    """Wrap the raw entity config dict and expose typed accessors."""

    def __init__(self, config):
        """Store the raw mapping and cache the polling preference."""
        self._config = config
        # Polling defaults to on unless the config explicitly disables it.
        self._should_poll = config.get("poll", True)

    @property
    def name(self):
        """Name given to the entity in the configuration."""
        return self._config["name"]

    @property
    def address(self):
        """Binary device address, or None when targeting the local XBee.

        A configured address is a hex string and is unhexlified before use.
        """
        raw = self._config.get("address")
        return None if raw is None else unhexlify(raw)

    @property
    def should_poll(self):
        """Whether Home Assistant should poll this entity."""
        return self._should_poll
class XBeePinConfig(XBeeConfig):
    """Configuration for an entity bound to a specific GPIO pin."""
    @property
    def pin(self):
        """Return the GPIO pin number (required key for pin-backed entities)."""
        return self._config["pin"]
class XBeeDigitalInConfig(XBeePinConfig):
    """Pin configuration for a digital input, with optional state inversion."""

    def __init__(self, config):
        """Build the config and pre-compute both state translation maps."""
        super().__init__(config)
        self._bool2state, self._state2bool = self.boolean_maps

    @property
    def boolean_maps(self):
        """Build the forward and reverse boolean translation maps.

        When the "on_state" option is set to "low" the pin reading is
        inverted; otherwise it passes through unchanged.
        """
        inverted = self._config.get("on_state", "").lower() == "low"
        bool2state = {True: not inverted, False: inverted}
        state2bool = {reading: flag for flag, reading in bool2state.items()}
        return bool2state, state2bool

    @property
    def bool2state(self):
        """Map an internal on/off boolean to the corresponding pin reading."""
        return self._bool2state

    @property
    def state2bool(self):
        """Map a pin reading back to the internal on/off boolean."""
        return self._state2bool
class XBeeDigitalOutConfig(XBeePinConfig):
    """Pin configuration for a digital output.

    Unlike inputs, outputs default to polling disabled; an explicit 'poll'
    entry in the config still takes precedence.
    """

    def __init__(self, config):
        """Build the config, the translation maps, and the polling default."""
        super().__init__(config)
        self._bool2state, self._state2bool = self.boolean_maps
        self._should_poll = config.get("poll", False)

    @property
    def boolean_maps(self):
        """Build maps between booleans and GPIO high/low settings.

        The "on_state" option ("low"/"high") selects which pin level
        represents 'on'.
        """
        if self._config.get("on_state", "").lower() == "low":
            on_level = xb_const.GPIO_DIGITAL_OUTPUT_LOW
            off_level = xb_const.GPIO_DIGITAL_OUTPUT_HIGH
        else:
            on_level = xb_const.GPIO_DIGITAL_OUTPUT_HIGH
            off_level = xb_const.GPIO_DIGITAL_OUTPUT_LOW
        bool2state = {True: on_level, False: off_level}
        state2bool = {level: flag for flag, level in bool2state.items()}
        return bool2state, state2bool

    @property
    def bool2state(self):
        """Map an on/off boolean to the GPIOSetting used to drive the pin."""
        return self._bool2state

    @property
    def state2bool(self):
        """Map a GPIOSetting read from the pin back to an on/off boolean."""
        return self._state2bool
class XBeeAnalogInConfig(XBeePinConfig):
    """Representation of a XBee Zigbee GPIO pin set to analog in."""
    @property
    def max_voltage(self):
        """Return the voltage for ADC to report its highest value."""
        # "max_volts" comes from the entity config; defaults to 1.2 V.
        return float(self._config.get("max_volts", DEFAULT_ADC_MAX_VOLTS))
class XBeeDigitalIn(Entity):
    """Representation of a GPIO pin configured as a digital input."""
    def __init__(self, config, device):
        """Initialize the device.

        config: pin/address/state-map configuration for this entity.
        device: the shared ZigBee helper used to talk to the radio.
        """
        self._config = config
        self._device = device
        # Last known pin state; False until a frame or poll updates it.
        self._state = False
    async def async_added_to_hass(self):
        """Register callbacks."""
        def handle_frame(frame):
            """Handle an incoming frame.
            Handle an incoming frame and update our status if it contains
            information relating to this device.
            """
            if not frame_is_relevant(self, frame):
                return
            # Frames carry a list of samples; only the first is inspected.
            sample = next(iter(frame["samples"]))
            pin_name = xb_const.DIGITAL_PINS[self._config.pin]
            if pin_name not in sample:
                # Doesn't contain information about our pin
                return
            # Set state to the value of sample, respecting any inversion
            # logic from the on_state config variable.
            # NOTE(review): state2bool[bool2state[x]] maps x back to itself
            # for inverse maps, so no inversion is applied here, whereas
            # update() below applies state2bool directly — confirm intent.
            self._state = self._config.state2bool[
                self._config.bool2state[sample[pin_name]]
            ]
            self.schedule_update_ha_state()
        async_dispatcher_connect(self.hass, SIGNAL_XBEE_FRAME_RECEIVED, handle_frame)
    @property
    def name(self):
        """Return the name of the input."""
        return self._config.name
    @property
    def config(self):
        """Return the entity's configuration."""
        return self._config
    @property
    def should_poll(self):
        """Return the state of the polling, if needed."""
        return self._config.should_poll
    @property
    def is_on(self):
        """Return True if the Entity is on, else False."""
        return self._state
    def update(self):
        """Ask the Zigbee device what state its input pin is in."""
        try:
            sample = self._device.get_sample(self._config.address)
        except ZigBeeTxFailure:
            # Radio transmission failed; keep the previous state.
            _LOGGER.warning(
                "Transmission failure when attempting to get sample from "
                "Zigbee device at address: %s",
                hexlify(self._config.address),
            )
            return
        except ZigBeeException as exc:
            _LOGGER.exception("Unable to get sample from Zigbee device: %s", exc)
            return
        pin_name = xb_const.DIGITAL_PINS[self._config.pin]
        if pin_name not in sample:
            _LOGGER.warning(
                "Pin %s (%s) was not in the sample provided by Zigbee device %s",
                self._config.pin,
                pin_name,
                hexlify(self._config.address),
            )
            return
        # Translate the raw pin reading through the (possibly inverting) map.
        self._state = self._config.state2bool[sample[pin_name]]
class XBeeDigitalOut(XBeeDigitalIn):
    """Representation of a GPIO pin configured as a digital output."""
    def _set_state(self, state):
        """Drive the output pin to *state* via the XBee device."""
        try:
            self._device.set_gpio_pin(
                self._config.pin, self._config.bool2state[state], self._config.address
            )
        except ZigBeeTxFailure:
            # Radio transmission failed; leave the cached state unchanged.
            _LOGGER.warning(
                "Transmission failure when attempting to set output pin on "
                "Zigbee device at address: %s",
                hexlify(self._config.address),
            )
            return
        except ZigBeeException as exc:
            _LOGGER.exception("Unable to set digital pin on XBee device: %s", exc)
            return
        self._state = state
        # Push the new state to HA immediately when polling is off.
        if not self.should_poll:
            self.schedule_update_ha_state()
    def turn_on(self, **kwargs):
        """Set the digital output to its 'on' state."""
        self._set_state(True)
    def turn_off(self, **kwargs):
        """Set the digital output to its 'off' state."""
        self._set_state(False)
    def update(self):
        """Ask the XBee device what its output is set to."""
        try:
            pin_state = self._device.get_gpio_pin(
                self._config.pin, self._config.address
            )
        except ZigBeeTxFailure:
            _LOGGER.warning(
                "Transmission failure when attempting to get output pin status"
                " from Zigbee device at address: %s",
                hexlify(self._config.address),
            )
            return
        except ZigBeeException as exc:
            _LOGGER.exception(
                "Unable to get output pin status from XBee device: %s", exc
            )
            return
        # Translate the GPIOSetting read back into an on/off boolean.
        self._state = self._config.state2bool[pin_state]
class XBeeAnalogIn(Entity):
    """Representation of a GPIO pin configured as an analog input."""
    def __init__(self, config, device):
        """Initialize the XBee analog in device."""
        self._config = config
        self._device = device
        # Latest ADC reading (as a percentage); None until first sample.
        self._value = None
    async def async_added_to_hass(self):
        """Register callbacks."""
        def handle_frame(frame):
            """Handle an incoming frame.
            Handle an incoming frame and update our status if it contains
            information relating to this device.
            """
            if not frame_is_relevant(self, frame):
                return
            # NOTE(review): pop() takes the LAST sample and mutates the
            # shared frame dict — other listeners see one fewer sample.
            sample = frame["samples"].pop()
            pin_name = xb_const.ANALOG_PINS[self._config.pin]
            if pin_name not in sample:
                # Doesn't contain information about our pin
                return
            # Convert the raw ADC count to a percentage of max_voltage.
            self._value = convert_adc(
                sample[pin_name], xb_const.ADC_PERCENTAGE, self._config.max_voltage
            )
            self.schedule_update_ha_state()
        async_dispatcher_connect(self.hass, SIGNAL_XBEE_FRAME_RECEIVED, handle_frame)
    @property
    def name(self):
        """Return the name of the input."""
        return self._config.name
    @property
    def config(self):
        """Return the entity's configuration."""
        return self._config
    @property
    def should_poll(self):
        """Return the polling state, if needed."""
        return self._config.should_poll
    @property
    def state(self):
        """Return the state of the entity."""
        return self._value
    @property
    def unit_of_measurement(self):
        """Return the unit this state is expressed in."""
        return PERCENTAGE
    def update(self):
        """Get the latest reading from the ADC."""
        try:
            self._value = self._device.read_analog_pin(
                self._config.pin,
                self._config.max_voltage,
                self._config.address,
                xb_const.ADC_PERCENTAGE,
            )
        except ZigBeeTxFailure:
            # Keep the previous value on radio failure.
            _LOGGER.warning(
                "Transmission failure when attempting to get sample from "
                "Zigbee device at address: %s",
                hexlify(self._config.address),
            )
        except ZigBeeException as exc:
            _LOGGER.exception("Unable to get sample from Zigbee device: %s", exc)
| |
"""
Define a set of scopes to be used by COS Internal OAuth implementation, specifically tailored to work with APIv2.
List of scopes, nomenclature, and rationale can be found in the relevant "Login as OSF- phase 2" proposal document
"""
from collections import namedtuple
from website import settings
# Public scopes are described with 3 pieces of information: list of constituent scopes, a description, and whether or
# not this scope is available to be requested by the general public
class scope(namedtuple('scope', ['parts_', 'description', 'is_public'])):
    """One publicly selectable scope: constituent parts, description, visibility.

    The `parts` accessor widens the declared constituent scopes with
    ALWAYS_PUBLIC, ensuring public endpoints are reachable with any token.
    """

    @property
    def parts(self):
        # Declared parts plus the always-public marker, as a frozenset.
        return frozenset(self.parts_) | frozenset((CoreScopes.ALWAYS_PUBLIC,))
class CoreScopes(object):
    """
    The smallest units of permission that can be granted- all other scopes are built out of these.
    Each named constant is a single string."""
    # IMPORTANT: All views should be based on the smallest number of Core scopes required to describe
    # the data in that view
    # NOTE: the string prefixes are historically inconsistent ('nodes.' vs
    # 'node.'); they are opaque identifiers, so do NOT "fix" them — tokens
    # in the wild embed these exact strings.
    # --- User scopes ---
    USERS_READ = 'users_read'
    USERS_WRITE = 'users_write'
    USERS_CREATE = 'users_create'
    USER_EMAIL_READ = 'users.email_read'
    USER_ADDON_READ = 'users.addon_read'
    SUBSCRIPTIONS_READ = 'subscriptions_read'
    SUBSCRIPTIONS_WRITE = 'subscriptions_write'
    # --- Node scopes ---
    NODE_BASE_READ = 'nodes.base_read'
    NODE_BASE_WRITE = 'nodes.base_write'
    NODE_CHILDREN_READ = 'nodes.children_read'
    NODE_CHILDREN_WRITE = 'nodes.children_write'
    NODE_FORKS_READ = 'nodes.forks_read'
    NODE_FORKS_WRITE = 'nodes.forks_write'
    NODE_CONTRIBUTORS_READ = 'nodes.contributors_read'
    NODE_CONTRIBUTORS_WRITE = 'nodes.contributors_write'
    NODE_FILE_READ = 'nodes.files_read'
    NODE_FILE_WRITE = 'nodes.files_write'
    NODE_ADDON_READ = 'nodes.addon_read'
    NODE_ADDON_WRITE = 'nodes.addon_write'
    NODE_LINKS_READ = 'nodes.links_read'
    NODE_LINKS_WRITE = 'nodes.links_write'
    NODE_VIEW_ONLY_LINKS_READ = 'node.view_only_links_read'
    NODE_VIEW_ONLY_LINKS_WRITE = 'node.view_only_links_write'
    NODE_PREPRINTS_READ = 'node.preprints_read'
    NODE_PREPRINTS_WRITE = 'node.preprints_write'
    REGISTRATION_VIEW_ONLY_LINKS_READ = 'registration.view_only_links_read'
    REGISTRATION_VIEW_ONLY_LINKS_WRITE = 'registration.view_only_links_write'
    METASCHEMA_READ = 'metaschemas.read'
    NODE_DRAFT_REGISTRATIONS_READ = 'nodes.draft_registrations_read'
    NODE_DRAFT_REGISTRATIONS_WRITE = 'nodes.draft_registrations_write'
    NODE_REGISTRATIONS_READ = 'nodes.registrations_read'
    NODE_REGISTRATIONS_WRITE = 'nodes.registrations_write'
    NODE_CITATIONS_READ = 'nodes.citations_read'
    NODE_CITATIONS_WRITE = 'nodes.citations_write'
    NODE_COMMENTS_READ = 'comments.data_read'
    NODE_COMMENTS_WRITE = 'comments.data_write'
    LICENSE_READ = 'license.data_read'
    COMMENT_REPORTS_READ = 'comments.reports_read'
    COMMENT_REPORTS_WRITE = 'comments.reports_write'
    # --- Platform / admin scopes ---
    APPLICATIONS_READ = 'applications_read'
    APPLICATIONS_WRITE = 'applications_write'
    NODE_LOG_READ = 'nodes.logs_read'
    TOKENS_READ = 'tokens_read'
    TOKENS_WRITE = 'tokens_write'
    ALERTS_READ = 'alerts_read'
    ALERTS_WRITE = 'alerts_write'
    INSTITUTION_READ = 'institutions_read'
    SCOPES_READ = 'scopes_read'
    SEARCH = 'search_read'
    ACTIONS_READ = 'actions_read'
    ACTIONS_WRITE = 'actions_write'
    MODERATORS_READ = 'moderators_read'
    MODERATORS_WRITE = 'moderators_write'
    NODE_REQUESTS_READ = 'node_requests_read'
    NODE_REQUESTS_WRITE = 'node_requests_write'
    PROVIDERS_WRITE = 'providers_write'
    WAFFLE_READ = 'waffle_read'
    NULL = 'null'
    # NOTE: Use with extreme caution.
    # This should NEVER be assigned to endpoints:
    # - with mutable data,
    # - that might contain *anything* that could be personally-identifiable,
    # - as a write scope
    ALWAYS_PUBLIC = 'always_public'
    ORGANIZER_COLLECTIONS_BASE_READ = 'collections.base_read'
    ORGANIZER_COLLECTIONS_BASE_WRITE = 'collections.base_write'
    COLLECTED_META_READ = 'collected_meta_read'
    COLLECTED_META_WRITE = 'collected_meta_write'
    GUIDS_READ = 'guids.base_read'
    WIKI_BASE_READ = 'wikis.base_read'
    WIKI_BASE_WRITE = 'wikis.base_write'
    IDENTIFIERS_READ = 'identifiers.data_read'
class ComposedScopes(object):
    """
    Composed scopes, listed in increasing order of access (most restrictive first). Each named constant is a tuple.
    """
    # IMPORTANT: Composed scopes exist only as an internal implementation detail.
    # All views should be based on selections from CoreScopes, above
    # NOTE: tuple concatenation may introduce duplicate entries (e.g.
    # REVIEWS_WRITE appears via both FULL_WRITE and ADMIN_LEVEL); that is
    # harmless because consumers normalize these tuples into frozensets.
    # Users collection
    USERS_READ = (CoreScopes.USERS_READ, CoreScopes.SUBSCRIPTIONS_READ, CoreScopes.ALERTS_READ)
    USERS_WRITE = USERS_READ + (CoreScopes.USERS_WRITE, CoreScopes.SUBSCRIPTIONS_WRITE, CoreScopes.ALERTS_WRITE)
    USERS_CREATE = USERS_READ + (CoreScopes.USERS_CREATE, )
    # User extensions
    USER_EMAIL_READ = (CoreScopes.USER_EMAIL_READ, )
    # Applications collection
    APPLICATIONS_READ = (CoreScopes.APPLICATIONS_READ, )
    APPLICATIONS_WRITE = APPLICATIONS_READ + (CoreScopes.APPLICATIONS_WRITE,)
    # Tokens collection
    TOKENS_READ = (CoreScopes.TOKENS_READ,)
    TOKENS_WRITE = TOKENS_READ + (CoreScopes.TOKENS_WRITE,)
    # Guid redirect view
    GUIDS_READ = (CoreScopes.GUIDS_READ, )
    # Metaschemas collection
    METASCHEMAS_READ = (CoreScopes.METASCHEMA_READ, )
    # Draft registrations
    DRAFT_READ = (CoreScopes.NODE_DRAFT_REGISTRATIONS_READ, )
    DRAFT_WRITE = (CoreScopes.NODE_DRAFT_REGISTRATIONS_WRITE, )
    # Identifier views
    IDENTIFIERS_READ = (CoreScopes.IDENTIFIERS_READ, )
    # Comment reports collection
    COMMENT_REPORTS_READ = (CoreScopes.COMMENT_REPORTS_READ,)
    COMMENT_REPORTS_WRITE = COMMENT_REPORTS_READ + (CoreScopes.COMMENT_REPORTS_WRITE,)
    # Nodes collection.
    # Base node data includes node metadata, links, children, and preprints.
    NODE_METADATA_READ = (CoreScopes.NODE_BASE_READ, CoreScopes.NODE_CHILDREN_READ, CoreScopes.NODE_LINKS_READ,
                          CoreScopes.NODE_CITATIONS_READ, CoreScopes.NODE_COMMENTS_READ, CoreScopes.NODE_LOG_READ,
                          CoreScopes.NODE_FORKS_READ, CoreScopes.WIKI_BASE_READ, CoreScopes.LICENSE_READ,
                          CoreScopes.IDENTIFIERS_READ, CoreScopes.NODE_PREPRINTS_READ)
    NODE_METADATA_WRITE = NODE_METADATA_READ + \
                    (CoreScopes.NODE_BASE_WRITE, CoreScopes.NODE_CHILDREN_WRITE, CoreScopes.NODE_LINKS_WRITE,
                     CoreScopes.NODE_CITATIONS_WRITE, CoreScopes.NODE_COMMENTS_WRITE, CoreScopes.NODE_FORKS_WRITE,
                     CoreScopes.NODE_PREPRINTS_WRITE, CoreScopes.WIKI_BASE_WRITE)
    # Organizer Collections collection
    # Using Organizer Collections and the node links they collect. Reads Node Metadata.
    ORGANIZER_READ = (CoreScopes.ORGANIZER_COLLECTIONS_BASE_READ, CoreScopes.COLLECTED_META_READ,) + NODE_METADATA_READ
    ORGANIZER_WRITE = ORGANIZER_READ + (CoreScopes.ORGANIZER_COLLECTIONS_BASE_WRITE, CoreScopes.NODE_LINKS_WRITE, CoreScopes.COLLECTED_META_WRITE)
    # Privileges relating to editing content uploaded under that node
    NODE_DATA_READ = (CoreScopes.NODE_FILE_READ, CoreScopes.WIKI_BASE_READ)
    NODE_DATA_WRITE = NODE_DATA_READ + \
                        (CoreScopes.NODE_FILE_WRITE, CoreScopes.WIKI_BASE_WRITE)
    # Privileges relating to who can access a node (via contributors or registrations)
    NODE_ACCESS_READ = (CoreScopes.NODE_CONTRIBUTORS_READ, CoreScopes.NODE_REGISTRATIONS_READ,
                        CoreScopes.NODE_VIEW_ONLY_LINKS_READ, CoreScopes.REGISTRATION_VIEW_ONLY_LINKS_READ,
                        CoreScopes.NODE_REQUESTS_READ)
    NODE_ACCESS_WRITE = NODE_ACCESS_READ + \
                        (CoreScopes.NODE_CONTRIBUTORS_WRITE, CoreScopes.NODE_REGISTRATIONS_WRITE,
                         CoreScopes.NODE_VIEW_ONLY_LINKS_WRITE, CoreScopes.REGISTRATION_VIEW_ONLY_LINKS_WRITE,
                         CoreScopes.NODE_REQUESTS_WRITE)
    # Combine all sets of node permissions into one convenience level
    NODE_ALL_READ = NODE_METADATA_READ + NODE_DATA_READ + NODE_ACCESS_READ
    NODE_ALL_WRITE = NODE_ALL_READ + NODE_METADATA_WRITE + NODE_DATA_WRITE + NODE_ACCESS_WRITE
    # Reviews
    REVIEWS_READ = (CoreScopes.ACTIONS_READ, CoreScopes.MODERATORS_READ)
    REVIEWS_WRITE = (CoreScopes.ACTIONS_WRITE, CoreScopes.MODERATORS_WRITE, CoreScopes.PROVIDERS_WRITE)
    # Full permissions: all routes intended to be exposed to third party API users
    FULL_READ = NODE_ALL_READ + USERS_READ + ORGANIZER_READ + GUIDS_READ + METASCHEMAS_READ + DRAFT_READ + REVIEWS_READ + (CoreScopes.INSTITUTION_READ, CoreScopes.SEARCH, CoreScopes.SCOPES_READ)
    FULL_WRITE = FULL_READ + NODE_ALL_WRITE + USERS_WRITE + ORGANIZER_WRITE + DRAFT_WRITE + REVIEWS_WRITE
    # Admin permissions- includes functionality not intended for third-party use
    ADMIN_LEVEL = FULL_WRITE + APPLICATIONS_WRITE + TOKENS_WRITE + COMMENT_REPORTS_WRITE + USERS_CREATE + REVIEWS_WRITE +\
                    (CoreScopes.USER_EMAIL_READ, CoreScopes.USER_ADDON_READ, CoreScopes.NODE_ADDON_READ, CoreScopes.NODE_ADDON_WRITE, CoreScopes.WAFFLE_READ, )
# List of all publicly documented scopes, mapped to composed scopes defined above.
# Return as sets to enable fast comparisons of provided scopes vs those required by a given node
# These are the ***only*** scopes that will be recognized from CAS
public_scopes = {
    'osf.full_read': scope(parts_=frozenset(ComposedScopes.FULL_READ),
                           description='View all information associated with this account, including for '
                                       'private projects.',
                           is_public=True),
    'osf.full_write': scope(parts_=frozenset(ComposedScopes.FULL_WRITE),
                            description='View and edit all information associated with this account, including for '
                                        'private projects.',
                            is_public=True),
    'osf.users.profile_read': scope(parts_=frozenset(ComposedScopes.USERS_READ),
                                    description='Read your profile data',
                                    is_public=True),
    'osf.users.email_read': scope(parts_=frozenset(ComposedScopes.USER_EMAIL_READ),
                                  description='Read your primary email address.',
                                  is_public=True),
}
# The finer-grained scopes below are still under development: they are only
# registered in DEV_MODE and are not recognized by production CAS.
if settings.DEV_MODE:
    public_scopes.update({
        'osf.users.profile_write': scope(parts_=frozenset(ComposedScopes.USERS_WRITE),
                                         description='Read and edit your profile data',
                                         is_public=True),
        'osf.nodes.metadata_read': scope(parts_=frozenset(ComposedScopes.NODE_METADATA_READ),
                                         description='Read a list of all public and private nodes accessible to this '
                                                     'account, and view associated metadata such as project descriptions '
                                                     'and titles',
                                         is_public=True),
        'osf.nodes.metadata_write': scope(parts_=frozenset(ComposedScopes.NODE_METADATA_WRITE),
                                          description='Read a list of all public and private nodes accessible to this '
                                                      'account, and view and edit associated metadata such as project '
                                                      'descriptions and titles',
                                          is_public=True),
        'osf.nodes.data_read': scope(parts_=frozenset(ComposedScopes.NODE_DATA_READ),
                                     description='List and view files associated with any public or private projects '
                                                 'accessible to this account.',
                                     is_public=True),
        'osf.nodes.data_write': scope(parts_=frozenset(ComposedScopes.NODE_DATA_WRITE),
                                      description='List, view, and update files associated with any public or private '
                                                  'projects accessible to this account.',
                                      is_public=True),
        'osf.nodes.access_read': scope(parts_=frozenset(ComposedScopes.NODE_ACCESS_READ),
                                       description='View the contributors list and any established registrations '
                                                   'associated with public or private projects',
                                       is_public=True),
        'osf.nodes.access_write': scope(parts_=frozenset(ComposedScopes.NODE_ACCESS_WRITE),
                                        description='View and edit the contributors list associated with public or '
                                                    'private projects accessible to this account. Also view and create '
                                                    'registrations.',
                                        is_public=True),  # TODO: Language: Does registrations endpoint allow creation of registrations? Is that planned?
        'osf.nodes.full_read': scope(parts_=frozenset(ComposedScopes.NODE_ALL_READ),
                                     description='View all metadata, files, and access rights associated with all public '
                                                 'and private projects accessible to this account.',
                                     is_public=True),
        'osf.nodes.full_write': scope(parts_=frozenset(ComposedScopes.NODE_ALL_WRITE),
                                      description='View and edit all metadata, files, and access rights associated with '
                                                  'all public and private projects accessible to this account.',
                                      is_public=True),
        # Undocumented scopes that can not be requested by third parties (per CAS restriction)
        'osf.users.create': scope(parts_=frozenset(ComposedScopes.USERS_CREATE),
                                  description='This permission should only be granted to OSF collaborators. Allows a site to '
                                              'programmatically create new users with this account.',
                                  is_public=False),
        'osf.admin': scope(parts_=frozenset(ComposedScopes.ADMIN_LEVEL),
                           description='This permission should only be granted to OSF administrators. Allows a site to '
                                       'create, read, edit, and delete all information associated with this account.',
                           is_public=False),
    })
def normalize_scopes(scopes):
    """
    Expand public-facing scope names from a CAS token into internal scopes.

    Useful for converting a single broad scope name (from CAS) into the
    small constituent parts (as used by views).

    :param list scopes: a list of public-facing scope names
    :return: set of all internal scope parts covered by recognized names;
        unrecognized names are silently ignored
    """
    expanded = set()
    for name in scopes:
        scope_tuple = public_scopes.get(name)
        if scope_tuple is not None:
            expanded |= scope_tuple.parts
    return expanded
if __name__ == '__main__':
    # Print some data to console, to help audit what views/core scopes map
    # to a given public/composed scope.
    # Although represented internally as a set, print as a sorted list for
    # readability.
    from pprint import pprint as pp
    # Use items() instead of the Python-2-only iteritems() so this audit
    # helper also runs under Python 3.
    pp({k: sorted(v.parts)
        for k, v in public_scopes.items()})
| |
import json
import time
import threading
import datetime
import requests
import dateutil.parser
import sys
import logging
__author__ = 'AfterShip <support@aftership.com>'
__version__ = '0.2'

logger = logging.getLogger(__name__)

# Values for test described in APIv3 class definition below.
# To run test cases go to the directory with current file and run:
# $ python __init__.py
TEST_SLUG = 'russian-post'
TEST_TRACKING_NUMBER = '65600077151512'
TEST_API_KEY = 'YOUR_API_KEY'

# Python 2/3 compatibility shim: Python 3 has no ``unicode`` builtin, so
# alias the text type used for isinstance-style checks below.
py_ver = sys.version_info[0]
if py_ver == 3:
    unicode_type = str
else:
    unicode_type = unicode
class APIRequestException(Exception):
    """Base error raised for failed AfterShip API requests.

    Indexing the exception looks the key up in its first positional
    argument when that argument is a dict; any other case yields an empty
    dict so callers can chain lookups safely.
    """

    def __getitem__(self, attribute):
        payload = self.args[0] if self.args else None
        if isinstance(payload, dict):
            return payload.get(attribute, {})
        return {}
class APIv3RequestException(APIRequestException):
    """Error wrapper exposing the ``meta`` fields of an API v3 response."""

    def _meta_field(self, field, fallback):
        # Falsy/missing meta values fall back to a generic default.
        return self['meta'].get(field) or fallback

    def code(self):
        return self._meta_field('code', 500)

    def type(self):
        return self._meta_field('error_type', 'InternalError')

    def message(self):
        return self._meta_field('error_message', str(self))

    def data(self):
        return self['data']
class APIv4RequestException(APIRequestException):
    """Error wrapper exposing the ``meta`` fields of an API v4 response."""

    def _meta_field(self, field, fallback):
        # Falsy/missing meta values fall back to a generic default.
        return self['meta'].get(field) or fallback

    def code(self):
        return self._meta_field('code', 500)

    def type(self):
        return self._meta_field('type', 'InternalError')

    def message(self):
        return self._meta_field('message', str(self))

    def data(self):
        return self['data']
class RequestPart(object):
    """Builds API endpoint paths via attribute and item access.

    Each attribute (or item) access appends one path segment and returns a
    new ``RequestPart``, so ``api.trackings[slug][number]`` accumulates the
    path ``/trackings/<slug>/<number>``. The terminal ``get``/``post``/
    ``put``/``delete`` call dispatches to the root object's ``call``.
    """

    def __init__(self, path='', base=None):
        self._path = path
        self._base = base

    def __getattr__(self, chunk):
        # Only fires for names not found normally, so _path/_base (set in
        # __init__) are resolved without recursing here.
        return RequestPart('{0}/{1}'.format(self._path, chunk), self._base)

    def __getitem__(self, attribute):
        return self.__getattr__(attribute)

    def request(self, method, *args, **body):
        return self._base.call(method, self._path, *args, **body)

    def get(self, *args, **body):
        return self.request('get', *args, **body)

    def post(self, *args, **body):
        return self.request('post', *args, **body)

    def put(self, *args, **body):
        return self.request('put', *args, **body)

    def delete(self, *args, **body):
        return self.request('delete', *args, **body)
class API(RequestPart):
    """Root AfterShip HTTP client.

    Inherits path building from RequestPart; :meth:`call` performs the
    actual HTTP request, applying a simple rate limit between calls.
    """

    DEFAULT_HEADERS = {
        'User-Agent': 'aftership-python/{}'.format(__version__),
    }

    # time.clock() was removed in Python 3.8 (and measured CPU time on
    # Unix, which is wrong for rate limiting); prefer a monotonic wall
    # clock, falling back to time.time() on Python 2.
    _clock = staticmethod(getattr(time, 'monotonic', time.time))

    def __init__(self, key=None,
                 max_calls_per_sec=10,
                 base_url='https://api.aftership.com',
                 ver='v3', headers=None):
        """
        :param key: AfterShip API key, sent as the aftership-api-key header.
        :param max_calls_per_sec: throttle ceiling enforced in call().
        :param base_url: API root URL.
        :param ver: API version path segment appended to base_url.
        :param headers: optional extra headers merged over the defaults.
            (Default changed from the mutable ``{}`` to None; passing a
            dict behaves exactly as before.)
        """
        self._last_call = None
        self._rate_limit = 1.0 / float(max_calls_per_sec)
        # Copy the class-level defaults: the original assigned the shared
        # DEFAULT_HEADERS dict and then update()d it, mutating it for every
        # instance (and leaking the API key into the class attribute).
        self._headers = dict(self.DEFAULT_HEADERS)
        self._headers.update(headers or {})
        if key:
            self._headers['aftership-api-key'] = key
        self._api_url = '%s/%s' % (base_url, ver)
        # One lock per client. The original did ``with threading.Lock():``
        # inside call(), creating a fresh lock each time, which serialized
        # nothing.
        self._throttle_lock = threading.Lock()
        RequestPart.__init__(self, base=self)

    def call(self, method, path, *args, **body):
        """Issue an HTTP request and return the decoded JSON payload.

        :param method: HTTP verb ('get', 'post', 'put', 'delete').
        :param path: URL path accumulated by RequestPart.
        :param args: extra path segments appended to the URL.
        :param body: request body (non-GET) or query params (GET).
        :raises APIRequestException: on non-OK responses or unparseable JSON.
        """
        args = ('/%s' % '/'.join(args)) if args else ''
        url = '%s%s%s' % (self._api_url, path, args)
        headers = self._headers
        params = None
        if method != 'get':
            headers['Content-Type'] = 'application/json'
            body = json.dumps(body)
        elif body:
            params = body
            body = None
        logger.debug('args: %s; url: %s; headers: %s', args, url, headers)
        with self._throttle_lock:
            # Sleep just long enough to keep under max_calls_per_sec.
            if self._last_call:
                delta = self._rate_limit - (self._clock() - self._last_call)
                if delta > 0:
                    time.sleep(delta)
            self._last_call = self._clock()
            response = requests.request(method, url, headers=headers,
                                        params=params, data=body)
        try:
            ret = response.json()
        except ValueError as error:
            logger.exception('Error in AfterShip response')
            raise APIRequestException('Server response parsing failed. ValueError: ' + str(error))
        if not response.ok:
            raise APIRequestException(ret)
        return ret
class APIv3(API):
    """AfterShip API v3 client.

    Adds datetime conveniences on top of API: outgoing datetime values are
    serialized to ISO 8601, list parameters are joined into comma-separated
    strings, and (optionally) ISO 8601 strings in responses are parsed back
    into datetime objects.
    """

    def __init__(self, key, max_calls_per_sec=10, datetime_convert=True, _prefix='v3'):
        """
        :param key: AfterShip API key.
        :param max_calls_per_sec: throttle ceiling, passed through to API.
        :param datetime_convert: when True, parse ISO 8601 strings found in
            responses into datetime objects.
        :param _prefix: URL version segment (overridden to 'v4' by APIv4).
        """
        # Request/response fields that carry ISO 8601 timestamps.
        # (The original listed 'expected_delivery' twice; membership
        # semantics are unchanged with the duplicate removed.)
        self._datetime_fields = ['created_at',
                                 'created_at_min',
                                 'created_at_max',
                                 'updated_at',
                                 'expected_delivery',
                                 'checkpoint_time',
                                 'tracking_ship_date']
        self._datetime_convert = datetime_convert
        API.__init__(self, key, max_calls_per_sec=max_calls_per_sec,
                     base_url='https://api.aftership.com',
                     ver=_prefix, headers={})

    def _is_datetime(self, key, value):
        """Return True when value is a non-empty string in a datetime field."""
        if type(value) is unicode_type and key in self._datetime_fields and len(value) > 0:
            return True
        return False

    def _convert_datetime_dict(self, dct):
        """Recursively parse ISO 8601 strings in dct into datetime objects."""
        if type(dct) is dict:
            for key in list(dct.keys()):
                value = dct[key]
                # Convert ISO 8601 strings to datetime
                if self._is_datetime(key, value):
                    try:
                        dct[key] = dateutil.parser.parse(value)
                    except (ValueError, OverflowError):
                        # Unparseable timestamp: keep the original string
                        # rather than failing the whole response. (The
                        # original bare ``except`` also swallowed
                        # KeyboardInterrupt/SystemExit.)
                        dct[key] = value
                # Iterate thru dict
                elif type(value) is dict:
                    dct[key] = self._convert_datetime_dict(value)
                # Iterate thru list
                elif type(value) is list:
                    dct[key] = []
                    for item in value:
                        dct[key].append(self._convert_datetime_dict(item))
        return dct

    def call(self, *args, **body):
        """Call the API, converting datetimes/lists in both directions.

        :raises APIv3RequestException: wrapping any APIRequestException.
        """
        try:
            for key in list(body.keys()):
                value = body[key]
                # Convert datetime to ISO 8601 string (second precision).
                if type(value) is datetime.datetime:
                    value = value.replace(microsecond=0)
                    body[key] = value.isoformat()
                # Convert array of values to comma-separated string.
                elif type(value) is list:
                    body[key] = ','.join(value)
            response = API.call(self, *args, **body)['data']
            # Convert ISO 8601 strings in the response back to datetime.
            if self._datetime_convert:
                self._convert_datetime_dict(response)
            return response
        except APIRequestException as error:
            raise APIv3RequestException(*error.args)
class APIv4(APIv3):
    """
    AfterShip API v4 client: same behavior as APIv3, but requests use the
    ``/v4`` URL prefix and errors are re-raised as APIv4RequestException.

    The doctests below double as the smoke-test suite executed by the
    ``doctest.testmod`` call in the __main__ block; they need network
    access and a valid API key to pass.

    Test code goes below.
    Test covers all accessing methods (POST, GET, PUT, DELETE).
    Test covers all variants of building specific API calls (endpoints paths + body):
     - dot.separated.constants.get()             : GET /dot/separated/constants
     - params['in']['brackets'].get()            : GET /params/in/brackets
     - path.get('arg1', 'arg2', arg_name='arg3') : GET /path/arg1/arg2?arg_name=arg3
    Test checks conversion of input list type parameters to comma separated strings.
    Test checks conversion of input timestamp strings to datetime variables.
    Test checks conversion of output timestamp strings to datetime variables.

    >>> api.trackings.post(tracking=dict(slug=slug, tracking_number=number, title="Title"))['tracking']['title']
    u'Title'
    >>> api.trackings.get(slug, number, fields=['title', 'created_at'])['tracking']['title']
    u'Title'
    >>> type(api.trackings.put(slug, number, tracking=dict(title="Title (changed)"))['tracking']['updated_at'])
    <type 'datetime.datetime'>
    >>> api.trackings[slug][number].get()['tracking']['title']
    u'Title (changed)'
    >>> api.trackings.get(created_at_min=datetime.datetime(2014, 6, 1), fields=['title', 'order_id'])['fields']
    u'title,order_id'
    >>> api.trackings.delete(slug, number)['tracking']['slug']
    u'russian-post'
    """
    def __init__(self, key, max_calls_per_sec=10, datetime_convert=True, _prefix='v4'):
        # Delegate to APIv3, changing only the URL version prefix.
        APIv3.__init__(self, key,
                       max_calls_per_sec=max_calls_per_sec,
                       datetime_convert=datetime_convert,
                       _prefix=_prefix)

    def call(self, *args, **body):
        # Re-wrap v3 errors in the v4-specific exception type so callers
        # can catch per API version.
        try:
            return APIv3.call(self, *args, **body)
        except APIv3RequestException as error:
            raise APIv4RequestException(*error.args)
if __name__ == "__main__":
    # Smoke test: runs the doctests embedded in the APIv4 docstring against
    # the live AfterShip API. Requires network access and a real key in
    # TEST_API_KEY; with the placeholder key the requests fail with 401.
    import doctest
    print("Running smoke tests")
    doctest.testmod(extraglobs={'slug': TEST_SLUG,
                                'number': TEST_TRACKING_NUMBER,
                                'api': APIv4(TEST_API_KEY)})
    # try:
    #     slug = TEST_SLUG
    #     number = TEST_TRACKING_NUMBER
    #     api = APIv4(TEST_API_KEY)
    #     print api.trackings.post(tracking=dict(slug=slug, tracking_number=number, title="Title"))
    #     print api.trackings.get(slug, number, fields=['title', 'created_at'])
    #     print api.trackings.delete(slug, number)
    # except APIv4RequestException as error:
    #     # FAKE_API_KEY will result in Unauthorized (401) error
    #     print 'Error:', error.code(), '|', error.type(), '|', error.message(), '|', error.data()
    print("done!")
| |
# -*- coding: utf-8 -*-
"""
Spanish-specific Form helpers
"""
from __future__ import absolute_import, unicode_literals
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import RegexField, Select
from django.utils.translation import ugettext_lazy as _
from .es_provinces import PROVINCE_CHOICES
from .es_regions import REGION_CHOICES
class ESPostalCodeField(RegexField):
    """
    A form field that validates its input as a Spanish postal code.

    A Spanish postal code is a five-digit string whose first two digits,
    between 01 and 52, identify the province.
    """
    default_error_messages = {
        'invalid': _('Enter a valid postal code in the range and format 01XXX - 52XXX.'),
    }

    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        # First two digits: 01-52 (province code); last three: any digits.
        pattern = r'^(0[1-9]|[1-4][0-9]|5[0-2])\d{3}$'
        super(ESPostalCodeField, self).__init__(
            pattern, max_length, min_length, *args, **kwargs)
class ESPhoneNumberField(RegexField):
    """
    A form field that validates its input as a Spanish phone number.
    Information numbers are omitted.

    Spanish phone numbers are nine digit numbers, where the first digit is
    6 or 7 (for cell phones), 8 (for special phones), or 9 (for landlines
    and special phones).

    TODO: accept and strip characters like dot, hyphen... in phone number
    """
    default_error_messages = {
        # The pattern accepts a leading 7 (newer mobile ranges), so the
        # message lists it too; the original message and docstring omitted
        # 7 even though the regex accepted it.
        'invalid': _('Enter a valid phone number in one of the formats 6XXXXXXXX, 7XXXXXXXX, 8XXXXXXXX or 9XXXXXXXX.'),
    }

    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        super(ESPhoneNumberField, self).__init__(r'^(6|7|8|9)\d{8}$',
            max_length, min_length, *args, **kwargs)
class ESIdentityCardNumberField(RegexField):
    """
    Spanish NIF/NIE/CIF (Fiscal Identification Number) code.

    Validates three diferent formats:
        NIF (individuals): 12345678A
        CIF (companies): A12345678
        NIE (foreigners): X12345678A
    according to a couple of simple checksum algorithms.

    Value can include a space or hyphen separator between number and letters.
    Number length is not checked for NIF (or NIE), old values start with a 1,
    and future values can contain digits greater than 8. The CIF control digit
    can be a number or a letter depending on company type. Algorithm is not
    public, and different authors have different opinions on which ones allows
    letters, so both validations are assumed true for all types.

    http://es.wikipedia.org/wiki/N%C3%BAmero_de_identificaci%C3%B3n_fiscal

    .. versionchanged:: 1.1
    """
    default_error_messages = {
        'invalid': _('Please enter a valid NIF, NIE, or CIF.'),
        'invalid_only_nif': _('Please enter a valid NIF or NIE.'),
        'invalid_nif': _('Invalid checksum for NIF.'),
        'invalid_nie': _('Invalid checksum for NIE.'),
        'invalid_cif': _('Invalid checksum for CIF.'),
    }

    def __init__(self, only_nif=False, max_length=None, min_length=None, *args, **kwargs):
        # When only_nif is True, CIF (company) values are rejected outright.
        self.only_nif = only_nif
        # Valid NIF/NIE control letters, indexed by number modulo 23.
        self.nif_control = 'TRWAGMYFPDXBNJZSQVHLCKE'
        # CIF control letters, indexed by the computed checksum digit.
        self.cif_control = 'JABCDEFGHI'
        # Leading letters that identify a CIF (company) number.
        self.cif_types = 'ABCDEFGHJKLMNPQRSVW'
        # Leading letters that identify an NIE (foreigner) number.
        self.nie_types = 'XTY'
        # Groups: optional leading letter, digits, optional trailing letter;
        # a space or hyphen separator is permitted around the digits.
        self.id_card_pattern = r'^([%s]?)[ -]?(\d+)[ -]?([%s]?)$'
        id_card_re = re.compile(self.id_card_pattern %
                                (self.cif_types + self.nie_types,
                                 self.nif_control + self.cif_control),
                                re.IGNORECASE)
        error_message = self.default_error_messages['invalid%s' %
                                                    (self.only_nif and '_only_nif' or '')]
        super(ESIdentityCardNumberField, self).__init__(
            id_card_re, max_length, min_length,
            error_message=error_message, *args, **kwargs)

    def clean(self, value):
        """Normalize separators/case and checksum-validate the number."""
        super(ESIdentityCardNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        # Uppercase and strip separators, then re-match to split the groups.
        value = value.upper().replace(' ', '').replace('-', '')
        m = re.match(self.id_card_pattern %
                     (self.cif_types + self.nie_types,
                      self.nif_control + self.cif_control),
                     value)
        letter1, number, letter2 = m.groups()
        if not letter1 and letter2:
            # NIF
            if letter2 == self.nif_get_checksum(number):
                return value
            else:
                raise ValidationError(self.error_messages['invalid_nif'])
        elif letter1 in self.nie_types and letter2:
            # NIE: the checksum is computed over the digits exactly as for NIF.
            if letter2 == self.nif_get_checksum(number):
                return value
            else:
                raise ValidationError(self.error_messages['invalid_nie'])
        elif not self.only_nif and letter1 in self.cif_types and len(number) in [7, 8]:
            # CIF
            if not letter2:
                # 8-digit form: the last digit is itself the control digit.
                number, letter2 = number[:-1], int(number[-1])
            checksum = cif_get_checksum(number)
            # The control may be expressed as a digit or as a letter.
            if letter2 in (checksum, self.cif_control[checksum]):
                return value
            else:
                raise ValidationError(self.error_messages['invalid_cif'])
        else:
            raise ValidationError(self.error_messages['invalid'])

    def nif_get_checksum(self, d):
        # Standard NIF letter: number modulo 23 indexes the control string.
        return self.nif_control[int(d) % 23]
class ESCCCField(RegexField):
    """
    A form field that validates its input as a Spanish bank account or CCC
    (Codigo Cuenta Cliente).

        Spanish CCC is in format EEEE-OOOO-CC-AAAAAAAAAA where:

            E = entity
            O = office
            C = checksum
            A = account

        It's also valid to use a space as delimiter, or to use no delimiter.

        First checksum digit validates entity and office, and last one
        validates account. Validation is done multiplying every digit of 10
        digit value (with leading 0 if necessary) by number in its position in
        string 1, 2, 4, 8, 5, 10, 9, 7, 3, 6. Sum resulting numbers and extract
        it from 11. Result is checksum except when 10 then is 1, or when 11
        then is 0.

        TODO: allow IBAN validation too
    """
    default_error_messages = {
        'invalid': _('Please enter a valid bank account number in format XXXX-XXXX-XX-XXXXXXXXXX.'),
        'checksum': _('Invalid checksum for bank account number.'),
    }

    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        # Four groups of digits (4-4-2-10), optionally space/hyphen separated.
        super(ESCCCField, self).__init__(r'^\d{4}[ -]?\d{4}[ -]?\d{2}[ -]?\d{10}$',
            max_length, min_length, *args, **kwargs)

    def clean(self, value):
        """Checksum-validate the account number after regex validation."""
        super(ESCCCField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        m = re.match(r'^(\d{4})[ -]?(\d{4})[ -]?(\d{2})[ -]?(\d{10})$', value)
        entity, office, checksum, account = m.groups()
        # The two-digit checksum group is the string concatenation of two
        # control digits: one over '00' + entity + office (padded to ten
        # digits), one over the account digits.
        if get_checksum('00' + entity + office) + get_checksum(account) == checksum:
            return value
        else:
            raise ValidationError(self.error_messages['checksum'])
def get_checksum(d):
    """Compute the single CCC control digit for a ten-digit string.

    Each digit is weighted by 1, 2, 4, 8, 5, 10, 9, 7, 3, 6 (by position);
    the checksum is 11 minus the weighted sum modulo 11, where a result of
    10 maps to '1' and 11 maps to '0'.
    """
    weights = [1, 2, 4, 8, 5, 10, 9, 7, 3, 6]
    total = sum(int(digit) * weight for digit, weight in zip(d, weights))
    raw = 11 - total % 11
    # raw is always in 1..11; the two out-of-range values map specially.
    return {10: '1', 11: '0'}.get(raw, str(raw))
class ESRegionSelect(Select):
    """
    A Select widget that uses a list of spanish regions as its choices.
    """
    def __init__(self, attrs=None):
        # Choices come from the localflavor es_regions module.
        super(ESRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class ESProvinceSelect(Select):
    """
    A Select widget that uses a list of spanish provinces as its choices.
    """
    def __init__(self, attrs=None):
        # Choices come from the localflavor es_provinces module.
        super(ESProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
def cif_get_checksum(number):
    """Compute the CIF control digit for a string of digits.

    Digits at odd 0-based positions are summed directly; digits at even
    positions are doubled and their decimal digits summed (Luhn-style).
    The control digit is the distance from that total to the next multiple
    of 10 (0 when the total is already a multiple of 10).
    """
    odd_total = 0
    even_total = 0
    for pos, digit in enumerate(number):
        if pos % 2:
            odd_total += int(digit)
        else:
            even_total += sum(int(c) for c in str(int(digit) * 2))
    return (10 - (odd_total + even_total) % 10) % 10
| |
from neleval.configs import LUO_MEASURES, parse_measures
from neleval.coref_metrics import mapping_to_sets, sets_to_mapping
from neleval.coref_metrics import _prf, muc
from neleval import coref_metrics
from neleval.tests.util import check_correct
# Tiny fixture: the same clustering in mapping (item -> cluster id) and
# sets (cluster id -> items) form.
MAPPING = {'a': 1, 'b': 2, 'c': 1}
SETS = {1: {'a', 'c'}, 2: {'b'}}


def test_conversions():
    # The two representations convert to each other and round-trip exactly.
    assert mapping_to_sets(MAPPING) == SETS
    assert sets_to_mapping(SETS) == MAPPING
    assert sets_to_mapping(mapping_to_sets(MAPPING)) == MAPPING
    assert mapping_to_sets(sets_to_mapping(SETS)) == SETS
def _get_coref_fscore(gold, resp, measures):
    """Yield (measure name, F-score rounded to 3 dp) for each measure."""
    for name in parse_measures(measures):
        metric_fn = getattr(coref_metrics, name)
        p_r_f = _prf(*metric_fn(gold, resp))
        yield metric_fn.__name__, round(p_r_f[2], 3)
# TC-A-* tests from https://code.google.com/p/reference-coreference-scorers
RCS14_TCA_GOLD = {'0': {1}, '1': {2,3}, '2': {4,5,6}}
RCS14_TCA_RESPS = [
('TC-A-1', # perfect
{'0': {1}, '1': {2,3}, '2': {4,5,6}},
{'muc': 1.0, 'b_cubed': 1.0,
'mention_ceaf': 1.0, 'entity_ceaf': 1.0}),
('TC-A-2', # response with missing mentions/entities
{'0': {1}, '2': {4,5}},
{'muc': 0.5, 'b_cubed': 0.56,
'mention_ceaf': 0.667, 'entity_ceaf': 0.72}),
('TC-A-3', # response with false-alarm mentions/entities
{'0': {1}, '1': {2,3,7}, '2': {4,5,6,8}, '3': {9}},
{'muc': 0.75, 'b_cubed': 0.675,
'mention_ceaf': 0.8, 'entity_ceaf': 0.759}),
('TC-A-4', # response with both missing and false-alarm mentions/entities
{'0': {1}, '1': {2,3,7}, '2': {4,8}, '3': {9}},
{'muc': 0.333, 'b_cubed': 0.468,
'mention_ceaf': 0.615, 'entity_ceaf': 0.629}),
# NOTE TC-A-5 through TC-A-9 test IO, not metrics
# # TODO Verify TC-A-10 ceafm and ceafe values
# ('TC-A-10', # Gold mentions. Only singletons in the response.
# {'0': {1}, '1': {2}, '2': {3}, '3': {4}, '4': {5}, '5': {6}},
# {'muc': 0.0, 'b_cubed': 0.667,
# 'mention_ceaf': 0.5, 'entity_ceaf': 0.481}),
# # TODO Verify TC-A-11 ceafm and ceafe values
# ('TC-A-11', # Gold mentions. All mentions are coreferent in the response.
# {'0': {1,2,3,4,5,6}},
# {'muc': 0.75, 'b_cubed': 0.56,
# 'mention_ceaf': 0.5, 'entity_ceaf': 0.333}),
# # TODO Verify TC-A-12 ceafm and ceafe values
# ('TC-A-12', # System mentions. Only singletons in the response.
# {'0': {1}, '1': {7}, '2': {8}, '3': {3}, '4': {4}, '5': {5}, '6': {9}},
# {'muc': 0.0, 'b_cubed': 0.443,
# 'mention_ceaf': 0.462, 'entity_ceaf': 0.433}),
# # TODO Verify TC-A-13 ceafm and ceafe values
# ('TC-A-13', # System mentions. All mentions are coreferent in the response.
# {'0': {1,7,8,3,4,5,9}},
# {'muc': 0.222, 'b_cubed': 0.194,
# 'mention_ceaf': 0.308, 'entity_ceaf': 0.2}),
]
def test_rcs_tca_ceaf():
    # NOTE(review): the original docstring said "Examples from Luo (2005)"
    # (copy-pasted from test_luo_ceaf), but this data is the TC-A suite from
    # reference-coreference-scorers.
    "TC-A examples from reference-coreference-scorers"
    for system, response, expected in RCS14_TCA_RESPS:
        actual = dict(_get_coref_fscore(RCS14_TCA_GOLD, response, LUO_MEASURES))
        check_correct(expected, actual)
## TC-B test from https://code.google.com/p/reference-coreference-scorers
#RCS14_TCB_GOLD = {'10043': {1,2}, '10054': {3,4,5}}
#RCS14_TCB_RESPS = [
# # TODO Verify TC-B-1 muc, b_cubed, ceafm and ceafe values
# ('TC-B-1', # spurious mention (x) and missing mention (a) in response; link (bc) is a key non-coref link and is an incorrect response coref link.
# {'10043': {2,3,6}, '10054': {4,5}},
# {'muc': 0.333, 'b_cubed': 0.478,
# 'mention_ceaf': 0.6, 'entity_ceaf': 0.6}),
# ]
#
#def test_rcs_tcb_ceaf():
# "Examples from Luo (2005)"
# for system, response, expected in RCS14_TCB_RESPS:
# actual = dict(_get_coref_fscore(RCS14_TCB_GOLD, response, LUO_MEASURES))
# check_correct(expected, actual)
## TC-C test from https://code.google.com/p/reference-coreference-scorers
#RCS14_TCC_GOLD = {'10043': {1,2}, '10054': {3,4,5}, '10060': {6,7}}
#RCS14_TCC_RESPS = [
# # TODO Verify TC-C-1 muc, b_cubed, ceafm and ceafe values
# ('TC-C-1', # plus a new entity and its correct prediction shown. this was for testing the more than two entity case
# {'10043': {2,3,6}, '10054': {4,5}, '10060': {6,7}},
# {'muc': 0.5, 'b_cubed': 0.674,
# 'mention_ceaf': 0.714, 'entity_ceaf': 0.733}),
# ]
#
#def test_rcs_tcc_ceaf():
# "Examples from Luo (2005)"
# for system, response, expected in RCS14_TCC_RESPS:
# actual = dict(_get_coref_fscore(RCS14_TCC_GOLD, response, LUO_MEASURES))
# check_correct(expected, actual)
# TC-M test from https://code.google.com/p/reference-coreference-scorers
RCS14_TCM_GOLD = {'0': {1,2,3,4,5,6}}
RCS14_TCM_RESPS = [
('TC-M-1',
{'0': {1,2,3,4,5,6}},
{'muc': 1.0, 'b_cubed': 1.0,
'mention_ceaf': 1.0, 'entity_ceaf': 1.0}),
# # TODO Verify TC-M-2 muc, b_cubed, ceafm and ceafe values
# ('TC-M-2',
# {'0': {1}, '1': {2}, '2': {3}, '3': {4}, '4': {5}, '5': {6}},
# {'muc': 0.0, 'b_cubed': 0.286,
# 'mention_ceaf': 0.167, 'entity_ceaf': 0.082}),
# # TODO Verify TC-M-3 muc, b_cubed, ceafm and ceafe values
# ('TC-M-3',
# {'0': {1,2}, '1': {3,4,5}, '2': {6}},
# {'muc': 0.75, 'b_cubed': 0.56,
# 'mention_ceaf': 0.5, 'entity_ceaf': 0.333}),
# # TODO Verify TC-M-4 muc, b_cubed, ceafm and ceafe valuesw
# ('TC-M-4',
# {'0': {1,2,3,7,8,9}},
# {'muc': 0.4, 'b_cubed': 0.25,
# 'mention_ceaf': 0.5, 'entity_ceaf': 0.5}),
# # TODO Verify TC-M-5 b_cubed, ceafm and ceafe valuesw
# ('TC-M-5',
# {'0': {1}, '1': {2}, '2': {3}, '3': {7}, '4': {8}, '5': {9}},
# {'muc': 0.0, 'b_cubed': 0.143,
# 'mention_ceaf': 0.167, 'entity_ceaf': 0.082}),
# # TODO Verify TC-M-6 muc, b_cubed, ceafm and ceafe valuesw
# ('TC-M-6',
# {'0': {1,2}, '1': {3,7,8}, '2': {9}},
# {'muc': 0.25, 'b_cubed': 0.205,
# 'mention_ceaf': 0.333, 'entity_ceaf': 0.25}),
]
def test_rcs_tcc_ceaf():
    # NOTE(review): misnamed — this exercises the TC-M cases and the name
    # collides with the commented-out TC-C/TC-N tests above; it should be
    # test_rcs_tcm_ceaf. Name left unchanged here to preserve the collected
    # test identity. The original docstring ("Examples from Luo (2005)")
    # was also a copy-paste error.
    "TC-M examples from reference-coreference-scorers"
    for system, response, expected in RCS14_TCM_RESPS:
        actual = dict(_get_coref_fscore(RCS14_TCM_GOLD, response, LUO_MEASURES))
        check_correct(expected, actual)
## TC-N test from https://code.google.com/p/reference-coreference-scorers
#RCS14_TCN_GOLD = {'0': {1}, '1': {2}, '2': {3}, '3': {4}, '4': {5}, '5': {6}}
#RCS14_TCN_RESPS = [
# ('TC-N-1',
# {'0': {1}, '1': {2}, '2': {3}, '3': {4}, '4': {5}, '5': {6}},
# {'muc': 0.0, 'b_cubed': 1.0,
# 'mention_ceaf': 1.0, 'entity_ceaf': 1.0}),
# # TODO Verify TC-N-2 muc, b_cubed, ceafm and ceafe values
# ('TC-N-2',
# {'0': {1,2,3,4,5,6}},
# {'muc': 0.0, 'b_cubed': 0.286,
# 'mention_ceaf': 0.167, 'entity_ceaf': 0.082}),
# # TODO Verify TC-N-3 muc, b_cubed, ceafm and ceafe values
# ('TC-N-3',
# {'0': {1,2}, '1': {3,4,5}, '2': {6}},
# {'muc': 0.0, 'b_cubed': 0.667,
# 'mention_ceaf': 0.5, 'entity_ceaf': 0.481}),
# # TODO Verify TC-N-4 b_cubed, ceafm and ceafe valuesw
# ('TC-N-4',
# {'0': {1}, '1': {2}, '2': {3}, '3': {7}, '4': {8}, '5': {9}},
# {'muc': 0.0, 'b_cubed': 0.5,
# 'mention_ceaf': 0.5, 'entity_ceaf': 0.5}),
# # TODO Verify TC-N-5 b_cubed, ceafm and ceafe valuesw
# ('TC-N-5',
# {'0': {1,2,3,7,8,9}},
# {'muc': 0.0, 'b_cubed': 0.143,
# 'mention_ceaf': 0.167, 'entity_ceaf': 0.082}),
# # TODO Verify TC-N-6 muc, b_cubed, ceafm and ceafe valuesw
# ('TC-N-6',
# {'0': {1,2}, '1': {3,7,8}, '2': {9}},
# {'muc': 0.0, 'b_cubed': 0.308,
# 'mention_ceaf': 0.333, 'entity_ceaf': 0.259}),
# ]
#
#def test_rcs_tcc_ceaf():
# "Examples from Luo (2005)"
# for system, response, expected in RCS14_TCN_RESPS:
# actual = dict(_get_coref_fscore(RCS14_TCN_GOLD, response, LUO_MEASURES))
# check_correct(expected, actual)
LUO05_GOLD = {'A': {1,2,3,4,5}, 'B': {6,7}, 'C': {8,9,10,11,12}}
LUO05_RESPS = [
('sysa',
{'A': {1,2,3,4,5}, 'B': {6,7,8,9,10,11,12}},
{'muc': 0.947, 'b_cubed': 0.865,
'mention_ceaf': 0.833, 'entity_ceaf': 0.733}),
('sysb',
{'A': {1,2,3,4,5,8,9,10,11,12}, 'B': {6,7}},
{'muc': 0.947, 'b_cubed': 0.737,
'mention_ceaf': 0.583, 'entity_ceaf': 0.667}),
('sysc',
{'A': {1,2,3,4,5,6,7,8,9,10,11,12}},
{'muc': 0.900, 'b_cubed': 0.545,
'mention_ceaf': 0.417, 'entity_ceaf': 0.294}),
('sysd',
{i: {i,} for i in range(1,13)},
{'muc': 0.0, 'b_cubed': 0.400,
'mention_ceaf': 0.250, 'entity_ceaf': 0.178})
]
def test_luo_ceaf():
    """Examples from Luo (2005)."""
    for system_name, response, expected in LUO05_RESPS:
        scores = _get_coref_fscore(LUO05_GOLD, response, LUO_MEASURES)
        check_correct(expected, dict(scores))
def _get_muc_prf(gold, resp):
    """Return (precision, recall, F-score) for MUC, each rounded to 3 dp."""
    scores = _prf(*muc(gold, resp))
    return tuple(round(score, 3) for score in scores)
VILAIN95 = [
# Table 1, Row 1
({1: {'A', 'B', 'C', 'D'}},
{1: {'A', 'B'}, 2: {'C', 'D'}},
(1.0, 0.667, 0.8)),
# Table 1, Row 2
({1: {'A', 'B'}, 2: {'C', 'D'}},
{1: {'A', 'B', 'C', 'D'}},
(0.667, 1.0, 0.8)),
# Table 1, Row 3
({1: {'A', 'B', 'C', 'D'}},
{1: {'A', 'B', 'C', 'D'}},
(1.0, 1.0, 1.0)),
# Table 1, Row 4
({1: {'A', 'B', 'C', 'D'}},
{1: {'A', 'B'}, 2: {'C', 'D'}},
(1.0, 0.667, 0.8)),
# Table 1, Row 5
({1: {'A', 'B', 'C'}},
{1: {'A', 'C'}},
(1.0, 0.5, 0.667)),
# More complex 1
({1: {'B', 'C', 'D', 'E', 'G', 'H', 'J'}},
{1: {'A', 'B', 'C'}, 2: {'D', 'E', 'F'}, 3: {'G', 'H', 'I'}},
(0.5, 0.5, 0.5)),
# More complex 2
({1: {'A', 'B', 'C'}, 2: {'D', 'E', 'F', 'G'}},
{1: {'A', 'B'}, 2: {'C', 'D'}, 3: {'F', 'G', 'H'}},
(0.5, 0.4, 0.444)),
]
def test_vilain_muc():
    """Examples from Vilain et al. (1995)."""
    for key, response, expected in VILAIN95:
        actual = _get_muc_prf(key, response)
        assert actual == expected
CAI10_TABLES_4_5 = [
({1: {'a', 'b', 'c'}}, # true
{2: {'a', 'b'}, 3: {'c'}, 4: {'i'}, 5: {'j'}}, # pred
{'cs_b_cubed': (1.0, 0.556, 0.714), # Note paper says 0.715, but seems incorrect
'mention_cs_ceaf': (0.667, 0.667, 0.667)}),
({1: {'a', 'b', 'c'}},
{2: {'a', 'b'}, 3: {'i', 'j'}, 4: {'c'}},
{'cs_b_cubed': (.8, .556, .656),
'mention_cs_ceaf': (.6, .667, .632)}),
({1: {'a', 'b', 'c'}},
{2: {'a', 'b'}, 3: {'i', 'j'}, 4: {'k', 'l'}, 5: {'c'}},
{'cs_b_cubed': (.714, .556, .625),
'mention_cs_ceaf': (.571, .667, .615)}),
({1: {'a', 'b', 'c'}},
{2: {'a', 'b'}, 3: {'i', 'j', 'k', 'l'}},
{'cs_b_cubed': (.571, .556, .563),
'mention_cs_ceaf': (.429, .667, .522)}),
]
###def test_cai_strube_twinless_adjustment():
### "Examples from Cai & Strube (SIGDIAL'10)"
### for true, pred, expected in CAI10_TABLES_4_5:
### actual = {f: tuple(round(x, 3) for x in _prf(*getattr(coref_metrics, f)(true, pred)))
### for f in parse_measures(CAI_STRUBE_MEASURES)}
### check_correct(expected, actual)
| |
#!/usr/bin/env python3
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import numpy
import re
import sqlite3
from params import PARAMETERS
_log = logging.getLogger(__name__)
class Result(object):
    """
    Represents a column in the results table.
    """
    def __init__(self, name, val_type, regex):
        # Column name in the results DB table.
        self.name = name
        # Callable used to coerce the captured string (e.g. int, float).
        self.val_type = val_type
        # Compiled pattern with one capture group, or None for columns
        # that are filled in elsewhere (derived or externally collected).
        self.regex = regex
RESULTS = [
Result('queries_sent', int,
re.compile(r'\s*Queries sent:\s*(\d+)')),
Result('queries_completed', int,
re.compile(r'\s*Queries completed:\s*(\d+).*')),
Result('queries_lost', int,
re.compile(r'\s*Queries lost:\s*(\d+).*')),
Result('run_time', float,
re.compile(r'\s*Run time \(s\):\s*([0-9.]+)')),
Result('qps', float,
re.compile(r'\s*Queries per second:\s*([0-9.]+)')),
Result('avg_latency', float,
re.compile(r'\s*Average Latency \(s\):\s*([0-9.]+).*')),
Result('min_latency', float,
re.compile(r'\s*Average Latency \(s\):.*min ([0-9.]+).*')),
Result('max_latency', float,
re.compile(r'\s*Average Latency \(s\):.*max ([0-9.]+).*')),
Result('stddev_latency', float,
re.compile(r'\s*Latency StdDev \(s\):\s*([0-9.]+)')),
Result('max_perfserver_cpu', int, None),
Result('max_perfserver_memory', int, None),
Result('max_kubedns_cpu', int, None),
Result('max_kubedns_memory', int, None),
# Derived results
Result('latency_50_percentile', float, None),
Result('latency_95_percentile', float, None),
Result('latency_99_percentile', float, None),
Result('latency_99_5_percentile', float, None),
]
class Parser(object):
    """
    Parses dnsperf output into scalar results and a latency histogram.
    """

    def __init__(self, out):
        """
        :param out: raw dnsperf stdout as a single string.
        """
        self.lines = [x.strip() for x in out.split('\n')]
        # Maps result column name -> parsed value.
        self.results = {}
        # List of [rtt_ms, count] pairs from '#histogram' lines.
        self.histogram = []

    def parse(self):
        """Run all parsing stages: scalars, histogram, derived percentiles."""
        self._parse_results()
        self._parse_histogram()
        self._compute_derived()

    def _parse_results(self):
        """Extract scalar metrics by matching each RESULTS regex per line."""
        results = {}
        for line in self.lines:
            for result in RESULTS:
                if result.regex is None:
                    continue  # derived/collected columns have no regex
                match = result.regex.match(line)
                if not match:
                    continue
                results[result.name] = result.val_type(match.group(1))
        self.results = results

    def _parse_histogram(self):
        """Collect '#histogram <rtt> <count>' lines into self.histogram.

        The original prefiltered with '^#histogram .*' and then re-matched
        with groups unconditionally, which raised AttributeError on a
        malformed '#histogram' line; non-matching lines are now skipped.
        """
        for line in self.lines:
            match = re.match(r'^#histogram\s+(\d+) (\d+)', line)
            if match:
                rtt, count = [int(x) for x in match.groups()]
                self.histogram.append([rtt, count])

    def _compute_derived(self):
        """Compute latency percentiles from the histogram.

        The original flattened the histogram with reduce(list.__add__, ...),
        which copies the accumulator at every step (quadratic); a flat
        comprehension does the same work in linear time.
        """
        histogram = [rtt for rtt, count in self.histogram
                     for _ in range(count)]
        _log.debug('Latency histogram = %s', histogram)
        for name, ptile in [('latency_50_percentile', 50),
                            ('latency_95_percentile', 95),
                            ('latency_99_percentile', 99),
                            ('latency_99_5_percentile', 99.5)]:
            self.results[name] = float(numpy.percentile(histogram, ptile))  # pylint: disable=no-member
class ResultDb(object):
    """SQLite-backed store for dnsperf run parameters, results, and
    latency histograms, keyed by (run_id, run_subid, pod_name)."""

    def __init__(self, dbfile):
        """Open (and if necessary create the schema in) the given DB file."""
        self.db = sqlite3.connect(dbfile)
        self.c = self.db.cursor()
        sql = """-- run parameters
CREATE TABLE IF NOT EXISTS runs (
  run_id,
  run_subid,
  pod_name,
  {params},
  primary key (run_id, run_subid, pod_name)
)""".format(params=',\n  '.join([param.name for param in PARAMETERS]))
        self.c.execute(sql)
        _log.debug('%s', sql)
        sql = """-- run results
CREATE TABLE IF NOT EXISTS results (
  run_id,
  run_subid,
  pod_name,
  {results},
  primary key (run_id, run_subid, pod_name)
)""".format(results=',\n  '.join([r.name for r in RESULTS]))
        _log.debug('%s', sql)
        self.c.execute(sql)
        sql = """-- latency histogram
CREATE TABLE IF NOT EXISTS histograms (
  run_id,
  run_subid,
  pod_name,
  rtt_ms,
  rtt_ms_count
)
"""
        _log.debug('%s', sql)
        self.c.execute(sql)
        _log.info('Using DB %s', dbfile)

    def put(self, results, ignore_if_dup=True):
        """Insert one run's parameters, results, and histogram rows.

        :param results: dict with 'params' (run_id, run_subid, pod_name,
            plus PARAMETERS values) and 'data' (RESULTS values plus
            'histogram' as [rtt_ms, count] pairs).
        :param ignore_if_dup: skip silently when the key already exists.
        """
        key = [results['params']['run_id'], results['params']['run_subid'], results['params']['pod_name']]
        if self._exists(key) and ignore_if_dup:
            _log.info('Ignoring duplicate results %s', key)
            return
        sql = ('INSERT INTO runs (run_id, run_subid, pod_name,'
               + ','.join([p.name for p in PARAMETERS])
               + ') VALUES ('
               + ','.join(['?'] * (3 + len(PARAMETERS)))
               + ')')
        _log.debug('runs sql -- %s', sql)
        self.c.execute(sql, key + [
            results['params'][p.name] if p.name in results['params'] else None
            for p in PARAMETERS
        ])
        sql = ('INSERT INTO results (run_id, run_subid, pod_name,'
               + ','.join([r.name for r in RESULTS])
               + ') VALUES ('
               + ','.join(['?'] * (3 + len(RESULTS)))
               + ')')
        _log.debug('results sql -- %s', sql)
        self.c.execute(sql, key +
                       [results['data'][r.name]
                        if r.name in results['data'] else None
                        for r in RESULTS])
        for rtt_ms, count in results['data']['histogram']:
            data = {
                'run_id': results['params']['run_id'],
                'run_subid': results['params']['run_subid'],
                # Bug fix: the original omitted pod_name here, leaving the
                # histograms.pod_name column NULL for every row even though
                # the key includes it.
                'pod_name': results['params']['pod_name'],
                'rtt_ms': rtt_ms,
                'rtt_ms_count': count,
            }
            columns = ','.join(list(data.keys()))
            qs = ','.join(['?'] * len(data))
            stmt = 'INSERT INTO histograms (' + columns + ') VALUES (' + qs + ')'
            _log.debug('histogram sql -- %s', stmt)
            self.c.execute(stmt, list(data.values()))

    def get_results(self, run_id, run_subid, pod_name=None):
        """Fetch one results row as a column->value dict, or None.

        Bug fix: the original SQL filtered on pod_name but the method had
        no such parameter and referenced an undefined name, so every call
        raised NameError. pod_name is now an optional filter; when omitted
        the first row matching (run_id, run_subid) is returned.
        """
        sql = ('SELECT ' + ','.join([r.name for r in RESULTS])
               + ' FROM results WHERE run_id = ? and run_subid = ?')
        params = [run_id, run_subid]
        if pod_name is not None:
            sql += ' and pod_name = ?'
            params.append(pod_name)
        _log.debug('%s', sql)
        self.c.execute(sql, params)
        rows = self.c.fetchall()
        return dict(list(zip([r.name for r in RESULTS], rows[0]))) if rows else None

    def _exists(self, key):
        # key is [run_id, run_subid, pod_name].
        self.c.execute(
            'SELECT COUNT(*) FROM runs WHERE run_id = ? and run_subid = ? and pod_name = ?', key)
        count = self.c.fetchall()[0][0]
        return count != 0

    def commit(self):
        """Flush pending inserts to disk."""
        self.db.commit()
| |
# -*- coding: utf-8 -*-
import functools
import mock
import pytest
from urlparse import urlparse
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from osf.models import NodeLog
from osf.models.licenses import NodeLicense
from osf_tests.factories import (
NodeFactory,
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
CollectionFactory,
CommentFactory,
NodeLicenseRecordFactory,
PrivateLinkFactory,
PreprintFactory,
)
from rest_framework import exceptions
from tests.base import fake
from tests.utils import assert_items_equal, assert_latest_log, assert_latest_log_not
from website.views import find_bookmark_collection
from website.util import permissions
from website.util.sanitize import strip_html
@pytest.fixture()
def user():
    # A fresh authenticated user for each test.
    return AuthUserFactory()
@pytest.mark.django_db
class TestNodeDetail:
    @pytest.fixture()
    def user_two(self):
        # A second user who is not a contributor on the projects below.
        return AuthUserFactory()

    @pytest.fixture()
    def project_public(self, user):
        # Public top-level project owned by `user`.
        return ProjectFactory(title='Project One', is_public=True, creator=user)

    @pytest.fixture()
    def project_private(self, user):
        # Private top-level project owned by `user`.
        return ProjectFactory(title='Project Two', is_public=False, creator=user)

    @pytest.fixture()
    def component_public(self, user, project_public):
        # Public component nested under the public project.
        return NodeFactory(parent=project_public, creator=user, is_public=True)
    @pytest.fixture()
    def url_public(self, project_public):
        # API detail URL for the public project.
        return '/{}nodes/{}/'.format(API_BASE, project_public._id)

    @pytest.fixture()
    def url_private(self, project_private):
        # API detail URL for the private project.
        return '/{}nodes/{}/'.format(API_BASE, project_private._id)

    @pytest.fixture()
    def url_component_public(self, component_public):
        # API detail URL for the public component.
        return '/{}nodes/{}/'.format(API_BASE, component_public._id)
@pytest.fixture()
def permissions_read(self):
return ['read']
@pytest.fixture()
def permissions_write(self):
return ['read', 'write']
@pytest.fixture()
def permissions_admin(self):
return ['read', 'admin', 'write']
def test_return_project_details(self, app, user, user_two, project_public, project_private, url_public, url_private, permissions_read, permissions_admin):
# test_return_public_project_details_logged_out
res = app.get(url_public)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == project_public.title
assert res.json['data']['attributes']['description'] == project_public.description
assert res.json['data']['attributes']['category'] == project_public.category
assert_items_equal(res.json['data']['attributes']['current_user_permissions'], permissions_read)
# test_return_public_project_details_contributor_logged_in
res = app.get(url_public, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == project_public.title
assert res.json['data']['attributes']['description'] == project_public.description
assert res.json['data']['attributes']['category'] == project_public.category
assert_items_equal(res.json['data']['attributes']['current_user_permissions'], permissions_admin)
# test_return_public_project_details_non_contributor_logged_in
res = app.get(url_public, auth=user_two.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == project_public.title
assert res.json['data']['attributes']['description'] == project_public.description
assert res.json['data']['attributes']['category'] == project_public.category
assert_items_equal(res.json['data']['attributes']['current_user_permissions'], permissions_read)
# test_return_private_project_details_logged_in_admin_contributor
res = app.get(url_private, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == project_private.title
assert res.json['data']['attributes']['description'] == project_private.description
assert res.json['data']['attributes']['category'] == project_private.category
assert_items_equal(res.json['data']['attributes']['current_user_permissions'], permissions_admin)
# test_return_private_project_details_logged_out
res = app.get(url_private, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_return_private_project_details_logged_in_non_contributor
res = app.get(url_private, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
def test_return_private_project_details_logged_in_write_contributor(self, app, user, user_two, project_private, url_private, permissions_write):
project_private.add_contributor(contributor=user_two, auth=Auth(user), save=True)
res = app.get(url_private, auth=user_two.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == project_private.title
assert res.json['data']['attributes']['description'] == project_private.description
assert res.json['data']['attributes']['category'] == project_private.category
assert_items_equal(res.json['data']['attributes']['current_user_permissions'], permissions_write)
def test_top_level_project_has_no_parent(self, app, url_public):
res = app.get(url_public)
assert res.status_code == 200
assert 'parent' not in res.json['data']['relationships']
assert 'id' in res.json['data']
assert res.content_type == 'application/vnd.api+json'
def test_child_project_has_parent(self, app, user, project_public, url_public):
public_component = NodeFactory(parent=project_public, creator=user, is_public=True)
public_component_url = '/{}nodes/{}/'.format(API_BASE, public_component._id)
res = app.get(public_component_url)
assert res.status_code == 200
url = res.json['data']['relationships']['parent']['links']['related']['href']
assert urlparse(url).path == url_public
def test_node_has(self, app, url_public):
# test_node_has_children_link
res = app.get(url_public)
url = res.json['data']['relationships']['children']['links']['related']['href']
expected_url = '{}children/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_contributors_link
res = app.get(url_public)
url = res.json['data']['relationships']['contributors']['links']['related']['href']
expected_url = '{}contributors/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_node_links_link
res = app.get(url_public)
url = res.json['data']['relationships']['node_links']['links']['related']['href']
expected_url = '{}node_links/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_registrations_link
res = app.get(url_public)
url = res.json['data']['relationships']['registrations']['links']['related']['href']
expected_url = '{}registrations/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_files_link
res = app.get(url_public)
url = res.json['data']['relationships']['files']['links']['related']['href']
expected_url = '{}files/'.format(url_public)
assert urlparse(url).path == expected_url
def test_node_has_comments_link(self, app, user, project_public, url_public):
CommentFactory(node=project_public, user=user)
res = app.get(url_public)
assert res.status_code == 200
assert 'comments' in res.json['data']['relationships'].keys()
url = res.json['data']['relationships']['comments']['links']['related']['href']
res = app.get(url)
assert res.status_code == 200
assert res.json['data'][0]['type'] == 'comments'
def test_node_comments_link_query_params_formatted(self, app, user, project_public, project_private, url_private):
CommentFactory(node=project_public, user=user)
project_private_link = PrivateLinkFactory(anonymous=False)
project_private_link.nodes.add(project_private)
project_private_link.save()
res = app.get(url_private, auth=user.auth)
url = res.json['data']['relationships']['comments']['links']['related']['href']
assert project_private_link.key not in url
res = app.get('{}?view_only={}'.format(url_private, project_private_link.key))
url = res.json['data']['relationships']['comments']['links']['related']['href']
assert project_private_link.key in url
def test_node_has_correct_unread_comments_count(self, app, user, project_public, url_public):
contributor = AuthUserFactory()
project_public.add_contributor(contributor=contributor, auth=Auth(user), save=True)
comment = CommentFactory(node=project_public, user=contributor, page='node')
res = app.get('{}?related_counts=True'.format(url_public), auth=user.auth)
unread = res.json['data']['relationships']['comments']['links']['related']['meta']['unread']
unread_comments_node = unread['node']
assert unread_comments_node == 1
def test_node_properties(self, app, url_public):
res = app.get(url_public)
assert res.json['data']['attributes']['public'] is True
assert res.json['data']['attributes']['registration'] is False
assert res.json['data']['attributes']['collection'] is False
assert res.json['data']['attributes']['tags'] == []
def test_requesting_folder_returns_error(self, app, user):
folder = CollectionFactory(creator=user)
res = app.get(
'/{}nodes/{}/'.format(API_BASE, folder._id),
auth=user.auth,
expect_errors=True
)
assert res.status_code == 404
def test_cannot_return_registrations_at_node_detail_endpoint(self, app, user, project_public):
registration = RegistrationFactory(project=project_public, creator=user)
res = app.get('/{}nodes/{}/'.format(API_BASE, registration._id), auth=user.auth, expect_errors=True)
assert res.status_code == 404
def test_cannot_return_folder_at_node_detail_endpoint(self, app, user):
folder = CollectionFactory(creator=user)
res = app.get('/{}nodes/{}/'.format(API_BASE, folder._id), auth=user.auth, expect_errors=True)
assert res.status_code == 404
@pytest.mark.django_db
class NodeCRUDTestCase:
    """Shared fixtures for the node update/delete test classes below.

    Provides a public and a private project with known title/description/
    category, their detail URLs, and a payload factory.
    """

    @pytest.fixture()
    def user_two(self):
        return AuthUserFactory()

    @pytest.fixture()
    def title(self):
        return 'Cool Project'

    @pytest.fixture()
    def title_new(self):
        return 'Super Cool Project'

    @pytest.fixture()
    def description(self):
        return 'A Properly Cool Project'

    @pytest.fixture()
    def description_new(self):
        return 'An even cooler project'

    @pytest.fixture()
    def category(self):
        return 'data'

    @pytest.fixture()
    def category_new(self):
        return 'project'

    @pytest.fixture()
    def project_public(self, user, title, description, category):
        return ProjectFactory(
            title=title,
            description=description,
            category=category,
            is_public=True,
            creator=user
        )

    @pytest.fixture()
    def project_private(self, user, title, description, category):
        return ProjectFactory(
            title=title,
            description=description,
            category=category,
            is_public=False,
            creator=user
        )

    @pytest.fixture()
    def url_public(self, project_public):
        return '/{}nodes/{}/'.format(API_BASE, project_public._id)

    @pytest.fixture()
    def url_private(self, project_private):
        return '/{}nodes/{}/'.format(API_BASE, project_private._id)

    @pytest.fixture()
    def url_fake(self):
        # Detail URL for a node id that does not exist.
        return '/{}nodes/{}/'.format(API_BASE, '12345')

    @pytest.fixture()
    def make_node_payload(self):
        # Factory fixture: builds a JSON-API update payload for a node.
        def payload(node, attributes):
            return {
                'data': {
                    'id': node._id,
                    'type': 'nodes',
                    'attributes': attributes,
                }
            }
        return payload
@pytest.mark.django_db
class TestNodeUpdate(NodeCRUDTestCase):
    """PUT/PATCH behaviour of the node detail endpoint.

    Exercises permission enforcement, JSON-API payload validation
    (id/type checks), HTML sanitization, and log-entry side effects.
    """

    def test_node_update_invalid_data(self, app, user, url_public):
        # Non-object payloads are rejected with DRF's generic parse error.
        res = app.put_json_api(url_public, 'Incorrect data', auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail

        res = app.put_json_api(url_public, ['Incorrect data'], auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail

    def test_cannot_make_project_public_if_non_contributor(self, app, project_private, url_private, make_node_payload):
        with assert_latest_log_not(NodeLog.MADE_PUBLIC, project_private):
            non_contrib = AuthUserFactory()
            res = app.patch_json(
                url_private,
                make_node_payload(project_private, {'public': True}),
                auth=non_contrib.auth, expect_errors=True
            )
            assert res.status_code == 403

    def test_cannot_make_project_public_if_non_admin_contributor(self, app, project_private, url_private, make_node_payload):
        non_admin = AuthUserFactory()
        project_private.add_contributor(
            non_admin,
            permissions=(permissions.READ, permissions.WRITE),
            auth=Auth(project_private.creator)
        )
        project_private.save()
        res = app.patch_json(
            url_private,
            make_node_payload(project_private, {'public': True}),
            auth=non_admin.auth, expect_errors=True
        )
        assert res.status_code == 403
        project_private.reload()
        assert not project_private.is_public

    def test_can_make_project_public_if_admin_contributor(self, app, project_private, url_private, make_node_payload):
        with assert_latest_log(NodeLog.MADE_PUBLIC, project_private):
            admin_user = AuthUserFactory()
            project_private.add_contributor(
                admin_user,
                permissions=(permissions.READ, permissions.WRITE, permissions.ADMIN),
                auth=Auth(project_private.creator)
            )
            project_private.save()
            res = app.patch_json_api(
                url_private,
                make_node_payload(project_private, {'public': True}),
                auth=admin_user.auth  # self.user is creator/admin
            )
            assert res.status_code == 200
            project_private.reload()
            assert project_private.is_public

    def test_update_errors(self, app, user, user_two, title_new, description_new, category_new, project_public, project_private, url_public, url_private):
        # test_update_project_properties_not_nested
        res = app.put_json_api(url_public, {
            'id': project_public._id,
            'type': 'nodes',
            'title': title_new,
            'description': description_new,
            'category': category_new,
            'public': True,
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Request must include /data.'
        assert res.json['errors'][0]['source']['pointer'] == '/data'

        # test_update_invalid_id
        res = app.put_json_api(url_public, {
            'data': {
                'id': '12345',
                'type': 'nodes',
                'attributes': {
                    'title': title_new,
                    'description': description_new,
                    'category': category_new,
                    'public': True
                }
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 409

        # test_update_invalid_type
        res = app.put_json_api(url_public, {
            'data': {
                'id': project_public._id,
                'type': 'node',
                'attributes': {
                    'title': title_new,
                    'description': description_new,
                    'category': category_new,
                    'public': True
                }
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 409

        # test_update_no_id
        res = app.put_json_api(url_public, {
            'data': {
                'type': 'nodes',
                'attributes': {
                    'title': title_new,
                    'description': description_new,
                    'category': category_new,
                    'public': True
                }
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/id'

        # test_update_no_type
        res = app.put_json_api(url_public, {
            'data': {
                'id': project_public._id,
                'attributes': {
                    'title': title_new,
                    'description': description_new,
                    'category': category_new,
                    'public': True
                }
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/type'

        # test_update_public_project_logged_out
        res = app.put_json_api(url_public, {
            'data': {
                'id': project_public._id,
                'type': 'nodes',
                'attributes': {
                    'title': title_new,
                    'description': description_new,
                    'category': category_new,
                    'public': True
                }
            }
        }, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]

        # test_update_project_invalid_title
        project = {
            'data': {
                'type': 'nodes',
                'id': project_public._id,
                'attributes': {
                    'title': 'A' * 201,
                    'category': 'project',
                }
            }
        }
        res = app.put_json_api(url_public, project, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Title cannot exceed 200 characters.'

        # test_update_public_project_logged_in_but_unauthorized
        res = app.put_json_api(url_public, {
            'data': {
                'id': project_private._id,
                'type': 'nodes',
                'attributes': {
                    'title': title_new,
                    'description': description_new,
                    'category': category_new,
                    'public': True
                }
            }
        }, auth=user_two.auth, expect_errors=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]

        # test_update_private_project_logged_out
        res = app.put_json_api(url_private, {
            'data': {
                'id': project_private._id,
                'type': 'nodes',
                'attributes': {
                    'title': title_new,
                    'description': description_new,
                    'category': category_new,
                    'public': False
                }
            }
        }, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]

        # test_update_private_project_logged_in_non_contributor
        res = app.put_json_api(url_private, {
            'data': {
                'id': project_private._id,
                'type': 'nodes',
                'attributes': {
                    'title': title_new,
                    'description': description_new,
                    'category': category_new,
                    'public': False
                }
            }
        }, auth=user_two.auth, expect_errors=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]

    def test_update_public_project_logged_in(self, app, user, title_new, description_new, category_new, project_public, url_public):
        with assert_latest_log(NodeLog.UPDATED_FIELDS, project_public):
            res = app.put_json_api(url_public, {
                'data': {
                    'id': project_public._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': title_new,
                        'description': description_new,
                        'category': category_new,
                        'public': True
                    }
                }
            }, auth=user.auth)
            assert res.status_code == 200
            assert res.content_type == 'application/vnd.api+json'
            assert res.json['data']['attributes']['title'] == title_new
            assert res.json['data']['attributes']['description'] == description_new
            assert res.json['data']['attributes']['category'] == category_new

    def test_cannot_update_a_registration(self, app, user, project_public):
        registration = RegistrationFactory(project=project_public, creator=user)
        original_title = registration.title
        original_description = registration.description
        url = '/{}nodes/{}/'.format(API_BASE, registration._id)
        res = app.put_json_api(url, {
            'data': {
                'id': registration._id,
                'type': 'nodes',
                'attributes': {
                    'title': fake.catch_phrase(),
                    'description': fake.bs(),
                    'category': 'hypothesis',
                    'public': True
                }
            }
        }, auth=user.auth, expect_errors=True)
        registration.reload()
        assert res.status_code == 404
        # The registration must be untouched by the attempted update.
        assert registration.title == original_title
        assert registration.description == original_description

    def test_update_private_project_logged_in_contributor(self, app, user, title_new, description_new, category_new, project_private, url_private):
        with assert_latest_log(NodeLog.UPDATED_FIELDS, project_private):
            res = app.put_json_api(url_private, {
                'data': {
                    'id': project_private._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': title_new,
                        'description': description_new,
                        'category': category_new,
                        'public': False
                    }
                }
            }, auth=user.auth)
            assert res.status_code == 200
            assert res.content_type == 'application/vnd.api+json'
            assert res.json['data']['attributes']['title'] == title_new
            assert res.json['data']['attributes']['description'] == description_new
            assert res.json['data']['attributes']['category'] == category_new

    def test_update_project_sanitizes_html_properly(self, app, user, category_new, project_public, url_public):
        with assert_latest_log(NodeLog.UPDATED_FIELDS, project_public):
            """Post request should update resource, and any HTML in fields should be stripped"""
            new_title = '<strong>Super</strong> Cool Project'
            new_description = 'An <script>alert("even cooler")</script> project'
            res = app.put_json_api(url_public, {
                'data': {
                    'id': project_public._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': new_title,
                        'description': new_description,
                        'category': category_new,
                        'public': True,
                    }
                }
            }, auth=user.auth)
            assert res.status_code == 200
            assert res.content_type == 'application/vnd.api+json'
            assert res.json['data']['attributes']['title'] == strip_html(new_title)
            assert res.json['data']['attributes']['description'] == strip_html(new_description)

    def test_partial_update_project_updates_project_correctly_and_sanitizes_html(self, app, user, description, category, project_public, url_public):
        with assert_latest_log(NodeLog.EDITED_TITLE, project_public):
            new_title = 'An <script>alert("even cooler")</script> project'
            res = app.patch_json_api(url_public, {
                'data': {
                    'id': project_public._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': new_title
                    }
                }
            }, auth=user.auth)
            assert res.status_code == 200
            assert res.content_type == 'application/vnd.api+json'

            # Re-fetch: title is sanitized, other fields are untouched.
            res = app.get(url_public)
            assert res.status_code == 200
            assert res.content_type == 'application/vnd.api+json'
            assert res.json['data']['attributes']['title'] == strip_html(new_title)
            assert res.json['data']['attributes']['description'] == description
            assert res.json['data']['attributes']['category'] == category

    def test_partial_update_public_project_logged_in(self, app, user, title_new, description, category, project_public, url_public):
        with assert_latest_log(NodeLog.EDITED_TITLE, project_public):
            res = app.patch_json_api(url_public, {
                'data': {
                    'id': project_public._id,
                    'type': 'nodes',
                    'attributes': {
                        'title': title_new,
                    }
                }
            }, auth=user.auth)
            assert res.status_code == 200
            assert res.content_type == 'application/vnd.api+json'
            assert res.json['data']['attributes']['title'] == title_new
            assert res.json['data']['attributes']['description'] == description
            assert res.json['data']['attributes']['category'] == category

    def test_write_to_public_field_non_contrib_forbidden(self, app, user_two, project_public, url_public):
        # Test non-contrib writing to public field
        res = app.patch_json_api(url_public, {
            'data': {
                'attributes': {
                    'public': False},
                'id': project_public._id,
                'type': 'nodes'
            }
        }, auth=user_two.auth, expect_errors=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]

    def test_partial_update_errors(self, app, user, user_two, title_new, title, project_public, project_private, url_public, url_private):
        # test_partial_update_public_project_logged_out
        res = app.patch_json_api(url_public, {
            'data': {
                'id': project_public._id,
                'type': 'nodes',
                'attributes': {
                    'title': title_new
                }
            }
        }, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]

        # test_partial_update_public_project_logged_in_but_unauthorized
        # Public resource, logged in, unauthorized
        res = app.patch_json_api(url_public, {
            'data': {
                'attributes': {
                    'title': title_new},
                'id': project_public._id,
                'type': 'nodes',
            }
        }, auth=user_two.auth, expect_errors=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]

        # test_partial_update_private_project_logged_out
        res = app.patch_json_api(url_private, {
            'data': {
                'id': project_private._id,
                'type': 'nodes',
                'attributes': {
                    'title': title_new
                }
            }
        }, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]

        # test_partial_update_private_project_logged_in_non_contributor
        res = app.patch_json_api(url_private, {
            'data': {
                'attributes': {
                    'title': title_new},
                'id': project_private._id,
                'type': 'nodes',
            }
        }, auth=user_two.auth, expect_errors=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]

        # test_partial_update_invalid_id
        res = app.patch_json_api(url_public, {
            'data': {
                'id': '12345',
                'type': 'nodes',
                'attributes': {
                    'title': title_new,
                }
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 409

        # test_partial_update_invalid_type
        res = app.patch_json_api(url_public, {
            'data': {
                'id': project_public._id,
                'type': 'node',
                'attributes': {
                    'title': title_new,
                }
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 409

        # test_partial_update_no_id
        res = app.patch_json_api(url_public, {
            'data': {
                'type': 'nodes',
                'attributes': {
                    'title': title_new,
                }
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/id'

        # test_partial_update_no_type
        res = app.patch_json_api(url_public, {
            'data': {
                'id': project_public._id,
                'attributes': {
                    'title': title_new,
                }
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/type'

        # Nothing will be updated here
        # test_partial_update_project_properties_not_nested
        res = app.patch_json_api(url_public, {
            'data': {
                'id': project_public._id,
                'type': 'nodes',
                'title': title_new,
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 400

    def test_partial_update_private_project_logged_in_contributor(self, app, user, title_new, description, category, project_private, url_private):
        with assert_latest_log(NodeLog.EDITED_TITLE, project_private):
            res = app.patch_json_api(url_private, {
                'data': {
                    'attributes': {
                        'title': title_new},
                    'id': project_private._id,
                    'type': 'nodes',
                }
            }, auth=user.auth)
            assert res.status_code == 200
            assert res.content_type == 'application/vnd.api+json'
            assert res.json['data']['attributes']['title'] == title_new
            assert res.json['data']['attributes']['description'] == description
            assert res.json['data']['attributes']['category'] == category

    def test_multiple_patch_requests_with_same_category_generates_one_log(self, app, user, project_private, url_private, make_node_payload):
        project_private.category = 'project'
        project_private.save()
        new_category = 'data'
        payload = make_node_payload(project_private, attributes={'category': new_category})
        original_n_logs = project_private.logs.count()

        res = app.patch_json_api(url_private, payload, auth=user.auth)
        assert res.status_code == 200
        project_private.reload()
        assert project_private.category == new_category
        assert project_private.logs.count() == original_n_logs + 1  # sanity check

        # A second identical PATCH must not add another log entry.
        res = app.patch_json_api(url_private, payload, auth=user.auth)
        project_private.reload()
        assert project_private.category == new_category
        assert project_private.logs.count() == original_n_logs + 1

    def test_public_project_with_publicly_editable_wiki_turns_private(self, app, user, project_public, url_public, make_node_payload):
        wiki = project_public.get_addon('wiki')
        wiki.set_editing(permissions=True, auth=Auth(user=user), log=True)
        res = app.patch_json_api(
            url_public,
            make_node_payload(project_public, {'public': False}),
            auth=user.auth  # self.user is creator/admin
        )
        assert res.status_code == 200
@pytest.mark.django_db
class TestNodeDelete(NodeCRUDTestCase):
    """DELETE behaviour of the node detail endpoint: permission checks,
    guard conditions (children, bookmark collections), and soft-delete
    side effects."""

    def test_deletes_node_errors(self, app, user, user_two, project_public, project_private, url_public, url_private, url_fake):
        # test_deletes_public_node_logged_out
        res = app.delete(url_public, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]

        # test_deletes_public_node_fails_if_unauthorized
        res = app.delete_json_api(url_public, auth=user_two.auth, expect_errors=True)
        project_public.reload()
        assert res.status_code == 403
        assert project_public.is_deleted is False
        assert 'detail' in res.json['errors'][0]

        # test_deletes_private_node_logged_out
        res = app.delete(url_private, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]

        # test_deletes_private_node_logged_in_non_contributor
        res = app.delete(url_private, auth=user_two.auth, expect_errors=True)
        project_private.reload()
        assert res.status_code == 403
        assert project_private.is_deleted is False
        assert 'detail' in res.json['errors'][0]

        # test_deletes_invalid_node
        res = app.delete(url_fake, auth=user.auth, expect_errors=True)
        assert res.status_code == 404
        assert 'detail' in res.json['errors'][0]

    def test_deletes_private_node_logged_in_read_only_contributor(self, app, user_two, project_private, url_private):
        project_private.add_contributor(user_two, permissions=[permissions.READ])
        project_private.save()
        res = app.delete(url_private, auth=user_two.auth, expect_errors=True)
        project_private.reload()
        assert res.status_code == 403
        assert project_private.is_deleted is False
        assert 'detail' in res.json['errors'][0]

    def test_delete_project_with_component_returns_error(self, app, user):
        project = ProjectFactory(creator=user)
        component = NodeFactory(parent=project, creator=user)
        # Return a 400 because component must be deleted before deleting the parent
        res = app.delete_json_api(
            '/{}nodes/{}/'.format(API_BASE, project._id),
            auth=user.auth,
            expect_errors=True
        )
        assert res.status_code == 400
        errors = res.json['errors']
        assert len(errors) == 1
        assert (
            errors[0]['detail'] ==
            'Any child components must be deleted prior to deleting this project.')

    def test_delete_bookmark_collection_returns_error(self, app, user):
        bookmark_collection = find_bookmark_collection(user)
        res = app.delete_json_api(
            '/{}nodes/{}/'.format(API_BASE, bookmark_collection._id),
            auth=user.auth,
            expect_errors=True
        )
        # Bookmark collections are collections, so a 404 is returned
        assert res.status_code == 404

    @mock.patch('website.preprints.tasks.update_ezid_metadata_on_change.s')
    def test_delete_node_with_preprint_calls_preprint_update_status(self, mock_update_ezid_metadata_on_change, app, user, project_public, url_public):
        PreprintFactory(project=project_public)
        app.delete_json_api(url_public, auth=user.auth, expect_errors=True)
        project_public.reload()
        assert mock_update_ezid_metadata_on_change.called

    def test_deletes_public_node_succeeds_as_owner(self, app, user, project_public, url_public):
        with assert_latest_log(NodeLog.PROJECT_DELETED, project_public):
            res = app.delete_json_api(url_public, auth=user.auth, expect_errors=True)
            project_public.reload()
            assert res.status_code == 204
            assert project_public.is_deleted is True

    def test_requesting_deleted_returns_410(self, app, project_public, url_public):
        project_public.is_deleted = True
        project_public.save()
        res = app.get(url_public, expect_errors=True)
        assert res.status_code == 410
        assert 'detail' in res.json['errors'][0]

    def test_deletes_private_node_logged_in_contributor(self, app, user, project_private, url_private):
        with assert_latest_log(NodeLog.PROJECT_DELETED, project_private):
            res = app.delete(url_private, auth=user.auth, expect_errors=True)
            project_private.reload()
            assert res.status_code == 204
            assert project_private.is_deleted is True
@pytest.mark.django_db
class TestReturnDeletedNode:
    """Every GET/PUT/DELETE against an already-deleted node returns 410 Gone."""

    @pytest.fixture()
    def project_public_deleted(self, user):
        return ProjectFactory(
            is_deleted=True,
            creator=user,
            title='This public project has been deleted',
            category='project',
            is_public=True
        )

    @pytest.fixture()
    def project_private_deleted(self, user):
        return ProjectFactory(
            is_deleted=True,
            creator=user,
            title='This private project has been deleted',
            category='project',
            is_public=False
        )

    @pytest.fixture()
    def title_new(self):
        return 'This deleted node has been edited'

    @pytest.fixture()
    def url_project_public_deleted(self, project_public_deleted):
        return '/{}nodes/{}/'.format(API_BASE, project_public_deleted._id)

    @pytest.fixture()
    def url_project_private_deleted(self, project_private_deleted):
        return '/{}nodes/{}/'.format(API_BASE, project_private_deleted._id)

    def test_return_deleted_node(self, app, user, title_new, project_public_deleted, project_private_deleted, url_project_public_deleted, url_project_private_deleted):
        # test_return_deleted_public_node
        res = app.get(url_project_public_deleted, expect_errors=True)
        assert res.status_code == 410

        # test_return_deleted_private_node
        res = app.get(url_project_private_deleted, auth=user.auth, expect_errors=True)
        assert res.status_code == 410

        # test_edit_deleted_public_node
        res = app.put_json_api(url_project_public_deleted, params={
            'title': title_new,
            'node_id': project_public_deleted._id,
            'category': project_public_deleted.category
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 410

        # test_edit_deleted_private_node
        res = app.put_json_api(url_project_private_deleted, params={
            'title': title_new,
            'node_id': project_private_deleted._id,
            'category': project_private_deleted.category
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 410

        # test_delete_deleted_public_node
        res = app.delete(url_project_public_deleted, auth=user.auth, expect_errors=True)
        assert res.status_code == 410

        # test_delete_deleted_private_node
        res = app.delete(url_project_private_deleted, auth=user.auth, expect_errors=True)
        assert res.status_code == 410
@pytest.mark.django_db
class TestNodeTags:
@pytest.fixture()
def user_admin(self):
    # User later granted creator-level permissions on the tag fixtures.
    return AuthUserFactory()
@pytest.fixture()
def user_non_contrib(self):
    # User with no permissions on the tag fixture projects.
    return AuthUserFactory()
@pytest.fixture()
def user_read_contrib(self):
    # User intended for read-only contributor scenarios.
    return AuthUserFactory()
@pytest.fixture()
def project_public(self, user, user_admin):
    # Public project with a creator-permission admin and a default contributor.
    project_public = ProjectFactory(title='Project One', is_public=True, creator=user)
    project_public.add_contributor(user_admin, permissions=permissions.CREATOR_PERMISSIONS, save=True)
    project_public.add_contributor(user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
    return project_public
@pytest.fixture()
def project_private(self, user, user_admin):
    # Private counterpart of project_public with the same contributor setup.
    project_private = ProjectFactory(title='Project Two', is_public=False, creator=user)
    project_private.add_contributor(user_admin, permissions=permissions.CREATOR_PERMISSIONS, save=True)
    project_private.add_contributor(user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
    return project_private
@pytest.fixture()
def url_public(self, project_public):
    # Detail URL of the public tag-fixture project.
    return '/{}nodes/{}/'.format(API_BASE, project_public._id)
@pytest.fixture()
def url_private(self, project_private):
    # Detail URL of the private tag-fixture project.
    return '/{}nodes/{}/'.format(API_BASE, project_private._id)
@pytest.fixture()
def payload_public(self, project_public):
    # JSON-API PATCH payload that sets a single tag on the public project.
    return {
        'data': {
            'id': project_public._id,
            'type': 'nodes',
            'attributes': {
                'tags': ['new-tag']
            }
        }
    }
@pytest.fixture()
def payload_private(self, project_private):
return {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'tags': ['new-tag']
}
}
}
def test_public_project_starts_with_no_tags(self, app, url_public):
res = app.get(url_public)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 0
def test_node_detail_does_not_expose_system_tags(self, app, project_public, url_public):
project_public.add_system_tag('systag', save=True)
res = app.get(url_public)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 0
def test_contributor_can_add_tag_to_public_project(self, app, user, project_public, payload_public, url_public):
with assert_latest_log(NodeLog.TAG_ADDED, project_public):
res = app.patch_json_api(url_public, payload_public, auth=user.auth, expect_errors=True)
assert res.status_code == 200
# Ensure data is correct from the PATCH response
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'new-tag'
# Ensure data is correct in the database
project_public.reload()
assert project_public.tags.count() == 1
assert project_public.tags.first()._id == 'new-tag'
# Ensure data is correct when GETting the resource again
reload_res = app.get(url_public)
assert len(reload_res.json['data']['attributes']['tags']) == 1
assert reload_res.json['data']['attributes']['tags'][0] == 'new-tag'
def test_contributor_can_add_tag_to_private_project(self, app, user, project_private, payload_private, url_private):
with assert_latest_log(NodeLog.TAG_ADDED, project_private):
res = app.patch_json_api(url_private, payload_private, auth=user.auth)
assert res.status_code == 200
# Ensure data is correct from the PATCH response
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'new-tag'
# Ensure data is correct in the database
project_private.reload()
assert project_private.tags.count() == 1
assert project_private.tags.first()._id == 'new-tag'
# Ensure data is correct when GETting the resource again
reload_res = app.get(url_private, auth=user.auth)
assert len(reload_res.json['data']['attributes']['tags']) == 1
assert reload_res.json['data']['attributes']['tags'][0] == 'new-tag'
def test_partial_update_project_does_not_clear_tags(self, app, user_admin, project_private, payload_private, url_private):
res = app.patch_json_api(url_private, payload_private, auth=user_admin.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 1
new_payload = {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'public': True
}
}
}
res = app.patch_json_api(url_private, new_payload, auth=user_admin.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 1
new_payload['data']['attributes']['public'] = False
res = app.patch_json_api(url_private, new_payload, auth=user_admin.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 1
def test_add_tag_to_project_errors(self, app, user_non_contrib, user_read_contrib, payload_public, payload_private, url_public, url_private):
# test_non_authenticated_user_cannot_add_tag_to_public_project
res = app.patch_json_api(url_public, payload_public, expect_errors=True, auth=None)
assert res.status_code == 401
# test_non_authenticated_user_cannot_add_tag_to_private_project
res = app.patch_json_api(url_private, payload_private, expect_errors=True, auth=None)
assert res.status_code == 401
# test_non_contributor_cannot_add_tag_to_public_project
res = app.patch_json_api(url_public, payload_public, expect_errors=True, auth=user_non_contrib.auth)
assert res.status_code == 403
# test_non_contributor_cannot_add_tag_to_private_project
res = app.patch_json_api(url_private, payload_private, expect_errors=True, auth=user_non_contrib.auth)
assert res.status_code == 403
# test_read_only_contributor_cannot_add_tag_to_public_project
res = app.patch_json_api(url_public, payload_public, expect_errors=True, auth=user_read_contrib.auth)
assert res.status_code == 403
# test_read_only_contributor_cannot_add_tag_to_private_project
res = app.patch_json_api(url_private, payload_private, expect_errors=True, auth=user_read_contrib.auth)
assert res.status_code == 403
def test_tags_add_and_remove_properly(self, app, user, project_private, payload_private, url_private):
with assert_latest_log(NodeLog.TAG_ADDED, project_private):
res = app.patch_json_api(url_private, payload_private, auth=user.auth)
assert res.status_code == 200
# Ensure adding tag data is correct from the PATCH response
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'new-tag'
with assert_latest_log(NodeLog.TAG_REMOVED, project_private), assert_latest_log(NodeLog.TAG_ADDED, project_private, 1):
# Ensure removing and adding tag data is correct from the PATCH response
res = app.patch_json_api(url_private, {'data': {'id': project_private._id, 'type':'nodes', 'attributes': {'tags':['newer-tag']}}}, auth=user.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'newer-tag'
with assert_latest_log(NodeLog.TAG_REMOVED, project_private):
# Ensure removing tag data is correct from the PATCH response
res = app.patch_json_api(url_private, {'data': {'id': project_private._id, 'type':'nodes', 'attributes': {'tags': []}}}, auth=user.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 0
def test_tags_post_object_instead_of_list(self, user, app):
url = '/{}nodes/'.format(API_BASE)
payload = {'data': {
'type': 'nodes',
'attributes': {
'title': 'new title',
'category': 'project',
'tags': {'foo': 'bar'}
}
}}
res = app.post_json_api(url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".'
def test_tags_patch_object_instead_of_list(self, app, user, payload_public, url_public):
payload_public['data']['attributes']['tags'] = {'foo': 'bar'}
res = app.patch_json_api(url_public, payload_public, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".'
@pytest.mark.django_db
class TestNodeLicense:
    """Tests that node detail responses expose license attributes and the
    license relationship, including inheritance from a parent node."""

    @pytest.fixture()
    def user_admin(self):
        # Contributor who receives creator-level permissions below.
        return AuthUserFactory()

    @pytest.fixture()
    def user_two(self):
        # NOTE(review): not used by any test in this class.
        return AuthUserFactory()

    @pytest.fixture()
    def user_read_contrib(self):
        # NOTE(review): not used by any test in this class.
        return AuthUserFactory()

    @pytest.fixture()
    def license_name(self):
        return 'MIT License'

    @pytest.fixture()
    def node_license(self, license_name):
        # Pre-seeded NodeLicense row looked up by name; assumes license
        # fixtures are installed by the test harness.
        return NodeLicense.objects.filter(name=license_name).first()

    @pytest.fixture()
    def year(self):
        # NOTE(review): '2105' looks deliberate (far-future value), but
        # confirm it is not a typo for '2015'.
        return '2105'

    @pytest.fixture()
    def copyright_holders(self):
        return ['Foo', 'Bar']

    @pytest.fixture()
    def project_public(self, user, user_admin, node_license, year, copyright_holders):
        # Public project carrying a NodeLicenseRecord for node_license.
        project_public = ProjectFactory(title='Project One', is_public=True, creator=user)
        project_public.add_contributor(user_admin, permissions=permissions.CREATOR_PERMISSIONS, save=True)
        project_public.add_contributor(user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
        project_public.node_license = NodeLicenseRecordFactory(
            node_license=node_license,
            year=year,
            copyright_holders=copyright_holders
        )
        project_public.save()
        return project_public

    @pytest.fixture()
    def project_private(self, user, user_admin, node_license, year, copyright_holders):
        # Private counterpart of project_public.
        project_private = ProjectFactory(title='Project Two', is_public=False, creator=user)
        project_private.add_contributor(user_admin, permissions=permissions.CREATOR_PERMISSIONS, save=True)
        project_private.add_contributor(user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
        project_private.node_license = NodeLicenseRecordFactory(
            node_license=node_license,
            year=year,
            copyright_holders=copyright_holders
        )
        project_private.save()
        return project_private

    @pytest.fixture()
    def url_public(self, project_public):
        return '/{}nodes/{}/'.format(API_BASE, project_public._id)

    @pytest.fixture()
    def url_private(self, project_private):
        return '/{}nodes/{}/'.format(API_BASE, project_private._id)

    def test_node_has(self, app, user, node_license, project_public, project_private, url_private, url_public):
        """License attributes and relationship are present on both the
        public and the private node detail responses."""
        # test_public_node_has_node_license
        res = app.get(url_public)
        assert project_public.node_license.year == res.json['data']['attributes']['node_license']['year']
        # test_public_node_has_license_relationship
        res = app.get(url_public)
        expected_license_url = '/{}licenses/{}'.format(API_BASE, node_license._id)
        actual_license_url = res.json['data']['relationships']['license']['links']['related']['href']
        assert expected_license_url in actual_license_url
        # test_private_node_has_node_license
        res = app.get(url_private, auth=user.auth)
        assert project_private.node_license.year == res.json['data']['attributes']['node_license']['year']
        # test_private_node_has_license_relationship
        res = app.get(url_private, auth=user.auth)
        expected_license_url = '/{}licenses/{}'.format(API_BASE, node_license._id)
        actual_license_url = res.json['data']['relationships']['license']['links']['related']['href']
        assert expected_license_url in actual_license_url

    def test_component_return_parent_license_if_no_license(self, app, user, node_license, project_public):
        """A component without its own license inherits its parent's."""
        node = NodeFactory(parent=project_public, creator=user)
        node.save()
        node_url = '/{}nodes/{}/'.format(API_BASE, node._id)
        res = app.get(node_url, auth=user.auth)
        assert not node.node_license
        assert project_public.node_license.year == res.json['data']['attributes']['node_license']['year']
        actual_license_url = res.json['data']['relationships']['license']['links']['related']['href']
        expected_license_url = '/{}licenses/{}'.format(API_BASE, node_license._id)
        assert expected_license_url in actual_license_url
@pytest.mark.django_db
class TestNodeUpdateLicense:
    """Tests for updating a node's license (relationship plus the
    ``node_license`` attributes) via PATCH on the node detail endpoint."""

    @pytest.fixture()
    def user_admin_contrib(self):
        # Creator / admin contributor of the node fixture.
        return AuthUserFactory()

    @pytest.fixture()
    def user_write_contrib(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_read_contrib(self):
        return AuthUserFactory()

    @pytest.fixture()
    def user_non_contrib(self):
        return AuthUserFactory()

    @pytest.fixture()
    def node(self, user_admin_contrib, user_write_contrib, user_read_contrib):
        # Node with an admin creator, a default (write) contributor and a
        # read-only contributor; starts with no license.
        node = NodeFactory(creator=user_admin_contrib)
        node.add_contributor(user_write_contrib, auth=Auth(user_admin_contrib))
        node.add_contributor(user_read_contrib, auth=Auth(user_admin_contrib), permissions=['read'])
        node.save()
        return node

    @pytest.fixture()
    def license_cc0(self):
        # License with no required year/copyright-holder properties.
        return NodeLicense.objects.filter(name='CC0 1.0 Universal').first()

    @pytest.fixture()
    def license_mit(self):
        # NOTE(review): not used by any test in this class.
        return NodeLicense.objects.filter(name='MIT License').first()

    @pytest.fixture()
    def license_no(self):
        # 'No license' requires both year and copyrightHolders.
        return NodeLicense.objects.get(name='No license')

    @pytest.fixture()
    def url_node(self, node):
        return '/{}nodes/{}/'.format(API_BASE, node._id)

    @pytest.fixture()
    def make_payload(self):
        # Factory building a JSON-API PATCH payload; the license
        # relationship block is included only when license_id is given.
        def payload(node_id, license_id=None, license_year=None, copyright_holders=None):
            attributes = {}
            if license_year and copyright_holders:
                attributes = {
                    'node_license': {
                        'year': license_year,
                        'copyright_holders': copyright_holders
                    }
                }
            elif license_year:
                attributes = {
                    'node_license': {
                        'year': license_year
                    }
                }
            elif copyright_holders:
                attributes = {
                    'node_license': {
                        'copyright_holders': copyright_holders
                    }
                }
            return {
                'data': {
                    'type': 'nodes',
                    'id': node_id,
                    'attributes': attributes,
                    'relationships': {
                        'license': {
                            'data': {
                                'type': 'licenses',
                                'id': license_id
                            }
                        }
                    }
                }
            } if license_id else {
                'data': {
                    'type': 'nodes',
                    'id': node_id,
                    'attributes': attributes
                }
            }
        return payload

    @pytest.fixture()
    def make_request(self, app):
        # Thin wrapper so all tests share one PATCH entry point.
        def request(url, data, auth=None, expect_errors=False):
            return app.patch_json_api(url, data, auth=auth, expect_errors=expect_errors)
        return request

    def test_admin_update_license_with_invalid_id(self, user_admin_contrib, node, make_payload, make_request, url_node):
        """An unknown license id yields 404 and leaves the node unchanged."""
        data = make_payload(
            node_id=node._id,
            license_id='thisisafakelicenseid'
        )
        assert node.node_license is None
        res = make_request(url_node, data, auth=user_admin_contrib.auth, expect_errors=True)
        assert res.status_code == 404
        assert res.json['errors'][0]['detail'] == 'Unable to find specified license.'
        node.reload()
        assert node.node_license is None

    def test_admin_can_update_license(self, user_admin_contrib, node, make_payload, make_request, license_cc0, url_node):
        """Admins may set a license that has no required properties."""
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )
        assert node.node_license is None
        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.reload()
        assert node.node_license.node_license == license_cc0
        assert node.node_license.year is None
        assert node.node_license.copyright_holders == []

    def test_admin_can_update_license_record(self, user_admin_contrib, node, make_payload, make_request, license_no, url_node):
        """Admins may set a license together with its required properties."""
        data = make_payload(
            node_id=node._id,
            license_id=license_no._id,
            license_year='2015',
            copyright_holders=['Mr. Monument', 'Princess OSF']
        )
        assert node.node_license is None
        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.reload()
        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2015'
        assert node.node_license.copyright_holders == ['Mr. Monument', 'Princess OSF']

    def test_cannot_update(self, user_write_contrib, user_read_contrib, user_non_contrib, node, make_payload, make_request, license_cc0, url_node):
        """Only admins may change the license: write/read/non-contributors
        get 403 and unauthenticated requests get 401."""
        # def test_rw_contributor_cannot_update_license(self):
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )
        res = make_request(url_node, data, auth=user_write_contrib.auth, expect_errors=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
        # def test_read_contributor_cannot_update_license(self):
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )
        res = make_request(url_node, data, auth=user_read_contrib.auth, expect_errors=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
        # def test_non_contributor_cannot_update_license(self):
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )
        res = make_request(url_node, data, auth=user_non_contrib.auth, expect_errors=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
        # def test_unauthenticated_user_cannot_update_license(self):
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )
        res = make_request(url_node, data, expect_errors=True)
        assert res.status_code == 401
        assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail

    def test_update_node_with_existing_license_year_attribute_only(self, user_admin_contrib, node, make_payload, make_request, license_no, url_node):
        """PATCHing only the year keeps the license and holders intact."""
        node.set_node_license(
            {
                'id': license_no.license_id,
                'year': '2014',
                'copyrightHolders': ['Reason', 'Mr. E']
            },
            Auth(user_admin_contrib),
        )
        node.save()
        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
        data = make_payload(
            node_id=node._id,
            license_year='2015'
        )
        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.node_license.reload()
        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2015'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']

    def test_update_node_with_existing_license_copyright_holders_attribute_only(self, user_admin_contrib, node, make_payload, make_request, license_no, url_node):
        """PATCHing only copyright holders keeps license and year intact."""
        node.set_node_license(
            {
                'id': license_no.license_id,
                'year': '2014',
                'copyrightHolders': ['Reason', 'Mr. E']
            },
            Auth(user_admin_contrib),
        )
        node.save()
        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
        data = make_payload(
            node_id=node._id,
            copyright_holders=['Mr. Monument', 'Princess OSF']
        )
        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.node_license.reload()
        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Mr. Monument', 'Princess OSF']

    def test_update_node_with_existing_license_relationship_only(self, user_admin_contrib, node, make_payload, make_request, license_cc0, license_no, url_node):
        """Changing only the license relationship keeps year and holders."""
        node.set_node_license(
            {
                'id': license_no.license_id,
                'year': '2014',
                'copyrightHolders': ['Reason', 'Mr. E']
            },
            Auth(user_admin_contrib),
        )
        node.save()
        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )
        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.node_license.reload()
        assert node.node_license.node_license == license_cc0
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']

    def test_update_node_with_existing_license_relationship_and_attributes(self, user_admin_contrib, node, make_payload, make_request, license_no, license_cc0, url_node):
        """License relationship and attributes can change in one PATCH."""
        node.set_node_license(
            {
                'id': license_no.license_id,
                'year': '2014',
                'copyrightHolders': ['Reason', 'Mr. E']
            },
            Auth(user_admin_contrib),
            save=True
        )
        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id,
            license_year='2015',
            copyright_holders=['Mr. Monument', 'Princess OSF']
        )
        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.node_license.reload()
        assert node.node_license.node_license == license_cc0
        assert node.node_license.year == '2015'
        assert node.node_license.copyright_holders == ['Mr. Monument', 'Princess OSF']

    def test_update_node_license_without_required_year_in_payload(self, user_admin_contrib, node, make_payload, make_request, license_no, url_node):
        """Omitting a required year is a 400 validation error."""
        data = make_payload(
            node_id=node._id,
            license_id=license_no._id,
            copyright_holders=['Rick', 'Morty']
        )
        res = make_request(url_node, data, auth=user_admin_contrib.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'year must be specified for this license'

    def test_update_node_license_without_required_copyright_holders_in_payload_(self, user_admin_contrib, node, make_payload, make_request, license_no, url_node):
        """Omitting required copyright holders is a 400 validation error."""
        data = make_payload(
            node_id=node._id,
            license_id=license_no._id,
            license_year='1994'
        )
        res = make_request(url_node, data, auth=user_admin_contrib.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'copyrightHolders must be specified for this license'

    def test_update_node_license_adds_log(self, user_admin_contrib, node, make_payload, make_request, license_cc0, url_node):
        """A successful license change appends a 'license_changed' log."""
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )
        logs_before_update = node.logs.count()
        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.reload()
        logs_after_update = node.logs.count()
        assert logs_before_update != logs_after_update
        assert node.logs.latest().action == 'license_changed'

    def test_update_node_license_without_change_does_not_add_log(self, user_admin_contrib, node, make_payload, make_request, license_no, url_node):
        """A no-op license PATCH (same values, holders merely reordered)
        must not add a log entry."""
        node.set_node_license(
            {
                'id': license_no.license_id,
                'year': '2015',
                'copyrightHolders': ['Kim', 'Kanye']
            },
            auth=Auth(user_admin_contrib),
            save=True
        )
        before_num_logs = node.logs.count()
        before_update_log = node.logs.latest()
        data = make_payload(
            node_id=node._id,
            license_id=license_no._id,
            license_year='2015',
            copyright_holders=['Kanye', 'Kim']
        )
        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        node.reload()
        after_num_logs = node.logs.count()
        after_update_log = node.logs.latest()
        assert res.status_code == 200
        assert before_num_logs == after_num_logs
        assert before_update_log._id == after_update_log._id
| |
# the module includes adapted parts of the Python 2 and Python 3 standard
# libraries. It is intended to provide a clean implementation of the pickle
# protocol minus the actual file format on disk. This module contains no
# code specific to HDF5 files.
import sys
from collections import namedtuple
from pickle import (whichmodule, PicklingError, UnpicklingError,
dispatch_table, _extension_registry)
class H5itPicklingError(PicklingError):
    """Raised when h5it cannot serialize an object because it relies on a
    pickle feature this module does not implement (extension codes,
    protocols >= 3)."""
    pass
class H5itUnpicklingError(UnpicklingError):
    """Raised when h5it cannot reconstruct an object from stored data."""
    pass
# Immutable (module, name) pair identifying a global -- i.e. a class or
# function importable as ``module.name``.
GlobalTuple = namedtuple(u'Global', (u'module', u'name'))

# Keys of the dict produced by save_reduce_py2/save_reduce_py3; each maps to
# one element of the tuple returned by an object's __reduce__/__reduce_ex__.
r_key_func = u'func'
r_key_cls = u'cls'
r_key_args = u'args'
r_key_state = u'state'
r_key_listitems = u'listitems'
r_key_dictitems = u'dictitems'

# Interpreter-version flags selecting which code paths / private pickle
# internals are available at import time.
is_py2 = sys.version_info.major == 2
is_py3 = sys.version_info.major == 3
if is_py2:
    # Python 2 type objects used for type checks in save_py2/save_reduce_py2.
    from types import TypeType, StringType, TupleType
if is_py3:
    # Private pickle helpers: qualname-aware getattr and the 2<->3 name maps.
    from pickle import _getattribute, _compat_pickle
# adapted from Python 3 save_global
def save_global_py3(obj, name=None, proto=2, fix_imports=True):
    """Return a :class:`GlobalTuple` locating *obj* as ``module.name``.

    Mirrors ``pickle.Pickler.save_global`` (Python 3) minus the byte-stream
    output: instead of writing opcodes it returns the (module, name) pair as
    ASCII byte strings, applying the Python 3 -> 2 reverse name mappings
    when *fix_imports* is true and *proto* < 3.

    Raises ``PicklingError`` if *obj* cannot be resolved as ``module.name``
    or the identifier is not ASCII, and ``H5itPicklingError`` for pickle
    features this module does not support (extension codes, protocols >= 3).
    """
    if name is None and proto >= 4:
        name = getattr(obj, '__qualname__', None)
    if name is None:
        name = obj.__name__
    module_name = whichmodule(obj, name, allow_qualname=proto >= 4)
    try:
        __import__(module_name, level=0)
        module = sys.modules[module_name]
        obj2 = _getattribute(module, name, allow_qualname=proto >= 4)
    except (ImportError, KeyError, AttributeError):
        raise PicklingError(
            "Can't pickle %r: it's not found as %s.%s" %
            (obj, module_name, name))
    else:
        if obj2 is not obj:
            raise PicklingError(
                "Can't pickle %r: it's not the same object as %s.%s" %
                (obj, module_name, name))
    if proto >= 2:
        code = _extension_registry.get((module_name, name))
        if code:
            # copyreg extension codes would require EXT1/EXT2/EXT4 opcodes.
            raise H5itPicklingError("h5it Can't pickle %r: extension codes are not"
                                    " supported yet." % obj)
    # Non-ASCII identifiers are supported only with protocols >= 3.
    if proto >= 4:
        # STACK_GLOBAL-based saving is not implemented.
        raise H5itPicklingError("h5it Can't pickle %r: protocol %i is not "
                                "supported yet." % (obj, proto))
    elif proto >= 3:
        # GLOBAL with UTF-8 identifiers is not implemented.
        raise H5itPicklingError("h5it Can't pickle %r: protocol %i is not "
                                "supported yet." % (obj, proto))
    else:
        if fix_imports:
            # Translate Python 3 names back to their Python 2 spellings so
            # the result is loadable by Python 2 (pickle's fix_imports).
            r_name_mapping = _compat_pickle.REVERSE_NAME_MAPPING
            r_import_mapping = _compat_pickle.REVERSE_IMPORT_MAPPING
            if (module_name, name) in r_name_mapping:
                module_name, name = r_name_mapping[(module_name, name)]
            if module_name in r_import_mapping:
                module_name = r_import_mapping[module_name]
        try:
            return GlobalTuple(bytes(module_name, "ascii"),
                               bytes(name, "ascii"))
        except UnicodeEncodeError:
            # BUG FIX: the message previously interpolated the module
            # *object* (``module``) instead of its name, rendering as
            # "<module 'x' from '...'>.name" in the error text.
            raise PicklingError(
                "can't pickle global identifier '%s.%s' using "
                "pickle protocol %i" % (module_name, name, proto))
# adapted from Python 2 save_global
def save_global_py2(obj, name=None, proto=2):
    """Locate *obj* as ``module.name`` and return a :class:`GlobalTuple`.

    Python 2 counterpart of :func:`save_global_py3`: resolves the module of
    *obj*, verifies that importing it yields the very same object, and
    refuses copy_reg extension codes (unsupported by h5it).
    """
    if name is None:
        name = obj.__name__
    module = getattr(obj, "__module__", None)
    if module is None:
        module = whichmodule(obj, name)
    try:
        __import__(module)
        resolved = getattr(sys.modules[module], name)
    except (ImportError, KeyError, AttributeError):
        raise PicklingError(
            "Can't pickle %r: it's not found as %s.%s" %
            (obj, module, name))
    # Round-tripping through the import must give back the identical object,
    # otherwise the stored reference would reconstruct something else.
    if resolved is not obj:
        raise PicklingError(
            "Can't pickle %r: it's not the same object as %s.%s" %
            (obj, module, name))
    # copy_reg extension codes would need EXT1/EXT2/EXT4 opcodes.
    if proto >= 2 and _extension_registry.get((module, name)):
        raise H5itPicklingError(
            "h5it Can't pickle %r: extension codes "
            "are not supported yet." % obj)
    return GlobalTuple(module, name)
def save_py3(obj, proto=2):
    """Reduce *obj* to a serializable description (Python 3 path).

    Follows the lookup order of ``pickle.Pickler.save``: the
    ``copyreg.dispatch_table`` first, then class objects (saved as
    globals), then ``__reduce_ex__`` with ``__reduce__`` as fallback.
    Returns either a ``GlobalTuple`` (for classes and "save as global"
    strings) or the dict produced by :func:`save_reduce_py3`.
    """
    t = type(obj)
    # Check copyreg.dispatch_table only.
    reduce = dispatch_table.get(t)
    if reduce is not None:
        rv = reduce(obj)
    else:
        # Check for a class with a custom metaclass; treat as regular class
        try:
            issc = issubclass(t, type)
        except TypeError:  # t is not a class (old Boost; see SF #502085)
            issc = False
        if issc:
            return save_global_py3(obj)
        # Check for a __reduce_ex__ method, fall back to __reduce__
        reduce = getattr(obj, "__reduce_ex__", None)
        if reduce is not None:
            rv = reduce(proto)
        else:
            reduce = getattr(obj, "__reduce__", None)
            if reduce is not None:
                rv = reduce()
            else:
                raise PicklingError("Can't pickle %r object: %r" %
                                    (t.__name__, obj))
    # Check for string returned by reduce(), meaning "save as global"
    if isinstance(rv, str):
        return save_global_py3(obj, rv)
    # Assert that reduce() returned a tuple
    if not isinstance(rv, tuple):
        raise PicklingError("%s must return string or tuple" % reduce)
    # Assert that it returned an appropriately sized tuple
    l = len(rv)
    if not (2 <= l <= 5):
        raise PicklingError("Tuple returned by %s must have "
                            "two to five elements" % reduce)
    # return the reduce() output for serializing
    return save_reduce_py3(obj=obj, *rv)
def save_py2(obj, proto=2):
    """Reduce *obj* to a serializable description (Python 2 path).

    Same lookup order as ``pickle.Pickler.save`` on Python 2:
    ``copy_reg.dispatch_table``, class objects (saved as globals), then
    ``__reduce_ex__`` with ``__reduce__`` as fallback.  Returns a
    ``GlobalTuple`` or the dict produced by :func:`save_reduce_py2`.
    """
    t = type(obj)
    # Check copy_reg.dispatch_table
    reduce = dispatch_table.get(t)
    if reduce:
        rv = reduce(obj)
    else:
        # Check for a class with a custom metaclass; treat as regular class
        try:
            issc = issubclass(t, TypeType)
        except TypeError:  # t is not a class (old Boost; see SF #502085)
            issc = 0
        if issc:
            return save_global_py2(obj, proto=proto)
        # Check for a __reduce_ex__ method, fall back to __reduce__
        reduce = getattr(obj, "__reduce_ex__", None)
        if reduce:
            rv = reduce(proto)
        else:
            reduce = getattr(obj, "__reduce__", None)
            if reduce:
                rv = reduce()
            else:
                raise PicklingError("Can't pickle %r object: %r" %
                                    (t.__name__, obj))
    # Check for string returned by reduce(), meaning "save as global"
    if type(rv) is StringType:
        return save_global_py2(obj, rv)
    # Assert that reduce() returned a tuple
    if type(rv) is not TupleType:
        raise PicklingError("%s must return string or tuple" % reduce)
    # Assert that it returned an appropriately sized tuple
    l = len(rv)
    if not (2 <= l <= 5):
        raise PicklingError("Tuple returned by %s must have "
                            "two to five elements" % reduce)
    # Return the reduce() output for serialization
    return save_reduce_py2(obj=obj, *rv)
def save_reduce_py3(func, args, state=None, listitems=None, dictitems=None,
                    obj=None, proto=2):
    """Convert the tuple returned by ``__reduce_ex__`` into a dict keyed by
    the ``r_key_*`` constants (Python 3 path).

    ``func``/``args`` describe how to reconstruct the object; ``state``,
    ``listitems`` and ``dictitems`` are stored only when present.  A
    reduction whose callable is named ``__newobj__`` is stored as
    (cls, args) instead, matching protocol 2 NEWOBJ semantics.  The
    protocol 4 ``__newobj_ex__`` form is not supported by h5it.
    """
    reduced = {}
    if not isinstance(args, tuple):
        raise PicklingError("args from save_reduce() must be a tuple")
    if not callable(func):
        raise PicklingError("func from save_reduce() must be callable")
    func_name = getattr(func, "__name__", "")
    if proto >= 4 and func_name == "__newobj_ex__":
        # Would require NEWOBJ_EX handling of (cls, args, kwargs).
        raise H5itPicklingError("h5it can't reduce {} {}: __newobj_ex__ is not "
                                "supported.".format(obj, func_name))
    elif proto >= 2 and func_name == "__newobj__":
        # By the pickle protocol-2 convention, a reduce callable named
        # __newobj__ is
        #     def __newobj__(cls, *args): return cls.__new__(cls, *args)
        # so we record the class and the remaining args (NEWOBJ
        # semantics), validating args[0] exactly as pickle does.
        cls = args[0]
        if not hasattr(cls, "__new__"):
            raise PicklingError(
                "args[0] from __newobj__ args has no __new__")
        if obj is not None and cls is not obj.__class__:
            raise PicklingError(
                "args[0] from __newobj__ args has the wrong class")
        args = args[1:]
        reduced[r_key_cls] = save_global_py3(cls)
        reduced[r_key_args] = args
    else:
        reduced[r_key_func] = save_global_py3(func)
        reduced[r_key_args] = args
    # When __reduce__ returns a tuple with 4 or 5 items, the 4th and 5th
    # item are iterators providing list items and dict (key, value) pairs,
    # or None.
    if listitems is not None:
        reduced[r_key_listitems] = listitems
    if dictitems is not None:
        reduced[r_key_dictitems] = dictitems
    if state is not None:
        reduced[r_key_state] = state
    return reduced
def save_reduce_py2(func, args, state=None, listitems=None, dictitems=None,
                    obj=None, proto=2):
    """Convert the tuple returned by ``__reduce__``/``__reduce_ex__`` into a
    dict keyed by the ``r_key_*`` constants (Python 2 path).

    Same contract as :func:`save_reduce_py3`, using Python 2 type checks
    and :func:`save_global_py2` for globals.
    """
    reduced = {}
    # Assert that args is a tuple or None
    if not isinstance(args, TupleType):
        raise PicklingError("args from reduce() should be a tuple")
    # Assert that func is callable
    if not hasattr(func, '__call__'):
        raise PicklingError("func from reduce should be callable")
    # Protocol 2 special case: if func's name is __newobj__, use NEWOBJ
    if proto >= 2 and getattr(func, "__name__", "") == "__newobj__":
        # By the pickle protocol-2 convention, a reduce callable named
        # __newobj__ is
        #     def __newobj__(cls, *args): return cls.__new__(cls, *args)
        # so we record the class and the remaining args (NEWOBJ
        # semantics), validating args[0] exactly as pickle does.
        cls = args[0]
        if not hasattr(cls, "__new__"):
            raise PicklingError(
                "args[0] from __newobj__ args has no __new__")
        if obj is not None and cls is not obj.__class__:
            raise PicklingError(
                "args[0] from __newobj__ args has the wrong class")
        args = args[1:]
        reduced[r_key_cls] = save_global_py2(cls)
        reduced[r_key_args] = args
    else:
        reduced[r_key_func] = save_global_py2(func)
        reduced[r_key_args] = args
    # When __reduce__ returns a tuple with 4 or 5 items, the 4th and 5th
    # item are iterators providing list items and dict (key, value) pairs,
    # or None.
    if listitems is not None:
        reduced[r_key_listitems] = listitems
    if dictitems is not None:
        reduced[r_key_dictitems] = dictitems
    if state is not None:
        reduced[r_key_state] = state
    return reduced
def find_class_py3(module, name, proto=2, fix_imports=True):
    """Resolve ``module.name`` to an object, the Python 3 way.

    For protocols below 3 with *fix_imports* enabled, Python-2 era module
    and attribute names are first translated via ``_compat_pickle``.
    Mirrors ``pickle.Unpickler.find_class`` on Python 3.
    """
    if fix_imports and proto < 3:
        renamed = _compat_pickle.NAME_MAPPING.get((module, name))
        if renamed is not None:
            module, name = renamed
        module = _compat_pickle.IMPORT_MAPPING.get(module, module)
    __import__(module, level=0)
    target = sys.modules[module]
    # Dotted/qualified names are only valid from protocol 4 onwards.
    return _getattribute(target, name, allow_qualname=proto >= 4)
def find_class_py2(module, name):
    """Import *module* and return its attribute *name* (Py2 find_class).

    Subclasses may override this.
    """
    __import__(module)
    return getattr(sys.modules[module], name)
def load_build_py2(inst, state):
    """Apply pickled *state* to *inst* (BUILD opcode semantics, Python 2).

    If the instance defines ``__setstate__`` it receives the raw state and
    nothing else happens (returns None in that case). Otherwise *state* may
    be a plain dict of attributes, or a ``(dict_state, slot_state)`` pair
    whose second item is applied via ``setattr`` (for ``__slots__``).

    NOTE: relies on the Python-2-only builtins ``intern`` and
    ``dict.iteritems``; this implementation is only selected on Python 2.
    """
    setstate = getattr(inst, "__setstate__", None)
    if setstate:
        # The object restores its own state; inst is NOT returned here
        # (early return yields None), matching the py3 variant.
        setstate(state)
        return
    slotstate = None
    if isinstance(state, tuple) and len(state) == 2:
        state, slotstate = state
    if state:
        try:
            d = inst.__dict__
            try:
                # Interning keys lets attribute lookups share storage with
                # the identical attribute-name strings.
                for k, v in state.iteritems():
                    d[intern(k)] = v
            # keys in state don't have to be strings
            # don't blow up, but don't go out of our way
            except TypeError:
                d.update(state)
        except RuntimeError:
            # XXX In restricted execution, the instance's __dict__
            # is not accessible.  Use the old way of unpickling
            # the instance variables.  This is a semantic
            # difference when unpickling in restricted
            # vs. unrestricted modes.
            # Note, however, that cPickle has never tried to do the
            # .update() business, and always uses
            #     PyObject_SetItem(inst.__dict__, key, value) in a
            # loop over state.items().
            for k, v in state.items():
                setattr(inst, k, v)
    if slotstate:
        for k, v in slotstate.items():
            setattr(inst, k, v)
    return inst
def load_build_py3(inst, state):
    """Apply pickled *state* to *inst* (BUILD opcode semantics, Python 3).

    Objects with ``__setstate__`` restore themselves (and None is returned,
    matching the py2 variant). Otherwise *state* is either an attribute dict
    or a ``(dict_state, slot_state)`` pair; slot entries go through setattr.
    """
    custom_setstate = getattr(inst, "__setstate__", None)
    if custom_setstate is not None:
        custom_setstate(state)
        return
    dict_state, slot_state = state, None
    if isinstance(state, tuple) and len(state) == 2:
        dict_state, slot_state = state
    if dict_state:
        intern_str = sys.intern
        target = inst.__dict__
        for key, value in dict_state.items():
            # Interned string keys share storage with attribute names;
            # non-string keys are stored untouched.
            target[intern_str(key) if type(key) is str else key] = value
    if slot_state:
        for key, value in slot_state.items():
            setattr(inst, key, value)
    return inst
def load_global_py3(module, name):
    """GLOBAL opcode (Py3): names arrive as UTF-8 bytes; decode then resolve."""
    module_text = module.decode("utf-8")
    name_text = name.decode("utf-8")
    return find_class_py3(module_text, name_text)
def load_global_py2(module, name):
    """GLOBAL opcode (Py2): names are native strings, resolve directly."""
    resolved = find_class_py2(module, name)
    return resolved
# Bind the version-specific implementations to generic pickle_* names once at
# import time, so the rest of the module is version-agnostic.
# NOTE(review): `is_py2` / `is_py3` and the save_* functions are defined
# earlier in this module (outside this excerpt).
if is_py2:
    pickle_save_global = save_global_py2
    pickle_save = save_py2
    pickle_load_build = load_build_py2
    pickle_load_global = load_global_py2
elif is_py3:
    pickle_save_global = save_global_py3
    pickle_save = save_py3
    pickle_load_build = load_build_py3
    pickle_load_global = load_global_py3
############################### DISPATCH TABLES ###############################
#
# PYTHON 2 TYPE PYTHON 3 TYPE FUNCTION NOTES
# -----------------------------------------------------------------------------
# NoneType type(None) save_none -
# bool bool save_bool -
# IntType - save_int -
# LongType int save_long -
# FloatType float save_float -
# StringType bytes save_{string,bytes} Jython (Py2 only)
# UnicodeType str save_{unicode,str} -
# TupleType tuple save_tuple recursive stuff
# ListType list save_list -
# DictionaryType dict save_dict -
# PyStringMap PyStringMap save_dict Jython only
# InstanceType - save_instance Old style class?
# - set save_set -
# - frozen_set save_frozenset -
# ClassType - save_global -
# FunctionType FunctionType save_global -
# BuiltinFunctionType - save_global -
# TypeType - save_global -
# - type save_type why not Py2?
###############################################################################
######################### RECONSTRUCTION OPCODES ##############################
#
# PYTHON 2 PYTHON 3 FUNCTION NOTES
# -----------------------------------------------------------------------------
# NEWOBJ NEWOBJ load_newobj -
# - NEWOBJ_EX load_newobj_ex Protocol 4+
# REDUCE REDUCE load_reduce -
# BUILD BUILD load_build -
# INST INST load_inst Old style instance
###############################################################################
| |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2019 Stefano Gottardo - @CastagnaIT (original implementation module)
Get the UUID of the device
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
from resources.lib.globals import G
from resources.lib.utils.logging import LOG
from .device_utils import get_system_platform
try: # Python 2
    unicode
except NameError: # Python 3
    unicode = str # pylint: disable=redefined-builtin
# Module-level cache for the derived crypt key; populated lazily by
# get_crypt_key().
__CRYPT_KEY__ = None
def get_crypt_key():
    """Return the crypt key, generating and caching it on first use."""
    # pylint: disable=global-statement
    global __CRYPT_KEY__
    if __CRYPT_KEY__:
        return __CRYPT_KEY__
    __CRYPT_KEY__ = _get_system_uuid()
    return __CRYPT_KEY__
def get_random_uuid():
    """Return a freshly generated (version 4) UUID as a text string."""
    import uuid
    random_id = uuid.uuid4()
    return unicode(random_id)
def get_namespace_uuid(name):
    """Derive a deterministic (version 5) UUID for *name* in the DNS namespace.

    :return: uuid.UUID object
    """
    from uuid import NAMESPACE_DNS, uuid5
    return uuid5(NAMESPACE_DNS, name)
def _get_system_uuid():
    """Build an installation key from a platform machine identifier.

    Picks a platform-specific seed (registry GUID, machine-id, Android
    properties, system_profiler output) and falls back to a fake seed when
    none is available; the seed is folded into a namespace UUID and its raw
    bytes are returned.
    """
    system = get_system_platform()
    seed = None
    if system in ('windows', 'uwp'):
        seed = _get_windows_uuid()
    elif system == 'android':
        seed = _get_android_uuid()
    elif system in ('linux', 'linux raspberrypi'):
        seed = _get_linux_uuid()
    elif system == 'osx':
        # Due to OS restrictions on 'ios' and 'tvos' is not possible to use _get_macos_uuid()
        # See python limits in the wiki development page
        seed = _get_macos_uuid()
    if not seed:
        LOG.debug('It is not possible to get a system UUID creating a new UUID')
        seed = _get_fake_uuid(system not in ('android', 'linux', 'linux raspberrypi'))
    return get_namespace_uuid(str(seed)).bytes
def _get_windows_uuid():
    """Read the Windows MachineGuid from the registry, falling back to the
    output of ``vol c:``; returns None when both attempts fail."""
    # pylint: disable=broad-except
    # pylint: disable=no-member
    machine_id = None
    try:
        try: # Python 2
            import _winreg as winreg
        except ImportError: # Python 3
            import winreg
        key = winreg.OpenKey(
            winreg.HKEY_LOCAL_MACHINE,
            'SOFTWARE\\Microsoft\\Cryptography',
            0,
            winreg.KEY_READ | winreg.KEY_WOW64_64KEY)
        machine_id = winreg.QueryValueEx(key, 'MachineGuid')[0]
        winreg.CloseKey(key)
    except Exception:
        pass
    if not machine_id:
        try:
            import subprocess
            words = subprocess.check_output(['vol', 'c:']).split()
            # NOTE(review): this slice keeps a one-element *list* containing
            # the last word, not the word itself; preserved as-is since the
            # value is only ever stringified as a seed.
            machine_id = words[len(words) - 1:]
        except Exception:
            pass
    return machine_id
def _get_linux_uuid():
    """Return the machine-id of a Linux system as text, or None.

    Reads /var/lib/dbus/machine-id first, then falls back to
    /etc/machine-id (the systemd location, e.g. Fedora). Errors are logged
    and swallowed so the caller can fall back to a generated UUID.
    """
    # pylint: disable=broad-except
    uuid_value = None
    try:
        # Read the file directly instead of shelling out to `cat`.
        with open('/var/lib/dbus/machine-id', 'rb') as handle:
            uuid_value = handle.read().decode('utf-8')
    except Exception as exc:
        import traceback
        LOG.error('_get_linux_uuid first attempt returned: {}', exc)
        LOG.error(G.py2_decode(traceback.format_exc(), 'latin-1'))
    if not uuid_value:
        try:
            # Fedora linux
            with open('/etc/machine-id', 'rb') as handle:
                uuid_value = handle.read().decode('utf-8')
        except Exception as exc:
            LOG.error('_get_linux_uuid second attempt returned: {}', exc)
    return uuid_value
def _get_android_uuid():
    """Build a pseudo-serial for Android by concatenating stable system
    property values read from ``getprop``; returns UTF-8 bytes (empty on
    any failure)."""
    # pylint: disable=broad-except
    import subprocess
    import re
    values = ''
    try:
        # Due to the new android security we cannot get any type of serials
        # BUGFIX: a missing comma previously fused 'ro.product.locale' and
        # 'ro.product.manufacturer' into one bogus property name, so neither
        # property ever contributed to the identifier.
        sys_prop = ['ro.product.board', 'ro.product.brand', 'ro.product.device',
                    'ro.product.locale', 'ro.product.manufacturer',
                    'ro.product.model', 'ro.product.platform',
                    'persist.sys.timezone', 'persist.sys.locale', 'net.hostname']
        # Warning net.hostname property starting from android 10 is deprecated return empty
        proc = subprocess.Popen(['/system/bin/getprop'], stdout=subprocess.PIPE)
        output_data = proc.communicate()[0].decode('utf-8')
        for line in output_data.splitlines():
            prop_pair = re.sub(r'\[|\]|\s', '', line).split(':')
            if prop_pair[0] in sys_prop:
                values += prop_pair[1]
    except Exception:
        pass
    return values.encode('utf-8')
def _get_macos_uuid():
    """Query system_profiler for a hardware UUID (or serial number) on
    macOS; returns None when neither could be obtained."""
    # pylint: disable=broad-except
    import subprocess
    hw_info = None
    try:
        proc = subprocess.Popen(
            ['/usr/sbin/system_profiler', 'SPHardwareDataType', '-detaillevel', 'full', '-xml'],
            stdout=subprocess.PIPE)
        plist_xml = proc.communicate()[0].decode('utf-8')
        if plist_xml:
            hw_info = _parse_osx_xml_plist_data(plist_xml)
    except Exception as exc:
        LOG.debug('Failed to fetch OSX/IOS system profile {}'.format(exc))
    if hw_info:
        if 'UUID' in hw_info:
            return hw_info['UUID']
        if 'serialnumber' in hw_info:
            return hw_info['serialnumber']
    return None
def _parse_osx_xml_plist_data(data):
    """Extract the hardware UUID (or serial number) from system_profiler
    XML plist output.

    :param data: plist XML as decoded text
    :return: dict with an optional 'UUID' or 'serialnumber' key
    """
    import plistlib
    import re
    dict_values = {}
    try: # Python 2
        xml_data = plistlib.readPlistFromString(data)
    except AttributeError: # Python => 3.4
        # pylint: disable=no-member
        # BUGFIX: plistlib.loads() requires bytes on Python 3, but the
        # caller hands us decoded text; encode before parsing.
        xml_data = plistlib.loads(
            data.encode('utf-8') if isinstance(data, str) else data)
    items_dict = xml_data[0]['_items'][0]
    r = re.compile(r'.*UUID.*')  # Find to example "platform_UUID" key
    uuid_keys = list(filter(r.match, list(items_dict.keys())))
    if uuid_keys:
        dict_values['UUID'] = items_dict[uuid_keys[0]]
    if not uuid_keys:
        r = re.compile(r'.*serial.*number.*')  # Find to example "serial_number" key
        serialnumber_keys = list(filter(r.match, list(items_dict.keys())))
        if serialnumber_keys:
            dict_values['serialnumber'] = items_dict[serialnumber_keys[0]]
    return dict_values
def _get_fake_uuid(with_hostname=True):
    """Build a fallback identifier from total memory and, optionally, the
    hostname. Not stable across hardware/OS changes by design."""
    import xbmc
    import platform
    seed_parts = [xbmc.getInfoLabel('System.Memory(total)')]
    if with_hostname:
        # Note: on linux systems hostname content may change after every system update
        try:
            seed_parts.append(platform.node())
        except Exception:  # pylint: disable=broad-except
            # Due to OS restrictions on 'ios' and 'tvos' an error happen
            # See python limits in the wiki development page
            pass
    return '_'.join(seed_parts)
| |
# -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# Lars Buitinck
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.utils import check_array, check_consistent_length
#from sklearn.neighbors import NearestNeighbors
from sklearn.cluster._dbscan_inner import dbscan_inner
# Local imports
#import knn as knnn # imports the shared library knn.so
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski', metric_params=None,
           algorithm='auto', leaf_size=30, p=2, sample_weight=None, n_jobs=1):
    """Perform DBSCAN clustering from vector array or distance matrix.

    Read more in the :ref:`User Guide <dbscan>`.

    Parameters
    ----------
    X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
            array of shape (n_samples, n_samples)
        A feature array, or array of distances between samples if
        ``metric='precomputed'``.
    eps : float, optional
        The maximum distance between two samples for them to be considered
        as in the same neighborhood.
    min_samples : int, optional
        The number of samples (or total weight) in a neighborhood for a point
        to be considered as a core point. This includes the point itself.
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
        metric parameter.
        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square. X may be a sparse matrix, in which case only "nonzero"
        elements may be considered neighbors for DBSCAN.
    metric_params : dict, optional
        Additional keyword arguments for the metric function.
        .. versionadded:: 0.19
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        The algorithm to be used by the NearestNeighbors module
        to compute pointwise distances and find nearest neighbors.
        See NearestNeighbors module documentation for details.
    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or cKDTree. This can affect the speed
        of the construction and query, as well as the memory required
        to store the tree. The optimal value depends
        on the nature of the problem.
    p : float, optional
        The power of the Minkowski metric to be used to calculate distance
        between points.
    sample_weight : array, shape (n_samples,), optional
        Weight of each sample, such that a sample with a weight of at least
        ``min_samples`` is by itself a core sample; a sample with negative
        weight may inhibit its eps-neighbor from being core.
        Note that weights are absolute, and default to 1.
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.

    Returns
    -------
    core_samples : array [n_core_samples]
        Indices of core samples.
    labels : array [n_samples]
        Cluster labels for each point. Noisy samples are given the label -1.

    Notes
    -----
    See examples/cluster/plot_dbscan.py for an example.
    This implementation bulk-computes all neighborhood queries, which increases
    the memory complexity to O(n.d) where d is the average number of neighbors,
    while original DBSCAN had memory complexity O(n).
    Sparse neighborhoods can be precomputed using
    :func:`NearestNeighbors.radius_neighbors_graph
    <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
    with ``mode='distance'``.

    References
    ----------
    Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
    Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
    In: Proceedings of the 2nd International Conference on Knowledge Discovery
    and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
    """
    if not eps > 0.0:
        raise ValueError("eps must be positive.")
    # BUGFIX: string comparisons previously used `is` / `is not` with
    # literals, which only works by accident of CPython interning (and emits
    # SyntaxWarning on modern Pythons); use value (in)equality instead.
    if metric != "rmsd":
        X = check_array(X, accept_sparse='csr')
    if sample_weight is not None:
        sample_weight = np.asarray(sample_weight)
        check_consistent_length(X, sample_weight)
    # Calculate neighborhood for all samples. This leaves the original point
    # in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i. While True, its useless information)
    if metric == 'precomputed' and sparse.issparse(X):
        neighborhoods = np.empty(X.shape[0], dtype=object)
        X.sum_duplicates()  # XXX: modifies X's internals in-place
        X_mask = X.data <= eps
        # BUGFIX: `astype` was called as a free function (NameError at
        # runtime); it is a method of the indices ndarray.
        masked_indices = X.indices.astype(np.intp, copy=False)[X_mask]
        masked_indptr = np.concatenate(([0], np.cumsum(X_mask)))[X.indptr[1:]]
        # insert the diagonal: a point is its own neighbor, but 0 distance
        # means absence from sparse matrix data
        masked_indices = np.insert(masked_indices, masked_indptr,
                                   np.arange(X.shape[0]))
        masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
        # split into rows
        neighborhoods[:] = np.split(masked_indices, masked_indptr)
    elif algorithm == 'buffer_kd_tree':
        from bufferkdtree import NearestNeighbors
        plat_dev_ids = {0: [0]}
        verbose = 1
        neighbors_model = NearestNeighbors(algorithm="buffer_kd_tree",
                                           tree_depth=9,
                                           plat_dev_ids=plat_dev_ids,
                                           verbose=verbose)
        neighborhoods = neighbors_model.radius_neighbors(X, eps,
                                                         return_distance=False)
    elif algorithm != 'vp_tree':
        from sklearn.neighbors import NearestNeighbors
        neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
                                           leaf_size=leaf_size,
                                           metric=metric,
                                           metric_params=metric_params, p=p,
                                           n_jobs=n_jobs)
        neighbors_model.fit(X)
        # This has worst case O(n^2) memory complexity
        neighborhoods = neighbors_model.radius_neighbors(X, eps,
                                                         return_distance=False)
    else:
        # Creates a parallel CPU VP-Tree based search.
        # NOTE(review): `knnn` refers to the commented-out module-level
        # `import knn as knnn`; this branch raises NameError unless that
        # shared library is available and the import is restored.
        print("Using RMSD metric")
        shape_x = np.shape(X.xyz)
        print("Calculating knn...")
        knn = knnn.vp_tree_parallel(np.reshape(X.xyz, (shape_x[0] * shape_x[1] * shape_x[2])), shape_x[1] * 3,
                                    "rmsd_serial")
        # This has worst case O(n^2) memory complexity
        queries = np.linspace(0, len(X.xyz) - 1, len(X.xyz), dtype='int')
        distances, neighborhoods = knn.query_radius(queries, eps)
        print(distances)
    if sample_weight is None:
        n_neighbors = np.array([len(neighbors)
                                for neighbors in neighborhoods])
    else:
        n_neighbors = np.array([np.sum(sample_weight[neighbors])
                                for neighbors in neighborhoods])
    # Initially, all samples are noise.
    if metric == "rmsd":
        labels = -np.ones(X.xyz.shape[0], dtype=np.intp)
    else:
        labels = -np.ones(X.shape[0], dtype=np.intp)
    # A list of all core samples found.
    core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
    dbscan_inner(core_samples, neighborhoods, labels)
    return np.where(core_samples)[0], labels
class DBSCAN(BaseEstimator, ClusterMixin):
    """Perform DBSCAN clustering from vector array or distance matrix.
    DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
    Finds core samples of high density and expands clusters from them.
    Good for data which contains clusters of similar density.
    Read more in the :ref:`User Guide <dbscan>`.
    Parameters
    ----------
    eps : float, optional
        The maximum distance between two samples for them to be considered
        as in the same neighborhood.
    min_samples : int, optional
        The number of samples (or total weight) in a neighborhood for a point
        to be considered as a core point. This includes the point itself.
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.calculate_distance for its
        metric parameter.
        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square. X may be a sparse matrix, in which case only "nonzero"
        elements may be considered neighbors for DBSCAN.
        .. versionadded:: 0.17
           metric *precomputed* to accept precomputed sparse matrix.
    metric_params : dict, optional
        Additional keyword arguments for the metric function.
        .. versionadded:: 0.19
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        The algorithm to be used by the NearestNeighbors module
        to compute pointwise distances and find nearest neighbors.
        See NearestNeighbors module documentation for details.
    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or cKDTree. This can affect the speed
        of the construction and query, as well as the memory required
        to store the tree. The optimal value depends
        on the nature of the problem.
    p : float, optional
        The power of the Minkowski metric to be used to calculate distance
        between points.
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
    Attributes
    ----------
    core_sample_indices_ : array, shape = [n_core_samples]
        Indices of core samples.
    components_ : array, shape = [n_core_samples, n_features]
        Copy of each core sample found by training.
    labels_ : array, shape = [n_samples]
        Cluster labels for each point in the dataset given to fit().
        Noisy samples are given the label -1.
    Notes
    -----
    See examples/cluster/plot_dbscan.py for an example.
    This implementation bulk-computes all neighborhood queries, which increases
    the memory complexity to O(n.d) where d is the average number of neighbors,
    while original DBSCAN had memory complexity O(n).
    Sparse neighborhoods can be precomputed using
    :func:`NearestNeighbors.radius_neighbors_graph
    <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
    with ``mode='distance'``.
    References
    ----------
    Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
    Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
    In: Proceedings of the 2nd International Conference on Knowledge Discovery
    and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
    """
    def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
                 metric_params=None, algorithm='auto', leaf_size=30, p=None,
                 n_jobs=1):
        # Parameters are stored verbatim (sklearn estimator convention);
        # all validation happens inside the module-level dbscan() at fit time.
        self.eps = eps
        self.min_samples = min_samples
        self.metric = metric
        self.metric_params = metric_params
        self.algorithm = algorithm
        self.leaf_size = leaf_size
        self.p = p
        self.n_jobs = n_jobs
    def fit(self, X, y=None, sample_weight=None):
        """Perform DBSCAN clustering from features or distance matrix.
        Parameters
        ----------
        X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
                array of shape (n_samples, n_samples)
            A feature array, or array of distances between samples if
            ``metric='precomputed'``.
        sample_weight : array, shape (n_samples,), optional
            Weight of each sample, such that a sample with a weight of at least
            ``min_samples`` is by itself a core sample; a sample with negative
            weight may inhibit its eps-neighbor from being core.
            Note that weights are absolute, and default to 1.
        """
        #if metric is not "rmsd":
        #    X = check_array(X, accept_sparse='csr')
        # Delegate to the module-level dbscan(), forwarding every
        # constructor parameter via get_params().
        clust = dbscan(X, sample_weight=sample_weight,
                       **self.get_params())
        self.core_sample_indices_, self.labels_ = clust
        if len(self.core_sample_indices_):
            # fix for scipy sparse indexing issue
            self.components_ = X[self.core_sample_indices_].copy()
        else:
            # no core samples
            self.components_ = np.empty((0, X.shape[1]))
        return self
    def fit_predict(self, X, y=None, sample_weight=None):
        """Performs clustering on X and returns cluster labels.
        Parameters
        ----------
        X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
                array of shape (n_samples, n_samples)
            A feature array, or array of distances between samples if
            ``metric='precomputed'``.
        sample_weight : array, shape (n_samples,), optional
            Weight of each sample, such that a sample with a weight of at least
            ``min_samples`` is by itself a core sample; a sample with negative
            weight may inhibit its eps-neighbor from being core.
            Note that weights are absolute, and default to 1.
        Returns
        -------
        y : ndarray, shape (n_samples,)
            cluster labels
        """
        # Convenience wrapper: fit on X, then hand back the labels.
        self.fit(X, sample_weight=sample_weight)
        return self.labels_
| |
from __future__ import division, print_function, absolute_import
from copy import deepcopy
from sklearn.base import clone
from sklearn.metrics import accuracy_score, roc_auc_score, mean_squared_error
import numpy
import pandas
from scipy.special import expit
from six.moves import cPickle
from ..estimators import Classifier, Regressor
from ..report.metrics import OptimalMetric
__author__ = 'Tatiana Likhomanenko, Alex Rogozhnikov'
"""
Abstract code to test any classifier or regressor
"""
# TODO test of features parameters
def generate_classification_sample(n_samples, n_features, distance=1.5, n_classes=2):
    """Draw a labelled Gaussian-blob sample.

    Class ``i`` is centred at (i*distance, ..., i*distance), so *distance*
    directly controls class separation.
    """
    from sklearn.datasets import make_blobs
    blob_centers = numpy.zeros((n_classes, n_features))
    blob_centers += numpy.arange(n_classes)[:, numpy.newaxis] * distance
    X, y = make_blobs(n_samples=n_samples, n_features=n_features, centers=blob_centers)
    feature_names = ['column' + str(i) for i in range(n_features)]
    return pandas.DataFrame(X, columns=feature_names), y
def generate_regression_sample(n_samples, n_features):
    """Draw a regression dataset.

    Features come from a standard Gaussian; the target is the logistic
    function of the feature sum plus small Gaussian noise.
    """
    features = numpy.random.normal(size=[n_samples, n_features])
    feature_names = ['column' + str(i) for i in range(n_features)]
    frame = pandas.DataFrame(features, columns=feature_names)
    noise = numpy.random.normal(size=n_samples) * 0.05
    target = expit(numpy.sum(frame, axis=1)) + noise
    return frame, target
def generate_classification_data(n_classes=2, distance=1.5):
    """Generate a classification dataset of random size (1000-2000 samples,
    10-15 features) together with unit sample weights."""
    n_samples = 1000 + numpy.random.poisson(1000)
    n_features = numpy.random.randint(10, 16)
    X, y = generate_classification_sample(
        n_features=n_features, n_samples=n_samples,
        n_classes=n_classes, distance=distance)
    return X, y, numpy.ones(n_samples, dtype=float)
def generate_regression_data(n_targets=1):
    """Generate a regression dataset of random size with unit weights;
    for n_targets > 1 the target is stacked from randomly rescaled copies."""
    n_samples = 1000 + numpy.random.poisson(1000)
    n_features = numpy.random.randint(10, 16)
    X, y = generate_regression_sample(n_features=n_features, n_samples=n_samples)
    if n_targets > 1:
        y = numpy.vstack([y * numpy.random.random() for _ in range(n_targets)]).T
    assert len(X) == len(y)
    return X, y, numpy.ones(n_samples, dtype=float)
def check_picklability_and_predictions(estimator):
    """Round-trip *estimator* through pickle and sklearn clone, exercising
    the get_params/set_params contract; returns the unpickled copy."""
    # pickle round-trip
    loaded_estimator = cPickle.loads(cPickle.dumps(estimator))
    assert type(estimator) == type(loaded_estimator)
    # clone-ability
    estimator_clone = clone(estimator)
    assert type(estimator) == type(estimator_clone)
    assert set(estimator.get_params().keys()) == set(estimator_clone.get_params().keys()), \
        'something strange was loaded'
    # exercise get_params in both deep modes, then set_params/get_params
    estimator.get_params(deep=False)
    params = estimator.get_params(deep=True)
    estimator_clone.set_params(**params)
    estimator_clone.get_params()
    return loaded_estimator
def check_classification_model(classifier, X, y, check_instance=True, has_staged_pp=True, has_importances=True):
    """Validate a fitted classifier: accuracy, probability sanity, ROC AUC
    (binary only), staged predictions, importances, pickle round-trip."""
    n_classes = len(numpy.unique(y))
    if check_instance:
        assert isinstance(classifier, Classifier)
    labels = classifier.predict(X)
    proba = classifier.predict_proba(X)
    print('PROBABILITIES:', proba)
    score = accuracy_score(y, labels)
    # BUGFIX: this value is accuracy, but was printed under a 'ROC AUC:'
    # label; the real ROC AUC is printed (labelled) in the binary branch.
    print('ACCURACY:', score)
    assert score > 0.7
    assert numpy.allclose(proba.sum(axis=1), 1), 'probabilities do not sum to 1'
    assert numpy.all(proba >= 0.), 'negative probabilities'
    if n_classes == 2:
        # only for binary classification
        auc_score = roc_auc_score(y == numpy.unique(y)[1], proba[:, 1])
        print('ROC AUC:', auc_score)
        assert auc_score > 0.8
    if has_staged_pp:
        for p in classifier.staged_predict_proba(X):
            assert p.shape == (len(X), n_classes)
        # checking that last iteration coincides with previous
        assert numpy.all(p == proba), "staged_pp and pp predictions are different"
    if has_importances:
        importances = classifier.feature_importances_
        assert numpy.array(importances).shape == (len(classifier.features), )
    loaded_classifier = check_picklability_and_predictions(classifier)
    assert numpy.all(classifier.predict_proba(X) == loaded_classifier.predict_proba(X)), 'something strange was loaded'
def check_regression_model(regressor, X, y, check_instance=True, has_stages=True, has_importances=True):
    """Validate a fitted regressor: error vs. target spread, staged
    predictions, feature importances, pickle round-trip consistency."""
    if check_instance:
        assert isinstance(regressor, Regressor)
    predictions = regressor.predict(X)
    mse = mean_squared_error(y, predictions)
    # the model must beat half the standard deviation of the target
    assert mse < numpy.std(y) * 0.5, 'Too big error: ' + str(mse)
    if has_stages:
        for staged in regressor.staged_predict(X):
            assert staged.shape == (len(X),)
        # the final staged iteration must match the plain prediction
        assert numpy.all(staged == predictions)
    if has_importances:
        importances = numpy.array(regressor.feature_importances_)
        assert importances.shape == (len(regressor.features), )
    reloaded = check_picklability_and_predictions(regressor)
    assert numpy.all(regressor.predict(X) == reloaded.predict(X)), 'something strange was loaded'
def fit_on_data(estimator, X, y, sample_weight, supports_weight):
    """Fit *estimator*, forwarding sample_weight only when supported, and
    verify that fit() returns the estimator itself."""
    if supports_weight:
        fitted = estimator.fit(X, y, sample_weight=sample_weight)
    else:
        fitted = estimator.fit(X, y)
    # sklearn contract: fit must return self
    assert fitted == estimator, "fitting doesn't return initial classifier"
    return estimator
def check_classifier(classifier, check_instance=True, has_staged_pp=True, has_importances=True, supports_weight=True,
                     n_classes=2):
    """Full smoke test for a classifier: deepcopy, fit, feature bookkeeping
    and the standard classification-model checks."""
    X, y, sample_weight = generate_classification_data(n_classes=n_classes)
    check_deepcopy(classifier)
    fit_on_data(classifier, X, y, sample_weight, supports_weight=supports_weight)
    # fitting must record the training columns, in order
    assert list(classifier.features) == list(X.columns)
    check_classification_model(classifier, X, y,
                               check_instance=check_instance,
                               has_staged_pp=has_staged_pp,
                               has_importances=has_importances)
def check_regression(regressor, check_instance=True, has_staged_predictions=True, has_importances=True,
                     supports_weight=True, n_targets=1):
    """Full smoke test for a regressor: deepcopy, fit, feature bookkeeping
    and the standard regression-model checks."""
    X, y, sample_weight = generate_regression_data(n_targets=n_targets)
    check_deepcopy(regressor)
    fit_on_data(regressor, X, y, sample_weight, supports_weight=supports_weight)
    # fitting must record the training columns, in order
    assert list(regressor.features) == list(X.columns)
    check_regression_model(regressor, X, y,
                           check_instance=check_instance,
                           has_stages=has_staged_predictions,
                           has_importances=has_importances)
def check_params(estimator_type, n_attempts=4, **params):
    """
    Checking that init, get, set are working normally
    :param estimator_type: i.e. sklearn.ensemble.AdaBoostRegressor
    :param n_attempts: how many times to check
    :param params: parameters that are acceptable for estimator
    """
    # Removed a redundant function-local `import numpy` that shadowed the
    # module-level import.
    for _ in range(n_attempts):
        # take a random subset of the supplied parameters each attempt
        subparams = {k: v for k, v in params.items() if numpy.random.random() > 0.5}
        classifier = estimator_type(**subparams)
        for clf in [classifier, clone(classifier), deepcopy(classifier)]:
            saved_params = clf.get_params()
            for name, value in subparams.items():
                assert saved_params[name] == value, \
                    'Problem with init/get_params {} {} {}'.format(name, value, saved_params[name])
def check_classification_reproducibility(classifier, X, y):
    """Check that cloning or refitting the classifier reproduces exactly
    the same ROC AUC on the training data."""
    classifier.fit(X, y)
    auc = roc_auc_score(y, classifier.predict_proba(X)[:, 1])
    cloned = clone(classifier)
    cloned.fit(X, y)
    cloned_auc = roc_auc_score(y, cloned.predict_proba(X)[:, 1])
    assert auc == cloned_auc, 'cloned network produces different result, {} {}'.format(auc, cloned_auc)
    classifier.fit(X, y)
    refitted_auc = roc_auc_score(y, classifier.predict_proba(X)[:, 1])
    assert auc == refitted_auc, 'running a network twice produces different results, {} {}'.format(auc, refitted_auc)
def check_deepcopy(classifier):
    """Verify that deepcopy (which uses the same machinery as
    pickle/unpickle) preserves the type and parameter set of *classifier*."""
    duplicate = deepcopy(classifier)
    assert type(classifier) == type(duplicate)
    assert set(classifier.get_params().keys()) == set(duplicate.get_params().keys())
def check_grid(classifier, check_instance=True, has_staged_pp=True, has_importances=True):
    """Smoke-test a grid-search wrapper: fit, refit the best estimator,
    run the standard classification checks, return the best estimator."""
    X, y, sample_weight = generate_classification_data()
    assert classifier == classifier.fit(X, y, sample_weight=sample_weight)
    best = classifier.fit_best_estimator(X, y, sample_weight=sample_weight)
    check_classification_model(best, X, y,
                               check_instance=check_instance,
                               has_staged_pp=has_staged_pp,
                               has_importances=has_importances)
    return best
def AMS(s, b):
    """Approximate median significance (HiggsML metric) for signal *s* and
    background *b*, with a small regularisation term added to b."""
    br = 0.01
    regularized_b = b + br
    radicands = 2 * ((s + regularized_b) * numpy.log(1.0 + s / regularized_b) - s)
    return numpy.sqrt(radicands)
def run_grid(model_grid):
    """Run *model_grid* with an AMS-optimising metric, tolerating a missing
    optional model dependency."""
    metric = OptimalMetric(AMS)
    try:
        model_grid(metric)
    except ImportError as e:
        print('Model is not available', e)
| |
#!/usr/bin/env python
from urllib import urlencode, quote_plus
import urllib2
import xml.dom.minidom as minidom
from xml.parsers.expat import ExpatError
try:
import json
except ImportError:
# The json module was added in Python 2.6
json = None
# Opsview state identifiers, used both as filter names and in parsed status
# results throughout this module.
STATE_OK = 'ok'
STATE_WARNING = 'warning'
STATE_CRITICAL = 'critical'
STATE_UNKNOWN = 'unknown'
STATE_UNHANDLED = 'unhandled'
STATE_UP = 'up'
STATE_DOWN = 'down'
if not hasattr(__builtins__, 'all'):
    # all was added in Python 2.5
    def all(target):
        """Backport of all(): True iff every item of *target* is truthy."""
        for item in target:
            if not item:
                return False
        return True
if not hasattr(__builtins__, 'any'):
    # any was added in Python 2.5
    def any(target):
        """Backport of any(): True iff at least one item of *target* is truthy."""
        for item in target:
            if item:
                return True
        return False
def _dict_to_xml(target):
    """Serialize a (possibly nested) dict to an XML fragment.

    Each key becomes an element; dict values recurse, anything else is
    interpolated as-is. Fixes the previous ``and/or`` idiom, which rendered
    an *empty* nested dict as the literal string '{}' instead of an empty
    element (empty dicts are falsy, so the ``or`` fell through).
    """
    element_list = []
    for key in target:
        value = target[key]
        if isinstance(value, dict):
            body = _dict_to_xml(value)
        else:
            body = value
        element_list.append('<%s>%s</%s>' % (key, body, key))
    return ''.join(element_list)
class OpsviewException(Exception):
    """Base exception for all Opsview remote errors."""

    def __init__(self, msg=None):
        # Default to a generic message when none was supplied.
        self.msg = 'Unknown Error' if msg is None else msg

    def __str__(self):
        return 'Error: %s' % self.msg

    def __repr__(self):
        return str(self)
class OpsviewParseException(OpsviewException):
    """Raised when a status payload cannot be parsed.

    The offending text is truncated to ``parse_text_length_limit``
    characters in the rendered error message.
    """

    parse_text_length_limit = 45

    def __init__(self, msg, text):
        super(OpsviewParseException, self).__init__(msg)
        self.parse_text = text

    def __str__(self):
        # BUGFIX: this previously read self.__class__.msg_length_limit,
        # which does not exist (the class attribute is
        # parse_text_length_limit), raising AttributeError on long input;
        # the limit also replaces the hard-coded 45 in the slice.
        limit = self.__class__.parse_text_length_limit
        if len(self.parse_text) > limit:
            text = self.parse_text[:limit] + '...'
        else:
            text = self.parse_text
        return 'Error parsing "%s": %s' % (text, self.msg)
class OpsviewLogicException(OpsviewException):
    """Raised when a request conflicts with Opsview API logic."""

    def __str__(self):
        description = self.msg
        return 'Logic Error: %s' % description
class OpsviewHTTPException(OpsviewException):
    """Raised when an HTTP request to the Opsview server fails."""

    def __str__(self):
        description = self.msg
        return 'HTTP Error: %s' % description
class OpsviewAttributeException(OpsviewException):
    """Raised for an invalid or unknown attribute name."""

    def __str__(self):
        description = self.msg
        return 'Invalid or unknown attribute: %s' % description
class OpsviewValueException(OpsviewException):
    """Raised when a value fails validation for a named field."""
    def __init__(self, value_name, value):
        # NOTE(review): deliberately does not chain to
        # OpsviewException.__init__, so self.msg stays unset; __str__ is
        # overridden and never reads it.
        self.value_name = value_name
        self.value = value
    def __str__(self):
        return 'Invalid value: "%s" as %s' % (self.value, self.value_name)
#class Remote(object):
class OpsviewRemote(object):
    """Remote interface to Opsview server."""
    # Relative URL paths for each supported server operation.  Several
    # status_* keys deliberately share the same endpoint; the query
    # parameters select the actual scope.
    api_urls = dict({
        'acknowledge': 'status/service/acknowledge',
        'status_all': 'api/status/service',
        'status_service': 'api/status/service',
        'status_host': 'api/status/service',
        'status_byhostgroup': 'api/status/service',
        'status_hostgroup': 'api/status/hostgroup',
        'login': 'login',
        'api': 'api',
    })
    # Maps the module-level STATE_* constants to the (parameter, value)
    # pair that selects services in that state.
    filters = dict({
        STATE_OK: ('state', 0),
        STATE_WARNING: ('state', 1),
        STATE_CRITICAL: ('state', 2),
        STATE_UNKNOWN: ('state', 3),
        STATE_UNHANDLED: ('filter', 'unhandled'),
    })
    # Accepted Content-Type values for status requests.
    status_content_types = dict({
        # json is disabled for now: the rest of the code cannot yet
        # handle JSON responses.
        #'json': 'application/json',
        'xml': 'text/xml',
    })
    def __init__(self, base_url, username, password, content_type=None):
        """Store connection settings and build a cookie-aware URL opener.

        content_type selects a key of status_content_types; anything
        unrecognized (including the default None) falls back to XML.
        """
        self.base_url = base_url
        self.username = username
        self.password = password
        # The cookie processor is kept separately so login() can inspect
        # the cookie jar for the auth ticket.
        self._cookies = urllib2.HTTPCookieProcessor()
        self._opener = urllib2.build_opener(self._cookies)
        try:
            self._content_type = self.__class__.status_content_types[content_type]
        except KeyError:
            self._content_type = self.__class__.status_content_types['xml']
    def __str__(self):
        # e.g. "OpsviewRemote(http://opsview.example/)"
        return '%s(%s)' % (self.__class__.__name__, self.base_url)
    def login(self):
        """Login to the Opsview server.

        This is implicitly called on every get/post to the Opsview server
        to make sure we're always authed.  We don't always send an actual
        login request: if an "auth_tkt" cookie is already present we are
        still logged in and return immediately.

        Raises OpsviewHTTPException on transport failure or when the
        ticket is still absent after the attempt (bad credentials).
        """
        if 'auth_tkt' not in [cookie.name for cookie in self._cookies.cookiejar]:
            try:
                self._opener.open(
                    urllib2.Request(self.base_url + self.__class__.api_urls['login'],
                        urlencode(dict({
                            'login':'Log In',
                            'back':self.base_url,
                            'login_username':self.username,
                            'login_password':self.password,
                        })))
                )
            except urllib2.HTTPError, error:
                raise OpsviewHTTPException(error)
            # No ticket after a successful round-trip means the server
            # rejected the credentials.
            if 'auth_tkt' not in [cookie.name for cookie in self._cookies.cookiejar]:
                raise OpsviewHTTPException('Login failed')
    def _acknowledge(self, targets, comment='', notify=True, auto_remove_comment=True):
        """Send acknowledgements for each target in targets.

        Targets should be a dict with this layout:
            targets=dict({
                host1:[list, of, services],
                host2:[another, list, None], #None means acknowledge the host itself
            })
        """
        # The web form expects literal 'on'/'off' strings for booleans.
        if notify:
            notify = 'on'
        else:
            notify = 'off'
        if auto_remove_comment:
            auto_remove_comment = 'on'
        else:
            auto_remove_comment = 'off'
        data = urlencode(dict({
            'from': self.base_url,
            'submit': 'Submit',
            'comment': comment,
            'notify': notify,
            'autoremovecomment':
                auto_remove_comment,
        }))
        # Construct the hosts and services to acknowledge parameters.
        # A None service selects the host itself (host_selection).
        data += '&' + '&'.join([(service and 'service_selection=%s' %
            quote_plus('%s;%s' % (host, service))) or
            'host_selection=%s' % quote_plus(host)
            for host in targets for service in targets[host]])
        return self._send_post(self.__class__.api_urls['acknowledge'], data)
def _send_xml(self, payload):
"""Send payload (a xml Node object) to the api url via POST."""
try:
if isinstance(payload, basestring):
payload = minidom.parseString(payload)
elif isinstance(payload, file):
payload = minidom.parse(payload)
assert isinstance(payload, minidom.Node)
except (AssertionError, ExpatError):
raise OpsviewHTTPException('Invalid XML payload')
response = send_post(self.__class__.api_urls['api'],
payload.toxml(),
dict({'Content-Type':self.__class__.status_content_types['xml']})
)
try:
response = minidom.parse(response)
except ExpatError:
raise OpsviewHTTPException('Recieved non-XML response from Opsview server')
return response
    def _send_get(self, location, parameters=None, headers=None):
        """GET base_url+location with optional query string and headers.

        Logs in first (no-op if already authed) and returns the raw
        file-like response.  Raises OpsviewHTTPException on HTTP errors.
        """
        request = urllib2.Request('%s?%s' % (self.base_url + location, parameters))
        if headers is not None:
            map(
                lambda header_key: request.add_header(header_key, headers[header_key]),
                headers
            )
        # Content-Type is always forced to the configured type; see the
        # workaround in get_status_service().
        request.add_header('Content-Type', self._content_type)
        self.login()
        try:
            reply = self._opener.open(request)
        except urllib2.HTTPError, error:
            raise OpsviewHTTPException(error)
        return reply
    def _send_post(self, location, data, headers=None):
        """POST ``data`` to base_url+location with optional extra headers.

        Logs in first (no-op if already authed) and returns the raw
        file-like response.  Raises OpsviewHTTPException on HTTP errors.
        """
        request = urllib2.Request(self.base_url + location, data)
        if headers is not None:
            map(
                lambda header_key: request.add_header(header_key, headers[header_key]),
                headers
            )
        self.login()
        try:
            reply = self._opener.open(request)
        except urllib2.HTTPError, error:
            raise OpsviewHTTPException(error)
        return reply
    def get_status_all(self, filters=None):
        """Get status of all services.

        Optionally filter the results with a list of filters from
        OpsviewRemote.filters.  Returns a parsed minidom document.
        """
        try:
            filters = [self.__class__.filters[filter] for filter in filters]
        except TypeError:
            # filters was None (not iterable): no filtering.
            filters = []
        try:
            return minidom.parse(self._send_get(
                self.__class__.api_urls['status_all'],
                urlencode(filters)
            ))
        except ExpatError:
            raise OpsviewHTTPException('Recieved invalid status XML')
    def get_status_host(self, host, filters=None):
        """Get status of a host and all its services.

        Optionally filter the results with a list of filters from
        OpsviewRemote.filters.  Returns a parsed minidom document.
        """
        try:
            filters = [self.__class__.filters[filter] for filter in filters]
        except TypeError:
            # filters was None (not iterable): no filtering.
            filters = []
        filters.append(('host', host))
        try:
            return minidom.parse(self._send_get(
                self.__class__.api_urls['status_host'],
                urlencode(filters)
            ))
        except ExpatError:
            raise OpsviewHTTPException('Recieved invalid status XML')
    def get_status_service(self, host, service):
        """Get status of a host's service (case-insensitive name match)."""
        # Temporary hack until forced content type is removed from _send_get()
        old_content_type = self._content_type
        self._content_type = self.__class__.status_content_types['xml']
        host_xml = self.get_status_host(host)
        self._content_type = old_content_type
        services = host_xml.getElementsByTagName('services')
        for node in services:
            if node.getAttribute('name').lower() == service.lower():
                return node
        # This behavior is inconsistent with get_status_host and should be fixed.
        raise OpsviewAttributeException('service')
    def get_status_by_hostgroup(self, hostgroup, filters=None):
        """Get status of the hosts in a hostgroup.

        Optionally filter the results with a list of filters from
        OpsviewRemote.filters.
        """
        try:
            filters = [self.__class__.filters[filter] for filter in filters]
        except TypeError:
            # filters was None (not iterable): no filtering.
            filters = []
        filters.append(('hostgroupid', int(hostgroup)))
        # NOTE(review): unlike the sibling getters this has no ExpatError
        # guard, and it uses the 'status_host' URL key (the endpoints are
        # currently identical in api_urls) — confirm intent.
        return minidom.parse(self._send_get(
            self.__class__.api_urls['status_host'],
            urlencode(filters)
        ))
    def get_status_hostgroup(self, hostgroup=None):
        """Get status of a top-level hostgroup, or all of them when None."""
        if hostgroup is None:
            hostgroup = ''
        try:
            return minidom.parse(self._send_get('%s/%s' %
                (self.__class__.api_urls['status_hostgroup'], hostgroup)))
        except ExpatError:
            raise OpsviewHTTPException('Recieved invalid status XML')
    def acknowledge_service(self, host, service, comment, notify=True, auto_remove_comment=True):
        """Acknowledge a single service on a host."""
        return self._acknowledge(dict({host:[service]}), comment, notify, auto_remove_comment)
    def acknowledge_host(self, host, comment, notify=True, auto_remove_comment=True):
        """Acknowledge a single host (None service means the host itself)."""
        return self._acknowledge(dict({host:[None]}), comment, notify, auto_remove_comment)
    def acknowledge_all(self, comment, notify=True, auto_remove_comment=True):
        """Acknowledge all currently alerting hosts and services.

        Alerting is understood here to be when current_check_attempt is
        equal to max_check_attempts.
        """
        status = self.get_status_all([STATE_WARNING, STATE_CRITICAL, STATE_UNHANDLED])
        alerting = dict({})
        # These two loops can probably stand to be cleaned up a bit.
        for host in status.getElementsByTagName('list'):
            alerting[host.getAttribute('name')] = []
            if int(host.getAttribute('current_check_attempt')) == \
                int(host.getAttribute('max_check_attempts')):
                alerting[host.getAttribute('name')].append(None)
            for service in host.getElementsByTagName('services'):
                if int(service.getAttribute('current_check_attempt')) == \
                    int(service.getAttribute('max_check_attempts')):
                    alerting[host.getAttribute('name')].append(service.getAttribute('name'))
        return self._acknowledge(alerting, comment, notify, auto_remove_comment)
    def create_host(self, **attrs):
        """Create a new host.

        The new host's attributes are passed as arbitrary keyword
        arguments.  The only values checked for are 'name' and 'ip' as
        they are required by Opsview; OpsviewAttributeException lists any
        that are missing.
        """
        required_attrs = ['name', 'ip']
        if not all(map(lambda attr: attr in attrs, required_attrs)):
            raise OpsviewAttributeException(
                ', '.join(filter(lambda attr: attr not in attrs, required_attrs)))
        xml = """<opsview>
            <host action="create">
                %s
            </host>
        </opsview>"""
        return self._send_xml(xml % _dict_to_xml(attrs))
    def clone_host(self, src_host_name, **attrs):
        """Create a new host by cloning an old one.

        Syntax is the same as create_host with the addition of the
        src_host_name argument that selects the host to clone from.
        """
        required_attrs = ['name', 'ip']
        if not all(map(lambda attr: attr in attrs, required_attrs)):
            raise OpsviewAttributeException(
                ', '.join(filter(lambda attr: attr not in attrs, required_attrs)))
        xml = """<opsview>
            <host action="create">
                <clone>
                    <name>%s</name>
                </clone>
                %s
            </host>
        </opsview>"""
        return self._send_xml(xml % (src_host_name, _dict_to_xml(attrs)))
    def delete_host(self, host):
        """Delete a host by name or ID number (digits select by id)."""
        xml = """<opsview>
            <host action="delete" by_%s="%s"/>
        </opsview>"""
        if host.isdigit():
            method = 'id'
        else:
            method = 'name'
        return self._send_xml(xml % (method, host))
    def schedule_downtime(self, hostgroup, start, end, comment):
        """Schedule downtime for a leaf hostgroup by id or name."""
        xml = """<opsview>
            <hostgroup action="change" by_%s="%s">
                <downtime
                    start="%s"
                    end="%s"
                    comment="%s">
                    enable
                </downtime>
            </hostgroup>
        </opsview>"""
        # Digits select the hostgroup by id, anything else by name.
        if hostgroup.isdigit():
            method = 'id'
        else:
            method = 'name'
        return self._send_xml(xml %
            (method, hostgroup, start, end, comment))
    def disable_scheduled_downtime(self, hostgroup):
        """Cancel downtime for a leaf hostgroup by id or name."""
        xml = """<opsview>
            <hostgroup action="change" by_%s="%s">
                <downtime>disable</downtime>
            </hostgroup>
        </opsview>"""
        if hostgroup.isdigit():
            method = 'id'
        else:
            method = 'name'
        return self._send_xml(xml % (method, hostgroup))
    def enable_notifications(self, hostgroup):
        """Enable notifications for a leaf hostgroup by id or name."""
        xml = """<opsview>
            <hostgroup action="change" by_%s="%s">
                <notifications>enable</notifications>
            </hostgroup>
        </opsview>"""
        if hostgroup.isdigit():
            method = 'id'
        else:
            method = 'name'
        return self._send_xml(xml % (method, hostgroup))
    def disable_notifications(self, hostgroup):
        """Disable notifications for a leaf hostgroup by id or name."""
        xml = """<opsview>
            <hostgroup action="change" by_%s="%s">
                <notifications>disable</notifications>
            </hostgroup>
        </opsview>"""
        if hostgroup.isdigit():
            method = 'id'
        else:
            method = 'name'
        return self._send_xml(xml % (method, hostgroup))
    def reload(self):
        """Reload the remote Opsview server's configuration."""
        xml = """<opsview>
            <system action="reload"/>
        </opsview>"""
        return self._send_xml(xml)
#class Node(dict):
class OpsviewNode(dict):
    """Basic Opsview node.

    All nodes require access to an OpsviewRemote.  A node can get its
    remote at init either by passing an actual remote instance or just the
    login args to create one (base_url, username, and password).  If
    neither of those works out, the node searches upwards through the tree
    and uses the first parent that has a remote.  If that also fails it
    raises OpsviewLogicException.
    """
    def __init__(self, parent=None, remote=None, src=None, **remote_login):
        """Attach to the tree, resolve a remote, and optionally parse src.

        NOTE(review): self.remote is pre-assigned from ``remote`` before
        the isinstance check, so a truthy non-OpsviewRemote value skips
        the upward search and is kept as-is — confirm that is intended.
        """
        self.parent = parent
        self.children = None
        self.remote = remote
        if isinstance(remote, OpsviewRemote):
            self.remote = remote
        elif all(map(lambda attr: attr in remote_login, ['base_url', 'username', 'password'])):
            self.remote = OpsviewRemote(**remote_login)
        else:
            # Walk up the tree until some ancestor provides a remote.
            remote_search = self.parent
            while self.remote is None and remote_search is not None:
                self.remote = remote_search.remote
                remote_search = remote_search.parent
            if self.remote is None:
                raise OpsviewLogicException('Unable to find OpsviewRemote for %s' %
                    self)
        if src is not None:
            self.parse(src)
    def __str__(self):
        # Prefer the node's parsed 'name'; fall back to repr before any
        # source has been parsed.
        try:
            return self['name']
        except KeyError:
            return repr(self)
    def append_child(self, child_src):
        """Parse child_src into a child node of this node's child_type."""
        try:
            self.children.append(
                self.__class__.child_type(
                    parent=self,
                    src=child_src,
                    remote=self.remote))
        except TypeError:
            # self.__class__.child_type is None
            raise OpsviewLogicException('%s cannot have children' %
                self.__class__.__name__)
    # Whoops, this replaces the builtin dict.update and does something sort of
    # different. Needs to be replaced with refresh() at some point.
    #def refresh(self, filters=None):
    def update(self, filters=None):
        """Refresh this node from the server; implemented by subclasses."""
        raise NotImplementedError()
    def parse(self, src):
        """Populate this node from src, trying XML first, then JSON."""
        try:
            self.parse_xml(src)
        except OpsviewParseException:
            try:
                self.parse_json(src)
            except OpsviewParseException:
                raise OpsviewParseException('No handler for source format', src)
    def parse_xml(self, src):
        """Populate this node (and its children) from an XML source.

        src may be a string, an open file, or a minidom.Node.  Attribute
        values that look like integers are stored as ints.
        """
        try:
            if isinstance(src, basestring):
                src = minidom.parseString(src)
            elif isinstance(src, file):
                src = minidom.parse(src)
            assert isinstance(src, minidom.Node)
        except (ExpatError, AssertionError):
            raise OpsviewParseException('Failed to parse XML source', src)
        # Descend to this node type's element if src is a wrapper document.
        if not (hasattr(src, 'tagName') and
            src.tagName == self.__class__.status_xml_element_name):
            src = src.getElementsByTagName(self.__class__.status_xml_element_name)[0]
        for i in range(src.attributes.length):
            try:
                self[src.attributes.item(i).name] = int(src.attributes.item(i).value)
            except ValueError:
                self[src.attributes.item(i).name] = src.attributes.item(i).value
        self.children = []
        # This may cause a memory leak if Python doesn't properly garbage
        # collect the released objects.
        try:
            map(
                self.append_child,
                src.getElementsByTagName(
                    self.__class__.child_type.status_xml_element_name)
            )
        except (OpsviewLogicException, AttributeError):
            # Only an error if this node type actually expects children.
            if self.__class__.child_type is not None:
                raise OpsviewParseException('Invalid source structure', src)
    # parse_json only exists when the json module is importable (it was
    # added to the standard library in Python 2.6).
    if json is not None:
        def parse_json(self, src):
            """Populate this node (and its children) from a JSON source.

            src may be a string, an open file, or an already-decoded dict.
            String values that look like integers are stored as ints.
            """
            try:
                if isinstance(src, basestring):
                    src = json.loads(src)
                elif isinstance(src, file):
                    src = json.load(src)
                assert isinstance(src, dict)
            except (ValueError, AssertionError):
                raise OpsviewParseException('Failed to parse JSON source', src)
            if self.__class__.status_json_element_name in src:
                src = src[self.__class__.status_json_element_name]
            for item in filter(lambda item: isinstance(src[item], basestring), src):
                try:
                    self[item] = int(src[item])
                except ValueError:
                    self[item] = src[item]
            self.children = []
            try:
                map(
                    self.append_child,
                    src[self.__class__.child_type.status_json_element_name]
                )
            except (OpsviewLogicException, AttributeError):
                # Only an error if this node type actually expects children.
                if self.__class__.child_type is not None:
                    raise OpsviewParseException('Invalid source structure', src)
    def to_xml(self):
        """Serialize this node as XML wrapped in its element name."""
        return _dict_to_xml(dict({self.__class__.status_xml_element_name:self}))
    # to_json only exists when the json module is importable.
    if json is not None:
        def to_json(self):
            """Serialize this node's mapping as a JSON object string."""
            return json.dumps(self)
#class Service(Node):
class OpsviewService(OpsviewNode):
    """Logical Opsview service node (a leaf: it has no children)."""
    status_xml_element_name = 'services'
    status_json_element_name = 'services'
    child_type = None
    def update(self):
        """Refresh this service's status via the parent host's name."""
        self.parse_xml(self.remote.get_status_service(
            self.parent['name'],
            self['name']
        ))
        return self
#class Host(Node):
class OpsviewHost(OpsviewNode):
    """Logical Opsview host node; children are OpsviewService nodes."""
    status_xml_element_name = 'list'
    status_json_element_name = 'list'
    child_type = OpsviewService
    def update(self, filters=None):
        """Refresh this host (and its services), optionally filtered."""
        self.parse_xml(self.remote.get_status_host(self['name'], filters))
        return self
#class Server(Node):
class OpsviewServer(OpsviewNode):
    """Logical Opsview server node; children are OpsviewHost nodes."""
    status_xml_element_name = 'data'
    status_json_element_name = 'service'
    child_type = OpsviewHost
    def update(self, filters=None):
        """Refresh the status of every host/service, optionally filtered."""
        self.parse_xml(self.remote.get_status_all(filters))
        return self
#class Hostgroup(Server):
class OpsviewHostgroup(OpsviewServer):
    """Logical Opsview Hostgroup node, scoped to one hostgroup id."""
    def __init__(self, parent=None, remote=None, src=None, id=None, **remote_login):
        # 'id' shadows the builtin, but it is part of the public keyword
        # interface so it cannot be renamed.  Must be a non-negative int.
        try:
            self.id = int(id)
            assert self.id >= 0
        except (ValueError, AssertionError):
            raise OpsviewValueException('id', id)
        super(OpsviewHostgroup, self).__init__(parent, remote, src, **remote_login)
    def update(self, filters=None):
        """Refresh the hosts belonging to this hostgroup."""
        self.parse_xml(
            self.remote.get_status_by_hostgroup(self.id, filters))
        return self
| |
# PyFileMaker - Integrating FileMaker and Python
# (c) 2014-2014 Marcin Kawa, kawa@aeguana.com
# (c) 2006-2008 Klokan Petr Pridal, klokan@klokan.cz
# (c) 2002-2006 Pieter Claerhout, pieter@yellowduck.be
#
# http://code.google.com/p/pyfilemaker/
# http://www.yellowduck.be/filemaker/
# Import the main modules
import sys
import re
import base64
import string
import urllib
import requests
import collections
import datetime
import StringIO
try:
from google.appengine.api import urlfetch
except:
urlfetch = False
import httplib
from exceptions import StandardError
# Import the FM modules
import xml2obj
import FMResultset
from FMError import *
uu = urllib.urlencode
class FMServer:
    """The main class for communicating with FileMaker Server"""
    def __init__(self, url='http://login:password@localhost/', db='', layout='', debug=False):
        """Class constructor.

        url packs protocol, credentials, host, port and XML-gateway path
        into one string; each component falls back to a default when
        omitted.  Raises FMError when the url does not match the expected
        shape.
        """
        self._url = url
        m = re.match(r'^((?P<protocol>http)://)?((?P<login>\w+)(:(?P<password>\w+))?@)?(?P<host>[\d\w\-.]+)(:(?P<port>\d+))?/?(?P<address>/.+)?$', self._url)
        if not m:
            raise FMError, "Address of FileMaker Server is not correctly formatted"
        self._protocol = m.group('protocol')
        self._login = m.group('login')
        self._password = m.group('password')
        self._host = m.group('host')
        self._port = m.group('port')
        self._address = m.group('address')
        # Fill in defaults for any component the url left out.
        if not self._protocol: self._protocol = 'http'
        if not self._host: self._host = 'localhost'
        if not self._port: self._port = 80
        if not self._address: self._address = '/fmi/xml/fmresultset.xml'
        if not self._login: self._login = 'pyfilemaker'
        if not self._password: self._password = ''
        self._file_address = 'fmi/xml/cnt/data.%(extension)s'
        self._extra_script = None
        # Per-request state, reset by _doAction() after each call.
        self._maxRecords = 0
        self._skipRecords = 0
        self._db = db
        self._layout = layout
        self._lop = 'and'
        self._dbParams = []
        self._sortParams = []
        self._debug = debug
        if '--debug' in sys.argv and not debug:
            self._debug = True
    @staticmethod
    def toJSON(fm_data, to_lower=False):
        """Recursively convert FM result objects to plain JSON-able values.

        Dates/times are formatted as strings; FMData records become dicts
        (keys lowercased when to_lower is set); anything else passes
        through unchanged.  The type checks compare type() reprs because
        the FMData classes are old-style/instance types.
        """
        if str(type(fm_data)) == "<type 'instance'>":
            ml = []
            for obj in fm_data:
                ml.append(FMServer.toJSON(obj))
            return ml
        elif str(type(fm_data)) == "<class 'PyFileMaker.FMData.FMData'>":
            d = {}
            for field in fm_data:
                orig_f = field
                if to_lower:
                    field = field.lower()
                d[field] = FMServer.toJSON(fm_data[orig_f])
            return d
        elif type(fm_data) == list:
            l = []
            for item in fm_data:
                l.append(FMServer.toJSON(item))
            return l
        elif isinstance(fm_data, datetime.datetime):
            return fm_data.strftime('%d/%m/%Y %H:%M')
        elif isinstance(fm_data, datetime.date):
            return fm_data.strftime('%d/%m/%Y')
        elif isinstance(fm_data, datetime.time):
            return fm_data.strftime('%H:%M')
        else:
            return fm_data
    def setDb(self, db):
        """Select the database to use. You don't need to specify the file
        extension. PyFileMaker will do this automatically."""
        self._db = db
    def setLayout(self, layout):
        """Select the right layout from the database."""
        self._layout = layout
def _setMaxRecords(self, maxRec):
"""Specifies the maximum number of records you want returned (number or constant 'all')"""
if type(maxRec) == int:
self._maxRecords = maxRec
elif type(maxRec) == str and (maxRec.lower == 'all' or maxRec.isdigit()):
self._maxRecords = maxRec.lower
else:
raise FMError, 'Unsupported -max value (not a number or "all").'
    def _setSkipRecords(self, skipRec):
        """Specifies how many records to skip in the found set"""
        if type(skipRec) == int or (type(skipRec) == str and skipRec.isdigit()):
            self._skipRecords = skipRec
        else:
            raise FMError, 'Unsupported -skip value (not a number).'
    def _setLogicalOperator(self, lop):
        """Sets the way the find fields should be combined together."""
        if not lop.lower() in ['and', 'or']:
            raise FMError, 'Unsupported logical operator (not one of "and" or "or").'
        self._lop = lop.lower()
    # NOTE(review): 'Comparasion' is a long-standing misspelling, kept
    # because the name is part of the (internal) call surface.
    def _setComparasionOperator(self, field, oper):
        """Queue a '<field>.op' parameter mapping a human-friendly operator
        spelling ('=', 'contains', '>=', ...) to its FileMaker code."""
        if oper != '':
            validOperators = {
                'eq':'eq',
                'equals':'eq',
                '=':'eq',
                '==':'eq',
                'cn':'cn',
                'contains':'cn',
                '%%':'cn',
                '%':'cn',
                '*':'cn',
                'bw':'bw',
                'begins with':'bw',
                '^':'bw',
                'ew':'ew',
                'ends with':'ew',
                '$':'ew',
                'gt':'gt',
                'greater than':'gt',
                '>':'gt',
                'gte':'gte',
                'greater than or equals':'gte',
                '>=':'gte',
                'lt':'lt',
                'less than':'lt',
                '<':'lt',
                'lte':'lte',
                'less than or equals':'lte',
                '<=':'lte',
                'neq':'neq',
                'not equals':'neq',
                '!=':'neq',
                '<>':'neq'
            }
            if not string.lower(oper) in validOperators.keys():
                raise FMError, 'Invalid operator "'+ oper + '" for "' + field + '"'
            oper = validOperators[oper.lower()]
        self._dbParams.append(
            ["%s.op" % field, oper]
        )
def _addDBParam(self, name, value):
"""Adds a database parameter"""
if name[-4:] == '__OP':
return self._setComparasionOperator(name[:-4], value)
if name[-3:] == '.op':
return self._setComparasionOperator(name[:-3], value)
if name.find('__') != -1:
import re
name = name.replace('__','::')
elif name.find('.') != -1:
name = name.replace('.','::')
self._dbParams.append(
[name, value]
)
    def _addSortParam(self, field, order=''):
        """Adds a sort parameter, order have to be in ['ascend', 'ascending','descend', 'descending','custom']"""
        if order != '':
            validSortOrders = {
                'ascend':'ascend',
                'ascending':'ascend',
                '<':'ascend',
                'descend':'descend',
                'descending':'descend',
                '>':'descend'
            }
            if not string.lower(order) in validSortOrders.keys():
                raise FMError, 'Invalid sort order for "' + field + '"'
            self._sortParams.append(
                [field, validSortOrders[string.lower(order)]]
            )
def _checkRecordID(self):
"""This function will check if a record ID was specified."""
hasRecID = 0
for dbParam in self._dbParams:
if dbParam[0] == 'RECORDID':
hasRecID = 1
break
return hasRecID
    def getFile(self, file_xml_uri):
        """Fetch container-field file data from the FM server.

        Returns a (name, extension, binary-content) tuple derived from
        the container URI.
        """
        find = re.match('/fmi/xml/cnt/([\w\d.-]+)\.([\w]+)?-*', file_xml_uri)
        file_name = find.group(1)
        file_extension = find.group(2)
        file_binary = self._doRequest(is_file=True, file_xml_uri=file_xml_uri)
        return (file_name, file_extension, file_binary)
    def doScript(self, script_name, params=None):
        """Execute a FileMaker script on the current db/layout.

        Returns the first record of the resultset, or None when the
        script produced no records.
        """
        request = [
            uu({'-db': self._db }),
            uu({'-lay': self._layout }),
            uu({'-script': script_name})
        ]
        if params:
            request.append(uu({'-script.param': params }))
        # A find command is required for the script to run at all.
        request.append(uu({'-findall': '' }))
        result = self._doRequest(request)
        result = FMResultset.FMResultset(result)
        try:
            resp = result.resultset[0] # Try to return latest result
        except IndexError:
            resp = None
        return resp
def doScriptAfter(self, func, func_kwargs={}, script_name='', params=None):
""" This function will execute extra script after passed function """
request = [
uu({'-script': script_name})
]
if params:
request.append(uu({'-script.param': params }))
self._extra_script = request
return func(**func_kwargs)
    def doFindQuery(self, query_dict, negate_fields=None):
        """Perform a compound '-findquery' request.

        query_dict maps field names to a value or an iterable of values;
        a leading '!' on a field name negates that clause.
        NOTE(review): negate_fields is accepted but never read — confirm
        whether it was superseded by the '!' prefix convention.
        """
        def process_value(idx, key, value):
            # Build the q<N> declaration and its value for one clause.
            params = []
            values = []
            inner_key = key
            qs_str = "(q%s)"
            if key.startswith('!'):
                inner_key = key[1:]
                qs_str = "!(q%s)"
            params.append(qs_str%idx)
            values.append(uu({'-q%s'%idx: inner_key}))
            values.append(uu({'-q%s.value'%idx: value}))
            return params, values
        query_params = []
        query_values = []
        if negate_fields is None:
            negate_fields = {}
        _idx = 1
        for key, value in query_dict.iteritems():
            if not isinstance(value, str) and isinstance(value, collections.Iterable):
                for inner_value in value:
                    q_list = process_value(_idx, key, inner_value)
                    query_params += q_list[0]
                    query_values += q_list[1]
                    _idx += 1
            else:
                q_list = process_value(_idx, key, value)
                query_params += q_list[0]
                query_values += q_list[1]
                _idx += 1
        query_params_str = ';'.join(query_params)
        request = [
            uu({'-db': self._db }),
            uu({'-lay': self._layout }),
            '-query=%s'%query_params_str
        ]
        request += query_values
        request.append('-findquery')
        resp = self._doRequest(request)
        result = FMResultset.FMResultset(resp).resultset
        return result
    def getDbNames(self):
        """This function returns the list of open databases"""
        request = []
        request.append(uu({'-dbnames': '' }))
        result = self._doRequest(request)
        result = FMResultset.FMResultset(result)
        dbNames = []
        for dbName in result.resultset:
            # Names are normalized to lowercase.
            dbNames.append(string.lower(dbName['DATABASE_NAME']))
        return dbNames
    def getLayoutNames(self):
        """This function returns the list of layouts for the current db."""
        if self._db == '':
            raise FMError, 'No database was selected'
        request = []
        request.append(uu({'-db': self._db }))
        request.append(uu({'-layoutnames': '' }))
        result = self._doRequest(request)
        result = FMResultset.FMResultset(result)
        layoutNames = []
        for layoutName in result.resultset:
            layoutNames.append(string.lower(layoutName['LAYOUT_NAME']))
        return layoutNames
    def getScriptNames(self):
        """This function returns the list of scripts for the current db."""
        if self._db == '':
            raise FMError, 'No database was selected'
        request = []
        request.append(uu({'-db': self._db }))
        request.append(uu({'-scriptnames': '' }))
        result = self._doRequest(request)
        result = FMResultset.FMResultset(result)
        scriptNames = []
        for scriptName in result.resultset:
            scriptNames.append(string.lower(scriptName['SCRIPT_NAME']))
        return scriptNames
    # NOTE(review): WHAT/SORT use mutable defaults; they are only read
    # here, never mutated, so the shared-default pitfall does not bite.
    def _preFind(self, WHAT={}, SORT=[], SKIP=None, MAX=None, LOP='AND'):
        """This function will process attributtes for all -find* commands."""
        # An FMData-like object (has _modified) is addressed by RECORDID;
        # a plain dict contributes each key as a find parameter.
        if hasattr(WHAT, '_modified'):
            self._addDBParam('RECORDID', WHAT.RECORDID)
        elif type(WHAT)==dict:
            for key in WHAT:
                self._addDBParam(key, WHAT[key])
        else:
            raise FMError, 'Python Runtime: Object type (%s) given to on of function doFind* as argument WHAT cannot be used.' % type(WHAT)
        for key in SORT:
            self._addSortParam(key, SORT[key])
        if SKIP: self._setSkipRecords(SKIP)
        if MAX: self._setMaxRecords(MAX)
        if LOP: self._setLogicalOperator(LOP)
        if self._layout == '':
            raise FMError, 'No layout was selected'
    def doFind(self, WHAT={}, SORT=[], SKIP=None, MAX=None, LOP='AND', **params):
        """This function will perform the command -find.

        FileMaker errors 401 (no records) and 8 are translated to an
        empty list rather than raised.
        """
        self._preFind(WHAT, SORT, SKIP, MAX, LOP)
        for key in params:
            self._addDBParam(key, params[key])
        try:
            return self._doAction('-find')
        except FMServerError as e:
            if e.args[0] in [401, 8]:
                return []
    def doFindAll(self, WHAT={}, SORT=[], SKIP=None, MAX=None):
        """This function will perform the command -findall."""
        self._preFind(WHAT, SORT, SKIP, MAX)
        return self._doAction('-findall')
    def doFindAny(self, WHAT={}, SORT=[], SKIP=None, MAX=None, LOP='AND', **params):
        """This function will perform the command -findany."""
        self._preFind(WHAT, SORT, SKIP, MAX, LOP)
        for key in params:
            self._addDBParam(key, params[key])
        return self._doAction('-findany')
    def doDelete(self, WHAT={}):
        """This function will perform the command -delete.

        WHAT must identify a record: an FMData-like object (RECORDID and
        MODID attributes) or a dict with a 'RECORDID' key.
        """
        if hasattr(WHAT, '_modified'):
            self._addDBParam('RECORDID', WHAT.RECORDID)
            self._addDBParam('MODID', WHAT.MODID)
        elif type(WHAT) == dict and WHAT.has_key('RECORDID'):
            self._addDBParam('RECORDID', WHAT['RECORDID'])
        else:
            raise FMError, 'Python Runtime: Object type (%s) given to function doDelete as argument WHAT cannot be used.' % type(WHAT)
        if self._layout == '':
            raise FMError, 'No layout was selected'
        if self._checkRecordID() == 0:
            raise FMError, 'RecordID is missing'
        return self._doAction('-delete')
    def doEdit(self, WHAT={}, **params):
        """This function will perform the command -edit.

        For FMData-like objects only the modified fields are sent, with
        names mapped back through __new2old__ where needed.
        """
        if hasattr(WHAT, '_modified'):
            for key, value in WHAT._modified():
                if WHAT.__new2old__.has_key(key):
                    self._addDBParam(WHAT.__new2old__[key].encode('utf-8'), value)
                else:
                    self._addDBParam(key, value)
            self._addDBParam('RECORDID', WHAT.RECORDID)
            self._addDBParam('MODID', WHAT.MODID)
        elif type(WHAT)==dict:
            for key in WHAT:
                self._addDBParam(key, WHAT[key])
        else:
            raise FMError, 'Python Runtime: Object type (%s) given to function doEdit as argument WHAT cannot be used.' % type(WHAT)
        if self._layout == '':
            raise FMError, 'No layout was selected'
        for key in params:
            self._addDBParam(key, params[key])
        if len(self._dbParams) == 0:
            raise FMError, 'No data to be edited'
        if self._checkRecordID() == 0:
            raise FMError, 'RecordID is missing'
        return self._doAction('-edit')
    def doNew(self, WHAT={}, **params):
        """This function will perform the command -new.

        RECORDID/MODID are skipped for FMData-like objects since the
        server assigns them for a new record.
        """
        if hasattr(WHAT, '_modified'):
            for key in WHAT:
                if key not in ['RECORDID','MODID']:
                    if WHAT.__new2old__.has_key(key):
                        self._addDBParam(WHAT.__new2old__[key].encode('utf-8'), WHAT[key])
                    else:
                        self._addDBParam(key, WHAT[key])
        elif type(WHAT)==dict:
            for key in WHAT:
                self._addDBParam(key, WHAT[key])
        else:
            raise FMError, 'Python Runtime: Object type (%s) given to function doNew as argument WHAT cannot be used.' % type(WHAT)
        if self._layout == '':
            raise FMError, 'No layout was selected'
        for key in params:
            self._addDBParam(key, params[key])
        if len(self._dbParams) == 0:
            raise FMError, 'No data to be added'
        return self._doAction('-new')
    def doView(self):
        """This function will perform the command -view. (Retrieves the metadata section of XML document and an empty recordset)"""
        if self._layout == '':
            raise FMError, 'No layout was selected'
        return self._doAction('-view')
    def doDup(self, WHAT={}, **params):
        """This function will perform the command -dup (duplicate record).

        WHAT must identify the record to duplicate (RECORDID required).
        """
        if hasattr(WHAT, '_modified'):
            for key, value in WHAT._modified():
                if WHAT.__new2old__.has_key(key):
                    self._addDBParam(WHAT.__new2old__[key].encode('utf-8'), value)
                else:
                    self._addDBParam(key, value)
            self._addDBParam('RECORDID', WHAT.RECORDID)
            self._addDBParam('MODID', WHAT.MODID)
        elif type(WHAT) == dict:
            for key in WHAT:
                self._addDBParam(key, WHAT[key])
        else:
            raise FMError, 'Python Runtime: Object type (%s) given to function doDup as argument WHAT cannot be used.' % type(WHAT)
        if self._layout == '':
            raise FMError, 'No layout was selected'
        for key in params:
            self._addDBParam(key, params[key])
        if self._checkRecordID() == 0:
            raise FMError, 'RecordID is missing'
        return self._doAction('-dup')
    def _doAction(self, action):
        """This function will perform a FileMaker action.

        Assembles the query from the queued db/sort parameters, issues the
        request, and parses the resultset.  Per-request state is always
        cleared afterwards (finally block), even on error.
        """
        if self._db == '':
            raise FMError, 'No database was selected'
        result = ''
        try:
            request = [
                uu({'-db': self._db })
            ]
            if self._layout != '':
                request.append(uu({'-lay': self._layout }))
            if action == '-find' and self._lop != 'and':
                request.append(uu({'-lop': self._lop }))
            if action in ['-find', '-findall']:
                if self._skipRecords != 0:
                    request.append(uu({ '-skip': self._skipRecords }))
                if self._maxRecords != 0:
                    request.append(uu({ '-max': self._maxRecords }))
                for i in range(0, len(self._sortParams)):
                    sort = self._sortParams[i]
                    request.append(uu({ '-sortfield.'+str(i+1): sort[0] }))
                    if sort[1] != '':
                        request.append(uu({ '-sortorder.'+str(i+1): sort[1] }))
            for dbParam in self._dbParams:
                if dbParam[0] == 'RECORDID':
                    request.append(uu({ '-recid': dbParam[1] }))
                elif dbParam[0] == 'MODID':
                    request.append(uu({ '-modid': dbParam[1] }))
                elif hasattr(dbParam[1], 'strftime'):
                    # Date-like values: render date-only vs datetime by
                    # whether the object carries a time component.
                    d = dbParam[1]
                    if (not hasattr(d, 'second')):
                        request.append(uu({ dbParam[0]: d.strftime('%m-%d-%Y') }))
                    else:
                        request.append(uu({ dbParam[0]: d.strftime('%m-%d-%Y %H:%M:%S') }))
                    del(d)
                else:
                    request.append(uu({ dbParam[0]: dbParam[1] }))
            request.append(action)
            if self._extra_script:
                # One-shot script queued by doScriptAfter().
                request += self._extra_script
                self._extra_script = None
            result = self._doRequest(request)
            try:
                result = FMResultset.FMResultset(result)
            except FMFieldError, value:
                # NOTE(review): realfields is computed but unused — the
                # error message reports the requested fields only.
                realfields = FMServer(self._buildUrl(), self._db, self._layout).doView()
                l = []
                for k, v in self._dbParams:
                    if k[-3:] != '.op' and k[0] != '-':
                        l.append(("'%s'" % k.replace('::','.')).encode('utf-8'))
                raise FMError, "Field(s) %s not found on layout '%s'" % (', '.join(l), self._layout)
            if action == '-view':
                result = result.fieldNames
        finally:
            # Reset per-request state regardless of success.
            self._dbParams = []
            self._sortParams = []
            self._skipRecords = 0
            self._maxRecords = 0
            self._lop = 'and'
        return result
    def _buildUrl(self):
        """Builds url for normal FM requests.

        NOTE(review): _address always begins with '/' (regex and default
        both enforce it), so the '/%(address)s' template yields a double
        slash — confirm the server tolerates this.
        """
        return '%(protocol)s://%(host)s:%(port)s/%(address)s'%{
            'protocol': self._protocol,
            'host': self._host,
            'port': self._port,
            'address': self._address,
        }
    def _buildFileUrl(self, xml_req):
        """Builds url for fetching the files from FM."""
        return '%(protocol)s://%(host)s:%(port)s%(xml_req)s'%{
            'protocol': self._protocol,
            'host': self._host,
            'port': self._port,
            'xml_req': xml_req,
        }
    def _doRequest(self, request=None, is_file=False, file_xml_uri=''):
        """This function will perform the specified request on the FileMaker
        server, and it will return the raw result from FileMaker.

        request is a list of pre-urlencoded 'key=value' strings that are
        joined with '&'; file fetches bypass the query machinery and hit
        the container URI directly.  Raises requests.HTTPError on a bad
        status.
        """
        if request is None:
            request = []
        if is_file and file_xml_uri:
            url = self._buildFileUrl(file_xml_uri)
        else:
            request = '&'.join(request)
            url = "%s?%s" % (self._buildUrl(), request)
        resp = requests.get(
            url = url,
            auth = (self._login, self._password)
        )
        resp.raise_for_status()
        return resp.content
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.