#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Thermos runner.
This module contains the TaskRunner, the core component of Thermos responsible for actually running
tasks. It also contains several Handlers which define the behaviour on state transitions within the
TaskRunner.
There are three "active" states in a running Thermos task:
ACTIVE
CLEANING
FINALIZING
A task in ACTIVE state is running regular processes. The moment this task succeeds or goes over its
failure limit, it then goes into CLEANING state, where it begins the staged termination of leftover
processes (with SIGTERMs). Once all processes have terminated, the task goes into FINALIZING state,
where the processes marked with the 'final' bit run. Once the task has gone into CLEANING state, it
has a deadline for going into terminal state. If it doesn't make it in time (at any point, whether
in CLEANING or FINALIZING state), it is forced into terminal state through SIGKILLs of all live
processes (coordinators, shells and the full process trees rooted at the shells.)
TaskRunner.kill is implemented by forcing the task into CLEANING state and setting its finalization
deadline manually. So in practice, we implement Task preemption by calling kill with the
finalization deadline = now + preemption wait, which gives the Task an opportunity to do graceful
shutdown. If preemption_wait=0, it will result in immediate SIGKILLs and then transition to the
terminal state.
"""
import os
import pwd
import socket
import sys
import time
import traceback
from contextlib import contextmanager
from pystachio import Empty, Environment
from twitter.common import log
from twitter.common.dirutil import safe_mkdir
from twitter.common.quantity import Amount, Data, Time
from twitter.common.recordio import ThriftRecordReader
from apache.thermos.common.ckpt import (
CheckpointDispatcher,
ProcessStateHandler,
TaskStateHandler,
UniversalStateHandler
)
from apache.thermos.common.path import TaskPath
from apache.thermos.common.planner import TaskPlanner
from apache.thermos.config.loader import (
ThermosConfigLoader,
ThermosTaskValidator,
ThermosTaskWrapper
)
from apache.thermos.config.schema import Logger, RotatePolicy, ThermosContext
from .helper import TaskRunnerHelper
from .muxer import ProcessMuxer
from .process import LoggerMode, Process
from gen.apache.thermos.ttypes import (
ProcessState,
ProcessStatus,
RunnerCkpt,
RunnerHeader,
RunnerState,
TaskState,
TaskStatus
)
# TODO(wickman) Currently this is messy because of all the private access into ._runner.
# Clean this up by giving the TaskRunnerProcessHandler the components it should own, and
# create a legitimate API contract into the Runner.
class TaskRunnerProcessHandler(ProcessStateHandler):
"""
Accesses these parts of the runner:
| _task_processes [array set, pop]
| _task_process_from_process_name [process name / sequence number => Process]
| _watcher [ProcessMuxer.register, unregister]
| _plan [add_success, add_failure, set_running]
"""
def __init__(self, runner):
self._runner = runner
def on_waiting(self, process_update):
log.debug('Process on_waiting %s' % process_update)
self._runner._task_processes[process_update.process] = (
self._runner._task_process_from_process_name(
process_update.process, process_update.seq + 1))
self._runner._watcher.register(process_update.process, process_update.seq - 1)
def on_forked(self, process_update):
log.debug('Process on_forked %s' % process_update)
task_process = self._runner._task_processes[process_update.process]
task_process.rebind(process_update.coordinator_pid, process_update.fork_time)
self._runner._plan.set_running(process_update.process)
def on_running(self, process_update):
log.debug('Process on_running %s' % process_update)
self._runner._plan.set_running(process_update.process)
def _cleanup(self, process_update):
if not self._runner._recovery:
TaskRunnerHelper.kill_process(self._runner.state, process_update.process)
def on_success(self, process_update):
log.debug('Process on_success %s' % process_update)
log.info('Process(%s) finished successfully [rc=%s]' % (
process_update.process, process_update.return_code))
self._cleanup(process_update)
self._runner._task_processes.pop(process_update.process)
self._runner._watcher.unregister(process_update.process)
self._runner._plan.add_success(process_update.process)
def _on_abnormal(self, process_update):
log.info('Process %s had an abnormal termination' % process_update.process)
self._runner._task_processes.pop(process_update.process)
self._runner._watcher.unregister(process_update.process)
def on_failed(self, process_update):
log.debug('Process on_failed %s' % process_update)
log.info('Process(%s) failed [rc=%s]' % (process_update.process, process_update.return_code))
self._cleanup(process_update)
self._on_abnormal(process_update)
self._runner._plan.add_failure(process_update.process)
if process_update.process in self._runner._plan.failed:
log.info('Process %s reached maximum failures, marking process run failed.' %
process_update.process)
else:
log.info('Process %s under maximum failure limit, restarting.' % process_update.process)
def on_lost(self, process_update):
log.debug('Process on_lost %s' % process_update)
self._cleanup(process_update)
self._on_abnormal(process_update)
self._runner._plan.lost(process_update.process)
def on_killed(self, process_update):
log.debug('Process on_killed %s' % process_update)
self._cleanup(process_update)
self._runner._task_processes.pop(process_update.process)
self._runner._watcher.unregister(process_update.process)
log.debug('Process killed, marking it as a loss.')
self._runner._plan.lost(process_update.process)
class TaskRunnerTaskHandler(TaskStateHandler):
"""
Accesses these parts of the runner:
_plan [set to regular_plan or finalizing_plan]
_recovery [boolean, whether or not to side-effect]
_pathspec [path creation]
_task [ThermosTask]
_set_finalization_start
_kill
"""
def __init__(self, runner):
self._runner = runner
self._pathspec = self._runner._pathspec
def on_active(self, task_update):
log.debug('Task on_active(%s)' % task_update)
self._runner._plan = self._runner._regular_plan
if self._runner._recovery:
return
TaskRunnerHelper.initialize_task(self._pathspec,
ThermosTaskWrapper(self._runner._task).to_json())
def on_cleaning(self, task_update):
log.debug('Task on_cleaning(%s)' % task_update)
self._runner._finalization_start = task_update.timestamp_ms / 1000.0
self._runner._terminate_plan(self._runner._regular_plan)
def on_finalizing(self, task_update):
log.debug('Task on_finalizing(%s)' % task_update)
if not self._runner._recovery:
self._runner._kill()
self._runner._plan = self._runner._finalizing_plan
if self._runner._finalization_start is None:
self._runner._finalization_start = task_update.timestamp_ms / 1000.0
def on_killed(self, task_update):
log.debug('Task on_killed(%s)' % task_update)
self._cleanup()
def on_success(self, task_update):
log.debug('Task on_success(%s)' % task_update)
self._cleanup()
log.info('Task succeeded.')
def on_failed(self, task_update):
log.debug('Task on_failed(%s)' % task_update)
self._cleanup()
def on_lost(self, task_update):
log.debug('Task on_lost(%s)' % task_update)
self._cleanup()
def _cleanup(self):
if not self._runner._recovery:
self._runner._kill()
TaskRunnerHelper.finalize_task(self._pathspec)
class TaskRunnerUniversalHandler(UniversalStateHandler):
"""
Universal handler to checkpoint every process and task transition of the runner.
Accesses these parts of the runner:
_ckpt_write
"""
def __init__(self, runner):
self._runner = runner
def _checkpoint(self, record):
self._runner._ckpt_write(record)
def on_process_transition(self, state, process_update):
log.debug('_on_process_transition: %s' % process_update)
self._checkpoint(RunnerCkpt(process_status=process_update))
def on_task_transition(self, state, task_update):
log.debug('_on_task_transition: %s' % task_update)
self._checkpoint(RunnerCkpt(task_status=task_update))
def on_initialization(self, header):
log.debug('_on_initialization: %s' % header)
ThermosTaskValidator.assert_valid_task(self._runner.task)
ThermosTaskValidator.assert_valid_ports(self._runner.task, header.ports)
self._checkpoint(RunnerCkpt(runner_header=header))
class TaskRunnerStage(object):
"""
A stage of the task runner pipeline.
"""
MAX_ITERATION_WAIT = Amount(1, Time.SECONDS)
def __init__(self, runner):
self.runner = runner
self.clock = runner._clock
def run(self):
"""
Perform any work necessary at this stage of the task.
If there is no more work to be done, return None. [This will invoke a state transition.]
If there is still work to be done, return the number of seconds from now in which you'd like
to be called to re-run the plan.
"""
return None
def transition_to(self):
"""
The stage to which we should transition.
"""
raise NotImplementedError
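# How the stage contract is consumed (a sketch of the loop in TaskRunner._run
# further below): the runner looks up the stage for the current task state,
# calls run(), sleeps for the returned wait, or, on None, transitions via
# transition_to():
#
#   stage = self._stages[self.task_state()]
#   iteration_wait = stage.run()
#   if iteration_wait is None:
#       self._set_task_status(stage.transition_to())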
class TaskRunnerStage_ACTIVE(TaskRunnerStage): # noqa
"""
Run the regular plan (i.e. normal, non-finalizing processes.)
"""
MAX_ITERATION_WAIT = Amount(15, Time.SECONDS)
MIN_ITERATION_WAIT = Amount(1, Time.SECONDS)
def __init__(self, runner):
super(TaskRunnerStage_ACTIVE, self).__init__(runner)
def run(self):
launched = self.runner._run_plan(self.runner._regular_plan)
# Have we terminated?
terminal_state = None
if self.runner._regular_plan.is_complete():
log.info('Regular plan complete.')
terminal_state = TaskState.SUCCESS if self.runner.is_healthy() else TaskState.FAILED
elif not self.runner.is_healthy():
log.error('Regular plan unhealthy!')
terminal_state = TaskState.FAILED
if terminal_state:
# No more work to do
return None
elif launched > 0:
# We want to run ASAP after updates have been collected
return max(self.MIN_ITERATION_WAIT.as_(Time.SECONDS), self.runner._regular_plan.min_wait())
else:
# We want to run as soon as something is available to run or after a prescribed timeout.
return min(self.MAX_ITERATION_WAIT.as_(Time.SECONDS), self.runner._regular_plan.min_wait())
def transition_to(self):
return TaskState.CLEANING
class TaskRunnerStage_CLEANING(TaskRunnerStage): # noqa
"""
Start the cleanup of the regular plan (e.g. if it failed.) On ACTIVE -> CLEANING,
we send SIGTERMs to all still-running processes. We wait at most finalization_wait
for all processes to complete before SIGKILLs are sent. If everything exits cleanly
prior to that point in time, we transition to FINALIZING, which kicks into gear
the finalization schedule (if any.)
"""
def run(self):
log.debug('TaskRunnerStage[CLEANING]: Finalization remaining: %s' %
self.runner._finalization_remaining())
if self.runner._finalization_remaining() > 0 and self.runner.has_running_processes():
return min(self.runner._finalization_remaining(), self.MAX_ITERATION_WAIT.as_(Time.SECONDS))
def transition_to(self):
if self.runner._finalization_remaining() <= 0:
log.info('Exceeded finalization wait, skipping finalization.')
return self.runner.terminal_state()
return TaskState.FINALIZING
class TaskRunnerStage_FINALIZING(TaskRunnerStage): # noqa
"""
Run the finalizing plan, specifically the plan of tasks with the 'final'
bit marked (e.g. log savers, checkpointers and the like.) Anything in this
plan will be SIGKILLed if we go over the finalization_wait.
"""
def run(self):
self.runner._run_plan(self.runner._finalizing_plan)
log.debug('TaskRunnerStage[FINALIZING]: Finalization remaining: %s' %
self.runner._finalization_remaining())
if self.runner.deadlocked(self.runner._finalizing_plan):
log.warning('Finalizing plan deadlocked.')
return None
if self.runner._finalization_remaining() > 0 and not self.runner._finalizing_plan.is_complete():
return min(self.runner._finalization_remaining(), self.MAX_ITERATION_WAIT.as_(Time.SECONDS))
def transition_to(self):
if self.runner._finalization_remaining() <= 0:
log.info('Exceeded finalization wait, terminating finalization.')
return self.runner.terminal_state()
class TaskRunner(object):
"""
Run a ThermosTask.
This class encapsulates the core logic to run and control the state of a Thermos task.
Typically, it will be instantiated directly to control a new task, but a TaskRunner can also be
synthesised from an existing task's checkpoint root.
"""
class Error(Exception): pass
class InternalError(Error): pass
class InvalidTask(Error): pass
class PermissionError(Error): pass
class StateError(Error): pass
# Maximum amount of time we spend waiting for new updates from the checkpoint streams
# before doing housecleaning (checking for LOST tasks, dead PIDs.)
MAX_ITERATION_TIME = Amount(10, Time.SECONDS)
# Minimum amount of time we wait between polls for updates on coordinator checkpoints.
COORDINATOR_INTERVAL_SLEEP = Amount(1, Time.SECONDS)
# Amount of time we're willing to wait after forking before we expect the runner to have
# exec'ed the child process.
LOST_TIMEOUT = Amount(60, Time.SECONDS)
# Active task stages
STAGES = {
TaskState.ACTIVE: TaskRunnerStage_ACTIVE,
TaskState.CLEANING: TaskRunnerStage_CLEANING,
TaskState.FINALIZING: TaskRunnerStage_FINALIZING
}
@classmethod
def get(cls, task_id, checkpoint_root):
"""
Get a TaskRunner bound to the task_id in checkpoint_root.
"""
path = TaskPath(root=checkpoint_root, task_id=task_id, state='active')
task_json = path.getpath('task_path')
task_checkpoint = path.getpath('runner_checkpoint')
if not os.path.exists(task_json):
return None
task = ThermosConfigLoader.load_json(task_json)
if task is None:
return None
if len(task.tasks()) == 0:
return None
try:
checkpoint = CheckpointDispatcher.from_file(task_checkpoint)
if checkpoint is None or checkpoint.header is None:
return None
return cls(task.tasks()[0].task(), checkpoint_root, checkpoint.header.sandbox,
log_dir=checkpoint.header.log_dir, task_id=task_id,
portmap=checkpoint.header.ports, hostname=checkpoint.header.hostname)
except Exception as e:
log.error('Failed to reconstitute checkpoint in TaskRunner.get: %s' % e, exc_info=True)
return None
def __init__(self, task, checkpoint_root, sandbox, log_dir=None,
task_id=None, portmap=None, user=None, chroot=False, clock=time,
universal_handler=None, planner_class=TaskPlanner, hostname=None,
process_logger_destination=None, process_logger_mode=None,
rotate_log_size_mb=None, rotate_log_backups=None,
preserve_env=False, mesos_containerizer_path=None, container_sandbox=None):
"""
required:
task (config.Task) = the task to run
checkpoint_root (path) = the checkpoint root
sandbox (path) = the sandbox in which the path will be run
[if None, cwd will be assumed, but garbage collection will be
disabled for this task.]
optional:
log_dir (string) = directory to house stdout/stderr logs. If not specified, logs will be
written into the sandbox directory under .logs/
task_id (string) = bind to this task id. if not specified, will synthesize an id based
upon task.name()
portmap (dict) = a map (string => integer) from name to port, e.g. { 'http': 80 }
user (string) = the user to run the task as. if not current user, requires setuid
privileges.
chroot (boolean) = whether or not to chroot into the sandbox prior to exec.
clock (time interface) = the clock to use throughout
universal_handler = checkpoint record handler (only used for testing)
planner_class (TaskPlanner class) = TaskPlanner class to use for constructing the task
planning policy.
process_logger_destination (string) = The destination of logger to use for all processes.
process_logger_mode (string) = The mode of logger to use for all processes.
rotate_log_size_mb (integer) = The maximum size of the rotated stdout/stderr logs in MiB.
rotate_log_backups (integer) = The maximum number of rotated stdout/stderr log backups.
preserve_env (boolean) = whether or not env variables for the runner should be in the
env for the task being run
mesos_containerizer_path = the path to the mesos-containerizer executable that will be used
to isolate the task's filesystem (if using a filesystem image).
container_sandbox = the path within the isolated filesystem where the task's sandbox is
mounted.
"""
if not issubclass(planner_class, TaskPlanner):
raise TypeError('planner_class must be a TaskPlanner.')
self._clock = clock
launch_time = self._clock.time()
launch_time_ms = '%06d' % int((launch_time - int(launch_time)) * (10 ** 6))
if not task_id:
self._task_id = '%s-%s.%s' % (task.name(),
time.strftime('%Y%m%d-%H%M%S', time.localtime(launch_time)),
launch_time_ms)
else:
self._task_id = task_id
current_user = TaskRunnerHelper.get_actual_user()
self._user = user or current_user
# TODO(wickman) This should be delegated to the ProcessPlatform / Helper
if self._user != current_user:
if os.geteuid() != 0:
raise ValueError('task specifies user as %s, but %s does not have setuid permission!' % (
self._user, current_user))
self._portmap = portmap or {}
self._launch_time = launch_time
self._log_dir = log_dir or os.path.join(sandbox, '.logs')
self._process_logger_destination = process_logger_destination
self._process_logger_mode = process_logger_mode
self._rotate_log_size_mb = rotate_log_size_mb
self._rotate_log_backups = rotate_log_backups
self._pathspec = TaskPath(root=checkpoint_root, task_id=self._task_id, log_dir=self._log_dir)
self._hostname = hostname or socket.gethostname()
try:
ThermosTaskValidator.assert_valid_task(task)
ThermosTaskValidator.assert_valid_ports(task, self._portmap)
except ThermosTaskValidator.InvalidTaskError as e:
raise self.InvalidTask('Invalid task: %s' % e)
context = ThermosContext(
task_id=self._task_id,
ports=self._portmap,
user=self._user)
self._task, uninterp = (task % Environment(thermos=context)).interpolate()
if len(uninterp) > 0:
raise self.InvalidTask('Failed to interpolate task, missing: %s' %
', '.join(str(ref) for ref in uninterp))
try:
ThermosTaskValidator.assert_same_task(self._pathspec, self._task)
except ThermosTaskValidator.InvalidTaskError as e:
raise self.InvalidTask('Invalid task: %s' % e)
self._plan = None # plan currently being executed (updated by Handlers)
self._regular_plan = planner_class(self._task, clock=clock,
process_filter=lambda proc: proc.final().get() is False)
self._finalizing_plan = planner_class(self._task, clock=clock,
process_filter=lambda proc: proc.final().get() is True)
self._chroot = chroot
self._sandbox = sandbox
self._container_sandbox = container_sandbox
self._terminal_state = None
self._ckpt = None
self._process_map = dict((p.name().get(), p) for p in self._task.processes())
self._task_processes = {}
self._stages = dict((state, stage(self)) for state, stage in self.STAGES.items())
self._finalization_start = None
self._preemption_deadline = None
self._watcher = ProcessMuxer(self._pathspec)
self._state = RunnerState(processes={})
self._preserve_env = preserve_env
self._mesos_containerizer_path = mesos_containerizer_path
# create runner state
universal_handler = universal_handler or TaskRunnerUniversalHandler
self._dispatcher = CheckpointDispatcher()
self._dispatcher.register_handler(universal_handler(self))
self._dispatcher.register_handler(TaskRunnerProcessHandler(self))
self._dispatcher.register_handler(TaskRunnerTaskHandler(self))
# recover checkpointed runner state and update plan
self._recovery = True
self._replay_runner_ckpt()
@property
def task(self):
return self._task
@property
def task_id(self):
return self._task_id
@property
def state(self):
return self._state
@property
def processes(self):
return self._task_processes
def task_state(self):
return self._state.statuses[-1].state if self._state.statuses else TaskState.ACTIVE
def close_ckpt(self):
"""Force close the checkpoint stream. This is necessary for runners terminated through
exception propagation."""
log.debug('Closing the checkpoint stream.')
self._ckpt.close()
@contextmanager
def control(self, force=False):
"""
Bind to the checkpoint associated with this task, position to the end of the log if
it exists, or create it if it doesn't. Fails if we cannot get "leadership" i.e. a
file lock on the checkpoint stream.
"""
if self.is_terminal():
raise self.StateError('Cannot take control of a task in terminal state.')
if self._sandbox:
safe_mkdir(self._sandbox)
ckpt_file = self._pathspec.getpath('runner_checkpoint')
try:
self._ckpt = TaskRunnerHelper.open_checkpoint(ckpt_file, force=force, state=self._state)
except TaskRunnerHelper.PermissionError:
raise self.PermissionError('Unable to open checkpoint %s' % ckpt_file)
log.debug('Flipping recovery mode off.')
self._recovery = False
self._set_task_status(self.task_state())
self._resume_task()
try:
yield
except Exception as e:
log.error('Caught exception in self.control(): %s' % e)
log.error(' %s' % traceback.format_exc())
self._ckpt.close()
def _resume_task(self):
assert self._ckpt is not None
unapplied_updates = self._replay_process_ckpts()
if self.is_terminal():
raise self.StateError('Cannot resume terminal task.')
self._initialize_ckpt_header()
self._replay(unapplied_updates)
def _ckpt_write(self, record):
"""
Write to the checkpoint stream if we're not in recovery mode.
"""
if not self._recovery:
self._ckpt.write(record)
def _replay(self, checkpoints):
"""
Replay a sequence of RunnerCkpts.
"""
for checkpoint in checkpoints:
self._dispatcher.dispatch(self._state, checkpoint)
def _replay_runner_ckpt(self):
"""
Replay the checkpoint stream associated with this task.
"""
ckpt_file = self._pathspec.getpath('runner_checkpoint')
if os.path.exists(ckpt_file):
with open(ckpt_file, 'r') as fp:
ckpt_recover = ThriftRecordReader(fp, RunnerCkpt)
for record in ckpt_recover:
log.debug('Replaying runner checkpoint record: %s' % record)
self._dispatcher.dispatch(self._state, record, recovery=True)
def _replay_process_ckpts(self):
"""
Replay the unmutating process checkpoints. Return the unapplied process updates that
would mutate the runner checkpoint stream.
"""
process_updates = self._watcher.select()
unapplied_process_updates = []
for process_update in process_updates:
if self._dispatcher.would_update(self._state, process_update):
unapplied_process_updates.append(process_update)
else:
self._dispatcher.dispatch(self._state, process_update, recovery=True)
return unapplied_process_updates
def _initialize_ckpt_header(self):
"""
Initializes the RunnerHeader for this checkpoint stream if it has not already
been constructed.
"""
if self._state.header is None:
try:
uid = pwd.getpwnam(self._user).pw_uid
except KeyError:
# This will cause failures downstream, but they will at least be correctly
# reflected in the process state.
log.error('Unknown user %s.' % self._user)
uid = None
header = RunnerHeader(
task_id=self._task_id,
launch_time_ms=int(self._launch_time * 1000),
sandbox=self._sandbox,
log_dir=self._log_dir,
hostname=self._hostname,
user=self._user,
uid=uid,
ports=self._portmap)
runner_ckpt = RunnerCkpt(runner_header=header)
self._dispatcher.dispatch(self._state, runner_ckpt)
def _set_task_status(self, state):
update = TaskStatus(state=state, timestamp_ms=int(self._clock.time() * 1000),
runner_pid=os.getpid(), runner_uid=os.getuid())
runner_ckpt = RunnerCkpt(task_status=update)
self._dispatcher.dispatch(self._state, runner_ckpt, self._recovery)
def _finalization_remaining(self):
# If a preemption deadline has been set, use that.
if self._preemption_deadline:
return max(0, self._preemption_deadline - self._clock.time())
# Otherwise, use the finalization wait provided in the configuration.
finalization_allocation = self.task.finalization_wait().get()
if self._finalization_start is None:
return sys.float_info.max
else:
waited = max(0, self._clock.time() - self._finalization_start)
return max(0, finalization_allocation - waited)
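# Worked example (illustrative): with finalization_wait=30s configured on the
# task and cleaning having started 12s ago, this returns 18.0. If kill() has
# set a preemption deadline, that deadline wins regardless of the configured
# finalization wait; before cleaning starts it returns sys.float_info.max,
# i.e. effectively "no deadline yet".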
def _set_process_status(self, process_name, process_state, **kw):
if 'sequence_number' in kw:
sequence_number = kw.pop('sequence_number')
log.debug('_set_process_status(%s <= %s, seq=%s[force])' % (process_name,
ProcessState._VALUES_TO_NAMES.get(process_state), sequence_number))
else:
current_run = self._current_process_run(process_name)
if not current_run:
assert process_state == ProcessState.WAITING
sequence_number = 0
else:
sequence_number = current_run.seq + 1
log.debug('_set_process_status(%s <= %s, seq=%s[auto])' % (process_name,
ProcessState._VALUES_TO_NAMES.get(process_state), sequence_number))
runner_ckpt = RunnerCkpt(process_status=ProcessStatus(
process=process_name, state=process_state, seq=sequence_number, **kw))
self._dispatcher.dispatch(self._state, runner_ckpt, self._recovery)
def _task_process_from_process_name(self, process_name, sequence_number):
"""
Construct a Process() object from a process_name, populated with its
correct run number and fully interpolated commandline.
"""
run_number = len(self.state.processes[process_name]) - 1
pathspec = self._pathspec.given(process=process_name, run=run_number)
process = self._process_map.get(process_name)
if process is None:
raise self.InternalError('FATAL: Could not find process: %s' % process_name)
def close_ckpt_and_fork():
pid = os.fork()
if pid == 0 and self._ckpt is not None:
self._ckpt.close()
return pid
(logger_destination,
logger_mode,
rotate_log_size,
rotate_log_backups) = self._build_process_logger_args(process)
return Process(
process.name().get(),
process.cmdline().get(),
sequence_number,
pathspec,
self._sandbox,
self._user,
chroot=self._chroot,
fork=close_ckpt_and_fork,
logger_destination=logger_destination,
logger_mode=logger_mode,
rotate_log_size=rotate_log_size,
rotate_log_backups=rotate_log_backups,
preserve_env=self._preserve_env,
mesos_containerizer_path=self._mesos_containerizer_path,
container_sandbox=self._container_sandbox)
_DEFAULT_LOGGER = Logger()
_DEFAULT_ROTATION = RotatePolicy()
def _build_process_logger_args(self, process):
"""
Build the appropriate logging configuration based on flags + process
configuration settings.
If no configuration (neither flags nor process config), default to
"standard" mode.
"""
destination, mode, size, backups = (self._DEFAULT_LOGGER.destination().get(),
self._DEFAULT_LOGGER.mode().get(),
None,
None)
logger = process.logger()
if logger is Empty:
if self._process_logger_destination:
destination = self._process_logger_destination
if self._process_logger_mode:
mode = self._process_logger_mode
else:
destination = logger.destination().get()
mode = logger.mode().get()
if mode == LoggerMode.ROTATE:
size = Amount(self._DEFAULT_ROTATION.log_size().get(), Data.BYTES)
backups = self._DEFAULT_ROTATION.backups().get()
if logger is Empty:
if self._rotate_log_size_mb:
size = Amount(self._rotate_log_size_mb, Data.MB)
if self._rotate_log_backups:
backups = self._rotate_log_backups
else:
rotate = logger.rotate()
if rotate is not Empty:
size = Amount(rotate.log_size().get(), Data.BYTES)
backups = rotate.backups().get()
return destination, mode, size, backups
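# Precedence illustrated (a sketch of the logic above): a Logger block on the
# process config wins; otherwise the runner-level process_logger_destination /
# process_logger_mode arguments apply. Rotation size/backups likewise come from
# the process's RotatePolicy if present, else from rotate_log_size_mb /
# rotate_log_backups, else from the schema defaults.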
def deadlocked(self, plan=None):
"""Check whether a plan is deadlocked, i.e. there are no running/runnable processes, and the
plan is not complete."""
plan = plan or self._regular_plan
now = self._clock.time()
running = list(plan.running)
runnable = list(plan.runnable_at(now))
waiting = list(plan.waiting_at(now))
log.debug('running:%d runnable:%d waiting:%d complete:%s' % (
len(running), len(runnable), len(waiting), plan.is_complete()))
return len(running + runnable + waiting) == 0 and not plan.is_complete()
def is_healthy(self):
"""Check whether the TaskRunner is healthy. A healthy TaskRunner is not deadlocked and has not
reached its max_failures count."""
max_failures = self._task.max_failures().get()
deadlocked = self.deadlocked()
under_failure_limit = max_failures == 0 or len(self._regular_plan.failed) < max_failures
log.debug('max_failures:%d failed:%d under_failure_limit:%s deadlocked:%s ==> health:%s' % (
max_failures, len(self._regular_plan.failed), under_failure_limit, deadlocked,
not deadlocked and under_failure_limit))
return not deadlocked and under_failure_limit
def _current_process_run(self, process_name):
if process_name not in self._state.processes or len(self._state.processes[process_name]) == 0:
return None
return self._state.processes[process_name][-1]
def is_process_lost(self, process_name):
"""Determine whether or not we should mark a task as LOST and do so if necessary."""
current_run = self._current_process_run(process_name)
if not current_run:
raise self.InternalError('No current_run for process %s!' % process_name)
def forked_but_never_came_up():
return current_run.state == ProcessState.FORKED and (
self._clock.time() - current_run.fork_time > self.LOST_TIMEOUT.as_(Time.SECONDS))
def running_but_coordinator_died():
if current_run.state != ProcessState.RUNNING:
return False
coordinator_pid, _, _ = TaskRunnerHelper.scan_process(self.state, process_name)
if coordinator_pid is not None:
return False
elif self._watcher.has_data(process_name):
return False
return True
if forked_but_never_came_up() or running_but_coordinator_died():
log.info('Detected a LOST task: %s' % current_run)
log.debug(' forked_but_never_came_up: %s' % forked_but_never_came_up())
log.debug(' running_but_coordinator_died: %s' % running_but_coordinator_died())
return True
return False
def _run_plan(self, plan):
log.debug('Schedule pass:')
running = list(plan.running)
log.debug('running: %s' % ' '.join(plan.running))
log.debug('finished: %s' % ' '.join(plan.finished))
launched = []
for process_name in plan.running:
if self.is_process_lost(process_name):
self._set_process_status(process_name, ProcessState.LOST)
now = self._clock.time()
runnable = list(plan.runnable_at(now))
waiting = list(plan.waiting_at(now))
log.debug('runnable: %s' % ' '.join(runnable))
log.debug('waiting: %s' % ' '.join(
'%s[T-%.1fs]' % (process, plan.get_wait(process)) for process in waiting))
def pick_processes(process_list):
if self._task.max_concurrency().get() == 0:
return process_list
num_to_pick = max(self._task.max_concurrency().get() - len(running), 0)
return process_list[:num_to_pick]
for process_name in pick_processes(runnable):
tp = self._task_processes.get(process_name)
if tp:
current_run = self._current_process_run(process_name)
assert current_run.state == ProcessState.WAITING
else:
self._set_process_status(process_name, ProcessState.WAITING)
tp = self._task_processes[process_name]
log.info('Forking Process(%s)' % process_name)
try:
tp.start()
launched.append(tp)
except Process.Error as e:
log.error('Failed to launch process: %s' % e)
self._set_process_status(process_name, ProcessState.FAILED)
return len(launched) > 0
def _terminate_plan(self, plan):
TaskRunnerHelper.terminate_orphans(self.state)
for process in plan.running:
last_run = self._current_process_run(process)
if last_run and last_run.state in (ProcessState.FORKED, ProcessState.RUNNING):
TaskRunnerHelper.terminate_process(self.state, process)
def has_running_processes(self):
"""
Returns True if any processes associated with this task have active pids.
"""
process_tree = TaskRunnerHelper.scan_tree(self.state)
return any(any(process_set) for process_set in process_tree.values())
def has_active_processes(self):
"""
Returns True if any processes are in non-terminal states.
"""
return any(not TaskRunnerHelper.is_process_terminal(run.state) for run in
filter(None, (self._current_process_run(process) for process in self.state.processes)))
def collect_updates(self, timeout=None):
"""
Collects and applies updates from process checkpoint streams. Returns the number
of applied process checkpoints.
"""
if not self.has_active_processes():
return 0
sleep_interval = self.COORDINATOR_INTERVAL_SLEEP.as_(Time.SECONDS)
total_time = 0.0
while True:
process_updates = self._watcher.select()
for process_update in process_updates:
self._dispatcher.dispatch(self._state, process_update, self._recovery)
if process_updates:
return len(process_updates)
if timeout is not None and total_time >= timeout:
return 0
total_time += sleep_interval
self._clock.sleep(sleep_interval)
def is_terminal(self):
return TaskRunnerHelper.is_task_terminal(self.task_state())
def terminal_state(self):
if self._terminal_state:
log.debug('Forced terminal state: %s' %
TaskState._VALUES_TO_NAMES.get(self._terminal_state, 'UNKNOWN'))
return self._terminal_state
else:
return TaskState.SUCCESS if self.is_healthy() else TaskState.FAILED
def run(self, force=False):
"""
Entrypoint to runner. Assume control of checkpoint stream, and execute TaskRunnerStages
until runner is terminal.
"""
if self.is_terminal():
return
with self.control(force):
self._run()
def _run(self):
while not self.is_terminal():
start = self._clock.time()
# step 1: execute stage corresponding to the state we're currently in
runner = self._stages[self.task_state()]
iteration_wait = runner.run()
if iteration_wait is None:
log.debug('Run loop: No more work to be done in state %s' %
TaskState._VALUES_TO_NAMES.get(self.task_state(), 'UNKNOWN'))
self._set_task_status(runner.transition_to())
continue
log.debug('Run loop: Work to be done within %.1fs' % iteration_wait)
# step 2: check child process checkpoint streams for updates
if not self.collect_updates(iteration_wait):
# If we don't collect any updates, at least 'touch' the checkpoint stream
# so as to prevent garbage collection.
elapsed = self._clock.time() - start
if elapsed < iteration_wait:
log.debug('Update collection only took %.1fs, idling %.1fs' % (
elapsed, iteration_wait - elapsed))
self._clock.sleep(iteration_wait - elapsed)
log.debug('Run loop: No updates collected, touching checkpoint.')
os.utime(self._pathspec.getpath('runner_checkpoint'), None)
# step 3: reap any zombie child processes
TaskRunnerHelper.reap_children()
def kill(self, force=False, terminal_status=TaskState.KILLED,
preemption_wait=Amount(1, Time.MINUTES)):
"""
Kill all processes associated with this task and set task/process states as terminal_status
(defaults to KILLED)
"""
log.debug('Runner issued kill: force:%s, preemption_wait:%s' % (
force, preemption_wait))
assert terminal_status in (TaskState.KILLED, TaskState.LOST)
self._preemption_deadline = self._clock.time() + preemption_wait.as_(Time.SECONDS)
with self.control(force):
if self.is_terminal():
log.warning('Task is not in ACTIVE state, cannot issue kill.')
return
self._terminal_state = terminal_status
if self.task_state() == TaskState.ACTIVE:
self._set_task_status(TaskState.CLEANING)
self._run()
def lose(self, force=False):
"""
Mark a task as LOST and kill any straggling processes.
"""
self.kill(force, preemption_wait=Amount(0, Time.SECONDS), terminal_status=TaskState.LOST)
def _kill(self):
processes = TaskRunnerHelper.scan_tree(self._state)
for process, pid_tuple in processes.items():
current_run = self._current_process_run(process)
coordinator_pid, pid, tree = pid_tuple
if TaskRunnerHelper.is_process_terminal(current_run.state):
if coordinator_pid or pid or tree:
log.warning('Terminal process (%s) still has running pids:' % process)
log.warning(' coordinator_pid: %s' % coordinator_pid)
log.warning(' pid: %s' % pid)
log.warning(' tree: %s' % tree)
TaskRunnerHelper.kill_process(self.state, process)
else:
if coordinator_pid or pid or tree:
log.info('Transitioning %s to KILLED' % process)
self._set_process_status(process, ProcessState.KILLED,
stop_time=self._clock.time(), return_code=-1)
else:
log.info('Transitioning %s to LOST' % process)
if current_run.state != ProcessState.WAITING:
self._set_process_status(process, ProcessState.LOST)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import traceback
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow.types import failure as ft
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI
from cinder.image import glance
from cinder import objects
from cinder import utils
from cinder.volume.flows import common
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
ACTION = 'volume:create'
CONF = cfg.CONF
# These attributes we will attempt to save for the volume if they exist
# in the source image metadata.
IMAGE_ATTRIBUTES = (
'checksum',
'container_format',
'disk_format',
'min_disk',
'min_ram',
'size',
)
class OnFailureRescheduleTask(flow_utils.CinderTask):
"""Triggers a rescheduling request to be sent when reverting occurs.
If rescheduling doesn't occur, this task errors out the volume.
Reversion strategy: Triggers the rescheduling mechanism whereby a cast gets
sent to the scheduler rpc api to allow for an attempt X of Y for scheduling
this volume elsewhere.
"""
def __init__(self, reschedule_context, db, scheduler_rpcapi,
do_reschedule):
requires = ['filter_properties', 'request_spec', 'volume_id',
'context']
super(OnFailureRescheduleTask, self).__init__(addons=[ACTION],
requires=requires)
self.do_reschedule = do_reschedule
self.scheduler_rpcapi = scheduler_rpcapi
self.db = db
self.reschedule_context = reschedule_context
# These exception types will trigger the volume to be set into error
# status rather than being rescheduled.
self.no_reschedule_types = [
# Image copying happens after volume creation so rescheduling due
# to copy failure will mean the same volume will be created at
# another place when it still exists locally.
exception.ImageCopyFailure,
# Metadata updates happen after the volume has been created so if
# they fail, rescheduling will likely attempt to create the volume
# on another machine when it still exists locally.
exception.MetadataCopyFailure,
exception.MetadataCreateFailure,
exception.MetadataUpdateFailure,
# The volume/snapshot has been removed from the database, that
# can not be fixed by rescheduling.
exception.VolumeNotFound,
exception.SnapshotNotFound,
exception.VolumeTypeNotFound,
exception.ImageUnacceptable,
]
def execute(self, **kwargs):
pass
def _pre_reschedule(self, context, volume_id):
"""Actions that happen before the rescheduling attempt occur here."""
try:
# Update volume's timestamp and host.
#
# NOTE(harlowja): this is awkward to be done here, shouldn't
# this happen at the scheduler itself and not before it gets
# sent to the scheduler? (since what happens if it never gets
# there??). It's almost like we need a status of 'on-the-way-to
# scheduler' in the future.
# We don't need to update the volume's status to creating, since
# we haven't changed it to error.
update = {
'scheduled_at': timeutils.utcnow(),
'host': None,
}
LOG.debug("Updating volume %(volume_id)s with %(update)s.",
{'update': update, 'volume_id': volume_id})
self.db.volume_update(context, volume_id, update)
except exception.CinderException:
# Don't let updating the state cause the rescheduling to fail.
LOG.exception(_LE("Volume %s: update volume state failed."),
volume_id)
def _reschedule(self, context, cause, request_spec, filter_properties,
volume_id):
"""Actions that happen during the rescheduling attempt occur here."""
create_volume = self.scheduler_rpcapi.create_volume
if not filter_properties:
filter_properties = {}
if 'retry' not in filter_properties:
filter_properties['retry'] = {}
retry_info = filter_properties['retry']
num_attempts = retry_info.get('num_attempts', 0)
request_spec['volume_id'] = volume_id
LOG.debug("Volume %(volume_id)s: re-scheduling %(method)s "
"attempt %(num)d due to %(reason)s",
{'volume_id': volume_id,
'method': common.make_pretty_name(create_volume),
'num': num_attempts,
'reason': cause.exception_str})
if all(cause.exc_info):
# Stringify to avoid circular ref problem in json serialization
retry_info['exc'] = traceback.format_exception(*cause.exc_info)
return create_volume(context, CONF.volume_topic, volume_id,
request_spec=request_spec,
filter_properties=filter_properties)
def _post_reschedule(self, volume_id):
"""Actions that happen after the rescheduling attempt occur here."""
LOG.debug("Volume %s: re-scheduled", volume_id)
def revert(self, context, result, flow_failures, volume_id, **kwargs):
# NOTE(dulek): Revert is occurring and the manager needs to know if
# rescheduling happened. We're returning a boolean flag that will
# indicate that; it will be available in the flow engine store
# through the get_revert_result method.
# If we do not want to be rescheduled, just set the volume's status to
# error and return.
if not self.do_reschedule:
common.error_out_volume(context, self.db, volume_id)
LOG.error(_LE("Volume %s: create failed"), volume_id)
return False
# Check if we have a cause which can tell us not to reschedule and
# set the volume's status to error.
for failure in flow_failures.values():
if failure.check(*self.no_reschedule_types):
common.error_out_volume(context, self.db, volume_id)
LOG.error(_LE("Volume %s: create failed"), volume_id)
return False
# Use a different context when rescheduling.
if self.reschedule_context:
cause = list(flow_failures.values())[0]
context = self.reschedule_context
try:
self._pre_reschedule(context, volume_id)
self._reschedule(context, cause, volume_id=volume_id, **kwargs)
self._post_reschedule(volume_id)
return True
except exception.CinderException:
LOG.exception(_LE("Volume %s: rescheduling failed"), volume_id)
return False
class ExtractVolumeRefTask(flow_utils.CinderTask):
"""Extracts volume reference for given volume id."""
default_provides = 'volume_ref'
def __init__(self, db, host, set_error=True):
super(ExtractVolumeRefTask, self).__init__(addons=[ACTION])
self.db = db
self.host = host
self.set_error = set_error
def execute(self, context, volume_id):
# NOTE(harlowja): this will fetch the volume from the database; if
# the volume has been deleted before we got here, this should fail.
#
# In the future we might want to have a lock on the volume_id so that
# the volume can not be deleted while it's still being created?
volume_ref = self.db.volume_get(context, volume_id)
return volume_ref
def revert(self, context, volume_id, result, **kwargs):
if isinstance(result, ft.Failure) or not self.set_error:
return
common.error_out_volume(context, self.db, volume_id)
LOG.error(_LE("Volume %s: create failed"), volume_id)
class ExtractVolumeSpecTask(flow_utils.CinderTask):
"""Extracts a spec of a volume to be created into a common structure.
This task extracts and organizes the input requirements into a common
and easier to analyze structure for later tasks to use. It will also
attach the underlying database volume reference which can be used by
other tasks to reference for further details about the volume to be.
Reversion strategy: N/A
"""
default_provides = 'volume_spec'
def __init__(self, db):
requires = ['volume_ref', 'request_spec']
super(ExtractVolumeSpecTask, self).__init__(addons=[ACTION],
requires=requires)
self.db = db
def execute(self, context, volume_ref, request_spec):
get_remote_image_service = glance.get_remote_image_service
volume_name = volume_ref['name']
volume_size = utils.as_int(volume_ref['size'], quiet=False)
# Create a dictionary that will represent the volume to be so that
# later tasks can easily switch between the different types and create
# the volume according to the volume types specifications (which are
# represented in this dictionary).
specs = {
'status': volume_ref['status'],
'type': 'raw', # This will have the type of the volume to be
# created, which should be one of [raw, snap,
# source_vol, image]
'volume_id': volume_ref['id'],
'volume_name': volume_name,
'volume_size': volume_size,
}
if volume_ref.get('snapshot_id'):
# We are making a snapshot based volume instead of a raw volume.
specs.update({
'type': 'snap',
'snapshot_id': volume_ref['snapshot_id'],
})
elif volume_ref.get('source_volid'):
# We are making a source based volume instead of a raw volume.
#
# NOTE(harlowja): This will likely fail if the source volume
# disappeared by the time this call occurred.
source_volid = volume_ref.get('source_volid')
source_volume_ref = self.db.volume_get(context, source_volid)
specs.update({
'source_volid': source_volid,
# This is captured in case we have to revert and we want to set
# the source volume status back to its original status. This
# may or may not be sketchy to do??
'source_volstatus': source_volume_ref['status'],
'type': 'source_vol',
})
elif request_spec.get('source_replicaid'):
# We are making a clone based on the replica.
#
# NOTE(harlowja): This will likely fail if the replica
# disappeared by the time this call occurred.
source_volid = request_spec['source_replicaid']
source_volume_ref = self.db.volume_get(context, source_volid)
specs.update({
'source_replicaid': source_volid,
'source_replicastatus': source_volume_ref['status'],
'type': 'source_replica',
})
elif request_spec.get('image_id'):
# We are making an image based volume instead of a raw volume.
image_href = request_spec['image_id']
image_service, image_id = get_remote_image_service(context,
image_href)
specs.update({
'type': 'image',
'image_id': image_id,
'image_location': image_service.get_location(context,
image_id),
'image_meta': image_service.show(context, image_id),
# Instead of refetching the image service later just save it.
#
# NOTE(harlowja): if we later have to recover this task's output
# on another 'node', this object won't be serializable, so we
# will have to recreate it on demand in the future.
'image_service': image_service,
})
return specs
def revert(self, context, result, **kwargs):
if isinstance(result, ft.Failure):
return
volume_spec = result.get('volume_spec')
# Restore the source volume status and set the volume to error status.
common.restore_source_status(context, self.db, volume_spec)
class NotifyVolumeActionTask(flow_utils.CinderTask):
"""Performs a notification about the given volume when called.
Reversion strategy: N/A
"""
def __init__(self, db, event_suffix):
super(NotifyVolumeActionTask, self).__init__(addons=[ACTION,
event_suffix])
self.db = db
self.event_suffix = event_suffix
def execute(self, context, volume_ref):
volume_id = volume_ref['id']
try:
volume_utils.notify_about_volume_usage(context, volume_ref,
self.event_suffix,
host=volume_ref['host'])
except exception.CinderException:
# If sending the notification or reading the volume database entry
# fails, we shouldn't error out the whole workflow, since this is
# not information that must be sent for volumes to operate.
LOG.exception(_LE("Failed notifying about the volume"
" action %(event)s for volume %(volume_id)s"),
{'event': self.event_suffix, 'volume_id': volume_id})
class CreateVolumeFromSpecTask(flow_utils.CinderTask):
"""Creates a volume from a provided specification.
Reversion strategy: N/A
"""
default_provides = 'volume'
def __init__(self, db, driver):
super(CreateVolumeFromSpecTask, self).__init__(addons=[ACTION])
self.db = db
self.driver = driver
def _handle_bootable_volume_glance_meta(self, context, volume_id,
**kwargs):
"""Enable bootable flag and properly handle glance metadata.
The caller should provide one and only one of snapshot_id, source_volid
and image_id. If an image_id is specified, an image_meta should also be
provided; otherwise it will be treated as an empty dictionary.
"""
log_template = _("Copying metadata from %(src_type)s %(src_id)s to "
"%(vol_id)s.")
exception_template = _("Failed updating volume %(vol_id)s metadata"
" using the provided %(src_type)s"
" %(src_id)s metadata")
src_type = None
src_id = None
self._enable_bootable_flag(context, volume_id)
try:
if kwargs.get('snapshot_id'):
src_type = 'snapshot'
src_id = kwargs['snapshot_id']
snapshot_id = src_id
LOG.debug(log_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
self.db.volume_glance_metadata_copy_to_volume(
context, volume_id, snapshot_id)
elif kwargs.get('source_volid'):
src_type = 'source volume'
src_id = kwargs['source_volid']
source_volid = src_id
LOG.debug(log_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
self.db.volume_glance_metadata_copy_from_volume_to_volume(
context,
source_volid,
volume_id)
elif kwargs.get('source_replicaid'):
src_type = 'source replica'
src_id = kwargs['source_replicaid']
source_replicaid = src_id
LOG.debug(log_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
self.db.volume_glance_metadata_copy_from_volume_to_volume(
context,
source_replicaid,
volume_id)
elif kwargs.get('image_id'):
src_type = 'image'
src_id = kwargs['image_id']
image_id = src_id
image_meta = kwargs.get('image_meta', {})
LOG.debug(log_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
self._capture_volume_image_metadata(context, volume_id,
image_id, image_meta)
except exception.GlanceMetadataNotFound:
# If the volume is not created from an image, no glance metadata
# will be available for that volume in the volume glance metadata
# table.
pass
except exception.CinderException as ex:
LOG.exception(exception_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
raise exception.MetadataCopyFailure(reason=ex)
def _create_from_snapshot(self, context, volume_ref, snapshot_id,
**kwargs):
volume_id = volume_ref['id']
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
model_update = self.driver.create_volume_from_snapshot(volume_ref,
snapshot)
# NOTE(harlowja): Subtasks would be useful here since after this
# point the volume has already been created and further failures
# will not destroy the volume (although they could in the future).
make_bootable = False
try:
originating_vref = self.db.volume_get(context,
snapshot.volume_id)
make_bootable = originating_vref.bootable
except exception.CinderException as ex:
LOG.exception(_LE("Failed fetching snapshot %(snapshot_id)s "
"bootable"
" flag using the provided glance snapshot "
"%(snapshot_ref_id)s volume reference"),
{'snapshot_id': snapshot_id,
'snapshot_ref_id': snapshot.volume_id})
raise exception.MetadataUpdateFailure(reason=ex)
if make_bootable:
self._handle_bootable_volume_glance_meta(context, volume_id,
snapshot_id=snapshot_id)
return model_update
def _enable_bootable_flag(self, context, volume_id):
try:
LOG.debug('Marking volume %s as bootable.', volume_id)
self.db.volume_update(context, volume_id, {'bootable': True})
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating volume %(volume_id)s bootable "
"flag to true"), {'volume_id': volume_id})
raise exception.MetadataUpdateFailure(reason=ex)
def _create_from_source_volume(self, context, volume_ref,
source_volid, **kwargs):
# NOTE(harlowja): if the source volume has disappeared this will be our
# detection of that since this database call should fail.
#
# NOTE(harlowja): likely this is not the best place for this to happen
# and we should have proper locks on the source volume while actions
# that use the source volume are underway.
srcvol_ref = self.db.volume_get(context, source_volid)
model_update = self.driver.create_cloned_volume(volume_ref, srcvol_ref)
# NOTE(harlowja): Subtasks would be useful here since after this
# point the volume has already been created and further failures
# will not destroy the volume (although they could in the future).
if srcvol_ref.bootable:
self._handle_bootable_volume_glance_meta(context, volume_ref['id'],
source_volid=source_volid)
return model_update
def _create_from_source_replica(self, context, volume_ref,
source_replicaid, **kwargs):
# NOTE(harlowja): if the source volume has disappeared this will be our
# detection of that since this database call should fail.
#
# NOTE(harlowja): likely this is not the best place for this to happen
# and we should have proper locks on the source volume while actions
# that use the source volume are underway.
srcvol_ref = self.db.volume_get(context, source_replicaid)
model_update = self.driver.create_replica_test_volume(volume_ref,
srcvol_ref)
# NOTE(harlowja): Subtasks would be useful here since after this
# point the volume has already been created and further failures
# will not destroy the volume (although they could in the future).
if srcvol_ref.bootable:
self._handle_bootable_volume_glance_meta(
context,
volume_ref['id'],
source_replicaid=source_replicaid)
return model_update
def _copy_image_to_volume(self, context, volume_ref,
image_id, image_location, image_service):
"""Downloads Glance image to the specified volume."""
copy_image_to_volume = self.driver.copy_image_to_volume
volume_id = volume_ref['id']
LOG.debug("Attempting download of %(image_id)s (%(image_location)s)"
" to volume %(volume_id)s.",
{'image_id': image_id, 'volume_id': volume_id,
'image_location': image_location})
try:
copy_image_to_volume(context, volume_ref, image_service, image_id)
except processutils.ProcessExecutionError as ex:
LOG.exception(_LE("Failed to copy image %(image_id)s to volume: "
"%(volume_id)s"),
{'volume_id': volume_id, 'image_id': image_id})
raise exception.ImageCopyFailure(reason=ex.stderr)
except exception.ImageUnacceptable as ex:
LOG.exception(_LE("Failed to copy image to volume: %(volume_id)s"),
{'volume_id': volume_id})
raise exception.ImageUnacceptable(ex)
except Exception as ex:
LOG.exception(_LE("Failed to copy image %(image_id)s to "
"volume: %(volume_id)s"),
{'volume_id': volume_id, 'image_id': image_id})
if not isinstance(ex, exception.ImageCopyFailure):
raise exception.ImageCopyFailure(reason=ex)
else:
raise
LOG.debug("Downloaded image %(image_id)s (%(image_location)s)"
" to volume %(volume_id)s successfully.",
{'image_id': image_id, 'volume_id': volume_id,
'image_location': image_location})
def _capture_volume_image_metadata(self, context, volume_id,
image_id, image_meta):
# Save some base attributes into the volume metadata
base_metadata = {
'image_id': image_id,
}
name = image_meta.get('name', None)
if name:
base_metadata['image_name'] = name
# Save some more attributes into the volume metadata from the image
# metadata
for key in IMAGE_ATTRIBUTES:
if key not in image_meta:
continue
value = image_meta.get(key, None)
if value is not None:
base_metadata[key] = value
# Save all the image metadata properties into the volume metadata
property_metadata = {}
image_properties = image_meta.get('properties', {})
for (key, value) in image_properties.items():
if value is not None:
property_metadata[key] = value
volume_metadata = dict(property_metadata)
volume_metadata.update(base_metadata)
LOG.debug("Creating volume glance metadata for volume %(volume_id)s"
" backed by image %(image_id)s with: %(vol_metadata)s.",
{'volume_id': volume_id, 'image_id': image_id,
'vol_metadata': volume_metadata})
self.db.volume_glance_metadata_bulk_create(context, volume_id,
volume_metadata)
def _clone_image_volume(self, context, volume, image_location, image_meta):
"""Create a volume efficiently from an existing image.
Returns a (model_update, cloned) tuple: a dict of volume properties
(e.g. provider_location) or None, and a boolean indicating whether
cloning occurred.
"""
if not image_location:
return None, False
if (image_meta.get('container_format') != 'bare' or
image_meta.get('disk_format') != 'raw'):
LOG.info(_LI("Requested image %(id)s is not in raw format."),
{'id': image_meta.get('id')})
return None, False
image_volume = None
direct_url, locations = image_location
urls = set([direct_url] + [loc.get('url') for loc in locations or []])
image_volume_ids = [url[9:] for url in urls
if url and url.startswith('cinder://')]
image_volumes = self.db.volume_get_all_by_host(
context, volume['host'], filters={'id': image_volume_ids})
for image_volume in image_volumes:
# For the case image volume is stored in the service tenant,
# image_owner volume metadata should also be checked.
image_owner = None
volume_metadata = image_volume.get('volume_metadata') or {}
for m in volume_metadata:
if m['key'] == 'image_owner':
image_owner = m['value']
if (image_meta['owner'] != volume['project_id'] and
image_meta['owner'] != image_owner):
LOG.info(_LI("Skipping image volume %(id)s because "
"it is not accessible by current Tenant."),
{'id': image_volume.id})
continue
LOG.info(_LI("Will clone a volume from the image volume "
"%(id)s."), {'id': image_volume.id})
break
else:
LOG.debug("No accessible image volume for image %(id)s found.",
{'id': image_meta['id']})
return None, False
try:
return self.driver.create_cloned_volume(volume, image_volume), True
except (NotImplementedError, exception.CinderException):
LOG.exception(_LE('Failed to clone image volume %(id)s.'),
{'id': image_volume['id']})
return None, False
def _create_from_image(self, context, volume_ref,
image_location, image_id, image_meta,
image_service, **kwargs):
LOG.debug("Cloning %(volume_id)s from image %(image_id)s "
" at location %(image_location)s.",
{'volume_id': volume_ref['id'],
'image_location': image_location, 'image_id': image_id})
# Create the volume from an image.
#
# NOTE (singn): two params need to be returned
# dict containing provider_location for cloned volume
# and clone status.
model_update, cloned = self.driver.clone_image(context,
volume_ref,
image_location,
image_meta,
image_service)
if not cloned and 'cinder' in CONF.allowed_direct_url_schemes:
model_update, cloned = self._clone_image_volume(context,
volume_ref,
image_location,
image_meta)
if not cloned:
# TODO(harlowja): what needs to be rolled back in the clone if this
# volume create fails?? Likely this should be a subflow or broken
# out task in the future. That will bring up the question of how
# do we make said subflow/task which is only triggered in the
# clone image 'path' resumable and revertable in the correct
# manner.
#
# Create the volume and then download the image onto the volume.
model_update = self.driver.create_volume(volume_ref)
updates = dict(model_update or dict(), status='downloading')
try:
volume_ref = self.db.volume_update(context,
volume_ref['id'], updates)
except exception.CinderException:
LOG.exception(_LE("Failed updating volume %(volume_id)s with "
"%(updates)s"),
{'volume_id': volume_ref['id'],
'updates': updates})
self._copy_image_to_volume(context, volume_ref,
image_id, image_location, image_service)
self._handle_bootable_volume_glance_meta(context, volume_ref['id'],
image_id=image_id,
image_meta=image_meta)
return model_update
def _create_raw_volume(self, volume_ref, **kwargs):
return self.driver.create_volume(volume_ref)
def execute(self, context, volume_ref, volume_spec):
volume_spec = dict(volume_spec)
volume_id = volume_spec.pop('volume_id', None)
if not volume_id:
volume_id = volume_ref['id']
# we can't do anything if the driver didn't init
if not self.driver.initialized:
driver_name = self.driver.__class__.__name__
LOG.exception(_LE("Unable to create volume. "
"Volume driver %s not initialized"), driver_name)
raise exception.DriverNotInitialized()
create_type = volume_spec.pop('type', None)
LOG.info(_LI("Volume %(volume_id)s: being created as %(create_type)s "
"with specification: %(volume_spec)s"),
{'volume_spec': volume_spec, 'volume_id': volume_id,
'create_type': create_type})
if create_type == 'raw':
model_update = self._create_raw_volume(volume_ref=volume_ref,
**volume_spec)
elif create_type == 'snap':
model_update = self._create_from_snapshot(context,
volume_ref=volume_ref,
**volume_spec)
elif create_type == 'source_vol':
model_update = self._create_from_source_volume(
context, volume_ref=volume_ref, **volume_spec)
elif create_type == 'source_replica':
model_update = self._create_from_source_replica(
context, volume_ref=volume_ref, **volume_spec)
elif create_type == 'image':
model_update = self._create_from_image(context,
volume_ref=volume_ref,
**volume_spec)
else:
raise exception.VolumeTypeNotFound(volume_type_id=create_type)
# Persist any model information provided on creation.
try:
if model_update:
volume_ref = self.db.volume_update(context, volume_ref['id'],
model_update)
except exception.CinderException:
# If somehow the update failed we want to ensure that the
# failure is logged (but not try rescheduling since the volume at
# this point has been created).
LOG.exception(_LE("Failed updating model of volume %(volume_id)s "
"with creation provided model %(model)s"),
{'volume_id': volume_id, 'model': model_update})
raise
return volume_ref
class CreateVolumeOnFinishTask(NotifyVolumeActionTask):
"""On successful volume creation this will perform final volume actions.
When a volume is created successfully it is expected that MQ notifications
and database updates will occur to 'signal' to others that the volume is
now ready for usage. This task does those notifications and updates in a
reliable manner (not re-raising exceptions if said actions can not be
triggered).
Reversion strategy: N/A
"""
def __init__(self, db, event_suffix):
super(CreateVolumeOnFinishTask, self).__init__(db, event_suffix)
self.status_translation = {
'migration_target_creating': 'migration_target',
}
def execute(self, context, volume, volume_spec):
volume_id = volume['id']
new_status = self.status_translation.get(volume_spec.get('status'),
'available')
update = {
'status': new_status,
'launched_at': timeutils.utcnow(),
}
try:
# TODO(harlowja): is it acceptable to only log if this fails??
# or are there other side-effects that this will cause if the
# status isn't updated correctly (aka it will likely be stuck in
# 'creating' if this fails)??
volume_ref = self.db.volume_update(context, volume_id, update)
# Now use the parent to notify.
super(CreateVolumeOnFinishTask, self).execute(context, volume_ref)
except exception.CinderException:
LOG.exception(_LE("Failed updating volume %(volume_id)s with "
"%(update)s"), {'volume_id': volume_id,
'update': update})
# Even if the update fails, the volume is ready.
LOG.info(_LI("Volume %(volume_name)s (%(volume_id)s): "
"created successfully"),
{'volume_name': volume_spec['volume_name'],
'volume_id': volume_id})
def get_flow(context, db, driver, scheduler_rpcapi, host, volume_id,
allow_reschedule, reschedule_context, request_spec,
filter_properties):
"""Constructs and returns the manager entrypoint flow.
This flow will do the following:
    1. Determines if rescheduling is enabled (ahead of time).
    2. Injects keys & values for dependent tasks.
    3. Selects one of two tasks that are activated only on *failure*: one
       that updates the db status & notifies, or one that updates the db
       status, notifies & *reschedules*.
    4. Extracts a volume specification from the provided inputs.
    5. Notifies that the volume has started to be created.
    6. Creates a volume from the extracted volume specification.
    7. Attaches an on-success *only* task that notifies that the volume
       creation has ended and performs further database status updates.
"""
flow_name = ACTION.replace(":", "_") + "_manager"
volume_flow = linear_flow.Flow(flow_name)
# This injects the initial starting flow values into the workflow so that
# the dependency order of the tasks provides/requires can be correctly
# determined.
create_what = {
'context': context,
'filter_properties': filter_properties,
'request_spec': request_spec,
'volume_id': volume_id,
}
volume_flow.add(ExtractVolumeRefTask(db, host, set_error=False))
retry = filter_properties.get('retry', None)
# Always add OnFailureRescheduleTask and we handle the change of volume's
# status when reverting the flow. Meanwhile, no need to revert process of
# ExtractVolumeRefTask.
do_reschedule = allow_reschedule and request_spec and retry
volume_flow.add(OnFailureRescheduleTask(reschedule_context, db,
scheduler_rpcapi,
do_reschedule))
LOG.debug("Volume reschedule parameters: %(allow)s "
"retry: %(retry)s", {'allow': allow_reschedule, 'retry': retry})
volume_flow.add(ExtractVolumeSpecTask(db),
NotifyVolumeActionTask(db, "create.start"),
CreateVolumeFromSpecTask(db, driver),
CreateVolumeOnFinishTask(db, "create.end"))
# Now load (but do not run) the flow using the provided initial data.
return taskflow.engines.load(volume_flow, store=create_what)
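# A minimal usage sketch (not part of the original flow module): the volume
# manager is assumed to load the flow with its own db/driver/rpcapi handles
# and then run the resulting taskflow engine. All names below are
# illustrative placeholders.
#
#     engine = get_flow(ctxt, db, driver, scheduler_rpcapi, host, volume_id,
#                       allow_reschedule=True, reschedule_context=ctxt,
#                       request_spec=request_spec, filter_properties={})
#     engine.run()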
|
|
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Support for mounting virtual image files."""
import os
import time
from oslo_log import log as logging
from oslo_utils import importutils
from nova import exception
from nova.i18n import _, _LI, _LW
from nova import utils
from nova.virt.image import model as imgmodel
LOG = logging.getLogger(__name__)
MAX_DEVICE_WAIT = 30
class Mount(object):
"""Standard mounting operations, that can be overridden by subclasses.
The basic device operations provided are get, map and mount,
to be called in that order.
"""
mode = None # to be overridden in subclasses
@staticmethod
def instance_for_format(image, mountdir, partition):
"""Get a Mount instance for the image type
:param image: instance of nova.virt.image.model.Image
:param mountdir: path to mount the image at
:param partition: partition number to mount
"""
LOG.debug("Instance for format image=%(image)s "
"mountdir=%(mountdir)s partition=%(partition)s",
{'image': image, 'mountdir': mountdir,
'partition': partition})
if isinstance(image, imgmodel.LocalFileImage):
if image.format == imgmodel.FORMAT_RAW:
LOG.debug("Using LoopMount")
return importutils.import_object(
"nova.virt.disk.mount.loop.LoopMount",
image, mountdir, partition)
else:
LOG.debug("Using NbdMount")
return importutils.import_object(
"nova.virt.disk.mount.nbd.NbdMount",
image, mountdir, partition)
else:
# TODO(berrange) we could mount images of
# type LocalBlockImage directly without
# involving loop or nbd devices
#
# We could also mount RBDImage directly
# using kernel RBD block dev support.
#
# This is left as an enhancement for future
# motivated developers todo, since raising
# an exception is on par with what this
# code did historically
raise exception.UnsupportedImageModel(
image.__class__.__name__)
@staticmethod
def instance_for_device(image, mountdir, partition, device):
"""Get a Mount instance for the device type
:param image: instance of nova.virt.image.model.Image
:param mountdir: path to mount the image at
:param partition: partition number to mount
:param device: mounted device path
"""
LOG.debug("Instance for device image=%(image)s "
"mountdir=%(mountdir)s partition=%(partition)s "
"device=%(device)s",
{'image': image, 'mountdir': mountdir,
'partition': partition, 'device': device})
if "loop" in device:
LOG.debug("Using LoopMount")
return importutils.import_object(
"nova.virt.disk.mount.loop.LoopMount",
image, mountdir, partition, device)
else:
LOG.debug("Using NbdMount")
return importutils.import_object(
"nova.virt.disk.mount.nbd.NbdMount",
image, mountdir, partition, device)
def __init__(self, image, mount_dir, partition=None, device=None):
"""Create a new Mount instance
:param image: instance of nova.virt.image.model.Image
:param mount_dir: path to mount the image at
:param partition: partition number to mount
:param device: mounted device path
"""
# Input
self.image = image
self.partition = partition
self.mount_dir = mount_dir
# Output
self.error = ""
# Internal
self.linked = self.mapped = self.mounted = self.automapped = False
self.device = self.mapped_device = device
# Reset to mounted dir if possible
self.reset_dev()
def reset_dev(self):
"""Reset device paths to allow unmounting."""
if not self.device:
return
self.linked = self.mapped = self.mounted = True
device = self.device
if os.path.isabs(device) and os.path.exists(device):
if device.startswith('/dev/mapper/'):
device = os.path.basename(device)
device, self.partition = device.rsplit('p', 1)
self.device = os.path.join('/dev', device)
def get_dev(self):
"""Make the image available as a block device in the file system."""
self.device = None
self.linked = True
return True
def _get_dev_retry_helper(self):
"""Some implementations need to retry their get_dev."""
# NOTE(mikal): This method helps implement retries. The implementation
# simply calls _get_dev_retry_helper from their get_dev, and implements
# _inner_get_dev with their device acquisition logic. The NBD
# implementation has an example.
start_time = time.time()
device = self._inner_get_dev()
while not device:
LOG.info(_LI('Device allocation failed. Will retry in 2 seconds.'))
time.sleep(2)
if time.time() - start_time > MAX_DEVICE_WAIT:
LOG.warning(_LW('Device allocation failed after repeated '
'retries.'))
return False
device = self._inner_get_dev()
return True
def _inner_get_dev(self):
raise NotImplementedError()
def unget_dev(self):
"""Release the block device from the file system namespace."""
self.linked = False
def map_dev(self):
"""Map partitions of the device to the file system namespace."""
assert(os.path.exists(self.device))
LOG.debug("Map dev %s", self.device)
automapped_path = '/dev/%sp%s' % (os.path.basename(self.device),
self.partition)
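        # For example, with device '/dev/nbd0' and partition 1 the
        # auto-mapped path is '/dev/nbd0p1' (present when the nbd/loop kernel
        # module maps partitions itself), whereas kpartx creates
        # '/dev/mapper/nbd0p1' (map_path below).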
if self.partition == -1:
self.error = _('partition search unsupported with %s') % self.mode
elif self.partition and not os.path.exists(automapped_path):
map_path = '/dev/mapper/%sp%s' % (os.path.basename(self.device),
self.partition)
assert(not os.path.exists(map_path))
# Note kpartx can output warnings to stderr and succeed
# Also it can output failures to stderr and "succeed"
# So we just go on the existence of the mapped device
_out, err = utils.trycmd('kpartx', '-a', self.device,
run_as_root=True, discard_warnings=True)
# Note kpartx does nothing when presented with a raw image,
# so given we only use it when we expect a partitioned image, fail
if not os.path.exists(map_path):
if not err:
err = _('partition %s not found') % self.partition
self.error = _('Failed to map partitions: %s') % err
else:
self.mapped_device = map_path
self.mapped = True
elif self.partition and os.path.exists(automapped_path):
# Note auto mapping can be enabled with the 'max_part' option
# to the nbd or loop kernel modules. Beware of possible races
# in the partition scanning for _loop_ devices though
# (details in bug 1024586), which are currently uncatered for.
self.mapped_device = automapped_path
self.mapped = True
self.automapped = True
else:
self.mapped_device = self.device
self.mapped = True
return self.mapped
def unmap_dev(self):
"""Remove partitions of the device from the file system namespace."""
if not self.mapped:
return
LOG.debug("Unmap dev %s", self.device)
if self.partition and not self.automapped:
utils.execute('kpartx', '-d', self.device, run_as_root=True)
self.mapped = False
self.automapped = False
def mnt_dev(self):
"""Mount the device into the file system."""
LOG.debug("Mount %(dev)s on %(dir)s",
{'dev': self.mapped_device, 'dir': self.mount_dir})
_out, err = utils.trycmd('mount', self.mapped_device, self.mount_dir,
discard_warnings=True, run_as_root=True)
if err:
self.error = _('Failed to mount filesystem: %s') % err
LOG.debug(self.error)
return False
self.mounted = True
return True
def unmnt_dev(self):
"""Unmount the device from the file system."""
if not self.mounted:
return
self.flush_dev()
LOG.debug("Umount %s", self.mapped_device)
utils.execute('umount', self.mapped_device, run_as_root=True)
self.mounted = False
def flush_dev(self):
pass
def do_mount(self):
"""Call the get, map and mnt operations."""
status = False
try:
status = self.get_dev() and self.map_dev() and self.mnt_dev()
finally:
if not status:
LOG.debug("Fail to mount, tearing back down")
self.do_teardown()
return status
def do_umount(self):
"""Call the unmnt operation."""
if self.mounted:
self.unmnt_dev()
def do_teardown(self):
"""Call the umnt, unmap, and unget operations."""
if self.mounted:
self.unmnt_dev()
if self.mapped:
self.unmap_dev()
if self.linked:
self.unget_dev()
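# A minimal lifecycle sketch (illustrative only; the image object, mount
# point and partition number are placeholders supplied by the caller):
#
#     mount = Mount.instance_for_format(image, '/mnt/inspect', 1)
#     if mount.do_mount():
#         # ... inspect or modify files under mount.mount_dir ...
#         pass
#     mount.do_teardown()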
|
|
import logging
logger = logging.getLogger(__name__)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
logger.setLevel(logging.INFO)
try:
from pygame.transform import flip, rotate
import pygame
except ImportError:
logger.error('cannot import pygame (is it installed?)')
raise
import itertools
import pytmx
__all__ = ['load_pygame', 'pygame_image_loader', 'simplify', 'build_rects']
def handle_transformation(tile, flags):
if flags.flipped_diagonally:
tile = flip(rotate(tile, 270), 1, 0)
if flags.flipped_horizontally or flags.flipped_vertically:
tile = flip(tile, flags.flipped_horizontally, flags.flipped_vertically)
return tile
def smart_convert(original, colorkey, pixelalpha):
"""
this method does several tests on a surface to determine the optimal
flags and pixel format for each tile surface.
this is done for the best rendering speeds and removes the need to
convert() the images on your own
"""
tile_size = original.get_size()
threshold = 127 # the default
# count the number of pixels in the tile that are not transparent
px = pygame.mask.from_surface(original, threshold).count()
# there are no transparent pixels in the image
if px == tile_size[0] * tile_size[1]:
tile = original.convert()
# there are transparent pixels, and tiled set a colorkey
elif colorkey:
tile = original.convert()
tile.set_colorkey(colorkey, pygame.RLEACCEL)
# there are transparent pixels, and set for perpixel alpha
elif pixelalpha:
tile = original.convert_alpha()
# there are transparent pixels, and we won't handle them
else:
tile = original.convert()
return tile
def pygame_image_loader(filename, colorkey, **kwargs):
if colorkey:
colorkey = pygame.Color('#{0}'.format(colorkey))
pixelalpha = kwargs.get('pixelalpha', True)
image = pygame.image.load(filename)
def load_image(rect=None, flags=None):
if rect:
try:
tile = image.subsurface(rect)
except ValueError:
logger.error('Tile bounds outside bounds of tileset image')
raise
else:
tile = image.copy()
if flags:
tile = handle_transformation(tile, flags)
tile = smart_convert(tile, colorkey, pixelalpha)
return tile
return load_image
def load_pygame(filename, *args, **kwargs):
"""
PYGAME USERS: Use me.
Load a TMX file, load the images, and return a TiledMap class that is
ready to use.
this utility has 'smart' tile loading. by default any tile without
transparent pixels will be loaded for quick blitting. if the tile has
transparent pixels, then it will be loaded with per-pixel alpha. this is
a per-tile, per-image check.
if a color key is specified as an argument, or in the tmx data, the
per-pixel alpha will not be used at all. if the tileset's image has colorkey
    transparency set in Tiled, util_pygame will return images that have their
transparency already set.
TL;DR:
Don't attempt to convert() or convert_alpha() the individual tiles. It is
already done for you.
"""
kwargs['image_loader'] = pygame_image_loader
return pytmx.TiledMap(filename, *args, **kwargs)
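# A short usage sketch (the map filename and tile coordinates are
# illustrative):
#
#     tmx = load_pygame('my_map.tmx')
#     surface = tmx.get_tile_image(0, 0, 0)   # tile at x=0, y=0, layer 0
#
# As noted above, the returned surfaces are already convert()-ed and can be
# blitted directly.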
def build_rects(tmxmap, layer, tileset=None, real_gid=None):
"""generate a set of non-overlapping rects that represents the distribution
of the specified gid.
useful for generating rects for use in collision detection
Use at your own risk: this is experimental...will change in future
GID Note: You will need to add 1 to the GID reported by Tiled.
:param tmxmap: TiledMap object
:param layer: int or string name of layer
:param tileset: int or string name of tileset
:param real_gid: Tiled GID of the tile + 1 (see note)
:return: List of pygame Rect objects
"""
if isinstance(tileset, int):
try:
tileset = tmxmap.tilesets[tileset]
except IndexError:
msg = "Tileset #{0} not found in map {1}."
logger.debug(msg.format(tileset, tmxmap))
raise IndexError
elif isinstance(tileset, str):
try:
tileset = [t for t in tmxmap.tilesets if t.name == tileset].pop()
except IndexError:
msg = "Tileset \"{0}\" not found in map {1}."
logger.debug(msg.format(tileset, tmxmap))
raise ValueError
elif tileset:
msg = "Tileset must be either a int or string. got: {0}"
logger.debug(msg.format(type(tileset)))
raise TypeError
gid = None
if real_gid:
try:
gid, flags = tmxmap.map_gid(real_gid)[0]
except IndexError:
msg = "GID #{0} not found"
logger.debug(msg.format(real_gid))
raise ValueError
if isinstance(layer, int):
layer_data = tmxmap.get_layer_data(layer)
elif isinstance(layer, str):
try:
layer = [l for l in tmxmap.layers if l.name == layer].pop()
layer_data = layer.data
except IndexError:
msg = "Layer \"{0}\" not found in map {1}."
logger.debug(msg.format(layer, tmxmap))
raise ValueError
p = itertools.product(range(tmxmap.width), range(tmxmap.height))
if gid:
points = [(x, y) for (x, y) in p if layer_data[y][x] == gid]
else:
points = [(x, y) for (x, y) in p if layer_data[y][x]]
rects = simplify(points, tmxmap.tilewidth, tmxmap.tileheight)
return rects
def simplify(all_points, tilewidth, tileheight):
"""Given a list of points, return list of rects that represent them
kludge:
"A kludge (or kluge) is a workaround, a quick-and-dirty solution,
a clumsy or inelegant, yet effective, solution to a problem, typically
using parts that are cobbled together."
-- wikipedia
    turn a list of points into rects
adjacent rects will be combined.
plain english:
the input list must be a list of tuples that represent
the areas to be combined into rects
the rects will be blended together over solid groups
so if data is something like:
0 1 1 1 0 0 0
0 1 1 0 0 0 0
0 0 0 0 0 4 0
0 0 0 0 0 4 0
0 0 0 0 0 0 0
0 0 1 1 1 1 1
you'll have the 4 rects that mask the area like this:
..######......
..####........
..........##..
..........##..
..............
....##########
pretty cool, right?
there may be cases where the number of rectangles is not as low as possible,
but I haven't found that it is excessively bad. certainly much better than
making a list of rects, one for each tile on the map!
"""
def pick_rect(points, rects):
ox, oy = sorted([(sum(p), p) for p in points])[0][1]
x = ox
y = oy
ex = None
while 1:
x += 1
if not (x, y) in points:
if ex is None:
ex = x - 1
if (ox, y + 1) in points:
if x == ex + 1:
y += 1
x = ox
else:
y -= 1
break
else:
if x <= ex: y -= 1
break
c_rect = pygame.Rect(ox * tilewidth, oy * tileheight,
(ex - ox + 1) * tilewidth,
(y - oy + 1) * tileheight)
rects.append(c_rect)
rect = pygame.Rect(ox, oy, ex - ox + 1, y - oy + 1)
kill = [p for p in points if rect.collidepoint(p)]
[points.remove(i) for i in kill]
if points:
pick_rect(points, rects)
rect_list = []
while all_points:
pick_rect(all_points, rect_list)
return rect_list
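# A tiny worked example (a 32x32 tile size is arbitrary): the two adjacent
# points (1, 0) and (2, 0) collapse into a single rect two tiles wide. Note
# that the input list is consumed in the process.
#
#     simplify([(1, 0), (2, 0)], 32, 32)
#     # -> [<rect(32, 0, 64, 32)>]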
|
|
#!/usr/bin/python
#
# Combine all source and header files in source directory into
# a single C file.
#
# The process is not very simple or clean. This helper is not
# a generic or 100% correct in the general case: it just needs
# to work for Duktape.
#
# Overview of the process:
#
# * Parse all relevant C and H files.
#
# * Change all non-exposed functions and variables to "static" in
# both headers (extern -> static) and in implementation files.
#
# * Emit internal headers by starting from duk_internal.h (the only
# internal header included by Duktape C files) and emulating the
# include mechanism recursively.
#
# * Emit all source files, removing any internal includes (these
# should all be duk_internal.h ideally but there are a few remnants).
#
# At every step, source and header lines are represented with explicit
# line objects which keep track of original filename and line. The
# output contains #line directives, if necessary, to ensure error
# throwing and other diagnostic info will work in a useful manner when
# deployed.
#
# Making the process deterministic is important, so that if users have
# diffs that they apply to the combined source, such diffs would apply
# for as long as possible.
#
# Limitations and notes:
#
# * #defines are not #undef'd at the end of an input file, so defines
# may bleed to other files. These need to be fixed in the original
# sources.
#
# * System headers included with a certain define (like _BSD_SOURCE)
# are not handled correctly now.
#
# * External includes are not removed or combined: some of them are
#   inside #ifdef directives, so it would be difficult to do so. Ideally
# there would be no external includes in individual files.
import os
import sys
import re
re_extinc = re.compile(r'^#include <(.*?)>.*$')
re_intinc = re.compile(r'^#include \"(duk.*?)\".*$') # accept duktape.h too
class File:
filename_full = None
filename = None
lines = None
def __init__(self, filename, lines):
self.filename = os.path.basename(filename)
self.filename_full = filename
self.lines = lines
class Line:
filename_full = None
filename = None
lineno = None
data = None
def __init__(self, filename, lineno, data):
self.filename = os.path.basename(filename)
self.filename_full = filename
self.lineno = lineno
self.data = data
def read(filename):
lines = []
f = None
try:
f = open(filename, 'rb')
lineno = 0
for line in f:
lineno += 1
if len(line) > 0 and line[-1] == '\n':
line = line[:-1]
lines.append(Line(filename, lineno, line))
finally:
if f is not None:
f.close()
return File(filename, lines)
def findFile(files, filename):
for i in files:
if i.filename == filename:
return i
return None
def processIncludes(f):
extinc = []
intinc = []
for line in f.lines:
if not line.data.startswith('#include'):
continue
m = re_extinc.match(line.data)
if m is not None:
# external includes are kept; they may even be conditional
extinc.append(m.group(1))
#line.data = ''
continue
m = re_intinc.match(line.data)
if m is not None:
intinc.append(m.group(1))
#line.data = ''
continue
print(line.data)
raise Exception('cannot parse include directive')
return extinc, intinc
def processDeclarations(f):
for line in f.lines:
# FIXME: total placeholder
if line.data.startswith('int ') or line.data.startswith('void '):
line.data = 'static ' + line.data
elif line.data.startswith('extern int') or line.data.startswith('extern void '):
line.data = 'static ' + line.data[7:] # replace extern with static
def createCombined(files, extinc, intinc):
res = []
emit_state = [ None, None ] # curr_filename, curr_lineno
def emit(line):
if isinstance(line, (str, unicode)):
res.append(line)
emit_state[1] += 1
else:
if line.filename != emit_state[0] or line.lineno != emit_state[1]:
res.append('#line %d "%s"' % (line.lineno, line.filename))
res.append(line.data)
emit_state[0] = line.filename
emit_state[1] = line.lineno + 1
processed = {}
# Helper to process internal headers recursively, starting from duk_internal.h
def processHeader(f_hdr):
#print('Process header: ' + f_hdr.filename)
for line in f_hdr.lines:
m = re_intinc.match(line.data)
if m is None:
emit(line)
continue
incname = m.group(1)
if incname in [ 'duktape.h', 'duk_custom.h' ]:
# keep a few special headers as is
emit(line)
continue
#print('Include: ' + incname)
f_inc = findFile(files, incname)
assert(f_inc)
if processed.has_key(f_inc.filename):
#print('already included, skip: ' + f_inc.filename)
emit('/* already included: %s */' % f_inc.filename)
continue
processed[f_inc.filename] = True
# include file in this place, recursively
processHeader(f_inc)
# Process internal headers by starting with duk_internal.h
f_dukint = findFile(files, 'duk_internal.h')
assert(f_dukint)
processHeader(f_dukint)
# Mark all internal headers processed
for f in files:
if os.path.splitext(f.filename)[1] != '.h':
continue
processed[f.filename] = True
# Then emit remaining files
for f in files:
if processed.has_key(f.filename):
continue
for line in f.lines:
m = re_intinc.match(line.data)
if m is None:
emit(line)
else:
incname = m.group(1)
emit('/* include removed: %s */' % incname)
return '\n'.join(res) + '\n'
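# The emitted stream therefore looks roughly like this (illustrative
# excerpt; the header names are placeholders):
#
#     #line 1 "duk_internal.h"
#     ...lines from duk_internal.h...
#     /* already included: duk_something.h */
#     /* include removed: duk_internal.h */
#
# A fresh #line directive is written whenever the next emitted line does not
# follow directly from the previously emitted file/line position.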
def main():
outname = sys.argv[2]
assert(outname)
print 'Read input files'
files = []
filelist = os.listdir(sys.argv[1])
filelist.sort() # for consistency
for fn in filelist:
if os.path.splitext(fn)[1] not in [ '.c', '.h' ]:
continue
res = read(os.path.join(sys.argv[1], fn))
files.append(res)
print '%d files read' % len(files)
print 'Process #include statements'
extinc = []
intinc = []
for f in files:
extnew, intnew = processIncludes(f)
for i in extnew:
if i in extinc:
continue
extinc.append(i)
for i in intnew:
if i in intinc:
continue
intinc.append(i)
#print('external includes: ' + ', '.join(extinc))
#print('internal includes: ' + ', '.join(intinc))
print 'Process declarations (non-exposed are converted to static)'
for f in files:
#processDeclarations(f)
pass
print 'Output final file'
final = createCombined(files, extinc, intinc)
f = open(outname, 'wb')
f.write(final)
f.close()
print 'Wrote %d bytes to %s' % (len(final), outname)
if __name__ == '__main__':
main()
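# Illustrative invocation (the script and path names are placeholders):
#
#     python combine_sources.py src/ duktape_combined.c
#
# where argv[1] is the directory holding the .c/.h files and argv[2] is the
# name of the combined output file.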
|
|
"""Base test case support for tools.
Version Added:
3.0
"""
from __future__ import unicode_literals
import os
import tempfile
from copy import deepcopy
from functools import wraps
from unittest import SkipTest
import kgb
import six
from reviewbot.config import config
from reviewbot.repositories import GitRepository
from reviewbot.testing import TestCase
from reviewbot.utils.process import execute
class ToolTestCaseMetaclass(type):
"""Metaclass for tool tests.
This is required for all subclasses of :py:class:`BaseToolTestCase`.
This will split any test methods that are marked as a simulation and/or
integration test into individual tests, set up by the subclass's
:py:meth:`~BaseToolTestCase.setup_simulation_test` or
:py:meth:`~BaseToolTestCase.setup_integration_test` method.
Version Added:
3.0
"""
def __new__(meta, name, bases, d):
"""Construct a new class.
Args:
name (str):
The name of the class.
            bases (tuple of type):
The parent classes/mixins.
d (dict):
The class dictionary.
Returns:
type:
The new class.
"""
tool_class = d.get('tool_class')
        assert tool_class, '%s must set tool_class' % name
if tool_class.exe_dependencies:
assert d.get('tool_exe_config_key'), \
'%s must set tool_exe_config_key' % name
assert d.get('tool_exe_path'), '%s must set tool_exe_path' % name
for func_name, func in six.iteritems(d.copy()):
if callable(func):
added = False
if hasattr(func, 'integration_setup_kwargs'):
new_name = meta.tag_func_name(func_name, 'integration')
d[new_name] = meta.make_integration_test_func(func,
new_name)
added = True
if hasattr(func, 'simulation_setup_kwargs'):
new_name = meta.tag_func_name(func_name, 'simulation')
d[new_name] = meta.make_simulation_test_func(func,
new_name)
added = True
if added:
del d[func_name]
return super(ToolTestCaseMetaclass, meta).__new__(meta, name, bases, d)
@classmethod
def tag_func_name(meta, func_name, tag):
"""Return a function name tagged with an identifier.
        This will convert a ``test_*`` function name into a
:samp:`test_{tag}_*`.
Args:
func_name (str):
The original name of the function.
tag (unicode):
The tag to add.
Returns:
str:
The resulting function name.
"""
assert func_name.startswith('test_')
return str('test_%s_%s' % (tag, func_name[5:]))
@classmethod
def make_integration_test_func(meta, func, func_name):
"""Return a new function for an integration test.
The function will wrap the original function from the class, and
set up the state for an integration test.
Args:
func (callable):
The function to wrap.
func_name (str):
The name of the function.
Returns:
callable:
The new integration test function.
"""
@wraps(func)
def _wrapper(self, *args, **kwargs):
old_path = os.environ['PATH']
old_tool_exe_path = self.tool_exe_path
try:
os.environ['PATH'] = self._old_path
if not self.tool_class().check_dependencies():
raise SkipTest('%s dependencies not available'
% self.tool_class.name)
if self.tool_exe_config_key:
self.tool_exe_path = \
config['exe_paths'][self.tool_exe_config_key]
self.spy_on(execute)
self.setup_integration_test(**func.integration_setup_kwargs)
return func(self, *args, **kwargs)
finally:
os.environ['PATH'] = old_path
self.tool_exe_path = old_tool_exe_path
_wrapper.__name__ = func_name
_wrapper.__doc__ = '%s [integration test]' % _wrapper.__doc__
return _wrapper
@classmethod
def make_simulation_test_func(meta, func, func_name):
"""Return a new function for a simulation test.
The function will wrap the original function from the class, and
set up the state for a simulation test.
Args:
func (callable):
The function to wrap.
func_name (str):
The name of the function.
Returns:
callable:
The new simulation test function.
"""
@wraps(func)
def _wrapper(self, *args, **kwargs):
self.setup_simulation_test(**func.simulation_setup_kwargs)
return func(self, *args, **kwargs)
_wrapper.__name__ = func_name
_wrapper.__doc__ = '%s [simulation test]' % _wrapper.__doc__
return _wrapper
class BaseToolTestCase(kgb.SpyAgency, TestCase):
"""Base class for Tool test cases.
Version Added:
3.0
"""
#: The tool class to test.
#:
#: This is required.
#:
#: Type:
#: type
tool_class = None
#: The key in the configuration identifying the executable of the tool.
#:
#: This is required.
#:
#: Type:
#: unicode
tool_exe_config_key = None
#: The path to the executable for running the tool.
#:
#: This will generally be a fake path for simulated tool runs, but a
#: real one for integration tests. It can be set on the class or during
#: test/test suite setup.
#:
#: Type:
#: unicode
tool_exe_path = None
def run_get_can_handle_file(self, filename, file_contents=b'',
tool_settings={}):
"""Run get_can_handle_file with the given file and settings.
This will create the review objects, set up a repository (if needed
by the tool), apply any configuration, and run
:py:meth:`~reviewbot.tools.base.BaseTool.get_can_handle_file`.
Args:
filename (unicode):
The filename of the file being reviewed.
file_contents (bytes, optional):
File content to review.
tool_settings (dict, optional):
The settings to pass to the tool constructor.
Returns:
bool:
``True`` if the file can be handled. ``False`` if it cannot.
"""
review = self.create_review()
review_file = self.create_review_file(
review,
source_file=filename,
dest_file=filename,
diff_data=self.create_diff_data(chunks=[{
'change': 'insert',
'lines': file_contents.splitlines(),
'new_linenum': 1,
}]),
patched_content=file_contents)
tool = self.tool_class(settings=tool_settings)
return tool.get_can_handle_file(review_file)
def run_tool_execute(self, filename, file_contents, checkout_dir=None,
tool_settings={}, other_files={}):
"""Run execute with the given file and settings.
This will create the review objects, set up a repository (if needed
by the tool), apply any configuration, and run
:py:meth:`~reviewbot.tools.base.BaseTool.execute`.
Args:
filename (unicode):
The filename of the file being reviewed.
file_contents (bytes):
File content to review.
checkout_dir (unicode, optional):
An explicit directory to use as the checkout directory, for
tools that require full-repository checkouts.
tool_settings (dict, optional):
The settings to pass to the tool constructor.
other_files (dict, optional):
Other files to write to the tree. Each will result in a new
file added to the review.
The dictionary is a map of file paths (relative to the
checkout directory) to byte strings.
Returns:
tuple:
A 2-tuple containing:
            1. The review (:py:class:`reviewbot.processing.review.Review`)
2. The file entry corresponding to ``filename``
(:py:class:`reviewbot.processing.review.File`)
If ``other_files`` is specified, the second tuple item will
instead be a dictionary of keys from ``other_files`` (along with
``filename``) to :py:class:`reviewbot.processing.review.File`
instances.
"""
if self.tool_class.working_directory_required:
repository = GitRepository(name='MyRepo',
clone_path='git://example.com/repo')
self.spy_on(repository.sync, call_original=False)
@self.spy_for(repository.checkout)
def _checkout(_self, *args, **kwargs):
return checkout_dir or tempfile.mkdtemp()
else:
repository = None
review = self.create_review()
review_file = self.create_review_file(
review,
source_file=filename,
dest_file=filename,
diff_data=self.create_diff_data(chunks=[{
'change': 'insert',
'lines': file_contents.splitlines(),
'new_linenum': 1,
}]),
patched_content=file_contents)
review_files = {}
if other_files:
review_files[filename] = review_file
for other_filename, other_contents in six.iteritems(other_files):
review_files[other_filename] = self.create_review_file(
review,
source_file=other_filename,
dest_file=other_filename,
diff_data=self.create_diff_data(chunks=[{
'change': 'insert',
'lines': other_contents.splitlines(),
'new_linenum': 1,
}]),
patched_content=other_contents)
worker_config = deepcopy(self.config)
worker_config.setdefault('exe_paths', {}).update({
self.tool_exe_config_key: self.tool_exe_path,
})
with self.override_config(worker_config):
tool = self.tool_class(settings=tool_settings)
tool.execute(review,
repository=repository)
if other_files:
return review, review_files
return review, review_file
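    # A hedged usage sketch from a subclass test (the filename, settings and
    # file contents are illustrative):
    #
    #     review, review_file = self.run_tool_execute(
    #         filename='test.py',
    #         file_contents=b'print("hi")\n',
    #         tool_settings={'some_setting': True})
    #     # ... assert against the comments collected on `review` ...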
def setup_integration_test(self, **kwargs):
"""Set up an integration test.
Args:
**kwargs (dict):
Keyword arguments passed to
:py:func:`~reviewbot.tools.testing.testcases.integration_test`.
"""
pass
def setup_simulation_test(self, **kwargs):
"""Set up a simulation test.
Args:
**kwargs (dict):
Keyword arguments passed to
:py:func:`~reviewbot.tools.testing.testcases.simulation_test`.
"""
pass
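# A minimal subclass sketch (the tool class, decorator import location and
# keyword arguments are assumptions for illustration; the decorators are the
# ones referenced in the setup_* docstrings above):
#
#     @six.add_metaclass(ToolTestCaseMetaclass)
#     class MyToolTests(BaseToolTestCase):
#         tool_class = MyTool
#         tool_exe_config_key = 'mytool'
#         tool_exe_path = '/usr/bin/mytool'
#
#         @integration_test()
#         @simulation_test(output='ok')
#         def test_execute(self):
#             """Testing MyTool.execute."""
#             review, review_file = self.run_tool_execute(
#                 filename='test.py',
#                 file_contents=b'...')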
|
|
''' COSMO-VIEW,
Quim Ballabrera, May 2017
Script for visualizing model outputs provided by various operational
systems
EGL, 06/2020:
  A heap variable MESSAGE has been introduced to store "print" messages
'''
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
import datetime
from calendar import monthrange
import wget
__version__ = "1.1"
__author__ = "Quim Ballabrera"
__date__ = "July 2017"
class parameters():
# =================
''' Parameters for CODAR stations'''
__version__ = "1.2"
__author__ = "Quim Ballabrera"
__date__ = "July 2017"
# Version 1.0 (June 2017) : Initial version
# Version 1.1 (July 2017) : Allows multiple time steps in CODAR file
# Version 1.2 (December 2017) : Update path names
# Version 1.3 (March 2018) : Compact version
def __init__ (self):
self.STATION = tk.StringVar()
self.URL = tk.StringVar()
self.FILENAME = tk.StringVar()
TODAY = datetime.datetime.now().date()
self.YEAR = tk.IntVar()
self.MONTH = tk.IntVar()
self.DAY = tk.IntVar()
self.HOUR = tk.IntVar()
self.YEAR.set(TODAY.year)
self.MONTH.set(TODAY.month)
self.DAY.set(TODAY.day)
self.HOUR.set(datetime.datetime.now().time().hour)
# =============
class WinCodar:
# =============
def __init__ (self,master):
self.MESSAGE = ""
self.master = master
self.PARAMS = parameters()
self.frame = tk.Frame(self.master)
self.HFR_LIST = ['Ebre','Gibraltar','Galicia','Cadiz','Eivissa']
self.HFR_INIT_YEAR = [2013,2011,2010,2013,2012]
self.HFR_INIT_MONTH = [12,5,7,5,6]
self.HFR_PROVIDER = ['Puertos','Puertos','Puertos','Puertos','SOCIB']
# Paths as a function of the provider
# 0 = Puertos
# 1 = SOCIB
self.OPATH = []
self.FPATH = []
self.OPATH.append('http://opendap.puertos.es/thredds/dodsC/radar_local_')
self.FPATH.append('http://opendap.puertos.es/thredds/fileServer/radar_local_')
self.OPATH.append('http://thredds.socib.es/thredds/dodsC/observational/hf_radar/hf_radar_ibiza-scb_codarssproc001_L1_agg/files/')
self.FPATH.append('http://thredds.socib.es/thredds/fileServer/observational/hf_radar/hf_radar_ibiza-scb_codarssproc001_L1_agg/files/')
# Initialize to Gibraltar
self.PARAMS.STATION.set(self.HFR_LIST[1])
self.INIT_YEAR = self.HFR_INIT_YEAR[1]
self.INIT_MONTH = self.HFR_INIT_MONTH[1]
self.PROVIDER = self.HFR_PROVIDER[1]
if self.PROVIDER == 'Puertos':
self.pp = 0
elif self.PROVIDER == 'SOCIB':
self.pp = 1
# Get today's date: year, month, day and hour (set to zero):
TODAY = datetime.datetime.now().date()
self.THIS_YEAR = TODAY.year
self.THIS_MONTH = TODAY.month
self.THIS_DAY = TODAY.day
# Fill the default pull-down menu lists to select the data of the
# simulation.
self.YEAR_LIST = list(range(self.INIT_YEAR,self.THIS_YEAR+1))
self.MONTH_LIST = list(range(1,self.THIS_MONTH+1))
self.DAY_LIST = list(range(1,self.THIS_DAY+1))
self.HOUR_LIST = ('00','01','02','03','04','05','06','07','08','09','10',
'11','12','13','14','15','16','17','18','19','20','21','22','23')
# Window construction:
ttk.Label(self.frame,text='CODAR station',font='Helvetica 12 bold',padding=3).grid(row=0,column=0,sticky='w',padx=3)
self.stationbox = ttk.Combobox(self.frame,textvariable=self.PARAMS.STATION,width=14)
self.stationbox['values'] = self.HFR_LIST
self.stationbox.bind('<<ComboboxSelected>>',lambda e: self.station_selection())
self.stationbox.grid(row=1,column=0,sticky='w')
ttk.Label(self.frame,text='Year',font='Helvetica 12 bold',padding=3).grid(row=0,column=1,sticky='w',padx=3)
self.yearbox = ttk.Combobox(self.frame,textvariable=self.PARAMS.YEAR,width=8)
self.yearbox['values'] = self.YEAR_LIST
self.yearbox.bind('<<ComboboxSelected>>',lambda e: self.station_year())
self.yearbox.grid(row=1,column=1,sticky='w')
ttk.Label(self.frame,text='Month',font='Helvetica 12 bold',padding=3).grid(row=0,column=2,sticky='we',padx=3)
self.monthbox = ttk.Combobox(self.frame,textvariable=self.PARAMS.MONTH,width=8)
self.monthbox.bind('<<ComboboxSelected>>',lambda e: self.station_month())
self.monthbox['values'] = self.MONTH_LIST
self.monthbox.grid(row=1,column=2,sticky='w')
ttk.Label(self.frame,text='Day',font='Helvetica 12 bold',padding=3).grid(row=0,column=3,sticky='we',padx=3)
self.daybox = ttk.Combobox(self.frame,textvariable=self.PARAMS.DAY,width=8)
self.daybox.bind('<<ComboboxSelected>>',lambda e: self.station_day())
self.daybox['values'] = self.DAY_LIST
self.daybox.grid(row=1,column=3,sticky='w')
ttk.Label(self.frame,text='Hour',font='Helvetica 12 bold',padding=3).grid(row=0,column=4,sticky='we',padx=3)
self.hourbox = ttk.Combobox(self.frame,textvariable=self.PARAMS.HOUR,width=8)
self.hourbox.bind('<<ComboboxSelected>>',lambda e: self.station_hour())
self.hourbox['values'] = self.HOUR_LIST
self.hourbox.grid(row=1,column=4,sticky='w')
_wc = ttk.Button(self.frame,text='Cancel', \
command=self.cancel,padding=3)
_wc.grid(row=2,column=2,padx=3,pady=5,sticky='e')
_wc.bind("<Return>",lambda e: self.cancel())
_wd = ttk.Button(self.frame,text='Download', \
command=self.download,padding=3)
_wd.grid(row=2,column=3,padx=3,pady=5,sticky='e')
_wd.bind("<Return>",lambda e: self.download())
_wD = ttk.Button(self.frame,text='Done', \
command=self.done,padding=3)
_wD.grid(row=2,column=4,padx=3,pady=5,sticky='e')
_wD.bind("<Return>",lambda e: self.done())
self.frame.grid(padx=5,pady=5)
def cancel(self):
self.PARAMS.FILENAME.set('')
self.pp = None
self.PARAMS.xname = ''
self.PARAMS.yname = ''
self.PARAMS.tname = ''
self.PARAMS.uname = ''
self.PARAMS.vname = ''
self.master.destroy()
self.master = None
def filename(self,PATH):
if self.pp is None:
return ''
if self.PARAMS.STATION.get() == 'Ebre':
long_name = 'deltaebro'
short_name = 'EBRO'
elif self.PARAMS.STATION.get() == 'Gibraltar':
long_name = 'gibraltar'
short_name = 'GIBR'
elif self.PARAMS.STATION.get() == 'Galicia':
long_name = 'GALICIA'
short_name = 'GALI'
elif self.PARAMS.STATION.get() == 'Cadiz':
long_name = 'HUELVA'
short_name = 'TRAD'
elif self.PARAMS.STATION.get() == 'Eivissa':
long_name = 'ibiza'
short_name = 'ibiza'
else:
return ''
if self.pp == 0:
self.PARAMS.xname = 'lon'
self.PARAMS.yname = 'lat'
self.PARAMS.tname = 'time'
self.PARAMS.uname = 'u'
self.PARAMS.vname = 'v'
theurl = PATH + long_name + '/%d' % self.PARAMS.YEAR.get() + \
'/%02d/CODAR_' % self.PARAMS.MONTH.get() + \
short_name + \
'_%d_%02d_%02d_' % (self.PARAMS.YEAR.get(),self.PARAMS.MONTH.get(),self.PARAMS.DAY.get())+\
'%02d00.nc' % self.PARAMS.HOUR.get()
elif self.pp == 1:
self.PARAMS.xname = 'LON'
self.PARAMS.yname = 'LAT'
self.PARAMS.tname = 'time'
self.PARAMS.uname = 'U'
self.PARAMS.vname = 'V'
theurl = PATH + '%d/' % self.PARAMS.YEAR.get() + \
'dep0001_hf-radar-%s_scb-codarssproc001_L1_' % short_name + \
'%d-%02d.nc' % (self.PARAMS.YEAR.get(),self.PARAMS.MONTH.get())
return theurl
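  # For the Puertos provider (self.pp == 0) the constructed URL looks like,
  # e.g.:
  #   <PATH>gibraltar/2017/07/CODAR_GIBR_2017_07_15_0000.nc
  # and for the SOCIB provider (self.pp == 1), e.g.:
  #   <PATH>2017/dep0001_hf-radar-ibiza_scb-codarssproc001_L1_2017-07.nc
  # where <PATH> is the OPATH/FPATH prefix passed in and the dates shown are
  # illustrative.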
def download(self):
theurl = self.filename(self.FPATH[self.pp])
self.MESSAGE += 'Fetching '+theurl
#print('Fetching ',theurl)
#print('')
try:
filename = wget.download(theurl)
messagebox.showinfo(message='Download complete')
except:
messagebox.showinfo(message='Unable to download file')
def get_url(self):
theurl = self.filename(self.OPATH[self.pp])
return theurl
def done(self):
theurl = self.filename(self.OPATH[self.pp])
self.MESSAGE += 'Filename '+ theurl
#print('Filename ',theurl)
self.PARAMS.FILENAME.set(theurl)
self.master.destroy()
self.master = None
def get_filename(self):
return self.PARAMS.FILENAME.get()
def station_selection(self):
self.PARAMS.STATION.set(self.stationbox.get())
indx = self.HFR_LIST.index(self.PARAMS.STATION.get())
self.INIT_YEAR = self.HFR_INIT_YEAR[indx]
self.INIT_MONTH = self.HFR_INIT_MONTH[indx]
self.PROVIDER = self.HFR_PROVIDER[indx]
if self.PROVIDER == 'Puertos':
self.pp = 0
elif self.PROVIDER == 'SOCIB':
self.pp = 1
#print(self.INIT_YEAR,self.INIT_MONTH,self.PROVIDER,self.pp)
year = self.INIT_YEAR if self.PARAMS.YEAR.get() < self.INIT_YEAR else self.PARAMS.YEAR.get()
self.YEAR_LIST = list(range(self.INIT_YEAR,self.THIS_YEAR+1))
if year == self.THIS_YEAR:
month = self.THIS_MONTH if self.PARAMS.MONTH.get() > self.THIS_MONTH else self.PARAMS.MONTH.get()
self.MONTH_LIST = list(range(1,self.THIS_MONTH+1))
elif year == self.INIT_YEAR:
month = self.INIT_MONTH if self.PARAMS.MONTH.get() < self.INIT_MONTH else self.PARAMS.MONTH.get()
self.MONTH_LIST = list(range(self.INIT_MONTH,13))
else:
month = self.PARAMS.MONTH.get()
self.MONTH_LIST = list(range(1,13))
if year == self.THIS_YEAR and month == self.THIS_MONTH:
day = self.THIS_DAY if self.PARAMS.DAY.get() > self.THIS_DAY else self.PARAMS.DAY.get()
self.DAY_LIST = list(range(1,self.THIS_DAY+1))
else:
day = self.PARAMS.DAY.get()
self.DAY_LIST = list(range(1,monthrange(year,month)[1]+1))
self.PARAMS.YEAR.set(year)
self.PARAMS.MONTH.set(month)
self.PARAMS.DAY.set(day)
self.yearbox['values'] = self.YEAR_LIST
self.monthbox['values'] = self.MONTH_LIST
self.daybox['values'] = self.DAY_LIST
if self.PARAMS.STATION.get() == 'Eivissa':
self.daybox['state'] = 'disabled'
self.hourbox['state'] = 'disabled'
else:
self.daybox['state'] = '!disabled'
self.hourbox['state'] = '!disabled'
def station_year(self):
year = int(self.yearbox.get())
if year == self.THIS_YEAR:
month = self.THIS_MONTH if self.PARAMS.MONTH.get() > self.THIS_MONTH else self.PARAMS.MONTH.get()
self.MONTH_LIST = list(range(1,self.THIS_MONTH+1))
elif year == self.INIT_YEAR:
month = self.INIT_MONTH if self.PARAMS.MONTH.get() < self.INIT_MONTH else self.PARAMS.MONTH.get()
self.MONTH_LIST = list(range(self.INIT_MONTH,13))
else:
month = self.PARAMS.MONTH.get()
self.MONTH_LIST = list(range(1,13))
if year == self.THIS_YEAR and month == self.THIS_MONTH:
day = self.THIS_DAY if self.PARAMS.DAY.get() > self.THIS_DAY else self.PARAMS.DAY.get()
self.DAY_LIST = list(range(1,self.THIS_DAY+1))
else:
day = self.PARAMS.DAY.get()
self.DAY_LIST = list(range(1,monthrange(year,month)[1]+1))
self.PARAMS.YEAR.set(year)
self.PARAMS.MONTH.set(month)
self.PARAMS.DAY.set(day)
self.monthbox['values'] = self.MONTH_LIST
self.daybox['values'] = self.DAY_LIST
def station_month(self):
year = self.PARAMS.YEAR.get()
month = int(self.monthbox.get())
if year == self.THIS_YEAR and month == self.THIS_MONTH:
day = self.THIS_DAY if self.PARAMS.DAY.get() > self.THIS_DAY else self.PARAMS.DAY.get()
self.DAY_LIST = list(range(1,self.THIS_DAY+1))
else:
day = self.PARAMS.DAY.get()
self.DAY_LIST = list(range(1,monthrange(year,month)[1]+1))
self.PARAMS.MONTH.set(month)
self.PARAMS.DAY.set(day)
self.daybox['values'] = self.DAY_LIST
def station_day(self):
self.PARAMS.DAY.set(int(self.daybox.get()))
def station_hour(self):
self.PARAMS.HOUR.set(int(self.hourbox.get()))
def main():
root = tk.Tk()
root.title('Load CODAR File')
app = WinCodar(root)
root.mainloop()
if __name__ == '__main__':
main()
|
|
# coding: utf-8
from __future__ import division, unicode_literals
"""
Wrapper classes for Cif input and output from Structures.
"""
__author__ = "Shyue Ping Ong, Will Richards"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "3.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
import math
import re
import textwrap
import warnings
from collections import OrderedDict, deque
import six
from six.moves import zip, cStringIO
import numpy as np
from pymatgen.core.periodic_table import Element, Specie
from monty.io import zopen
from pymatgen.util.coord_utils import in_coord_list_pbc
from monty.string import remove_non_ascii
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.operations import SymmOp
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
class CifBlock(object):
maxlen = 70 # not quite 80 so we can deal with semicolons and things
def __init__(self, data, loops, header):
"""
Object for storing cif data. All data is stored in a single dictionary.
Data inside loops are stored in lists in the data dictionary, and
information on which keys are grouped together are stored in the loops
attribute.
Args:
data: dict or OrderedDict of data to go into the cif. Values should
be convertible to string, or lists of these if the key is
in a loop
loops: list of lists of keys, grouped by which loop they should
appear in
header: name of the block (appears after the data_ on the first
line)
"""
self.loops = loops
self.data = data
# AJ says: CIF Block names cannot be more than 75 characters or you
# get an Exception
self.header = header[:74]
def __getitem__(self, key):
return self.data[key]
def __str__(self):
"""
Returns the cif string for the data block
"""
s = ["data_{}".format(self.header)]
keys = self.data.keys()
written = []
for k in keys:
if k in written:
continue
for l in self.loops:
#search for a corresponding loop
if k in l:
s.append(self._loop_to_string(l))
written.extend(l)
break
if k not in written:
#k didn't belong to a loop
v = self._format_field(self.data[k])
if len(k) + len(v) + 3 < self.maxlen:
s.append("{} {}".format(k, v))
else:
s.extend([k, v])
return "\n".join(s)
def _loop_to_string(self, loop):
s = "loop_"
for l in loop:
s += '\n ' + l
for fields in zip(*[self.data[k] for k in loop]):
line = "\n"
for val in map(self._format_field, fields):
if val[0] == ";":
s += line + "\n" + val
line = "\n"
elif len(line) + len(val) + 2 < self.maxlen:
line += " " + val
else:
s += line
line = '\n ' + val
s += line
return s
def _format_field(self, v):
v = v.__str__().strip()
if len(v) > self.maxlen:
return ';\n' + textwrap.fill(v, self.maxlen) + '\n;'
#add quotes if necessary
if " " in v and not (v[0] == "'" and v[-1] == "'") \
and not (v[0] == '"' and v[-1] == '"'):
if "'" in v:
q = '"'
else:
q = "'"
v = q + v + q
return v
@classmethod
def _process_string(cls, string):
#remove comments
string = re.sub("#.*", "", string)
#remove empty lines
string = re.sub("^\s*\n", "", string, flags=re.MULTILINE)
#remove whitespaces at beginning of lines
string = re.sub("^\s*", "", string, flags=re.MULTILINE)
#remove non_ascii
string = remove_non_ascii(string)
#since line breaks in .cif files are mostly meaningless,
#break up into a stream of tokens to parse, rejoining multiline
#strings (between semicolons)
q = deque()
multiline = False
ml = []
#this regex splits on spaces, except when in quotes.
#it also ignores single quotes when surrounded by non-whitespace
#since they are sometimes used in author names
p = re.compile(r'''([^'"\s]+)|'((?:\S'\S|[^'])*)'|"([^"]*)"''')
for l in string.splitlines():
if multiline:
if l.startswith(";"):
multiline = False
q.append(" ".join(ml))
ml = []
l = l[1:].strip()
else:
ml.append(l)
continue
if l.startswith(";"):
multiline = True
ml.append(l[1:].strip())
else:
for s in p.findall(l):
q.append(''.join(s))
return q
@classmethod
def from_string(cls, string):
q = cls._process_string(string)
header = q.popleft()[5:]
data = OrderedDict()
loops = []
while q:
s = q.popleft()
if s == "_eof":
break
if s.startswith("_"):
data[s] = q.popleft()
elif s.startswith("loop_"):
columns = []
items = []
while q:
s = q[0]
if s.startswith("loop_") or not s.startswith("_"):
break
columns.append(q.popleft())
data[columns[-1]] = []
while q:
s = q[0]
if s.startswith("loop_") or s.startswith("_"):
break
items.append(q.popleft())
n = len(items) // len(columns)
                assert len(items) % len(columns) == 0
loops.append(columns)
for k, v in zip(columns * n, items):
data[k].append(v.strip())
elif s.strip() != "":
warnings.warn("Possible error in cif format"
" error at {}".format(s.strip()))
return cls(data, loops, header)
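    # A small round-trip sketch (the block content is illustrative):
    #
    #     cb = CifBlock.from_string("data_quartz\n_cell_length_a   4.916\n")
    #     cb.header             # -> 'quartz'
    #     cb['_cell_length_a']  # -> '4.916'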
class CifFile(object):
"""
Reads and parses CifBlocks from a .cif file
"""
def __init__(self, data, orig_string=None):
"""
Args:
data: OrderedDict of CifBlock objects
string: The original cif string
"""
self.data = data
self.orig_string = orig_string
def __str__(self):
s = ["%s" % v for v in self.data.values()]
comment = "#generated using pymatgen\n"
return comment + "\n".join(s)+"\n"
@classmethod
def from_string(cls, string):
d = OrderedDict()
for x in re.split("^data_", "x\n"+string,
flags=re.MULTILINE | re.DOTALL)[1:]:
c = CifBlock.from_string("data_"+x)
d[c.header] = c
return cls(d, string)
@classmethod
def from_file(cls, filename):
with zopen(filename, "rt") as f:
return cls.from_string(f.read())
class CifParser(object):
"""
Parses a cif file
Args:
filename (str): Cif filename. bzipped or gzipped cifs are fine too.
occupancy_tolerance (float): If total occupancy of a site is between 1
and occupancy_tolerance, the occupancies will be scaled down to 1.
"""
def __init__(self, filename, occupancy_tolerance=1.):
self._occupancy_tolerance = occupancy_tolerance
if isinstance(filename, six.string_types):
self._cif = CifFile.from_file(filename)
else:
self._cif = CifFile.from_string(filename.read())
@staticmethod
def from_string(cif_string, occupancy_tolerance=1.):
"""
Creates a CifParser from a string.
Args:
cif_string (str): String representation of a CIF.
occupancy_tolerance (float): If total occupancy of a site is
between 1 and occupancy_tolerance, the occupancies will be
scaled down to 1.
Returns:
CifParser
"""
stream = cStringIO(cif_string)
return CifParser(stream, occupancy_tolerance)
def _unique_coords(self, coord_in):
"""
Generate unique coordinates using coord and symmetry positions.
"""
coords = []
for op in self.symmetry_operations:
coord = op.operate(coord_in)
coord = np.array([i - math.floor(i) for i in coord])
if not in_coord_list_pbc(coords, coord, atol=1e-3):
coords.append(coord)
return coords
def _get_structure(self, data, primitive):
"""
Generate structure from part of the cif.
"""
lengths = [str2float(data["_cell_length_" + i])
for i in ["a", "b", "c"]]
angles = [str2float(data["_cell_angle_" + i])
for i in ["alpha", "beta", "gamma"]]
lattice = Lattice.from_lengths_and_angles(lengths, angles)
try:
sympos = data["_symmetry_equiv_pos_as_xyz"]
except KeyError:
try:
sympos = data["_symmetry_equiv_pos_as_xyz_"]
except KeyError:
warnings.warn("No _symmetry_equiv_pos_as_xyz type key found. "
"Defaulting to P1.")
sympos = ['x, y, z']
self.symmetry_operations = [SymmOp.from_xyz_string(s) for s in sympos]
def parse_symbol(sym):
m = re.search("([A-Z][a-z]*)", sym)
if m:
return m.group(1)
return ""
try:
oxi_states = {
data["_atom_type_symbol"][i]:
str2float(data["_atom_type_oxidation_number"][i])
for i in range(len(data["_atom_type_symbol"]))}
except (ValueError, KeyError):
oxi_states = None
coord_to_species = OrderedDict()
for i in range(len(data["_atom_site_type_symbol"])):
symbol = parse_symbol(data["_atom_site_type_symbol"][i])
if oxi_states is not None:
el = Specie(symbol,
oxi_states[data["_atom_site_type_symbol"][i]])
else:
el = Element(symbol)
x = str2float(data["_atom_site_fract_x"][i])
y = str2float(data["_atom_site_fract_y"][i])
z = str2float(data["_atom_site_fract_z"][i])
try:
occu = str2float(data["_atom_site_occupancy"][i])
except (KeyError, ValueError):
occu = 1
if occu > 0:
coord = (x, y, z)
if coord not in coord_to_species:
coord_to_species[coord] = {el: occu}
else:
coord_to_species[coord][el] = occu
allspecies = []
allcoords = []
for coord, species in coord_to_species.items():
coords = self._unique_coords(coord)
allcoords.extend(coords)
allspecies.extend(len(coords) * [species])
#rescale occupancies if necessary
for species in allspecies:
totaloccu = sum(species.values())
if 1 < totaloccu <= self._occupancy_tolerance:
for key, value in six.iteritems(species):
species[key] = value / totaloccu
struct = Structure(lattice, allspecies, allcoords)
if primitive:
struct = struct.get_primitive_structure().get_reduced_structure()
return struct.get_sorted_structure()
def get_structures(self, primitive=True):
"""
Return list of structures in CIF file. primitive boolean sets whether a
conventional cell structure or primitive cell structure is returned.
Args:
primitive (bool): Set to False to return conventional unit cells.
Defaults to True.
Returns:
List of Structures.
"""
structures = []
for d in self._cif.data.values():
try:
structures.append(self._get_structure(d, primitive))
except KeyError as exc:
# Warn the user (Errors should never pass silently)
# A user reported a problem with cif files produced by Avogadro
# in which the atomic coordinates are in Cartesian coords.
warnings.warn(str(exc))
return structures
def as_dict(self):
d = OrderedDict()
for k, v in self._cif.data.items():
d[k] = {}
for k2, v2 in v.data.items():
d[k][k2] = v2
return d
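    # Typical usage sketch (the filename is illustrative):
    #
    #     parser = CifParser("LiFePO4.cif")
    #     structure = parser.get_structures(primitive=True)[0]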
class CifWriter:
"""
A wrapper around CifFile to write CIF files from pymatgen structures.
Args:
struct (Structure): A pymatgen.core.structure.Structure object.
find_spacegroup (bool): Whether to find spacegroup.
If so, spacegroup information is written.
"""
def __init__(self, struct, find_spacegroup=False, symprec=None):
"""
Args:
struct (Structure): structure to write
find_spacegroup (bool): whether to try to determine the spacegroup
            symprec (float): If not None, finds the symmetry of the structure
                and writes the CIF with symmetry information. Passes symprec
                to the SpacegroupAnalyzer.
"""
format_str = "{:.8f}"
block = OrderedDict()
loops = []
latt = struct.lattice
comp = struct.composition
no_oxi_comp = comp.element_composition
spacegroup = ("P 1", 1)
if find_spacegroup:
sf = SpacegroupAnalyzer(struct, 0.001)
spacegroup = (sf.get_spacegroup_symbol(),
sf.get_spacegroup_number())
block["_symmetry_space_group_name_H-M"] = spacegroup[0]
for cell_attr in ['a', 'b', 'c']:
block["_cell_length_" + cell_attr] = format_str.format(
getattr(latt, cell_attr))
for cell_attr in ['alpha', 'beta', 'gamma']:
block["_cell_angle_" + cell_attr] = format_str.format(
getattr(latt, cell_attr))
block["_symmetry_Int_Tables_number"] = spacegroup[1]
block["_chemical_formula_structural"] = no_oxi_comp.reduced_formula
block["_chemical_formula_sum"] = no_oxi_comp.formula
block["_cell_volume"] = latt.volume.__str__()
reduced_comp = no_oxi_comp.reduced_composition
el = no_oxi_comp.elements[0]
amt = comp[el]
fu = int(amt / reduced_comp[Element(el.symbol)])
block["_cell_formula_units_Z"] = str(fu)
if symprec is None:
block["_symmetry_equiv_pos_site_id"] = ["1"]
block["_symmetry_equiv_pos_as_xyz"] = ["x, y, z"]
else:
sf = SpacegroupAnalyzer(struct, symprec)
ops = [op.as_xyz_string() for op in sf.get_symmetry_operations()]
block["_symmetry_equiv_pos_site_id"] = \
["%d" % i for i in range(1, len(ops) + 1)]
block["_symmetry_equiv_pos_as_xyz"] = ops
loops.append(["_symmetry_equiv_pos_site_id",
"_symmetry_equiv_pos_as_xyz"])
contains_oxidation = True
try:
symbol_to_oxinum = OrderedDict([
                (str(el), float(el.oxi_state))
for el in sorted(comp.elements)])
except AttributeError:
symbol_to_oxinum = OrderedDict([(el.symbol, 0) for el in
sorted(comp.elements)])
contains_oxidation = False
if contains_oxidation:
block["_atom_type_symbol"] = symbol_to_oxinum.keys()
block["_atom_type_oxidation_number"] = symbol_to_oxinum.values()
loops.append(["_atom_type_symbol", "_atom_type_oxidation_number"])
atom_site_type_symbol = []
atom_site_symmetry_multiplicity = []
atom_site_fract_x = []
atom_site_fract_y = []
atom_site_fract_z = []
atom_site_label = []
atom_site_occupancy = []
count = 1
if symprec is None:
for site in struct:
for sp, occu in site.species_and_occu.items():
                    atom_site_type_symbol.append(str(sp))
atom_site_symmetry_multiplicity.append("1")
atom_site_fract_x.append("{0:f}".format(site.a))
atom_site_fract_y.append("{0:f}".format(site.b))
atom_site_fract_z.append("{0:f}".format(site.c))
atom_site_label.append("{}{}".format(sp.symbol, count))
                    atom_site_occupancy.append(str(occu))
count += 1
else:
for group in sf.get_symmetrized_structure().equivalent_sites:
site = group[0]
for sp, occu in site.species_and_occu.items():
                    atom_site_type_symbol.append(str(sp))
atom_site_symmetry_multiplicity.append("%d" % len(group))
atom_site_fract_x.append("{0:f}".format(site.a))
atom_site_fract_y.append("{0:f}".format(site.b))
atom_site_fract_z.append("{0:f}".format(site.c))
atom_site_label.append("{}{}".format(sp.symbol, count))
                    atom_site_occupancy.append(str(occu))
count += 1
block["_atom_site_type_symbol"] = atom_site_type_symbol
block["_atom_site_label"] = atom_site_label
block["_atom_site_symmetry_multiplicity"] = \
atom_site_symmetry_multiplicity
block["_atom_site_fract_x"] = atom_site_fract_x
block["_atom_site_fract_y"] = atom_site_fract_y
block["_atom_site_fract_z"] = atom_site_fract_z
block["_atom_site_occupancy"] = atom_site_occupancy
loops.append(["_atom_site_type_symbol",
"_atom_site_label",
"_atom_site_symmetry_multiplicity",
"_atom_site_fract_x",
"_atom_site_fract_y",
"_atom_site_fract_z",
"_atom_site_occupancy"])
d = OrderedDict()
d[comp.reduced_formula] = CifBlock(block, loops, comp.reduced_formula)
self._cf = CifFile(d)
    def __str__(self):
        """
        Returns the CIF as a string.
        """
        return str(self._cf)
def write_file(self, filename):
"""
Write the cif file.
"""
with zopen(filename, "wt") as f:
            f.write(str(self))
def str2float(text):
"""
Remove uncertainty brackets from strings and return the float.
"""
return float(re.sub("\(.+\)", "", text))
|
|
# Copyright (c) 2010-2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves.urllib.parse import quote
from swift.common.middleware.copy import \
_check_copy_from_header as check_copy_from_header, \
_check_destination_header as check_destination_header, \
_copy_headers as copy_headers
from swift.common.swob import HTTPBadRequest, HTTPUnauthorized, \
HTTPMethodNotAllowed, HTTPPreconditionFailed, HTTPForbidden
from swift.common.utils import config_true_value, public, FileLikeIter, \
list_from_csv, split_path
from swift.common.middleware.acl import clean_acl
from swift.common.wsgi import make_subrequest
from swift.proxy.controllers.base import get_account_info
from storlets.swift_middleware.handlers.base import StorletBaseHandler, \
NotStorletRequest, NotStorletExecution
CONDITIONAL_KEYS = ['IF_MATCH', 'IF_NONE_MATCH', 'IF_MODIFIED_SINCE',
'IF_UNMODIFIED_SINCE']
REFERER_PREFIX = 'storlets'
class StorletProxyHandler(StorletBaseHandler):
def __init__(self, request, conf, gateway_conf, app, logger):
super(StorletProxyHandler, self).__init__(
request, conf, gateway_conf, app, logger)
self.storlet_containers = [self.storlet_container,
self.storlet_dependency]
self.agent = 'ST'
self.extra_sources = []
# A very initial hook for blocking requests
self._should_block(request)
if not self.is_storlet_request:
# This is not storlet-related request, so pass it
raise NotStorletRequest()
        # In the proxy server, the storlet handler validates whether storlets
        # are enabled for this account.
account_meta = get_account_info(self.request.environ,
self.app)['meta']
storlets_enabled = account_meta.get('storlet-enabled', 'False')
if not config_true_value(storlets_enabled):
msg = 'Account disabled for storlets'
self.logger.debug(msg)
raise HTTPBadRequest(msg.encode('utf8'), request=self.request)
if self.is_storlet_acl_update:
self.acl_string = self._validate_acl_update(self.request)
elif self.is_storlet_object_update:
# TODO(takashi): We have to validate metadata in COPY case
self._validate_registration(self.request)
raise NotStorletExecution()
elif self.is_storlet_execution:
self._setup_gateway()
else:
raise NotStorletExecution()
def _should_block(self, request):
        # Currently, we have only one reason to block requests at such an
        # early stage of the processing: we block requests whose referer
        # contains the internal prefix REFERER_PREFIX.
if request.referer and REFERER_PREFIX in request.referer:
msg = 'Referrer containing %s is not allowed' % REFERER_PREFIX
self.logger.debug(msg)
raise HTTPForbidden(msg.encode('utf8'), request=self.request)
def _parse_vaco(self):
return self.request.split_path(3, 4, rest_with_last=True)
def is_proxy_runnable(self, resp=None):
"""
Check if the storlet should be executed at proxy server
:param resp: swob.Response instance
:return: Whether we should execute the storlet at proxy
"""
        # SLO / proxy-only case:
        # the storlet is to be invoked now at the proxy side:
        slo_response = False
        if resp:
            slo_response = self.is_slo_response(resp)
        runnable = any(
            [self.execute_on_proxy,
             self.execute_range_on_proxy,
             slo_response])
return runnable
@property
def is_storlet_request(self):
return (self.is_storlet_execution or self.is_storlet_object_update
or self.is_storlet_acl_update)
@property
def is_storlet_object_update(self):
return (self.container in self.storlet_containers and self.obj
and self.request.method in ['PUT', 'POST'])
@property
def is_storlet_acl_update(self):
return (self.request.method == 'POST' and not self.obj and
'X-Storlet-Container-Read' in self.request.headers)
@property
def is_put_copy_request(self):
return 'X-Copy-From' in self.request.headers
def _parse_storlet_params(self, headers):
"""
Parse storlet parameters from storlet/dependency object metadata
:returns: dict of storlet parameters
"""
params = dict()
for key in headers:
if key.startswith('X-Object-Meta-Storlet'):
params[key[len('X-Object-Meta-Storlet-'):]] = headers[key]
return params
def _validate_registration(self, req):
"""
Validate parameters about storlet/dependency object when registrating
:params req: swob.Request instance
:raises ValueError: If some parameters are wrong
"""
params = self._parse_storlet_params(req.headers)
try:
if self.container == self.storlet_container:
self.logger.debug('updating object in storlet container. '
'Sanity check')
self.gateway_class.validate_storlet_registration(
params, self.obj)
else:
self.logger.debug('updating object in storlet dependency. '
'Sanity check')
self.gateway_class.validate_dependency_registration(
params, self.obj)
except ValueError as e:
self.logger.exception('Bad parameter')
raise HTTPBadRequest(e.args[0].encode('utf8'))
def _build_acl_string(self, user, storlet):
acl_string = '%s.%s_%s' % (REFERER_PREFIX, user, storlet)
return acl_string
def _validate_acl_update(self, req):
"""
Validate the request has the necessary headers for a
storlet ACL update
:params req: swob.Request instance
:return: the resulting acl string that hould be added
:raises HTTPBadRequest: If a header is missing or mulformed
"""
# Make sure we are not meddling with the storlet containers
if self.container in self.storlet_containers:
msg = b'storlet ACL update cannot be a storlet container'
raise HTTPBadRequest(msg)
# Make sure the expected headers are supplied
user_name = req.headers.get("X-Storlet-Container-Read", None)
storlet_name = req.headers.get("X-Storlet-Name", None)
if not user_name or not storlet_name:
msg = b'storlet ACL update request is missing a mandatory header'
raise HTTPBadRequest(msg)
# Make sure the resulting acl is valid
acl_string = '.r:%s' % self._build_acl_string(user_name, storlet_name)
try:
clean_acl('X-Container-Read', acl_string)
except ValueError as err:
msg = ('storlet ACL update request has invalid values %s'
% str(err))
raise HTTPBadRequest(msg.encode('utf8'))
# Make sure the resulting acl permits a single entity
if ',' in acl_string:
            msg = 'storlet ACL update request has ' \
                  'malformed storlet or user name'
raise HTTPBadRequest(msg.encode('utf8'))
# The request is valid. Keep the ACL string
return acl_string
def verify_access_to_storlet(self):
"""
Verify access to the storlet object
:return: storlet parameters
:raises HTTPUnauthorized: If it fails to verify access
"""
sobj = self.request.headers.get('X-Run-Storlet')
spath = '/'.join(['', self.api_version, self.account,
self.storlet_container, sobj])
self.logger.debug('Verify access to %s' % spath)
new_env = dict(self.request.environ)
if 'HTTP_TRANSFER_ENCODING' in new_env:
del new_env['HTTP_TRANSFER_ENCODING']
for key in CONDITIONAL_KEYS:
env_key = 'HTTP_' + key
if env_key in new_env:
del new_env[env_key]
auth_token = self.request.headers.get('X-Auth-Token')
storlet_req = make_subrequest(
new_env, 'HEAD', spath,
headers={'X-Auth-Token': auth_token},
swift_source=self.agent)
resp = storlet_req.get_response(self.app)
if not resp.is_success:
msg = 'Failed to verify access to the storlet. ' \
'Either the storlet does not exist or ' \
'you are not authorized to run the ' \
'storlet.'
raise HTTPUnauthorized(msg.encode('utf8'),
request=self.request)
params = self._parse_storlet_params(resp.headers)
for key in ['Content-Length', 'X-Timestamp']:
params[key] = resp.headers[key]
return params
def handle_request(self):
if hasattr(self, self.request.method):
try:
handler = getattr(self, self.request.method)
getattr(handler, 'publicly_accessible')
except AttributeError:
# TODO(kota_): add allowed_method list to Allow header
return HTTPMethodNotAllowed(request=self.request)
return handler()
else:
raise HTTPMethodNotAllowed(request=self.request)
def _call_gateway(self, resp):
sreq = self._build_storlet_request(self.request, resp.headers,
resp.app_iter)
return self.gateway.invocation_flow(sreq, self.extra_sources)
def augment_storlet_request(self, params):
"""
Add to request the storlet parameters to be used in case the request
is forwarded to the data node (GET case)
:param params: parameters to be augmented to request
"""
for key, val in params.items():
self.request.headers['X-Storlet-' + key] = val
def gather_extra_sources(self):
        # (kota_): I know this is a crazy hack to set the resp dynamically;
        # it is a temporary way to provide the capability and absolutely
        # needs a cleaner, more generic solution.
if 'X-Storlet-Extra-Resources' in self.request.headers:
try:
resources = list_from_csv(
self.request.headers['X-Storlet-Extra-Resources'])
                # resource should be /container/object
for resource in resources:
                    # sanity check; an invalid path raises ValueError
swift_path = ['', self.api_version, self.account]
swift_path.extend(split_path(resource, 2, 2, True))
sub_req = make_subrequest(
self.request.environ,
'GET', '/'.join(swift_path),
agent=self.agent)
sub_resp = sub_req.get_response(self.app)
                    # TODO(kota_): run this in another green thread,
                    # ideally in parallel with the primary GET
self.extra_sources.append(
self._build_storlet_request(
self.request, sub_resp.headers,
sub_resp.app_iter))
except ValueError:
raise HTTPBadRequest(
                    'X-Storlet-Extra-Resources must be a csv with '
                    '/container/object format')
@public
def GET(self):
"""
GET handler on Proxy
"""
if self.is_range_request:
msg = 'Storlet execution with range header is not supported'
raise HTTPBadRequest(msg.encode('utf8'),
request=self.request)
params = self.verify_access_to_storlet()
self.augment_storlet_request(params)
# Range requests:
# Range requests are not allowed with storlet invocation.
# To run a storlet on a selected input range use the X-Storlet-Range
# header.
# If the range request is to be executed on the proxy we
# create an HTTP Range request based on X-Storlet-Range
# and let the request continue so that we get the required
# range as input to the storlet that would get executed on
# the proxy.
if self.execute_range_on_proxy:
self.request.headers['Range'] = \
self.request.headers['X-Storlet-Range']
original_resp = self.request.get_response(self.app)
if original_resp.status_int == 403:
            # The user is unauthorized to read from the container.
            # It might be, however, that the user is permitted
            # to read given that the required storlet is executed.
            if not self.request.environ.get('HTTP_X_USER_NAME'):
                # The requester is not even an authenticated user.
                self.logger.info(('Storlet run request by an'
                                  ' unauthenticated user'))
                raise HTTPUnauthorized(b'User is not authorized')
user_name = self.request.environ['HTTP_X_USER_NAME']
storlet_name = self.request.headers['X-Run-Storlet']
internal_referer = '//%s' % self._build_acl_string(user_name,
storlet_name)
self.logger.info(('Got 403 for original GET %s request. '
'Trying with storlet internal referer %s' %
(self.path, internal_referer)))
self.request.referrer = self.request.referer = internal_referer
original_resp = self.request.get_response(self.app)
if original_resp.is_success:
            # The GET request may be an SLO object GET request.
            # The simplest solution would be to invoke a HEAD
            # for every GET request to test if we are in the SLO case.
            # In order to save the HEAD overhead we implemented
            # a slightly more involved flow:
            # At the proxy side, we augment the request with Storlet stuff
            # and let the request flow.
            # At the object side, we invoke the plain (non Storlet)
            # request, test if we are in the SLO case,
            # and invoke the Storlet only in the non-SLO case.
            # Back at the proxy side, we test whether we received the
            # full object to detect the SLO case,
            # and invoke the Storlet only in the SLO case.
if self.is_proxy_runnable(original_resp):
self.gather_extra_sources()
return self.apply_storlet(original_resp)
else:
# Non proxy GET case: Storlet was already invoked at
# object side
# TODO(kota_): Do we need to pop the Transfer-Encoding/
# Content-Length header from the resp?
if 'Transfer-Encoding' in original_resp.headers:
original_resp.headers.pop('Transfer-Encoding')
original_resp.headers['Content-Length'] = None
return original_resp
else:
            # In the failure case there is nothing to do, just return the
            # original response
return original_resp
def _validate_copy_request(self):
        # We currently block cross-account copy and X-Fresh-Metadata
unsupported_headers = ['X-Copy-From-Account',
'Destination-Account',
'X-Fresh-Metadata']
for header in unsupported_headers:
if header in self.request.headers:
msg = 'Storlet on copy with %s is not supported' % header
raise HTTPBadRequest(msg.encode('utf8'))
def handle_put_copy_response(self, app_iter):
self._remove_storlet_headers(self.request.headers)
if 'CONTENT_LENGTH' in self.request.environ:
self.request.environ.pop('CONTENT_LENGTH')
self.request.headers['Transfer-Encoding'] = 'chunked'
self.request.environ['wsgi.input'] = FileLikeIter(app_iter)
return self.request.get_response(self.app)
def _remove_storlet_headers(self, headers):
for key in list(headers):
if (key.startswith('X-Storlet-') or
key.startswith('X-Object-Meta-Storlet') or
key == 'X-Run-Storlet'):
headers.pop(key)
def base_handle_copy_request(self, src_container, src_obj,
dest_container, dest_object):
"""
Unified path for:
PUT verb with X-Copy-From and
COPY verb with Destination
"""
# Get an iterator over the source object
source_path = '/%s/%s/%s/%s' % (self.api_version, self.account,
src_container, src_obj)
source_req = self.request.copy_get()
source_req.headers.pop('X-Backend-Storage-Policy-Index', None)
source_req.path_info = source_path
        # In the copy case we can run either on the proxy
        # or on the object node: e.g. if the object is an SLO or we have
        # extra resources, we run on the proxy; otherwise we run on the
        # object node. Handling on the proxy means that:
        # 0. The invocation itself happens on the proxy
        # 1. The object GET request sent to the object node
        #    should be called without 'X-Run-Storlet'
        # 2. The metadata in the response from the object node
        #    should not be prefixed with X-Object-Meta
if self.is_proxy_runnable():
source_req.headers.pop('X-Run-Storlet', None)
src_resp = source_req.get_response(self.app)
copy_headers(src_resp.headers, self.request.headers)
# We check here again, because src_resp may reveal that
# the object is an SLO and so even if the above check was
# False, we now may need to run on proxy
if self.is_proxy_runnable(src_resp):
# We need to run on proxy.
# Do it and fixup the user metadata headers.
sreq = self._build_storlet_request(self.request, src_resp.headers,
src_resp.app_iter)
self.gather_extra_sources()
sresp = self.gateway.invocation_flow(sreq, self.extra_sources)
data_iter = sresp.data_iter
self._set_metadata_in_headers(self.request.headers,
sresp.user_metadata)
else:
data_iter = src_resp.app_iter
resp = self.handle_put_copy_response(data_iter)
acct, path = src_resp.environ['PATH_INFO'].split('/', 3)[2:4]
resp.headers['X-Storlet-Generated-From-Account'] = quote(acct)
resp.headers['X-Storlet-Generated-From'] = quote(path)
if 'last-modified' in src_resp.headers:
resp.headers['X-Storlet-Generated-From-Last-Modified'] = \
src_resp.headers['last-modified']
return resp
@public
def PUT(self):
"""
PUT handler on Proxy
"""
params = self.verify_access_to_storlet()
self.augment_storlet_request(params)
if self.is_put_copy_request:
self._validate_copy_request()
src_container, src_obj = check_copy_from_header(self.request)
dest_container = self.container
dest_object = self.obj
self.request.headers.pop('X-Copy-From', None)
return self.base_handle_copy_request(src_container, src_obj,
dest_container, dest_object)
# TODO(takashi): chunk size should be configurable
reader = self.request.environ['wsgi.input'].read
body_iter = iter(lambda: reader(65536), b'')
sreq = self._build_storlet_request(
self.request, self.request.headers, body_iter)
sresp = self.gateway.invocation_flow(sreq)
self._set_metadata_in_headers(self.request.headers,
sresp.user_metadata)
return self.handle_put_copy_response(sresp.data_iter)
@public
def COPY(self):
"""
COPY handler on Proxy
"""
if not self.request.headers.get('Destination'):
return HTTPPreconditionFailed(request=self.request,
body='Destination header required')
params = self.verify_access_to_storlet()
self.augment_storlet_request(params)
self._validate_copy_request()
dest_container, dest_object = check_destination_header(self.request)
# re-write the existing request as a PUT instead of creating a new one
# TODO(eranr): do we want a new sub_request or re-write existing one as
# we do below. See proxy obj controller COPY.
self.request.method = 'PUT'
self.request.path_info = '/v1/%s/%s/%s' % \
(self.account, dest_container, dest_object)
self.request.headers['Content-Length'] = 0
del self.request.headers['Destination']
return self.base_handle_copy_request(self.container, self.obj,
dest_container, dest_object)
@public
def POST(self):
"""
POST handler on Proxy
Deals with storlet ACL updates
"""
# Get the current container's ACL
# We perform a sub request rather than get_container_info
# since get_container_info bypasses authorization, and we
# prefer to be on the safe side.
sub_req = make_subrequest(self.request.environ,
'HEAD', self.path,
agent=self.agent)
sub_resp = sub_req.get_response(self.app)
if sub_resp.status_int != 204:
self.logger.info("Failed to retrieve container metadata")
return HTTPUnauthorized(('Unauthorized to get or modify '
'the container ACL'))
# Add the requested ACL
read_acl = sub_resp.headers.get("X-Container-Read")
if read_acl:
new_read_acl = ','.join([read_acl, self.acl_string])
else:
new_read_acl = self.acl_string
self.request.headers['X-Container-Read'] = new_read_acl
resp = self.request.get_response(self.app)
return resp
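# A minimal, hedged sketch of the referer-based ACL entry used for storlet ACL
# updates; it mirrors _build_acl_string() plus the '.r:' prefix added in
# _validate_acl_update(). The user and storlet names below are made up, and the
# block runs only when this module itself is executed as __main__.
if __name__ == "__main__":
    user_name = 'tester'                # hypothetical account user
    storlet_name = 'compress-1.0.jar'   # hypothetical storlet object name
    acl_entry = '.r:%s.%s_%s' % (REFERER_PREFIX, user_name, storlet_name)
    print(acl_entry)                    # -> .r:storlets.tester_compress-1.0.jar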
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 9 17:09:09 2016
@author: Tobias Jachowski
"""
import hashlib
import inspect
import numpy as np
from .. import config as cf
from .. import traces as tc
from .region import Region
class Record(Region):
"""
A Record consists of:
- a filename, containing the name of the filename
- a parafile, containing the name of the parafile
- a calibration object, containing dsurf, focalshift and radius
(and the height dependent stiffnesses and displacement sensitivities)
- the data loaded from the filename
- the parameters loaded from the parafile
- and some methods for convenient access of the data and para.
A Record contains some basic modifications, like a static and fixed offset,
conversion, and inversion factor. The offset, inversion, and conversion
factors depend on the parameters given by the experimental setup and
software, only.
The offset factor should be applied only, if the graphical user interface
of the OT has a different value than the final value stored in the data.
The inversion factor should be applied only, if the values e.g. of the
position are moving in an inverted fashion.
The conversion factor should be applied only, if the values e.g. of the
position are stored in Volts. The position values are needed to be in um.
The modifications are needed to have the same values as used during the
calibration and need to be set accordingly.
For future development one should consider to implement the OT control
software such that whereever possible, Volts are converted to SI units and
a common convention for the directions (left/right, down/up is neg/pos) is
followed.
"""
def __init__(self, datasource, traces, calibration, offset=None,
conversion=None, inversion=None, **kwargs):
"""
Parameters
----------
datasource : DataSource
            The object to retrieve data from. This object has to have the
            method as_array() and the attribute `name`.
        offset : dictionary of trace: value pairs
            value can either be a number or the name of a numpy function
            (e.g. 'median') that is applied to the raw data of that trace
"""
super().__init__(max_parents=1, caching=True, **kwargs)
if datasource is None:
raise TypeError("Record missing the required positional argument "
"'datasource'.")
self._datasource = datasource
self._dataident = None
# Instance of Calibration used to get dsurf, radius and focalshift
if calibration is None:
raise TypeError("Record missing the required positional argument "
"'calibraion'.")
self.calibration = calibration
# Initialize self._shape before trying to set offset, inversion, or
# conversion, which in turn need to (indirectly) access self._shape.
        # The _v_ prefix marks a ZODB volatile (non-persisted) attribute.
self._v_data_cached = self._raw_data
raw_data = self._v_data_cached
self._shape = raw_data.shape
# Check if there are as many descriptions as traces in the data
traces = tc.normalize(traces)
if len(traces) != self.num_traces:
raise TypeError('Record argument "traces" has {} elements, but '
'"datasource" has {} data columns. Please, '
'provide the argument "traces" with a list of '
'names for every data column, or edit the '
'"cfgfile", accordingly'.format(len(traces),
self.num_traces))
self._traces = traces
# Initialize values of offset (0), inversion (1), and conversion (1)
# for all available traces
self._offset = np.zeros(self.num_traces)
self._conversion = np.ones(self.num_traces)
self._inversion = np.ones(self.num_traces)
        # Modify offset, conversion and inversion according to the optional
        # parameters; a value of None (or a malformed mapping) is silently
        # ignored by the try/except blocks below.
try:
for trace, value in offset.items():
if isinstance(value, str):
trace_idx = self.traces_to_idx(trace)
value = getattr(np, value)(raw_data[:, trace_idx], axis=0)
self.set_offset(trace, value)
except:
pass
try:
for trace, value in conversion.items():
self.set_conversion(trace, value)
except:
pass
try:
for trace, value in inversion.items():
self.set_inversion(trace, value)
except:
pass
# reset cache according to modified offset, inversion, and conversion.
# ZODB volatile.
self._v_data_cached = ((raw_data - self.offset) * self.inversion
* self.conversion)
def _get_data_uncached(self, samples, traces_idx, copy=True):
"""
Returns the data of Record, according to self.filename.
Copy is always True for Record, because every time it reads in the data
it createas a new numpy.ndarray.
"""
# read in data
data = ((self._raw_data[samples, traces_idx] - self.offset[traces_idx])
* self.inversion[traces_idx]
* self.conversion[traces_idx])
# TODO: Implement different samplingrates for different traces. Pandas?
return data
@property
def _raw_data(self):
"""
Reads and returns uncorrected, uncached data (no offset, no inversion,
no conversion).
Additionally, sets self._shape according to _raw_data.shape and creates
or compares previously generated hashdigest of the data.
"""
data = self.datasource.as_array()
if not isinstance(data, np.ndarray):
raise TypeError("The data you try to load is no numpy array!")
if data.ndim != 2:
raise ValueError("The data array you try to load does not have 2 "
"dimensions!")
data = data.copy(order='C')
ident = hashlib.md5(data).hexdigest()
if self._dataident is None:
self._dataident = ident
elif self._dataident != ident:
raise ValueError("The data you try to load from '%s' has changed "
"since the last time. Please, check the "
"datasource of the record '%s'."
% (self.datasource.name, self.name))
return data
@property
def offset(self):
return self._offset.copy()
@property
def inversion(self):
return self._inversion.copy()
@property
def conversion(self):
return self._conversion.copy()
def set_offset(self, trace, offset):
"""
        Set the offset of a selected trace. The offset should be given in
        units of the raw data, i.e. independent of inversion and conversion.
"""
trace = self.traces_to_idx(trace)
self._offset[trace] = offset
# Inform descendants of change
self.set_changed()
def set_inversion(self, trace, invert=True):
trace = self.traces_to_idx(trace)
        self._inversion[trace] = -1 if invert else 1
# Inform descendants of change
self.set_changed()
def set_conversion(self, trace, conversion):
trace = self.traces_to_idx(trace)
self._conversion[trace] = conversion
# Inform descendants of change
self.set_changed()
@property
def calibration(self):
return self.parent
@calibration.setter
def calibration(self, calibration):
if calibration is None:
raise TypeError("Calibration must not be None.")
self.set_parent(calibration)
@property
def datasource(self):
return self._datasource
@property
def samplingrate(self):
# TODO: Implement different samplingrates for different traces. Pandas?
return self.datasource.samplingrate
@property
def start(self):
return 0
@property
def stop(self):
return self._shape[0]
@property
def num_traces(self):
return self._shape[1]
@property
def traces(self):
return self._traces.copy()
@property
def caching(self):
return self._caching
@caching.setter
def caching(self, caching):
super(Record, self.__class__).caching.fset(self, caching)
if caching is False:
print("You switched of caching for Record.",
"This will seriously lessen the performance of pyoti!",
"Do this only, if you have weighty reasons.",
"Otherwise, you should revert and reset `caching` to True",
sep="\n")
class SpecialRecord(Record):
"""
This class inherits from the superclass Record and can be used as a
template to modify the default behaviour of how the data is read in.
    See the property _raw_data() for details.
"""
def __init__(self, datasource, traces, calibration, offset=None,
conversion=None, inversion=None, **kwargs):
super().__init__(datasource, traces, calibration, offset=offset,
conversion=conversion, inversion=inversion, **kwargs)
# Get corrected raw_data, which by super().__init__() was initialized
# and stored in self._v_data_cached
# raw_data = self._v_data_cached
# Or get uncorrected raw_data
# raw_data = self._raw_data()
# Do something to the data for initialization of SpecialRecord
@property
def _raw_data(self):
data = super()._raw_data
# do something to data ...
# e.g. calculate positionXYZ of a combination of signals of one or
# multiple stage, objective, moveable lens, or mirror signals. Keep in
# mind that positionZ is increasing for increasing distances and
# decreasing for decreasing distances of the bead to the surface.
# PositionXY need to have the same sign as psdXY, i.e. if the bead is
# pulled to the left/right and displaced to the left/right, positionXY
# and psdXY both need to have the same signs.
return data
def create_record(calibration,
traces=None,
name=None,
group=None,
rc_class=None,
ds_class=None,
offset=None,
conversion=None,
inversion=None,
cfgfile=None,
**kwargs):
"""
Parameters
----------
**kwargs
        Used to pass parameters to the initialization of `rc_class` and
        `ds_class`.
"""
# Set default configfile
cfgfile = cfgfile or 'record.cfg'
# Read configfile
cfg = cf.read_cfg_file(cfgfile)
print(("Creating record '" + name + "':"))
rc_class = rc_class or cf.get_cfg_class(cfg, sec='record',
std_mod='.region',
std_cls='Record')
traces = traces or cf.get_cfg_list(cfg, sec='record', opt='traces')
if not rc_class:
print("Could not create Record class defined in config file %s"
% cfgfile)
return None
# get parameters specific for record
record_pars = {}
    for par in inspect.getfullargspec(rc_class.__init__)[0]:
if par in kwargs:
record_pars[par] = kwargs.pop(par)
# datasource class and parameters
ds_class = ds_class or cf.get_cfg_class(cfg, sec='datasource',
std_mod='.plugins.datasources.generic',
std_cls='GenericDataFile')
if not ds_class:
print("Could not create DataSource class defined in config file %s"
% cfgfile)
return None
# create datasource
datasource = ds_class(**kwargs)
# read and convert offset, conversion, and inversion to dict of floats
# (booleans)
offset = offset or cf.get_cfg_sec_dict(cfg, 'offset', convert='float')
conversion = conversion or cf.get_cfg_sec_dict(cfg, 'conversion',
convert='float')
inversion = inversion or cf.get_cfg_sec_dict(cfg, 'inversion',
convert='boolean')
record = rc_class(datasource, traces, calibration, offset=offset,
conversion=conversion, inversion=inversion, name=name,
group=group, **record_pars)
return record
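# A hedged, minimal numeric sketch of how offset, inversion, and conversion act
# on raw data (see the Record class docstring and _get_data_uncached):
# corrected = (raw - offset) * inversion * conversion. All numbers below are
# made up; the block runs only when this module itself is executed as __main__.
if __name__ == "__main__":
    raw = np.array([[1.0, 2.0], [3.0, 4.0]])   # two samples, two traces
    offset = np.array([0.5, 0.0])              # subtracted in raw-data units
    inversion = np.array([1.0, -1.0])          # second trace moves inverted
    conversion = np.array([2.0, 1.0])          # e.g. Volts -> um for trace one
    print((raw - offset) * inversion * conversion)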
|
|
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NetApp-specific NFS driver module."""
import itertools
import os
import shutil
import unittest
from lxml import etree
import mock
from mox3 import mox as mox_lib
import six
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
fake_api as netapp_api)
from cinder import utils as cinder_utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp import common
from cinder.volume.drivers.netapp.dataontap import (nfs_7mode
as netapp_nfs_7mode)
from cinder.volume.drivers.netapp.dataontap import (nfs_cmode
as netapp_nfs_cmode)
from cinder.volume.drivers.netapp.dataontap.client import client_7mode
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
from cinder.volume.drivers.netapp import utils
from oslo_config import cfg
CONF = cfg.CONF
CONNECTION_INFO = {
'hostname': 'fake_host',
'transport_type': 'https',
'port': 443,
'username': 'admin',
'password': 'passw0rd',
}
FAKE_CONNECTION_INFO_HTTP = {
'hostname': '127.0.0.1',
'transport_type': 'http',
'port': None,
'username': 'admin',
'password': 'pass',
'vserver': 'openstack',
}
FAKE_CONNECTION_INFO_HTTPS = dict(FAKE_CONNECTION_INFO_HTTP,
transport_type='https')
FAKE_7MODE_CONNECTION_INFO_HTTP = dict(FAKE_CONNECTION_INFO_HTTP)
FAKE_7MODE_CONNECTION_INFO_HTTP.pop('vserver')
FAKE_7MODE_CONNECTION_INFO_HTTP['vfiler'] = 'test_vfiler'
FAKE_7MODE_CONNECTION_INFO_HTTPS = dict(FAKE_7MODE_CONNECTION_INFO_HTTP,
transport_type='https')
SEVEN_MODE_CONNECTION_INFO = dict(
itertools.chain(CONNECTION_INFO.items(),
{'vfiler': 'test_vfiler'}.items()))
FAKE_VSERVER = 'fake_vserver'
def create_configuration():
configuration = mox_lib.MockObject(conf.Configuration)
configuration.append_config_values(mox_lib.IgnoreArg())
configuration.max_over_subscription_ratio = 20.0
configuration.reserved_percentage = 0
configuration.nfs_mount_point_base = '/mnt/test'
configuration.nfs_mount_options = None
configuration.nas_mount_options = None
configuration.nfs_used_ratio = .95
configuration.nfs_oversub_ratio = 1.0
configuration.netapp_server_hostname = CONNECTION_INFO['hostname']
configuration.netapp_transport_type = CONNECTION_INFO['transport_type']
configuration.netapp_server_port = CONNECTION_INFO['port']
configuration.netapp_login = CONNECTION_INFO['username']
configuration.netapp_password = CONNECTION_INFO['password']
configuration.netapp_vfiler = SEVEN_MODE_CONNECTION_INFO['vfiler']
return configuration
class FakeVolume(object):
def __init__(self, host='', size=0):
self.size = size
self.id = hash(self)
self.name = None
self.host = host
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, val):
self.__dict__[key] = val
class FakeSnapshot(object):
def __init__(self, volume_size=0):
self.volume_name = None
self.name = None
self.volume_id = None
self.volume_size = volume_size
self.user_id = None
self.status = None
def __getitem__(self, key):
return self.__dict__[key]
class FakeResponse(object):
def __init__(self, status):
"""Initialize FakeResponse.
:param status: Either 'failed' or 'passed'
"""
self.Status = status
if status == 'failed':
self.Reason = 'Sample error'
class NetAppCmodeNfsDriverTestCase(test.TestCase):
"""Test direct NetApp C Mode driver."""
TEST_NFS_HOST = 'nfs-host1'
TEST_NFS_SHARE_PATH = '/export'
TEST_NFS_EXPORT1 = '%s:%s' % (TEST_NFS_HOST, TEST_NFS_SHARE_PATH)
TEST_NFS_EXPORT2 = 'nfs-host2:/export'
TEST_MNT_POINT = '/mnt/nfs'
def setUp(self):
super(NetAppCmodeNfsDriverTestCase, self).setUp()
self._custom_setup()
def _custom_setup(self):
self.mock_object(utils, 'OpenStackInfo')
kwargs = {}
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
# Inject fake netapp_lib module classes.
netapp_api.mock_netapp_lib([client_cmode, client_base])
self.mock_object(common.na_utils, 'check_netapp_lib')
self.mock_object(nfs_base, 'LOG')
self._driver = netapp_nfs_cmode.NetAppCmodeNfsDriver(**kwargs)
self._driver.zapi_client = mock.Mock()
config = self._driver.configuration
config.netapp_vserver = FAKE_VSERVER
def test_create_snapshot(self):
"""Test snapshot can be created and deleted."""
mox = self.mox
drv = self._driver
mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
drv._clone_backing_file_for_volume(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg(),
mox_lib.IgnoreArg())
mox.ReplayAll()
drv.create_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_create_volume_from_snapshot(self):
"""Tests volume creation from snapshot."""
drv = self._driver
mox = self.mox
location = '127.0.0.1:/nfs'
host = 'hostname@backend#' + location
volume = FakeVolume(host, 1)
snapshot = FakeSnapshot(1)
expected_result = {'provider_location': location}
mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
mox.StubOutWithMock(drv, '_get_volume_location')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions')
drv._clone_backing_file_for_volume(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg(),
mox_lib.IgnoreArg())
drv._get_volume_location(mox_lib.IgnoreArg()).AndReturn(location)
drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv._set_rw_permissions(mox_lib.IgnoreArg())
mox.ReplayAll()
self.mock_object(drv, '_do_qos_for_volume')
self.mock_object(utils, 'get_volume_extra_specs')
loc = drv.create_volume_from_snapshot(volume, snapshot)
self.assertEqual(expected_result, loc)
mox.VerifyAll()
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(mox_lib.IgnoreArg())
drv._get_provider_location(mox_lib.IgnoreArg())
drv._volume_not_present(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv._execute('rm', None, run_as_root=True)
drv._post_prov_deprov_in_ssc(mox_lib.IgnoreArg())
mox.ReplayAll()
return mox
def test_delete_existing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(True)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_delete_missing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(False)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup')
@mock.patch.object(client_cmode.Client, '__init__', return_value=None)
def test_do_setup(self, mock_client_init, mock_super_do_setup):
context = mock.Mock()
self._driver.do_setup(context)
mock_client_init.assert_called_once_with(vserver=FAKE_VSERVER,
**CONNECTION_INFO)
mock_super_do_setup.assert_called_once_with(context)
@mock.patch.object(nfs_base.NetAppNfsDriver, 'check_for_setup_error')
@mock.patch.object(ssc_cmode, 'check_ssc_api_permissions')
def test_check_for_setup_error(self, mock_ssc_api_permission_check,
mock_super_check_for_setup_error):
self._driver.zapi_client = mock.Mock()
self._driver.check_for_setup_error()
mock_ssc_api_permission_check.assert_called_once_with(
self._driver.zapi_client)
mock_super_check_for_setup_error.assert_called_once_with()
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self.mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
drv.zapi_client = mox.CreateMockAnything()
mox.StubOutWithMock(drv, '_get_host_ip')
mox.StubOutWithMock(drv, '_get_export_path')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
drv.zapi_client.get_if_info_by_ip('127.0.0.1').AndReturn(
self._prepare_info_by_ip_response())
drv.zapi_client.get_vol_by_junc_vserver('openstack', '/nfs').AndReturn(
'nfsvol')
drv.zapi_client.clone_file('nfsvol', 'volume_name', 'clone_name',
'openstack')
drv._get_host_ip(mox_lib.IgnoreArg()).AndReturn('127.0.0.1')
drv._get_export_path(mox_lib.IgnoreArg()).AndReturn('/nfs')
drv._post_prov_deprov_in_ssc(mox_lib.IgnoreArg())
return mox
def _prepare_info_by_ip_response(self):
res = """<attributes-list>
<net-interface-info>
<address>127.0.0.1</address>
<administrative-status>up</administrative-status>
<current-node>fas3170rre-cmode-01</current-node>
<current-port>e1b-1165</current-port>
<data-protocols>
<data-protocol>nfs</data-protocol>
</data-protocols>
<dns-domain-name>none</dns-domain-name>
<failover-group/>
<failover-policy>disabled</failover-policy>
<firewall-policy>data</firewall-policy>
<home-node>fas3170rre-cmode-01</home-node>
<home-port>e1b-1165</home-port>
<interface-name>nfs_data1</interface-name>
<is-auto-revert>false</is-auto-revert>
<is-home>true</is-home>
<netmask>255.255.255.0</netmask>
<netmask-length>24</netmask-length>
<operational-status>up</operational-status>
<role>data</role>
<routing-group-name>c10.63.165.0/24</routing-group-name>
<use-failover-group>disabled</use-failover-group>
<vserver>openstack</vserver>
</net-interface-info></attributes-list>"""
response_el = etree.XML(res)
return netapp_api.NaElement(response_el).get_children()
def test_clone_backing_file_for_volume(self):
drv = self._driver
mox = self._prepare_clone_mock('pass')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + six.text_type(hash(volume_name))
share = 'ip:/share'
drv._clone_backing_file_for_volume(volume_name, clone_name, volume_id,
share)
mox.VerifyAll()
def test_register_img_in_cache_noshare(self):
volume = {'id': '1', 'name': 'testvol'}
volume['provider_location'] = '10.61.170.1:/share/path'
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
'10.61.170.1:/share/path',
'img-cache-12345')
mox.ReplayAll()
drv._register_image_in_cache(volume, '12345')
mox.VerifyAll()
def test_register_img_in_cache_with_share(self):
volume = {'id': '1', 'name': 'testvol'}
volume['provider_location'] = '10.61.170.1:/share/path'
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
'10.61.170.1:/share/path',
'img-cache-12345')
mox.ReplayAll()
drv._register_image_in_cache(volume, '12345')
mox.VerifyAll()
def test_find_image_in_cache_no_shares(self):
drv = self._driver
drv._mounted_shares = []
result = drv._find_image_in_cache('image_id')
if not result:
pass
else:
self.fail('Return result is unexpected')
def test_find_image_in_cache_shares(self):
drv = self._driver
mox = self.mox
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(os.path, 'exists')
drv._get_mount_point_for_share('testshare').AndReturn('/mnt')
os.path.exists('/mnt/img-cache-id').AndReturn(True)
mox.ReplayAll()
result = drv._find_image_in_cache('id')
(share, file_name) = result[0]
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if (share == 'testshare' and file_name == 'img-cache-id'):
pass
else:
self.fail('Return result is unexpected')
def test_find_old_cache_files_notexists(self):
drv = self._driver
mox = self.mox
cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+720']
setattr(drv.configuration, 'expiry_thres_minutes', 720)
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((None, ''))
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
if len(res) == 0:
pass
else:
self.fail('No files expected but got return values.')
def test_find_old_cache_files_exists(self):
drv = self._driver
mox = self.mox
cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+720']
setattr(drv.configuration, 'expiry_thres_minutes', '720')
files = '/mnt/img-id1\n/mnt/img-id2\n'
r_files = ['img-id1', 'img-id2']
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_shortlist_del_eligible_files')
drv._get_mount_point_for_share('share').AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((files, None))
drv._shortlist_del_eligible_files(
mox_lib.IgnoreArg(), r_files).AndReturn(r_files)
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
if len(res) == len(r_files):
for f in res:
r_files.remove(f)
else:
self.fail('Returned files not same as expected.')
def test_delete_files_till_bytes_free_success(self):
drv = self._driver
mox = self.mox
files = [('img-cache-1', 230), ('img-cache-2', 380)]
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_delete_file_at_path')
drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
drv._delete_file_at_path('/mnt/img-cache-2').AndReturn(True)
drv._delete_file_at_path('/mnt/img-cache-1').AndReturn(True)
mox.ReplayAll()
drv._delete_files_till_bytes_free(files, 'share', bytes_to_free=1024)
mox.VerifyAll()
def test_clean_image_cache_exec(self):
drv = self._driver
mox = self.mox
drv.configuration.thres_avl_size_perc_start = 20
drv.configuration.thres_avl_size_perc_stop = 50
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_find_old_cache_files')
mox.StubOutWithMock(drv, '_delete_files_till_bytes_free')
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info('testshare').AndReturn((100, 19))
drv._find_old_cache_files('testshare').AndReturn(['f1', 'f2'])
drv._delete_files_till_bytes_free(
['f1', 'f2'], 'testshare', bytes_to_free=31)
mox.ReplayAll()
drv._clean_image_cache()
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if not drv.cleaning:
pass
else:
self.fail('Clean image cache failed.')
def test_clean_image_cache_noexec(self):
drv = self._driver
mox = self.mox
drv.configuration.thres_avl_size_perc_start = 20
drv.configuration.thres_avl_size_perc_stop = 50
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info('testshare').AndReturn((100, 30, 70))
mox.ReplayAll()
drv._clean_image_cache()
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if not drv.cleaning:
pass
else:
self.fail('Clean image cache failed.')
def test_clone_image_fromcache(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
mox.StubOutWithMock(drv, '_post_clone_image')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn(
[('share', 'file_name')])
drv._is_share_vol_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(True)
drv._do_clone_rel_img_cache('file_name', 'vol', 'share', 'file_name')
drv._post_clone_image(volume)
mox.ReplayAll()
drv.clone_image('',
volume,
('image_location', None),
{'id': 'image_id'}, '')
mox.VerifyAll()
def get_img_info(self, format):
class img_info(object):
def __init__(self, fmt):
self.file_format = fmt
return img_info(format)
def test_clone_image_cloneableshare_nospace(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share(
mox_lib.IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._is_share_vol_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(False)
mox.ReplayAll()
(prop, cloned) = drv.clone_image(
'',
volume,
('nfs://127.0.0.1:/share/img-id', None),
{'id': 'image_id'},
'')
mox.VerifyAll()
if not cloned and not prop['provider_location']:
pass
else:
self.fail('Expected not cloned, got cloned.')
def test_clone_image_cloneableshare_raw(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share(
mox_lib.IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._is_share_vol_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
drv._clone_backing_file_for_volume(
'img-id', 'vol', share='127.0.0.1:/share', volume_id=None)
drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv._set_rw_permissions('/mnt/vol')
drv._resize_image_file({'name': 'vol'}, mox_lib.IgnoreArg())
mox.ReplayAll()
drv.clone_image(
'',
volume,
('nfs://127.0.0.1:/share/img-id', None),
{'id': 'image_id'},
'')
mox.VerifyAll()
def test_clone_image_cloneableshare_notraw(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('notraw'))
image_utils.convert_image(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg(),
'raw', run_as_root=True)
image_utils.qemu_img_info('/mnt/vol', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
drv._register_image_in_cache(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv._set_rw_permissions('/mnt/vol')
drv._resize_image_file({'name': 'vol'}, mox_lib.IgnoreArg())
mox.ReplayAll()
drv.clone_image(
'',
volume,
('nfs://127.0.0.1/share/img-id', None),
{'id': 'image_id'},
'')
mox.VerifyAll()
def test_clone_image_file_not_discovered(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
mox.StubOutWithMock(drv, '_do_qos_for_volume')
mox.StubOutWithMock(drv, 'local_path')
utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('notraw'))
image_utils.convert_image(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg(),
'raw', run_as_root=True)
image_utils.qemu_img_info('/mnt/vol', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
drv._register_image_in_cache(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg())
drv._do_qos_for_volume(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(False)
mox.ReplayAll()
vol_dict, result = drv.clone_image(
'',
volume,
('nfs://127.0.0.1/share/img-id', None),
{'id': 'image_id'},
'')
mox.VerifyAll()
self.assertFalse(result)
self.assertFalse(vol_dict['bootable'])
self.assertIsNone(vol_dict['provider_location'])
def test_clone_image_resizefails(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(utils, 'get_volume_extra_specs')
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_backing_file_for_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_do_qos_for_volume')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
mox.StubOutWithMock(drv, 'local_path')
utils.get_volume_extra_specs(mox_lib.IgnoreArg())
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('notraw'))
image_utils.convert_image(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg(), 'raw',
run_as_root=True)
image_utils.qemu_img_info('/mnt/vol', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
drv._register_image_in_cache(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg())
drv._do_qos_for_volume(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv._set_rw_permissions('/mnt/vol')
drv._resize_image_file(
mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndRaise(exception.InvalidResults())
mox.ReplayAll()
vol_dict, result = drv.clone_image(
'',
volume,
('nfs://127.0.0.1/share/img-id', None),
{'id': 'image_id'},
'')
mox.VerifyAll()
self.assertFalse(result)
self.assertFalse(vol_dict['bootable'])
self.assertIsNone(vol_dict['provider_location'])
def test_is_cloneable_share_badformats(self):
drv = self._driver
strgs = ['10.61.666.22:/share/img',
'nfs://10.61.666.22:/share/img',
'nfs://10.61.666.22//share/img',
'nfs://com.netapp.com:/share/img',
'nfs://com.netapp.com//share/img',
                 'com.netapp.com://share/im\\g',
'http://com.netapp.com://share/img',
'nfs://com.netapp.com:/share/img',
                 'nfs://com.netapp.com:8080//share/img',
'nfs://com.netapp.com//img',
'nfs://[ae::sr::ty::po]/img']
for strg in strgs:
res = drv._is_cloneable_share(strg)
if res:
msg = 'Invalid format matched for url %s.' % strg
self.fail(msg)
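# The 'goodformat' cases below exercise URLs of the general shape
# nfs://<ip-or-hostname>[:port]/<export-path>/<image>, which
# _is_cloneable_share is expected to accept once _check_share_in_use
# reports a matching mounted share.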
def test_is_cloneable_share_goodformat1(self):
drv = self._driver
mox = self.mox
strg = 'nfs://10.61.222.333/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat2(self):
drv = self._driver
mox = self.mox
strg = 'nfs://10.61.222.333:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat3(self):
drv = self._driver
mox = self.mox
strg = 'nfs://com.netapp:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat4(self):
drv = self._driver
mox = self.mox
strg = 'nfs://netapp.com/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat5(self):
drv = self._driver
mox = self.mox
strg = 'nfs://netapp.com/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_check_share_in_use_no_conn(self):
drv = self._driver
share = drv._check_share_in_use(None, '/dir')
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_invalid_conn(self):
drv = self._driver
share = drv._check_share_in_use(':8989', '/dir')
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_incorrect_host(self):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(utils, 'resolve_hostname')
utils.resolve_hostname(mox_lib.IgnoreArg()).AndRaise(Exception())
mox.ReplayAll()
share = drv._check_share_in_use('incorrect:8989', '/dir')
mox.VerifyAll()
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_success(self):
drv = self._driver
mox = self.mox
drv._mounted_shares = ['127.0.0.1:/dir/share']
mox.StubOutWithMock(utils, 'resolve_hostname')
mox.StubOutWithMock(drv, '_share_match_for_ip')
utils.resolve_hostname(mox_lib.IgnoreArg()).AndReturn('10.22.33.44')
drv._share_match_for_ip(
'10.22.33.44', ['127.0.0.1:/dir/share']).AndReturn('share')
mox.ReplayAll()
share = drv._check_share_in_use('127.0.0.1:8989', '/dir/share')
mox.VerifyAll()
if not share:
self.fail('Expected share not detected')
def test_construct_image_url_loc(self):
drv = self._driver
img_loc = (None,
               # Valid metadata
[{'metadata':
{'share_location': 'nfs://host/path',
'mountpoint': '/opt/stack/data/glance',
'id': 'abc-123',
'type': 'nfs'},
'url': 'file:///opt/stack/data/glance/image-id-0'},
# missing metadata
{'metadata': {},
'url': 'file:///opt/stack/data/glance/image-id-1'},
# missing location_type
{'metadata': {'location_type': None},
'url': 'file:///opt/stack/data/glance/image-id-2'},
# non-nfs location_type
{'metadata': {'location_type': 'not-NFS'},
'url': 'file:///opt/stack/data/glance/image-id-3'},
# missing share_location
{'metadata': {'location_type': 'nfs',
'share_location': None},
'url': 'file:///opt/stack/data/glance/image-id-4'},
# missing mountpoint
{'metadata': {'location_type': 'nfs',
'share_location': 'nfs://host/path',
# Pre-kilo we documented "mount_point"
'mount_point': '/opt/stack/data/glance'},
'url': 'file:///opt/stack/data/glance/image-id-5'},
# Valid metadata
{'metadata':
{'share_location': 'nfs://host/path',
'mountpoint': '/opt/stack/data/glance',
'id': 'abc-123',
'type': 'nfs'},
'url': 'file:///opt/stack/data/glance/image-id-6'}])
locations = drv._construct_image_nfs_url(img_loc)
self.assertIn("nfs://host/path/image-id-0", locations)
self.assertIn("nfs://host/path/image-id-6", locations)
self.assertEqual(2, len(locations))
def test_construct_image_url_direct(self):
drv = self._driver
img_loc = ("nfs://host/path/image-id", None)
locations = drv._construct_image_nfs_url(img_loc)
self.assertIn("nfs://host/path/image-id", locations)
def test_get_pool(self):
pool = self._driver.get_pool({'provider_location': 'fake-share'})
self.assertEqual('fake-share', pool)
def _set_config(self, configuration):
configuration.netapp_storage_family = 'ontap_cluster'
configuration.netapp_storage_protocol = 'nfs'
configuration.netapp_login = 'admin'
configuration.netapp_password = 'pass'
configuration.netapp_server_hostname = '127.0.0.1'
configuration.netapp_transport_type = 'http'
configuration.netapp_server_port = None
configuration.netapp_vserver = 'openstack'
configuration.nfs_shares_config = '/nfs'
return configuration
@mock.patch.object(utils, 'get_volume_extra_specs')
def test_check_volume_type_mismatch(self, get_specs):
if not hasattr(self._driver, 'vserver'):
self.skipTest("Test only applies to cmode driver")
get_specs.return_value = {'thin_volume': 'true'}
self._driver._is_share_vol_type_match = mock.Mock(return_value=False)
self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
self._driver._check_volume_type, 'vol',
'share', 'file')
get_specs.assert_called_once_with('vol')
self._driver._is_share_vol_type_match.assert_called_once_with(
'vol', 'share', 'file')
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_all_default(self):
configuration = self._set_config(create_configuration())
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_cmode, 'Client')
driver.do_setup(context='')
mock_invoke.assert_called_with(**FAKE_CONNECTION_INFO_HTTP)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_http_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'http'
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_cmode, 'Client')
driver.do_setup(context='')
mock_invoke.assert_called_with(**FAKE_CONNECTION_INFO_HTTP)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_https_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'https'
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_cmode, 'Client')
driver.do_setup(context='')
mock_invoke.assert_called_with(**FAKE_CONNECTION_INFO_HTTPS)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_http_non_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_server_port = 81
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_cmode, 'Client')
driver.do_setup(context='')
FAKE_CONN_INFO_PORT_HTTP = dict(FAKE_CONNECTION_INFO_HTTP, port=81)
mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTP)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_https_non_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'https'
configuration.netapp_server_port = 446
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_cmode, 'Client')
driver.do_setup(context='')
FAKE_CONN_INFO_PORT_HTTPS = dict(FAKE_CONNECTION_INFO_HTTPS, port=446)
mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTPS)
@mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11')
def test_convert_vol_ref_share_name_to_share_ip(self, mock_hostname):
drv = self._driver
share = "%s/%s" % (self.TEST_NFS_EXPORT1, 'test_file_name')
modified_share = '10.12.142.11:/export/test_file_name'
modified_vol_ref = drv._convert_vol_ref_share_name_to_share_ip(share)
self.assertEqual(modified_share, modified_vol_ref)
@mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11')
@mock.patch.object(os.path, 'isfile', return_value=True)
def test_get_share_mount_and_vol_from_vol_ref(self, mock_isfile,
mock_hostname):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, 'test_file_name')
vol_ref = {'source-name': vol_path}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
(share, mount, file_path) = \
drv._get_share_mount_and_vol_from_vol_ref(vol_ref)
self.assertEqual(self.TEST_NFS_EXPORT1, share)
self.assertEqual(self.TEST_MNT_POINT, mount)
self.assertEqual('test_file_name', file_path)
@mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11')
def test_get_share_mount_and_vol_from_vol_ref_with_bad_ref(self,
mock_hostname):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
vol_ref = {'source-id': '1234546'}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
self.assertRaises(exception.ManageExistingInvalidReference,
drv._get_share_mount_and_vol_from_vol_ref, vol_ref)
@mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11')
def test_get_share_mount_and_vol_from_vol_ref_where_not_found(self,
mock_host):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT2, 'test_file_name')
vol_ref = {'source-name': vol_path}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
self.assertRaises(exception.ManageExistingInvalidReference,
drv._get_share_mount_and_vol_from_vol_ref, vol_ref)
@mock.patch.object(utils, 'resolve_hostname', return_value='10.12.142.11')
def test_get_share_mount_and_vol_from_vol_ref_where_is_dir(self,
mock_host):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
vol_ref = {'source-name': self.TEST_NFS_EXPORT2}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
self.assertRaises(exception.ManageExistingInvalidReference,
drv._get_share_mount_and_vol_from_vol_ref, vol_ref)
@mock.patch.object(cinder_utils, 'get_file_size', return_value=1073741824)
def test_manage_existing_get_size(self, get_file_size):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
test_file = 'test_file_name'
volume = FakeVolume()
volume['name'] = 'file-new-managed-123'
volume['id'] = 'volume-new-managed-123'
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file)
vol_ref = {'source-name': vol_path}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._get_share_mount_and_vol_from_vol_ref = mock.Mock(
return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT,
test_file))
vol_size = drv.manage_existing_get_size(volume, vol_ref)
self.assertEqual(1, vol_size)
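# get_file_size returns 1074253824 bytes, just over 1 GiB (1073741824),
# so manage_existing_get_size is expected to round the size up to 2 GiB.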
@mock.patch.object(cinder_utils, 'get_file_size', return_value=1074253824)
def test_manage_existing_get_size_round_up(self, get_file_size):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
test_file = 'test_file_name'
volume = FakeVolume()
volume['name'] = 'file-new-managed-123'
volume['id'] = 'volume-new-managed-123'
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file)
vol_ref = {'source-name': vol_path}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._get_share_mount_and_vol_from_vol_ref = mock.Mock(
return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT,
test_file))
vol_size = drv.manage_existing_get_size(volume, vol_ref)
self.assertEqual(2, vol_size)
@mock.patch.object(cinder_utils, 'get_file_size', return_value='badfloat')
def test_manage_existing_get_size_error(self, get_size):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
test_file = 'test_file_name'
volume = FakeVolume()
volume['name'] = 'file-new-managed-123'
volume['id'] = 'volume-new-managed-123'
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file)
vol_ref = {'source-name': vol_path}
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._get_share_mount_and_vol_from_vol_ref = mock.Mock(
return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT,
test_file))
self.assertRaises(exception.VolumeBackendAPIException,
drv.manage_existing_get_size, volume, vol_ref)
@mock.patch.object(cinder_utils, 'get_file_size', return_value=1074253824)
def test_manage_existing(self, get_file_size):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
test_file = 'test_file_name'
volume = FakeVolume()
volume['name'] = 'file-new-managed-123'
volume['id'] = 'volume-new-managed-123'
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file)
vol_ref = {'source-name': vol_path}
drv._check_volume_type = mock.Mock()
self.stubs.Set(drv, '_execute', mock.Mock())
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._get_share_mount_and_vol_from_vol_ref = mock.Mock(
return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT,
test_file))
shutil.move = mock.Mock()
mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs')
mock_get_specs.return_value = {}
self.mock_object(drv, '_do_qos_for_volume')
location = drv.manage_existing(volume, vol_ref)
self.assertEqual(self.TEST_NFS_EXPORT1, location['provider_location'])
drv._check_volume_type.assert_called_once_with(
volume, self.TEST_NFS_EXPORT1, test_file, {})
@mock.patch.object(cinder_utils, 'get_file_size', return_value=1074253824)
def test_manage_existing_move_fails(self, get_file_size):
drv = self._driver
drv._mounted_shares = [self.TEST_NFS_EXPORT1]
test_file = 'test_file_name'
volume = FakeVolume()
volume['name'] = 'volume-new-managed-123'
volume['id'] = 'volume-new-managed-123'
vol_path = "%s/%s" % (self.TEST_NFS_EXPORT1, test_file)
vol_ref = {'source-name': vol_path}
mock_check_volume_type = drv._check_volume_type = mock.Mock()
drv._ensure_shares_mounted = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(
return_value=self.TEST_MNT_POINT)
drv._get_share_mount_and_vol_from_vol_ref = mock.Mock(
return_value=(self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT,
test_file))
drv._execute = mock.Mock(side_effect=OSError)
mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs')
mock_get_specs.return_value = {}
self.mock_object(drv, '_do_qos_for_volume')
self.assertRaises(exception.VolumeBackendAPIException,
drv.manage_existing, volume, vol_ref)
mock_check_volume_type.assert_called_once_with(
volume, self.TEST_NFS_EXPORT1, test_file, {})
@mock.patch.object(nfs_base, 'LOG')
def test_unmanage(self, mock_log):
drv = self._driver
self.mock_object(utils, 'get_valid_qos_policy_group_info')
volume = FakeVolume()
volume['id'] = '123'
volume['provider_location'] = '/share'
drv.unmanage(volume)
self.assertEqual(1, mock_log.info.call_count)
class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase):
"""Test direct NetApp C Mode driver only and not inherit."""
def setUp(self):
super(NetAppCmodeNfsDriverOnlyTestCase, self).setUp()
self._custom_setup()
def _custom_setup(self):
self.mock_object(utils, 'OpenStackInfo')
kwargs = {}
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
self._driver = netapp_nfs_cmode.NetAppCmodeNfsDriver(**kwargs)
self._driver.ssc_enabled = True
self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path'
self._driver.zapi_client = mock.Mock()
self.mock_object(netapp_nfs_cmode, 'LOG')
self._fake_empty_qos_policy_group_info = {
'legacy': None,
'spec': None,
}
self._fake_legacy_qos_policy_group_info = {
'legacy': {
'policy_name': 'qos_policy_1'
},
'spec': None,
}
@mock.patch.object(utils, 'LOG', mock.Mock())
def test_create_volume(self):
drv = self._driver
drv.ssc_enabled = False
fake_extra_specs = {}
fake_share = 'localhost:myshare'
host = 'hostname@backend#' + fake_share
mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs')
mock_get_specs.return_value = fake_extra_specs
self.mock_object(drv, '_ensure_shares_mounted')
self.mock_object(drv, '_do_create_volume')
mock_get_qos_info =\
self.mock_object(utils, 'get_valid_qos_policy_group_info')
mock_get_qos_info.return_value = self._fake_empty_qos_policy_group_info
volume_info = self._driver.create_volume(FakeVolume(host, 1))
self.assertEqual(fake_share, volume_info.get('provider_location'))
self.assertEqual(0, utils.LOG.warning.call_count)
def test_create_volume_no_pool_specified(self):
drv = self._driver
drv.ssc_enabled = False
host = 'hostname@backend' # missing pool
with mock.patch.object(drv, '_ensure_shares_mounted'):
self.assertRaises(exception.InvalidHost,
self._driver.create_volume, FakeVolume(host, 1))
def test_create_volume_with_legacy_qos_policy(self):
drv = self._driver
drv.ssc_enabled = False
fake_extra_specs = {'netapp:qos_policy_group': 'qos_policy_1'}
fake_share = 'localhost:myshare'
host = 'hostname@backend#' + fake_share
fake_volume = FakeVolume(host, 1)
mock_get_specs = self.mock_object(utils, 'get_volume_extra_specs')
mock_get_specs.return_value = fake_extra_specs
mock_get_qos_info =\
self.mock_object(utils, 'get_valid_qos_policy_group_info')
mock_get_qos_info.return_value =\
self._fake_legacy_qos_policy_group_info
self.mock_object(drv, '_ensure_shares_mounted')
self.mock_object(drv, '_do_create_volume')
mock_set_qos = self.mock_object(drv, '_set_qos_policy_group_on_volume')
volume_info = self._driver.create_volume(fake_volume)
self.assertEqual('localhost:myshare',
volume_info.get('provider_location'))
mock_set_qos.assert_called_once_with(
fake_volume, self._fake_legacy_qos_policy_group_info)
def test_copy_img_to_vol_copyoffload_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(context, volume, image_service, image_id)
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
drv._update_stale_vols.assert_called_once_with('vol')
def test_copy_img_to_vol_copyoffload_failure(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock(side_effect=Exception())
nfs_base.NetAppNfsDriver.copy_image_to_volume = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(context, volume, image_service, image_id)
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
nfs_base.NetAppNfsDriver.copy_image_to_volume.\
assert_called_once_with(context, volume, image_service, image_id)
drv._update_stale_vols.assert_called_once_with('vol')
def test_copy_img_to_vol_copyoffload_nonexistent_binary_path(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = mock.Mock()
image_service.get_location.return_value = (mock.Mock(), mock.Mock())
image_service.show.return_value = {'size': 0}
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
drv._construct_image_nfs_url = mock.Mock(return_value=["nfs://1"])
drv._check_get_nfs_path_segs = mock.Mock(return_value=("test:test",
"dr"))
drv._get_ip_verify_on_cluster = mock.Mock(return_value="192.168.1.1")
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._get_host_ip = mock.Mock()
drv._get_provider_location = mock.Mock()
drv._get_export_path = mock.Mock(return_value="dr")
drv._check_share_can_hold_size = mock.Mock()
# Raise error as if the copyoffload file can not be found
drv._clone_file_dst_exists = mock.Mock(side_effect=OSError())
# Verify the original error is propagated
self.assertRaises(OSError, drv._try_copyoffload,
context, volume, image_service, image_id)
def test_copyoffload_frm_cache_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._find_image_in_cache = mock.Mock(return_value=[('share', 'img')])
drv._copy_from_cache = mock.Mock(return_value=True)
drv._try_copyoffload(context, volume, image_service, image_id)
drv._copy_from_cache.assert_called_once_with(volume,
image_id,
[('share', 'img')])
def test_copyoffload_frm_img_service_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
drv._copy_from_img_service = mock.Mock()
drv._try_copyoffload(context, volume, image_service, image_id)
drv._copy_from_img_service.assert_called_once_with(context,
volume,
image_service,
image_id)
def test_cache_copyoffload_workflow_success(self):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
cache_result = [('ip1:/openstack', 'img-cache-imgid')]
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._execute = mock.Mock()
drv._register_image_in_cache = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='/share')
drv._post_clone_image = mock.Mock()
copied = drv._copy_from_cache(volume, image_id, cache_result)
self.assertTrue(copied)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._execute.assert_called_once_with('cof_path', 'ip1', 'ip1',
'/openstack/img-cache-imgid',
'/exp_path/name',
run_as_root=False,
check_exit_code=0)
drv._post_clone_image.assert_called_with(volume)
drv._get_provider_location.assert_called_with('vol_id')
@mock.patch.object(image_utils, 'qemu_img_info')
def test_img_service_raw_copyoffload_workflow_success(self,
mock_qemu_img_info):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'raw'}
drv._check_get_nfs_path_segs =\
mock.Mock(return_value=('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._discover_file_till_timeout = mock.Mock(return_value=True)
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file_at_path = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert drv._execute.call_count == 1
drv._post_clone_image.assert_called_with(volume)
@mock.patch.object(image_utils, 'convert_image')
@mock.patch.object(image_utils, 'qemu_img_info')
@mock.patch('os.path.exists')
def test_img_service_qcow2_copyoffload_workflow_success(self, mock_exists,
mock_qemu_img_info,
mock_cvrt_image):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'qcow2'}
drv._check_get_nfs_path_segs =\
mock.Mock(return_value=('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file_at_path = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert mock_cvrt_image.call_count == 1
assert drv._execute.call_count == 1
assert drv._delete_file_at_path.call_count == 2
assert drv._clone_file_dst_exists.call_count == 1
drv._post_clone_image.assert_called_with(volume)
class NetApp7modeNfsDriverTestCase(NetAppCmodeNfsDriverTestCase):
"""Test direct NetApp 7 Mode driver."""
def _custom_setup(self):
self.mock_object(utils, 'OpenStackInfo')
# Inject fake netapp_lib module classes.
netapp_api.mock_netapp_lib([client_cmode, client_base])
self.mock_object(common.na_utils, 'check_netapp_lib')
self.mock_object(common.na_utils, 'LOG')
self.mock_object(nfs_base, 'LOG')
self._driver = netapp_nfs_7mode.NetApp7modeNfsDriver(
configuration=create_configuration())
self._driver.zapi_client = mock.Mock()
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(mox_lib.IgnoreArg())
drv._volume_not_present(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv._execute('rm', None, run_as_root=True)
mox.ReplayAll()
return mox
def test_create_volume_no_pool_specified(self):
drv = self._driver
drv.ssc_enabled = False
host = 'hostname@backend' # missing pool
with mock.patch.object(drv, '_ensure_shares_mounted'):
self.assertRaises(exception.InvalidHost,
self._driver.create_volume, FakeVolume(host, 1))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup')
@mock.patch.object(client_7mode.Client, '__init__', return_value=None)
def test_do_setup(self, mock_client_init, mock_super_do_setup):
context = mock.Mock()
self._driver.do_setup(context)
mock_client_init.assert_called_once_with(**SEVEN_MODE_CONNECTION_INFO)
mock_super_do_setup.assert_called_once_with(context)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_all_default(self):
configuration = self._set_config(create_configuration())
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_7mode, 'Client')
driver.do_setup(context='')
mock_invoke.assert_called_with(**FAKE_7MODE_CONNECTION_INFO_HTTP)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_http_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'http'
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_7mode, 'Client')
driver.do_setup(context='')
mock_invoke.assert_called_with(**FAKE_7MODE_CONNECTION_INFO_HTTP)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_https_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'https'
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_7mode, 'Client')
driver.do_setup(context='')
mock_invoke.assert_called_with(**FAKE_7MODE_CONNECTION_INFO_HTTPS)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_http_non_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_server_port = 81
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_7mode, 'Client')
driver.do_setup(context='')
FAKE_CONN_INFO_PORT_HTTP = dict(FAKE_7MODE_CONNECTION_INFO_HTTP,
port=81)
mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTP)
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
@mock.patch.object(nfs_base.NetAppNfsDriver, 'do_setup', mock.Mock())
def test_do_setup_https_non_default_port(self):
configuration = self._set_config(create_configuration())
configuration.netapp_transport_type = 'https'
configuration.netapp_server_port = 446
driver = common.NetAppDriver(configuration=configuration)
mock_invoke = self.mock_object(client_7mode, 'Client')
driver.do_setup(context='')
FAKE_CONN_INFO_PORT_HTTPS = dict(FAKE_7MODE_CONNECTION_INFO_HTTPS,
port=446)
mock_invoke.assert_called_with(**FAKE_CONN_INFO_PORT_HTTPS)
@mock.patch.object(nfs_base.NetAppNfsDriver, 'check_for_setup_error')
def test_check_for_setup_error(self, mock_super_check_for_setup_error):
self._driver.zapi_client.get_ontapi_version.return_value = (1, 20)
self.assertIsNone(self._driver.check_for_setup_error())
mock_super_check_for_setup_error.assert_called_once_with()
def test_check_for_setup_error_old_version(self):
self._driver.zapi_client.get_ontapi_version.return_value = (1, 8)
self.assertRaises(exception.VolumeBackendAPIException,
self._driver.check_for_setup_error)
def test_check_for_setup_error_no_version(self):
self._driver.zapi_client.get_ontapi_version.return_value = None
self.assertRaises(exception.VolumeBackendAPIException,
self._driver.check_for_setup_error)
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self.mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
mox.StubOutWithMock(drv, '_get_export_ip_path')
drv._get_export_ip_path(
mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(('127.0.0.1', '/nfs'))
return mox
def test_clone_backing_file_for_volume_clear(self):
drv = self._driver
mox = self._prepare_clone_mock('fail')
drv.zapi_client = mox.CreateMockAnything()
drv.zapi_client.get_actual_path_for_export('/nfs').AndReturn(
'/vol/vol1/nfs')
drv.zapi_client.clone_file(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + six.text_type(hash(volume_name))
try:
drv._clone_backing_file_for_volume(volume_name, clone_name,
volume_id)
except Exception as e:
if isinstance(e, netapp_api.NaApiError):
pass
else:
raise
mox.VerifyAll()
def test_get_pool(self):
pool = self._driver.get_pool({'provider_location': 'fake-share'})
self.assertEqual('fake-share', pool)
def _set_config(self, configuration):
super(NetApp7modeNfsDriverTestCase, self)._set_config(
configuration)
configuration.netapp_storage_family = 'ontap_7mode'
return configuration
def test_clone_backing_file_for_volume(self):
drv = self._driver
mox = self._prepare_clone_mock('pass')
drv.zapi_client = mox.CreateMockAnything()
drv.zapi_client.get_actual_path_for_export('/nfs').AndReturn(
'/vol/vol1/nfs')
drv.zapi_client.clone_file(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + six.text_type(hash(volume_name))
share = 'ip:/share'
drv._clone_backing_file_for_volume(volume_name, clone_name, volume_id,
share)
mox.VerifyAll()
|
|
"""This module includes tests of the code object representation.
>>> def f(x):
... def g(y):
... return x + y
... return g
...
>>> dump(f.__code__)
name: f
argcount: 1
kwonlyargcount: 0
names: ()
varnames: ('x', 'g')
cellvars: ('x',)
freevars: ()
nlocals: 2
flags: 3
consts: ('None', '<code object g>', "'f.<locals>.g'")
>>> dump(f(4).__code__)
name: g
argcount: 1
kwonlyargcount: 0
names: ()
varnames: ('y',)
cellvars: ()
freevars: ('x',)
nlocals: 1
flags: 19
consts: ('None',)
>>> def h(x, y):
... a = x + y
... b = x - y
... c = a * b
... return c
...
>>> dump(h.__code__)
name: h
argcount: 2
kwonlyargcount: 0
names: ()
varnames: ('x', 'y', 'a', 'b', 'c')
cellvars: ()
freevars: ()
nlocals: 5
flags: 67
consts: ('None',)
>>> def attrs(obj):
... print(obj.attr1)
... print(obj.attr2)
... print(obj.attr3)
>>> dump(attrs.__code__)
name: attrs
argcount: 1
kwonlyargcount: 0
names: ('print', 'attr1', 'attr2', 'attr3')
varnames: ('obj',)
cellvars: ()
freevars: ()
nlocals: 1
flags: 67
consts: ('None',)
>>> def optimize_away():
... 'doc string'
... 'not a docstring'
... 53
... 0x53
>>> dump(optimize_away.__code__)
name: optimize_away
argcount: 0
kwonlyargcount: 0
names: ()
varnames: ()
cellvars: ()
freevars: ()
nlocals: 0
flags: 67
consts: ("'doc string'", 'None')
>>> def keywordonly_args(a,b,*,k1):
... return a,b,k1
...
>>> dump(keywordonly_args.__code__)
name: keywordonly_args
argcount: 2
kwonlyargcount: 1
names: ()
varnames: ('a', 'b', 'k1')
cellvars: ()
freevars: ()
nlocals: 3
flags: 67
consts: ('None',)
"""
import sys
import unittest
import weakref
from test.support import run_doctest, run_unittest, cpython_only
def consts(t):
"""Yield a doctest-safe sequence of object reprs."""
for elt in t:
r = repr(elt)
if r.startswith("<code object"):
yield "<code object %s>" % elt.co_name
else:
yield r
def dump(co):
"""Print out a text representation of a code object."""
for attr in ["name", "argcount", "kwonlyargcount", "names", "varnames",
"cellvars", "freevars", "nlocals", "flags"]:
print("%s: %s" % (attr, getattr(co, "co_" + attr)))
print("consts:", tuple(consts(co.co_consts)))
class CodeTest(unittest.TestCase):
@cpython_only
def test_newempty(self):
import _testcapi
co = _testcapi.code_newempty("filename", "funcname", 15)
self.assertEqual(co.co_filename, "filename")
self.assertEqual(co.co_name, "funcname")
self.assertEqual(co.co_firstlineno, 15)
def isinterned(s):
    # Intern an equal-but-distinct copy of s (built so that s itself is never
    # the object passed to sys.intern); the identity test then holds only if
    # s is already the canonical interned string.
    return s is sys.intern(('_' + s + '_')[1:-1])
class CodeConstsTest(unittest.TestCase):
def find_const(self, consts, value):
for v in consts:
if v == value:
return v
self.assertIn(value, consts) # raises an exception
self.fail('Should never be reached')
def assertIsInterned(self, s):
if not isinterned(s):
self.fail('String %r is not interned' % (s,))
def assertIsNotInterned(self, s):
if isinterned(s):
self.fail('String %r is interned' % (s,))
@cpython_only
def test_interned_string(self):
co = compile('res = "str_value"', '?', 'exec')
v = self.find_const(co.co_consts, 'str_value')
self.assertIsInterned(v)
@cpython_only
def test_interned_string_in_tuple(self):
co = compile('res = ("str_value",)', '?', 'exec')
v = self.find_const(co.co_consts, ('str_value',))
self.assertIsInterned(v[0])
@cpython_only
def test_interned_string_in_frozenset(self):
co = compile('res = a in {"str_value"}', '?', 'exec')
v = self.find_const(co.co_consts, frozenset(('str_value',)))
self.assertIsInterned(tuple(v)[0])
@cpython_only
def test_interned_string_default(self):
def f(a='str_value'):
return a
self.assertIsInterned(f())
@cpython_only
def test_interned_string_with_null(self):
co = compile(r'res = "str\0value!"', '?', 'exec')
v = self.find_const(co.co_consts, 'str\0value!')
self.assertIsNotInterned(v)
class CodeWeakRefTest(unittest.TestCase):
def test_basic(self):
# Create a code object in a clean environment so that we know we have
# the only reference to it left.
namespace = {}
exec("def f(): pass", globals(), namespace)
f = namespace["f"]
del namespace
self.called = False
def callback(code):
self.called = True
# f is now the last reference to the function, and through it, the code
# object. While we hold it, check that we can create a weakref and
# deref it. Then delete it, and check that the callback gets called and
# the reference dies.
coderef = weakref.ref(f.__code__, callback)
self.assertTrue(bool(coderef()))
del f
self.assertFalse(bool(coderef()))
self.assertTrue(self.called)
def test_main(verbose=None):
from test import test_code
run_doctest(test_code, verbose)
run_unittest(CodeTest, CodeConstsTest, CodeWeakRefTest)
if __name__ == "__main__":
test_main()
|
|
import os
import sys
import time
import re
import pprint
import random
import types
import unittest
import imp
import jsonpickle
class DevNull(object):
def __init__(self, *writers):
self.writers = writers
def write(self, text):
return
def f_noarg(self):
return
def f_varg(self, *args, **kwargs):
return
class TestProgram(unittest.TestCase):
def setUp(self):
# suppress program output streams
#sys.stdout = DevNull(sys.stdout)
sys.stderr = DevNull(sys.stderr)
sys.path.append('/Users/lwy08/Downloads/pyutilib.math-3.3')
sys.path.append('/Users/lwy08/Downloads/pyutilib.math-3.3/pyutilib/math/')
# reference module under test
self.module = __import__('util')
def tearDown(self):
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
del self.module
def assertRaisesException(self, expected_e, function, *args, **kwargs):
    try:
        function(*args, **kwargs)
    except Exception as actual_e:
        assert actual_e.__class__.__name__ == expected_e.__name__
    else:
        self.fail('%s was not raised' % expected_e.__name__)
def assertEqualClassName(self, function, expected, *args, **kwargs):
return_value = function
assert return_value.__class__.__name__ == expected
def assertEqualAttrs(self, function, expected, *args, **kwargs):
return_value = function
unpickled = jsonpickle.decode(expected)
assert isinstance(unpickled, dict)
for key, value in unpickled.iteritems():
assert return_value.__dict__[key] == value
def assertNotRaises(self, function, *args, **kwargs):
    # Fail if calling function(*args, **kwargs) raises anything at all.
    try:
        function(*args, **kwargs)
    except Exception as e:
        self.fail('Unexpected exception raised: %r' % e)
def test_as_number_all_None(self):
self.assertNotRaises(self.module.as_number, *[None])
self.assertIsNone(self.module.as_number(*[None]))
def test_as_number_all_attr_None_wdef(self):
self.assertNotRaises(self.module.as_number, *[None])
self.assertIsNone(self.module.as_number(*[None]))
def test_as_number_all_attr_MetaParam_wdef(self):
self.assertNotRaises(self.module.as_number, *[None])
self.assertIsNone(self.module.as_number(*[None]))
def test_argmin_all_None(self):
self.assertRaises(TypeError, self.module.argmin, *[None])
def test_argmin_all_attr_None_wdef(self):
self.assertRaises(TypeError, self.module.argmin, *[None])
def test_argmin_all_attr_MetaParam_wdef(self):
self.assertRaises(TypeError, self.module.argmin, *[None])
def test_argmin_ad4efc0a06c14ec7a1df5cf0d8155530(self):
Param1 = type('',(object,), {})()
self.assertRaises(TypeError, self.module.argmin, *[Param1])
def test_argmin_4ed680dcd03e480c9e3eee2150fa705c(self):
Param1 = type('',(object,), {})()
self.assertRaises(TypeError, self.module.argmin, *[Param1])
def test_argmin_30d973a6ece64248948dc7ad199f7ac3(self):
Param1 = type('',(object,), {})()
self.assertRaises(TypeError, self.module.argmin, *[Param1])
def test_isint_all_None(self):
self.assertNotRaises(self.module.isint, *[None])
self.assertEqual(self.module.isint(*[None]), False, 'incorrect function return value encountered')
def test_isint_all_attr_None_wdef(self):
self.assertNotRaises(self.module.isint, *[None])
self.assertEqual(self.module.isint(*[None]), False, 'incorrect function return value encountered')
def test_isint_all_attr_MetaParam_wdef(self):
self.assertNotRaises(self.module.isint, *[None])
self.assertEqual(self.module.isint(*[None]), False, 'incorrect function return value encountered')
def test_isint_caf54bb775384cec8b5835fa675dcc63(self):
Param1 = type('',(object,), {})()
self.assertNotRaises(self.module.isint, *[Param1])
self.assertEqual(self.module.isint(*[Param1]), False, 'incorrect function return value encountered')
def test_factorial_all_None(self):
self.assertRaises(ArithmeticError, self.module.factorial, *[None])
def test_factorial_all_attr_None_wdef(self):
self.assertRaises(ArithmeticError, self.module.factorial, *[None])
def test_factorial_all_attr_MetaParam_wdef(self):
self.assertRaises(ArithmeticError, self.module.factorial, *[None])
def test_factorial_01b0d3dfcbfb45179f727d45af77c8b4(self):
Param1 = type('',(object,), {})()
self.assertRaises(TypeError, self.module.factorial, *[Param1])
def test_factorial_8d7ff0c9efc147a88fedb9b1f2c1faec(self):
self.assertNotRaises(self.module.factorial, *[0])
self.assertEqual(self.module.factorial(*[0]), 1, 'incorrect function return value encountered')
def test_factorial_4340135040(self):
self.assertEqual(479001600, self.module.factorial(12))
def test_factorial_4340135232(self):
self.assertEqual(24, self.module.factorial(4))
def test_factorial_4340135304(self):
self.assertEqual(1, self.module.factorial(1))
def test_factorial_4340135184(self):
self.assertEqual(720, self.module.factorial(6))
def test_factorial_4340134800(self):
self.assertEqual(1124000727777607680000L, self.module.factorial(22))
def test_factorial_4340135256(self):
self.assertEqual(6, self.module.factorial(3))
def test_factorial_4340134920(self):
self.assertEqual(355687428096000, self.module.factorial(17))
def test_factorial_4340135064(self):
self.assertEqual(39916800, self.module.factorial(11))
def test_factorial_4340134728(self):
self.assertEqual(15511210043330985984000000L, self.module.factorial(25))
def test_factorial_4340134656(self):
self.assertEqual(304888344611713860501504000000L, self.module.factorial(28))
def test_factorial_4340134944(self):
self.assertEqual(20922789888000, self.module.factorial(16))
def test_factorial_4340134704(self):
self.assertEqual(403291461126605635584000000L, self.module.factorial(26))
def test_factorial_4340134824(self):
self.assertEqual(51090942171709440000L, self.module.factorial(21))
def test_factorial_4340135088(self):
self.assertEqual(3628800, self.module.factorial(10))
def test_factorial_4340134536(self):
self.assertEqual(8683317618811886495518194401280000000L, self.module.factorial(33))
def test_factorial_4340134584(self):
self.assertEqual(8222838654177922817725562880000000L, self.module.factorial(31))
def test_factorial_4340135160(self):
self.assertEqual(5040, self.module.factorial(7))
def test_factorial_4340134848(self):
self.assertEqual(2432902008176640000, self.module.factorial(20))
def test_factorial_4340135328(self):
self.assertEqual(1, self.module.factorial(0))
def test_factorial_4340134896(self):
self.assertEqual(6402373705728000, self.module.factorial(18))
def test_factorial_4340135112(self):
self.assertEqual(362880, self.module.factorial(9))
def test_factorial_4340134992(self):
self.assertEqual(87178291200, self.module.factorial(14))
def test_factorial_4340134968(self):
self.assertEqual(1307674368000, self.module.factorial(15))
def test_factorial_4340134488(self):
self.assertEqual(10333147966386144929666651337523200000000L, self.module.factorial(35))
def test_factorial_4340134560(self):
self.assertEqual(263130836933693530167218012160000000L, self.module.factorial(32))
def test_factorial_4340135208(self):
self.assertEqual(120, self.module.factorial(5))
def test_factorial_4340136416(self):
self.assertEqual(13763753091226345046315979581580902400000000L, self.module.factorial(37))
def test_factorial_4340134752(self):
self.assertEqual(620448401733239439360000L, self.module.factorial(24))
def test_factorial_4340135280(self):
self.assertEqual(2, self.module.factorial(2))
def test_factorial_4340136440(self):
self.assertEqual(371993326789901217467999448150835200000000L, self.module.factorial(36))
def test_factorial_4340134632(self):
self.assertEqual(8841761993739701954543616000000L, self.module.factorial(29))
def test_factorial_4340135136(self):
self.assertEqual(40320, self.module.factorial(8))
def test_factorial_4340134680(self):
self.assertEqual(10888869450418352160768000000L, self.module.factorial(27))
def test_factorial_4340134512(self):
self.assertEqual(295232799039604140847618609643520000000L, self.module.factorial(34))
def test_factorial_4340135016(self):
self.assertEqual(6227020800, self.module.factorial(13))
def test_factorial_4340134872(self):
self.assertEqual(121645100408832000, self.module.factorial(19))
def test_factorial_4340134776(self):
self.assertEqual(25852016738884976640000L, self.module.factorial(23))
def test_factorial_4340134608(self):
self.assertEqual(265252859812191058636308480000000L, self.module.factorial(30))
def test_approx_equal_all_None(self):
self.assertRaises(TypeError, self.module.approx_equal, *[None, None, None, None])
def test_approx_equal_all_attr_None_wdef(self):
self.assertRaises(TypeError, self.module.approx_equal, *[None, None, None, None])
def test_approx_equal_all_attr_MetaParam_wdef(self):
self.assertRaises(TypeError, self.module.approx_equal, *[None, None, None, None])
def test_approx_equal_2146bade19504f498c783c5a8683c15c(self):
Param1 = type('',(object,), {})()
Param2 = type('',(object,), {})()
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertRaises(TypeError, self.module.approx_equal, *[Param1, Param2, Param3, Param4])
def test_approx_equal_265fbaa69c67493491ecf4ad4a815af8(self):
Param2 = type('',(object,), {})()
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertRaises(TypeError, self.module.approx_equal, *[0, Param2, Param3, Param4])
def test_approx_equal_7ca9cc0d97a642cc867f78b9209493d3(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[0, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[0, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_494f01ec03334a448e889a47110c7a3f(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[-4655114418234116396, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[-4655114418234116396, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_e16ffff2da044d2b964631dc136a02f2(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[-6131897808919339136, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[-6131897808919339136, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_7611746f797649c081a9aeec5ff9f37d(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[-6125374539477339154, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[-6125374539477339154, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_9566e0c92ca645df899ba326536be2f3(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[-2636873581161505499, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[-2636873581161505499, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_f5d5a33386c040d8b272f067c21cb99e(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[-2420807821085804151, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[-2420807821085804151, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_b4f731f2232843228fbb8a80669f5d6f(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[-3409018997018743123, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[-3409018997018743123, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_2b3701d80de34b40b176c7e62aabd25f(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[-180, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[-180, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_45818dc9e91e41c391fa3047dca9e474(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[-330, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[-330, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_dabfaa6cf7b94610b81262c7f9894fef(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[-421, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[-421, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_28fbe40318ce42a7a5fd1ee28e14458d(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[0, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[0, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_279532e93ae84bd9bed1bf90f4428941(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[0, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[0, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_7d897a945669483eaf9014b505499792(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[0, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[0, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_db2798d8e21d42b286eda749dc3c90a0(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[1461055374234865915, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[1461055374234865915, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_9ccca1aefaf043609f7c0708e9a6578e(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[2335276838901610646, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[2335276838901610646, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_dae1ff1641fb49548f30b26f54c47566(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[570933242364456186, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[570933242364456186, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_42c9a9e6dc2442af971d7d2278bf7400(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[994, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[994, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_f2ec09d613b146e48a487485c1b90e8e(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[943, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[943, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_11724f3966f947f8b3eda44d1d22324c(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[199, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[199, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_6389de1635ad4681bc8f70fc0eee1a27(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[8526227503029157372, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[8526227503029157372, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_c2061993b6404830bf7f44b70b66133d(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[7084305517131841305, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[7084305517131841305, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_5931703f73b043a09fa6fd7897df5a45(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_c8abae97e7c24627b26ca40cf7fccba4(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, -8736418112569525826, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, -8736418112569525826, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_bdb9cad9a3824a24b4a71b9a550e5623(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, -7462078265811997708, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, -7462078265811997708, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_0467fe21c54e477fa9eee3ae0faf96b9(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, -7639708591453651354, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, -7639708591453651354, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_a101114c4a604f94827ca235691bbd82(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, -4449899796604440793, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, -4449899796604440793, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_5749b95df7db47299d86cd617c92b208(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, -481283442090188829, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, -481283442090188829, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_a335fadb1fc841e5961e5191ece775ff(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, -3826906411031990765, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, -3826906411031990765, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_cb9ba04e1346493da5d6c15c943fc630(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, -175, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, -175, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_70b1615fc064437ab7fa8f78533100f6(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, -974, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, -974, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_0c03cdfb07ad496298bac507593d7f90(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, -929, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, -929, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_c2201b8f95fe4edea529318e0110f414(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_c9971c2475ff4ba3bfc4666c0ede86ea(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_966ebc90a8c7450ca5b6fbf527db8cd2(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, 0, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, 0, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_76c4dca96b474d52baac0504640b7ee5(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, 2299004351204935507, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, 2299004351204935507, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_79101415071941d5be7a87449e1c3859(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, 718409013508866449, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, 718409013508866449, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_5822eeaf63ff404b9090b69882f3ff21(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, 3855680292900019305, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, 3855680292900019305, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_3dc79d78883a4e56870b4da91d853236(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, 223, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, 223, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_589eab1d6e6f4f62af6b4ef499706a48(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, 126, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, 126, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_a8b65acefb91484f8ed93be6c78ea673(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, 218, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, 218, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_149d9192aa214c28b89f56dbaa2852c9(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, 8031884445174416923, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, 8031884445174416923, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_a57773f9b7b9462d94598f67052896cc(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, 8194069775094425622, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, 8194069775094425622, Param3, Param4]), True, 'incorrect function return value encountered')
def test_approx_equal_cde3abc933664290be3613fb861bd201(self):
Param3 = type('',(object,), {})()
Param4 = type('',(object,), {})()
self.assertNotRaises(self.module.approx_equal, *[6508623219935938797, 8404225380505660358, Param3, Param4])
self.assertEqual(self.module.approx_equal(*[6508623219935938797, 8404225380505660358, Param3, Param4]), True, 'incorrect function return value encountered')
def test_perm_all_None(self):
self.assertRaises(TypeError, self.module.perm, *[None, None])
def test_perm_all_attr_None_wdef(self):
self.assertRaises(TypeError, self.module.perm, *[None, None])
def test_perm_all_attr_MetaParam_wdef(self):
self.assertRaises(TypeError, self.module.perm, *[None, None])
def test_perm_6c92bb388a07410fb605972dff0127c4(self):
Param1 = type('',(object,), {})()
Param2 = type('',(object,), {})()
self.assertRaises(TypeError, self.module.perm, *[Param1, Param2])
def test_perm_2e6abcc574504c39850f2bac58dd6bf3(self):
Param1 = type('',(object,), {})()
self.assertRaises(TypeError, self.module.perm, *[Param1, 0])
def test_perm_3a325e3a4e60413bb13eca033b1553a9(self):
self.assertNotRaises(self.module.perm, *[0, 0])
self.assertEqual(self.module.perm(*[0, 0]), 1, 'incorrect function return value encountered')
def test_perm_fd60de189d8f44099d9a481566f4d148(self):
self.assertNotRaises(self.module.perm, *[1, 0])
self.assertEqual(self.module.perm(*[1, 0]), 1, 'incorrect function return value encountered')
def test_perm_b5cd59159f1946f589f1d0e3728b533c(self):
self.assertNotRaises(self.module.perm, *[0, 0])
self.assertEqual(self.module.perm(*[0, 0]), 1, 'incorrect function return value encountered')
def test_perm_fe8eec8e76c54ec389f8b3093d6a0019(self):
self.assertNotRaises(self.module.perm, *[0, 0])
self.assertEqual(self.module.perm(*[0, 0]), 1, 'incorrect function return value encountered')
def test_perm_c04700ba307e451296848d6ef3d3a935(self):
self.assertNotRaises(self.module.perm, *[0, 0])
self.assertEqual(self.module.perm(*[0, 0]), 1, 'incorrect function return value encountered')
def test_perm_5fd15087f9854ab58bfa58ec67b59a65(self):
self.assertNotRaises(self.module.perm, *[757, 0])
self.assertEqual(self.module.perm(*[757, 0]), 1L, 'incorrect function return value encountered')
def test_perm_7017f5a4c4e745e3b8bbbaca3b2bbb44(self):
self.assertNotRaises(self.module.perm, *[465, 0])
self.assertEqual(self.module.perm(*[465, 0]), 1L, 'incorrect function return value encountered')
def test_perm_372ab171a348497b8035515f12b2f321(self):
self.assertNotRaises(self.module.perm, *[793, 0])
self.assertEqual(self.module.perm(*[793, 0]), 1L, 'incorrect function return value encountered')
def test_argmax_all_None(self):
self.assertRaises(TypeError, self.module.argmax, *[None])
def test_argmax_all_attr_None_wdef(self):
self.assertRaises(TypeError, self.module.argmax, *[None])
def test_argmax_all_attr_MetaParam_wdef(self):
self.assertRaises(TypeError, self.module.argmax, *[None])
def test_argmax_5fe63084cde8447f8311ce3d42c7049a(self):
Param1 = type('',(object,), {})()
self.assertRaises(TypeError, self.module.argmax, *[Param1])
def test_argmax_b267d6b7aff44a30806ed9988fc6b85b(self):
Param1 = type('',(object,), {})()
self.assertRaises(TypeError, self.module.argmax, *[Param1])
def test_argmax_90eec8ae2a2a431eb81304d53a3dae25(self):
Param1 = type('',(object,), {})()
self.assertRaises(TypeError, self.module.argmax, *[Param1])
def test_mean_all_None(self):
self.assertRaises(TypeError, self.module.mean, *[None])
def test_mean_all_attr_None_wdef(self):
self.assertRaises(TypeError, self.module.mean, *[None])
def test_mean_all_attr_MetaParam_wdef(self):
self.assertRaises(TypeError, self.module.mean, *[None])
def test_mean_3f5e12f197c14380b0a51e60e4118d19(self):
Param1 = type('',(object,), {})()
self.assertRaises(TypeError, self.module.mean, *[Param1])
def test_mean_c2a640b8ae1e48e1a110391a15a533d8(self):
Param1 = type('',(object,), {})()
self.assertRaises(TypeError, self.module.mean, *[Param1])
def test_mean_45e29a753ace4029a4542c8f50ee3ce9(self):
Param1 = type('',(object,), {})()
self.assertRaises(TypeError, self.module.mean, *[Param1])
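# Illustrative sketch (assumption, not part of the generated suite): the tests above
# call ``self.assertNotRaises``, which is not part of the standard ``unittest.TestCase``
# API; a helper along these lines is presumed to be supplied by the suite's base class.
# It is included here only to document the expected behaviour and is never mixed in.
class _AssertNotRaisesSketch(object):
    def assertNotRaises(self, func, *args, **kwargs):
        # Fail the test if calling ``func`` raises any exception at all.
        try:
            func(*args, **kwargs)
        except Exception as exc:
            self.fail('unexpected exception raised: %r' % (exc,))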
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python3
from reporter.connections import RedcapInstance
from reporter.emailing import (
RECIPIENT_PREDICT_ADMIN as RECIPIENT_ADMIN,
RECIPIENT_PREDICT_MANAGER as RECIPIENT_MANAGER,
RECIPIENT_IT_DQ,
)
from reporter.application_abstract_reports.redcap.data_quality import (
RedcapInvalidStudyNumber,
RedcapInvalidNhsNumber,
RedcapRecordInvalidStudyNumber,
RedcapInvalidBloodPressure,
RedcapInvalidPulse,
RedcapInvalidHeightInCm,
RedcapInvalidWeightInKg,
RedcapInvalidBmi,
RedcapInvalidDate,
RedcapInvalidUhlSystemNumber,
RedcapInvalidPostCode,
RedcapMissingData,
RedcapXrefMismatch,
)
from reporter.application_abstract_reports.redcap.percentage_complete import (
RedcapPercentageCompleteReport,
)
from reporter.application_abstract_reports.redcap.withdrawn_or_excluded_with_data import (
RedcapWithdrawnOrExcludedWithDataReport,
)
from reporter.application_abstract_reports.redcap.web_data_quality import (
RedcapWebDataQuality,
)
CRF_PROJECT_ID = 62
DEMOGRAPHICS_PROJECT_ID = 63
REDCAP_INSTANCE = RedcapInstance.internal
# All
class PredictRedcapPercentageCompleteReport(RedcapPercentageCompleteReport):
def __init__(self):
super().__init__(
study_name='Predict',
recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
)
class PredictRedcapWithdrawnOrExcludedWithDataReport(
RedcapWithdrawnOrExcludedWithDataReport):
def __init__(self):
super().__init__(
study_name='Predict',
recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
)
# CRF Validation
class PredictRedcapCrfMissingData(
RedcapMissingData):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=CRF_PROJECT_ID,
fields=[
'patient_id',
'date_of_visit',
'consent_date',
'dob',
'age_years',
'gender',
'height_cm',
'weight_kg',
'bmi_kg_m2',
'hip_circumference_cm',
'waist_circumference_cm',
'smoker',
'ethnicity',
'sbp1_mmhg',
'sbp2_mmhg',
'sbp3_mmhg',
'avg_sbp_mmhg',
'dbp1_mmhg',
'dbp2_mmhg',
'dbp3_mmhg',
'avg_dbp_mmhg',
'hr1_bpm',
'hr2_bpm',
'hr3_bpm',
'avg_hr_bpm',
],
recipients=[RECIPIENT_ADMIN],
)
class PredictRedcapCrfInvalidPatientId(
RedcapInvalidStudyNumber):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=CRF_PROJECT_ID,
fields=['patient_id'],
recipients=[RECIPIENT_ADMIN],
)
class PredictRedcapCrfRecordInvalidStudyNumber(
RedcapRecordInvalidStudyNumber):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=CRF_PROJECT_ID,
recipients=[RECIPIENT_ADMIN],
)
class PredictRedcapCrfInvalidDates(
RedcapInvalidDate):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=CRF_PROJECT_ID,
recipients=[RECIPIENT_ADMIN],
)
class PredictRedcapCrfInvalidBloodPressure1(
RedcapInvalidBloodPressure):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=CRF_PROJECT_ID,
systolic_field_name='sbp1_mmhg',
diastolic_field_name='dbp1_mmhg',
recipients=[RECIPIENT_ADMIN],
)
class PredictRedcapCrfInvalidBloodPressure2(
RedcapInvalidBloodPressure):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=CRF_PROJECT_ID,
systolic_field_name='sbp2_mmhg',
diastolic_field_name='dbp2_mmhg',
recipients=[RECIPIENT_ADMIN],
)
class PredictRedcapCrfInvalidBloodPressure3(
RedcapInvalidBloodPressure):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=CRF_PROJECT_ID,
systolic_field_name='sbp3_mmhg',
diastolic_field_name='dbp3_mmhg',
recipients=[RECIPIENT_ADMIN],
)
class PredictRedcapCrfInvalidBloodPressureAvg(
RedcapInvalidBloodPressure):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=CRF_PROJECT_ID,
systolic_field_name='avg_sbp_mmhg',
diastolic_field_name='avg_dbp_mmhg',
recipients=[RECIPIENT_ADMIN],
)
class PredictRedcapCrfInvalidPulse(
RedcapInvalidPulse):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=CRF_PROJECT_ID,
fields=['hr1_bpm', 'hr2_bpm', 'hr3_bpm', 'avg_hr_bpm'],
recipients=[RECIPIENT_ADMIN],
)
class PredictRedcapCrfInvalidHeightInCm(
RedcapInvalidHeightInCm):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=CRF_PROJECT_ID,
fields=['height_cm'],
recipients=[RECIPIENT_ADMIN],
)
class PredictRedcapCrfInvalidWeightInKg(
RedcapInvalidWeightInKg):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=CRF_PROJECT_ID,
fields=['weight_kg'],
recipients=[RECIPIENT_ADMIN],
)
class PredictRedcapCrfInvalidBmi(
RedcapInvalidBmi):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=CRF_PROJECT_ID,
fields=['bmi_kg_m2'],
recipients=[RECIPIENT_ADMIN],
)
# Demographics Validation
class PredictRedcapDemographicsMissingData(
RedcapMissingData):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
            project_id=DEMOGRAPHICS_PROJECT_ID,
fields=[
'patient_id',
'research_appt_date',
'nhs_no',
's_no',
'first_name',
'last_name',
'dob',
'add_1',
'postcode',
'gender',
'ethnicity',
'gp_name',
'gp_address_line_1',
'gp_postcode',
],
recipients=[RECIPIENT_ADMIN],
)
class PredictRedcapDemographicsInvalidNhsNumber(
RedcapInvalidNhsNumber):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=DEMOGRAPHICS_PROJECT_ID,
fields=['nhs_no'],
recipients=[RECIPIENT_ADMIN],
)
class PredictRedcapDemographicsInvalidDates(
RedcapInvalidDate):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=DEMOGRAPHICS_PROJECT_ID,
recipients=[RECIPIENT_ADMIN],
)
class PredictRedcapDemographicsInvalidUhlSystemNumber(
RedcapInvalidUhlSystemNumber):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=DEMOGRAPHICS_PROJECT_ID,
fields=['s_no'],
recipients=[RECIPIENT_ADMIN],
)
class PredictRedcapDemographicsInvalidPostCode(
RedcapInvalidPostCode):
def __init__(self):
super().__init__(
redcap_instance=REDCAP_INSTANCE,
project_id=DEMOGRAPHICS_PROJECT_ID,
fields=['postcode', 'gp_postcode'],
recipients=[RECIPIENT_ADMIN],
)
# CRF / Demographics XRef Validation
class PredictRedcapXrefMismatchPatientId(
RedcapXrefMismatch):
def __init__(self):
super().__init__(
redcap_instance_a=REDCAP_INSTANCE,
project_id_a=CRF_PROJECT_ID,
field_name_a='patient_id',
redcap_instance_b=REDCAP_INSTANCE,
project_id_b=DEMOGRAPHICS_PROJECT_ID,
field_name_b='patient_id',
recipients=[RECIPIENT_ADMIN],
)
class PredictRedcapXrefMismatchDob(
RedcapXrefMismatch):
def __init__(self):
super().__init__(
redcap_instance_a=REDCAP_INSTANCE,
project_id_a=CRF_PROJECT_ID,
field_name_a='dob',
redcap_instance_b=REDCAP_INSTANCE,
project_id_b=DEMOGRAPHICS_PROJECT_ID,
field_name_b='dob',
recipients=[RECIPIENT_ADMIN],
)
class PredictRedcapXrefMismatchGender(
RedcapXrefMismatch):
def __init__(self):
super().__init__(
redcap_instance_a=REDCAP_INSTANCE,
project_id_a=CRF_PROJECT_ID,
field_name_a='gender',
redcap_instance_b=REDCAP_INSTANCE,
project_id_b=DEMOGRAPHICS_PROJECT_ID,
field_name_b='gender',
recipients=[RECIPIENT_ADMIN],
)
class PredictRedcapXrefMismatchEthnicity(
RedcapXrefMismatch):
def __init__(self):
super().__init__(
redcap_instance_a=REDCAP_INSTANCE,
project_id_a=CRF_PROJECT_ID,
field_name_a='ethnicity',
redcap_instance_b=REDCAP_INSTANCE,
project_id_b=DEMOGRAPHICS_PROJECT_ID,
field_name_b='ethnicity',
recipients=[RECIPIENT_ADMIN],
)
class PredictRedcapCrfWebDataQuality(RedcapWebDataQuality):
def __init__(self):
super().__init__(
REDCAP_INSTANCE,
CRF_PROJECT_ID,
[RECIPIENT_IT_DQ]
)
class PredictRedcapDemographicsWebDataQuality(RedcapWebDataQuality):
def __init__(self):
super().__init__(
REDCAP_INSTANCE,
DEMOGRAPHICS_PROJECT_ID,
[RECIPIENT_IT_DQ]
)
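# Illustrative sketch (assumption, not part of the study configuration): a further
# check would follow the same pattern as the classes above -- subclass the relevant
# abstract report and pass the REDCap instance, project id, fields and recipients.
# Left commented out because the reporter framework may auto-discover report classes;
# the field name below is hypothetical.
#
# class PredictRedcapDemographicsMissingGpDetails(RedcapMissingData):
#     def __init__(self):
#         super().__init__(
#             redcap_instance=REDCAP_INSTANCE,
#             project_id=DEMOGRAPHICS_PROJECT_ID,
#             fields=['gp_practice_name'],
#             recipients=[RECIPIENT_ADMIN],
#         )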
|
|
# coding: utf-8
"""
Stakeholder engagement API
This API enables Intelligent Engagement for your Business. iEngage is a platform that combines process, augmented intelligence and rewards to help you intelligently engage customers.
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Attachment(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, headers=None, object=None, content_id=None, data_handler=None, content_disposition=None, content_type=None):
"""
Attachment - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'headers': 'dict(str, list[str])',
'object': 'object',
'content_id': 'str',
'data_handler': 'DataHandler',
'content_disposition': 'ContentDisposition',
'content_type': 'MediaType'
}
self.attribute_map = {
'headers': 'headers',
'object': 'object',
'content_id': 'contentId',
'data_handler': 'dataHandler',
'content_disposition': 'contentDisposition',
'content_type': 'contentType'
}
self._headers = headers
self._object = object
self._content_id = content_id
self._data_handler = data_handler
self._content_disposition = content_disposition
self._content_type = content_type
@property
def headers(self):
"""
Gets the headers of this Attachment.
:return: The headers of this Attachment.
:rtype: dict(str, list[str])
"""
return self._headers
@headers.setter
def headers(self, headers):
"""
Sets the headers of this Attachment.
:param headers: The headers of this Attachment.
:type: dict(str, list[str])
"""
self._headers = headers
@property
def object(self):
"""
Gets the object of this Attachment.
:return: The object of this Attachment.
:rtype: object
"""
return self._object
@object.setter
def object(self, object):
"""
Sets the object of this Attachment.
:param object: The object of this Attachment.
:type: object
"""
self._object = object
@property
def content_id(self):
"""
Gets the content_id of this Attachment.
:return: The content_id of this Attachment.
:rtype: str
"""
return self._content_id
@content_id.setter
def content_id(self, content_id):
"""
Sets the content_id of this Attachment.
:param content_id: The content_id of this Attachment.
:type: str
"""
self._content_id = content_id
@property
def data_handler(self):
"""
Gets the data_handler of this Attachment.
:return: The data_handler of this Attachment.
:rtype: DataHandler
"""
return self._data_handler
@data_handler.setter
def data_handler(self, data_handler):
"""
Sets the data_handler of this Attachment.
:param data_handler: The data_handler of this Attachment.
:type: DataHandler
"""
self._data_handler = data_handler
@property
def content_disposition(self):
"""
Gets the content_disposition of this Attachment.
:return: The content_disposition of this Attachment.
:rtype: ContentDisposition
"""
return self._content_disposition
@content_disposition.setter
def content_disposition(self, content_disposition):
"""
Sets the content_disposition of this Attachment.
:param content_disposition: The content_disposition of this Attachment.
:type: ContentDisposition
"""
self._content_disposition = content_disposition
@property
def content_type(self):
"""
Gets the content_type of this Attachment.
:return: The content_type of this Attachment.
:rtype: MediaType
"""
return self._content_type
@content_type.setter
def content_type(self, content_type):
"""
Sets the content_type of this Attachment.
:param content_type: The content_type of this Attachment.
:type: MediaType
"""
self._content_type = content_type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
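# Illustrative sketch (assumption, not part of the generated model): builds an
# Attachment and serialises it with the methods defined above. Note that __init__
# assigns the private attributes directly, so the property setters are only exercised
# when a field is changed after construction. The example values are hypothetical.
def _attachment_usage_sketch():
    attachment = Attachment(
        headers={'Content-Length': ['42']},
        content_id='part-1',
    )
    attachment.content_id = 'part-2'  # goes through the content_id setter
    # to_dict() walks swagger_types; to_str() pretty-prints that dict.
    return attachment.to_dict(), attachment.to_str()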
|
|
# Meme: a fast mind-mapping tool
# (c) 2010 Jamie Webb - MIT license
import math, sys, pygtk, gtk, cairo, time
pygtk.require('2.0')
from meme.layout import Layout
from meme.model import *
from meme.renderer import Renderer
from meme.style import GlobalStyle
from meme import io
class MemeGui(Observer):
def __init__(self, xml, model = None):
self._layout = None
self._renderer = None
self._filename = None
if model:
self._model = model
else:
self._model = Model()
self._model.observe(self)
self._builder = gtk.Builder()
self._builder.add_from_file(xml)
self._builder.connect_signals(self)
self._main_win = self._builder.get_object("main_win")
self._canvas = self._builder.get_object("canvas")
self._text = self._builder.get_object("text")
self._undo = self._builder.get_object("undo_action")
self._redo = self._builder.get_object("redo_action")
self._save = self._builder.get_object("save_action")
self._main_win.show()
self._canvas.grab_focus()
def on_return(m):
n = Node()
sib = m.current
if not sib or not sib.parent:
return
m.do(AddCommand(sib.parent, n, sib.index + 1))
self._text.grab_focus()
self._text.set_text("")
def on_tab(m):
n = Node()
m.do(AddCommand(m.current or m.root, n))
self._text.grab_focus()
self._text.set_text("")
def on_delete(m):
n = self._model.current
if n and n.parent:
m.do(DeleteCommand(n))
self._key_dispatch = {
gtk.keysyms.Return: on_return,
gtk.keysyms.Tab: on_tab,
gtk.keysyms.Left: lambda m: m.move(lambda n: n.parent),
gtk.keysyms.Right: lambda m: m.move(lambda n: n.child(0)),
gtk.keysyms.Up: lambda m: m.move(lambda n: n.find_sibling(-1)),
gtk.keysyms.Down: lambda m: m.move(lambda n: n.find_sibling(1)),
gtk.keysyms.F4: lambda m: m.do(m.current and ColorCommand(m.current, 0)),
gtk.keysyms.F5: lambda m: m.do(m.current and ColorCommand(m.current, 1)),
gtk.keysyms.F6: lambda m: m.do(m.current and ColorCommand(m.current, 2)),
gtk.keysyms.F7: lambda m: m.do(m.current and ColorCommand(m.current, 3)),
gtk.keysyms.F8: lambda m: m.do(m.current and ColorCommand(m.current, 4)),
gtk.keysyms.Insert: lambda m: self._text.grab_focus(),
gtk.keysyms.Escape: lambda m: m.click(m.root),
gtk.keysyms.Delete: on_delete
}
self._update_tools()
def _update_tools(self):
self._undo.set_sensitive(self._model.has_undo)
self._redo.set_sensitive(self._model.has_redo)
self._save.set_sensitive(self._filename is not None and not self._model.is_clean)
def on_main_win_destroy(self, widget, data = None):
gtk.main_quit()
def on_file_quit_activate(self, widget, data = None):
gtk.main_quit()
def on_help_about_activate(self, widget, data = None):
self._about = self._builder.get_object("about_dialog")
self._about.run()
self._about.hide()
def on_canvas_configure_event(self, widget, data = None):
x, y, w, h = widget.get_allocation()
if self._renderer:
self._renderer.resize(w, h)
else:
style = GlobalStyle()
self._renderer = Renderer(style, self._canvas, widget.window, w, h)
self._layout = Layout(self._model, style, self._renderer)
return True
def on_canvas_expose_event(self, widget, data = None):
if self._renderer:
self._renderer.redraw(*data.area)
return False
def on_canvas_button_press_event(self, widget, data = None):
widget.grab_focus()
node = self._layout.find(data.x, data.y)
self._model.click(node)
def on_canvas_key_press_event(self, widget, data = None):
k = data.keyval
model = self._model
if k == gtk.keysyms.space:
model.toggle_expand()
elif k < 255:
if not model.current:
model.new_child()
self._text.grab_focus()
self._text.set_text(chr(k))
self._text.set_position(1)
elif k in self._key_dispatch:
self._key_dispatch[k](model)
#else:
# print "No binding for %d / %s" % (k, data)
return True
def on_text_key_press_event(self, widget, data = None):
if data.keyval == gtk.keysyms.Return:
self._canvas.grab_focus()
elif data.keyval == gtk.keysyms.Escape:
widget.set_text(self._model.title or "")
self._canvas.grab_focus()
elif data.keyval in [gtk.keysyms.Up, gtk.keysyms.Down]:
self._canvas.grab_focus()
self.on_canvas_key_press_event(self._canvas, data)
def on_text_focus_out_event(self, widget, data = None):
if self._model.current:
self._model.do(EditCommand(self._model.current, widget.get_text()))
widget.set_text(self._model.title or "")
def on_undo_action_activate(self, widget, data = None):
self._model.undo()
def on_redo_action_activate(self, widget, data = None):
self._model.redo()
def on_new_action_activate(self, widget, data = None):
self._filename = None
self._model = Model()
self._layout = Layout(self._model, GlobalStyle(), self._renderer)
self._model.observe(self)
self._canvas.grab_focus()
self._update_tools()
def on_open_action_activate(self, widget, data = None):
dlg = gtk.FileChooserDialog("Open...", action = gtk.FILE_CHOOSER_ACTION_OPEN,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
r = dlg.run()
if r == gtk.RESPONSE_OK:
f = dlg.get_filename()
if f.endswith(".mm"):
root = io.read_freemind(f)
self._filename = None
else:
root = io.read_native(f)
self._filename = f
self._model = Model(root)
self._layout = Layout(self._model, GlobalStyle(), self._renderer)
self._model.observe(self)
self._canvas.grab_focus()
self._update_tools()
dlg.destroy()
def on_save_action_activate(self, widget, data = None):
if self._filename:
io.write_native(self._filename, self._model.root)
self._model.mark_clean()
self._update_tools()
def on_saveas_action_activate(self, widget, data = None):
dlg = gtk.FileChooserDialog("Save As...", action = gtk.FILE_CHOOSER_ACTION_SAVE,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE, gtk.RESPONSE_OK))
r = dlg.run()
if r == gtk.RESPONSE_OK:
f = dlg.get_filename()
io.write_native(f, self._model.root)
self._filename = f
self._model.mark_clean()
dlg.destroy()
self._update_tools()
def on_node_select(self, model, node, old):
self._text.set_text(node.title if node else "")
self._update_tools()
def on_node_change(self, model, node):
self._update_tools()
def on_node_add(self, model, node, pos):
self._update_tools()
def on_node_delete(self, model, node, pos):
self._update_tools()
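# Illustrative sketch (assumption, not part of the original module): the GUI is
# presumably launched from a separate entry point; a minimal launcher builds the
# window from the GtkBuilder XML and hands control to the GTK main loop. The default
# path to the .ui file is hypothetical.
if __name__ == "__main__":
    gui = MemeGui(sys.argv[1] if len(sys.argv) > 1 else "meme.ui")
    gtk.main()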
# vim:sw=4 ts=4
|
|
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2016, Ilya Etingof <ilya@glas.net>
# License: http://pysnmp.sf.net/license.html
#
import time, sys
from pysnmp.proto.secmod.base import AbstractSecurityModel
from pysnmp.proto.secmod.rfc3414.auth import hmacmd5, hmacsha, noauth
from pysnmp.proto.secmod.rfc3414.priv import des, nopriv
from pysnmp.proto.secmod.rfc3826.priv import aes
from pysnmp.proto.secmod.eso.priv import des3, aes192, aes256
from pysnmp.smi.error import NoSuchInstanceError
from pysnmp.proto import rfc1155, errind, error
from pysnmp import debug
from pyasn1.type import univ, namedtype, constraint
from pyasn1.codec.ber import encoder, decoder, eoo
from pyasn1.error import PyAsn1Error
from pyasn1.compat.octets import null
# USM security params
class UsmSecurityParameters(rfc1155.TypeCoercionHackMixIn, univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('msgAuthoritativeEngineId', univ.OctetString()),
namedtype.NamedType('msgAuthoritativeEngineBoots', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
namedtype.NamedType('msgAuthoritativeEngineTime', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
namedtype.NamedType('msgUserName', univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(0, 32))),
namedtype.NamedType('msgAuthenticationParameters', univ.OctetString()),
namedtype.NamedType('msgPrivacyParameters', univ.OctetString())
)
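# Illustrative sketch (assumption, not part of the original module): shows how the
# UsmSecurityParameters SEQUENCE round-trips through the BER codec that the security
# model uses below, mirroring the encoder/decoder calls made in
# __generateRequestOrResponseMsg and processIncomingMsg. The field values are
# hypothetical; the function is never called by the module.
def _usm_security_parameters_roundtrip_sketch():
    params = UsmSecurityParameters()
    params.setComponentByPosition(0, 'test-engine-id')  # msgAuthoritativeEngineId
    params.setComponentByPosition(1, 1)                 # msgAuthoritativeEngineBoots
    params.setComponentByPosition(2, 0)                 # msgAuthoritativeEngineTime
    params.setComponentByPosition(3, 'usr-md5-des')     # msgUserName
    params.setComponentByPosition(4, '')                # msgAuthenticationParameters
    params.setComponentByPosition(5, '')                # msgPrivacyParameters
    substrate = encoder.encode(params)
    decoded, _rest = decoder.decode(substrate, asn1Spec=UsmSecurityParameters())
    return decoded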
class SnmpUSMSecurityModel(AbstractSecurityModel):
securityModelID = 3
authServices = {hmacmd5.HmacMd5.serviceID: hmacmd5.HmacMd5(),
hmacsha.HmacSha.serviceID: hmacsha.HmacSha(),
noauth.NoAuth.serviceID: noauth.NoAuth()}
privServices = {des.Des.serviceID: des.Des(),
des3.Des3.serviceID: des3.Des3(),
aes.Aes.serviceID: aes.Aes(),
aes192.Aes192.serviceID: aes192.Aes192(),
aes256.Aes256.serviceID: aes256.Aes256(),
nopriv.NoPriv.serviceID: nopriv.NoPriv()}
def __init__(self):
AbstractSecurityModel.__init__(self)
self.__securityParametersSpec = UsmSecurityParameters()
self.__timeline = {}
self.__timelineExpQueue = {}
self.__expirationTimer = 0
self.__paramsBranchId = -1
def __sec2usr(self, snmpEngine, securityName, securityEngineID=None):
usmUserEngineID, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('SNMP-USER-BASED-SM-MIB', 'usmUserEngineID')
if self.__paramsBranchId != usmUserEngineID.branchVersionId:
usmUserName, usmUserSecurityName = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('SNMP-USER-BASED-SM-MIB', 'usmUserName', 'usmUserSecurityName')
self.__securityToUserMap = {}
nextMibNode = usmUserEngineID
while True:
try:
nextMibNode = usmUserEngineID.getNextNode(nextMibNode.name)
except NoSuchInstanceError:
self.__paramsBranchId = usmUserEngineID.branchVersionId
debug.logger & debug.flagSM and debug.logger('_sec2usr: built snmpEngineId + securityName to userName map, version %s: %r' % (self.__paramsBranchId, self.__securityToUserMap))
break
instId = nextMibNode.name[len(usmUserSecurityName.name):]
__engineID = usmUserEngineID.getNode(usmUserEngineID.name + instId).syntax
__userName = usmUserName.getNode(usmUserName.name + instId).syntax
__securityName = usmUserSecurityName.getNode(usmUserSecurityName.name + instId).syntax
k = __engineID, __securityName
# first (lesser) securityName wins
if k not in self.__securityToUserMap:
self.__securityToUserMap[k] = __userName
if securityEngineID is None:
snmpEngineID, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')
securityEngineID = snmpEngineID.syntax
try:
userName = self.__securityToUserMap[(securityEngineID, securityName)]
except KeyError:
debug.logger & debug.flagSM and debug.logger('_sec2usr: no entry exists for snmpEngineId %r, securityName %r' % (securityEngineID, securityName))
raise NoSuchInstanceError() # emulate MIB lookup
debug.logger & debug.flagSM and debug.logger('_sec2usr: using userName %r for snmpEngineId %r, securityName %r' % (userName, securityEngineID, securityName))
return userName
def __getUserInfo(self, mibInstrumController, securityEngineID, userName):
usmUserEntry, = mibInstrumController.mibBuilder.importSymbols(
'SNMP-USER-BASED-SM-MIB', 'usmUserEntry'
)
tblIdx = usmUserEntry.getInstIdFromIndices(securityEngineID, userName)
# Get userName & securityName
usmUserName = usmUserEntry.getNode(usmUserEntry.name + (2,) + tblIdx).syntax
usmUserSecurityName = usmUserEntry.getNode(usmUserEntry.name + (3,) + tblIdx).syntax
# Get protocols
usmUserAuthProtocol = usmUserEntry.getNode(usmUserEntry.name + (5,) + tblIdx).syntax
usmUserPrivProtocol = usmUserEntry.getNode(usmUserEntry.name + (8,) + tblIdx).syntax
# Get keys
pysnmpUsmKeyEntry, = mibInstrumController.mibBuilder.importSymbols(
'PYSNMP-USM-MIB', 'pysnmpUsmKeyEntry'
)
pysnmpUsmKeyAuthLocalized = pysnmpUsmKeyEntry.getNode(pysnmpUsmKeyEntry.name + (1,) + tblIdx).syntax
pysnmpUsmKeyPrivLocalized = pysnmpUsmKeyEntry.getNode(pysnmpUsmKeyEntry.name + (2,) + tblIdx).syntax
return (usmUserName, usmUserSecurityName, usmUserAuthProtocol,
pysnmpUsmKeyAuthLocalized, usmUserPrivProtocol,
pysnmpUsmKeyPrivLocalized)
def __cloneUserInfo(self, mibInstrumController, securityEngineID,
userName):
snmpEngineID, = mibInstrumController.mibBuilder.importSymbols(
'__SNMP-FRAMEWORK-MIB', 'snmpEngineID'
)
# Proto entry
usmUserEntry, = mibInstrumController.mibBuilder.importSymbols(
'SNMP-USER-BASED-SM-MIB', 'usmUserEntry'
)
tblIdx1 = usmUserEntry.getInstIdFromIndices(
snmpEngineID.syntax, userName
)
# Get proto protocols
usmUserName = usmUserEntry.getNode(usmUserEntry.name + (2,) + tblIdx1)
usmUserSecurityName = usmUserEntry.getNode(usmUserEntry.name + (3,) + tblIdx1)
usmUserCloneFrom = usmUserEntry.getNode(usmUserEntry.name + (4,) + tblIdx1)
usmUserAuthProtocol = usmUserEntry.getNode(usmUserEntry.name + (5,) + tblIdx1)
usmUserPrivProtocol = usmUserEntry.getNode(usmUserEntry.name + (8,) + tblIdx1)
# Get proto keys
pysnmpUsmKeyEntry, = mibInstrumController.mibBuilder.importSymbols(
'PYSNMP-USM-MIB', 'pysnmpUsmKeyEntry'
)
pysnmpUsmKeyAuth = pysnmpUsmKeyEntry.getNode(pysnmpUsmKeyEntry.name + (3,) + tblIdx1)
pysnmpUsmKeyPriv = pysnmpUsmKeyEntry.getNode(pysnmpUsmKeyEntry.name + (4,) + tblIdx1)
# Create new row from proto values
tblIdx2 = usmUserEntry.getInstIdFromIndices(securityEngineID, userName)
# New row
mibInstrumController.writeVars(
((usmUserEntry.name + (13,) + tblIdx2, 4),)
)
# Set user&securityNames
usmUserEntry.getNode(usmUserEntry.name + (2,) + tblIdx2).syntax = usmUserName.syntax
usmUserEntry.getNode(usmUserEntry.name + (3,) + tblIdx2).syntax = usmUserSecurityName.syntax
# Store a reference to original row
usmUserEntry.getNode(usmUserEntry.name + (4,) + tblIdx2).syntax = usmUserCloneFrom.syntax.clone(tblIdx1)
# Set protocols
usmUserEntry.getNode(usmUserEntry.name + (5,) + tblIdx2).syntax = usmUserAuthProtocol.syntax
usmUserEntry.getNode(usmUserEntry.name + (8,) + tblIdx2).syntax = usmUserPrivProtocol.syntax
# Localize and set keys
pysnmpUsmKeyEntry, = mibInstrumController.mibBuilder.importSymbols(
'PYSNMP-USM-MIB', 'pysnmpUsmKeyEntry'
)
pysnmpUsmKeyAuthLocalized = pysnmpUsmKeyEntry.getNode(
pysnmpUsmKeyEntry.name + (1,) + tblIdx2
)
if usmUserAuthProtocol.syntax in self.authServices:
localizeKey = self.authServices[usmUserAuthProtocol.syntax].localizeKey
localAuthKey = localizeKey(pysnmpUsmKeyAuth.syntax,
securityEngineID)
else:
raise error.StatusInformation(
errorIndication=errind.unsupportedAuthProtocol
)
if localAuthKey is not None:
pysnmpUsmKeyAuthLocalized.syntax = pysnmpUsmKeyAuthLocalized.syntax.clone(localAuthKey)
pysnmpUsmKeyPrivLocalized = pysnmpUsmKeyEntry.getNode(
pysnmpUsmKeyEntry.name + (2,) + tblIdx2
)
if usmUserPrivProtocol.syntax in self.privServices:
localizeKey = self.privServices[usmUserPrivProtocol.syntax].localizeKey
localPrivKey = localizeKey(usmUserAuthProtocol.syntax,
pysnmpUsmKeyPriv.syntax,
securityEngineID)
else:
raise error.StatusInformation(errorIndication=errind.unsupportedPrivProtocol)
if localPrivKey is not None:
pysnmpUsmKeyPrivLocalized.syntax = pysnmpUsmKeyPrivLocalized.syntax.clone(localPrivKey)
return (usmUserName.syntax, usmUserSecurityName.syntax,
usmUserAuthProtocol.syntax, pysnmpUsmKeyAuthLocalized.syntax,
usmUserPrivProtocol.syntax, pysnmpUsmKeyPrivLocalized.syntax)
def __generateRequestOrResponseMsg(self, snmpEngine,
messageProcessingModel,
globalData, maxMessageSize,
securityModel, securityEngineID,
securityName, securityLevel,
scopedPDU, securityStateReference):
snmpEngineID = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')[0].syntax
# 3.1.1
if securityStateReference is not None:
# 3.1.1a
cachedSecurityData = self._cache.pop(securityStateReference)
usmUserName = cachedSecurityData['msgUserName']
if 'usmUserSecurityName' in cachedSecurityData:
usmUserSecurityName = cachedSecurityData['usmUserSecurityName']
else:
usmUserSecurityName = usmUserName
if 'usmUserAuthProtocol' in cachedSecurityData:
usmUserAuthProtocol = cachedSecurityData['usmUserAuthProtocol']
else:
usmUserAuthProtocol = noauth.NoAuth.serviceID
if 'usmUserAuthKeyLocalized' in cachedSecurityData:
usmUserAuthKeyLocalized = cachedSecurityData['usmUserAuthKeyLocalized']
else:
usmUserAuthKeyLocalized = None
if 'usmUserPrivProtocol' in cachedSecurityData:
usmUserPrivProtocol = cachedSecurityData['usmUserPrivProtocol']
else:
usmUserPrivProtocol = nopriv.NoPriv.serviceID
if 'usmUserPrivKeyLocalized' in cachedSecurityData:
usmUserPrivKeyLocalized = cachedSecurityData['usmUserPrivKeyLocalized']
else:
usmUserPrivKeyLocalized = None
securityEngineID = snmpEngineID
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: user info read from cache')
elif securityName:
# 3.1.1b
try:
(usmUserName, usmUserSecurityName, usmUserAuthProtocol,
usmUserAuthKeyLocalized, usmUserPrivProtocol,
usmUserPrivKeyLocalized) = self.__getUserInfo(
snmpEngine.msgAndPduDsp.mibInstrumController,
securityEngineID,
self.__sec2usr(snmpEngine, securityName, securityEngineID)
)
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: read user info')
except NoSuchInstanceError:
pysnmpUsmDiscovery, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__PYSNMP-USM-MIB', 'pysnmpUsmDiscovery')
reportUnknownName = not pysnmpUsmDiscovery.syntax
if not reportUnknownName:
try:
(usmUserName, usmUserSecurityName,
usmUserAuthProtocol, usmUserAuthKeyLocalized,
usmUserPrivProtocol,
usmUserPrivKeyLocalized) = self.__cloneUserInfo(
snmpEngine.msgAndPduDsp.mibInstrumController,
securityEngineID,
self.__sec2usr(snmpEngine, securityName)
)
except NoSuchInstanceError:
reportUnknownName = True
if reportUnknownName:
raise error.StatusInformation(
errorIndication=errind.unknownSecurityName
)
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: clone user info')
except PyAsn1Error:
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: %s' % (sys.exc_info()[1],))
snmpInGenErrs, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpInGenErrs')
snmpInGenErrs.syntax += 1
raise error.StatusInformation(
errorIndication=errind.invalidMsg
)
else:
# empty username used for engineID discovery
usmUserName = usmUserSecurityName = null
usmUserAuthProtocol = noauth.NoAuth.serviceID
usmUserPrivProtocol = nopriv.NoPriv.serviceID
usmUserAuthKeyLocalized = usmUserPrivKeyLocalized = None
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: use empty USM data')
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: local usmUserName %r usmUserSecurityName %r usmUserAuthProtocol %s usmUserPrivProtocol %s securityEngineID %r securityName %r' % (usmUserName, usmUserSecurityName, usmUserAuthProtocol, usmUserPrivProtocol, securityEngineID, securityName))
msg = globalData
# 3.1.2
if securityLevel == 3:
if usmUserAuthProtocol == noauth.NoAuth.serviceID or \
usmUserPrivProtocol == nopriv.NoPriv.serviceID:
raise error.StatusInformation(
errorIndication=errind.unsupportedSecurityLevel
)
# 3.1.3
if securityLevel == 3 or securityLevel == 2:
if usmUserAuthProtocol == noauth.NoAuth.serviceID:
raise error.StatusInformation(
errorIndication=errind.unsupportedSecurityLevel
)
securityParameters = self.__securityParametersSpec
scopedPDUData = msg.setComponentByPosition(3).getComponentByPosition(3)
scopedPDUData.setComponentByPosition(
0, scopedPDU, verifyConstraints=False
)
# 3.1.6a
if securityStateReference is None and securityLevel in (2, 3):
if securityEngineID in self.__timeline:
(snmpEngineBoots, snmpEngineTime, latestReceivedEngineTime,
latestUpdateTimestamp) = self.__timeline[securityEngineID]
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: read snmpEngineBoots, snmpEngineTime from timeline')
else:
# 2.3 XXX is this correct?
snmpEngineBoots = snmpEngineTime = 0
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: no timeline for securityEngineID %r' % (securityEngineID,))
# 3.1.6.b
elif securityStateReference is not None: # XXX Report?
(snmpEngineBoots,
snmpEngineTime) = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineBoots', 'snmpEngineTime')
snmpEngineBoots = snmpEngineBoots.syntax
snmpEngineTime = snmpEngineTime.syntax.clone()
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: read snmpEngineBoots, snmpEngineTime from LCD')
# 3.1.6.c
else:
snmpEngineBoots = snmpEngineTime = 0
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: assuming zero snmpEngineBoots, snmpEngineTime')
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: use snmpEngineBoots %s snmpEngineTime %s for securityEngineID %r' % (snmpEngineBoots, snmpEngineTime, securityEngineID))
# 3.1.4a
if securityLevel == 3:
if usmUserPrivProtocol in self.privServices:
privHandler = self.privServices[usmUserPrivProtocol]
else:
raise error.StatusInformation(
errorIndication=errind.encryptionError
)
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: scopedPDU %s' % scopedPDU.prettyPrint())
try:
dataToEncrypt = encoder.encode(scopedPDU)
except PyAsn1Error:
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: scopedPDU serialization error: %s' % sys.exc_info()[1])
raise error.StatusInformation(
errorIndication=errind.serializationError
)
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: scopedPDU encoded into %s' % debug.hexdump(dataToEncrypt))
(encryptedData,
privParameters) = privHandler.encryptData(
usmUserPrivKeyLocalized,
(snmpEngineBoots, snmpEngineTime, None), dataToEncrypt
)
securityParameters.setComponentByPosition(
5, privParameters, verifyConstraints=False
)
scopedPDUData.setComponentByPosition(
1, encryptedData, verifyConstraints=False
)
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: scopedPDU ciphered into %s' % debug.hexdump(encryptedData))
# 3.1.4b
elif securityLevel == 1 or securityLevel == 2:
securityParameters.setComponentByPosition(5, '')
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: %s' % scopedPDUData.prettyPrint())
# 3.1.5
securityParameters.setComponentByPosition(
0, securityEngineID, verifyConstraints=False
)
securityParameters.setComponentByPosition(
1, snmpEngineBoots, verifyConstraints=False
)
securityParameters.setComponentByPosition(
2, snmpEngineTime, verifyConstraints=False
)
# 3.1.7
securityParameters.setComponentByPosition(
3, usmUserName, verifyConstraints=False
)
# 3.1.8a
if securityLevel == 3 or securityLevel == 2:
if usmUserAuthProtocol in self.authServices:
authHandler = self.authServices[usmUserAuthProtocol]
else:
raise error.StatusInformation(
errorIndication=errind.authenticationFailure
)
# extra-wild hack to facilitate BER substrate in-place re-write
securityParameters.setComponentByPosition(
4, '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
)
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: %s' % (securityParameters.prettyPrint(),))
try:
msg.setComponentByPosition(2, encoder.encode(securityParameters), verifyConstraints=False)
except PyAsn1Error:
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: securityParameters serialization error: %s' % sys.exc_info()[1])
raise error.StatusInformation(
errorIndication=errind.serializationError
)
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: auth outgoing msg: %s' % msg.prettyPrint())
try:
wholeMsg = encoder.encode(msg)
except PyAsn1Error:
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: msg serialization error: %s' % sys.exc_info()[1])
raise error.StatusInformation(
errorIndication=errind.serializationError
)
authenticatedWholeMsg = authHandler.authenticateOutgoingMsg(
usmUserAuthKeyLocalized, wholeMsg
)
# 3.1.8b
else:
securityParameters.setComponentByPosition(
4, '', verifyConstraints=False
)
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: %s' % (securityParameters.prettyPrint(),))
try:
msg.setComponentByPosition(2, encoder.encode(securityParameters), verifyConstraints=False)
except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: securityParameters serialization error: %s' % sys.exc_info()[1])
raise error.StatusInformation(
errorIndication=errind.serializationError
)
try:
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: plain outgoing msg: %s' % msg.prettyPrint())
authenticatedWholeMsg = encoder.encode(msg)
except PyAsn1Error:
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: msg serialization error: %s' % sys.exc_info()[1])
raise error.StatusInformation(
errorIndication=errind.serializationError
)
debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: %s outgoing msg: %s' % (securityLevel > 1 and "authenticated" or "plain", debug.hexdump(authenticatedWholeMsg)))
# 3.1.9
return (msg.getComponentByPosition(2), authenticatedWholeMsg)
def generateRequestMsg(self, snmpEngine, messageProcessingModel,
globalData, maxMessageSize, securityModel,
securityEngineID, securityName, securityLevel,
scopedPDU):
return self.__generateRequestOrResponseMsg(snmpEngine,
messageProcessingModel,
globalData,
maxMessageSize,
securityModel,
securityEngineID,
securityName,
securityLevel,
scopedPDU,
None)
def generateResponseMsg(self, snmpEngine, messageProcessingModel,
globalData, maxMessageSize, securityModel,
securityEngineID, securityName, securityLevel,
scopedPDU, securityStateReference):
return self.__generateRequestOrResponseMsg(
snmpEngine, messageProcessingModel, globalData,
maxMessageSize, securityModel, securityEngineID,
securityName, securityLevel, scopedPDU, securityStateReference
)
# 3.2
def processIncomingMsg(self, snmpEngine, messageProcessingModel,
maxMessageSize, securityParameters,
securityModel, securityLevel, wholeMsg, msg):
# 3.2.9 -- moved up here to be able to report
# maxSizeResponseScopedPDU on error
# (48 - maximum SNMPv3 header length)
maxSizeResponseScopedPDU = int(maxMessageSize) - len(securityParameters) - 48
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: securityParameters %s' % debug.hexdump(securityParameters))
# 3.2.1
try:
securityParameters, rest = decoder.decode(
securityParameters, asn1Spec=self.__securityParametersSpec
)
except PyAsn1Error:
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: %s' % (sys.exc_info()[1],))
snmpInASNParseErrs, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpInASNParseErrs')
snmpInASNParseErrs.syntax += 1
raise error.StatusInformation(errorIndication=errind.parseError)
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: %s' % (securityParameters.prettyPrint(),))
if eoo.endOfOctets.isSameTypeWith(securityParameters):
raise error.StatusInformation(errorIndication=errind.parseError)
# 3.2.2
msgAuthoritativeEngineId = securityParameters.getComponentByPosition(0)
securityStateReference = self._cache.push(
msgUserName=securityParameters.getComponentByPosition(3)
)
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: cache write securityStateReference %s by msgUserName %s' % (securityStateReference, securityParameters.getComponentByPosition(3)))
scopedPduData = msg.getComponentByPosition(3)
# Used for error reporting
contextEngineId = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')[0].syntax
contextName = null
snmpEngineID = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')[0].syntax
# 3.2.3
if msgAuthoritativeEngineId != snmpEngineID and \
msgAuthoritativeEngineId not in self.__timeline:
if msgAuthoritativeEngineId and \
4 < len(msgAuthoritativeEngineId) < 33:
# 3.2.3a - cloned user when request was sent
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: unsynchronized securityEngineID %r' % (msgAuthoritativeEngineId,))
else:
# 3.2.3b
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: peer requested snmpEngineID discovery')
usmStatsUnknownEngineIDs, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsUnknownEngineIDs')
usmStatsUnknownEngineIDs.syntax += 1
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: null or malformed msgAuthoritativeEngineId')
pysnmpUsmDiscoverable, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__PYSNMP-USM-MIB', 'pysnmpUsmDiscoverable')
if pysnmpUsmDiscoverable.syntax:
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: starting snmpEngineID discovery procedure')
# Report original contextName
if scopedPduData.getName() != 'plaintext':
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: scopedPduData not plaintext %s' % scopedPduData.prettyPrint())
raise error.StatusInformation(
errorIndication=errind.unknownEngineID
)
# 7.2.6.a.1
scopedPdu = scopedPduData.getComponent()
contextEngineId = scopedPdu.getComponentByPosition(0)
contextName = scopedPdu.getComponentByPosition(1)
raise error.StatusInformation(
errorIndication=errind.unknownEngineID,
oid=usmStatsUnknownEngineIDs.name,
val=usmStatsUnknownEngineIDs.syntax,
securityStateReference=securityStateReference,
securityLevel=securityLevel,
contextEngineId=contextEngineId,
contextName=contextName,
scopedPDU=scopedPdu,
maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
)
else:
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: will not discover EngineID')
# free securityStateReference XXX
raise error.StatusInformation(
errorIndication=errind.unknownEngineID
)
msgUserName = securityParameters.getComponentByPosition(3)
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: read from securityParams msgAuthoritativeEngineId %r msgUserName %r' % (msgAuthoritativeEngineId, msgUserName))
if msgUserName:
# 3.2.4
try:
(usmUserName, usmUserSecurityName, usmUserAuthProtocol,
usmUserAuthKeyLocalized, usmUserPrivProtocol,
usmUserPrivKeyLocalized) = self.__getUserInfo(
snmpEngine.msgAndPduDsp.mibInstrumController,
msgAuthoritativeEngineId, msgUserName
)
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: read user info from LCD')
except NoSuchInstanceError:
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: unknown securityEngineID %r msgUserName %r' % (msgAuthoritativeEngineId, msgUserName))
usmStatsUnknownUserNames, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsUnknownUserNames')
usmStatsUnknownUserNames.syntax += 1
raise error.StatusInformation(
errorIndication=errind.unknownSecurityName,
oid=usmStatsUnknownUserNames.name,
val=usmStatsUnknownUserNames.syntax,
securityStateReference=securityStateReference,
securityLevel=securityLevel,
contextEngineId=contextEngineId,
contextName=contextName,
maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
)
except PyAsn1Error:
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: %s' % (sys.exc_info()[1],))
snmpInGenErrs, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpInGenErrs')
snmpInGenErrs.syntax += 1
raise error.StatusInformation(errorIndication=errind.invalidMsg)
else:
# empty username used for engineID discovery
usmUserName = usmUserSecurityName = null
usmUserAuthProtocol = noauth.NoAuth.serviceID
usmUserPrivProtocol = nopriv.NoPriv.serviceID
usmUserAuthKeyLocalized = usmUserPrivKeyLocalized = None
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: now have usmUserName %r usmUserSecurityName %r usmUserAuthProtocol %r usmUserPrivProtocol %r for msgUserName %r' % (usmUserName, usmUserSecurityName, usmUserAuthProtocol, usmUserPrivProtocol, msgUserName))
# 3.2.11 (moved up here to let Reports be authenticated & encrypted)
self._cache.pop(securityStateReference)
securityStateReference = self._cache.push(
msgUserName=securityParameters.getComponentByPosition(3),
usmUserSecurityName=usmUserSecurityName,
usmUserAuthProtocol=usmUserAuthProtocol,
usmUserAuthKeyLocalized=usmUserAuthKeyLocalized,
usmUserPrivProtocol=usmUserPrivProtocol,
usmUserPrivKeyLocalized=usmUserPrivKeyLocalized
)
msgAuthoritativeEngineBoots = securityParameters.getComponentByPosition(1)
msgAuthoritativeEngineTime = securityParameters.getComponentByPosition(2)
snmpEngine.observer.storeExecutionContext(
snmpEngine, 'rfc3414.processIncomingMsg',
dict(securityEngineId=msgAuthoritativeEngineId,
snmpEngineBoots=msgAuthoritativeEngineBoots,
snmpEngineTime=msgAuthoritativeEngineTime,
userName=usmUserName,
securityName=usmUserSecurityName,
authProtocol=usmUserAuthProtocol,
authKey=usmUserAuthKeyLocalized,
privProtocol=usmUserPrivProtocol,
privKey=usmUserPrivKeyLocalized)
)
snmpEngine.observer.clearExecutionContext(
snmpEngine, 'rfc3414.processIncomingMsg'
)
# 3.2.5
if msgAuthoritativeEngineId == snmpEngineID:
# Authoritative SNMP engine: make sure securityLevel is sufficient
badSecIndication = None
if securityLevel == 3:
if usmUserAuthProtocol == noauth.NoAuth.serviceID:
badSecIndication = 'authPriv wanted while auth not expected'
if usmUserPrivProtocol == nopriv.NoPriv.serviceID:
badSecIndication = 'authPriv wanted while priv not expected'
elif securityLevel == 2:
if usmUserAuthProtocol == noauth.NoAuth.serviceID:
badSecIndication = 'authNoPriv wanted while auth not expected'
if usmUserPrivProtocol != nopriv.NoPriv.serviceID:
# 4 (discovery phase always uses authenticated messages)
if msgAuthoritativeEngineBoots or msgAuthoritativeEngineTime:
badSecIndication = 'authNoPriv wanted while priv expected'
elif securityLevel == 1:
if usmUserAuthProtocol != noauth.NoAuth.serviceID:
badSecIndication = 'noAuthNoPriv wanted while auth expected'
if usmUserPrivProtocol != nopriv.NoPriv.serviceID:
badSecIndication = 'noAuthNoPriv wanted while priv expected'
if badSecIndication:
usmStatsUnsupportedSecLevels, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsUnsupportedSecLevels')
usmStatsUnsupportedSecLevels.syntax += 1
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: reporting inappropriate security level for user %s: %s' % (msgUserName, badSecIndication))
raise error.StatusInformation(
errorIndication=errind.unsupportedSecurityLevel,
oid=usmStatsUnsupportedSecLevels.name,
val=usmStatsUnsupportedSecLevels.syntax,
securityStateReference=securityStateReference,
securityLevel=securityLevel,
contextEngineId=contextEngineId,
contextName=contextName,
maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
)
# 3.2.6
if securityLevel == 3 or securityLevel == 2:
if usmUserAuthProtocol in self.authServices:
authHandler = self.authServices[usmUserAuthProtocol]
else:
raise error.StatusInformation(
errorIndication=errind.authenticationFailure
)
try:
authenticatedWholeMsg = authHandler.authenticateIncomingMsg(
usmUserAuthKeyLocalized,
securityParameters.getComponentByPosition(4),
wholeMsg
)
except error.StatusInformation:
usmStatsWrongDigests, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsWrongDigests')
usmStatsWrongDigests.syntax += 1
raise error.StatusInformation(
errorIndication=errind.authenticationFailure,
oid=usmStatsWrongDigests.name,
val=usmStatsWrongDigests.syntax,
securityStateReference=securityStateReference,
securityLevel=securityLevel,
contextEngineId=contextEngineId,
contextName=contextName,
maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
)
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: incoming msg authenticated')
# synchronize time with authed peer
self.__timeline[msgAuthoritativeEngineId] = (
securityParameters.getComponentByPosition(1),
securityParameters.getComponentByPosition(2),
securityParameters.getComponentByPosition(2),
int(time.time())
)
expireAt = int(self.__expirationTimer + 300 / snmpEngine.transportDispatcher.getTimerResolution())
if expireAt not in self.__timelineExpQueue:
self.__timelineExpQueue[expireAt] = []
self.__timelineExpQueue[expireAt].append(msgAuthoritativeEngineId)
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: store timeline for securityEngineID %r' % (msgAuthoritativeEngineId,))
# 3.2.7
if securityLevel == 3 or securityLevel == 2:
if msgAuthoritativeEngineId == snmpEngineID:
# Authoritative SNMP engine: use local notion (SF bug #1649032)
(snmpEngineBoots, snmpEngineTime) = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineBoots', 'snmpEngineTime')
snmpEngineBoots = snmpEngineBoots.syntax
snmpEngineTime = snmpEngineTime.syntax.clone()
idleTime = 0
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: read snmpEngineBoots (%s), snmpEngineTime (%s) from LCD' % (snmpEngineBoots, snmpEngineTime))
else:
# Non-authoritative SNMP engine: use cached estimates
if msgAuthoritativeEngineId in self.__timeline:
(snmpEngineBoots, snmpEngineTime,
latestReceivedEngineTime,
latestUpdateTimestamp) = self.__timeline[
msgAuthoritativeEngineId
]
# time passed since last talk with this SNMP engine
idleTime = int(time.time())-latestUpdateTimestamp
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: read timeline snmpEngineBoots %s snmpEngineTime %s for msgAuthoritativeEngineId %r, idle time %s secs' % (snmpEngineBoots, snmpEngineTime, msgAuthoritativeEngineId, idleTime))
else:
raise error.ProtocolError('Peer SNMP engine info missing')
# 3.2.7a
if msgAuthoritativeEngineId == snmpEngineID:
if snmpEngineBoots == 2147483647 or \
snmpEngineBoots != msgAuthoritativeEngineBoots or \
abs(idleTime + int(snmpEngineTime) - \
int(msgAuthoritativeEngineTime)) > 150:
usmStatsNotInTimeWindows, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsNotInTimeWindows')
usmStatsNotInTimeWindows.syntax += 1
raise error.StatusInformation(
errorIndication=errind.notInTimeWindow,
oid=usmStatsNotInTimeWindows.name,
val=usmStatsNotInTimeWindows.syntax,
securityStateReference=securityStateReference,
securityLevel=2,
contextEngineId=contextEngineId,
contextName=contextName,
maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
)
# 3.2.7b
else:
# 3.2.7b.1
if msgAuthoritativeEngineBoots > snmpEngineBoots or \
msgAuthoritativeEngineBoots == snmpEngineBoots and \
msgAuthoritativeEngineTime > latestReceivedEngineTime:
self.__timeline[msgAuthoritativeEngineId] = (
msgAuthoritativeEngineBoots,
msgAuthoritativeEngineTime,
msgAuthoritativeEngineTime,
int(time.time())
)
expireAt = int(self.__expirationTimer + 300 / snmpEngine.transportDispatcher.getTimerResolution())
if expireAt not in self.__timelineExpQueue:
self.__timelineExpQueue[expireAt] = []
self.__timelineExpQueue[expireAt].append(msgAuthoritativeEngineId)
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: stored timeline msgAuthoritativeEngineBoots %s msgAuthoritativeEngineTime %s for msgAuthoritativeEngineId %r' % (msgAuthoritativeEngineBoots, msgAuthoritativeEngineTime, msgAuthoritativeEngineId))
# 3.2.7b.2
if snmpEngineBoots == 2147483647 or \
msgAuthoritativeEngineBoots < snmpEngineBoots or \
msgAuthoritativeEngineBoots == snmpEngineBoots and \
abs(idleTime + int(snmpEngineTime) - \
int(msgAuthoritativeEngineTime)) > 150:
raise error.StatusInformation(
errorIndication=errind.notInTimeWindow
)
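        # Worked sketch of the time-window test above (made-up figures, not
        # from the original source): with cached snmpEngineTime=1000 learned
        # 40 seconds ago (idleTime=40) and an incoming
        # msgAuthoritativeEngineTime=1030, abs(40 + 1000 - 1030) == 10 <= 150,
        # so the message is inside the window; a larger skew, or a smaller or
        # maxed-out boots counter, raises notInTimeWindow instead.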
# 3.2.8a
if securityLevel == 3:
if usmUserPrivProtocol in self.privServices:
privHandler = self.privServices[usmUserPrivProtocol]
else:
raise error.StatusInformation(
errorIndication=errind.decryptionError
)
encryptedPDU = scopedPduData.getComponentByPosition(1)
if encryptedPDU is None: # no ciphertext
raise error.StatusInformation(
errorIndication=errind.decryptionError
)
try:
decryptedData = privHandler.decryptData(
usmUserPrivKeyLocalized,
(securityParameters.getComponentByPosition(1),
securityParameters.getComponentByPosition(2),
securityParameters.getComponentByPosition(5)),
encryptedPDU
)
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: PDU deciphered into %s' % debug.hexdump(decryptedData))
except error.StatusInformation:
usmStatsDecryptionErrors, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsDecryptionErrors')
usmStatsDecryptionErrors.syntax += 1
raise error.StatusInformation(
errorIndication=errind.decryptionError,
oid=usmStatsDecryptionErrors.name,
val=usmStatsDecryptionErrors.syntax,
securityStateReference=securityStateReference,
securityLevel=securityLevel,
contextEngineId=contextEngineId,
contextName=contextName,
maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
)
scopedPduSpec = scopedPduData.setComponentByPosition(0).getComponentByPosition(0)
try:
scopedPDU, rest = decoder.decode(decryptedData,
asn1Spec=scopedPduSpec)
except PyAsn1Error:
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: scopedPDU decoder failed %s' % sys.exc_info()[0])
raise error.StatusInformation(
errorIndication=errind.decryptionError
)
if eoo.endOfOctets.isSameTypeWith(scopedPDU):
raise error.StatusInformation(
errorIndication=errind.decryptionError
)
else:
# 3.2.8b
scopedPDU = scopedPduData.getComponentByPosition(0)
if scopedPDU is None: # no plaintext
raise error.StatusInformation(
errorIndication=errind.decryptionError
)
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: scopedPDU decoded %s' % scopedPDU.prettyPrint())
# 3.2.10
securityName = usmUserSecurityName
debug.logger & debug.flagSM and debug.logger('processIncomingMsg: cached msgUserName %s info by securityStateReference %s' % (msgUserName, securityStateReference))
# Delayed to include details
if not msgUserName and not msgAuthoritativeEngineId:
usmStatsUnknownUserNames, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsUnknownUserNames')
usmStatsUnknownUserNames.syntax += 1
raise error.StatusInformation(
errorIndication=errind.unknownSecurityName,
oid=usmStatsUnknownUserNames.name,
val=usmStatsUnknownUserNames.syntax,
securityStateReference=securityStateReference,
securityEngineID=msgAuthoritativeEngineId,
securityLevel=securityLevel,
contextEngineId=contextEngineId,
contextName=contextName,
maxSizeResponseScopedPDU=maxSizeResponseScopedPDU,
PDU=scopedPDU
)
# 3.2.12
return (msgAuthoritativeEngineId, securityName, scopedPDU,
maxSizeResponseScopedPDU, securityStateReference)
def __expireTimelineInfo(self):
if self.__expirationTimer in self.__timelineExpQueue:
for engineIdKey in self.__timelineExpQueue[self.__expirationTimer]:
if engineIdKey in self.__timeline:
del self.__timeline[engineIdKey]
debug.logger & debug.flagSM and debug.logger('__expireTimelineInfo: expiring %r' % (engineIdKey,))
del self.__timelineExpQueue[self.__expirationTimer]
self.__expirationTimer += 1
def receiveTimerTick(self, snmpEngine, timeNow):
self.__expireTimelineInfo()
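# Expiration arithmetic sketch for the timeline cache above (assuming, purely
# for illustration, a transport dispatcher timer resolution of 0.5 seconds):
#
#     expireAt = int(self.__expirationTimer + 300 / 0.5)   # current tick + 600
#
# receiveTimerTick() advances __expirationTimer by one per tick, so an entry
# queued now is dropped roughly 300 seconds later; the next authenticated
# message from that engine repopulates the cache.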
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2012-2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An local Ensemble object.
This module defines an Ensemble to make predictions locally using its
associated models.
This module can not only save you a few credits, but also enormously
reduce the latency for each prediction and let you use your models
offline.
from bigml.api import BigML
from bigml.ensemble import Ensemble
# api connection
api = BigML(storage='./storage')
# creating ensemble
ensemble = api.create_ensemble('dataset/5143a51a37203f2cf7000972')
# Ensemble object to predict
ensemble = Ensemble(ensemble, api)
ensemble.predict({"petal length": 3, "petal width": 1})
"""
import sys
import logging
import gc
import json
LOGGER = logging.getLogger('BigML')
from bigml.api import BigML, get_ensemble_id, get_model_id
from bigml.model import Model, retrieve_resource, print_distribution
from bigml.model import STORAGE, ONLY_MODEL, LAST_PREDICTION
from bigml.multivote import MultiVote
from bigml.multivote import PLURALITY_CODE
from bigml.multimodel import MultiModel
from bigml.basemodel import BaseModel, print_importance
def use_cache(cache_get):
"""Checks whether the user has provided a cache get function to retrieve
local models.
"""
return cache_get is not None and hasattr(cache_get, '__call__')
class Ensemble(object):
"""A local predictive Ensemble.
    Uses a number of BigML remote models to build a local version of the
    ensemble that can be used to generate predictions locally.
The expected arguments are:
ensemble: ensemble object or id, list of model objects or
ids or list of local model objects (see Model)
api: connection object. If None, a new connection object is
instantiated.
max_models: integer that limits the number of models instantiated and
held in memory at the same time while predicting. If None,
no limit is set and all the ensemble models are
instantiated and held in memory permanently.
"""
def __init__(self, ensemble, api=None, max_models=None, cache_get=None):
if api is None:
self.api = BigML(storage=STORAGE)
else:
self.api = api
self.resource_id = None
# to be deprecated
self.ensemble_id = None
self.objective_id = None
self.distributions = None
self.models_splits = []
self.multi_model = None
self.cache_get = None
if isinstance(ensemble, list):
if all([isinstance(model, Model) for model in ensemble]):
models = ensemble
self.model_ids = [local_model.resource_id for local_model in
models]
else:
try:
models = [get_model_id(model) for model in ensemble]
self.model_ids = models
except ValueError, exc:
raise ValueError('Failed to verify the list of models.'
' Check your model id values: %s' %
str(exc))
self.distributions = None
else:
ensemble = self.get_ensemble_resource(ensemble)
self.resource_id = get_ensemble_id(ensemble)
self.ensemble_id = self.resource_id
ensemble = retrieve_resource(self.api, self.resource_id)
models = ensemble['object']['models']
self.distributions = ensemble['object'].get('distributions', None)
self.model_ids = models
number_of_models = len(models)
if max_models is None:
self.models_splits = [models]
else:
self.models_splits = [models[index:(index + max_models)] for index
in range(0, number_of_models, max_models)]
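        # For example (illustrative figures only): 25 model ids with
        # max_models=10 yield splits of sizes 10, 10 and 5, so at most ten
        # models are instantiated at any one time while predicting.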
if len(self.models_splits) == 1:
if not isinstance(models[0], Model):
if use_cache(cache_get):
# retrieve the models from a cache get function
try:
models = [cache_get(model_id) for model_id
in self.models_splits[0]]
self.cache_get = cache_get
except Exception, exc:
raise Exception('Error while calling the user-given'
' function %s: %s' %
(cache_get.__name__, str(exc)))
else:
models = [retrieve_resource(self.api, model_id,
query_string=ONLY_MODEL)
for model_id in self.models_splits[0]]
self.multi_model = MultiModel(models, self.api)
else:
self.cache_get = cache_get
self.fields, self.objective_id = self.all_model_fields(
max_models=max_models)
def get_ensemble_resource(self, ensemble):
"""Extracts the ensemble resource info. The ensemble argument can be
- a path to a local file
- an ensemble id
"""
# the string can be a path to a JSON file
if isinstance(ensemble, basestring):
try:
with open(ensemble) as ensemble_file:
ensemble = json.load(ensemble_file)
self.resource_id = get_ensemble_id(ensemble)
if self.resource_id is None:
raise ValueError("The JSON file does not seem"
" to contain a valid BigML ensemble"
" representation.")
except IOError:
# if it is not a path, it can be an ensemble id
self.resource_id = get_ensemble_id(ensemble)
if self.resource_id is None:
if ensemble.find('ensemble/') > -1:
raise Exception(
self.api.error_message(ensemble,
resource_type='ensemble',
method='get'))
else:
raise IOError("Failed to open the expected JSON file"
" at %s" % ensemble)
            except ValueError:
                raise ValueError("Failed to interpret %s."
                                 " JSON file expected." % ensemble)
return ensemble
def list_models(self):
"""Lists all the model/ids that compound the ensemble.
"""
return self.model_ids
def predict(self, input_data, by_name=True, method=PLURALITY_CODE,
with_confidence=False, add_confidence=False,
add_distribution=False, add_count=False, add_median=False,
options=None, missing_strategy=LAST_PREDICTION, median=False):
"""Makes a prediction based on the prediction made by every model.
:param input_data: Test data to be used as input
:param by_name: Boolean that is set to True if field_names (as
alternative to field ids) are used in the
input_data dict
:param method: numeric key code for the following combination
methods in classifications/regressions:
0 - majority vote (plurality)/ average: PLURALITY_CODE
1 - confidence weighted majority vote / error weighted:
CONFIDENCE_CODE
2 - probability weighted majority vote / average:
PROBABILITY_CODE
3 - threshold filtered vote / doesn't apply:
THRESHOLD_CODE
The following parameter causes the result to be returned as a list
:param with_confidence: Adds the confidence, distribution, counts
and median information to the node prediction.
The result is given in a list format output.
The following parameters cause the result to be returned as a dict
:param add_confidence: Adds confidence to the prediction
:param add_distribution: Adds the predicted node's distribution to the
prediction
:param add_count: Adds the predicted nodes' instances to the
prediction
:param add_median: Adds the median of the predicted nodes' distribution
to the prediction
:param options: Options to be used in threshold filtered votes.
:param missing_strategy: numeric key for the individual model's
prediction method. See the model predict
method.
:param median: Uses the median of each individual model's predicted
node as individual prediction for the specified
combination method.
"""
if len(self.models_splits) > 1:
            # If there's more than one chunk of models, they must be used
            # sequentially to generate the votes for the prediction
votes = MultiVote([])
for models_split in self.models_splits:
if not isinstance(models_split[0], Model):
if (self.cache_get is not None and
hasattr(self.cache_get, '__call__')):
# retrieve the models from a cache get function
try:
models = [self.cache_get(model_id) for model_id
in models_split]
except Exception, exc:
raise Exception('Error while calling the '
'user-given'
' function %s: %s' %
(self.cache_get.__name__,
str(exc)))
else:
models = [retrieve_resource(self.api, model_id,
query_string=ONLY_MODEL)
for model_id in models_split]
multi_model = MultiModel(models, api=self.api)
votes_split = multi_model.generate_votes(
input_data, by_name=by_name,
missing_strategy=missing_strategy,
add_median=(add_median or median))
if median:
for prediction in votes_split.predictions:
prediction['prediction'] = prediction['median']
votes.extend(votes_split.predictions)
else:
            # When there is only one group of models, use the corresponding
            # multimodel to predict
votes_split = self.multi_model.generate_votes(
input_data, by_name=by_name, missing_strategy=missing_strategy,
add_median=(add_median or median))
votes = MultiVote(votes_split.predictions)
if median:
for prediction in votes.predictions:
prediction['prediction'] = prediction['median']
return votes.combine(method=method, with_confidence=with_confidence,
add_confidence=add_confidence,
add_distribution=add_distribution,
add_count=add_count,
add_median=add_median,
options=options)
def field_importance_data(self):
"""Computes field importance based on the field importance information
of the individual models in the ensemble.
"""
field_importance = {}
field_names = {}
if (self.distributions is not None and
isinstance(self.distributions, list) and
all('importance' in item for item in self.distributions)):
# Extracts importance from ensemble information
importances = [model_info['importance'] for model_info in
self.distributions]
for index in range(0, len(importances)):
model_info = importances[index]
for field_info in model_info:
field_id = field_info[0]
                    if field_id not in field_importance:
field_importance[field_id] = 0.0
name = self.fields[field_id]['name']
field_names[field_id] = {'name': name}
field_importance[field_id] += field_info[1]
else:
# Old ensembles, extracts importance from model information
for model_id in self.model_ids:
local_model = BaseModel(model_id, api=self.api)
for field_info in local_model.field_importance:
field_id = field_info[0]
                    if field_id not in field_importance:
field_importance[field_id] = 0.0
name = self.fields[field_id]['name']
field_names[field_id] = {'name': name}
field_importance[field_id] += field_info[1]
number_of_models = len(self.model_ids)
for field_id in field_importance.keys():
field_importance[field_id] /= number_of_models
return map(list, sorted(field_importance.items(), key=lambda x: x[1],
reverse=True)), field_names
def print_importance(self, out=sys.stdout):
"""Prints ensemble field importance
"""
print_importance(self, out=out)
def get_data_distribution(self, distribution_type="training"):
"""Returns the required data distribution by adding the distributions
in the models
"""
ensemble_distribution = []
categories = []
for model_distribution in self.distributions:
summary = model_distribution[distribution_type]
if 'bins' in summary:
distribution = summary['bins']
elif 'counts' in summary:
distribution = summary['counts']
            elif 'categories' in summary:
                distribution = summary['categories']
            else:
                distribution = []
for point, instances in distribution:
if point in categories:
ensemble_distribution[
categories.index(point)][1] += instances
else:
categories.append(point)
ensemble_distribution.append([point, instances])
return sorted(ensemble_distribution, key=lambda x: x[0])
def summarize(self, out=sys.stdout):
"""Prints ensemble summary. Only field importance at present.
"""
distribution = self.get_data_distribution("training")
out.write(u"Data distribution:\n")
print_distribution(distribution, out=out)
out.write(u"\n\n")
predictions = self.get_data_distribution("predictions")
out.write(u"Predicted distribution:\n")
print_distribution(predictions, out=out)
out.write(u"\n\n")
out.write(u"Field importance:\n")
self.print_importance(out=out)
out.flush()
def all_model_fields(self, max_models=None):
"""Retrieves the fields used as predictors in all the ensemble
models
"""
fields = {}
models = []
objective_id = None
no_objective_id = False
if isinstance(self.models_splits[0][0], Model):
for split in self.models_splits:
models.extend(split)
else:
models = self.model_ids
for index, model_id in enumerate(models):
if isinstance(model_id, Model):
local_model = model_id
elif self.cache_get is not None:
local_model = self.cache_get(model_id)
else:
local_model = Model(model_id, self.api)
if (max_models is not None and index > 0 and
index % max_models == 0):
gc.collect()
fields.update(local_model.fields)
if (objective_id is not None and
objective_id != local_model.objective_id):
                # the models' objective fields have different ids, so there is
                # no global objective id
no_objective_id = True
else:
objective_id = local_model.objective_id
if no_objective_id:
objective_id = None
gc.collect()
return fields, objective_id
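# A minimal command-line sketch (hypothetical ensemble id; requires network
# access and valid BigML credentials, so it is illustrative rather than part
# of the original module): build a local ensemble, print its field
# importances and make one prediction.
if __name__ == "__main__":
    demo_api = BigML(storage=STORAGE)
    demo_ensemble = Ensemble('ensemble/5143a51a37203f2cf7000972',
                             api=demo_api, max_models=10)
    demo_ensemble.print_importance()
    print demo_ensemble.predict({"petal length": 3, "petal width": 1},
                                add_confidence=True)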
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
orm.Journal.objects.filter(national_code=None).update(national_code='')
def backwards(self, orm):
"Write your backwards methods here."
orm.Journal.objects.filter(national_code='').update(national_code=None)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'journalmanager.aheadpressrelease': {
'Meta': {'object_name': 'AheadPressRelease', '_ormbases': ['journalmanager.PressRelease']},
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'press_releases'", 'to': "orm['journalmanager.Journal']"}),
'pressrelease_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.PressRelease']", 'unique': 'True', 'primary_key': 'True'})
},
'journalmanager.article': {
'Meta': {'object_name': 'Article'},
'aid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'article_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'articles_linkage_is_pending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'doi': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '2048', 'db_index': 'True'}),
'domain_key': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '2048', 'db_index': 'False'}),
'es_is_dirty': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'es_updated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_aop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'issn_epub': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'issn_ppub': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'articles'", 'null': 'True', 'to': "orm['journalmanager.Issue']"}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'articles'", 'null': 'True', 'to': "orm['journalmanager.Journal']"}),
'journal_title': ('django.db.models.fields.CharField', [], {'max_length': '512', 'db_index': 'True'}),
'related_articles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['journalmanager.Article']", 'null': 'True', 'through': "orm['journalmanager.ArticlesLinkage']", 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'xml': ('scielomanager.custom_fields.XMLSPSField', [], {}),
'xml_version': ('django.db.models.fields.CharField', [], {'max_length': '9'})
},
'journalmanager.articleslinkage': {
'Meta': {'object_name': 'ArticlesLinkage'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'referrers'", 'to': "orm['journalmanager.Article']"}),
'link_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'referrer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'links_to'", 'to': "orm['journalmanager.Article']"})
},
'journalmanager.collection': {
'Meta': {'ordering': "['name']", 'object_name': 'Collection'},
'acronym': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {}),
'address_complement': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'address_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'collection': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_collection'", 'to': "orm['auth.User']", 'through': "orm['journalmanager.UserCollections']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'name_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'journalmanager.institution': {
'Meta': {'ordering': "['name']", 'object_name': 'Institution'},
'acronym': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {}),
'address_complement': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'address_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'cel': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'complement': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'journalmanager.issue': {
'Meta': {'ordering': "('created', 'id')", 'object_name': 'Issue'},
'cover': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ctrl_vocabulary': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'editorial_standard': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_marked_up': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}),
'label': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'publication_end_month': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'publication_start_month': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'publication_year': ('django.db.models.fields.IntegerField', [], {}),
'section': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Section']", 'symmetrical': 'False', 'blank': 'True'}),
'spe_text': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'suppl_text': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'total_documents': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'regular'", 'max_length': '15'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'use_license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.UseLicense']", 'null': 'True'}),
'volume': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'})
},
'journalmanager.issuetitle': {
'Meta': {'object_name': 'IssueTitle'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Issue']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'journalmanager.journal': {
'Meta': {'ordering': "('title', 'id')", 'object_name': 'Journal'},
'abstract_keyword_languages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'abstract_keyword_languages'", 'symmetrical': 'False', 'to': "orm['journalmanager.Language']"}),
'acronym': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'collections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Collection']", 'through': "orm['journalmanager.Membership']", 'symmetrical': 'False'}),
'copyrighter': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'cover': ('scielomanager.custom_fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enjoy_creator'", 'to': "orm['auth.User']"}),
'ctrl_vocabulary': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'current_ahead_documents': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'editor_journal'", 'null': 'True', 'to': "orm['auth.User']"}),
'editor_address': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'editor_address_city': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'editor_address_country': ('scielo_extensions.modelfields.CountryField', [], {'max_length': '2'}),
'editor_address_state': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'editor_address_zip': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'editor_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'editor_name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'editor_phone1': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'editor_phone2': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'editorial_standard': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'eletronic_issn': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'final_num': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'final_vol': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'final_year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index_coverage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'init_num': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'init_vol': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'init_year': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'is_indexed_aehci': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_indexed_scie': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_indexed_ssci': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Language']", 'symmetrical': 'False'}),
'logo': ('scielomanager.custom_fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'medline_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'medline_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'national_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'other_previous_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'previous_ahead_documents': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'previous_title': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'prev_title'", 'null': 'True', 'to': "orm['journalmanager.Journal']"}),
'print_issn': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'pub_level': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'publication_city': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'publisher_country': ('scielo_extensions.modelfields.CountryField', [], {'max_length': '2'}),
'publisher_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'publisher_state': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'scielo_issn': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'secs_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_index': 'True'}),
'sponsor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'journal_sponsor'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['journalmanager.Sponsor']"}),
'study_areas': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'journals_migration_tmp'", 'null': 'True', 'to': "orm['journalmanager.StudyArea']"}),
'subject_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'journals'", 'null': 'True', 'to': "orm['journalmanager.SubjectCategory']"}),
'subject_descriptors': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'title_iso': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'twitter_user': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url_journal': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'url_online_submission': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'use_license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.UseLicense']"})
},
'journalmanager.journalmission': {
'Meta': {'object_name': 'JournalMission'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'missions'", 'to': "orm['journalmanager.Journal']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']", 'null': 'True'})
},
'journalmanager.journaltimeline': {
'Meta': {'object_name': 'JournalTimeline'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'statuses'", 'to': "orm['journalmanager.Journal']"}),
'reason': ('django.db.models.fields.TextField', [], {'default': "''"}),
'since': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'journalmanager.journaltitle': {
'Meta': {'object_name': 'JournalTitle'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'other_titles'", 'to': "orm['journalmanager.Journal']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'journalmanager.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iso_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'journalmanager.membership': {
'Meta': {'unique_together': "(('journal', 'collection'),)", 'object_name': 'Membership'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}),
'reason': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'since': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'inprogress'", 'max_length': '16'})
},
'journalmanager.pendedform': {
'Meta': {'object_name': 'PendedForm'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'form_hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_forms'", 'to': "orm['auth.User']"}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'journalmanager.pendedvalue': {
'Meta': {'object_name': 'PendedValue'},
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data'", 'to': "orm['journalmanager.PendedForm']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'journalmanager.pressrelease': {
'Meta': {'object_name': 'PressRelease'},
'doi': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'journalmanager.pressreleasearticle': {
'Meta': {'object_name': 'PressReleaseArticle'},
'article_pid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'press_release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'articles'", 'to': "orm['journalmanager.PressRelease']"})
},
'journalmanager.pressreleasetranslation': {
'Meta': {'object_name': 'PressReleaseTranslation'},
'content': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}),
'press_release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['journalmanager.PressRelease']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'journalmanager.regularpressrelease': {
'Meta': {'object_name': 'RegularPressRelease', '_ormbases': ['journalmanager.PressRelease']},
'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'press_releases'", 'to': "orm['journalmanager.Issue']"}),
'pressrelease_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.PressRelease']", 'unique': 'True', 'primary_key': 'True'})
},
'journalmanager.section': {
'Meta': {'ordering': "('id',)", 'object_name': 'Section'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '21', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}),
'legacy_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'journalmanager.sectiontitle': {
'Meta': {'ordering': "['title']", 'object_name': 'SectionTitle'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'titles'", 'to': "orm['journalmanager.Section']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'journalmanager.sponsor': {
'Meta': {'ordering': "['name']", 'object_name': 'Sponsor', '_ormbases': ['journalmanager.Institution']},
'collections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Collection']", 'symmetrical': 'False'}),
'institution_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.Institution']", 'unique': 'True', 'primary_key': 'True'})
},
'journalmanager.studyarea': {
'Meta': {'object_name': 'StudyArea'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'study_area': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'journalmanager.subjectcategory': {
'Meta': {'object_name': 'SubjectCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'})
},
'journalmanager.translateddata': {
'Meta': {'object_name': 'TranslatedData'},
'field': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'translation': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
},
'journalmanager.uselicense': {
'Meta': {'ordering': "['license_code']", 'object_name': 'UseLicense'},
'disclaimer': ('django.db.models.fields.TextField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'license_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'reference_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'journalmanager.usercollections': {
'Meta': {'unique_together': "(('user', 'collection'),)", 'object_name': 'UserCollections'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'journalmanager.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'email_notifications': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tz': ('django.db.models.fields.CharField', [], {'default': "'America/Sao_Paulo'", 'max_length': '150'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['journalmanager']
symmetrical = True
import datetime
import uuid
import mock
from django.utils import timezone
from django.core import mail
from rest_framework import status
from rest_framework.test import APITestCase
from cla_common.constants import CASE_SOURCE
from cla_eventlog.models import Log
from checker.serializers import CaseSerializer
from core.tests.mommy_utils import make_recipe
from core.tests.test_base import SimpleResourceAPIMixin
from legalaid.models import Case, PersonalDetails
from legalaid.tests.views.test_base import CLACheckerAuthBaseApiTestMixin
from call_centre.tests.test_utils import CallCentreFixedOperatingHours
class BaseCaseTestCase(
CLACheckerAuthBaseApiTestMixin, CallCentreFixedOperatingHours, SimpleResourceAPIMixin, APITestCase
):
LOOKUP_KEY = "reference"
API_URL_BASE_NAME = "case"
RESOURCE_RECIPE = "legalaid.case"
def make_resource(self):
return None
def assertCaseResponseKeys(self, response):
self.assertItemsEqual(
response.data.keys(),
[
"eligibility_check",
"personal_details",
"reference",
"requires_action_at",
"callback_window_type",
"adaptation_details",
"thirdparty_details",
],
)
def assertPersonalDetailsEqual(self, data, obj):
if data is None or obj is None:
self.assertEqual(data, obj)
else:
for prop in ["title", "full_name", "postcode", "street", "mobile_phone", "home_phone"]:
self.assertEqual(unicode(getattr(obj, prop)), data[prop])
def assertCaseEqual(self, data, case):
self.assertEqual(case.reference, data["reference"])
self.assertEqual(unicode(case.eligibility_check.reference), data["eligibility_check"])
self.assertPersonalDetailsEqual(data["personal_details"], case.personal_details)
self.assertEqual(Case.objects.count(), 1)
case = Case.objects.first()
self.assertEqual(case.source, CASE_SOURCE.WEB)
def get_personal_details_default_post_data(self):
return {
"title": "MR",
"full_name": "John Doe",
"postcode": "SW1H 9AJ",
"street": "102 Petty France",
"mobile_phone": "0123456789",
"home_phone": "9876543210",
}
class CaseTestCase(BaseCaseTestCase):
def test_methods_not_allowed(self):
"""
Ensure that we can't POST, PUT or DELETE
"""
# LIST
self._test_delete_not_allowed(self.list_url)
# CREATE
def test_create_no_data(self):
"""
CREATE should raise validation error when data is empty
"""
response = self.client.post(self.list_url, data={}, format="json")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertItemsEqual(response.data.keys(), ["personal_details"])
self.assertEqual(Case.objects.count(), 0)
def test_create_with_data(self):
check = make_recipe("legalaid.eligibility_check")
data = {
"eligibility_check": unicode(check.reference),
"personal_details": self.get_personal_details_default_post_data(),
}
response = self.client.post(self.list_url, data=data, format="json")
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertCaseResponseKeys(response)
self.assertCaseEqual(
response.data,
Case(
reference=response.data["reference"],
eligibility_check=check,
personal_details=PersonalDetails(**data["personal_details"]),
),
)
# test that the Case is in the db and created by 'web' user
self.assertEqual(Case.objects.count(), 1)
case = Case.objects.first()
self.assertEqual(case.created_by.username, "web")
# test that the log is in the db and created by 'web' user
self.assertEqual(Log.objects.count(), 1)
log = Log.objects.first()
self.assertEqual(log.created_by.username, "web")
# no email sent
self.assertEquals(len(mail.outbox), 0)
def _test_method_in_error(self, method, url):
"""
Generic method called by 'create' and 'patch' to test against validation
errors.
"""
invalid_uuid = str(uuid.uuid4())
data = {
"eligibility_check": invalid_uuid,
"personal_details": {
"title": "1" * 21,
"full_name": None,
"postcode": "1" * 13,
"street": "1" * 256,
"mobile_phone": "1" * 21,
"home_phone": "1" * 21,
},
}
method_callable = getattr(self.client, method)
response = method_callable(url, data, format="json")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
errors = response.data
self.assertItemsEqual(errors.keys(), ["eligibility_check", "personal_details"])
self.assertEqual(errors["eligibility_check"], [u"Object with reference=%s does not exist." % invalid_uuid])
self.assertItemsEqual(
errors["personal_details"],
[
{
"title": [u"Ensure this value has at most 20 characters (it has 21)."],
"postcode": [u"Ensure this value has at most 12 characters (it has 13)."],
"street": [u"Ensure this value has at most 255 characters (it has 256)."],
"mobile_phone": [u"Ensure this value has at most 20 characters (it has 21)."],
"home_phone": [u"Ensure this value has at most 20 characters (it has 21)."],
}
],
)
def test_create_in_error(self):
self._test_method_in_error("post", self.list_url)
def test_cannot_create_with_other_reference(self):
"""
Cannot create a case passing an eligibility check reference already assigned
to another case
"""
# create a different case
case = make_recipe("legalaid.case")
data = {
"eligibility_check": unicode(case.eligibility_check.reference),
"personal_details": self.get_personal_details_default_post_data(),
}
response = self.client.post(self.list_url, data=data, format="json")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertDictEqual(
response.data, {"eligibility_check": [u"Case with this Eligibility check already exists."]}
)
def test_case_serializer_with_dupe_eligibility_check_reference(self):
case = make_recipe("legalaid.case")
data = {
u"eligibility_check": case.eligibility_check.reference,
u"personal_details": self.get_personal_details_default_post_data(),
}
serializer = CaseSerializer(data=data)
self.assertFalse(serializer.is_valid())
self.assertDictEqual(
serializer.errors, {"eligibility_check": [u"Case with this Eligibility check already exists."]}
)
class CallMeBackCaseTestCase(BaseCaseTestCase):
@property
def _default_dt(self):
        # Cache on first access. A non-mangled attribute name is used so the
        # hasattr() check actually matches what gets set (the original
        # "__default_dt" form was name-mangled and never detected).
        if not hasattr(self, "_cached_default_dt"):
            self._cached_default_dt = datetime.datetime(
                2015, 3, 30, 10, 0, 0, 0
            ).replace(tzinfo=timezone.utc)
        return self._cached_default_dt
def test_create_with_callmeback(self):
self.assertEquals(len(mail.outbox), 0)
check = make_recipe("legalaid.eligibility_check")
data = {
"eligibility_check": unicode(check.reference),
"personal_details": self.get_personal_details_default_post_data(),
"requires_action_at": self._default_dt.isoformat(),
}
with mock.patch(
"cla_common.call_centre_availability.current_datetime",
return_value=datetime.datetime(2015, 3, 23, 10, 0, 0, 0),
):
response = self.client.post(self.list_url, data=data, format="json")
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertCaseResponseKeys(response)
case = Case.objects.first()
self.assertEqual(case.requires_action_at, self._default_dt)
self.assertEqual(case.callback_attempt, 1)
self.assertEqual(case.outcome_code, "CB1")
self.assertEqual(case.source, CASE_SOURCE.WEB)
self.assertEqual(case.log_set.count(), 2)
self.assertEqual(case.log_set.filter(code="CB1").count(), 1)
log = case.log_set.get(code="CB1")
self.assertEqual(
log.notes,
"Callback scheduled for %s - %s. "
% (
timezone.localtime(self._default_dt).strftime("%d/%m/%Y %H:%M"),
(timezone.localtime(self._default_dt) + datetime.timedelta(minutes=30)).strftime("%H:%M"),
),
)
_dt = timezone.localtime(self._default_dt)
expected_sla_72h = datetime.datetime(2015, 4, 7, 13, 30, 0, 0)
self.assertDictEqual(
log.context,
{
"requires_action_at": _dt.isoformat(),
"sla_120": (_dt + datetime.timedelta(minutes=120)).isoformat(),
"sla_480": (_dt + datetime.timedelta(hours=8)).isoformat(),
"sla_15": (_dt + datetime.timedelta(minutes=15)).isoformat(),
"sla_30": (_dt + datetime.timedelta(minutes=30)).isoformat(),
"sla_72h": timezone.make_aware(expected_sla_72h, _dt.tzinfo).isoformat(),
},
)
# checking email
self.assertEquals(len(mail.outbox), 1)
def test_create_should_ignore_outcome_code(self):
"""
        Here only to document the backwards-incompatible change: any
        outcome_code sent by the client is now ignored.
"""
check = make_recipe("legalaid.eligibility_check")
data = {
"eligibility_check": unicode(check.reference),
"personal_details": self.get_personal_details_default_post_data(),
"requires_action_at": self._default_dt.isoformat(),
"outcome_code": "TEST",
}
response = self.client.post(self.list_url, data=data, format="json")
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertCaseResponseKeys(response)
case = Case.objects.first()
self.assertNotEqual(case.outcome_code, "TEST")
import mock
import pytest
from cmdtree import shortcuts
@pytest.fixture()
def do_nothing():
def func(*args, **kwargs):
return "do_nothing"
return func
@pytest.fixture()
def mocked_parser():
return mock.Mock()
@pytest.fixture()
def parser_proxy():
return shortcuts.ParserProxy()
@pytest.fixture()
def group(mocked_parser, do_nothing):
return shortcuts.Group(
do_nothing,
"do_nothing",
mocked_parser,
full_path=["do_nothing", ]
)
@pytest.fixture()
def cmd(mocked_parser, do_nothing):
return shortcuts.Cmd(
do_nothing,
"do_nothing",
mocked_parser,
full_path=["do_nothing", ]
)
@pytest.mark.parametrize(
"path_prefix, cmd_name, expected",
(
(
("parent", "child"),
"execute",
("parent", "child", "execute")
),
(
["parent", "child"],
"execute",
("parent", "child", "execute")
),
(None, "execute", ("execute", )),
)
)
def test_get_cmd_path(path_prefix, cmd_name, expected):
assert shortcuts._get_cmd_path(
path_prefix, cmd_name
) == expected
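# A minimal sketch of the behaviour these cases pin down for _get_cmd_path
# (illustrative only, not necessarily the library's actual implementation):
#
#   def _get_cmd_path(path_prefix, cmd_name):
#       prefix = tuple(path_prefix) if path_prefix else ()
#       return prefix + (cmd_name,)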
def test_should_apply2parser_called_correctly(mocked_parser):
option = mocked_parser.option = mock.Mock()
argument = mocked_parser.argument = mock.Mock()
shortcuts._apply2parser(
[["cmd1", {}], ],
[["cmd1", {}], ["cmd1", {}], ],
mocked_parser
)
assert option.call_count == 2
assert argument.call_count == 1
@pytest.mark.parametrize(
"cmd_proxy, expected",
(
(shortcuts.CmdProxy(lambda x: x), True),
(lambda x: x, False),
)
)
def test_should_apply2parser_be_called_with_cmd_proxy(
cmd_proxy, expected, mocked_parser,
):
with mock.patch.object(
shortcuts, "_apply2parser"
) as mocked_apply:
shortcuts.apply2parser(cmd_proxy, mocked_parser)
assert mocked_apply.called is expected
class TestMkGroup:
def test_should_return_group_with_group(self, do_nothing):
assert isinstance(
shortcuts._mk_group("hello")(do_nothing),
shortcuts.Group
)
def test_should_raise_value_error_if_group_inited(
self, do_nothing, mocked_parser
):
group = shortcuts.Group(do_nothing, "test", mocked_parser)
with pytest.raises(ValueError):
shortcuts._mk_group("test")(group)
def test_should_get_func_name_called_if_no_name_given(
self, do_nothing
):
with mock.patch.object(
shortcuts, "_get_func_name"
) as mocked_get_name:
shortcuts._mk_group(None)(do_nothing)
assert mocked_get_name.called
def test_should_call_apply2parser_for_meta_cmd(
self, do_nothing
):
with mock.patch.object(
shortcuts, "apply2parser",
) as apply2parser:
cmd_proxy = shortcuts.CmdProxy(do_nothing)
shortcuts._mk_group("name")(cmd_proxy)
assert apply2parser.called
class TestMkCmd:
def test_should_return_cmd_with_cmd(self, do_nothing):
assert isinstance(
shortcuts._mk_cmd("hello")(do_nothing),
shortcuts.Cmd
)
def test_should_raise_value_error_if_cmd_inited(
self, do_nothing, mocked_parser
):
cmd = shortcuts.Cmd(do_nothing, "test", mocked_parser)
with pytest.raises(ValueError):
shortcuts._mk_cmd("test")(cmd)
def test_should_get_func_name_called_if_no_name_given(
self, do_nothing
):
with mock.patch.object(
shortcuts, "_get_func_name"
) as mocked_get_name:
shortcuts._mk_cmd(None)(do_nothing)
assert mocked_get_name.called
def test_should_call_apply2parser_for_meta_cmd(
self, do_nothing
):
with mock.patch.object(
shortcuts, "apply2parser",
) as apply2parser:
cmd_proxy = shortcuts.CmdProxy(do_nothing)
shortcuts._mk_cmd("name")(cmd_proxy)
assert apply2parser.called
def test_cmd_meta_should_handle_none_value_of_path_to_tuple():
cmd_meta = shortcuts.CmdMeta()
assert cmd_meta.full_path == tuple()
class TestParserProxy:
def test_should_option_add_options(self, parser_proxy):
parser_proxy.option("name", help="help")
assert parser_proxy.options == [(
("name", ), {"help": "help"}
)]
def test_should_argument_add_options(self, parser_proxy):
parser_proxy.argument("name", help="help")
assert parser_proxy.arguments == [(
("name", ), {"help": "help"}
)]
class TestGroup:
def test_should_group_instance_call_func(self, group):
assert group() == "do_nothing"
def test_should_full_path_be_none_if_path_is_none(self, group):
assert group.meta.full_path == ("do_nothing", )
def test_should_command_call_mk_command(self, group):
with mock.patch.object(
shortcuts, "_mk_cmd"
) as mocked_mk:
group.command("name")
mocked_mk.assert_called_with(
"name",
help=None,
path_prefix=("do_nothing", )
)
def test_should_group_call_mk_group(self, group):
with mock.patch.object(
shortcuts, "_mk_group"
) as mocked_mk:
group.group("name")
mocked_mk.assert_called_with(
"name",
help=None,
path_prefix=("do_nothing", )
)
class TestCmd:
def test_should_cmd_instance_call_func(self, cmd):
assert cmd() == "do_nothing"
def test_should_full_path_be_none_if_path_is_none(self, cmd):
assert cmd.meta.full_path == ("do_nothing", )
def test_get_func_name(do_nothing):
assert shortcuts._get_func_name(do_nothing) == "func"
|
|
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
import os.path
import logging
import datetime
from six import ensure_binary, ensure_str
from six.moves.urllib.parse import urlencode
from tornado.testing import AsyncHTTPTestCase
from tornado.web import create_signed_value
from scrapydd.main import make_app
from scrapydd.config import Config
from scrapydd.models import init_database, Node, session_scope
from scrapydd.models import Project, Spider, SpiderExecutionQueue
from scrapydd.poster.encode import multipart_encode
from scrapydd.schedule import SchedulerManager
from scrapydd.nodes import NodeManager
from scrapydd.workspace import RunnerFactory
from .base import AppTest
LOGGER = logging.getLogger(__name__)
TEST_EGG_FILE = os.path.join(os.path.dirname(__file__),
'test_project-1.0-py2.7.egg')
class MainTest(AsyncHTTPTestCase):
@classmethod
def setUpClass(cls):
os.environ['ASYNC_TEST_TIMEOUT'] = '500'
if os.path.exists('test.db'):
os.remove('test.db')
config = Config(values={'database_url': 'sqlite:///test.db'})
init_database(config)
def get_app(self):
config = Config()
scheduler_manager = SchedulerManager(config=config)
scheduler_manager.init()
node_manager = NodeManager(scheduler_manager)
node_manager.init()
runner_factory = RunnerFactory(config)
return make_app(scheduler_manager, node_manager, None,
secret_key='123', runner_factory=runner_factory)
def _delproject(self):
postdata = {'project': 'test_project'}
response = self.fetch('/delproject.json', method='POST',
body=urlencode(postdata))
self.assertIn(response.code, [404, 200])
def _upload_test_project(self):
# upload a project
post_data = {}
post_data['egg'] = open(TEST_EGG_FILE, 'rb')
post_data['project'] = 'test_project'
post_data['version'] = '1.0'
datagen, headers = multipart_encode(post_data)
# join the generated chunks as bytes so the multipart body is valid on Python 3 too
databuffer = b''.join([ensure_binary(x) for x in datagen])
response = self.fetch('/addversion.json', method='POST', headers=headers, body=databuffer)
self.assertEqual(200, response.code)
class DefaultTest(MainTest):
def get_app(self):
config = Config()
scheduler_manager = SchedulerManager(config=config)
scheduler_manager.init()
node_manager = NodeManager(scheduler_manager)
node_manager.init()
return make_app(scheduler_manager, node_manager, None, secret_key='123')
def test_default_page(self):
response = self.fetch('/')
self.assertEqual(200, response.code)
class SecurityTest(MainTest):
def get_app(self):
config = Config()
scheduler_manager = SchedulerManager(config=config)
scheduler_manager.init()
node_manager = NodeManager(scheduler_manager)
node_manager.init()
return make_app(scheduler_manager, node_manager,
None, enable_authentication=True, secret_key='123')
def test_no_cookie(self):
response = self.fetch('/', follow_redirects=False)
self.assertEqual(302, response.code)
def test_with_cookie(self):
username = 'test'
cookie_name, cookie_value = 'user', username
secure_cookie = ensure_str(create_signed_value(
self.get_app().settings["cookie_secret"],
cookie_name,
cookie_value))
headers = {'Cookie': '='.join((cookie_name, secure_cookie))}
response = self.fetch('/', method='GET', headers=headers)
self.assertEqual(200, response.code)
class UploadTest(MainTest):
def test_logging_init(self):
self.skipTest('no logging init')
def test_get(self):
response = self.fetch('/')
self.assertEqual(200, response.code)
class UploadTest2(AppTest):
def test_logging_init(self):
self.skipTest('no logging init')
def test_get(self):
response = self.fetch('/')
self.assertEqual(200, response.code)
def test_uploadproject_post(self):
project_name = 'test_project'
post_data = {}
post_data['egg'] = open(TEST_EGG_FILE, 'rb')
post_data['project'] = project_name
post_data['version'] = '1.0'
post_data['_xsrf'] = 'dummy'
datagen, headers = multipart_encode(post_data)
databuffer = b''.join([ensure_binary(x) for x in datagen])
headers['Cookie'] = "_xsrf=dummy"
response = self.fetch('/uploadproject', method='POST', headers=headers, body=databuffer)
self.assertEqual(200, response.code)
with session_scope() as session:
project = session.query(Project).filter_by(name=project_name).first()
self.assertIsNotNone(project)
self.assertEqual(project.name, project_name)
class ScheduleHandlerTest(AppTest):
def test_post(self):
with session_scope() as session:
session.query(SpiderExecutionQueue).delete()
session.commit()
# schedule once
project = 'test_project'
spider = 'success_spider'
postdata = urlencode({
'project': project,
'spider': spider
})
response = self.fetch('/schedule.json', method='POST', body=postdata)
self.assertEqual(200, response.code)
class AddScheduleHandlerTest(AppTest):
def test_add_scheduler(self):
project = 'test_project'
spider = 'success_spider'
cron = '* * * * *'
postdata = {
'project':project,
'spider':spider,
'cron':cron,
'_xsrf':'dummy',
}
response = self.fetch('/add_schedule.json', method='POST',
body=urlencode(postdata),
headers={"Cookie": "_xsrf=dummy"})
self.assertEqual(200, response.code)
self.assertIn(b'ok', response.body)
class ProjectListTest(MainTest):
def test_get(self):
response = self.fetch('/projects')
self.assertEqual(200, response.code)
class NodesHandlerTest(MainTest):
def test_register(self):
with session_scope() as session:
session.query(Node).delete()
response = self.fetch('/nodes', method="POST", body="")
with session_scope() as session:
new_node = session.query(Node).first()
self.assertEqual(200, response.code)
self.assertEqual('127.0.0.1', new_node.client_ip)
self.assertEqual(datetime.date.today(), new_node.create_time.date())
self.assertEqual(datetime.date.today(), new_node.last_heartbeat.date())
self.assertEqual(True, new_node.isalive)
self.assertEqual(None, new_node.tags)
class SpiderInstanceHandler2Test(AppTest):
def test_get(self):
self._upload_test_project()
with session_scope() as session:
spider = session.query(Spider).first()
project = spider.project
self.assertIsNotNone(spider)
self.assertIsNotNone(project)
response = self.fetch('/projects/%s/spiders/%s' % (project.id,
spider.id))
self.assertEqual(200, response.code)
class SpiderEggHandlerTest(AppTest):
def test_get(self):
self._upload_test_project()
with session_scope() as session:
spider = session.query(Spider).first()
self.assertIsNotNone(spider)
response = self.fetch('/spiders/%d/egg' % (spider.id, ))
self.assertEqual(200, response.code)
def test_get_egg_by_project_spider_name(self):
self._upload_test_project()
with session_scope() as session:
spider = session.query(Spider).first()
project = spider.project
self.assertIsNotNone(spider)
self.assertIsNotNone(project)
response = self.fetch('/projects/%s/spiders/%s/egg' % (project.id,
spider.id))
self.assertEqual(200, response.code)
|
|
from sqlalchemy.test.testing import eq_, assert_raises, assert_raises_message
from sqlalchemy import *
from sqlalchemy.orm import attributes
from sqlalchemy import exc as sa_exc
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm import *
from sqlalchemy.test.util import gc_collect
from sqlalchemy.test import testing
from test.orm import _base
from test.orm._fixtures import FixtureTest, User, Address, users, addresses
class TransactionTest(FixtureTest):
run_setup_mappers = 'once'
run_inserts = None
session = sessionmaker()
@classmethod
def setup_mappers(cls):
mapper(User, users, properties={
'addresses':relation(Address, backref='user',
cascade="all, delete-orphan", order_by=addresses.c.id),
})
mapper(Address, addresses)
class FixtureDataTest(TransactionTest):
run_inserts = 'each'
def test_attrs_on_rollback(self):
sess = self.session()
u1 = sess.query(User).get(7)
u1.name = 'ed'
sess.rollback()
eq_(u1.name, 'jack')
def test_commit_persistent(self):
sess = self.session()
u1 = sess.query(User).get(7)
u1.name = 'ed'
sess.flush()
sess.commit()
eq_(u1.name, 'ed')
def test_concurrent_commit_persistent(self):
s1 = self.session()
u1 = s1.query(User).get(7)
u1.name = 'ed'
s1.commit()
s2 = self.session()
u2 = s2.query(User).get(7)
assert u2.name == 'ed'
u2.name = 'will'
s2.commit()
assert u1.name == 'will'
class AutoExpireTest(TransactionTest):
def test_expunge_pending_on_rollback(self):
sess = self.session()
u2 = User(name='newuser')
sess.add(u2)
assert u2 in sess
sess.rollback()
assert u2 not in sess
def test_trans_pending_cleared_on_commit(self):
sess = self.session()
u2 = User(name='newuser')
sess.add(u2)
assert u2 in sess
sess.commit()
assert u2 in sess
u3 = User(name='anotheruser')
sess.add(u3)
sess.rollback()
assert u3 not in sess
assert u2 in sess
def test_update_deleted_on_rollback(self):
s = self.session()
u1 = User(name='ed')
s.add(u1)
s.commit()
# this actually tests that the delete() operation,
# when cascaded to the "addresses" collection, does not
# trigger a flush (via lazyload) before the cascade is complete.
s.delete(u1)
assert u1 in s.deleted
s.rollback()
assert u1 in s
assert u1 not in s.deleted
def test_gced_delete_on_rollback(self):
s = self.session()
u1 = User(name='ed')
s.add(u1)
s.commit()
s.delete(u1)
u1_state = attributes.instance_state(u1)
assert u1_state in s.identity_map.all_states()
assert u1_state in s._deleted
s.flush()
assert u1_state not in s.identity_map.all_states()
assert u1_state not in s._deleted
del u1
gc_collect()
assert u1_state.obj() is None
s.rollback()
assert u1_state in s.identity_map.all_states()
u1 = s.query(User).filter_by(name='ed').one()
assert u1_state not in s.identity_map.all_states()
assert s.scalar(users.count()) == 1
s.delete(u1)
s.flush()
assert s.scalar(users.count()) == 0
s.commit()
def test_trans_deleted_cleared_on_rollback(self):
s = self.session()
u1 = User(name='ed')
s.add(u1)
s.commit()
s.delete(u1)
s.commit()
assert u1 not in s
s.rollback()
assert u1 not in s
def test_update_deleted_on_rollback_cascade(self):
s = self.session()
u1 = User(name='ed', addresses=[Address(email_address='foo')])
s.add(u1)
s.commit()
s.delete(u1)
assert u1 in s.deleted
assert u1.addresses[0] in s.deleted
s.rollback()
assert u1 in s
assert u1 not in s.deleted
assert u1.addresses[0] not in s.deleted
def test_update_deleted_on_rollback_orphan(self):
s = self.session()
u1 = User(name='ed', addresses=[Address(email_address='foo')])
s.add(u1)
s.commit()
a1 = u1.addresses[0]
u1.addresses.remove(a1)
s.flush()
eq_(s.query(Address).filter(Address.email_address=='foo').all(), [])
s.rollback()
assert a1 not in s.deleted
assert u1.addresses == [a1]
def test_commit_pending(self):
sess = self.session()
u1 = User(name='newuser')
sess.add(u1)
sess.flush()
sess.commit()
eq_(u1.name, 'newuser')
def test_concurrent_commit_pending(self):
s1 = self.session()
u1 = User(name='edward')
s1.add(u1)
s1.commit()
s2 = self.session()
u2 = s2.query(User).filter(User.name=='edward').one()
u2.name = 'will'
s2.commit()
assert u1.name == 'will'
class TwoPhaseTest(TransactionTest):
@testing.requires.two_phase_transactions
def test_rollback_on_prepare(self):
s = self.session(twophase=True)
u = User(name='ed')
s.add(u)
s.prepare()
s.rollback()
assert u not in s
class RollbackRecoverTest(TransactionTest):
def test_pk_violation(self):
s = self.session()
a1 = Address(email_address='foo')
u1 = User(id=1, name='ed', addresses=[a1])
s.add(u1)
s.commit()
a2 = Address(email_address='bar')
u2 = User(id=1, name='jack', addresses=[a2])
u1.name = 'edward'
a1.email_address = 'foober'
s.add(u2)
assert_raises(sa_exc.FlushError, s.commit)
assert_raises(sa_exc.InvalidRequestError, s.commit)
s.rollback()
assert u2 not in s
assert a2 not in s
assert u1 in s
assert a1 in s
assert u1.name == 'ed'
assert a1.email_address == 'foo'
u1.name = 'edward'
a1.email_address = 'foober'
s.commit()
eq_(
s.query(User).all(),
[User(id=1, name='edward', addresses=[Address(email_address='foober')])]
)
@testing.requires.savepoints
def test_pk_violation_with_savepoint(self):
s = self.session()
a1 = Address(email_address='foo')
u1 = User(id=1, name='ed', addresses=[a1])
s.add(u1)
s.commit()
a2 = Address(email_address='bar')
u2 = User(id=1, name='jack', addresses=[a2])
u1.name = 'edward'
a1.email_address = 'foober'
s.begin_nested()
s.add(u2)
assert_raises(sa_exc.FlushError, s.commit)
assert_raises(sa_exc.InvalidRequestError, s.commit)
s.rollback()
assert u2 not in s
assert a2 not in s
assert u1 in s
assert a1 in s
s.commit()
assert s.query(User).all() == [User(id=1, name='edward', addresses=[Address(email_address='foober')])]
class SavepointTest(TransactionTest):
@testing.requires.savepoints
def test_savepoint_rollback(self):
s = self.session()
u1 = User(name='ed')
u2 = User(name='jack')
s.add_all([u1, u2])
s.begin_nested()
u3 = User(name='wendy')
u4 = User(name='foo')
u1.name = 'edward'
u2.name = 'jackward'
s.add_all([u3, u4])
eq_(s.query(User.name).order_by(User.id).all(), [('edward',), ('jackward',), ('wendy',), ('foo',)])
s.rollback()
assert u1.name == 'ed'
assert u2.name == 'jack'
eq_(s.query(User.name).order_by(User.id).all(), [('ed',), ('jack',)])
s.commit()
assert u1.name == 'ed'
assert u2.name == 'jack'
eq_(s.query(User.name).order_by(User.id).all(), [('ed',), ('jack',)])
@testing.requires.savepoints
def test_savepoint_delete(self):
s = self.session()
u1 = User(name='ed')
s.add(u1)
s.commit()
eq_(s.query(User).filter_by(name='ed').count(), 1)
s.begin_nested()
s.delete(u1)
s.commit()
eq_(s.query(User).filter_by(name='ed').count(), 0)
s.commit()
@testing.requires.savepoints
def test_savepoint_commit(self):
s = self.session()
u1 = User(name='ed')
u2 = User(name='jack')
s.add_all([u1, u2])
s.begin_nested()
u3 = User(name='wendy')
u4 = User(name='foo')
u1.name = 'edward'
u2.name = 'jackward'
s.add_all([u3, u4])
eq_(s.query(User.name).order_by(User.id).all(), [('edward',), ('jackward',), ('wendy',), ('foo',)])
s.commit()
def go():
assert u1.name == 'edward'
assert u2.name == 'jackward'
eq_(s.query(User.name).order_by(User.id).all(), [('edward',), ('jackward',), ('wendy',), ('foo',)])
self.assert_sql_count(testing.db, go, 1)
s.commit()
eq_(s.query(User.name).order_by(User.id).all(), [('edward',), ('jackward',), ('wendy',), ('foo',)])
@testing.requires.savepoints
def test_savepoint_rollback_collections(self):
s = self.session()
u1 = User(name='ed', addresses=[Address(email_address='foo')])
s.add(u1)
s.commit()
u1.name = 'edward'
u1.addresses.append(Address(email_address='bar'))
s.begin_nested()
u2 = User(name='jack', addresses=[Address(email_address='bat')])
s.add(u2)
eq_(s.query(User).order_by(User.id).all(),
[
User(name='edward', addresses=[Address(email_address='foo'), Address(email_address='bar')]),
User(name='jack', addresses=[Address(email_address='bat')])
]
)
s.rollback()
eq_(s.query(User).order_by(User.id).all(),
[
User(name='edward', addresses=[Address(email_address='foo'), Address(email_address='bar')]),
]
)
s.commit()
eq_(s.query(User).order_by(User.id).all(),
[
User(name='edward', addresses=[Address(email_address='foo'), Address(email_address='bar')]),
]
)
@testing.requires.savepoints
def test_savepoint_commit_collections(self):
s = self.session()
u1 = User(name='ed', addresses=[Address(email_address='foo')])
s.add(u1)
s.commit()
u1.name = 'edward'
u1.addresses.append(Address(email_address='bar'))
s.begin_nested()
u2 = User(name='jack', addresses=[Address(email_address='bat')])
s.add(u2)
eq_(s.query(User).order_by(User.id).all(),
[
User(name='edward', addresses=[Address(email_address='foo'), Address(email_address='bar')]),
User(name='jack', addresses=[Address(email_address='bat')])
]
)
s.commit()
eq_(s.query(User).order_by(User.id).all(),
[
User(name='edward', addresses=[Address(email_address='foo'), Address(email_address='bar')]),
User(name='jack', addresses=[Address(email_address='bat')])
]
)
s.commit()
eq_(s.query(User).order_by(User.id).all(),
[
User(name='edward', addresses=[Address(email_address='foo'), Address(email_address='bar')]),
User(name='jack', addresses=[Address(email_address='bat')])
]
)
@testing.requires.savepoints
def test_expunge_pending_on_rollback(self):
sess = self.session()
sess.begin_nested()
u2 = User(name='newuser')
sess.add(u2)
assert u2 in sess
sess.rollback()
assert u2 not in sess
@testing.requires.savepoints
def test_update_deleted_on_rollback(self):
s = self.session()
u1 = User(name='ed')
s.add(u1)
s.commit()
s.begin_nested()
s.delete(u1)
assert u1 in s.deleted
s.rollback()
assert u1 in s
assert u1 not in s.deleted
class AccountingFlagsTest(TransactionTest):
def test_no_expire_on_commit(self):
sess = sessionmaker(expire_on_commit=False)()
u1 = User(name='ed')
sess.add(u1)
sess.commit()
testing.db.execute(users.update(users.c.name=='ed').values(name='edward'))
assert u1.name == 'ed'
sess.expire_all()
assert u1.name == 'edward'
def test_rollback_no_accounting(self):
sess = sessionmaker(_enable_transaction_accounting=False)()
u1 = User(name='ed')
sess.add(u1)
sess.commit()
u1.name = 'edwardo'
sess.rollback()
testing.db.execute(users.update(users.c.name=='ed').values(name='edward'))
assert u1.name == 'edwardo'
sess.expire_all()
assert u1.name == 'edward'
def test_commit_no_accounting(self):
sess = sessionmaker(_enable_transaction_accounting=False)()
u1 = User(name='ed')
sess.add(u1)
sess.commit()
u1.name = 'edwardo'
sess.rollback()
testing.db.execute(users.update(users.c.name=='ed').values(name='edward'))
assert u1.name == 'edwardo'
sess.commit()
assert testing.db.execute(select([users.c.name])).fetchall() == [('edwardo',)]
assert u1.name == 'edwardo'
sess.delete(u1)
sess.commit()
def test_preflush_no_accounting(self):
sess = sessionmaker(_enable_transaction_accounting=False, autocommit=True)()
u1 = User(name='ed')
sess.add(u1)
sess.flush()
sess.begin()
u1.name = 'edwardo'
u2 = User(name="some other user")
sess.add(u2)
sess.rollback()
sess.begin()
assert testing.db.execute(select([users.c.name])).fetchall() == [('ed',)]
class AutoCommitTest(TransactionTest):
def test_begin_nested_requires_trans(self):
sess = create_session(autocommit=True)
assert_raises(sa_exc.InvalidRequestError, sess.begin_nested)
def test_begin_preflush(self):
sess = create_session(autocommit=True)
u1 = User(name='ed')
sess.add(u1)
sess.begin()
u2 = User(name='some other user')
sess.add(u2)
sess.rollback()
assert u2 not in sess
assert u1 in sess
assert sess.query(User).filter_by(name='ed').one() is u1
class NaturalPKRollbackTest(_base.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('users', metadata,
Column('name', String(50), primary_key=True)
)
@classmethod
def setup_classes(cls):
class User(_base.ComparableEntity):
pass
@testing.resolve_artifact_names
def test_rollback_recover(self):
mapper(User, users)
session = sessionmaker()()
u1, u2, u3= \
User(name='u1'),\
User(name='u2'),\
User(name='u3')
session.add_all([u1, u2, u3])
session.commit()
session.delete(u2)
u4 = User(name='u2')
session.add(u4)
session.flush()
u5 = User(name='u3')
session.add(u5)
assert_raises(orm_exc.FlushError, session.flush)
assert u5 not in session
assert u2 not in session.deleted
session.rollback()
|
|
from __future__ import unicode_literals
from functools import partial, wraps
from collections import namedtuple
from .predicate import match_instance
from .compat import string_types
from .predicate import PredicateRegistry
from .arginfo import arginfo
from .error import RegistrationError
class dispatch(object):
"""Decorator to make a function dispatch based on its arguments.
This takes the predicates to dispatch on as zero or more
parameters.
:param predicates: sequence of :class:`reg.Predicate` instances to
do the dispatch on. You create predicates using
:func:`reg.match_instance`, :func:`reg.match_key`,
:func:`reg.match_class`, or with a custom predicate class. You
can also pass in a plain string argument, which is turned into a
:func:`reg.match_instance` predicate.
:param get_key_lookup: a function that gets a
:class:`PredicateRegistry` instance and returns a key lookup. A
:class:`PredicateRegistry` instance is itself a key lookup, but
you can return a caching key lookup (such as
:class:`reg.DictCachingKeyLookup` or
:class:`reg.LruCachingKeyLookup`) to make it more efficient.
:returns: a function that you can use as if it were a
:class:`reg.Dispatch` instance.
"""
def __init__(self, *predicates, **kw):
self.predicates = [self._make_predicate(predicate)
for predicate in predicates]
self.get_key_lookup = kw.pop('get_key_lookup', identity)
def _make_predicate(self, predicate):
if isinstance(predicate, string_types):
return match_instance(predicate)
return predicate
def __call__(self, callable):
return Dispatch(self.predicates, callable, self.get_key_lookup).call
def identity(registry):
return registry
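# A minimal usage sketch of the decorator above (illustrative; the view/Animal
# names are made up for the example and are not part of this module):
#
#   @dispatch('obj')
#   def view(obj):
#       return "fallback"        # called when nothing more specific matches
#
#   class Animal(object):
#       pass
#
#   @view.register(obj=Animal)
#   def view_animal(obj):
#       return "animal"
#
#   view(Animal())   # -> "animal"
#   view(42)         # -> "fallback"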
class LookupEntry(
namedtuple('LookupEntry', 'lookup key')):
"""The dispatch data associated to a key."""
__slots__ = ()
@property
def component(self):
"""The function to dispatch to, excluding fallbacks."""
return self.lookup.component(self.key)
@property
def fallback(self):
"""The approriate fallback implementation."""
return self.lookup.fallback(self.key)
@property
def matches(self):
"""An iterator over all the compatible implementations."""
return self.lookup.all(self.key)
@property
def all_matches(self):
"""The list of all compatible implementations."""
return list(self.matches)
class Dispatch(object):
"""Dispatch function.
You can register implementations based on particular predicates. The
dispatch function dispatches to these implementations based on its
arguments.
:param predicates: a list of predicates.
:param callable: the Python function object to register dispatch
implementations for. The signature of an implementation needs to
match that of this function. This function is used as a fallback
implementation that is called if no specific implementations match.
:param get_key_lookup: a function that gets a
:class:`PredicateRegistry` instance and returns a key lookup. A
:class:`PredicateRegistry` instance is itself a key lookup, but
you can return a caching key lookup (such as
:class:`reg.DictCachingKeyLookup` or
:class:`reg.LruCachingKeyLookup`) to make it more efficient.
"""
def __init__(self, predicates, callable, get_key_lookup):
self.wrapped_func = callable
self.get_key_lookup = get_key_lookup
self._original_predicates = predicates
self._define_call()
self._register_predicates(predicates)
def _register_predicates(self, predicates):
self.registry = PredicateRegistry(*predicates)
self.predicates = predicates
self.call.key_lookup = self.key_lookup = \
self.get_key_lookup(self.registry)
self.call.__globals__.update(
_registry_key=self.registry.key,
_component_lookup=self.key_lookup.component,
_fallback_lookup=self.key_lookup.fallback,
)
self._predicate_key.__globals__.update(
_registry_key=self.registry.key,
_return_type=partial(LookupEntry, self.key_lookup),
)
def _define_call(self):
# We build the generic function on the fly. Its definition
# requires the signature of the wrapped function and the
# arguments needed by the registered predicates
# (predicate_args):
code_template = """\
def call({signature}):
_key = _registry_key({predicate_args})
return (_component_lookup(_key) or
_fallback_lookup(_key) or
_fallback)({signature})
"""
args = arginfo(self.wrapped_func)
signature = format_signature(args)
predicate_args = ', '.join('{0}={0}'.format(x) for x in args.args)
code_source = code_template.format(
signature=signature,
predicate_args=predicate_args)
# We now compile call to byte-code:
self.call = call = wraps(self.wrapped_func)(execute(
code_source,
_registry_key=None,
_component_lookup=None,
_fallback_lookup=None,
_fallback=self.wrapped_func)['call'])
# We copy over the defaults from the wrapped function.
call.__defaults__ = args.defaults
# Make the methods available as attributes of call
for k in dir(type(self)):
if not k.startswith('_'):
setattr(call, k, getattr(self, k))
call.wrapped_func = self.wrapped_func
# We now build the implementation for the predicate_key method
self._predicate_key = execute(
"def predicate_key({signature}):\n"
" return _return_type(_registry_key({predicate_args}))".format(
signature=format_signature(args),
predicate_args=predicate_args),
_registry_key=None, _return_type=None)['predicate_key']
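# For a wrapped function ``def view(obj)`` the templates above expand to
# roughly the following generated source (shown for illustration):
#
#   def call(obj):
#       _key = _registry_key(obj=obj)
#       return (_component_lookup(_key) or
#               _fallback_lookup(_key) or
#               _fallback)(obj)
#
#   def predicate_key(obj):
#       return _return_type(_registry_key(obj=obj))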
def clean(self):
"""Clean up implementations and added predicates.
This restores the dispatch function to its original state,
removing registered implementations and predicates added
using :meth:`reg.Dispatch.add_predicates`.
"""
self._register_predicates(self._original_predicates)
def add_predicates(self, predicates):
"""Add new predicates.
Extend the predicates used by this dispatch function. This can be
used to add predicates that are configured at startup time.
Note that this clears up any registered implementations.
:param predicates: a list of predicates to add.
"""
self._register_predicates(self.predicates + predicates)
def register(self, func=None, **key_dict):
"""Register an implementation.
If ``func`` is not specified, this method can be used as a
decorator and the decorated function will be used as the
actual ``func`` argument.
:param func: a function that implements behavior for this
dispatch function. It needs to have the same signature as
the original dispatch function. If this is a
:class:`reg.DispatchMethod`, then this means it needs to
take a first context argument.
:param key_dict: keyword arguments describing the registration,
with as keys predicate name and as values predicate values.
:returns: ``func``.
"""
if func is None:
return partial(self.register, **key_dict)
validate_signature(func, self.wrapped_func)
predicate_key = self.registry.key_dict_to_predicate_key(key_dict)
self.registry.register(predicate_key, func)
return func
def by_args(self, *args, **kw):
"""Lookup an implementation by invocation arguments.
:param args: positional arguments used in invocation.
:param kw: named arguments used in invocation.
:returns: a :class:`reg.LookupEntry`.
"""
return self._predicate_key(*args, **kw)
def by_predicates(self, **predicate_values):
"""Lookup an implementation by predicate values.
:param predicate_values: the values of the predicates to lookup.
:returns: a :class:`reg.LookupEntry`.
"""
return LookupEntry(
self.key_lookup,
self.registry.key_dict_to_predicate_key(predicate_values))
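# Continuing the illustrative ``view`` sketch above: view.by_args(Animal())
# returns a LookupEntry whose .component is view_animal, while
# view.by_predicates(obj=Animal) looks up the same entry directly from the
# predicate value rather than from an invocation argument.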
def validate_signature(f, dispatch):
f_arginfo = arginfo(f)
if f_arginfo is None:
raise RegistrationError(
"Cannot register non-callable for dispatch "
"%r: %r" % (dispatch, f))
if not same_signature(arginfo(dispatch), f_arginfo):
raise RegistrationError(
"Signature of callable dispatched to (%r) "
"not that of dispatch (%r)" % (
f, dispatch))
def format_signature(args):
return ', '.join(
args.args +
(['*' + args.varargs] if args.varargs else []) +
(['**' + args.keywords] if args.keywords else []))
def same_signature(a, b):
"""Check whether a arginfo and b arginfo are the same signature.
Actual names of arguments may differ. Default arguments may be
different.
"""
a_args = set(a.args)
b_args = set(b.args)
return (len(a_args) == len(b_args) and
a.varargs == b.varargs and
a.keywords == b.keywords)
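# For example, for a wrapped function ``def f(a, b, *args, **kw)`` the arginfo
# helpers above give format_signature(...) == 'a, b, *args, **kw', and
# same_signature only requires the argument count and the varargs/keywords
# slots to line up, not the positional argument names.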
def execute(code_source, **namespace):
"""Execute code in a namespace, returning the namespace."""
code_object = compile(
code_source, '<generated code: {}>'.format(code_source), 'exec')
exec(code_object, namespace)
return namespace
|
|
# -*- coding: utf-8 -*-
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2014 and later, Alexander J G Pitchford
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
# @author: Alexander Pitchford
# @email1: agp1@aber.ac.uk
# @email2: alex.pitchford@gmail.com
# @organization: Aberystwyth University
# @supervisor: Daniel Burgarth
"""
Pulse generator - Generate pulses for the timeslots
Each class defines a gen_pulse function that produces a float array of
size num_tslots. Each class produces a different type of pulse.
See the class and gen_pulse function descriptions for details
"""
import numpy as np
import qutip.logging_utils as logging
logger = logging.get_logger()
import qutip.control.dynamics as dynamics
import qutip.control.errors as errors
def create_pulse_gen(pulse_type='RND', dyn=None, pulse_params=None):
"""
Create and return a pulse generator object matching the given type.
The pulse generators each produce a different type of pulse,
see the gen_pulse function description for details.
These are the random pulse options:
RND - Independent random value in each timeslot
RNDFOURIER - Fourier series with random coefficients
RNDWAVES - Summation of random waves
RNDWALK1 - Random change in amplitude each timeslot
RNDWALK2 - Random change in amp gradient each timeslot
These are the other non-periodic options:
LIN - Linear, i.e. constant gradient over the time
ZERO - special case of the LIN pulse, where the gradient is 0
These are the periodic options
SINE - Sine wave
SQUARE - Square wave
SAW - Saw tooth wave
TRIANGLE - Triangular wave
If a Dynamics object is passed in then this is used to instantiate
the PulseGen, meaning that some timeslot and amplitude properties
are copied over.
"""
if pulse_type == 'RND':
return PulseGenRandom(dyn, params=pulse_params)
if pulse_type == 'RNDFOURIER':
return PulseGenRndFourier(dyn, params=pulse_params)
if pulse_type == 'RNDWAVES':
return PulseGenRndWaves(dyn, params=pulse_params)
if pulse_type == 'RNDWALK1':
return PulseGenRndWalk1(dyn, params=pulse_params)
if pulse_type == 'RNDWALK2':
return PulseGenRndWalk2(dyn, params=pulse_params)
elif pulse_type == 'LIN':
return PulseGenLinear(dyn, params=pulse_params)
elif pulse_type == 'ZERO':
return PulseGenZero(dyn, params=pulse_params)
elif pulse_type == 'SINE':
return PulseGenSine(dyn, params=pulse_params)
elif pulse_type == 'SQUARE':
return PulseGenSquare(dyn, params=pulse_params)
elif pulse_type == 'SAW':
return PulseGenSaw(dyn, params=pulse_params)
elif pulse_type == 'TRIANGLE':
return PulseGenTriangle(dyn, params=pulse_params)
elif pulse_type == 'GAUSSIAN':
return PulseGenGaussian(dyn, params=pulse_params)
elif pulse_type == 'CRAB_FOURIER':
return PulseGenCrabFourier(dyn, params=pulse_params)
elif pulse_type == 'GAUSSIAN_EDGE':
return PulseGenGaussianEdge(dyn, params=pulse_params)
else:
raise ValueError("No option for pulse_type '{}'".format(pulse_type))
class PulseGen(object):
"""
Pulse generator
Base class for all Pulse generators
The object can optionally be instantiated with a Dynamics object,
in which case the timeslots and amplitude scaling and offset
are copied from that.
Otherwise the class can be used independently by setting:
tau (array of timeslot durations)
or
num_tslots and pulse_time for equally spaced timeslots
Attributes
----------
num_tslots : integer
Number of timeslots, aka timeslices
(copied from Dynamics if given)
pulse_time : float
total duration of the pulse
(copied from Dynamics.evo_time if given)
scaling : float
linear scaling applied to the pulse
(copied from Dynamics.initial_ctrl_scaling if given)
offset : float
linear offset applied to the pulse
(copied from Dynamics.initial_ctrl_offset if given)
tau : array[num_tslots] of float
Duration of each timeslot
(copied from Dynamics if given)
lbound : float
Lower boundary for the pulse amplitudes
Note that the scaling and offset attributes can be used to fully
bound the pulse for all generators except some of the random ones
This bound (if set) may result in additional shifting / scaling
Default is -Inf
ubound : float
Upper boundary for the pulse amplitudes
Note that the scaling and offset attributes can be used to fully
bound the pulse for all generators except some of the random ones
This bound (if set) may result in additional shifting / scaling
Default is Inf
periodic : boolean
True if the pulse generator produces periodic pulses
random : boolean
True if the pulse generator produces random pulses
log_level : integer
level of messaging output from the logger.
Options are attributes of qutip.logging_utils,
in decreasing levels of messaging, are:
DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL
Anything WARN or above is effectively 'quiet' execution,
assuming everything runs as expected.
The default NOTSET implies that the level will be taken from
the QuTiP settings file, which by default is WARN
"""
def __init__(self, dyn=None, params=None):
self.parent = dyn
self.params = params
self.reset()
def reset(self):
"""
reset attributes to default values
"""
if isinstance(self.parent, dynamics.Dynamics):
dyn = self.parent
self.num_tslots = dyn.num_tslots
self.pulse_time = dyn.evo_time
self.scaling = dyn.initial_ctrl_scaling
self.offset = dyn.initial_ctrl_offset
self.tau = dyn.tau
self.log_level = dyn.log_level
else:
self.num_tslots = 100
self.pulse_time = 1.0
self.scaling = 1.0
self.tau = None
self.offset = 0.0
self._uses_time = False
self.time = None
self._pulse_initialised = False
self.periodic = False
self.random = False
self.lbound = None
self.ubound = None
self.ramping_pulse = None
self.apply_params()
def apply_params(self, params=None):
"""
Set object attributes based on the dictionary (if any) passed in the
instantiation, or passed as a parameter
This is called during the instantiation automatically.
The key value pairs are the attribute name and value
"""
if not params:
params = self.params
if isinstance(params, dict):
self.params = params
for key in params:
setattr(self, key, params[key])
@property
def log_level(self):
return logger.level
@log_level.setter
def log_level(self, lvl):
"""
Set the log_level attribute and set the level of the logger,
that is, call logger.setLevel(lvl)
"""
logger.setLevel(lvl)
def gen_pulse(self):
"""
returns the pulse as an array of values for each timeslot
Must be implemented by subclass
"""
# must be implemented by subclass
raise errors.UsageError(
"No method defined for generating a pulse. "
" Suspect base class was used where sub class should have been")
def init_pulse(self):
"""
Initialise the pulse parameters
"""
if self.tau is None:
self.tau = np.ones(self.num_tslots, dtype='f') * \
self.pulse_time/self.num_tslots
if self._uses_time:
self.time = np.zeros(self.num_tslots, dtype=float)
for k in range(self.num_tslots-1):
self.time[k+1] = self.time[k] + self.tau[k]
self._pulse_initialised = True
if self.lbound is not None:
if np.isinf(self.lbound):
self.lbound = None
if self.ubound is not None:
if np.isinf(self.ubound):
self.ubound = None
if self.ubound is not None and self.lbound is not None:
if self.ubound < self.lbound:
raise ValueError("ubound cannot be less than the lbound")
def _apply_bounds_and_offset(self, pulse):
"""
Ensure that the randomly generated pulse fits within the bounds
(after applying the offset)
Assumes that pulses passed are centered around zero (on average)
"""
if self.lbound is None and self.ubound is None:
return pulse + self.offset
max_amp = max(pulse)
min_amp = min(pulse)
if ((self.ubound is None or max_amp + self.offset <= self.ubound) and
(self.lbound is None or min_amp + self.offset >= self.lbound)):
return pulse + self.offset
# Some shifting / scaling is required.
if self.ubound is None or self.lbound is None:
# One of the bounds is inf, so just shift the pulse
if self.lbound is None:
# max_amp + offset must exceed the ubound
return pulse + self.ubound - max_amp
else:
# min_amp + offset must be below the lbound
return pulse + self.lbound - min_amp
else:
bound_range = self.ubound - self.lbound
amp_range = max_amp - min_amp
if max_amp - min_amp > bound_range:
# pulse range is too high, it must be scaled
pulse = pulse * bound_range / amp_range
# otherwise the pulse should fit anyway
return pulse + self.lbound - min(pulse)
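# Worked example for the bounds logic above (illustrative): with lbound=0,
# ubound=1, offset=0 and a pulse spanning [-2, 2], the amplitude range (4)
# exceeds the bound range (1), so the pulse is scaled by 1/4 and then shifted
# so that its minimum sits exactly at lbound, giving values in [0, 1].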
def _apply_ramping_pulse(self, pulse, ramping_pulse=None):
if ramping_pulse is None:
ramping_pulse = self.ramping_pulse
if ramping_pulse is not None:
pulse = pulse*ramping_pulse
return pulse
class PulseGenZero(PulseGen):
"""
Generates a flat pulse
"""
def gen_pulse(self):
"""
Generate a pulse with the same value in every timeslot.
The value will be zero, unless the offset is not zero,
in which case it will be the offset
"""
pulse = np.zeros(self.num_tslots)
return self._apply_bounds_and_offset(pulse)
class PulseGenRandom(PulseGen):
"""
Generates random pulses as simply random values for each timeslot
"""
def reset(self):
PulseGen.reset(self)
self.random = True
self.apply_params()
def gen_pulse(self):
"""
Generate a pulse of random values between 1 and -1
Values are scaled using the scaling property
and shifted using the offset property
Returns the pulse as an array of values for each timeslot
"""
pulse = (2*np.random.random(self.num_tslots) - 1) * self.scaling
return self._apply_bounds_and_offset(pulse)
class PulseGenRndFourier(PulseGen):
"""
Generates pulses by summing sine waves as a Fourier series
with random coefficients
Attributes
----------
scaling : float
The pulses should fit approximately within -/+scaling
(before the offset is applied)
as it is used to set a maximum for each component wave
Use bounds to be sure
(copied from Dynamics.initial_ctrl_scaling if given)
min_wavelen : float
Minimum wavelength of any component wave
Set by default to 1/10th of the pulse time
"""
def reset(self):
"""
reset attributes to default values
"""
PulseGen.reset(self)
self.random = True
self._uses_time = True
try:
self.min_wavelen = self.pulse_time / 10.0
except Exception:
self.min_wavelen = 0.1
self.apply_params()
def gen_pulse(self, min_wavelen=None):
"""
Generate a random pulse based on a Fourier series with a minimum
wavelength
"""
if min_wavelen is not None:
self.min_wavelen = min_wavelen
min_wavelen = self.min_wavelen
if min_wavelen > self.pulse_time:
raise ValueError("Minimum wavelength cannot be greater than "
"the pulse time")
if not self._pulse_initialised:
self.init_pulse()
# use some phase to avoid the first pulse being always 0
sum_wave = np.zeros(self.tau.shape)
wavelen = 2.0*self.pulse_time
t = self.time
wl = []
while wavelen > min_wavelen:
wl.append(wavelen)
wavelen = wavelen/2.0
num_comp_waves = len(wl)
amp_scale = np.sqrt(8)*self.scaling / float(num_comp_waves)
for wavelen in wl:
amp = amp_scale*(np.random.rand()*2 - 1)
phase_off = np.random.rand()*np.pi/2.0
curr_wave = amp*np.sin(2*np.pi*t/wavelen + phase_off)
sum_wave += curr_wave
return self._apply_bounds_and_offset(sum_wave)
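# Worked example (illustrative): with the default pulse_time of 1.0 and
# min_wavelen of 0.1, the loop above builds component wavelengths
# [2.0, 1.0, 0.5, 0.25, 0.125], i.e. five waves, each capped at
# sqrt(8)*scaling/5 in amplitude before summing.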
class PulseGenRndWaves(PulseGen):
"""
Generates pulses by summing sine waves with random frequencies
amplitudes and phase offset
Attributes
----------
scaling : float
The pulses should fit approximately within -/+scaling
(before the offset is applied)
as it is used to set a maximum for each component wave
Use bounds to be sure
(copied from Dynamics.initial_ctrl_scaling if given)
num_comp_waves : integer
Number of component waves. That is the number of waves that
are summed to make the pulse signal
Set to 20 by default.
min_wavelen : float
Minimum wavelength of any component wave
Set by default to 1/10th of the pulse time
max_wavelen : float
Maximum wavelength of any component wave
Set by default to twice the pulse time
"""
def reset(self):
"""
reset attributes to default values
"""
PulseGen.reset(self)
self.random = True
self._uses_time = True
self.num_comp_waves = 20
try:
self.min_wavelen = self.pulse_time / 10.0
except Exception:
self.min_wavelen = 0.1
try:
self.max_wavelen = 2*self.pulse_time
except Exception:
self.max_wavelen = 10.0
self.apply_params()
def gen_pulse(self, num_comp_waves=None,
min_wavelen=None, max_wavelen=None):
"""
Generate a random pulse by summing sine waves with random freq,
amplitude and phase offset
"""
if num_comp_waves is not None:
self.num_comp_waves = num_comp_waves
if min_wavelen is not None:
self.min_wavelen = min_wavelen
if max_wavelen is not None:
self.max_wavelen = max_wavelen
num_comp_waves = self.num_comp_waves
min_wavelen = self.min_wavelen
max_wavelen = self.max_wavelen
if min_wavelen > self.pulse_time:
raise ValueError("Minimum wavelength cannot be greater than "
"the pulse time")
if max_wavelen <= min_wavelen:
raise ValueError("Maximum wavelength must be greater than "
"the minimum wavelength")
if not self._pulse_initialised:
self.init_pulse()
# use some phase to avoid the first pulse being always 0
sum_wave = np.zeros(self.tau.shape)
t = self.time
wl_range = max_wavelen - min_wavelen
amp_scale = np.sqrt(8)*self.scaling / float(num_comp_waves)
for n in range(num_comp_waves):
amp = amp_scale*(np.random.rand()*2 - 1)
phase_off = np.random.rand()*np.pi/2.0
wavelen = min_wavelen + np.random.rand()*wl_range
curr_wave = amp*np.sin(2*np.pi*t/wavelen + phase_off)
sum_wave += curr_wave
return self._apply_bounds_and_offset(sum_wave)
class PulseGenRndWalk1(PulseGen):
"""
Generates pulses by using a random walk algorithm
Attributes
----------
scaling : float
Used as the range for the starting amplitude
Note: bounds must be used if values must be restricted.
Also scales the max_d_amp value
(copied from Dynamics.initial_ctrl_scaling if given)
max_d_amp : float
Maximum amount amplitude will change between timeslots
Note this is also factored by the scaling attribute
"""
def reset(self):
"""
reset attributes to default values
"""
PulseGen.reset(self)
self.random = True
self.max_d_amp = 0.1
self.apply_params()
def gen_pulse(self, max_d_amp=None):
"""
Generate a pulse by changing the amplitude a random amount between
-max_d_amp and +max_d_amp at each timeslot. The walk will start at
a random amplitude between -/+scaling.
"""
if max_d_amp is not None:
self.max_d_amp = max_d_amp
max_d_amp = self.max_d_amp*self.scaling
if not self._pulse_initialised:
self.init_pulse()
walk = np.zeros(self.tau.shape)
amp = self.scaling*(np.random.rand()*2 - 1)
for k in range(len(walk)):
walk[k] = amp
amp += (np.random.rand()*2 - 1)*max_d_amp
return self._apply_bounds_and_offset(walk)
class PulseGenRndWalk2(PulseGen):
"""
Generates pulses by using a random walk algorithm
Note this is best used with bounds as the walks tend to wander far
Attributes
----------
scaling : float
Used as the range for the starting amplitude
Note: bounds must be used if values must be restricted.
Also scales the max_d2_amp value
(copied from Dynamics.initial_ctrl_scaling if given)
max_d2_amp : float
Maximum amount amplitude gradient will change between timeslots
Note this is also factored by the scaling attribute
"""
def reset(self):
"""
reset attributes to default values
"""
PulseGen.reset(self)
self.random = True
self.max_d2_amp = 0.01
self.apply_params()
def gen_pulse(self, init_grad_range=None, max_d2_amp=None):
"""
Generate a pulse by changing the amplitude gradient a random amount
between -max_d2_amp and +max_d2_amp at each timeslot.
The walk will start at a random amplitude between -/+scaling.
The gradient will start at 0
"""
if max_d2_amp is not None:
self.max_d2_amp = max_d2_amp
max_d2_amp = self.max_d2_amp
if not self._pulse_initialised:
self.init_pulse()
walk = np.zeros(self.tau.shape)
amp = self.scaling*(np.random.rand()*2 - 1)
print("Start amp {}".format(amp))
grad = 0.0
print("Start grad {}".format(grad))
for k in range(len(walk)):
walk[k] = amp
grad += (np.random.rand()*2 - 1)*max_d2_amp
amp += grad
# print("grad {}".format(grad))
return self._apply_bounds_and_offset(walk)
class PulseGenLinear(PulseGen):
"""
Generates linear pulses
Attributes
----------
gradient : float
Gradient of the line.
Note this is calculated from the start_val and end_val if these
are given
start_val : float
Start point of the line. That is the starting amplitude
end_val : float
End point of the line.
That is the amplitude at the start of the last timeslot
"""
def reset(self):
"""
reset attributes to default values
"""
PulseGen.reset(self)
self.gradient = None
self.start_val = -1.0
self.end_val = 1.0
self.apply_params()
def init_pulse(self, gradient=None, start_val=None, end_val=None):
"""
Calculate the gradient if pulse is defined by start and
end point values
"""
PulseGen.init_pulse(self)
if start_val is not None and end_val is not None:
self.start_val = start_val
self.end_val = end_val
if self.start_val is not None and self.end_val is not None:
self.gradient = float(self.end_val - self.start_val) / \
(self.pulse_time - self.tau[-1])
def gen_pulse(self, gradient=None, start_val=None, end_val=None):
"""
Generate a linear pulse using either the gradient and start value
or using the end point to calculate the gradient
Note that the scaling and offset parameters are still applied,
so unless these values are the default 1.0 and 0.0, then the
actual gradient etc will be different
Returns the pulse as an array of values for each timeslot
"""
if (gradient is not None or
start_val is not None or end_val is not None):
self.init_pulse(gradient, start_val, end_val)
if not self._pulse_initialised:
self.init_pulse()
pulse = np.empty(self.num_tslots)
t = 0.0
for k in range(self.num_tslots):
y = self.gradient*t + self.start_val
pulse[k] = self.scaling*y
t = t + self.tau[k]
return self._apply_bounds_and_offset(pulse)
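# Worked example (illustrative): with the defaults start_val=-1.0, end_val=1.0,
# num_tslots=100 and pulse_time=1.0 (so each tau is 0.01), the gradient is
# 2/0.99 ~= 2.02 and the generated pulse runs from -1.0 at t=0 to +1.0 at the
# start of the last timeslot (before scaling and offset are applied).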
class PulseGenPeriodic(PulseGen):
"""
Intermediate class for all periodic pulse generators
All of the periodic pulses range from -1 to 1
All have a start phase that can be set between 0 and 2pi
Attributes
----------
num_waves : float
Number of complete waves (cycles) that occur in the pulse.
wavelen and freq calculated from this if it is given
wavelen : float
Wavelength of the pulse (assuming the speed is 1)
freq is calculated from this if it is given
freq : float
Frequency of the pulse
start_phase : float
Phase of the pulse signal when t=0
"""
def reset(self):
"""
reset attributes to default values
"""
PulseGen.reset(self)
self.periodic = True
self.num_waves = None
self.freq = 1.0
self.wavelen = None
self.start_phase = 0.0
self.apply_params()
def init_pulse(self, num_waves=None, wavelen=None,
freq=None, start_phase=None):
"""
Calculate the wavelength, frequency, number of waves etc
from each other and the other parameters
If num_waves is given then the other parameters are worked from this
Otherwise if the wavelength is given then it is the driver
Otherwise the frequency is used to calculate wavelength and num_waves
"""
PulseGen.init_pulse(self)
if start_phase is not None:
self.start_phase = start_phase
if num_waves is not None or wavelen is not None or freq is not None:
self.num_waves = num_waves
self.wavelen = wavelen
self.freq = freq
if self.num_waves is not None:
self.freq = float(self.num_waves) / self.pulse_time
self.wavelen = 1.0/self.freq
elif self.wavelen is not None:
self.freq = 1.0/self.wavelen
# the number of waves is the pulse time divided by the wavelength
self.num_waves = self.pulse_time/self.wavelen
else:
self.wavelen = 1.0/self.freq
self.num_waves = self.pulse_time/self.wavelen
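# Worked example (illustrative): for num_waves=5 and pulse_time=2.0 the block
# above gives freq=2.5 and wavelen=0.4; conversely, starting from wavelen=0.4
# it recovers freq=2.5 and num_waves=pulse_time/wavelen=5.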
class PulseGenSine(PulseGenPeriodic):
"""
Generates sine wave pulses
"""
def gen_pulse(self, num_waves=None, wavelen=None,
freq=None, start_phase=None):
"""
Generate a sine wave pulse
If no params are provided then the class object attributes are used.
If they are provided, then these will reinitialise the object attribs.
returns the pulse as an array of values for each timeslot
"""
if start_phase is not None:
self.start_phase = start_phase
if num_waves is not None or wavelen is not None or freq is not None:
self.init_pulse(num_waves, wavelen, freq, start_phase)
if not self._pulse_initialised:
self.init_pulse()
pulse = np.empty(self.num_tslots)
t = 0.0
for k in range(self.num_tslots):
phase = 2*np.pi*self.freq*t + self.start_phase
pulse[k] = self.scaling*np.sin(phase)
t = t + self.tau[k]
return self._apply_bounds_and_offset(pulse)
class PulseGenSquare(PulseGenPeriodic):
"""
Generates square wave pulses
"""
def gen_pulse(self, num_waves=None, wavelen=None,
freq=None, start_phase=None):
"""
Generate a square wave pulse
If no parameters are provided then the class object attributes are used.
If they are provided, then these will reinitialise the object attribs
"""
if start_phase is not None:
self.start_phase = start_phase
if num_waves is not None or wavelen is not None or freq is not None:
self.init_pulse(num_waves, wavelen, freq, start_phase)
if not self._pulse_initialised:
self.init_pulse()
pulse = np.empty(self.num_tslots)
t = 0.0
for k in range(self.num_tslots):
phase = 2*np.pi*self.freq*t + self.start_phase
x = phase/(2*np.pi)
y = 4*np.floor(x) - 2*np.floor(2*x) + 1
pulse[k] = self.scaling*y
t = t + self.tau[k]
return self._apply_bounds_and_offset(pulse)
class PulseGenSaw(PulseGenPeriodic):
"""
Generates saw tooth wave pulses
"""
def gen_pulse(self, num_waves=None, wavelen=None,
freq=None, start_phase=None):
"""
Generate a saw tooth wave pulse
If no parameters are provided then the class object attributes are used.
If they are provided, then these will reinitialise the object attribs
"""
if start_phase is not None:
self.start_phase = start_phase
if num_waves is not None or wavelen is not None or freq is not None:
self.init_pulse(num_waves, wavelen, freq, start_phase)
if not self._pulse_initialised:
self.init_pulse()
pulse = np.empty(self.num_tslots)
t = 0.0
for k in range(self.num_tslots):
phase = 2*np.pi*self.freq*t + self.start_phase
x = phase/(2*np.pi)
y = 2*(x - np.floor(0.5 + x))
pulse[k] = self.scaling*y
t = t + self.tau[k]
return self._apply_bounds_and_offset(pulse)
class PulseGenTriangle(PulseGenPeriodic):
"""
Generates triangular wave pulses
"""
def gen_pulse(self, num_waves=None, wavelen=None,
freq=None, start_phase=None):
"""
Generate a triangular wave pulse
If no parameters are provided then the class object attributes are used.
If they are provided, then these will reinitialise the object attribs
"""
if start_phase is not None:
self.start_phase = start_phase
if num_waves is not None or wavelen is not None or freq is not None:
self.init_pulse(num_waves, wavelen, freq, start_phase)
if not self._pulse_initialised:
self.init_pulse()
pulse = np.empty(self.num_tslots)
t = 0.0
for k in range(self.num_tslots):
phase = 2*np.pi*self.freq*t + self.start_phase + np.pi/2.0
x = phase/(2*np.pi)
y = 2*np.abs(2*(x - np.floor(0.5 + x))) - 1
pulse[k] = self.scaling*y
t = t + self.tau[k]
return self._apply_bounds_and_offset(pulse)
class PulseGenGaussian(PulseGen):
"""
Generates pulses with a Gaussian profile
"""
def reset(self):
"""
reset attributes to default values
"""
PulseGen.reset(self)
self._uses_time = True
self.mean = 0.5*self.pulse_time
self.variance = 0.5*self.pulse_time
self.apply_params()
def gen_pulse(self, mean=None, variance=None):
"""
Generate a pulse with Gaussian shape. The peak is centred around the
mean and the variance determines the breadth
The scaling and offset attributes are applied as an amplitude
and fixed linear offset. Note that the maximum amplitude will be
scaling + offset.
"""
if not self._pulse_initialised:
self.init_pulse()
if mean:
Tm = mean
else:
Tm = self.mean
if variance:
Tv = variance
else:
Tv = self.variance
t = self.time
T = self.pulse_time
pulse = self.scaling*np.exp(-(t-Tm)**2/(2*Tv))
return self._apply_bounds_and_offset(pulse)
class PulseGenGaussianEdge(PulseGen):
"""
Generate pulses with inverted Gaussian ramping in and out
Its intended use is as a ramping modulation, which is often required in
experimental setups.
Attributes
----------
decay_time : float
Determines the ramping rate. It is approximately the time
required to bring the pulse to full amplitude
It is set to 1/10 of the pulse time by default
"""
def reset(self):
"""
reset attributes to default values
"""
PulseGen.reset(self)
self._uses_time = True
self.decay_time = self.pulse_time / 10.0
self.apply_params()
def gen_pulse(self, decay_time=None):
"""
        Generate a pulse that starts and ends at zero and is 1.0 in between,
        then apply scaling and offset.
        The ramping in and out has an inverted Gaussian shape.
"""
if not self._pulse_initialised:
self.init_pulse()
t = self.time
        # Explicit None check so a zero decay_time argument is not silently ignored
        Td = self.decay_time if decay_time is None else decay_time
T = self.pulse_time
pulse = 1.0 - np.exp(-t**2/Td) - np.exp(-(t-T)**2/Td)
pulse = pulse*self.scaling
return self._apply_bounds_and_offset(pulse)
### The following are pulse generators for the CRAB algorithm ###
# AJGP 2015-05-14:
# The intention is to have a more general base class that allows
# setting of general basis functions
class PulseGenCrab(PulseGen):
"""
Base class for all CRAB pulse generators
    Note that these are more involved in the optimisation process, as they
    are used to produce piecewise control amplitudes each time new
    optimisation parameters are tried.
Attributes
----------
num_coeffs : integer
Number of coefficients used for each basis function
num_basis_funcs : integer
Number of basis functions
In this case set at 2 and should not be changed
coeffs : float array[num_coeffs, num_basis_funcs]
The basis coefficient values
randomize_coeffs : bool
If True (default) then the coefficients are set to some random values
when initialised, otherwise they will all be equal to self.scaling
"""
def __init__(self, dyn=None, num_coeffs=None, params=None):
self.parent = dyn
self.num_coeffs = num_coeffs
self.params = params
self.reset()
def reset(self):
"""
reset attributes to default values
"""
PulseGen.reset(self)
self.NUM_COEFFS_WARN_LVL = 20
self.DEF_NUM_COEFFS = 4
self._BSC_ALL = 1
self._BSC_GT_MEAN = 2
self._BSC_LT_MEAN = 3
self._uses_time = True
self.time = None
self.num_basis_funcs = 2
self.num_optim_vars = 0
self.coeffs = None
self.randomize_coeffs = True
self._num_coeffs_estimated = False
self.guess_pulse_action = 'MODULATE'
self.guess_pulse = None
self.guess_pulse_func = None
self.apply_params()
def init_pulse(self, num_coeffs=None):
"""
        Set the initial coefficient values
"""
PulseGen.init_pulse(self)
self.init_coeffs(num_coeffs=num_coeffs)
if self.guess_pulse is not None:
self.init_guess_pulse()
self._init_bounds()
if self.log_level <= logging.DEBUG and not self._num_coeffs_estimated:
logger.debug(
"CRAB pulse initialised with {} coefficients per basis "
"function, which means a total of {} "
"optimisation variables for this pulse".format(
self.num_coeffs, self.num_optim_vars))
# def generate_guess_pulse(self)
# if isinstance(self.guess_pulsegen, PulseGen):
# self.guess_pulse = self.guess_pulsegen.gen_pulse()
# return self.guess_pulse
def init_coeffs(self, num_coeffs=None):
"""
        Generate the initial coefficient values.
Parameters
----------
num_coeffs : integer
Number of coefficients used for each basis function
            If given, this overrides the default and sets the attribute
of the same name.
"""
if num_coeffs:
self.num_coeffs = num_coeffs
self._num_coeffs_estimated = False
if not self.num_coeffs:
if isinstance(self.parent, dynamics.Dynamics):
dim = self.parent.get_drift_dim()
self.num_coeffs = self.estimate_num_coeffs(dim)
self._num_coeffs_estimated = True
else:
self.num_coeffs = self.DEF_NUM_COEFFS
self.num_optim_vars = self.num_coeffs*self.num_basis_funcs
if self._num_coeffs_estimated:
if self.log_level <= logging.INFO:
logger.info(
"The number of CRAB coefficients per basis function "
"has been estimated as {}, which means a total of {} "
"optimisation variables for this pulse. Based on the "
"dimension ({}) of the system".format(
self.num_coeffs, self.num_optim_vars, dim))
# Issue warning if beyond the recommended level
if self.log_level <= logging.WARN:
if self.num_coeffs > self.NUM_COEFFS_WARN_LVL:
logger.warn(
"The estimated number of coefficients {} exceeds "
"the amount ({}) recommended for efficient "
"optimisation. You can set this level explicitly "
"to suppress this message.".format(
self.num_coeffs, self.NUM_COEFFS_WARN_LVL))
if self.randomize_coeffs:
r = np.random.random([self.num_coeffs, self.num_basis_funcs])
self.coeffs = (2*r - 1.0) * self.scaling
else:
self.coeffs = np.ones([self.num_coeffs,
self.num_basis_funcs])*self.scaling
def estimate_num_coeffs(self, dim):
"""
        Estimate the number of coefficients based on the dimensionality of the
system.
Returns
-------
num_coeffs : int
estimated number of coefficients
"""
num_coeffs = max(2, dim - 1)
return num_coeffs
def get_optim_var_vals(self):
"""
Get the parameter values to be optimised
Returns
-------
list (or 1d array) of floats
"""
return self.coeffs.ravel().tolist()
def set_optim_var_vals(self, param_vals):
"""
        Set the values of any of the pulse generation parameters
based on new values from the optimisation method
Typically this will be the basis coefficients
"""
# Type and size checking avoided here as this is in the
        # main optimisation call sequence
self.set_coeffs(param_vals)
def set_coeffs(self, param_vals):
self.coeffs = param_vals.reshape(
[self.num_coeffs, self.num_basis_funcs])
def init_guess_pulse(self):
self.guess_pulse_func = None
if not self.guess_pulse_action:
logger.WARN("No guess pulse action given, hence ignored.")
elif self.guess_pulse_action.upper() == 'MODULATE':
self.guess_pulse_func = self.guess_pulse_modulate
elif self.guess_pulse_action.upper() == 'ADD':
self.guess_pulse_func = self.guess_pulse_add
else:
logger.WARN("No option for guess pulse action '{}' "
", hence ignored.".format(self.guess_pulse_action))
def guess_pulse_add(self, pulse):
pulse = pulse + self.guess_pulse
return pulse
def guess_pulse_modulate(self, pulse):
pulse = (1.0 + pulse)*self.guess_pulse
return pulse
def _init_bounds(self):
add_guess_pulse_scale = False
if self.lbound is None and self.ubound is None:
# no bounds to apply
self._bound_scale_cond = None
elif self.lbound is None:
# only upper bound
if self.ubound > 0:
self._bound_mean = 0.0
self._bound_scale = self.ubound
else:
add_guess_pulse_scale = True
self._bound_scale = self.scaling*self.num_coeffs + \
self.get_guess_pulse_scale()
self._bound_mean = -abs(self._bound_scale) + self.ubound
self._bound_scale_cond = self._BSC_GT_MEAN
elif self.ubound is None:
# only lower bound
if self.lbound < 0:
self._bound_mean = 0.0
self._bound_scale = abs(self.lbound)
else:
self._bound_scale = self.scaling*self.num_coeffs + \
self.get_guess_pulse_scale()
self._bound_mean = abs(self._bound_scale) + self.lbound
self._bound_scale_cond = self._BSC_LT_MEAN
else:
# lower and upper bounds
self._bound_mean = 0.5*(self.ubound + self.lbound)
self._bound_scale = 0.5*(self.ubound - self.lbound)
self._bound_scale_cond = self._BSC_ALL
def get_guess_pulse_scale(self):
scale = 0.0
if self.guess_pulse is not None:
scale = max(np.amax(self.guess_pulse) - np.amin(self.guess_pulse),
np.amax(self.guess_pulse))
return scale
def _apply_bounds(self, pulse):
"""
        Scale the amplitudes using the tanh function if there are bounds.
"""
if self._bound_scale_cond == self._BSC_ALL:
pulse = np.tanh(pulse)*self._bound_scale + self._bound_mean
return pulse
elif self._bound_scale_cond == self._BSC_GT_MEAN:
scale_where = pulse > self._bound_mean
pulse[scale_where] = (np.tanh(pulse[scale_where])*self._bound_scale
+ self._bound_mean)
return pulse
elif self._bound_scale_cond == self._BSC_LT_MEAN:
scale_where = pulse < self._bound_mean
pulse[scale_where] = (np.tanh(pulse[scale_where])*self._bound_scale
+ self._bound_mean)
return pulse
else:
return pulse
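# Illustrative standalone sketch (not part of the original module): when both
# bounds are set, _apply_bounds squashes arbitrary amplitudes into
# [lbound, ubound] via tanh(pulse)*scale + mean, where mean and scale are the
# midpoint and half-width of the allowed range.
def _demo_tanh_bounds(pulse, lbound=-1.0, ubound=1.0):
    """Softly clip a pulse array into [lbound, ubound] using tanh scaling."""
    import numpy as np
    mean = 0.5*(ubound + lbound)
    scale = 0.5*(ubound - lbound)
    return np.tanh(np.asarray(pulse))*scale + mean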
class PulseGenCrabFourier(PulseGenCrab):
"""
Generates a pulse using the Fourier basis functions, i.e. sin and cos
Attributes
----------
freqs : float array[num_coeffs]
Frequencies for the basis functions
randomize_freqs : bool
        If True (default) then a random offset is applied to the frequencies
"""
def reset(self):
"""
reset attributes to default values
"""
PulseGenCrab.reset(self)
self.freqs = None
self.randomize_freqs = True
def init_pulse(self, num_coeffs=None):
"""
Set the initial freq and coefficient values
"""
PulseGenCrab.init_pulse(self)
self.init_freqs()
def init_freqs(self):
"""
Generate the frequencies
These are the Fourier harmonics with a uniformly distributed
random offset
"""
self.freqs = np.empty(self.num_coeffs)
ff = 2*np.pi / self.pulse_time
for i in range(self.num_coeffs):
self.freqs[i] = ff*(i + 1)
if self.randomize_freqs:
self.freqs += np.random.random(self.num_coeffs) - 0.5
def gen_pulse(self, coeffs=None):
"""
Generate a pulse using the Fourier basis with the freqs and
coeffs attributes.
Parameters
----------
coeffs : float array[num_coeffs, num_basis_funcs]
The basis coefficient values
            If given, this overrides the default and sets the attribute
of the same name.
"""
        # Explicit None check: truth-testing a multi-element array raises ValueError
        if coeffs is not None:
self.coeffs = coeffs
if not self._pulse_initialised:
self.init_pulse()
pulse = np.zeros(self.num_tslots)
for i in range(self.num_coeffs):
phase = self.freqs[i]*self.time
# basis1comp = self.coeffs[i, 0]*np.sin(phase)
# basis2comp = self.coeffs[i, 1]*np.cos(phase)
# pulse += basis1comp + basis2comp
pulse += self.coeffs[i, 0]*np.sin(phase) + \
self.coeffs[i, 1]*np.cos(phase)
if self.guess_pulse_func:
pulse = self.guess_pulse_func(pulse)
if self.ramping_pulse is not None:
pulse = self._apply_ramping_pulse(pulse)
return self._apply_bounds(pulse)
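# Illustrative standalone sketch (not part of the original module): the CRAB
# Fourier pulse above is sum_i c[i,0]*sin(w_i*t) + c[i,1]*cos(w_i*t), with the
# w_i placed near the harmonics 2*pi*(i+1)/pulse_time and the coefficients drawn
# at random.  Assuming only numpy, the same construction looks like this:
def _demo_crab_fourier_pulse(pulse_time=1.0, num_tslots=20, num_coeffs=4, seed=0):
    """Return a CRAB-style pulse built from random Fourier coefficients."""
    import numpy as np
    rng = np.random.RandomState(seed)
    time = np.linspace(0.0, pulse_time, num_tslots, endpoint=False)
    freqs = 2*np.pi/pulse_time * (np.arange(num_coeffs) + 1)
    freqs += rng.random_sample(num_coeffs) - 0.5          # randomised offsets
    coeffs = 2*rng.random_sample((num_coeffs, 2)) - 1.0
    pulse = np.zeros(num_tslots)
    for i in range(num_coeffs):
        phase = freqs[i]*time
        pulse += coeffs[i, 0]*np.sin(phase) + coeffs[i, 1]*np.cos(phase)
    return pulse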
|
|
from random import randrange
class Male:
def __init__(self):
self.pronoun = "M'Lord"
self.address = "Sir"
self.highaddress = "King"
self.refer = "He"
class Female:
def __init__(self):
self.pronoun = "M'Lady"
self.address = "Mam"
self.highaddress = "Queen"
self.refer = "She"
class GenderNeutral:
def __init__(self):
self.pronoun = "young heir"
self.address = ""
self.highaddress = "Ruler"
self.refer = "They"
class Menu(object):
def __init__(self):
self.choices = []
self.texts = []
self.selected_value = None
self.invalid_text = "That's not one of the choices, please use one of the numbers displayed."
def add_choice(self, display_text, value):
self.choices.append(value)
self.texts.append(display_text)
def current_weapon(self, weapon):
self.current_weapon = weapon.selected_value
    def prompt(self, text):
        while self.selected_value is None:
            print text
            num = 1
            for display_text in self.texts:
                print "%d. %s" % (num, display_text)
                num = num + 1
            try:
                user_choice = int(raw_input("> "))
            except ValueError:
                # Non-numeric input should re-prompt instead of crashing
                print self.invalid_text
                continue
            if user_choice < 1 or user_choice > len(self.texts):
                print self.invalid_text
            else:
                self.selected_value = self.choices[user_choice - 1]
#((((Sparing Pit 1 / Weapon Select))))
def weapon():
weapon = Menu()
weapon.add_choice('Sword',1)
weapon.add_choice('Shield',2)
weapon.add_choice('Lance',3)
weapon.add_choice('Mace',4)
if startgame.selected_value == 1:
print "Lord Maul: Good morning %s, glad you got here early. Maybe I'll go easy on you during training today." % player.pronoun
else:
print "Lord Maul: Sleep in early today again? You know the King never slept in during his sparring days. He alway wanted to try and put me in my place before he filled himself with mead."
print ""
print "Alright, lets get too it."
print ""
weapon.prompt("What do you want to work on today?")
if weapon.selected_value == 1:
print ""
elif weapon.selected_value == 2:
print ""
elif weapon.selected_value == 3:
print ""
elif weapon.selected_value == 4:
        print ""
#((((Sisters Room 1))))
def sisroom_1():
    # local menu kept distinct from the function name so sisroom_1() can recurse
    menu = Menu()
    menu.add_choice('Sparring Pit',1)
    menu.add_choice('Sisters Room',2)
    menu.add_choice('Throne Room',3)
    menu.add_choice('Dining Hall',4)
    menu.prompt("You get dressed and look out the window, where would you like to go?")
    if menu.selected_value == 1:
        weapon()
    elif menu.selected_value == 2:
        sisroom_1()
    elif menu.selected_value == 3:
        Thrownroom_1()
    elif menu.selected_value == 4:
        Dinehall_1()
def Thrownroom_1():
thr1 = Menu()
thr1.add_choice('Persuade to enter the room',1)
    thr1.add_choice('Go to Sparring Room',2)
thr1.add_choice('Dining Hall',3)
thr1.add_choice('Sisters Room',4)
print "You walk down the hall to the thrown room but are stopped by two gaurds at the doors."
print "Gaurd: Sorry %s, the Queen is in a negotiation right now with the Rockdawn Kingdom." % player.pronoun
thr1.prompt("What do you say?")
if thr1.selected_value == 1:
print ""
elif thr1.selected_value == 2:
weapon()
elif thr1.selected_value == 3:
Dinehall_1()
elif thr1.selected_value == 4:
sisroom_1()
def Dinehall_1():
weapon = Menu()
weapon.add_choice('Sword',1)
weapon.add_choice('Shield',2)
weapon.add_choice('Lance',3)
weapon.add_choice('Mace',4)
if startgame.selected_value == 1:
print "Lord Maul: Good morning %s, glad you got here early. Maybe I'll go easy on you during training today." % player.pronoun
else:
print "Sleep in early today again? You know the King never slept in during his sparring days. He alway wanted to try and put me in my place before he filled himself with mead."
print "Alright, lets get too it."
weapon.prompt("What do you want to work on today?")
if weapon.selected_value == 1:
print ""
elif weapon.selected_value == 2:
print ""
elif weapon.selected_value == 3:
print ""
elif weapon.selected_value == 4:
print ""
#def Startgame():
#((((Start of Game)))):
gender = Menu()
gender.add_choice('Male',Male)
gender.add_choice('Female',Female)
gender.add_choice('No preference',GenderNeutral)
print "What's your name?"
ChaName = raw_input("> ")
gender.prompt("Gender?")
player = gender.selected_value()
print "---------------------------------------------------------------------------------------------------"
print "---------------------------------------------------------------------------------------------------"
print "---------------------------------------------------------------------------------------------------"
print "Welcome %s, you are a child of the royal family. Next in line for the thrown and become the next %s." % (ChaName, player.highaddress)
print "Right now your kingdom is in a mess and everyone is watching out for a knife in the back."
print "Your younger sister, Roth, is worried you may be in danger and could be on an assassins list along with the rest of your family."
print "You will have to make choices that influence your kingdom and watch your own neck for it may become a lot lighter if you don't."
print "Stay clam and keep your eyes peeled, watch out for danger and rule your kingdom how you want."
print "Do all that and you shall make a fine %s." % player.highaddress
print "---------------------------------------------------------------------------------------------------"
print "---------------------------------------------------------------------------------------------------"
print "---------------------------------------------------------------------------------------------------"
print " "
print "It's early in the day and the sun has just rose through the curtains."
print "Sir Saber(Your Butler): You know the sun only goes up once a day, might wanna see it before it's gone %s. Whenever you do decide to leave your bed don't forget that you have training today with Lord Maul. So please do get some breakfast and be on your way." % player.pronoun
print " "
print "(You shuffle out of bed and Sir Saber leaves the room)"
print " "
startgame = Menu()
startgame.add_choice('Sparring Pit',1)
startgame.add_choice('Sisters Room',2)
startgame.add_choice('Throne Room',3)
startgame.add_choice('Dining Hall',4)
startgame.prompt("You get dressed and look out the window, where would you like to go?")
if startgame.selected_value == 1:
weapon()
elif startgame.selected_value == 2:
sisroom_1()
elif startgame.selected_value == 3:
Thrownroom_1()
elif startgame.selected_value == 4:
Dinehall_1()
|
|
"""HDL Primitives."""
import pytest
import ast
from hdltools.abshdl.vector import HDLVectorDescriptor
from hdltools.abshdl.module import HDLModule, HDLModuleParameter
from hdltools.abshdl.port import HDLModulePort
from hdltools.abshdl.expr import HDLExpression
from hdltools.abshdl.signal import HDLSignal, HDLSignalSlice
from hdltools.abshdl.const import HDLIntegerConstant, HDLStringConstant
from hdltools.abshdl.sens import HDLSensitivityList, HDLSensitivityDescriptor
from hdltools.abshdl.seq import HDLSequentialBlock
from hdltools.abshdl.assign import HDLAssignment
from hdltools.abshdl.concat import HDLConcatenation
def test_constants():
"""Test constants."""
with pytest.raises(ValueError):
fit_1 = HDLIntegerConstant(256, size=8)
fit_1 = HDLIntegerConstant(255, size=8)
fit_2 = HDLIntegerConstant(128, size=9)
ret = 3 - fit_1
ret = fit_1 - fit_2
ret = fit_2 - fit_1
ret = fit_1 + fit_2
ret = 2 + fit_1
ret = 2 * fit_1
ret = fit_1 * 2
with pytest.raises(TypeError):
_ = HDLIntegerConstant(2) - "123"
with pytest.raises(TypeError):
_ = HDLIntegerConstant(2) + "123"
with pytest.raises(TypeError):
_ = HDLIntegerConstant(2) * 1.0
ret = HDLIntegerConstant(2) == 2
assert ret is True
ret = HDLIntegerConstant(2) == "x"
assert ret is False
_ = abs(HDLIntegerConstant(-1))
HDLStringConstant(value="some_value")
# test HDL primitives
def test_vector_descriptor():
"""Test vectors."""
# basic testing
vec = HDLVectorDescriptor(0, 0)
print(vec.dumps())
assert len(vec) == 1
vec = HDLVectorDescriptor(7)
# test failure modes
with pytest.raises(ValueError):
vec = HDLVectorDescriptor(-1, 0)
with pytest.raises(TypeError):
vec = HDLVectorDescriptor("1", 0)
with pytest.raises(TypeError):
vec = HDLVectorDescriptor(left_size=1, right_size="1")
with pytest.raises(TypeError):
vec = HDLVectorDescriptor(7, stored_value="a")
vec = HDLVectorDescriptor(8, stored_value=256)
left, right = vec.evaluate()
with pytest.raises(ValueError):
HDLVectorDescriptor(7, stored_value=256)
def test_module_port():
"""Test ports."""
HDLModulePort("in", "myport", 3)
HDLModulePort("out", "myport", (2, 0))
HDLModulePort("inout", "myport", HDLVectorDescriptor(2, 0))
# fail cases
with pytest.raises(ValueError):
HDLModulePort("unknown", "myport", 0)
with pytest.raises(ValueError):
HDLModulePort("in", "myport", -1)
with pytest.raises(ValueError):
HDLModulePort("in", "myport", (2, 3, 0))
with pytest.raises(TypeError):
HDLModulePort("in", "myport", "INVALID")
def test_hdl_parameter():
"""Test parameters."""
param = HDLModuleParameter("myparam", "integer", param_default=0)
print(param.dumps())
def test_hdl_module():
"""Test modules."""
mod = HDLModule("my_module")
mod = HDLModule("my_module", [HDLModulePort("in", "myport", 8)])
mod = HDLModule(
"my_module", params=[HDLModuleParameter("myparam", "integer", 0)]
)
expr = ast.parse("myparam-1", mode="eval")
vec = HDLVectorDescriptor(left_size=HDLExpression(expr), right_size=0)
mod = HDLModule(
"my_module",
ports=[HDLModulePort("in", "myport", vec)],
params=HDLModuleParameter("myparam", "integer", 0),
)
print(mod.dumps(evaluate=True))
_ = mod.get_parameter_scope()
_ = mod.get_full_scope()
_ = mod.get_param_names()
_ = mod.get_port_names()
# failures
with pytest.raises(TypeError):
mod = HDLModule("my_module", 0)
with pytest.raises(TypeError):
mod = HDLModule("my_module", [0])
with pytest.raises(TypeError):
mod = HDLModule("my_module", params=[0])
with pytest.raises(TypeError):
mod = HDLModule("my_module", params=0)
def test_hdl_expression():
"""Test expressions."""
expr_1 = "PARAM-2"
expr_2 = "PARAM_X+1"
expr_3 = "a and ~b"
hdl_expr_1 = HDLExpression(ast.parse(expr_1, mode="eval"))
hdl_expr_2 = HDLExpression(ast.parse(expr_2, mode="eval"))
hdl_expr_3 = HDLExpression(expr_3)
print(hdl_expr_3.dumps())
sum = hdl_expr_1 + hdl_expr_2
neg = ~sum
bool_neg = sum.bool_neg()
bool_and = hdl_expr_1.bool_and(hdl_expr_2)
bool_or = hdl_expr_1.bool_or(hdl_expr_2)
print(sum.dumps())
print(neg.dumps())
print(bool_neg.dumps())
print(bool_and.dumps())
print(bool_or.dumps())
_ = hdl_expr_1 & 0x1
_ = 0x1 | hdl_expr_1
_ = 0x1 & hdl_expr_1
_ = 0x1 ^ hdl_expr_1
_ = hdl_expr_1 ^ 0x1
my_signal = HDLSignal("reg", "signal_a", size=2)
_ = HDLExpression(HDLIntegerConstant(1))
_ = HDLExpression(1)
_ = HDLExpression(my_signal)
_ = HDLExpression(HDLSignalSlice(my_signal, 1))
_ = HDLExpression(my_signal[1:0])
# test reduction
expr_a = HDLExpression("value_a")
expr_b = HDLExpression("value_b")
full_expr = expr_a << 0 | expr_b << 16 | HDLExpression(0)
case_1 = ast.BinOp(
left=ast.Constant(value=0), op=ast.BitOr(), right=ast.Name(id="VAR")
)
case_2 = ast.BinOp(
left=ast.Constant(value=1), op=ast.Mult(), right=ast.Name(id="VAR")
)
case_3 = ast.BinOp(
left=ast.Constant(value=0), op=ast.Mult(), right=ast.Name(id="VAR")
)
hdl_expr = HDLExpression(ast.Expression(body=case_1))
hdl_expr_2 = HDLExpression(ast.Expression(body=case_2))
hdl_expr_3 = HDLExpression(ast.Expression(body=case_3))
print(hdl_expr.dumps())
print(hdl_expr_2.dumps())
reduced_1 = HDLExpression._reduce_binop(case_1)
hdl_expr = HDLExpression(ast.Expression(body=reduced_1))
print(hdl_expr.dumps())
reduced_2 = HDLExpression._reduce_binop(case_2)
hdl_expr_2 = HDLExpression(ast.Expression(body=reduced_2))
print(hdl_expr_2.dumps())
reduced_3 = HDLExpression._reduce_binop(case_3)
hdl_expr_3 = HDLExpression(ast.Expression(body=reduced_3))
print(hdl_expr_3.dumps())
print(full_expr.dumps())
full_expr.reduce_expr()
print(full_expr.dumps())
def test_hdl_signal():
"""Test signals."""
my_sig = HDLSignal("reg", "signal_x", size=(7, 0))
print(my_sig.dumps())
_ = my_sig[3:1]
_ = my_sig[7]
yet_another = my_sig[2:]
_ = my_sig[:2]
print(yet_another.dumps())
_ = HDLSignal("reg", "sig", HDLVectorDescriptor(1, 0))
# exceptions
with pytest.raises(ValueError):
_ = HDLSignal("unknown", "sig", 1)
with pytest.raises(ValueError):
_ = HDLSignal("reg", "sig", -1)
with pytest.raises(ValueError):
_ = HDLSignal("reg", "sig", (1, 2, 3))
with pytest.raises(TypeError):
_ = HDLSignal("reg", "sig", "invalid")
_ = HDLSignalSlice(my_sig, HDLVectorDescriptor(1, 0))
with pytest.raises(TypeError):
_ = HDLSignalSlice(my_sig, "invalid")
def test_sens():
"""Test sensitivity list."""
some_signal = HDLSignal("reg", "signal", size=1)
sens_1 = HDLSensitivityDescriptor(sens_type="rise", sig=some_signal)
sens_list = HDLSensitivityList()
sens_list.add(sens_1)
print(sens_list.dumps())
def test_seq():
"""Test sequential block."""
some_signal = HDLSignal("reg", "signal", size=1)
sens_1 = HDLSensitivityDescriptor(sens_type="rise", sig=some_signal)
sens_list = HDLSensitivityList()
sens_list.add(sens_1)
ass_sig = HDLSignal("reg", "counter", size=2)
ass_expr = HDLExpression(ass_sig) + 1
assign = HDLAssignment(ass_sig, ass_expr)
seq = HDLSequentialBlock(sens_list)
seq.add(assign)
print(seq.dumps())
def test_assign():
"""Test assignment."""
# this module is extensively tested already, being used as a support
# class for many others. here we test whatever is missing
sig = HDLSignal("comb", "my_signal")
assign = HDLAssignment(signal=sig, value=0)
print(assign.dumps())
# test fail cases
with pytest.raises(TypeError):
_ = HDLAssignment("not_allowed", 0)
def test_concat():
"""Test concatenation."""
sig = HDLSignal("comb", "my_signal", size=4)
concat = HDLConcatenation(sig, HDLExpression(0x0C, size=8))
assert len(concat) == 12
concat.append(HDLExpression(0x1, size=1))
# failures
with pytest.raises(TypeError):
_ = HDLConcatenation(sig, "not_allowed")
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions related to preprocessing inputs."""
import tensorflow.compat.v1 as tf
def pad_to_bounding_box(image, offset_height, offset_width, target_height,
target_width, pad_value):
"""Pads the given image with the given pad_value.
Works like tf.image.pad_to_bounding_box, except it can pad the image
with any given arbitrary pad value and also handle images whose sizes are not
known during graph construction.
Args:
image: 3-D tensor with shape [height, width, channels]
offset_height: Number of rows of zeros to add on top.
offset_width: Number of columns of zeros to add on the left.
target_height: Height of output image.
target_width: Width of output image.
pad_value: Value to pad the image tensor with.
Returns:
3-D tensor of shape [target_height, target_width, channels].
Raises:
ValueError: If the shape of image is incompatible with the offset_* or
target_* arguments.
"""
image_rank = tf.rank(image)
image_rank_assert = tf.Assert(
tf.equal(image_rank, 3),
['Wrong image tensor rank [Expected] [Actual]', 3, image_rank])
with tf.control_dependencies([image_rank_assert]):
image -= pad_value
image_shape = tf.shape(image)
height, width = image_shape[0], image_shape[1]
target_width_assert = tf.Assert(
tf.greater_equal(target_width, width), ['target_width must be >= width'])
target_height_assert = tf.Assert(
tf.greater_equal(target_height, height),
['target_height must be >= height'])
with tf.control_dependencies([target_width_assert]):
after_padding_width = target_width - offset_width - width
with tf.control_dependencies([target_height_assert]):
after_padding_height = target_height - offset_height - height
offset_assert = tf.Assert(
tf.logical_and(
tf.greater_equal(after_padding_width, 0),
tf.greater_equal(after_padding_height, 0)),
['target size not possible with the given target offsets'])
height_params = tf.stack([offset_height, after_padding_height])
width_params = tf.stack([offset_width, after_padding_width])
channel_params = tf.stack([0, 0])
with tf.control_dependencies([offset_assert]):
paddings = tf.stack([height_params, width_params, channel_params])
padded = tf.pad(image, paddings)
return padded + pad_value
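# Illustrative standalone sketch (not part of the original module): the pad_value
# trick above works because subtracting pad_value, zero-padding, and adding
# pad_value back leaves the original pixels untouched while the new border ends
# up equal to pad_value.  The same arithmetic with plain numpy:
def _demo_pad_with_value(image, offset_height, offset_width,
                         target_height, target_width, pad_value):
  """Pad a [height, width, channels] numpy array with pad_value."""
  import numpy as np
  image = np.asarray(image, dtype=np.float32) - pad_value
  height, width = image.shape[0], image.shape[1]
  paddings = [(offset_height, target_height - offset_height - height),
              (offset_width, target_width - offset_width - width),
              (0, 0)]
  return np.pad(image, paddings, mode='constant') + pad_value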
def _crop(image, offset_height, offset_width, crop_height, crop_width):
"""Crops the given image using the provided offsets and sizes.
Note that the method doesn't assume we know the input image size but it does
assume we know the input image rank.
Args:
image: an image of shape [height, width, channels].
offset_height: a scalar tensor indicating the height offset.
offset_width: a scalar tensor indicating the width offset.
crop_height: the height of the cropped image.
crop_width: the width of the cropped image.
Returns:
The cropped (and resized) image.
Raises:
ValueError: if `image` doesn't have rank of 3.
InvalidArgumentError: if the rank is not 3 or if the image dimensions are
less than the crop size.
"""
original_shape = tf.shape(image)
if len(image.get_shape().as_list()) != 3:
raise ValueError('input must have rank of 3')
original_channels = image.get_shape().as_list()[2]
rank_assertion = tf.Assert(
tf.equal(tf.rank(image), 3), ['Rank of image must be equal to 3.'])
with tf.control_dependencies([rank_assertion]):
cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])
size_assertion = tf.Assert(
tf.logical_and(
tf.greater_equal(original_shape[0], crop_height),
tf.greater_equal(original_shape[1], crop_width)),
['Crop size greater than the image size.'])
offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))
# Use tf.slice instead of crop_to_bounding box as it accepts tensors to
# define the crop size.
with tf.control_dependencies([size_assertion]):
image = tf.slice(image, offsets, cropped_shape)
image = tf.reshape(image, cropped_shape)
image.set_shape([crop_height, crop_width, original_channels])
return image
def random_crop(image_list, crop_height, crop_width):
"""Crops the given list of images.
The function applies the same crop to each image in the list. This can be
effectively applied when there are multiple image inputs of the same
dimension such as:
image, depths, normals = random_crop([image, depths, normals], 120, 150)
Args:
image_list: a list of image tensors of the same dimension but possibly
varying channel.
crop_height: the new height.
crop_width: the new width.
Returns:
the image_list with cropped images.
Raises:
ValueError: if there are multiple image inputs provided with different size
or the images are smaller than the crop dimensions.
"""
if not image_list:
raise ValueError('Empty image_list.')
# Compute the rank assertions.
rank_assertions = []
for i in range(len(image_list)):
image_rank = tf.rank(image_list[i])
rank_assert = tf.Assert(
tf.equal(image_rank, 3), [
'Wrong rank for tensor %s [expected] [actual]', image_list[i].name,
3, image_rank
])
rank_assertions.append(rank_assert)
with tf.control_dependencies([rank_assertions[0]]):
image_shape = tf.shape(image_list[0])
image_height = image_shape[0]
image_width = image_shape[1]
crop_size_assert = tf.Assert(
tf.logical_and(
tf.greater_equal(image_height, crop_height),
tf.greater_equal(image_width, crop_width)),
['Crop size greater than the image size.'])
asserts = [rank_assertions[0], crop_size_assert]
for i in range(1, len(image_list)):
image = image_list[i]
asserts.append(rank_assertions[i])
with tf.control_dependencies([rank_assertions[i]]):
shape = tf.shape(image)
height = shape[0]
width = shape[1]
height_assert = tf.Assert(
tf.equal(height, image_height), [
'Wrong height for tensor %s [expected][actual]', image.name, height,
image_height
])
width_assert = tf.Assert(
tf.equal(width, image_width), [
'Wrong width for tensor %s [expected][actual]', image.name, width,
image_width
])
asserts.extend([height_assert, width_assert])
# Create a random bounding box.
#
# Use tf.random_uniform and not numpy.random.rand as doing the former would
# generate random numbers at graph eval time, unlike the latter which
# generates random numbers at graph definition time.
with tf.control_dependencies(asserts):
max_offset_height = tf.reshape(image_height - crop_height + 1, [])
max_offset_width = tf.reshape(image_width - crop_width + 1, [])
offset_height = tf.random_uniform([],
maxval=max_offset_height,
dtype=tf.int32)
offset_width = tf.random_uniform([], maxval=max_offset_width, dtype=tf.int32)
return [
_crop(image, offset_height, offset_width, crop_height, crop_width)
for image in image_list
]
def get_random_scale(min_scale_factor, max_scale_factor, step_size):
"""Gets a random scale value.
Args:
min_scale_factor: Minimum scale value.
max_scale_factor: Maximum scale value.
step_size: The step size from minimum to maximum value.
Returns:
A random scale value selected between minimum and maximum value.
Raises:
ValueError: min_scale_factor has unexpected value.
"""
if min_scale_factor < 0 or min_scale_factor > max_scale_factor:
raise ValueError('Unexpected value of min_scale_factor.')
if min_scale_factor == max_scale_factor:
return tf.to_float(min_scale_factor)
# When step_size = 0, we sample the value uniformly from [min, max).
if step_size == 0:
return tf.random_uniform([1],
minval=min_scale_factor,
maxval=max_scale_factor)
# When step_size != 0, we randomly select one discrete value from [min, max].
num_steps = int((max_scale_factor - min_scale_factor) / step_size + 1)
scale_factors = tf.lin_space(min_scale_factor, max_scale_factor, num_steps)
shuffled_scale_factors = tf.random_shuffle(scale_factors)
return shuffled_scale_factors[0]
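# Illustrative standalone sketch (not part of the original module): with a
# non-zero step_size the function above picks one value uniformly from the grid
# min, min + step, ..., max by shuffling the grid and taking the first element.
# An equivalent eager computation with numpy:
def _demo_discrete_random_scale(min_scale_factor, max_scale_factor, step_size,
                                seed=0):
  """Return one scale factor drawn uniformly from the discrete grid."""
  import numpy as np
  num_steps = int((max_scale_factor - min_scale_factor) / step_size + 1)
  scale_factors = np.linspace(min_scale_factor, max_scale_factor, num_steps)
  return np.random.RandomState(seed).choice(scale_factors)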
def randomly_scale_image_and_label(image, label=None, scale=1.0):
"""Randomly scales image and label.
Args:
image: Image with shape [height, width, 3].
label: Label with shape [height, width, 1].
scale: The value to scale image and label.
Returns:
Scaled image and label.
"""
# No random scaling if scale == 1.
if scale == 1.0:
return image, label
image_shape = tf.shape(image)
new_dim = tf.to_int32(tf.to_float([image_shape[0], image_shape[1]]) * scale)
# Need squeeze and expand_dims because image interpolation takes
# 4D tensors as input.
image = tf.squeeze(
tf.image.resize_bilinear(
tf.expand_dims(image, 0), new_dim, align_corners=True), [0])
if label is not None:
label = tf.squeeze(
tf.image.resize_nearest_neighbor(
tf.expand_dims(label, 0), new_dim, align_corners=True), [0])
return image, label
def resolve_shape(tensor, rank=None, scope=None):
"""Fully resolves the shape of a Tensor.
Use as much as possible the shape components already known during graph
creation and resolve the remaining ones during runtime.
Args:
tensor: Input tensor whose shape we query.
rank: The rank of the tensor, provided that we know it.
scope: Optional name scope.
Returns:
shape: The full shape of the tensor.
"""
with tf.name_scope(scope, 'resolve_shape', [tensor]):
if rank is not None:
shape = tensor.get_shape().with_rank(rank).as_list()
else:
shape = tensor.get_shape().as_list()
if None in shape:
shape_dynamic = tf.shape(tensor)
for i in range(len(shape)):
if shape[i] is None:
shape[i] = shape_dynamic[i]
return shape
def resize_to_range(image,
label=None,
min_size=None,
max_size=None,
factor=None,
align_corners=True,
label_layout_is_chw=False,
scope=None,
method=tf.image.ResizeMethod.BILINEAR):
"""Resizes image or label so their sides are within the provided range.
The output size can be described by two cases:
1. If the image can be rescaled so its minimum size is equal to min_size
without the other side exceeding max_size, then do so.
2. Otherwise, resize so the largest side is equal to max_size.
An integer in `range(factor)` is added to the computed sides so that the
final dimensions are multiples of `factor` plus one.
Args:
image: A 3D tensor of shape [height, width, channels].
label: (optional) A 3D tensor of shape [height, width, channels] (default)
or [channels, height, width] when label_layout_is_chw = True.
min_size: (scalar) desired size of the smaller image side.
max_size: (scalar) maximum allowed size of the larger image side. Note that
the output dimension is no larger than max_size and may be slightly
smaller than min_size when factor is not None.
factor: Make output size multiple of factor plus one.
align_corners: If True, exactly align all 4 corners of input and output.
label_layout_is_chw: If true, the label has shape [channel, height, width].
We support this case because for some instance segmentation dataset, the
instance segmentation is saved as [num_instances, height, width].
scope: Optional name scope.
method: Image resize method. Defaults to tf.image.ResizeMethod.BILINEAR.
Returns:
A 3-D tensor of shape [new_height, new_width, channels], where the image
has been resized (with the specified method) so that
min(new_height, new_width) == ceil(min_size) or
max(new_height, new_width) == ceil(max_size).
Raises:
ValueError: If the image is not a 3D tensor.
"""
with tf.name_scope(scope, 'resize_to_range', [image]):
new_tensor_list = []
min_size = tf.to_float(min_size)
if max_size is not None:
max_size = tf.to_float(max_size)
# Modify the max_size to be a multiple of factor plus 1 and make sure the
# max dimension after resizing is no larger than max_size.
if factor is not None:
max_size = (
max_size + (factor - (max_size - 1) % factor) % factor - factor)
[orig_height, orig_width, _] = resolve_shape(image, rank=3)
orig_height = tf.to_float(orig_height)
orig_width = tf.to_float(orig_width)
orig_min_size = tf.minimum(orig_height, orig_width)
# Calculate the larger of the possible sizes
large_scale_factor = min_size / orig_min_size
large_height = tf.to_int32(tf.ceil(orig_height * large_scale_factor))
large_width = tf.to_int32(tf.ceil(orig_width * large_scale_factor))
large_size = tf.stack([large_height, large_width])
new_size = large_size
if max_size is not None:
# Calculate the smaller of the possible sizes, use that if the larger
# is too big.
orig_max_size = tf.maximum(orig_height, orig_width)
small_scale_factor = max_size / orig_max_size
small_height = tf.to_int32(tf.ceil(orig_height * small_scale_factor))
small_width = tf.to_int32(tf.ceil(orig_width * small_scale_factor))
small_size = tf.stack([small_height, small_width])
new_size = tf.cond(
tf.to_float(tf.reduce_max(large_size)) >
max_size, lambda: small_size, lambda: large_size)
# Ensure that both output sides are multiples of factor plus one.
if factor is not None:
new_size += (factor - (new_size - 1) % factor) % factor
new_tensor_list.append(
tf.image.resize_images(
image, new_size, method=method, align_corners=align_corners))
if label is not None:
if label_layout_is_chw:
# Input label has shape [channel, height, width].
resized_label = tf.expand_dims(label, 3)
resized_label = tf.image.resize_nearest_neighbor(
resized_label, new_size, align_corners=align_corners)
resized_label = tf.squeeze(resized_label, 3)
else:
# Input label has shape [height, width, channel].
resized_label = tf.image.resize_images(
label,
new_size,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=align_corners)
new_tensor_list.append(resized_label)
else:
new_tensor_list.append(None)
return new_tensor_list
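# Illustrative standalone sketch (not part of the original module): the `factor`
# arithmetic in resize_to_range snaps sizes onto the grid {k * factor + 1}.
# Computed output sides are rounded up to the nearest grid value, while max_size
# is moved down (strictly below its original value when already on the grid) so
# the resized dimensions can never exceed it,
# e.g. _demo_factor_snap(400, 128) == (513, 385).
def _demo_factor_snap(size, factor):
  """Return (snap_up, snap_down) of `size` on the k*factor+1 grid."""
  snap_up = size + (factor - (size - 1) % factor) % factor
  snap_down = snap_up - factor
  return snap_up, snap_down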
|
|
#!/usr/bin/env python
from marmoset import Marmoset
from getpass import getpass
import os
import glob
import sys
import threading
import fileinput
# monkey-patch SSL because verification fails on 2.7.9
if sys.hexversion == 34015728:
import ssl
if hasattr(ssl, '_create_unverified_context'):
# noinspection PyProtectedMember
ssl._create_default_https_context = ssl._create_unverified_context
RACKET_KEYWORD = ';;;!'
C_KEYWORD = '///!'
LINE_SEARCH_LIMIT = 10
langLookup = {'.rkt': RACKET_KEYWORD, '.c': C_KEYWORD, '.h': C_KEYWORD}
username = raw_input('Username: ')
password = getpass('Password: ')
class MarmosetAssignment:
"""
Stores one marmoset assignment problem and a list of its files.
"""
def __init__(self, course='', assignment=''):
"""
Construct with course and assignment.
:param course:
:type course: str
:param assignment:
:type assignment: str
:return: An assignment instance
:rtype: MarmosetAssignment
"""
self.course = course
self.assignment = assignment
self.files = []
def set_course(self, course):
"""
Setter for the instance field "course"
:param course:
:type course: str
:return: None
"""
self.course = course
def set_assignment(self, assignment):
"""
Setter for the instance field "assignment"
:param assignment:
:type assignment: str
:return: None
"""
self.assignment = assignment
def add_file(self, f):
"""
Add a file to the instance's list of files
:param f: A path
:type f: str
:return: None
"""
self.files.append(f)
def submit(self, username, password):
"""
Submit all files in the instance's files field.
:param username: CAS username
:type username: str
:param password: CAS password
:type password: str
:return: None
"""
marmoset = Marmoset(username, password)
if len(self.files) == 1:
self.files = self.files[0] # Fix for zipping the entire directory structure
print("Submitting " + self.course + " " + self.assignment + '\n')
result = marmoset.submit(self.course, self.assignment, self.files)
if result:
print("Success!\n")
else:
print("Submission failed (check login credentials)\n")
def async_submit(self, username, password):
"""
Submit all files in the instance's files field without blocking.
:param username: CAS username
:type username: str
:param password: CAS password
:type password: str
:return: None
"""
        # submit() already sends every file, so one background thread suffices;
        # spawning one per file would submit duplicates.
        t = threading.Thread(target=self.submit, args=(username, password))
        t.start()
def get_params_from_file(path):
"""
Get a list of parameters by searching the file.
:param path: Path to the file
:type path: str
:return: A list of parameters [course, assignment]
:rtype: list
"""
f = fileinput.input(path)
file_extension = os.path.splitext(path)[1]
keyword = langLookup[file_extension]
for line in f:
        if f.lineno() >= LINE_SEARCH_LIMIT:  # limit search to the first LINE_SEARCH_LIMIT lines
break
if line.startswith(keyword):
params = line.split(' ', 2)
if len(params) != 3:
return False
params.pop(0)
params[1] = params[1].rstrip(')\n')
f.close()
return params
f.close()
return False
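# Illustrative standalone sketch (not part of the original script):
# get_params_from_file expects a directive near the top of each source file of
# the form "<keyword> <course> <assignment>", e.g. ";;;! CS136 A5P3" in Racket
# or "///! CS136 A5P3" in C (the course/assignment names here are made-up
# placeholders).  The same parsing applied to a single line:
def _demo_parse_directive(line, keyword):
    """Return [course, assignment] if line is a submission directive, else False."""
    if not line.startswith(keyword):
        return False
    params = line.split(' ', 2)
    if len(params) != 3:
        return False
    params.pop(0)
    params[1] = params[1].rstrip(')\n')
    return params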
def get_file_paths(file_extension):
"""
Get all the files of the specified type from the cwd.
:param file_extension: The file type to search for
:type file_extension: str
:return: A list of file paths
:rtype: list
"""
cwd = os.getcwd()
return glob.glob(cwd + '/*' + file_extension)
def get_all_params(file_list):
"""
Get all parameters from the list of files.
:param file_list: A list of file paths
:type file_list: list
:return: A list of MarmosetAssignments
:rtype: list
"""
marmo_problems = {}
params_map = zip(file_list, map(lambda f: get_params_from_file(f), file_list))
valid_files = (x for x in params_map if x[1] is not False)
for entry in valid_files:
course = entry[1][0]
assignment = entry[1][1]
filename = os.path.basename(entry[0]) # FIXME: Zip behaviour is weird
key = (course, assignment) # 'course' + 'assignment'
if key in marmo_problems:
# add filename to existing MarmosetAssignment
marmo_problems[key].add_file(filename)
else:
marmo_problems[key] = MarmosetAssignment(course, assignment)
marmo_problems[key].add_file(filename)
return marmo_problems.values()
def submit_all(assignments, username, password):
for problem in assignments:
problem.async_submit(username, password)
files = []
for file_exts in langLookup:
files += get_file_paths(file_exts)
submit_all(get_all_params(files), username, password)
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
import os
class Migration(SchemaMigration):
def forwards(self, orm):
# TODO: Load sample json
# os.system("shp2pgsql -s 26986 -g geometry data/parcels/Parcels_L3E.shp development_parcel | psql -h localhost -d ddtest -U mapcuser")
# db.execute("ALTER SEQUENCE development_parcel_gid_seq RENAME TO development_parcel_parcel_id_seq")
# db.execute("ALTER TABLE development_parcel RENAME gid TO parcel_id")
db.rename_column('development_parcel', 'mapc_id', 'parcel_id')
db.rename_column('development_parcel', 'muni_id', 'municipality_id')
db.alter_column('development_parcel', 'municipality_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['development.Municipality']))
db.delete_column('development_parcel', 'shape_leng')
db.delete_column('development_parcel', 'shape_area')
db.add_column('development_project', 'parcel',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['development.Parcel'], null=True, blank=True),
keep_default=False)
db.execute('CREATE INDEX "development_parcel_geometry_id" ON "development_parcel" USING GIST ( "geometry" )')
db.send_create_signal('development', ['Parcel'])
def backwards(self, orm):
db.delete_table('development_parcel', cascade=True)
# db.delete_column('development_project', 'parcel_id')
# Deleting field 'Project.parcel'
db.delete_column('development_project', 'parcel_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'development.communitytype': {
'Meta': {'object_name': 'CommunityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'development.municipality': {
'Meta': {'ordering': "['name']", 'object_name': 'Municipality'},
'communitytype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.CommunityType']", 'null': 'True', 'blank': 'True'}),
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '26986'}),
'muni_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'subregion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.Subregion']", 'null': 'True'})
},
'development.parcel': {
'Meta': {'object_name': 'Parcel'},
'addr_num': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'addr_str': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'addr_zip': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'bldg_area': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'bldg_value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'far': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'fy': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '26986', 'null': 'True'}),
'gid': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'land_value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'loc_id_cnt': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'lot_areaft': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'ls_date': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True'}),
'ls_price': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'luc_1': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True'}),
'luc_2': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True'}),
'luc_adjust': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True'}),
'municipality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.Municipality']", 'null': 'True'}),
'othr_value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'owner_addr': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'}),
'owner_city': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True'}),
'owner_name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'}),
'owner_stat': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'owner_zip': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'parloc_id': ('django.db.models.fields.CharField', [], {'max_length': '18', 'null': 'True'}),
'res_area': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'rooms_num': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'site_addr': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'}),
'style': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'taxloc_id': ('django.db.models.fields.CharField', [], {'max_length': '18', 'null': 'True'}),
'total_valu': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'units_num': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'yr_built': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'zoning': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True'})
},
'development.project': {
'Meta': {'ordering': "['dd_id']", 'object_name': 'Project'},
'affordable_comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'as_of_right': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'ch40': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.ZoningTool']", 'null': 'True', 'blank': 'True'}),
'clustosrd': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'commsf': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'complyr': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'project_created_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'dd_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ddname': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dev_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'draft': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'edinstpct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'emploss': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'gqpop': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hotelrms': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'indmfpct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'project_last_modified_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'lgmultifam': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '26986', 'null': 'True', 'blank': 'True'}),
'mapcintrnl': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'mfdisc': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mxduse': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'ofcmdpct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'otheremprat2': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'othpct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'ovr55': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'parcel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.Parcel']", 'null': 'True', 'blank': 'True'}),
'parking_spaces': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'pctaffall': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'phased': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'prjacrs': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'projecttype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.ProjectType']", 'null': 'True'}),
'projecttype_detail': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rdv': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'retpct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rndpct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rptdemp': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'singfamhu': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'stalled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.ProjectStatus']"}),
'taz': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.Taz']", 'null': 'True', 'blank': 'True'}),
'todstation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.TODStation']", 'null': 'True', 'blank': 'True'}),
'total_cost': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'total_cost_allocated_pct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'totemp': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'tothu': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'twnhsmmult': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'url_add': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'walkscore': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.WalkScore']", 'null': 'True', 'blank': 'True'}),
'whspct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'xcoord': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'ycoord': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'development.projectstatus': {
'Meta': {'object_name': 'ProjectStatus'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'development.projecttype': {
'Meta': {'ordering': "['order']", 'object_name': 'ProjectType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'development.subregion': {
'Meta': {'ordering': "['abbr']", 'object_name': 'Subregion'},
'abbr': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'development.taz': {
'Meta': {'ordering': "['taz_id']", 'object_name': 'Taz'},
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '26986'}),
'municipality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.Municipality']"}),
'taz_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'})
},
'development.todstation': {
'Meta': {'ordering': "['station_name']", 'object_name': 'TODStation'},
'comrail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '26986'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'station_id': ('django.db.models.fields.IntegerField', [], {}),
'station_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'subway': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'taz': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.Taz']"})
},
'development.walkscore': {
'Meta': {'object_name': 'WalkScore'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'geometry': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'snapped_lat': ('django.db.models.fields.FloatField', [], {}),
'snapped_lon': ('django.db.models.fields.FloatField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'updated': ('django.db.models.fields.DateTimeField', [], {}),
'walkscore': ('django.db.models.fields.IntegerField', [], {}),
'ws_link': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'development.zipcode': {
'Meta': {'ordering': "['zipcode']", 'object_name': 'ZipCode'},
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '26986'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '5'})
},
'development.zoningtool': {
'Meta': {'object_name': 'ZoningTool'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '3'})
}
}
complete_apps = ['development']
|
|
#------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought util package component>
#------------------------------------------------------------------------------
""" Classes to provide a switcher. """
# paranoid checkin in case Mr Chilver's changes break the distribution code
# todo - it wasn't paranoia - reconcile this with lazy_switcher.py at some point
# Major package imports.
import wx
from wx.lib.scrolledpanel import ScrolledPanel
# Enthought library imports.
from traits.api import HasTraits
class SwitcherModel(HasTraits):
""" Base class for switcher models. """
__traits__ = {
# The index of the selected 'page'.
'selected' : -1,
}
def __init__(self):
""" Creates a new switcher model. """
# The items to display in the switcher control.
self.items = [] # (str label, object value)
return
###########################################################################
# 'SwitcherModel' interface.
###########################################################################
def create_page(self, parent, index):
""" Creates a page for the switcher panel. """
raise NotImplementedError
class SwitcherControl(wx.Panel):
""" The default switcher control (a combo box). """
def __init__(self, parent, id, model, label=None, **kw):
""" Create a new switcher control. """
# Base-class constructor.
wx.Panel.__init__(self, parent, id, **kw)
# The switcher model that we are a controller for.
self.model = model
# The optional label.
self.label = label
# Create the widget!
self._create_widget(model, label)
# Listen for when the selected item in the model is changed.
model.on_trait_change(self._on_selected_changed, 'selected')
return
###########################################################################
# Trait event handlers.
###########################################################################
def _on_selected_changed(self, selected):
""" Called when the selected item in the model is changed. """
self.combo.SetSelection(selected)
return
###########################################################################
# wx event handlers.
###########################################################################
def _on_combobox(self, event):
""" Called when the combo box selection is changed. """
combo = event.GetEventObject()
# Update the model.
self.model.selected = combo.GetSelection()
return
###########################################################################
# Private interface.
###########################################################################
def _create_widget(self, model, label):
""" Creates the widget. """
self.sizer = sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(sizer)
self.SetAutoLayout(True)
##self.SetBackgroundColour("light grey")
# Switcher combo.
sizer.Add(self._combo(self, model, label), 1, wx.EXPAND)
# Resize the panel to match the sizer's minimal size.
sizer.Fit(self)
return
def _combo(self, parent, model, label):
""" Creates the switcher combo box. """
sizer = wx.BoxSizer(wx.HORIZONTAL)
# Label.
if label is not None:
text = wx.StaticText(parent, -1, label)
sizer.Add(text, 0, wx.ALIGN_CENTER | wx.ALL, 5)
# Combo.
self.combo = combo = wx.ComboBox(
parent,
-1,
style=wx.CB_DROPDOWN | wx.CB_READONLY
)
sizer.Add(combo, 1, wx.EXPAND | wx.ALIGN_CENTER | wx.ALL, 5)
# Ask the model for the available options.
items = model.items
if len(items) > 0:
for name, data in model.items:
combo.Append(name, data)
# Listen for changes to the selected item.
wx.EVT_COMBOBOX(self, combo.GetId(), self._on_combobox)
# If the model's selected variable has been set ...
if model.selected != -1:
combo.SetSelection(model.selected)
return sizer
class SwitcherPanel(ScrolledPanel):
""" The default switcher panel. """
def __init__(self, parent, id, model, label=None, cache=True, **kw):
# Base-class constructor.
        ScrolledPanel.__init__(self, parent, id, **kw)
self.SetupScrolling()
# The switcher model that we are a panel for.
self.model = model
# Should we cache pages as we create them?
self.cache = cache
# The page cache (if caching was requested).
self._page_cache = {}
# The currently displayed page.
self.current = None
# Create the widget!
self._create_widget(model, label)
# Listen for when the selected item in the model is changed.
model.on_trait_change(self._on_selected_changed, 'selected')
return
###########################################################################
# Trait event handlers.
###########################################################################
def _on_selected_changed(self, selected):
""" Called when the selected item in the model is changed. """
self._show_page(selected)
return
###########################################################################
# Private interface.
###########################################################################
def _create_widget(self, model, label):
""" Creates the widget. """
self.sizer = sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(sizer)
self.SetAutoLayout(True)
if model.selected != -1:
self._show_page(model.selected)
# Nothing to add here as we add the panel contents lazily!
pass
# Resize the panel to match the sizer's minimal size.
sizer.Fit(self)
return
def _show_page(self, index):
""" Shows the page at the specified index. """
# If a page is already displayed then hide it.
if self.current is not None:
self.current.Show(False)
self.sizer.Remove(self.current)
# Is the page in the cache?
page = self._page_cache.get(index)
if not self.cache or page is None:
# If not then ask our panel factory to create it.
page = self.model.create_page(self, index)
# Add it to the cache!
self._page_cache[index] = page
# Display the page.
self.sizer.Add(page, 1, wx.EXPAND)
page.Show(True)
self.current = page
# Force a new layout of the sizer's children but KEEPING the current
# dimension.
self.sizer.Layout()
return
class Switcher(wx.Panel):
""" A switcher. """
def __init__(self, parent, id, model, label=None, **kw):
# Base-class constructor.
wx.Panel.__init__(self, parent, id, **kw)
# The model that we are a switcher for.
self.model = model
# Create the widget!
self._create_widget(model, label)
return
###########################################################################
# Private interface.
###########################################################################
def _create_widget(self, model, label):
""" Creates the widget. """
self.sizer = sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(sizer)
self.SetAutoLayout(True)
# Switcher control.
self.control = control = SwitcherControl(self, -1, model, label)
sizer.Add(control, 0, wx.EXPAND)
# Switcher panel.
self.panel = panel = SwitcherPanel(self, -1, model, label)
sizer.Add(panel, 1, wx.EXPAND)
# Resize the panel to match the sizer's minimal size.
sizer.Fit(self)
return
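# -----------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# Enthought module). It shows how a concrete SwitcherModel subclass plugs into
# the classes above: 'items' supplies the (label, value) pairs for the combo
# box and 'create_page' builds the page shown for the selected index. The
# ExampleSwitcherModel name and the frame scaffolding below are assumptions,
# and the demo presumes the same classic wxPython API used by the rest of this
# module.
# -----------------------------------------------------------------------------
class ExampleSwitcherModel(SwitcherModel):
    """ A minimal switcher model with two static pages. """
    def __init__(self):
        """ Creates a new example switcher model. """
        SwitcherModel.__init__(self)
        # (label shown in the combo box, arbitrary client data)
        self.items = [('First page', 1), ('Second page', 2)]
        return
    def create_page(self, parent, index):
        """ Creates a page for the switcher panel. """
        label, value = self.items[index]
        return wx.StaticText(parent, -1, 'This is %s (value %d)' % (label, value))
if __name__ == '__main__':
    app = wx.App(False)
    frame = wx.Frame(None, -1, 'Switcher demo')
    Switcher(frame, -1, ExampleSwitcherModel(), label='Page:')
    frame.Show(True)
    app.MainLoop()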
#### EOF ######################################################################
|
|
# test_remote.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import random
import tempfile
try:
from unittest import skipIf
except ImportError:
from unittest2 import skipIf
from git import (
RemoteProgress,
FetchInfo,
Reference,
SymbolicReference,
Head,
Commit,
PushInfo,
RemoteReference,
TagReference,
Remote,
GitCommandError
)
from git.cmd import Git
from git.compat import string_types
from git.test.lib import (
TestBase,
with_rw_repo,
with_rw_and_rw_remote_repo,
fixture,
GIT_DAEMON_PORT,
assert_raises
)
from git.util import IterableList, rmtree, HIDE_WINDOWS_FREEZE_ERRORS
import os.path as osp
# assure we have repeatable results
random.seed(0)
class TestRemoteProgress(RemoteProgress):
__slots__ = ("_seen_lines", "_stages_per_op", '_num_progress_messages')
def __init__(self):
super(TestRemoteProgress, self).__init__()
self._seen_lines = list()
self._stages_per_op = dict()
self._num_progress_messages = 0
def _parse_progress_line(self, line):
# we may remove the line later if it is dropped
# Keep it for debugging
self._seen_lines.append(line)
rval = super(TestRemoteProgress, self)._parse_progress_line(line)
assert len(line) > 1, "line %r too short" % line
return rval
def line_dropped(self, line):
try:
self._seen_lines.remove(line)
except ValueError:
pass
def update(self, op_code, cur_count, max_count=None, message=''):
# check each stage only comes once
op_id = op_code & self.OP_MASK
assert op_id in (self.COUNTING, self.COMPRESSING, self.WRITING)
if op_code & self.WRITING > 0:
if op_code & self.BEGIN > 0:
assert not message, 'should not have message when remote begins writing'
elif op_code & self.END > 0:
assert message
assert not message.startswith(', '), "Sanitize progress messages: '%s'" % message
assert not message.endswith(', '), "Sanitize progress messages: '%s'" % message
self._stages_per_op.setdefault(op_id, 0)
self._stages_per_op[op_id] = self._stages_per_op[op_id] | (op_code & self.STAGE_MASK)
if op_code & (self.WRITING | self.END) == (self.WRITING | self.END):
assert message
# END check we get message
self._num_progress_messages += 1
def make_assertion(self):
# we don't always receive messages
if not self._seen_lines:
return
# sometimes objects are not compressed which is okay
assert len(self._seen_ops) in (2, 3), len(self._seen_ops)
assert self._stages_per_op
# must have seen all stages
for op, stages in self._stages_per_op.items(): # @UnusedVariable
assert stages & self.STAGE_MASK == self.STAGE_MASK
# END for each op/stage
def assert_received_message(self):
assert self._num_progress_messages
class TestRemote(TestBase):
def tearDown(self):
import gc
gc.collect()
def _print_fetchhead(self, repo):
with open(osp.join(repo.git_dir, "FETCH_HEAD")):
pass
def _do_test_fetch_result(self, results, remote):
# self._print_fetchhead(remote.repo)
self.assertGreater(len(results), 0)
self.assertIsInstance(results[0], FetchInfo)
for info in results:
self.assertIsInstance(info.note, string_types)
if isinstance(info.ref, Reference):
self.assertTrue(info.flags)
# END reference type flags handling
self.assertIsInstance(info.ref, (SymbolicReference, Reference))
if info.flags & (info.FORCED_UPDATE | info.FAST_FORWARD):
self.assertIsInstance(info.old_commit, Commit)
else:
self.assertIsNone(info.old_commit)
# END forced update checking
# END for each info
def _do_test_push_result(self, results, remote):
self.assertGreater(len(results), 0)
self.assertIsInstance(results[0], PushInfo)
for info in results:
self.assertTrue(info.flags)
self.assertIsInstance(info.summary, string_types)
if info.old_commit is not None:
self.assertIsInstance(info.old_commit, Commit)
if info.flags & info.ERROR:
has_one = False
for bitflag in (info.REJECTED, info.REMOTE_REJECTED, info.REMOTE_FAILURE):
has_one |= bool(info.flags & bitflag)
# END for each bitflag
self.assertTrue(has_one)
else:
# there must be a remote commit
if info.flags & info.DELETED == 0:
self.assertIsInstance(info.local_ref, Reference)
else:
self.assertIsNone(info.local_ref)
self.assertIn(type(info.remote_ref), (TagReference, RemoteReference))
# END error checking
# END for each info
def _do_test_fetch_info(self, repo):
self.failUnlessRaises(ValueError, FetchInfo._from_line, repo, "nonsense", '')
self.failUnlessRaises(
ValueError, FetchInfo._from_line, repo, "? [up to date] 0.1.7RC -> origin/0.1.7RC", '')
def _commit_random_file(self, repo):
# Create a file with a random name and random data and commit it to repo.
# Return the committed absolute file path
index = repo.index
new_file = self._make_file(osp.basename(tempfile.mktemp()), str(random.random()), repo)
index.add([new_file])
index.commit("Committing %s" % new_file)
return new_file
def _do_test_fetch(self, remote, rw_repo, remote_repo):
# specialized fetch testing to de-clutter the main test
self._do_test_fetch_info(rw_repo)
def fetch_and_test(remote, **kwargs):
progress = TestRemoteProgress()
kwargs['progress'] = progress
res = remote.fetch(**kwargs)
progress.make_assertion()
self._do_test_fetch_result(res, remote)
return res
# END fetch and check
def get_info(res, remote, name):
return res["%s/%s" % (remote, name)]
# put remote head to master as it is guaranteed to exist
remote_repo.head.reference = remote_repo.heads.master
res = fetch_and_test(remote)
# all up to date
for info in res:
self.assertTrue(info.flags & info.HEAD_UPTODATE)
# rewind remote head to trigger rejection
# index must be false as remote is a bare repo
rhead = remote_repo.head
remote_commit = rhead.commit
rhead.reset("HEAD~2", index=False)
res = fetch_and_test(remote)
mkey = "%s/%s" % (remote, 'master')
master_info = res[mkey]
self.assertTrue(master_info.flags & FetchInfo.FORCED_UPDATE)
self.assertIsNotNone(master_info.note)
# normal fast forward - set head back to previous one
rhead.commit = remote_commit
res = fetch_and_test(remote)
self.assertTrue(res[mkey].flags & FetchInfo.FAST_FORWARD)
# new remote branch
new_remote_branch = Head.create(remote_repo, "new_branch")
res = fetch_and_test(remote)
new_branch_info = get_info(res, remote, new_remote_branch)
self.assertTrue(new_branch_info.flags & FetchInfo.NEW_HEAD)
# remote branch rename ( causes creation of a new one locally )
new_remote_branch.rename("other_branch_name")
res = fetch_and_test(remote)
other_branch_info = get_info(res, remote, new_remote_branch)
self.assertEqual(other_branch_info.ref.commit, new_branch_info.ref.commit)
# remove new branch
Head.delete(new_remote_branch.repo, new_remote_branch)
res = fetch_and_test(remote)
# deleted remote will not be fetched
self.failUnlessRaises(IndexError, get_info, res, remote, new_remote_branch)
# prune stale tracking branches
stale_refs = remote.stale_refs
self.assertEqual(len(stale_refs), 2)
self.assertIsInstance(stale_refs[0], RemoteReference)
RemoteReference.delete(rw_repo, *stale_refs)
# test single branch fetch with refspec including target remote
res = fetch_and_test(remote, refspec="master:refs/remotes/%s/master" % remote)
self.assertEqual(len(res), 1)
self.assertTrue(get_info(res, remote, 'master'))
        # ... with refspec and no target
res = fetch_and_test(remote, refspec='master')
self.assertEqual(len(res), 1)
# ... multiple refspecs ... works, but git command returns with error if one ref is wrong without
# doing anything. This is new in later binaries
# res = fetch_and_test(remote, refspec=['master', 'fred'])
# self.assertEqual(len(res), 1)
# add new tag reference
rtag = TagReference.create(remote_repo, "1.0-RV_hello.there")
res = fetch_and_test(remote, tags=True)
tinfo = res[str(rtag)]
self.assertIsInstance(tinfo.ref, TagReference)
self.assertEqual(tinfo.ref.commit, rtag.commit)
self.assertTrue(tinfo.flags & tinfo.NEW_TAG)
# adjust tag commit
Reference.set_object(rtag, rhead.commit.parents[0].parents[0])
res = fetch_and_test(remote, tags=True)
tinfo = res[str(rtag)]
self.assertEqual(tinfo.commit, rtag.commit)
self.assertTrue(tinfo.flags & tinfo.TAG_UPDATE)
# delete remote tag - local one will stay
TagReference.delete(remote_repo, rtag)
res = fetch_and_test(remote, tags=True)
self.failUnlessRaises(IndexError, get_info, res, remote, str(rtag))
# provoke to receive actual objects to see what kind of output we have to
# expect. For that we need a remote transport protocol
# Create a new UN-shared repo and fetch into it after we pushed a change
# to the shared repo
other_repo_dir = tempfile.mktemp("other_repo")
# must clone with a local path for the repo implementation not to freak out
# as it wants local paths only ( which I can understand )
other_repo = remote_repo.clone(other_repo_dir, shared=False)
remote_repo_url = osp.basename(remote_repo.git_dir) # git-daemon runs with appropriate `--base-path`.
remote_repo_url = Git.polish_url("git://localhost:%s/%s" % (GIT_DAEMON_PORT, remote_repo_url))
# put origin to git-url
other_origin = other_repo.remotes.origin
with other_origin.config_writer as cw:
cw.set("url", remote_repo_url)
# it automatically creates alternates as remote_repo is shared as well.
# It will use the transport though and ignore alternates when fetching
# assert not other_repo.alternates # this would fail
# assure we are in the right state
rw_repo.head.reset(remote.refs.master, working_tree=True)
try:
self._commit_random_file(rw_repo)
remote.push(rw_repo.head.reference)
# here I would expect to see remote-information about packing
# objects and so on. Unfortunately, this does not happen
# if we are redirecting the output - git explicitly checks for this
# and only provides progress information to ttys
res = fetch_and_test(other_origin)
finally:
rmtree(other_repo_dir)
# END test and cleanup
def _assert_push_and_pull(self, remote, rw_repo, remote_repo):
# push our changes
lhead = rw_repo.head
# assure we are on master and it is checked out where the remote is
try:
lhead.reference = rw_repo.heads.master
except AttributeError:
# if the author is on a non-master branch, the clones might not have
# a local master yet. We simply create it
lhead.reference = rw_repo.create_head('master')
# END master handling
lhead.reset(remote.refs.master, working_tree=True)
# push without spec should fail ( without further configuration )
# well, works nicely
# self.failUnlessRaises(GitCommandError, remote.push)
# simple file push
self._commit_random_file(rw_repo)
progress = TestRemoteProgress()
res = remote.push(lhead.reference, progress)
self.assertIsInstance(res, IterableList)
self._do_test_push_result(res, remote)
progress.make_assertion()
# rejected - undo last commit
lhead.reset("HEAD~1")
res = remote.push(lhead.reference)
self.assertTrue(res[0].flags & PushInfo.ERROR)
self.assertTrue(res[0].flags & PushInfo.REJECTED)
self._do_test_push_result(res, remote)
# force rejected pull
res = remote.push('+%s' % lhead.reference)
self.assertEqual(res[0].flags & PushInfo.ERROR, 0)
self.assertTrue(res[0].flags & PushInfo.FORCED_UPDATE)
self._do_test_push_result(res, remote)
# invalid refspec
self.failUnlessRaises(GitCommandError, remote.push, "hellothere")
# push new tags
progress = TestRemoteProgress()
to_be_updated = "my_tag.1.0RV"
new_tag = TagReference.create(rw_repo, to_be_updated) # @UnusedVariable
other_tag = TagReference.create(rw_repo, "my_obj_tag.2.1aRV", message="my message")
res = remote.push(progress=progress, tags=True)
self.assertTrue(res[-1].flags & PushInfo.NEW_TAG)
progress.make_assertion()
self._do_test_push_result(res, remote)
# update push new tags
# Rejection is default
new_tag = TagReference.create(rw_repo, to_be_updated, ref='HEAD~1', force=True)
res = remote.push(tags=True)
self._do_test_push_result(res, remote)
self.assertTrue(res[-1].flags & PushInfo.REJECTED)
self.assertTrue(res[-1].flags & PushInfo.ERROR)
# push force this tag
res = remote.push("+%s" % new_tag.path)
self.assertEqual(res[-1].flags & PushInfo.ERROR, 0)
self.assertTrue(res[-1].flags & PushInfo.FORCED_UPDATE)
# delete tag - have to do it using refspec
res = remote.push(":%s" % new_tag.path)
self._do_test_push_result(res, remote)
self.assertTrue(res[0].flags & PushInfo.DELETED)
# Currently progress is not properly transferred, especially not using
# the git daemon
# progress.assert_received_message()
# push new branch
new_head = Head.create(rw_repo, "my_new_branch")
progress = TestRemoteProgress()
res = remote.push(new_head, progress)
self.assertGreater(len(res), 0)
self.assertTrue(res[0].flags & PushInfo.NEW_HEAD)
progress.make_assertion()
self._do_test_push_result(res, remote)
# delete new branch on the remote end and locally
res = remote.push(":%s" % new_head.path)
self._do_test_push_result(res, remote)
Head.delete(rw_repo, new_head)
self.assertTrue(res[-1].flags & PushInfo.DELETED)
# --all
res = remote.push(all=True)
self._do_test_push_result(res, remote)
remote.pull('master')
        # cleanup - delete created tags and branches as we are in an inner loop on
# the same repository
TagReference.delete(rw_repo, new_tag, other_tag)
remote.push(":%s" % other_tag.path)
@skipIf(HIDE_WINDOWS_FREEZE_ERRORS, "FIXME: Freezes!")
@with_rw_and_rw_remote_repo('0.1.6')
def test_base(self, rw_repo, remote_repo):
num_remotes = 0
remote_set = set()
ran_fetch_test = False
for remote in rw_repo.remotes:
num_remotes += 1
self.assertEqual(remote, remote)
self.assertNotEqual(str(remote), repr(remote))
remote_set.add(remote)
remote_set.add(remote) # should already exist
# REFS
refs = remote.refs
self.assertTrue(refs)
for ref in refs:
self.assertEqual(ref.remote_name, remote.name)
self.assertTrue(ref.remote_head)
# END for each ref
# OPTIONS
# cannot use 'fetch' key anymore as it is now a method
for opt in ("url",):
val = getattr(remote, opt)
reader = remote.config_reader
assert reader.get(opt) == val
assert reader.get_value(opt, None) == val
# unable to write with a reader
self.failUnlessRaises(IOError, reader.set, opt, "test")
# change value
with remote.config_writer as writer:
new_val = "myval"
writer.set(opt, new_val)
assert writer.get(opt) == new_val
writer.set(opt, val)
assert writer.get(opt) == val
assert getattr(remote, opt) == val
# END for each default option key
# RENAME
other_name = "totally_other_name"
prev_name = remote.name
self.assertEqual(remote.rename(other_name), remote)
self.assertNotEqual(prev_name, remote.name)
# multiple times
for _ in range(2):
self.assertEqual(remote.rename(prev_name).name, prev_name)
# END for each rename ( back to prev_name )
# PUSH/PULL TESTING
self._assert_push_and_pull(remote, rw_repo, remote_repo)
# FETCH TESTING
# Only for remotes - local cases are the same or less complicated
# as additional progress information will never be emitted
if remote.name == "daemon_origin":
self._do_test_fetch(remote, rw_repo, remote_repo)
ran_fetch_test = True
# END fetch test
remote.update()
# END for each remote
self.assertTrue(ran_fetch_test)
self.assertTrue(num_remotes)
self.assertEqual(num_remotes, len(remote_set))
origin = rw_repo.remote('origin')
assert origin == rw_repo.remotes.origin
# Verify we can handle prunes when fetching
# stderr lines look like this: x [deleted] (none) -> origin/experiment-2012
# These should just be skipped
# If we don't have a manual checkout, we can't actually assume there are any non-master branches
remote_repo.create_head("myone_for_deletion")
# Get the branch - to be pruned later
origin.fetch()
        num_deleted = 0
for branch in remote_repo.heads:
if branch.name != 'master':
branch.delete(remote_repo, branch, force=True)
num_deleted += 1
# end
# end for each branch
self.assertGreater(num_deleted, 0)
self.assertEqual(len(rw_repo.remotes.origin.fetch(prune=True)), 1, "deleted everything but master")
@with_rw_repo('HEAD', bare=True)
def test_creation_and_removal(self, bare_rw_repo):
new_name = "test_new_one"
arg_list = (new_name, "git@server:hello.git")
remote = Remote.create(bare_rw_repo, *arg_list)
self.assertEqual(remote.name, "test_new_one")
self.assertIn(remote, bare_rw_repo.remotes)
self.assertTrue(remote.exists())
# create same one again
self.failUnlessRaises(GitCommandError, Remote.create, bare_rw_repo, *arg_list)
Remote.remove(bare_rw_repo, new_name)
self.assertTrue(remote.exists()) # We still have a cache that doesn't know we were deleted by name
remote._clear_cache()
assert not remote.exists() # Cache should be renewed now. This is an issue ...
for remote in bare_rw_repo.remotes:
if remote.name == new_name:
raise AssertionError("Remote removal failed")
# END if deleted remote matches existing remote's name
# END for each remote
# Issue #262 - the next call would fail if bug wasn't fixed
bare_rw_repo.create_remote('bogus', '/bogus/path', mirror='push')
def test_fetch_info(self):
# assure we can handle remote-tracking branches
fetch_info_line_fmt = "c437ee5deb8d00cf02f03720693e4c802e99f390 not-for-merge %s '0.3' of "
fetch_info_line_fmt += "git://github.com/gitpython-developers/GitPython"
remote_info_line_fmt = "* [new branch] nomatter -> %s"
self.failUnlessRaises(ValueError, FetchInfo._from_line, self.rorepo,
remote_info_line_fmt % "refs/something/branch",
"269c498e56feb93e408ed4558c8138d750de8893\t\t/Users/ben/test/foo\n")
fi = FetchInfo._from_line(self.rorepo,
remote_info_line_fmt % "local/master",
fetch_info_line_fmt % 'remote-tracking branch')
assert not fi.ref.is_valid()
self.assertEqual(fi.ref.name, "local/master")
# handles non-default refspecs: One can specify a different path in refs/remotes
# or a special path just in refs/something for instance
fi = FetchInfo._from_line(self.rorepo,
remote_info_line_fmt % "subdir/tagname",
fetch_info_line_fmt % 'tag')
self.assertIsInstance(fi.ref, TagReference)
assert fi.ref.path.startswith('refs/tags'), fi.ref.path
        # it could be in a remote directory though
fi = FetchInfo._from_line(self.rorepo,
remote_info_line_fmt % "remotename/tags/tagname",
fetch_info_line_fmt % 'tag')
self.assertIsInstance(fi.ref, TagReference)
assert fi.ref.path.startswith('refs/remotes/'), fi.ref.path
# it can also be anywhere !
tag_path = "refs/something/remotename/tags/tagname"
fi = FetchInfo._from_line(self.rorepo,
remote_info_line_fmt % tag_path,
fetch_info_line_fmt % 'tag')
self.assertIsInstance(fi.ref, TagReference)
self.assertEqual(fi.ref.path, tag_path)
# branches default to refs/remotes
fi = FetchInfo._from_line(self.rorepo,
remote_info_line_fmt % "remotename/branch",
fetch_info_line_fmt % 'branch')
self.assertIsInstance(fi.ref, RemoteReference)
self.assertEqual(fi.ref.remote_name, 'remotename')
        # but you can force it anywhere, in which case we only have a reference
fi = FetchInfo._from_line(self.rorepo,
remote_info_line_fmt % "refs/something/branch",
fetch_info_line_fmt % 'branch')
assert type(fi.ref) is Reference, type(fi.ref)
self.assertEqual(fi.ref.path, "refs/something/branch")
def test_uncommon_branch_names(self):
stderr_lines = fixture('uncommon_branch_prefix_stderr').decode('ascii').splitlines()
fetch_lines = fixture('uncommon_branch_prefix_FETCH_HEAD').decode('ascii').splitlines()
# The contents of the files above must be fetched with a custom refspec:
# +refs/pull/*:refs/heads/pull/*
res = [FetchInfo._from_line('ShouldntMatterRepo', stderr, fetch_line)
for stderr, fetch_line in zip(stderr_lines, fetch_lines)]
self.assertGreater(len(res), 0)
self.assertEqual(res[0].remote_ref_path, 'refs/pull/1/head')
self.assertEqual(res[0].ref.path, 'refs/heads/pull/1/head')
self.assertIsInstance(res[0].ref, Head)
@with_rw_repo('HEAD', bare=False)
def test_multiple_urls(self, rw_repo):
# test addresses
test1 = 'https://github.com/gitpython-developers/GitPython'
test2 = 'https://github.com/gitpython-developers/gitdb'
test3 = 'https://github.com/gitpython-developers/smmap'
remote = rw_repo.remotes[0]
# Testing setting a single URL
remote.set_url(test1)
self.assertEqual(list(remote.urls), [test1])
# Testing replacing that single URL
remote.set_url(test1)
self.assertEqual(list(remote.urls), [test1])
# Testing adding new URLs
remote.set_url(test2, add=True)
self.assertEqual(list(remote.urls), [test1, test2])
remote.set_url(test3, add=True)
self.assertEqual(list(remote.urls), [test1, test2, test3])
        # Testing removing a URL
remote.set_url(test2, delete=True)
self.assertEqual(list(remote.urls), [test1, test3])
        # Testing changing a URL
remote.set_url(test2, test3)
self.assertEqual(list(remote.urls), [test1, test2])
# will raise: fatal: --add --delete doesn't make sense
assert_raises(GitCommandError, remote.set_url, test2, add=True, delete=True)
# Testing on another remote, with the add/delete URL
remote = rw_repo.create_remote('another', url=test1)
remote.add_url(test2)
self.assertEqual(list(remote.urls), [test1, test2])
remote.add_url(test3)
self.assertEqual(list(remote.urls), [test1, test2, test3])
# Testing removing all the URLs
remote.delete_url(test2)
self.assertEqual(list(remote.urls), [test1, test3])
remote.delete_url(test1)
self.assertEqual(list(remote.urls), [test3])
# will raise fatal: Will not delete all non-push URLs
assert_raises(GitCommandError, remote.delete_url, test3)
def test_fetch_error(self):
rem = self.rorepo.remote('origin')
with self.assertRaisesRegex(GitCommandError, "Couldn't find remote ref __BAD_REF__"):
rem.fetch('__BAD_REF__')
@with_rw_repo('0.1.6', bare=False)
def test_push_error(self, repo):
rem = repo.remote('origin')
with self.assertRaisesRegex(GitCommandError, "src refspec __BAD_REF__ does not match any"):
rem.push('__BAD_REF__')
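# -----------------------------------------------------------------------------
# Hedged illustration (not one of the tests above): a minimal RemoteProgress
# subclass wired into fetch/push the same way TestRemoteProgress is used in
# this module. The PrintingProgress name and the repo argument are assumptions
# made for the sketch; nothing here is invoked by the test suite.
# -----------------------------------------------------------------------------
class PrintingProgress(RemoteProgress):
    """Prints every progress update git reports while fetching or pushing."""
    def update(self, op_code, cur_count, max_count=None, message=''):
        # op_code encodes the operation (COUNTING, COMPRESSING, WRITING, ...)
        # plus BEGIN/END stage bits; cur_count/max_count track completion.
        print("op=%s %s/%s %s" % (op_code, cur_count, max_count, message))
def _example_fetch_and_push(repo, remote_name='origin'):
    """Fetch from and push to `remote_name`, reporting progress as we go."""
    remote = repo.remote(remote_name)
    fetch_results = remote.fetch(progress=PrintingProgress())
    push_results = remote.push(repo.head.reference, progress=PrintingProgress())
    return fetch_results, push_results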
|
|
import base64
from collections import defaultdict
import datetime
import hashlib
import json
import Levenshtein
import logging
import math
import os
import pytz
import requests
import traceback
import xml.etree.ElementTree as ET
import urllib
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.db.models import Count, F
from django.http import (
Http404,
HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden,
HttpResponseRedirect,
)
from django.shortcuts import render
from django.utils import timezone
from django.utils.datastructures import MultiValueDictKeyError
from django_browserid.views import Verify as BrowserIDVerifyBase
from operator import itemgetter
from pontoon.administration.vcs import commit_to_vcs
from pontoon.administration import files
from pontoon.base import utils
from pontoon.base.models import (
Entity,
Locale,
Project,
Resource,
Subpage,
Translation,
Stats,
UserProfile,
get_locales_with_project_stats,
get_locales_with_stats,
get_projects_with_stats,
get_translation,
unapprove,
unfuzzy,
)
from session_csrf import anonymous_csrf_exempt
from suds.client import Client, WebFault
log = logging.getLogger('pontoon')
def home(request):
"""Home view."""
log.debug("Home view.")
project = Project.objects.get(id=1)
locale = utils.get_project_locale_from_request(
request, project.locales) or 'en-GB'
return translate(request, locale, project.slug)
def locale(request, locale, template='locale.html'):
"""Locale view."""
log.debug("Locale view.")
# Validate locale
try:
l = Locale.objects.get(code__iexact=locale)
except Locale.DoesNotExist:
raise Http404
projects = Project.objects.filter(
disabled=False, pk__in=Resource.objects.values('project'), locales=l) \
.order_by("name")
if not projects:
messages.error(
request, "Oops, no projects available for this locale.")
request.session['translate_error'] = {
'none': None,
}
return HttpResponseRedirect(reverse('pontoon.home'))
data = {
'projects': get_projects_with_stats(projects, l),
'locale': l,
}
return render(request, template, data)
def locales(request):
"""Localization teams."""
data = {
'locales': get_locales_with_stats(),
}
return render(request, 'locales.html', data)
def project(request, slug, template='project.html'):
"""Project view."""
log.debug("Project view.")
# Validate project
try:
p = Project.objects.get(slug=slug, disabled=False,
pk__in=Resource.objects.values('project'))
except Project.DoesNotExist:
messages.error(request, "Oops, project could not be found.")
request.session['translate_error'] = {
'none': None,
}
return HttpResponseRedirect(reverse('pontoon.home'))
locales = p.locales.all().order_by("name")
data = {
'locales': get_locales_with_project_stats(p),
'project': p,
'project_locales': json.dumps(
[i.lower() for i in p.locales.values_list('code', flat=True)]),
}
return render(request, template, data)
def projects(request, template='projects.html'):
"""Project overview."""
log.debug("Project overview.")
projects = Project.objects.filter(
disabled=False, pk__in=Resource.objects.values('project')) \
.order_by("name")
data = {
'projects': get_projects_with_stats(projects),
}
return render(request, template, data)
def translate(request, locale, slug, part=None, template='translate.html'):
"""Translate view."""
log.debug("Translate view.")
invalid_locale = invalid_project = False
# Validate locale
try:
l = Locale.objects.get(code__iexact=locale)
except Locale.DoesNotExist:
invalid_locale = True
# Validate project
try:
p = Project.objects.get(
disabled=False,
slug=slug, pk__in=Resource.objects.values('project'))
except Project.DoesNotExist:
invalid_project = True
if invalid_locale:
if invalid_project:
raise Http404
else:
messages.error(request, "Oops, locale is not supported.")
request.session['translate_error'] = {
'none': None,
}
return HttpResponseRedirect(reverse('pontoon.home'))
if invalid_project:
messages.error(request, "Oops, project could not be found.")
request.session['translate_error'] = {
'none': None,
}
return HttpResponseRedirect(reverse('pontoon.home'))
# Validate project locales
if p.locales.filter(code__iexact=locale).count() == 0:
request.session['translate_error'] = {
'none': None,
}
messages.error(
request, "Oops, locale is not supported for this project.")
return HttpResponseRedirect(reverse('pontoon.home'))
# Check if user authenticated
if not p.pk == 1:
if not request.user.is_authenticated():
messages.error(request, "You need to sign in first.")
request.session['translate_error'] = {
'redirect': request.get_full_path(),
}
return HttpResponseRedirect(reverse('pontoon.home'))
# Set project details (locales and pages or paths + stats)
projects = Project.objects.filter(
disabled=False, pk__in=Resource.objects.values('project')) \
.order_by("name")
for project in projects:
pages = Subpage.objects.filter(project=project)
r = Entity.objects.filter(obsolete=False).values('resource')
resources = Resource.objects.filter(project=project, pk__in=r)
details = {}
for loc in project.locales.all():
stats = Stats.objects.filter(resource__in=resources, locale=loc)
if len(pages) == 0 and len(resources) > 1:
locale_details = stats.order_by('resource__path') \
.values(
'resource__path',
'resource__entity_count',
'translated_count',
'approved_count',
)
else:
locale_details = pages.values('name')
if len(pages) > 0 and pages[0].resources.exists():
locale_details = pages.filter(
resources__stats=stats).values('name')
details[loc.code.lower()] = list(locale_details)
project.details = json.dumps(details)
data = {
'accept_language': utils.get_project_locale_from_request(
request, Locale.objects),
'locale': l,
'locales': Locale.objects.all(),
'page_url': p.url,
'project': p,
'projects': projects,
}
# Set subpage
pages = Subpage.objects.filter(project=p)
setPart = False
if len(pages) > 0:
try:
page = pages.get(name=part)
if pages.count() > 1:
setPart = True
# If page not specified or doesn't exist
except Subpage.DoesNotExist:
page = pages[0]
if pages.count() > 1:
setPart = True
locale_pages = pages.filter(resources__stats__locale=l)
if locale_pages:
page = locale_pages[0]
setPart = True if locale_pages.count() > 1 else False
data['page_url'] = page.url
if setPart:
data['part'] = page.name
# Set part if subpages not defined and entities in more than one file
else:
paths = (Resource.objects
.filter(project=p, entity_count__gt=0, stats__locale=l)
.order_by('path')
.values_list('path', flat=True))
if len(paths) > 1:
data['part'] = part if part in paths else paths[0]
# Set error data
translate_error = request.session.pop('translate_error', {})
if translate_error:
data['redirect'] = translate_error.get('redirect', None)
return render(request, template, data)
@login_required(redirect_field_name='', login_url='/403')
def profile(request):
"""Current user profile."""
log.debug("Current user profile.")
return contributor(request, request.user.email)
def contributor(request, email, template='user.html'):
"""Contributor profile."""
log.debug("Contributor profile.")
# Validate user
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
raise Http404
# Exclude unchanged translations
translations = (
Translation.objects.filter(user=user)
.exclude(string=F('entity__string'))
.exclude(string=F('entity__string_plural'))
)
# Exclude obsolete translations
current = translations.exclude(entity__obsolete=True) \
.extra({'day': "date(date)"}).order_by('day')
# Timeline
timeline = [{
'date': user.date_joined,
'type': 'join',
}]
for event in current.values('day').annotate(count=Count('id')):
daily = current.filter(date__startswith=event['day'])
example = daily[0]
timeline.append({
'date': example.date,
'type': 'translation',
'count': event['count'],
'project': example.entity.resource.project,
'translation': example,
})
timeline.reverse()
data = {
'contributor': user,
'timeline': timeline,
'translations': translations,
}
return render(request, template, data)
def contributors(request, template='users.html'):
"""Top contributors view."""
log.debug("Top contributors view.")
try:
period = int(request.GET['period'])
if period <= 0:
raise ValueError
start_date = (timezone.now() + relativedelta(months=-period))
except (KeyError, ValueError):
period = None
start_date = None
data = {
'contributors': User.translators.with_translation_counts(start_date),
'period': period,
}
return render(request, template, data)
def search(request, template='search.html'):
"""Terminology search view."""
log.debug("Terminology search view.")
locale = utils.get_project_locale_from_request(
request, Locale.objects) or 'en-GB'
data = {
'locale': Locale.objects.get(code__iexact=locale),
'locales': Locale.objects.all(),
}
return render(request, template, data)
def entities(request, template=None):
"""Get entities for the specified project, locale and paths."""
log.debug("Get entities for the specified project, locale and paths.")
if not request.is_ajax():
log.error("Non-AJAX request")
raise Http404
try:
project = request.GET['project']
locale = request.GET['locale']
paths = json.loads(request.GET['paths'])
except MultiValueDictKeyError as e:
log.error(str(e))
return HttpResponse("error")
log.debug("Project: " + project)
log.debug("Locale: " + locale)
log.debug("Paths: " + str(paths))
try:
project = Project.objects.get(pk=project)
    except Project.DoesNotExist as e:
log.error(str(e))
return HttpResponse("error")
try:
locale = Locale.objects.get(code__iexact=locale)
except Locale.DoesNotExist as e:
log.error(str(e))
return HttpResponse("error")
entities = Entity.for_project_locale(project, locale, paths)
return HttpResponse(json.dumps(entities), content_type='application/json')
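# Hedged illustration (not used by the code): an AJAX request to this view
# carrying query parameters project=1, locale=de and
# paths=["path/to/file.properties"] (a JSON-encoded list) returns the matching
# entities as JSON via Entity.for_project_locale(project, locale, paths). The
# example values are made up.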
def get_translations_from_other_locales(request, template=None):
"""Get entity translations for all but specified locale."""
log.debug("Get entity translation for all but specified locale.")
if not request.is_ajax():
log.error("Non-AJAX request")
raise Http404
try:
entity = request.GET['entity']
locale = request.GET['locale']
except MultiValueDictKeyError as e:
log.error(str(e))
return HttpResponse("error")
log.debug("Entity: " + entity)
log.debug("Locale: " + locale)
try:
entity = Entity.objects.get(pk=entity)
except Entity.DoesNotExist as e:
log.error(str(e))
return HttpResponse("error")
try:
locale = Locale.objects.get(code__iexact=locale)
except Locale.DoesNotExist as e:
log.error(str(e))
return HttpResponse("error")
payload = []
locales = entity.resource.project.locales.all().exclude(
code__iexact=locale.code)
for l in locales:
plural_form = None if entity.string_plural == "" else 0
translation = get_translation(
entity=entity, locale=l, plural_form=plural_form)
if translation.string != '' or translation.pk is not None:
payload.append({
"locale": {
"code": l.code,
"name": l.name
},
"translation": translation.string
})
if len(payload) == 0:
log.debug("Translations do not exist")
return HttpResponse("error")
else:
return HttpResponse(
json.dumps(payload, indent=4), content_type='application/json')
def get_translation_history(request, template=None):
"""Get history of translations of given entity to given locale."""
log.debug("Get history of translations of given entity to given locale.")
if not request.is_ajax():
log.error("Non-AJAX request")
raise Http404
try:
entity = request.GET['entity']
locale = request.GET['locale']
plural_form = request.GET['plural_form']
except MultiValueDictKeyError as e:
log.error(str(e))
return HttpResponse("error")
log.debug("Entity: " + entity)
log.debug("Locale: " + locale)
try:
entity = Entity.objects.get(pk=entity)
except Entity.DoesNotExist as e:
log.error(str(e))
return HttpResponse("error")
try:
locale = Locale.objects.get(code__iexact=locale)
except Locale.DoesNotExist as e:
log.error(str(e))
return HttpResponse("error")
translations = Translation.objects.filter(entity=entity, locale=locale)
if plural_form != "-1":
translations = translations.filter(plural_form=plural_form)
translations = translations.order_by('-approved', '-date')
if len(translations) > 0:
payload = []
offset = timezone.now().strftime('%z')
for t in translations:
u = t.user
a = t.approved_user
o = {
"id": t.id,
"user": "Imported" if u is None else u.first_name or u.email,
"email": "" if u is None else u.email,
"translation": t.string,
"date": t.date.strftime('%b %d, %Y %H:%M'),
"date_iso": t.date.isoformat() + offset,
"approved": t.approved,
"approved_user": "" if a is None else a.first_name or a.email,
}
payload.append(o)
return HttpResponse(
json.dumps(payload, indent=4), content_type='application/json')
else:
log.debug("Translations do not exist")
return HttpResponse("error")
def delete_translation(request, template=None):
"""Delete given translation."""
log.debug("Delete given translation.")
if not request.is_ajax():
log.error("Non-AJAX request")
raise Http404
try:
t = request.POST['translation']
except MultiValueDictKeyError as e:
log.error(str(e))
return HttpResponse("error")
log.debug("Translation: " + t)
try:
translation = Translation.objects.get(pk=t)
except Translation.DoesNotExist as e:
log.error(str(e))
return HttpResponse("error")
# Non-privileged users can only delete own non-approved translations
if not request.user.has_perm('base.can_localize'):
if translation.user == request.user:
if translation.approved is True:
log.error(
"Non-privileged users cannot delete approved translation")
return HttpResponse("error")
else:
return render(request, '403.html', status=403)
entity = translation.entity
locale = translation.locale
plural_form = translation.plural_form
translation.mark_for_deletion()
# Mark next translation approved if needed
next = get_translation(
entity=entity, locale=locale, plural_form=plural_form)
if next.pk is not None and request.user.has_perm('base.can_localize'):
next.approved = True
next.approved_user = request.user
next.approved_date = timezone.now()
next.save()
return HttpResponse(json.dumps({
'type': 'deleted',
'next': next.id,
}), content_type='application/json')
@anonymous_csrf_exempt
def update_translation(request, template=None):
"""Update entity translation for the specified locale and user."""
log.debug("Update entity translation for the specified locale and user.")
if not request.is_ajax():
log.error("Non-AJAX request")
raise Http404
if request.method != 'POST':
log.error("Non-POST request")
raise Http404
try:
entity = request.POST['entity']
string = request.POST['translation']
locale = request.POST['locale']
plural_form = request.POST['plural_form']
original = request.POST['original']
ignore_check = request.POST['ignore_check']
except MultiValueDictKeyError as error:
log.error(str(error))
return HttpResponse("error")
log.debug("Entity: " + entity)
log.debug("Translation: " + string)
log.debug("Locale: " + locale)
try:
e = Entity.objects.get(pk=entity)
except Entity.DoesNotExist as error:
log.error(str(error))
return HttpResponse("error")
try:
l = Locale.objects.get(code__iexact=locale)
except Locale.DoesNotExist as error:
log.error(str(error))
return HttpResponse("error")
if plural_form == "-1":
plural_form = None
user = request.user
if not request.user.is_authenticated():
if e.resource.project.pk != 1:
log.error("Not authenticated")
return HttpResponse("error")
else:
user = None
try:
quality_checks = UserProfile.objects.get(user=user).quality_checks
except UserProfile.DoesNotExist as error:
quality_checks = True
ignore = False
if ignore_check == 'true' or not quality_checks:
ignore = True
now = timezone.now()
can_localize = request.user.has_perm('base.can_localize')
translations = Translation.objects.filter(
entity=e, locale=l, plural_form=plural_form)
# Newlines are not allowed in .lang files (bug 1190754)
if e.resource.format == 'lang' and '\n' in string:
return HttpResponse('Newline characters are not allowed.')
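    # The rest of the view distinguishes three outcomes, mirrored in the 'type'
    # field of the JSON responses below: 'updated' (an identical translation
    # already existed and was re-approved/unfuzzied), 'added' (a different
    # translation was added next to existing ones) and 'saved' (the first
    # translation for this entity, locale and plural form).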
# Translations exist
if len(translations) > 0:
# Same translation exists
try:
t = translations.get(string=string)
# If added by privileged user, approve and unfuzzy it
if can_localize:
# Unless there's nothing to be changed
if t.user is not None and t.approved and t.approved_user \
and t.approved_date and not t.fuzzy:
return HttpResponse("Same translation already exists.")
warnings = utils.quality_check(original, string, l, ignore)
if warnings:
return warnings
unapprove(translations)
unfuzzy(translations)
if t.user is None:
t.user = user
t.approved = True
t.approved_date = timezone.now()
t.fuzzy = False
if t.approved_user is None:
t.approved_user = user
t.approved_date = now
if request.user.is_authenticated():
t.save()
return HttpResponse(json.dumps({
'type': 'updated',
'translation': t.serialize(),
}), content_type='application/json')
# If added by non-privileged user, unfuzzy it
else:
if t.fuzzy:
warnings = utils.quality_check(original, string, l, ignore)
if warnings:
return warnings
if t.user is None:
t.user = user
t.approved = False
t.approved_user = None
t.approved_date = None
t.fuzzy = False
if request.user.is_authenticated():
t.save()
return HttpResponse(json.dumps({
'type': 'updated',
'translation': t.serialize(),
}), content_type='application/json')
return HttpResponse("Same translation already exists.")
# Different translation added
        except Translation.DoesNotExist:
warnings = utils.quality_check(original, string, l, ignore)
if warnings:
return warnings
if can_localize:
unapprove(translations)
unfuzzy(translations)
t = Translation(
entity=e, locale=l, user=user, string=string,
plural_form=plural_form, date=now,
approved=can_localize)
if can_localize:
t.approved_user = user
t.approved_date = now
if request.user.is_authenticated():
t.save()
active = get_translation(
entity=e, locale=l, plural_form=plural_form)
return HttpResponse(json.dumps({
'type': 'added',
'translation': active.serialize(),
}), content_type='application/json')
# No translations saved yet
else:
warnings = utils.quality_check(original, string, l, ignore)
if warnings:
return warnings
t = Translation(
entity=e, locale=l, user=user, string=string,
plural_form=plural_form, date=now,
approved=can_localize)
if can_localize:
t.approved_user = user
t.approved_date = now
if request.user.is_authenticated():
t.save()
return HttpResponse(json.dumps({
'type': 'saved',
'translation': t.serialize(),
}), content_type='application/json')
def translation_memory(request):
"""Get translations from internal translations."""
log.debug("Get translations from internal translations.")
try:
text = request.GET['text']
locale = request.GET['locale']
pk = request.GET['pk']
except MultiValueDictKeyError as e:
log.error(str(e))
return HttpResponse("error")
try:
locale = Locale.objects.get(code__iexact=locale)
except Locale.DoesNotExist as e:
log.error(e)
return HttpResponse("error")
min_quality = 0.7
max_results = 5
length = len(text)
min_dist = math.ceil(max(length * min_quality, 2))
max_dist = math.floor(min(length / min_quality, 1000))
# Only check entities with similar length
entities = Entity.objects.all().extra(
where=["CHAR_LENGTH(string) BETWEEN %s AND %s" % (min_dist, max_dist)])
# Exclude existing entity
if pk:
entities = entities.exclude(pk=pk)
translations = {}
for e in entities:
source = e.string
quality = Levenshtein.ratio(text, source)
if quality > min_quality:
plural_form = None if e.string_plural == "" else 0
translation = get_translation(
entity=e, locale=locale, fuzzy=False, plural_form=plural_form)
if translation.string != '' or translation.pk is not None:
count = 1
quality = quality * 100
if translation.string in translations:
existing = translations[translation.string]
count = existing['count'] + 1
# Store data for best match among equal translations only
if quality < existing['quality']:
quality = existing['quality']
source = existing['source']
translations[translation.string] = {
'source': source,
'quality': quality,
'count': count,
}
if len(translations) > 0:
# Sort by translation count
t = sorted(translations.iteritems(), key=itemgetter(1), reverse=True)
translations_array = []
for a, b in t[:max_results]:
b["target"] = a
translations_array.append(b)
return HttpResponse(json.dumps({
'translations': translations_array
}), content_type='application/json')
else:
return HttpResponse("no")
def machine_translation(request):
"""Get translation from machine translation service."""
log.debug("Get translation from machine translation service.")
try:
text = request.GET['text']
locale = request.GET['locale']
check = request.GET['check']
except MultiValueDictKeyError as e:
log.error(str(e))
return HttpResponse("error")
if hasattr(settings, 'MICROSOFT_TRANSLATOR_API_KEY'):
api_key = settings.MICROSOFT_TRANSLATOR_API_KEY
else:
log.error("MICROSOFT_TRANSLATOR_API_KEY not set")
return HttpResponse("apikey")
obj = {}
# On first run, check if target language supported
if check == "true":
supported = False
languages = settings.MICROSOFT_TRANSLATOR_LOCALES
if locale in languages:
supported = True
else:
for lang in languages:
if lang.startswith(locale.split("-")[0]): # Neutral locales
supported = True
locale = lang
break
if not supported:
log.debug("Locale not supported.")
return HttpResponse("not-supported")
obj['locale'] = locale
url = "http://api.microsofttranslator.com/V2/Http.svc/Translate"
payload = {
"appId": api_key,
"text": text,
"from": "en",
"to": locale,
"contentType": "text/html",
}
try:
r = requests.get(url, params=payload)
log.debug(r.content)
# Parse XML response
root = ET.fromstring(r.content)
translation = root.text
obj['translation'] = translation
return HttpResponse(json.dumps(obj), content_type='application/json')
except Exception as e:
log.error(e)
return HttpResponse("error")
def microsoft_terminology(request):
"""Get translations from Microsoft Terminology Service."""
log.debug("Get translations from Microsoft Terminology Service.")
try:
text = request.GET['text']
locale = request.GET['locale']
check = request.GET['check']
except MultiValueDictKeyError as e:
log.error(str(e))
return HttpResponse("error")
obj = {}
locale = locale.lower()
url = 'http://api.terminology.microsoft.com/Terminology.svc?singleWsdl'
client = Client(url)
# On first run, check if target language supported
if check == "true":
supported = False
languages = settings.MICROSOFT_TERMINOLOGY_LOCALES
if locale in languages:
supported = True
elif "-" not in locale:
temp = locale + "-" + locale # Try e.g. "de-de"
if temp in languages:
supported = True
locale = temp
else:
for lang in languages:
if lang.startswith(locale + "-"): # Try e.g. "de-XY"
supported = True
locale = lang
break
if not supported:
log.debug("Locale not supported.")
return HttpResponse("not-supported")
obj['locale'] = locale
sources = client.factory.create('ns0:TranslationSources')
sources["TranslationSource"] = ['Terms', 'UiStrings']
payload = {
'text': text,
'from': 'en-US',
'to': locale,
'sources': sources,
'maxTranslations': 5
}
try:
r = client.service.GetTranslations(**payload)
translations = []
if len(r) != 0:
for translation in r.Match:
translations.append({
'source': translation.OriginalText,
'target': translation.Translations[0][0].TranslatedText,
'quality': translation.ConfidenceLevel,
})
obj['translations'] = translations
return HttpResponse(json.dumps(obj), content_type='application/json')
except WebFault as e:
log.error(e)
return HttpResponse("error")
def amagama(request):
"""Get open source translations from amaGama service."""
log.debug("Get open source translations from amaGama service.")
try:
text = request.GET['text']
locale = request.GET['locale']
except MultiValueDictKeyError as e:
log.error(str(e))
return HttpResponse("error")
try:
text = urllib.quote(text.encode('utf-8'))
except KeyError as e:
log.error(str(e))
return HttpResponse("error")
url = "http://amagama.locamotion.org/tmserver" \
"/en/%s/unit/%s?max_candidates=%s" \
% (locale, text, 5)
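    # For example, with locale "de" and quoted text "Save%20file" the URL above
    # becomes (illustrative, not a recorded request):
    # http://amagama.locamotion.org/tmserver/en/de/unit/Save%20file?max_candidates=5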
try:
r = requests.get(url)
if r.text != '[]':
translations = r.json()
return HttpResponse(json.dumps({
'translations': translations
}), content_type='application/json')
else:
return HttpResponse("no")
except Exception as e:
log.error(e)
return HttpResponse("error")
def transvision(request, repo, title):
"""Get Mozilla translations from Transvision service."""
log.debug("Get Mozilla translations from Transvision service.")
try:
text = request.GET['text']
locale = request.GET['locale']
except MultiValueDictKeyError as e:
log.error(str(e))
return HttpResponse("error")
src = "en-US"
url = "https://transvision.mozfr.org/api/v1/tm/%s/%s/" \
"%s/%s/?max_results=%s&min_quality=70" % (repo, src, locale, text, 5)
try:
r = requests.get(url)
if r.text != '[]':
translations = r.json()
return HttpResponse(json.dumps({
'translations': translations,
'title': title,
}), content_type='application/json')
else:
return HttpResponse("no")
except Exception as e:
log.error(e)
return HttpResponse("error")
def transvision_aurora(request):
return transvision(request, "aurora", "Mozilla Aurora")
def transvision_gaia(request):
return transvision(request, "gaia", "Firefox OS")
def transvision_mozilla_org(request):
return transvision(request, "mozilla_org", "Mozilla.org")
@anonymous_csrf_exempt
def download(request, template=None):
"""Download translations in appropriate form."""
log.debug("Download translations.")
if request.method != 'POST':
log.error("Non-POST request")
raise Http404
try:
format = request.POST['type']
locale = request.POST['locale']
project = request.POST['project']
except MultiValueDictKeyError as e:
log.error(str(e))
raise Http404
if format in ('html', 'json'):
try:
content = request.POST['content']
except MultiValueDictKeyError as e:
log.error(str(e))
raise Http404
try:
p = Project.objects.get(pk=project)
except Project.DoesNotExist as e:
log.error(e)
raise Http404
filename = '%s-%s' % (p.slug, locale)
response = HttpResponse()
if format == 'html':
response['Content-Type'] = 'text/html'
elif format == 'json':
response['Content-Type'] = 'application/json'
elif format == 'zip':
content = files.generate_zip(p, locale)
if content is False:
raise Http404
response['Content-Type'] = 'application/x-zip-compressed'
response.content = content
response['Content-Disposition'] = \
'attachment; filename=' + filename + '.' + format
return response
@login_required(redirect_field_name='', login_url='/403')
def save_to_transifex(request, template=None):
"""Save translations to Transifex."""
log.debug("Save to Transifex.")
if request.method != 'POST':
log.error("Non-POST request")
raise Http404
try:
data = json.loads(request.POST['data'])
except MultiValueDictKeyError as e:
log.error(str(e))
return HttpResponse("error")
"""Check if user authenticated to Transifex."""
profile = UserProfile.objects.get(user=request.user)
username = data.get('auth', {}) \
.get('username', profile.transifex_username)
password = data.get('auth', {}) \
.get('password',
base64.decodestring(profile.transifex_password))
if len(username) == 0 or len(password) == 0:
return HttpResponse("authenticate")
"""Make PUT request to Transifex API."""
payload = []
for entity in data.get('strings'):
obj = {
# Identify translation strings using hashes
"source_entity_hash": hashlib.md5(
':'.join([entity['original'], ''])
.encode('utf-8')).hexdigest(),
"translation": entity['translation']
}
payload.append(obj)
log.debug(json.dumps(payload, indent=4))
"""Make PUT request to Transifex API."""
try:
p = Project.objects.get(url=data['url'])
except Project.DoesNotExist as e:
log.error(str(e))
return HttpResponse("error")
response = utils.req('put', p.transifex_project, p.transifex_resource,
data['locale'], username, password, payload)
"""Save Transifex username and password."""
if data.get('auth', {}).get('remember', {}) == 1:
profile.transifex_username = data['auth']['username']
profile.transifex_password = base64.encodestring(
data['auth']['password'])
profile.save()
try:
return HttpResponse(response.status_code)
except AttributeError:
return HttpResponse(response)
@login_required(redirect_field_name='', login_url='/403')
def quality_checks_switch(request):
"""Turn quality checks on/off for the current user."""
log.debug("Turn quality checks on/off for the current user.")
if request.method != 'POST':
log.error("Non-POST request")
raise Http404
profile = request.user.profile
profile.quality_checks = not profile.quality_checks
profile.save()
return HttpResponse("ok")
@login_required(redirect_field_name='', login_url='/403')
def save_user_name(request):
"""Save user name."""
log.debug("Save user name.")
if request.method != 'POST':
log.error("Non-POST request")
raise Http404
try:
name = request.POST['name']
except MultiValueDictKeyError as e:
log.error(str(e))
return HttpResponse("error")
if len(name) < 3 or len(name) > 30:
return HttpResponse("length")
log.debug("New name: " + name)
user = request.user
user.first_name = name
user.save()
return HttpResponse("ok")
@login_required(redirect_field_name='', login_url='/403')
def request_locale(request):
"""Request new locale to be added to project."""
log.debug("Request new locale to be added to project.")
if request.method != 'POST':
log.error("Non-POST request")
raise Http404
try:
project = request.POST['project']
locale = request.POST['locale']
except MultiValueDictKeyError as e:
log.error(str(e))
return HttpResponse("error")
log.debug("Project: " + project)
log.debug("Locale: " + locale)
try:
project = Project.objects.get(slug=project)
    except Project.DoesNotExist as e:
log.error(str(e))
return HttpResponse("error")
try:
locale = Locale.objects.get(code__iexact=locale)
except Locale.DoesNotExist as e:
log.error(str(e))
return HttpResponse("error")
subject = '[Pontoon] Locale Request'
message = 'Add locale %s (%s) to project %s (%s).' % (
locale.name, locale.code, project.name, project.slug)
sender = request.user.email
if settings.ADMINS[0][1] != '':
recipients = [settings.ADMINS[0][1]]
send_mail(subject, message, sender, recipients)
else:
log.error("ADMIN not defined in settings. Email recipient unknown.")
return HttpResponse("error")
return HttpResponse()
class BrowserIDVerify(BrowserIDVerifyBase):
def login_success(self):
        # On every login, grant the can_localize permission if the user doesn't have it yet
if not self.user.has_perm('base.can_localize'):
utils.add_can_localize(self.user)
return super(BrowserIDVerify, self).login_success()
def get_csrf(request, template=None):
"""Get CSRF token."""
log.debug("Get CSRF token.")
if not request.is_ajax():
log.error("Non-AJAX request")
raise Http404
return HttpResponse(request.csrf_token)
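# Illustrative sketch only (not part of the original views): how the
# source_entity_hash sent to Transifex in save_to_transifex() is derived.
# Transifex identifies a source string by the MD5 of the string joined with
# its (here empty) context via ':'. The helper name below is hypothetical.
def _example_source_entity_hash(original, context=''):
    import hashlib
    return hashlib.md5(':'.join([original, context]).encode('utf-8')).hexdigest()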
|
|
#=========================================================================
# mesh_net_RTL_v1.py
#=========================================================================
from pymtl import *
from pclib.ifcs import InValRdyBundle, OutValRdyBundle, NetMsg
from pclib.rtl import Crossbar, RoundRobinArbiterEn, NormalQueue
from math import sqrt
#=========================================================================
# MeshNetworkRTL
#=========================================================================
class MeshNetworkRTL( Model ):
#-----------------------------------------------------------------------
# __init__
#-----------------------------------------------------------------------
def __init__( s, nrouters, nmessages, payload_nbits, nentries ):
# ensure nrouters is a perfect square
assert sqrt( nrouters ) % 1 == 0
s.nrouters = nrouters
s.params = [ nrouters, nmessages, payload_nbits, nentries ]
net_msg = NetMsg( nrouters, nmessages, payload_nbits )
s.in_ = InValRdyBundle [ nrouters ]( net_msg )
s.out = OutValRdyBundle[ nrouters ]( net_msg )
#-----------------------------------------------------------------------
# elaborate_logic
#-----------------------------------------------------------------------
def elaborate_logic( s ):
# instantiate routers
R = Router = MeshRouterRTL
s.routers = [ Router( x, *s.params ) for x in xrange( s.nrouters ) ]
# connect injection terminals
for i in xrange( s.nrouters ):
s.connect( s.in_[i], s.routers[i].in_[ R.TERM ] )
s.connect( s.out[i], s.routers[i].out[ R.TERM ] )
# connect mesh routers
nrouters_1D = int( sqrt( s.nrouters ) )
for j in range( nrouters_1D ):
for i in range( nrouters_1D ):
idx = i + j * nrouters_1D
current = s.routers[ idx ]
# East
if i + 1 < nrouters_1D:
right = s.routers[ idx + 1 ]
s.connect( current.out[ R.EAST ], right.in_[ R.WEST ] )
s.connect( current.in_[ R.EAST ], right.out[ R.WEST ] )
# South
if j + 1 < nrouters_1D:
below = s.routers[ idx + nrouters_1D ]
s.connect( current.out[ R.SOUTH ], below.in_[ R.NORTH ] )
s.connect( current.in_[ R.SOUTH ], below.out[ R.NORTH ] )
#-----------------------------------------------------------------------
# line_trace
#-----------------------------------------------------------------------
def line_trace( s ):
router_traces = []
for i, r in enumerate( s.routers ):
in_str = s.in_[ i ].to_str( s.in_[ i ].msg.dest )
out_str = s.out[ i ].to_str( s.out[ i ].msg.dest )
west = r.out[ r.WEST ].to_str( r.out[ r.WEST ].msg.dest )
north = r.out[ r.NORTH ].to_str( r.out[ r.NORTH ].msg.dest )
south = r.out[ r.SOUTH ].to_str( r.out[ r.SOUTH ].msg.dest )
east = r.out[ r.EAST ].to_str( r.out[ r.EAST ].msg.dest )
router_traces += ['{} ({}{}{}{}) {}'.format( in_str,
west, north, south, east, out_str ) ]
#router_traces += ['{} {}'.format( in_str, out_str ) ]
return '|'.join( router_traces )
#=======================================================================
# MeshRouterRTL
#=======================================================================
class MeshRouterRTL( Model ):
NORTH = 0
EAST = 1
SOUTH = 2
WEST = 3
TERM = 4
#---------------------------------------------------------------------
# __init__
#---------------------------------------------------------------------
def __init__( s, id_, nrouters, nmessages, payload_nbits, nentries ):
s.id_ = id_
s.nrouters = nrouters
s.nmessages = nmessages
s.payload_nbits = payload_nbits
s.nentries = nentries
s.msg_type = NetMsg( nrouters, nmessages, payload_nbits )
#-------------------------------------------------------------------
# Interface
#-------------------------------------------------------------------
s.in_ = InValRdyBundle [ 5 ]( s.msg_type )
s.out = OutValRdyBundle[ 5 ]( s.msg_type )
#---------------------------------------------------------------------
# elaborate_logic
#---------------------------------------------------------------------
def elaborate_logic( s ):
s.dpath = MeshRouterRTLDpath( s.id_, s.nrouters, s.nmessages,
s.payload_nbits, s.nentries )
s.ctrl = MeshRouterRTLCtrl ( s.id_, s.nrouters, s.nmessages,
s.payload_nbits, s.nentries )
for i in xrange( 5 ):
s.connect( s.in_[i], s.dpath.in_[i] )
s.connect( s.out[i], s.dpath.out[i] )
s.connect( s.ctrl.c2d[i], s.dpath.c2d[i] )
#---------------------------------------------------------------------
# line_trace
#---------------------------------------------------------------------
def line_trace( s ):
router_traces = []
for i in range( 5 ):
in_str = s.in_[ i ].to_str( s.in_[ i ].msg.payload )
out_str = s.out[ i ].to_str( s.out[ i ].msg.payload )
router_traces += ['{} {}'.format( in_str, out_str ) ]
return '|'.join( router_traces )
#-----------------------------------------------------------------------
# MeshRouterRTLDpath
#-----------------------------------------------------------------------
class MeshRouterRTLDpath( Model ):
#---------------------------------------------------------------------
# __init__
#---------------------------------------------------------------------
def __init__( s, id_, nrouters, nmessages, payload_nbits, nentries ):
s.id_ = id_
s.nrouters = nrouters
s.nmessages = nmessages
s.payload_nbits = payload_nbits
s.nentries = nentries
s.msg_type = NetMsg( nrouters, nmessages, payload_nbits )
#-------------------------------------------------------------------
# Interface
#-------------------------------------------------------------------
s.in_ = InValRdyBundle [ 5 ]( s.msg_type )
s.out = OutValRdyBundle[ 5 ]( s.msg_type )
s.c2d = [ DpathBundle( s.msg_type ) for x in xrange(5) ]
#---------------------------------------------------------------------
# elaborate_logic
#---------------------------------------------------------------------
def elaborate_logic( s ):
# Input Queues
s.q_in = [ NormalQueue( s.nentries, s.msg_type ) for x in range( 5 ) ]
# Crossbar
s.xbar = Crossbar( 5, s.msg_type )
# Output Queues
s.q_out = [ NormalQueue( s.nentries, s.msg_type ) for x in range( 5 ) ]
for i in xrange( 5 ):
s.connect( s.q_in [i].enq, s.in_[i] )
s.connect( s.q_in [i].deq.msg, s.c2d[i].inbuf_msg )
s.connect( s.q_in [i].deq.val, s.c2d[i].inbuf_val )
s.connect( s.q_in [i].deq.rdy, s.c2d[i].inbuf_rdy )
s.connect( s.q_in [i].deq.msg, s.xbar.in_[i] )
s.connect( s.q_out[i].enq.msg, s.xbar.out[i] )
s.connect( s.c2d [i].xbar_sel, s.xbar.sel[i] )
s.connect( s.q_out[i].enq.val, s.c2d[i].outbuf_val )
s.connect( s.q_out[i].enq.rdy, s.c2d[i].outbuf_rdy )
s.connect( s.q_out[i].deq, s.out[i] )
#-----------------------------------------------------------------------
# MeshRouterRTLCtrl
#-----------------------------------------------------------------------
class MeshRouterRTLCtrl( Model ):
#---------------------------------------------------------------------
# __init__
#---------------------------------------------------------------------
def __init__( s, id_, nrouters, nmessages, payload_nbits, nentries ):
s.id_ = id_
s.nrouters = nrouters
s.nmessages = nmessages
s.payload_nbits = payload_nbits
s.nentries = nentries
s.msg_type = NetMsg( nrouters, nmessages, payload_nbits )
#-------------------------------------------------------------------
# Interface
#-------------------------------------------------------------------
s.c2d = [ CtrlBundle( s.msg_type ) for x in xrange(5) ]
#---------------------------------------------------------------------
# elaborate_logic
#---------------------------------------------------------------------
def elaborate_logic( s ):
s.arbiters = RoundRobinArbiterEn[ 5 ]( 5 )
s.routes = RouteCompute[ 5 ]( s.id_, s.nrouters )
for i in xrange( 5 ):
s.connect( s.arbiters[i].en, s.c2d[i].outbuf_rdy )
s.connect( s.routes[i].dest, s.c2d[i].inbuf_msg.dest )
@s.combinational
def arb_req():
for i in range( 5 ):
# Set arbiter requests
s.arbiters[i].reqs[0].value = s.c2d[0].inbuf_val and s.routes[0].route[i]
s.arbiters[i].reqs[1].value = s.c2d[1].inbuf_val and s.routes[1].route[i]
s.arbiters[i].reqs[2].value = s.c2d[2].inbuf_val and s.routes[2].route[i]
s.arbiters[i].reqs[3].value = s.c2d[3].inbuf_val and s.routes[3].route[i]
s.arbiters[i].reqs[4].value = s.c2d[4].inbuf_val and s.routes[4].route[i]
@s.combinational
def set_ctrl_signals():
for i in range( 5 ):
# Set outbuf valid
s.c2d[i].outbuf_val.value = s.arbiters[i].grants > 0
# Set inbuf rdy
s.c2d[i].inbuf_rdy.value = reduce_or(
concat(
s.arbiters[0].grants[i],
s.arbiters[1].grants[i],
s.arbiters[2].grants[i],
s.arbiters[3].grants[i],
s.arbiters[4].grants[i],
)
)
# Set xbar select
if s.arbiters[i].grants == 0b00001: s.c2d[i].xbar_sel.value = 0
elif s.arbiters[i].grants == 0b00010: s.c2d[i].xbar_sel.value = 1
elif s.arbiters[i].grants == 0b00100: s.c2d[i].xbar_sel.value = 2
elif s.arbiters[i].grants == 0b01000: s.c2d[i].xbar_sel.value = 3
elif s.arbiters[i].grants == 0b10000: s.c2d[i].xbar_sel.value = 4
else : s.c2d[i].xbar_sel.value = 0
#=======================================================================
# CtrlDpathBundle
#=======================================================================
class CtrlDpathBundle( PortBundle ):
def __init__( s, msg_type ):
# Control signals (ctrl -> dpath)
s.inbuf_msg = InPort ( msg_type )
s.inbuf_val = InPort ( 1 )
s.inbuf_rdy = OutPort( 1 )
s.xbar_sel = OutPort( 3 )
s.outbuf_val = OutPort( 1 )
s.outbuf_rdy = InPort ( 1 )
CtrlBundle, DpathBundle = create_PortBundles( CtrlDpathBundle )
#=======================================================================
# RouteCompute
#=======================================================================
# Dimension-ordered (x then y) routing module.
class RouteCompute( Model ):
NORTH = 0b00001
EAST = 0b00010
SOUTH = 0b00100
WEST = 0b01000
TERM = 0b10000
def __init__( s, id_, nrouters ):
s.xnodes = int( sqrt( nrouters ) )
s.x = id_ % s.xnodes
s.y = id_ / s.xnodes
nbits = clog2( nrouters )
s.dest = InPort ( nbits )
s.route = OutPort( 5 )
def elaborate_logic( s ):
s.x_dest = Wire( s.dest.nbits )
s.y_dest = Wire( s.dest.nbits )
@s.combinational
def logic():
# TODO: bitwidth inference for % and / don't work
s.x_dest.value = s.dest % s.xnodes
s.y_dest.value = s.dest / s.xnodes
if s.x_dest < s.x: s.route.value = s.WEST
elif s.x_dest > s.x: s.route.value = s.EAST
elif s.y_dest < s.y: s.route.value = s.NORTH
elif s.y_dest > s.y: s.route.value = s.SOUTH
else:
assert s.x_dest == s.x
assert s.y_dest == s.y
s.route.value = s.TERM
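#-----------------------------------------------------------------------
# _xy_route_sketch
#-----------------------------------------------------------------------
# Illustrative, simulator-free restatement of the dimension-ordered (x then
# y) decision made by RouteCompute above; it is not used by the RTL model.
# The returned port names mirror RouteCompute's one-hot output encodings.
def _xy_route_sketch( router_id, dest_id, nrouters ):
  xnodes = int( sqrt( nrouters ) )
  x,      y      = router_id % xnodes, router_id // xnodes
  x_dest, y_dest = dest_id   % xnodes, dest_id   // xnodes
  if   x_dest < x: return 'WEST'
  elif x_dest > x: return 'EAST'
  elif y_dest < y: return 'NORTH'
  elif y_dest > y: return 'SOUTH'
  else:            return 'TERM'
# Example: in a 16-router (4x4) mesh, router 5 (x=1,y=1) routes a message
# destined for router 7 (x=3,y=1) east first: _xy_route_sketch( 5, 7, 16 )
# returns 'EAST'.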
|
|
from common import * # NOQA
from cattle import ClientApiError
import pytest
SP_CREATE = "storagepool.create"
VOLUME_CREATE = "volume.create"
def more_hosts(context):
host2 = register_simulated_host(context)
host3 = register_simulated_host(context)
return context.host, host2, host3
def from_context(context):
return context.client, context.agent_client, context.host
def add_storage_pool(context, host_uuids=None, **kwargs):
client, agent_client, host = from_context(context)
sp_name = 'new-sp-%s' % random_str()
if not host_uuids:
host_uuids = [host.uuid]
create_sp_event(client, agent_client, context,
sp_name, sp_name, SP_CREATE, host_uuids, sp_name, **kwargs)
storage_pool = wait_for(lambda: sp_wait(client, sp_name))
assert storage_pool.state == 'active'
return storage_pool
def create_new_agent(super_client, project):
scope = 'io.cattle.platform.agent.connection.simulator' \
'.AgentConnectionSimulator'
uri = 'sim://{}'.format(random_str())
data = {scope: {}}
account_id = get_plain_id(super_client, project)
data[scope]['agentResourcesAccountId'] = account_id
data['agentResourcesAccountId'] = account_id
agent = super_client.create_agent(uri=uri, data=data)
agent = super_client.wait_success(agent)
assert agent.state == "active"
account = agent.account()
creds = filter(lambda x: x.kind == 'agentApiKey', account.credentials())
agent_client = api_client(creds[0].publicValue, creds[0].secretValue)
return agent, account, agent_client
def test_single_instance_rw_new_disks(super_client, new_context):
disks = [
{
'size': '2g',
},
{
'name': 'foo',
'size': '2g',
'root': True,
},
]
single_instance_rw_test(super_client, new_context, disks)
def test_single_instance_rw_preexisting_volume(super_client, new_context):
client = new_context.client
name = 'exists-%s' % random_str()
sp_name = 'storage-%s' % random_str()
volume = client.create_volume(name=name, driver=sp_name)
volume = client.wait_success(volume)
assert volume.state == 'inactive'
disks = [
{
'name': name,
'size': '2g',
},
]
single_instance_rw_test(super_client, new_context, disks, sp_name=sp_name)
def single_instance_rw_test(super_client, new_context, disks, sp_name=None):
client, agent_client, host = from_context(new_context)
if not sp_name:
sp_name = 'storage-%s' % random_str()
host2 = register_simulated_host(new_context)
host_uuids = [host.uuid, host2.uuid]
create_sp_event(client, agent_client, new_context, sp_name, sp_name,
SP_CREATE, host_uuids, sp_name,
access_mode='singleHostRW')
storage_pool = wait_for(lambda: sp_wait(client, sp_name))
assert storage_pool.state == 'active'
assert storage_pool.volumeAccessMode == 'singleHostRW'
vm = _create_virtual_machine(client, new_context, name=random_str(),
volumeDriver=sp_name,
userdata='hi', vcpu=2, memoryMb=42,
disks=disks)
vm = client.wait_success(vm)
assert vm.state == 'running'
svm = super_client.reload(vm)
for k, vol_id in svm.dataVolumeMounts.__dict__.iteritems():
create_mount(vol_id, vm, client, super_client)
data_volumes = []
for dv in svm.dataVolumes:
if not dv.startswith('/'):
vol_name = dv.split(':')[0]
data_volumes.append('%s:/%s' % (vol_name, vol_name))
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumes=data_volumes)
c = client.wait_transitioning(c)
assert c.transitioning == 'error'
assert c.transitioningMessage.startswith('Scheduling failed: Volume')
assert c.state == 'error'
vm = client.wait_success(vm.stop())
client.wait_success(vm.remove())
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumes=data_volumes)
c = client.wait_success(c)
assert c.state == 'running'
def _create_virtual_machine(client, context, **kw):
args = {
'accountId': context.project.id,
'imageUuid': context.image_uuid,
}
args.update(kw)
return client.create_virtual_machine(**args)
def test_single_host_rw(super_client, new_context):
client, agent_client, host = from_context(new_context)
sp_name = 'storage-%s' % random_str()
host2 = register_simulated_host(new_context)
host_uuids = [host.uuid, host2.uuid]
create_sp_event(client, agent_client, new_context, sp_name, sp_name,
SP_CREATE, host_uuids, sp_name,
access_mode='singleHostRW')
storage_pool = wait_for(lambda: sp_wait(client, sp_name))
assert storage_pool.state == 'active'
assert storage_pool.volumeAccessMode == 'singleHostRW'
# Create a volume with a driver that points to a storage pool
v1 = client.create_volume(name=random_str(), driver=sp_name)
v1 = client.wait_success(v1)
data_volume_mounts = {'/con/path': v1.id}
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumeMounts=data_volume_mounts)
c = client.wait_success(c)
assert c.state == 'running'
v1 = client.wait_success(v1)
create_mount(v1.id, c, client, super_client)
sps = v1.storagePools()
assert len(sps) == 1
assert sps[0].id == storage_pool.id
assert v1.accessMode == 'singleHostRW'
# Deactivate the host that c was deployed to
c1_host = c.hosts()[0]
if c1_host.uuid == host.uuid:
client.wait_success(host.deactivate())
else:
client.wait_success(host2.deactivate())
c2 = client.create_container(imageUuid=new_context.image_uuid,
dataVolumes=['%s:/test/it' % v1.name])
c2 = client.wait_transitioning(c2)
assert c2.transitioning == 'error'
assert c2.transitioningMessage.startswith('Scheduling failed: Volume')
assert c2.state == 'error'
c = client.wait_success(c.stop())
c3 = client.create_container(imageUuid=new_context.image_uuid,
dataVolumes=['%s:/test/it' % v1.name])
c3 = client.wait_success(c3)
def create_mount(vol_id, container, client, super_client):
mount = super_client.create_mount(volumeId=vol_id,
instanceId=container.id,
accountId=container.accountId)
mount = super_client.wait_success(mount)
return client.reload(container), mount
def test_storage_pool_update(new_context, super_client):
client = new_context.client
sp = add_storage_pool(new_context)
original_agent = super_client.list_agent(accountId=new_context.agent.id)[0]
assert super_client.reload(sp).agentId == original_agent.id
new_agent, new_agent_account, new_client = \
create_new_agent(super_client, new_context.project)
uuids = [new_context.host.uuid]
create_sp_event(client, new_client, new_context, sp.name, sp.name,
SP_CREATE, uuids, sp.name, new_agent_account)
assert super_client.wait_success(sp).agentId == new_agent.id
sp = client.wait_success(sp)
assert sp.state == 'active'
def test_storage_pool_agent_delete(new_context, super_client):
client = new_context.client
sp = add_storage_pool(new_context)
original_agent = super_client.list_agent(accountId=new_context.agent.id)[0]
original_agent = super_client.wait_success(original_agent.deactivate())
original_agent = super_client.wait_success(original_agent.remove())
sp = client.reload(sp)
assert sp.state == 'active'
def test_multiple_sp_volume_schedule(new_context):
# Tests that when a host has more than one storage pool (one local, one
# shared), and a container is scheduled to it, the root volume can be
# properly scheduled.
client = new_context.client
add_storage_pool(new_context)
# The allocation bug that caused this issue is much more likely to occur
# when two containers are created back-to-back
c = client.create_container(imageUuid=new_context.image_uuid,
networkMode=None)
c2 = client.create_container(imageUuid=new_context.image_uuid,
networkMode=None)
c = client.wait_success(c)
assert c is not None
vols = c.volumes()
assert len(vols) == 1
vol_pools = vols[0].storagePools()
assert len(vol_pools) == 1
assert vol_pools[0].kind == 'sim'
c2 = client.wait_success(c2)
assert c2 is not None
vols = c2.volumes()
assert len(vols) == 1
vol_pools = vols[0].storagePools()
assert len(vol_pools) == 1
assert vol_pools[0].kind == 'sim'
def test_finding_shared_volumes(new_context):
    # Tests that when a named volume is specified in dataVolumes and a volume
    # of that name already exists in a shared storage pool, the pre-existing
    # volume is used.
client, agent_client, host = from_context(new_context)
storage_pool = add_storage_pool(new_context)
sp_name = storage_pool.name
name = random_str()
uri = '/foo/bar'
create_volume_event(client, agent_client, new_context, VOLUME_CREATE,
name, driver=sp_name, uri=uri)
volume = wait_for(lambda: volume_wait(client, name))
volume = wait_for(lambda: volume_in_sp(client, volume, storage_pool))
path = '/container/path'
# Previously created volume should show up in dataVolumeMounts
data_volumes = ['%s:%s' % (name, path)]
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumes=data_volumes)
c = client.wait_success(c)
assert c.state == 'running'
assert c.dataVolumeMounts[path] == volume.id
# Same behavior if volumeDriver == local
c = client.create_container(imageUuid=new_context.image_uuid,
volumeDriver='local',
dataVolumes=data_volumes)
c = client.wait_success(c)
assert c.state == 'running'
assert c.dataVolumeMounts[path] == volume.id
# Create another storage pool and add a volume of the same name to it
storage_pool = add_storage_pool(new_context)
sp_name2 = storage_pool.name
uri = '/foo/bar'
create_volume_event(client, agent_client, new_context, VOLUME_CREATE,
name, driver=sp_name2, uri=uri)
volume2 = wait_for(lambda: volume_in_sp_by_name_wait(name, storage_pool))
assert volume2.id != volume.id
# Container should not create successfully because name is ambiguous
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumes=data_volumes)
with pytest.raises(ClientApiError):
client.wait_success(c)
# Even if the volume driver is specified, should fail
c = client.create_container(imageUuid=new_context.image_uuid,
volumeDriver=sp_name2,
dataVolumes=data_volumes)
with pytest.raises(ClientApiError):
client.wait_success(c)
def test_data_volume_mounts(new_context):
client, agent_client, host = from_context(new_context)
storage_pool = add_storage_pool(new_context)
sp_name = storage_pool.name
external_id = random_str()
uri = '/foo/bar'
create_volume_event(client, agent_client, new_context, VOLUME_CREATE,
external_id, driver=sp_name, uri=uri)
volume = wait_for(lambda: volume_wait(client, external_id))
volume = wait_for(lambda: volume_in_sp(client, volume, storage_pool))
data_volume_mounts = {'/somedir': volume.id}
c = client.create_container(imageUuid=new_context.image_uuid,
volumeDriver='local',
dataVolumeMounts=data_volume_mounts)
c = client.wait_success(c, timeout=240)
assert c.state == 'running'
assert c.dataVolumes[0] == '%s:/somedir' % external_id
def test_volume_create(new_context):
client, agent_client, host = from_context(new_context)
storage_pool = add_storage_pool(new_context)
sp_name = storage_pool.name
add_storage_pool(new_context)
# Create a volume with a driver that points to a storage pool
v1 = client.create_volume(name=random_str(), driver=sp_name)
v1 = client.wait_success(v1)
sps = v1.storagePools()
assert len(sps) == 1
assert sps[0].id == storage_pool.id
# Create a volume with a driver that cattle doesn't know about
v2 = client.create_volume(name=random_str(), driver='driver-%s' %
random_str())
v2 = client.wait_success(v2)
data_volume_mounts = {'/con/path': v1.id,
'/con/path2': v2.id}
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumeMounts=data_volume_mounts)
c = client.wait_success(c)
assert c.state == 'running'
v1 = client.wait_success(v1)
sps = v1.storagePools()
assert len(sps) == 1
assert sps[0].id == storage_pool.id
v2 = client.wait_success(v2)
sps = v2.storagePools()
assert len(sps) == 1
assert sps[0].kind == 'sim'
# Create a new, unmapped volume, assign to container via dataVolumes
# Should be translated to a dataVolumeMount entry.
v3 = client.create_volume(name=random_str(), driver=sp_name)
v3 = client.wait_success(v3)
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumes=['%s:/foo' % v3.name])
c = client.wait_success(c)
assert c.state == 'running'
assert c.dataVolumeMounts['/foo'] == v3.id
v3 = client.wait_success(v3)
sps = v3.storagePools()
assert len(sps) == 1
assert sps[0].id == storage_pool.id
def create_and_map_volume(client, context):
name = random_str()
v = client.create_volume(name=name, driver='local')
v = client.wait_success(v)
c = client.wait_success(client.create_container(
imageUuid=context.image_uuid,
dataVolumeMounts={'/foo': v.id}))
assert c.state == 'running'
assert c.dataVolumeMounts['/foo'] == v.id
return name, v
def test_volume_create_failed_allocation(new_context):
client, agent_client, host = from_context(new_context)
storage_pool = add_storage_pool(new_context)
sp_name = storage_pool.name
add_storage_pool(new_context)
v1 = client.wait_success(client.create_volume(name=random_str(),
driver=sp_name))
assert v1.state == 'inactive'
# Will fail because new_host is not in the storage_pool that v1 belongs to
new_host = register_simulated_host(new_context)
data_volume_mounts = {'/con/path': v1.id}
with pytest.raises(ClientApiError) as e:
c = client.create_container(imageUuid=new_context.image_uuid,
requestedHostId=new_host.id,
dataVolumeMounts=data_volume_mounts)
client.wait_success(c)
assert 'must have exactly these pool' in e.value.message
# Put two volumes from mutually exclusive storage pools onto a container
# and it should fail to find placement
sp2 = add_storage_pool(new_context, [new_host.uuid])
v2 = client.create_volume(name=random_str(), driver=sp2.name)
v2 = client.wait_success(v2)
assert v1.state == 'inactive'
data_volume_mounts['/con/path2'] = v2.id
with pytest.raises(ClientApiError) as e:
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumeMounts=data_volume_mounts)
client.wait_success(c)
assert e.value.message.startswith('Scheduling failed')
def test_external_volume_event(super_client, new_context):
client, agent_client, host = from_context(new_context)
storage_pool = add_storage_pool(new_context)
sp_name = storage_pool.name
external_id = random_str()
uri = '/foo/bar'
create_volume_event(client, agent_client, new_context, VOLUME_CREATE,
external_id, driver=sp_name, uri=uri)
volume = wait_for(lambda: volume_wait(client, external_id))
volume = wait_for(lambda: volume_in_sp(client, volume, storage_pool))
assert volume.state == 'inactive'
assert volume.externalId == external_id
assert volume.name == external_id
assert volume.driver == sp_name
assert volume.uri == uri
assert volume.isHostPath is False
super_volume = super_client.by_id('volume', volume.id)
assert super_volume.deviceNumber == -1
assert super_volume.format == 'docker'
# Send event again to ensure two volumes are not created
create_volume_event(client, agent_client, new_context,
VOLUME_CREATE, external_id, driver=sp_name, uri=uri)
volumes = client.list_volume(externalId=external_id)
assert len(volumes) == 1
def test_external_storage_pool_event(new_context):
client, agent_client, host = from_context(new_context)
sp_name = 'convoy-%s' % random_str()
# Create a new storage pool with a single host
uuids = [host.uuid]
create_sp_event(client, agent_client, new_context,
sp_name, sp_name, SP_CREATE, uuids, sp_name)
storage_pool = wait_for(lambda: sp_wait(client, sp_name))
assert storage_pool.state == 'active'
assert storage_pool.externalId == sp_name
assert storage_pool.name == sp_name
assert storage_pool.driverName == sp_name
hosts = wait_for(lambda: wait_host_count(storage_pool, 1))
assert len(hosts) == 1
assert hosts[0].uuid == host.uuid
# Send event again to ensure a second storage pool is not created
create_sp_event(client, agent_client, new_context,
sp_name, sp_name, SP_CREATE, uuids, sp_name)
# Add a second host
host2 = register_simulated_host(new_context)
uuids.append(host2.uuid)
create_sp_event(client, agent_client, new_context,
sp_name,
sp_name, SP_CREATE, uuids, sp_name)
hosts = wait_for(lambda: wait_host_count(storage_pool, 2))
host_ids = [h.id for h in hosts]
assert host.id in host_ids
assert host2.id in host_ids
# Remove a host
uuids.pop(0)
create_sp_event(client, agent_client, new_context,
sp_name,
sp_name, SP_CREATE, uuids, sp_name)
hosts = wait_for(lambda: wait_host_count(storage_pool, 1))
    assert hosts[0].id == host2.id
# Send empty host list
uuids = []
create_sp_event(client, agent_client, new_context,
sp_name,
sp_name, SP_CREATE, uuids, sp_name)
hosts = wait_for(lambda: wait_host_count(storage_pool, 0))
assert len(hosts) == 0
def create_volume_event(client, agent_client, context, event_type,
external_id, driver=None, uri=None):
vol_event = {
'externalId': external_id,
'eventType': event_type,
'volume': {
'externalId': external_id,
'name': external_id,
'driver': driver,
'uri': uri,
'format': 'docker',
'isHostPath': False,
}
}
event = agent_client.create_external_volume_event(vol_event)
assert event.externalId == external_id
assert event.eventType == event_type
event = wait_for(lambda: event_wait(client, event))
assert event.accountId == context.project.id
assert event.reportedAccountId == context.agent.id
return event
def create_sp_event(client, agent_client, context, external_id, name,
event_type, host_uuids, driver_name, agent_account=None,
access_mode=None, block_device_path=None,
volume_capabilities=None):
storage_pool = {
'name': name,
'externalId': external_id,
'driverName': driver_name,
}
if access_mode is not None:
storage_pool['volumeAccessMode'] = access_mode
if block_device_path is not None:
storage_pool['blockDevicePath'] = block_device_path
if volume_capabilities is not None:
storage_pool['volumeCapabilities'] = volume_capabilities
event = agent_client.create_external_storage_pool_event(
externalId=external_id,
eventType=event_type,
hostUuids=host_uuids,
storagePool=storage_pool)
assert event.externalId == external_id
assert event.eventType == event_type
assert event.hostUuids == host_uuids
event = wait_for(lambda: event_wait(client, event))
assert event.accountId == context.project.id
if agent_account:
assert event.reportedAccountId == agent_account.id
else:
assert event.reportedAccountId == context.agent.id
return event
def sp_wait(client, external_id):
storage_pools = client.list_storage_pool(externalId=external_id)
if len(storage_pools) and storage_pools[0].state == 'active':
return storage_pools[0]
def volume_in_sp_by_name_wait(name, storage_pool):
volumes = storage_pool.volumes(name=name)
if len(volumes) and volumes[0].state == 'inactive':
return volumes[0]
def volume_wait(client, external_id):
volumes = client.list_volume(externalId=external_id)
if len(volumes) and volumes[0].state == 'inactive':
return volumes[0]
def wait_host_count(storage_pool, count):
new_hosts = storage_pool.hosts()
if len(new_hosts) == count:
return new_hosts
def volume_in_sp(client, volume, storage_pool):
volumes = storage_pool.volumes()
if len(volumes) > 0:
for v in volumes:
if v.id == volume.id:
return volume
def event_wait(client, event):
created = client.by_id('externalEvent', event.id)
if created is not None and created.state == 'created':
return created
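# Illustrative sketch of the wait_for()/sp_wait() polling pattern used by the
# tests above. The real wait_for comes from common (not shown here); the
# timeout and interval defaults below are assumptions, and this helper is not
# used by the tests themselves.
def _wait_for_sketch(check, timeout=120, interval=0.5):
    import time
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = check()
        if result:
            return result
        time.sleep(interval)
    raise AssertionError('Timed out waiting for condition')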
|
|
import os
import simplejson
from datetime import datetime, timedelta
from uuid import uuid4
from django.conf import settings
import boto
from celery.decorators import task
from celery.task import PeriodicTask
from pdf.models import Document
BOOTSTRAP_SCRIPT = """#!/bin/bash
apt-get update
apt-get install -y imagemagick
python -c "import os
import json
from datetime import datetime
from subprocess import Popen, PIPE
from time import sleep
from uuid import uuid4
import boto
KEY = '%(KEY)s'
SECRET = '%(SECRET)s'
request_queue = boto.connect_sqs(KEY, SECRET).create_queue('%(REQUEST_QUEUE)s')
response_queue = boto.connect_sqs(KEY, SECRET).create_queue('%(RESPONSE_QUEUE)s')
count = 0
def read_json_pointer_message():
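    # Pop a pointer message from SQS, then load the full JSON job description
    # from the S3 bucket/key named in the pointer (the web app writes it via
    # queue_json_message below). Returning None means the queue was empty.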
m = request_queue.read(3600) # Give the job an hour to run, plenty of time to avoid double-runs
if m is not None:
pointer = json.loads(m.get_body())
k = boto.connect_s3(KEY, SECRET).get_bucket(pointer['bucket']).get_key(pointer['key'])
data = json.loads(k.get_contents_as_string())
data['pointer'] = m
return data
def delete_json_pointer_message(data):
request_queue.delete_message(data['pointer'])
def write_json_pointer_message(data, bucket, key_name, base_key):
b = boto.connect_s3(KEY, SECRET).get_bucket(bucket)
k = b.new_key(base_key.replace(os.path.basename(base_key), key_name))
k.set_contents_from_string(json.dumps(data))
response_message = {'bucket': b.name, 'key': k.name}
message = response_queue.new_message(body=json.dumps(response_message))
response_queue.write(message)
def download(bucket, key, local_file):
b = boto.connect_s3(KEY, SECRET).get_bucket(bucket)
k = b.get_key(key)
k.get_contents_to_filename(local_file)
def upload_file(local_file, bucket, key, public=False):
b = boto.connect_s3(KEY, SECRET).get_bucket(bucket)
k = b.new_key(key)
k.set_contents_from_filename(local_file)
if public:
k.set_acl('public-read')
def get_tstamp():
return datetime.utcnow().isoformat(' ').split('.')[0]
while True:
request_data = read_json_pointer_message()
start = get_tstamp()
if request_data is None:
count += 1
if count > 10:
break
else:
sleep(5)
else:
RUN_ID = str(uuid4())
WORKING_PATH = '/mnt/' + RUN_ID
try:
os.makedirs(WORKING_PATH)
except:
pass
count = 0
try:
try:
bname = request_data['bucket']
kname = request_data['key']
doc_uuid = request_data['uuid']
local_filename = os.path.join(WORKING_PATH, os.path.basename(kname))
output = os.path.join(WORKING_PATH, 'page.png')
cmd = 'convert -density 400 ' + local_filename + ' ' + output
start_data = {'status': 'P', 'uuid': doc_uuid, 'now': start}
write_json_pointer_message(start_data, bucket=bname, key_name='start.json', base_key=kname)
download(bname, kname, local_filename)
p = Popen(cmd.split(), stdout=PIPE, stderr=PIPE)
rc = p.wait()
images = [f for f in os.listdir(WORKING_PATH) if f.endswith('png')]
for image in images:
new_key_name = kname.replace(os.path.basename(kname), image)
local_image = os.path.join(WORKING_PATH, image)
upload_file(local_image, bname, new_key_name, public=True)
data = {'status': 'F', 'uuid': doc_uuid, 'pages': len(images), 'now': get_tstamp()}
write_json_pointer_message(data, bucket=bname, key_name='results.json', base_key=kname)
except:
import sys, traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
e = traceback.format_exception(exc_type, exc_value, exc_traceback)
e = ''.join(e)
data = {'status': 'E', 'uuid': doc_uuid, 'exception': str(e), 'now': get_tstamp()}
write_json_pointer_message(data, bucket=bname, key_name='error.json', base_key=kname)
except Exception, e:
pass
delete_json_pointer_message(request_data)
"
/sbin/shutdown now -h
"""
REQUEST_QUEUE = getattr(settings, "PDF_REQUEST_QUEUE", "pdf_requests")
RESPONSE_QUEUE = getattr(settings, "PDF_RESPONSE_QUEUE", "pdf_responses")
ACL = getattr(settings, "PDF_AWS_ACL", "public-read")
AMI_ID = getattr(settings, "PDF_AMI_ID", "ami-bb709dd2")
KEYPAIR = getattr(settings, "PDF_KEYPAIR_NAME", None)
MAX_INSTANCES = getattr(settings, 'PDF_MAX_NODES', 20)
SECURITY_GROUPS = getattr(settings, 'PDF_SECURITY_GROUPS', None)
def queue_json_message(doc, doc_key):
key_name = doc_key.name.replace(os.path.basename(doc_key.name), "message-%s.json" % str(uuid4()))
key = doc_key.bucket.new_key(key_name)
message_data = simplejson.dumps({'bucket': doc_key.bucket.name, 'key': doc_key.name, 'uuid': doc.uuid})
key.set_contents_from_string(message_data)
msg_body = {'bucket': key.bucket.name, 'key': key.name}
queue = boto.connect_sqs(settings.PDF_AWS_KEY, settings.PDF_AWS_SECRET).create_queue(REQUEST_QUEUE)
msg = queue.new_message(body=simplejson.dumps(msg_body))
queue.write(msg)
def upload_file_to_s3(doc):
file_path = doc.local_document.path
b = boto.connect_s3(settings.PDF_AWS_KEY, settings.PDF_AWS_SECRET).get_bucket(settings.PDF_UPLOAD_BUCKET)
name = '%s/%s' % (doc.uuid, os.path.basename(file_path))
k = b.new_key(name)
k.set_contents_from_filename(file_path)
k.set_acl(ACL)
return k
@task
def process_file(doc):
"""Transfer uploaded file to S3 and queue up message to process PDF."""
key = upload_file_to_s3(doc)
doc.remote_document = "http://%s.s3.amazonaws.com/%s" % (key.bucket.name, key.name)
doc.date_stored = datetime.utcnow()
doc.status = 'S'
doc.save()
queue_json_message(doc, key)
doc.status = 'Q'
doc.date_queued = datetime.utcnow()
doc.save()
return True
class CheckResponseQueueTask(PeriodicTask):
"""
Checks response queue for messages returned from running processes in the
cloud. The messages are read and corresponding `pdf.models.Document`
records are updated.
"""
run_every = timedelta(seconds=30)
def _dequeue_json_message(self):
sqs = boto.connect_sqs(settings.PDF_AWS_KEY, settings.PDF_AWS_SECRET)
queue = sqs.create_queue(RESPONSE_QUEUE)
msg = queue.read()
if msg is not None:
data = simplejson.loads(msg.get_body())
bucket = data.get('bucket', None)
key = data.get("key", None)
queue.delete_message(msg)
if bucket is not None and key is not None:
return data
def run(self, **kwargs):
logger = self.get_logger(**kwargs)
logger.info("Running periodic task!")
data = self._dequeue_json_message()
if data is not None:
Document.process_response(data)
return True
return False
class CheckQueueLevelsTask(PeriodicTask):
"""
Checks the number of messages in the queue and compares it with the number
of instances running, only booting nodes if the number of queued messages
    exceeds the number of nodes running.
"""
run_every = timedelta(seconds=60)
def run(self, **kwargs):
ec2 = boto.connect_ec2(settings.PDF_AWS_KEY, settings.PDF_AWS_SECRET)
sqs = boto.connect_sqs(settings.PDF_AWS_KEY, settings.PDF_AWS_SECRET)
queue = sqs.create_queue(REQUEST_QUEUE)
num = queue.count()
launched = 0
icount = 0
reservations = ec2.get_all_instances()
for reservation in reservations:
for instance in reservation.instances:
if instance.state == "running" and instance.image_id == AMI_ID:
icount += 1
to_boot = min(num - icount, MAX_INSTANCES)
if to_boot > 0:
startup = BOOTSTRAP_SCRIPT % {
'KEY': settings.PDF_AWS_KEY,
'SECRET': settings.PDF_AWS_SECRET,
'RESPONSE_QUEUE': RESPONSE_QUEUE,
'REQUEST_QUEUE': REQUEST_QUEUE}
r = ec2.run_instances(
image_id=AMI_ID,
min_count=to_boot,
max_count=to_boot,
key_name=KEYPAIR,
security_groups=SECURITY_GROUPS,
user_data=startup)
launched = len(r.instances)
return launched
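# Worked example of the boot calculation in CheckQueueLevelsTask above (a
# sketch only, not used by the task): with 12 queued messages, 3 matching
# running instances and PDF_MAX_NODES=20, min(12 - 3, 20) == 9 nodes are
# launched; when running instances already cover the queue, to_boot <= 0 and
# nothing is started.
def _instances_to_boot_sketch(queued, running, max_instances=MAX_INSTANCES):
    return max(0, min(queued - running, max_instances))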
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import six
from webob.util import status_reasons
from nova import context
from nova import exception
from nova import test
class FakeNotifier(object):
"""Acts like messaging.Notifier."""
def __init__(self):
self.provided_context = None
self.provided_event = None
self.provided_payload = None
def error(self, context, event, payload):
self.provided_context = context
self.provided_event = event
self.provided_payload = payload
def good_function(self, context):
return 99
def bad_function_exception(self, context, extra, blah="a", boo="b", zoo=None):
raise test.TestingException()
class WrapExceptionTestCase(test.NoDBTestCase):
def test_wrap_exception_good_return(self):
wrapped = exception.wrap_exception('foo')
self.assertEqual(99, wrapped(good_function)(1, 2))
def test_wrap_exception_with_notifier(self):
notifier = FakeNotifier()
wrapped = exception.wrap_exception(notifier)
ctxt = context.get_admin_context()
self.assertRaises(test.TestingException,
wrapped(bad_function_exception), 1, ctxt, 3, zoo=3)
self.assertEqual("bad_function_exception", notifier.provided_event)
self.assertEqual(notifier.provided_context, ctxt)
self.assertEqual(3, notifier.provided_payload['args']['extra'])
for key in ['exception', 'args']:
self.assertIn(key, notifier.provided_payload.keys())
class NovaExceptionTestCase(test.NoDBTestCase):
def test_default_error_msg(self):
class FakeNovaException(exception.NovaException):
msg_fmt = "default message"
exc = FakeNovaException()
self.assertEqual('default message', six.text_type(exc))
def test_error_msg(self):
self.assertEqual('test',
six.text_type(exception.NovaException('test')))
def test_default_error_msg_with_kwargs(self):
class FakeNovaException(exception.NovaException):
msg_fmt = "default message: %(code)s"
exc = FakeNovaException(code=500)
self.assertEqual('default message: 500', six.text_type(exc))
self.assertEqual('default message: 500', exc.message)
def test_error_msg_exception_with_kwargs(self):
class FakeNovaException(exception.NovaException):
msg_fmt = "default message: %(misspelled_code)s"
exc = FakeNovaException(code=500, misspelled_code='blah')
self.assertEqual('default message: blah', six.text_type(exc))
self.assertEqual('default message: blah', exc.message)
def test_default_error_code(self):
class FakeNovaException(exception.NovaException):
code = 404
exc = FakeNovaException()
self.assertEqual(404, exc.kwargs['code'])
def test_error_code_from_kwarg(self):
class FakeNovaException(exception.NovaException):
code = 500
exc = FakeNovaException(code=404)
self.assertEqual(exc.kwargs['code'], 404)
def test_cleanse_dict(self):
kwargs = {'foo': 1, 'blah_pass': 2, 'zoo_password': 3, '_pass': 4}
self.assertEqual({'foo': 1}, exception._cleanse_dict(kwargs))
kwargs = {}
self.assertEqual({}, exception._cleanse_dict(kwargs))
def test_format_message_local(self):
class FakeNovaException(exception.NovaException):
msg_fmt = "some message"
exc = FakeNovaException()
self.assertEqual(six.text_type(exc), exc.format_message())
def test_format_message_remote(self):
class FakeNovaException_Remote(exception.NovaException):
msg_fmt = "some message"
if six.PY3:
def __str__(self):
return "print the whole trace"
else:
def __unicode__(self):
return u"print the whole trace"
exc = FakeNovaException_Remote()
self.assertEqual(u"print the whole trace", six.text_type(exc))
self.assertEqual("some message", exc.format_message())
def test_format_message_remote_error(self):
class FakeNovaException_Remote(exception.NovaException):
msg_fmt = "some message %(somearg)s"
def __unicode__(self):
return u"print the whole trace"
self.flags(fatal_exception_format_errors=False)
exc = FakeNovaException_Remote(lame_arg='lame')
self.assertEqual("some message %(somearg)s", exc.format_message())
class ConvertedExceptionTestCase(test.NoDBTestCase):
def test_instantiate(self):
exc = exception.ConvertedException(400, 'Bad Request', 'reason')
self.assertEqual(exc.code, 400)
self.assertEqual(exc.title, 'Bad Request')
self.assertEqual(exc.explanation, 'reason')
def test_instantiate_without_title_known_code(self):
exc = exception.ConvertedException(500)
self.assertEqual(exc.title, status_reasons[500])
def test_instantiate_without_title_unknown_code(self):
exc = exception.ConvertedException(499)
self.assertEqual(exc.title, 'Unknown Client Error')
def test_instantiate_bad_code(self):
self.assertRaises(KeyError, exception.ConvertedException, 10)
class ExceptionTestCase(test.NoDBTestCase):
@staticmethod
def _raise_exc(exc):
raise exc(500)
def test_exceptions_raise(self):
# NOTE(dprince): disable format errors since we are not passing kwargs
self.flags(fatal_exception_format_errors=False)
for name in dir(exception):
exc = getattr(exception, name)
if isinstance(exc, type):
self.assertRaises(exc, self._raise_exc, exc)
class ExceptionValidMessageTestCase(test.NoDBTestCase):
def test_messages(self):
failures = []
for name, obj in inspect.getmembers(exception):
if name in ['NovaException', 'InstanceFaultRollback']:
continue
if not inspect.isclass(obj):
continue
if not issubclass(obj, exception.NovaException):
continue
e = obj
if e.msg_fmt == "An unknown exception occurred.":
failures.append('%s needs a more specific msg_fmt' % name)
if failures:
self.fail('\n'.join(failures))
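# Standalone sketch (not Nova's actual implementation) of the formatting
# contract the NovaException tests above exercise: msg_fmt is interpolated
# with the constructor kwargs, a class-level `code` default is folded into
# kwargs, and a failed interpolation falls back to the raw msg_fmt, mirroring
# fatal_exception_format_errors=False. Not used by the tests above.
class _FakeExceptionSketch(Exception):
    msg_fmt = "default message: %(code)s"
    code = 500

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs
        self.kwargs.setdefault('code', self.code)
        if not message:
            try:
                message = self.msg_fmt % self.kwargs
            except KeyError:
                # Missing keys behave like a disabled fatal format error.
                message = self.msg_fmt
        self.message = message
        super(_FakeExceptionSketch, self).__init__(message)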
|
|
from model.contact import Contact
import re
import time
class ContactHelper:
def __init__(self, app):
self.app = app
def create(self, contact):
wd = self.app.wd
# init contact creation
wd.find_element_by_link_text("add new").click()
self.fill_contact_form(contact)
# submit contact creation
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
wd.find_element_by_link_text("home page").click()
self.contact_cash = None
def fill_contact_form(self, contact):
wd = self.app.wd
self.change_field_value("firstname", contact.firstname)
self.change_field_value("middlename", contact.middlename)
self.change_field_value("lastname", contact.lastname)
self.change_field_value("nickname", contact.nickname)
self.change_field_value("title", contact.title)
self.change_field_value("company", contact.company)
self.change_field_value("address", contact.address)
self.change_field_value("home", contact.telhome)
self.change_field_value("mobile", contact.telmobile)
self.change_field_value("work", contact.telwork)
self.change_field_value("fax", contact.fax)
self.change_field_value("email", contact.email)
self.change_field_value("email2", contact.email2)
self.change_field_value("email3", contact.email3)
self.change_field_value("homepage", contact.homepage)
if not wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[18]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[18]").click()
if not wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[2]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[2]").click()
self.change_field_value("byear", contact.birth)
if not wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[18]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[18]").click()
if not wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[2]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[2]").click()
self.change_field_value("ayear", contact.anniversary)
self.change_field_value("address2", contact.address2)
self.change_field_value("phone2", contact.home2)
self.change_field_value("notes", contact.notes)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def select_contact_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_xpath("//img[@title='Edit']")[index].click()
def select_contact_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def modify_contact_by_index(self, index, new_contact_data):
wd = self.app.wd
# open modification form
self.select_contact_by_index(index)
        # fill contact form
self.fill_contact_form(new_contact_data)
# submit modification
wd.find_element_by_name("update").click()
wd.find_element_by_link_text("home").click()
self.contact_cash = None
def modify_contact_by_id(self, id, new_contact_data):
wd = self.app.wd
# open modification form
self.open_contact_to_edit_by_id(id)
        # fill contact form
self.fill_contact_form(new_contact_data)
# submit modification
wd.find_element_by_name("update").click()
wd.find_element_by_link_text("home").click()
time.sleep(3)
self.contact_cash = None
def edit_contact(self, contact):
wd = self.app.wd
        # init contact editing
wd.find_element_by_xpath("//table[@id='maintable']/tbody/tr[2]/td[8]/a/img").click()
# edit contact form
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(contact.firstname)
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys(contact.middlename)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(contact.lastname)
wd.find_element_by_name("nickname").click()
wd.find_element_by_name("nickname").clear()
wd.find_element_by_name("nickname").send_keys(contact.nickname)
wd.find_element_by_name("company").click()
wd.find_element_by_name("company").clear()
wd.find_element_by_name("company").send_keys(contact.company)
wd.find_element_by_name("title").click()
wd.find_element_by_name("title").clear()
wd.find_element_by_name("title").send_keys(contact.title)
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").clear()
wd.find_element_by_name("address").send_keys(contact.address)
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys(contact.telhome)
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").clear()
wd.find_element_by_name("mobile").send_keys(contact.telmobile)
wd.find_element_by_name("work").click()
wd.find_element_by_name("work").clear()
wd.find_element_by_name("work").send_keys(contact.telwork)
wd.find_element_by_name("fax").click()
wd.find_element_by_name("fax").clear()
wd.find_element_by_name("fax").send_keys(contact.fax)
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys(contact.email)
wd.find_element_by_name("email2").click()
wd.find_element_by_name("email2").clear()
wd.find_element_by_name("email2").send_keys(contact.email2)
wd.find_element_by_name("email3").click()
wd.find_element_by_name("email3").clear()
wd.find_element_by_name("email3").send_keys(contact.email3)
wd.find_element_by_name("homepage").click()
wd.find_element_by_name("homepage").clear()
wd.find_element_by_name("homepage").send_keys(contact.homepage)
if not wd.find_element_by_xpath("//div[@id='content']/form[1]/select[2]//option[4]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form[1]/select[2]//option[4]").click()
if not wd.find_element_by_xpath("//div[@id='content']/form[1]/select[4]//option[4]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form[1]/select[4]//option[4]").click()
wd.find_element_by_name("address2").click()
wd.find_element_by_name("address2").clear()
wd.find_element_by_name("address2").send_keys(contact.address2)
wd.find_element_by_name("phone2").click()
wd.find_element_by_name("phone2").clear()
wd.find_element_by_name("phone2").send_keys(contact.home2)
wd.find_element_by_name("notes").click()
wd.find_element_by_name("notes").clear()
wd.find_element_by_name("notes").send_keys(contact.notes)
# submit contact editing
wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
wd.find_element_by_link_text("home").click()
self.contact_cash = None
def delete_first_contact(self):
self.delete_contact_by_index(0)
def delete_contact_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
wd.find_element_by_link_text("home").click()
self.contact_cash = None
def delete_contact_by_id(self, id):
wd = self.app.wd
self.select_contact_by_id(id)
wd.find_element_by_css_selector("input[value='Delete']").click()
wd.switch_to_alert().accept()
wd.find_element_by_link_text("home").click()
self.contact_cash = None
def count(self):
wd = self.app.wd
        if not (wd.current_url.endswith("/addressbook/") and len(wd.find_elements_by_name("searchform")) > 0):
wd.get("http://localhost/addressbook/")
return len(wd.find_elements_by_name("selected[]"))
contact_cash = None
"""def get_contact_list(self):
if self.contact_cash is None:
wd = self.app.wd
self.contact_cash = []
for element in wd.find_elements_by_name("entry"):
cells = element.find_elements_by_tag_name("td")
first_name = cells[2].text
last_name = cells[1].text
address = cells[3].text
id = element.find_element_by_tag_name("input").get_attribute("id")
all_phones = cells[5].text
all_emails = cells[4].text
self.contact_cash.append(Contact(firstname=first_name, lastname=last_name, id=id, address=address,
all_phones_from_home_page=all_phones,
all_emails_from_home_page=all_emails))
return list(self.contact_cash)"""
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
firstname = wd.find_element_by_name("firstname").get_attribute("value")
        lastname = wd.find_element_by_name("lastname").get_attribute("value")
address = wd.find_element_by_name("address").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
telhome = wd.find_element_by_name("home").get_attribute("value")
telwork = wd.find_element_by_name("work").get_attribute("value")
telmobile = wd.find_element_by_name("mobile").get_attribute("value")
home2 = wd.find_element_by_name("phone2").get_attribute("value")
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
        return Contact(firstname=firstname, lastname=lastname, id=id, address=address,
telhome=telhome, telwork=telwork, telmobile=telmobile, home2=home2,
email=email, email2=email2, email3=email3)
def open_contact_view_by_index(self, index):
wd = self.app.wd
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[6]
cell.find_element_by_tag_name("a").click()
def open_contact_to_edit_by_index(self, index):
wd = self.app.wd
self.app.open_home_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[7]
cell.find_element_by_tag_name("a").click()
def open_contact_to_edit_by_id(self, id):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_xpath("//input[@id='%s']" % id).click()
wd.find_element_by_xpath("//a[@href='edit.php?id=%s']/img[@title='Edit']" % id).click()
def get_contact_from_view_page(self, index):
wd = self.app.wd
self.open_contact_view_by_index(index)
text = wd.find_element_by_id("content").text
telhome = re.search("H: (.*)", text).group(1)
telwork = re.search("W: (.*)", text).group(1)
telmobile = re.search("M: (.*)", text).group(1)
home2 = re.search("P: (.*)", text).group(1)
return Contact(telhome=telhome, telwork=telwork,
telmobile=telmobile, home2=home2)
def add_contact_to_group_by_id(self, id):
wd = self.app.wd
self.select_contact_by_id(id)
wd.find_element_by_css_selector("input[value='Add to']").click()
wd.find_element_by_css_selector("i a").click()
def del_contact_from_group_by_id(self, id):
wd = self.app.wd
wd.find_element_by_xpath("//input[@id='%s']" % id).click()
wd.find_element_by_xpath("//a[@href='view.php?id=%s']/img[@title='Details']" % id).click()
wd.find_element_by_css_selector("i a").click()
self.select_contact_by_id(id)
wd.find_element_by_css_selector("input[name='remove']").click()
wd.find_element_by_css_selector("i a").click()
time.sleep(3)
def filter_not_in_group_contact(self):
wd = self.app.wd
wd.find_element_by_xpath("//select[@name='group']/option[@value='[none]']").click()
def get_contact_list(self):
wd = self.app.wd
contacts = []
for element in wd.find_elements_by_name("entry"):
cells = element.find_elements_by_tag_name("td")
first_name = cells[2].text
last_name = cells[1].text
id = element.find_element_by_tag_name("input").get_attribute("id")
contacts.append(Contact(firstname=first_name, lastname=last_name, id=id))
return contacts
def remove_contact_from_group_by_id(self, id):
wd = self.app.wd
self.select_contact_by_id(id)
wd.find_element_by_css_selector("input[name='remove']").click()
wd.find_element_by_css_selector("i a").click()
time.sleep(3)
|
|
"""Config flow for Network UPS Tools (NUT) integration."""
import logging
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import (
CONF_ALIAS,
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_RESOURCES,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from . import PyNUTData, find_resources_in_config_entry
from .const import (
DEFAULT_HOST,
DEFAULT_PORT,
DEFAULT_SCAN_INTERVAL,
KEY_STATUS,
KEY_STATUS_DISPLAY,
SENSOR_NAME,
SENSOR_TYPES,
)
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
SENSOR_DICT = {
sensor_id: sensor_spec[SENSOR_NAME]
for sensor_id, sensor_spec in SENSOR_TYPES.items()
}
def _base_schema(discovery_info):
"""Generate base schema."""
base_schema = {}
if not discovery_info:
base_schema.update(
{
vol.Optional(CONF_HOST, default=DEFAULT_HOST): str,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): int,
}
)
base_schema.update(
{vol.Optional(CONF_USERNAME): str, vol.Optional(CONF_PASSWORD): str}
)
return vol.Schema(base_schema)
def _resource_schema_base(available_resources, selected_resources):
"""Resource selection schema."""
known_available_resources = {
sensor_id: sensor[SENSOR_NAME]
for sensor_id, sensor in SENSOR_TYPES.items()
if sensor_id in available_resources
}
if KEY_STATUS in known_available_resources:
known_available_resources[KEY_STATUS_DISPLAY] = SENSOR_TYPES[
KEY_STATUS_DISPLAY
][SENSOR_NAME]
return {
vol.Required(CONF_RESOURCES, default=selected_resources): cv.multi_select(
known_available_resources
)
}
def _ups_schema(ups_list):
"""UPS selection schema."""
return vol.Schema({vol.Required(CONF_ALIAS): vol.In(ups_list)})
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from _base_schema with values provided by the user.
"""
host = data[CONF_HOST]
port = data[CONF_PORT]
alias = data.get(CONF_ALIAS)
username = data.get(CONF_USERNAME)
password = data.get(CONF_PASSWORD)
data = PyNUTData(host, port, alias, username, password)
await hass.async_add_executor_job(data.update)
status = data.status
if not status:
raise CannotConnect
return {"ups_list": data.ups_list, "available_resources": status}
def _format_host_port_alias(user_input):
"""Format a host, port, and alias so it can be used for comparison or display."""
host = user_input[CONF_HOST]
port = user_input[CONF_PORT]
alias = user_input.get(CONF_ALIAS)
if alias:
return f"{alias}@{host}:{port}"
return f"{host}:{port}"
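# A quick illustration of the formatting above, with hypothetical values:
#   _format_host_port_alias({CONF_HOST: "192.168.1.10", CONF_PORT: 3493}) -> "192.168.1.10:3493"
#   _format_host_port_alias({CONF_HOST: "192.168.1.10", CONF_PORT: 3493, CONF_ALIAS: "ups1"}) -> "ups1@192.168.1.10:3493"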
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Network UPS Tools (NUT)."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize the nut config flow."""
self.nut_config = {}
self.available_resources = {}
self.discovery_info = {}
self.ups_list = None
self.title = None
async def async_step_zeroconf(self, discovery_info):
"""Prepare configuration for a discovered nut device."""
self.discovery_info = discovery_info
await self._async_handle_discovery_without_unique_id()
self.context["title_placeholders"] = {
CONF_PORT: discovery_info.get(CONF_PORT, DEFAULT_PORT),
CONF_HOST: discovery_info[CONF_HOST],
}
return await self.async_step_user()
async def async_step_user(self, user_input=None):
"""Handle the user input."""
errors = {}
if user_input is not None:
if self.discovery_info:
user_input.update(
{
CONF_HOST: self.discovery_info[CONF_HOST],
CONF_PORT: self.discovery_info.get(CONF_PORT, DEFAULT_PORT),
}
)
info, errors = await self._async_validate_or_error(user_input)
if not errors:
self.nut_config.update(user_input)
if len(info["ups_list"]) > 1:
self.ups_list = info["ups_list"]
return await self.async_step_ups()
if self._host_port_alias_already_configured(self.nut_config):
return self.async_abort(reason="already_configured")
self.available_resources.update(info["available_resources"])
return await self.async_step_resources()
return self.async_show_form(
step_id="user", data_schema=_base_schema(self.discovery_info), errors=errors
)
    async def async_step_ups(self, user_input=None):
        """Handle the UPS selection step."""
errors = {}
if user_input is not None:
self.nut_config.update(user_input)
if self._host_port_alias_already_configured(self.nut_config):
return self.async_abort(reason="already_configured")
info, errors = await self._async_validate_or_error(self.nut_config)
if not errors:
self.available_resources.update(info["available_resources"])
return await self.async_step_resources()
return self.async_show_form(
step_id="ups",
data_schema=_ups_schema(self.ups_list),
errors=errors,
)
    async def async_step_resources(self, user_input=None):
        """Handle the resource selection step."""
if user_input is None:
return self.async_show_form(
step_id="resources",
data_schema=vol.Schema(
_resource_schema_base(self.available_resources, [])
),
)
self.nut_config.update(user_input)
title = _format_host_port_alias(self.nut_config)
return self.async_create_entry(title=title, data=self.nut_config)
def _host_port_alias_already_configured(self, user_input):
"""See if we already have a nut entry matching user input configured."""
existing_host_port_aliases = {
_format_host_port_alias(entry.data)
for entry in self._async_current_entries()
if CONF_HOST in entry.data
}
return _format_host_port_alias(user_input) in existing_host_port_aliases
async def _async_validate_or_error(self, config):
errors = {}
info = {}
try:
info = await validate_input(self.hass, config)
except CannotConnect:
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
return info, errors
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Handle an options flow for nut."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle options flow."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
resources = find_resources_in_config_entry(self.config_entry)
scan_interval = self.config_entry.options.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
)
info = await validate_input(self.hass, self.config_entry.data)
base_schema = _resource_schema_base(info["available_resources"], resources)
base_schema[
vol.Optional(CONF_SCAN_INTERVAL, default=scan_interval)
] = cv.positive_int
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(base_schema),
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Starting point for routing EC2 requests.
"""
from eventlet.green import httplib
from oslo.config import cfg
import six
import six.moves.urllib.parse as urlparse
import webob
import webob.dec
import webob.exc
from nova.api.ec2 import apirequest
from nova.api.ec2 import ec2utils
from nova.api.ec2 import faults
from nova.api import validator
from nova import context
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import memorycache
from nova.openstack.common import timeutils
from nova import utils
from nova import wsgi
LOG = logging.getLogger(__name__)
ec2_opts = [
cfg.IntOpt('lockout_attempts',
default=5,
help='Number of failed auths before lockout.'),
cfg.IntOpt('lockout_minutes',
default=15,
help='Number of minutes to lockout if triggered.'),
cfg.IntOpt('lockout_window',
default=15,
help='Number of minutes for lockout window.'),
cfg.StrOpt('keystone_ec2_url',
default='http://localhost:5000/v2.0/ec2tokens',
help='URL to get token from ec2 request.'),
cfg.BoolOpt('ec2_private_dns_show_ip',
default=False,
help='Return the IP address as private dns hostname in '
'describe instances'),
cfg.BoolOpt('ec2_strict_validation',
default=True,
help='Validate security group names'
' according to EC2 specification'),
cfg.IntOpt('ec2_timestamp_expiry',
default=300,
help='Time in seconds before ec2 timestamp expires'),
]
CONF = cfg.CONF
CONF.register_opts(ec2_opts)
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
## Fault Wrapper around all EC2 requests ##
class FaultWrapper(wsgi.Middleware):
"""Calls the middleware stack, captures any exceptions into faults."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
try:
return req.get_response(self.application)
except Exception as ex:
LOG.exception(_("FaultWrapper: %s"), unicode(ex))
return faults.Fault(webob.exc.HTTPInternalServerError())
class RequestLogging(wsgi.Middleware):
"""Access-Log akin logging for all EC2 API requests."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
start = timeutils.utcnow()
rv = req.get_response(self.application)
self.log_request_completion(rv, req, start)
return rv
def log_request_completion(self, response, request, start):
apireq = request.environ.get('ec2.request', None)
if apireq:
controller = apireq.controller
action = apireq.action
else:
controller = None
action = None
ctxt = request.environ.get('nova.context', None)
delta = timeutils.utcnow() - start
seconds = delta.seconds
microseconds = delta.microseconds
LOG.info(
"%s.%ss %s %s %s %s:%s %s [%s] %s %s",
seconds,
microseconds,
request.remote_addr,
request.method,
"%s%s" % (request.script_name, request.path_info),
controller,
action,
response.status_int,
request.user_agent,
request.content_type,
response.content_type,
context=ctxt)
class Lockout(wsgi.Middleware):
"""Lockout for x minutes on y failed auths in a z minute period.
    x = lockout_minutes flag
    y = lockout_attempts flag
    z = lockout_window flag
Uses memcached if lockout_memcached_servers flag is set, otherwise it
uses a very simple in-process cache. Due to the simplicity of
the implementation, the timeout window is started with the first
    failed request, so it will block if there are y failed logins within
that period.
There is a possible race condition where simultaneous requests could
sneak in before the lockout hits, but this is extremely rare and would
only result in a couple of extra failed attempts.
"""
def __init__(self, application):
"""middleware can use fake for testing."""
self.mc = memorycache.get_client()
super(Lockout, self).__init__(application)
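    # Example with hypothetical flag values: with lockout_attempts=5,
    # lockout_window=15 and lockout_minutes=15, five 403 responses for the same
    # access key within a 15 minute window lock that key out for the next
    # 15 minutes.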
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
access_key = str(req.params['AWSAccessKeyId'])
failures_key = "authfailures-%s" % access_key
failures = int(self.mc.get(failures_key) or 0)
if failures >= CONF.lockout_attempts:
detail = _("Too many failed authentications.")
raise webob.exc.HTTPForbidden(explanation=detail)
res = req.get_response(self.application)
if res.status_int == 403:
failures = self.mc.incr(failures_key)
if failures is None:
# NOTE(vish): To use incr, failures has to be a string.
self.mc.set(failures_key, '1', time=CONF.lockout_window * 60)
elif failures >= CONF.lockout_attempts:
LOG.warn(_('Access key %(access_key)s has had %(failures)d '
'failed authentications and will be locked out '
'for %(lock_mins)d minutes.'),
{'access_key': access_key,
'failures': failures,
'lock_mins': CONF.lockout_minutes})
self.mc.set(failures_key, str(failures),
time=CONF.lockout_minutes * 60)
return res
class EC2KeystoneAuth(wsgi.Middleware):
"""Authenticate an EC2 request with keystone and convert to context."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
request_id = context.generate_request_id()
signature = req.params.get('Signature')
if not signature:
msg = _("Signature not provided")
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=400)
access = req.params.get('AWSAccessKeyId')
if not access:
msg = _("Access key not provided")
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=400)
# Make a copy of args for authentication and signature verification.
auth_params = dict(req.params)
# Not part of authentication args
auth_params.pop('Signature')
cred_dict = {
'access': access,
'signature': signature,
'host': req.host,
'verb': req.method,
'path': req.path,
'params': auth_params,
}
if "ec2" in CONF.keystone_ec2_url:
creds = {'ec2Credentials': cred_dict}
else:
creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}}
creds_json = jsonutils.dumps(creds)
headers = {'Content-Type': 'application/json'}
o = urlparse.urlparse(CONF.keystone_ec2_url)
if o.scheme == "http":
conn = httplib.HTTPConnection(o.netloc)
else:
conn = httplib.HTTPSConnection(o.netloc)
conn.request('POST', o.path, body=creds_json, headers=headers)
response = conn.getresponse()
data = response.read()
if response.status != 200:
if response.status == 401:
msg = response.reason
else:
msg = _("Failure communicating with keystone")
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=response.status)
result = jsonutils.loads(data)
conn.close()
try:
token_id = result['access']['token']['id']
user_id = result['access']['user']['id']
project_id = result['access']['token']['tenant']['id']
user_name = result['access']['user'].get('name')
project_name = result['access']['token']['tenant'].get('name')
roles = [role['name'] for role
in result['access']['user']['roles']]
except (AttributeError, KeyError) as e:
LOG.exception(_("Keystone failure: %s") % e)
msg = _("Failure communicating with keystone")
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=400)
remote_address = req.remote_addr
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For',
remote_address)
catalog = result['access']['serviceCatalog']
ctxt = context.RequestContext(user_id,
project_id,
user_name=user_name,
project_name=project_name,
roles=roles,
auth_token=token_id,
remote_address=remote_address,
service_catalog=catalog)
req.environ['nova.context'] = ctxt
return self.application
class NoAuth(wsgi.Middleware):
"""Add user:project as 'nova.context' to WSGI environ."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if 'AWSAccessKeyId' not in req.params:
raise webob.exc.HTTPBadRequest()
user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':')
project_id = project_id or user_id
remote_address = req.remote_addr
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
ctx = context.RequestContext(user_id,
project_id,
is_admin=True,
remote_address=remote_address)
req.environ['nova.context'] = ctx
return self.application
class Requestify(wsgi.Middleware):
def __init__(self, app, controller):
super(Requestify, self).__init__(app)
self.controller = importutils.import_object(controller)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Version', 'Timestamp']
args = dict(req.params)
try:
expired = ec2utils.is_ec2_timestamp_expired(req.params,
expires=CONF.ec2_timestamp_expiry)
if expired:
msg = _("Timestamp failed validation.")
LOG.exception(msg)
raise webob.exc.HTTPForbidden(explanation=msg)
# Raise KeyError if omitted
action = req.params['Action']
# Fix bug lp:720157 for older (version 1) clients
version = req.params['SignatureVersion']
if int(version) == 1:
non_args.remove('SignatureMethod')
if 'SignatureMethod' in args:
args.pop('SignatureMethod')
for non_arg in non_args:
# Remove, but raise KeyError if omitted
args.pop(non_arg)
except KeyError:
raise webob.exc.HTTPBadRequest()
except exception.InvalidRequest as err:
raise webob.exc.HTTPBadRequest(explanation=unicode(err))
LOG.debug('action: %s', action)
for key, value in args.items():
LOG.debug('arg: %(key)s\t\tval: %(value)s',
{'key': key, 'value': value})
# Success!
api_request = apirequest.APIRequest(self.controller, action,
req.params['Version'], args)
req.environ['ec2.request'] = api_request
return self.application
class Authorizer(wsgi.Middleware):
"""Authorize an EC2 API request.
Return a 401 if ec2.controller and ec2.action in WSGI environ may not be
executed in nova.context.
"""
def __init__(self, application):
super(Authorizer, self).__init__(application)
self.action_roles = {
'CloudController': {
'DescribeAvailabilityZones': ['all'],
'DescribeRegions': ['all'],
'DescribeSnapshots': ['all'],
'DescribeKeyPairs': ['all'],
'CreateKeyPair': ['all'],
'DeleteKeyPair': ['all'],
'DescribeSecurityGroups': ['all'],
'ImportKeyPair': ['all'],
'AuthorizeSecurityGroupIngress': ['netadmin'],
'RevokeSecurityGroupIngress': ['netadmin'],
'CreateSecurityGroup': ['netadmin'],
'DeleteSecurityGroup': ['netadmin'],
'GetConsoleOutput': ['projectmanager', 'sysadmin'],
'DescribeVolumes': ['projectmanager', 'sysadmin'],
'CreateVolume': ['projectmanager', 'sysadmin'],
'AttachVolume': ['projectmanager', 'sysadmin'],
'DetachVolume': ['projectmanager', 'sysadmin'],
'DescribeInstances': ['all'],
'DescribeAddresses': ['all'],
'AllocateAddress': ['netadmin'],
'ReleaseAddress': ['netadmin'],
'AssociateAddress': ['netadmin'],
'DisassociateAddress': ['netadmin'],
'RunInstances': ['projectmanager', 'sysadmin'],
'TerminateInstances': ['projectmanager', 'sysadmin'],
'RebootInstances': ['projectmanager', 'sysadmin'],
'UpdateInstance': ['projectmanager', 'sysadmin'],
'StartInstances': ['projectmanager', 'sysadmin'],
'StopInstances': ['projectmanager', 'sysadmin'],
'DeleteVolume': ['projectmanager', 'sysadmin'],
'DescribeImages': ['all'],
'DeregisterImage': ['projectmanager', 'sysadmin'],
'RegisterImage': ['projectmanager', 'sysadmin'],
'DescribeImageAttribute': ['all'],
'ModifyImageAttribute': ['projectmanager', 'sysadmin'],
'UpdateImage': ['projectmanager', 'sysadmin'],
'CreateImage': ['projectmanager', 'sysadmin'],
},
'AdminController': {
# All actions have the same permission: ['none'] (the default)
# superusers will be allowed to run them
# all others will get HTTPUnauthorized.
},
}
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
context = req.environ['nova.context']
controller = req.environ['ec2.request'].controller.__class__.__name__
action = req.environ['ec2.request'].action
allowed_roles = self.action_roles[controller].get(action, ['none'])
if self._matches_any_role(context, allowed_roles):
return self.application
else:
LOG.audit(_('Unauthorized request for controller=%(controller)s '
'and action=%(action)s'),
{'controller': controller, 'action': action},
context=context)
raise webob.exc.HTTPUnauthorized()
def _matches_any_role(self, context, roles):
"""Return True if any role in roles is allowed in context."""
if context.is_admin:
return True
if 'all' in roles:
return True
if 'none' in roles:
return False
return any(role in context.roles for role in roles)
class Validator(wsgi.Middleware):
def validate_ec2_id(val):
if not validator.validate_str()(val):
return False
try:
ec2utils.ec2_id_to_id(val)
except exception.InvalidEc2Id:
return False
return True
validator.validate_ec2_id = validate_ec2_id
validator.DEFAULT_VALIDATOR = {
'instance_id': validator.validate_ec2_id,
'volume_id': validator.validate_ec2_id,
'image_id': validator.validate_ec2_id,
'attribute': validator.validate_str(),
'image_location': validator.validate_image_path,
'public_ip': utils.is_valid_ipv4,
'region_name': validator.validate_str(),
'group_name': validator.validate_str(max_length=255),
'group_description': validator.validate_str(max_length=255),
'size': validator.validate_int(),
'user_data': validator.validate_user_data
}
def __init__(self, application):
super(Validator, self).__init__(application)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if validator.validate(req.environ['ec2.request'].args,
validator.DEFAULT_VALIDATOR):
return self.application
else:
raise webob.exc.HTTPBadRequest()
def exception_to_ec2code(ex):
"""Helper to extract EC2 error code from exception.
For other than EC2 exceptions (those without ec2_code attribute),
use exception name.
"""
if hasattr(ex, 'ec2_code'):
code = ex.ec2_code
else:
code = type(ex).__name__
return code
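# For illustration (hypothetical exceptions): an exception carrying
# ec2_code='InvalidParameterValue' maps to that code, while a plain KeyError
# falls back to its class name, 'KeyError'.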
def ec2_error_ex(ex, req, code=None, message=None, unexpected=False):
"""Return an EC2 error response based on passed exception and log
the exception on an appropriate log level:
* DEBUG: expected errors
* ERROR: unexpected errors
All expected errors are treated as client errors and 4xx HTTP
status codes are always returned for them.
Unexpected 5xx errors may contain sensitive information,
suppress their messages for security.
"""
if not code:
code = exception_to_ec2code(ex)
status = getattr(ex, 'code', None)
if not status:
status = 500
if unexpected:
log_fun = LOG.error
log_msg = _("Unexpected %(ex_name)s raised: %(ex_str)s")
else:
log_fun = LOG.debug
log_msg = _("%(ex_name)s raised: %(ex_str)s")
# NOTE(jruzicka): For compatibility with EC2 API, treat expected
# exceptions as client (4xx) errors. The exception error code is 500
# by default and most exceptions inherit this from NovaException even
# though they are actually client errors in most cases.
if status >= 500:
status = 400
context = req.environ['nova.context']
request_id = context.request_id
log_msg_args = {
'ex_name': type(ex).__name__,
'ex_str': unicode(ex)
}
log_fun(log_msg % log_msg_args, context=context)
if ex.args and not message and (not unexpected or status < 500):
message = unicode(ex.args[0])
if unexpected:
# Log filtered environment for unexpected errors.
env = req.environ.copy()
for k in env.keys():
if not isinstance(env[k], six.string_types):
env.pop(k)
log_fun(_('Environment: %s') % jsonutils.dumps(env))
if not message:
message = _('Unknown error occurred.')
return faults.ec2_error_response(request_id, code, message, status=status)
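# For illustration: an expected exception whose default code is 500 and
# unexpected=False is logged at DEBUG and returned to the client as a 400 with
# its EC2 error code, while unexpected errors are logged at ERROR along with a
# filtered copy of the WSGI environment.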
class Executor(wsgi.Application):
"""Execute an EC2 API request.
Executes 'ec2.action' upon 'ec2.controller', passing 'nova.context' and
'ec2.action_args' (all variables in WSGI environ.) Returns an XML
response, or a 400 upon failure.
"""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
context = req.environ['nova.context']
api_request = req.environ['ec2.request']
try:
result = api_request.invoke(context)
except exception.InstanceNotFound as ex:
ec2_id = ec2utils.id_to_ec2_inst_id(ex.kwargs['instance_id'])
message = ex.msg_fmt % {'instance_id': ec2_id}
return ec2_error_ex(ex, req, message=message)
except exception.VolumeNotFound as ex:
ec2_id = ec2utils.id_to_ec2_vol_id(ex.kwargs['volume_id'])
message = ex.msg_fmt % {'volume_id': ec2_id}
return ec2_error_ex(ex, req, message=message)
except exception.SnapshotNotFound as ex:
ec2_id = ec2utils.id_to_ec2_snap_id(ex.kwargs['snapshot_id'])
message = ex.msg_fmt % {'snapshot_id': ec2_id}
return ec2_error_ex(ex, req, message=message)
except (exception.CannotDisassociateAutoAssignedFloatingIP,
exception.FloatingIpAssociated,
exception.FloatingIpNotFound,
exception.ImageNotActive,
exception.InvalidInstanceIDMalformed,
exception.InvalidKeypair,
exception.InvalidParameterValue,
exception.InvalidPortRange,
exception.InvalidVolume,
exception.KeyPairExists,
exception.KeypairNotFound,
exception.MissingParameter,
exception.NoFloatingIpInterface,
exception.NoMoreFixedIps,
exception.Forbidden,
exception.QuotaError,
exception.SecurityGroupExists,
exception.SecurityGroupLimitExceeded,
exception.SecurityGroupRuleExists,
exception.VolumeUnattached,
# Following aren't translated to valid EC2 errors.
exception.ImageNotFound,
exception.ImageNotFoundEC2,
exception.InvalidAttribute,
exception.InvalidRequest,
exception.NotFound) as ex:
return ec2_error_ex(ex, req)
except Exception as ex:
return ec2_error_ex(ex, req, unexpected=True)
else:
resp = webob.Response()
resp.status = 200
resp.headers['Content-Type'] = 'text/xml'
resp.body = str(result)
return resp
|
|
"""
Test for the SmartThings climate platform.
The only mocking required is of the underlying SmartThings API object so
real HTTP calls are not initiated during testing.
"""
from pysmartthings import Attribute, Capability
from pysmartthings.device import Status
import pytest
from homeassistant.components.climate.const import (
ATTR_CURRENT_HUMIDITY,
ATTR_CURRENT_TEMPERATURE,
ATTR_FAN_MODE,
ATTR_FAN_MODES,
ATTR_HVAC_ACTION,
ATTR_HVAC_MODE,
ATTR_HVAC_MODES,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_IDLE,
DOMAIN as CLIMATE_DOMAIN,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SERVICE_SET_FAN_MODE,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_TEMPERATURE,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.components.smartthings import climate
from homeassistant.components.smartthings.const import DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_UNKNOWN,
)
from .conftest import setup_platform
@pytest.fixture(name="legacy_thermostat")
def legacy_thermostat_fixture(device_factory):
"""Fixture returns a legacy thermostat."""
device = device_factory(
"Legacy Thermostat",
capabilities=[Capability.thermostat],
status={
Attribute.cooling_setpoint: 74,
Attribute.heating_setpoint: 68,
Attribute.thermostat_fan_mode: "auto",
Attribute.supported_thermostat_fan_modes: ["auto", "on"],
Attribute.thermostat_mode: "auto",
Attribute.supported_thermostat_modes: climate.MODE_TO_STATE.keys(),
Attribute.thermostat_operating_state: "idle",
},
)
device.status.attributes[Attribute.temperature] = Status(70, "F", None)
return device
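# Note: the device fixtures report temperature in °F, while the state
# attributes asserted in the tests below are in °C after Home Assistant's unit
# conversion (70°F ≈ 21.1°C, 74°F ≈ 23.3°C, 68°F = 20°C).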
@pytest.fixture(name="basic_thermostat")
def basic_thermostat_fixture(device_factory):
"""Fixture returns a basic thermostat."""
device = device_factory(
"Basic Thermostat",
capabilities=[
Capability.temperature_measurement,
Capability.thermostat_cooling_setpoint,
Capability.thermostat_heating_setpoint,
Capability.thermostat_mode,
],
status={
Attribute.cooling_setpoint: 74,
Attribute.heating_setpoint: 68,
Attribute.thermostat_mode: "off",
Attribute.supported_thermostat_modes: ["off", "auto", "heat", "cool"],
},
)
device.status.attributes[Attribute.temperature] = Status(70, "F", None)
return device
@pytest.fixture(name="thermostat")
def thermostat_fixture(device_factory):
"""Fixture returns a fully-featured thermostat."""
device = device_factory(
"Thermostat",
capabilities=[
Capability.temperature_measurement,
Capability.relative_humidity_measurement,
Capability.thermostat_cooling_setpoint,
Capability.thermostat_heating_setpoint,
Capability.thermostat_mode,
Capability.thermostat_operating_state,
Capability.thermostat_fan_mode,
],
status={
Attribute.cooling_setpoint: 74,
Attribute.heating_setpoint: 68,
Attribute.thermostat_fan_mode: "on",
Attribute.supported_thermostat_fan_modes: ["auto", "on"],
Attribute.thermostat_mode: "heat",
Attribute.supported_thermostat_modes: [
"auto",
"heat",
"cool",
"off",
"eco",
],
Attribute.thermostat_operating_state: "idle",
Attribute.humidity: 34,
},
)
device.status.attributes[Attribute.temperature] = Status(70, "F", None)
return device
@pytest.fixture(name="buggy_thermostat")
def buggy_thermostat_fixture(device_factory):
"""Fixture returns a buggy thermostat."""
device = device_factory(
"Buggy Thermostat",
capabilities=[
Capability.temperature_measurement,
Capability.thermostat_cooling_setpoint,
Capability.thermostat_heating_setpoint,
Capability.thermostat_mode,
],
status={
Attribute.thermostat_mode: "heating",
Attribute.cooling_setpoint: 74,
Attribute.heating_setpoint: 68,
},
)
device.status.attributes[Attribute.temperature] = Status(70, "F", None)
return device
@pytest.fixture(name="air_conditioner")
def air_conditioner_fixture(device_factory):
    """Fixture returns an air conditioner."""
device = device_factory(
"Air Conditioner",
capabilities=[
Capability.air_conditioner_mode,
Capability.demand_response_load_control,
Capability.air_conditioner_fan_mode,
Capability.power_consumption_report,
Capability.switch,
Capability.temperature_measurement,
Capability.thermostat_cooling_setpoint,
],
status={
Attribute.air_conditioner_mode: "auto",
Attribute.supported_ac_modes: [
"cool",
"dry",
"wind",
"auto",
"heat",
"fanOnly",
],
Attribute.drlc_status: {
"duration": 0,
"drlcLevel": -1,
"start": "1970-01-01T00:00:00Z",
"override": False,
},
Attribute.fan_mode: "medium",
Attribute.supported_ac_fan_modes: [
"auto",
"low",
"medium",
"high",
"turbo",
],
Attribute.power_consumption: {
"start": "2019-02-24T21:03:04Z",
"power": 0,
"energy": 500,
"end": "2019-02-26T02:05:55Z",
},
Attribute.switch: "on",
Attribute.cooling_setpoint: 23,
},
)
device.status.attributes[Attribute.temperature] = Status(24, "C", None)
return device
async def test_async_setup_platform():
"""Test setup platform does nothing (it uses config entries)."""
await climate.async_setup_platform(None, None, None)
async def test_legacy_thermostat_entity_state(hass, legacy_thermostat):
"""Tests the state attributes properly match the thermostat type."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[legacy_thermostat])
state = hass.states.get("climate.legacy_thermostat")
assert state.state == HVAC_MODE_HEAT_COOL
assert (
state.attributes[ATTR_SUPPORTED_FEATURES]
== SUPPORT_FAN_MODE
| SUPPORT_TARGET_TEMPERATURE_RANGE
| SUPPORT_TARGET_TEMPERATURE
)
assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_IDLE
assert sorted(state.attributes[ATTR_HVAC_MODES]) == [
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
]
assert state.attributes[ATTR_FAN_MODE] == "auto"
assert state.attributes[ATTR_FAN_MODES] == ["auto", "on"]
assert state.attributes[ATTR_TARGET_TEMP_LOW] == 20 # celsius
assert state.attributes[ATTR_TARGET_TEMP_HIGH] == 23.3 # celsius
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 21.1 # celsius
async def test_basic_thermostat_entity_state(hass, basic_thermostat):
"""Tests the state attributes properly match the thermostat type."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[basic_thermostat])
state = hass.states.get("climate.basic_thermostat")
assert state.state == HVAC_MODE_OFF
assert (
state.attributes[ATTR_SUPPORTED_FEATURES]
== SUPPORT_TARGET_TEMPERATURE_RANGE | SUPPORT_TARGET_TEMPERATURE
)
assert ATTR_HVAC_ACTION not in state.attributes
assert sorted(state.attributes[ATTR_HVAC_MODES]) == [
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
]
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 21.1 # celsius
async def test_thermostat_entity_state(hass, thermostat):
"""Tests the state attributes properly match the thermostat type."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[thermostat])
state = hass.states.get("climate.thermostat")
assert state.state == HVAC_MODE_HEAT
assert (
state.attributes[ATTR_SUPPORTED_FEATURES]
== SUPPORT_FAN_MODE
| SUPPORT_TARGET_TEMPERATURE_RANGE
| SUPPORT_TARGET_TEMPERATURE
)
assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_IDLE
assert sorted(state.attributes[ATTR_HVAC_MODES]) == [
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
]
assert state.attributes[ATTR_FAN_MODE] == "on"
assert state.attributes[ATTR_FAN_MODES] == ["auto", "on"]
assert state.attributes[ATTR_TEMPERATURE] == 20 # celsius
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 21.1 # celsius
assert state.attributes[ATTR_CURRENT_HUMIDITY] == 34
async def test_buggy_thermostat_entity_state(hass, buggy_thermostat):
"""Tests the state attributes properly match the thermostat type."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[buggy_thermostat])
state = hass.states.get("climate.buggy_thermostat")
assert state.state == STATE_UNKNOWN
assert (
state.attributes[ATTR_SUPPORTED_FEATURES]
== SUPPORT_TARGET_TEMPERATURE_RANGE | SUPPORT_TARGET_TEMPERATURE
)
assert state.state is STATE_UNKNOWN
assert state.attributes[ATTR_TEMPERATURE] is None
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 21.1 # celsius
assert state.attributes[ATTR_HVAC_MODES] == []
async def test_buggy_thermostat_invalid_mode(hass, buggy_thermostat):
"""Tests when an invalid operation mode is included."""
buggy_thermostat.status.update_attribute_value(
Attribute.supported_thermostat_modes, ["heat", "emergency heat", "other"]
)
await setup_platform(hass, CLIMATE_DOMAIN, devices=[buggy_thermostat])
state = hass.states.get("climate.buggy_thermostat")
assert state.attributes[ATTR_HVAC_MODES] == [HVAC_MODE_HEAT]
async def test_air_conditioner_entity_state(hass, air_conditioner):
    """Tests the state attributes properly match the air conditioner type."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[air_conditioner])
state = hass.states.get("climate.air_conditioner")
assert state.state == HVAC_MODE_HEAT_COOL
assert (
state.attributes[ATTR_SUPPORTED_FEATURES]
== SUPPORT_FAN_MODE | SUPPORT_TARGET_TEMPERATURE
)
assert sorted(state.attributes[ATTR_HVAC_MODES]) == [
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
]
assert state.attributes[ATTR_FAN_MODE] == "medium"
assert sorted(state.attributes[ATTR_FAN_MODES]) == [
"auto",
"high",
"low",
"medium",
"turbo",
]
assert state.attributes[ATTR_TEMPERATURE] == 23
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 24
assert state.attributes["drlc_status_duration"] == 0
assert state.attributes["drlc_status_level"] == -1
assert state.attributes["drlc_status_start"] == "1970-01-01T00:00:00Z"
assert state.attributes["drlc_status_override"] is False
assert state.attributes["power_consumption_start"] == "2019-02-24T21:03:04Z"
assert state.attributes["power_consumption_power"] == 0
assert state.attributes["power_consumption_energy"] == 500
assert state.attributes["power_consumption_end"] == "2019-02-26T02:05:55Z"
async def test_set_fan_mode(hass, thermostat, air_conditioner):
"""Test the fan mode is set successfully."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[thermostat, air_conditioner])
entity_ids = ["climate.thermostat", "climate.air_conditioner"]
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: entity_ids, ATTR_FAN_MODE: "auto"},
blocking=True,
)
for entity_id in entity_ids:
state = hass.states.get(entity_id)
assert state.attributes[ATTR_FAN_MODE] == "auto", entity_id
async def test_set_hvac_mode(hass, thermostat, air_conditioner):
"""Test the hvac mode is set successfully."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[thermostat, air_conditioner])
entity_ids = ["climate.thermostat", "climate.air_conditioner"]
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: entity_ids, ATTR_HVAC_MODE: HVAC_MODE_COOL},
blocking=True,
)
for entity_id in entity_ids:
state = hass.states.get(entity_id)
assert state.state == HVAC_MODE_COOL, entity_id
async def test_ac_set_hvac_mode_from_off(hass, air_conditioner):
"""Test setting HVAC mode when the unit is off."""
air_conditioner.status.update_attribute_value(
Attribute.air_conditioner_mode, "heat"
)
air_conditioner.status.update_attribute_value(Attribute.switch, "off")
await setup_platform(hass, CLIMATE_DOMAIN, devices=[air_conditioner])
state = hass.states.get("climate.air_conditioner")
assert state.state == HVAC_MODE_OFF
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{
ATTR_ENTITY_ID: "climate.air_conditioner",
ATTR_HVAC_MODE: HVAC_MODE_HEAT_COOL,
},
blocking=True,
)
state = hass.states.get("climate.air_conditioner")
assert state.state == HVAC_MODE_HEAT_COOL
async def test_ac_set_hvac_mode_off(hass, air_conditioner):
    """Test the AC HVAC mode can be set to off successfully."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[air_conditioner])
state = hass.states.get("climate.air_conditioner")
assert state.state != HVAC_MODE_OFF
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: "climate.air_conditioner", ATTR_HVAC_MODE: HVAC_MODE_OFF},
blocking=True,
)
state = hass.states.get("climate.air_conditioner")
assert state.state == HVAC_MODE_OFF
async def test_set_temperature_heat_mode(hass, thermostat):
"""Test the temperature is set successfully when in heat mode."""
thermostat.status.thermostat_mode = "heat"
await setup_platform(hass, CLIMATE_DOMAIN, devices=[thermostat])
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: "climate.thermostat", ATTR_TEMPERATURE: 21},
blocking=True,
)
state = hass.states.get("climate.thermostat")
assert state.state == HVAC_MODE_HEAT
assert state.attributes[ATTR_TEMPERATURE] == 21
assert thermostat.status.heating_setpoint == 69.8
async def test_set_temperature_cool_mode(hass, thermostat):
"""Test the temperature is set successfully when in cool mode."""
thermostat.status.thermostat_mode = "cool"
await setup_platform(hass, CLIMATE_DOMAIN, devices=[thermostat])
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: "climate.thermostat", ATTR_TEMPERATURE: 21},
blocking=True,
)
state = hass.states.get("climate.thermostat")
assert state.attributes[ATTR_TEMPERATURE] == 21
async def test_set_temperature(hass, thermostat):
"""Test the temperature is set successfully."""
thermostat.status.thermostat_mode = "auto"
await setup_platform(hass, CLIMATE_DOMAIN, devices=[thermostat])
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: "climate.thermostat",
ATTR_TARGET_TEMP_HIGH: 25.5,
ATTR_TARGET_TEMP_LOW: 22.2,
},
blocking=True,
)
state = hass.states.get("climate.thermostat")
assert state.attributes[ATTR_TARGET_TEMP_HIGH] == 25.5
assert state.attributes[ATTR_TARGET_TEMP_LOW] == 22.2
async def test_set_temperature_ac(hass, air_conditioner):
"""Test the temperature is set successfully."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[air_conditioner])
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: "climate.air_conditioner", ATTR_TEMPERATURE: 27},
blocking=True,
)
state = hass.states.get("climate.air_conditioner")
assert state.attributes[ATTR_TEMPERATURE] == 27
async def test_set_temperature_ac_with_mode(hass, air_conditioner):
"""Test the temperature is set successfully."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[air_conditioner])
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: "climate.air_conditioner",
ATTR_TEMPERATURE: 27,
ATTR_HVAC_MODE: HVAC_MODE_COOL,
},
blocking=True,
)
state = hass.states.get("climate.air_conditioner")
assert state.attributes[ATTR_TEMPERATURE] == 27
assert state.state == HVAC_MODE_COOL
async def test_set_temperature_ac_with_mode_from_off(hass, air_conditioner):
    """Test the temp and mode are set successfully when the unit is off."""
air_conditioner.status.update_attribute_value(
Attribute.air_conditioner_mode, "heat"
)
air_conditioner.status.update_attribute_value(Attribute.switch, "off")
await setup_platform(hass, CLIMATE_DOMAIN, devices=[air_conditioner])
assert hass.states.get("climate.air_conditioner").state == HVAC_MODE_OFF
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: "climate.air_conditioner",
ATTR_TEMPERATURE: 27,
ATTR_HVAC_MODE: HVAC_MODE_COOL,
},
blocking=True,
)
state = hass.states.get("climate.air_conditioner")
assert state.attributes[ATTR_TEMPERATURE] == 27
assert state.state == HVAC_MODE_COOL
async def test_set_temperature_ac_with_mode_to_off(hass, air_conditioner):
    """Test the temp and mode are set successfully to turn off the unit."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[air_conditioner])
assert hass.states.get("climate.air_conditioner").state != HVAC_MODE_OFF
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: "climate.air_conditioner",
ATTR_TEMPERATURE: 27,
ATTR_HVAC_MODE: HVAC_MODE_OFF,
},
blocking=True,
)
state = hass.states.get("climate.air_conditioner")
assert state.attributes[ATTR_TEMPERATURE] == 27
assert state.state == HVAC_MODE_OFF
async def test_set_temperature_with_mode(hass, thermostat):
    """Test the temperature and mode are set successfully."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[thermostat])
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: "climate.thermostat",
ATTR_TARGET_TEMP_HIGH: 25.5,
ATTR_TARGET_TEMP_LOW: 22.2,
ATTR_HVAC_MODE: HVAC_MODE_HEAT_COOL,
},
blocking=True,
)
state = hass.states.get("climate.thermostat")
assert state.attributes[ATTR_TARGET_TEMP_HIGH] == 25.5
assert state.attributes[ATTR_TARGET_TEMP_LOW] == 22.2
assert state.state == HVAC_MODE_HEAT_COOL
async def test_set_turn_off(hass, air_conditioner):
"""Test the a/c is turned off successfully."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[air_conditioner])
state = hass.states.get("climate.air_conditioner")
assert state.state == HVAC_MODE_HEAT_COOL
await hass.services.async_call(
CLIMATE_DOMAIN, SERVICE_TURN_OFF, {"entity_id": "all"}, blocking=True
)
state = hass.states.get("climate.air_conditioner")
assert state.state == HVAC_MODE_OFF
async def test_set_turn_on(hass, air_conditioner):
"""Test the a/c is turned on successfully."""
air_conditioner.status.update_attribute_value(Attribute.switch, "off")
await setup_platform(hass, CLIMATE_DOMAIN, devices=[air_conditioner])
state = hass.states.get("climate.air_conditioner")
assert state.state == HVAC_MODE_OFF
await hass.services.async_call(
CLIMATE_DOMAIN, SERVICE_TURN_ON, {"entity_id": "all"}, blocking=True
)
state = hass.states.get("climate.air_conditioner")
assert state.state == HVAC_MODE_HEAT_COOL
async def test_entity_and_device_attributes(hass, thermostat):
"""Test the attributes of the entries are correct."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[thermostat])
entity_registry = await hass.helpers.entity_registry.async_get_registry()
device_registry = await hass.helpers.device_registry.async_get_registry()
entry = entity_registry.async_get("climate.thermostat")
assert entry
assert entry.unique_id == thermostat.device_id
entry = device_registry.async_get_device({(DOMAIN, thermostat.device_id)}, [])
assert entry
assert entry.name == thermostat.label
assert entry.model == thermostat.device_type_name
assert entry.manufacturer == "Unavailable"
|
|
from django.conf import settings
from django.utils.translation import gettext
from rest_framework import serializers
import olympia.core.logger
from olympia import amo
from olympia.access import acl
from olympia.access.models import Group
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.utils import (
clean_nl,
has_links,
ImageCheck,
SafeStorage,
subscribe_newsletter,
unsubscribe_newsletter,
urlparams,
)
from olympia.api.serializers import SiteStatusSerializer
from olympia.api.utils import is_gate_active
from olympia.api.validators import OneOrMorePrintableCharacterAPIValidator
from olympia.users import notifications
from olympia.users.models import DeniedName, UserProfile
from olympia.users.tasks import resize_photo
log = olympia.core.logger.getLogger('accounts')
class BaseUserSerializer(serializers.ModelSerializer):
url = serializers.SerializerMethodField()
class Meta:
model = UserProfile
fields = ('id', 'name', 'url', 'username')
def get_url(self, obj):
def is_adminish(user):
return user and acl.action_allowed_user(user, amo.permissions.USERS_EDIT)
request = self.context.get('request', None)
current_user = getattr(request, 'user', None) if request else None
        # Only expose the profile url for the user's own profile, for users
        # with the USERS_EDIT permission, or for public (developer) profiles.
if obj == current_user or is_adminish(current_user) or obj.is_public:
return obj.get_absolute_url()
# Used in subclasses.
def get_permissions(self, obj):
out = {perm for group in obj.groups_list for perm in group.rules.split(',')}
return sorted(out)
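    # For illustration (hypothetical group rules): a user whose groups carry
    # the rules 'Addons:Review' and 'Addons:Review,Users:Edit' ends up with the
    # sorted, de-duplicated list ['Addons:Review', 'Users:Edit'].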
# Used in subclasses.
def get_picture_url(self, obj):
if obj.picture_type:
return absolutify(obj.picture_url)
return None
class PublicUserProfileSerializer(BaseUserSerializer):
picture_url = serializers.SerializerMethodField()
average_addon_rating = serializers.FloatField(source='averagerating')
class Meta(BaseUserSerializer.Meta):
fields = BaseUserSerializer.Meta.fields + (
'average_addon_rating',
'created',
'biography',
'has_anonymous_display_name',
'has_anonymous_username',
'homepage',
'is_addon_developer',
'is_artist',
'location',
'occupation',
'num_addons_listed',
'picture_type',
'picture_url',
)
# This serializer should never be used for updates but just to be sure.
read_only_fields = fields
class UserProfileSerializer(PublicUserProfileSerializer):
display_name = serializers.CharField(
min_length=2,
max_length=50,
validators=[OneOrMorePrintableCharacterAPIValidator()],
)
picture_upload = serializers.ImageField(use_url=True, write_only=True)
permissions = serializers.SerializerMethodField()
fxa_edit_email_url = serializers.SerializerMethodField()
    # Just need to specify any field for the source - '*' is the entire obj.
site_status = SiteStatusSerializer(source='*')
class Meta(PublicUserProfileSerializer.Meta):
fields = PublicUserProfileSerializer.Meta.fields + (
'deleted',
'display_name',
'email',
'fxa_edit_email_url',
'last_login',
'last_login_ip',
'permissions',
'picture_upload',
'read_dev_agreement',
'site_status',
'username',
)
writeable_fields = (
'biography',
'display_name',
'homepage',
'location',
'occupation',
'picture_upload',
)
read_only_fields = tuple(set(fields) - set(writeable_fields))
def get_fxa_edit_email_url(self, user):
base_url = f'{settings.FXA_CONTENT_HOST}/settings'
return urlparams(
base_url, uid=user.fxa_id, email=user.email, entrypoint='addons'
)
def validate_biography(self, value):
if has_links(clean_nl(str(value))):
# There's some links, we don't want them.
raise serializers.ValidationError(gettext('No links are allowed.'))
return value
def validate_display_name(self, value):
if DeniedName.blocked(value):
raise serializers.ValidationError(
gettext('This display name cannot be used.')
)
return value
def validate_homepage(self, value):
if settings.DOMAIN.lower() in value.lower():
raise serializers.ValidationError(
gettext(
'The homepage field can only be used to link to '
'external websites.'
)
)
return value
def validate_picture_upload(self, value):
image_check = ImageCheck(value)
if value.content_type not in amo.IMG_TYPES or not image_check.is_image():
raise serializers.ValidationError(
gettext('Images must be either PNG or JPG.')
)
if image_check.is_animated():
raise serializers.ValidationError(gettext('Images cannot be animated.'))
if value.size > settings.MAX_PHOTO_UPLOAD_SIZE:
raise serializers.ValidationError(
gettext(
'Please use images smaller than %dMB.'
% (settings.MAX_PHOTO_UPLOAD_SIZE / 1024 / 1024)
)
)
return value
def update(self, instance, validated_data):
instance = super().update(instance, validated_data)
photo = validated_data.get('picture_upload')
if photo:
original = instance.picture_path_original
storage = SafeStorage(user_media='userpics')
with storage.open(original, 'wb') as original_file:
for chunk in photo.chunks():
original_file.write(chunk)
instance.update(picture_type=photo.content_type)
resize_photo.delay(
original,
instance.picture_path,
set_modified_on=instance.serializable_reference(),
)
return instance
def to_representation(self, obj):
data = super().to_representation(obj)
request = self.context.get('request', None)
if request and is_gate_active(request, 'del-accounts-fxa-edit-email-url'):
data.pop('fxa_edit_email_url', None)
return data
group_rules = {
'reviewer': 'Addons:Review',
'admin': '*:*',
}
class AccountSuperCreateSerializer(serializers.Serializer):
username = serializers.CharField(required=False)
email = serializers.EmailField(required=False)
fxa_id = serializers.CharField(required=False)
group = serializers.ChoiceField(choices=list(group_rules.items()), required=False)
def validate_email(self, email):
if email and UserProfile.objects.filter(email=email).exists():
raise serializers.ValidationError(
'Someone with this email already exists in the system'
)
return email
def validate_username(self, username):
if username and UserProfile.objects.filter(username=username).exists():
raise serializers.ValidationError(
'Someone with this username already exists in the system'
)
return username
def validate_group(self, group):
if group:
rule = group_rules[group]
# This isn't perfect. It makes an assumption that a single group
# will exist having a *single* rule for what we want. In the
# case of reviewers and admins this should always be true.
qs = Group.objects.filter(rules=rule)
count = qs.count()
if count != 1:
log.info(
'Super creation: looking for group with '
'permissions {} {} (count: {})'.format(group, rule, count)
)
raise serializers.ValidationError(
'Could not find a permissions group with the exact rules needed.'
)
group = qs.get()
return group
class UserNotificationSerializer(serializers.Serializer):
name = serializers.CharField(source='notification.short')
enabled = serializers.BooleanField()
mandatory = serializers.BooleanField(source='notification.mandatory')
def update(self, instance, validated_data):
if instance.notification.mandatory:
raise serializers.ValidationError(
"Attempting to set [%s] to %s. Mandatory notifications can't "
'be modified'
% (instance.notification.short, validated_data.get('enabled'))
)
enabled = validated_data['enabled']
request = self.context['request']
current_user = request.user
remote_by_id = {ntfn.id: ntfn for ntfn in notifications.REMOTE_NOTIFICATIONS}
if instance.notification_id in remote_by_id:
notification = remote_by_id[instance.notification_id]
if not enabled:
unsubscribe_newsletter(current_user, notification.basket_newsletter_id)
elif enabled:
subscribe_newsletter(
current_user, notification.basket_newsletter_id, request=request
)
elif 'enabled' in validated_data:
# Only save if non-mandatory and 'enabled' is set.
# Ignore other fields.
instance.enabled = validated_data['enabled']
# Not .update because some of the instances are new.
instance.save()
return instance
class UserProfileBasketSyncSerializer(UserProfileSerializer):
class Meta(UserProfileSerializer.Meta):
model = UserProfile
fields = (
'id',
'deleted',
'display_name',
'homepage',
'fxa_id',
'last_login',
'location',
)
read_only_fields = fields
|
|
#!/usr/bin/env python
# Plugin Update Service Code
#------------------------------------------------------------
# Code modified from installservice.py and bundleservice.py
#------------------------------------------------------------
# Twoure - 09/07/2016
from os.path import split as split_path
import shutil
#CHECK_INTERVAL = CACHE_1MINUTE * 5 # cache Github request URL for 5 mins. ONLY for testing
CHECK_INTERVAL = CACHE_1HOUR * 12 # cache Github request URL for 12 hours
HISTORY_KEY = u"_{}:History"
IDENTIFIER_KEY = "InstallIdentifier"
NOTES_KEY = "InstallNotes"
DATE_KEY = "InstallDate"
VERSION_KEY = "InstallVersion"
ACTION_KEY = "InstallAction"
BRANCH_KEY = "InstallBranch"
TAG_KEY = "InstallTag"
class BundleInfo(object):
def __init__(self, plugin_path):
self.path = plugin_path
self.bundled = Core.bundled_plugins_path in plugin_path if Core.bundled_plugins_path is not None else False
self.load_plist()
def load_plist(self):
plist = Plist.ObjectFromString(Core.storage.load(Core.storage.join_path(self.path, "Contents", "Info.plist")))
self.service_dict = Core.services.get_services_from_bundle(self.path, plist)
self.identifier = plist['CFBundleIdentifier']
self.version = plist['CFBundleVersion'] if 'CFBundleVersion' in plist else None
self.bundle_class = plist['PlexPluginClass'].lower() if 'PlexPluginClass' in plist else 'content'
self.ignore = 'PlexPluginDevMode' in plist and plist['PlexPluginDevMode'] == '1'
self.plugin_class = plist.get('PlexPluginClass', 'Channel')
if self.plugin_class == 'Agent':
self.ignore = True
if Core.storage.link_exists(self.path):
Log("Plug-in bundle with identifier '%s' is a symbolic link, and will be ignored.", self.identifier)
self.ignore = True
@property
def has_services(self):
for key in ('Services', 'ServiceSets', 'OldServices'):
for service_type in self.service_dict[self.identifier][key]:
if len(self.service_dict[self.identifier][key][service_type]) > 0:
return True
return False
class PluginUpdateService(object):
def __init__(self):
self.bundle_name = self.splitall(Core.bundle_path)[-1]
self.name = self.bundle_name.split('.bundle')[0]
Log(u"Starting the {} Install Service".format(self.name))
self.plugins_path = Core.storage.join_path(Core.app_support_path, 'Plug-ins')
self.bundle = BundleInfo(Core.bundle_path)
self.identifier = self.bundle.identifier
self.stage = Core.storage.data_item_path('Stage')
self.stage_path = Core.storage.join_path(self.stage, self.identifier)
self.inactive = Core.storage.data_item_path('Deactivated')
self.archive_url = u'https://github.com/{}/archive/{}.zip'
self.commits_url = u'https://api.github.com/repos/{}/commits/{}'
self.release_url = u'https://api.github.com/repos/{}/releases/{}'
self.temp_info = dict()
self.update_info = dict()
self.current_info = dict()
try:
Core.storage.remove_tree(self.stage)
except:
            Log.Error("Unable to remove staging root")
Core.storage.make_dirs(self.stage)
try:
Core.storage.remove_tree(self.inactive)
except:
Log.Error("Unable to remove inactive root")
Core.storage.make_dirs(self.inactive)
if HISTORY_KEY.format(self.name) in Dict:
self.history = Dict[HISTORY_KEY.format(self.name)]
else:
self.history = list()
self.history_lock = Thread.Lock()
self.setup_current_info()
def info_record(self, action, branch='master', tag=None, version=None, notes=None):
info = dict()
info[IDENTIFIER_KEY] = self.identifier
info[DATE_KEY] = Datetime.Now()
info[ACTION_KEY] = action
info[BRANCH_KEY] = branch
if notes:
info[NOTES_KEY] = notes
if tag:
info[TAG_KEY] = tag
if version:
info[VERSION_KEY] = version
return info
def add_history_record(self, action, branch='master', tag=None, version=None, notes=None):
info = self.info_record(action, branch, tag, version, notes)
try:
self.history_lock.acquire()
self.history.append(info)
Dict[HISTORY_KEY.format(self.name)] = self.history
Dict.Save()
finally:
self.history_lock.release()
def read_history_record(self):
ident_history = list()
for item in self.history:
if item[IDENTIFIER_KEY] == self.identifier:
ident_history.append(item)
return ident_history
def read_last_history_record(self):
record = self.read_history_record()
if not record:
return False
record.reverse()
return record[0]
def setup_current_info(self):
self.current_info.clear()
record = self.read_last_history_record()
if record:
self.current_info.update({'date': record[DATE_KEY], 'branch': record[BRANCH_KEY]})
if NOTES_KEY in record.keys():
self.current_info.update({'notes': record[NOTES_KEY]})
if VERSION_KEY in record.keys():
self.current_info.update({'version': record[VERSION_KEY]})
if TAG_KEY in record.keys():
self.current_info.update({'tag': record[TAG_KEY]})
return bool(self.current_info)
def splitall(self, path):
allparts = list()
while True:
parts = split_path(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
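    # For illustration: splitall('/a/b/c') returns ['/', 'a', 'b', 'c'] and
    # splitall('a/b/c') returns ['a', 'b', 'c'].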
def copytree(self, src, dst):
if not Core.storage.file_exists(dst):
Log(u"Creating dir at '{}'".format(dst))
Core.storage.make_dirs(dst)
Log(u"Recursively copying contents of '{}' into '{}'".format(src, dst))
for item in Core.storage.list_dir(src):
s = Core.storage.join_path(src, item)
d = Core.storage.join_path(dst, item)
if Core.storage.dir_exists(s):
Log(u"Copying '{}' into '{}'".format(s, d))
self.copytree(s, d)
else:
Log(u"Copying with copy2 '{}' into '{}'".format(s, d))
shutil.copy2(s, d)
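    # datetime_to_utc converts a naive local datetime to UTC by applying the
    # current offset between Datetime.Now() and Datetime.UTCNow(); it assumes
    # that offset has not changed between `dt` and the time of the call.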
def datetime_to_utc(self, dt):
n = Datetime.Now().replace(microsecond=0)
nutc = Datetime.UTCNow().replace(microsecond=0)
if n < nutc:
return dt + (nutc - n)
elif n == nutc:
return dt
return dt - (n - nutc)
@property
def setup_stage(self):
Log(u"Setting up staging area for {} at {}".format(self.identifier, self.stage_path))
Core.storage.remove_tree(self.stage_path)
Core.storage.make_dirs(self.stage_path)
return self.stage_path
def unstage(self):
Log(u"Unstaging files for {} (removing {})".format(self.identifier, self.stage_path))
Core.storage.remove_tree(self.stage_path)
def cleanup(self):
inactive_path = Core.storage.join_path(self.inactive, self.identifier)
if Core.storage.dir_exists(inactive_path):
Log(u"Cleaning up after {} (removing {})".format(self.identifier, inactive_path))
Core.storage.remove_tree(inactive_path)
def clean_old_bundle(self):
stage_paths = list()
root = self.bundle_name
stage_path = self.stage_path.lstrip('\\\?')
bundle_path = Core.storage.abs_path(self.bundle.path).lstrip('\\\?')
stage_index = int([i for i, l in enumerate(self.splitall(stage_path)) if l == self.identifier][1])
bundle_index = int([i for i, l in enumerate(self.splitall(bundle_path)) if l == root][0])
for dirpath, dirname, filenames in Core.storage.walk(stage_path):
for f in filenames:
filepath = Core.storage.join_path(stage_path, dirpath, f).lstrip('\\\?')
filepaths = self.splitall(filepath)[stage_index:]
stage_paths.append(Core.storage.join_path(root, *filepaths[1:]))
for dirpath, dirname, filenames in Core.storage.walk(bundle_path):
for f in filenames:
filepath = Core.storage.join_path(bundle_path, dirpath, f).lstrip('\\\?')
filepaths = self.splitall(filepath)[bundle_index:]
if Core.storage.join_path(root, *filepaths[1:]) not in stage_paths:
old_item_path = Core.storage.abs_path(Core.storage.join_path(self.plugins_path, root, *filepaths[1:]))
Log(u"File/Folder does not exists in current Version. Attempting to remove '{}'".format(old_item_path))
try:
if Core.storage.dir_exists(old_item_path):
Core.storage.remove_tree(old_item_path)
elif Core.storage.file_exists(old_item_path):
Core.storage.remove(old_item_path)
else:
Log.Warn(u"Cannot Remove Old '{}' file/folder, does not exists.".format(old_item_path))
except:
Log.Exception(u"Error Removing Old '{}' file/folder".format(old_item_path))
def activate(self, fail_count=0):
final_path = Core.storage.join_path(self.plugins_path, self.bundle_name)
if not Core.storage.dir_exists(self.stage_path):
Log(u"Unable to find stage for {}".format(self.identifier))
return False
Log(u"Activating a new installation of {}".format(self.identifier))
try:
if not Core.storage.dir_exists(final_path):
Core.storage.rename(self.stage_path, final_path)
else:
self.copytree(self.stage_path, final_path)
except:
Log.Exception(u"Unable to activate {} at {}".format(self.identifier, final_path))
if fail_count < 5:
Log.Info("Waiting 2s and trying again")
Thread.Sleep(2)
return self.activate(fail_count + 1)
else:
Log.Info("Too many failures - returning")
return False
return True
def install_zip_from_url(self, url):
stage_path = self.setup_stage
try:
archive = Archive.Zip(HTTP.Request(url, cacheTime=0))
except:
Log(u"Unable to download archive for {}".format(self.identifier))
self.unstage()
return False
        if archive.Test() is not None:
Log(u"The archive of {} is invalid - unable to continue".format(self.identifier))
self.unstage()
return False
try:
for archive_name in archive:
parts = archive_name.split('/')[1:]
if parts[0] == '' and len(parts) > 1:
parts = parts[1:]
if len(parts) > 1 and parts[0] == 'Contents' and len(parts[-1]) > 0 and parts[-1][0] != '.':
file_path = Core.storage.join_path(stage_path, *parts)
dir_path = Core.storage.join_path(stage_path, *parts[:-1])
if not Core.storage.dir_exists(dir_path):
Core.storage.make_dirs(dir_path)
Core.storage.save(file_path, archive[archive_name])
Log(u"Extracted {} to {} for {}".format(parts[-1], dir_path, self.identifier))
else:
Log(U"Not extracting {}".format(archive_name))
except:
Log(u"Error extracting archive of {}".format(self.identifier))
Log(Plugin.Traceback())
self.unstage()
return False
finally:
archive.Close()
self.clean_old_bundle()
if not self.activate():
Log.Critical(u"Unable to activate {}".format(self.identifier))
self.unstage()
return False
self.unstage()
self.cleanup()
return True
def install(self, url, action, branch='master', tag=None, version=None, notes=None):
Log(u"Preforming Update of {}".format(self.identifier))
if not self.install_zip_from_url(url):
return False
# add install info to history record
self.add_history_record(action, branch, tag, version, notes)
# Check whether this bundle contains services & instruct it to reload if necessary
#if self.bundle.has_services:
#self.reload_services()
Log("Installation of {} complete".format(self.identifier))
return True
def get_install_info(self, repo, branch='master', tag=None):
url = self.release_url.format(repo, tag) if tag else self.commits_url.format(repo, branch)
Log(u"Fetching {} update info from '{}'".format(self.identifier, url))
Log(u"CHECK_INTERVAL = '{}'".format(Datetime.Delta(seconds=CHECK_INTERVAL)))
try:
info = JSON.ObjectFromURL(url, cacheTime=CHECK_INTERVAL, timeout=5)
if tag:
date = Datetime.ParseDate(info['published_at'][:-1], "%Y-%m-%dT%H:%M:%S")
message = info['body']
zipId = info['tag_name']
version = zipId
else:
date = Datetime.ParseDate(info['commit']['author']['date'][:-1], "%Y-%m-%dT%H:%M:%S")
message = info['commit']['message']
zipId = branch
version = str(date)
self.temp_info.update({'date': date, 'notes': message, 'branch': branch, 'zipId': zipId, 'version': version})
Log("Successfully retrieved Github info >>>")
Log(u"branch='{}', date='{}', version='{}', zipId='{}'".format(
self.temp_info['branch'], self.temp_info['date'], self.temp_info['version'], self.temp_info['zipId']))
except:
Log.Exception(u"Error retrieving {} Github info from {}".format(self.identifier, url))
return False
return bool(self.temp_info)
def is_update_available(self, repo, branch='master', tag=None):
if not self.get_install_info(repo, branch, tag):
Log(u"Unable to check update {} because it has no {}".format(self.identifier, 'releases' if tag else 'commits'))
return False
if not self.temp_info:
Log(u"Unable to check update {} because temp_info is empty".format(self.identifier))
return False
if 'init_run' in Dict:
date = Dict['init_run']
else:
date = Datetime.UTCNow().replace(microsecond=0)
Dict['init_run'] = date
Dict.Save()
#Log(u"Is Repo datetime '{}' > init_run datetime '{}'? If so then present the update function".format(self.temp_info['date'], date))
#Log(u"Current Contents of update_info = '{}'".format(self.update_info))
if self.temp_info['date'] > date:
self.update_info.update(self.temp_info.copy())
return bool(self.update_info)
def update(self, repo, branch='master', tag=None):
if not self.update_info:
try: return ObjectContainer(header=u'{}'.format(L('updater.error')), message=u'Unable to install Update')
except: return
url = self.archive_url.format(repo, branch if tag == branch else tag)
tag = tag if tag != branch else None
version = self.update_info['version']
        action = 'Perform Update'
if not self.install(url, action, branch, tag, version, self.update_info['notes']):
try: return ObjectContainer(header=u'{}'.format(L('updater.error')), message=u'Unable to install Update')
except: return
# cleanup dict info
self.update_info.clear()
self.temp_info.clear()
Log(u"Update of {} to {} complete".format(self.identifier, version))
self.restart_channel()
try: return ObjectContainer(header=u'{}'.format(L('updater.success')), message=u'%s' % F('updater.updated', version))
except: return
def reload_services(self):
"""Reload this channels Service Code"""
try:
Log(u"Plug-in {} is currrently running with old service code - reloading".format(self.identifier))
HTTP.Request(u'http://127.0.0.1:32400/:/plugins/{}/reloadServices'.format(self.identifier), cacheTime=0, immediate=True)
except:
Log.Exception(u"Unable to reload services in {}".format(self.identifier))
# Reload system services
Core.services.load()
def restart_self_silently(self):
"""Try to restart the channel from Plex API"""
HTTP.Request(u'http://127.0.0.1:32400/:/plugins/{}/restart'.format(self.identifier), immediate=True)
def restart_channel(self):
"""Try to restart the channel by updating the timestamp of the Info.plist file"""
if Core.storage.file_exists(Core.plist_path):
Log(u"Restarting {}".format(self.identifier))
Core.storage.utime(Core.plist_path, None)
return True
Log(u"Failed to restart {} because of missing Info.plist file.".format(self.identifier))
return False
def item_last_modified(self, path, utc=False):
if Core.storage.file_exists(path):
ts = Core.storage.last_modified(path)
if utc:
return self.datetime_to_utc(Datetime.FromTimestamp(ts)).replace(microsecond=0)
return Datetime.FromTimestamp(ts).replace(microsecond=0)
return False
@property
def initial_run(self):
"""setup initial run status"""
self.init_datetime = self.item_last_modified(Core.plist_path, utc=True)
if not Dict['init_run']:
Log('{} initial run. Logging datetime into Dict[\'init_run\']'.format(self.name))
Dict['init_run'] = Datetime.UTCNow().replace(microsecond=0)
Dict.Save()
return False
# Check Info.plist for changes, file modified time should only change with updates or install
elif Dict['init_run'] < self.init_datetime:
Log(u"* Updating old init time {} to {}".format(Dict['init_run'], self.init_datetime))
Dict['init_run'] = self.init_datetime
return False
else:
Log(u"* Dict['init_run'] = '{}'".format(Dict['init_run']))
Log(u"* Info.plist last modified datetime.utc = '{}'".format(self.init_datetime))
return True
    def gui_update(self, prefix, oc, repo, branch='master', tag=None, list_view_clients=list()):
"""
Create route for updater, and check for the latest release or commit depending on branch or tag inputs.
        The update option will only appear when an update is available.
Change CHECK_INTERVAL to desired interval to allow for checking updates.
CHECK_INTERVAL = cache time for Github request URL.
        Requires 'icon-update.png' within the channel's Resources directory for the icon to display in the menu; otherwise the icon will be blank.
Input Help:
prefix (required)
Pass-in the channel's prefix
Example: prefix='/video/kissnetwork/updater'
oc (required)
Pass-in the channel's current ObjectContainer so the updater can be added as a DirectoryObject
repo (required)
Pass-in the channel's Github repository information
Example: repo='Twoure/KissNetwork.bundle'
branch (optional)
            Pass-in the channel's Github repository branch to track
Default: branch='master'
Example: branch='dev'
tag (optional)
Use tag if wanting to track Github releases
If tag is set, then branch is ignored...
Example: if branch='dev' and tag='latest' then the updater will only check for the latest release
list_view_clients (optional)
Pass in a list of Client.Platform values to not display the updater icon on
Example: list_view_clients=['Android', 'iOS'], will set icon to None for Android and iOS clients.
"""
Route.Connect(prefix, self.update)
if self.is_update_available(repo, branch, tag):
oc.add(DirectoryObject(
key=Callback(self.update, repo=repo, branch=branch, tag=self.update_info['zipId']),
title=u'%s' % F('updater.update_available', self.update_info['version']),
summary=u'{}\n{}'.format(L('updater.install'), self.update_info['notes']),
thumb=R('icon-update.png') if Client.Platform not in list_view_clients else None
))
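# A hypothetical wiring sketch (illustrative only; it assumes the Plex plug-in
# runtime globals used above and an `updater` instance of this class; the
# prefix and repo values are taken from the docstring, not from a real channel):
#
#   @route('/video/kissnetwork/menu')
#   def MainMenu():
#       oc = ObjectContainer(title2='KissNetwork')
#       updater.gui_update('/video/kissnetwork/updater', oc,
#                          repo='Twoure/KissNetwork.bundle', branch='master')
#       return oc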
|
|
#! /usr/bin/env python
import operator
################################################################################
def evaluate(source, local):
"Execute all math operations found in the source."
for expression in expressions(source):
local['_'] = tokens(expression).evaluate(local)
def expressions(source):
"Separate expressions and yield each individually."
lines = source.replace('\r\n', '\n').replace('\r', '\n').split('\n')
uncommented = map(lambda line: line.split('#', 1)[0], lines)
for line in uncommented:
if line and not line.isspace():
for expression in line.split(';'):
yield expression
def tokens(string):
"Build an expression tree by tokenizing expression."
evaluator = _tokens(string)
if isinstance(evaluator, Operation) and \
evaluator._Operation__symbol == Operation.ASSIGNMENT:
return evaluator
return Print(evaluator)
def _tokens(string):
"Private module function: recursively builds a tree."
expression = string.strip()
if not expression:
raise SyntaxError('empty expression')
divisions = Operation.split(expression)
if divisions:
left, symbol, right = divisions
return Operation(_tokens(left), symbol, _tokens(right))
if len(expression.split()) > 1:
raise SyntaxError(expression)
if expression.startswith('0x'):
return Constant(int(expression[2:], 16))
if expression.startswith('0d'):
return Constant(int(expression[2:], 10))
if expression.startswith('0o'):
return Constant(int(expression[2:], 8))
if expression.startswith('0q'):
return Constant(int(expression[2:], 4))
if expression.startswith('0b'):
return Constant(int(expression[2:], 2))
if expression.isdigit():
return Constant(int(expression))
if expression.isidentifier():
return Variable(expression)
raise SyntaxError(expression)
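# Example of the syntax accepted above (illustrative): expressions are separated
# by newlines or ';', '#' starts a comment, '->' assigns the left-hand value to
# the right-hand variable (without printing), and '/' is floor division.
#
#   0x1F + 0b101 -> x    # binds x = 36
#   x * 2                # prints 72
#   _ / 5                # '_' holds the last result, so this prints 14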
################################################################################
class Expression:
"Abstract class for Expression objects."
def __init__(self):
"Initialize the Expression object."
raise NotImplementedError()
def evaluate(self, bindings):
"Calculate the value of this object."
raise NotImplementedError()
def __repr__(self):
"Return a representation of this object."
klass = self.__class__.__name__
private = '_{}__'.format(klass)
args = []
for name in vars(self):
if name.startswith(private):
key = name[len(private):]
value = getattr(self, name)
args.append('{}={!r}'.format(key, value))
return '{}({})'.format(klass, ', '.join(args))
################################################################################
class Constant(Expression):
"Class for storing all math constants."
def __init__(self, value):
"Initialize the Constant object."
self.__value = value
def evaluate(self, bindings):
"Calculate the value of this object."
return self.__value
################################################################################
class Variable(Expression):
"Class for storing all math variables."
def __init__(self, name):
"Initialize the Variable object."
self.__name = name
def evaluate(self, bindings):
"Calculate the value of this object."
if self.__name not in bindings:
raise NameError(self.__name)
return bindings[self.__name]
################################################################################
class Operation(Expression):
"Class for executing math operations."
ASSIGNMENT = '->'
OPERATORS = {ASSIGNMENT: lambda a, b: None,
'and': lambda a, b: a and b,
'or': lambda a, b: a or b,
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'/': operator.floordiv,
'%': operator.mod,
'**': operator.pow,
'&': operator.and_,
'|': operator.or_,
'^': operator.xor,
'>>': operator.rshift,
'<<': operator.lshift,
'==': operator.eq,
'!=': operator.ne,
'>': operator.gt,
'>=': operator.ge,
'<': operator.lt,
'<=': operator.le}
def __init__(self, left, symbol, right):
"Initialize the Operation object."
self.__left = left
self.__symbol = symbol
self.__right = right
def evaluate(self, bindings):
"Calculate the value of this object."
if self.__symbol == self.ASSIGNMENT:
if not isinstance(self.__right, Variable):
raise TypeError(self.__right)
key = self.__right._Variable__name
value = self.__left.evaluate(bindings)
bindings[key] = value
return value
return self.__operate(bindings)
def __operate(self, bindings):
"Execute operation defined by symbol."
if self.__symbol not in self.OPERATORS:
raise SyntaxError(self.__symbol)
a = self.__left.evaluate(bindings)
b = self.__right.evaluate(bindings)
return self.OPERATORS[self.__symbol](a, b)
__operators = sorted(OPERATORS, key=len, reverse=True)
@classmethod
def split(cls, expression):
"Split expression on rightmost symbol."
tail = cls.__split(expression)
if tail:
symbol, right = tail
return expression[:-sum(map(len, tail))], symbol, right
@classmethod
def __split(cls, expression):
"Private class method: help with split."
for symbol in cls.__operators:
if symbol in expression:
right = expression.rsplit(symbol, 1)[1]
tail = cls.__split(right)
if tail is None:
return symbol, right
return tail
################################################################################
class Print(Expression):
"Class for printing all math results."
def __init__(self, expression):
"Initialize the Print object."
self.__expression = expression
def evaluate(self, bindings):
"Calculate the value of this object."
value = self.__expression.evaluate(bindings)
print(value)
return value
################################################################################
def test():
"Run a simple demo that shows evaluator's capability."
from sys import exc_info, stderr
from traceback import format_exception_only
local = {}
while True:
try:
evaluate(input('>>> '), local)
except EOFError:
break
except:
stderr.write(format_exception_only(*exc_info()[:2])[-1])
if __name__ == '__main__':
test()
|
|
# -*- coding: utf-8 -*-
"""
===============================
eeglcf.py
===============================
This is the main file from the eeglcf package.
"""
import numpy as np
import scipy.signal as sp_signal
# Define here the dimensions for readability of the algorithm
_comp_dim = 0 # Components dimension
_t_dim = 1 # Time dimension
_ev_dim = 2 # Events dimension
def lcf(comp0, comp1=None,
integrator_width=20, detection_th=1.,
dilator_width=10, transition_width=10):
"""
Localized Component Filtering
Detects the location of artifacts in the time representation of source
components and mixes them with an alternative (cleaned) version.
Parameters
----------
comp0 : array
Array containing the original components, which will be analysed in
search of noise. It must be a 3D array with shape CxTxE, where C, T and
E are the number of components, time samples and recorded events
respectively.
comp1 : array, optional
Array containing the alternative (cleaned) components. It must have
the same shape as *comp0*. If not specified, an all 0s alternative
components will be used (this is equivalent to component rejection).
integrator_width : int > 0, optional
Width (in number of samples) of the integration
detection_th : float > 0, optional
Detection threshold
dilator_width : int > 0, optional
Width (in number of samples) of the dilator
transition_width : int > 0, optional
Width (in number of samples) of the transition window
Returns
-------
comp2 : array
Array with the resulting components. This will have the same shape
as *comp0*.
"""
# 1. Compute features
features = _features(comp0)
# 2. Integrate features
integrated_features = _integrate(integrator_width, *features)
# 3. Classify features
ctr_signal = _classify(detection_th, dilator_width, *integrated_features)
# 4. Mix components
return _mix(transition_width, comp0, ctr_signal, comp1)
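# A minimal usage sketch (illustrative only). It assumes NumPy plus a SciPy
# version that still provides ``scipy.signal.hann`` (used by ``_mix`` below);
# the shapes and random data are arbitrary.
#
#   >>> import numpy as np
#   >>> comp0 = np.random.RandomState(0).randn(4, 1000, 8)  # C=4, T=1000, E=8
#   >>> comp2 = lcf(comp0)  # comp1 omitted, so noisy segments are zeroed out
#   >>> comp2.shape == comp0.shape
#   True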
def _chk_parameters(comp0=None, comp1=None,
integrator_width=None, detection_th=None,
dilator_width=None, transition_width=None):
"""
Checks input parameters.
Parameters
----------
comp0 : array
A 3D array with shape CxTxE, where C, T and E are the number of
components, time samples and recorded events respectively.
comp1 : array
A 3D array with shape CxTxE, where C, T and E are the number of
components, time samples and recorded events respectively. If both
        *comp0* and *comp1* are provided, they must have the same shape.
integrator_width : int > 0, optional
Width (in number of samples) of the integration.
detection_th : {float, int} > 0, optional
Detection threshold.
dilator_width : int > 0, optional
Width (in number of samples) of the dilator.
transition_width : int > 0, optional
Width (in number of samples) of the transition window.
"""
# Check components comp0 and comp1
if comp0 is not None:
if not isinstance(comp0, np.ndarray):
return TypeError('comp0 must be {}; '
'is {} instead'
.format(type(np.ndarray(0)), type(comp0)))
if comp0.ndim != 3:
return ValueError('comp0 must be 3D array; '
'is {}D instead'.format(comp0.ndim))
if comp1 is not None:
if not isinstance(comp1, np.ndarray):
return TypeError('comp1 must be {}; '
'is {} instead'
.format(type(np.ndarray(0)), type(comp1)))
if comp1.ndim != 3:
return ValueError('comp1 must be 3D array; '
'is {}D instead'.format(comp1.ndim))
if (comp0 is not None) and (comp1 is not None):
if comp0.shape != comp1.shape:
return ValueError('comp0 and comp1 must have equal shape')
# Check integrator_width
if integrator_width is not None:
if not isinstance(integrator_width, int):
return TypeError('integrator_width must be {}; '
'is {} instead'
.format(type(1), type(integrator_width)))
if not integrator_width > 0:
return ValueError('integrator_width must be > 0')
# Check detection_th
if detection_th is not None:
if not isinstance(detection_th, (float, int)):
return TypeError('detection_th must be {}; '
'is {} instead'
.format(type(0.), type(detection_th)))
if not detection_th > 0:
return ValueError('detection_th must be > 0')
# Check dilator_width
if dilator_width is not None:
if not isinstance(dilator_width, int):
return TypeError('dilator_width must be {}; '
'is {} instead'
.format(type(0), type(dilator_width)))
if not dilator_width > 0:
            return ValueError('dilator_width must be > 0')
# Check transition_width
if transition_width is not None:
if not isinstance(transition_width, int):
return TypeError('transition_width must be {}; '
'is {} instead'
.format(type(0), type(transition_width)))
if not transition_width > 0:
return ValueError('transition_width must be > 0')
def _features(comp0):
"""
Computes features characterizing the presence of noise in the components.
Parameters
----------
comp0 : array
Array containing the original components, which will be analysed in
search of noise. It must be a 3D array with shape CxTxE, where C, T and
E are the number of components, time samples and recorded events
respectively.
Returns
-------
v : array
Normalized voltages.
dvdt : array
Normalized first backward time derivative of the voltage.
"""
def zscore(x):
"""
Computes (in place) a robust zscore of the data, using the trimmed
mean and std.
Parameters
----------
x : array
Data with shape equal to comp0.shape
Returns
-------
y : array
Normalized data, with shape equal to comp0.shape
"""
# Compute mean and std
try:
m = x.mean(axis=(_t_dim, _ev_dim), keepdims=True)
except IndexError:
raise _chk_parameters(comp0=comp0)
s = x.std(axis=(_t_dim, _ev_dim), keepdims=True)
# A masked array will be used to remove outliers
masked_x = np.ma.array(x, mask=np.abs((x - m) / s) > 3)
del m, s
# Recompute mean and std. As for now, masked arrays don't support
# a tuple of axes. Hence, the array has to be reshaped.
# assert(_comp_dim is 0)
masked_x = masked_x.reshape([masked_x.shape[0],
np.prod(masked_x.shape[1:])])
m = masked_x.mean(axis=1)
s = masked_x.std(axis=1)
del masked_x
# Now, mean and std have to be reshaped to match x
        shape_ms = np.ones(x.ndim, dtype=int)  # Shape of mean and std vectors
shape_ms[_comp_dim] = x.shape[_comp_dim]
m = m.view(np.ndarray).reshape(shape_ms)
s = s.view(np.ndarray).reshape(shape_ms)
del shape_ms
# Compute final z-scores
return (x - m) / s
# Absolute voltage peaks
try:
abs_comp0 = np.abs(comp0)
except TypeError:
raise _chk_parameters(comp0=comp0)
v = zscore(abs_comp0)
# Time derivative of voltage
aux_shape = list(comp0.shape)
aux_shape[_t_dim] = 1
dvdt0 = zscore(np.abs(np.diff(comp0, axis=_t_dim)))
# Trailing zeros are added to make dvdt.shape == v.shape
dvdt = np.concatenate((np.zeros(aux_shape), dvdt0), _t_dim)
# comp0 dimensionality has to be checked explicitly, as ND with N>3 runs
# unnoticed through the function
if comp0.ndim > 3:
raise _chk_parameters(comp0=comp0)
# Return features
return v, dvdt
def _integrate(integrator_width, *args, **kwargs):
"""
Smooth features response.
Parameters
----------
integrator_width : int > 0
Width (in number of samples) of the integration
feats : {keyworded: tuple of numpy.ndarray, non-keyworded: numpy.ndarray}
Features to be integrated can be passed as non-keyworded arguments in
the shape of arrays or as a keyworded argument *feats* in the shape of
a tuple of arrays.
All the arrays must have equal shape CxTxE, with C the number of
components, T the number of time samples and E the number of recorded
events.
Returns
-------
*res : arrays
Arrays (as many as passed to the function) containing the integrated
features
"""
if len(args) > 0:
feats = args
if 'feats' in kwargs:
raise KeyError('_integrate() got multiple feature definitions. '
'Features must be passed as non-keyworded arguments'
' OR as a list under the keyword "feats".')
        if len(kwargs) != 0:
raise KeyError('_integrate() got unexpected keyword arguments {}'
.format(kwargs.keys()))
elif 'feats' in kwargs:
if not isinstance(kwargs['feats'], tuple):
raise TypeError('feats keyword must be {}; is {} instead'
.format(type(tuple()), type(kwargs['feats'])))
feats = kwargs['feats']
        if len(kwargs) != 1:
            keys = [k for k in kwargs if k != 'feats']
raise KeyError('_integrate() got unexpected keyword arguments {}'
.format(keys))
else:
raise KeyError('No features parameters entered. At least one feature '
'has to be specified.')
# integrator_width has to be checked explicitly, since np.hanning doesn't
# raise an exception for non-int values.
if not isinstance(integrator_width, int):
# Generate the error from _chk_parameters for consistency
raise _chk_parameters(integrator_width=integrator_width)
# Allocate integrated features
integrated_feats = []
# Allocate the integrator as a Hanning window
integrator = np.hanning(integrator_width)
    # Allocate a corrector to circumvent border effects
win_ones = np.ones(feats[0].shape[_t_dim])
try:
corrector = np.convolve(win_ones, integrator, 'same')
except ValueError:
raise _chk_parameters(integrator_width=integrator_width)
# Integrate each feature individually
fcn = lambda x: np.convolve(x, integrator, 'same')/corrector
try:
for feat_i in feats:
# Integrate
integrated_feats.append(
np.apply_along_axis(fcn, axis=_t_dim, arr=feat_i)
)
# features dimensionality has to be checked explicitly, as ND
# values with N != 3 run unnoticed in the code
if np.ndim(feat_i) != 3:
raise ValueError('features must be 3D; '
'some are {}D instead'
.format(np.ndim(feat_i)))
# Some debugging plots
# n = np.linspace(0, 1, feat_i.shape[_t_dim])
# plt.plot(n, integrated_feats[-1][0, :, 0], n, feat_i[0, :, 0])
# plt.show()
except ValueError: # Bad feature
if not isinstance(feat_i, np.ndarray):
raise TypeError('features must be {}; some are {} instead'
.format(type(np.zeros(0)), type(feat_i)))
if np.ndim(feat_i) != 3:
raise ValueError('features must be 3D arrays; '
'some are {}D instead'.format(np.ndim(feat_i)))
# Return a tuple instead of a list
return tuple(integrated_feats)
def _classify(detection_th, dilator_width, *args, **kwargs):
"""
Identifies noisy segments and builds a control signal for the mixer.
Parameters
----------
detection_th : float > 0, optional
Detection threshold
dilator_width : int > 0, optional
Width (in number of samples) of the dilator
feats : {keyworded: tuple of numpy.ndarray, non-keyworded: numpy.ndarray}
Features to be classified can be passed as non-keyworded arguments in
the shape of arrays or as a keyworded argument *feats* in the shape of
a tuple of arrays.
All the arrays must have equal shape CxTxE, with C the number of
components, T the number of time samples and E the number of recorded
events.
Returns
-------
ctrl : array
Control signal with shape CxTxE. Noisy segments are denoted by '1'.
"""
if len(args) > 0:
feats = args
if 'feats' in kwargs:
            raise KeyError('_classify() got multiple feature definitions. '
'Features must be passed as non-keyworded arguments'
' OR as a list under the keyword "feats".')
        if len(kwargs) != 0:
            raise KeyError('_classify() got unexpected keyword arguments {}'
.format(kwargs.keys()))
elif 'feats' in kwargs:
if not isinstance(kwargs['feats'], tuple):
raise TypeError('feats keyword must be {}; is {} instead'
.format(type(tuple()), type(kwargs['feats'])))
feats = kwargs['feats']
        if len(kwargs) != 1:
            keys = [k for k in kwargs if k != 'feats']
            raise KeyError('_classify() got unexpected keyword arguments {}'
.format(keys))
else:
raise KeyError('No features parameters entered. At least one feature '
'has to be specified.')
# detection_th has to be explicitly checked. The code doesn't raise an
# exception with an unsupported value
if not isinstance(detection_th, (float, int)) or detection_th <= 0:
# Raise from _chk_parameters for uniformity
raise _chk_parameters(detection_th=detection_th)
# Apply the specified threshold to the features
ctrl_signal = feats[0] > detection_th
# An "or" operator is used to combine the result of different features
for feat_i in feats[1:]:
ctrl_signal |= (feat_i > detection_th)
# Events with more than 75% of noisy time are fully labeled as noise
try:
tsum_ctrl_signal = ctrl_signal.sum(_t_dim, keepdims=True)
except AttributeError:
# Check controlled parameter errors
for feat_i in feats:
if not isinstance(feat_i, np.ndarray):
raise TypeError('features must be {}; some are {} instead'
.format(type(np.zeros(0)), type(feat_i)))
# At this point features are np.ndarrays. Still, they have to be checked
# explicitly because ND arrays with N>3 are not detected by the code.
for feat_i in feats:
if not feat_i.ndim == 3:
raise ValueError('features must be 3D; some are {}D instead'
.format(feat_i.ndim))
rm_ev_bool = (tsum_ctrl_signal.astype(float)
/ ctrl_signal.shape[_t_dim]) > .75
rm_ev = np.where(rm_ev_bool)
rm_ev_slice = [slice(None)]*ctrl_signal.ndim
rm_ev_slice[_ev_dim] = rm_ev[_ev_dim]
rm_ev_slice[_comp_dim] = rm_ev[_comp_dim]
    ctrl_signal[tuple(rm_ev_slice)] = True  # index with a tuple (sequence indexing is invalid in modern NumPy)
del rm_ev_slice, rm_ev
    # Components with more than 75% of noisy events are completely labeled as
    # noise
rm_c = np.where(
(rm_ev_bool.sum(_ev_dim, keepdims=True) /
float(ctrl_signal.shape[_ev_dim])) > .75)
del rm_ev_bool
rm_ic_slice = [slice(None)]*ctrl_signal.ndim
rm_ic_slice[_comp_dim] = rm_c[_comp_dim]
    ctrl_signal[tuple(rm_ic_slice)] = True
del rm_c, rm_ic_slice
# Dilate the detected zones to account for the mixer transition equation
# dilator_width has to be explicitly checked. np.ones(n) doesn't raise an
# exception for non-int or negative values
err = _chk_parameters(dilator_width=dilator_width)
if err is not None:
raise err
dilator = np.ones(dilator_width)
ctrl_signal = np.apply_along_axis(
lambda x: np.convolve(x.astype(int), dilator, 'same'),
axis=_t_dim, arr=ctrl_signal)
# Binarize signal
ctrl_signal[ctrl_signal > 0] = 1
return ctrl_signal
def _mix(transition_width, comp0, ctrl_signal, comp1):
"""
Mixes two components according to a control signal.
Parameters
----------
transition_width : int > 0, optional
Width (in number of samples) of the transition window
comp0 : array
Array containing the original components, which will be analysed in
search of noise. It must be a 3D array with shape CxTxE, where C, T and
E are the number of components, time samples and recorded events
respectively.
ctrl_signal : array
Binary control signal with values equal to 0 and 1 where *comp0* and
comp1 are to be used respectively.
comp1 : array, optional
Array containing the alternative (cleaned) components. It must have
the same shape as *comp0*. If not specified, an all 0s alternative
components will be used (this is equivalent to component rejection).
Returns
-------
comp2 : array
Resulting mixed component with shape equal to comp0.shape.
"""
# Check controlled parameter errors. transition_width has to be explicitly
    # checked because sp_signal.hann doesn't raise an error with non-int
# values
if not isinstance(transition_width, int):
# Raise from _chk_parameters for uniformity
raise _chk_parameters(transition_width=transition_width)
# Allocate normalized transition window
trans_win = sp_signal.hann(transition_width, True)
trans_win /= trans_win.sum()
# Pad extremes of control signal
try:
pad_width = [tuple([0, 0])]*ctrl_signal.ndim
except AttributeError:
# Check ctrl_signal parameter errors
if not isinstance(ctrl_signal, np.ndarray):
raise TypeError('ctrl_signal must be {}; is {} instead'
.format(type(np.zeros(0)), type(ctrl_signal)))
    pad_size = transition_width // 2 + 1  # integer division: used as a pad width and slice index
pad_width[_t_dim] = (pad_size, pad_size)
# Padded control signal
pad_ctrl = np.pad(ctrl_signal, tuple(pad_width), mode='edge')
del pad_width
# Combine the transition window and the control signal to build a final
# transition-control signal, which could be applied to the components
fcn = lambda x: np.convolve(x, trans_win, 'same')
try:
transition_ctrl = np.apply_along_axis(fcn, axis=_t_dim, arr=pad_ctrl)
except ValueError:
raise _chk_parameters(transition_width=transition_width)
del pad_ctrl
rm_pad_slice = [slice(None)]*ctrl_signal.ndim
rm_pad_slice[_t_dim] = slice(pad_size, -pad_size)
    transition_ctrl = transition_ctrl[tuple(rm_pad_slice)]
# Some debugging plots
# ctrl_signal.plot(rm_baseline=False, y_offset=2)
# plt.figure()
# ctrl_signal.based(transition_ctrl).plot(rm_baseline=False, y_offset=2)
# plt.show()
del rm_pad_slice, pad_size
# Apply mixer
    # comp0 has to be checked explicitly, as numeric types do not raise an
# exception
if not isinstance(comp0, np.ndarray):
raise _chk_parameters(comp0=comp0)
try:
mix_data = comp0*(1 - transition_ctrl)
except ValueError:
if ctrl_signal.shape != comp0.shape:
raise ValueError('ctrl_signal and components must have equal '
'shape.')
# If comp1 is not specified, return as it is
if comp1 is None:
return mix_data
# ... else, mix the signal also with comp1.
    # comp1 has to be checked explicitly, as numeric types do not raise an
# exception
if not isinstance(comp1, np.ndarray):
raise _chk_parameters(comp1=comp1)
try:
return mix_data + comp1*transition_ctrl
except ValueError:
raise _chk_parameters(comp0=comp0, comp1=comp1)
|
|
# vim:fileencoding=utf-8:ts=2:sw=2:expandtab
#
# Copyright 2013 AppCove, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import collections
import inspect
import datetime
from ..JSON import JSON_Encode, JSON_Decode
from ..Util import *
from ..Date import TextToDate, DateToText
###############################################################################
class FieldFlag(FlagType):
InsertRead = 2**0
InsertWrite = 2**1
InsertRequired = 2**2
UpdateRead = 2**3
UpdateWrite = 2**4
UpdateRequired = 2**5
Read = InsertRead | UpdateRead
Write = InsertWrite | UpdateWrite
Required = InsertRequired | UpdateRequired
Virtual = 2**6
# Only to expose in __all__
InsertRead = FieldFlag.InsertRead
InsertWrite = FieldFlag.InsertWrite
InsertRequired = FieldFlag.InsertRequired
UpdateRead = FieldFlag.UpdateRead
UpdateWrite = FieldFlag.UpdateWrite
UpdateRequired = FieldFlag.UpdateRequired
Read = FieldFlag.Read
Write = FieldFlag.Write
Required = FieldFlag.Required
Virtual = FieldFlag.Virtual
###############################################################################
class FieldValue():
__slots__ = 'Field', 'Error', 'Ignore', 'value'
def __init__(self, Field, Error, value):
'''
Field is a reference to the field object
Error is a reference to an ErrorList
Ignore, if set to True, will cause this value to be ignored
value is the value
'''
self.Field = Field
self.Error = Error
self.Ignore = False
self.value = value
def AddError(self, message, **formatargs):
'''
Accept keyword formatting arguments which will be merged with Label
'''
formatargs.update(Label=self.Field.Label)
self.Error.Add(message.format(**formatargs), Key=self.Field.Name)
###############################################################################
class FieldDescriptor():
'''
This is the base class for all field classes.
It is a descriptor.
When a subclass (CLASS, not INSTANCE) of this class is used as an attribute
in a subclass of RecordBase, instances are automatically created.
InsertValue and UpdateValue are placed into _SetValueMap by normal means.
all validation rules apply.
InsertDefault and UpdateDefault are used as defaults prior to starting validation
so they may be overwritten by validated _SetValueMap value
'''
Flags = FieldFlag()
# not-validated value when entering insert mode
InsertValue = Undefined
# already-validated value when entering insert mode
InsertDefault = Undefined
# not-validated value when entering update mode
UpdateValue = Undefined
  # already-validated value when entering update mode
UpdateDefault = Undefined
# Human friendly label for this field. Defaults to Name.
Label = None
AllowNone = False
AllowSQL = False
@property
def Name(self):
return self._Name
# A subclass may indeed decide to just make this a plain attribute
@property
def Label(self):
return self._Name
# Must pass field name
  # NOTE: this is only instantiated once at the time of parent class creation
def __init__(self, Name):
self._Name = Name
if self.Flags.Virtual and self.InsertDefault is not Undefined:
raise RuntimeError("{0}: The 'Virtual' flag must not be combined with an 'InsertDefault' value.".format(self.Label))
    if self.Flags.Virtual and self.UpdateDefault is not Undefined:
      raise RuntimeError("{0}: The 'Virtual' flag must not be combined with an 'UpdateDefault' value.".format(self.Label))
def __get__(self, record, owner):
# If accessed via class attribute then we want this field instance to be returned
if record is None:
return self
if record._Mode == 'Insert':
if not self.Flags.InsertRead:
raise AttributeError("Field '{0}' is not readable when Record._Mode == True".format(self.Label))
elif record._Mode == 'Update':
if not self.Flags.UpdateRead:
raise AttributeError("Field '{0}' is not readable when Record._Mode == False".format(self.Label))
else:
raise InvalidOperation('Object must be in Insert or Update mode to read any field: Mode={0}'.format(record._Mode))
if self._Name in record._SetValueMap:
return record._SetValueMap[self._Name]
else:
return record._CurValueMap[self._Name]
def __set__(self, record, value):
# If someone attempts to change the class attribute, that could mess up the internal
# state and we do not want that
if record is None:
raise AttributeError('{0}: Field attributes are read only'.format(self.Label))
if record._Mode == 'Insert':
if not self.Flags.InsertWrite:
raise AttributeError("Field '{0}' is not writable when Record._Mode == True".format(self.Label))
elif record._Mode == 'Update':
if not self.Flags.UpdateWrite:
raise AttributeError("Field '{0}' is not writable when Record._Mode == False".format(self.Label))
else:
raise InvalidOperation('{0}: Object must be in Insert or Update mode to write to any field: Mode={1}'.format(self.Label, record._Mode))
# Whatever the user sends this way is set to the _SetValueMap, period
# Validation will happen later.
record._SetValueMap[self._Name] = value
# This is used to detect changed fields during validation
if record._SetValueSet is not None:
record._SetValueSet.add(self._Name)
def __delete__(self, record):
raise AttributeError('{0}: Field attributes may not be deleted'.format(self.Label))
def Validate(self, record, fv):
raise NotImplementedError('{0}: Validate must be implemented in the subclass.'.format(self.Label))
def Serialize(self, record, value):
return value
def Unserialize(self, record, value):
return value
###############################################################################
class BooleanField(FieldDescriptor):
def Validate(self, record, fv):
fv.value = bool(fv.value)
#
# At this point we have a `bool` instance
#
return True
###############################################################################
class IntegerField(FieldDescriptor):
MinValue = None
MaxValue = None
AllowEmpty = False
def Validate(self, record, fv):
# Handle string input
if isinstance(fv.value, str):
fv.value = fv.value.strip()
if fv.value in ('', '0'):
if self.AllowEmpty:
fv.value = 0
else:
fv.AddError('{Label} is required.')
return False
else:
try:
fv.value = int(re.sub('[,$+]', '', fv.value))
except ValueError:
fv.AddError('{Label} must be an integer.')
return False
# Pass integer instance
elif isinstance(fv.value, int):
pass
# convert Float, Decimal
elif isinstance(fv.value, (float, Decimal)):
fv.value = int(fv.value)
# invalid type
else:
raise ValueError("{0}: Expected instance of (str, int, float, decimal.Decimal) instead of {1}".format(self.Label, repr(type(fv.value))))
#
# At this point we have a `int` instance
#
if not self.AllowEmpty and fv.value == 0:
fv.AddError('{Label} is required.')
return False
# MinValue?
if self.MinValue is not None and fv.value < self.MinValue:
fv.AddError('{Label} must not be less than {MinValue}', MinValue=self.MinValue)
return False
# MaxValue?
if self.MaxValue is not None and fv.value > self.MaxValue:
fv.AddError('{Label} must not be greater than {MaxValue}', MaxValue=self.MaxValue)
return False
# At this point return a valid `int` instance
return True
class StringField(FieldDescriptor):
Strip = True
Truncate = False
MinLength = None
MaxLength = None
AllowEmpty = False
RegexMatch = None #2-tuple of regex and error message
def Validate(self, record, fv):
if not isinstance(fv.value, str):
raise TypeError("On field `{0}`, fv.value must be an instance of `str`, not `{1}`".format(self.Name, repr(type(fv.value))))
#
# at this point we have a `str` instance
#
# Strip?
if self.Strip:
fv.value = fv.value.strip()
# Check for AllowEmpty or valid Default
if fv.value == '':
if self.AllowEmpty:
pass
else:
fv.AddError('{Label} is required.')
return False
# Truncate?
if self.Truncate and len(fv.value) > self.MaxLength:
fv.value = fv.value[0:self.MaxLength]
# MinLength?
if self.MinLength and len(fv.value) < self.MinLength and not (self.AllowEmpty and len(fv.value) == 0):
fv.AddError('{Label} must contain at least {MinLength} characters', MinLength=self.MinLength)
return False
# MaxLength?
if self.MaxLength and len(fv.value) > self.MaxLength:
fv.AddError('{Label} must not contain more than {MaxLength} characters', MaxLength=self.MaxLength)
return False
# RegexMatch?
if self.RegexMatch and not re.search(self.RegexMatch[0], fv.value):
fv.AddError(self.RegexMatch[1])
return False
# If we are here, return Success
return True
pass
class DateField(FieldDescriptor):
AllowEmpty = False
MinDate = None
MaxDate = None
def Validate(self, record, fv):
# Handle string input
if isinstance(fv.value, str):
fv.value = fv.value.strip()
if fv.value == '':
if self.AllowEmpty:
fv.value = None
else:
fv.AddError('{Label} is required.')
return False
else:
fv.value = TextToDate(fv.value)
if fv.value is None:
fv.AddError('{Label} must have a valid mm/dd/yyyy format.')
return False
    # Pass date instance
elif isinstance(fv.value, datetime.date):
pass
# invalid type
else:
raise ValueError("{0}: Expected instance of (str, datetime.date) instead of {1}".format(self.Label, repr(type(fv.value))))
    # Check again for None because an empty value may have come through and been converted to None
if fv.value is None:
if self.AllowNone:
return True
else:
raise ValueError("{0}: Internal validation of input resulted in None but (field.AllowNone == False)".format(self.Label,))
#
# At this point we have a `date` instance
#
# MinDate?
if self.MinDate is not None and fv.value < self.MinDate:
fv.AddError('{Label} must not be less than {MinDate}', MinDate=DateToText(self.MinDate))
return False
# MaxDate?
if self.MaxDate is not None and fv.value > self.MaxDate:
fv.AddError('{Label} must not be greater than {MaxDate}', MaxDate=DateToText(self.MaxDate))
return False
# At this point return a valid `date` instance
return True
class DateTimeField(FieldDescriptor):
def Validate(self, record, fv):
# Pass instance
if isinstance(fv.value, datetime.datetime):
pass
# invalid type
else:
raise ValueError("{0}: Expected instance of (str, datetime.date) instead of {1}".format(self.Label, repr(type(fv.value))))
return True
class JSONField(FieldDescriptor):
def Validate(self, record, fv):
return True
def Serialize(self, record, value):
return JSON_Encode(value)
def Unserialize(self, record, value):
return JSON_Decode(value)
class JSONObjectField(FieldDescriptor):
@property
def InsertDefault(self):
return {}
def Validate(self, record, fv):
if not isinstance(fv.value, dict):
raise ValueError('{0}: Expected instance of (dict) instead of {1}'.format(self.Label, repr(type(fv.value))))
return True
def Serialize(self, record, value):
return JSON_Encode(value)
def Unserialize(self, record, value):
if value is None:
value = {}
else:
value = JSON_Decode(value)
if not isinstance(value, dict):
value = {}
return value
class ArrayField(FieldDescriptor):
@property
def InsertDefault(self):
if self.AllowNone:
return None
else:
return []
def Validate(self, record, fv):
if not isinstance(fv.value, (list, tuple)):
raise ValueError('{0}: Expected instance of (list) instead of {1}'.format(self.Label, repr(type(fv.value))))
return True
def Serialize(self, record, value):
return list(value)
def Unserialize(self, record, value):
if value is None:
if self.AllowNone:
return None
else:
return []
else:
return list(value)
class DecimalField(FieldDescriptor):
MinValue = None
MaxValue = None
AllowZero = False
def Validate(self, record, fv):
# Handle string input
if isinstance(fv.value, str):
fv.value = fv.value.strip().replace(',', '').replace(' ', '')
if fv.value in ('', '0'):
        if self.AllowZero:
fv.value = Decimal(0.0)
else:
fv.AddError('{Label} is required.')
return False
else:
try:
fv.value = Decimal(fv.value)
#int(re.sub('[,$+]', '', fv.value))
        except (ValueError, ArithmeticError):  # decimal.InvalidOperation derives from ArithmeticError
          fv.AddError('{Label} must be a decimal.')
return False
    # Pass Decimal instance
elif isinstance(fv.value, Decimal):
pass
# convert Float, Decimal
elif isinstance(fv.value, (float, Decimal)):
fv.value = Decimal(fv.value)
# invalid type
else:
raise ValueError("{0}: Expected instance of (float, decimal.Decimal) instead of {1}: ".format(self.Label, repr(type(fv.value))))
#
# At this point we have a `Decimal` instance
#
if not self.AllowZero and fv.value == 0:
fv.AddError('{Label} is required.')
return False
# MinValue?
if self.MinValue is not None and fv.value < self.MinValue:
fv.AddError('{Label} must not be less than {MinValue}', MinValue=self.MinValue)
return False
# MaxValue?
if self.MaxValue is not None and fv.value > self.MaxValue:
fv.AddError('{Label} must not be greater than {MaxValue}', MaxValue=self.MaxValue)
return False
# At this point return a valid `Decimal` instance
return True
#============================================================================
class NullString(StringField):
AllowNone = True
AllowEmpty = True
def Validate(self, record, fv):
    if not StringField.Validate(self, record, fv):
return False
if fv.value == '':
fv.value = None
return True
###############################################################################
# DEPRECATED: will be removed in future version
Integer = IntegerField
String = StringField
Datetime = DatetimeField = DateTimeField
Boolean = BooleanField
BoolField = BooleanField
DecimalPoint = DecimalField
###############################################################################
class MetaRecord(type):
'''
A metaclass -- for creating a record class. Don't confuse this with the
constructor of a record object...
This is run exactly ONCE per `class` statement which subclasses RecordBase
'''
@classmethod
def __prepare__(metacls, name, bases):
return OrderedDict()
#Note: classdict is an instance of OrderedDict
def __new__(metacls, classname, bases, classdict):
if 'PrimaryKeyFields' not in classdict or len(classdict['PrimaryKeyFields']) == 0:
raise TypeError('`PrimaryKeyFields` attribute must be defined and be a sequence of field classes which comprise the primary key')
# Convert to read-only tuple
classdict['PrimaryKeyFields'] = tuple(classdict['PrimaryKeyFields'])
# convenient mapping of fields.
fieldmap = OrderedDict()
for name in classdict:
attr = classdict[name]
# Auto-instanciate FieldDescriptor subclasses
if inspect.isclass(attr) and issubclass(attr, FieldDescriptor):
classdict[name] = fieldmap[name] = attr(name)
classdict['FieldMap'] = fieldmap
# Create and return the new class
return type.__new__(metacls, classname, bases + (RecordBase,), classdict)
###############################################################################
class RecordBase():
_CurValueMap = None
_SetValueMap = None
_SetValueSet = None
PrimaryKey = None
_Mode = None
#============================================================================
def __getattr__(self, name):
'''
This is only called when an attribute lookup fails and is the last resort.
It will check for "_" + FieldName and return the value from _CurValueMap
'''
if name[0] == '_' and name[1:] in self._CurValueMap:
return self._CurValueMap[name[1:]]
else:
raise AttributeError("Attribute '{0}' does not exist".format(name))
#============================================================================
def __setattr__(self, name, value):
'''
This is called EVERY time an attribute is set. Therefore our first check
is to see if a class attribute with the same name exists. This is designed
to prevent typos from introducing large logic bugs. If you want extra
attributes, simply create them as class attributes first, like this:
class MyRecord(metaclass=MetaRecord):
MyExtraData = None
def Whatever(self):
          self.MyExtraData = 10 #see, no exception
self.NotMyExtraData = 10 #see, exception
    If the attribute is not found, we will check to see if "_" + FieldName
    exists, and unconditionally set the value to BOTH the _CurValueMap AND the
    _SetValueMap.
    This allows the internals of the subclass to affect its fields in ways that
    reach beyond the permission flags.
'''
if hasattr(type(self), name):
super().__setattr__(name, value)
elif name[0] == '_' and self._CurValueMap is not None and name[1:] in self._CurValueMap:
self._CurValueMap[name[1:]] = value
self._SetValueMap[name[1:]] = value
else:
raise AttributeError("Attribute '{0}' does not exist. It must exist as a class attribute in order for an instance attribute to be written to.".format(name))
#============================================================================
@property
def InsertMode(self):
return self._Mode == 'Insert'
#============================================================================
@property
def UpdateMode(self):
return self._Mode == 'Update'
#============================================================================
def __init__(self, *keyvalues, SELECT=None):
self._CurValueMap = {name: None for name in self.FieldMap}
self._SetValueMap = {}
self._SetValueSet = None #only used during validation to detect changed fields
self.PrimaryKey = keyvalues
# Check to make sure the number of parameters matches the primary key definition
if len(self.PrimaryKey) != len(self.PrimaryKeyFields):
raise ValueError('All primary key components must be passed: {0}'.format(tuple(self.PrimaryKeyFields)))
# We are in Insert mode if there are any None values in the primary key
if None in self.PrimaryKey:
self._Mode = 'Insert'
if SELECT is not None:
raise ValueError('SELECT parameter is only valid for UpdateMode: mode={0}'.format(self._Mode))
# Add NOT NULL primary key values to the _CurValueMap and _SetValueMap
for value, name in zip(self.PrimaryKey, self.PrimaryKeyFields):
if value is not None:
self._CurValueMap[name] = value
self._SetValueMap[name] = value
# Add entries to _SetValueMap
for name, field in self.FieldMap.items():
v = field.InsertValue
if v is not Undefined:
setattr(self, name, v) #via normal means
else:
self._Mode = 'Update'
# Restrict the field list to non-virtual fields
fields = tuple(name for name,field in self.FieldMap.items() if not field.Flags.Virtual)
# Passing SELECT is the same as injecting the record
if SELECT is None:
SELECT = self.SELECT(fields)
# Process _CurValueMap
for name in fields:
try:
# Unserialize and then set the data to CurValueMap for later use
self._CurValueMap[name] = self.FieldMap[name].Unserialize(self, SELECT[name])
except KeyError:
raise KeyError('SELECT MUST contain all keys requested. Missing key: {0}'.format(name))
# Add entries to _SetValueMap
for name, field in self.FieldMap.items():
v = field.UpdateValue
if v is not Undefined:
setattr(self, name, v) #via normal means
#============================================================================
def Reload(self):
if self._Mode != 'Update':
raise InvalidOperation('Object must be in Update mode in order to Reload it: Mode={0}'.format(self._Mode))
# Note: we want to use any updated keys for the reload
self.__init__(*(getattr(self, k) for k in self.PrimaryKeyFields))
#============================================================================
def GetData(self, FieldSet=None):
'''
Pass a set of field names (anything that supports `in`, but set is best)
and get all of the readable current data back.
Invalid field names or unreadable fields are ignored.
If None is passed, then all readable fields are returned.
'''
if self._Mode != 'Update':
raise InvalidOperation('Object must be in Update mode to call GetData: Mode={0}'.format(self._Mode))
rval = OrderedDict()
for name, field in self.FieldMap.items():
if field.Flags.UpdateRead:
if FieldSet is None or name in FieldSet:
rval[name] = self._CurValueMap[name]
return rval
#============================================================================
def SetData(self, Data, FieldSet=None):
'''
Pass a mapping of field data and an optional set of field names to use from
that mapping. If FieldSet is None, then all fields will be set.
Invalid field names raise AttributeErrors. Unwritable fields cause their own
exceptions.
'''
if self._Mode not in ('Insert', 'Update'):
raise InvalidOperation('Object must be in Insert or Update mode to call SetData: Mode={0}'.format(self._Mode))
for name, value in Data.items():
if name not in self.FieldMap:
raise AttributeError('unknown field: {0}'.format(name))
if FieldSet is None or name in FieldSet:
setattr(self, name, value)
#============================================================================
def Validate(self):
'''
Validate all values and return a dictionary containing such. Otherwise
raise a ValidationError with the error messages
'''
if self._Mode not in ('Insert', 'Update'):
raise InvalidOperation('Object must be in Insert or Update mode in order to Validate it: Mode={0}'.format(self._Mode))
data = {}
# Add default entries if in Insert mode
if self._Mode == 'Insert':
for name, field in self.FieldMap.items():
v = field.InsertDefault
if v is not Undefined:
data[name] = v #complete bypass of normal means
# Add default entries if in Update mode
elif self._Mode == 'Update':
for name, field in self.FieldMap.items():
v = field.UpdateDefault
if v is not Undefined:
data[name] = v #complete bypass of normal means
# Temp vars needed for this process
# _SetValueSet is a set of fields that still need validated
# It can be added to during the operation if the validation
# of one field sets another.
# In order to prevent infinite loops, we will track the number
    # of iterations and raise an exception if it grows beyond a sane limit.
try:
localerror = ErrorList()
self._SetValueSet = set(self._SetValueMap)
validated_fields = []
validated_max = len(self._SetValueSet) * 2
# Validate and copy each value from _SetValueMap into data
while len(self._SetValueSet) > 0:
name = self._SetValueSet.pop()
value = self._SetValueMap[name]
field = self.FieldMap[name]
# Track and bail out if we validate too many
validated_fields.append(name)
if len(validated_fields) > validated_max:
          raise RuntimeError('It appears that you have a re-validate loop which will not end. Fields are re-validated anytime they change during validation. Fields validated in this order: {0}'.format(str(validated_fields)))
# FieldValue objects are passed to each Validate call
fv = FieldValue(field, localerror, value)
# Do not bother calling validate on None if it is allowed
if field.AllowNone and fv.value is None:
okay = True
# Do not bother calling validate on SQL if it is allowed
elif field.AllowSQL and isinstance(fv.value, SQL):
okay = True
# Otherwise call Validate
else:
okay = field.Validate(self, fv)
        # Only set data if it is validated. Ignored data and Virtual fields are a NO-OP
if (okay) and (not fv.Ignore) and (not field.Flags.Virtual):
data[name] = fv.value
finally:
self._SetValueSet = None
localerror.RaiseIfError()
return data
#============================================================================
def Save(self):
if self._Mode not in ('Insert', 'Update'):
raise InvalidOperation('Object must be in Insert or Update mode in order to Save it: Mode={0}'.format(self._Mode))
# Validate and then Serialize the data
data = aadict(((name, self.FieldMap[name].Serialize(self, value)) for name,value in self.Validate().items()))
# If we have nothing to update at this point, then consider it a NO-OP
if len(data) == 0:
return
# Insert the object and reload with the new PK values
if self._Mode == 'Insert':
# Make sure all required fields are present
for name, field in self.FieldMap.items():
if field.Flags.InsertRequired and name not in data:
raise KeyError("Field '{0}' is required for INSERT".format(field.Label))
# Call the INSERT callback and validate the return value
self.onBeforeSave(data)
self.onBeforeInsert(data)
pk = self.INSERT(data)
if not isinstance(pk, tuple) or len(pk) == 0:
raise ValueError('Return value of INSERT() must be a tuple of primary key parameters instead of: {0}'.format(repr(pk)))
# Reload self from the *new* primary key
self.__init__(*pk)
# Update the object and reload with existing PK values
elif self._Mode == 'Update':
# Make sure all required fields are present
for name, field in self.FieldMap.items():
if field.Flags.UpdateRequired and name not in data:
raise KeyError("Field '{0}' is required for UPDATE".format(field.Label))
# call the UPDATE callback and then reload self
self.onBeforeSave(data)
self.onBeforeUpdate(data)
self.UPDATE(data)
self.Reload()
#============================================================================
def Delete(self):
if self._Mode != 'Update':
raise InvalidOperation('Object must be in Update mode in order to delete it: Mode={0}'.format(self._Mode))
self.DELETE()
self._Mode = 'Delete'
#============================================================================
def onBeforeSave(self, data):
pass
def onBeforeUpdate(self, data):
pass
def onBeforeInsert(self, data):
pass
def SELECT(self, fields):
raise NotImplementedError('Must be implemented in subclass')
def INSERT(self, data):
raise NotImplementedError('Must be implemented in subclass')
def UPDATE(self, data):
raise NotImplementedError('Must be implemented in subclass')
def DELETE(self):
raise NotImplementedError('Must be implemented in subclass')
#============================================================================
IS_IDENTIFIER = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*$').match
class SIUD():
  # Note: Schema=None is intended for temporary table definitions.
@staticmethod
def SingleTable(Schema, Table, *primarykeys):
if len(primarykeys) == 0:
raise TypeError('Must pass at least one primary key argument')
if Schema is not None and not IS_IDENTIFIER(Schema):
raise ValueError('Invalid schema: {0}'.format(Schema))
if not IS_IDENTIFIER(Table):
raise ValueError('Invalid table: {0}'.format(Table))
if Schema is None:
sqlTable = '"{0}"'.format(Table)
else:
sqlTable = '"{0}"."{1}"'.format(Schema, Table)
sqlWhere = ''
sqlPrimaryFields = ''
for i,k in enumerate(primarykeys):
if not IS_IDENTIFIER(k):
raise ValueError('Invalid primary key field: {0}'.format(k))
sqlWhere += 'AND "{0}" = $PK_{1}\n'.format(k, i)
sqlPrimaryFields += '"{0}", '.format(k)
sqlPrimaryFields = sqlPrimaryFields[:-2] #strip comma space
#============================================================================
def SELECT(self, fields):
kwargs = dict((('PK_{0}'.format(i),v) for i,v in enumerate(self.PrimaryKey)))
return App.DB.Row('''
SELECT
[Field]
FROM
''' + sqlTable + '''
WHERE True
''' + sqlWhere + '''
''',
*fields,
**kwargs
)
def INSERT(self, data):
return App.DB.TRow('''
INSERT INTO
''' + sqlTable + '''
([Field])
VALUES
([Value])
RETURNING
''' + sqlPrimaryFields + '''
''',
*data.items()
)
def UPDATE(self, data):
kwargs = dict((('PK_{0}'.format(i),v) for i,v in enumerate(self.PrimaryKey)))
App.DB.Execute('''
UPDATE
''' + sqlTable + '''
SET
[Field=Value]
WHERE True
''' + sqlWhere + '''
''',
*data.items(),
**kwargs
)
def DELETE(self):
kwargs = dict((('PK_{0}'.format(i),v) for i,v in enumerate(self.PrimaryKey)))
App.DB.Execute('''
DELETE FROM
''' + sqlTable + '''
WHERE True
''' + sqlWhere + '''
''',
**kwargs
)
return SELECT, INSERT, UPDATE, DELETE
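#============================================================================
# Illustrative usage sketch (not part of the original module): the four
# closures returned by SIUD.SingleTable() are meant to be bound as the
# SELECT/INSERT/UPDATE/DELETE methods of a record class.  'UserRecord', its
# base class 'Record', the 'public.user' table and App.DB are hypothetical
# names standing in for objects defined elsewhere.
#
#   class UserRecord(Record):
#     SELECT, INSERT, UPDATE, DELETE = SIUD.SingleTable('public', 'user', 'user_id')
#
#   rec = UserRecord(42)       # SELECT by primary key
#   rec.Name = 'New name'      # stage a change
#   rec.Save()                 # Validate(), then UPDATE() and Reload()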
|
|
#
# Copyright 2013 Julien Danjou
# Copyright 2014 Red Hat, Inc
#
# Authors: Julien Danjou <julien@danjou.info>
# Eoghan Glynn <eglynn@redhat.com>
# Nejc Saje <nsaje@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import fnmatch
import itertools
import random
from oslo_config import cfg
from oslo_context import context
import six
from six.moves.urllib import parse as urlparse
from stevedore import extension
from ceilometer.agent import plugin_base
from ceilometer import coordination
from ceilometer.i18n import _
from ceilometer.openstack.common import log
from ceilometer.openstack.common import service as os_service
from ceilometer import pipeline as publish_pipeline
from ceilometer import utils
LOG = log.getLogger(__name__)
OPTS = [
cfg.IntOpt('shuffle_time_before_polling_task',
default=0,
               help='To reduce the number of simultaneous large requests to '
                    'Nova or other components from different compute agents, '
                    'shuffle the start time of the polling task.'),
]
cfg.CONF.register_opts(OPTS)
class PollsterListForbidden(Exception):
def __init__(self):
        msg = ('The pollster-list option of the polling agent cannot be used '
               'together with coordination between multiple agents, as that '
               'could duplicate or lose samples. Use either multiple '
               'coordinated agents or the pollster-list option with a single '
               'polling agent.')
super(PollsterListForbidden, self).__init__(msg)
class Resources(object):
def __init__(self, agent_manager):
self.agent_manager = agent_manager
self._resources = []
self._discovery = []
self.blacklist = []
self.last_dup = []
def setup(self, pipeline):
self._resources = pipeline.resources
self._discovery = pipeline.discovery
def get(self, discovery_cache=None):
source_discovery = (self.agent_manager.discover(self._discovery,
discovery_cache)
if self._discovery else [])
static_resources = []
if self._resources:
static_resources_group = self.agent_manager.construct_group_id(
utils.hash_of_set(self._resources))
p_coord = self.agent_manager.partition_coordinator
static_resources = p_coord.extract_my_subset(
static_resources_group, self._resources)
return static_resources + source_discovery
@staticmethod
def key(source_name, pollster):
return '%s-%s' % (source_name, pollster.name)
class PollingTask(object):
"""Polling task for polling samples and inject into pipeline.
A polling task can be invoked periodically or only once.
"""
def __init__(self, agent_manager):
self.manager = agent_manager
# elements of the Cartesian product of sources X pollsters
# with a common interval
self.pollster_matches = collections.defaultdict(set)
# per-sink publisher contexts associated with each source
self.publishers = {}
# we relate the static resources and per-source discovery to
# each combination of pollster and matching source
resource_factory = lambda: Resources(agent_manager)
self.resources = collections.defaultdict(resource_factory)
def add(self, pollster, pipeline):
if pipeline.source.name not in self.publishers:
publish_context = publish_pipeline.PublishContext(
self.manager.context)
self.publishers[pipeline.source.name] = publish_context
self.publishers[pipeline.source.name].add_pipelines([pipeline])
self.pollster_matches[pipeline.source.name].add(pollster)
key = Resources.key(pipeline.source.name, pollster)
self.resources[key].setup(pipeline)
def poll_and_publish(self):
"""Polling sample and publish into pipeline."""
cache = {}
discovery_cache = {}
for source_name in self.pollster_matches:
with self.publishers[source_name] as publisher:
for pollster in self.pollster_matches[source_name]:
LOG.info(_("Polling pollster %(poll)s in the context of "
"%(src)s"),
dict(poll=pollster.name, src=source_name))
key = Resources.key(source_name, pollster)
candidate_res = list(
self.resources[key].get(discovery_cache))
if not candidate_res and pollster.obj.default_discovery:
candidate_res = self.manager.discover(
[pollster.obj.default_discovery], discovery_cache)
                    # Remove duplicated resources and blacklisted resources.
                    # Using set() would require a well-defined __hash__ for
                    # each resource; since __eq__ is defined, 'not in' is
                    # safe here.
seen = []
duplicated = []
polling_resources = []
black_res = self.resources[key].blacklist
for x in candidate_res:
if x not in seen:
seen.append(x)
if x not in black_res:
polling_resources.append(x)
else:
duplicated.append(x)
# Warn duplicated resources for the 1st time
if self.resources[key].last_dup != duplicated:
self.resources[key].last_dup = duplicated
LOG.warning(_(
                            'Found the following duplicated resources for '
                            '%(name)s in the context of %(source)s: %(list)s. '
'Check pipeline configuration.')
% ({'name': pollster.name,
'source': source_name,
'list': duplicated
}))
# If no resources, skip for this pollster
if not polling_resources:
LOG.info(_("Skip polling pollster %s, no resources"
" found"), pollster.name)
continue
try:
samples = list(pollster.obj.get_samples(
manager=self.manager,
cache=cache,
resources=polling_resources
))
publisher(samples)
except plugin_base.PollsterPermanentError as err:
LOG.error(_(
                            'Prevent pollster %(name)s from '
                            'polling source %(source)s anymore!')
% ({'name': pollster.name, 'source': source_name}))
self.resources[key].blacklist.append(err.fail_res)
except Exception as err:
LOG.warning(_(
'Continue after error from %(name)s: %(error)s')
% ({'name': pollster.name, 'error': err}),
exc_info=True)
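# Illustrative note (not part of the upstream module): PollingTask groups its
# work by pipeline source.  For a pipeline whose source is named
# 'meter_source' and which supports hypothetical 'cpu' and 'memory' pollsters,
# add() leaves the task with:
#
#   pollster_matches['meter_source'] == {cpu_pollster, memory_pollster}
#   resources['meter_source-cpu'], resources['meter_source-memory']
#
# i.e. Resources.key() -- "<source name>-<pollster name>" -- identifies the
# static and discovered resources to poll for each source/pollster pair.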
class AgentManager(os_service.Service):
def __init__(self, namespaces, pollster_list, group_prefix=None):
super(AgentManager, self).__init__()
def _match(pollster):
"""Find out if pollster name matches to one of the list."""
return any(fnmatch.fnmatch(pollster.name, pattern) for
pattern in pollster_list)
        # The coordination and pollster-list features are mutually exclusive:
        # using both at once could lead to duplicated samples or to samples
        # being lost.
if pollster_list and cfg.CONF.coordination.backend_url:
raise PollsterListForbidden()
if type(namespaces) is not list:
namespaces = [namespaces]
        # we'll have the default ['compute', 'central'] here if no namespaces
        # are passed
extensions = (self._extensions('poll', namespace).extensions
for namespace in namespaces)
if pollster_list:
extensions = (itertools.ifilter(_match, exts)
for exts in extensions)
self.extensions = list(itertools.chain(*list(extensions)))
self.discovery_manager = self._extensions('discover')
self.context = context.RequestContext('admin', 'admin', is_admin=True)
self.partition_coordinator = coordination.PartitionCoordinator()
        # Compose the coordination group prefix.
        # We'll use the namespaces as the basis for this partitioning.
namespace_prefix = '-'.join(sorted(namespaces))
self.group_prefix = ('%s-%s' % (namespace_prefix, group_prefix)
if group_prefix else namespace_prefix)
@staticmethod
def _extensions(category, agent_ns=None):
namespace = ('ceilometer.%s.%s' % (category, agent_ns) if agent_ns
else 'ceilometer.%s' % category)
def _catch_extension_load_error(mgr, ep, exc):
            # Extensions raising ExtensionLoadError can be ignored, and
            # anything we fail to import is skipped as a safety measure.
if isinstance(exc, plugin_base.ExtensionLoadError):
LOG.error(_("Skip loading extension for %s") % ep.name)
return
if isinstance(exc, ImportError):
LOG.error(
_("Failed to import extension for %(name)s: %(error)s"),
{'name': ep.name, 'error': exc},
)
return
raise exc
return extension.ExtensionManager(
namespace=namespace,
invoke_on_load=True,
on_load_failure_callback=_catch_extension_load_error,
)
def join_partitioning_groups(self):
groups = set([self.construct_group_id(d.obj.group_id)
for d in self.discovery_manager])
# let each set of statically-defined resources have its own group
static_resource_groups = set([
self.construct_group_id(utils.hash_of_set(p.resources))
for p in self.pipeline_manager.pipelines
if p.resources
])
groups.update(static_resource_groups)
for group in groups:
self.partition_coordinator.join_group(group)
def create_polling_task(self):
"""Create an initially empty polling task."""
return PollingTask(self)
def setup_polling_tasks(self):
polling_tasks = {}
for pipeline in self.pipeline_manager.pipelines:
for pollster in self.extensions:
if pipeline.support_meter(pollster.name):
polling_task = polling_tasks.get(pipeline.get_interval())
if not polling_task:
polling_task = self.create_polling_task()
polling_tasks[pipeline.get_interval()] = polling_task
polling_task.add(pollster, pipeline)
return polling_tasks
def construct_group_id(self, discovery_group_id):
return ('%s-%s' % (self.group_prefix,
discovery_group_id)
if discovery_group_id else None)
def start(self):
self.pipeline_manager = publish_pipeline.setup_pipeline()
self.partition_coordinator.start()
self.join_partitioning_groups()
# allow time for coordination if necessary
delay_start = self.partition_coordinator.is_active()
# set shuffle time before polling task if necessary
delay_polling_time = random.randint(
0, cfg.CONF.shuffle_time_before_polling_task)
for interval, task in six.iteritems(self.setup_polling_tasks()):
delay_time = (interval + delay_polling_time if delay_start
else delay_polling_time)
self.tg.add_timer(interval,
self.interval_task,
initial_delay=delay_time,
task=task)
self.tg.add_timer(cfg.CONF.coordination.heartbeat,
self.partition_coordinator.heartbeat)
def stop(self):
if self.partition_coordinator:
self.partition_coordinator.stop()
super(AgentManager, self).stop()
@staticmethod
def interval_task(task):
task.poll_and_publish()
@staticmethod
def _parse_discoverer(url):
s = urlparse.urlparse(url)
return (s.scheme or s.path), (s.netloc + s.path if s.scheme else None)
def _discoverer(self, name):
for d in self.discovery_manager:
if d.name == name:
return d.obj
return None
def discover(self, discovery=None, discovery_cache=None):
resources = []
discovery = discovery or []
for url in discovery:
if discovery_cache is not None and url in discovery_cache:
resources.extend(discovery_cache[url])
continue
name, param = self._parse_discoverer(url)
discoverer = self._discoverer(name)
if discoverer:
try:
discovered = discoverer.discover(self, param)
partitioned = self.partition_coordinator.extract_my_subset(
self.construct_group_id(discoverer.group_id),
discovered)
resources.extend(partitioned)
if discovery_cache is not None:
discovery_cache[url] = partitioned
except Exception as err:
LOG.exception(_('Unable to discover resources: %s') % err)
else:
LOG.warning(_('Unknown discovery extension: %s') % name)
return resources
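# Illustrative note (not part of the upstream module): discovery entries use a
# "<discoverer>[://<parameter>]" convention, split by _parse_discoverer(), e.g.
#
#   AgentManager._parse_discoverer('local_instances')
#       -> ('local_instances', None)
#   AgentManager._parse_discoverer('tripleo_overcloud_nodes://10.0.0.1')
#       -> ('tripleo_overcloud_nodes', '10.0.0.1')
#
# The first element selects a discovery extension by name and the second is
# passed to its discover() call; the discoverer names above are only examples.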
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from trac.versioncontrol import diff
import unittest
def get_opcodes(*args, **kwargs):
for hunk in diff.get_filtered_hunks(*args, **kwargs):
for opcode in hunk:
yield opcode
class DiffTestCase(unittest.TestCase):
def testget_change_extent(self):
self.assertEqual((3, 0), diff.get_change_extent('xxx', 'xxx'))
self.assertEqual((0, 0), diff.get_change_extent('', 'xxx'))
self.assertEqual((0, 0), diff.get_change_extent('xxx', ''))
self.assertEqual((0, 0), diff.get_change_extent('xxx', 'yyy'))
self.assertEqual((1, -1), diff.get_change_extent('xxx', 'xyx'))
self.assertEqual((1, -1), diff.get_change_extent('xxx', 'xyyyx'))
self.assertEqual((1, 0), diff.get_change_extent('xy', 'xzz'))
self.assertEqual((1, -1), diff.get_change_extent('xyx', 'xzzx'))
self.assertEqual((1, -1), diff.get_change_extent('xzzx', 'xyx'))
def test_insert_blank_line(self):
opcodes = get_opcodes(['A', 'B'], ['A', 'B', ''], ignore_blank_lines=0)
self.assertEqual(('equal', 0, 2, 0, 2), opcodes.next())
self.assertEqual(('insert', 2, 2, 2, 3), opcodes.next())
self.assertRaises(StopIteration, opcodes.next)
opcodes = get_opcodes(['A', 'B'], ['A', 'B', ''], ignore_blank_lines=1)
self.assertEqual(('equal', 0, 2, 0, 3), opcodes.next())
self.assertRaises(StopIteration, opcodes.next)
opcodes = get_opcodes(['A'], ['A', 'B', ''], ignore_blank_lines=0)
self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
self.assertEqual(('insert', 1, 1, 1, 3), opcodes.next())
self.assertRaises(StopIteration, opcodes.next)
opcodes = get_opcodes(['A'], ['A', 'B', ''], ignore_blank_lines=1)
self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
self.assertEqual(('insert', 1, 1, 1, 3), opcodes.next())
self.assertRaises(StopIteration, opcodes.next)
def test_delete_blank_line(self):
opcodes = get_opcodes(['A', 'B', ''], ['A', 'B'], ignore_blank_lines=0)
self.assertEqual(('equal', 0, 2, 0, 2), opcodes.next())
self.assertEqual(('delete', 2, 3, 2, 2), opcodes.next())
self.assertRaises(StopIteration, opcodes.next)
opcodes = get_opcodes(['A', 'B', ''], ['A', 'B'], ignore_blank_lines=1)
self.assertEqual(('equal', 0, 3, 0, 2), opcodes.next())
self.assertRaises(StopIteration, opcodes.next)
opcodes = get_opcodes(['A', 'B', ''], ['A'], ignore_blank_lines=0)
self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
self.assertEqual(('delete', 1, 3, 1, 1), opcodes.next())
self.assertRaises(StopIteration, opcodes.next)
opcodes = get_opcodes(['A', 'B', ''], ['A'], ignore_blank_lines=1)
self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
self.assertEqual(('delete', 1, 3, 1, 1), opcodes.next())
self.assertRaises(StopIteration, opcodes.next)
def test_space_changes(self):
opcodes = get_opcodes(['A', 'B b'], ['A', 'B b'],
ignore_space_changes=0)
self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
self.assertEqual(('replace', 1, 2, 1, 2), opcodes.next())
self.assertRaises(StopIteration, opcodes.next)
opcodes = get_opcodes(['A', 'B b'], ['A', 'B b'],
ignore_space_changes=1)
self.assertEqual(('equal', 0, 2, 0, 2), opcodes.next())
self.assertRaises(StopIteration, opcodes.next)
def test_case_changes(self):
opcodes = get_opcodes(['A', 'B b'], ['A', 'B B'], ignore_case=0)
self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
self.assertEqual(('replace', 1, 2, 1, 2), opcodes.next())
self.assertRaises(StopIteration, opcodes.next)
opcodes = get_opcodes(['A', 'B b'], ['A', 'B B'], ignore_case=1)
self.assertEqual(('equal', 0, 2, 0, 2), opcodes.next())
self.assertRaises(StopIteration, opcodes.next)
def test_space_and_case_changes(self):
opcodes = get_opcodes(['A', 'B b'], ['A', 'B B'],
ignore_case=0, ignore_space_changes=0)
self.assertEqual(('equal', 0, 1, 0, 1), opcodes.next())
self.assertEqual(('replace', 1, 2, 1, 2), opcodes.next())
self.assertRaises(StopIteration, opcodes.next)
opcodes = get_opcodes(['A', 'B b'], ['A', 'B B'],
ignore_case=1, ignore_space_changes=1)
self.assertEqual(('equal', 0, 2, 0, 2), opcodes.next())
self.assertRaises(StopIteration, opcodes.next)
def test_grouped_opcodes_context1(self):
groups = diff.get_filtered_hunks(
['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'],
['A', 'B', 'C', 'd', 'e', 'f', 'G', 'H'], context=1)
group = groups.next()
self.assertRaises(StopIteration, groups.next)
self.assertEqual(('equal', 2, 3, 2, 3), group[0])
self.assertEqual(('replace', 3, 6, 3, 6), group[1])
self.assertEqual(('equal', 6, 7, 6, 7), group[2])
def test_grouped_opcodes_context1_ignorecase(self):
old = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
new = ['X', 'B', 'C', 'd', 'e', 'f', 'G', 'Y']
groups = diff.get_filtered_hunks(old, new, context=1, ignore_case=1)
group = groups.next()
self.assertEqual([('replace', 0, 1, 0, 1), ('equal', 1, 2, 1, 2)],
group)
group = groups.next()
self.assertRaises(StopIteration, groups.next)
self.assertEqual([('equal', 6, 7, 6, 7), ('replace', 7, 8, 7, 8)],
group)
def test_grouped_opcodes_full_context(self):
old = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
new = ['X', 'B', 'C', 'd', 'e', 'f', 'G', 'Y']
groups = diff.get_filtered_hunks(old, new, context=None)
group = groups.next()
self.assertRaises(StopIteration, groups.next)
self.assertEqual([
('replace', 0, 1, 0, 1),
('equal', 1, 3, 1, 3),
('replace', 3, 6, 3, 6),
('equal', 6, 7, 6, 7),
('replace', 7, 8, 7, 8),
], group)
groups = diff.get_filtered_hunks(old, new, context=None, ignore_case=1)
group = groups.next()
self.assertRaises(StopIteration, groups.next)
self.assertEqual([
('replace', 0, 1, 0, 1),
('equal', 1, 7, 1, 7),
('replace', 7, 8, 7, 8),
], group)
def test_grouped_opcodes_insert_blank_line_at_top(self):
"""
Regression test for #2090. Make sure that the equal block following an
insert at the top of a file is correct.
"""
groups = diff.get_filtered_hunks(['B', 'C', 'D', 'E', 'F', 'G'],
['A', 'B', 'C', 'D', 'E', 'F', 'G'],
context=3)
self.assertEqual([('insert', 0, 0, 0, 1), ('equal', 0, 3, 1, 4)],
groups.next())
self.assertRaises(StopIteration, groups.next)
def test_unified_diff_no_context(self):
diff_lines = list(diff.unified_diff(['a'], ['b']))
self.assertEqual(['@@ -1,1 +1,1 @@', '-a', '+b'], diff_lines)
def test_quotes_not_marked_up(self):
"""Make sure that the escape calls leave quotes along, we don't need
to escape them."""
changes = diff.diff_blocks(['ab'], ['a"b'])
self.assertEqual(len(changes), 1)
blocks = changes[0]
self.assertEqual(len(blocks), 1)
block = blocks[0]
self.assertEqual(block['type'], 'mod')
self.assertEqual(str(block['base']['lines'][0]), 'a<del></del>b')
self.assertEqual(str(block['changed']['lines'][0]), 'a<ins>"</ins>b')
def test_whitespace_marked_up1(self):
"""Regression test for #5795"""
changes = diff.diff_blocks(['*a'], [' *a'])
block = changes[0][0]
self.assertEqual(block['type'], 'mod')
self.assertEqual(str(block['base']['lines'][0]), '<del></del>*a')
self.assertEqual(str(block['changed']['lines'][0]),
'<ins> </ins>*a')
def test_whitespace_marked_up2(self):
"""Related to #5795"""
changes = diff.diff_blocks([' a'], [' b'])
block = changes[0][0]
self.assertEqual(block['type'], 'mod')
self.assertEqual(str(block['base']['lines'][0]),
' <del>a</del>')
self.assertEqual(str(block['changed']['lines'][0]),
' <ins>b</ins>')
def test_whitespace_marked_up3(self):
"""Related to #5795"""
changes = diff.diff_blocks(['a '], ['b '])
block = changes[0][0]
self.assertEqual(block['type'], 'mod')
self.assertEqual(str(block['base']['lines'][0]),
'<del>a</del> ')
self.assertEqual(str(block['changed']['lines'][0]),
'<ins>b</ins> ')
def test_expandtabs_works_right(self):
"""Regression test for #4557"""
changes = diff.diff_blocks(['aa\tb'], ['aaxb'])
block = changes[0][0]
self.assertEqual(block['type'], 'mod')
self.assertEqual(str(block['base']['lines'][0]),
'aa<del> </del>b')
self.assertEqual(str(block['changed']['lines'][0]),
'aa<ins>x</ins>b')
def suite():
return unittest.makeSuite(DiffTestCase)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
version = "1.0.0.1"
###############################################################################
#
# 17 Mar 2011
#
#  A commandline script to manage temporary firewall rules and to build and
#  run an alarm script: shortly before the rules expire, an xterm is exec'd
#  to show the alarm content.
#
###############################################################################
import os
import sys
import re
import getopt
import time
import subprocess
import getpass
#########################################################
# CONFIGURATION
#########################################################
#
TIMEOUT = '360'
# iptables command prefix -- assumed path for this sketch; note the trailing
# space because rule fragments are appended directly to it.
ipt = '/sbin/iptables '
# Other
dbg = False
#########################################################
# Functions for LOCAL ops box
#########################################################
def lo_execute(cmd):
if dbg:
print '# ' + cmd
outfile = os.tmpfile()
proc = subprocess.Popen(cmd, stdout=outfile, shell=True)
proc.wait()
outfile.seek(0)
output = outfile.read()
outfile.close()
return output
def lo_get_rules():
return lo_execute(ipt + '-L -n -v --line-numbers')
def lo_clear_rules():
lo_execute(ipt + '-F')
def lo_set_default_rules():
in_rules = [
ipt + '-t filter -P INPUT DROP',
ipt + '-t filter -A INPUT -i lo -j ACCEPT',
ipt + '-t filter -A INPUT -i eth1 -j ACCEPT',
ipt + '-t filter -A INPUT -i eth0 -p tcp -s 0.0.0.0/0 -d ' + ext_ip + ' --sport 80 -m state --state ESTABLISHED -j ACCEPT',
ipt + '-t filter -A INPUT -i eth0 -p tcp -s 0.0.0.0/0 -d ' + ext_ip + ' --sport 443 -m state --state ESTABLISHED -j ACCEPT',
ipt + '-t filter -A INPUT -i eth0 -p udp -s 0.0.0.0/0 -d ' + ext_ip + ' --sport 53 -m state --state ESTABLISHED -j ACCEPT',
ipt + '-t filter -A INPUT -i eth0 -p icmp -s 0.0.0.0/0 -d ' + ext_ip + ' -m state --state ESTABLISHED,RELATED -j ACCEPT'
]
out_rules = [
ipt + '-t filter -P OUTPUT DROP',
ipt + '-t filter -A OUTPUT -o lo -j ACCEPT',
ipt + '-t filter -A OUTPUT -o eth1 -j ACCEPT',
ipt + '-t filter -A OUTPUT -o eth0 -p tcp -s ' + ext_ip + ' -d 0.0.0.0/0 --dport 80 -j ACCEPT -m state --state NEW,ESTABLISHED',
ipt + '-t filter -A OUTPUT -o eth0 -p tcp -s ' + ext_ip + ' -d 0.0.0.0/0 --dport 443 -j ACCEPT -m state --state NEW,ESTABLISHED',
ipt + '-t filter -A OUTPUT -o eth0 -p udp -s ' + ext_ip + ' -d 0.0.0.0/0 --dport 53 -j ACCEPT',
ipt + '-t filter -A OUTPUT -o eth0 -p icmp -s ' + ext_ip + ' -d 0.0.0.0/0 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT'
]
fwd_rules = [
ipt + '-t filter -P FORWARD DROP'
]
for r in in_rules:
lo_execute(r)
for r in out_rules:
lo_execute(r)
for r in fwd_rules:
lo_execute(r)
def lo_allow_ip(ip):
lo_execute(ipt + '-t filter -A INPUT -i eth0 -p all -s ' + ip + ' -d ' + ext_ip + ' -j ACCEPT')
lo_execute(ipt + '-t filter -A OUTPUT -p all -s ' + ext_ip + ' -d ' + ip + ' -j ACCEPT')
def lo_remove_ip(ip):
lo_execute(ipt + '-t filter -D INPUT -i eth0 -p all -s ' + ip + ' -d ' + ext_ip + ' -j ACCEPT')
lo_execute(ipt + '-t filter -D OUTPUT -p all -s ' + ext_ip + ' -d ' + ip + ' -j ACCEPT')
# check if the local rules are set for http to communicate with
# the gateway
def lo_rules_set():
rules = lo_get_rules()
if re.search('0.0.0.0/0 +' + ext_ip + ' +tcp spt:80', rules) != None and \
re.search(ext_ip + ' +0.0.0.0/0 +tcp dpt:80', rules) != None:
return True
else:
return False
#########################################################
# Other Functions
#########################################################
def check_ipv4_fmt(ip):
if re.match('^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', ip) != None:
return True
else:
return False
def get_login():
global user
global password
if user == '':
user = lo_execute('perl -e \'require ' +
'"/current/down/hostvars.global";' +
'print "$gbl_opuser";\'')
if user == '':
user = raw_input('username: ')
if password == '':
password = lo_execute('perl -e \'require ' +
'"/current/down/hostvars.global";' +
'print "$gbl_oppasswd";\'')
if password == '':
password = getpass.getpass('password: ')
return (user, password)
def write_alarm(alarm_file, alarm_sleep_file, timeout_mins):
expires = time.ctime(time.time() + (timeout_mins * 60))
sleep_secs = (timeout_mins - 30) * 60
# build alarm_file
script = '#!/bin/sh\n'
script += '/bin/rm -f $0\n'
script += 'EXPIRES="' + expires + '"\n'
script += 'while [ 1 ]; do\n'
script += ' clear\n'
script += ' echo -e "\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\a"\n'
script += ' echo -e "Current time: `date`\\n\\n\\n"\n'
script += ' [ "$EXPIRES" ] && echo -e "EXPIRES time: $EXPIRES\\n\\n\\n"\n'
script += ' echo -e "Your firewall rules will expire in 30 minutes.\\n\\n"\n'
script += ' echo -e "If necessary, use \\"fwrules.py -t <timeout>\\" to re-set it,\\n"\n'
script += ' echo -e "or use the browser GUI to add more time.\\n\\n"\n'
script += ' echo -e "\\n\\n\\n\\n\\n\\n"\n'
script += ' echo "^C or close this window as desired, but this alarm has no snooze!"\n'
script += ' sleep 5\n'
script += 'done\n'
f = open(alarm_file, 'w')
f.write(script)
f.close()
# build alarm_sleep_file
script = '#!/bin/sh\n'
script += '/bin/rm -f $0\n'
script += 'chmod 0777 ' + alarm_file + '\n'
script += 'sleep ' + str(sleep_secs) + '\n'
script += 'exec xterm -ut +cm +cn -sk -sb -ls -title ALARM '
script += '-geometry 174x52-53+26 -bg white -fg red -e '
script += alarm_file + '\n'
f = open(alarm_sleep_file, 'w')
f.write(script)
f.close()
os.system('chmod 0777 ' + alarm_sleep_file)
def start_alarm(timeout):
kill_alarms('Alarm')
write_alarm('/tmp/Alarm.sh', '/tmp/AlarmSleep.sh', timeout)
os.system('/tmp/AlarmSleep.sh&')
def kill_alarms(alarm_grep):
ps_line = lo_execute('ps -ef | grep ' + alarm_grep + ' | egrep -v grep')
if len(ps_line) > 0:
lo_execute('pkill ' + alarm_grep)
def usage(prog):
prog = prog.split(os.sep)[-1]
  print 'usage: ' + prog + ' [-hvdp] [-s|-c|-r] [-t <timeout>] [-A <ip>|-D <ip>] [-U <user>] [-P <password>]'
print ' options:'
print ' -h show this help'
print ' -v print the version and exit'
print ' -d debug, print the commands being executed'
  print '   -t <timeout>    set the alarm timeout [default: ' + TIMEOUT + 'm, max 480m]'
  print '   -p              print the local and gateway firewall rules'
  print '   -s              set the default firewall rules'
  print '   -c              clear the firewall rules'
  print '   -r              reset (clear, then set) the firewall rules'
  print '   -A <ip>         allow all traffic to/from <ip>'
  print '   -D <ip>         remove the allow rule for <ip>'
  print '   -U <user>       username for the gateway login'
  print '   -P <password>   password for the gateway login'
  print '\n'
#########################################################
# Main
#########################################################
def main():
global dbg
global ext_ip
global int_ip
global gw_ip
global logged_in
global duration
global user
global password
user = ''
password = ''
print_it = False
reset = False
clear = False
set = False
ipaddr = None
addrule = False
set_timeout = False
duration = TIMEOUT
logged_in = False
try:
opts, args = getopt.getopt(sys.argv[1:], 'hvdU:P:pscrt:A:D:')
except getopt.GetoptError, err:
print str(err)
usage(sys.argv[0])
sys.exit(1)
if len(opts) == 0:
usage(sys.argv[0])
sys.exit(1)
for o, a in opts:
if o == '-h':
usage(sys.argv[0])
sys.exit(0)
elif o == '-v':
print '%s version %s' % (sys.argv[0].split(os.sep)[-1], version)
sys.exit(0)
elif o == '-p':
print_it = True
elif o == '-r':
reset = True
elif o == '-A':
if ipaddr is not None:
print 'ERROR: either -A or -D can be specified, not both'
sys.exit(1)
ipaddr = a
addrule = True
elif o == '-D':
if ipaddr is not None:
print 'ERROR: either -A or -D can be specified, not both'
sys.exit(1)
ipaddr = a
addrule = False
elif o == '-d':
dbg = True
elif o == '-c':
clear = True
elif o == '-t':
if re.match('^\d+[mh]?$', a) is None:
print 'ERROR: bad timeout format'
sys.exit(1)
if a[-1] == 'm':
duration = a[:-1]
elif a[-1] == 'h':
duration = str(int(a[:-1]) * 60)
else:
duration = str(int(a) * 60)
if int(duration) > 480:
print 'ERROR: timeout max is 480m or 8h'
sys.exit(1)
set_timeout = True
elif o == '-s':
set = True
elif o == '-U':
user = a
elif o == '-P':
password = a
if (clear or set or reset) and not ((clear and not set and not reset) or
(not clear and set and not reset) or
(not clear and not set and reset)):
print 'ERROR: Only one of -s, -c, and -r can be specified'
sys.exit(1)
if lo_execute('uname -s').strip() != 'Linux':
print 'ERROR: This script is only meant to be run in Linux.'
sys.exit(1)
if ipaddr != None and not check_ipv4_fmt(ipaddr):
print 'ERROR: invalid IP address format'
sys.exit(1)
ext_ip = lo_get_ip('eth0')
if ext_ip is None:
print 'ERROR: Could not get IP address for eth0'
sys.exit(1)
int_ip = lo_get_ip('eth1')
if int_ip is None:
print 'ERROR: Could not get IP address for eth1'
sys.exit(1)
gw_ip = gw_get_ip()
if gw_ip is None:
print 'ERROR: Could not get IP address for the gateway'
sys.exit(1)
if clear:
print 'Removing firewall rules'
gw_clear_rules()
lo_clear_rules()
if print_it:
print gw_get_rules()
print lo_get_rules()
sys.exit(1)
if set or reset:
if reset:
print 'Removing firewall rules'
gw_clear_rules()
lo_clear_rules()
print 'Setting default firewall rules'
lo_set_default_rules()
gw_set_default_rules()
start_alarm(int(duration))
if ipaddr is not None and addrule and lo_rules_set():
print 'Allowing all traffic to/from ' + ipaddr
gw_get_rules()
gw_allow_ip(ipaddr)
lo_allow_ip(ipaddr)
elif ipaddr is not None and not addrule and lo_rules_set():
print 'Removing rule for ' + ipaddr
gw_get_rules()
gw_remove_ip(ipaddr)
lo_remove_ip(ipaddr)
if set_timeout:
gw_set_timeout()
start_alarm(int(duration))
if print_it:
print 'Local iptables rules:\n'
print lo_get_rules()
print 'Gateway firewall rules:\n'
print gw_get_rules()
gw_logout()
if __name__ == '__main__':
main()
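# Example invocations (illustrative, derived from the option handling in
# main() above):
#
#   fwrules.py -s -t 2h        # set the default rules with a 2 hour timeout
#   fwrules.py -A 192.0.2.10   # allow all traffic to/from 192.0.2.10
#   fwrules.py -D 192.0.2.10   # remove the allow rule for 192.0.2.10
#   fwrules.py -c -p           # clear all rules and print what remains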
|
|
from Box2D import *
import pyglet
from pyglet.gl import *
from itertools import chain
from math import *
import sys
def equals(x, y, epsilon=1e-9):
return abs(x - y) < epsilon
def normalize_angle(angle):
if angle < 0.:
while angle < 0.:
angle += 2 * pi
else:
while angle >= 2 * pi:
angle -= 2 * pi
return angle
def interpolate_angle(angle_1, angle_2, weight_2=0.5):
angle_1 = normalize_angle(angle_1)
angle_2 = normalize_angle(angle_2)
if angle_1 - angle_2 < -pi:
angle_2 -= 2 * pi
elif angle_1 - angle_2 >= pi:
angle_2 += 2 * pi
angle = angle_1 * (1. - weight_2) + angle_2 * weight_2
return normalize_angle(angle)
def sign(x):
return x / abs(x) if x else 0.
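# Illustrative values (not part of the original script) for the angle helpers
# above; results are wrapped into [0, 2*pi):
#
#   normalize_angle(-pi / 2.)     ~= 3. * pi / 2.
#   normalize_angle(5. * pi / 2.) ~= pi / 2.
#   interpolate_angle(7. * pi / 4., pi / 4., 0.25) ~= 15. * pi / 8.
#       (a quarter of the way from 315 deg towards 45 deg, taking the short
#        arc across 0 rather than the long way around through 180 deg)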
def init_shape_def(shape_def, **kwargs):
if 'density' in kwargs:
shape_def.density = kwargs.pop('density')
if 'friction' in kwargs:
shape_def.friction = kwargs.pop('friction')
if 'group_index' in kwargs:
shape_def.filter.groupIndex = kwargs.pop('group_index')
assert not kwargs
def create_circle_def(radius, center, **kwargs):
circle_def = b2CircleDef()
init_shape_def(circle_def, **kwargs)
circle_def.localPosition = center
circle_def.radius = radius
return circle_def
def create_box_def(half_width, half_height, center, angle, **kwargs):
polygon_def = b2PolygonDef()
init_shape_def(polygon_def, **kwargs)
polygon_def.SetAsBox(half_width, half_height, center, angle)
return polygon_def
def create_body(world, shape_defs):
body_def = b2BodyDef()
body = world.CreateBody(body_def)
for shape_def in shape_defs:
body.CreateShape(shape_def)
body.SetMassFromShapes()
return body
def create_circle_vertex_list(x=0., y=0., radius=1., vertex_count=100):
coords = []
for i in xrange(vertex_count):
angle = 2. * pi * float(i) / float(vertex_count)
coords.append(x + radius * cos(angle))
coords.append(y + radius * sin(angle))
if i:
coords.extend(coords[-2:])
coords.extend(coords[:2])
return pyglet.graphics.vertex_list(len(coords) // 2, ('v2f', coords))
class Ragdoll(object):
def __init__(self, world):
self.init_bodies(world)
self.init_joints(world)
def init_bodies(self, world):
self.bodies = {}
self.init_capsule_body(world, 'torso', 0.2, 0.4, (0., 0.))
self.init_circle_body(world, 'head', 0.4, (1., 0.))
self.init_box_body(world, 'left-upper-arm', 0.2, 0.1, (0.5, 0.3))
self.init_box_body(world, 'right-upper-arm', 0.2, 0.1, (0.5, -0.3))
self.init_box_body(world, 'left-lower-arm', 0.2, 0.1, (0.9, 0.3))
self.init_box_body(world, 'right-lower-arm', 0.2, 0.1, (0.9, -0.3))
self.init_box_body(world, 'left-upper-leg', 0.2, 0.1, (-0.2, 0.2))
self.init_box_body(world, 'right-upper-leg', 0.2, 0.1,
(-0.2, -0.2))
self.init_box_body(world, 'left-lower-leg', 0.2, 0.1, (0.2, 0.2))
self.init_box_body(world, 'right-lower-leg', 0.2, 0.1, (0.2, -0.2))
def init_joints(self, world):
self.joints = {}
self.init_joint(world, 'neck', 'torso', 'head', (0.6, 0.))
self.init_joint(world, 'left-shoulder', 'torso', 'left-upper-arm',
(0.3, 0.3))
self.init_joint(world, 'right-shoulder', 'torso', 'right-upper-arm',
(0.3, -0.3))
self.init_joint(world, 'left-elbow', 'left-upper-arm',
'left-lower-arm', (0.7, 0.3))
self.init_joint(world, 'right-elbow', 'right-upper-arm',
'right-lower-arm', (0.7, -0.3))
self.init_joint(world, 'left-hip', 'torso', 'left-upper-leg',
(-0.4, 0.2))
self.init_joint(world, 'right-hip', 'torso', 'right-upper-leg',
(-0.4, -0.2))
self.init_joint(world, 'left-knee', 'left-upper-leg', 'left-lower-leg',
(0., 0.2))
self.init_joint(world, 'right-knee', 'right-upper-leg',
'right-lower-leg', (0., -0.2))
for joint_name in ('left-knee', 'right-knee'):
joint = self.joints[joint_name]
joint.EnableLimit(True)
joint.SetLimits(-pi, 0.)
def init_circle_body(self, world, name, radius, center):
shape_def = create_circle_def(radius, center, group_index=-1,
density=1., friction=0.)
self.bodies[name] = create_body(world, [shape_def])
def init_box_body(self, world, name, half_width, half_height, center,
angle=0.):
shape_def = create_box_def(half_width, half_height, center, angle,
group_index=-1, density=1., friction=0.)
self.bodies[name] = create_body(world, [shape_def])
def init_capsule_body(self, world, name, half_width, half_height, center,
angle=0.):
shape_defs = []
x, y = center
dx = cos(angle) * half_width
dy = sin(angle) * half_width
kwargs = dict(group_index=-1, density=1., friction=0.)
shape_defs.append(create_circle_def(half_height, (x + dx, y + dy),
**kwargs))
shape_defs.append(create_box_def(half_width, half_height, center,
angle, **kwargs))
shape_defs.append(create_circle_def(half_height, (x - dx, y - dy),
**kwargs))
self.bodies[name] = create_body(world, shape_defs)
def init_joint(self, world, joint_name, body_name_1, body_name_2,
position):
joint_def = b2RevoluteJointDef()
joint_def.Initialize(self.bodies[body_name_1],
self.bodies[body_name_2], position)
joint = world.CreateJoint(joint_def).asRevoluteJoint()
joint.EnableMotor(joint_name == 'neck')
joint.SetMaxMotorTorque(50.)
self.joints[joint_name] = joint
def step(self, dt):
torso = self.bodies['torso']
angle_diff = pi / 2. - torso.angle
while angle_diff < -pi:
angle_diff += 2. * pi
while angle_diff >= pi:
angle_diff -= 2. * pi
torque = 500. * angle_diff - 20. * torso.angularVelocity
torso.ApplyTorque(torque)
class Pose(object):
def __init__(self, joint_angles):
self.joint_angles = joint_angles
def mirror(self):
self.joint_angles = dict((n, -a)
for n, a in self.joint_angles.iteritems())
class KeyFrameAnimation(object):
def __init__(self, duration, poses):
self.duration = duration
self.poses = poses
def mirror(self):
for pose in self.poses:
pose.mirror()
def loop(self, ragdoll):
player = KeyFrameAnimationPlayer(self, ragdoll)
player.on_end = player.start
player.start()
return player
class KeyFrameAnimationPlayer(object):
def __init__(self, animation, ragdoll, on_end=None):
self.animation = animation
self.ragdoll = ragdoll
self.on_end = on_end
self.index = 0
def start(self):
if self.animation.poses:
self.index = 0
self.set_motor_speeds()
self.index = 1
interval = self.animation.duration / len(self.animation.poses)
pyglet.clock.schedule_interval(self.step, interval)
def set_motor_speeds(self):
for joint in self.ragdoll.joints.itervalues():
joint.EnableMotor(False)
pose = self.animation.poses[self.index]
for joint_name, angle in pose.joint_angles.iteritems():
joint = self.ragdoll.joints[joint_name]
angle_diff = angle - joint.GetJointAngle()
if angle_diff < -pi:
angle_diff += 2. * pi
elif angle_diff >= pi:
angle_diff -= 2. * pi
joint.EnableMotor(True)
interval = self.animation.duration / len(self.animation.poses)
joint.SetMotorSpeed(angle_diff / interval)
def stop(self):
pyglet.clock.unschedule(self.step)
def step(self, dt):
if self.index < len(self.animation.poses):
self.set_motor_speeds()
self.index += 1
else:
self.stop()
if self.on_end is not None:
self.on_end()
def create_walk_animation():
poses = []
def add_pose(**kwargs):
poses.append(Pose(dict((key.replace('_', '-'), value)
for key, value in kwargs.iteritems())))
add_pose(neck=0.,
left_shoulder=(-0.75 * pi), right_shoulder=(0.75 * pi),
left_elbow=(0.5 * pi), right_elbow=(0.5 * pi),
left_hip=(0.75 * pi), right_hip=(-0.75 * pi),
left_knee=(-0.25 * pi), right_knee=(-0.25 * pi))
add_pose(neck=0.,
left_shoulder=pi, right_shoulder=pi,
left_elbow=(0.5 * pi), right_elbow=(0.5 * pi),
left_hip=(-0.5 * pi), right_hip=pi,
left_knee=(-0.5 * pi), right_knee=0.)
add_pose(neck=0.,
left_shoulder=(0.75 * pi), right_shoulder=(-0.75 * pi),
left_elbow=(0.5 * pi), right_elbow=(0.5 * pi),
left_hip=(-0.75 * pi), right_hip=(0.75 * pi),
left_knee=(-0.25 * pi), right_knee=(-0.25 * pi))
add_pose(neck=0.,
left_shoulder=pi, right_shoulder=pi,
left_elbow=(0.5 * pi), right_elbow=(0.5 * pi),
left_hip=pi, right_hip=(-0.5 * pi),
left_knee=0., right_knee=(-0.5 * pi))
return KeyFrameAnimation(1., poses)
class MyWindow(pyglet.window.Window):
def __init__(self, **kwargs):
super(MyWindow, self).__init__(**kwargs)
if self.fullscreen:
self.set_exclusive_keyboard(True)
self.set_exclusive_mouse(True)
self.init_world()
self.init_platform()
self.ragdoll = Ragdoll(self.world)
self.screen_time = 0.
self.world_time = 0.
self.time_step = 1. / 60.
self.circle_vertex_list = create_circle_vertex_list()
pyglet.clock.schedule_interval(self.step, self.time_step)
animation = create_walk_animation()
animation.loop(self.ragdoll)
def init_world(self):
aabb = b2AABB()
aabb.lowerBound = -10., -10.
aabb.upperBound = 10., 10.
self.world = b2World(aabb, (0., -10.), True)
def init_platform(self):
shape_def = create_box_def(5., 0.1, (0., -1.5), 0.)
create_body(self.world, [shape_def])
def step(self, dt):
self.screen_time += dt
while self.world_time + self.time_step < self.screen_time:
self.world_time += self.time_step
self.ragdoll.step(self.time_step)
self.world.Step(self.time_step, 10, 10)
def on_draw(self):
glClearColor(0., 0., 0., 1.)
self.clear()
self.debug_draw()
def on_close(self):
pyglet.clock.unschedule(self.step)
super(MyWindow, self).on_close()
def debug_draw(self):
glPushMatrix()
glTranslatef(float(self.width // 2), float(self.height // 2), 0.)
scale = 100.
glScalef(scale, scale, scale)
for body in self.world.bodyList:
self.debug_draw_body(body)
for joint in self.world.jointList:
self.debug_draw_joint(joint)
glPopMatrix()
def debug_draw_body(self, body):
glPushMatrix()
glTranslatef(body.position.x, body.position.y, 0.)
glRotatef(body.angle * 180. / pi, 0., 0., 1.)
for shape in body.shapeList:
if isinstance(shape, b2PolygonShape):
self.debug_draw_polygon(shape.vertices)
elif isinstance(shape, b2CircleShape):
x, y = shape.localPosition.tuple()
self.debug_draw_circle(x, y, shape.radius)
glPopMatrix()
def debug_draw_polygon(self, vertices):
coords = []
for i in xrange(len(vertices)):
coords.extend(vertices[i])
coords.extend(vertices[(i + 1) % len(vertices)])
pyglet.graphics.draw(len(coords) // 2, GL_LINES,
('v2f', coords))
def debug_draw_circle(self, x, y, radius):
glPushMatrix()
glTranslatef(x, y, 0.)
glScalef(radius, radius, radius)
self.circle_vertex_list.draw(GL_LINES)
glPopMatrix()
def debug_draw_joint(self, joint):
x, y = joint.GetAnchor1().tuple()
self.debug_draw_circle(x, y, 0.1)
self.debug_draw_circle(x, y, 0.05)
x, y = joint.GetAnchor2().tuple()
self.debug_draw_circle(x, y, 0.1)
self.debug_draw_circle(x, y, 0.05)
def on_key_press(self, symbol, modifiers):
if symbol == pyglet.window.key.ESCAPE:
self.on_close()
if symbol == pyglet.window.key.F12:
color_buffer = pyglet.image.get_buffer_manager().get_color_buffer()
color_buffer.save('screenshot.png')
def main():
fullscreen = '--fullscreen' in sys.argv
window = MyWindow(fullscreen=fullscreen)
pyglet.app.run()
if __name__ == '__main__':
main()
|
|
# Copyright (C) 2011 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
import logging
import mb2freedb
logger = logging.getLogger(__name__)
class CDDB(object):
EOL = "\r\n"
def __init__(self, config, conn):
self.config = config
self.conn = conn
self.cmd = None
self.proto = None
def handle_cmd_cddb_query(self):
"""Perform a CD search based on either the FreeDB DiscID or the CD TOC."""
if len(self.cmd) < 3:
return ["500 Command syntax error."]
discid = self.cmd[0]
try:
int(discid, 16)
except ValueError:
return ["500 ID not hex."]
try:
num_tracks = int(self.cmd[1])
except ValueError:
return ["500 Command syntax error."]
if len(self.cmd) < 3 + num_tracks:
return ["500 Command syntax error."]
offsets = []
for i in xrange(2, 2 + num_tracks):
offsets.append(int(self.cmd[i]))
offsets.append(int(self.cmd[2 + num_tracks]) * 75)
durations = []
for i in xrange(num_tracks):
durations.append((offsets[i + 1] - offsets[i]) * 1000 / 75)
toc_query = """
SELECT DISTINCT
m.id,
CASE
WHEN (SELECT count(*) FROM medium WHERE release = r.id) > 1 THEN
rn.name || ' (disc ' || m.position::text || ')'
ELSE
rn.name
END AS title,
CASE
WHEN artist_name.name = 'Various Artists' THEN
'Various'
ELSE
artist_name.name
END AS artist
FROM
medium m
JOIN tracklist t ON t.id = m.tracklist
JOIN tracklist_index ti ON ti.tracklist = t.id
JOIN release r ON m.release = r.id
JOIN release_name rn ON r.name = rn.id
JOIN artist_credit ON r.artist_credit = artist_credit.id
JOIN artist_name ON artist_credit.name = artist_name.id
WHERE
toc <@ create_bounding_cube(%(durations)s, %(fuzzy)s::int) AND
track_count = %(num_tracks)s
"""
discid_query = """
SELECT DISTINCT
m.id,
CASE
WHEN (SELECT count(*) FROM medium WHERE release = r.id) > 1 THEN
rn.name || ' (disc ' || m.position::text || ')'
ELSE
rn.name
END AS title,
CASE
WHEN artist_name.name = 'Various Artists' THEN
'Various'
ELSE
artist_name.name
END AS artist
FROM
medium m
JOIN medium_cdtoc mc ON m.id = mc.medium
JOIN cdtoc c ON c.id = mc.cdtoc
JOIN tracklist t ON t.id = m.tracklist
JOIN release r ON m.release = r.id
JOIN release_name rn ON r.name = rn.id
JOIN artist_credit ON r.artist_credit = artist_credit.id
JOIN artist_name ON artist_credit.name = artist_name.id
WHERE
c.freedb_id = %(discid)s AND
t.track_count = %(num_tracks)s
"""
#used_toc = False
#rows = self.conn.execute(discid_query, dict(discid=discid, num_tracks=num_tracks)).fetchall()
#if not rows:
used_toc = True
rows = self.conn.execute(toc_query, dict(durations=durations, num_tracks=num_tracks, fuzzy=10000)).fetchall()
if not rows:
return ["202 No match found."]
# Only one match and we didn't use the TOC
if len(rows) == 1 and not used_toc:
id, title, artist = rows[0]
return ["200 rock %08x %s / %s" % (id, artist, title)]
# Found multiple matches
res = ["211 Found inexact matches, list follows (until terminating `.')"]
for id, title, artist in rows:
res.append("rock %08x %s / %s" % (id, artist, title))
res.append(".")
return res
def handle_cmd_cddb_read(self):
"""Read entry from database."""
if len(self.cmd) < 2:
return ["500 Command syntax error."]
if self.cmd[0] != 'rock':
return ["401 Specified CDDB entry not found."]
try:
medium_id = int(self.cmd[1], 16)
except ValueError:
return ["500 ID not hex."]
release_query = """
SELECT
CASE
WHEN (SELECT count(*) FROM medium WHERE release = r.id) > 1 THEN
rn.name || ' (disc ' || m.position::text || ')'
ELSE
rn.name
END AS title,
CASE
WHEN racn.name = 'Various Artists' THEN
'Various'
ELSE
racn.name
END AS artist,
r.date_year AS year,
m.tracklist
FROM medium m
JOIN release r ON m.release = r.id
JOIN release_name rn ON r.name = rn.id
JOIN artist_credit rac ON r.artist_credit = rac.id
JOIN artist_name racn ON rac.name = racn.id
WHERE m.id = %(medium_id)s
"""
rows = self.conn.execute(release_query, dict(medium_id=medium_id)).fetchall()
if not rows:
return ["401 Specified CDDB entry not found."]
release = rows[0]
tracks_query = """
SELECT
t.length,
tn.name AS title,
CASE
WHEN tacn.name = 'Various Artists' THEN
'Various'
ELSE
tacn.name
END AS artist
FROM track t
JOIN track_name tn ON t.name = tn.id
JOIN artist_credit tac ON t.artist_credit = tac.id
JOIN artist_name tacn ON tac.name = tacn.id
WHERE t.tracklist = %(tracklist_id)s
ORDER BY t.position
"""
tracks = self.conn.execute(tracks_query, dict(tracklist_id=release['tracklist'])).fetchall()
res = ["210 OK, CDDB database entry follows (until terminating `.')"]
res.append("# xmcd CD database file")
res.append("#")
res.append("# Track frame offsets:")
offset = 150
disc_length = 0
artists = set()
for track in tracks:
res.append("#\t%d" % (offset,))
offset += track['length'] * 75 / 1000
disc_length += track['length'] / 1000
artists.add(track['artist'])
res.append("#")
res.append("# Disc length: %s seconds" % (disc_length,))
res.append("#")
res.append("# Revision: 1")
res.append("# Processed by: mb2freedb %s\r" % (mb2freedb.__version__))
res.append("# Submitted via: mb2freedb %s MusicBrainz FREEDB gateway\r" % (mb2freedb.__version__))
res.append("#")
res.append("DISCID=%08x" % (medium_id,))
res.append("DTITLE=%s / %s" % (release['artist'], release['title']))
if self.proto == '5' or self.proto == '6':
res.append("DYEAR=%s" % (release['year'] or '',))
res.append("DGENRE=Unknown")
if len(artists) > 1:
for i, track in enumerate(tracks):
res.append("TTITLE%d=%s / %s" % (i, track['artist'], track['title']))
else:
for i, track in enumerate(tracks):
res.append("TTITLE%d=%s" % (i, track['title']))
res.append("EXTD=")
for i in xrange(len(tracks)):
res.append("EXTT%d=" % (i,))
res.append("PLAYORDER=")
res.append(".")
return res
def handle_cmd_cddb_lscat(self):
return [
"210 OK, category list follows (until terminating `.')",
"rock", "."
]
def handle_cmd_sites(self):
return [
"210 OK, site information follows (until terminating `.')",
"%s http %d /~cddb/cddb.cgi N000.00 W000.00 MusicBrainz FREEDB gateway" % (config.server_name, config.server_port),
"."
]
def handle_cmd_motd(self):
return [
"210 Last modified: 07/04/2006 12:00:00 MOTD follows (until terminating `.')",
"Welcome to the MusicBrainz FREEDB gateway.",
"You can find the MusicBrainz website at http://musicbrainz.org/",
"."
]
def handle_cmd_stat(self):
return [
"210 OK, status information follows (until terminating `.')",
"Server status:",
" current proto: 6",
" max proto: 6",
" interface: http",
" gets: no",
" puts: no",
" updates: no",
" posting: no",
" validation: accepted",
" quotes: yes",
" strip ext: no",
" secure: no",
" current users: 1",
" max users: 1",
"Database entries: 2",
"Database entries by category:",
" rock: 1",
" jazz: 1",
"."
]
def handle_cmd_whom(self):
return ["401 No user information available."]
def handle_cmd_ver(self):
return ["200 mb2freedb %s, Copyright (c) 2006,2011 Lukas Lalinsky." % (__version__,)]
def handle_cmd_help(self):
return [
"210 OK, help information follows (until terminating `.')",
"The following commands are supported:",
"",
"CDDB <subcmd> (valid subcmds: HELLO LSCAT QUERY READ UNLINK WRITE)",
"DISCID <ntrks> <off_1> <off_2> <...> <off_n> <nsecs>",
"GET <file>",
"HELP [command [subcmd]]",
"LOG [-l lines] [get [-f flag]] [start_time [end_time]] | [day [days]]",
"MOTD",
"PROTO [level]",
"PUT <file>",
"QUIT",
"SITES",
"STAT",
"UPDATE",
"VALIDATE",
"VER",
"WHOM",
"."
]
def handle_cmd_cddb(self):
func_name = 'handle_cmd_cddb_' + self.cmd.pop(0)
if hasattr(self, func_name):
return getattr(self, func_name)()
return ["500 Command syntax error, command unknown, command unimplemented."]
def handle_cmd(self):
if not self.cmd or not self.proto:
return ["500 Command syntax error: incorrect number of arguments."]
self.cmd = self.cmd.lower().split()
func_name = 'handle_cmd_' + self.cmd.pop(0)
if hasattr(self, func_name):
return getattr(self, func_name)()
return ["500 Command syntax error, command unknown, command unimplemented."]
def handle(self, args):
self.cmd = args.get("cmd", [None])[0]
self.proto = args.get("proto", [None])[0]
response = self.EOL.join(self.handle_cmd()).encode('utf8') + self.EOL
logger.debug("Request %s:\n%s\n", args, response)
return response
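# Illustrative request (not part of the module): the WSGI front end, defined
# elsewhere in the package, is assumed to parse the freedb-style query string,
# e.g.
#     ?cmd=cddb+query+940aa90b+3+150+25000+50000+330&proto=6
# with urlparse.parse_qs (so '+' decodes to ' ') and call:
#
#     cddb = CDDB(config, conn)
#     cddb.handle({'cmd': ['cddb query 940aa90b 3 150 25000 50000 330'],
#                  'proto': ['6']})
#
# which dispatches to handle_cmd_cddb_query() with disc ID 940aa90b, 3 tracks,
# frame offsets 150/25000/50000 and a total length of 330 seconds.  The disc
# ID and offsets are made-up values.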
if __name__ == '__main__':
from wsgiref.simple_server import make_server
httpd = make_server('localhost', 8051, application)
httpd.serve_forever()
|
|
# Authors: Nicolas Tresegnie <nicolas.tresegnie@gmail.com>
# Sergey Feldman <sergeyfeldman@gmail.com>
# License: BSD 3 clause
import numbers
import warnings
from collections import Counter
import numpy as np
import numpy.ma as ma
from scipy import sparse as sp
from scipy import stats
from ..base import BaseEstimator, TransformerMixin
from ..utils.sparsefuncs import _get_median
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils._mask import _get_mask
from ..utils import is_scalar_nan
def _check_inputs_dtype(X, missing_values):
if X.dtype.kind in ("f", "i", "u") and not isinstance(missing_values, numbers.Real):
raise ValueError(
"'X' and 'missing_values' types are expected to be"
" both numerical. Got X.dtype={} and "
" type(missing_values)={}.".format(X.dtype, type(missing_values))
)
def _most_frequent(array, extra_value, n_repeat):
"""Compute the most frequent value in a 1d array extended with
[extra_value] * n_repeat, where extra_value is assumed to be not part
of the array."""
# Compute the most frequent value in array only
if array.size > 0:
if array.dtype == object:
# scipy.stats.mode is slow with object dtype array.
# Python Counter is more efficient
counter = Counter(array)
most_frequent_count = counter.most_common(1)[0][1]
# tie breaking similarly to scipy.stats.mode
most_frequent_value = min(
value
for value, count in counter.items()
if count == most_frequent_count
)
else:
mode = stats.mode(array)
most_frequent_value = mode[0][0]
most_frequent_count = mode[1][0]
else:
most_frequent_value = 0
most_frequent_count = 0
# Compare to array + [extra_value] * n_repeat
if most_frequent_count == 0 and n_repeat == 0:
return np.nan
elif most_frequent_count < n_repeat:
return extra_value
elif most_frequent_count > n_repeat:
return most_frequent_value
elif most_frequent_count == n_repeat:
# tie breaking similarly to scipy.stats.mode
return min(most_frequent_value, extra_value)
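# Illustrative behaviour (not part of the library) of the helper above, which
# is used when computing the "most_frequent" statistic (e.g. to fold the
# implicit zeros of sparse input back into the counts):
#
#   _most_frequent(np.array([2, 2, 3]), extra_value=0, n_repeat=1) == 2
#   _most_frequent(np.array([2, 2, 3]), extra_value=0, n_repeat=3) == 0
#   _most_frequent(np.array([2, 2, 3]), extra_value=0, n_repeat=2) == 0
#       (a tie is broken towards the smaller value, mirroring scipy.stats.mode)
#   _most_frequent(np.array([]), extra_value=7, n_repeat=0)  # -> np.nan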
class _BaseImputer(TransformerMixin, BaseEstimator):
"""Base class for all imputers.
It adds automatically support for `add_indicator`.
"""
def __init__(self, *, missing_values=np.nan, add_indicator=False):
self.missing_values = missing_values
self.add_indicator = add_indicator
def _fit_indicator(self, X):
"""Fit a MissingIndicator."""
if self.add_indicator:
self.indicator_ = MissingIndicator(
missing_values=self.missing_values, error_on_new=False
)
self.indicator_._fit(X, precomputed=True)
else:
self.indicator_ = None
def _transform_indicator(self, X):
"""Compute the indicator mask.'
Note that X must be the original data as passed to the imputer before
any imputation, since imputation may be done inplace in some cases.
"""
if self.add_indicator:
if not hasattr(self, "indicator_"):
raise ValueError(
"Make sure to call _fit_indicator before _transform_indicator"
)
return self.indicator_.transform(X)
def _concatenate_indicator(self, X_imputed, X_indicator):
"""Concatenate indicator mask with the imputed data."""
if not self.add_indicator:
return X_imputed
hstack = sp.hstack if sp.issparse(X_imputed) else np.hstack
if X_indicator is None:
raise ValueError(
"Data from the missing indicator are not provided. Call "
"_fit_indicator and _transform_indicator in the imputer "
"implementation."
)
return hstack((X_imputed, X_indicator))
def _more_tags(self):
return {"allow_nan": is_scalar_nan(self.missing_values)}
class SimpleImputer(_BaseImputer):
"""Imputation transformer for completing missing values.
Read more in the :ref:`User Guide <impute>`.
.. versionadded:: 0.20
`SimpleImputer` replaces the previous `sklearn.preprocessing.Imputer`
estimator which is now removed.
Parameters
----------
missing_values : int, float, str, np.nan or None, default=np.nan
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For pandas' dataframes with
nullable integer dtypes with missing values, `missing_values`
should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.
strategy : string, default='mean'
The imputation strategy.
- If "mean", then replace missing values using the mean along
each column. Can only be used with numeric data.
- If "median", then replace missing values using the median along
each column. Can only be used with numeric data.
- If "most_frequent", then replace missing using the most frequent
value along each column. Can be used with strings or numeric data.
If there is more than one such value, only the smallest is returned.
- If "constant", then replace missing values with fill_value. Can be
used with strings or numeric data.
.. versionadded:: 0.20
strategy="constant" for fixed value imputation.
fill_value : string or numerical value, default=None
When strategy == "constant", fill_value is used to replace all
occurrences of missing_values.
If left to the default, fill_value will be 0 when imputing numerical
data and "missing_value" for strings or object data types.
verbose : integer, default=0
Controls the verbosity of the imputer.
copy : boolean, default=True
If True, a copy of X will be created. If False, imputation will
be done in-place whenever possible. Note that, in the following cases,
a new copy will always be made, even if `copy=False`:
- If X is not an array of floating values;
- If X is encoded as a CSR matrix;
- If add_indicator=True.
add_indicator : boolean, default=False
If True, a :class:`MissingIndicator` transform will stack onto output
of the imputer's transform. This allows a predictive estimator
to account for missingness despite imputation. If a feature has no
missing values at fit/train time, the feature won't appear on
the missing indicator even if there are missing values at
transform/test time.
Attributes
----------
statistics_ : array of shape (n_features,)
The imputation fill value for each feature.
Computing statistics can result in `np.nan` values.
During :meth:`transform`, features corresponding to `np.nan`
statistics will be discarded.
indicator_ : :class:`~sklearn.impute.MissingIndicator`
Indicator used to add binary indicators for missing values.
``None`` if add_indicator is False.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
See Also
--------
IterativeImputer : Multivariate imputation of missing values.
Examples
--------
>>> import numpy as np
>>> from sklearn.impute import SimpleImputer
>>> imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
>>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])
SimpleImputer()
>>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]
>>> print(imp_mean.transform(X))
[[ 7. 2. 3. ]
[ 4. 3.5 6. ]
[10. 3.5 9. ]]
Notes
-----
Columns which only contained missing values at :meth:`fit` are discarded
upon :meth:`transform` if strategy is not "constant".
"""
def __init__(
self,
*,
missing_values=np.nan,
strategy="mean",
fill_value=None,
verbose=0,
copy=True,
add_indicator=False,
):
super().__init__(missing_values=missing_values, add_indicator=add_indicator)
self.strategy = strategy
self.fill_value = fill_value
self.verbose = verbose
self.copy = copy
def _validate_input(self, X, in_fit):
allowed_strategies = ["mean", "median", "most_frequent", "constant"]
if self.strategy not in allowed_strategies:
raise ValueError(
"Can only use these strategies: {0} got strategy={1}".format(
allowed_strategies, self.strategy
)
)
if self.strategy in ("most_frequent", "constant"):
# If input is a list of strings, dtype = object.
# Otherwise ValueError is raised in SimpleImputer
# with strategy='most_frequent' or 'constant'
# because the list is converted to a Unicode numpy array
if isinstance(X, list) and any(
isinstance(elem, str) for row in X for elem in row
):
dtype = object
else:
dtype = None
else:
dtype = FLOAT_DTYPES
if not is_scalar_nan(self.missing_values):
force_all_finite = True
else:
force_all_finite = "allow-nan"
try:
X = self._validate_data(
X,
reset=in_fit,
accept_sparse="csc",
dtype=dtype,
force_all_finite=force_all_finite,
copy=self.copy,
)
except ValueError as ve:
if "could not convert" in str(ve):
new_ve = ValueError(
"Cannot use {} strategy with non-numeric data:\n{}".format(
self.strategy, ve
)
)
raise new_ve from None
else:
raise ve
_check_inputs_dtype(X, self.missing_values)
if X.dtype.kind not in ("i", "u", "f", "O"):
raise ValueError(
"SimpleImputer does not support data with dtype "
"{0}. Please provide either a numeric array (with"
" a floating point or integer dtype) or "
"categorical data represented either as an array "
"with integer dtype or an array of string values "
"with an object dtype.".format(X.dtype)
)
return X
def fit(self, X, y=None):
"""Fit the imputer on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
Returns
-------
self : SimpleImputer
"""
X = self._validate_input(X, in_fit=True)
# default fill_value is 0 for numerical input and "missing_value"
# otherwise
if self.fill_value is None:
if X.dtype.kind in ("i", "u", "f"):
fill_value = 0
else:
fill_value = "missing_value"
else:
fill_value = self.fill_value
# fill_value should be numerical in case of numerical input
if (
self.strategy == "constant"
and X.dtype.kind in ("i", "u", "f")
and not isinstance(fill_value, numbers.Real)
):
raise ValueError(
"'fill_value'={0} is invalid. Expected a "
"numerical value when imputing numerical "
"data".format(fill_value)
)
if sp.issparse(X):
# missing_values = 0 not allowed with sparse data as it would
# force densification
if self.missing_values == 0:
raise ValueError(
"Imputation not possible when missing_values "
"== 0 and input is sparse. Provide a dense "
"array instead."
)
else:
self.statistics_ = self._sparse_fit(
X, self.strategy, self.missing_values, fill_value
)
else:
self.statistics_ = self._dense_fit(
X, self.strategy, self.missing_values, fill_value
)
return self
def _sparse_fit(self, X, strategy, missing_values, fill_value):
"""Fit the transformer on sparse data."""
missing_mask = _get_mask(X, missing_values)
mask_data = missing_mask.data
n_implicit_zeros = X.shape[0] - np.diff(X.indptr)
statistics = np.empty(X.shape[1])
if strategy == "constant":
# for constant strategy, self.statistics_ is used to store
# fill_value in each column
statistics.fill(fill_value)
else:
for i in range(X.shape[1]):
column = X.data[X.indptr[i] : X.indptr[i + 1]]
mask_column = mask_data[X.indptr[i] : X.indptr[i + 1]]
column = column[~mask_column]
# combine explicit and implicit zeros
mask_zeros = _get_mask(column, 0)
column = column[~mask_zeros]
n_explicit_zeros = mask_zeros.sum()
n_zeros = n_implicit_zeros[i] + n_explicit_zeros
if strategy == "mean":
s = column.size + n_zeros
statistics[i] = np.nan if s == 0 else column.sum() / s
elif strategy == "median":
statistics[i] = _get_median(column, n_zeros)
elif strategy == "most_frequent":
statistics[i] = _most_frequent(column, 0, n_zeros)
super()._fit_indicator(missing_mask)
return statistics
def _dense_fit(self, X, strategy, missing_values, fill_value):
"""Fit the transformer on dense data."""
missing_mask = _get_mask(X, missing_values)
masked_X = ma.masked_array(X, mask=missing_mask)
super()._fit_indicator(missing_mask)
# Mean
if strategy == "mean":
mean_masked = np.ma.mean(masked_X, axis=0)
# Avoid the warning "Warning: converting a masked element to nan."
mean = np.ma.getdata(mean_masked)
mean[np.ma.getmask(mean_masked)] = np.nan
return mean
# Median
elif strategy == "median":
median_masked = np.ma.median(masked_X, axis=0)
# Avoid the warning "Warning: converting a masked element to nan."
median = np.ma.getdata(median_masked)
median[np.ma.getmaskarray(median_masked)] = np.nan
return median
# Most frequent
elif strategy == "most_frequent":
# Avoid scipy.stats.mstats.mode due to its additional overhead and
# slow performance observed in benchmarks.
# See Issue 14325 and PR 14399 for full discussion.
# To be able to access the elements by column
X = X.transpose()
mask = missing_mask.transpose()
if X.dtype.kind == "O":
most_frequent = np.empty(X.shape[0], dtype=object)
else:
most_frequent = np.empty(X.shape[0])
for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
row_mask = np.logical_not(row_mask).astype(bool)
row = row[row_mask]
most_frequent[i] = _most_frequent(row, np.nan, 0)
return most_frequent
# Constant
elif strategy == "constant":
# for constant strategy, self.statistics_ is used to store
# fill_value in each column
return np.full(X.shape[1], fill_value, dtype=X.dtype)
def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data to complete.
Returns
-------
X_imputed : {ndarray, sparse matrix} of shape \
(n_samples, n_features_out)
`X` with imputed values.
"""
check_is_fitted(self)
X = self._validate_input(X, in_fit=False)
statistics = self.statistics_
if X.shape[1] != statistics.shape[0]:
raise ValueError(
"X has %d features per sample, expected %d"
% (X.shape[1], self.statistics_.shape[0])
)
# compute mask before eliminating invalid features
missing_mask = _get_mask(X, self.missing_values)
# Delete the invalid columns if strategy is not constant
if self.strategy == "constant":
valid_statistics = statistics
valid_statistics_indexes = None
else:
# same as np.isnan but also works for object dtypes
invalid_mask = _get_mask(statistics, np.nan)
valid_mask = np.logical_not(invalid_mask)
valid_statistics = statistics[valid_mask]
valid_statistics_indexes = np.flatnonzero(valid_mask)
if invalid_mask.any():
missing = np.arange(X.shape[1])[invalid_mask]
if self.verbose:
warnings.warn(
"Deleting features without observed values: %s" % missing
)
X = X[:, valid_statistics_indexes]
# Do actual imputation
if sp.issparse(X):
if self.missing_values == 0:
raise ValueError(
"Imputation not possible when missing_values "
"== 0 and input is sparse. Provide a dense "
"array instead."
)
else:
# if no invalid statistics are found, use the mask computed
# before, else recompute mask
if valid_statistics_indexes is None:
mask = missing_mask.data
else:
mask = _get_mask(X.data, self.missing_values)
indexes = np.repeat(
np.arange(len(X.indptr) - 1, dtype=int), np.diff(X.indptr)
)[mask]
X.data[mask] = valid_statistics[indexes].astype(X.dtype, copy=False)
else:
# use the mask computed before eliminating invalid features
if valid_statistics_indexes is None:
mask_valid_features = missing_mask
else:
mask_valid_features = missing_mask[:, valid_statistics_indexes]
n_missing = np.sum(mask_valid_features, axis=0)
values = np.repeat(valid_statistics, n_missing)
coordinates = np.where(mask_valid_features.transpose())[::-1]
X[coordinates] = values
X_indicator = super()._transform_indicator(missing_mask)
return super()._concatenate_indicator(X, X_indicator)
def inverse_transform(self, X):
"""Convert the data back to the original representation.
Inverts the `transform` operation performed on an array.
This operation can only be performed after :class:`SimpleImputer` is
instantiated with `add_indicator=True`.
Note that ``inverse_transform`` can only invert the transform in
features that have binary indicators for missing values. If a feature
has no missing values at ``fit`` time, the feature won't have a binary
indicator, and the imputation done at ``transform`` time won't be
inverted.
.. versionadded:: 0.24
Parameters
----------
X : array-like of shape \
(n_samples, n_features + n_features_missing_indicator)
The imputed data to be reverted to original data. It has to be
an augmented array of imputed data and the missing indicator mask.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
The original X with missing values as it was prior
to imputation.
"""
check_is_fitted(self)
if not self.add_indicator:
raise ValueError(
"'inverse_transform' works only when "
"'SimpleImputer' is instantiated with "
"'add_indicator=True'. "
f"Got 'add_indicator={self.add_indicator}' "
"instead."
)
n_features_missing = len(self.indicator_.features_)
non_empty_feature_count = X.shape[1] - n_features_missing
array_imputed = X[:, :non_empty_feature_count].copy()
missing_mask = X[:, non_empty_feature_count:].astype(bool)
n_features_original = len(self.statistics_)
shape_original = (X.shape[0], n_features_original)
X_original = np.zeros(shape_original)
X_original[:, self.indicator_.features_] = missing_mask
full_mask = X_original.astype(bool)
imputed_idx, original_idx = 0, 0
while imputed_idx < len(array_imputed.T):
if not np.all(X_original[:, original_idx]):
X_original[:, original_idx] = array_imputed.T[imputed_idx]
imputed_idx += 1
original_idx += 1
else:
original_idx += 1
X_original[full_mask] = self.missing_values
return X_original
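# Illustrative sketch (not part of the estimator above): how `add_indicator=True`
# lets `inverse_transform` put the original missing entries back. The data below
# is made up purely for demonstration; only numpy and this module's SimpleImputer
# are assumed.
def _example_inverse_transform_sketch():
    import numpy as np  # local import so the sketch stays self-contained

    X = np.array([[1.0, np.nan], [np.nan, 3.0], [4.0, 6.0]])
    imputer = SimpleImputer(strategy="mean", add_indicator=True)
    # Xt holds the imputed features followed by one binary missing-indicator
    # column for every feature that had missing values at fit time.
    Xt = imputer.fit_transform(X)
    # inverse_transform writes `missing_values` back wherever the indicator is True.
    X_restored = imputer.inverse_transform(Xt)
    return X_restored  # same shape as X, with np.nan restored in the imputed cells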
class MissingIndicator(TransformerMixin, BaseEstimator):
"""Binary indicators for missing values.
Note that this component typically should not be used in a vanilla
:class:`Pipeline` consisting of transformers and a classifier, but rather
could be added using a :class:`FeatureUnion` or :class:`ColumnTransformer`.
Read more in the :ref:`User Guide <impute>`.
.. versionadded:: 0.20
Parameters
----------
missing_values : int, float, string, np.nan or None, default=np.nan
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For pandas' dataframes with
nullable integer dtypes with missing values, `missing_values`
should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.
features : {'missing-only', 'all'}, default='missing-only'
Whether the imputer mask should represent all or a subset of
features.
- If 'missing-only' (default), the imputer mask will only represent
features containing missing values during fit time.
- If 'all', the imputer mask will represent all features.
sparse : bool or 'auto', default='auto'
Whether the imputer mask format should be sparse or dense.
- If 'auto' (default), the imputer mask will be of same type as
input.
- If True, the imputer mask will be a sparse matrix.
- If False, the imputer mask will be a numpy array.
error_on_new : bool, default=True
If True, transform will raise an error when there are features with
missing values in transform that have no missing values in fit. This is
applicable only when `features='missing-only'`.
Attributes
----------
features_ : ndarray, shape (n_missing_features,) or (n_features,)
The indices of the features that will be returned when calling ``transform``.
They are computed during ``fit``. For ``features='all'``, it is equal
to ``range(n_features)``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Examples
--------
>>> import numpy as np
>>> from sklearn.impute import MissingIndicator
>>> X1 = np.array([[np.nan, 1, 3],
... [4, 0, np.nan],
... [8, 1, 0]])
>>> X2 = np.array([[5, 1, np.nan],
... [np.nan, 2, 3],
... [2, 4, 0]])
>>> indicator = MissingIndicator()
>>> indicator.fit(X1)
MissingIndicator()
>>> X2_tr = indicator.transform(X2)
>>> X2_tr
array([[False, True],
[ True, False],
[False, False]])
"""
def __init__(
self,
*,
missing_values=np.nan,
features="missing-only",
sparse="auto",
error_on_new=True,
):
self.missing_values = missing_values
self.features = features
self.sparse = sparse
self.error_on_new = error_on_new
def _get_missing_features_info(self, X):
"""Compute the imputer mask and the indices of the features
containing missing values.
Parameters
----------
X : {ndarray or sparse matrix}, shape (n_samples, n_features)
The input data with missing values. Note that ``X`` has been
checked in ``fit`` and ``transform`` before calling this function.
Returns
-------
imputer_mask : {ndarray or sparse matrix}, shape \
(n_samples, n_features)
The imputer mask of the original data.
features_with_missing : ndarray, shape (n_features_with_missing)
The features containing missing values.
"""
if not self._precomputed:
imputer_mask = _get_mask(X, self.missing_values)
else:
imputer_mask = X
if sp.issparse(X):
imputer_mask.eliminate_zeros()
if self.features == "missing-only":
n_missing = imputer_mask.getnnz(axis=0)
if self.sparse is False:
imputer_mask = imputer_mask.toarray()
elif imputer_mask.format == "csr":
imputer_mask = imputer_mask.tocsc()
else:
if not self._precomputed:
imputer_mask = _get_mask(X, self.missing_values)
else:
imputer_mask = X
if self.features == "missing-only":
n_missing = imputer_mask.sum(axis=0)
if self.sparse is True:
imputer_mask = sp.csc_matrix(imputer_mask)
if self.features == "all":
features_indices = np.arange(X.shape[1])
else:
features_indices = np.flatnonzero(n_missing)
return imputer_mask, features_indices
def _validate_input(self, X, in_fit):
if not is_scalar_nan(self.missing_values):
force_all_finite = True
else:
force_all_finite = "allow-nan"
X = self._validate_data(
X,
reset=in_fit,
accept_sparse=("csc", "csr"),
dtype=None,
force_all_finite=force_all_finite,
)
_check_inputs_dtype(X, self.missing_values)
if X.dtype.kind not in ("i", "u", "f", "O"):
raise ValueError(
"MissingIndicator does not support data with "
"dtype {0}. Please provide either a numeric array"
" (with a floating point or integer dtype) or "
"categorical data represented either as an array "
"with integer dtype or an array of string values "
"with an object dtype.".format(X.dtype)
)
if sp.issparse(X) and self.missing_values == 0:
# missing_values = 0 not allowed with sparse data as it would
# force densification
raise ValueError(
"Sparse input with missing_values=0 is "
"not supported. Provide a dense "
"array instead."
)
return X
def _fit(self, X, y=None, precomputed=False):
"""Fit the transformer on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
If `precomputed` is True, then `X` is a mask of the
input data.
precomputed : bool
Whether the input data is a mask.
Returns
-------
imputer_mask : {ndarray or sparse matrix}, shape (n_samples, \
n_features)
The imputer mask of the original data.
"""
if precomputed:
if not (hasattr(X, "dtype") and X.dtype.kind == "b"):
raise ValueError("precomputed is True but the input data is not a mask")
self._precomputed = True
else:
self._precomputed = False
# Need not validate X again as it would have already been validated
# in the Imputer calling MissingIndicator
if not self._precomputed:
X = self._validate_input(X, in_fit=True)
self._n_features = X.shape[1]
if self.features not in ("missing-only", "all"):
raise ValueError(
"'features' has to be either 'missing-only' or "
"'all'. Got {} instead.".format(self.features)
)
if not (
(isinstance(self.sparse, str) and self.sparse == "auto")
or isinstance(self.sparse, bool)
):
raise ValueError(
"'sparse' has to be a boolean or 'auto'. Got {!r} instead.".format(
self.sparse
)
)
missing_features_info = self._get_missing_features_info(X)
self.features_ = missing_features_info[1]
return missing_features_info[0]
def fit(self, X, y=None):
"""Fit the transformer on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
Returns
-------
self : object
Returns self.
"""
self._fit(X, y)
return self
def transform(self, X):
"""Generate missing values indicator for X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data to complete.
Returns
-------
Xt : {ndarray or sparse matrix}, shape (n_samples, n_features) \
or (n_samples, n_features_with_missing)
The missing indicator for input data. The data type of ``Xt``
will be boolean.
"""
check_is_fitted(self)
# Need not validate X again as it would have already been validated
# in the Imputer calling MissingIndicator
if not self._precomputed:
X = self._validate_input(X, in_fit=False)
else:
if not (hasattr(X, "dtype") and X.dtype.kind == "b"):
raise ValueError("precomputed is True but the input data is not a mask")
imputer_mask, features = self._get_missing_features_info(X)
if self.features == "missing-only":
features_diff_fit_trans = np.setdiff1d(features, self.features_)
if self.error_on_new and features_diff_fit_trans.size > 0:
raise ValueError(
"The features {} have missing values "
"in transform but have no missing values "
"in fit.".format(features_diff_fit_trans)
)
if self.features_.size < self._n_features:
imputer_mask = imputer_mask[:, self.features_]
return imputer_mask
def fit_transform(self, X, y=None):
"""Generate missing values indicator for X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data to complete.
Returns
-------
Xt : {ndarray or sparse matrix}, shape (n_samples, n_features) \
or (n_samples, n_features_with_missing)
The missing indicator for input data. The data type of ``Xt``
will be boolean.
"""
imputer_mask = self._fit(X, y)
if self.features_.size < self._n_features:
imputer_mask = imputer_mask[:, self.features_]
return imputer_mask
def _more_tags(self):
return {
"allow_nan": True,
"X_types": ["2darray", "string"],
"preserves_dtype": [],
}
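# Illustrative sketch (not part of the estimator above): `features='missing-only'`
# keeps only the columns that had missing values at fit time, while
# `features='all'` returns one indicator column per input feature. The data is
# made up for demonstration.
def _example_missing_indicator_sketch():
    import numpy as np

    X = np.array([[np.nan, 1.0, 3.0], [4.0, 0.0, np.nan], [8.0, 1.0, 0.0]])
    # Default: only columns 0 and 2 contained missing values, so the mask has
    # two columns and `features_` records their indices.
    default_mask = MissingIndicator().fit_transform(X)
    # With features='all', every column gets an indicator, including column 1,
    # which never had a missing value.
    full_mask = MissingIndicator(features="all").fit_transform(X)
    return default_mask, full_mask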
|
|
"""
Flux for Home-Assistant.
The idea was taken from https://github.com/KpaBap/hue-flux/
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/switch.flux/
"""
import datetime
import logging
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_RGB_COLOR,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
ATTR_XY_COLOR,
DOMAIN as LIGHT_DOMAIN,
VALID_TRANSITION,
is_on,
)
from homeassistant.components.switch import DOMAIN, SwitchDevice
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_LIGHTS,
CONF_MODE,
CONF_NAME,
CONF_PLATFORM,
SERVICE_TURN_ON,
STATE_ON,
SUN_EVENT_SUNRISE,
SUN_EVENT_SUNSET,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.sun import get_astral_event_date
from homeassistant.util import slugify
from homeassistant.util.color import (
color_RGB_to_xy_brightness,
color_temperature_kelvin_to_mired,
color_temperature_to_rgb,
)
from homeassistant.util.dt import as_local, utcnow as dt_utcnow
_LOGGER = logging.getLogger(__name__)
CONF_START_TIME = "start_time"
CONF_STOP_TIME = "stop_time"
CONF_START_CT = "start_colortemp"
CONF_SUNSET_CT = "sunset_colortemp"
CONF_STOP_CT = "stop_colortemp"
CONF_BRIGHTNESS = "brightness"
CONF_DISABLE_BRIGHTNESS_ADJUST = "disable_brightness_adjust"
CONF_INTERVAL = "interval"
MODE_XY = "xy"
MODE_MIRED = "mired"
MODE_RGB = "rgb"
DEFAULT_MODE = MODE_XY
PLATFORM_SCHEMA = vol.Schema(
{
vol.Required(CONF_PLATFORM): "flux",
vol.Required(CONF_LIGHTS): cv.entity_ids,
vol.Optional(CONF_NAME, default="Flux"): cv.string,
vol.Optional(CONF_START_TIME): cv.time,
vol.Optional(CONF_STOP_TIME): cv.time,
vol.Optional(CONF_START_CT, default=4000): vol.All(
vol.Coerce(int), vol.Range(min=1000, max=40000)
),
vol.Optional(CONF_SUNSET_CT, default=3000): vol.All(
vol.Coerce(int), vol.Range(min=1000, max=40000)
),
vol.Optional(CONF_STOP_CT, default=1900): vol.All(
vol.Coerce(int), vol.Range(min=1000, max=40000)
),
vol.Optional(CONF_BRIGHTNESS): vol.All(
vol.Coerce(int), vol.Range(min=0, max=255)
),
vol.Optional(CONF_DISABLE_BRIGHTNESS_ADJUST): cv.boolean,
vol.Optional(CONF_MODE, default=DEFAULT_MODE): vol.Any(
MODE_XY, MODE_MIRED, MODE_RGB
),
vol.Optional(CONF_INTERVAL, default=30): cv.positive_int,
vol.Optional(ATTR_TRANSITION, default=30): VALID_TRANSITION,
}
)
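# Illustrative sketch (not used by Home Assistant itself): how PLATFORM_SCHEMA
# validates a flux switch configuration and fills in the defaults declared
# above. The entity ids and values below are made-up examples.
def _example_validate_flux_config():
    example_config = {
        CONF_PLATFORM: "flux",
        CONF_LIGHTS: ["light.desk", "light.lamp"],
        CONF_START_CT: 4000,
        CONF_STOP_CT: 1900,
    }
    # Raises voluptuous.Invalid on bad input; on success the defaults for the
    # name, sunset color temperature, mode, interval and transition are added.
    return PLATFORM_SCHEMA(example_config)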
async def async_set_lights_xy(hass, lights, x_val, y_val, brightness, transition):
"""Set color of array of lights."""
for light in lights:
if is_on(hass, light):
service_data = {ATTR_ENTITY_ID: light}
if x_val is not None and y_val is not None:
service_data[ATTR_XY_COLOR] = [x_val, y_val]
if brightness is not None:
service_data[ATTR_BRIGHTNESS] = brightness
service_data[ATTR_WHITE_VALUE] = brightness
if transition is not None:
service_data[ATTR_TRANSITION] = transition
await hass.services.async_call(LIGHT_DOMAIN, SERVICE_TURN_ON, service_data)
async def async_set_lights_temp(hass, lights, mired, brightness, transition):
"""Set color of array of lights."""
for light in lights:
if is_on(hass, light):
service_data = {ATTR_ENTITY_ID: light}
if mired is not None:
service_data[ATTR_COLOR_TEMP] = int(mired)
if brightness is not None:
service_data[ATTR_BRIGHTNESS] = brightness
if transition is not None:
service_data[ATTR_TRANSITION] = transition
await hass.services.async_call(LIGHT_DOMAIN, SERVICE_TURN_ON, service_data)
async def async_set_lights_rgb(hass, lights, rgb, transition):
"""Set color of array of lights."""
for light in lights:
if is_on(hass, light):
service_data = {ATTR_ENTITY_ID: light}
if rgb is not None:
service_data[ATTR_RGB_COLOR] = rgb
if transition is not None:
service_data[ATTR_TRANSITION] = transition
await hass.services.async_call(LIGHT_DOMAIN, SERVICE_TURN_ON, service_data)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Flux switches."""
name = config.get(CONF_NAME)
lights = config.get(CONF_LIGHTS)
start_time = config.get(CONF_START_TIME)
stop_time = config.get(CONF_STOP_TIME)
start_colortemp = config.get(CONF_START_CT)
sunset_colortemp = config.get(CONF_SUNSET_CT)
stop_colortemp = config.get(CONF_STOP_CT)
brightness = config.get(CONF_BRIGHTNESS)
disable_brightness_adjust = config.get(CONF_DISABLE_BRIGHTNESS_ADJUST)
mode = config.get(CONF_MODE)
interval = config.get(CONF_INTERVAL)
transition = config.get(ATTR_TRANSITION)
flux = FluxSwitch(
name,
hass,
lights,
start_time,
stop_time,
start_colortemp,
sunset_colortemp,
stop_colortemp,
brightness,
disable_brightness_adjust,
mode,
interval,
transition,
)
async_add_entities([flux])
async def async_update(call=None):
"""Update lights."""
await flux.async_flux_update()
service_name = slugify("{} {}".format(name, "update"))
hass.services.async_register(DOMAIN, service_name, async_update)
class FluxSwitch(SwitchDevice, RestoreEntity):
"""Representation of a Flux switch."""
def __init__(
self,
name,
hass,
lights,
start_time,
stop_time,
start_colortemp,
sunset_colortemp,
stop_colortemp,
brightness,
disable_brightness_adjust,
mode,
interval,
transition,
):
"""Initialize the Flux switch."""
self._name = name
self.hass = hass
self._lights = lights
self._start_time = start_time
self._stop_time = stop_time
self._start_colortemp = start_colortemp
self._sunset_colortemp = sunset_colortemp
self._stop_colortemp = stop_colortemp
self._brightness = brightness
self._disable_brightness_adjust = disable_brightness_adjust
self._mode = mode
self._interval = interval
self._transition = transition
self.unsub_tracker = None
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def is_on(self):
"""Return true if switch is on."""
return self.unsub_tracker is not None
async def async_added_to_hass(self):
"""Call when entity about to be added to hass."""
last_state = await self.async_get_last_state()
if last_state and last_state.state == STATE_ON:
await self.async_turn_on()
async def async_turn_on(self, **kwargs):
"""Turn on flux."""
if self.is_on:
return
self.unsub_tracker = async_track_time_interval(
self.hass,
self.async_flux_update,
datetime.timedelta(seconds=self._interval),
)
# Make initial update
await self.async_flux_update()
self.async_schedule_update_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn off flux."""
if self.is_on:
self.unsub_tracker()
self.unsub_tracker = None
self.async_schedule_update_ha_state()
async def async_flux_update(self, utcnow=None):
"""Update all the lights using flux."""
if utcnow is None:
utcnow = dt_utcnow()
now = as_local(utcnow)
sunset = get_astral_event_date(self.hass, SUN_EVENT_SUNSET, now.date())
start_time = self.find_start_time(now)
stop_time = self.find_stop_time(now)
if stop_time <= start_time:
# stop_time does not happen on the same day as start_time
if start_time < now:
# stop time is tomorrow
stop_time += datetime.timedelta(days=1)
elif now < start_time:
# stop_time was yesterday since the new start_time is not reached
stop_time -= datetime.timedelta(days=1)
if start_time < now < sunset:
# Daytime
time_state = "day"
temp_range = abs(self._start_colortemp - self._sunset_colortemp)
day_length = int(sunset.timestamp() - start_time.timestamp())
seconds_from_start = int(now.timestamp() - start_time.timestamp())
percentage_complete = seconds_from_start / day_length
temp_offset = temp_range * percentage_complete
if self._start_colortemp > self._sunset_colortemp:
temp = self._start_colortemp - temp_offset
else:
temp = self._start_colortemp + temp_offset
else:
# Night time
time_state = "night"
if now < stop_time:
if stop_time < start_time and stop_time.day == sunset.day:
# we need to use yesterday's sunset time
sunset_time = sunset - datetime.timedelta(days=1)
else:
sunset_time = sunset
night_length = int(stop_time.timestamp() - sunset_time.timestamp())
seconds_from_sunset = int(now.timestamp() - sunset_time.timestamp())
percentage_complete = seconds_from_sunset / night_length
else:
percentage_complete = 1
temp_range = abs(self._sunset_colortemp - self._stop_colortemp)
temp_offset = temp_range * percentage_complete
if self._sunset_colortemp > self._stop_colortemp:
temp = self._sunset_colortemp - temp_offset
else:
temp = self._sunset_colortemp + temp_offset
rgb = color_temperature_to_rgb(temp)
x_val, y_val, b_val = color_RGB_to_xy_brightness(*rgb)
brightness = self._brightness if self._brightness else b_val
if self._disable_brightness_adjust:
brightness = None
if self._mode == MODE_XY:
await async_set_lights_xy(
self.hass, self._lights, x_val, y_val, brightness, self._transition
)
_LOGGER.debug(
"Lights updated to x:%s y:%s brightness:%s, %s%% "
"of %s cycle complete at %s",
x_val,
y_val,
brightness,
round(percentage_complete * 100),
time_state,
now,
)
elif self._mode == MODE_RGB:
await async_set_lights_rgb(self.hass, self._lights, rgb, self._transition)
_LOGGER.debug(
"Lights updated to rgb:%s, %s%% of %s cycle complete at %s",
rgb,
round(percentage_complete * 100),
time_state,
now,
)
else:
# Convert to mired and clamp to allowed values
mired = color_temperature_kelvin_to_mired(temp)
await async_set_lights_temp(
self.hass, self._lights, mired, brightness, self._transition
)
_LOGGER.debug(
"Lights updated to mired:%s brightness:%s, %s%% "
"of %s cycle complete at %s",
mired,
brightness,
round(percentage_complete * 100),
time_state,
now,
)
def find_start_time(self, now):
"""Return sunrise or start_time if given."""
if self._start_time:
sunrise = now.replace(
hour=self._start_time.hour, minute=self._start_time.minute, second=0
)
else:
sunrise = get_astral_event_date(self.hass, SUN_EVENT_SUNRISE, now.date())
return sunrise
def find_stop_time(self, now):
"""Return dusk or stop_time if given."""
if self._stop_time:
dusk = now.replace(
hour=self._stop_time.hour, minute=self._stop_time.minute, second=0
)
else:
dusk = get_astral_event_date(self.hass, "dusk", now.date())
return dusk
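# Illustrative sketch (standalone, not called by FluxSwitch): the daytime branch
# of async_flux_update is a linear interpolation between the start and sunset
# color temperatures, driven by how far through the day we are.
def _example_daytime_temp(start_colortemp, sunset_colortemp, day_length, seconds_from_start):
    """Return the target color temperature in kelvin for the current moment."""
    percentage_complete = seconds_from_start / day_length
    temp_range = abs(start_colortemp - sunset_colortemp)
    temp_offset = temp_range * percentage_complete
    if start_colortemp > sunset_colortemp:
        return start_colortemp - temp_offset
    return start_colortemp + temp_offset
# Example: halfway through the day with the default 4000K -> 1900K start/stop and
# a 4000K -> 3000K daytime ramp, _example_daytime_temp(4000, 3000, 36000, 18000) == 3500.0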
|
|
# coding: utf-8
"""
KubeVirt API
This is the KubeVirt API, an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1DataVolumeSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'blank': 'V1beta1DataVolumeBlankImage',
'http': 'V1beta1DataVolumeSourceHTTP',
'imageio': 'V1beta1DataVolumeSourceImageIO',
'pvc': 'V1beta1DataVolumeSourcePVC',
'registry': 'V1beta1DataVolumeSourceRegistry',
's3': 'V1beta1DataVolumeSourceS3',
'upload': 'V1beta1DataVolumeSourceUpload',
'vddk': 'V1beta1DataVolumeSourceVDDK'
}
attribute_map = {
'blank': 'blank',
'http': 'http',
'imageio': 'imageio',
'pvc': 'pvc',
'registry': 'registry',
's3': 's3',
'upload': 'upload',
'vddk': 'vddk'
}
def __init__(self, blank=None, http=None, imageio=None, pvc=None, registry=None, s3=None, upload=None, vddk=None):
"""
V1beta1DataVolumeSource - a model defined in Swagger
"""
self._blank = None
self._http = None
self._imageio = None
self._pvc = None
self._registry = None
self._s3 = None
self._upload = None
self._vddk = None
if blank is not None:
self.blank = blank
if http is not None:
self.http = http
if imageio is not None:
self.imageio = imageio
if pvc is not None:
self.pvc = pvc
if registry is not None:
self.registry = registry
if s3 is not None:
self.s3 = s3
if upload is not None:
self.upload = upload
if vddk is not None:
self.vddk = vddk
@property
def blank(self):
"""
Gets the blank of this V1beta1DataVolumeSource.
:return: The blank of this V1beta1DataVolumeSource.
:rtype: V1beta1DataVolumeBlankImage
"""
return self._blank
@blank.setter
def blank(self, blank):
"""
Sets the blank of this V1beta1DataVolumeSource.
:param blank: The blank of this V1beta1DataVolumeSource.
:type: V1beta1DataVolumeBlankImage
"""
self._blank = blank
@property
def http(self):
"""
Gets the http of this V1beta1DataVolumeSource.
:return: The http of this V1beta1DataVolumeSource.
:rtype: V1beta1DataVolumeSourceHTTP
"""
return self._http
@http.setter
def http(self, http):
"""
Sets the http of this V1beta1DataVolumeSource.
:param http: The http of this V1beta1DataVolumeSource.
:type: V1beta1DataVolumeSourceHTTP
"""
self._http = http
@property
def imageio(self):
"""
Gets the imageio of this V1beta1DataVolumeSource.
:return: The imageio of this V1beta1DataVolumeSource.
:rtype: V1beta1DataVolumeSourceImageIO
"""
return self._imageio
@imageio.setter
def imageio(self, imageio):
"""
Sets the imageio of this V1beta1DataVolumeSource.
:param imageio: The imageio of this V1beta1DataVolumeSource.
:type: V1beta1DataVolumeSourceImageIO
"""
self._imageio = imageio
@property
def pvc(self):
"""
Gets the pvc of this V1beta1DataVolumeSource.
:return: The pvc of this V1beta1DataVolumeSource.
:rtype: V1beta1DataVolumeSourcePVC
"""
return self._pvc
@pvc.setter
def pvc(self, pvc):
"""
Sets the pvc of this V1beta1DataVolumeSource.
:param pvc: The pvc of this V1beta1DataVolumeSource.
:type: V1beta1DataVolumeSourcePVC
"""
self._pvc = pvc
@property
def registry(self):
"""
Gets the registry of this V1beta1DataVolumeSource.
:return: The registry of this V1beta1DataVolumeSource.
:rtype: V1beta1DataVolumeSourceRegistry
"""
return self._registry
@registry.setter
def registry(self, registry):
"""
Sets the registry of this V1beta1DataVolumeSource.
:param registry: The registry of this V1beta1DataVolumeSource.
:type: V1beta1DataVolumeSourceRegistry
"""
self._registry = registry
@property
def s3(self):
"""
Gets the s3 of this V1beta1DataVolumeSource.
:return: The s3 of this V1beta1DataVolumeSource.
:rtype: V1beta1DataVolumeSourceS3
"""
return self._s3
@s3.setter
def s3(self, s3):
"""
Sets the s3 of this V1beta1DataVolumeSource.
:param s3: The s3 of this V1beta1DataVolumeSource.
:type: V1beta1DataVolumeSourceS3
"""
self._s3 = s3
@property
def upload(self):
"""
Gets the upload of this V1beta1DataVolumeSource.
:return: The upload of this V1beta1DataVolumeSource.
:rtype: V1beta1DataVolumeSourceUpload
"""
return self._upload
@upload.setter
def upload(self, upload):
"""
Sets the upload of this V1beta1DataVolumeSource.
:param upload: The upload of this V1beta1DataVolumeSource.
:type: V1beta1DataVolumeSourceUpload
"""
self._upload = upload
@property
def vddk(self):
"""
Gets the vddk of this V1beta1DataVolumeSource.
:return: The vddk of this V1beta1DataVolumeSource.
:rtype: V1beta1DataVolumeSourceVDDK
"""
return self._vddk
@vddk.setter
def vddk(self, vddk):
"""
Sets the vddk of this V1beta1DataVolumeSource.
:param vddk: The vddk of this V1beta1DataVolumeSource.
:type: V1beta1DataVolumeSourceVDDK
"""
self._vddk = vddk
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1DataVolumeSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
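# Illustrative sketch (not part of the generated client): constructing the model
# and serializing it. All eight source fields are optional, so an empty instance
# is valid and to_dict() simply reports every attribute as None.
def _example_data_volume_source_sketch():
    source = V1beta1DataVolumeSource()
    as_dict = source.to_dict()   # {'blank': None, 'http': None, ..., 'vddk': None}
    as_text = source.to_str()    # pretty-printed form of the same dict
    return as_dict, as_text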
|
|
"""
Tests for the throttling implementations in the permissions module.
"""
import pytest
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpRequest
from django.test import TestCase
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.test import APIRequestFactory, force_authenticate
from rest_framework.throttling import (
AnonRateThrottle, BaseThrottle, ScopedRateThrottle, SimpleRateThrottle,
UserRateThrottle
)
from rest_framework.views import APIView
class User3SecRateThrottle(UserRateThrottle):
rate = '3/sec'
scope = 'seconds'
class User3MinRateThrottle(UserRateThrottle):
rate = '3/min'
scope = 'minutes'
class User6MinRateThrottle(UserRateThrottle):
rate = '6/min'
scope = 'minutes'
class NonTimeThrottle(BaseThrottle):
def allow_request(self, request, view):
if not hasattr(self.__class__, 'called'):
self.__class__.called = True
return True
return False
class MockView_DoubleThrottling(APIView):
throttle_classes = (User3SecRateThrottle, User6MinRateThrottle,)
def get(self, request):
return Response('foo')
class MockView(APIView):
throttle_classes = (User3SecRateThrottle,)
def get(self, request):
return Response('foo')
class MockView_MinuteThrottling(APIView):
throttle_classes = (User3MinRateThrottle,)
def get(self, request):
return Response('foo')
class MockView_NonTimeThrottling(APIView):
throttle_classes = (NonTimeThrottle,)
def get(self, request):
return Response('foo')
class ThrottlingTests(TestCase):
def setUp(self):
"""
Reset the cache so that no throttles will be active
"""
cache.clear()
self.factory = APIRequestFactory()
def test_requests_are_throttled(self):
"""
Ensure request rate is limited
"""
request = self.factory.get('/')
for dummy in range(4):
response = MockView.as_view()(request)
assert response.status_code == 429
def set_throttle_timer(self, view, value):
"""
Explicitly set the timer, overriding time.time()
"""
for cls in view.throttle_classes:
cls.timer = lambda self: value
def test_request_throttling_expires(self):
"""
Ensure request rate is limited for a limited duration only
"""
self.set_throttle_timer(MockView, 0)
request = self.factory.get('/')
for dummy in range(4):
response = MockView.as_view()(request)
assert response.status_code == 429
# Advance the timer by one second
self.set_throttle_timer(MockView, 1)
response = MockView.as_view()(request)
assert response.status_code == 200
def ensure_is_throttled(self, view, expect):
request = self.factory.get('/')
request.user = User.objects.create(username='a')
for dummy in range(3):
view.as_view()(request)
request.user = User.objects.create(username='b')
response = view.as_view()(request)
assert response.status_code == expect
def test_request_throttling_is_per_user(self):
"""
Ensure request rate is only limited per user, not globally for
PerUserThrottles
"""
self.ensure_is_throttled(MockView, 200)
def test_request_throttling_multiple_throttles(self):
"""
Ensure all throttle classes see each request even when the request is
already being throttled
"""
self.set_throttle_timer(MockView_DoubleThrottling, 0)
request = self.factory.get('/')
for dummy in range(4):
response = MockView_DoubleThrottling.as_view()(request)
assert response.status_code == 429
assert int(response['retry-after']) == 1
# At this point our client made 4 requests (one was throttled) in a
# second. If we advance the timer by one additional second, the client
# should be allowed to make 2 more before being throttled by the 2nd
# throttle class, which has a limit of 6 per minute.
self.set_throttle_timer(MockView_DoubleThrottling, 1)
for dummy in range(2):
response = MockView_DoubleThrottling.as_view()(request)
assert response.status_code == 200
response = MockView_DoubleThrottling.as_view()(request)
assert response.status_code == 429
assert int(response['retry-after']) == 59
# Just to make sure check again after two more seconds.
self.set_throttle_timer(MockView_DoubleThrottling, 2)
response = MockView_DoubleThrottling.as_view()(request)
assert response.status_code == 429
assert int(response['retry-after']) == 58
def test_throttle_rate_change_negative(self):
self.set_throttle_timer(MockView_DoubleThrottling, 0)
request = self.factory.get('/')
for dummy in range(24):
response = MockView_DoubleThrottling.as_view()(request)
assert response.status_code == 429
assert int(response['retry-after']) == 60
previous_rate = User3SecRateThrottle.rate
try:
User3SecRateThrottle.rate = '1/sec'
for dummy in range(24):
response = MockView_DoubleThrottling.as_view()(request)
assert response.status_code == 429
assert int(response['retry-after']) == 60
finally:
# reset
User3SecRateThrottle.rate = previous_rate
def ensure_response_header_contains_proper_throttle_field(self, view, expected_headers):
"""
Ensure the response returns a Retry-After field with status and next attributes
set properly.
"""
request = self.factory.get('/')
for timer, expect in expected_headers:
self.set_throttle_timer(view, timer)
response = view.as_view()(request)
if expect is not None:
assert response['Retry-After'] == expect
else:
assert 'Retry-After' not in response
def test_seconds_fields(self):
"""
Ensure the Retry-After header is set correctly for second-based throttles.
"""
self.ensure_response_header_contains_proper_throttle_field(
MockView, (
(0, None),
(0, None),
(0, None),
(0, '1')
)
)
def test_minutes_fields(self):
"""
Ensure the Retry-After header is set correctly for minute-based throttles.
"""
self.ensure_response_header_contains_proper_throttle_field(
MockView_MinuteThrottling, (
(0, None),
(0, None),
(0, None),
(0, '60')
)
)
def test_next_rate_remains_constant_if_followed(self):
"""
If a client follows the recommended next request rate,
the throttling rate should stay constant.
"""
self.ensure_response_header_contains_proper_throttle_field(
MockView_MinuteThrottling, (
(0, None),
(20, None),
(40, None),
(60, None),
(80, None)
)
)
def test_non_time_throttle(self):
"""
Ensure non-time-based throttles do not include a Retry-After header.
"""
request = self.factory.get('/')
self.assertFalse(hasattr(MockView_NonTimeThrottling.throttle_classes[0], 'called'))
response = MockView_NonTimeThrottling.as_view()(request)
self.assertFalse('Retry-After' in response)
self.assertTrue(MockView_NonTimeThrottling.throttle_classes[0].called)
response = MockView_NonTimeThrottling.as_view()(request)
self.assertFalse('Retry-After' in response)
class ScopedRateThrottleTests(TestCase):
"""
Tests for ScopedRateThrottle.
"""
def setUp(self):
self.throttle = ScopedRateThrottle()
class XYScopedRateThrottle(ScopedRateThrottle):
TIMER_SECONDS = 0
THROTTLE_RATES = {'x': '3/min', 'y': '1/min'}
def timer(self):
return self.TIMER_SECONDS
class XView(APIView):
throttle_classes = (XYScopedRateThrottle,)
throttle_scope = 'x'
def get(self, request):
return Response('x')
class YView(APIView):
throttle_classes = (XYScopedRateThrottle,)
throttle_scope = 'y'
def get(self, request):
return Response('y')
class UnscopedView(APIView):
throttle_classes = (XYScopedRateThrottle,)
def get(self, request):
return Response('y')
self.throttle_class = XYScopedRateThrottle
self.factory = APIRequestFactory()
self.x_view = XView.as_view()
self.y_view = YView.as_view()
self.unscoped_view = UnscopedView.as_view()
def increment_timer(self, seconds=1):
self.throttle_class.TIMER_SECONDS += seconds
def test_scoped_rate_throttle(self):
request = self.factory.get('/')
# Should be able to hit x view 3 times per minute.
response = self.x_view(request)
assert response.status_code == 200
self.increment_timer()
response = self.x_view(request)
assert response.status_code == 200
self.increment_timer()
response = self.x_view(request)
assert response.status_code == 200
self.increment_timer()
response = self.x_view(request)
assert response.status_code == 429
# Should be able to hit y view 1 time per minute.
self.increment_timer()
response = self.y_view(request)
assert response.status_code == 200
self.increment_timer()
response = self.y_view(request)
assert response.status_code == 429
# Ensure throttles properly reset by advancing the rest of the minute
self.increment_timer(55)
# Should still be able to hit x view 3 times per minute.
response = self.x_view(request)
assert response.status_code == 200
self.increment_timer()
response = self.x_view(request)
assert response.status_code == 200
self.increment_timer()
response = self.x_view(request)
assert response.status_code == 200
self.increment_timer()
response = self.x_view(request)
assert response.status_code == 429
# Should still be able to hit y view 1 time per minute.
self.increment_timer()
response = self.y_view(request)
assert response.status_code == 200
self.increment_timer()
response = self.y_view(request)
assert response.status_code == 429
def test_unscoped_view_not_throttled(self):
request = self.factory.get('/')
for idx in range(10):
self.increment_timer()
response = self.unscoped_view(request)
assert response.status_code == 200
def test_get_cache_key_returns_correct_key_if_user_is_authenticated(self):
class DummyView:
throttle_scope = 'user'
request = Request(HttpRequest())
user = User.objects.create(username='test')
force_authenticate(request, user)
request.user = user
self.throttle.allow_request(request, DummyView())
cache_key = self.throttle.get_cache_key(request, view=DummyView())
assert cache_key == 'throttle_user_%s' % user.pk
class XffTestingBase(TestCase):
def setUp(self):
class Throttle(ScopedRateThrottle):
THROTTLE_RATES = {'test_limit': '1/day'}
TIMER_SECONDS = 0
def timer(self):
return self.TIMER_SECONDS
class View(APIView):
throttle_classes = (Throttle,)
throttle_scope = 'test_limit'
def get(self, request):
return Response('test_limit')
cache.clear()
self.throttle = Throttle()
self.view = View.as_view()
self.request = APIRequestFactory().get('/some_uri')
self.request.META['REMOTE_ADDR'] = '3.3.3.3'
self.request.META['HTTP_X_FORWARDED_FOR'] = '0.0.0.0, 1.1.1.1, 2.2.2.2'
def config_proxy(self, num_proxies):
setattr(api_settings, 'NUM_PROXIES', num_proxies)
class IdWithXffBasicTests(XffTestingBase):
def test_accepts_request_under_limit(self):
self.config_proxy(0)
assert self.view(self.request).status_code == 200
def test_denies_request_over_limit(self):
self.config_proxy(0)
self.view(self.request)
assert self.view(self.request).status_code == 429
class XffSpoofingTests(XffTestingBase):
def test_xff_spoofing_doesnt_change_machine_id_with_one_app_proxy(self):
self.config_proxy(1)
self.view(self.request)
self.request.META['HTTP_X_FORWARDED_FOR'] = '4.4.4.4, 5.5.5.5, 2.2.2.2'
assert self.view(self.request).status_code == 429
def test_xff_spoofing_doesnt_change_machine_id_with_two_app_proxies(self):
self.config_proxy(2)
self.view(self.request)
self.request.META['HTTP_X_FORWARDED_FOR'] = '4.4.4.4, 1.1.1.1, 2.2.2.2'
assert self.view(self.request).status_code == 429
class XffUniqueMachinesTest(XffTestingBase):
def test_unique_clients_are_counted_independently_with_one_proxy(self):
self.config_proxy(1)
self.view(self.request)
self.request.META['HTTP_X_FORWARDED_FOR'] = '0.0.0.0, 1.1.1.1, 7.7.7.7'
assert self.view(self.request).status_code == 200
def test_unique_clients_are_counted_independently_with_two_proxies(self):
self.config_proxy(2)
self.view(self.request)
self.request.META['HTTP_X_FORWARDED_FOR'] = '0.0.0.0, 7.7.7.7, 2.2.2.2'
assert self.view(self.request).status_code == 200
class BaseThrottleTests(TestCase):
def test_allow_request_raises_not_implemented_error(self):
with pytest.raises(NotImplementedError):
BaseThrottle().allow_request(request={}, view={})
class SimpleRateThrottleTests(TestCase):
def setUp(self):
SimpleRateThrottle.scope = 'anon'
def test_get_rate_raises_error_if_scope_is_missing(self):
throttle = SimpleRateThrottle()
with pytest.raises(ImproperlyConfigured):
throttle.scope = None
throttle.get_rate()
def test_throttle_raises_error_if_rate_is_missing(self):
SimpleRateThrottle.scope = 'invalid scope'
with pytest.raises(ImproperlyConfigured):
SimpleRateThrottle()
def test_parse_rate_returns_tuple_with_none_if_rate_not_provided(self):
rate = SimpleRateThrottle().parse_rate(None)
assert rate == (None, None)
def test_allow_request_returns_true_if_rate_is_none(self):
assert SimpleRateThrottle().allow_request(request={}, view={}) is True
def test_get_cache_key_raises_not_implemented_error(self):
with pytest.raises(NotImplementedError):
SimpleRateThrottle().get_cache_key({}, {})
def test_allow_request_returns_true_if_key_is_none(self):
throttle = SimpleRateThrottle()
throttle.rate = 'some rate'
throttle.get_cache_key = lambda *args: None
assert throttle.allow_request(request={}, view={}) is True
def test_wait_returns_correct_waiting_time_without_history(self):
throttle = SimpleRateThrottle()
throttle.num_requests = 1
throttle.duration = 60
throttle.history = []
waiting_time = throttle.wait()
assert isinstance(waiting_time, float)
assert waiting_time == 30.0
def test_wait_returns_none_if_there_are_no_available_requests(self):
throttle = SimpleRateThrottle()
throttle.num_requests = 1
throttle.duration = 60
throttle.now = throttle.timer()
throttle.history = [throttle.timer() for _ in range(3)]
assert throttle.wait() is None
class AnonRateThrottleTests(TestCase):
def setUp(self):
self.throttle = AnonRateThrottle()
def test_authenticated_user_not_affected(self):
request = Request(HttpRequest())
user = User.objects.create(username='test')
force_authenticate(request, user)
request.user = user
assert self.throttle.get_cache_key(request, view={}) is None
def test_get_cache_key_returns_correct_value(self):
request = Request(HttpRequest())
cache_key = self.throttle.get_cache_key(request, view={})
assert cache_key == 'throttle_anon_None'
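# Illustrative sketch (not exercised by the test suite above): a custom throttle
# is normally just a rate string on a SimpleRateThrottle subclass. parse_rate()
# splits that string into (number of requests, period in seconds), which is what
# the Retry-After assertions above rely on.
class ExampleBurstRateThrottle(UserRateThrottle):
    scope = 'example_burst'
    rate = '10/min'
# ExampleBurstRateThrottle().parse_rate('10/min') returns (10, 60); '3/sec'
# gives (3, 1) and '1/day' gives (1, 86400).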
|
|
from mpi4py import MPI
import mpiunittest as unittest
import arrayimpl
from functools import reduce
prod = lambda sequence,start=1: reduce(lambda x, y: x*y, sequence, start)
def skip_op(typecode, op):
if typecode in 'FDG':
if op in (MPI.MAX, MPI.MIN):
return True
return False
def maxvalue(a):
try:
typecode = a.typecode
except AttributeError:
typecode = a.dtype.char
if typecode == ('f'):
return 1e30
elif typecode == ('d'):
return 1e300
else:
return 2 ** (a.itemsize * 7) - 1
def StartWaitFree(request):
request.Start()
request.Wait()
request.Free()
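# Illustrative sketch (assumes an MPI-4 implementation with persistent
# collectives, like the *_init calls exercised below): the point of a persistent
# request is that the expensive setup happens once and the operation can then be
# restarted cheaply many times before the request is freed.
def example_persistent_bcast(comm, buf, root=0, repeats=3):
    request = comm.Bcast_init(buf, root=root)   # set up once
    for _ in range(repeats):
        request.Start()                         # launch this round of the broadcast
        request.Wait()                          # complete it
    request.Free()                              # release the persistent request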
class BaseTestCCOBuf(object):
COMM = MPI.COMM_NULL
def testBarrier(self):
StartWaitFree(
self.COMM.Barrier_init()
)
def testBcast(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for root in range(size):
if rank == root:
buf = array(root, typecode, root)
else:
buf = array( -1, typecode, root)
StartWaitFree(
self.COMM.Bcast_init(buf.as_mpi(), root=root)
)
for value in buf:
self.assertEqual(value, root)
def testGather(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for root in range(size):
sbuf = array(root, typecode, root+1)
if rank == root:
rbuf = array(-1, typecode, (size,root+1))
else:
rbuf = array([], typecode)
StartWaitFree(
self.COMM.Gather_init(sbuf.as_mpi(), rbuf.as_mpi(),
root=root)
)
if rank == root:
for value in rbuf.flat:
self.assertEqual(value, root)
def testScatter(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for root in range(size):
rbuf = array(-1, typecode, size)
if rank == root:
sbuf = array(root, typecode, (size, size))
else:
sbuf = array([], typecode)
StartWaitFree(
self.COMM.Scatter_init(sbuf.as_mpi(), rbuf.as_mpi(),
root=root)
)
for value in rbuf:
self.assertEqual(value, root)
def testAllgather(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for root in range(size):
sbuf = array(root, typecode, root+1)
rbuf = array( -1, typecode, (size, root+1))
StartWaitFree(
self.COMM.Allgather_init(sbuf.as_mpi(), rbuf.as_mpi())
)
for value in rbuf.flat:
self.assertEqual(value, root)
def testAlltoall(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for root in range(size):
sbuf = array(root, typecode, (size, root+1))
rbuf = array( -1, typecode, (size, root+1))
StartWaitFree(
self.COMM.Alltoall_init(sbuf.as_mpi(), rbuf.as_mpi_c(root+1))
)
for value in rbuf.flat:
self.assertEqual(value, root)
def assertAlmostEqual(self, first, second):
num = complex(second-first)
den = complex(second+first)/2 or 1.0
if (abs(num/den) > 1e-2):
raise self.failureException('%r != %r' % (first, second))
def testReduce(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN):
if skip_op(typecode, op): continue
for root in range(size):
sbuf = array(range(size), typecode)
rbuf = array(-1, typecode, size)
StartWaitFree(
self.COMM.Reduce_init(sbuf.as_mpi(),
rbuf.as_mpi(),
op, root)
)
max_val = maxvalue(rbuf)
for i, value in enumerate(rbuf):
if rank != root:
self.assertEqual(value, -1)
continue
if op == MPI.SUM:
if (i * size) < max_val:
self.assertAlmostEqual(value, i*size)
elif op == MPI.PROD:
if (i ** size) < max_val:
self.assertAlmostEqual(value, i**size)
elif op == MPI.MAX:
self.assertEqual(value, i)
elif op == MPI.MIN:
self.assertEqual(value, i)
def testAllreduce(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD):
if skip_op(typecode, op): continue
sbuf = array(range(size), typecode)
rbuf = array(0, typecode, size)
StartWaitFree(
self.COMM.Allreduce_init(sbuf.as_mpi(),
rbuf.as_mpi(),
op)
)
max_val = maxvalue(rbuf)
for i, value in enumerate(rbuf):
if op == MPI.SUM:
if (i * size) < max_val:
self.assertAlmostEqual(value, i*size)
elif op == MPI.PROD:
if (i ** size) < max_val:
self.assertAlmostEqual(value, i**size)
elif op == MPI.MAX:
self.assertEqual(value, i)
elif op == MPI.MIN:
self.assertEqual(value, i)
def testReduceScatter(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD):
if skip_op(typecode, op): continue
rcnt = list(range(1,size+1))
sbuf = array([rank+1]*sum(rcnt), typecode)
rbuf = array(-1, typecode, rank+1)
StartWaitFree(
self.COMM.Reduce_scatter_init(sbuf.as_mpi(),
rbuf.as_mpi(),
None, op)
)
max_val = maxvalue(rbuf)
for i, value in enumerate(rbuf):
if op == MPI.SUM:
redval = sum(range(size))+size
if redval < max_val:
self.assertAlmostEqual(value, redval)
elif op == MPI.PROD:
redval = prod(range(1,size+1))
if redval < max_val:
self.assertAlmostEqual(value, redval)
elif op == MPI.MAX:
self.assertEqual(value, size)
elif op == MPI.MIN:
self.assertEqual(value, 1)
rbuf = array(-1, typecode, rank+1)
StartWaitFree(
self.COMM.Reduce_scatter_init(sbuf.as_mpi(),
rbuf.as_mpi(),
rcnt, op)
)
max_val = maxvalue(rbuf)
for i, value in enumerate(rbuf):
if op == MPI.SUM:
redval = sum(range(size))+size
if redval < max_val:
self.assertAlmostEqual(value, redval)
elif op == MPI.PROD:
redval = prod(range(1,size+1))
if redval < max_val:
self.assertAlmostEqual(value, redval)
elif op == MPI.MAX:
self.assertEqual(value, size)
elif op == MPI.MIN:
self.assertEqual(value, 1)
def testReduceScatterBlock(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD):
if skip_op(typecode, op): continue
for rcnt in range(1,size):
sbuf = array([rank]*rcnt*size, typecode)
rbuf = array(-1, typecode, rcnt)
if op == MPI.PROD:
sbuf = array([rank+1]*rcnt*size, typecode)
StartWaitFree(
self.COMM.Reduce_scatter_block_init(sbuf.as_mpi(),
rbuf.as_mpi(),
op)
)
max_val = maxvalue(rbuf)
v_sum = (size*(size-1))/2
v_prod = 1
for i in range(1,size+1): v_prod *= i
v_max = size-1
v_min = 0
for i, value in enumerate(rbuf):
if op == MPI.SUM:
if v_sum <= max_val:
self.assertAlmostEqual(value, v_sum)
elif op == MPI.PROD:
if v_prod <= max_val:
self.assertAlmostEqual(value, v_prod)
elif op == MPI.MAX:
self.assertEqual(value, v_max)
elif op == MPI.MIN:
self.assertEqual(value, v_min)
def testScan(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
# --
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN):
if skip_op(typecode, op): continue
sbuf = array(range(size), typecode)
rbuf = array(0, typecode, size)
StartWaitFree(
self.COMM.Scan_init(sbuf.as_mpi(),
rbuf.as_mpi(),
op)
)
max_val = maxvalue(rbuf)
for i, value in enumerate(rbuf):
if op == MPI.SUM:
if (i * (rank + 1)) < max_val:
self.assertAlmostEqual(value, i * (rank + 1))
elif op == MPI.PROD:
if (i ** (rank + 1)) < max_val:
self.assertAlmostEqual(value, i ** (rank + 1))
elif op == MPI.MAX:
self.assertEqual(value, i)
elif op == MPI.MIN:
self.assertEqual(value, i)
def testExscan(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN):
if skip_op(typecode, op): continue
sbuf = array(range(size), typecode)
rbuf = array(0, typecode, size)
StartWaitFree(
self.COMM.Exscan_init(sbuf.as_mpi(),
rbuf.as_mpi(),
op)
)
if rank == 1:
for i, value in enumerate(rbuf):
self.assertEqual(value, i)
elif rank > 1:
max_val = maxvalue(rbuf)
for i, value in enumerate(rbuf):
if op == MPI.SUM:
if (i * rank) < max_val:
self.assertAlmostEqual(value, i * rank)
elif op == MPI.PROD:
if (i ** rank) < max_val:
self.assertAlmostEqual(value, i ** rank)
elif op == MPI.MAX:
self.assertEqual(value, i)
elif op == MPI.MIN:
self.assertEqual(value, i)
def testBcastTypeIndexed(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
datatype = array.TypeMap[typecode]
for root in range(size):
#
if rank == root:
buf = array(range(10), typecode).as_raw()
else:
buf = array(-1, typecode, 10).as_raw()
indices = list(range(0, len(buf), 2))
newtype = datatype.Create_indexed_block(1, indices)
newtype.Commit()
newbuf = (buf, 1, newtype)
StartWaitFree(
self.COMM.Bcast_init(newbuf, root=root)
)
newtype.Free()
if rank != root:
for i, value in enumerate(buf):
if (i % 2):
self.assertEqual(value, -1)
else:
self.assertEqual(value, i)
#
if rank == root:
buf = array(range(10), typecode).as_raw()
else:
buf = array(-1, typecode, 10).as_raw()
indices = list(range(1, len(buf), 2))
newtype = datatype.Create_indexed_block(1, indices)
newtype.Commit()
newbuf = (buf, 1, newtype)
StartWaitFree(
self.COMM.Bcast_init(newbuf, root)
)
newtype.Free()
if rank != root:
for i, value in enumerate(buf):
if not (i % 2):
self.assertEqual(value, -1)
else:
self.assertEqual(value, i)
class BaseTestCCOBufInplace(object):
def testGather(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for root in range(size):
count = root+3
if rank == root:
sbuf = MPI.IN_PLACE
buf = array(-1, typecode, (size, count))
#buf.flat[(rank*count):((rank+1)*count)] = \
# array(root, typecode, count)
s, e = rank*count, (rank+1)*count
for i in range(s, e): buf.flat[i] = root
rbuf = buf.as_mpi()
else:
buf = array(root, typecode, count)
sbuf = buf.as_mpi()
rbuf = None
StartWaitFree(
self.COMM.Gather_init(sbuf, rbuf, root=root)
)
for value in buf.flat:
self.assertEqual(value, root)
@unittest.skipMPI('msmpi(==10.0.0)')
def testScatter(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for root in range(size):
for count in range(1, 10):
if rank == root:
buf = array(root, typecode, (size, count))
sbuf = buf.as_mpi()
rbuf = MPI.IN_PLACE
else:
buf = array(-1, typecode, count)
sbuf = None
rbuf = buf.as_mpi()
StartWaitFree(
self.COMM.Scatter_init(sbuf, rbuf, root=root)
)
for value in buf.flat:
self.assertEqual(value, root)
def testAllgather(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for count in range(1, 10):
buf = array(-1, typecode, (size, count))
#buf.flat[(rank*count):((rank+1)*count)] = \
# array(count, typecode, count)
s, e = rank*count, (rank+1)*count
for i in range(s, e): buf.flat[i] = count
StartWaitFree(
self.COMM.Allgather_init(MPI.IN_PLACE, buf.as_mpi())
)
for value in buf.flat:
self.assertEqual(value, count)
def assertAlmostEqual(self, first, second):
num = complex(second-first)
den = complex(second+first)/2 or 1.0
if (abs(num/den) > 1e-2):
raise self.failureException('%r != %r' % (first, second))
def testReduce(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN):
if skip_op(typecode, op): continue
for root in range(size):
count = size
if rank == root:
buf = array(range(size), typecode)
sbuf = MPI.IN_PLACE
rbuf = buf.as_mpi()
else:
buf = array(range(size), typecode)
buf2 = array(range(size), typecode)
sbuf = buf.as_mpi()
rbuf = buf2.as_mpi()
StartWaitFree(
self.COMM.Reduce_init(sbuf, rbuf, op, root)
)
if rank == root:
max_val = maxvalue(buf)
for i, value in enumerate(buf):
if op == MPI.SUM:
if (i * size) < max_val:
self.assertAlmostEqual(value, i*size)
elif op == MPI.PROD:
if (i ** size) < max_val:
self.assertAlmostEqual(value, i**size)
elif op == MPI.MAX:
self.assertEqual(value, i)
elif op == MPI.MIN:
self.assertEqual(value, i)
def testAllreduce(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD):
if skip_op(typecode, op): continue
buf = array(range(size), typecode)
sbuf = MPI.IN_PLACE
rbuf = buf.as_mpi()
StartWaitFree(
self.COMM.Allreduce_init(sbuf, rbuf, op)
)
max_val = maxvalue(buf)
for i, value in enumerate(buf):
if op == MPI.SUM:
if (i * size) < max_val:
self.assertAlmostEqual(value, i*size)
elif op == MPI.PROD:
if (i ** size) < max_val:
self.assertAlmostEqual(value, i**size)
elif op == MPI.MAX:
self.assertEqual(value, i)
elif op == MPI.MIN:
self.assertEqual(value, i)
def testReduceScatterBlock(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD):
if skip_op(typecode, op): continue
for rcnt in range(size):
if op == MPI.PROD:
rbuf = array([rank+1]*rcnt*size, typecode)
else:
rbuf = array([rank]*rcnt*size, typecode)
StartWaitFree(
self.COMM.Reduce_scatter_block_init(MPI.IN_PLACE,
rbuf.as_mpi(),
op)
)
max_val = maxvalue(rbuf)
for i, value in enumerate(rbuf):
if i >= rcnt:
if op == MPI.PROD:
self.assertEqual(value, rank+1)
else:
self.assertEqual(value, rank)
else:
if op == MPI.SUM:
redval = sum(range(size))
if redval < max_val:
self.assertAlmostEqual(value, redval)
elif op == MPI.PROD:
redval = prod(range(1,size+1))
if redval < max_val:
self.assertAlmostEqual(value, redval)
elif op == MPI.MAX:
self.assertEqual(value, size-1)
elif op == MPI.MIN:
self.assertEqual(value, 0)
@unittest.skipMPI('MVAPICH2')
def testReduceScatter(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD):
if skip_op(typecode, op): continue
rcnt = list(range(1, size+1))
if op == MPI.PROD:
rbuf = array([rank+1]*sum(rcnt), typecode)
else:
rbuf = array([rank]*sum(rcnt), typecode)
StartWaitFree(
self.COMM.Reduce_scatter_init(MPI.IN_PLACE,
rbuf.as_mpi(),
rcnt, op)
)
max_val = maxvalue(rbuf)
for i, value in enumerate(rbuf):
if i >= rcnt[rank]:
if op == MPI.PROD:
self.assertEqual(value, rank+1)
else:
self.assertEqual(value, rank)
else:
if op == MPI.SUM:
redval = sum(range(size))
if redval < max_val:
self.assertAlmostEqual(value, redval)
elif op == MPI.PROD:
redval = prod(range(1,size+1))
if redval < max_val:
self.assertAlmostEqual(value, redval)
elif op == MPI.MAX:
self.assertEqual(value, size-1)
elif op == MPI.MIN:
self.assertEqual(value, 0)
@unittest.skipMPI('openmpi(<=1.8.4)')
def testScan(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
# --
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN):
if skip_op(typecode, op): continue
buf = array(range(size), typecode)
StartWaitFree(
self.COMM.Scan_init(MPI.IN_PLACE,
buf.as_mpi(),
op)
)
max_val = maxvalue(buf)
for i, value in enumerate(buf):
if op == MPI.SUM:
if (i * (rank + 1)) < max_val:
self.assertAlmostEqual(value, i * (rank + 1))
elif op == MPI.PROD:
if (i ** (rank + 1)) < max_val:
self.assertAlmostEqual(value, i ** (rank + 1))
elif op == MPI.MAX:
self.assertEqual(value, i)
elif op == MPI.MIN:
self.assertEqual(value, i)
def testExscan(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN):
if skip_op(typecode, op): continue
buf = array(range(size), typecode)
StartWaitFree(
self.COMM.Exscan_init(MPI.IN_PLACE,
buf.as_mpi(),
op)
)
if rank == 1:
for i, value in enumerate(buf):
self.assertEqual(value, i)
elif rank > 1:
max_val = maxvalue(buf)
for i, value in enumerate(buf):
if op == MPI.SUM:
if (i * rank) < max_val:
self.assertAlmostEqual(value, i * rank)
elif op == MPI.PROD:
if (i ** rank) < max_val:
self.assertAlmostEqual(value, i ** rank)
elif op == MPI.MAX:
self.assertEqual(value, i)
elif op == MPI.MIN:
self.assertEqual(value, i)
class TestCCOBufSelf(BaseTestCCOBuf, unittest.TestCase):
COMM = MPI.COMM_SELF
class TestCCOBufWorld(BaseTestCCOBuf, unittest.TestCase):
COMM = MPI.COMM_WORLD
class TestCCOBufInplaceSelf(BaseTestCCOBufInplace, unittest.TestCase):
COMM = MPI.COMM_SELF
class TestCCOBufInplaceWorld(BaseTestCCOBufInplace, unittest.TestCase):
COMM = MPI.COMM_WORLD
class TestCCOBufSelfDup(TestCCOBufSelf):
def setUp(self):
self.COMM = MPI.COMM_SELF.Dup()
def tearDown(self):
self.COMM.Free()
class TestCCOBufWorldDup(TestCCOBufWorld):
def setUp(self):
self.COMM = MPI.COMM_WORLD.Dup()
def tearDown(self):
self.COMM.Free()
try:
StartWaitFree( MPI.COMM_SELF.Barrier_init() )
except NotImplementedError:
unittest.disable(BaseTestCCOBuf, 'mpi-coll-persist')
unittest.disable(BaseTestCCOBufInplace, 'mpi-coll-persist')
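# A minimal, illustrative sketch (not part of the test suite) of the persistent
# collective Start/Wait/Free pattern that the StartWaitFree() helper used above
# wraps, assuming an MPI implementation that provides persistent collectives:
def _example_persistent_barrier(comm=MPI.COMM_SELF):
    request = comm.Barrier_init()  # create the persistent collective request
    request.Start()                # launch one round of the barrier
    request.Wait()                 # block until that round completes
    request.Free()                 # release the persistent request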
if __name__ == '__main__':
unittest.main()
|
|
# The MIT License (MIT)
# Copyright (c) 2015 Ole Krause-Sparmann
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from PIL import Image
import math
class LocalMaximum:
# Local maxima as found during the image analysis.
# We need this class for ordering by cell hit count.
def __init__(self, hit_count, cell_index, r, g, b):
# Hit count of the cell
self.hit_count = hit_count
# Linear index of the cell
self.cell_index = cell_index
# Average color of the cell
self.r = r
self.g = g
self.b = b
class CubeCell:
# The color cube is made out of these cells
def __init__(self):
# Count of hits (dividing the accumulators by this value gives the average color)
self.hit_count = 0
# Accumulators for color components
self.r_acc = 0.0
self.g_acc = 0.0
self.b_acc = 0.0
class ColorCube:
    # Uses a 3d RGB histogram to find local maxima in the density distribution
    # in order to retrieve dominant colors of pixel images
def __init__(self, resolution=30, avoid_color=None, distinct_threshold=0.2, bright_threshold=0.6):
# Keep resolution
self.resolution = resolution
# Threshold for distinct local maxima
self.distinct_threshold = distinct_threshold
# Color to avoid
self.avoid_color = avoid_color
# Colors that are darker than this go away
self.bright_threshold = bright_threshold
# Helper variable to have cell count handy
self.cell_count = resolution * resolution * resolution
# Create cells
self.cells = [ CubeCell() for k in range(self.cell_count)]
# Indices for neighbour cells in three dimensional grid
self.neighbour_indices = [
[ 0, 0, 0],
[ 0, 0, 1],
[ 0, 0,-1],
[ 0, 1, 0],
[ 0, 1, 1],
[ 0, 1,-1],
[ 0,-1, 0],
[ 0,-1, 1],
[ 0,-1,-1],
[ 1, 0, 0],
[ 1, 0, 1],
[ 1, 0,-1],
[ 1, 1, 0],
[ 1, 1, 1],
[ 1, 1,-1],
[ 1,-1, 0],
[ 1,-1, 1],
[ 1,-1,-1],
[-1, 0, 0],
[-1, 0, 1],
[-1, 0,-1],
[-1, 1, 0],
[-1, 1, 1],
[-1, 1,-1],
[-1,-1, 0],
[-1,-1, 1],
[-1,-1,-1]
]
def cell_index(self, r, g, b):
# Returns linear index for cell with given 3d index
return (r+g*self.resolution+b*self.resolution*self.resolution)
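    # Illustrative example: with resolution 30, the 3d index (r=2, g=1, b=0)
    # maps to the linear index 2 + 1*30 + 0*900 = 32.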
def clear_cells(self):
for c in self.cells:
c.hit_count = 0
c.r_acc = 0.0
c.g_acc = 0.0
c.b_acc = 0.0
def get_colors(self, image):
m = self.find_local_maxima(image)
        if self.avoid_color is not None:
m = self.filter_too_similar(m)
m = self.filter_distinct_maxima(m)
colors = []
for n in m:
r = int(n.r*255.0)
g = int(n.g*255.0)
b = int(n.b*255.0)
colors.append([r, g, b])
return colors
def find_local_maxima(self, image):
# Finds and returns local maxima in 3d histogram, sorted with respect to hit count
# Reset all cells
self.clear_cells()
# Iterate over all pixels of the image
for p in image.getdata():
# Get color components
r = float(p[0])/255.0
g = float(p[1])/255.0
b = float(p[2])/255.0
if r < self.bright_threshold and g < self.bright_threshold and b < self.bright_threshold:
continue
# If image has alpha channel, weight colors by it
if len(p) == 4:
a = float(p[3])/255.0
r *= a
g *= a
b *= a
# Map color components to cell indices in each color dimension
r_index = int(r*(float(self.resolution)-1.0))
g_index = int(g*(float(self.resolution)-1.0))
b_index = int(b*(float(self.resolution)-1.0))
# Compute linear cell index
index = self.cell_index(r_index, g_index, b_index)
# Increase hit count of cell
self.cells[index].hit_count += 1
# Add pixel colors to cell color accumulators
self.cells[index].r_acc += r
self.cells[index].g_acc += g
self.cells[index].b_acc += b
# We collect local maxima in here
local_maxima = []
# Find local maxima in the grid
for r in range(self.resolution):
for g in range(self.resolution):
for b in range(self.resolution):
local_index = self.cell_index(r, g, b)
# Get hit count of this cell
local_hit_count = self.cells[local_index].hit_count
# If this cell has no hits, ignore it (we are not interested in zero hit cells)
if local_hit_count == 0:
continue
# It is a local maximum until we find a neighbour with a higher hit count
is_local_maximum = True
# Check if any neighbour has a higher hit count, if so, no local maxima
for n in range(27):
r_index = r+self.neighbour_indices[n][0]
g_index = g+self.neighbour_indices[n][1]
b_index = b+self.neighbour_indices[n][2]
# Only check valid cell indices (skip out of bounds indices)
if r_index >= 0 and g_index >= 0 and b_index >= 0:
if r_index < self.resolution and g_index < self.resolution and b_index < self.resolution:
if self.cells[self.cell_index(r_index, g_index, b_index)].hit_count > local_hit_count:
# Neighbour hit count is higher, so this is NOT a local maximum.
is_local_maximum = False
# Break inner loop
break
# If this is not a local maximum, continue with loop.
                    if not is_local_maximum:
continue
# Otherwise add this cell as local maximum
avg_r = self.cells[local_index].r_acc / float(self.cells[local_index].hit_count)
avg_g = self.cells[local_index].g_acc / float(self.cells[local_index].hit_count)
avg_b = self.cells[local_index].b_acc / float(self.cells[local_index].hit_count)
local_maxima.append(LocalMaximum(local_hit_count, local_index, avg_r, avg_g, avg_b))
# Return local maxima sorted with respect to hit count
return sorted(local_maxima, key=lambda x: x.hit_count, reverse=True)
def filter_distinct_maxima(self, maxima):
# Returns a filtered version of the specified array of maxima,
# in which all entries have a minimum distance of self.distinct_threshold
result = []
# Check for each maximum
for m in maxima:
# This color is distinct until a color from before is too close
is_distinct = True
for n in result:
# Compute delta components
r_delta = m.r - n.r
g_delta = m.g - n.g
b_delta = m.b - n.b
# Compute delta in color space distance
delta = math.sqrt(r_delta*r_delta + g_delta*g_delta + b_delta*b_delta)
# If too close mark as non-distinct and break inner loop
if delta < self.distinct_threshold:
is_distinct = False
break
# Add to filtered array if is distinct
            if is_distinct:
result.append(m)
return result
def filter_too_similar(self, maxima):
# Returns a filtered version of the specified array of maxima,
# in which all entries are far enough away from the specified avoid_color
result = []
ar = float(self.avoid_color[0])/255.0
ag = float(self.avoid_color[1])/255.0
ab = float(self.avoid_color[2])/255.0
# Check for each maximum
for m in maxima:
# Compute delta components
r_delta = m.r - ar
g_delta = m.g - ag
b_delta = m.b - ab
# Compute delta in color space distance
delta = math.sqrt(r_delta*r_delta + g_delta*g_delta + b_delta*b_delta)
if delta >= 0.5:
result.append(m)
return result
################################################################################
# Command line example
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Get dominant colors of an image.')
parser.add_argument('image', help='Image file to process.')
args = parser.parse_args()
# Create color cube, avoiding resulting colors that are too close to white.
cc = ColorCube(avoid_color=[255, 255, 255])
# Load image and scale down to make the algorithm faster.
# Scaling down also gives colors that are more dominant in perception.
image = Image.open(args.image).resize((50, 50))
# Get colors for that image
colors = cc.get_colors(image)
    # Print the first ten colors (there may be many more)
for c in colors[:10]:
print(c)
|
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from functools import partial
from textwrap import dedent
from typing import List, Optional
import pytest
from pants.backend.project_info.dependencies import Dependencies, DependencyType, rules
from pants.backend.python.target_types import PythonRequirementTarget, PythonSourcesGeneratorTarget
from pants.engine.target import SpecialCasedDependencies, Target
from pants.testutil.rule_runner import RuleRunner
# We verify that any subclasses of `SpecialCasedDependencies` will show up with the `dependencies`
# goal by creating a mock target.
class SpecialDepsField(SpecialCasedDependencies):
alias = "special_deps"
class SpecialDepsTarget(Target):
alias = "special_deps_tgt"
core_fields = (SpecialDepsField,)
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=rules(),
target_types=[PythonSourcesGeneratorTarget, PythonRequirementTarget, SpecialDepsTarget],
)
def create_python_sources(
rule_runner: RuleRunner, path: str, *, dependencies: Optional[List[str]] = None
) -> None:
rule_runner.add_to_build_file(
path, f"python_sources(name='target', sources=[], dependencies={dependencies or []})"
)
def create_python_requirement_tgt(rule_runner: RuleRunner, name: str) -> None:
rule_runner.add_to_build_file(
"3rdparty/python",
dedent(
f"""\
python_requirement(
name='{name}',
requirements=['{name}==1.0.0'],
)
"""
),
)
def assert_dependencies(
rule_runner: RuleRunner,
*,
specs: List[str],
expected: List[str],
transitive: bool = False,
dependency_type: DependencyType = DependencyType.SOURCE,
closed: bool = False,
) -> None:
args = [f"--type={dependency_type.value}"]
if transitive:
args.append("--transitive")
if closed:
args.append("--closed")
result = rule_runner.run_goal_rule(Dependencies, args=[*args, *specs])
assert result.stdout.splitlines() == expected
def test_no_target(rule_runner: RuleRunner) -> None:
assert_dependencies(rule_runner, specs=[], expected=[])
assert_dependencies(rule_runner, specs=[], expected=[], transitive=True)
def test_no_dependencies(rule_runner: RuleRunner) -> None:
create_python_sources(rule_runner, path="some/target")
assert_dependencies(rule_runner, specs=["some/target"], expected=[])
assert_dependencies(rule_runner, specs=["some/target"], expected=[], transitive=True)
assert_dependencies(rule_runner, specs=["some/target"], expected=["some/target"], closed=True)
assert_dependencies(
rule_runner, specs=["some/target"], expected=["some/target"], transitive=True, closed=True
)
def test_special_cased_dependencies(rule_runner: RuleRunner) -> None:
rule_runner.add_to_build_file(
"",
dedent(
"""\
special_deps_tgt(name='t1')
special_deps_tgt(name='t2', special_deps=[':t1'])
special_deps_tgt(name='t3', special_deps=[':t2'])
"""
),
)
assert_dependencies(rule_runner, specs=["//:t3"], expected=["//:t2"])
assert_dependencies(rule_runner, specs=["//:t3"], expected=["//:t1", "//:t2"], transitive=True)
def test_python_dependencies(rule_runner: RuleRunner) -> None:
create_python_requirement_tgt(rule_runner, name="req1")
create_python_requirement_tgt(rule_runner, name="req2")
create_python_sources(rule_runner, path="dep/target")
create_python_sources(
rule_runner, path="some/target", dependencies=["dep/target", "3rdparty/python:req1"]
)
create_python_sources(
rule_runner, path="some/other/target", dependencies=["some/target", "3rdparty/python:req2"]
)
assert_deps = partial(assert_dependencies, rule_runner)
# `--type=source`
assert_deps(
specs=["some/other/target"],
dependency_type=DependencyType.SOURCE,
expected=["3rdparty/python:req2", "some/target"],
)
assert_deps(
specs=["some/other/target"],
transitive=True,
dependency_type=DependencyType.SOURCE,
expected=["3rdparty/python:req1", "3rdparty/python:req2", "dep/target", "some/target"],
)
assert_deps(
specs=["some/other/target"],
dependency_type=DependencyType.SOURCE,
expected=["3rdparty/python:req2", "some/other/target", "some/target"],
closed=True,
)
assert_deps(
specs=["some/other/target"],
transitive=True,
dependency_type=DependencyType.SOURCE,
expected=[
"3rdparty/python:req1",
"3rdparty/python:req2",
"dep/target",
"some/other/target",
"some/target",
],
closed=True,
)
# `--type=3rdparty`
assert_deps(
specs=["some/other/target"],
dependency_type=DependencyType.THIRD_PARTY,
expected=["req2==1.0.0"],
)
assert_deps(
specs=["some/other/target"],
transitive=True,
dependency_type=DependencyType.THIRD_PARTY,
expected=["req1==1.0.0", "req2==1.0.0"],
)
# `--type=source-and-3rdparty`
assert_deps(
specs=["some/other/target"],
transitive=False,
dependency_type=DependencyType.SOURCE_AND_THIRD_PARTY,
expected=["3rdparty/python:req2", "some/target", "req2==1.0.0"],
)
assert_deps(
specs=["some/other/target"],
transitive=True,
dependency_type=DependencyType.SOURCE_AND_THIRD_PARTY,
expected=[
"3rdparty/python:req1",
"3rdparty/python:req2",
"dep/target",
"some/target",
"req1==1.0.0",
"req2==1.0.0",
],
)
# Glob the whole repo. `some/other/target` should not be included if --closed is not set,
# because nothing depends on it.
assert_deps(
specs=["::"],
expected=["3rdparty/python:req1", "3rdparty/python:req2", "dep/target", "some/target"],
)
assert_deps(
specs=["::"],
transitive=True,
expected=["3rdparty/python:req1", "3rdparty/python:req2", "dep/target", "some/target"],
)
assert_deps(
specs=["::"],
expected=[
"3rdparty/python:req1",
"3rdparty/python:req2",
"dep/target",
"some/other/target",
"some/target",
],
closed=True,
)
assert_deps(
specs=["::"],
transitive=True,
expected=[
"3rdparty/python:req1",
"3rdparty/python:req2",
"dep/target",
"some/other/target",
"some/target",
],
closed=True,
)
|
|
"""
Requirements file parsing
"""
from __future__ import absolute_import
import os
import re
import shlex
import optparse
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves import filterfalse
import pip
from pip.download import get_file_content
from pip.req.req_install import InstallRequirement
from pip.exceptions import (RequirementsFileParseError)
from pip.utils import normalize_name
from pip import cmdoptions
__all__ = ['parse_requirements']
SCHEME_RE = re.compile(r'^(http|https|file):', re.I)
COMMENT_RE = re.compile(r'(^|\s)+#.*$')
SUPPORTED_OPTIONS = [
cmdoptions.constraints,
cmdoptions.editable,
cmdoptions.requirements,
cmdoptions.no_index,
cmdoptions.index_url,
cmdoptions.find_links,
cmdoptions.extra_index_url,
cmdoptions.allow_external,
cmdoptions.allow_all_external,
cmdoptions.no_allow_external,
cmdoptions.allow_unsafe,
cmdoptions.no_allow_unsafe,
cmdoptions.use_wheel,
cmdoptions.no_use_wheel,
cmdoptions.always_unzip,
cmdoptions.no_binary,
cmdoptions.only_binary,
]
# options to be passed to requirements
SUPPORTED_OPTIONS_REQ = [
cmdoptions.install_options,
cmdoptions.global_options
]
# the 'dest' string values
SUPPORTED_OPTIONS_REQ_DEST = [o().dest for o in SUPPORTED_OPTIONS_REQ]
def parse_requirements(filename, finder=None, comes_from=None, options=None,
session=None, constraint=False, wheel_cache=None):
"""Parse a requirements file and yield InstallRequirement instances.
:param filename: Path or url of requirements file.
:param finder: Instance of pip.index.PackageFinder.
:param comes_from: Origin description of requirements.
:param options: Global options.
:param session: Instance of pip.download.PipSession.
:param constraint: If true, parsing a constraint file rather than
requirements file.
:param wheel_cache: Instance of pip.wheel.WheelCache
"""
if session is None:
raise TypeError(
"parse_requirements() missing 1 required keyword argument: "
"'session'"
)
_, content = get_file_content(
filename, comes_from=comes_from, session=session
)
lines = content.splitlines()
lines = ignore_comments(lines)
lines = join_lines(lines)
lines = skip_regex(lines, options)
for line_number, line in enumerate(lines, 1):
req_iter = process_line(line, filename, line_number, finder,
comes_from, options, session, wheel_cache,
constraint=constraint)
for req in req_iter:
yield req
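# Illustrative usage sketch (the helper below is hypothetical and not part of
# pip's API): parse_requirements() is a generator and needs an explicit
# session, so it is typically consumed in a loop.
def _example_parse_requirements(path='requirements.txt'):
    from pip.download import PipSession
    # Collect the project name of every requirement parsed from the file.
    return [req.name for req in parse_requirements(path, session=PipSession())]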
def process_line(line, filename, line_number, finder=None, comes_from=None,
options=None, session=None, wheel_cache=None,
constraint=False):
"""Process a single requirements line; This can result in creating/yielding
requirements, or updating the finder.
For lines that contain requirements, the only options that have an effect
are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
requirement. Other options from SUPPORTED_OPTIONS may be present, but are
ignored.
For lines that do not contain requirements, the only options that have an
effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
be present, but are ignored. These lines may contain multiple options
(although our docs imply only one is supported), and all our parsed and
affect the finder.
:param constraint: If True, parsing a constraints file.
"""
parser = build_parser()
defaults = parser.get_default_values()
defaults.index_url = None
if finder:
# `finder.format_control` will be updated during parsing
defaults.format_control = finder.format_control
args_str, options_str = break_args_options(line)
opts, _ = parser.parse_args(shlex.split(options_str), defaults)
# preserve for the nested code path
line_comes_from = '%s %s (line %s)' % (
'-c' if constraint else '-r', filename, line_number)
# yield a line requirement
if args_str:
isolated = options.isolated_mode if options else False
if options:
cmdoptions.check_install_build_global(options, opts)
# get the options that apply to requirements
req_options = {}
for dest in SUPPORTED_OPTIONS_REQ_DEST:
if dest in opts.__dict__ and opts.__dict__[dest]:
req_options[dest] = opts.__dict__[dest]
yield InstallRequirement.from_line(
args_str, line_comes_from, constraint=constraint,
isolated=isolated, options=req_options, wheel_cache=wheel_cache
)
# yield an editable requirement
elif opts.editables:
isolated = options.isolated_mode if options else False
default_vcs = options.default_vcs if options else None
yield InstallRequirement.from_editable(
opts.editables[0], comes_from=line_comes_from,
constraint=constraint, default_vcs=default_vcs, isolated=isolated,
wheel_cache=wheel_cache
)
# parse a nested requirements file
elif opts.requirements or opts.constraints:
if opts.requirements:
req_path = opts.requirements[0]
nested_constraint = False
else:
req_path = opts.constraints[0]
nested_constraint = True
# original file is over http
if SCHEME_RE.search(filename):
# do a url join so relative paths work
req_path = urllib_parse.urljoin(filename, req_path)
# original file and nested file are paths
elif not SCHEME_RE.search(req_path):
# do a join so relative paths work
req_dir = os.path.dirname(filename)
            req_path = os.path.join(req_dir, req_path)
# TODO: Why not use `comes_from='-r {} (line {})'` here as well?
parser = parse_requirements(
req_path, finder, comes_from, options, session,
constraint=nested_constraint, wheel_cache=wheel_cache
)
for req in parser:
yield req
# set finder options
elif finder:
if opts.index_url:
finder.index_urls = [opts.index_url]
if opts.use_wheel is False:
finder.use_wheel = False
pip.index.fmt_ctl_no_use_wheel(finder.format_control)
if opts.no_index is True:
finder.index_urls = []
if opts.allow_all_external:
finder.allow_all_external = opts.allow_all_external
if opts.extra_index_urls:
finder.index_urls.extend(opts.extra_index_urls)
if opts.allow_external:
finder.allow_external |= set(
[normalize_name(v).lower() for v in opts.allow_external])
if opts.allow_unverified:
# Remove after 7.0
finder.allow_unverified |= set(
[normalize_name(v).lower() for v in opts.allow_unverified])
if opts.find_links:
# FIXME: it would be nice to keep track of the source
# of the find_links: support a find-links local path
# relative to a requirements file.
value = opts.find_links[0]
req_dir = os.path.dirname(os.path.abspath(filename))
relative_to_reqs_file = os.path.join(req_dir, value)
if os.path.exists(relative_to_reqs_file):
value = relative_to_reqs_file
finder.find_links.append(value)
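# Illustrative examples of single requirement-file lines as handled above:
#   'SomeProject==1.2 --install-option="--prefix=/opt"' yields one
#       InstallRequirement with the install option scoped to that requirement;
#   '-r other-requirements.txt' recursively parses the nested file;
#   '--index-url https://example.org/simple/' only updates finder.index_urls.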
def break_args_options(line):
"""Break up the line into an args and options string. We only want to shlex
(and then optparse) the options, not the args. args can contain markers
which are corrupted by shlex.
"""
tokens = line.split(' ')
args = []
options = tokens[:]
for token in tokens:
        if token.startswith('-'):
break
else:
args.append(token)
options.pop(0)
return ' '.join(args), ' '.join(options)
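# For example (illustrative): break_args_options(
#     'SomeProject==1.2 --install-option="--prefix=/opt"')
# returns ('SomeProject==1.2', '--install-option="--prefix=/opt"').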
def build_parser():
"""
Return a parser for parsing requirement lines
"""
parser = optparse.OptionParser(add_help_option=False)
option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ
for option_factory in option_factories:
option = option_factory()
parser.add_option(option)
# By default optparse sys.exits on parsing errors. We want to wrap
# that in our own exception.
def parser_exit(self, msg):
raise RequirementsFileParseError(msg)
parser.exit = parser_exit
return parser
def join_lines(iterator):
"""
Joins a line ending in '\' with the previous line.
"""
lines = []
for line in iterator:
if not line.endswith('\\'):
if lines:
lines.append(line)
yield ''.join(lines)
lines = []
else:
yield line
else:
lines.append(line.strip('\\'))
# TODO: handle space after '\'.
# TODO: handle '\' on last line.
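# For example (illustrative): the physical lines 'flask \' and '>=0.10' are
# yielded by join_lines() as the single logical line 'flask >=0.10'.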
def ignore_comments(iterator):
"""
Strips and filters empty or commented lines.
"""
for line in iterator:
line = COMMENT_RE.sub('', line)
line = line.strip()
if line:
yield line
def skip_regex(lines, options):
"""
Optionally exclude lines that match '--skip-requirements-regex'
"""
skip_regex = options.skip_requirements_regex if options else None
if skip_regex:
lines = filterfalse(re.compile(skip_regex).search, lines)
return lines
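# Illustrative example: with options.skip_requirements_regex set to 'internal',
# any line whose text matches 'internal' is dropped before parsing.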
|
|
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Atom, Bool, Int, Float, Typed
from enaml.qt import QT_API
from enaml.qt.QtCore import Qt, QPoint, QRect, QTimer, QPropertyAnimation
from enaml.qt.QtGui import QWidget, QStyle, QStyleOption, QPainter
from .q_guide_rose import QGuideRose
from .q_dock_bar import QDockBar
from .q_dock_container import QDockContainer
from .q_dock_splitter import QDockSplitterHandle
from .q_dock_tab_widget import QDockTabWidget
class QDockRubberBand(QWidget):
""" A custom rubber band widget for use with the dock overlay.
This class is stylable from Qt style sheets.
"""
def __init__(self, parent=None):
""" Initialize a QDockRubberBand.
Parameters
----------
parent : QWidget, optional
The parent of the dock rubber band.
"""
super(QDockRubberBand, self).__init__(parent)
self.setWindowFlags(Qt.ToolTip | Qt.FramelessWindowHint)
self.setAttribute(Qt.WA_TranslucentBackground)
def paintEvent(self, event):
""" Handle the paint event for the dock rubber band.
"""
painter = QPainter(self)
opt = QStyleOption()
opt.initFrom(self)
self.style().drawPrimitive(QStyle.PE_Widget, opt, painter, self)
class DockOverlay(Atom):
""" An object which manages the overlays for dock widgets.
This manager handles the state transitions for the overlays. The
transitions are performed on a slightly-delayed timer to provide
a more fluid user interaction experience.
"""
# PySide requires weakrefs for using bound methods as slots
if QT_API == 'pyside':
__slots__ = '__weakref__'
#: The size of the rubber band when docking on the border, in px.
border_size = Int(60)
#: The delay to use when triggering the rose timer, in ms.
rose_delay = Int(30)
#: The delay to use when triggering the band timer, in ms.
band_delay = Int(50)
#: The target opacity to use when making the band visible.
band_target_opacity = Float(1.0)
    #: The duration of the band visibility animation, in ms.
    band_vis_duration = Int(100)
    #: The duration of the band geometry animation, in ms.
band_geo_duration = Int(100)
#: The overlayed guide rose.
_rose = Typed(QGuideRose, ())
#: The overlayed rubber band.
_band = Typed(QDockRubberBand, ())
#: The property animator for the rubber band geometry.
_geo_animator = Typed(QPropertyAnimation)
#: The property animator for the rubber band visibility.
_vis_animator = Typed(QPropertyAnimation)
#: The target mode to apply to the rose on timeout.
_target_rose_mode = Int(QGuideRose.Mode.NoMode)
#: The target geometry to apply to rubber band on timeout.
_target_band_geo = Typed(QRect, factory=lambda: QRect())
#: The value of the last guide which was hit in the rose.
_last_guide = Int(-1)
#: A flag indicating whether it is safe to show the band.
_show_band = Bool(False)
#: The hover position of the mouse to use for state changes.
_hover_pos = Typed(QPoint, factory=lambda: QPoint())
#: The timer for changing the state of the rose.
_rose_timer = Typed(QTimer)
#: The timer for changing the state of the band.
_band_timer = Typed(QTimer)
def __init__(self, parent=None):
""" Initialize a DockOverlay.
Parameters
----------
parent : QWidget, optional
The parent of the overlay. This will be used as the parent
widget for the dock rubber band. The overlay guides do not
have a parent.
"""
self._band = QDockRubberBand(parent)
#--------------------------------------------------------------------------
# Default Value Methods
#--------------------------------------------------------------------------
def _default__rose_timer(self):
""" Create the default timer for the rose state changes.
"""
timer = QTimer()
timer.setSingleShot(True)
timer.timeout.connect(self._on_rose_timer)
return timer
def _default__band_timer(self):
""" Create the default timer for the band state changes.
"""
timer = QTimer()
timer.setSingleShot(True)
timer.timeout.connect(self._on_band_timer)
return timer
def _default__geo_animator(self):
""" Create the default property animator for the rubber band.
"""
p = QPropertyAnimation(self._band, 'geometry')
p.setDuration(self.band_geo_duration)
return p
def _default__vis_animator(self):
""" Create the default property animator for the rubber band.
"""
p = QPropertyAnimation(self._band, 'windowOpacity')
p.setDuration(self.band_vis_duration)
p.finished.connect(self._on_vis_finished)
return p
#--------------------------------------------------------------------------
# Timer Handlers
#--------------------------------------------------------------------------
def _on_rose_timer(self):
""" Handle the timeout event for the internal rose timer.
This handler transitions the rose to its new state and updates
the position of the rubber band.
"""
rose = self._rose
rose.setMode(self._target_rose_mode)
rose.mouseOver(self._hover_pos)
self._show_band = True
self._update_band_state()
def _on_band_timer(self):
""" Handle the timeout event for the internal band timer.
This handler updates the position of the rubber band.
"""
self._update_band_state()
#--------------------------------------------------------------------------
# Animation Handlers
#--------------------------------------------------------------------------
def _on_vis_finished(self):
""" Handle the 'finished' signal from the visibility animator.
        This handler will hide the rubber band when its opacity is 0.
"""
band = self._band
if band.windowOpacity() == 0.0:
band.hide()
#--------------------------------------------------------------------------
# Private API
#--------------------------------------------------------------------------
def _update_band_state(self):
""" Refresh the geometry and visible state of the rubber band.
The state will be updated using animated properties to provide
a nice fluid user experience.
"""
        # A valid geometry indicates that the rubber band should be shown on
# the screen. An invalid geometry means it should be hidden. If
# the validity is changed during animation, the animators are
# restarted using the current state as their starting point.
band = self._band
geo = self._target_band_geo
if geo.isValid() and self._show_band:
# If the band is already hidden, the geometry animation can
# be bypassed since the band can be located anywhere.
if band.isHidden():
band.setGeometry(geo)
self._start_vis_animator(self.band_target_opacity)
self._rose.raise_()
else:
self._start_vis_animator(self.band_target_opacity)
self._start_geo_animator(geo)
else:
self._start_vis_animator(0.0)
def _start_vis_animator(self, opacity):
""" (Re)start the visibility animator.
Parameters
----------
opacity : float
The target opacity of the target object.
"""
animator = self._vis_animator
if animator.state() == animator.Running:
animator.stop()
target = animator.targetObject()
if target.isHidden() and opacity != 0.0:
target.setWindowOpacity(0.0)
target.show()
animator.setStartValue(target.windowOpacity())
animator.setEndValue(opacity)
animator.start()
def _start_geo_animator(self, geo):
""" (Re)start the visibility animator.
Parameters
----------
geo : QRect
The target geometry for the target object.
"""
animator = self._geo_animator
if animator.state() == animator.Running:
animator.stop()
target = animator.targetObject()
animator.setStartValue(target.geometry())
animator.setEndValue(geo)
animator.start()
def _band_geometry(self, widget, guide):
""" Compute the geometry for an overlay rubber band.
Parameters
----------
widget : QWidget
The widget to which the band geometry should be fit.
guide : Guide
The rose guide under the mouse. This determines how the
geometry of the band will be fit to the widget.
"""
Guide = QGuideRose.Guide
if guide == Guide.NoGuide:
return QRect()
# border hits
border_size = self.border_size
rect = widget.contentsRect()
if guide == Guide.BorderNorth:
rect.setHeight(border_size)
elif guide == Guide.BorderEast:
rect.setLeft(rect.right() + 1 - border_size)
elif guide == Guide.BorderSouth:
rect.setTop(rect.bottom() + 1 - border_size)
elif guide == Guide.BorderWest:
rect.setWidth(border_size)
# For the next 4 conditions `widget` will be a QDockArea
elif guide == Guide.BorderExNorth:
bar_rect = widget.dockBarGeometry(QDockBar.North)
if bar_rect.isValid():
rect = bar_rect
else:
rect.setHeight(border_size / 2)
elif guide == Guide.BorderExEast:
bar_rect = widget.dockBarGeometry(QDockBar.East)
if bar_rect.isValid():
rect = bar_rect
else:
rect.setLeft(rect.right() + 1 - border_size / 2)
elif guide == Guide.BorderExSouth:
bar_rect = widget.dockBarGeometry(QDockBar.South)
if bar_rect.isValid():
rect = bar_rect
else:
rect.setTop(rect.bottom() + 1 - border_size / 2)
elif guide == Guide.BorderExWest:
bar_rect = widget.dockBarGeometry(QDockBar.West)
if bar_rect.isValid():
rect = bar_rect
else:
rect.setWidth(border_size / 2)
# compass hits
elif guide == Guide.CompassNorth:
rect.setHeight(rect.height() / 3)
elif guide == Guide.CompassEast:
rect.setLeft(2 * rect.width() / 3)
elif guide == Guide.CompassSouth:
rect.setTop(2 * rect.height() / 3)
elif guide == Guide.CompassWest:
rect.setWidth(rect.width() / 3)
elif guide == Guide.CompassCenter:
pass # nothing to do
elif guide == Guide.CompassExNorth:
pass # nothing to do
elif guide == Guide.CompassExEast:
pass # nothing to do
elif guide == Guide.CompassExSouth:
pass # nothing to do
elif guide == Guide.CompassExWest:
pass # nothing to do
# splitter handle hits
elif guide == Guide.SplitHorizontal:
wo, r = divmod(border_size - rect.width(), 2)
rect.setWidth(2 * (wo + r) + rect.width())
rect.moveLeft(rect.x() - (wo + r))
elif guide == Guide.SplitVertical:
ho, r = divmod(border_size - widget.height(), 2)
rect.setHeight(2 * (ho + r) + rect.height())
rect.moveTop(rect.y() - (ho + r))
# single center
elif guide == Guide.AreaCenter:
pass # nothing to do
# default no-op
else:
return QRect()
pt = widget.mapToGlobal(rect.topLeft())
return QRect(pt, rect.size())
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
def guide_at(self, pos):
""" Get the dock guide for a given position.
Parameters
----------
pos : QPoint
The position of interest, expressed in global coordinates.
Returns
-------
result : Guide
The guide enum which lies under the given point.
"""
rose = self._rose
pos = rose.mapFromGlobal(pos)
return rose.guideAt(pos)
def hide(self):
""" Hide the overlay.
This method will stop the timers and set the visibility of the
guide rose and the rubber band to False.
"""
self._rose_timer.stop()
self._band_timer.stop()
self._rose.hide()
self._band.hide()
def mouse_over_widget(self, widget, pos, empty=False):
""" Update the overlays based on the mouse position.
This handler should be invoked when the mouse hovers over a
single widget (such as a floating dock container) as opposed to
an area of docked widgets. The guide rose will be displayed in
the center of the widget with no border guides.
Parameters
----------
widget : QWidget
The widget under the mouse.
pos : QPoint
The hover position, expressed in the local coordinates of
the widget.
empty : bool, optional
Whether the widget represents an empty widget. If this is
True, a single center guide will be shown instead of the
guide rose.
"""
Mode = QGuideRose.Mode
rose = self._rose
target_mode = Mode.AreaCenter if empty else Mode.CompassEx
self._target_rose_mode = target_mode
if rose.mode() != target_mode:
rose.setMode(Mode.NoMode)
self._rose_timer.start(self.rose_delay)
self._band_timer.start(self.band_delay)
origin = widget.mapToGlobal(QPoint(0, 0))
geo = QRect(origin, widget.size())
dirty = rose.geometry() != geo
if dirty:
rose.hide()
rose.setMode(Mode.NoMode)
rose.setGeometry(geo)
guide = rose.guideAt(pos, target_mode)
if dirty or guide != self._last_guide:
self._last_guide = guide
self._target_band_geo = self._band_geometry(widget, guide)
self._band_timer.start(self.band_delay)
rose.setCenterPoint(QPoint(geo.width() / 2, geo.height() / 2))
rose.mouseOver(pos)
rose.show()
def mouse_over_area(self, area, widget, pos):
""" Update the overlays based on the mouse position.
Parameters
----------
area : QDockArea
The dock area which contains the dock items onto which
the overlay will be displayed.
widget : QWidget
The dock widget in the area which is under the mouse, or
None if there is no relevant widget.
pos : QPoint
The hover position, expressed in the local coordinates of
the overlayed dock area.
"""
Mode = QGuideRose.Mode
Guide = QGuideRose.Guide
pane = area.centralPane()
pos = pane.mapFrom(area, pos)
if widget is None:
if area.centralWidget() is None:
self.mouse_over_widget(pane, pos, empty=True)
return
# Compute the target mode for the guide rose based on the dock
# widget which lies under the mouse position.
target_mode = Mode.Border
if isinstance(widget, QDockContainer):
target_mode |= Mode.CompassEx
elif isinstance(widget, QDockTabWidget):
target_mode |= Mode.Compass
elif isinstance(widget, QDockSplitterHandle):
if widget.orientation() == Qt.Horizontal:
target_mode |= Mode.SplitHorizontal
else:
target_mode |= Mode.SplitVertical
# Get the local area coordinates for the center of the widget.
center = widget.mapTo(pane, QPoint(0, 0))
center += QPoint(widget.width() / 2, widget.height() / 2)
# Update the state of the rose. If it is to be hidden, it is
# done so immediately. If the target mode is different from
# the current mode, the rose is hidden and the state changes
# are collapsed on a timer.
rose = self._rose
self._hover_pos = pos
self._show_band = True
self._target_rose_mode = target_mode
if target_mode != rose.mode():
rose.setMode(Mode.Border)
self._rose_timer.start(self.rose_delay)
self._show_band = False
# Update the geometry of the rose if needed. This ensures that
# the rose does not change geometry while visible.
origin = pane.mapToGlobal(QPoint(0, 0))
geo = QRect(origin, pane.size())
dirty = rose.geometry() != geo
if dirty:
rose.hide()
rose.setMode(Mode.NoMode)
rose.setGeometry(geo)
# Hit test the rose and update the target geometry for the
# rubber band if the target guide has changed.
rose.setCenterPoint(center)
guide = rose.guideAt(pos, target_mode)
if dirty or guide != self._last_guide:
self._last_guide = guide
if guide >= Guide.BorderNorth and guide <= Guide.BorderWest:
band_geo = self._band_geometry(pane, guide)
elif guide >= Guide.BorderExNorth and guide <= Guide.BorderExWest:
band_geo = self._band_geometry(area, guide)
else:
band_geo = self._band_geometry(widget, guide)
self._target_band_geo = band_geo
self._band_timer.start(self.band_delay)
# Finally, make the rose visible and issue a mouseover command
# so that the guides are highlighted.
rose.mouseOver(pos)
rose.show()
|
|
import os
import shutil
import linktastic
import lnkr
import term
from import_section import ImportSection, MODE_COPY, MODE_LINK, MODE_SYMLINK
from export_section import ExportSection
def do_link_app(app_config):
term.info('\nStart Linking App: %s' % term.format_path(app_config.path))
for section in app_config.import_sections:
link_import_section('Import', app_config, section, [app_config])
term.info('\nFinish Linking App: %s' % term.format_path(app_config.path))
def get_new_attribs_holders(attribs_holders, new_holder):
if new_holder in attribs_holders:
return attribs_holders
else:
new_attribs_holders = list(attribs_holders)
new_attribs_holders.append(new_holder)
return new_attribs_holders
def link_import_section(kind, app_config, import_section, attribs_holders):
term.info('\nLoading %s Section: %s' % (kind, term.format_param(import_section.key)))
import_section.load()
if not import_section.loaded:
return
link_import_section_component(app_config, import_section, import_section.key, get_new_attribs_holders(attribs_holders, import_section))
def link_import_section_component(app_config, import_section, key, attribs_holders):
term.info('\nLinking Component, Section: %s, Key: %s' % (term.format_param(import_section.key), term.format_param(key)))
error = 'Component Not Found'
component = import_section.get_component(key)
if component is not None:
if isinstance(component, ExportSection):
export_section = component
error = link_import_section_package_export(app_config, import_section, import_section.package_config, export_section, get_new_attribs_holders(attribs_holders, import_section.package_config))
elif isinstance(component, ImportSection):
wrapper_section = component
error = link_import_section_wrapper_import(app_config, import_section, import_section.wrapper_config, wrapper_section, attribs_holders)
if error is not None:
term.error('\nLinking Component Failed, Section: %s, Key: %s, Reason: %s' % (term.format_param(import_section.key), term.format_param(key), error))
# GOTCHA: it's a bit messy here, since we want to put the dependencies' attribs inside the accessor
def update_required_attribs_holders(attribs_holders, import_section, require_key):
component = import_section.get_component(require_key)
if component is not None:
if isinstance(component, ExportSection):
attribs_holders = get_new_attribs_holders(attribs_holders, import_section.package_config)
attribs_holders = get_new_attribs_holders(attribs_holders, component)
attribs_holders = get_required_attribs_holders(attribs_holders, import_section, component.requires)
elif isinstance(component, ImportSection):
attribs_holders = get_new_attribs_holders(attribs_holders, import_section.wrapper_config)
attribs_holders = update_required_attribs_holders(attribs_holders, component, require_key)
return attribs_holders
def get_required_attribs_holders(attribs_holders, import_section, requires):
for require_key in requires:
attribs_holders = update_required_attribs_holders(attribs_holders, import_section, require_key)
return attribs_holders
def link_import_section_package_export(app_config, import_section, package_config, export_section, attribs_holders):
new_attribs_holders = get_new_attribs_holders(attribs_holders, export_section)
for require_key in export_section.requires:
link_import_section_component(app_config, import_section, require_key, new_attribs_holders)
link_attribs_holders = get_required_attribs_holders(new_attribs_holders, import_section, export_section.requires)
for folder in export_section.folders:
ok, from_path, to_path = check_link_folder('Folder', export_section.key, package_config.root_path, app_config.root_path, folder, link_attribs_holders)
if ok:
do_link_folder(import_section.mode, export_section.key, from_path, to_path)
for fc in export_section.files:
ok, from_path, to_path = check_link_folder('File', export_section.key, package_config.root_path, app_config.root_path, fc, link_attribs_holders)
if ok:
files = fc.get_file_list(from_path)
for f in files:
do_link_file(import_section.mode, export_section.key, from_path, to_path, f)
def link_import_section_wrapper_import(app_config, import_section, wrapper_config, wrapper_section, attribs_holders):
link_import_section('Wrapper', app_config, wrapper_section, get_new_attribs_holders(attribs_holders, wrapper_config))
def check_link_folder(kind, key, from_root_path, to_root_path, folder_config, attribs_holders):
term.verbose("Check Link Folder: %s -> %s" % (kind, key))
from_path = folder_config.get_from_path(from_root_path, attribs_holders)
to_path = folder_config.get_to_path(to_root_path, attribs_holders)
ok = False
if not from_path.startswith(from_root_path):
term.error('Link %s Failed, Component: %s\n\tFrom Root: %s\n\tInvalid From: %s' %
(kind, term.format_param(key), term.format_path(from_root_path), term.format_path(from_path)))
elif not to_path.startswith(to_root_path):
term.error('Link %s Failed, Component: %s\n\tTo Root: %s\n\tInvalid To: %s' %
(kind, term.format_param(key), term.format_path(to_root_path), term.format_path(to_path)))
elif not os.path.isdir(from_path):
term.error('Link %s Failed, Component: %s -> %s\n\tFrom: %s\n\tTo: %s' %
(kind, term.format_param(key), term.format_error('From Folder Not Exist'), term.format_path(from_path), term.format_path(to_path)))
else:
ok = True
return ok, from_path, to_path
def cleanup_path(to_path):
if os.path.islink(to_path):
term.info('Remove Link: %s' % term.format_path(to_path))
os.remove(to_path)
elif os.path.isdir(to_path):
term.info('Remove Folder: %s' % term.format_path(to_path))
shutil.rmtree(to_path)
elif os.path.isfile(to_path):
term.info('Remove File: %s' % term.format_path(to_path))
os.remove(to_path)
def do_link_folder(mode, key, from_path, to_path):
if lnkr.test_mode:
term.info('Link Folder, Component: %s -> %s\n\tFrom: %s\n\tTo: %s' %
(term.format_param(key), term.format_error('Test Mode, NOT Doing Anything'), term.format_path(from_path), term.format_path(to_path)))
elif not os.path.exists(to_path) or lnkr.confirm_change('Changes In Folder Will be Lost, Are You Sure?\n%s, %s' % (term.format_param(key), term.format_path(to_path))):
cleanup_path(to_path)
if mode == MODE_COPY:
do_link_folder_copy(key, from_path, to_path)
elif mode == MODE_LINK:
do_link_folder_link(key, from_path, to_path)
elif mode == MODE_SYMLINK:
do_link_folder_symlink(key, from_path, to_path)
else:
term.info('Link Folder, Component: %s -> %s\n\tFrom: %s\n\tTo: %s' %
(term.format_param(key), term.format_error('Skipped'), term.format_path(from_path), term.format_path(to_path)))
def check_parent_folder(key, to_path):
parent_path = os.path.dirname(to_path)
if not os.path.exists(parent_path):
os.makedirs(parent_path, 0755)
elif os.path.isfile(parent_path):
        term.info('Link Folder, Component: %s -> %s\n\tParent: %s\n\tTo: %s' %
                  (term.format_param(key), term.format_error('Failed: Parent Path is Not a Folder'), term.format_path(parent_path), term.format_path(to_path)))
return False
return True
def do_link_folder_copy(key, from_path, to_path):
if not check_parent_folder(key, to_path):
return
    # TODO: avoid os.system so this also works on Windows
os.system('cp -r %s "%s" "%s"' % (term.verbose_mode and '-v' or '', from_path, to_path))
term.info('Link Folder, Component: %s -> %s\n\tFrom: %s\n\tTo: %s' %
(term.format_param(key), term.format_param('"copy" Done'), term.format_path(from_path), term.format_path(to_path)))
def do_link_folder_link(key, from_path, to_path):
term.info('Link Folder, Component: %s -> %s\n\tFrom: %s\n\tTo: %s' %
(term.format_param(key), term.format_error('"link" Mode Not Implemented'), term.format_path(from_path), term.format_path(to_path)))
def get_folder_rel_from_path(from_path, to_path):
to_dir = os.path.dirname(to_path)
prefix = os.path.dirname(os.path.commonprefix([from_path, to_path]))
if prefix:
old_from_path = from_path
from_path = os.path.join(os.path.relpath(prefix, to_dir), os.path.relpath(from_path, prefix))
term.verbose("get_folder_rel_from_path()\n\told_from_path: %s\n\tto_path: %s\n\tprefix: %s\n\tfrom_path: %s" %
(term.format_path(old_from_path), term.format_path(to_path), term.format_path(prefix), term.format_path(from_path)))
return from_path
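# Illustrative example: with from_path '/repo/pkg/assets' and to_path
# '/repo/app/assets', the common prefix's dirname is '/repo', so the returned
# path is '../pkg/assets' (relative to the link's parent folder '/repo/app'),
# which keeps the symlink valid if the whole tree is relocated.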
def do_link_folder_symlink(key, from_path, to_path):
if not check_parent_folder(key, to_path):
return
from_path = get_folder_rel_from_path(from_path, to_path)
linktastic.symlink(from_path, to_path)
term.info('Link Folder, Component: %s -> %s\n\tFrom: %s\n\tTo: %s' %
(term.format_param(key), term.format_param('"symlink" Done'), term.format_path(from_path), term.format_path(to_path)))
def do_link_file(mode, key, from_path, to_path, file_path):
from_path = os.path.join(from_path, file_path)
to_path = os.path.join(to_path, file_path)
if lnkr.test_mode:
term.info('Link File, Component: %s -> %s\n\tFrom: %s\n\tTo: %s' %
(term.format_param(key), term.format_error('Test Mode, NOT Doing Anything'), term.format_path(from_path), term.format_path(to_path)))
elif not os.path.exists(to_path) or lnkr.confirm_change('Change of File Will be Lost, Are You Sure?\n%s, %s' % (term.format_param(key), term.format_path(to_path))):
if mode == MODE_COPY:
do_link_file_copy(key, from_path, to_path)
elif mode == MODE_LINK:
do_link_file_link(key, from_path, to_path)
elif mode == MODE_SYMLINK:
do_link_file_symlink(key, from_path, to_path)
else:
term.info('Link File, Component: %s -> %s\n\tFrom: %s\n\tTo: %s' %
(term.format_param(key), term.format_error('Skipped'), term.format_path(from_path), term.format_path(to_path)))
def do_link_file_copy(key, from_path, to_path):
if not check_parent_folder(key, to_path):
return
    # TODO: avoid os.system so this also works on Windows
os.system('cp %s "%s" "%s"' % (term.verbose_mode and '-v' or '', from_path, to_path))
term.info('Link File, Component: %s -> %s\n\tFrom: %s\n\tTo: %s' %
(term.format_param(key), term.format_param('"copy" Done'), term.format_path(from_path), term.format_path(to_path)))
def do_link_file_link(key, from_path, to_path):
term.info('Link File, Component: %s -> %s\n\tFrom: %s\n\tTo: %s' %
(term.format_param(key), term.format_error('"link" Mode Not Implemented'), term.format_path(from_path), term.format_path(to_path)))
def get_file_rel_from_path(from_path, to_path):
from_dir = os.path.dirname(from_path)
to_dir = os.path.dirname(to_path)
prefix = os.path.dirname(os.path.commonprefix([from_dir, to_dir]))
if prefix:
old_from_path = from_path
from_path = os.path.join(os.path.relpath(prefix, to_dir), os.path.relpath(from_dir, prefix), os.path.basename(from_path))
term.verbose("get_file_rel_from_path()\n\told_from_path: %s\n\tto_path: %s\n\tprefix: %s\n\tfrom_path: %s" %
(term.format_path(old_from_path), term.format_path(to_path), term.format_path(prefix), term.format_path(from_path)))
return from_path
def do_link_file_symlink(key, from_path, to_path):
cleanup_path(to_path)
if not check_parent_folder(key, to_path):
return
from_path = get_file_rel_from_path(from_path, to_path)
linktastic.symlink(from_path, to_path)
term.info('Link File, Component: %s -> %s\n\tFrom: %s\n\tTo: %s' %
(term.format_param(key), term.format_param('"symlink" Done'), term.format_path(from_path), term.format_path(to_path)))
|
|
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests import unittest
from twisted.internet import defer
from mock import Mock
from tests.utils import (
MockHttpResource, DeferredMockCallable, setup_test_homeserver
)
from synapse.api.filtering import Filter
from synapse.events import FrozenEvent
user_localpart = "test_user"
def MockEvent(**kwargs):
if "event_id" not in kwargs:
kwargs["event_id"] = "fake_event_id"
if "type" not in kwargs:
kwargs["type"] = "fake_type"
return FrozenEvent(kwargs)
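# Illustrative example: MockEvent(sender="@foo:bar", type="m.room.message")
# builds a FrozenEvent whose event_id falls back to "fake_event_id".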
class FilteringTestCase(unittest.TestCase):
@defer.inlineCallbacks
def setUp(self):
self.mock_federation_resource = MockHttpResource()
self.mock_http_client = Mock(spec=[])
self.mock_http_client.put_json = DeferredMockCallable()
hs = yield setup_test_homeserver(
handlers=None,
http_client=self.mock_http_client,
keyring=Mock(),
)
self.filtering = hs.get_filtering()
self.datastore = hs.get_datastore()
def test_definition_types_works_with_literals(self):
definition = {
"types": ["m.room.message", "org.matrix.foo.bar"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!foo:bar"
)
self.assertTrue(
Filter(definition).check(event)
)
def test_definition_types_works_with_wildcards(self):
definition = {
"types": ["m.*", "org.matrix.foo.bar"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!foo:bar"
)
self.assertTrue(
Filter(definition).check(event)
)
def test_definition_types_works_with_unknowns(self):
definition = {
"types": ["m.room.message", "org.matrix.foo.bar"]
}
event = MockEvent(
sender="@foo:bar",
type="now.for.something.completely.different",
room_id="!foo:bar"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_not_types_works_with_literals(self):
definition = {
"not_types": ["m.room.message", "org.matrix.foo.bar"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!foo:bar"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_not_types_works_with_wildcards(self):
definition = {
"not_types": ["m.room.message", "org.matrix.*"]
}
event = MockEvent(
sender="@foo:bar",
type="org.matrix.custom.event",
room_id="!foo:bar"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_not_types_works_with_unknowns(self):
definition = {
"not_types": ["m.*", "org.*"]
}
event = MockEvent(
sender="@foo:bar",
type="com.nom.nom.nom",
room_id="!foo:bar"
)
self.assertTrue(
Filter(definition).check(event)
)
def test_definition_not_types_takes_priority_over_types(self):
definition = {
"not_types": ["m.*", "org.*"],
"types": ["m.room.message", "m.room.topic"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.topic",
room_id="!foo:bar"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_senders_works_with_literals(self):
definition = {
"senders": ["@flibble:wibble"]
}
event = MockEvent(
sender="@flibble:wibble",
type="com.nom.nom.nom",
room_id="!foo:bar"
)
self.assertTrue(
Filter(definition).check(event)
)
def test_definition_senders_works_with_unknowns(self):
definition = {
"senders": ["@flibble:wibble"]
}
event = MockEvent(
sender="@challenger:appears",
type="com.nom.nom.nom",
room_id="!foo:bar"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_not_senders_works_with_literals(self):
definition = {
"not_senders": ["@flibble:wibble"]
}
event = MockEvent(
sender="@flibble:wibble",
type="com.nom.nom.nom",
room_id="!foo:bar"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_not_senders_works_with_unknowns(self):
definition = {
"not_senders": ["@flibble:wibble"]
}
event = MockEvent(
sender="@challenger:appears",
type="com.nom.nom.nom",
room_id="!foo:bar"
)
self.assertTrue(
Filter(definition).check(event)
)
def test_definition_not_senders_takes_priority_over_senders(self):
definition = {
"not_senders": ["@misspiggy:muppets"],
"senders": ["@kermit:muppets", "@misspiggy:muppets"]
}
event = MockEvent(
sender="@misspiggy:muppets",
type="m.room.topic",
room_id="!foo:bar"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_rooms_works_with_literals(self):
definition = {
"rooms": ["!secretbase:unknown"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!secretbase:unknown"
)
self.assertTrue(
Filter(definition).check(event)
)
def test_definition_rooms_works_with_unknowns(self):
definition = {
"rooms": ["!secretbase:unknown"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!anothersecretbase:unknown"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_not_rooms_works_with_literals(self):
definition = {
"not_rooms": ["!anothersecretbase:unknown"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!anothersecretbase:unknown"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_not_rooms_works_with_unknowns(self):
definition = {
"not_rooms": ["!secretbase:unknown"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!anothersecretbase:unknown"
)
self.assertTrue(
Filter(definition).check(event)
)
def test_definition_not_rooms_takes_priority_over_rooms(self):
definition = {
"not_rooms": ["!secretbase:unknown"],
"rooms": ["!secretbase:unknown"]
}
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!secretbase:unknown"
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_combined_event(self):
definition = {
"not_senders": ["@misspiggy:muppets"],
"senders": ["@kermit:muppets"],
"rooms": ["!stage:unknown"],
"not_rooms": ["!piggyshouse:muppets"],
"types": ["m.room.message", "muppets.kermit.*"],
"not_types": ["muppets.misspiggy.*"]
}
event = MockEvent(
sender="@kermit:muppets", # yup
type="m.room.message", # yup
room_id="!stage:unknown" # yup
)
self.assertTrue(
Filter(definition).check(event)
)
def test_definition_combined_event_bad_sender(self):
definition = {
"not_senders": ["@misspiggy:muppets"],
"senders": ["@kermit:muppets"],
"rooms": ["!stage:unknown"],
"not_rooms": ["!piggyshouse:muppets"],
"types": ["m.room.message", "muppets.kermit.*"],
"not_types": ["muppets.misspiggy.*"]
}
event = MockEvent(
sender="@misspiggy:muppets", # nope
type="m.room.message", # yup
room_id="!stage:unknown" # yup
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_combined_event_bad_room(self):
definition = {
"not_senders": ["@misspiggy:muppets"],
"senders": ["@kermit:muppets"],
"rooms": ["!stage:unknown"],
"not_rooms": ["!piggyshouse:muppets"],
"types": ["m.room.message", "muppets.kermit.*"],
"not_types": ["muppets.misspiggy.*"]
}
event = MockEvent(
sender="@kermit:muppets", # yup
type="m.room.message", # yup
room_id="!piggyshouse:muppets" # nope
)
self.assertFalse(
Filter(definition).check(event)
)
def test_definition_combined_event_bad_type(self):
definition = {
"not_senders": ["@misspiggy:muppets"],
"senders": ["@kermit:muppets"],
"rooms": ["!stage:unknown"],
"not_rooms": ["!piggyshouse:muppets"],
"types": ["m.room.message", "muppets.kermit.*"],
"not_types": ["muppets.misspiggy.*"]
}
event = MockEvent(
sender="@kermit:muppets", # yup
type="muppets.misspiggy.kisses", # nope
room_id="!stage:unknown" # yup
)
self.assertFalse(
Filter(definition).check(event)
)
@defer.inlineCallbacks
def test_filter_presence_match(self):
user_filter_json = {
"presence": {
"types": ["m.*"]
}
}
filter_id = yield self.datastore.add_user_filter(
user_localpart=user_localpart,
user_filter=user_filter_json,
)
event = MockEvent(
sender="@foo:bar",
type="m.profile",
)
events = [event]
user_filter = yield self.filtering.get_user_filter(
user_localpart=user_localpart,
filter_id=filter_id,
)
results = user_filter.filter_presence(events=events)
self.assertEquals(events, results)
@defer.inlineCallbacks
def test_filter_presence_no_match(self):
user_filter_json = {
"presence": {
"types": ["m.*"]
}
}
filter_id = yield self.datastore.add_user_filter(
user_localpart=user_localpart + "2",
user_filter=user_filter_json,
)
event = MockEvent(
event_id="$asdasd:localhost",
sender="@foo:bar",
type="custom.avatar.3d.crazy",
)
events = [event]
user_filter = yield self.filtering.get_user_filter(
user_localpart=user_localpart + "2",
filter_id=filter_id,
)
results = user_filter.filter_presence(events=events)
self.assertEquals([], results)
@defer.inlineCallbacks
def test_filter_room_state_match(self):
user_filter_json = {
"room": {
"state": {
"types": ["m.*"]
}
}
}
filter_id = yield self.datastore.add_user_filter(
user_localpart=user_localpart,
user_filter=user_filter_json,
)
event = MockEvent(
sender="@foo:bar",
type="m.room.topic",
room_id="!foo:bar"
)
events = [event]
user_filter = yield self.filtering.get_user_filter(
user_localpart=user_localpart,
filter_id=filter_id,
)
results = user_filter.filter_room_state(events=events)
self.assertEquals(events, results)
@defer.inlineCallbacks
def test_filter_room_state_no_match(self):
user_filter_json = {
"room": {
"state": {
"types": ["m.*"]
}
}
}
filter_id = yield self.datastore.add_user_filter(
user_localpart=user_localpart,
user_filter=user_filter_json,
)
event = MockEvent(
sender="@foo:bar",
type="org.matrix.custom.event",
room_id="!foo:bar"
)
events = [event]
user_filter = yield self.filtering.get_user_filter(
user_localpart=user_localpart,
filter_id=filter_id,
)
results = user_filter.filter_room_state(events)
self.assertEquals([], results)
def test_filter_rooms(self):
definition = {
"rooms": ["!allowed:example.com", "!excluded:example.com"],
"not_rooms": ["!excluded:example.com"],
}
room_ids = [
"!allowed:example.com", # Allowed because in rooms and not in not_rooms.
"!excluded:example.com", # Disallowed because in not_rooms.
"!not_included:example.com", # Disallowed because not in rooms.
]
filtered_room_ids = list(Filter(definition).filter_rooms(room_ids))
self.assertEquals(filtered_room_ids, ["!allowed:example.com"])
@defer.inlineCallbacks
def test_add_filter(self):
user_filter_json = {
"room": {
"state": {
"types": ["m.*"]
}
}
}
filter_id = yield self.filtering.add_user_filter(
user_localpart=user_localpart,
user_filter=user_filter_json,
)
self.assertEquals(filter_id, 0)
self.assertEquals(user_filter_json, (
yield self.datastore.get_user_filter(
user_localpart=user_localpart,
filter_id=0,
)
))
@defer.inlineCallbacks
def test_get_filter(self):
user_filter_json = {
"room": {
"state": {
"types": ["m.*"]
}
}
}
filter_id = yield self.datastore.add_user_filter(
user_localpart=user_localpart,
user_filter=user_filter_json,
)
filter = yield self.filtering.get_user_filter(
user_localpart=user_localpart,
filter_id=filter_id,
)
self.assertEquals(filter.get_filter_json(), user_filter_json)
self.assertRegexpMatches(repr(filter), r"<FilterCollection \{.*\}>")
|
|
#!/usr/bin/env python
#
# meza command
#
import sys, getopt, os
def load_yaml ( filepath ):
import yaml
with open(filepath, 'r') as stream:
try:
return yaml.load(stream)
except yaml.YAMLError as exc:
print(exc)
defaults = load_yaml( "/opt/meza/config/defaults.yml" )
# Hard-coded for now, because I'm not sure where to set it yet
language = "en"
i18n = load_yaml( os.path.join( defaults['m_i18n'], language+".yml" ) )
def main (argv):
# meza requires a command parameter. No first param, no command. Display
# help. Also display help if explicitly specifying help.
if len(argv) == 0:
display_docs('base')
sys.exit(1)
elif argv[0] in ('-h', '--help'):
display_docs('base')
sys.exit(0) # asking for help doesn't give error code
elif argv[0] in ('-v', '--version'):
import subprocess
version = subprocess.check_output( ["git", "--git-dir=/opt/meza/.git", "describe", "--tags" ] )
commit = subprocess.check_output( ["git", "--git-dir=/opt/meza/.git", "rev-parse", "HEAD" ] )
print "Meza " + version.strip()
print "Commit " + commit.strip()
print "Mediawiki EZ Admin"
print
sys.exit(0)
# Every command has a sub-command. No second param, no sub-command. Display
# help for that specific sub-command.
# sub-command "update" does not require additional directives
if len(argv) == 1 and argv[0] != "update":
display_docs(argv[0])
sys.exit(1)
elif len(argv) == 2 and argv[1] in ('--help','-h'):
display_docs(argv[0])
sys.exit(0)
command = argv[0]
command_fn = "meza_command_{}".format( argv[0] )
# if command_fn is a valid Python function, pass it all remaining args
if command_fn in globals() and callable( globals()[command_fn] ):
globals()[command_fn]( argv[1:] )
else:
print
print "{} is not a valid command".format(command)
sys.exit(1)
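# Hedged sketch of the dispatch pattern used above (and again for the "setup"
# and "maint" sub-commands): a function name is built from the command string
# and looked up in globals(). The names below are illustrative only and are
# not real meza commands.
def _example_dispatch(command, argv):
	fn_name = "meza_command_{}".format(command)
	fn = globals().get(fn_name)
	if callable(fn):
		fn(argv)
	else:
		print "{} is not a valid command".format(command)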
def meza_command_deploy (argv):
env = argv[0]
rc = check_environment(env)
# return code != 0 means failure
if rc != 0:
if env == "monolith":
meza_command_setup_env(env, True)
else:
sys.exit(rc)
more_extra_vars = False
# strip environment off of it
argv = argv[1:]
# save state of args before stripping -o and --overwrite
args_string = ' '.join( argv )
# if argv[1:] includes -o or --overwrite
if len( set(argv).intersection({"-o", "--overwrite"}) ) > 0:
# remove -o and --overwrite from args;
argv = [value for value in argv[:] if value not in ["-o", "--overwrite"]]
more_extra_vars = { 'force_overwrite_from_backup': True }
import hashlib
start = get_datetime_string()
unique = hashlib.sha1( start + env ).hexdigest()[:8]
write_deploy_log( start, env, unique, 'start', args_string )
shell_cmd = playbook_cmd( 'site', env, more_extra_vars )
if len(argv) > 0:
shell_cmd = shell_cmd + argv
return_code = meza_shell_exec( shell_cmd )
if return_code == 0:
condition = 'complete'
else:
condition = 'failed'
end = get_datetime_string()
write_deploy_log( end, env, unique, condition, args_string )
meza_shell_exec_exit( return_code )
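# Each call to write_deploy_log records one tab-separated line: timestamp,
# environment, short unique id, condition (start/complete/failed), meza git
# tag, meza git hash, conf-meza secret and public git hashes, and the original
# command-line arguments.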
def write_deploy_log( datetime, env, unique, condition, args_string ):
import subprocess, os
deploy_log = defaults['m_logs_deploy']
line = "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
datetime,
env,
unique,
condition,
get_git_descripe_tags( "/opt/meza" ),
get_git_hash( "/opt/meza" ),
get_git_hash( "/opt/conf-meza/secret" ),
get_git_hash( "/opt/conf-meza/public" ),
args_string
)
log_dir = os.path.dirname( os.path.realpath( deploy_log ) )
if not os.path.isdir( log_dir ):
os.makedirs( log_dir )
with open(deploy_log, "a") as myfile:
myfile.write(line)
def get_git_hash ( dir ):
import subprocess, os
git_dir = "{}/.git".format( dir )
if os.path.isdir( git_dir ):
try:
commit = subprocess.check_output( ["git", "--git-dir={}".format( git_dir), "rev-parse", "HEAD" ] ).strip()
except:
commit = "git-error"
return commit
else:
return "not-a-git-repo"
def get_git_descripe_tags ( dir ):
import subprocess, os
git_dir = "{}/.git".format( dir )
if os.path.isdir( git_dir ):
try:
tags = subprocess.check_output( ["git", "--git-dir={}".format( git_dir ), "describe", "--tags" ] ).strip()
except:
tags = "git-error"
return tags
else:
return "not-a-git-repo"
# env
# dev
# dev-networking --> vbox-networking ??
# docker
def meza_command_setup (argv):
sub_command = argv[0]
if sub_command == "dev-networking":
sub_command = "dev_networking" # hyphen not a valid function character
command_fn = "meza_command_setup_" + sub_command
# if command_fn is a valid Python function, pass it all remaining args
if command_fn in globals() and callable( globals()[command_fn] ):
globals()[command_fn]( argv[1:] )
else:
print
print sub_command + " is not a valid sub-command for setup"
sys.exit(1)
def meza_command_update (argv):
import subprocess
	# This function executes many Git commands that need to be run from /opt/meza
os.chdir("/opt/meza")
# Define a special git remote repository so we can control its settings
# Else, a user using Vagrant may have their origin remote setup for SSH
# but these commands need HTTPS.
meza_remote = "mezaremote"
check_remotes = subprocess.check_output( ["git", "remote" ] ).strip().split("\n")
if meza_remote not in check_remotes:
add_remote = subprocess.check_output( ["git", "remote", "add", meza_remote, "https://github.com/enterprisemediawiki/meza.git" ] )
# Get latest commits and tags from mezaremote
fetch = subprocess.check_output( ["git", "fetch", meza_remote ] )
fetch = subprocess.check_output( ["git", "fetch", meza_remote, "--tags" ] )
tags_text = subprocess.check_output( ["git", "tag", "-l" ] )
if len(argv) == 0:
# print fetch.strip()
print "The following versions are available:"
print tags_text.strip()
print ""
closest_tag = subprocess.check_output( ["git", "describe", "--tags" ] )
print "You are currently on version {}".format(closest_tag.strip())
print "To change versions, do 'sudo meza update <version>'"
elif len(argv) > 1:
print "Unknown argument {}".format(argv[1])
else:
# Needed else 'git status' gives bad response
status = subprocess.check_output( ["git", "status", "--untracked-files=no", "--porcelain" ] )
status = status.strip()
if status != "":
print "'git status' not empty:\n{}".format(status)
version = argv[0]
if status == "":
tags = tags_text.split("\n")
branches = subprocess.check_output( ["git", "branch", "-a" ] ).strip().split("\n")
branches = map(str.strip, branches)
if version in tags:
version_type = "at version"
tag_version = "tags/{}".format(version)
checkout = subprocess.check_output( ["git", "checkout", tag_version ], stderr=subprocess.STDOUT )
elif version in branches or "* {}".format(version) in branches:
version_type = "on branch"
checkout = subprocess.check_output( ["git", "checkout", version ], stderr=subprocess.STDOUT )
reset = subprocess.check_output( ["git", "reset", "--hard", "mezaremote/{}".format(version) ] )
elif "remotes/{}/{}".format(meza_remote,version) in branches:
version_type = "on branch"
checkout = subprocess.check_output( ["git", "checkout", "-b", version, '-t', "{}/{}".format(meza_remote,version) ], stderr=subprocess.STDOUT )
else:
print "{} is not a valid version or branch".format(version)
sys.exit(1)
print ""
print ""
print "Meza now {} {}".format(version_type, version)
print "Now deploy changes with 'sudo meza deploy <environment>'"
else:
print "Files have been modified in /opt/meza. Clean them up before proceeding."
print "MSG: {}".format(status)
# FIXME #824: This function is big.
def meza_command_setup_env (argv, return_not_exit=False):
import json, string
if isinstance( argv, basestring ):
env = argv
else:
env = argv[0]
if not os.path.isdir( "/opt/conf-meza" ):
os.mkdir( "/opt/conf-meza" )
if not os.path.isdir( "/opt/conf-meza/secret" ):
os.mkdir( "/opt/conf-meza/secret" )
if os.path.isdir( "/opt/conf-meza/secret/" + env ):
print
print "Environment {} already exists".format(env)
sys.exit(1)
fqdn = db_pass = private_net_zone = False
try:
opts, args = getopt.getopt(argv[1:],"",["fqdn=","db_pass=","private_net_zone="])
except Exception as e:
print str(e)
print 'meza setup env <env> [options]'
sys.exit(1)
for opt, arg in opts:
if opt == "--fqdn":
fqdn = arg
elif opt == "--db_pass":
# This will put the DB password on the command line, so should
# only be done in testing cases
db_pass = arg
elif opt == "--private_net_zone":
private_net_zone = arg
else:
print "Unrecognized option " + opt
sys.exit(1)
if not fqdn:
fqdn = prompt("fqdn")
if not db_pass:
db_pass = prompt_secure("db_pass")
# No need for private networking. Set to public.
if env == "monolith":
private_net_zone = "public"
elif not private_net_zone:
private_net_zone = prompt("private_net_zone")
# Ansible environment variables
env_vars = {
'env': env,
'fqdn': fqdn,
'private_net_zone': private_net_zone,
# Set all db passwords the same
'mysql_root_pass': db_pass,
'wiki_app_db_pass': db_pass,
'db_slave_pass': db_pass,
# Generate a random secret key
'wg_secret_key': random_string( num_chars=64, valid_chars= string.ascii_letters + string.digits )
}
server_types = ['load_balancers','app_servers','memcached_servers',
'db_slaves','parsoid_servers','elastic_servers','backup_servers','logging_servers']
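	# Precedence for each server type: an explicit <type> environment variable,
	# then default_servers, then localhost -- except db_slaves, which stay
	# empty unless explicitly set.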
for stype in server_types:
if stype in os.environ:
env_vars[stype] = [x.strip() for x in os.environ[stype].split(',')]
elif stype == "db_slaves":
# unless db_slaves are explicitly set, don't configure any
env_vars["db_slaves"] = []
elif "default_servers" in os.environ:
env_vars[stype] = [x.strip() for x in os.environ["default_servers"].split(',')]
else:
env_vars[stype] = ['localhost']
if "db_master" in os.environ:
env_vars["db_master"] = os.environ["db_master"].strip()
elif "default_servers" in os.environ:
env_vars["db_master"] = os.environ["default_servers"].strip()
else:
env_vars["db_master"] = 'localhost'
json_env_vars = json.dumps(env_vars)
# Create temporary extra vars file in secret directory so passwords
# are not written to command line. Putting in secret should make
# permissions acceptable since this dir will hold secret info, though it's
# sort of an odd place for a temporary file. Perhaps /root instead?
extra_vars_file = "/opt/conf-meza/secret/temp_vars.json"
if os.path.isfile(extra_vars_file):
os.remove(extra_vars_file)
f = open(extra_vars_file, 'w')
f.write(json_env_vars)
f.close()
# Make sure temp_vars.json is accessible. On the first run of deploy it is
# possible that user meza-ansible will not be able to reach this file,
	# specifically if the system has a restrictive umask set (e.g. 077).
os.chmod(extra_vars_file, 0664)
shell_cmd = playbook_cmd( "setup-env" ) + ["--extra-vars", '@'+extra_vars_file]
rc = meza_shell_exec( shell_cmd )
os.remove(extra_vars_file)
print
print "Please review your host file. Run command:"
print " sudo vi /opt/conf-meza/secret/{}/hosts".format(env)
print "Please review your secret config. Run command:"
print " sudo vi /opt/conf-meza/secret/{}/secret.yml".format(env)
if return_not_exit:
return rc
else:
sys.exit(rc)
def meza_command_setup_dev (argv):
dev_users = prompt("dev_users")
dev_git_user = prompt("dev_git_user")
dev_git_user_email = prompt("dev_git_user_email")
for dev_user in dev_users.split(' '):
os.system( "sudo -u {} git config --global user.name '{}'".format( dev_user, dev_git_user ) )
os.system( "sudo -u {} git config --global user.email {}".format( dev_user, dev_git_user_email ) )
os.system( "sudo -u {} git config --global color.ui true".format( dev_user ) )
# ref: https://www.liquidweb.com/kb/how-to-install-and-configure-vsftpd-on-centos-7/
os.system( "yum -y install vsftpd" )
os.system( "sed -r -i 's/anonymous_enable=YES/anonymous_enable=NO/g;' /etc/vsftpd/vsftpd.conf" )
os.system( "sed -r -i 's/local_enable=NO/local_enable=YES/g;' /etc/vsftpd/vsftpd.conf" )
os.system( "sed -r -i 's/write_enable=NO/write_enable=YES/g;' /etc/vsftpd/vsftpd.conf" )
# Start FTP and setup firewall
os.system( "systemctl restart vsftpd" )
os.system( "systemctl enable vsftpd" )
os.system( "firewall-cmd --permanent --add-port=21/tcp" )
os.system( "firewall-cmd --reload" )
print "To setup SFTP in Sublime Text, see:"
print "https://wbond.net/sublime_packages/sftp/settings#Remote_Server_Settings"
sys.exit()
def meza_command_setup_dev_networking (argv):
rc = meza_shell_exec(["bash","/opt/meza/src/scripts/dev-networking.sh"])
sys.exit(rc)
def meza_command_setup_docker (argv):
shell_cmd = playbook_cmd( "getdocker" )
rc = meza_shell_exec( shell_cmd )
sys.exit(0)
def meza_command_create (argv):
sub_command = argv[0]
if sub_command in ("wiki", "wiki-promptless"):
if len(argv) < 2:
print "You must specify an environment: 'meza create wiki ENV'"
sys.exit(1)
env = argv[1]
rc = check_environment(env)
if rc > 0:
meza_shell_exec_exit(rc)
playbook = "create-" + sub_command
if sub_command == "wiki-promptless":
if len(argv) < 4:
print "create wiki-promptless requires wiki_id and wiki_name arguments"
sys.exit(1)
shell_cmd = playbook_cmd( playbook, env, { 'wiki_id': argv[2], 'wiki_name': argv[3] } )
else:
shell_cmd = playbook_cmd( playbook, env )
rc = meza_shell_exec( shell_cmd )
meza_shell_exec_exit(rc)
def meza_command_delete (argv):
sub_command = argv[0]
if sub_command not in ("wiki", "wiki-promptless", "elasticsearch"):
print "{} is not a valid sub-command for delete".format(sub_command)
sys.exit(1)
if len(argv) < 2:
print "You must specify an environment: 'meza delete {} ENV'".format(sub_command)
sys.exit(1)
env = argv[1]
rc = check_environment(env)
if rc > 0:
meza_shell_exec_exit(rc)
playbook = "delete-" + sub_command
if sub_command == "wiki-promptless":
if len(argv) < 3:
print "delete wiki-promptless requires wiki_id"
sys.exit(1)
shell_cmd = playbook_cmd( playbook, env, { 'wiki_id': argv[2] } )
else:
shell_cmd = playbook_cmd( playbook, env )
rc = meza_shell_exec( shell_cmd )
meza_shell_exec_exit(rc)
def meza_command_backup (argv):
env = argv[0]
rc = check_environment(env)
if rc != 0:
meza_shell_exec_exit(rc)
shell_cmd = playbook_cmd( 'backup', env ) + argv[1:]
rc = meza_shell_exec( shell_cmd )
meza_shell_exec_exit(rc)
def meza_command_setbaseconfig (argv):
env = argv[0]
rc = check_environment(env)
if rc != 0:
meza_shell_exec_exit(rc)
shell_cmd = playbook_cmd( 'setbaseconfig', env ) + argv[1:]
rc = meza_shell_exec( shell_cmd )
meza_shell_exec_exit(rc)
def meza_command_destroy (argv):
print "command not yet built"
# FIXME #825: It would be great to have this function automatically map all
# scripts in MediaWiki's maintenance directory to all wikis. Then
# you could do:
# $ meza maint runJobs + argv --> run jobs on all wikis
# $ meza maint createAndPromote + argv --> create a user on all wikis
def meza_command_maint (argv):
# FIXME #711: This has no notion of environments and won't work in polylith
sub_command = argv[0]
command_fn = "meza_command_maint_" + sub_command
# if command_fn is a valid Python function, pass it all remaining args
if command_fn in globals() and callable( globals()[command_fn] ):
globals()[command_fn]( argv[1:] )
else:
print
print sub_command + " is not a valid sub-command for maint"
sys.exit(1)
def meza_command_maint_runJobs (argv):
#
	# FIXME #711: THIS FUNCTION SHOULD STILL WORK ON MONOLITHS, BUT HAS NOT BEEN
# RE-TESTED SINCE MOVING TO ANSIBLE. FOR NON-MONOLITHS IT WILL
# NOT WORK AND NEEDS TO BE ANSIBLE-IZED.
#
wikis_dir = "/opt/htdocs/wikis"
wikis = os.listdir( wikis_dir )
for i in wikis:
if os.path.isdir(os.path.join(wikis_dir, i)):
anywiki=i
break
if not anywiki:
print "No wikis available to run jobs"
sys.exit(1)
shell_cmd = ["WIKI="+anywiki, "php", "/opt/meza/src/scripts/runAllJobs.php"]
if len(argv) > 0:
shell_cmd = shell_cmd + ["--wikis="+argv[1]]
rc = meza_shell_exec( shell_cmd )
meza_shell_exec_exit(rc)
def meza_command_maint_rebuild (argv):
env = argv[0]
rc = check_environment(env)
# return code != 0 means failure
if rc != 0:
meza_shell_exec_exit(rc)
more_extra_vars = False
# strip environment off of it
argv = argv[1:]
shell_cmd = playbook_cmd( 'rebuild-smw-and-index', env, more_extra_vars )
if len(argv) > 0:
shell_cmd = shell_cmd + argv
rc = meza_shell_exec( shell_cmd )
# exit with same return code as ansible command
meza_shell_exec_exit(rc)
def meza_command_maint_cleanuploadstash (argv):
env = argv[0]
rc = check_environment(env)
# return code != 0 means failure
if rc != 0:
meza_shell_exec_exit(rc)
more_extra_vars = False
# strip environment off of it
argv = argv[1:]
shell_cmd = playbook_cmd( 'cleanup-upload-stash', env, more_extra_vars )
if len(argv) > 0:
shell_cmd = shell_cmd + argv
rc = meza_shell_exec( shell_cmd )
# exit with same return code as ansible command
meza_shell_exec_exit(rc)
def meza_command_maint_encrypt_string (argv):
env = argv[0]
rc = check_environment(env)
# return code != 0 means failure
if rc != 0:
meza_shell_exec_exit(rc)
# strip environment off of it
argv = argv[1:]
if len(argv) == 0:
print "encrypt_string requires value to encrypt. Ex:"
print " sudo meza maint encrypt_string <env> somesecretvalue"
print "Additionally, you can supply the variable name. Ex:"
print " sudo meza maint encrypt_string <env> somesecretvalue var_name"
sys.exit(1)
varvalue = argv[0]
vault_pass_file = get_vault_pass_file( env )
shell_cmd = ["ansible-vault","encrypt_string","--vault-id",vault_pass_file,varvalue]
# If name argument passed in, use it
if len(argv) == 2:
shell_cmd = shell_cmd + ["--name",argv[1]]
# false = don't print command prior to running
rc = meza_shell_exec( shell_cmd, False )
# exit with same return code as ansible command
meza_shell_exec_exit(rc)
# sudo meza maint decrypt_string <env> <encrypted_string>
def meza_command_maint_decrypt_string (argv):
env = argv[0]
rc = check_environment(env)
# return code != 0 means failure
if rc != 0:
meza_shell_exec_exit(rc)
# strip environment off of it
argv = argv[1:]
if len(argv) == 0:
print "decrypt_string requires you to supply encrypted string. Ex:"
print """
sudo meza maint decrypt_string <env> '$ANSIBLE_VAULT;1.1;AES256
31386561343430626435373766393066373464656262383063303630623032616238383838346132
6162313461666439346337616166396133616466363935360a373333313165343535373761333634
62636634306632633539306436363866323639363332613363346663613235653138373837303337
6133383864613430370a623661653462336565376565346638646238643132636663383761613966
6566'
"""
sys.exit(1)
encrypted_string = argv[0]
vault_pass_file = get_vault_pass_file( env )
tmp_file = write_vault_decryption_tmp_file( env, encrypted_string )
shell_cmd = ["ansible-vault","decrypt",tmp_file,"--vault-password-file",vault_pass_file]
# false = don't print command prior to running
rc = meza_shell_exec( shell_cmd, False )
decrypted_value = read_vault_decryption_tmp_file( env )
print ""
print "Decrypted value:"
print decrypted_value
# exit with same return code as ansible command
meza_shell_exec_exit(rc)
def meza_command_docker (argv):
if argv[0] == "run":
if len(argv) == 1:
docker_repo = "jamesmontalvo3/meza-docker-test-max:latest"
else:
docker_repo = argv[1]
rc = meza_shell_exec([ "bash", "/opt/meza/src/scripts/build-docker-container.sh", docker_repo])
meza_shell_exec_exit(rc)
elif argv[0] == "exec":
if len(argv) < 2:
print "Please provide docker container id"
meza_shell_exec(["docker", "ps" ])
sys.exit(1)
else:
container_id = argv[1]
if len(argv) < 3:
print "Please supply a command for your container"
sys.exit(1)
shell_cmd = ["docker","exec","--tty",container_id,"env","TERM=xterm"] + argv[2:]
rc = meza_shell_exec( shell_cmd )
else:
print argv[0] + " is not a valid command"
sys.exit(1)
def playbook_cmd ( playbook, env=False, more_extra_vars=False ):
command = ['sudo', '-u', 'meza-ansible', 'ansible-playbook',
'/opt/meza/src/playbooks/{}.yml'.format(playbook)]
if env:
host_file = "/opt/conf-meza/secret/{}/hosts".format(env)
		# Meza _needs_ to be able to load this file, so be perhaps a little
		# overzealous and chown/chmod it every time.
secret_file = '/opt/conf-meza/secret/{}/secret.yml'.format(env)
meza_chown( secret_file, 'meza-ansible', 'wheel' )
os.chmod( secret_file, 0o660 )
# Setup password file if not exists (environment info is potentially encrypted)
vault_pass_file = get_vault_pass_file( env )
command = command + [ '-i', host_file, '--vault-password-file', vault_pass_file ]
extra_vars = { 'env': env }
else:
extra_vars = {}
if more_extra_vars:
for varname, value in more_extra_vars.items():
extra_vars[varname] = value
if len(extra_vars) > 0:
import json
command = command + ["--extra-vars", "'{}'".format(json.dumps(extra_vars)).replace('"','\\"') ]
return command
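# Hedged illustration of the --extra-vars quoting above, with a made-up
# environment name: the JSON from json.dumps() is wrapped in single quotes and
# its double quotes are escaped so the string survives os.system() intact.
def _example_extra_vars_arg(env):
	import json
	return "'{}'".format(json.dumps({'env': env})).replace('"', '\\"')
# _example_extra_vars_arg('demo') returns the string '{\"env\": \"demo\"}'
# (including the surrounding single quotes).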
# FIXME install --> setup dev-networking, setup docker, deploy monolith (special case)
def meza_shell_exec ( shell_cmd, print_command=True ):
	# We get errors from user meza-ansible trying to write to the calling
	# user's home directory if we don't cd to a neutral location first. By
	# cd'ing here we also pick up ansible.cfg and the vars it defines.
starting_wd = os.getcwd()
os.chdir( "/opt/meza/config" )
# import subprocess
# # child = subprocess.Popen(shell_cmd, stdout=subprocess.PIPE)
# child = subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# if return_output:
# output = child.communicate()[0]
# else:
# print child.communicate()[0]
# rc = child.returncode
#
# FIXME #874: For some reason `sudo -u meza-ansible ...` started failing in
# fall 2017. Using `su meza-ansible -c "..."` works. It is not
# known why this started happening, but a fix was needed. This,
# despite being somewhat of a hack, seemed like the best way to
# address the issue at the time.
#
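	# e.g. ['sudo', '-u', 'meza-ansible', 'ansible-playbook', 'site.yml'] is
	# rewritten below as: su meza-ansible -c "ansible-playbook site.yml"
	# (illustrative arguments only; real argument lists come from playbook_cmd)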
firstargs = ' '.join(shell_cmd[0:3])
if firstargs == "sudo -u meza-ansible":
cmd = "su meza-ansible -c \"{}\"".format( ' '.join(shell_cmd[3:]) )
else:
cmd = ' '.join(shell_cmd)
if print_command:
print cmd
rc = os.system(cmd)
# Move back to original working directory
os.chdir( starting_wd )
return rc
# Return codes from function meza_shell_exec may either not be numbers or they
# may be out of the range accepted by sys.exit(). For example, return codes in
# the 30000 range were not being interpreted as failures. This function will
# instead take any non-zero return code and make it return the integer 1.
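# (On POSIX, os.system() returns the raw wait status -- the child's exit code
# shifted left 8 bits -- and exit statuses are truncated to 8 bits, so a value
# like 30208 would otherwise be reported as success.)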
def meza_shell_exec_exit( return_code=0 ):
if int(return_code) > 0:
print "Exiting with return code {}".format(return_code)
sys.exit(1)
else:
sys.exit(0)
def get_vault_pass_file ( env ):
import pwd
import grp
home_dir = defaults['m_home']
vault_pass_file = '{}/meza-ansible/.vault-pass-{}.txt'.format(home_dir,env)
if not os.path.isfile( vault_pass_file ):
with open( vault_pass_file, 'w' ) as f:
f.write( random_string( num_chars=64 ) )
f.close()
	# Run this every time, since it should be fast and if meza-ansible can't
# read this then you're stuck!
meza_chown( vault_pass_file, 'meza-ansible', 'wheel' )
os.chmod( vault_pass_file, 0o600 )
return vault_pass_file
def write_vault_decryption_tmp_file ( env, value ):
home_dir = defaults['m_home']
temp_decrypt_file = '{}/meza-ansible/.vault-temp-decrypt-{}.txt'.format(home_dir,env)
with open( temp_decrypt_file, 'w' ) as filetowrite:
filetowrite.write( value )
filetowrite.close()
return temp_decrypt_file
def read_vault_decryption_tmp_file ( env ):
home_dir = defaults['m_home']
temp_decrypt_file = '{}/meza-ansible/.vault-temp-decrypt-{}.txt'.format(home_dir,env)
f = open( temp_decrypt_file, "r" )
if f.mode == 'r':
contents = f.read()
f.close()
os.remove( temp_decrypt_file )
else:
contents = "[decryption error]"
return contents
def meza_chown ( path, username, groupname ):
import pwd
import grp
uid = pwd.getpwnam( username ).pw_uid
gid = grp.getgrnam( groupname ).gr_gid
os.chown( path, uid, gid )
def display_docs(name):
f = open('/opt/meza/manual/meza-cmd/{}.txt'.format(name),'r')
print f.read()
def prompt(varname,default=False):
# Pretext message is prior to the actual line the user types on. Input msg
# is on the same line and will be repeated if the user does not give good
# input
pretext_msg = i18n["MSG_prompt_pretext_"+varname]
input_msg = i18n["MSG_prompt_input_"+varname]
print
print pretext_msg
value = raw_input( input_msg )
if default:
# If there's a default, either use user entry or default
value = value or default
else:
# If no default, keep asking until user supplies a value
while (not value):
value = raw_input( input_msg )
return value
def prompt_secure(varname):
import getpass
# See prompt() for more info
pretext_msg = i18n["MSG_prompt_pretext_"+varname]
input_msg = i18n["MSG_prompt_input_"+varname]
print
print pretext_msg
value = getpass.getpass( input_msg )
if not value:
value = random_string()
return value
def random_string(**params):
import string, random
if 'num_chars' in params:
num_chars = params['num_chars']
else:
num_chars = 32
if 'valid_chars' in params:
valid_chars = params['valid_chars']
else:
valid_chars = string.ascii_letters + string.digits + '!@$%^*'
return ''.join(random.SystemRandom().choice(valid_chars) for _ in range(num_chars))
# return code 0 success, 1+ failure
def check_environment(env):
import os
conf_dir = "/opt/conf-meza/secret"
env_dir = os.path.join( conf_dir, env )
if not os.path.isdir( env_dir ):
if env == "monolith":
return 1
print
print '"{}" is not a valid environment.'.format(env)
print "Please choose one of the following:"
conf_dir_stuff = os.listdir( conf_dir )
valid_envs = []
for x in conf_dir_stuff:
if os.path.isdir( os.path.join( conf_dir, x ) ):
valid_envs.append( x )
if len(valid_envs) > 0:
for x in valid_envs:
print " " + x
else:
print " No environments configured"
print " Run command: meza setup env <environment name>"
return 1
host_file = os.path.join( env_dir, "hosts" )
if not os.path.isfile( host_file ):
print
print "{} not a valid file".format( host_file )
return 1
return 0
# http://stackoverflow.com/questions/1994488/copy-file-or-directories-recursively-in-python
def copy (src, dst):
import shutil, errno
try:
shutil.copytree(src, dst)
except OSError as exc: # python >2.5
if exc.errno == errno.ENOTDIR:
shutil.copy(src, dst)
else: raise
def get_datetime_string():
import time, datetime
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
return st
if __name__ == "__main__":
main(sys.argv[1:])
|
|
"""
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline compliance reporting for transportation fuel suppliers in accordance with the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from django.test import TestCase
from django.test import Client
from django.core.files.uploadedfile import SimpleUploadedFile
import django
from rest_framework.test import APIRequestFactory
from rest_framework.parsers import JSONParser
from rest_framework import status
from . import fakedata
from .models.Attachment import Attachment
from .serializers import AttachmentSerializer
from .models.AttachmentViewModel import AttachmentViewModel
from .serializers import AttachmentViewModelSerializer
from .models.Audit import Audit
from .serializers import AuditSerializer
from .models.Contact import Contact
from .serializers import ContactSerializer
from .models.CreditTrade import CreditTrade
from .serializers import CreditTradeSerializer
from .models.CreditTradeLogEntry import CreditTradeLogEntry
from .serializers import CreditTradeLogEntrySerializer
from .models.CurrentUserViewModel import CurrentUserViewModel
from .serializers import CurrentUserViewModelSerializer
from .models.FuelSupplier import FuelSupplier
from .serializers import FuelSupplierSerializer
from .models.Group import Group
from .serializers import GroupSerializer
from .models.GroupMembership import GroupMembership
from .serializers import GroupMembershipSerializer
from .models.GroupMembershipViewModel import GroupMembershipViewModel
from .serializers import GroupMembershipViewModelSerializer
from .models.GroupViewModel import GroupViewModel
from .serializers import GroupViewModelSerializer
from .models.History import History
from .serializers import HistorySerializer
from .models.HistoryViewModel import HistoryViewModel
from .serializers import HistoryViewModelSerializer
from .models.LookupList import LookupList
from .serializers import LookupListSerializer
from .models.Note import Note
from .serializers import NoteSerializer
from .models.Notification import Notification
from .serializers import NotificationSerializer
from .models.NotificationEvent import NotificationEvent
from .serializers import NotificationEventSerializer
from .models.NotificationViewModel import NotificationViewModel
from .serializers import NotificationViewModelSerializer
from .models.Permission import Permission
from .serializers import PermissionSerializer
from .models.PermissionViewModel import PermissionViewModel
from .serializers import PermissionViewModelSerializer
from .models.Role import Role
from .serializers import RoleSerializer
from .models.RolePermission import RolePermission
from .serializers import RolePermissionSerializer
from .models.RolePermissionViewModel import RolePermissionViewModel
from .serializers import RolePermissionViewModelSerializer
from .models.RoleViewModel import RoleViewModel
from .serializers import RoleViewModelSerializer
from .models.User import User
from .serializers import UserSerializer
from .models.UserDetailsViewModel import UserDetailsViewModel
from .serializers import UserDetailsViewModelSerializer
from .models.UserFavourite import UserFavourite
from .serializers import UserFavouriteSerializer
from .models.UserFavouriteViewModel import UserFavouriteViewModel
from .serializers import UserFavouriteViewModelSerializer
from .models.UserRole import UserRole
from .serializers import UserRoleSerializer
from .models.UserRoleViewModel import UserRoleViewModel
from .serializers import UserRoleViewModelSerializer
from .models.UserViewModel import UserViewModel
from .serializers import UserViewModelSerializer
# Custom API test cases.
# If an API operation does not contain generated code then it is tested in this
# file.
#
class Test_Api_Custom(TestCase):
def setUp(self):
# Every test needs a client.
self.client = Client()
# needed to setup django
django.setup()
# following functions are used by the complex tests to create / delete dependent objects
def createContact(self):
testContactUrl = "/api/contacts"
# Create:
payload = fakedata.ContactTestDataCreate()
jsonString = json.dumps(payload)
response = self.client.post(testContactUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
contactId = data['id']
return contactId
def createGroup(self):
testGroupUrl = "/api/groups"
payload = fakedata.GroupTestDataCreate()
jsonString = json.dumps(payload)
response = self.client.post(testGroupUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
contactId = data['id']
return contactId
def createCompliancePeriod(self):
testUrl = "/api/complianceperiods"
# Create:
payload = fakedata.CompliancePeriodTestDataCreate()
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
return createdId
def createFuelSupplier(self, contactId):
testUrl = "/api/fuelsuppliers"
# Create:
payload = {
'name': "Initial",
'status': "Initial",
'dateCreated': '2000-01-01',
'primaryContact': contactId ,
'contacts': [contactId],
'notes': [],
'attachments': [],
'history': []
}
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
return createdId
def createRole(self):
testUrl = "/api/roles"
# Create:
fakeRole = fakedata.RoleTestDataCreate()
payload = {
'name': fakeRole['name'],
'description': fakeRole['description']
}
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
return createdId
def createPermission(self):
testUrl = "/api/permissions"
# Create:
fakePermission = fakedata.PermissionTestDataCreate()
payload = {
'code': fakePermission['code'],
'name': fakePermission['name'],
'description': fakePermission['description']
}
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
return createdId
def createUser(self, fuelsupplierId):
testUserUrl = "/api/users"
# Create:
fakeUser = fakedata.UserTestDataCreate()
payload = {
'givenName': fakeUser['givenName'],
'surname':fakeUser['surname'],
'initials':fakeUser['initials'],
'email':fakeUser['email'],
'status':'Active',
'smUserId':fakeUser['smUserId'],
'guid':fakeUser['guid'],
'smAuthorizationDirectory':fakeUser['smAuthorizationDirectory'],
'fuelSupplier': fuelsupplierId
}
jsonString = json.dumps(payload)
response = self.client.post(testUserUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
userId = data['id']
return userId
def createCreditTrade(self, fuelSupplierId, userId):
testUrl = "/api/credittrades"
fakeCreditTrade = fakedata.CreditTradeTestDataCreate()
payload = {
'status':fakeCreditTrade['status'],
'initiator':fuelSupplierId,
'respondent': fuelSupplierId,
'initiatorLastUpdateBy': userId,
'respondentLastUpdatedBy': None,
'reviewedRejectedBy': None,
'approvedRejectedBy': None,
'cancelledBy': None,
'tradeExecutionDate': '2017-01-01',
'transactionType':fakeCreditTrade['transactionType'],
'numberOfCredits':fakeCreditTrade['numberOfCredits'],
'fairMarketValuePrice': '100.00',
'fuelSupplierBalanceBeforeTransaction':'2017-01-01',
'notes':[],
'attachments':[],
'history':[]
}
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
return createdId
def createOffer(self, fuelSupplierId):
testUrl = "/api/offers"
fakeOffer = fakedata.OfferTestDataCreate()
payload = {
'fuelSupplier':fuelSupplierId,
'status': fakeOffer['status'],
'buyOrSell': fakeOffer['buyOrSell'],
'numberOfCredits': fakeOffer['numberOfCredits'],
'numberOfViews': fakeOffer['numberOfViews'],
'datePosted': '2017-01-01',
'note': fakeOffer['note'],
'history':[]
}
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
return createdId
def createNotificationEvent(self, creditTradeId, offerId):
testUrl = "/api/notificationevents"
fakeNotificationEvent = fakedata.NotificationEventTestDataCreate()
payload = {
'eventTime': '2017-01-01',
'eventTypeCode': fakeNotificationEvent['eventTypeCode'],
'notes': fakeNotificationEvent['notes'],
'creditTrade':creditTradeId,
'offer': offerId
}
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
return createdId
def deleteContact(self, contactId):
# cleanup the contact.
deleteUrl = "/api/contacts/" + str(contactId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def deleteCreditTrade(self, creditTradeId):
deleteUrl = "/api/credittrades/" + str(creditTradeId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def deleteFuelSupplier(self, fuelsupplierId):
deleteUrl = "/api/fuelsuppliers/" + str(fuelsupplierId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def deleteGroup(self, groupId):
deleteUrl = "/api/groups/" + str(groupId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def deleteRole(self, roleId):
deleteUrl = "/api/roles/" + str(roleId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def deleteRolePermission(self, rolePermissionId):
deleteUrl = "/api/rolepermissions/" + str(rolePermissionId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def deleteOffer(self, offerId):
deleteUrl = "/api/offers/" + str(offerId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def deletePermission(self, permissionId):
deleteUrl = "/api/permissions/" + str(permissionId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def deleteNotificationEvent(self, notificationEventId):
deleteUrl = "/api/notificationevents/" + str(notificationEventId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def deleteUser(self, userId):
deleteUrl = "/api/users/" + str(userId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def test_credittradesBulkPost(self):
# Test Bulk Load.
payload = fakedata.CreditTradeTestDataCreate()
jsonString = "[]"
response = self.client.post('/api/credittrades/bulk',content_type='application/json', data=jsonString)
        # Check that the response is 201 CREATED.
assert status.HTTP_201_CREATED == response.status_code
def test_credittradesGet(self):
        # Credit Trade has the following dependencies:
        #   User
        #     Fuel Supplier
        #   FuelSupplier
        #     Contact
# Order of operations for the create will be:
# 1. Create a Contact
# 2. Create a Fuel Supplier with that Contact
# 3. Create a User with that Fuel Supplier
# 4. Create the Credit Trade.
fakeCreditTrade = fakedata.CreditTradeTestDataCreate()
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
userId = self.createUser(fuelSupplierId)
creditTradeId = self.createCreditTrade(fuelSupplierId, userId)
# Test List operation
baseUrl = "/api/credittrades"
response = self.client.get(baseUrl)
# Check that the response is 200 OK.
assert status.HTTP_200_OK == response.status_code
testUrl = baseUrl + "/" + str(creditTradeId)
response = self.client.get(testUrl)
# Check that the response is 200 OK.
assert status.HTTP_200_OK == response.status_code
# Test Put
payload = {
'id': creditTradeId,
'status':fakeCreditTrade['status'],
'initiator':fuelSupplierId,
'respondent': fuelSupplierId,
'initiatorLastUpdateBy': userId,
'respondentLastUpdatedBy': None,
'reviewedRejectedBy': None,
'approvedRejectedBy': None,
'cancelledBy': None,
'tradeExecutionDate': '2017-01-01',
'transactionType':fakeCreditTrade['transactionType'],
'numberOfCredits':fakeCreditTrade['numberOfCredits'],
'fairMarketValuePrice': '101.00',
'fuelSupplierBalanceBeforeTransaction':'2017-01-01',
'notes':[],
'attachments':[],
'history':[]
}
jsonString = json.dumps(payload)
response = self.client.put(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# Cleanup
self.deleteCreditTrade(creditTradeId)
self.deleteUser(userId)
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_credittradetradelogentriesBulkPost(self):
# Test Bulk Load.
payload = fakedata.CreditTradeLogEntryTestDataCreate()
jsonString = "[]"
response = self.client.post('/api/credittradetradelogentries/bulk',content_type='application/json', data=jsonString)
        # Check that the response is 201 CREATED.
assert status.HTTP_201_CREATED == response.status_code
def test_credittradetradelogentriesGet(self):
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
userId = self.createUser(fuelSupplierId)
creditTradeId = self.createCreditTrade(fuelSupplierId, userId)
# Test Create and List operations.
testUrl = "/api/credittradetradelogentries"
# Create:
serializer_class = CreditTradeLogEntrySerializer
fakeCreditTradeLogEntry = fakedata.CreditTradeLogEntryTestDataCreate()
payload = {
'creditTrade': creditTradeId,
'user': userId,
'logEntryTime': '2000-01-01',
'newStatus': fakeCreditTradeLogEntry['newStatus'],
'newTradeExecutionDate': '2000-01-01',
'newTransactionType': fakeCreditTradeLogEntry['newStatus'],
'newNumberOfCredits': fakeCreditTradeLogEntry['newNumberOfCredits'],
'newFairMarketValuePrice': '500.00',
'newFuelSupplierBalanceAtTransactionTime': fakeCreditTradeLogEntry['newFuelSupplierBalanceAtTransactionTime']
}
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
# List:
response = self.client.get(testUrl)
# Check that the response is 200 OK.
assert status.HTTP_200_OK == response.status_code
        # Post a second entry with a changed status (the PUT endpoint is not exercised here).
getputUrl = testUrl + "/" + str(createdId)
payload = {
'id': createdId,
'creditTrade': creditTradeId,
'user': userId,
'logEntryTime': '2000-01-01',
'newStatus': 'changed',
'newTradeExecutionDate': '2000-01-01',
'newTransactionType': fakeCreditTradeLogEntry['newStatus'],
'newNumberOfCredits': fakeCreditTradeLogEntry['newNumberOfCredits'],
'newFairMarketValuePrice': '500.00',
'newFuelSupplierBalanceAtTransactionTime': fakeCreditTradeLogEntry['newFuelSupplierBalanceAtTransactionTime']
}
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# Get
response = self.client.get(testUrl)
assert status.HTTP_200_OK == response.status_code
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
assert data[1]['newStatus'] == payload['newStatus']
# Cleanup:
deleteUrl = testUrl + "/" + str(createdId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
# Cleanup
self.deleteCreditTrade(creditTradeId)
self.deleteUser(userId)
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_fuelsuppliersBulkPost(self):
# Test Bulk Load.
payload = fakedata.FuelSupplierTestDataCreate()
jsonString = "[]"
response = self.client.post('/api/fuelsuppliers/bulk',content_type='application/json', data=jsonString)
        # Check that the response is 201 CREATED.
assert status.HTTP_201_CREATED == response.status_code
def test_fuelsuppliersCreateGetDelete(self):
# Fuel supplier has contacts as a dependency.
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
# Test List operation
testUrl = "/api/fuelsuppliers"
# List:
response = self.client.get(testUrl)
# Check that the response is 200 OK.
assert status.HTTP_200_OK == response.status_code
# Get
getUrl = testUrl + "/" + str(fuelSupplierId)
response = self.client.get(testUrl)
# Check that the response is 200 OK.
assert status.HTTP_200_OK == response.status_code
# Put
changedpayload = {
'id': fuelSupplierId,
'name': "Changed",
'status': "Changed",
'dateCreated': '2000-01-01',
'primaryContact': contactId ,
'contacts': [contactId],
'notes': [],
'attachments': [],
'history': []
}
jsonString = json.dumps(changedpayload)
response = self.client.put(getUrl, content_type='application/json', data=jsonString)
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
        assert data['name'] == changedpayload['name']
response = self.client.get(getUrl)
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
        assert data['name'] == changedpayload['name']
# Cleanup Fuel Supplier
self.deleteFuelSupplier(fuelSupplierId)
# Cleanup contact:
self.deleteContact(contactId)
def test_notificationsBulkPost(self):
# Test Bulk Load.
payload = fakedata.NotificationTestDataCreate()
jsonString = "[]"
response = self.client.post('/api/notifications/bulk',content_type='application/json', data=jsonString)
        # Check that the response is 201 CREATED.
assert status.HTTP_201_CREATED == response.status_code
def test_notificationsGet(self):
# Test Create and List operations.
testUrl = "/api/notifications"
# Create:
serializer_class = NotificationSerializer
payload = fakedata.NotificationTestDataCreate()
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
# List:
response = self.client.get(testUrl)
# Check that the response is 200 OK.
assert status.HTTP_200_OK == response.status_code
# Cleanup:
deleteUrl = testUrl + "/" + str(createdId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def test_notificationsIdDeletePost(self):
# Test Retrieve and Update operations.
testUrl = "/api/notifications/(?P<id>[0-9]+)/delete"
createUrl = testUrl.replace ("/(?P<id>[0-9]+)/delete","")
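        # The test URL above is the raw Django route regex; the (?P<id>[0-9]+)
        # group is swapped for a concrete id once the object has been created.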
# Create an object:
payload = fakedata.NotificationTestDataCreate()
jsonString = json.dumps(payload)
response = self.client.post(createUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
deleteUrl = testUrl.replace ("(?P<id>[0-9]+)",str(createdId))
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def test_notificationsIdGet(self):
# Test Retrieve and Update operations.
testUrl = "/api/notifications/(?P<id>[0-9]+)"
createUrl = testUrl.replace ("/(?P<id>[0-9]+)","")
# Create an object:
payload = fakedata.NotificationTestDataCreate()
jsonString = json.dumps(payload)
response = self.client.post(createUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
# Update the object:
updateUrl = testUrl.replace ("(?P<id>[0-9]+)",str(createdId))
payload = fakedata.NotificationTestDataUpdate()
jsonString = json.dumps(payload)
response = self.client.put(updateUrl, content_type='application/json', data=jsonString)
# Check that the response is 200 OK.
assert status.HTTP_200_OK == response.status_code
# Cleanup:
deleteUrl = createUrl + "/" + str(createdId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def test_notificationeventsBulkPost(self):
# Test Bulk Load.
payload = fakedata.NotificationEventTestDataCreate()
jsonString = "[]"
response = self.client.post('/api/notificationevents/bulk',content_type='application/json', data=jsonString)
# Check that the response is 201 CREATED.
assert status.HTTP_201_CREATED == response.status_code
def test_notificationeventsGet(self):
# NotificationEvent needs a CreditTrade.
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
userId = self.createUser(fuelSupplierId)
creditTradeId = self.createCreditTrade(fuelSupplierId, userId)
# Test Create and List operations.
testUrl = "/api/notificationevents"
# Create:
serializer_class = NotificationEventSerializer
fakeNotificationEvent = fakedata.NotificationEventTestDataCreate()
payload = {
'eventTime': '2000-01-01',
'eventTypeCode': fakeNotificationEvent['eventTypeCode'],
'notes': fakeNotificationEvent['notes'],
'creditTrade': creditTradeId
}
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_201_CREATED == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
# List:
response = self.client.get(testUrl)
# Check that the response is 200 OK.
assert status.HTTP_200_OK == response.status_code
getputUrl = testUrl + "/" + str (createdId)
# put
payload = {
'eventTime': '2000-01-01',
'eventTypeCode': 'test',
'notes': fakeNotificationEvent['notes'],
'creditTrade': creditTradeId
}
jsonString = json.dumps(payload)
response = self.client.put(getputUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
response = self.client.get(getputUrl)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
assert data['eventTypeCode'] == payload['eventTypeCode']
# Cleanup
deleteUrl = testUrl + "/" + str(createdId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
self.deleteCreditTrade(creditTradeId)
self.deleteUser(userId)
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_usersBulkPost(self):
# Test Bulk Load.
payload = fakedata.UserTestDataCreate()
jsonString = "[]"
response = self.client.post('/api/users/bulk',content_type='application/json', data=jsonString)
# Check that the response is 201 CREATED.
assert status.HTTP_201_CREATED == response.status_code
def test_users(self):
# a User has Fuel supplier as a dependency
# a Fuel Supplier has contacts as a dependency
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
userId = self.createUser(fuelSupplierId)
creditTradeId = self.createCreditTrade(fuelSupplierId, userId)
testUrl="/api/users"
# List:
response = self.client.get(testUrl)
assert status.HTTP_200_OK == response.status_code
fakeUser = fakedata.UserTestDataCreate()
# test update and get
testUrl="/api/users/" + str(userId)
payload = {
'id': userId,
'givenName': 'changed',
'surname':fakeUser['surname'],
'initials':fakeUser['initials'],
'email':fakeUser['email'],
'status':fakeUser['status'],
'smUserId':fakeUser['smUserId'],
'guid':fakeUser['guid'],
'smAuthorizationDirectory':fakeUser['smAuthorizationDirectory'],
'fuelSupplier': fuelSupplierId
}
jsonString = json.dumps(payload)
response = self.client.put(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
response = self.client.get(testUrl)
# Check that the response is 200 OK.
assert status.HTTP_200_OK == response.status_code
# Cleanup
self.deleteCreditTrade(creditTradeId)
self.deleteUser(userId)
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_attachmentsIdDownloadGet(self):
# first upload a new attachment.
testUrl = "/api/attachments"
uploadUrl = testUrl + "/upload"
serializer_class = AttachmentSerializer
payload = fakedata.AttachmentTestDataCreate()
rawData = "TEST"
jsonString = json.dumps(payload)
fileData = SimpleUploadedFile("file.txt", rawData.encode('utf-8') )
form = {
"file": fileData,
"item": jsonString,
}
response = self.client.post(uploadUrl, data=form)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
# download the attachment.
downloadUrl = testUrl + "/" + str(createdId) + "/download"
response = self.client.get(downloadUrl)
# Check that the response is 200 OK.
assert status.HTTP_200_OK == response.status_code
parsed = response.content.decode("utf-8")
# response should match the contents sent.
assert rawData==parsed
# Cleanup:
deleteUrl = testUrl + "/" + str(createdId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
def test_creditTradeIdNotesGet(self):
# start by creating a credit trade.
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
userId = self.createUser(fuelSupplierId)
creditTradeId = self.createCreditTrade(fuelSupplierId, userId)
fakeNote = fakedata.NoteTestDataCreate()
testUrl = "/api/credittrades/" + str(creditTradeId) + "/notes"
payload = fakedata.NoteTestDataCreate()
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
# Cleanup the Note
deleteUrl = "/api/notes/" + str(createdId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
# Cleanup
self.deleteCreditTrade(creditTradeId)
self.deleteUser(userId)
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_credittradesIdAttachmentsGet(self):
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
userId = self.createUser(fuelSupplierId)
creditTradeId = self.createCreditTrade(fuelSupplierId, userId)
uploadUrl = "/api/credittrades/" + str(creditTradeId) + "/attachments"
payload = fakedata.AttachmentTestDataCreate()
rawData = "TEST"
jsonString = json.dumps(payload)
fileData = SimpleUploadedFile("file.txt", rawData.encode('utf-8') )
form = {
"file": fileData,
"item": jsonString,
}
response = self.client.post(uploadUrl, data=form)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
testUrl = "/api/attachments"
# download the attachment.
downloadUrl = testUrl + "/" + str(createdId) + "/download"
response = self.client.get(downloadUrl)
# Check that the response is 200 OK.
assert status.HTTP_200_OK == response.status_code
parsed = response.content.decode("utf-8")
# response should match the contents sent.
assert rawData==parsed
# Cleanup:
deleteUrl = testUrl + "/" + str(createdId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
# Cleanup
self.deleteCreditTrade(creditTradeId)
self.deleteUser(userId)
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_credittradesIdHistoryGet(self):
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
userId = self.createUser(fuelSupplierId)
creditTradeId = self.createCreditTrade(fuelSupplierId, userId)
fakeHistory = fakedata.HistoryTestDataCreate()
testUrl = "/api/credittrades/" + str(creditTradeId) + "/history"
payload = fakedata.HistoryTestDataCreate()
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
# Cleanup the History
deleteUrl = "/api/histories/" + str(createdId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
testUrl = "/api/credittrades/" + str(creditTradeId) + "/history"
payload = fakedata.HistoryTestDataCreate()
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
# Cleanup
self.deleteCreditTrade(creditTradeId)
self.deleteUser(userId)
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_credittradeSearchGet(self):
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
userId = self.createUser(fuelSupplierId)
creditTradeId = self.createCreditTrade(fuelSupplierId, userId)
# do a search
testUrl = "/api/credittrades/search"
response = self.client.get(testUrl)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
# Cleanup
self.deleteCreditTrade(creditTradeId)
self.deleteUser(userId)
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_usersCurrentFavourites(self):
# create a user
groupId = self.createGroup()
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
userId = self.createUser(fuelSupplierId)
# add a favourite
fakeFavourite = fakedata.UserFavouriteTestDataCreate()
testUrl = "/api/users/current/favourites"
jsonString = json.dumps(fakeFavourite)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
# update a favourite
fakeFavourite = fakedata.UserFavouriteTestDataUpdate()
payload = [{
'type': fakeFavourite['type'],
'name': fakeFavourite['name'],
'value': fakeFavourite['value'],
'isDefault': fakeFavourite['isDefault'],
'user': userId
}]
jsonString = json.dumps(payload)
response = self.client.put(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# search for the favourite
response = self.client.get(testUrl + "/search")
assert status.HTTP_200_OK == response.status_code
# delete favourite
deleteUrl = testUrl + "/" + str(createdId) + "/delete"
response = self.client.post(deleteUrl)
# cleanup
self.deleteUser (userId)
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_usersCurrentGet(self):
# The auth layer is out of scope; in future, add a check here that the returned user matches the logged-in user.
groupId = self.createGroup()
# create a user.
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
userId = self.createUser(fuelSupplierId)
testUrl="/api/users/current"
# List:
response = self.client.get(testUrl)
assert status.HTTP_200_OK == response.status_code
self.deleteUser (userId)
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_fuelsuppliersIdAttachmentsGet(self):
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
uploadUrl = "/api/fuelsuppliers/" + str(fuelSupplierId) + "/attachments"
payload = fakedata.AttachmentTestDataCreate()
rawData = "TEST"
jsonString = json.dumps(payload)
fileData = SimpleUploadedFile("file.txt", rawData.encode('utf-8') )
form = {
"file": fileData,
"item": jsonString,
}
response = self.client.post(uploadUrl, data=form)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
testUrl = "/api/attachments"
# download the attachment.
downloadUrl = testUrl + "/" + str(createdId) + "/download"
response = self.client.get(downloadUrl)
# Check that the response is 200 OK.
assert status.HTTP_200_OK == response.status_code
parsed = response.content.decode("utf-8")
# response should match the contents sent.
assert rawData==parsed
# Cleanup:
deleteUrl = testUrl + "/" + str(createdId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
# Cleanup
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_fuelsuppliersIdHistoryGet(self):
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
fakeHistory = fakedata.HistoryTestDataCreate()
testUrl = "/api/fuelsuppliers/" + str(fuelSupplierId) + "/history"
payload = fakedata.HistoryTestDataCreate()
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
# Cleanup the History
deleteUrl = "/api/histories/" + str(createdId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
# Cleanup
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_fuelsuppliersIdNotesGet(self):
# start by creating a credit trade.
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
fakeNote = fakedata.NoteTestDataCreate()
testUrl = "/api/fuelsuppliers/" + str(fuelSupplierId) + "/notes"
payload = fakedata.NoteTestDataCreate()
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
# Cleanup the Note
deleteUrl = "/api/notes/" + str(createdId) + "/delete"
response = self.client.post(deleteUrl)
# Check that the response is OK.
assert status.HTTP_204_NO_CONTENT == response.status_code
# Cleanup
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_fuelsuppliersSearchGet(self):
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
# do a search
testUrl = "/api/fuelsuppliers/search"
response = self.client.get(testUrl)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
# Cleanup
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_groupsIdUsersGet(self):
# create a group.
groupId = self.createGroup()
# create a user.
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
userId = self.createUser(fuelSupplierId)
# add user to group.
userGroupUrl = "/api/users/" + str(groupId) + "/groups"
# create a new group membership.
payload = {'active': True, 'group':groupId, 'user':userId}
jsonString = json.dumps(payload)
response = self.client.post(userGroupUrl,content_type='application/json', data=jsonString)
assert status.HTTP_200_OK == response.status_code
# test the get
response = self.client.get(userGroupUrl)
assert status.HTTP_200_OK == response.status_code
testUrl = "/api/groups/" + str(groupId)
# get the users in the group.
response = self.client.get(testUrl)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
# should match
# cleanup
self.deleteGroup (groupId)
self.deleteUser (userId)
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_rolesIdPermissionsGet(self):
# create a role.
roleId = self.createRole()
# create a permission.
permissionId = self.createPermission()
rolePermissionUrl = "/api/roles/" + str(roleId) + "/permissions"
# create a new role permission.
payload = {'role':roleId, 'permission':permissionId}
jsonString = json.dumps(payload)
response = self.client.post(rolePermissionUrl,content_type='application/json', data=jsonString)
assert status.HTTP_200_OK == response.status_code
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
rolePermissionId = data['id']
# test the get
response = self.client.get(rolePermissionUrl)
assert status.HTTP_200_OK == response.status_code
# test the put. This will also delete the RolePermission.
payload = []
jsonString = json.dumps(payload)
response = self.client.put(rolePermissionUrl,content_type='application/json', data=jsonString)
assert status.HTTP_200_OK == response.status_code
# cleanup
self.deleteRole (roleId)
self.deletePermission(permissionId)
def test_rolesIdUsersGet(self):
# create a role.
roleId = self.createRole()
# create a user.
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
userId = self.createUser(fuelSupplierId)
# add role to user.
userRoleUrl = "/api/users/" + str(userId) + "/roles"
# create a new UserRole.
payload = {
'effectiveDate': '2000-01-01',
'expiryDate': None,
'user': userId,
'role': roleId
}
jsonString = json.dumps(payload)
response = self.client.post(userRoleUrl,content_type='application/json', data=jsonString)
assert status.HTTP_200_OK == response.status_code
# test the get
response = self.client.get(userRoleUrl)
assert status.HTTP_200_OK == response.status_code
testUrl = "/api/roles/" + str(roleId)
# get the users in the group.
response = self.client.get(testUrl)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
# test the PUT - this will clear the user role map.
payload = []
jsonString = json.dumps(payload)
response = self.client.put(userRoleUrl,content_type='application/json', data=jsonString)
assert status.HTTP_200_OK == response.status_code
# cleanup
self.deleteRole (roleId)
self.deleteUser (userId)
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_usersIdFavourites(self):
# create a user
groupId = self.createGroup()
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
userId = self.createUser(fuelSupplierId)
# add a favourite
fakeFavourite = fakedata.UserFavouriteTestDataCreate()
payload = {
'type': fakeFavourite['type'],
'name': fakeFavourite['name'],
'value': fakeFavourite['value'],
'isDefault': fakeFavourite['isDefault'],
'user': userId
}
testUrl = "/api/users/" + str(userId) + "/favourites"
jsonString = json.dumps(payload)
response = self.client.post(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
createdId = data['id']
# update a favourite
fakeFavourite = fakedata.UserFavouriteTestDataUpdate()
payload = [{
'type': fakeFavourite['type'],
'name': fakeFavourite['name'],
'value': fakeFavourite['value'],
'isDefault': fakeFavourite['isDefault'],
'user': userId
}]
jsonString = json.dumps(payload)
response = self.client.put(testUrl, content_type='application/json', data=jsonString)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# delete favourite
deleteUrl = testUrl + "/" + str(createdId) + "/delete"
response = self.client.post(deleteUrl)
# cleanup
self.deleteUser (userId)
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_usersIdGroupsPut(self):
# create a group.
groupId = self.createGroup()
# create a user.
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
userId = self.createUser(fuelSupplierId)
# add group to user.
userGroupUrl = "/api/users/" + str(userId) + "/groups"
# create a new group membership.
payload = {
'active': True,
'user': userId,
'group': groupId
}
jsonString = json.dumps(payload)
response = self.client.post(userGroupUrl,content_type='application/json', data=jsonString)
assert status.HTTP_200_OK == response.status_code
# test the PUT
payload = []
jsonString = json.dumps(payload)
response = self.client.put(userGroupUrl,content_type='application/json', data=jsonString)
assert status.HTTP_200_OK == response.status_code
# cleanup
self.deleteGroup (groupId)
self.deleteUser (userId)
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_usersIdNotificationsGet(self):
# create a user.
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
userId = self.createUser(fuelSupplierId)
# create a credit trade and offer.
offerId = self.createOffer(fuelSupplierId)
creditTradeId = self.createCreditTrade(fuelSupplierId,userId)
notificationEventId = self.createNotificationEvent(creditTradeId, offerId)
# add notification to user.
userNotificationUrl = "/api/users/" + str(userId) + "/notifications"
# create a new user notification.
payload = {
'event': notificationEventId,
'hasBeenViewed': False,
'isWatchNotification': False,
'user':userId
}
jsonString = json.dumps(payload)
response = self.client.post(userNotificationUrl,content_type='application/json', data=jsonString)
assert status.HTTP_200_OK == response.status_code
# test the Get
response = self.client.get(userNotificationUrl)
assert status.HTTP_200_OK == response.status_code
# cleanup
self.deleteNotificationEvent(notificationEventId)
self.deleteOffer(offerId)
self.deleteCreditTrade(creditTradeId)
self.deleteUser (userId)
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_usersIdPermissionsGet(self):
# create a user.
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
userId = self.createUser(fuelSupplierId)
# assign permissions to the user.
#TODO add that.
userPermissionUrl = "/api/users/" + str(userId) + "/permissions"
# test the Get
response = self.client.get(userPermissionUrl)
assert status.HTTP_200_OK == response.status_code
# cleanup
self.deleteUser (userId)
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_usersIdRolesPut(self):
# create a role.
roleId = self.createRole()
# create a user.
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
userId = self.createUser(fuelSupplierId)
# add role to user.
userRoleUrl = "/api/users/" + str(userId) + "/roles"
# create a new UserRole.
payload = {
'effectiveDate': '2000-01-01',
'expiryDate': None,
'user': userId,
'role': roleId
}
jsonString = json.dumps(payload)
response = self.client.post(userRoleUrl,content_type='application/json', data=jsonString)
assert status.HTTP_200_OK == response.status_code
# test the PUT
payload = []
jsonString = json.dumps(payload)
response = self.client.put(userRoleUrl,content_type='application/json', data=jsonString)
assert status.HTTP_200_OK == response.status_code
# cleanup
self.deleteRole (roleId)
self.deleteUser (userId)
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_usersSearchGet(self):
contactId = self.createContact()
fuelSupplierId = self.createFuelSupplier(contactId)
userId = self.createUser(fuelSupplierId)
# do a search
testUrl = "/api/users/search"
response = self.client.get(testUrl)
# Check that the response is OK.
assert status.HTTP_200_OK == response.status_code
# parse the response.
jsonString = response.content.decode("utf-8")
data = json.loads(jsonString)
# Cleanup
self.deleteUser(userId)
self.deleteFuelSupplier(fuelSupplierId)
self.deleteContact(contactId)
def test_ApiViewer(self):
testUrl = "/api/"
response = self.client.get(testUrl)
assert status.HTTP_200_OK == response.status_code
if __name__ == '__main__':
unittest.main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module is for internal use only; no backwards-compatibility guarantees.
The classes in this file keep shared state, and organize metrics information.
Available classes:
- MetricKey - Internal key for a metric.
- MetricResult - Current status of a metric's updates/commits.
- _MetricsEnvironment - Keeps track of MetricsContainer and other metrics
information for every single execution working thread.
- MetricsContainer - Holds the metrics of a single step and a single
unit-of-commit (bundle).
"""
import threading
from collections import defaultdict
from apache_beam.metrics.cells import CounterCell
from apache_beam.metrics.cells import DistributionCell
class MetricKey(object):
"""Key used to identify instance of metric cell.
Metrics are internally keyed by the name of the step they are associated with and
the name of the metric.
"""
def __init__(self, step, metric):
"""Initializes ``MetricKey``.
Args:
step: A string with the step this metric cell is part of.
metric: A ``MetricName`` that identifies a metric.
"""
self.step = step
self.metric = metric
def __eq__(self, other):
return (self.step == other.step and
self.metric == other.metric)
def __str__(self):
return 'MetricKey(step={}, metric={})'.format(
self.step, self.metric)
def __hash__(self):
return hash((self.step, self.metric))
class MetricResult(object):
"""Keeps track of the status of a metric within a single bundle.
It contains the physical and logical updates to the metric. Physical updates
are updates that have not necessarily been committed, but that have been made
during pipeline execution. Logical updates are updates that have been
committed.
Attributes:
key: A ``MetricKey`` that identifies the metric and bundle of this result.
committed: The committed updates of the metric. This attribute's type is
that of the underlying cell data (e.g. int, DistributionData).
attempted: The attempted (physical) updates of the metric. This attribute's
type is that of the underlying cell data (e.g. int, DistributionData).
"""
def __init__(self, key, committed, attempted):
"""Initializes ``MetricResult``.
Args:
key: A ``MetricKey`` object.
committed: Metric data that has been committed (e.g. logical updates)
attempted: Metric data that has been attempted (e.g. physical updates)
"""
self.key = key
self.committed = committed
self.attempted = attempted
def __eq__(self, other):
return (self.key == other.key and
self.committed == other.committed and
self.attempted == other.attempted)
def __str__(self):
return 'MetricResult(key={}, committed={}, attempted={})'.format(
self.key, str(self.committed), str(self.attempted))
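# Illustrative sketch (not part of the original module): two MetricResult
# objects compare equal when their key, committed and attempted values all
# match. The step and metric names below are made up; in real use the metric
# is a ``MetricName`` instance.
#
#   key = MetricKey('step1', 'elements')
#   MetricResult(key, committed=10, attempted=12) == \
#       MetricResult(key, committed=10, attempted=12)   # -> True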
class _MetricsEnvironment(object):
"""Holds the MetricsContainer for every thread and other metric information.
This class is not meant to be instantiated, instead being used to keep
track of global state.
"""
def __init__(self):
self.METRICS_SUPPORTED = False
self._METRICS_SUPPORTED_LOCK = threading.Lock()
self.PER_THREAD = threading.local()
self.set_container_stack()
def set_container_stack(self):
if not hasattr(self.PER_THREAD, 'container'):
self.PER_THREAD.container = []
def container_stack(self):
self.set_container_stack()
return self.PER_THREAD.container
def set_metrics_supported(self, supported):
self.set_container_stack()
with self._METRICS_SUPPORTED_LOCK:
self.METRICS_SUPPORTED = supported
def current_container(self):
self.set_container_stack()
index = len(self.PER_THREAD.container) - 1
if index < 0:
return None
return self.PER_THREAD.container[index]
def set_current_container(self, container):
self.set_container_stack()
self.PER_THREAD.container.append(container)
def unset_current_container(self):
self.set_container_stack()
self.PER_THREAD.container.pop()
MetricsEnvironment = _MetricsEnvironment()
class MetricsContainer(object):
"""Holds the metrics of a single step and a single bundle."""
def __init__(self, step_name):
self.step_name = step_name
self.counters = defaultdict(lambda: CounterCell())
self.distributions = defaultdict(lambda: DistributionCell())
def get_counter(self, metric_name):
return self.counters[metric_name]
def get_distribution(self, metric_name):
return self.distributions[metric_name]
def _get_updates(self, filter=None):
"""Return cumulative values of metrics filtered according to a lambda.
This returns all the cumulative values for all metrics after filtering
them with the filter parameter lambda function. If None is passed in,
then cumulative values for all metrics are returned.
"""
if filter is None:
filter = lambda v: True
counters = {MetricKey(self.step_name, k): v.get_cumulative()
for k, v in self.counters.items()
if filter(v)}
distributions = {MetricKey(self.step_name, k): v.get_cumulative()
for k, v in self.distributions.items()
if filter(v)}
return MetricUpdates(counters, distributions)
def get_updates(self):
"""Return cumulative values of metrics that changed since the last commit.
This returns all the cumulative values for all metrics only if their state
prior to the function call was COMMITTING or DIRTY.
"""
return self._get_updates(filter=lambda v: v.commit.before_commit())
def get_cumulative(self):
"""Return MetricUpdates with cumulative values of all metrics in container.
This returns all the cumulative values for all metrics regardless of whether
they have been committed or not.
"""
return self._get_updates()
class ScopedMetricsContainer(object):
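"""Context manager that makes ``container`` the current MetricsContainer.
Entering pushes the container onto the per-thread stack kept by
MetricsEnvironment; exiting pops it off again.
"""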
def __init__(self, container=None):
self._stack = MetricsEnvironment.container_stack()
self._container = container
def enter(self):
self._stack.append(self._container)
def exit(self):
self._stack.pop()
def __enter__(self):
self.enter()
def __exit__(self, type, value, traceback):
self.exit()
class MetricUpdates(object):
"""Contains updates for several metrics.
A metric update is an object containing information to update a metric.
For Distribution metrics, it is DistributionData, and for Counter metrics,
it's an int.
"""
def __init__(self, counters=None, distributions=None):
"""Create a MetricUpdates object.
Args:
counters: Dictionary of MetricKey:MetricUpdate updates.
distributions: Dictionary of MetricKey:MetricUpdate objects.
"""
self.counters = counters or {}
self.distributions = distributions or {}
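# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes the
# CounterCell.inc() API from apache_beam.metrics.cells; the step and metric
# names below are made up for the example.
if __name__ == '__main__':
    container = MetricsContainer('my_step')
    with ScopedMetricsContainer(container):
        # Worker code can look up the active container for the current thread.
        active = MetricsEnvironment.current_container()
        active.get_counter('elements_processed').inc(3)
    for key, value in container.get_cumulative().counters.items():
        print('{} -> {}'.format(key, value))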
|
|
#!/usr/bin/env python3
#
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import glob
import os
from os import path
import shutil
import sys
from nototools import unicode_data
"""Create aliases in target directory.
In addition to links/copies named with aliased sequences, this can also
create canonically named aliases/copies, if requested."""
DATA_ROOT = path.dirname(path.abspath(__file__))
def str_to_seq(seq_str):
res = [int(s, 16) for s in seq_str.split('_')]
if 0xfe0f in res:
print('0xfe0f in file name: %s' % seq_str)
res = [x for x in res if x != 0xfe0f]
return tuple(res)
def seq_to_str(seq):
return '_'.join('%04x' % cp for cp in seq)
def read_default_unknown_flag_aliases():
unknown_flag_path = path.join(DATA_ROOT, 'unknown_flag_aliases.txt')
return read_emoji_aliases(unknown_flag_path)
def read_default_emoji_aliases():
alias_path = path.join(DATA_ROOT, 'emoji_aliases.txt')
return read_emoji_aliases(alias_path)
def read_emoji_aliases(filename):
result = {}
with open(filename, 'r') as f:
for line in f:
ix = line.find('#')
if (ix > -1):
line = line[:ix]
line = line.strip()
if not line:
continue
als, trg = (s.strip() for s in line.split(';'))
try:
als_seq = tuple([int(x, 16) for x in als.split('_')])
trg_seq = tuple([int(x, 16) for x in trg.split('_')])
except ValueError:
print('cannot process alias %s -> %s' % (als, trg))
continue
result[als_seq] = trg_seq
return result
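# Illustrative sketch (not part of the original script): the alias file format
# parsed above is one "alias;target" pair per line, each side a sequence of
# underscore-joined hex code points, with '#' starting a comment. The code
# points and file name below are arbitrary placeholders.
def _demo_read_emoji_aliases():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
        f.write('fe82b;1f1fa_1f1f3  # unknown flag -> UN flag\n')
        demo_path = f.name
    aliases = read_emoji_aliases(demo_path)
    print(aliases)  # maps (0xfe82b,) to (0x1f1fa, 0x1f1f3)
    os.remove(demo_path)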
def add_aliases(
srcdir, dstdir, aliasfile, prefix, ext, replace=False, copy=False,
canonical_names=False, dry_run=False):
"""Use aliasfile to create aliases of files in srcdir matching prefix/ext in
dstdir. If dstdir is null, use srcdir as dstdir. If replace is false
and a file already exists in dstdir, report and do nothing. If copy is false
create a symlink, else create a copy.
If canonical_names is true, check all source files and generate aliases/copies
using the canonical name if different from the existing name.
If dry_run is true, report what would be done. Dstdir will be created if
necessary, even if dry_run is true."""
if not path.isdir(srcdir):
print('%s is not a directory' % srcdir, file=sys.stderr)
return
if not dstdir:
dstdir = srcdir
elif not path.isdir(dstdir):
os.makedirs(dstdir)
prefix_len = len(prefix)
suffix_len = len(ext) + 1
filenames = [path.basename(f)
for f in glob.glob(path.join(srcdir, '%s*.%s' % (prefix, ext)))]
seq_to_file = {
str_to_seq(name[prefix_len:-suffix_len]) : name
for name in filenames}
aliases = read_emoji_aliases(aliasfile)
aliases_to_create = {}
aliases_to_replace = []
alias_exists = False
def check_alias_seq(seq):
    nonlocal alias_exists
alias_str = seq_to_str(seq)
alias_name = '%s%s.%s' % (prefix, alias_str, ext)
alias_path = path.join(dstdir, alias_name)
if path.exists(alias_path):
if replace:
aliases_to_replace.append(alias_name)
else:
print('alias %s exists' % alias_str, file=sys.stderr)
alias_exists = True
return None
return alias_name
canonical_to_file = {}
for als, trg in sorted(aliases.items()):
if trg not in seq_to_file:
print('target %s for %s does not exist' % (
seq_to_str(trg), seq_to_str(als)), file=sys.stderr)
continue
alias_name = check_alias_seq(als)
if alias_name:
target_file = seq_to_file[trg]
aliases_to_create[alias_name] = target_file
if canonical_names:
canonical_seq = unicode_data.get_canonical_emoji_sequence(als)
if canonical_seq and canonical_seq != als:
canonical_alias_name = check_alias_seq(canonical_seq)
if canonical_alias_name:
canonical_to_file[canonical_alias_name] = target_file
if canonical_names:
print('adding %d canonical aliases' % len(canonical_to_file))
for seq, f in seq_to_file.items():
canonical_seq = unicode_data.get_canonical_emoji_sequence(seq)
if canonical_seq and canonical_seq != seq:
alias_name = check_alias_seq(canonical_seq)
if alias_name:
canonical_to_file[alias_name] = f
print('adding %d total canonical sequences' % len(canonical_to_file))
aliases_to_create.update(canonical_to_file)
if replace:
if not dry_run:
for k in sorted(aliases_to_replace):
os.remove(path.join(dstdir, k))
print('replacing %d files' % len(aliases_to_replace))
elif alias_exists:
print('aborting, aliases exist.', file=sys.stderr)
return
for k, v in sorted(aliases_to_create.items()):
if dry_run:
msg = 'replace ' if k in aliases_to_replace else ''
print('%s%s -> %s' % (msg, k, v))
else:
try:
if copy:
shutil.copy2(path.join(srcdir, v), path.join(dstdir, k))
else:
# fix this to create relative symlinks
if srcdir == dstdir:
os.symlink(v, path.join(dstdir, k))
else:
raise Exception('can\'t create cross-directory symlinks yet')
except Exception as e:
print('failed to create %s -> %s' % (k, v), file=sys.stderr)
raise Exception('oops, ' + str(e))
print('created %d %s' % (
len(aliases_to_create), 'copies' if copy else 'symlinks'))
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-s', '--srcdir', help='directory containing files to alias',
required=True, metavar='dir')
parser.add_argument(
'-d', '--dstdir', help='directory to write aliases, default srcdir',
metavar='dir')
parser.add_argument(
'-a', '--aliasfile', help='alias file (default emoji_aliases.txt)',
metavar='file', default='emoji_aliases.txt')
parser.add_argument(
'-p', '--prefix', help='file name prefix (default emoji_u)',
metavar='pfx', default='emoji_u')
parser.add_argument(
'-e', '--ext', help='file name extension (default png)',
choices=['ai', 'png', 'svg'], default='png')
parser.add_argument(
'-r', '--replace', help='replace existing files/aliases',
action='store_true')
parser.add_argument(
'-c', '--copy', help='create a copy of the file, not a symlink',
action='store_true')
parser.add_argument(
'--canonical_names', help='include extra copies with canonical names '
'(including fe0f emoji presentation character)', action='store_true')
parser.add_argument(
'-n', '--dry_run', help='print out aliases to create only',
action='store_true')
args = parser.parse_args()
add_aliases(
args.srcdir, args.dstdir, args.aliasfile, args.prefix, args.ext,
args.replace, args.copy, args.canonical_names, args.dry_run)
if __name__ == '__main__':
main()
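# Illustrative invocation (not part of the original script); the script and
# directory names are placeholders:
#
#   python add_aliases.py -s png/128 -a emoji_aliases.txt --dry_run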
|
|
import distutils.sysconfig
import itertools
import os
import os.path
import sys
import sysconfig
from glob import iglob
from typing import List
import setuptools.command.build_ext
from setuptools import Distribution, Extension, setup
def _get_turbodbc_libname():
builder = setuptools.command.build_ext.build_ext(Distribution())
full_name = builder.get_ext_filename("libturbodbc")
without_lib = full_name.split("lib", 1)[-1]
without_so = without_lib.rsplit(".so", 1)[0]
return without_so
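# For example (illustrative): if get_ext_filename returns
# "libturbodbc.cpython-39-x86_64-linux-gnu.so", the name resolved here is
# "turbodbc.cpython-39-x86_64-linux-gnu", which is later passed to the linker
# through the Extension 'libraries' list.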
def _get_distutils_build_directory():
"""
Returns the directory distutils uses to build its files.
We need this directory since we build extensions which have to link
other ones.
"""
pattern = "lib.{platform}-{major}.{minor}"
return os.path.join(
"build",
pattern.format(
platform=sysconfig.get_platform(),
major=sys.version_info[0],
minor=sys.version_info[1],
),
)
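# For example (illustrative): on a 64-bit Linux build with CPython 3.9 this
# resolves to something like "build/lib.linux-x86_64-3.9"; the exact platform
# tag comes from sysconfig.get_platform().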
def _get_source_files(directory):
path = os.path.join("src", directory)
iterable_sources = (
iglob(os.path.join(root, "*.cpp")) for root, dirs, files in os.walk(path)
)
source_files = itertools.chain.from_iterable(iterable_sources)
return list(source_files)
def _remove_strict_prototype_option_from_distutils_config():
strict_prototypes = "-Wstrict-prototypes"
config = distutils.sysconfig.get_config_vars()
for key, value in config.items():
if strict_prototypes in str(value):
config[key] = config[key].replace(strict_prototypes, "") # type: ignore
_remove_strict_prototype_option_from_distutils_config()
def _has_arrow_headers():
try:
import pyarrow # noqa: F401
return True
except ImportError:
return False
def _has_numpy_headers():
try:
import numpy # noqa: F401
return True
except ImportError:
return False
class _deferred_pybind11_include:
def __str__(self):
import pybind11
return pybind11.get_include()
extra_compile_args = []
hidden_visibility_args = []
include_dirs = ["include/", _deferred_pybind11_include()]
library_dirs = [_get_distutils_build_directory()]
python_module_link_args = []
base_library_link_args: List[str] = []
if sys.platform == "darwin":
extra_compile_args.append("--std=c++11")
extra_compile_args.append("--stdlib=libc++")
extra_compile_args.append("-mmacosx-version-min=10.9")
hidden_visibility_args.append("-fvisibility=hidden")
include_dirs.append(os.getenv("UNIXODBC_INCLUDE_DIR", "/usr/local/include/"))
library_dirs.append(os.getenv("UNIXODBC_LIBRARY_DIR", "/usr/local/lib/"))
config_vars = distutils.sysconfig.get_config_vars()
config_vars["LDSHARED"] = config_vars["LDSHARED"].replace("-bundle", "") # type: ignore
python_module_link_args.append("-bundle")
builder = setuptools.command.build_ext.build_ext(Distribution())
full_name = builder.get_ext_filename("libturbodbc")
base_library_link_args.append(f"-Wl,-dylib_install_name,@loader_path/{full_name}")
base_library_link_args.append("-dynamiclib")
odbclib = "odbc"
elif sys.platform == "win32":
extra_compile_args.append("-DNOMINMAX")
if "BOOST_ROOT" in os.environ:
include_dirs.append(os.getenv("BOOST_ROOT"))
library_dirs.append(os.path.join(os.getenv("BOOST_ROOT"), "stage", "lib"))
library_dirs.append(os.path.join(os.getenv("BOOST_ROOT"), "lib64-msvc-14.0"))
else:
print("warning: BOOST_ROOT enviroment variable not set")
odbclib = "odbc32"
else:
extra_compile_args.append("--std=c++11")
hidden_visibility_args.append("-fvisibility=hidden")
python_module_link_args.append("-Wl,-rpath,$ORIGIN")
if "UNIXODBC_INCLUDE_DIR" in os.environ:
include_dirs.append(os.getenv("UNIXODBC_INCLUDE_DIR"))
if "UNIXODBC_LIBRARY_DIR" in os.environ:
library_dirs.append(os.getenv("UNIXODBC_LIBRARY_DIR"))
odbclib = "odbc"
def get_extension_modules():
extension_modules = []
"""
Extension module which is actually a plain C++ library without Python bindings
"""
turbodbc_sources = _get_source_files("cpp_odbc") + _get_source_files("turbodbc")
turbodbc_library = Extension(
"libturbodbc",
sources=turbodbc_sources,
include_dirs=include_dirs,
extra_compile_args=extra_compile_args,
extra_link_args=base_library_link_args,
libraries=[odbclib],
library_dirs=library_dirs,
)
if sys.platform == "win32":
turbodbc_libs = []
else:
turbodbc_libs = [_get_turbodbc_libname()]
extension_modules.append(turbodbc_library)
"""
An extension module which contains the main Python bindings for turbodbc
"""
turbodbc_python_sources = _get_source_files("turbodbc_python")
if sys.platform == "win32":
turbodbc_python_sources = turbodbc_sources + turbodbc_python_sources
turbodbc_python = Extension(
"turbodbc_intern",
sources=turbodbc_python_sources,
include_dirs=include_dirs,
extra_compile_args=extra_compile_args + hidden_visibility_args,
libraries=[odbclib] + turbodbc_libs,
extra_link_args=python_module_link_args,
library_dirs=library_dirs,
)
extension_modules.append(turbodbc_python)
"""
An extension module which contains Python bindings which require numpy support
to work. Not included in the standard Python bindings so this can stay optional.
"""
if _has_numpy_headers():
import numpy
turbodbc_numpy_sources = _get_source_files("turbodbc_numpy")
if sys.platform == "win32":
turbodbc_numpy_sources = turbodbc_sources + turbodbc_numpy_sources
turbodbc_numpy = Extension(
"turbodbc_numpy_support",
sources=turbodbc_numpy_sources,
include_dirs=include_dirs + [numpy.get_include()],
extra_compile_args=extra_compile_args + hidden_visibility_args,
libraries=[odbclib] + turbodbc_libs,
extra_link_args=python_module_link_args,
library_dirs=library_dirs,
)
extension_modules.append(turbodbc_numpy)
"""
An extension module which contains Python bindings which require Apache Arrow
support to work. Not included in the standard Python bindings so this can
stay optional.
"""
if _has_arrow_headers():
import pyarrow
# Make default named pyarrow shared libs available.
pyarrow.create_library_symlinks()
pyarrow_location = os.path.dirname(pyarrow.__file__)
# For now, assume that we build against bundled pyarrow releases.
pyarrow_include_dir = os.path.join(pyarrow_location, "include")
turbodbc_arrow_sources = _get_source_files("turbodbc_arrow")
pyarrow_module_link_args = list(python_module_link_args)
if sys.platform == "win32":
turbodbc_arrow_sources = turbodbc_sources + turbodbc_arrow_sources
elif sys.platform == "darwin":
pyarrow_module_link_args.append("-Wl,-rpath,@loader_path/pyarrow")
else:
pyarrow_module_link_args.append("-Wl,-rpath,$ORIGIN/pyarrow")
turbodbc_arrow = Extension(
"turbodbc_arrow_support",
sources=turbodbc_arrow_sources,
include_dirs=include_dirs + [pyarrow_include_dir],
extra_compile_args=extra_compile_args + hidden_visibility_args,
libraries=[odbclib, "arrow", "arrow_python"] + turbodbc_libs,
extra_link_args=pyarrow_module_link_args,
library_dirs=library_dirs + [pyarrow_location],
)
extension_modules.append(turbodbc_arrow)
return extension_modules
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, "README.md")) as f:
long_description = f.read()
setup(
name="turbodbc",
version="4.5.1",
description="turbodbc is a Python DB API 2.0 compatible ODBC driver",
long_description=long_description,
long_description_content_type="text/markdown",
include_package_data=True,
url="https://github.com/blue-yonder/turbodbc",
author="Michael Koenig",
author_email="michael.koenig@blue-yonder.com",
packages=["turbodbc"],
setup_requires=[
"pybind11>=2.2.0",
"pyarrow>=1,<7.1.0",
"numpy>=1.18",
],
install_requires=[],
extras_require={"arrow": ["pyarrow>=1.0,<7.1.0"], "numpy": "numpy>=1.19.0"},
python_requires=">=3.8",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Programming Language :: C++",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Database",
],
ext_modules=get_extension_modules(),
)
|
|
"""
Routines for filling missing data
"""
import operator
import numpy as np
from distutils.version import LooseVersion
from pandas._libs import algos, lib
from pandas.compat import range, string_types
from pandas.core.dtypes.common import (
is_numeric_v_string_like,
is_float_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_integer_dtype,
is_scalar,
is_integer,
needs_i8_conversion,
ensure_float64)
from pandas.core.dtypes.cast import infer_dtype_from_array
from pandas.core.dtypes.missing import isna
def mask_missing(arr, values_to_mask):
"""
Return a masking array of same size/shape as arr
with entries equaling any member of values_to_mask set to True
"""
dtype, values_to_mask = infer_dtype_from_array(values_to_mask)
try:
values_to_mask = np.array(values_to_mask, dtype=dtype)
except Exception:
values_to_mask = np.array(values_to_mask, dtype=object)
na_mask = isna(values_to_mask)
nonna = values_to_mask[~na_mask]
mask = None
for x in nonna:
if mask is None:
# numpy elementwise comparison warning
if is_numeric_v_string_like(arr, x):
mask = False
else:
mask = arr == x
# if x is a string and arr is not, then we get False and we must
# expand the mask to size arr.shape
if is_scalar(mask):
mask = np.zeros(arr.shape, dtype=bool)
else:
# numpy elementwise comparison warning
if is_numeric_v_string_like(arr, x):
mask |= False
else:
mask |= arr == x
if na_mask.any():
if mask is None:
mask = isna(arr)
else:
mask |= isna(arr)
# GH 21977
if mask is None:
mask = np.zeros(arr.shape, dtype=bool)
return mask
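# Illustrative sketch (not part of the original module):
#
#   arr = np.array([1.0, 2.0, np.nan, 4.0])
#   mask_missing(arr, [2.0, np.nan])
#   -> array([False,  True,  True, False])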
def clean_fill_method(method, allow_nearest=False):
# asfreq is compat for resampling
if method in [None, 'asfreq']:
return None
if isinstance(method, string_types):
method = method.lower()
if method == 'ffill':
method = 'pad'
elif method == 'bfill':
method = 'backfill'
valid_methods = ['pad', 'backfill']
expecting = 'pad (ffill) or backfill (bfill)'
if allow_nearest:
valid_methods.append('nearest')
expecting = 'pad (ffill), backfill (bfill) or nearest'
if method not in valid_methods:
msg = ('Invalid fill method. Expecting {expecting}. Got {method}'
.format(expecting=expecting, method=method))
raise ValueError(msg)
return method
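# Illustrative examples (not part of the original module):
#
#   clean_fill_method('ffill')                         # -> 'pad'
#   clean_fill_method('bfill')                         # -> 'backfill'
#   clean_fill_method('nearest', allow_nearest=True)   # -> 'nearest'
#   clean_fill_method('nearest')                       # raises ValueError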
def clean_interp_method(method, **kwargs):
order = kwargs.get('order')
valid = ['linear', 'time', 'index', 'values', 'nearest', 'zero', 'slinear',
'quadratic', 'cubic', 'barycentric', 'polynomial', 'krogh',
'piecewise_polynomial', 'pchip', 'akima', 'spline',
'from_derivatives']
if method in ('spline', 'polynomial') and order is None:
raise ValueError("You must specify the order of the spline or "
"polynomial.")
if method not in valid:
raise ValueError("method must be one of {valid}. Got '{method}' "
"instead.".format(valid=valid, method=method))
return method
def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
limit_direction='forward', limit_area=None, fill_value=None,
bounds_error=False, order=None, **kwargs):
"""
Logic for the 1-d interpolation. The result should be 1-d, inputs
xvalues and yvalues will each be 1-d arrays of the same length.
Bounds_error is currently hardcoded to False since the non-scipy
interpolation methods don't take it as an argument.
"""
# Treat the original, non-scipy methods first.
invalid = isna(yvalues)
valid = ~invalid
if not valid.any():
# have to call np.asarray(xvalues) since xvalues could be an Index
# which can't be mutated
result = np.empty_like(np.asarray(xvalues), dtype=np.float64)
result.fill(np.nan)
return result
if valid.all():
return yvalues
if method == 'time':
if not getattr(xvalues, 'is_all_dates', None):
# if not issubclass(xvalues.dtype.type, np.datetime64):
raise ValueError('time-weighted interpolation only works '
'on Series or DataFrames with a '
'DatetimeIndex')
method = 'values'
valid_limit_directions = ['forward', 'backward', 'both']
limit_direction = limit_direction.lower()
if limit_direction not in valid_limit_directions:
msg = ('Invalid limit_direction: expecting one of {valid!r}, '
'got {invalid!r}.')
raise ValueError(msg.format(valid=valid_limit_directions,
invalid=limit_direction))
if limit_area is not None:
valid_limit_areas = ['inside', 'outside']
limit_area = limit_area.lower()
if limit_area not in valid_limit_areas:
raise ValueError('Invalid limit_area: expecting one of {}, got '
'{}.'.format(valid_limit_areas, limit_area))
# default limit is unlimited GH #16282
if limit is None:
# limit = len(xvalues)
pass
elif not is_integer(limit):
raise ValueError('Limit must be an integer')
elif limit < 1:
raise ValueError('Limit must be greater than 0')
from pandas import Series
ys = Series(yvalues)
# These are sets of index pointers to invalid values... i.e. {0, 1, etc...
all_nans = set(np.flatnonzero(invalid))
start_nans = set(range(ys.first_valid_index()))
end_nans = set(range(1 + ys.last_valid_index(), len(valid)))
mid_nans = all_nans - start_nans - end_nans
# Like the sets above, preserve_nans contains indices of invalid values,
# but in this case, it is the final set of indices that need to be
# preserved as NaN after the interpolation.
# For example if limit_direction='forward' then preserve_nans will
# contain indices of NaNs at the beginning of the series, and NaNs that
are more than 'limit' away from the prior non-NaN.
# set preserve_nans based on direction using _interp_limit
if limit_direction == 'forward':
preserve_nans = start_nans | set(_interp_limit(invalid, limit, 0))
elif limit_direction == 'backward':
preserve_nans = end_nans | set(_interp_limit(invalid, 0, limit))
else:
# both directions... just use _interp_limit
preserve_nans = set(_interp_limit(invalid, limit, limit))
# if limit_area is set, add either mid or outside indices
# to preserve_nans GH #16284
if limit_area == 'inside':
# preserve NaNs on the outside
preserve_nans |= start_nans | end_nans
elif limit_area == 'outside':
# preserve NaNs on the inside
preserve_nans |= mid_nans
# sort preserve_nans and convert to list
preserve_nans = sorted(preserve_nans)
xvalues = getattr(xvalues, 'values', xvalues)
yvalues = getattr(yvalues, 'values', yvalues)
result = yvalues.copy()
if method in ['linear', 'time', 'index', 'values']:
if method in ('values', 'index'):
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if needs_i8_conversion(inds.dtype.type):
inds = inds.view(np.int64)
if inds.dtype == np.object_:
inds = lib.maybe_convert_objects(inds)
else:
inds = xvalues
result[invalid] = np.interp(inds[invalid], inds[valid], yvalues[valid])
result[preserve_nans] = np.nan
return result
sp_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'barycentric', 'krogh', 'spline', 'polynomial',
'from_derivatives', 'piecewise_polynomial', 'pchip', 'akima']
if method in sp_methods:
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if issubclass(inds.dtype.type, np.datetime64):
inds = inds.view(np.int64)
result[invalid] = _interpolate_scipy_wrapper(inds[valid],
yvalues[valid],
inds[invalid],
method=method,
fill_value=fill_value,
bounds_error=bounds_error,
order=order, **kwargs)
result[preserve_nans] = np.nan
return result
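# Illustrative sketch (not part of the original module): the limit /
# limit_direction handling above is what the public Series.interpolate API
# relies on, e.g.
#
#   s = pd.Series([np.nan, 1.0, np.nan, np.nan, 4.0])
#   s.interpolate(method='linear', limit=1, limit_direction='forward')
#
# fills at most one NaN per gap going forward and leaves the leading NaN
# untouched (leading NaNs end up in start_nans and hence in preserve_nans).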
def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
bounds_error=False, order=None, **kwargs):
"""
Passed off to scipy.interpolate.interp1d; method is scipy's kind.
Returns an array interpolated at new_x. Add any new methods to
the list in clean_interp_method.
"""
try:
from scipy import interpolate
# TODO: Why is DatetimeIndex being imported here?
from pandas import DatetimeIndex # noqa
except ImportError:
raise ImportError('{method} interpolation requires SciPy'
.format(method=method))
new_x = np.asarray(new_x)
# ignores some kwargs that could be passed along.
alt_methods = {
'barycentric': interpolate.barycentric_interpolate,
'krogh': interpolate.krogh_interpolate,
'from_derivatives': _from_derivatives,
'piecewise_polynomial': _from_derivatives,
}
if getattr(x, 'is_all_dates', False):
# GH 5975, scipy.interp1d can't handle datetime64s
x, new_x = x._values.astype('i8'), new_x.astype('i8')
if method == 'pchip':
try:
alt_methods['pchip'] = interpolate.pchip_interpolate
except AttributeError:
raise ImportError("Your version of Scipy does not support "
"PCHIP interpolation.")
elif method == 'akima':
try:
from scipy.interpolate import Akima1DInterpolator # noqa
alt_methods['akima'] = _akima_interpolate
except ImportError:
raise ImportError("Your version of Scipy does not support "
"Akima interpolation.")
interp1d_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'polynomial']
if method in interp1d_methods:
if method == 'polynomial':
method = order
terp = interpolate.interp1d(x, y, kind=method, fill_value=fill_value,
bounds_error=bounds_error)
new_y = terp(new_x)
elif method == 'spline':
# GH #10633
if not order:
raise ValueError("order needs to be specified and greater than 0")
terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs)
new_y = terp(new_x)
else:
# GH 7295: need to be able to write for some reason
# in some circumstances: check all three
if not x.flags.writeable:
x = x.copy()
if not y.flags.writeable:
y = y.copy()
if not new_x.flags.writeable:
new_x = new_x.copy()
method = alt_methods[method]
new_y = method(x, y, new_x, **kwargs)
return new_y
def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):
"""
Convenience function for interpolate.BPoly.from_derivatives
Construct a piecewise polynomial in the Bernstein basis, compatible
with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array-likes
yi[i][j] is the j-th derivative known at xi[i]
    order : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
der : int or list
How many derivatives to extract; None for all potentially nonzero
derivatives (that is a number equal to the number of points), or a
        list of derivatives to extract. This number includes the function
value as 0th derivative.
extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on the first and
        last intervals, or to return NaNs. Default: False.
See Also
--------
scipy.interpolate.BPoly.from_derivatives
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R,
"""
import scipy
from scipy import interpolate
if LooseVersion(scipy.__version__) < LooseVersion('0.18.0'):
try:
method = interpolate.piecewise_polynomial_interpolate
return method(xi, yi.reshape(-1, 1), x,
orders=order, der=der)
except AttributeError:
pass
# return the method for compat with scipy version & backwards compat
method = interpolate.BPoly.from_derivatives
m = method(xi, yi.reshape(-1, 1),
orders=order, extrapolate=extrapolate)
return m(x)
def _akima_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for akima interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``.
See `Akima1DInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points), or a list of derivatives to extract. This number
includes the function value as 0th derivative.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
scipy.interpolate.Akima1DInterpolator
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R,
"""
from scipy import interpolate
try:
P = interpolate.Akima1DInterpolator(xi, yi, axis=axis)
except TypeError:
# Scipy earlier than 0.17.0 missing axis
P = interpolate.Akima1DInterpolator(xi, yi)
if der == 0:
return P(x)
elif interpolate._isscalar(der):
return P(x, der=der)
else:
return [P(x, nu) for nu in der]
def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None,
dtype=None):
""" perform an actual interpolation of values, values will be make 2-d if
needed fills inplace, returns the result
"""
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
    # reshape a 1-dim array to 2-d if needed
ndim = values.ndim
if values.ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with "
"axis != 0")
values = values.reshape(tuple((1,) + values.shape))
if fill_value is None:
mask = None
else: # todo create faster fill func without masking
mask = mask_missing(transf(values), fill_value)
method = clean_fill_method(method)
if method == 'pad':
values = transf(pad_2d(
transf(values), limit=limit, mask=mask, dtype=dtype))
else:
values = transf(backfill_2d(
transf(values), limit=limit, mask=mask, dtype=dtype))
# reshape back
if ndim == 1:
values = values[0]
return values
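def _example_pad_fill_2d():
    # Illustrative sketch only: forward-fill ('pad') the NaNs in a small 2-d
    # block with at most one filled value per gap, using interpolate_2d above.
    values = np.array([[1.0, np.nan, np.nan, 4.0]])
    return interpolate_2d(values, method='pad', limit=1)  # -> [[1., 1., nan, 4.]]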
def _interp_wrapper(f, wrap_dtype, na_override=None):
def wrapper(arr, mask, limit=None):
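        # Reinterpret the data (e.g. datetime64 values) as `wrap_dtype` so the
        # underlying int64 fill routine can operate on it in place; the caller's
        # array is modified through the view, so nothing is returned here.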
view = arr.view(wrap_dtype)
f(view, mask, limit=limit)
return wrapper
_pad_1d_datetime = _interp_wrapper(algos.pad_inplace_int64, np.int64)
_pad_2d_datetime = _interp_wrapper(algos.pad_2d_inplace_int64, np.int64)
_backfill_1d_datetime = _interp_wrapper(algos.backfill_inplace_int64, np.int64)
_backfill_2d_datetime = _interp_wrapper(algos.backfill_2d_inplace_int64,
np.int64)
def pad_1d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
name = 'pad_inplace_{name}'.format(name=dtype.name)
_method = getattr(algos, name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _pad_1d_datetime
elif is_integer_dtype(values):
values = ensure_float64(values)
_method = algos.pad_inplace_float64
elif values.dtype == np.object_:
_method = algos.pad_inplace_object
if _method is None:
raise ValueError('Invalid dtype for pad_1d [{name}]'
.format(name=dtype.name))
if mask is None:
mask = isna(values)
mask = mask.view(np.uint8)
_method(values, mask, limit=limit)
return values
def backfill_1d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
name = 'backfill_inplace_{name}'.format(name=dtype.name)
_method = getattr(algos, name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _backfill_1d_datetime
elif is_integer_dtype(values):
values = ensure_float64(values)
_method = algos.backfill_inplace_float64
elif values.dtype == np.object_:
_method = algos.backfill_inplace_object
if _method is None:
raise ValueError('Invalid dtype for backfill_1d [{name}]'
.format(name=dtype.name))
if mask is None:
mask = isna(values)
mask = mask.view(np.uint8)
_method(values, mask, limit=limit)
return values
def pad_2d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
name = 'pad_2d_inplace_{name}'.format(name=dtype.name)
_method = getattr(algos, name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _pad_2d_datetime
elif is_integer_dtype(values):
values = ensure_float64(values)
_method = algos.pad_2d_inplace_float64
elif values.dtype == np.object_:
_method = algos.pad_2d_inplace_object
if _method is None:
raise ValueError('Invalid dtype for pad_2d [{name}]'
.format(name=dtype.name))
if mask is None:
mask = isna(values)
mask = mask.view(np.uint8)
if np.all(values.shape):
_method(values, mask, limit=limit)
else:
# for test coverage
pass
return values
def backfill_2d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
name = 'backfill_2d_inplace_{name}'.format(name=dtype.name)
_method = getattr(algos, name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _backfill_2d_datetime
elif is_integer_dtype(values):
values = ensure_float64(values)
_method = algos.backfill_2d_inplace_float64
elif values.dtype == np.object_:
_method = algos.backfill_2d_inplace_object
if _method is None:
raise ValueError('Invalid dtype for backfill_2d [{name}]'
.format(name=dtype.name))
if mask is None:
mask = isna(values)
mask = mask.view(np.uint8)
if np.all(values.shape):
_method(values, mask, limit=limit)
else:
# for test coverage
pass
return values
_fill_methods = {'pad': pad_1d, 'backfill': backfill_1d}
def get_fill_func(method):
method = clean_fill_method(method)
return _fill_methods[method]
def clean_reindex_fill_method(method):
return clean_fill_method(method, allow_nearest=True)
def fill_zeros(result, x, y, name, fill):
"""
    If this is a reversed op, then flip x and y.
    If y is an integer value (or array) that contains zeros, fill those
    positions in the result with `fill`, masking out positions where the
    result is already NaN, and return the result.
"""
if fill is None or is_float_dtype(result):
return result
if name.startswith(('r', '__r')):
x, y = y, x
is_variable_type = (hasattr(y, 'dtype') or hasattr(y, 'type'))
is_scalar_type = is_scalar(y)
if not is_variable_type and not is_scalar_type:
return result
if is_scalar_type:
y = np.array(y)
if is_integer_dtype(y):
if (y == 0).any():
# GH 7325, mask and nans must be broadcastable (also: PR 9308)
# Raveling and then reshaping makes np.putmask faster
mask = ((y == 0) & ~np.isnan(result)).ravel()
shape = result.shape
result = result.astype('float64', copy=False).ravel()
np.putmask(result, mask, fill)
# if we have a fill of inf, then sign it correctly
# (GH 6178 and PR 9308)
if np.isinf(fill):
signs = y if name.startswith(('r', '__r')) else x
signs = np.sign(signs.astype('float', copy=False))
negative_inf_mask = (signs.ravel() < 0) & mask
np.putmask(result, negative_inf_mask, -fill)
if "floordiv" in name: # (PR 9308)
nan_mask = ((y == 0) & (x == 0)).ravel()
np.putmask(result, nan_mask, np.nan)
result = result.reshape(shape)
return result
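def _example_fill_zeros():
    # Illustrative sketch only: integer floor-division by zero yields 0 in
    # numpy; fill_zeros rewrites those slots with the requested fill (signed
    # infinities here, and NaN for the 0 // 0 case).
    x = np.array([1, 0, -1], dtype=np.int64)
    y = np.zeros(3, dtype=np.int64)
    with np.errstate(divide='ignore'):
        raw = x // y  # -> array([0, 0, 0])
    return fill_zeros(raw, x, y, '__floordiv__', np.inf)  # -> [inf, nan, -inf]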
def mask_zero_div_zero(x, y, result, copy=False):
"""
Set results of 0 / 0 or 0 // 0 to np.nan, regardless of the dtypes
of the numerator or the denominator.
Parameters
----------
x : ndarray
y : ndarray
result : ndarray
copy : bool (default False)
Whether to always create a new array or try to fill in the existing
array if possible.
Returns
-------
filled_result : ndarray
Examples
--------
>>> x = np.array([1, 0, -1], dtype=np.int64)
>>> y = 0 # int 0; numpy behavior is different with float
>>> result = x / y
>>> result # raw numpy result does not fill division by zero
array([0, 0, 0])
>>> mask_zero_div_zero(x, y, result)
array([ inf, nan, -inf])
"""
if is_scalar(y):
y = np.array(y)
zmask = y == 0
if zmask.any():
shape = result.shape
nan_mask = (zmask & (x == 0)).ravel()
neginf_mask = (zmask & (x < 0)).ravel()
posinf_mask = (zmask & (x > 0)).ravel()
if nan_mask.any() or neginf_mask.any() or posinf_mask.any():
# Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN
result = result.astype('float64', copy=copy).ravel()
np.putmask(result, nan_mask, np.nan)
np.putmask(result, posinf_mask, np.inf)
np.putmask(result, neginf_mask, -np.inf)
result = result.reshape(shape)
return result
def dispatch_missing(op, left, right, result):
"""
    Fill nulls caused by division by zero, casting to a different dtype
if necessary.
Parameters
----------
op : function (operator.add, operator.div, ...)
left : object (Index for non-reversed ops)
    right : object (Index for reversed ops)
result : ndarray
Returns
-------
result : ndarray
"""
opstr = '__{opname}__'.format(opname=op.__name__).replace('____', '__')
if op in [operator.truediv, operator.floordiv,
getattr(operator, 'div', None)]:
result = mask_zero_div_zero(left, right, result)
elif op is operator.mod:
result = fill_zeros(result, left, right, opstr, np.nan)
elif op is divmod:
res0 = mask_zero_div_zero(left, right, result[0])
res1 = fill_zeros(result[1], left, right, opstr, np.nan)
result = (res0, res1)
return result
def _interp_limit(invalid, fw_limit, bw_limit):
"""
Get indexers of values that won't be filled
because they exceed the limits.
Parameters
----------
invalid : boolean ndarray
fw_limit : int or None
forward limit to index
bw_limit : int or None
backward limit to index
Returns
-------
set of indexers
Notes
-----
This is equivalent to the more readable, but slower
.. code-block:: python
for x in np.where(invalid)[0]:
if invalid[max(0, x - fw_limit):x + bw_limit + 1].all():
yield x
"""
# handle forward first; the backward direction is the same except
# 1. operate on the reversed array
# 2. subtract the returned indices from N - 1
N = len(invalid)
f_idx = set()
b_idx = set()
def inner(invalid, limit):
limit = min(limit, N)
windowed = _rolling_window(invalid, limit + 1).all(1)
idx = (set(np.where(windowed)[0] + limit) |
set(np.where((~invalid[:limit + 1]).cumsum() == 0)[0]))
return idx
if fw_limit is not None:
if fw_limit == 0:
f_idx = set(np.where(invalid)[0])
else:
f_idx = inner(invalid, fw_limit)
if bw_limit is not None:
if bw_limit == 0:
# then we don't even need to care about backwards
# just use forwards
return f_idx
else:
b_idx = list(inner(invalid[::-1], bw_limit))
b_idx = set(N - 1 - np.asarray(b_idx))
if fw_limit == 0:
return b_idx
return f_idx & b_idx
def _rolling_window(a, window):
"""
[True, True, False, True, False], 2 ->
[
[True, True],
[True, False],
[False, True],
[True, False],
]
"""
# https://stackoverflow.com/a/6811241
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
|
|
import json
import pickle
from typing import Any, Tuple, Union
from abc import ABC, abstractmethod
from couchbase.exceptions import ValueFormatException
from couchbase_core._libcouchbase import (Transcoder, FMT_JSON,
FMT_BYTES, FMT_UTF8, FMT_PICKLE,
FMT_LEGACY_MASK, FMT_COMMON_MASK)
UNIFIED_FORMATS = (FMT_JSON, FMT_BYTES, FMT_UTF8, FMT_PICKLE)
LEGACY_FORMATS = tuple([x & FMT_LEGACY_MASK for x in UNIFIED_FORMATS])
COMMON_FORMATS = tuple([x & FMT_COMMON_MASK for x in UNIFIED_FORMATS])
COMMON2UNIFIED = {}
LEGACY2UNIFIED = {}
for fl in UNIFIED_FORMATS:
COMMON2UNIFIED[fl & FMT_COMMON_MASK] = fl
LEGACY2UNIFIED[fl & FMT_LEGACY_MASK] = fl
def get_decode_format(flags):
"""
    Return the unified format implied by the given flags; unrecognized
    formats fall back to FMT_BYTES.
"""
c_flags = flags & FMT_COMMON_MASK
l_flags = flags & FMT_LEGACY_MASK
if c_flags:
# if unknown format, default to FMT_BYTES
return COMMON2UNIFIED.get(c_flags, FMT_BYTES)
else:
# if unknown format, default to FMT_BYTES
return LEGACY2UNIFIED.get(l_flags, FMT_BYTES)
class Transcoder(ABC):
"""Interface a Custom Transcoder must implement
"""
@abstractmethod
def encode_value(self, # type: "Transcoder"
value # type: Any
) -> Tuple[bytes, int]:
pass
@abstractmethod
def decode_value(self, # type: "Transcoder"
value, # type: bytes
flags # type: int
) -> Any:
pass
class JSONTranscoder(Transcoder):
def encode_value(self, # type: "JSONTranscoder"
value, # type: Any
) -> Tuple[bytes, int]:
if isinstance(value, str):
format = FMT_JSON
elif isinstance(value, (bytes, bytearray)):
raise ValueError(
"The JSONTranscoder (default transcoder) does not support binary data.")
elif isinstance(value, (list, tuple, dict, bool, int, float)) or value is None:
format = FMT_JSON
else:
raise ValueFormatException(
"Unrecognized value type {}".format(type(value)))
if format != FMT_JSON:
raise ValueFormatException("Unrecognized format {}".format(format))
return json.dumps(value, ensure_ascii=False).encode("utf-8"), FMT_JSON
def decode_value(self, # type: "JSONTranscoder"
value, # type: bytes
flags # type: int
) -> Any:
format = get_decode_format(flags)
if format == FMT_BYTES:
raise ValueFormatException(
"The JSONTranscoder (default transcoder) does not support binary format")
elif format == FMT_UTF8:
raise ValueFormatException(
"The JSONTranscoder (default transcoder) does not support string format")
elif format == FMT_JSON:
return json.loads(value.decode('utf-8'))
else:
raise ValueFormatException(
"Unrecognized format provided: {}".format(format))
class RawJSONTranscoder(Transcoder):
def encode_value(self, # type: "RawJSONTranscoder"
value # type: Union[str,bytes,bytearray]
) -> Tuple[bytes, int]:
if isinstance(value, str):
return value.encode("utf-8"), FMT_JSON
elif isinstance(value, (bytes, bytearray)):
if isinstance(value, bytearray):
value = bytes(value)
return value, FMT_JSON
else:
raise ValueFormatException(
"Only binary and string data supported by RawJSONTranscoder")
def decode_value(self, # type: "RawJSONTranscoder"
value, # type: bytes
flags # type: int
) -> Union[str, bytes]:
format = get_decode_format(flags)
if format == FMT_BYTES:
raise ValueFormatException(
"Binary format type not supported by RawJSONTranscoder")
elif format == FMT_UTF8:
raise ValueFormatException(
"String format type not supported by RawJSONTranscoder")
elif format == FMT_JSON:
            if isinstance(value, str):
                # value is already decoded text (str has no decode()); pass it
                # through unchanged
                return value
elif isinstance(value, (bytes, bytearray)):
if isinstance(value, bytearray):
value = bytes(value)
return value
else:
raise ValueFormatException(
"Only binary and string data supported by RawJSONTranscoder")
else:
raise ValueError("Unexpected flags value.")
class RawStringTranscoder(Transcoder):
def encode_value(self, # type: "RawStringTranscoder"
value # type: str
) -> Tuple[bytes, int]:
if isinstance(value, str):
return value.encode("utf-8"), FMT_UTF8
else:
raise ValueFormatException(
"Only string data supported by RawStringTranscoder")
def decode_value(self, # type: "RawStringTranscoder"
value, # type: bytes
flags # type: int
) -> Union[str, bytes]:
format = get_decode_format(flags)
if format == FMT_BYTES:
raise ValueFormatException(
"Binary format type not supported by RawStringTranscoder")
elif format == FMT_UTF8:
return value.decode("utf-8")
elif format == FMT_JSON:
raise ValueFormatException(
"JSON format type not supported by RawStringTranscoder")
else:
raise ValueError("Unexpected flags value.")
class RawBinaryTranscoder(Transcoder):
def encode_value(self, # type: "RawBinaryTranscoder"
value # type: Union[bytes,bytearray]
) -> Tuple[bytes, int]:
if isinstance(value, (bytes, bytearray)):
if isinstance(value, bytearray):
value = bytes(value)
return value, FMT_BYTES
else:
raise ValueFormatException(
"Only binary data supported by RawBinaryTranscoder")
def decode_value(self, # type: "RawBinaryTranscoder"
value, # type: bytes
flags # type: int
) -> bytes:
format = get_decode_format(flags)
if format == FMT_BYTES:
if isinstance(value, bytearray):
value = bytes(value)
return value
elif format == FMT_UTF8:
raise ValueFormatException(
"String format type not supported by RawBinaryTranscoder")
elif format == FMT_JSON:
raise ValueFormatException(
"JSON format type not supported by RawBinaryTranscoder")
else:
raise ValueError("Unexpected flags value.")
class LegacyTranscoder(Transcoder):
def encode_value(self, # type: "LegacyTranscoder"
value # type: Any
) -> Tuple[bytes, int]:
if isinstance(value, str):
format = FMT_UTF8
elif isinstance(value, (bytes, bytearray)):
format = FMT_BYTES
elif isinstance(value, (list, tuple, dict, bool, int, float)) or value is None:
format = FMT_JSON
else:
format = FMT_PICKLE
if format == FMT_BYTES:
if isinstance(value, bytes):
pass
elif isinstance(value, bytearray):
value = bytes(value)
else:
raise ValueFormatException("Expected bytes")
return value, format
elif format == FMT_UTF8:
return value.encode('utf-8'), format
elif format == FMT_PICKLE:
return pickle.dumps(value), FMT_PICKLE
elif format == FMT_JSON:
return json.dumps(value, ensure_ascii=False).encode("utf-8"), FMT_JSON
else:
raise ValueFormatException("Unrecognized format {}".format(format))
def decode_value(self, # type: "LegacyTranscoder"
value, # type: bytes
flags # type: int
) -> Any:
format = get_decode_format(flags)
if format == FMT_BYTES:
return value
elif format == FMT_UTF8:
return value.decode("utf-8")
elif format == FMT_JSON:
try:
return json.loads(value.decode('utf-8'))
except Exception:
# if error encountered, assume return bytes
return value
elif format == FMT_PICKLE:
return pickle.loads(value)
else:
# default to returning bytes
return value
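def _example_legacy_transcoder_roundtrip():
    # Illustrative sketch only: the legacy transcoder picks the format from the
    # Python type (UTF-8 for str, raw bytes, JSON for basic containers, pickle
    # as a last resort), so these values all survive a round trip.
    tc = LegacyTranscoder()
    for value in ('plain text', b'\x00\x01', {'k': [1, 2, 3]}):
        encoded, flags = tc.encode_value(value)
        assert tc.decode_value(encoded, flags) == value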
|
|
# Copyright (c) 2016 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import unittest
from unittest import mock
from oslo_utils import units
from cinder import exception
from cinder.tests.unit.volume.drivers.dell_emc.unity import test_adapter
from cinder.tests.unit.volume.drivers.dell_emc.unity import test_driver
from cinder.volume.drivers.dell_emc.unity import utils
def get_volume_type_extra_specs(volume_type):
return {'provisioning:type': volume_type}
def get_group_type_specs(group_type):
return {'consistent_group_snapshot_enabled': '<is> True',
'group_type_id': group_type}
def get_volume_type_qos_specs(type_id):
if type_id == 'invalid_backend_qos_consumer':
ret = {'qos_specs': {'consumer': 'invalid'}}
elif type_id == 'both_none':
ret = {'qos_specs': {'consumer': 'back-end', 'specs': {}}}
elif type_id == 'max_1000_iops':
ret = {
'qos_specs': {
'id': 'max_1000_iops',
'consumer': 'both',
'specs': {
'maxIOPS': 1000
}
}
}
elif type_id == 'max_2_mbps':
ret = {
'qos_specs': {
'id': 'max_2_mbps',
'consumer': 'back-end',
'specs': {
'maxBWS': 2
}
}
}
else:
ret = None
return ret
def patch_volume_types(func):
@functools.wraps(func)
@mock.patch(target=('cinder.volume.volume_types'
'.get_volume_type_extra_specs'),
new=get_volume_type_extra_specs)
@mock.patch(target=('cinder.volume.volume_types'
'.get_volume_type_qos_specs'),
new=get_volume_type_qos_specs)
def func_wrapper(*args, **kwargs):
return func(*args, **kwargs)
return func_wrapper
def patch_group_types(func):
@functools.wraps(func)
@mock.patch(target=('cinder.volume.group_types'
'.get_group_type_specs'),
new=get_group_type_specs)
def func_wrapper(*args, **kwargs):
return func(*args, **kwargs)
return func_wrapper
class UnityUtilsTest(unittest.TestCase):
def test_validate_pool_names_filter(self):
all_pools = list('acd')
pool_names = utils.validate_pool_names(list('abc'), all_pools)
self.assertIn('a', pool_names)
self.assertIn('c', pool_names)
self.assertNotIn('b', pool_names)
self.assertNotIn('d', pool_names)
def test_validate_pool_names_non_exists(self):
def f():
all_pools = list('abc')
utils.validate_pool_names(list('efg'), all_pools)
self.assertRaises(exception.VolumeBackendAPIException, f)
def test_validate_pool_names_default(self):
all_pools = list('ab')
pool_names = utils.validate_pool_names([], all_pools)
self.assertEqual(2, len(pool_names))
pool_names = utils.validate_pool_names(None, all_pools)
self.assertEqual(2, len(pool_names))
def test_build_provider_location(self):
location = utils.build_provider_location('unity', 'thin', 'ev_1', '3')
expected = 'id^ev_1|system^unity|type^thin|version^3'
self.assertEqual(expected, location)
def test_extract_provider_location_version(self):
location = 'id^ev_1|system^unity|type^thin|version^3'
self.assertEqual('3',
utils.extract_provider_location(location, 'version'))
def test_extract_provider_location_type(self):
location = 'id^ev_1|system^unity|type^thin|version^3'
self.assertEqual('thin',
utils.extract_provider_location(location, 'type'))
def test_extract_provider_location_system(self):
location = 'id^ev_1|system^unity|type^thin|version^3'
self.assertEqual('unity',
utils.extract_provider_location(location, 'system'))
def test_extract_provider_location_id(self):
location = 'id^ev_1|system^unity|type^thin|version^3'
self.assertEqual('ev_1',
utils.extract_provider_location(location, 'id'))
def test_extract_provider_location_not_found(self):
location = 'id^ev_1|system^unity|type^thin|version^3'
self.assertIsNone(utils.extract_provider_location(location, 'na'))
def test_extract_provider_location_none(self):
self.assertIsNone(utils.extract_provider_location(None, 'abc'))
def test_extract_iscsi_uids(self):
connector = {'host': 'fake_host',
'initiator': 'fake_iqn'}
self.assertEqual(['fake_iqn'],
utils.extract_iscsi_uids(connector))
def test_extract_iscsi_uids_not_found(self):
connector = {'host': 'fake_host'}
self.assertRaises(exception.VolumeBackendAPIException,
utils.extract_iscsi_uids,
connector)
def test_extract_fc_uids(self):
connector = {'host': 'fake_host',
'wwnns': ['1111111111111111',
'2222222222222222'],
'wwpns': ['3333333333333333',
'4444444444444444']
}
self.assertEqual(['11:11:11:11:11:11:11:11:33:33:33:33:33:33:33:33',
'22:22:22:22:22:22:22:22:44:44:44:44:44:44:44:44', ],
utils.extract_fc_uids(connector))
def test_extract_fc_uids_not_found(self):
connector = {'host': 'fake_host'}
self.assertRaises(exception.VolumeBackendAPIException,
utils.extract_iscsi_uids,
connector)
def test_byte_to_gib(self):
self.assertEqual(5, utils.byte_to_gib(5 * units.Gi))
def test_byte_to_mib(self):
self.assertEqual(5, utils.byte_to_mib(5 * units.Mi))
def test_gib_to_mib(self):
self.assertEqual(5 * units.Gi / units.Mi, utils.gib_to_mib(5))
def test_convert_ip_to_portal(self):
self.assertEqual('1.2.3.4:3260', utils.convert_ip_to_portal('1.2.3.4'))
self.assertEqual('[fd27:2e95:e174::100]:3260',
utils.convert_ip_to_portal('fd27:2e95:e174::100'))
self.assertEqual('[fd27:2e95:e174::100]:3260',
utils.convert_ip_to_portal('[fd27:2e95:e174::100]'))
def test_convert_to_itor_tgt_map(self):
zone_mapping = {
'san_1': {
'initiator_port_wwn_list':
('200000051e55a100', '200000051e55a121'),
'target_port_wwn_list':
('100000051e55a100', '100000051e55a121')
}
}
ret = utils.convert_to_itor_tgt_map(zone_mapping)
self.assertEqual(['100000051e55a100', '100000051e55a121'], ret[0])
mapping = ret[1]
targets = ('100000051e55a100', '100000051e55a121')
self.assertEqual(targets, mapping['200000051e55a100'])
self.assertEqual(targets, mapping['200000051e55a121'])
def test_get_pool_name(self):
volume = test_adapter.MockOSResource(host='host@backend#pool_name')
self.assertEqual('pool_name', utils.get_pool_name(volume))
def test_get_pool_name_from_host(self):
host = {'host': 'host@backend#pool_name'}
ret = utils.get_pool_name_from_host(host)
self.assertEqual('pool_name', ret)
def get_backend_name_from_volume(self):
volume = test_adapter.MockOSResource(host='host@backend#pool_name')
ret = utils.get_backend_name_from_volume(volume)
self.assertEqual('host@backend', ret)
def get_backend_name_from_host(self):
host = {'host': 'host@backend#pool_name'}
ret = utils.get_backend_name_from_volume(host)
self.assertEqual('host@backend', ret)
def test_ignore_exception(self):
class IgnoredException(Exception):
pass
def f():
raise IgnoredException('any exception')
try:
utils.ignore_exception(f)
except IgnoredException:
self.fail('should not raise any exception.')
def test_assure_cleanup(self):
data = [0]
def _enter():
data[0] += 10
return data[0]
def _exit(x):
data[0] = x - 1
ctx = utils.assure_cleanup(_enter, _exit, True)
with ctx as r:
self.assertEqual(10, r)
self.assertEqual(9, data[0])
def test_get_backend_qos_specs_type_none(self):
volume = test_adapter.MockOSResource(volume_type_id=None)
ret = utils.get_backend_qos_specs(volume)
self.assertIsNone(ret)
@patch_volume_types
def test_get_backend_qos_specs_none(self):
volume = test_adapter.MockOSResource(volume_type_id='no_qos')
ret = utils.get_backend_qos_specs(volume)
self.assertIsNone(ret)
@patch_volume_types
def test_get_backend_qos_invalid_consumer(self):
volume = test_adapter.MockOSResource(
volume_type_id='invalid_backend_qos_consumer')
ret = utils.get_backend_qos_specs(volume)
self.assertIsNone(ret)
@patch_volume_types
def test_get_backend_qos_both_none(self):
volume = test_adapter.MockOSResource(volume_type_id='both_none')
ret = utils.get_backend_qos_specs(volume)
self.assertIsNone(ret)
@patch_volume_types
def test_get_backend_qos_iops(self):
volume = test_adapter.MockOSResource(volume_type_id='max_1000_iops')
ret = utils.get_backend_qos_specs(volume)
expected = {'maxBWS': None, 'id': 'max_1000_iops', 'maxIOPS': 1000}
self.assertEqual(expected, ret)
@patch_volume_types
def test_get_backend_qos_mbps(self):
volume = test_adapter.MockOSResource(volume_type_id='max_2_mbps')
ret = utils.get_backend_qos_specs(volume)
expected = {'maxBWS': 2, 'id': 'max_2_mbps', 'maxIOPS': None}
self.assertEqual(expected, ret)
def test_remove_empty(self):
option = mock.Mock()
value_list = [' pool1', 'pool2 ', ' pool3 ']
ret = utils.remove_empty(option, value_list)
expected = ['pool1', 'pool2', 'pool3']
self.assertListEqual(expected, ret)
def test_remove_empty_none(self):
option = mock.Mock()
value_list = None
ret = utils.remove_empty(option, value_list)
expected = None
self.assertEqual(expected, ret)
def test_remove_empty_empty_list(self):
option = mock.Mock()
value_list = []
ret = utils.remove_empty(option, value_list)
expected = None
self.assertEqual(expected, ret)
@patch_group_types
def test_group_is_cg(self):
cg = test_driver.UnityDriverTest.get_cg()
result = utils.group_is_cg(cg)
self.assertTrue(result)
@patch_group_types
def test_get_group_specs_by_key(self):
cg = test_driver.UnityDriverTest.get_cg()
result = utils.get_group_specs(cg, 'consistent_group_snapshot_enabled')
self.assertEqual('<is> True', result)
@patch_group_types
def test_no_group_specs_key(self):
cg = test_driver.UnityDriverTest.get_cg()
result = utils.get_group_specs(cg, 'test_key')
self.assertIsNone(result)
@patch_volume_types
def test_retype_no_need_migration_when_same_host(self):
volume = test_adapter.MockOSResource(volume_type_id='host_1',
host='host_1')
new_host = {'name': 'new_name', 'host': 'host_1'}
ret = utils.retype_need_migration(volume, None, None, new_host)
self.assertFalse(ret)
@patch_volume_types
def test_retype_need_migration_when_diff_host(self):
volume = test_adapter.MockOSResource(volume_type_id='host_1',
host='host_1')
new_host = {'name': 'new_name', 'host': 'new_host'}
ret = utils.retype_need_migration(volume, None, None, new_host)
self.assertTrue(ret)
@patch_volume_types
def test_retype_no_need_migration_thin_to_compressed(self):
volume = test_adapter.MockOSResource(volume_type_id='host_1',
host='host_1')
new_host = {'name': 'new_name', 'host': 'host_1'}
old_provision = ''
new_provision = 'compressed'
ret = utils.retype_need_migration(volume, old_provision,
new_provision, new_host)
self.assertFalse(ret)
@patch_volume_types
def test_retype_no_need_migration_compressed_to_thin(self):
volume = test_adapter.MockOSResource(volume_type_id='host_1',
host='host_1')
new_host = {'name': 'new_name', 'host': 'host_1'}
old_provision = 'compressed'
new_provision = ''
ret = utils.retype_need_migration(volume, old_provision,
new_provision, new_host)
self.assertFalse(ret)
@patch_volume_types
def test_retype_need_migration_thin_to_thick(self):
volume = test_adapter.MockOSResource(volume_type_id='host_1',
host='host_1')
new_host = {'name': 'new_name', 'host': 'host_1'}
old_provision = ''
new_provision = 'thick'
ret = utils.retype_need_migration(volume, old_provision,
new_provision, new_host)
self.assertTrue(ret)
|
|
# Copyright (c) 2012-2015 Kapiche Ltd.
# Author: Ryan Stuart <ryan@kapiche.com>
from __future__ import absolute_import, division, print_function, unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.contrib.auth.models import _user_has_perm, _user_get_all_permissions, _user_has_module_perms
from django.contrib.contenttypes.models import ContentTypeManager
from django.contrib import auth
from django.contrib.auth.models import AnonymousUser
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_str
from .. import Entity, TextProperty, ListProperty, BooleanProperty, DateTimeProperty, ReferenceProperty, EmailProperty
from .utils import datetime_now
REDIRECT_FIELD_NAME = 'next'
try:
from django.contrib.auth.hashers import check_password, make_password
except ImportError:
"""Handle older versions of Django"""
from django.utils.hashcompat import md5_constructor, sha_constructor
def get_hexdigest(algorithm, salt, raw_password):
raw_password, salt = smart_str(raw_password), smart_str(salt)
if algorithm == 'md5':
return md5_constructor(salt + raw_password).hexdigest()
elif algorithm == 'sha1':
return sha_constructor(salt + raw_password).hexdigest()
raise ValueError('Got unknown password algorithm type in password')
def check_password(raw_password, password):
algo, salt, hash = password.split('$')
return hash == get_hexdigest(algo, salt, raw_password)
def make_password(raw_password):
from random import random
algo = 'sha1'
salt = get_hexdigest(algo, str(random()), str(random()))[:5]
hash = get_hexdigest(algo, salt, raw_password)
return '%s$%s$%s' % (algo, salt, hash)
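def _example_password_roundtrip():
    # Illustrative sketch only: hash a raw password and verify it with
    # check_password. With the modern import above this delegates to Django's
    # hashers (and therefore needs a configured settings module); with the
    # fallback it produces the 'sha1$<salt>$<hexdigest>' form checked above.
    hashed = make_password('correct horse battery staple')
    return check_password('correct horse battery staple', hashed)  # -> True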
class ContentType(Entity):
name = TextProperty(max_length=100)
app_label = TextProperty(max_length=100)
model = TextProperty(max_length=100, verbose_name=_('python model class name'))
objects = ContentTypeManager()
class Meta:
verbose_name = _('content type')
verbose_name_plural = _('content types')
kind = 'django_content_type'
ordering = ('name',)
def __unicode__(self):
return self.name
def model_class(self):
"""Returns the Python model class for this type of content."""
from django.db import models
return models.get_model(self.app_label, self.model)
entity_class = model_class
def get_object_for_this_type(self, **kwargs):
"""
Returns an object of this type for the keyword arguments given. Basically, this is a proxy around this
object_type's get_object() model method. The ObjectNotExist exception, if thrown, will not be caught, so code
that calls this method should catch it.
"""
return self.entity_class().object.get(**kwargs)
def natural_key(self):
return (self.app_label, self.model)
class SiteProfileNotAvailable(Exception):
pass
class PermissionManager(models.Manager):
def get_by_natural_key(self, codename, app_label, model):
return self.get(
codename=codename,
content_type=ContentType.objects.get_by_natural_key(app_label, model)
)
class Permission(Entity):
"""
The permissions system provides a way to assign permissions to specific users and groups of users.
The permission system is used by the Django admin site, but may also be useful in your own code. The Django admin
site uses permissions as follows:
- The "add" permission limits the user's ability to view the "add"
form and add an object.
- The "change" permission limits a user's ability to view the change
list, view the "change" form and change an object.
- The "delete" permission limits the ability to delete an object.
Permissions are set globally per type of object, not per specific object instance. It is possible to say "Mary may
change news stories," but it's not currently possible to say "Mary may change news stories, but only the ones she
created herself" or "Mary may only change news stories that have a certain status or publication date."
Three basic permissions -- add, change and delete -- are automatically created for each Django model.
"""
    name = TextProperty(max_length=50, verbose_name=_('name'))
# content_type = ReferenceField(ContentType)
codename = TextProperty(max_length=100, verbose_name=_('codename'))
# FIXME: don't access field of the other class
# unique_with=['content_type__app_label', 'content_type__model'])
objects = PermissionManager()
class Meta:
verbose_name = _('permission')
verbose_name_plural = _('permissions')
def __unicode__(self):
return u"%s | %s | %s" % (str(self.content_type.app_label), str(self.content_type), str(self.name))
def natural_key(self):
return (self.codename,) + self.content_type.natural_key()
natural_key.dependencies = ['contenttypes.contenttype']
class Group(Entity):
"""
Groups are a generic way of categorizing users to apply permissions, or some other label, to those users. A user
can belong to any number of groups.
A user in a group automatically has all the permissions granted to that group. For example, if the group Site
editors has the permission can_edit_home_page, any user in that group will have that permission.
Beyond permissions, groups are a convenient way to categorize users to apply some label, or extended functionality,
to them. For example, you could create a group 'Special users', and you could write code that would do special
things to those users -- such as giving them access to a members-only portion of your site, or sending them
members-only e-mail messages.
"""
# name = TextProperty(max_length=80, unique=True, verbose_name=_('name'))
name = TextProperty(max_length=80, verbose_name=_('name'))
permissions = ListProperty(ReferenceProperty(Permission, verbose_name=_('permissions'), required=False))
class Meta:
verbose_name = _('group')
verbose_name_plural = _('groups')
def __unicode__(self):
return self.name
class UserManager(models.Manager):
def create_user(self, username, email, password=None):
"""
Creates and saves a User with the given username, e-mail and password.
"""
now = datetime_now()
# Normalize the address by lowercasing the domain part of the email
# address.
try:
email_name, domain_part = email.strip().split('@', 1)
except ValueError:
pass
else:
email = '@'.join([email_name, domain_part.lower()])
user = self.model(username=username, email=email, is_staff=False, is_active=True,
is_superuser=False, last_login=now, date_joined=now)
user.set_password(password)
user.save()
return user
def create_superuser(self, username, email, password):
u = self.create_user(username, email, password)
u.is_staff = True
u.is_active = True
u.is_superuser = True
u.save()
return u
def make_random_password(self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789'):
"""Generates a random password with the given length and given allowed_chars."""
# Note that default value of allowed_chars does not have "I" or letters
# that look like it -- just to avoid confusion.
from random import choice
return ''.join([choice(allowed_chars) for _ in range(length)])
class User(Entity):
"""
A User entity that aims to mirror most of the API specified by Django at
http://docs.djangoproject.com/en/dev/topics/auth/#users
"""
username = TextProperty(
max_length=30,
required=True,
verbose_name=_('username'),
help_text=_("Required. 30 characters or fewer. Letters, numbers and @/./+/-/_ characters")
)
first_name = TextProperty(
max_length=30,
verbose_name=_('first name')
)
last_name = TextProperty(max_length=30, verbose_name=_('last name'))
email = EmailProperty(verbose_name=_('e-mail address'))
password = TextProperty(
max_length=128,
verbose_name=_('password'),
help_text=_(
"Use '[algo]$[iterations]$[salt]$[hexdigest]' or use the <a href=\"password/\">change password form</a>."
)
)
is_staff = BooleanProperty(
default=False,
verbose_name=_('staff status'),
help_text=_("Designates whether the user can log into this admin site.")
)
is_active = BooleanProperty(
default=True,
verbose_name=_('active'),
help_text=_(
"Designates whether this user should be treated as active. Unselect this instead of deleting accounts."
)
)
is_superuser = BooleanProperty(
default=False,
verbose_name=_('superuser status'),
help_text=_("Designates that this user has all permissions without explicitly assigning them.")
)
last_login = DateTimeProperty(default=datetime_now, verbose_name=_('last login'))
date_joined = DateTimeProperty(default=datetime_now, verbose_name=_('date joined'))
user_permissions = ListProperty(
ReferenceProperty(Permission),
verbose_name=_('user permissions'),
help_text=_('Permissions for the user.')
)
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
def __unicode__(self):
return self.username
def get_full_name(self):
"""Returns the users first and last names, separated by a space.
"""
full_name = u'%s %s' % (self.first_name or '', self.last_name or '')
return full_name.strip()
def is_anonymous(self):
return False
def is_authenticated(self):
return True
def set_password(self, raw_password):
"""
Sets the user's password - always use this rather than directly assigning to
:attr:`~gcloudoem.django.auth.User.password` as the password is hashed before storage.
"""
self.password = make_password(raw_password)
self.save()
return self
def check_password(self, raw_password):
"""
Checks the user's password against a provided password - always use this rather than directly comparing to
        :attr:`~gcloudoem.django.auth.User.password` as the password is hashed before storage.
"""
return check_password(raw_password, self.password)
@classmethod
def create_user(cls, username, password, email=None):
"""Create (and save) a new user with the given username, password and email address."""
now = datetime_now()
# Normalize the address by lowercasing the domain part of the email address.
if email is not None:
try:
email_name, domain_part = email.strip().split('@', 1)
except ValueError:
pass
else:
email = '@'.join([email_name, domain_part.lower()])
user = cls(username=username, email=email, date_joined=now)
user.set_password(password)
user.save()
return user
def get_group_permissions(self, obj=None):
"""
Returns a list of permission strings that this user has through his/her groups. This method queries all
available auth backends. If an object is passed in, only permissions matching this object are returned.
"""
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_group_permissions"):
permissions.update(backend.get_group_permissions(self, obj))
return permissions
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj)
def has_perm(self, perm, obj=None):
"""
Returns True if the user has the specified permission. This method queries all available auth backends, but
returns immediately if any backend returns True. Thus, a user who has permission from a single auth backend is
assumed to have permission in general. If an object is provided, permissions for this specific object are
checked.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return _user_has_perm(self, perm, obj)
def has_module_perms(self, app_label):
"""
Returns True if the user has any permissions in the given app label. Uses pretty much the same logic as
has_perm, above.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return _user_has_module_perms(self, app_label)
def email_user(self, subject, message, from_email=None):
"""Sends an e-mail to this User."""
from django.core.mail import send_mail
send_mail(subject, message, from_email, [self.email])
def get_profile(self):
"""
Returns site-specific profile for this user. Raises SiteProfileNotAvailable if this site does not allow
profiles.
"""
if not hasattr(self, '_profile_cache'):
from django.conf import settings
if not getattr(settings, 'AUTH_PROFILE_MODULE', False):
raise SiteProfileNotAvailable('You need to set AUTH_PROFILE_MODULE in your project settings')
try:
app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
except ValueError:
raise SiteProfileNotAvailable(
'app_label and model_name should be separated by a dot in the AUTH_PROFILE_MODULE setting'
)
try:
model = models.get_model(app_label, model_name)
if model is None:
raise SiteProfileNotAvailable(
'Unable to load the profile model, check AUTH_PROFILE_MODULE in your project settings'
)
self._profile_cache = model.objects.get(user__id__exact=self.id)
self._profile_cache.user = self
except (ImportError, ImproperlyConfigured):
raise SiteProfileNotAvailable
return self._profile_cache
class GCloudDatastoreBackend(object):
"""Authenticate using Google Datastore and gcloudoem.django.auth.User."""
supports_object_permissions = False
supports_anonymous_user = False
supports_inactive_user = False
_user_entity = False
def authenticate(self, username=None, password=None):
user = self.user_entity.objects(username=username).first()
if user:
if password and user.check_password(password):
backend = auth.get_backends()[0]
user.backend = "%s.%s" % (backend.__module__, backend.__class__.__name__)
return user
return None
def get_user(self, user_id):
return self.user_entity.objects.with_id(user_id)
@property
def user_entity(self):
if self._user_entity is False:
from .gcloud_auth.models import get_user_entity
self._user_entity = get_user_entity()
return self._user_entity
def get_user(userid):
"""
Returns a User object from an id (User.id). Django's equivalent takes request, but taking an id instead leaves it
up to the developer to store the id in any way they want (session, signed cookie, etc.)
"""
if not userid:
return AnonymousUser()
return GCloudDatastoreBackend().get_user(userid) or AnonymousUser()
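def _example_get_user_from_store(store):
    # Illustrative sketch only: `store` stands in for wherever the application
    # chose to keep the saved User.id (session, signed cookie, ...), and
    # 'user_id' is a made-up key. An AnonymousUser comes back when nothing is
    # stored or the lookup fails.
    return get_user(store.get('user_id'))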
|
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Load application resources from a known path.
Loading resources by specifying relative paths to filenames is often
problematic in Python, as the working directory is not necessarily the same
directory as the application's script files.
This module allows applications to specify a search path for resources.
Relative paths are taken to be relative to the application's __main__ module.
ZIP files can appear on the path; they will be searched inside. The resource
module also behaves as expected when applications are bundled using py2exe or
py2app.
As well as providing file references (with the `file` function), the resource
module also contains convenience functions for loading images, textures,
fonts, media and documents.
3rd party modules or packages not bound to a specific application should
construct their own `Loader` instance and override the path to use the
resources in the module's directory.
Path format
^^^^^^^^^^^
The resource path `path` (see also `Loader.__init__` and `Loader.path`)
is a list of locations to search for resources. Locations are searched in the
order given in the path. If a location is not valid (for example, if the
directory does not exist), it is skipped.
Locations in the path beginning with an at sign (``@`` symbol) specify
Python packages. Other locations specify a ZIP archive or directory on the
filesystem. Locations that are not absolute are assumed to be relative to the
script home. Some examples::
# Search just the `res` directory, assumed to be located alongside the
# main script file.
path = ['res']
# Search the directory containing the module `levels.level1`, followed
# by the `res/images` directory.
path = ['@levels.level1', 'res/images']
Paths are always case-sensitive and forward slashes are always used as path
separators, even in cases when the filesystem or platform does not do this.
This avoids a common programmer error when porting applications between
platforms.
The default path is ``['.']``. If you modify the path, you must call
`reindex`.
:since: pyglet 1.1
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import os
import weakref
import sys
import zipfile
import pyglet
from pyglet.compat import BytesIO
class ResourceNotFoundException(Exception):
'''The named resource was not found on the search path.'''
def __init__(self, name):
message = ('Resource "%s" was not found on the path. '
            'Ensure that the filename has the correct capitalisation.') % name
Exception.__init__(self, message)
def get_script_home():
'''Get the directory containing the program entry module.
For ordinary Python scripts, this is the directory containing the
``__main__`` module. For executables created with py2exe the result is
the directory containing the running executable file. For OS X bundles
created using Py2App the result is the Resources directory within the
running bundle.
If none of the above cases apply and the file for ``__main__`` cannot
be determined the working directory is returned.
:rtype: str
'''
frozen = getattr(sys, 'frozen', None)
if frozen in ('windows_exe', 'console_exe'):
return os.path.dirname(sys.executable)
elif frozen == 'macosx_app':
return os.environ['RESOURCEPATH']
else:
main = sys.modules['__main__']
if hasattr(main, '__file__'):
return os.path.dirname(main.__file__)
# Probably interactive
return ''
def get_settings_path(name):
'''Get a directory to save user preferences.
Different platforms have different conventions for where to save user
preferences, saved games, and settings. This function implements those
conventions. Note that the returned path may not exist: applications
should use ``os.makedirs`` to construct it if desired.
On Linux, a hidden directory `name` in the user's home directory is
returned.
On Windows (including under Cygwin) the `name` directory in the user's
``Application Settings`` directory is returned.
On Mac OS X the `name` directory under ``~/Library/Application Support``
is returned.
:Parameters:
`name` : str
The name of the application.
:rtype: str
'''
if sys.platform in ('cygwin', 'win32'):
if 'APPDATA' in os.environ:
return os.path.join(os.environ['APPDATA'], name)
else:
return os.path.expanduser('~/%s' % name)
elif sys.platform == 'darwin':
return os.path.expanduser('~/Library/Application Support/%s' % name)
else:
return os.path.expanduser('~/.%s' % name)
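def _example_settings_dir(name='myapp'):
    # Illustrative sketch only ('myapp' is a made-up application name): resolve
    # the per-user settings directory and create it if needed, as the docstring
    # above recommends.
    path = get_settings_path(name)
    if not os.path.isdir(path):
        os.makedirs(path)
    return path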
class Location(object):
'''Abstract resource location.
Given a location, a file can be loaded from that location with the `open`
method. This provides a convenient way to specify a path to load files
from, and not necessarily have that path reside on the filesystem.
'''
def open(self, filename, mode='rb'):
'''Open a file at this location.
:Parameters:
`filename` : str
The filename to open. Absolute paths are not supported.
Relative paths are not supported by most locations (you
should specify only a filename with no path component).
`mode` : str
The file mode to open with. Only files opened on the
filesystem make use of this parameter; others ignore it.
:rtype: file object
'''
raise NotImplementedError('abstract')
class FileLocation(Location):
'''Location on the filesystem.
'''
def __init__(self, path):
'''Create a location given a relative or absolute path.
:Parameters:
`path` : str
Path on the filesystem.
'''
self.path = path
def open(self, filename, mode='rb'):
return open(os.path.join(self.path, filename), mode)
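def _example_file_location(directory):
    # Illustrative sketch only ('example.txt' is a made-up filename): open a
    # file relative to `directory` through the Location abstraction rather than
    # joining paths by hand.
    location = FileLocation(directory)
    f = location.open('example.txt')
    try:
        return f.read()
    finally:
        f.close()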
class ZIPLocation(Location):
'''Location within a ZIP file.
'''
def __init__(self, zip, dir):
'''Create a location given an open ZIP file and a path within that
file.
:Parameters:
`zip` : ``zipfile.ZipFile``
An open ZIP file from the ``zipfile`` module.
`dir` : str
A path within that ZIP file. Can be empty to specify files at
the top level of the ZIP file.
'''
self.zip = zip
self.dir = dir
def open(self, filename, mode='rb'):
if self.dir:
path = self.dir + '/' + filename
else:
path = filename
text = self.zip.read(path)
return BytesIO(text)
class URLLocation(Location):
'''Location on the network.
This class uses the ``urlparse`` and ``urllib2`` modules to open files on
the network given a URL.
'''
def __init__(self, base_url):
'''Create a location given a base URL.
:Parameters:
`base_url` : str
URL string to prepend to filenames.
'''
self.base = base_url
def open(self, filename, mode='rb'):
import urlparse
import urllib2
url = urlparse.urljoin(self.base, filename)
return urllib2.urlopen(url)
class Loader(object):
'''Load program resource files from disk.
The loader contains a search path which can include filesystem
directories, ZIP archives and Python packages.
:Ivariables:
`path` : list of str
List of search locations. After modifying the path you must
call the `reindex` method.
`script_home` : str
Base resource location, defaulting to the location of the
application script.
'''
def __init__(self, path=None, script_home=None):
'''Create a loader for the given path.
If no path is specified it defaults to ``['.']``; that is, just the
program directory.
See the module documentation for details on the path format.
:Parameters:
`path` : list of str
List of locations to search for resources.
`script_home` : str
Base location of relative files. Defaults to the result of
`get_script_home`.
'''
if path is None:
path = ['.']
if type(path) in (str, unicode):
path = [path]
self.path = list(path)
if script_home is None:
script_home = get_script_home()
self._script_home = script_home
self._index = None
# Map name to image
self._cached_textures = weakref.WeakValueDictionary()
self._cached_images = weakref.WeakValueDictionary()
self._cached_animations = weakref.WeakValueDictionary()
# Map bin size to list of atlases
self._texture_atlas_bins = {}
def _require_index(self):
if self._index is None:
self.reindex()
def reindex(self):
'''Refresh the file index.
You must call this method if `path` is changed or the filesystem
layout changes.
'''
self._index = {}
for path in self.path:
if path.startswith('@'):
# Module
name = path[1:]
try:
module = __import__(name)
except:
continue
for component in name.split('.')[1:]:
module = getattr(module, component)
if hasattr(module, '__file__'):
path = os.path.dirname(module.__file__)
else:
path = '' # interactive
elif not os.path.isabs(path):
# Add script base unless absolute
assert '\\' not in path, \
'Backslashes not permitted in relative path'
path = os.path.join(self._script_home, path)
if os.path.isdir(path):
# Filesystem directory
path = path.rstrip(os.path.sep)
location = FileLocation(path)
for dirpath, dirnames, filenames in os.walk(path):
dirpath = dirpath[len(path) + 1:]
# Force forward slashes for index
if dirpath:
parts = filter(None, dirpath.split(os.sep))
dirpath = '/'.join(parts)
for filename in filenames:
if dirpath:
index_name = dirpath + '/' + filename
else:
index_name = filename
self._index_file(index_name, location)
else:
# Find path component that is the ZIP file.
dir = ''
old_path = None
while path and not os.path.isfile(path):
old_path = path
path, tail_dir = os.path.split(path)
if path == old_path:
break
dir = '/'.join((tail_dir, dir))
if path == old_path:
continue
dir = dir.rstrip('/')
# path is a ZIP file, dir resides within ZIP
if path and zipfile.is_zipfile(path):
zip = zipfile.ZipFile(path, 'r')
location = ZIPLocation(zip, dir)
for zip_name in zip.namelist():
#zip_name_dir, zip_name = os.path.split(zip_name)
#assert '\\' not in name_dir
#assert not name_dir.endswith('/')
if zip_name.startswith(dir):
if dir:
zip_name = zip_name[len(dir)+1:]
self._index_file(zip_name, location)
def _index_file(self, name, location):
if name not in self._index:
self._index[name] = location
def file(self, name, mode='rb'):
'''Load a resource.
:Parameters:
`name` : str
Filename of the resource to load.
`mode` : str
Combination of ``r``, ``w``, ``a``, ``b`` and ``t`` characters
with the meaning as for the builtin ``open`` function.
:rtype: file object
'''
self._require_index()
try:
location = self._index[name]
return location.open(name, mode)
except KeyError:
raise ResourceNotFoundException(name)
def location(self, name):
'''Get the location of a resource.
This method is useful for opening files referenced from a resource.
For example, an HTML file loaded as a resource might reference some
images. These images should be located relative to the HTML file, not
looked up individually in the loader's path.
:Parameters:
`name` : str
Filename of the resource to locate.
:rtype: `Location`
'''
self._require_index()
try:
return self._index[name]
except KeyError:
raise ResourceNotFoundException(name)
def add_font(self, name):
'''Add a font resource to the application.
Fonts not installed on the system must be added to pyglet before they
can be used with `font.load`. Although the font is added with
its filename using this function, it is loaded by specifying its
family name. For example::
resource.add_font('action_man.ttf')
action_man = font.load('Action Man')
:Parameters:
`name` : str
Filename of the font resource to add.
'''
self._require_index()
from pyglet import font
file = self.file(name)
font.add_file(file)
def _alloc_image(self, name):
file = self.file(name)
img = pyglet.image.load(name, file=file)
bin = self._get_texture_atlas_bin(img.width, img.height)
if bin is None:
return img.get_texture(True)
return bin.add(img)
def _get_texture_atlas_bin(self, width, height):
'''A heuristic for determining the atlas bin to use for a given image
size. Returns None if the image should not be placed in an atlas (too
big), otherwise the bin (a list of TextureAtlas).
'''
# Large images are not placed in an atlas
if width > 128 or height > 128:
return None
        # Group images with small height separately from those with larger
        # height (as the allocator can't stack within a single row).
bin_size = 1
if height > 32:
bin_size = 2
try:
bin = self._texture_atlas_bins[bin_size]
except KeyError:
bin = self._texture_atlas_bins[bin_size] = \
pyglet.image.atlas.TextureBin()
return bin
def image(self, name, flip_x=False, flip_y=False, rotate=0):
'''Load an image with optional transformation.
This is similar to `texture`, except the resulting image will be
packed into a `TextureBin` if it is an appropriate size for packing.
This is more efficient than loading images into separate textures.
:Parameters:
`name` : str
Filename of the image source to load.
`flip_x` : bool
If True, the returned image will be flipped horizontally.
`flip_y` : bool
If True, the returned image will be flipped vertically.
`rotate` : int
The returned image will be rotated clockwise by the given
number of degrees (a multiple of 90).
:rtype: `Texture`
:return: A complete texture if the image is large, otherwise a
`TextureRegion` of a texture atlas.
'''
self._require_index()
if name in self._cached_images:
identity = self._cached_images[name]
else:
identity = self._cached_images[name] = self._alloc_image(name)
if not rotate and not flip_x and not flip_y:
return identity
return identity.get_transform(flip_x, flip_y, rotate)
def animation(self, name, flip_x=False, flip_y=False, rotate=0):
'''Load an animation with optional transformation.
Animations loaded from the same source but with different
transformations will use the same textures.
:Parameters:
`name` : str
Filename of the animation source to load.
`flip_x` : bool
If True, the returned image will be flipped horizontally.
`flip_y` : bool
If True, the returned image will be flipped vertically.
`rotate` : int
The returned image will be rotated clockwise by the given
number of degrees (a multiple of 90).
:rtype: `Animation`
'''
self._require_index()
try:
identity = self._cached_animations[name]
except KeyError:
animation = pyglet.image.load_animation(name, self.file(name))
bin = self._get_texture_atlas_bin(animation.get_max_width(),
animation.get_max_height())
if bin:
animation.add_to_texture_bin(bin)
identity = self._cached_animations[name] = animation
if not rotate and not flip_x and not flip_y:
return identity
return identity.get_transform(flip_x, flip_y, rotate)
def get_cached_image_names(self):
'''Get a list of image filenames that have been cached.
This is useful for debugging and profiling only.
:rtype: list
:return: List of str
'''
self._require_index()
return self._cached_images.keys()
def get_cached_animation_names(self):
'''Get a list of animation filenames that have been cached.
This is useful for debugging and profiling only.
:rtype: list
:return: List of str
'''
self._require_index()
return self._cached_animations.keys()
def get_texture_bins(self):
'''Get a list of texture bins in use.
This is useful for debugging and profiling only.
:rtype: list
:return: List of `TextureBin`
'''
self._require_index()
return self._texture_atlas_bins.values()
def media(self, name, streaming=True):
'''Load a sound or video resource.
The meaning of `streaming` is as for `media.load`. Compressed
sources cannot be streamed (that is, video and compressed audio
cannot be streamed from a ZIP archive).
:Parameters:
`name` : str
Filename of the media source to load.
`streaming` : bool
True if the source should be streamed from disk, False if
it should be entirely decoded into memory immediately.
:rtype: `media.Source`
'''
self._require_index()
from pyglet import media
try:
location = self._index[name]
if isinstance(location, FileLocation):
# Don't open the file if it's streamed from disk -- AVbin
# needs to do it.
path = os.path.join(location.path, name)
return media.load(path, streaming=streaming)
else:
file = location.open(name)
return media.load(name, file=file, streaming=streaming)
except KeyError:
raise ResourceNotFoundException(name)
def texture(self, name):
'''Load a texture.
The named image will be loaded as a single OpenGL texture. If the
dimensions of the image are not powers of 2 a `TextureRegion` will
be returned.
:Parameters:
`name` : str
Filename of the image resource to load.
:rtype: `Texture`
'''
self._require_index()
if name in self._cached_textures:
return self._cached_textures[name]
file = self.file(name)
texture = pyglet.image.load(name, file=file).get_texture()
self._cached_textures[name] = texture
return texture
def html(self, name):
'''Load an HTML document.
:Parameters:
`name` : str
Filename of the HTML resource to load.
:rtype: `FormattedDocument`
'''
self._require_index()
file = self.file(name)
return pyglet.text.decode_html(file.read(), self.location(name))
def attributed(self, name):
'''Load an attributed text document.
See `pyglet.text.formats.attributed` for details on this format.
:Parameters:
`name` : str
Filename of the attribute text resource to load.
:rtype: `FormattedDocument`
'''
self._require_index()
file = self.file(name)
return pyglet.text.load(name, file, 'text/vnd.pyglet-attributed')
def text(self, name):
'''Load a plain text document.
:Parameters:
`name` : str
Filename of the plain text resource to load.
:rtype: `UnformattedDocument`
'''
self._require_index()
file = self.file(name)
return pyglet.text.load(name, file, 'text/plain')
def get_cached_texture_names(self):
'''Get the names of textures currently cached.
:rtype: list of str
'''
self._require_index()
return self._cached_textures.keys()
#: Default resource search path.
#:
#: Locations in the search path are searched in order and are always
#: case-sensitive. After changing the path you must call `reindex`.
#:
#: See the module documentation for details on the path format.
#:
#: :type: list of str
path = []
class _DefaultLoader(Loader):
def _get_path(self):
return path
def _set_path(self, value):
global path
path = value
path = property(_get_path, _set_path)
_default_loader = _DefaultLoader()
reindex = _default_loader.reindex
file = _default_loader.file
location = _default_loader.location
add_font = _default_loader.add_font
image = _default_loader.image
animation = _default_loader.animation
get_cached_image_names = _default_loader.get_cached_image_names
get_cached_animation_names = _default_loader.get_cached_animation_names
get_texture_bins = _default_loader.get_texture_bins
media = _default_loader.media
texture = _default_loader.texture
html = _default_loader.html
attributed = _default_loader.attributed
text = _default_loader.text
get_cached_texture_names = _default_loader.get_cached_texture_names
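
# Hedged usage sketch (not part of this module): the module-level functions above
# all delegate to the shared _DefaultLoader, so typical application code sets
# `path`, calls `reindex()`, and then loads resources by name. The search
# locations and the resource name below are placeholders, not real files.
def _demo_default_loader():
    global path
    path = ['assets', 'assets/images.zip']  # hypothetical search locations
    reindex()                               # rebuild the index after changing path
    return image('logo.png')                # hypothetical image, packed into an atlas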
|
|
from typing import Optional, Any, Dict, Text
from django.utils.translation import ugettext as _
from django.conf import settings
from django.contrib.auth import authenticate, update_session_auth_hash
from django.http import HttpRequest, HttpResponse
from django.shortcuts import redirect, render
from django.urls import reverse
from zerver.decorator import has_request_variables, \
zulip_login_required, REQ, human_users_only
from zerver.lib.actions import do_change_password, \
do_change_enter_sends, do_change_notification_settings, \
do_change_default_desktop_notifications, do_change_autoscroll_forever, \
do_regenerate_api_key, do_change_avatar_fields, do_set_user_display_setting, \
validate_email, do_change_user_email, do_start_email_change_process, \
check_change_full_name
from zerver.lib.avatar import avatar_url
from zerver.lib.send_email import send_email, FromAddress
from zerver.lib.i18n import get_available_language_codes
from zerver.lib.response import json_success, json_error
from zerver.lib.upload import upload_avatar_image
from zerver.lib.validator import check_bool, check_string
from zerver.lib.request import JsonableError
from zerver.lib.timezone import get_all_timezones
from zerver.models import UserProfile, Realm, name_changes_disabled, \
EmailChangeStatus
from confirmation.models import get_object_from_key, render_confirmation_key_error, \
ConfirmationKeyException, Confirmation
def confirm_email_change(request: HttpRequest, confirmation_key: str) -> HttpResponse:
try:
email_change_object = get_object_from_key(confirmation_key, Confirmation.EMAIL_CHANGE)
except ConfirmationKeyException as exception:
return render_confirmation_key_error(request, exception)
new_email = email_change_object.new_email
old_email = email_change_object.old_email
user_profile = email_change_object.user_profile
if user_profile.realm.email_changes_disabled:
raise JsonableError(_("Email address changes are disabled in this organization."))
do_change_user_email(user_profile, new_email)
context = {'realm': user_profile.realm, 'new_email': new_email}
send_email('zerver/emails/notify_change_in_email', to_email=old_email,
from_name="Zulip Account Security", from_address=FromAddress.SUPPORT,
context=context)
ctx = {
'new_email': new_email,
'old_email': old_email,
}
return render(request, 'confirmation/confirm_email_change.html', context=ctx)
@human_users_only
@has_request_variables
def json_change_ui_settings(
request: HttpRequest, user_profile: UserProfile,
autoscroll_forever: Optional[bool]=REQ(validator=check_bool, default=None),
default_desktop_notifications: Optional[bool]=REQ(validator=check_bool, default=None)
) -> HttpResponse:
result = {}
if autoscroll_forever is not None and \
user_profile.autoscroll_forever != autoscroll_forever:
do_change_autoscroll_forever(user_profile, autoscroll_forever)
result['autoscroll_forever'] = autoscroll_forever
if default_desktop_notifications is not None and \
user_profile.default_desktop_notifications != default_desktop_notifications:
do_change_default_desktop_notifications(user_profile, default_desktop_notifications)
result['default_desktop_notifications'] = default_desktop_notifications
return json_success(result)
@human_users_only
@has_request_variables
def json_change_settings(request: HttpRequest, user_profile: UserProfile,
full_name: Text=REQ(default=""),
email: Text=REQ(default=""),
old_password: Text=REQ(default=""),
new_password: Text=REQ(default=""),
confirm_password: Text=REQ(default="")) -> HttpResponse:
if not (full_name or new_password or email):
return json_error(_("No new data supplied"))
if new_password != "" or confirm_password != "":
if new_password != confirm_password:
return json_error(_("New password must match confirmation password!"))
if not authenticate(username=user_profile.email, password=old_password,
realm=user_profile.realm):
return json_error(_("Wrong password!"))
do_change_password(user_profile, new_password)
        # In Django 1.10, password changes invalidate sessions; see
# https://docs.djangoproject.com/en/1.10/topics/auth/default/#session-invalidation-on-password-change
# for details. To avoid this logging the user out of their own
# session (which would provide a confusing UX at best), we
# update the session hash here.
update_session_auth_hash(request, user_profile)
        # We also save the session to the DB immediately to mitigate
        # race conditions. In theory, there is still a race condition,
        # and to avoid it completely we would have to use some kind of
        # mutex lock in `django.contrib.auth.get_user`, where the session
        # is verified. To make that lock work we would have to control
        # the AuthenticationMiddleware, which is currently controlled
        # by Django.
request.session.save()
result = {} # type: Dict[str, Any]
new_email = email.strip()
if user_profile.email != email and new_email != '':
if user_profile.realm.email_changes_disabled:
return json_error(_("Email address changes are disabled in this organization."))
error, skipped = validate_email(user_profile, new_email)
if error:
return json_error(error)
if skipped:
return json_error(skipped)
do_start_email_change_process(user_profile, new_email)
result['account_email'] = _("Check your email for a confirmation link. ")
if user_profile.full_name != full_name and full_name.strip() != "":
if name_changes_disabled(user_profile.realm):
            # Failing silently is fine -- they can't do it through the UI, so
            # they'd have to be trying to break the rules.
pass
else:
# Note that check_change_full_name strips the passed name automatically
result['full_name'] = check_change_full_name(user_profile, full_name, user_profile)
return json_success(result)
@human_users_only
@has_request_variables
def update_display_settings_backend(
request: HttpRequest, user_profile: UserProfile,
twenty_four_hour_time: Optional[bool]=REQ(validator=check_bool, default=None),
high_contrast_mode: Optional[bool]=REQ(validator=check_bool, default=None),
night_mode: Optional[bool]=REQ(validator=check_bool, default=None),
        default_language: Optional[str]=REQ(validator=check_string, default=None),
left_side_userlist: Optional[bool]=REQ(validator=check_bool, default=None),
emoji_alt_code: Optional[bool]=REQ(validator=check_bool, default=None),
emojiset: Optional[str]=REQ(validator=check_string, default=None),
timezone: Optional[str]=REQ(validator=check_string, default=None)) -> HttpResponse:
if (default_language is not None and
default_language not in get_available_language_codes()):
raise JsonableError(_("Invalid language '%s'" % (default_language,)))
if (timezone is not None and
timezone not in get_all_timezones()):
raise JsonableError(_("Invalid timezone '%s'" % (timezone,)))
if (emojiset is not None and
emojiset not in UserProfile.emojiset_choices()):
raise JsonableError(_("Invalid emojiset '%s'" % (emojiset,)))
request_settings = {k: v for k, v in list(locals().items()) if k in user_profile.property_types}
result = {} # type: Dict[str, Any]
for k, v in list(request_settings.items()):
if v is not None and getattr(user_profile, k) != v:
do_set_user_display_setting(user_profile, k, v)
result[k] = v
return json_success(result)
@human_users_only
@has_request_variables
def json_change_notify_settings(
request: HttpRequest, user_profile: UserProfile,
enable_stream_desktop_notifications: Optional[bool]=REQ(validator=check_bool, default=None),
enable_stream_email_notifications: Optional[bool]=REQ(validator=check_bool, default=None),
enable_stream_push_notifications: Optional[bool]=REQ(validator=check_bool, default=None),
enable_stream_sounds: Optional[bool]=REQ(validator=check_bool, default=None),
enable_desktop_notifications: Optional[bool]=REQ(validator=check_bool, default=None),
enable_sounds: Optional[bool]=REQ(validator=check_bool, default=None),
enable_offline_email_notifications: Optional[bool]=REQ(validator=check_bool, default=None),
enable_offline_push_notifications: Optional[bool]=REQ(validator=check_bool, default=None),
enable_online_push_notifications: Optional[bool]=REQ(validator=check_bool, default=None),
enable_digest_emails: Optional[bool]=REQ(validator=check_bool, default=None),
pm_content_in_desktop_notifications: Optional[bool]=REQ(validator=check_bool, default=None)
) -> HttpResponse:
result = {}
# Stream notification settings.
req_vars = {k: v for k, v in list(locals().items()) if k in user_profile.notification_setting_types}
for k, v in list(req_vars.items()):
if v is not None and getattr(user_profile, k) != v:
do_change_notification_settings(user_profile, k, v)
result[k] = v
return json_success(result)
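
# Hedged illustration (not part of the original module): both settings endpoints
# above filter their own local variables against a whitelist of known setting
# names before applying changes. This standalone sketch mirrors that pattern;
# `known_settings` stands in for UserProfile.property_types /
# notification_setting_types, and the values are made up.
def _demo_locals_filtering(known_settings=('night_mode', 'timezone')):
    night_mode = True       # pretend these arrived as REQ() parameters
    timezone = None         # None means "not supplied" and is skipped by callers
    unrelated = 'ignored'   # not a recognized setting, so it is filtered out
    return {k: v for k, v in list(locals().items()) if k in known_settings}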
def set_avatar_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
if len(request.FILES) != 1:
return json_error(_("You must upload exactly one avatar."))
user_file = list(request.FILES.values())[0]
if ((settings.MAX_AVATAR_FILE_SIZE * 1024 * 1024) < user_file.size):
return json_error(_("Uploaded file is larger than the allowed limit of %s MB") % (
settings.MAX_AVATAR_FILE_SIZE))
upload_avatar_image(user_file, user_profile, user_profile)
do_change_avatar_fields(user_profile, UserProfile.AVATAR_FROM_USER)
user_avatar_url = avatar_url(user_profile)
json_result = dict(
avatar_url = user_avatar_url
)
return json_success(json_result)
def delete_avatar_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
do_change_avatar_fields(user_profile, UserProfile.AVATAR_FROM_GRAVATAR)
gravatar_url = avatar_url(user_profile)
json_result = dict(
avatar_url = gravatar_url
)
return json_success(json_result)
# We don't use @human_users_only here, because there are use cases for
# a bot regenerating its own API key.
@has_request_variables
def regenerate_api_key(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
do_regenerate_api_key(user_profile, user_profile)
json_result = dict(
api_key = user_profile.api_key
)
return json_success(json_result)
@human_users_only
@has_request_variables
def change_enter_sends(request: HttpRequest, user_profile: UserProfile,
enter_sends: bool=REQ(validator=check_bool)) -> HttpResponse:
do_change_enter_sends(user_profile, enter_sends)
return json_success()
|
|
#!/usr/bin/python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for implementing the Coordinate search."""
# coding=utf-8
import os
from string import Template
import sys
from search.common import exceptions
from search.common import geconstants
from search.common import utils
from search.plugin import coordinate_transform
class CoordinateSearch(object):
"""Class for performing the Coordinate search.
Coordinate search supports the following formats:
1. Decimal Degrees (DD)
2. Degrees Minutes Seconds (DMS)
3. Degrees Decimal Minutes (DDM)
4. Military Grid Reference System (MGRS)
5. Universal Transverse Mercator (UTM)
Coordinate search transforms coordinates from DMS, DDM, UTM, MGRS formats to
DD, validates the coordinates and sends the response back to the client.
Depending on the client type, KML or JSONP formats are supported.
"""
NUM_OF_COORDS_IN_LAT_LNG_FORMAT = 2
NUM_OF_COORDS_IN_MGRS_FORMAT = 1
def __init__(self):
"""Inits CoordinateSearch.
Initializes the logger "ge_search".
Initializes templates for kml, placemark templates for KML/JSONP outputs.
"""
self.utils = utils.SearchUtils()
self._transform = coordinate_transform.CoordinateTransform()
configs = self.utils.GetConfigs(
os.path.join(geconstants.SEARCH_CONFIGS_DIR, "CoordinateSearch.conf"))
self._jsonp_call = self.utils.jsonp_functioncall
self._geom = """
<name>%s</name>
<styleUrl>%s</styleUrl>
<Point>
<coordinates>%s,%s</coordinates>
</Point>\
"""
self._json_geom = """
{
"Point": {
"coordinates": "%s,%s"
}
}
"""
self._kml = """
<kml xmlns="http://www.opengis.net/kml/2.2"
xmlns:gx="http://www.google.com/kml/ext/2.2"
xmlns:kml="http://www.opengis.net/kml/2.2"
xmlns:atom="http://www.w3.org/2005/Atom">
<Folder>
<name>Coordinate Search Results</name>
<open>1</open>
<Style id="placemark_label">\
${style}
</Style>\
${placemark}
</Folder>
</kml>
"""
self._kml_template = Template(self._kml)
self._placemark_template = self.utils.placemark_template
self._json_template = self.utils.json_template
self._json_placemark_template = self.utils.json_placemark_template
style_template = self.utils.style_template
self.coordinates_in_lat_lng_format_ = ["DD", "DMS", "DDM"]
self.logger = self.utils.logger
self._style = style_template.substitute(
balloonBgColor=configs.get("balloonstyle.bgcolor"),
balloonTextColor=configs.get("balloonstyle.textcolor"),
balloonText=configs.get("balloonstyle.text"),
iconStyleScale=configs.get("iconstyle.scale"),
iconStyleHref=configs.get("iconstyle.href"),
lineStyleColor=configs.get("linestyle.color"),
lineStyleWidth=configs.get("linestyle.width"),
polyStyleColor=configs.get("polystyle.color"),
polyStyleColorMode=configs.get("polystyle.colormode"),
polyStyleFill=configs.get("polystyle.fill"),
polyStyleOutline=configs.get("polystyle.outline"),
listStyleHref=configs.get("iconstyle.href"))
def HandleSearchRequest(self, environ):
"""Fetches the search tokens from form and performs the coordinate search.
Args:
environ: A list of environment variables as supplied by the
WSGI interface to the coordinate search application interface.
Returns:
search_results: A KML/JSONP formatted string which contains search results.
Raises:
BadQueryException: if the search query is invalid.
"""
search_results = ""
# Fetch all the attributes provided by the user.
parameters = self.utils.GetParameters(environ)
response_type = self.utils.GetResponseType(environ)
# Retrieve the function call back name for JSONP response.
self.f_callback = self.utils.GetCallback(parameters)
original_query = self.utils.GetValue(parameters, "q")
if not original_query:
msg = "Empty search query received."
self.logger.error(msg)
raise exceptions.BadQueryException(msg)
search_status, search_results = self.DoSearch(original_query, response_type)
if not search_status:
folder_name = "Search returned no results."
search_results = self.utils.NoSearchResults(
folder_name, self._style, response_type, self.f_callback)
return (search_results, response_type)
def DoSearch(self, search_query, response_type):
"""Performs the coordinate search.
Args:
search_query: A string containing the search coordinates as
entered by the user.
response_type: Response type can be KML or JSONP, depending on the client.
Returns:
search_results: A KML/JSONP formatted string which contains search results.
Raises:
BadQueryException: if the search query is invalid.
"""
coordinate_type = ""
search_results = ""
input_coordinates = []
decimal_degrees_coordinates = []
search_tokens = self.utils.SearchTokensFromString(search_query)
self.logger.debug("coordinates: %s", ",".join(search_tokens))
input_coordinates = self._transform.GetInputCoordinates(
",".join(search_tokens))
number_of_coordinates = len(input_coordinates)
if number_of_coordinates == 0:
msg = "Incomplete search query %s submitted" % search_query
self.logger.error(msg)
raise exceptions.BadQueryException(msg)
coordinate_type = self._transform.GetInputType(input_coordinates)
self.logger.debug("Coordinate type is %s.", coordinate_type)
if coordinate_type in self.coordinates_in_lat_lng_format_:
reqd_num_of_coordinates = CoordinateSearch.NUM_OF_COORDS_IN_LAT_LNG_FORMAT
else:
reqd_num_of_coordinates = CoordinateSearch.NUM_OF_COORDS_IN_MGRS_FORMAT
if number_of_coordinates > reqd_num_of_coordinates:
self.logger.warning(
"extra search parameters ignored: %s", ",".join(
input_coordinates[reqd_num_of_coordinates:]))
input_coordinates = input_coordinates[:reqd_num_of_coordinates]
elif number_of_coordinates < reqd_num_of_coordinates:
msg = "Incomplete search query %s submitted" % search_query
self.logger.error(msg)
raise exceptions.BadQueryException(msg)
decimal_degrees_coordinates = self._transform.TransformToDecimalDegrees(
coordinate_type, input_coordinates)
search_results = self.ConstructResponse(
response_type, decimal_degrees_coordinates)
search_status = True if search_results else False
return search_status, search_results
def ConstructKMLResponse(self, latitude, longitude):
"""Prepares KML response.
KML response has the below format:
<kml>
<Folder>
<name/>
<StyleURL>
---
</StyleURL>
<Point>
<coordinates/>
</Point>
</Folder>
</kml>
Args:
      latitude: latitude in Decimal Degrees format.
      longitude: longitude in Decimal Degrees format.
Returns:
kml_response: KML formatted response.
"""
placemark = ""
kml_response = ""
name = "%s, %s" % (latitude, longitude)
style_url = "#placemark_label"
geom = self._geom % (name, style_url, str(longitude), str(latitude))
placemark = self._placemark_template.substitute(geom=geom)
kml_response = self._kml_template.substitute(
style=self._style, placemark=placemark)
self.logger.info("KML response successfully formatted")
return kml_response
def ConstructJSONPResponse(self, latitude, longitude):
"""Prepares JSONP response.
{
"Folder": {
"name": "X,Y",
"Style": {
"IconStyle": {"scale": "1" },
"LineStyle": {
"color": "7fffff00",
"width": "5" },
"PolyStyle": {
"color": "7f66ffff",
"fill": "1",
"outline": "1" } },
"Placemark": {
"Point": {
"coordinates": "X,Y" } }
}
}
Args:
      latitude: latitude in Decimal Degrees format.
      longitude: longitude in Decimal Degrees format.
Returns:
jsonp_response: JSONP formatted response.
"""
placemark = ""
json_response = ""
jsonp_response = ""
folder_name = "%s, %s" % (latitude, longitude)
json_geom = self._json_geom % (latitude, longitude)
placemark = self._json_placemark_template.substitute(
geom=json_geom)
json_response = self._json_template.substitute(
foldername=folder_name, json_placemark=placemark)
# Escape single quotes from json_response.
json_response = json_response.replace("'", "\\'")
jsonp_response = self._jsonp_call % (self.f_callback, json_response)
self.logger.info("JSONP response successfully formatted")
return jsonp_response
def ConstructResponse(self, response_type, decimal_degrees_coordinates):
"""Construct the response based on response_type.
Args:
response_type: Response type can be KML or JSONP, depending on the client.
decimal_degrees_coordinates: List of coordinates in DD(Decimal Degrees)
format.
Returns:
search_results: A KML/JSONP formatted string which contains search results.
"""
search_results = ""
assert response_type in self.utils.output_formats, (
self.logger.error("Invalid response type %s", response_type))
if response_type == "KML":
search_results = self.ConstructKMLResponse(
decimal_degrees_coordinates[0], decimal_degrees_coordinates[1])
elif response_type == "JSONP":
search_results = self.ConstructJSONPResponse(
decimal_degrees_coordinates[0], decimal_degrees_coordinates[1])
return search_results
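
# Hedged usage sketch (not part of the original plugin): how DoSearch() might be
# exercised directly with a Decimal Degrees query. The query string below is a
# made-up example, and running this requires the search.* packages plus the
# CoordinateSearch.conf configuration to be installed.
def _demo_coordinate_search():
    searcher = CoordinateSearch()
    status, results = searcher.DoSearch("37.422, -122.084", "KML")
    return status, results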
def main(coords, response_type):
gepobj = CoordinateSearch()
gepobj.DoSearch(coords, response_type)
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2])
|
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
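        # Wraps the sectionless bitcoin.conf so SafeConfigParser can parse it:
        # readline() first returns a synthetic "[all]" section header, then
        # strips any trailing '#' comment from each subsequent line.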
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 39332 if testnet else 29332
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
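
# A minimal, self-contained sketch (not part of the original script) showing how
# select_coins() greedily accumulates inputs until the needed amount is covered.
# The txids and amounts below are made up; nothing here talks to bitcoind.
def _demo_select_coins():
    example_inputs = [
        {"txid": "aa" * 32, "vout": 0, "amount": Decimal("0.5")},
        {"txid": "bb" * 32, "vout": 1, "amount": Decimal("0.8")},
    ]
    outputs, change = select_coins(Decimal("1.0"), example_inputs)
    # Both inputs are consumed (0.5 + 0.8 = 1.3), leaving 0.3 change.
    assert change == Decimal("0.3")
    return outputs, change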
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
# This script generates C++ code for compiler diagnostics.
import argparse
import os
import shlex
def writefile(path, contents):
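    # Only rewrite the file when its contents actually changed, so unchanged
    # generated headers keep their timestamps and don't trigger rebuilds.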
try:
with open(path, 'r') as f:
existing = f.read()
except OSError:
existing = ''
if existing != contents:
with open(path, 'w') as f:
f.write(contents)
def main():
parser = argparse.ArgumentParser(description='Diagnostic source generator')
parser.add_argument('--outDir', default=os.getcwd(), help='Output directory')
parser.add_argument('--srcDir', help='Source directory to search for usages')
parser.add_argument('--incDir', help='Include directory to search for usages')
args = parser.parse_args()
ourdir = os.path.dirname(os.path.realpath(__file__))
inf = open(os.path.join(ourdir, "diagnostics.txt"))
headerdir = os.path.join(args.outDir, 'slang', 'diagnostics')
try:
os.makedirs(headerdir)
except OSError:
pass
diags = {}
groups = []
diaglist = []
subsystem = 'General'
curgroup = None
def parsegroup(elems):
nonlocal curgroup
for e in elems:
if e == '}':
groups.append(curgroup)
curgroup = None
break
curgroup[1].append(e)
for line in [x.strip('\n') for x in inf]:
if not line or line.startswith('//'):
continue
parts = shlex.split(line)
if curgroup:
parsegroup(parts)
elif parts[0] == 'subsystem':
subsystem = parts[1]
if subsystem not in diags:
diags[subsystem] = []
elif parts[0] == 'group':
curgroup = (parts[1], [])
assert(parts[2] == '=')
assert(parts[3] == '{')
parsegroup(parts[4:])
else:
sev = parts[0]
if sev == 'warning':
diags[subsystem].append(('Warning', parts[2], parts[3], parts[1]))
diaglist.append(parts[2])
elif sev == 'error':
diags[subsystem].append(('Error', parts[1], parts[2], ''))
diaglist.append(parts[1])
elif sev == 'note':
diags[subsystem].append(('Note', parts[1], parts[2], ''))
diaglist.append(parts[1])
else:
raise Exception('Invalid entry: {}'.format(line))
for k,v in sorted(diags.items()):
createheader(os.path.join(headerdir, k + "Diags.h"), k, v)
createsource(os.path.join(args.outDir, "DiagCode.cpp"), diags, groups)
createallheader(os.path.join(headerdir, "AllDiags.h"), diags)
doCheck = False
if doCheck:
diaglist = checkDiags(args.srcDir, diaglist)
diaglist = checkDiags(args.incDir, diaglist)
reportUnused(diaglist)
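
# Hedged sketch (not part of the original generator): how a single 'warning'
# entry from diagnostics.txt is tokenized and turned into the
# (severity, name, message, option) tuple built in main() above.
# The example line is hypothetical.
def _demo_parse_warning_line():
    line = 'warning unused-diag UnusedDiag "this diagnostic is never referenced"'
    parts = shlex.split(line)
    # parts == ['warning', 'unused-diag', 'UnusedDiag',
    #           'this diagnostic is never referenced']
    return ('Warning', parts[2], parts[3], parts[1])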
def createheader(path, subsys, diags):
output = '''//------------------------------------------------------------------------------
//! @file {}Diags.h
//! @brief Generated diagnostic enums for the {} subsystem
//
// File is under the MIT license; see LICENSE for details
//------------------------------------------------------------------------------
#pragma once
#include "slang/diagnostics/Diagnostics.h"
namespace slang::diag {{
'''.format(subsys, subsys)
index = 0
for d in diags:
output += 'inline constexpr DiagCode {}(DiagSubsystem::{}, {});\n'.format(d[1], subsys, index)
index += 1
output += '''
}
'''
writefile(path, output)
def createsource(path, diags, groups):
output = '''//------------------------------------------------------------------------------
// DiagCode.cpp
// Generated diagnostic helpers
//
// File is under the MIT license; see LICENSE for details
//------------------------------------------------------------------------------
#include "slang/diagnostics/AllDiags.h"
#include <ostream>
#include <flat_hash_map.hpp>
namespace slang {
static const flat_hash_map<DiagCode, std::tuple<string_view, string_view, DiagnosticSeverity, string_view>> data = {
'''
for k,v in sorted(diags.items()):
for d in v:
output += ' {{diag::{}, std::make_tuple("{}"sv, "{}"sv, DiagnosticSeverity::{}, "{}"sv)}},\n'.format(
                d[1], d[1], d[2], d[0], d[3] if len(d) > 3 else '')
output += '''};
static const flat_hash_map<string_view, std::vector<DiagCode>> optionMap = {
'''
optionMap = {}
for k,v in sorted(diags.items()):
for d in v:
name = d[3]
if not name:
continue
if name in optionMap:
optionMap[name].append(d[1])
else:
optionMap[name] = [d[1]]
for key in sorted(optionMap):
vals = optionMap[key]
valstr = ', '.join(["diag::{}".format(v) for v in vals])
output += ' {{"{}"sv, {{ {} }}}},\n'.format(key, valstr)
output += '''};
static const flat_hash_map<string_view, DiagGroup> groupMap = {
'''
for g in sorted(groups):
elems = []
for e in g[1]:
elems.extend(optionMap[e])
elems = ', '.join('diag::{}'.format(e) for e in elems)
output += ' {{"{}"sv, DiagGroup("{}", {{ {} }})}},\n'.format(g[0], g[0], elems)
output += '''};
std::ostream& operator<<(std::ostream& os, DiagCode code) {
os << toString(code);
return os;
}
string_view toString(DiagCode code) {
if (auto it = data.find(code); it != data.end())
return std::get<0>(it->second);
return "<user-diag>"sv;
}
string_view getDefaultMessage(DiagCode code) {
if (auto it = data.find(code); it != data.end())
return std::get<1>(it->second);
return ""sv;
}
DiagnosticSeverity getDefaultSeverity(DiagCode code) {
if (auto it = data.find(code); it != data.end())
return std::get<2>(it->second);
return DiagnosticSeverity::Ignored;
}
string_view getDefaultOptionName(DiagCode code) {
if (auto it = data.find(code); it != data.end())
return std::get<3>(it->second);
return ""sv;
}
span<const DiagCode> findDiagsFromOptionName(string_view name) {
if (auto it = optionMap.find(name); it != optionMap.end())
return it->second;
return {};
}
const DiagGroup* findDefaultDiagGroup(string_view name) {
if (auto it = groupMap.find(name); it != groupMap.end())
return &it->second;
return nullptr;
}
}
'''
writefile(path, output)
def createallheader(path, diags):
output = '''//------------------------------------------------------------------------------
//! @file AllDiags.h
//! @brief Combined header that includes all subsystem-specific diagnostic headers
//
// File is under the MIT license; see LICENSE for details
//------------------------------------------------------------------------------
#pragma once
'''
for k in sorted(diags.keys()):
output += '#include "slang/diagnostics/{}Diags.h"\n'.format(k)
output += '\n'
writefile(path, output)
def checkDiags(path, diags):
import glob
for ext in ('cpp', 'h'):
for file in glob.glob(path + "/**/*." + ext, recursive=True):
with open(file, 'r') as f:
text = f.read()
diags = [d for d in diags if not ('::' + d) in text]
return diags
def reportUnused(diags):
for d in diags:
print("warning: '{}' is unused".format(d))
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nailgun
import nailgun.rpc as rpc
import time
import mock
from sqlalchemy import sql
from nailgun import consts
from nailgun import objects
from nailgun.consts import ACTION_TYPES
from nailgun.consts import NODE_STATUSES
from nailgun.consts import TASK_NAMES
from nailgun.consts import TASK_STATUSES
from nailgun.settings import settings
from nailgun.db.sqlalchemy import models
from nailgun.errors import errors
from nailgun.task.helpers import TaskHelper
from nailgun.task import manager
from nailgun.task.task import DeletionTask
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import fake_tasks
from nailgun.utils import reverse
class TestTaskManagers(BaseIntegrationTest):
def tearDown(self):
self._wait_for_threads()
super(TestTaskManagers, self).tearDown()
def check_node_presence(self, nodes_count):
return self.db.query(models.Node).count() == nodes_count
@fake_tasks(override_state={"progress": 100, "status": "ready"})
def test_deployment_task_managers(self):
self.env.create(
nodes_kwargs=[
{"pending_addition": True},
{"pending_deletion": True,
'status': NODE_STATUSES.provisioned},
]
)
supertask = self.env.launch_deployment()
self.env.refresh_nodes()
self.assertEqual(supertask.name, TASK_NAMES.deploy)
self.assertIn(
supertask.status,
(TASK_STATUSES.running, TASK_STATUSES.ready)
)
        # we have four subtasks here:
# repository check
# deletion
# provision
# deployment
self.assertEqual(len(supertask.subtasks), 4)
        # provisioning task has less weight than deployment
provision_task = filter(
lambda t: t.name == TASK_NAMES.provision, supertask.subtasks)[0]
self.assertEqual(provision_task.weight, 0.4)
wait_nodes = [self.env.nodes[0]]
self.env.wait_for_nodes_status(wait_nodes, NODE_STATUSES.provisioning)
self.env.wait_ready(
supertask,
60,
u"Successfully removed 1 node(s). No errors occurred; "
"Deployment of environment '{0}' is done".format(
self.env.clusters[0].name
)
)
self.env.wait_for_nodes_status(wait_nodes, NODE_STATUSES.ready)
self.env.refresh_nodes()
for n in filter(
lambda n: n.cluster_id == self.env.clusters[0].id,
self.env.nodes
):
self.assertEqual(n.status, NODE_STATUSES.ready)
self.assertEqual(n.progress, 100)
@fake_tasks(fake_rpc=False, mock_rpc=True)
def test_write_action_logs(self, _):
self.env.create(
nodes_kwargs=[
{"pending_addition": True},
{"pending_addition": True},
{"pending_deletion": True}
]
)
deployment_task = self.env.launch_deployment()
for subtask in deployment_task.subtasks:
action_log = objects.ActionLog.get_by_kwargs(
task_uuid=subtask.uuid,
action_name=subtask.name
)
self.assertIsNotNone(action_log)
self.assertEqual(subtask.parent_id,
action_log.additional_info['parent_task_id'])
self.assertIn(action_log.action_name, TASK_NAMES)
self.assertEqual(action_log.action_type, ACTION_TYPES.nailgun_task)
if action_log.additional_info["operation"] in \
(TASK_NAMES.check_networks,
TASK_NAMES.check_before_deployment):
self.assertIsNotNone(action_log.end_timestamp)
self.assertIn("ended_with_status", action_log.additional_info)
self.assertIn("message", action_log.additional_info)
self.assertEqual(action_log.additional_info["message"], "")
self.assertIn("output", action_log.additional_info)
def test_update_action_logs_after_empty_cluster_deletion(self):
self.env.create_cluster()
self.env.delete_environment()
al = objects.ActionLogCollection.filter_by(
None, action_type=consts.ACTION_TYPES.nailgun_task).first()
self.assertIsNotNone(al.end_timestamp)
self.assertEqual(al.additional_info["ended_with_status"],
consts.TASK_STATUSES.ready)
self.assertEqual(al.additional_info["message"], "")
self.assertEqual(al.additional_info["output"], {})
def test_check_before_deployment_with_error(self):
self.env.create(
nodes_kwargs=[
{"pending_addition": True, "online": False}
]
)
supertask = self.env.launch_deployment()
action_logs = objects.ActionLogCollection.filter_by(
None, action_type=consts.ACTION_TYPES.nailgun_task).all()
self.assertEqual(len(action_logs), 4)
for al in action_logs:
self.assertEqual(al.action_type, ACTION_TYPES.nailgun_task)
if al.additional_info["operation"] == TASK_NAMES.deploy:
self.assertIsNone(al.additional_info["parent_task_id"])
self.assertEqual(al.task_uuid, supertask.uuid)
else:
self.assertIsNotNone(al.end_timestamp)
self.assertIn("ended_with_status", al.additional_info)
self.assertIn("message", al.additional_info)
self.assertEqual(al.additional_info["message"], "")
self.assertIn("output", al.additional_info)
if (
al.additional_info["operation"] ==
TASK_NAMES.check_networks
):
self.assertEqual(al.additional_info["ended_with_status"],
TASK_STATUSES.ready)
self.assertEqual(al.additional_info["parent_task_id"],
supertask.id)
elif (
al.additional_info["operation"] ==
TASK_NAMES.check_before_deployment
):
self.assertEqual(al.additional_info["ended_with_status"],
TASK_STATUSES.error)
self.assertEqual(al.additional_info["parent_task_id"],
supertask.id)
@fake_tasks(fake_rpc=False, mock_rpc=False)
@mock.patch('nailgun.rpc.cast')
def test_do_not_send_node_to_orchestrator_which_has_status_discover(
self, _):
self.env.create(
nodes_kwargs=[
{'pending_deletion': True, 'status': 'discover'}])
self.env.launch_deployment()
args, kwargs = nailgun.task.manager.rpc.cast.call_args
self.assertEqual(len(args[1]['args']['nodes']), 0)
self.env.refresh_nodes()
for n in self.env.nodes:
self.assertEqual(len(self.env.nodes), 0)
@fake_tasks(fake_rpc=False, mock_rpc=False)
@mock.patch('nailgun.rpc.cast')
def test_send_to_orchestrator_offline_nodes(self, _):
self.env.create(
nodes_kwargs=[
{'pending_deletion': True,
'status': 'ready',
'online': False}])
self.env.launch_deployment()
args, kwargs = nailgun.task.manager.rpc.cast.call_args
self.assertEqual(len(args[1]['args']['nodes']), 1)
@fake_tasks()
def test_do_not_redeploy_nodes_in_ready_status(self):
self.env.create(nodes_kwargs=[
{"pending_addition": True},
{"pending_addition": True, 'roles': ['compute']}])
cluster_db = self.env.clusters[0]
# Generate ips, fqdns
objects.NodeCollection.prepare_for_deployment(cluster_db.nodes)
# First node with status ready
        # should not be redeployed
self.env.nodes[0].status = 'ready'
self.env.nodes[0].pending_addition = False
self.db.commit()
objects.Cluster.clear_pending_changes(cluster_db)
supertask = self.env.launch_deployment()
self.assertEqual(supertask.name, 'deploy')
self.assertIn(supertask.status, ('running', 'ready'))
self.assertEqual(self.env.nodes[0].status, 'ready')
self.env.wait_for_nodes_status([self.env.nodes[1]], 'provisioning')
self.env.wait_ready(supertask)
self.env.refresh_nodes()
self.assertEqual(self.env.nodes[1].status, 'ready')
self.assertEqual(self.env.nodes[1].progress, 100)
@fake_tasks()
def test_deployment_fails_if_node_offline(self):
cluster = self.env.create_cluster(api=True)
self.env.create_node(
cluster_id=cluster['id'],
roles=["controller"],
pending_addition=True)
offline_node = self.env.create_node(
cluster_id=cluster['id'],
roles=["compute"],
online=False,
name="Offline node",
pending_addition=True)
self.env.create_node(
cluster_id=cluster['id'],
roles=["compute"],
pending_addition=True)
supertask = self.env.launch_deployment()
self.env.wait_error(
supertask,
5,
'Nodes "{0}" are offline. Remove them from environment '
'and try again.'.format(offline_node.full_name)
)
# Do not move cluster to error state
# in case if cluster new and before
# validation failed
self.assertEqual(self.env.clusters[0].status, 'new')
@fake_tasks()
def test_deployment_fails_if_node_to_redeploy_is_offline(self):
cluster = self.env.create_cluster(
api=True,
status=consts.CLUSTER_STATUSES.operational)
offline_node = self.env.create_node(
cluster_id=cluster['id'],
roles=["controller"],
online=False,
name="Offline node to be redeployed",
status=consts.NODE_STATUSES.ready)
self.env.create_node(
cluster_id=cluster['id'],
roles=["controller"],
pending_addition=True)
self.env.create_node(
cluster_id=cluster['id'],
roles=["compute"],
pending_addition=True)
supertask = self.env.launch_deployment()
self.env.wait_error(
supertask,
5,
'Nodes "{0}" are offline. Remove them from environment '
'and try again.'.format(offline_node.full_name)
)
self.assertEqual(self.env.clusters[0].status, 'error')
@fake_tasks(override_state={"progress": 100, "status": "ready"})
def test_redeployment_works(self):
self.env.create(
nodes_kwargs=[
{"pending_addition": True},
{"pending_addition": True},
{"pending_addition": True},
{"roles": ["compute"], "pending_addition": True}
]
)
supertask = self.env.launch_deployment()
self.env.wait_ready(supertask, 60)
self.env.refresh_nodes()
self.env.create_node(
cluster_id=self.env.clusters[0].id,
roles=["controller"],
pending_addition=True
)
supertask = self.env.launch_deployment()
self.env.wait_ready(supertask, 60)
self.env.refresh_nodes()
for n in self.env.nodes:
self.assertEqual(n.status, 'ready')
self.assertEqual(n.progress, 100)
def test_deletion_empty_cluster_task_manager(self):
cluster = self.env.create_cluster(api=True)
resp = self.app.delete(
reverse(
'ClusterHandler',
kwargs={'obj_id': self.env.clusters[0].id}),
headers=self.default_headers
)
self.assertEqual(202, resp.status_code)
timer = time.time()
timeout = 15
clstr = self.db.query(models.Cluster).get(self.env.clusters[0].id)
while clstr:
time.sleep(1)
try:
self.db.refresh(clstr)
except Exception:
break
if time.time() - timer > timeout:
raise Exception("Cluster deletion seems to be hanged")
notification = self.db.query(models.Notification)\
.filter(models.Notification.topic == "done")\
.filter(models.Notification.message == "Environment '%s' and all "
"its nodes are deleted" % cluster["name"]).first()
self.assertIsNotNone(notification)
tasks = self.db.query(models.Task).all()
self.assertEqual(tasks, [])
@fake_tasks()
def test_deletion_cluster_task_manager(self):
self.env.create(
nodes_kwargs=[
{"status": "ready", "progress": 100},
{"roles": ["compute"], "status": "ready", "progress": 100},
{"roles": ["compute"], "pending_addition": True},
]
)
cluster_id = self.env.clusters[0].id
cluster_name = self.env.clusters[0].name
resp = self.app.delete(
reverse(
'ClusterHandler',
kwargs={'obj_id': cluster_id}),
headers=self.default_headers
)
self.assertEqual(202, resp.status_code)
timer = time.time()
timeout = 15
clstr = self.db.query(models.Cluster).get(cluster_id)
while clstr:
time.sleep(1)
try:
self.db.refresh(clstr)
except Exception:
break
if time.time() - timer > timeout:
raise Exception("Cluster deletion seems to be hanged")
notification = self.db.query(models.Notification)\
.filter(models.Notification.topic == "done")\
.filter(models.Notification.message == "Environment '%s' and all "
"its nodes are deleted" % cluster_name).first()
self.assertIsNotNone(notification)
tasks = self.db.query(models.Task).all()
self.assertEqual(tasks, [])
@fake_tasks(tick_interval=10, tick_count=5)
def test_deletion_clusters_one_by_one(self):
self.env.create(
nodes_kwargs=[
{"roles": ["compute"], "status": "ready", "progress": 100},
{"roles": ["compute"], "status": "ready", "progress": 100},
{"roles": ["compute"], "status": "ready", "progress": 100},
{"roles": ["controller"], "status": "ready", "progress": 100},
{"roles": ["controller"], "status": "ready", "progress": 100},
{"roles": ["cinder"], "status": "ready", "progress": 100},
]
)
cluster1_id = self.env.clusters[0].id
self.env.create_cluster(api=True)
cluster2_id = self.env.clusters[1].id
cluster_names = [cluster.name for cluster in self.env.clusters]
resp = self.app.delete(
reverse(
'ClusterHandler',
kwargs={'obj_id': cluster1_id}),
headers=self.default_headers
)
self.assertEqual(202, resp.status_code)
resp = self.app.delete(
reverse(
'ClusterHandler',
kwargs={'obj_id': cluster2_id}),
headers=self.default_headers
)
self.assertEqual(202, resp.status_code)
timer = time.time()
timeout = 15
clstr1 = self.db.query(models.Cluster).get(cluster1_id)
clstr2 = self.db.query(models.Cluster).get(cluster2_id)
while clstr1 or clstr2:
time.sleep(1)
try:
self.db.refresh(clstr1 or clstr2)
except Exception:
break
if time.time() - timer > timeout:
raise Exception("Cluster deletion seems to be hanged")
for name in cluster_names:
notification = self.db.query(models.Notification)\
.filter(models.Notification.topic == "done")\
.filter(models.Notification.message == "Environment '%s' and "
"all its nodes are deleted" % name)
self.assertIsNotNone(notification)
tasks = self.db.query(models.Task).all()
self.assertEqual(tasks, [])
@fake_tasks(recover_nodes=False)
def test_deletion_during_deployment(self):
self.env.create(
nodes_kwargs=[
{"status": "ready", "pending_addition": True},
]
)
cluster_id = self.env.clusters[0].id
resp = self.app.put(
reverse(
'ClusterChangesHandler',
kwargs={'cluster_id': cluster_id}),
headers=self.default_headers
)
deploy_uuid = resp.json_body['uuid']
self.app.delete(
reverse(
'ClusterHandler',
kwargs={'obj_id': cluster_id}),
headers=self.default_headers
)
def cluster_is_deleted():
return not self.db.query(models.Cluster).get(cluster_id)
self.env.wait_for_true(cluster_is_deleted,
error_message="Cluster deletion timeout")
task_deploy = self.db.query(models.Task).filter_by(
uuid=deploy_uuid
).first()
self.assertIsNone(task_deploy)
task_delete = self.db.query(models.Task).filter_by(
cluster_id=cluster_id,
name="cluster_deletion"
).first()
self.assertIsNone(task_delete)
@fake_tasks(override_state={"progress": 100, "status": "ready"})
def test_deletion_cluster_ha_3x3(self):
self.env.create(
cluster_kwargs={
"api": True,
},
nodes_kwargs=[
{"roles": ["controller"], "pending_addition": True},
{"roles": ["compute"], "pending_addition": True}
] * 3
)
cluster_id = self.env.clusters[0].id
cluster_name = self.env.clusters[0].name
supertask = self.env.launch_deployment()
self.env.wait_ready(supertask)
resp = self.app.delete(
reverse(
'ClusterHandler',
kwargs={'obj_id': cluster_id}),
headers=self.default_headers
)
self.assertEqual(202, resp.status_code)
timer = time.time()
timeout = 15
clstr = self.db.query(models.Cluster).get(cluster_id)
while clstr:
time.sleep(1)
try:
self.db.refresh(clstr)
except Exception:
break
if time.time() - timer > timeout:
raise Exception("Cluster deletion seems to be hanged")
notification = self.db.query(models.Notification)\
.filter(models.Notification.topic == "done")\
.filter(models.Notification.message == "Environment '%s' and all "
"its nodes are deleted" % cluster_name).first()
self.assertIsNotNone(notification)
tasks = self.db.query(models.Task).all()
self.assertEqual(tasks, [])
@fake_tasks()
def test_node_fqdn_is_assigned(self):
self.env.create(
nodes_kwargs=[
{"pending_addition": True},
{"pending_addition": True}
]
)
self.env.launch_deployment()
self.env.refresh_nodes()
for node in self.env.nodes:
fqdn = "node-%s.%s" % (node.id, settings.DNS_DOMAIN)
self.assertEqual(fqdn, node.fqdn)
@fake_tasks()
def test_no_node_no_cry(self):
cluster = self.env.create_cluster(api=True)
cluster_id = cluster['id']
manager_ = manager.ApplyChangesTaskManager(cluster_id)
task = models.Task(name='provision', cluster_id=cluster_id)
self.db.add(task)
self.db.commit()
rpc.receiver.NailgunReceiver.deploy_resp(nodes=[
{'uid': 666, 'id': 666, 'status': 'discover'}
], task_uuid=task.uuid)
self.assertRaises(errors.WrongNodeStatus, manager_.execute)
@fake_tasks()
@mock.patch.object(DeletionTask, 'execute')
def test_deletion_task_called(self, mdeletion_execute):
cluster = self.env.create_cluster()
cluster_id = cluster['id']
node_db = self.env.create_node(
api=False,
cluster_id=cluster['id'],
pending_addition=False,
pending_deletion=True,
status=NODE_STATUSES.ready,
roles=['controller'])
manager_ = manager.ApplyChangesTaskManager(cluster_id)
manager_.execute()
self.assertEqual(mdeletion_execute.call_count, 1)
task, nodes = mdeletion_execute.call_args[0]
# unfortunately assertItemsEqual does not recurse into dicts
self.assertItemsEqual(
nodes['nodes_to_delete'],
DeletionTask.prepare_nodes_for_task([node_db])['nodes_to_delete']
)
self.assertItemsEqual(
nodes['nodes_to_restore'],
DeletionTask.prepare_nodes_for_task([node_db])['nodes_to_restore']
)
@fake_tasks()
@mock.patch.object(DeletionTask, 'execute')
def test_deletion_task_w_check_ceph(self, mdeletion_execute):
cluster = self.env.create_cluster()
cluster_id = cluster['id']
self.env.create_node(
api=False,
cluster_id=cluster['id'],
pending_addition=False,
pending_deletion=True,
status=NODE_STATUSES.ready,
roles=['controller'])
manager_ = manager.ApplyChangesTaskManager(cluster_id)
manager_.execute()
self.assertEqual(mdeletion_execute.call_count, 1)
kwargs = mdeletion_execute.call_args[1]
self.assertEqual(kwargs['check_ceph'], True)
@fake_tasks()
def test_no_changes_no_cry(self):
self.env.create(
nodes_kwargs=[
{"status": "ready"}
]
)
cluster_db = self.env.clusters[0]
objects.Cluster.clear_pending_changes(cluster_db)
manager_ = manager.ApplyChangesTaskManager(cluster_db.id)
self.assertRaises(errors.WrongNodeStatus, manager_.execute)
@fake_tasks()
@mock.patch('nailgun.task.manager.tasks.DeletionTask.execute')
def test_apply_changes_exception_caught(self, mdeletion_execute):
self.env.create(
nodes_kwargs=[
{"pending_deletion": True, "status": NODE_STATUSES.ready},
]
)
cluster_db = self.env.clusters[0]
objects.Cluster.clear_pending_changes(cluster_db)
manager_ = manager.ApplyChangesTaskManager(cluster_db.id)
mdeletion_execute.side_effect = Exception('exception')
task = manager_.execute()
self.assertEqual(task.status, TASK_STATUSES.error)
@fake_tasks(recover_offline_nodes=False)
def test_deletion_offline_node(self):
self.env.create(
nodes_kwargs=[
{"online": False, "pending_deletion": True},
{"status": "ready"}
]
)
to_delete = TaskHelper.nodes_to_delete(self.env.clusters[0])
to_delete_ids = [node.id for node in to_delete]
self.assertEqual(len(to_delete_ids), 1)
supertask = self.env.launch_deployment()
self.env.wait_ready(supertask, timeout=5)
self.assertEqual(self.env.db.query(models.Node).count(), 1)
remaining_node = self.env.db.query(models.Node).first()
self.assertNotIn(remaining_node.id, to_delete_ids)
@fake_tasks(recover_offline_nodes=False, tick_interval=1)
def test_deletion_three_offline_nodes_and_one_online(self):
cluster = self.env.create(
nodes_kwargs=[
{"online": False, "pending_deletion": True},
{"online": False, "pending_deletion": True},
{"online": False, "pending_deletion": True},
{"online": True, "pending_deletion": True}
]
)
supertask = self.env.launch_deployment()
self.db.flush()
self.env.wait_ready(supertask, timeout=5)
# This test fails when the whole test set is executed, apparently
# because of delays in data updates inside the fake threads, so to
# make the test pass we have to wait for the data to be present
# in the db.
self.env.wait_for_true(self.check_node_presence, args=[1])
# Offline nodes were deleted, online node came back
self.assertEqual(
self.db.query(models.Node).filter(
models.Node.cluster_id == cluster['id']).count(),
0
)
self.assertEqual(
self.db.query(models.Node).filter(
models.Node.cluster_id.is_(None)).count(),
1
)
self.assertEqual(
self.db.query(models.Node).filter(
models.Node.status == NODE_STATUSES.discover).count(),
1
)
self.assertEqual(
self.db.query(models.Node).filter(
models.Node.online == sql.true()).count(),
1
)
@fake_tasks(tick_interval=1)
def test_delete_offline_nodes_and_recover_them(self):
self.env.create(
nodes_kwargs=[
{"online": False, "pending_deletion": True},
{"online": False, "pending_deletion": True},
{"online": True, "pending_deletion": True}
]
)
supertask = self.env.launch_deployment()
self.db.flush()
self.env.wait_ready(supertask, timeout=5)
# same as in previous test
self.env.wait_for_true(self.check_node_presence, args=[3])
q_nodes = self.env.db.query(models.Node)
online_nodes_count = q_nodes.filter_by(online=True).count()
self.assertEqual(online_nodes_count, 1)
offline_nodes_count = q_nodes.filter_by(online=False).count()
self.assertEqual(offline_nodes_count, 2)
for node in q_nodes:
self.assertEqual(node.status, 'discover')
self.assertEqual(node.cluster_id, None)
@fake_tasks(recover_offline_nodes=False)
def test_deletion_offline_node_when_cluster_has_only_one_node(self):
cluster = self.env.create_cluster()
objects.Cluster.clear_pending_changes(self.env.clusters[0])
self.env.create_node(
cluster_id=cluster['id'],
online=False,
pending_deletion=True,
pending_addition=False,
status='ready',
roles=['controller'])
supertask = self.env.launch_deployment()
self.env.wait_ready(supertask, timeout=5)
self.assertEqual(self.env.db.query(models.Node).count(), 0)
@fake_tasks(recover_nodes=False)
def test_node_deletion_task_manager(self):
self.env.create(
nodes_kwargs=[
{"pending_deletion": True, "status": "ready"}
]
)
cluster_db = self.env.clusters[0]
objects.Cluster.clear_pending_changes(cluster_db)
manager_ = manager.NodeDeletionTaskManager(cluster_id=cluster_db.id)
task = manager_.execute(cluster_db.nodes)
node = cluster_db.nodes[0]
self.assertEqual(node.status, NODE_STATUSES.removing)
self.db.commit()
self.env.wait_ready(task, timeout=5)
self.assertEqual(self.db.query(models.Node).count(), 0)
@fake_tasks(recover_nodes=False)
def test_node_deletion_task_manager_works_for_nodes_not_in_cluster(self):
self.env.create(
nodes_kwargs=[
{"pending_deletion": True, "status": "ready"}
]
)
cluster_db = self.env.clusters[0]
objects.Cluster.clear_pending_changes(cluster_db)
node = cluster_db.nodes[0]
objects.Node.update(node, {'cluster_id': None})
self.db.commit()
self.db.refresh(node)
self.db.refresh(cluster_db)
manager_ = manager.NodeDeletionTaskManager()
task = manager_.execute([node])
self.db.commit()
# Nodes are removed immediately
self.assertEqual(task.status, TASK_STATUSES.ready)
self.assertEqual(self.db.query(models.Node).count(), 0)
@fake_tasks(recover_nodes=False)
def test_node_deletion_task_manager_invalid_cluster(self):
self.env.create(
nodes_kwargs=[
{"pending_deletion": True, "status": "ready"}
]
)
cluster_db = self.env.clusters[0]
objects.Cluster.clear_pending_changes(cluster_db)
manager_ = manager.NodeDeletionTaskManager()
self.assertRaises(
errors.InvalidData, manager_.execute, cluster_db.nodes)
@fake_tasks()
def test_deployment_on_controller_removal_via_apply_changes(self):
self.env.create(
nodes_kwargs=[
{'roles': ['controller'],
'pending_deletion': True},
{'roles': ['controller'],
'status': consts.NODE_STATUSES.ready},
{'roles': ['controller'],
'status': consts.NODE_STATUSES.ready},
{'roles': ['controller'],
'status': consts.NODE_STATUSES.ready},
{'roles': ['compute'],
'status': consts.NODE_STATUSES.ready},
{'roles': ['compute'],
'status': consts.NODE_STATUSES.ready},
]
)
cluster = self.env.clusters[0]
expected_nodes_to_deploy = filter(lambda n: 'controller' in n.roles
and not n.pending_deletion,
cluster.nodes)
with mock.patch('nailgun.task.task.DeploymentTask.message') as \
mocked_task:
self.env.launch_deployment()
_, actual_nodes_to_deploy = mocked_task.call_args[0]
self.assertItemsEqual(expected_nodes_to_deploy,
actual_nodes_to_deploy)
@fake_tasks()
def test_deployment_on_controller_removal_via_node_deletion(self):
self.env.create(
nodes_kwargs=[
{'roles': ['controller'],
'status': consts.NODE_STATUSES.ready},
{'roles': ['controller'],
'status': consts.NODE_STATUSES.ready},
{'roles': ['controller'],
'status': consts.NODE_STATUSES.ready},
{'roles': ['compute'],
'status': consts.NODE_STATUSES.ready},
{'roles': ['compute'],
'status': consts.NODE_STATUSES.ready},
]
)
cluster = self.env.clusters[0]
controllers = filter(lambda n: 'controller' in n.roles
and not n.pending_deletion,
cluster.nodes)
controller_to_delete = controllers[0]
expected_nodes_to_deploy = controllers[1:]
with mock.patch('nailgun.task.task.DeploymentTask.message') as \
mocked_task:
with mock.patch('nailgun.rpc.cast'):
resp = self.app.delete(
reverse(
'NodeHandler',
kwargs={'obj_id': controller_to_delete.id}),
headers=self.default_headers
)
_, actual_nodes_to_deploy = mocked_task.call_args[0]
self.assertItemsEqual(expected_nodes_to_deploy,
actual_nodes_to_deploy)
self.assertEqual(202, resp.status_code)
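# The deletion tests above rely on polling helpers (self.env.wait_for_true,
# self.env.wait_ready) instead of fixed sleeps. A minimal sketch of such a
# poll-until-true helper is shown below; the names `predicate`, `timeout`,
# `interval` and `error_message` are illustrative assumptions, not the
# actual Environment API.
def wait_for_true_sketch(predicate, timeout=60, interval=0.5,
                         error_message="Timeout waiting for condition"):
    """Poll `predicate` until it returns a truthy value or `timeout` expires."""
    import time
    started = time.time()
    while True:
        result = predicate()
        if result:
            return result
        if time.time() - started > timeout:
            raise AssertionError(error_message)
        time.sleep(interval)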
|
|
# Copyright 2014 Yahoo! Inc.
# Licensed under the Apache 2.0 license. Developed for Yahoo! by Sean Gillespie.
#
# Yahoo! licenses this file to you under the Apache License, Version
# 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
from lxml import etree as et
import copy
import ioc_et
import wx
def strip_namespace(ioc_xml):
if ioc_xml.tag.startswith('{'):
ns_length = ioc_xml.tag.find('}')
namespace = ioc_xml.tag[0:ns_length+1]
for element in ioc_xml.getiterator():
if element.tag.startswith(namespace):
element.tag = element.tag[len(namespace):]
return ioc_xml
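# A minimal usage sketch for strip_namespace(), assuming an OpenIOC 1.1
# document parsed with lxml (the sample XML string is illustrative only):
#
#     sample = et.fromstring(
#         '<ioc xmlns="http://openioc.org/schemas/OpenIOC_1.1" id="x"/>')
#     stripped = strip_namespace(sample)
#     # stripped.tag is now 'ioc' rather than
#     # '{http://openioc.org/schemas/OpenIOC_1.1}ioc'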
def generate_label(element):
if element.tag == "Indicator":
return (element.get('operator'), wx.BLACK)
if element.tag == "IndicatorItem":
color = wx.BLUE
context = element.find('Context')
content = element.find('Content')
condition = ""
search_type = ""
search_path = ""
search_text = ""
if element.get('condition'):
condition = element.get('condition')
if context.get('type'):
search_type = context.get('type')
if context.get('search'):
search_path = context.get('search')
if content.text:
search_text = content.text
if "preserve-case" in element.keys():
if element.get('preserve-case') == "true":
color = "#009900"
negate = ""
if "negate" in element.keys():
if element.get('negate') == "true":
negate = " NOT"
if element.get('preserve-case') == "true":
color = "#7300FF"
else:
color = wx.RED
if condition == "isnot":
condition = "is"
negate = " NOT"
color = wx.RED
if condition == "containsnot":
condition = "contains"
negate = " NOT"
color = wx.RED
label = negate + " " + search_type + ":" + search_path + " " + condition + " " + search_text
return (label, color)
return "Bad Indicator"
class IOC():
def __init__(self, ioc_xml):
self.working_xml = copy.deepcopy(ioc_xml)
self.orig_xml = copy.deepcopy(ioc_xml)
self.attributes = self.working_xml.attrib
metadata_root = "TEST"
if self.working_xml.nsmap[None] == "http://schemas.mandiant.com/2010/ioc":
self.version = "1.0"
metadata_root = self.working_xml
self.criteria = self.working_xml.find('definition')
if self.criteria is None:
self.working_xml.append(ioc_et.make_definition_node(ioc_et.make_Indicator_node("OR")))
self.criteria = self.working_xml.find('definition')
self.parameters = None
elif self.working_xml.nsmap[None] == "http://openioc.org/schemas/OpenIOC_1.1":
self.version = "1.1"
metadata_root = self.working_xml.find('metadata')
if metadata_root is None:
self.working_xml.append(ioc_et.make_metadata_node(name = "*Missing*", author = "*Missing*", description = "*Missing*", links=ioc_et.make_links_node()))
metadata_root = self.working_xml.find('metadata')
self.criteria = self.working_xml.find('criteria')
if self.criteria is None:
self.working_xml.append(ioc_et.make_criteria_node(ioc_et.make_Indicator_node("OR")))
self.criteria = self.working_xml.find('criteria')
self.parameters = self.working_xml.find('parameters')
if self.parameters is None:
self.working_xml.append(ioc_et.make_parameters_node())
self.parameters = self.working_xml.find('parameters')
self.name = metadata_root.find('short_description')
if self.name is None:
metadata_root.append(ioc_et.make_short_description_node("*Missing*"))
self.name = metadata_root.find('short_description')
self.desc = metadata_root.find('description')
if self.desc is None:
metadata_root.append(ioc_et.make_description_node("*Missing*"))
self.desc = metadata_root.find('description')
self.author = metadata_root.find('authored_by')
if self.author is None:
metadata_root.append(ioc_et.make_authored_by_node("*Missing*"))
self.author = metadata_root.find('authored_by')
self.created = metadata_root.find('authored_date')
if self.created is None:
metadata_root.append(ioc_et.make_authored_date_node())
self.created = metadata_root.find('authored_date')
self.links = metadata_root.find('links')
if self.links is None:
metadata_root.append(ioc_et.make_links_node())
self.links = metadata_root.find('links')
def get_uuid(self):
return self.attributes['id']
def get_name(self):
return self.name.text
def set_name(self, name):
self.name.text = name
def get_modified(self):
return self.attributes['last-modified']
def set_modified(self):
self.attributes['last-modified'] = ioc_et.get_current_date()
def get_author(self):
if self.author.text is not None:
return self.author.text
else:
return ""
def set_author(self, author):
self.author.text = author
def get_created(self):
return self.created.text
def set_created(self):
self.created.text = ioc_et.get_current_date()
def get_metadata(self, field):
pass
def get_desc(self):
if self.desc.text is not None:
if os.name == "nt":
return self.desc.text.replace('\n', '\r\n')
else:
return self.desc.text
else:
return ""
def set_desc(self, desc):
self.desc.text = desc
def get_links(self):
pass
def get_indicator(self):
pass
class IOCList():
def __init__(self):
self.working_dir = None
self.iocs = {}
def save_iocs(self, full_path=None):
if full_path:
if et.tostring(self.iocs[full_path].working_xml) != et.tostring(self.iocs[full_path].orig_xml):
self.iocs[full_path].set_modified()
ioc_xml_string = et.tostring(self.iocs[full_path].working_xml, encoding="utf-8", xml_declaration=True, pretty_print = True)
ioc_file = open(full_path, 'w')
ioc_file.write(ioc_xml_string)
ioc_file.close()
self.iocs[full_path].orig_xml = copy.deepcopy(self.iocs[full_path].working_xml)
else:
for full_path in self.iocs:
if et.tostring(self.iocs[full_path].working_xml) != et.tostring(self.iocs[full_path].orig_xml):
self.iocs[full_path].set_modified()
ioc_xml_string = et.tostring(self.iocs[full_path].working_xml, encoding="utf-8", xml_declaration=True, pretty_print = True)
ioc_file = open(full_path, 'w')
ioc_file.write(ioc_xml_string)
ioc_file.close()
self.iocs[full_path].orig_xml = copy.deepcopy(self.iocs[full_path].working_xml)
def clone_ioc(self,current_ioc):
new_ioc_xml = copy.deepcopy(current_ioc.working_xml)
new_uuid = ioc_et.get_guid()
ioc_file = new_uuid + ".ioc"
full_path = os.path.join(self.working_dir, ioc_file)
new_ioc_xml.attrib['id'] = new_uuid
self.iocs[full_path] = IOC(new_ioc_xml)
self.iocs[full_path].set_modified()
self.iocs[full_path].set_created()
self.iocs[full_path].orig_xml = et.Element('Clone')
return full_path
def add_ioc(self, author, version):
new_ioc_xml = ioc_et.make_IOC_root(version=version)
ioc_file = new_ioc_xml.attrib['id'] + ".ioc"
full_path = os.path.join(self.working_dir, ioc_file)
if version == "1.0":
new_ioc_xml.append(ioc_et.make_short_description_node(name = "*New IOC*"))
new_ioc_xml.append(ioc_et.make_description_node(text="PyIOCe Generated IOC"))
new_ioc_xml.append(ioc_et.make_authored_by_node(author = author))
new_ioc_xml.append(ioc_et.make_authored_date_node())
new_ioc_xml.append(ioc_et.make_links_node())
new_ioc_xml.append(ioc_et.make_definition_node(ioc_et.make_Indicator_node("OR")))
elif version == "1.1":
new_ioc_xml.append(ioc_et.make_metadata_node( name = "*New IOC*", author = "PyIOCe", description = "PyIOCe Generated IOC"))
new_ioc_xml.append(ioc_et.make_criteria_node(ioc_et.make_Indicator_node("OR")))
new_ioc_xml.append(ioc_et.make_parameters_node())
self.iocs[full_path] = IOC(new_ioc_xml)
self.iocs[full_path].orig_xml = et.Element('New')
return full_path
def open_ioc_path(self,dir):
self.iocs = {}
self.working_dir = dir
for base, sub, files in os.walk(self.working_dir):
for filename in files:
if os.path.splitext(filename)[1][1:].lower() == "ioc":
full_path = os.path.join(base, filename)
ioc_file = open(full_path, 'r')
try:
ioc_xml = et.fromstring(ioc_file.read())
clean_ioc_xml = strip_namespace(ioc_xml)
self.iocs[full_path] = IOC(clean_ioc_xml)
except:
pass #FIXME Logging/Alerts for failed files
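# A minimal usage sketch for IOCList, assuming a writable directory of .ioc
# files; the path, author and name below are illustrative only:
#
#     ioc_list = IOCList()
#     ioc_list.open_ioc_path('/tmp/iocs')        # load every *.ioc under the dir
#     new_path = ioc_list.add_ioc(author='analyst', version='1.1')
#     ioc_list.iocs[new_path].set_name('Example IOC')
#     ioc_list.save_iocs(new_path)               # or save_iocs() to write them all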
|
|
#!/usr/bin/env python
# coding: utf-8
# References:
# man curl
# https://curl.haxx.se/libcurl/c/curl_easy_getinfo.html
# https://curl.haxx.se/libcurl/c/easy_getinfo_options.html
# http://blog.kenweiner.com/2014/11/http-request-timings-with-curl.html
from __future__ import print_function
import os
import json
import sys
import logging
import tempfile
import subprocess
__version__ = '1.2.1'
import io
try:
to_unicode = unicode
except NameError:
to_unicode = str
PY3 = sys.version_info >= (3,)
if PY3:
xrange = range
# Env class is copied from https://github.com/reorx/getenv/blob/master/getenv.py
class Env(object):
prefix = 'HTTPSTAT'
_instances = []
def __init__(self, key):
self.key = key.format(prefix=self.prefix)
Env._instances.append(self)
def get(self, default=None):
return os.environ.get(self.key, default)
ENV_SHOW_BODY = Env('{prefix}_SHOW_BODY')
ENV_SHOW_IP = Env('{prefix}_SHOW_IP')
ENV_SHOW_SPEED = Env('{prefix}_SHOW_SPEED')
ENV_SAVE_BODY = Env('{prefix}_SAVE_BODY')
ENV_CURL_BIN = Env('{prefix}_CURL_BIN')
ENV_DEBUG = Env('{prefix}_DEBUG')
curl_format = """{
"time_namelookup": %{time_namelookup},
"time_connect": %{time_connect},
"time_appconnect": %{time_appconnect},
"time_pretransfer": %{time_pretransfer},
"time_redirect": %{time_redirect},
"time_starttransfer": %{time_starttransfer},
"time_total": %{time_total},
"speed_download": %{speed_download},
"speed_upload": %{speed_upload},
"remote_ip": "%{remote_ip}",
"remote_port": "%{remote_port}",
"local_ip": "%{local_ip}",
"local_port": "%{local_port}"
}"""
https_template = """
DNS Lookup TCP Connection TLS Handshake Server Processing Content Transfer
[ {a0000} | {a0001} | {a0002} | {a0003} | {a0004} ]
| | | | |
namelookup:{b0000} | | | |
connect:{b0001} | | |
pretransfer:{b0002} | |
starttransfer:{b0003} |
total:{b0004}
"""[1:]
http_template = """
DNS Lookup TCP Connection Server Processing Content Transfer
[ {a0000} | {a0001} | {a0003} | {a0004} ]
| | | |
namelookup:{b0000} | | |
connect:{b0001} | |
starttransfer:{b0003} |
total:{b0004}
"""[1:]
# Color code is copied from https://github.com/reorx/python-terminal-color/blob/master/color_simple.py
ISATTY = sys.stdout.isatty()
def make_color(code):
def color_func(s):
if not ISATTY:
return s
tpl = '\x1b[{}m{}\x1b[0m'
return tpl.format(code, s)
return color_func
red = make_color(31)
green = make_color(32)
yellow = make_color(33)
blue = make_color(34)
magenta = make_color(35)
cyan = make_color(36)
bold = make_color(1)
underline = make_color(4)
grayscale = {(i - 232): make_color('38;5;' + str(i)) for i in xrange(232, 256)}
def quit(s, code=0):
if s is not None:
print(s)
sys.exit(code)
def print_help():
help = """
Usage: httpstat URL [CURL_OPTIONS]
httpstat -h | --help
httpstat --version
Arguments:
URL url to request, could be with or without `http(s)://` prefix
Options:
CURL_OPTIONS any curl supported options, except for -w -D -o -S -s,
which are already used internally.
-h --help show this screen.
--version show version.
Environments:
HTTPSTAT_SHOW_BODY Set to `true` to show response body in the output,
note that the body is limited to 1023 bytes and will be
truncated if it exceeds that limit. Default is `false`.
HTTPSTAT_SHOW_IP By default httpstat shows remote and local IP/port address.
Set to `false` to disable this feature. Default is `true`.
HTTPSTAT_SHOW_SPEED Set to `true` to show download and upload speed.
Default is `false`.
HTTPSTAT_SAVE_BODY By default httpstat stores body in a tmp file,
set to `false` to disable this feature. Default is `true`
HTTPSTAT_CURL_BIN Indicate the curl bin path to use. Default is `curl`
from current shell $PATH.
HTTPSTAT_DEBUG Set to `true` to see debugging logs. Default is `false`
"""[1:-1]
print(help)
def main():
args = sys.argv[1:]
if not args:
print_help()
quit(None, 0)
# get envs
show_body = 'true' in ENV_SHOW_BODY.get('false').lower()
show_ip = 'true' in ENV_SHOW_IP.get('true').lower()
show_speed = 'true' in ENV_SHOW_SPEED.get('false').lower()
save_body = 'true' in ENV_SAVE_BODY.get('true').lower()
curl_bin = ENV_CURL_BIN.get('curl')
is_debug = 'true' in ENV_DEBUG.get('false').lower()
# configure logging
if is_debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(level=log_level)
lg = logging.getLogger('httpstat')
# log envs
lg.debug('Envs:\n%s', '\n'.join(' {}={}'.format(i.key, i.get('')) for i in Env._instances))
lg.debug('Flags: %s', dict(
show_body=show_body,
show_ip=show_ip,
show_speed=show_speed,
save_body=save_body,
curl_bin=curl_bin,
is_debug=is_debug,
))
# get url
url = args[0]
if url in ['-h', '--help']:
print_help()
quit(None, 0)
elif url == '--version':
print('httpstat {}'.format(__version__))
quit(None, 0)
curl_args = args[1:]
# check curl args
exclude_options = [
'-w', '--write-out',
'-D', '--dump-header',
'-o', '--output',
'-s', '--silent',
]
for i in exclude_options:
if i in curl_args:
quit(yellow('Error: {} is not allowed in extra curl args'.format(i)), 1)
# tempfile for output
bodyf = tempfile.NamedTemporaryFile(delete=False)
bodyf.close()
headerf = tempfile.NamedTemporaryFile(delete=False)
headerf.close()
# run cmd
cmd_env = os.environ.copy()
cmd_env.update(
LC_ALL='C',
)
cmd_core = [curl_bin, '-w', curl_format, '-D', headerf.name, '-o', bodyf.name, '-s', '-S']
cmd = cmd_core + curl_args + [url]
lg.debug('cmd: %s', cmd)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=cmd_env)
out, err = p.communicate()
if PY3:
out, err = out.decode(), err.decode()
lg.debug('out: %s', out)
# print stderr
if p.returncode == 0:
if err:
print(grayscale[16](err))
else:
_cmd = list(cmd)
_cmd[2] = '<output-format>'
_cmd[4] = '<tempfile>'
_cmd[6] = '<tempfile>'
print('> {}'.format(' '.join(_cmd)))
quit(yellow('curl error: {}'.format(err)), p.returncode)
# parse output
try:
d = json.loads(out)
except ValueError as e:
print(yellow('Could not decode json: {}'.format(e)))
print('curl result:', p.returncode, grayscale[16](out), grayscale[16](err))
quit(None, 1)
with io.open('data.json', 'w', encoding='utf8') as outfile:
str_ = json.dumps(out,
indent=4, sort_keys=True,
separators=(',', ': '), ensure_ascii=False)
outfile.write(to_unicode(str_))
for k in d:
if k.startswith('time_'):
d[k] = int(d[k] * 1000)
# calculate ranges
d.update(
range_dns=d['time_namelookup'],
range_connection=d['time_connect'] - d['time_namelookup'],
range_ssl=d['time_pretransfer'] - d['time_connect'],
range_server=d['time_starttransfer'] - d['time_pretransfer'],
range_transfer=d['time_total'] - d['time_starttransfer'],
)
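# Worked example of the range arithmetic above (illustrative values, already
# converted to milliseconds): with time_namelookup=12, time_connect=45,
# time_pretransfer=124, time_starttransfer=234 and time_total=345:
#
#     range_dns        = 12
#     range_connection = 45 - 12   = 33
#     range_ssl        = 124 - 45  = 79
#     range_server     = 234 - 124 = 110
#     range_transfer   = 345 - 234 = 111
#
# i.e. each range is the delta between two consecutive cumulative curl timings.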
# ip
if show_ip:
s = 'Connected to {}:{} from {}:{}'.format(
cyan(d['remote_ip']), cyan(d['remote_port']),
d['local_ip'], d['local_port'],
)
print(s)
print()
# print header & body summary
with open(headerf.name, 'r') as f:
headers = f.read().strip()
# remove header file
lg.debug('rm header file %s', headerf.name)
os.remove(headerf.name)
for loop, line in enumerate(headers.split('\n')):
if loop == 0:
p1, p2 = tuple(line.split('/'))
print(green(p1) + grayscale[14]('/') + cyan(p2))
else:
pos = line.find(':')
print(grayscale[14](line[:pos + 1]) + cyan(line[pos + 1:]))
print()
# body
if show_body:
body_limit = 1024
with open(bodyf.name, 'r') as f:
body = f.read().strip()
body_len = len(body)
if body_len > body_limit:
print(body[:body_limit] + cyan('...'))
print()
s = '{} is truncated ({} out of {})'.format(green('Body'), body_limit, body_len)
if save_body:
s += ', stored in: {}'.format(bodyf.name)
print(s)
else:
print(body)
else:
if save_body:
print('{} stored in: {}'.format(green('Body'), bodyf.name))
# remove body file
if not save_body:
lg.debug('rm body file %s', bodyf.name)
os.remove(bodyf.name)
# print stat
if url.startswith('https://'):
template = https_template
else:
template = http_template
# colorize template first line
tpl_parts = template.split('\n')
tpl_parts[0] = grayscale[16](tpl_parts[0])
template = '\n'.join(tpl_parts)
def fmta(s):
return cyan('{:^7}'.format(str(s) + 'ms'))
def fmtb(s):
return cyan('{:<7}'.format(str(s) + 'ms'))
stat = template.format(
# a
a0000=fmta(d['range_dns']),
a0001=fmta(d['range_connection']),
a0002=fmta(d['range_ssl']),
a0003=fmta(d['range_server']),
a0004=fmta(d['range_transfer']),
# b
b0000=fmtb(d['time_namelookup']),
b0001=fmtb(d['time_connect']),
b0002=fmtb(d['time_pretransfer']),
b0003=fmtb(d['time_starttransfer']),
b0004=fmtb(d['time_total']),
)
print()
print(stat)
# speed, originally bytes per second
if show_speed:
print('speed_download: {:.1f} KiB/s, speed_upload: {:.1f} KiB/s'.format(
d['speed_download'] / 1024, d['speed_upload'] / 1024))
if __name__ == '__main__':
main()
infile = "data.json"
outfile = sys.argv[1]+".json"
delete_list = ["\\n\\", "\\", "\\n"]
replace_list = ["\"{\n"]
fin = open(infile)
fout = open(outfile, "w+")
for line in fin:
for word in delete_list:
line = line.replace(word, "")
line=line.replace("n}\"","}")
line=line.replace("\"{","{")
fout.write(line)
fin.close()
fout.close()
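# The loop above undoes the extra layer of JSON escaping by string
# replacement. A minimal alternative sketch using the json module directly,
# assuming data.json holds the escaped string written by main():
#
#     import json, sys
#     with open('data.json') as fin:
#         curl_json_text = json.load(fin)      # unwrap the quoted, escaped string
#     with open(sys.argv[1] + '.json', 'w') as fout:
#         json.dump(json.loads(curl_json_text), fout, indent=4, sort_keys=True)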
|
|
# -*- test-case-name: txweb2.dav.test.test_prop.PROP.test_PROPPATCH -*-
##
# Copyright (c) 2005-2017 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# DRI: Wilfredo Sanchez, wsanchez@apple.com
##
"""
WebDAV-aware static resources.
"""
__all__ = ["http_PROPPATCH"]
from twisted.python.failure import Failure
from twisted.internet.defer import deferredGenerator, waitForDeferred
from twext.python.log import Logger
from txweb2 import responsecode
from txweb2.http import HTTPError, StatusResponse
from txdav.xml import element as davxml
from txweb2.dav.http import MultiStatusResponse, PropertyStatusResponseQueue
from txweb2.dav.util import davXMLFromStream
log = Logger()
def http_PROPPATCH(self, request):
"""
Respond to a PROPPATCH request. (RFC 2518, section 8.2)
"""
if not self.exists():
log.error("File not found: %s" % (self,))
raise HTTPError(responsecode.NOT_FOUND)
x = waitForDeferred(self.authorize(request, (davxml.WriteProperties(),)))
yield x
x.getResult()
#
# Read request body
#
try:
doc = waitForDeferred(davXMLFromStream(request.stream))
yield doc
doc = doc.getResult()
except ValueError, e:
log.error("Error while handling PROPPATCH body: %s" % (e,))
raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, str(e)))
if doc is None:
error = "Request XML body is required."
log.error("Error: {err}", error)
raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, error))
#
# Parse request
#
update = doc.root_element
if not isinstance(update, davxml.PropertyUpdate):
error = ("Request XML body must be a propertyupdate element."
% (davxml.PropertyUpdate.sname(),))
log.error("Error: {err}", error)
raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, error))
responses = PropertyStatusResponseQueue("PROPPATCH", request.uri, responsecode.NO_CONTENT)
undoActions = []
gotError = False
# Look for Prefer header
prefer = request.headers.getHeader("prefer", {})
returnMinimal = any([key == "return" and value == "minimal" for key, value, _ignore_args in prefer])
try:
#
# Update properties
#
for setOrRemove in update.children:
assert len(setOrRemove.children) == 1
container = setOrRemove.children[0]
assert isinstance(container, davxml.PropertyContainer)
properties = container.children
def do(action, property, removing=False):
"""
Perform action(property, request) while maintaining an
undo queue.
"""
has = waitForDeferred(self.hasProperty(property, request))
yield has
has = has.getResult()
if has:
oldProperty = waitForDeferred(self.readProperty(property, request))
yield oldProperty
oldProperty = oldProperty.getResult()
def undo():
return self.writeProperty(oldProperty, request)
else:
def undo():
return self.removeProperty(property, request)
try:
x = waitForDeferred(action(property, request))
yield x
x.getResult()
except KeyError, e:
# Removing a non-existent property is OK according to WebDAV
if removing:
responses.add(responsecode.OK, property)
yield True
return
else:
# Convert KeyError exception into HTTPError
responses.add(
Failure(exc_value=HTTPError(StatusResponse(responsecode.FORBIDDEN, str(e)))),
property
)
yield False
return
except:
responses.add(Failure(), property)
yield False
return
else:
responses.add(responsecode.OK, property)
# Only add undo action for those that succeed because those that fail will not have changed
undoActions.append(undo)
yield True
return
do = deferredGenerator(do)
if isinstance(setOrRemove, davxml.Set):
for property in properties:
ok = waitForDeferred(do(self.writeProperty, property))
yield ok
ok = ok.getResult()
if not ok:
gotError = True
elif isinstance(setOrRemove, davxml.Remove):
for property in properties:
ok = waitForDeferred(do(self.removeProperty, property, True))
yield ok
ok = ok.getResult()
if not ok:
gotError = True
else:
raise AssertionError("Unknown child of PropertyUpdate: %s" % (setOrRemove,))
except:
#
# If there is an error, we have to back out whatever operations we
# have done, because PROPPATCH is an all-or-nothing request.
# We handle the first one here, and then re-raise to handle the
# rest in the containing scope.
#
for action in undoActions:
x = waitForDeferred(action())
yield x
x.getResult()
raise
#
# If we had an error we need to undo any changes that did succeed and change status of
# those to 424 Failed Dependency.
#
if gotError:
for action in undoActions:
x = waitForDeferred(action())
yield x
x.getResult()
responses.error()
#
# Return response - use 200 if Prefer:return=minimal set and no errors
#
if returnMinimal and not gotError:
yield responsecode.OK
else:
yield MultiStatusResponse([responses.response()])
http_PROPPATCH = deferredGenerator(http_PROPPATCH)
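# A representative PROPPATCH request body (illustrative only) that the handler
# above would parse into a davxml.PropertyUpdate containing one Set and one
# Remove instruction:
#
#     <?xml version="1.0" encoding="utf-8"?>
#     <D:propertyupdate xmlns:D="DAV:" xmlns:Z="http://ns.example.com/z/">
#       <D:set>
#         <D:prop><Z:Authors>Jane Doe</Z:Authors></D:prop>
#       </D:set>
#       <D:remove>
#         <D:prop><Z:Copyright-Owner/></D:prop>
#       </D:remove>
#     </D:propertyupdate>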
|
|
#! /usr/bin/env python
"""Static analysis tool for checking docstring conventions and style.
Implemented checks cover PEP257:
http://www.python.org/dev/peps/pep-0257/
Other checks can be added, e.g. NumPy docstring conventions:
https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
The repository is located at:
http://github.com/GreenSteam/pep257
"""
from __future__ import with_statement
import os
import sys
import tokenize as tk
from itertools import takewhile, dropwhile, chain
from optparse import OptionParser
from re import compile as re
try:
from StringIO import StringIO
except ImportError: # Python 3.0 and later
from io import StringIO
try:
next
except NameError: # Python 2.5 and earlier
nothing = object()
def next(obj, default=nothing):
if default == nothing:
return obj.next()
else:
try:
return obj.next()
except StopIteration:
return default
__version__ = '0.3.0'
__all__ = ('check', 'collect')
humanize = lambda string: re(r'(.)([A-Z]+)').sub(r'\1 \2', string).lower()
is_magic = lambda name: name.startswith('__') and name.endswith('__')
is_ascii = lambda string: all(ord(char) < 128 for char in string)
is_blank = lambda string: not string.strip()
leading_space = lambda string: re('\s*').match(string).group()
class Value(object):
__init__ = lambda self, *args: vars(self).update(zip(self._fields, args))
__hash__ = lambda self: hash(repr(self))
__eq__ = lambda self, other: other and vars(self) == vars(other)
def __repr__(self):
args = [vars(self)[field] for field in self._fields]
return '%s(%s)' % (self.__class__.__name__, ', '.join(map(repr, args)))
class Definition(Value):
_fields = 'name _source start end docstring children parent'.split()
_human = property(lambda self: humanize(type(self).__name__))
kind = property(lambda self: self._human.split()[-1])
module = property(lambda self: self.parent.module)
all = property(lambda self: self.module.all)
_slice = property(lambda self: slice(self.start - 1, self.end))
source = property(lambda self: ''.join(self._source[self._slice]))
__iter__ = lambda self: chain([self], *self.children)
@property
def _publicity(self):
return {True: 'public', False: 'private'}[self.is_public]
def __str__(self):
return 'in %s %s `%s`' % (self._publicity, self._human, self.name)
class Module(Definition):
_fields = 'name _source start end docstring children parent _all'.split()
is_public = True
_nest = staticmethod(lambda s: {'def': Function, 'class': Class}[s])
module = property(lambda self: self)
all = property(lambda self: self._all)
__str__ = lambda self: 'at module level'
class Function(Definition):
_nest = staticmethod(lambda s: {'def': NestedFunction,
'class': NestedClass}[s])
@property
def is_public(self):
if self.all is not None:
return self.name in self.all
else: # TODO: are there any magic functions? not methods
return not self.name.startswith('_') or is_magic(self.name)
class NestedFunction(Function):
is_public = False
class Method(Function):
@property
def is_public(self):
name_is_public = not self.name.startswith('_') or is_magic(self.name)
return self.parent.is_public and name_is_public
class Class(Definition):
_nest = staticmethod(lambda s: {'def': Method, 'class': NestedClass}[s])
is_public = Function.is_public
class NestedClass(Class):
is_public = False
class Token(Value):
_fields = 'kind value start end source'.split()
class TokenStream(object):
def __init__(self, filelike):
self._generator = tk.generate_tokens(filelike.readline)
self.current = Token(*next(self._generator, None))
self.line = self.current.start[0]
def move(self):
previous = self.current
current = next(self._generator, None)
self.current = None if current is None else Token(*current)
self.line = self.current.start[0] if self.current else self.line
return previous
def __iter__(self):
while True:
if self.current is not None:
yield self.current
else:
return
self.move()
class AllError(Exception):
def __init__(self, message):
Exception.__init__(
self, message +
'That means pep257 cannot decide which definitions are public. '
'Variable __all__ should be present at most once in each file, '
"in form `__all__ = ('a_public_function', 'APublicClass', ...)`. "
'More info on __all__: http://stackoverflow.com/q/44834/. ')
class Parser(object):
def __call__(self, filelike, filename):
self.source = filelike.readlines()
src = ''.join(self.source)
self.stream = TokenStream(StringIO(src))
self.filename = filename
self.all = None
return self.parse_module()
current = property(lambda self: self.stream.current)
line = property(lambda self: self.stream.line)
def consume(self, kind):
assert self.stream.move().kind == kind
def leapfrog(self, kind):
for token in self.stream:
if token.kind == kind:
self.consume(kind)
return
def parse_docstring(self):
for token in self.stream:
if token.kind in [tk.COMMENT, tk.NEWLINE, tk.NL]:
continue
elif token.kind == tk.STRING:
return token.value
else:
return None
def parse_definitions(self, class_, all=False):
for token in self.stream:
if all and token.value == '__all__':
self.parse_all()
if token.value in ['def', 'class']:
yield self.parse_definition(class_._nest(token.value))
if token.kind == tk.INDENT:
self.consume(tk.INDENT)
for definition in self.parse_definitions(class_):
yield definition
if token.kind == tk.DEDENT:
return
def parse_all(self):
assert self.current.value == '__all__'
self.consume(tk.NAME)
if self.current.value != '=':
raise AllError('Could not evaluate contents of __all__. ')
self.consume(tk.OP)
if self.current.value != '(':
raise AllError('Could not evaluate contents of __all__. ')
self.consume(tk.OP)
s = '('
if self.current.kind != tk.STRING:
raise AllError('Could not evaluate contents of __all__. ')
while self.current.value != ')':
s += self.current.value
self.stream.move()
s += ')'
try:
self.all = eval(s, {})
except BaseException:
raise AllError('Could not evaluate contents of __all__: %s. ' % s)
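# Note: parse_all() only accepts the tuple form that the AllError message asks
# for, e.g. (illustrative):
#
#     __all__ = ('check', 'collect')
#
# A list literal such as __all__ = ['check', 'collect'] does not start with
# '(' after the '=' sign and therefore raises AllError.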
def parse_module(self):
start = self.line
docstring = self.parse_docstring()
children = list(self.parse_definitions(Module, all=True))
assert self.current is None
end = self.line
module = Module(self.filename, self.source, start, end,
docstring, children, None, self.all)
for child in module.children:
child.parent = module
return module
def parse_definition(self, class_):
start = self.line
self.consume(tk.NAME)
name = self.current.value
self.leapfrog(tk.INDENT)
assert self.current.kind != tk.INDENT
docstring = self.parse_docstring()
children = list(self.parse_definitions(class_))
assert self.current.kind == tk.DEDENT
end = self.line - 1
definition = class_(name, self.source, start, end,
docstring, children, None)
for child in definition.children:
child.parent = definition
return definition
class Error(object):
"""Error in docstring style."""
# Options that define how errors are printed:
explain = False
source = False
def __init__(self, message=None, final=False):
self.message, self.is_final = message, final
self.definition, self.explanation = [None, None]
code = property(lambda self: self.message.partition(':')[0])
filename = property(lambda self: self.definition.module.name)
line = property(lambda self: self.definition.start)
@property
def lines(self):
source = ''
lines = self.definition._source[self.definition._slice]
offset = self.definition.start
lines_stripped = list(reversed(list(dropwhile(is_blank,
reversed(lines)))))
numbers_width = 0
for n, line in enumerate(lines_stripped):
numbers_width = max(numbers_width, n + offset)
numbers_width = len(str(numbers_width))
numbers_width = 6
for n, line in enumerate(lines_stripped):
source += '%*d: %s' % (numbers_width, n + offset, line)
if n > 5:
source += ' ...\n'
break
return source
def __str__(self):
self.explanation = '\n'.join(l for l in self.explanation.split('\n')
if not is_blank(l))
template = '%(filename)s:%(line)s %(definition)s:\n %(message)s'
if self.source and self.explain:
template += '\n\n%(explanation)s\n\n%(lines)s\n'
elif self.source and not self.explain:
template += '\n\n%(lines)s\n'
elif self.explain and not self.source:
template += '\n\n%(explanation)s\n\n'
return template % dict((name, getattr(self, name)) for name in
['filename', 'line', 'definition', 'message',
'explanation', 'lines'])
__repr__ = __str__
def __lt__(self, other):
return (self.filename, self.line) < (other.filename, other.line)
def parse_options():
parser = OptionParser(version=__version__,
usage='Usage: pep257 [options] [<file|dir>...]')
option = parser.add_option
option('-e', '--explain', action='store_true',
help='show explanation of each error')
option('-s', '--source', action='store_true',
help='show source for each error')
option('--ignore', metavar='<codes>', default='',
help='ignore a comma-separated list of error codes, '
'for example: --ignore=D101,D202')
option('--match', metavar='<pattern>', default='(?!test_).*\.py',
help="check only files that exactly match <pattern> regular "
"expression; default is --match='(?!test_).*\.py' which "
"matches files that don't start with 'test_' but end with "
"'.py'")
option('--match-dir', metavar='<pattern>', default='[^\.].*',
help="search only dirs that exactly match <pattern> regular "
"expression; default is --match-dir='[^\.].*', which matches "
"all dirs that don't start with a dot")
return parser.parse_args()
def collect(names, match=lambda name: True, match_dir=lambda name: True):
"""Walk dir trees under `names` and generate filnames that `match`.
Example
-------
>>> sorted(collect(['non-dir.txt', './'],
... match=lambda name: name.endswith('.py')))
['non-dir.txt', './pep257.py', './setup.py', './test_pep257.py']
"""
for name in names: # map(expanduser, names):
if os.path.isdir(name):
for root, dirs, filenames in os.walk(name):
for dir in dirs:
if not match_dir(dir):
dirs.remove(dir) # do not visit those dirs
for filename in filenames:
if match(filename):
yield os.path.join(root, filename)
else:
yield name
def check(filenames, ignore=()):
"""Generate PEP 257 errors that exist in `filenames` iterable.
Skips errors with error-codes defined in `ignore` iterable.
Example
-------
>>> check(['pep257.py'], ignore=['D100'])
<generator object check at 0x...>
"""
for filename in filenames:
try:
with open(filename) as file:
source = file.read()
for error in PEP257Checker().check_source(source, filename):
code = getattr(error, 'code', None)
if code is not None and code not in ignore:
yield error
except (EnvironmentError, AllError):
yield sys.exc_info()[1]
except tk.TokenError:
yield SyntaxError('invalid syntax in file %s' % filename)
def main(options, arguments):
Error.explain = options.explain
Error.source = options.source
collected = collect(arguments or ['.'],
match=re(options.match + '$').match,
match_dir=re(options.match_dir + '$').match)
code = 0
for error in check(collected, ignore=options.ignore.split(',')):
sys.stderr.write('%s\n' % error)
code = 1
return code
parse = Parser()
def check_for(kind, terminal=False):
def decorator(f):
f._check_for = kind
f._terminal = terminal
return f
return decorator
class PEP257Checker(object):
"""Checker for PEP 257.
D10x: Missing docstrings
D20x: Whitespace issues
D30x: Docstring formatting
D40x: Docstring content issues
"""
def check_source(self, source, filename):
module = parse(StringIO(source), filename)
for definition in module:
for check in self.checks:
terminate = False
if isinstance(definition, check._check_for):
error = check(None, definition, definition.docstring)
errors = error if hasattr(error, '__iter__') else [error]
for error in errors:
if error is not None:
partition = check.__doc__.partition('.\n')
message, _, explanation = partition
if error.message is None:
error.message = message
error.explanation = explanation
error.definition = definition
yield error
if check._terminal:
terminate = True
break
if terminate:
break
@property
def checks(self):
all = [check for check in vars(type(self)).values()
if hasattr(check, '_check_for')]
return sorted(all, key=lambda check: not check._terminal)
@check_for(Definition, terminal=True)
def check_docstring_missing(self, definition, docstring):
"""D10{0,1,2,3}: Public definitions should have docstrings.
All modules should normally have docstrings. [...] all functions and
classes exported by a module should also have docstrings. Public
methods (including the __init__ constructor) should also have
docstrings.
Note: Public (exported) definitions are either those with names listed
in __all__ variable (if present), or those that do not start
with a single underscore.
"""
if (not docstring and definition.is_public or
docstring and is_blank(eval(docstring))):
codes = {Module: 'D100', Class: 'D101', NestedClass: 'D101',
Method: 'D102', Function: 'D103', NestedFunction: 'D103'}
return Error('%s: Docstring missing' % codes[type(definition)])
@check_for(Definition)
def check_one_liners(self, definition, docstring):
"""D200: One-liner docstrings should fit on one line with quotes.
The closing quotes are on the same line as the opening quotes.
This looks better for one-liners.
"""
if docstring:
lines = eval(docstring).split('\n')
if len(lines) > 1:
non_empty_lines = sum(1 for l in lines if not is_blank(l))
if non_empty_lines == 1:
return Error('D200: One-line docstring should not occupy '
'%s lines' % len(lines))
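# Illustrative D200 violation: a docstring whose only non-blank content fits
# on one line but is spread over two, e.g.
#
#     """Return the answer.
#     """
#
# would be reported as "One-line docstring should not occupy 2 lines".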
@check_for(Function)
def check_no_blank_before(self, function, docstring): # def
"""D20{1,2}: No blank lines allowed around function/method docstring.
There's no blank line either before or after the docstring.
"""
# NOTE: This does not take comments into account.
# NOTE: This does not take into account functions with groups of code.
if docstring:
before, _, after = function.source.partition(docstring)
blanks_before = list(map(is_blank, before.split('\n')[:-1]))
blanks_after = list(map(is_blank, after.split('\n')[1:]))
blanks_before_count = sum(takewhile(bool, reversed(blanks_before)))
blanks_after_count = sum(takewhile(bool, blanks_after))
if blanks_before_count != 0:
yield Error('D201: No blank lines allowed *before* %s '
'docstring, found %s'
% (function.kind, blanks_before_count))
if not all(blanks_after) and blanks_after_count != 0:
yield Error('D202: No blank lines allowed *after* %s '
'docstring, found %s'
% (function.kind, blanks_after_count))
@check_for(Class)
def check_blank_before_after_class(self, class_, docstring):
"""D20{3,4}: Class docstrings should have 1 blank line around them.
Insert a blank line before and after all docstrings (one-line or
multi-line) that document a class -- generally speaking, the class's
methods are separated from each other by a single blank line, and the
docstring needs to be offset from the first method by a blank line;
for symmetry, put a blank line between the class header and the
docstring.
"""
# NOTE: this gives a false positive in this case:
# class Foo:
#
# """Docstring."""
#
#
# # comment here
# def foo(): pass
if docstring:
before, _, after = class_.source.partition(docstring)
blanks_before = list(map(is_blank, before.split('\n')[:-1]))
blanks_after = list(map(is_blank, after.split('\n')[1:]))
blanks_before_count = sum(takewhile(bool, reversed(blanks_before)))
blanks_after_count = sum(takewhile(bool, blanks_after))
if blanks_before_count != 1:
yield Error('D203: Expected 1 blank line *before* class '
'docstring, found %s' % blanks_before_count)
if not all(blanks_after) and blanks_after_count != 1:
yield Error('D204: Expected 1 blank line *after* class '
'docstring, found %s' % blanks_after_count)
@check_for(Definition)
def check_blank_after_summary(self, definition, docstring):
"""D205: Blank line missing between one-line summary and description.
Multi-line docstrings consist of a summary line just like a one-line
docstring, followed by a blank line, followed by a more elaborate
description. The summary line may be used by automatic indexing tools;
it is important that it fits on one line and is separated from the
rest of the docstring by a blank line.
"""
if docstring:
lines = eval(docstring).strip().split('\n')
if len(lines) > 1 and not is_blank(lines[1]):
return Error()
@check_for(Definition)
def check_indent(self, definition, docstring):
"""D20{6,7,8}: The entire docstring should be indented same as code.
The entire docstring is indented the same as the quotes at its
first line.
"""
if docstring:
before_docstring, _, _ = definition.source.partition(docstring)
_, _, indent = before_docstring.rpartition('\n')
lines = docstring.split('\n')
if len(lines) > 1:
lines = lines[1:] # First line does not need indent.
indents = [leading_space(l) for l in lines if not is_blank(l)]
if set(' \t') == set(''.join(indents) + indent):
return Error('D206: Docstring indented with both tabs and '
'spaces')
if (len(indents) > 1 and min(indents[:-1]) > indent
or indents[-1] > indent):
return Error('D208: Docstring is over-indented')
if min(indents) < indent:
return Error('D207: Docstring is under-indented')
@check_for(Definition)
def check_blank_after_last_paragraph(self, definition, docstring):
"""D209: Multi-line docstring should end with 1 blank line.
The BDFL recommends inserting a blank line between the last
paragraph in a multi-line docstring and its closing quotes,
placing the closing quotes on a line by themselves.
"""
if docstring:
lines = [l for l in eval(docstring).split('\n') if not is_blank(l)]
if len(lines) > 1:
lines = eval(docstring).split('\n')
blanks = len(list(takewhile(is_blank, reversed(lines))))
if blanks != 2:
return Error('D209: Multi-line docstring should end with '
'1 blank line, found %s' % max(0, blanks - 1))
@check_for(Definition)
def check_triple_double_quotes(self, definition, docstring):
r'''D300: Use """triple double quotes""".
For consistency, always use """triple double quotes""" around
docstrings. Use r"""raw triple double quotes""" if you use any
backslashes in your docstrings. For Unicode docstrings, use
u"""Unicode triple-quoted strings""".
Note: Exception to this is made if the docstring contains
""" quotes in its body.
'''
if docstring and '"""' in eval(docstring) and docstring.startswith(
("'''", "r'''", "u'''")):
# Allow ''' quotes if docstring contains """, because otherwise """
# quotes could not be expressed inside docstring. Not in PEP 257.
return
if docstring and not docstring.startswith(('"""', 'r"""', 'u"""')):
quotes = "'''" if "'''" in docstring[:4] else "'"
return Error('D300: Expected """-quotes, got %s-quotes' % quotes)
@check_for(Definition)
def check_backslashes(self, definition, docstring):
r'''D301: Use r""" if any backslashes in a docstring.
Use r"""raw triple double quotes""" if you use any backslashes
(\) in your docstrings.
'''
# Just check that docstring is raw, check_triple_double_quotes
# ensures the correct quotes.
if docstring and '\\' in docstring and not docstring.startswith('r'):
return Error()
@check_for(Definition)
def check_unicode_docstring(self, definition, docstring):
r'''D302: Use u""" for docstrings with Unicode.
For Unicode docstrings, use u"""Unicode triple-quoted strings""".
'''
# Just check that docstring is unicode, check_triple_double_quotes
# ensures the correct quotes.
if docstring and sys.version_info[0] <= 2:
if not is_ascii(docstring) and not docstring.startswith('u'):
return Error()
@check_for(Definition)
def check_ends_with_period(self, definition, docstring):
"""D400: First line should end with a period.
The [first line of a] docstring is a phrase ending in a period.
"""
if docstring:
summary_line = eval(docstring).strip().split('\n')[0]
if not summary_line.endswith('.'):
return Error("D400: First line should end with '.', not %r"
% summary_line[-1])
@check_for(Function)
def check_imperative_mood(self, function, docstring): # def context
"""D401: First line should be in imperative mood: 'Do', not 'Does'.
[Docstring] prescribes the function or method's effect as a command:
("Do this", "Return that"), not as a description; e.g. don't write
"Returns the pathname ...".
"""
if docstring:
stripped = eval(docstring).strip()
if stripped:
first_word = stripped.split()[0]
if first_word.endswith('s') and not first_word.endswith('ss'):
return Error('D401: First line should be imperative: '
'%r, not %r' % (first_word[:-1], first_word))
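# Illustrative D401 violation: a docstring starting with
#
#     """Returns the pathname of the KOS root directory."""
#
# is flagged because the heuristic above sees a first word that ends in 's'
# (but not 'ss') and suggests the imperative 'Return' instead of 'Returns'.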
@check_for(Function)
def check_no_signature(self, function, docstring): # def context
"""D402: First line should not be function's or method's "signature".
The one-line docstring should NOT be a "signature" reiterating the
function/method parameters (which can be obtained by introspection).
"""
if docstring:
first_line = eval(docstring).strip().split('\n')[0]
if function.name + '(' in first_line.replace(' ', ''):
return Error("D402: First line should not be %s's signature"
% function.kind)
# Somewhat hard to determine if return value is mentioned.
# @check(Function)
def SKIP_check_return_type(self, function, docstring):
"""D40x: Return value type should be mentioned.
[T]he nature of the return value cannot be determined by
introspection, so it should be mentioned.
"""
if docstring and function.returns_value:
if 'return' not in docstring.lower():
return Error()
if __name__ == '__main__':
try:
sys.exit(main(*parse_options()))
except KeyboardInterrupt:
pass
|
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Package admin handler."""
import datetime
import httplib
import urllib
from google.appengine.api import app_identity
from google.appengine.api import users
from simian import settings
from simian.mac import admin
from simian.mac import common
from simian.mac import models
from simian.mac.admin import xsrf
from simian.mac.common import auth
from simian.mac.common import mail
class Package(admin.AdminHandler):
"""Handler for /admin/package."""
def get(self, filename=None):
"""GET handler."""
if not filename:
self.error(httplib.NOT_FOUND)
return
elif not auth.HasPermission(auth.VIEW_PACKAGES):
self.error(httplib.FORBIDDEN)
return
filename = urllib.unquote(filename)
p = models.PackageInfo.get_by_key_name(filename)
if not p:
self.error(httplib.NOT_FOUND)
self.Render(
'error.html', {'message': 'PackageInfo not found: %s' % filename})
return
p.name = p.plist['name']
p.display_name = p.plist.get('display_name', '')
p.unattended = p.plist.get('unattended_install')
p.unattended_uninstall = p.plist.get('unattended_uninstall')
p.version = p.plist['version']
force_install_after_date = p.plist.get('force_install_after_date', None)
if force_install_after_date:
p.force_install_after_date = datetime.datetime.strftime(
force_install_after_date, '%Y-%m-%d')
p.force_install_after_date_time = datetime.datetime.strftime(
force_install_after_date, '%H:%M')
if self.request.referrer and self.request.referrer.endswith('proposals'):
return_address = '/admin/proposals'
return_title = 'proposals'
else:
return_address = '/admin/packages'
return_title = 'package'
if self.request.get('plist_xml'):
self.Render('plist.html',
{'report_type': 'packages',
'plist_type': 'package_plist',
'xml': admin.XmlToHtml(p.plist.GetXml()),
'title': 'Plist for %s' % p.name,
'raw_xml_link': '/pkgsinfo/%s' % filename,
})
else:
categories = (
[x.strip() for x in settings.LIST_OF_CATEGORIES.split(',') if x])
manifests_and_catalogs_unlocked = (
p.blob_info or p.plist.get('PackageCompleteURL'))
data = {
'pkg': p,
'report_type': 'package',
'tracks': common.TRACKS,
'install_types': common.INSTALL_TYPES,
'manifest_mod_groups': common.MANIFEST_MOD_GROUPS,
'approval_required': settings.APPROVAL_REQUIRED,
'is_admin_user': self.IsAdminUser(),
'is_support_user': auth.IsSupportUser(),
'pkg_safe_to_modify': p.IsSafeToModify(),
'editxml': self.request.get('editxml'),
'manifests_and_catalogs_unlocked': manifests_and_catalogs_unlocked,
'return_address': return_address,
'return_title': return_title,
'categories': categories}
self.Render('package.html', data)
def post(self, filename=None):
"""POST handler."""
if not auth.HasPermission(auth.UPLOAD):
self.error(httplib.FORBIDDEN)
self.response.out.write('Access Denied for current user')
return
xsrf_token = self.request.get('xsrf_token', None)
report_type = filename and 'package' or 'packages'
if not xsrf.XsrfTokenValidate(xsrf_token, report_type):
self.error(httplib.BAD_REQUEST)
self.Render(
'error.html',
{'message': 'Invalid XSRF token. Please refresh and retry.'})
return
if filename:
filename = urllib.unquote(filename)
# If we're updating from new plist xml, perform the update and return.
if self.request.get('new_pkginfo_plist'):
self.UpdatePackageInfoFromPlist()
return
# All non-plist updates require an existing PackageInfo entity.
p = models.PackageInfo.get_by_key_name(filename)
if not p:
self.error(httplib.NOT_FOUND)
self.Render(
'error.html', {'message': 'PackageInfo not found: %s' % filename})
return
if self.request.get('delete') == '1':
self._DeletePackage(p, filename)
elif self.request.get('submit', None) == 'save':
self.UpdatePackageInfo(p)
elif self.request.get('unlock') == '1':
self._UnlockPackage(p, filename)
elif self.request.get('approve') == '1':
if p.proposal.proposal_in_flight:
self._ApproveProposal(p, filename)
elif self.request.get('reject') == '1':
if p.proposal.proposal_in_flight:
self._RejectProposal(p, filename)
else:
self.error(httplib.BAD_REQUEST)
self.Render(
'error.html', {'message': 'No action specified or unknown action.'})
elif self.request.get('new_pkginfo_plist'):
# No filename was specified, so we're creating a new PackageInfo.
self.UpdatePackageInfoFromPlist(create_new=True)
else:
self.error(httplib.NOT_FOUND)
def _ApproveProposal(self, p, filename):
if not self.IsAdminUser():
self.redirect(
'/admin/package/%s?msg=Only admins can approve proposals' % (
filename))
else:
try:
p.proposal.ApproveProposal()
self.redirect(
'/admin/package/%s?msg=Changes approved for %s' % (
filename, filename))
except models.PackageInfoProposalApprovalError:
self.redirect(
'/admin/package/%s?msg=Unable to approve changes to %s' % (
filename, filename))
except models.PackageInfoLockError:
self.redirect(
'/admin/package/%s?msg=Unable to approve changes, package '
'is locked.' % (filename))
except models.PackageInfoUpdateError:
self.redirect(
'/admin/package/%s?msg=Unable to approve changes, a package '
'with the same name already in catalog.' % (filename))
def _DeletePackage(self, p, filename):
if not self.IsAdminUser():
self.redirect(
'/admin/package/%s?msg=Only admins can delete packages' % (filename))
else:
if p.IsSafeToModify():
if settings.EMAIL_ON_EVERY_CHANGE:
self.NotifyAdminsOfPackageDeletion(p)
p.delete()
self.redirect(
'/admin/packages?msg=%s successfully deleted' % filename)
else:
self.redirect(
            '/admin/package/%s?msg=Unlock package before deleting.' % filename)
def _RejectProposal(self, p, filename):
if not self.IsAdminUser():
self.redirect('/admin/package/%s?msg=Only admins can reject '
'proposals' % (filename))
else:
p.proposal.RejectProposal()
self.redirect(
'/admin/package/%s?msg=Changes rejected for %s' % (
filename, filename))
def _UnlockPackage(self, p, filename):
if not self.IsAdminUser():
self.redirect('/admin/package/%s?msg=Only admins are allowed to '
'unlock packages.' % (filename))
else:
if settings.EMAIL_ON_EVERY_CHANGE:
self.NotifyAdminsOfPackageUnlock(p)
p.MakeSafeToModify()
self.redirect(
'/admin/package/%s?msg=%s is safe to modify' % (
filename, filename))
def NotifyAdminsOfPackageChange(self, pkginfo, **kwargs):
"""Notifies admins of changes to packages."""
subject_line = 'MSU Package Update by %s - %s' % (users.get_current_user(),
pkginfo.filename)
main_body = ['New configuration:\n']
for key, value in kwargs.iteritems():
if key == 'manifests':
if pkginfo.manifests != value:
main_body.append('Manifests: %s --> %s' % (
', '.join(pkginfo.manifests), ', '.join(value)))
elif key == 'catalogs':
if pkginfo.catalogs != value:
main_body.append('Catalogs: %s --> %s' % (
', '.join(pkginfo.catalogs), ', '.join(value)))
elif key == 'install_types':
if pkginfo.install_types != value:
main_body.append('Install Types: %s --> %s' % (
', '.join(pkginfo.install_types), ', '.join(value)))
elif key == 'munki_name':
if pkginfo.munki_name != value:
main_body.append('Munki Name: %s --> %s' % (
pkginfo.munki_name, value))
elif (key == 'force_install_after_date'
and pkginfo.plist.get(key, '') != value):
main_body.append('%s: %s' % (key, value))
elif type(value) is list:
if pkginfo.plist.get(key, []) != value:
main_body.append('%s: %s --> %s' % (
key, ', '.join(pkginfo.plist.get(key, [])), ', '.join(value)))
else:
if pkginfo.plist.get(key, '') != value:
main_body.append(
'%s: %s --> %s' % (key, pkginfo.plist.get(key, ''), value))
mail.SendMail(settings.EMAIL_ADMIN_LIST, subject_line, '\n'.join(main_body))
def NotifyAdminsOfPackageChangeFromPlist(self, log, defer=True):
"""Notifies admins of changes to packages."""
subject_line = 'MSU Package Update by %s - %s' % (
users.get_current_user(), log.filename)
plist_diff = log.plist_diff
main_body = 'Diff:\n' + '\n'.join([x['line'] for x in plist_diff])
mail.SendMail(
settings.EMAIL_ADMIN_LIST, subject_line, main_body, defer=defer)
def NotifyAdminsOfPackageDeletion(self, pkginfo):
"""Notifies admins of packages deletions."""
subject_line = 'MSU Package Deleted by %s - %s' % (users.get_current_user(),
pkginfo.filename)
main_body = 'That package has been deleted, hope you didn\'t need it.'
mail.SendMail(settings.EMAIL_ADMIN_LIST, subject_line, main_body)
def NotifyAdminsOfPackageUnlock(self, pkginfo):
"""Notifies admins of package being unlocked."""
subject_line = 'MSU Package Unlocked by %s - %s' % (
users.get_current_user(), pkginfo.filename)
main_body = 'That package has been removed from all catalogs and manifests.'
mail.SendMail(settings.EMAIL_ADMIN_LIST, subject_line, main_body)
def UpdatePackageInfo(self, pkginfo):
"""Updates an existing PackageInfo entity."""
unattended_install = self.request.get('unattended_install', None)
if unattended_install is not None:
unattended_install = unattended_install == 'on'
unattended_uninstall = self.request.get('unattended_uninstall', None)
if unattended_uninstall is not None:
unattended_uninstall = unattended_uninstall == 'on'
# Parse any force_install_after_date str into a datetime object.
force_install_after_date_str = self.request.get(
'force_install_after_date', None)
force_install_after_date_time_str = self.request.get(
'force_install_after_date_time', None)
if force_install_after_date_str or force_install_after_date_time_str:
date_string = '%s %s' % (
force_install_after_date_str, force_install_after_date_time_str)
try:
force_install_after_date = datetime.datetime.strptime(
date_string, '%Y-%m-%d %H:%M')
      except ValueError:
        self.error(httplib.BAD_REQUEST)
        self.Render(
            'error.html',
            {'message': 'invalid force_install date and/or time format'})
        return
else:
# This will remove force_install_after_date from the plist, as it was
# unset in the UI.
force_install_after_date = ''
kwargs = {
'unattended_install': unattended_install,
'unattended_uninstall': unattended_uninstall,
# get_all() returns an empty array if set, and has no default value opt.
'catalogs': self.request.get_all('catalogs'),
'manifests': self.request.get_all('manifests'),
'install_types': self.request.get_all('install_types'),
'manifest_mod_access': self.request.get_all('manifest_mod_access'),
# get() returns an empty string if not set, so default to None.
'name': self.request.get('name', None),
'description': self.request.get('description', None),
'display_name': self.request.get('display_name', None),
'version': self.request.get('version', None),
'minimum_os_version': self.request.get('minimum_os_version', None),
'maximum_os_version': self.request.get('maximum_os_version', None),
'force_install_after_date': force_install_after_date,
'category': self.request.get('category', None),
'developer': self.request.get('developer', None),
}
try:
pkginfo.Update(**kwargs)
if settings.EMAIL_ON_EVERY_CHANGE:
self.NotifyAdminsOfPackageChange(pkginfo, **kwargs)
except models.PackageInfoLockError:
self.error(httplib.FOUND)
self.Render(
'error.html',
{'message': 'PackageInfo was locked; refresh and try again'})
except models.PackageInfoUpdateError as e:
self.error(httplib.FORBIDDEN)
self.Render(
'error.html', {'message': 'PackageInfoUpdateError: %s' % str(e)})
else:
filename = pkginfo.filename
self.redirect(
'/admin/packages?msg=%s saved.&activepkg=%s#package-%s' % (
filename, filename, filename))
def UpdatePackageInfoFromPlist(self, create_new=False):
"""Updates or creates a new PackageInfo entity from plist XML."""
plist_xml = self.request.get('new_pkginfo_plist').encode('utf-8').strip()
try:
pkginfo, log = models.PackageInfo.UpdateFromPlist(
plist_xml, create_new=create_new)
except models.PackageInfoUpdateError as e:
self.error(httplib.BAD_REQUEST)
self.Render(
'error.html', {'message': 'PackageInfoUpdateError: %s' % str(e)})
return
else:
if settings.EMAIL_ON_EVERY_CHANGE:
self.NotifyAdminsOfPackageChangeFromPlist(log)
self.redirect('/admin/package/%s?msg=PackageInfo saved#package-%s' % (
pkginfo.filename, pkginfo.filename))
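# Hedged sketch (not part of the original handler module): the essential shape
# of the call that UpdatePackageInfo() builds above. `pkginfo` is assumed to be
# a models.PackageInfo entity; the literal field values are illustrative only.
def _example_update_package_info(pkginfo):
  kwargs = {
      'catalogs': ['unstable'],
      'manifests': ['unstable'],
      'install_types': ['managed_installs'],
      'unattended_install': True,
      # An empty string removes force_install_after_date from the plist.
      'force_install_after_date': '',
  }
  pkginfo.Update(**kwargs)
  return kwargs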
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This module implements the handler for state of Denon AVR receivers.
:copyright: (c) 2021 by Oliver Goetz.
:license: MIT, see LICENSE for more details.
"""
import logging
import time
from typing import Hashable, Optional
import attr
from .appcommand import AppCommandCmdParam, AppCommands
from .const import DENON_ATTR_SETATTR
from .exceptions import AvrProcessingError, AvrCommandError
from .foundation import DenonAVRFoundation, convert_string_int_bool
_LOGGER = logging.getLogger(__name__)
@attr.s(auto_attribs=True, on_setattr=DENON_ATTR_SETATTR)
class DenonAVRToneControl(DenonAVRFoundation):
"""This class implements tone control functions of Denon AVR receiver."""
_tone_control_status: Optional[bool] = attr.ib(
converter=attr.converters.optional(convert_string_int_bool),
default=None)
_tone_control_adjust: Optional[bool] = attr.ib(
converter=attr.converters.optional(convert_string_int_bool),
default=None)
_bass: Optional[int] = attr.ib(
converter=attr.converters.optional(int),
default=None)
_bass_level: Optional[str] = attr.ib(
converter=attr.converters.optional(str),
default=None)
_treble: Optional[int] = attr.ib(
converter=attr.converters.optional(int),
default=None)
_treble_level: Optional[str] = attr.ib(
converter=attr.converters.optional(str),
default=None)
# Update tags for attributes
# AppCommand.xml interface
appcommand_attrs = {
AppCommands.GetToneControl: None}
def setup(self) -> None:
"""Ensure that the instance is initialized."""
# Add tags for a potential AppCommand.xml update
for tag in self.appcommand_attrs:
self._device.api.add_appcommand_update_tag(tag)
self._is_setup = True
async def async_update(
self,
global_update: bool = False,
cache_id: Optional[Hashable] = None) -> None:
"""Update volume asynchronously."""
# Ensure instance is setup before updating
if self._is_setup is False:
self.setup()
# Update state
await self.async_update_tone_control(
global_update=global_update, cache_id=cache_id)
async def async_update_tone_control(
self,
global_update: bool = False,
cache_id: Optional[Hashable] = None):
"""Update tone control status of device."""
if self._device.use_avr_2016_update is True:
await self.async_update_attrs_appcommand(
self.appcommand_attrs, global_update=global_update,
cache_id=cache_id, ignore_missing_response=True)
elif self._device.use_avr_2016_update is False:
# Not available
pass
else:
raise AvrProcessingError(
"Device is not setup correctly, update method not set")
async def async_set_tone_control_command(
self, parameter_type: str, value: int) -> None:
"""Post request for tone control commands."""
cmd = (attr.evolve(
AppCommands.SetToneControl,
set_command=AppCommandCmdParam(name=parameter_type, text=value)),)
await self._device.api.async_post_appcommand(
self._device.urls.appcommand, cmd, cache_id=time.time())
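    # The setters below drive async_set_tone_control_command with the
    # parameter_type values "adjust", "bassvalue" and "treblevalue".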
##############
# Properties #
##############
@property
def bass(self) -> Optional[int]:
"""Return value of bass."""
return self._bass
@property
def bass_level(self) -> Optional[str]:
"""Return level of bass."""
return self._bass_level
@property
def treble(self) -> Optional[int]:
"""Return value of treble."""
return self._treble
@property
def treble_level(self) -> Optional[str]:
"""Return level of treble."""
return self._treble_level
##########
# Setter #
##########
async def async_enable_tone_control(self) -> None:
"""Enable tone control to change settings like bass or treble."""
if self._tone_control_status is False:
raise AvrCommandError(
"Cannot enable tone control, Dynamic EQ must be deactivated")
await self.async_set_tone_control_command("adjust", 1)
async def async_disable_tone_control(self) -> None:
"""Disable tone control to change settings like bass or treble."""
if self._tone_control_status is False:
raise AvrCommandError(
"Cannot disable tone control, Dynamic EQ must be deactivated")
await self.async_set_tone_control_command("adjust", 0)
async def async_set_bass(self, value: int) -> None:
"""
Set receiver bass.
        Minimum is 0, maximum is 12.
        Note:
            Does not work if Dynamic Equalizer is active.
"""
if value < 0 or value > 12:
raise AvrCommandError("Invalid value for bass")
await self.async_enable_tone_control()
await self.async_set_tone_control_command("bassvalue", value)
async def async_bass_up(self) -> None:
"""
Increase level of Bass.
Note:
            Does not work if Dynamic Equalizer is active.
"""
if self.bass == 12:
return
await self.async_enable_tone_control()
await self.async_set_tone_control_command("bassvalue", self.bass + 1)
await self.async_update()
async def async_bass_down(self) -> None:
"""
Decrease level of Bass.
Note:
            Does not work if Dynamic Equalizer is active.
"""
if self.bass == 0:
return
await self.async_enable_tone_control()
await self.async_set_tone_control_command("bassvalue", self.bass - 1)
await self.async_update()
async def async_set_treble(self, value: int) -> None:
"""
Set receiver treble.
        Minimum is 0, maximum is 12.
        Note:
            Does not work if Dynamic Equalizer is active.
"""
if value < 0 or value > 12:
raise AvrCommandError("Invalid value for treble")
await self.async_enable_tone_control()
await self.async_set_tone_control_command("treblevalue", value)
async def async_treble_up(self) -> None:
"""
Increase level of Treble.
Note:
            Does not work if Dynamic Equalizer is active.
"""
if self.treble == 12:
return
await self.async_enable_tone_control()
await self.async_set_tone_control_command(
"treblevalue", self.treble + 1
)
await self.async_update()
async def async_treble_down(self) -> None:
"""
Decrease level of Treble.
Note:
            Does not work if Dynamic Equalizer is active.
"""
if self.treble == 0:
return
await self.async_enable_tone_control()
await self.async_set_tone_control_command(
"treblevalue", self.treble - 1
)
await self.async_update()
def tone_control_factory(instance: DenonAVRFoundation) -> DenonAVRToneControl:
"""Create DenonAVRToneControl at receiver instances."""
# pylint: disable=protected-access
new = DenonAVRToneControl(device=instance._device)
return new
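# Hedged usage sketch (not part of the library module): driving tone control on
# an already connected receiver object from this package. `receiver` and its
# internal `_device` attribute are assumed to exist and be set up.
async def _example_raise_bass(receiver) -> None:
    """Create the tone control helper, refresh its state and raise bass once."""
    tone = tone_control_factory(receiver)
    tone.setup()
    await tone.async_update()
    await tone.async_bass_up()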
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.db.sqlalchemy import api as db_api
from senlin.tests.common import base
from senlin.tests.common import utils
from senlin.tests.db import shared
class DBAPIClusterPolicyTest(base.SenlinTestCase):
def setUp(self):
super(DBAPIClusterPolicyTest, self).setUp()
self.ctx = utils.dummy_context()
self.profile = shared.create_profile(self.ctx)
self.cluster = shared.create_cluster(self.ctx, self.profile)
def create_policy(self, **kwargs):
data = {
'name': 'test_policy',
'type': 'ScalingPolicy',
'spec': {
'min_size': 1,
'max_size': 10,
                'pause_time': 'PT10M',
},
'level': 50,
'cooldown': 60,
'data': None,
}
data.update(kwargs)
return db_api.policy_create(self.ctx, data)
def test_policy_attach_detach(self):
policy = self.create_policy()
fields = {
'enabled': True,
'level': 50
}
db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy.id,
fields)
bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id)
self.assertEqual(1, len(bindings))
self.assertEqual(True, bindings[0].enabled)
# This will succeed
db_api.cluster_policy_detach(self.ctx, self.cluster.id, policy.id)
bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id)
self.assertEqual(0, len(bindings))
# This will fail silently
res = db_api.cluster_policy_detach(self.ctx, self.cluster.id, 'BOGUS')
self.assertIsNone(res)
def test_policy_enable_disable(self):
policy = self.create_policy()
fields = {
'enabled': True,
'level': 50
}
db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy.id,
fields)
bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id)
self.assertEqual(1, len(bindings))
self.assertEqual(True, bindings[0].enabled)
db_api.cluster_policy_update(self.ctx, self.cluster.id, policy.id,
{'enabled': True})
bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id)
self.assertEqual(1, len(bindings))
self.assertEqual(True, bindings[0].enabled)
db_api.cluster_policy_update(self.ctx, self.cluster.id, policy.id,
{'enabled': False})
bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id)
self.assertEqual(1, len(bindings))
self.assertEqual(False, bindings[0].enabled)
db_api.cluster_policy_update(self.ctx, self.cluster.id, policy.id,
{'enabled': True})
bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id)
self.assertEqual(1, len(bindings))
self.assertEqual(True, bindings[0].enabled)
# No policy binding found
res = db_api.cluster_policy_update(self.ctx, self.cluster.id, 'BOGUS',
{})
self.assertIsNone(res)
def test_policy_get_all_prioritized(self):
policy = self.create_policy()
fields = {'enabled': True, 'level': 50, 'priority': 20}
binding1 = db_api.cluster_policy_attach(self.ctx, self.cluster.id,
policy.id, fields)
fields = {'enabled': True, 'level': 50, 'priority': 40}
binding2 = db_api.cluster_policy_attach(self.ctx, self.cluster.id,
policy.id, fields)
bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id)
self.assertEqual(binding1.id, bindings[1].id)
self.assertEqual(binding2.id, bindings[0].id)
def test_policy_get_all_with_filters(self):
values = {'policy1': {'level': 40, 'priority': 40},
'policy2': {'level': 30, 'priority': 60}}
for key in values:
value = values[key]
policy_id = self.create_policy(id=key).id
db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy_id,
value)
filters = {'policy_id': ['policy1', 'policyx']}
results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
filters=filters)
self.assertEqual(1, len(results))
self.assertEqual('policy1', results[0].policy_id)
filters = {'priority': 60}
results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
filters=filters)
self.assertEqual(1, len(results))
self.assertEqual('policy2', results[0].policy_id)
def test_policy_get_all_with_empty_filters(self):
for pid in ['policy1', 'policy2']:
self.create_policy(id=pid)
db_api.cluster_policy_attach(self.ctx, self.cluster.id, pid, {})
filters = None
results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
filters=filters)
self.assertEqual(2, len(results))
def test_policy_get_all_with_sort_key_are_used(self):
values = {
'policy1': {'level': 40, 'priority': 40, 'cooldown': 1,
'enabled': True},
'policy2': {'level': 30, 'priority': 60, 'cooldown': 2,
'enabled': True},
'policy3': {'level': 50, 'priority': 10, 'cooldown': 3,
'enabled': True}
}
# prepare
for key in values:
value = values[key]
policy_id = self.create_policy(id=key).id
db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy_id,
value)
mock_paginate = self.patchobject(db_api.utils, 'paginate_query')
sort_keys = ['level', 'priority', 'cooldown', 'enabled']
db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
sort_keys=sort_keys)
# Check sort_keys used
args = mock_paginate.call_args[0]
used_sort_keys = set(args[3])
expected_keys = set(['id', 'level', 'priority', 'cooldown',
'enabled'])
self.assertEqual(expected_keys, used_sort_keys)
def test_policy_get_all_with_sort_key_and_dir(self):
values = {
'policy1': {'level': 40, 'priority': 40, 'cooldown': 10,
'enabled': True},
'policy2': {'level': 30, 'priority': 60, 'cooldown': 20,
'enabled': True},
'policy3': {'level': 50, 'priority': 10, 'cooldown': 30,
'enabled': False}
}
# prepare
for key in values:
value = values[key]
policy_id = self.create_policy(id=key).id
db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy_id,
value)
# sorted by level
sort_keys = ['level']
results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
sort_keys=sort_keys)
self.assertEqual('policy2', results[0].policy_id)
self.assertEqual('policy1', results[1].policy_id)
self.assertEqual('policy3', results[2].policy_id)
# sorted by priority
sort_keys = ['priority']
results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
sort_keys=sort_keys)
self.assertEqual('policy3', results[0].policy_id)
self.assertEqual('policy1', results[1].policy_id)
self.assertEqual('policy2', results[2].policy_id)
# sorted by cooldown
sort_keys = ['cooldown']
results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
sort_keys=sort_keys)
self.assertEqual('policy1', results[0].policy_id)
self.assertEqual('policy2', results[1].policy_id)
self.assertEqual('policy3', results[2].policy_id)
# sorted by enabled, the 2nd and 3rd are unpredictable
sort_keys = ['enabled']
results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
sort_keys=sort_keys)
self.assertEqual('policy3', results[0].policy_id)
# sorted by enabled, the 2nd and 3rd are ordered by priority
sort_keys = ['enabled', 'priority']
results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
sort_keys=sort_keys)
self.assertEqual('policy3', results[0].policy_id)
self.assertEqual('policy1', results[1].policy_id)
self.assertEqual('policy2', results[2].policy_id)
# sorted by cooldown, descending
sort_keys = ['cooldown']
results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
sort_keys=sort_keys,
sort_dir='desc')
self.assertEqual('policy3', results[0].policy_id)
self.assertEqual('policy2', results[1].policy_id)
self.assertEqual('policy1', results[2].policy_id)
def test_policy_get_all_with_default_sort_keys(self):
values = {'policy1': {'level': 40, 'priority': 40},
'policy2': {'level': 30, 'priority': 60}}
for key in values:
value = values[key]
policy_id = self.create_policy(id=key).id
db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy_id,
value)
filters = None
results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
filters=filters)
self.assertEqual(2, len(results))
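    # Illustrative note (not part of the original tests): every attach fixture
    # above follows the same minimal round trip, e.g.:
    #   policy = self.create_policy(id='policyX')
    #   db_api.cluster_policy_attach(
    #       self.ctx, self.cluster.id, policy.id,
    #       {'enabled': True, 'level': 50, 'priority': 10})
    #   bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id)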
|
|
#!/usr/bin/env python
# ===================================
# Copyright (c) Microsoft Corporation. All rights reserved.
# See license.txt for license information.
# ===================================
import subprocess
import shutil
import pwd
import grp
import os
import sys
import stat
import tempfile
import imp
import re
protocol = imp.load_source('protocol', '../protocol.py')
nxDSCLog = imp.load_source('nxDSCLog', '../nxDSCLog.py')
LG = nxDSCLog.DSCLog
# [Key] string GetScript;
# [Key] string SetScript;
# [Key] string TestScript;
# [write] string User;
# [write] string Group;
# [Read] string Result;
global show_mof
show_mof = False
def Set_Marshall(GetScript, SetScript, TestScript, User, Group):
if GetScript is not None:
GetScript = GetScript.encode('ascii', 'ignore')
else:
GetScript = ''
if SetScript is not None:
SetScript = SetScript.encode('ascii', 'ignore')
else:
SetScript = ''
if TestScript is not None:
TestScript = TestScript.encode('ascii', 'ignore')
else:
TestScript = ''
if User is not None:
User = User.encode('ascii', 'ignore')
else:
User = ''
if Group is not None:
Group = Group.encode('ascii', 'ignore')
else:
Group = ''
retval = Set(GetScript, SetScript, TestScript, User, Group)
return retval
def Test_Marshall(GetScript, SetScript, TestScript, User, Group):
if GetScript is not None:
GetScript = GetScript.encode('ascii', 'ignore')
else:
GetScript = ''
if SetScript is not None:
SetScript = SetScript.encode('ascii', 'ignore')
else:
SetScript = ''
if TestScript is not None:
TestScript = TestScript.encode('ascii', 'ignore')
else:
TestScript = ''
if User is not None:
User = User.encode('ascii', 'ignore')
else:
User = ''
if Group is not None:
Group = Group.encode('ascii', 'ignore')
else:
Group = ''
retval = Test(GetScript, SetScript, TestScript, User, Group)
return retval
def Get_Marshall(GetScript, SetScript, TestScript, User, Group):
arg_names = list(locals().keys())
if GetScript is not None:
GetScript = GetScript.encode('ascii', 'ignore')
else:
GetScript = ''
if SetScript is not None:
SetScript = SetScript.encode('ascii', 'ignore')
else:
SetScript = ''
if TestScript is not None:
TestScript = TestScript.encode('ascii', 'ignore')
else:
TestScript = ''
if User is not None:
User = User.encode('ascii', 'ignore')
else:
User = ''
if Group is not None:
Group = Group.encode('ascii', 'ignore')
else:
Group = ''
retval = 0
(retval, GetScript, SetScript, TestScript, User, Group,
Result) = Get(GetScript, SetScript, TestScript, User, Group)
GetScript = protocol.MI_String(GetScript)
SetScript = protocol.MI_String(SetScript)
TestScript = protocol.MI_String(TestScript)
User = protocol.MI_String(User)
Group = protocol.MI_String(Group)
Result = protocol.MI_String(Result)
arg_names.append('Result')
retd = {}
ld = locals()
for k in arg_names:
retd[k] = ld[k]
return retval, retd
#
# Begin user defined DSC functions
#
def SetShowMof(a):
global show_mof
show_mof = a
def ShowMof(op, GetScript, SetScript, TestScript, User, Group):
if not show_mof:
return
mof = ''
mof += op + ' nxScript MyScript \n'
mof += '{\n'
mof += ' TestScript = "' + TestScript + '"\n'
mof += ' GetScript = "' + GetScript + '"\n'
mof += ' SetScript = "' + SetScript + '"\n'
mof += ' User = "' + User + '"\n'
mof += ' Group = "' + Group + '"\n'
mof += '}\n'
f = open('./test_mofs.log', 'a')
Print(mof, file=f)
f.close()
def Print(s, file=sys.stdout):
file.write(s + '\n')
def opened_w_error(filename, mode="r"):
"""
This context ensures the file is closed.
"""
try:
f = open(filename, mode)
except IOError, err:
return None, err
return f, None
def WriteFile(path, contents):
f, error = opened_w_error(path, 'wb')
if error:
Print("Exception opening file " + path + " Error: " + str(error), file=sys.stderr)
LG().Log('ERROR', "Exception opening file " + path + " Error: " + str(error))
return -1
else:
        contents = re.sub(r'^\s+', '', contents)
f.write(contents.replace("\r", ""))
f.close()
return 0
def GetUID(User):
uid = None
try:
uid = pwd.getpwnam(User)[2]
except KeyError:
Print('ERROR: Unknown UID for ' + User, file=sys.stderr)
LG().Log('ERROR', 'ERROR: Unknown UID for ' + User)
return uid
def GetGID(Group):
gid = None
try:
gid = grp.getgrnam(Group)[2]
except KeyError:
Print('ERROR: Unknown GID for ' + Group, file=sys.stderr)
LG().Log('ERROR', 'ERROR: Unknown GID for ' + Group)
return gid
# This is a function that returns a callback function. The callback
# function is called prior to a user's script being executed
def PreExec(uid, gid, User):
def SetIDs_callback():
        if gid != -1:
            os.setgroups([gid])
            os.setgid(gid)
        if uid != -1:
os.setuid(uid)
os.environ["HOME"] = os.path.expanduser("~" + User)
return SetIDs_callback
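# The callback returned by PreExec is handed to subprocess.Popen via
# preexec_fn in Set(), Test() and Get() below, so the user's script runs with
# the requested uid/gid and HOME.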
class Params:
def __init__(self, GetScript, SetScript, TestScript, User, Group):
self.GetScript = GetScript
self.SetScript = SetScript
self.TestScript = TestScript
self.User = User
self.Group = Group
self.Result = ''
def Set(GetScript, SetScript, TestScript, User, Group):
ShowMof('SET', GetScript, SetScript, TestScript, User, Group)
p = Params(GetScript, SetScript, TestScript, User, Group)
# write out SetScript to a file, run it as User/Group, return exit code
tempdir = TempWorkingDirectory(User, Group)
path = tempdir.GetTempPath()
command = path
uid = gid = -1
if User:
uid = GetUID(User)
if uid is None:
Print('ERROR: Unknown UID for ' + User, file=sys.stderr)
LG().Log('ERROR', 'ERROR: Unknown UID for ' + User)
return [-1]
if Group:
gid = GetGID(Group)
if gid is None:
Print('ERROR: Unknown GID for ' + Group, file=sys.stderr)
LG().Log('ERROR', 'ERROR: Unknown GID for ' + Group)
return [-1]
WriteFile(path, SetScript)
os.chmod(path, stat.S_IXUSR | stat.S_IRUSR | stat.S_IXGRP | stat.S_IRGRP)
os.chown(path, uid, gid)
proc = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=PreExec(uid, gid, User))
    exit_code = proc.wait()
    # Read each stream once; a second read() on the same pipe returns ''.
    stdout = proc.stdout.read().encode('ascii', 'ignore')
    stderr = proc.stderr.read().encode('ascii', 'ignore')
    Print("stdout: " + stdout)
    LG().Log('INFO', "stdout: " + stdout)
    Print("stderr: " + stderr)
    LG().Log('INFO', "stderr: " + stderr)
os.remove(path)
return [exit_code]
def Test(GetScript, SetScript, TestScript, User, Group):
# write out TestScript to a file, run it as User/Group, return exit code
ShowMof('TEST', GetScript, SetScript, TestScript, User, Group)
p = Params(GetScript, SetScript, TestScript, User, Group)
tempdir = TempWorkingDirectory(User, Group)
path = tempdir.GetTempPath()
command = path
uid = gid = -1
if User:
uid = GetUID(User)
if uid is None:
Print('ERROR: Unknown UID for ' + User, file=sys.stderr)
LG().Log('ERROR', 'ERROR: Unknown UID for ' + User)
return [-1]
if Group:
gid = GetGID(Group)
if gid is None:
Print('ERROR: Unknown GID for ' + Group, file=sys.stderr)
LG().Log('ERROR', 'ERROR: Unknown GID for ' + Group)
return [-1]
WriteFile(path, TestScript)
os.chmod(path, stat.S_IXUSR | stat.S_IRUSR | stat.S_IXGRP | stat.S_IRGRP)
os.chown(path, uid, gid)
proc = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=PreExec(uid, gid, User))
    exit_code = proc.wait()
    # Read each stream once; a second read() on the same pipe returns ''.
    stdout = proc.stdout.read().encode('ascii', 'ignore')
    stderr = proc.stderr.read().encode('ascii', 'ignore')
    Print("stdout: " + stdout)
    LG().Log('INFO', "stdout: " + stdout)
    Print("stderr: " + stderr)
    LG().Log('INFO', "stderr: " + stderr)
os.remove(path)
return [exit_code]
def Get(GetScript, SetScript, TestScript, User, Group):
# write out GetScript to a file, run it as User/Group, then return
# stderr/stdout and exit code
ShowMof('GET', GetScript, SetScript, TestScript, User, Group)
p = Params(GetScript, SetScript, TestScript, User, Group)
tempdir = TempWorkingDirectory(User, Group)
path = tempdir.GetTempPath()
command = path
uid = gid = -1
if User:
uid = GetUID(User)
if uid is None:
Print('ERROR: Unknown UID for ' + User, file=sys.stderr)
LG().Log('ERROR', 'ERROR: Unknown UID for ' + User)
return [-1]
if Group:
gid = GetGID(Group)
if gid is None:
Print('ERROR: Unknown GID for ' + Group, file=sys.stderr)
LG().Log('ERROR', 'ERROR: Unknown GID for ' + Group)
return [-1]
WriteFile(path, GetScript)
os.chmod(path, stat.S_IXUSR | stat.S_IRUSR | stat.S_IXGRP | stat.S_IRGRP)
os.chown(path, uid, gid)
proc = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=PreExec(uid, gid, User))
exit_code = proc.wait()
Result = proc.stdout.read().encode('ascii', 'ignore')
Print("stdout: " + Result)
LG().Log('INFO', "stdout: " + Result)
Print("stderr: " + proc.stderr.read().encode('ascii', 'ignore'))
LG().Log('INFO', "stderr: " +
proc.stderr.read().encode('ascii', 'ignore'))
os.remove(path)
return [exit_code, GetScript, SetScript, TestScript, User, Group, Result]
class TempWorkingDirectory:
def __init__(self, User, Group):
self.dir = tempfile.mkdtemp()
uid = gid = -1
if User:
uid = GetUID(User)
if uid is None:
Print('ERROR: Unknown UID for ' + User, file=sys.stderr)
LG().Log('ERROR', 'ERROR: Unknown UID for ' + User)
uid = -1
if Group:
gid = GetGID(Group)
if gid is None:
Print('ERROR: Unknown GID for ' + Group, file=sys.stderr)
LG().Log('ERROR', 'ERROR: Unknown GID for ' + Group)
gid = -1
os.chown(self.dir, uid, gid)
os.chmod(self.dir, stat.S_IXUSR |
stat.S_IRUSR | stat.S_IXGRP | stat.S_IRGRP)
def __del__(self):
shutil.rmtree(self.dir)
def GetTempPath(self):
return os.path.join(self.dir, "temp_script.sh")
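# Hedged sketch (not part of the original resource): roughly how the DSC engine
# exercises this provider. The inline shell snippets and 'root'/'root' are
# illustrative only; nothing here executes at import time, and running it
# requires privileges sufficient for chown/setuid.
def _example_invocation():
    get_script = '#!/bin/sh\necho current-state\n'
    set_script = '#!/bin/sh\necho applying-state\n'
    test_script = '#!/bin/sh\nexit 0\n'
    set_result = Set_Marshall(get_script, set_script, test_script, 'root', 'root')
    test_result = Test_Marshall(get_script, set_script, test_script, 'root', 'root')
    return set_result, test_result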
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.dialogflowcx_v3.services.agents import pagers
from google.cloud.dialogflowcx_v3.types import advanced_settings
from google.cloud.dialogflowcx_v3.types import agent
from google.cloud.dialogflowcx_v3.types import agent as gcdc_agent
from google.cloud.dialogflowcx_v3.types import flow
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from .transports.base import AgentsTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import AgentsGrpcAsyncIOTransport
from .client import AgentsClient
class AgentsAsyncClient:
"""Service for managing [Agents][google.cloud.dialogflow.cx.v3.Agent]."""
_client: AgentsClient
DEFAULT_ENDPOINT = AgentsClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = AgentsClient.DEFAULT_MTLS_ENDPOINT
agent_path = staticmethod(AgentsClient.agent_path)
parse_agent_path = staticmethod(AgentsClient.parse_agent_path)
agent_validation_result_path = staticmethod(
AgentsClient.agent_validation_result_path
)
parse_agent_validation_result_path = staticmethod(
AgentsClient.parse_agent_validation_result_path
)
environment_path = staticmethod(AgentsClient.environment_path)
parse_environment_path = staticmethod(AgentsClient.parse_environment_path)
flow_path = staticmethod(AgentsClient.flow_path)
parse_flow_path = staticmethod(AgentsClient.parse_flow_path)
flow_validation_result_path = staticmethod(AgentsClient.flow_validation_result_path)
parse_flow_validation_result_path = staticmethod(
AgentsClient.parse_flow_validation_result_path
)
security_settings_path = staticmethod(AgentsClient.security_settings_path)
parse_security_settings_path = staticmethod(
AgentsClient.parse_security_settings_path
)
common_billing_account_path = staticmethod(AgentsClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(
AgentsClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(AgentsClient.common_folder_path)
parse_common_folder_path = staticmethod(AgentsClient.parse_common_folder_path)
common_organization_path = staticmethod(AgentsClient.common_organization_path)
parse_common_organization_path = staticmethod(
AgentsClient.parse_common_organization_path
)
common_project_path = staticmethod(AgentsClient.common_project_path)
parse_common_project_path = staticmethod(AgentsClient.parse_common_project_path)
common_location_path = staticmethod(AgentsClient.common_location_path)
parse_common_location_path = staticmethod(AgentsClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AgentsAsyncClient: The constructed client.
"""
return AgentsClient.from_service_account_info.__func__(AgentsAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AgentsAsyncClient: The constructed client.
"""
return AgentsClient.from_service_account_file.__func__(AgentsAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return AgentsClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
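        # Hedged illustration (not part of the generated client): resolving the
        # endpoint and cert source ahead of time, e.g. for logging:
        #
        #   endpoint, cert_source = (
        #       AgentsAsyncClient.get_mtls_endpoint_and_cert_source(ClientOptions()))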
@property
def transport(self) -> AgentsTransport:
"""Returns the transport used by the client instance.
Returns:
AgentsTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(AgentsClient).get_transport_class, type(AgentsClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, AgentsTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the agents client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.AgentsTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = AgentsClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
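        # Hedged illustration (not part of the generated client): typical
        # construction with Application Default Credentials and the default
        # endpoint:
        #
        #   client = AgentsAsyncClient()
        #
        # or, pinning an explicit regional endpoint (value is illustrative):
        #
        #   client = AgentsAsyncClient(
        #       client_options=ClientOptions(
        #           api_endpoint="us-central1-dialogflow.googleapis.com"))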
async def list_agents(
self,
request: Union[agent.ListAgentsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListAgentsAsyncPager:
r"""Returns the list of all agents in the specified
location.
.. code-block:: python
from google.cloud import dialogflowcx_v3
def sample_list_agents():
# Create a client
client = dialogflowcx_v3.AgentsClient()
# Initialize request argument(s)
request = dialogflowcx_v3.ListAgentsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_agents(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.dialogflowcx_v3.types.ListAgentsRequest, dict]):
The request object. The request message for
[Agents.ListAgents][google.cloud.dialogflow.cx.v3.Agents.ListAgents].
parent (:class:`str`):
Required. The location to list all agents for. Format:
``projects/<Project ID>/locations/<Location ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3.services.agents.pagers.ListAgentsAsyncPager:
The response message for
[Agents.ListAgents][google.cloud.dialogflow.cx.v3.Agents.ListAgents].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = agent.ListAgentsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_agents,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListAgentsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def get_agent(
self,
request: Union[agent.GetAgentRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> agent.Agent:
r"""Retrieves the specified agent.
.. code-block:: python
from google.cloud import dialogflowcx_v3
def sample_get_agent():
# Create a client
client = dialogflowcx_v3.AgentsClient()
# Initialize request argument(s)
request = dialogflowcx_v3.GetAgentRequest(
name="name_value",
)
# Make the request
response = client.get_agent(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflowcx_v3.types.GetAgentRequest, dict]):
The request object. The request message for
[Agents.GetAgent][google.cloud.dialogflow.cx.v3.Agents.GetAgent].
name (:class:`str`):
Required. The name of the agent. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3.types.Agent:
Agents are best described as Natural Language Understanding (NLU) modules
that transform user requests into actionable data.
You can include agents in your app, product, or
service to determine user intent and respond to the
user in a natural way.
After you create an agent, you can add
[Intents][google.cloud.dialogflow.cx.v3.Intent],
[Entity
Types][google.cloud.dialogflow.cx.v3.EntityType],
[Flows][google.cloud.dialogflow.cx.v3.Flow],
[Fulfillments][google.cloud.dialogflow.cx.v3.Fulfillment],
[Webhooks][google.cloud.dialogflow.cx.v3.Webhook],
                and so on to manage the conversation flows.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = agent.GetAgentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_agent,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def create_agent(
self,
request: Union[gcdc_agent.CreateAgentRequest, dict] = None,
*,
parent: str = None,
agent: gcdc_agent.Agent = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcdc_agent.Agent:
r"""Creates an agent in the specified location.
Note: You should always train flows prior to sending them
queries. See the `training
documentation <https://cloud.google.com/dialogflow/cx/docs/concept/training>`__.
.. code-block:: python
from google.cloud import dialogflowcx_v3
def sample_create_agent():
# Create a client
client = dialogflowcx_v3.AgentsClient()
# Initialize request argument(s)
agent = dialogflowcx_v3.Agent()
agent.display_name = "display_name_value"
agent.default_language_code = "default_language_code_value"
agent.time_zone = "time_zone_value"
request = dialogflowcx_v3.CreateAgentRequest(
parent="parent_value",
agent=agent,
)
# Make the request
response = client.create_agent(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflowcx_v3.types.CreateAgentRequest, dict]):
The request object. The request message for
[Agents.CreateAgent][google.cloud.dialogflow.cx.v3.Agents.CreateAgent].
parent (:class:`str`):
                Required. The location to create an agent for. Format:
``projects/<Project ID>/locations/<Location ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
agent (:class:`google.cloud.dialogflowcx_v3.types.Agent`):
Required. The agent to create.
This corresponds to the ``agent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3.types.Agent:
Agents are best described as Natural Language Understanding (NLU) modules
that transform user requests into actionable data.
You can include agents in your app, product, or
service to determine user intent and respond to the
user in a natural way.
After you create an agent, you can add
[Intents][google.cloud.dialogflow.cx.v3.Intent],
[Entity
Types][google.cloud.dialogflow.cx.v3.EntityType],
[Flows][google.cloud.dialogflow.cx.v3.Flow],
[Fulfillments][google.cloud.dialogflow.cx.v3.Fulfillment],
[Webhooks][google.cloud.dialogflow.cx.v3.Webhook],
                and so on to manage the conversation flows.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, agent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = gcdc_agent.CreateAgentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if agent is not None:
request.agent = agent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_agent,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def update_agent(
self,
request: Union[gcdc_agent.UpdateAgentRequest, dict] = None,
*,
agent: gcdc_agent.Agent = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcdc_agent.Agent:
r"""Updates the specified agent.
Note: You should always train flows prior to sending them
queries. See the `training
documentation <https://cloud.google.com/dialogflow/cx/docs/concept/training>`__.
.. code-block:: python
from google.cloud import dialogflowcx_v3
def sample_update_agent():
# Create a client
client = dialogflowcx_v3.AgentsClient()
# Initialize request argument(s)
agent = dialogflowcx_v3.Agent()
agent.display_name = "display_name_value"
agent.default_language_code = "default_language_code_value"
agent.time_zone = "time_zone_value"
request = dialogflowcx_v3.UpdateAgentRequest(
agent=agent,
)
# Make the request
response = client.update_agent(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflowcx_v3.types.UpdateAgentRequest, dict]):
The request object. The request message for
[Agents.UpdateAgent][google.cloud.dialogflow.cx.v3.Agents.UpdateAgent].
agent (:class:`google.cloud.dialogflowcx_v3.types.Agent`):
Required. The agent to update.
This corresponds to the ``agent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
The mask to control which fields get
updated. If the mask is not present, all
fields will be updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3.types.Agent:
Agents are best described as Natural Language Understanding (NLU) modules
that transform user requests into actionable data.
You can include agents in your app, product, or
service to determine user intent and respond to the
user in a natural way.
After you create an agent, you can add
[Intents][google.cloud.dialogflow.cx.v3.Intent],
[Entity
Types][google.cloud.dialogflow.cx.v3.EntityType],
[Flows][google.cloud.dialogflow.cx.v3.Flow],
[Fulfillments][google.cloud.dialogflow.cx.v3.Fulfillment],
[Webhooks][google.cloud.dialogflow.cx.v3.Webhook],
                and so on to manage the conversation flows.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([agent, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = gcdc_agent.UpdateAgentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if agent is not None:
request.agent = agent
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_agent,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("agent.name", request.agent.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def delete_agent(
self,
request: Union[agent.DeleteAgentRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the specified agent.
.. code-block:: python
from google.cloud import dialogflowcx_v3
def sample_delete_agent():
# Create a client
client = dialogflowcx_v3.AgentsClient()
# Initialize request argument(s)
request = dialogflowcx_v3.DeleteAgentRequest(
name="name_value",
)
# Make the request
client.delete_agent(request=request)
Args:
request (Union[google.cloud.dialogflowcx_v3.types.DeleteAgentRequest, dict]):
The request object. The request message for
[Agents.DeleteAgent][google.cloud.dialogflow.cx.v3.Agents.DeleteAgent].
name (:class:`str`):
Required. The name of the agent to delete. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = agent.DeleteAgentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_agent,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
await rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
async def export_agent(
self,
request: Union[agent.ExportAgentRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Exports the specified agent to a binary file.
This method is a `long-running
operation <https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation>`__.
The returned ``Operation`` type has the following
method-specific fields:
- ``metadata``: An empty `Struct
message <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#struct>`__
- ``response``:
[ExportAgentResponse][google.cloud.dialogflow.cx.v3.ExportAgentResponse]
.. code-block:: python
from google.cloud import dialogflowcx_v3
def sample_export_agent():
# Create a client
client = dialogflowcx_v3.AgentsClient()
# Initialize request argument(s)
request = dialogflowcx_v3.ExportAgentRequest(
name="name_value",
)
# Make the request
operation = client.export_agent(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflowcx_v3.types.ExportAgentRequest, dict]):
The request object. The request message for
[Agents.ExportAgent][google.cloud.dialogflow.cx.v3.Agents.ExportAgent].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.dialogflowcx_v3.types.ExportAgentResponse`
The response message for
[Agents.ExportAgent][google.cloud.dialogflow.cx.v3.Agents.ExportAgent].
"""
# Create or coerce a protobuf request object.
request = agent.ExportAgentRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.export_agent,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
agent.ExportAgentResponse,
metadata_type=struct_pb2.Struct,
)
# Done; return the response.
return response
async def restore_agent(
self,
request: Union[agent.RestoreAgentRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Restores the specified agent from a binary file.
Replaces the current agent with a new one. Note that all
existing resources in agent (e.g. intents, entity types, flows)
will be removed.
This method is a `long-running
operation <https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation>`__.
The returned ``Operation`` type has the following
method-specific fields:
- ``metadata``: An empty `Struct
message <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#struct>`__
- ``response``: An `Empty
message <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#empty>`__
Note: You should always train flows prior to sending them
queries. See the `training
documentation <https://cloud.google.com/dialogflow/cx/docs/concept/training>`__.
.. code-block:: python
from google.cloud import dialogflowcx_v3
def sample_restore_agent():
# Create a client
client = dialogflowcx_v3.AgentsClient()
# Initialize request argument(s)
request = dialogflowcx_v3.RestoreAgentRequest(
agent_uri="agent_uri_value",
name="name_value",
)
# Make the request
operation = client.restore_agent(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflowcx_v3.types.RestoreAgentRequest, dict]):
The request object. The request message for
[Agents.RestoreAgent][google.cloud.dialogflow.cx.v3.Agents.RestoreAgent].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
request = agent.RestoreAgentRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.restore_agent,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=struct_pb2.Struct,
)
# Done; return the response.
return response
async def validate_agent(
self,
request: Union[agent.ValidateAgentRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> agent.AgentValidationResult:
r"""Validates the specified agent and creates or updates
validation results. The agent in draft version is
validated. Please call this API after the training is
completed to get the complete validation results.
.. code-block:: python
from google.cloud import dialogflowcx_v3
def sample_validate_agent():
# Create a client
client = dialogflowcx_v3.AgentsClient()
# Initialize request argument(s)
request = dialogflowcx_v3.ValidateAgentRequest(
name="name_value",
)
# Make the request
response = client.validate_agent(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflowcx_v3.types.ValidateAgentRequest, dict]):
The request object. The request message for
[Agents.ValidateAgent][google.cloud.dialogflow.cx.v3.Agents.ValidateAgent].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3.types.AgentValidationResult:
The response message for
[Agents.GetAgentValidationResult][google.cloud.dialogflow.cx.v3.Agents.GetAgentValidationResult].
"""
# Create or coerce a protobuf request object.
request = agent.ValidateAgentRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.validate_agent,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def get_agent_validation_result(
self,
request: Union[agent.GetAgentValidationResultRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> agent.AgentValidationResult:
r"""Gets the latest agent validation result. Agent
validation is performed when ValidateAgent is called.
.. code-block:: python
from google.cloud import dialogflowcx_v3
def sample_get_agent_validation_result():
# Create a client
client = dialogflowcx_v3.AgentsClient()
# Initialize request argument(s)
request = dialogflowcx_v3.GetAgentValidationResultRequest(
name="name_value",
)
# Make the request
response = client.get_agent_validation_result(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.dialogflowcx_v3.types.GetAgentValidationResultRequest, dict]):
The request object. The request message for
[Agents.GetAgentValidationResult][google.cloud.dialogflow.cx.v3.Agents.GetAgentValidationResult].
name (:class:`str`):
Required. The agent name. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/validationResult``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3.types.AgentValidationResult:
The response message for
[Agents.GetAgentValidationResult][google.cloud.dialogflow.cx.v3.Agents.GetAgentValidationResult].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = agent.GetAgentValidationResultRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_agent_validation_result,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflowcx",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("AgentsAsyncClient",)
|
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package sparse_lookup
# Module caffe2.python.layers.sparse_lookup
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python.helpers.arg_scope import get_current_scope
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
get_categorical_limit,
get_key,
IdList,
IdScoreList,
LayerPsParam,
ModelLayer,
)
import collections
import functools
import math
import numpy as np
import operator
def get_sparse_lookup_predictor_version(version):
assert version in {'fp32', 'fp16', 'uint8rowwise'},\
"Unexpected version of sparse_lookup layer {0}".format(version)
return version
class SparseLookup(ModelLayer):
_id_list_supported_reducers = ['PositionWeighted', 'LogMeanExp', 'LogSumExp',
'Max', 'Mean', 'Sum', 'Sqrt', 'None']
_id_score_list_supported_reducers = ['PositionWeighted', 'Mean', 'Sum',
'WeightedSum', 'WeightedMean', 'None']
def __init__(self, model, input_record, inner_shape, reducer,
weight_init=None, weight_optim=None,
name='sparse_lookup', **kwargs):
super(SparseLookup, self).__init__(model, name, input_record, **kwargs)
# TODO Add some asserts about input type
if isinstance(inner_shape, int):
inner_shape = [inner_shape]
assert isinstance(inner_shape, list) or isinstance(inner_shape, tuple),\
"Unexpected type for inner_shape, expected list or tuple, got {0}".\
format(type(inner_shape))
if reducer == "PositionWeighted":
self.external_weights = input_record.values()
self.reducer = reducer
input_dim = get_categorical_limit(input_record)
assert input_dim > 0, (
"{} should have categorical limit > 0, but got {}".format(
get_key(input_record)(), input_dim))
scale = math.sqrt(1.0 / input_dim)
self.shape = [input_dim] + inner_shape
self.weight_init = weight_init if weight_init else (
'UniformFill', {'min': -scale, 'max': scale})
if schema.equal_schemas(self.input_record, IdList):
sparse_key = self.input_record.items()
elif schema.equal_schemas(
self.input_record,
IdScoreList,
check_field_types=False):
sparse_key = self.input_record.keys()
else:
raise NotImplementedError()
if self.input_record.lengths.metadata:
avg_length = self.input_record.lengths.metadata.expected_value
else:
avg_length = None
self.w = self.create_param(
param_name='w',
shape=self.shape,
initializer=self.weight_init,
optimizer=weight_optim,
ps_param=LayerPsParam(
sparse_key=sparse_key,
average_length=avg_length))
self.scale_bias_init = ('ConstantFill', {'value': 0.0})
self.scale_bias = self.create_param(
param_name='scale_bias',
shape=[],
initializer=self.scale_bias_init,
optimizer=model.NoOptim)
self.output_schema = schema.Scalar(
(np.float32, inner_shape),
self.get_next_blob_reference('output'),
)
def get_memory_usage(self):
return functools.reduce(operator.mul, self.shape) * 4
def get_fp16_compatible_parameters(self):
return [self.w]
def get_8bits_compatible_parameters(self):
RowwiseQuantized8BitsWeight =\
collections.namedtuple(
'RowwiseQuantized8BitsWeight',
['w', 'scale_bias'], verbose=True)
weight = RowwiseQuantized8BitsWeight(
self.w, self.scale_bias)
return [weight]
def _gather_wrapper(self, net, version, in_indices, out):
# Gather can work on all kinds of input data types, and output
# data with the same type. Convert the output of Gather to float,
# because the follow-up Ops expect fp32.
if version == 'fp32':
return net.Gather([self.w, in_indices], out)
elif version == 'fp16':
gathered_w = net.Gather([self.w, in_indices], 'gathered_w')
return net.HalfToFloat(gathered_w, out)
elif version == 'uint8rowwise':
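            # Editor's note: in the row-wise 8-bit scheme each embedding row is
            # assumed to be stored as uint8 codes plus a per-row (scale, bias)
            # pair, so the dequantization below reconstructs floats roughly as
            # code * scale + bias.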
gathered_w = net.Gather([self.w, in_indices], 'gathered_w')
gathered_scale_bias = net.Gather(
[self.scale_bias, in_indices],
'gathered_scale_bias'
)
return net.Rowwise8BitQuantizedToFloat(
[gathered_w, gathered_scale_bias], out)
else:
raise "Unsupported version of operators in SparseLookup " +\
"layer: {0}".format(version)
def _sparse_lengths_weighted_reducer(
self, in_indices, weights, reducer,
net, version, grad_on_weights=0):
op_input = [
self.w,
weights,
in_indices,
self.input_record.lengths()
]
layer_name = 'SparseLengths' + reducer
if version in ['fp32', 'fp16']:
# SparseLengths* Ops with engine='fp16' will accept either
# fp16 or fp32 embedding matrix and output fp32 pooled embedding
net.__getattr__(layer_name)(
op_input,
self.output_schema.field_blobs(),
grad_on_weights=grad_on_weights,
engine='fp16',
)
elif version == 'uint8rowwise':
op_input.insert(len(op_input), self.scale_bias)
net.__getattr__(layer_name + '8BitsRowwise')(
op_input, self.output_schema.field_blobs())
else:
raise "Unsupported version of operator in SparseLookUp " +\
"layer: {0}".format(version)
    # Compute the mean pooling result from the sum pooling result.
    # This is a hack until the distributed trainer supports SparseLengthsMean.
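    # For example (illustrative numbers only): with lengths [2, 3], the two
    # sum-pooled rows are multiplied element-wise by [1/2, 1/3]; lengths are
    # clipped to a minimum of 1 so the inverse stays finite.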
def _mean_pooling_helper(self, net, sum_pooling_output, result_blobs):
cast_len = net.Cast(self.input_record.lengths(), 1, to=core.DataType.FLOAT)
clip_len = net.Clip(cast_len, 1, min=1.0)
inv_len = net.Pow(clip_len, 1, exponent=-1.0)
net.StopGradient(inv_len, inv_len)
net.Mul(
sum_pooling_output + [inv_len],
result_blobs,
broadcast=1,
axis=0
)
# deal with sparse features of id_list type
def _add_ops_id_list(self, net, version):
assert self.reducer in self._id_list_supported_reducers, (
"Unsupported reducer: {} for ID_LIST".format(self.reducer)
)
if self.reducer in ['Sum', 'Mean']:
op_input = [self.w,
self.input_record.items(),
self.input_record.lengths()]
if self.reducer == 'Mean':
sum_pooling_output = [net.NextScopedBlob('internal_output')]
else:
sum_pooling_output = self.output_schema.field_blobs()
if version in ['fp32', 'fp16']:
# SparseLengths* Ops with engine='fp16' will accept either
# fp16 or fp32 embedding matrix and output fp32 pooled embedding
net.SparseLengthsSum(
op_input,
sum_pooling_output,
engine='fp16',
)
elif version == 'uint8rowwise':
op_input.insert(len(op_input), self.scale_bias)
net.SparseLengthsSum8BitsRowwise(
op_input, sum_pooling_output)
else:
raise "Unsupported version of operator in SparseLookUp " +\
"layer: {0}".format(version)
if self.reducer == 'Mean':
self._mean_pooling_helper(
net, sum_pooling_output,
self.output_schema.field_blobs()
)
elif self.reducer == 'Sqrt':
sqrt_weight = net.LengthsToWeights(
[self.input_record.lengths()],
[net.NextScopedBlob('lengths_sqrt')],
power=0.5,
)
self._sparse_lengths_weighted_reducer(
self.input_record.items(),
sqrt_weight,
'WeightedSum', net, version)
elif self.reducer == 'None':
# Gather operator will gather the embedding for each id of
# each IdList.
self._gather_wrapper(net, version, self.input_record.items(),
self.output_schema.field_blobs())
else:
table_rows = self._gather_wrapper(
net, version, self.input_record.items(), 'table_rows')
segment_ids = net.LengthsToSegmentIds(
self.input_record.lengths(),
self.input_record.lengths() + '_sid')
net.__getattr__('SortedSegmentRange' + self.reducer)(
[table_rows, segment_ids],
self.output_schema.field_blobs(),
engine='fp16',
)
# deal with sparse features of id_score_list type
def _add_ops_id_score_list(self, net, version):
assert self.reducer in self._id_score_list_supported_reducers, (
"Unsupported reducer: {} for ID_SCORE_LIST".format(self.reducer)
)
if self.reducer in ['WeightedSum', 'WeightedMean']:
self._sparse_lengths_weighted_reducer(
self.input_record.keys(),
self.input_record.values(),
self.reducer, net, version)
elif self.reducer in ['Sum', 'Mean']:
op_input = [self.w,
self.input_record.keys(),
self.input_record.lengths()]
if self.reducer == 'Mean':
sum_pooling_output = [net.NextScopedBlob('sum_pooling_output')]
else:
sum_pooling_output = self.output_schema.field_blobs()
if version in ['fp32', 'fp16']:
net.SparseLengthsSum(
op_input,
sum_pooling_output,
engine='fp16',
)
elif version == 'uint8rowwise':
net.SparseLengthsSum8BitsRowwise(
op_input, sum_pooling_output)
else:
raise "Unsupported version of operator in SparseLookUp " +\
"layer: {0}".format(version)
if self.reducer == 'Mean':
self._mean_pooling_helper(
net, sum_pooling_output,
self.output_schema.field_blobs()
)
elif self.reducer == 'PositionWeighted':
self._sparse_lengths_weighted_reducer(
self.input_record.keys(),
self.external_weights,
'WeightedSum', net, version, grad_on_weights=1)
elif self.reducer == 'None':
            # Gather operator will gather the embedding for each id of
            # each IdScoreList.
self._gather_wrapper(net, version, self.input_record.keys(),
self.output_schema.field_blobs())
else:
raise "Only Sum, Mean, None are supported for IdScoreList input." +\
"Trying to create with {}".format(self.reducer)
def add_ops(self, net):
cur_scope = get_current_scope()
version = get_sparse_lookup_predictor_version(
**cur_scope.get(get_sparse_lookup_predictor_version.__name__,
{'version': 'fp32'}))
if schema.equal_schemas(self.input_record, IdList):
self._add_ops_id_list(net, version=version)
elif schema.equal_schemas(self.input_record,
IdScoreList,
check_field_types=False):
self._add_ops_id_score_list(net, version=version)
else:
raise "Unsupported input type {0}".format(self.input_record)
|
|
#!/usr/bin/env python
"""Test client RDFValues."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import platform
import socket
from absl import app
from absl.testing import absltest
from future.builtins import int
from future.builtins import str
import mock
import psutil
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import type_info
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import client_network as rdf_client_network
from grr_response_core.lib.rdfvalues import client_stats as rdf_client_stats
from grr_response_core.lib.rdfvalues import test_base as rdf_test_base
from grr_response_proto import knowledge_base_pb2
from grr.test_lib import test_lib
class UserTests(rdf_test_base.RDFValueTestMixin, test_lib.GRRBaseTest):
"""Test the User ProtoStruct implementation."""
rdfvalue_class = rdf_client.User
USER_ACCOUNT = dict(
username=u"user",
full_name=u"John Smith",
comment=u"This is a user",
last_logon=10000,
domain=u"Some domain name",
homedir=u"/home/user",
sid=u"some sid")
def GenerateSample(self, number=0):
result = rdf_client.User(username="user%s" % number)
result.desktop = "User Desktop %s" % number
return result
def testKBUserBackwardsCompatibility(self):
"""Check User can be created from deprecated KBUser."""
kbuser = rdf_client.KnowledgeBaseUser()
kbuser.username = "user1"
kbuser.desktop = "User Desktop 1"
user = rdf_client.User(kbuser)
self.assertEqual(user.username, "user1")
self.assertEqual(user.desktop, "User Desktop 1")
def testCompatibility(self):
proto = knowledge_base_pb2.User(username="user1")
proto.desktop = "User Desktop 1"
serialized = proto.SerializeToString()
rdf_from_serialized = rdf_client.User.FromSerializedBytes(serialized)
self.assertEqual(rdf_from_serialized.username, proto.username)
self.assertEqual(rdf_from_serialized.desktop, proto.desktop)
rdf_direct = rdf_client.User(username="user1", desktop="User Desktop 1")
self.assertEqual(rdf_from_serialized, rdf_direct)
def testTimeEncoding(self):
fast_proto = rdf_client.User(username="user")
datetime = rdfvalue.RDFDatetime.FromHumanReadable("2013-04-05 16:00:03")
# Check that we can coerce an int to an RDFDatetime.
# TODO(hanuszczak): Yeah, but why would we...?
fast_proto.last_logon = datetime.AsMicrosecondsSinceEpoch()
self.assertEqual(fast_proto.last_logon, datetime)
# Check that this is backwards compatible with the old protobuf library.
proto = knowledge_base_pb2.User()
proto.ParseFromString(fast_proto.SerializeToBytes())
# Old implementation should just see the last_logon field as an integer.
self.assertIsInstance(proto.last_logon, int)
self.assertEqual(proto.last_logon, datetime.AsMicrosecondsSinceEpoch())
# fast protobufs interoperate with old serialized formats.
serialized_data = proto.SerializeToString()
fast_proto = rdf_client.User.FromSerializedBytes(serialized_data)
self.assertIsInstance(fast_proto.last_logon, rdfvalue.RDFDatetime)
self.assertEqual(fast_proto.last_logon, datetime.AsMicrosecondsSinceEpoch())
def testPrettyPrintMode(self):
for mode, result in [
(0o775, "-rwxrwxr-x"),
(0o75, "----rwxr-x"),
(0, "----------"),
# DIR
(0o40775, "drwxrwxr-x"),
# SUID
(35232, "-rwSr-----"),
# GID
(34208, "-rw-r-S---"),
# CHR
(9136, "crw-rw---T"),
# BLK
(25008, "brw-rw----"),
# FIFO
(4516, "prw-r--r--"),
# Socket
(49663, "srwxrwxrwx"),
# Sticky
(33791, "-rwxrwxrwt"),
# Sticky, not x
(33784, "-rwxrwx--T"),
]:
value = rdf_client_fs.StatMode(mode)
self.assertEqual(str(value), result)
class ClientURNTests(rdf_test_base.RDFValueTestMixin, test_lib.GRRBaseTest):
"""Test the ClientURN."""
rdfvalue_class = rdf_client.ClientURN
def GenerateSample(self, number=0):
return rdf_client.ClientURN("C.%016X" % number)
def testInitialization(self):
"""ClientURNs don't allow empty init so we override the default test."""
self.rdfvalue_class("C.00aaeccbb45f33a3")
# Initialize from another instance.
sample = self.GenerateSample()
self.CheckRDFValue(self.rdfvalue_class(sample), sample)
def testURNValidation(self):
# These should all come out the same: C.00aaeccbb45f33a3
test_set = [
"C.00aaeccbb45f33a3", "C.00aaeccbb45f33a3".upper(),
"c.00aaeccbb45f33a3", "C.00aaeccbb45f33a3 "
]
results = []
for urnstr in test_set:
results.append(rdf_client.ClientURN(urnstr))
results.append(rdf_client.ClientURN("aff4:/%s" % urnstr))
self.assertLen(results, len(test_set) * 2)
# Check all are identical
self.assertTrue(all([x == results[0] for x in results]))
# Check we can handle URN as well as string
rdf_client.ClientURN(rdf_client.ClientURN(test_set[0]))
error_set = [
"B.00aaeccbb45f33a3", "c.00accbb45f33a3", "aff5:/C.00aaeccbb45f33a3"
]
for badurn in error_set:
self.assertRaises(type_info.TypeValueError, rdf_client.ClientURN, badurn)
class NetworkAddressTests(rdf_test_base.RDFValueTestMixin,
test_lib.GRRBaseTest):
"""Test the NetworkAddress."""
rdfvalue_class = rdf_client_network.NetworkAddress
def GenerateSample(self, number=0):
return rdf_client_network.NetworkAddress(
human_readable_address="192.168.0.%s" % number)
def testIPv4(self):
sample = rdf_client_network.NetworkAddress(
human_readable_address="192.168.0.1")
self.assertEqual(sample.address_type,
rdf_client_network.NetworkAddress.Family.INET)
# Equal to socket.inet_pton(socket.AF_INET, "192.168.0.1"), which is
# unavailable on Windows.
self.assertEqual(sample.packed_bytes, b"\xc0\xa8\x00\x01")
self.assertEqual(sample.human_readable_address, "192.168.0.1")
self.CheckRDFValue(self.rdfvalue_class(sample), sample)
def testIPv6(self):
ipv6_addresses = ["fe80::202:b3ff:fe1e:8329", "::1"]
# Equal to socket.inet_pton(socket.AF_INET6, address), which is unavailable
# on Windows.
expected_addresses = [
b"\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x02\xb3\xff\xfe\x1e\x83\x29",
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"
]
for address, expected in zip(ipv6_addresses, expected_addresses):
sample = rdf_client_network.NetworkAddress(human_readable_address=address)
self.assertEqual(sample.address_type,
rdf_client_network.NetworkAddress.Family.INET6)
self.assertEqual(sample.packed_bytes, expected)
self.assertEqual(sample.human_readable_address, address)
self.CheckRDFValue(self.rdfvalue_class(sample), sample)
class UnameTests(rdf_test_base.RDFValueTestMixin, test_lib.GRRBaseTest):
"""Test the Uname."""
rdfvalue_class = rdf_client.Uname
def GenerateSample(self, number=0):
    # Make the hostname slightly different for comparison tests.
result = self.rdfvalue_class.FromCurrentSystem()
parts = result.fqdn.split(".")
parts[0] += str(number)
result.fqdn = ".".join(parts)
return result
def testSignature(self):
sample = self.GenerateSample()
self.assertEqual(sample.signature(), sample.pep425tag)
# We do not support old protos without a signature.
sample.pep425tag = None
self.assertRaises(ValueError, sample.signature)
def testGetFQDN(self):
with mock.patch.object(socket, "getfqdn", return_value="foo.bar.baz"):
uname = self.rdfvalue_class.FromCurrentSystem()
self.assertEqual(uname.fqdn, "foo.bar.baz")
def testGetFQDN_Localhost(self):
with mock.patch.object(
socket, "getfqdn", return_value=rdf_client._LOCALHOST):
with mock.patch.object(socket, "gethostname", return_value="foo"):
uname = self.rdfvalue_class.FromCurrentSystem()
self.assertEqual(uname.fqdn, "foo")
class CpuSampleTest(absltest.TestCase):
def testFromMany(self):
samples = [
rdf_client_stats.CpuSample(
timestamp=rdfvalue.RDFDatetime.FromHumanReadable("2001-01-01"),
cpu_percent=0.2,
user_cpu_time=0.1,
system_cpu_time=0.5),
rdf_client_stats.CpuSample(
timestamp=rdfvalue.RDFDatetime.FromHumanReadable("2001-02-01"),
cpu_percent=0.1,
user_cpu_time=2.5,
system_cpu_time=1.2),
rdf_client_stats.CpuSample(
timestamp=rdfvalue.RDFDatetime.FromHumanReadable("2001-03-01"),
cpu_percent=0.6,
user_cpu_time=3.4,
system_cpu_time=2.4),
]
expected = rdf_client_stats.CpuSample(
timestamp=rdfvalue.RDFDatetime.FromHumanReadable("2001-03-01"),
cpu_percent=0.3,
user_cpu_time=3.4,
system_cpu_time=2.4)
self.assertEqual(rdf_client_stats.CpuSample.FromMany(samples), expected)
def testFromManyRaisesOnEmpty(self):
with self.assertRaises(ValueError):
rdf_client_stats.CpuSample.FromMany([])
class IOSampleTest(absltest.TestCase):
def testFromMany(self):
samples = [
rdf_client_stats.IOSample(
timestamp=rdfvalue.RDFDatetime.FromHumanReadable("2001-01-01"),
read_bytes=0,
write_bytes=0),
rdf_client_stats.IOSample(
timestamp=rdfvalue.RDFDatetime.FromHumanReadable("2002-01-01"),
read_bytes=512,
write_bytes=1024),
rdf_client_stats.IOSample(
timestamp=rdfvalue.RDFDatetime.FromHumanReadable("2003-01-01"),
read_bytes=2048,
write_bytes=4096),
]
expected = rdf_client_stats.IOSample(
timestamp=rdfvalue.RDFDatetime.FromHumanReadable("2003-01-01"),
read_bytes=2048,
write_bytes=4096)
self.assertEqual(rdf_client_stats.IOSample.FromMany(samples), expected)
def testFromManyRaisesOnEmpty(self):
with self.assertRaises(ValueError):
rdf_client_stats.IOSample.FromMany([])
class ClientStatsTest(absltest.TestCase):
def testDownsampled(self):
timestamp = rdfvalue.RDFDatetime.FromHumanReadable
stats = rdf_client_stats.ClientStats(
cpu_samples=[
rdf_client_stats.CpuSample(
timestamp=timestamp("2001-01-01 00:00"),
user_cpu_time=2.5,
system_cpu_time=3.2,
cpu_percent=0.5),
rdf_client_stats.CpuSample(
timestamp=timestamp("2001-01-01 00:05"),
user_cpu_time=2.6,
system_cpu_time=4.7,
cpu_percent=0.6),
rdf_client_stats.CpuSample(
timestamp=timestamp("2001-01-01 00:10"),
user_cpu_time=10.0,
system_cpu_time=14.2,
cpu_percent=0.9),
rdf_client_stats.CpuSample(
timestamp=timestamp("2001-01-01 00:12"),
user_cpu_time=12.3,
system_cpu_time=14.9,
cpu_percent=0.1),
rdf_client_stats.CpuSample(
timestamp=timestamp("2001-01-01 00:21"),
user_cpu_time=16.1,
system_cpu_time=22.3,
cpu_percent=0.4)
],
io_samples=[
rdf_client_stats.IOSample(
timestamp=timestamp("2001-01-01 00:00"),
read_count=0,
write_count=0),
rdf_client_stats.IOSample(
timestamp=timestamp("2001-01-01 00:02"),
read_count=3,
write_count=5),
rdf_client_stats.IOSample(
timestamp=timestamp("2001-01-01 00:12"),
read_count=6,
write_count=8),
])
expected = rdf_client_stats.ClientStats(
cpu_samples=[
rdf_client_stats.CpuSample(
timestamp=timestamp("2001-01-01 00:05"),
user_cpu_time=2.6,
system_cpu_time=4.7,
cpu_percent=0.55),
rdf_client_stats.CpuSample(
timestamp=timestamp("2001-01-01 00:12"),
user_cpu_time=12.3,
system_cpu_time=14.9,
cpu_percent=0.5),
rdf_client_stats.CpuSample(
timestamp=timestamp("2001-01-01 00:21"),
user_cpu_time=16.1,
system_cpu_time=22.3,
cpu_percent=0.4),
],
io_samples=[
rdf_client_stats.IOSample(
timestamp=timestamp("2001-01-01 00:02"),
read_count=3,
write_count=5),
rdf_client_stats.IOSample(
timestamp=timestamp("2001-01-01 00:12"),
read_count=6,
write_count=8),
])
actual = rdf_client_stats.ClientStats.Downsampled(
stats, interval=rdfvalue.Duration.From(10, rdfvalue.MINUTES))
self.assertEqual(actual, expected)
class ProcessTest(absltest.TestCase):
def testFromPsutilProcess(self):
p = psutil.Process()
res = rdf_client.Process.FromPsutilProcess(p)
int_fields = [
"pid", "ppid", "ctime", "num_threads", "user_cpu_time",
"system_cpu_time", "RSS_size", "VMS_size", "memory_percent"
]
if platform.system() != "Windows":
int_fields.extend([
"real_uid", "effective_uid", "saved_uid", "real_gid", "effective_gid",
"saved_gid"
])
for field in int_fields:
self.assertGreater(
getattr(res, field), 0,
"rdf_client.Process.{} is not greater than 0, got {!r}.".format(
field, getattr(res, field)))
string_fields = ["name", "exe", "cmdline", "cwd", "username"]
if platform.system() != "Windows":
string_fields.append("terminal")
for field in string_fields:
self.assertNotEqual(
getattr(res, field), "",
"rdf_client.Process.{} is the empty string.".format(field))
# Prevent flaky tests by allowing "sleeping" as state of current process.
self.assertIn(res.status, ["running", "sleeping"])
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numbers
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# The maximum input rank to test.
_MAX_RANK = 5
def _powerset(iterable):
"""Helper for generating all possible reduction_axes arguments.
Example:
powerset([0,1,2]): () (0,) (1,) (2,) (0,1) (0,2) (1,2) (0,1,2)
Args:
iterable: An iterable of items to generate the powerset of.
Returns:
The powerset of all items in iterable.
"""
s = list(iterable)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s) + 1))
class ReducedShapeTest(test.TestCase):
def _check(self, shape, axes, result):
output = math_ops.reduced_shape(shape, axes=axes)
self.assertAllEqual(output.eval(), result)
@test_util.run_deprecated_v1
def testSimple(self):
with self.cached_session():
self._check([3], [], [3])
self._check([3], [0], [1])
self._check([5, 3], [], [5, 3])
self._check([5, 3], [0], [1, 3])
self._check([5, 3], [1], [5, 1])
self._check([5, 3], [0, 1], [1, 1])
@test_util.run_deprecated_v1
def testZeros(self):
"""Check that reduced_shape does the right thing with zero dimensions."""
with self.cached_session():
self._check([0], [], [0])
self._check([0], [0], [1])
self._check([0, 3], [], [0, 3])
self._check([0, 3], [0], [1, 3])
self._check([0, 3], [1], [0, 1])
self._check([0, 3], [0, 1], [1, 1])
self._check([3, 0], [], [3, 0])
self._check([3, 0], [0], [1, 0])
self._check([3, 0], [1], [3, 1])
self._check([3, 0], [0, 1], [1, 1])
@test_util.run_deprecated_v1
def testNegAxes(self):
with self.cached_session():
self._check([10, 10, 10], [-1], [10, 10, 1])
self._check([10, 10, 10], [-1, 2], [10, 10, 1])
self._check([10, 10, 10], [-1, -1], [10, 10, 1])
self._check([10, 10, 10], [-1, 0], [1, 10, 1])
self._check([10, 10, 10], [-3], [1, 10, 10])
class ReductionUnknownShape(test.TestCase):
@test_util.run_deprecated_v1
def testBasic(self):
with self.cached_session():
for dtype, reductions in [(dtypes.float32,
(math_ops.reduce_sum, math_ops.reduce_mean,
math_ops.reduce_prod, math_ops.reduce_max,
math_ops.reduce_min,
math_ops.reduce_euclidean_norm)),
(dtypes.bool, (math_ops.reduce_all,
math_ops.reduce_any))]:
for reduction in reductions:
x = array_ops.placeholder(
dtype=dtype, shape=None) # Some tensor w/ unknown shape.
y = reduction(x)
self.assertEqual(y.shape, ())
class BaseReductionTest(test.TestCase):
def _tf_reduce(self, x, reduction_axes, keepdims):
raise NotImplementedError()
def _np_reduce(self, x, reduction_axes, keepdims):
raise NotImplementedError()
def _makeIncremental(self, shape, dtype):
data = np.arange(np.prod(shape)).reshape(shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
data -= 2j * data
return data
def _makeRandom(self, shape, dtype):
data = np.random.rand(*shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
data -= 2j * data
return data
def _compare(self, x, reduction_axes, keepdims, feed_dict=None):
np_ans = self._np_reduce(x, reduction_axes, keepdims)
with self.cached_session(use_gpu=True) as sess:
tf_ans = self._tf_reduce(x, reduction_axes, keepdims)
out = sess.run(tf_ans, feed_dict)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes, feed_dict=None):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareAll(x, reduction_axes[0])
self._compare(x, reduction_axes, keepdims=False, feed_dict=feed_dict)
self._compare(x, reduction_axes, keepdims=True, feed_dict=feed_dict)
def _compareAllAxes(self, x, feed_dict=None):
self._compareAll(x, None)
for axes in _powerset(range(x.ndim)):
self._compareAll(x, axes, feed_dict)
def _compareGradient(self, x, reduction_axes, rtol=1e-8, atol=1e-8):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareGradient(x, reduction_axes[0], rtol=rtol, atol=atol)
with self.cached_session(use_gpu=True):
t = ops.convert_to_tensor(x)
su = self._tf_reduce(t, reduction_axes, False)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, x.shape, su, su.get_shape().as_list(), x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=rtol, atol=atol)
def _compareGradientAxes(self, x, rtol=1e-8, atol=1e-8):
self._compareGradient(x, None, rtol=rtol, atol=atol)
self._compareGradient(x, [], rtol=rtol, atol=atol)
self._compareGradient(x, 0, rtol=rtol, atol=atol)
self._compareGradient(x, [1], rtol=rtol, atol=atol)
self._compareGradient(x, [2], rtol=rtol, atol=atol)
self._compareGradient(x, [1, 2], rtol=rtol, atol=atol)
self._compareGradient(x, [0, 1, 2, 3], rtol=rtol, atol=atol)
class SumReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_sum(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
return np.sum(x, axis=reduction_axes, keepdims=keepdims)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
v = math_ops.reduce_sum([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat16(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float16)
self._compareAllAxes(np_arr)
    # Test that the mean doesn't overflow in float16; run only on GPU, since
    # the GPU kernel has the more accurate implementation.
if not test.is_gpu_available():
return
arr = np.ones([68000], dtype=np.float16)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
tf_arr = variables.Variable(arr)
variables.global_variables_initializer().run()
tf_mean = math_ops.reduce_mean(tf_arr, 0, False)
tf_out_mean = self.evaluate(tf_mean)
self.assertAllClose(tf_out_mean, 1.)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
for _ in range(10):
size_x = int(2**np.random.uniform(0, 15))
size_y = int(2**np.random.uniform(0, 15))
if size_x * size_y > 1e7:
size_y = int(1e7 / size_x)
arr = np.ones([size_x, size_y], dtype=np.float32)
col_sum = np.sum(arr, axis=0)
row_sum = np.sum(arr, axis=1)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
tf_row_sum = self._tf_reduce(arr, 1, False)
tf_col_sum = self._tf_reduce(arr, 0, False)
tf_out_row, tf_out_col = self.evaluate([tf_row_sum, tf_col_sum])
self.assertAllClose(col_sum, tf_out_col)
self.assertAllClose(row_sum, tf_out_row)
for size_x in [1, 3, 16, 33]:
for size_y in [1, 3, 16, 33]:
for size_z in [1, 3, 16, 33]:
arr = np.ones([size_x, size_y, size_z], dtype=np.float32)
sum_y = np.sum(arr, axis=1)
sum_xz = np.sum(arr, axis=(0, 2))
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
tf_sum_xz = self._tf_reduce(arr, [0, 2], False)
tf_sum_y = self._tf_reduce(arr, 1, False)
tf_out_sum_xz, tf_out_sum_y = self.evaluate([tf_sum_xz, tf_sum_y])
self.assertAllClose(sum_y, tf_out_sum_y)
self.assertAllClose(sum_xz, tf_out_sum_xz)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testInvalidIndex(self):
np_arr = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = ops.convert_to_tensor(np_arr)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [-3])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [2])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [0, 2])
@test_util.run_deprecated_v1
def testPartialShapes(self):
np.random.seed(1618)
# Input shape is unknown.
reduction_axes = [1, 2]
c_unknown = array_ops.placeholder(dtypes.float32)
s_unknown = math_ops.reduce_sum(c_unknown, reduction_axes)
self.assertEqual(tensor_shape.unknown_shape(), s_unknown.get_shape())
np_input = np.random.randn(3, 3, 3)
self._compareAll(np_input, reduction_axes, {c_unknown: np_input})
# Input shape only has known rank.
c_known_rank = array_ops.placeholder(dtypes.float32)
c_known_rank.set_shape(tensor_shape.unknown_shape(rank=3))
s_known_rank = math_ops.reduce_sum(
c_known_rank, reduction_axes, keepdims=True)
self.assertEqual(3, s_known_rank.get_shape().rank)
np_input = np.random.randn(3, 3, 3)
self._compareAll(np_input, reduction_axes, {c_known_rank: np_input})
# Reduction indices are unknown.
unknown_indices = array_ops.placeholder(dtypes.int32)
c_unknown_indices = constant_op.constant([[10.0], [20.0]])
s_unknown_indices = math_ops.reduce_sum(
c_unknown_indices, unknown_indices, keepdims=False)
self.assertEqual(tensor_shape.unknown_shape(),
s_unknown_indices.get_shape())
s_unknown_indices_keep = math_ops.reduce_sum(
c_unknown_indices, unknown_indices, keepdims=True)
self.assertEqual(2, s_unknown_indices_keep.get_shape().rank)
@test_util.run_deprecated_v1
def testWrongShapeForReductionIndices(self):
reduction_axes = [[1], [2]]
c_unknown = array_ops.placeholder(dtypes.float32)
with self.assertRaisesWithPredicateMatch(ValueError,
".*must be at most rank 1.*"):
math_ops.reduce_sum(c_unknown, reduction_axes)
# Int64??
@test_util.run_deprecated_v1
def testGradient(self):
for dtype in [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]:
x = self._makeIncremental([2, 3, 4, 2], dtype)
self._compareGradientAxes(x)
@test_util.run_deprecated_v1
def testHighRank(self):
# Do a bunch of random high dimensional reductions
np.random.seed(42)
for _ in range(20):
rank = np.random.randint(4, 10 + 1)
axes, = np.nonzero(np.random.randint(2, size=rank))
shape = tuple(np.random.randint(1, 3 + 1, size=rank))
data = np.random.randint(1024, size=shape)
self._compareAll(data, axes)
# Check some particular axis patterns
for rank in 4, 7, 10:
shape = tuple(np.random.randint(1, 3 + 1, size=rank))
data = np.random.randint(1024, size=shape)
for axes in ([], np.arange(rank), np.arange(0, rank, 2),
np.arange(1, rank, 2)):
self._compareAll(data, axes)
@test_util.run_deprecated_v1
def testExpand(self):
# Reduce an empty tensor to a nonempty tensor
x = np.zeros((5, 0))
self._compareAll(x, [1])
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.session(use_gpu=True):
x = array_ops.zeros([0, 3])
y = math_ops.reduce_sum(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
@test_util.run_deprecated_v1
def testDegenerate(self):
with self.session(use_gpu=True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_sum(x, [0])
self.assertAllEqual(y.eval(), np.zeros(9938))
class MeanReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_mean(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
elif isinstance(reduction_axes, numbers.Integral):
reduction_axes = (reduction_axes,)
if reduction_axes is None:
count = np.prod(x.shape)
else:
count = np.prod([x.shape[ax] for ax in reduction_axes])
# np.mean automatically converts integer inputs to float, while TensorFlow's
# reduce_mean does not. For integer inputs, we emulate TensorFlow's behavior
# using np.sum and truncating division.
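    # For example, reducing the int32 values [1, 2] yields 1 here (3 // 2),
    # matching reduce_mean's integer semantics, whereas np.mean would give 1.5.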
np_sum = np.sum(x, axis=reduction_axes, keepdims=keepdims)
if np.issubdtype(x.dtype, np.integer):
return np_sum // count
return np_sum / count
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
v = math_ops.reduce_mean([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testUint8(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeRandom((2,) * rank, dtypes.uint8)
self._compareAllAxes(np_arr)
# This tests the issue reported in b/145030710.
@test_util.run_deprecated_v1
def testSizeOverflowUint8(self):
np_arr = self._makeRandom((2**8,), dtypes.uint8)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testSizeOverflowInt8(self):
np_arr = self._makeRandom((2**7,), dtypes.int8)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testSizeOverflowUint16(self):
np_arr = self._makeRandom((2**16,), dtypes.uint16)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testSizeOverflowInt16(self):
np_arr = self._makeRandom((2**15,), dtypes.int16)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testGradient(self):
s = [2, 3, 4, 2]
for dtype in [dtypes.float32, dtypes.float64]:
x = self._makeIncremental(s, dtype)
self._compareGradientAxes(x, rtol=1e-3, atol=1e-3)
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.session(use_gpu=True):
x = array_ops.zeros([0, 3])
y = math_ops.reduce_mean(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
@test_util.run_deprecated_v1
def testDegenerate(self):
with self.session(use_gpu=True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_mean(x, [0]).eval()
self.assertEqual(y.shape, (9938,))
self.assertTrue(np.all(np.isnan(y)))
class EuclideanNormReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_euclidean_norm(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
np_fro = np.sqrt(
np.sum(x * np.conj(x), axis=reduction_axes, keepdims=keepdims))
if np.issubdtype(x.dtype, np.integer):
np_fro = np.floor(np_fro)
return np_fro
@test_util.run_deprecated_v1
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True):
v = math_ops.reduce_mean([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testSingleton(self):
for dtype in [np.float32, np.float64]:
np_arr = np.array([-1.]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
with self.session(use_gpu=True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_euclidean_norm(x, [0]).eval()
self.assertEqual(y.shape, (9938,))
self.assertAllEqual(y, np.zeros(9938))
@test_util.run_deprecated_v1
def testGradient(self):
shape = [2, 3, 4, 2]
for dtype in [dtypes.float32, dtypes.float64]:
      # A zero-valued entry will produce a NaN gradient if the reduction
      # doesn't happen, e.g. `tf.math.reduce_sum([0, 1], axis=[])`, so add
      # one to avoid it.
x = self._makeIncremental(shape, dtype) + 1.0
self._compareGradientAxes(x, rtol=1e-2, atol=1e-2)
class ProdReductionTest(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_prod(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
return np.prod(x, axis=reduction_axes, keepdims=keepdims)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
v = math_ops.reduce_prod([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
# Numpy automatically upgrades the type of np.prod from int32 to int64, so
# Numpy does not overflow an int32 np.prod while TensorFlow does. To avoid
# overflow, divide the incremental int32 array by 2.
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32) / 2
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testGradientWithZeros(self):
s = [2, 3, 4, 2]
x = self._makeIncremental(s, dtypes.float32) / 20.
# No zeros in input
self._compareGradientAxes(x, rtol=1e-3, atol=1e-3)
# Zero at beginning
x1 = x.copy()
x1[:, :, 0, :] = 0
self._compareGradientAxes(x1, rtol=1e-3, atol=1e-3)
# Zero at end
x2 = x.copy()
x2[:, :, -1, :] = 0
self._compareGradientAxes(x2, rtol=1e-3, atol=1e-3)
# Zero in middle
x3 = x.copy()
x3[:, :, 2, :] = 0
self._compareGradientAxes(x3, rtol=1e-3, atol=1e-3)
# All zeros
x4 = x.copy()
x4[:, :, :, :] = 0
self._compareGradientAxes(x4, rtol=1e-3, atol=1e-3)
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.session(use_gpu=True):
x = array_ops.zeros([0, 3])
y = math_ops.reduce_prod(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
@test_util.run_deprecated_v1
def testDegenerate(self):
with self.session(use_gpu=True):
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_prod(x, [0])
self.assertAllEqual(y.eval(), np.ones(9938))
class MinReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amin(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amin(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_min(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
v = math_ops.reduce_min([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(1, 31).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(1, 31).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
@test_util.run_deprecated_v1
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [1, 2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [1])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t, [2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 3, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_min(t)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.cached_session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_min(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
class MaxReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amax(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amax(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_max(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session(use_gpu=True) as sess:
v = math_ops.reduce_max([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
def testInt64Reduce3D(self):
# Create a 3D array of int64s and reduce across all possible
# dimensions
np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.int64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(-31, -1).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
@test_util.run_deprecated_v1
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t, [1, 2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t, [1])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 4, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t, [2])
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [2, 3, 2], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(-49.0, -1.0).reshape(s).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
su = math_ops.reduce_max(t)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, s, su, [1], x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.cached_session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_max(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
class AllReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.all(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.all(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_all(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.session(use_gpu=True) as sess:
v = math_ops.reduce_all([True, True],
constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, True)
def testAll3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.1).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testEmpty(self):
self._compareAll([], [0])
class AnyReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.any(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.any(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_any(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.session(use_gpu=True) as sess:
v = math_ops.reduce_any([True, True],
constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, True)
def testAll3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.9).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testEmpty(self):
self._compareAll([], [0])
class CountNonzeroReductionTest(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False, zero=0,
feed_dict=None):
np_ans = (x != zero).astype(np.int32)
if reduction_axes is None:
np_ans = np.sum(np_ans, keepdims=keepdims)
else:
reduction_axes = np.array(reduction_axes).astype(np.int32)
for ra in reduction_axes.ravel()[::-1]:
np_ans = np.sum(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu) as sess:
tf_ans = math_ops.count_nonzero(x, reduction_axes, keepdims)
out = sess.run(tf_ans, feed_dict)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes, feed_dict=None):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareAll(x, reduction_axes[0])
self._compare(x, reduction_axes, False, use_gpu=True, feed_dict=feed_dict)
self._compare(x, reduction_axes, False, use_gpu=False, feed_dict=feed_dict)
self._compare(x, reduction_axes, True, use_gpu=True, feed_dict=feed_dict)
self._compare(x, reduction_axes, True, use_gpu=False, feed_dict=feed_dict)
@test_util.run_deprecated_v1
def testBoolReduce1D(self):
# Create a 1D array of booleans
np_arr = np.asarray([False, False, True, False, False, True])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
@test_util.run_deprecated_v1
def testFloatReduce1D(self):
# Create a 1D array of floats
np_arr = np.asarray([0.0, 1.0, -1.0, 0.0, 0.0, 3.0]).astype(np.float32)
self._compareAll(np_arr, [0])
@test_util.run_deprecated_v1
def testFloatReduce4D(self):
# Create a 4D array of floats and reduce across some
# dimensions
np_arr = np.floor(np.arange(0.0, 210.0) / 100.0).reshape(
[2, 3, 5, 7]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
# Need specialization for reduce(4D, [0, 2])
# self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
self._compareAll(np_arr, [1, 2, 3])
self._compareAll(np_arr, [0, 1, 2, 3])
@test_util.run_deprecated_v1
def testExpand(self):
# Reduce an empty tensor to a nonempty tensor
x = np.zeros((5, 0))
self._compareAll(x, [1])
@test_util.run_deprecated_v1
def testDegenerate(self):
for use_gpu in False, True:
with self.cached_session(use_gpu=use_gpu):
for dtype in (dtypes.bool,):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.count_nonzero(x, [0])
self.assertAllEqual(y.eval(), np.zeros(9938))
def testStringReduce(self):
# Test case for GitHub issue 18712
with self.cached_session() as sess:
v = math_ops.count_nonzero(constant_op.constant(["test"]))
self.assertAllClose(self.evaluate(v), 1)
@test_util.run_deprecated_v1
def testStringReduce1D(self):
# Create a 1D array of strings
x = np.asarray(["", "", "a", "", "", "b"])
self._compare(x, None, keepdims=False, zero="")
self._compare(x, [], keepdims=False, zero="")
self._compare(x, [0], keepdims=False, zero="")
self._compare(x, None, keepdims=True, zero="")
self._compare(x, [], keepdims=True, zero="")
self._compare(x, [0], keepdims=True, zero="")
@test_util.run_deprecated_v1
def testStringReduce2D(self):
# Create a 2D array of strings
x = np.asarray([["", "", "a", "", "", "b"],
["", "c", "", "d", "", ""],
["e", "", "f", "", "", ""]])
self._compare(x, None, keepdims=False, zero="")
self._compare(x, [], keepdims=False, zero="")
self._compare(x, [0], keepdims=False, zero="")
self._compare(x, [1], keepdims=False, zero="")
self._compare(x, [0, 1], keepdims=False, zero="")
self._compare(x, None, keepdims=True, zero="")
self._compare(x, [], keepdims=True, zero="")
self._compare(x, [0], keepdims=True, zero="")
self._compare(x, [0, 1], keepdims=True, zero="")
if __name__ == "__main__":
test.main()
|
|
# Copyright 2013 IBM Corp.
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import microversion_parse
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import strutils
import six
import webob
from nova.api.openstack import api_version_request as api_version
from nova.api.openstack import versioned_method
from nova import exception
from nova import i18n
from nova.i18n import _
from nova import utils
from nova import wsgi
LOG = logging.getLogger(__name__)
_SUPPORTED_CONTENT_TYPES = (
'application/json',
'application/vnd.openstack.compute+json',
)
# These are typically created automatically by routes as either default
# collection or member methods.
_ROUTES_METHODS = [
'create',
'delete',
'show',
'update',
]
_METHODS_WITH_BODY = [
'POST',
'PUT',
]
# The default api version request if none is requested in the headers
# Note(cyeoh): This only applies for the v2.1 API once microversions
# support is fully merged. It does not affect the V2 API.
DEFAULT_API_VERSION = "2.1"
# name of attribute to keep version method information
VER_METHOD_ATTR = 'versioned_methods'
# Names of headers used by clients to request a specific version
# of the REST API
API_VERSION_REQUEST_HEADER = 'OpenStack-API-Version'
LEGACY_API_VERSION_REQUEST_HEADER = 'X-OpenStack-Nova-API-Version'
ENV_LEGACY_V2 = 'openstack.legacy_v2'
def get_supported_content_types():
return _SUPPORTED_CONTENT_TYPES
# NOTE(rlrossit): This function allows a get on both a dict-like and an
# object-like object. cache_db_items() is used on both versioned objects and
# dicts, so the function can't be totally changed over to [] syntax, nor
# can it be changed over to use getattr().
def item_get(item, item_key):
if hasattr(item, '__getitem__'):
return item[item_key]
else:
return getattr(item, item_key)
class Request(wsgi.Request):
"""Add some OpenStack API-specific logic to the base webob.Request."""
def __init__(self, *args, **kwargs):
super(Request, self).__init__(*args, **kwargs)
self._extension_data = {'db_items': {}}
if not hasattr(self, 'api_version_request'):
self.api_version_request = api_version.APIVersionRequest()
def cache_db_items(self, key, items, item_key='id'):
"""Allow API methods to store objects from a DB query to be
used by API extensions within the same API request.
An instance of this class only lives for the lifetime of a
single API request, so there's no need to implement full
cache management.
"""
db_items = self._extension_data['db_items'].setdefault(key, {})
for item in items:
db_items[item_get(item, item_key)] = item
def get_db_items(self, key):
"""Allow an API extension to get previously stored objects within
the same API request.
Note that the object data will be slightly stale.
"""
return self._extension_data['db_items'][key]
def get_db_item(self, key, item_key):
"""Allow an API extension to get a previously stored object
within the same API request.
Note that the object data will be slightly stale.
"""
return self.get_db_items(key).get(item_key)
def cache_db_instances(self, instances):
self.cache_db_items('instances', instances, 'uuid')
def cache_db_instance(self, instance):
self.cache_db_items('instances', [instance], 'uuid')
def get_db_instances(self):
return self.get_db_items('instances')
def get_db_instance(self, instance_uuid):
return self.get_db_item('instances', instance_uuid)
def cache_db_flavors(self, flavors):
self.cache_db_items('flavors', flavors, 'flavorid')
def cache_db_flavor(self, flavor):
self.cache_db_items('flavors', [flavor], 'flavorid')
def get_db_flavors(self):
return self.get_db_items('flavors')
def get_db_flavor(self, flavorid):
return self.get_db_item('flavors', flavorid)
def cache_db_compute_nodes(self, compute_nodes):
self.cache_db_items('compute_nodes', compute_nodes, 'id')
def cache_db_compute_node(self, compute_node):
self.cache_db_items('compute_nodes', [compute_node], 'id')
def get_db_compute_nodes(self):
return self.get_db_items('compute_nodes')
def get_db_compute_node(self, id):
return self.get_db_item('compute_nodes', id)
def best_match_content_type(self):
"""Determine the requested response content-type."""
if 'nova.best_content_type' not in self.environ:
# Calculate the best MIME type
content_type = None
# Check URL path suffix
parts = self.path.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in get_supported_content_types():
content_type = possible_type
if not content_type:
content_type = self.accept.best_match(
get_supported_content_types())
self.environ['nova.best_content_type'] = (content_type or
'application/json')
return self.environ['nova.best_content_type']
def get_content_type(self):
"""Determine content type of the request body.
Does not do any body introspection, only checks header
"""
if "Content-Type" not in self.headers:
return None
content_type = self.content_type
# NOTE(markmc): text/plain is the default for eventlet and
# other webservers which use mimetools.Message.gettype()
# whereas twisted defaults to ''.
if not content_type or content_type == 'text/plain':
return None
if content_type not in get_supported_content_types():
raise exception.InvalidContentType(content_type=content_type)
return content_type
def best_match_language(self):
"""Determine the best available language for the request.
:returns: the best language match or None if the 'Accept-Language'
header was not available in the request.
"""
if not self.accept_language:
return None
return self.accept_language.best_match(
i18n.get_available_languages())
def set_api_version_request(self):
"""Set API version request based on the request header information."""
hdr_string = microversion_parse.get_version(
self.headers, service_type='compute',
legacy_headers=[LEGACY_API_VERSION_REQUEST_HEADER])
if hdr_string is None:
self.api_version_request = api_version.APIVersionRequest(
api_version.DEFAULT_API_VERSION)
elif hdr_string == 'latest':
# 'latest' is a special keyword which is equivalent to
# requesting the maximum version of the API supported
self.api_version_request = api_version.max_api_version()
else:
self.api_version_request = api_version.APIVersionRequest(
hdr_string)
# Check that the version requested is within the global
# minimum/maximum of supported API versions
if not self.api_version_request.matches(
api_version.min_api_version(),
api_version.max_api_version()):
raise exception.InvalidGlobalAPIVersion(
req_ver=self.api_version_request.get_string(),
min_ver=api_version.min_api_version().get_string(),
max_ver=api_version.max_api_version().get_string())
def set_legacy_v2(self):
self.environ[ENV_LEGACY_V2] = True
def is_legacy_v2(self):
return self.environ.get(ENV_LEGACY_V2, False)
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class JSONDeserializer(ActionDispatcher):
def _from_json(self, datastring):
try:
return jsonutils.loads(datastring)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
def deserialize(self, datastring, action='default'):
return self.dispatch(datastring, action=action)
def default(self, datastring):
return {'body': self._from_json(datastring)}
class JSONDictSerializer(ActionDispatcher):
"""Default JSON request body serialization."""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
return six.text_type(jsonutils.dumps(data))
def response(code):
"""Attaches response code to a method.
This decorator associates a response code with a method. Note
that the function attributes are directly manipulated; the method
is not wrapped.
"""
def decorator(func):
func.wsgi_code = code
return func
return decorator
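# Hypothetical usage: the decorated method is returned unchanged, it only
# gains a ``wsgi_code`` attribute that Resource later copies onto the
# ResponseObject's default status code.
#
#   @response(202)
#   def _action_reboot(self, req, id, body):
#       ...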
class ResponseObject(object):
"""Bundles a response object
Object that app methods may return in order to allow its response
to be modified by extensions in the code. Its use is optional (and
should only be used if you really know what you are doing).
"""
def __init__(self, obj, code=None, headers=None):
"""Builds a response object."""
self.obj = obj
self._default_code = 200
self._code = code
self._headers = headers or {}
self.serializer = JSONDictSerializer()
def __getitem__(self, key):
"""Retrieves a header with the given name."""
return self._headers[key.lower()]
def __setitem__(self, key, value):
"""Sets a header with the given name to the given value."""
self._headers[key.lower()] = value
def __delitem__(self, key):
"""Deletes the header with the given name."""
del self._headers[key.lower()]
def serialize(self, request, content_type):
"""Serializes the wrapped object.
Utility method for serializing the wrapped object. Returns a
webob.Response object.
"""
serializer = self.serializer
body = None
if self.obj is not None:
body = serializer.serialize(self.obj)
response = webob.Response(body=body)
if response.headers.get('Content-Length'):
# NOTE(andreykurilin): we need to encode 'Content-Length' header,
# since webob.Response sets it automatically if the "body" attr is present.
# https://github.com/Pylons/webob/blob/1.5.0b0/webob/response.py#L147
response.headers['Content-Length'] = utils.utf8(
response.headers['Content-Length'])
response.status_int = self.code
for hdr, value in self._headers.items():
response.headers[hdr] = utils.utf8(value)
response.headers['Content-Type'] = utils.utf8(content_type)
return response
@property
def code(self):
"""Retrieve the response status."""
return self._code or self._default_code
@property
def headers(self):
"""Retrieve the headers."""
return self._headers.copy()
def action_peek(body):
"""Determine action to invoke.
This looks inside the json body and fetches out the action method
name.
"""
try:
decoded = jsonutils.loads(body)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
# Make sure there's exactly one key...
if len(decoded) != 1:
msg = _("too many body keys")
raise exception.MalformedRequestBody(reason=msg)
# Return the action name
return list(decoded.keys())[0]
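# For example, a body of '{"reboot": {"type": "SOFT"}}' yields the action name
# 'reboot'; a body with zero or more than one top-level key raises
# MalformedRequestBody.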
class ResourceExceptionHandler(object):
"""Context manager to handle Resource exceptions.
Used when processing exceptions generated by API implementation
methods (or their extensions). Converts most exceptions to Fault
exceptions, with the appropriate logging.
"""
def __enter__(self):
return None
def __exit__(self, ex_type, ex_value, ex_traceback):
if not ex_value:
return True
if isinstance(ex_value, exception.Forbidden):
raise Fault(webob.exc.HTTPForbidden(
explanation=ex_value.format_message()))
elif isinstance(ex_value, exception.VersionNotFoundForAPIMethod):
raise
elif isinstance(ex_value, exception.Invalid):
raise Fault(exception.ConvertedException(
code=ex_value.code,
explanation=ex_value.format_message()))
elif isinstance(ex_value, TypeError):
exc_info = (ex_type, ex_value, ex_traceback)
LOG.error('Exception handling resource: %s', ex_value,
exc_info=exc_info)
raise Fault(webob.exc.HTTPBadRequest())
elif isinstance(ex_value, Fault):
LOG.info("Fault thrown: %s", ex_value)
raise ex_value
elif isinstance(ex_value, webob.exc.HTTPException):
LOG.info("HTTP exception thrown: %s", ex_value)
raise Fault(ex_value)
# We didn't handle the exception
return False
class Resource(wsgi.Application):
"""WSGI app that handles (de)serialization and controller dispatch.
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon its controller. All
controller action methods must accept a 'req' argument, which is the
incoming wsgi.Request. If the operation is a PUT or POST, the controller
method must also accept a 'body' argument (the deserialized request body).
They may raise a webob.exc exception or return a dict, which will be
serialized by requested content type.
Exceptions derived from webob.exc.HTTPException will be automatically
wrapped in Fault() to provide API friendly error responses.
"""
support_api_request_version = False
def __init__(self, controller, inherits=None):
""":param controller: object that implement methods created by routes
lib
:param inherits: another resource object that this resource should
inherit extensions from. Any action extensions that
are applied to the parent resource will also apply
to this resource.
"""
self.controller = controller
self.default_serializers = dict(json=JSONDictSerializer)
# Copy over the actions dictionary
self.wsgi_actions = {}
if controller:
self.register_actions(controller)
# Save a mapping of extensions
self.wsgi_extensions = {}
self.wsgi_action_extensions = {}
self.inherits = inherits
def register_actions(self, controller):
"""Registers controller actions with this resource."""
actions = getattr(controller, 'wsgi_actions', {})
for key, method_name in actions.items():
self.wsgi_actions[key] = getattr(controller, method_name)
def register_extensions(self, controller):
"""Registers controller extensions with this resource."""
extensions = getattr(controller, 'wsgi_extensions', [])
for method_name, action_name in extensions:
# Look up the extending method
extension = getattr(controller, method_name)
if action_name:
# Extending an action...
if action_name not in self.wsgi_action_extensions:
self.wsgi_action_extensions[action_name] = []
self.wsgi_action_extensions[action_name].append(extension)
else:
# Extending a regular method
if method_name not in self.wsgi_extensions:
self.wsgi_extensions[method_name] = []
self.wsgi_extensions[method_name].append(extension)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
# NOTE(Vek): Check for get_action_args() override in the
# controller
if hasattr(self.controller, 'get_action_args'):
return self.controller.get_action_args(request_environment)
try:
args = request_environment['wsgiorg.routing_args'][1].copy()
except (KeyError, IndexError, AttributeError):
return {}
try:
del args['controller']
except KeyError:
pass
try:
del args['format']
except KeyError:
pass
return args
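# For illustration (keys and values assumed): routes typically leaves a match
# dict in environ['wsgiorg.routing_args'][1], e.g.
#   {'controller': ..., 'action': 'show', 'id': 'abc-123', 'format': 'json'}
# which this method copies and strips down to {'action': 'show', 'id': 'abc-123'}.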
def get_body(self, request):
content_type = request.get_content_type()
return content_type, request.body
def deserialize(self, body):
return JSONDeserializer().deserialize(body)
def process_extensions(self, extensions, resp_obj, request,
action_args):
for ext in extensions:
response = None
# Regular functions get post-processing...
try:
with ResourceExceptionHandler():
response = ext(req=request, resp_obj=resp_obj,
**action_args)
except exception.VersionNotFoundForAPIMethod:
# If an attached extension (@wsgi.extends) for the
# method has no version match, it's not an error. We
# just don't run the extends code.
continue
except Fault as ex:
response = ex
# If an extension returned a response, return it to exit early. This
# is actually a failure mode; None indicates success.
if response:
return response
return None
def _should_have_body(self, request):
return request.method in _METHODS_WITH_BODY
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
if self.support_api_request_version:
# Set the version of the API requested based on the header
try:
request.set_api_version_request()
except exception.InvalidAPIVersionString as e:
return Fault(webob.exc.HTTPBadRequest(
explanation=e.format_message()))
except exception.InvalidGlobalAPIVersion as e:
return Fault(webob.exc.HTTPNotAcceptable(
explanation=e.format_message()))
# Identify the action, its arguments, and the requested
# content type
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
# NOTE(sdague): we filter out InvalidContentTypes early so we
# know everything is good from here on out.
try:
content_type, body = self.get_body(request)
accept = request.best_match_content_type()
except exception.InvalidContentType:
msg = _("Unsupported Content-Type")
return Fault(webob.exc.HTTPUnsupportedMediaType(explanation=msg))
# NOTE(Vek): Splitting the function up this way allows for
# auditing by external tools that wrap the existing
# function. If we try to audit __call__(), we can
# run into troubles due to the @webob.dec.wsgify()
# decorator.
return self._process_stack(request, action, action_args,
content_type, body, accept)
def _process_stack(self, request, action, action_args,
content_type, body, accept):
"""Implement the processing stack."""
# Get the implementing method
try:
meth, extensions = self.get_method(request, action,
content_type, body)
except (AttributeError, TypeError):
return Fault(webob.exc.HTTPNotFound())
except KeyError as ex:
msg = _("There is no such action: %s") % ex.args[0]
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
if body:
msg = _("Action: '%(action)s', calling method: %(meth)s, body: "
"%(body)s") % {'action': action,
'body': six.text_type(body, 'utf-8'),
'meth': str(meth)}
LOG.debug(strutils.mask_password(msg))
else:
LOG.debug("Calling method '%(meth)s'",
{'meth': str(meth)})
# Now, deserialize the request body...
try:
contents = self._get_request_content(body, request)
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Update the action args
action_args.update(contents)
project_id = action_args.pop("project_id", None)
context = request.environ.get('nova.context')
if (context and project_id and (project_id != context.project_id)):
msg = _("Malformed request URL: URL's project_id '%(project_id)s'"
" doesn't match Context's project_id"
" '%(context_project_id)s'") % \
{'project_id': project_id,
'context_project_id': context.project_id}
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
response = None
try:
with ResourceExceptionHandler():
action_result = self.dispatch(meth, request, action_args)
except Fault as ex:
response = ex
if not response:
# No exceptions; convert action_result into a
# ResponseObject
resp_obj = None
if type(action_result) is dict or action_result is None:
resp_obj = ResponseObject(action_result)
elif isinstance(action_result, ResponseObject):
resp_obj = action_result
else:
response = action_result
# Run post-processing extensions
if resp_obj:
# Do a preserialize to set up the response object
if hasattr(meth, 'wsgi_code'):
resp_obj._default_code = meth.wsgi_code
# Process extensions
response = self.process_extensions(extensions, resp_obj,
request, action_args)
if resp_obj and not response:
response = resp_obj.serialize(request, accept)
if hasattr(response, 'headers'):
for hdr, val in list(response.headers.items()):
if six.PY2:
# In Py2.X Headers must be byte strings
response.headers[hdr] = utils.utf8(val)
else:
# In Py3.X Headers must be utf-8 strings
response.headers[hdr] = encodeutils.safe_decode(
utils.utf8(val))
if not request.api_version_request.is_null():
response.headers[API_VERSION_REQUEST_HEADER] = \
'compute ' + request.api_version_request.get_string()
response.headers[LEGACY_API_VERSION_REQUEST_HEADER] = \
request.api_version_request.get_string()
response.headers.add('Vary', API_VERSION_REQUEST_HEADER)
response.headers.add('Vary', LEGACY_API_VERSION_REQUEST_HEADER)
return response
def _get_request_content(self, body, request):
contents = {}
if self._should_have_body(request):
# allow empty body with PUT and POST
if request.content_length == 0 or request.content_length is None:
contents = {'body': None}
else:
contents = self.deserialize(body)
return contents
def get_method(self, request, action, content_type, body):
meth, extensions = self._get_method(request,
action,
content_type,
body)
if self.inherits:
_meth, parent_ext = self.inherits.get_method(request,
action,
content_type,
body)
extensions.extend(parent_ext)
return meth, extensions
def _get_method(self, request, action, content_type, body):
"""Look up the action-specific method and its extensions."""
# Look up the method
try:
if not self.controller:
meth = getattr(self, action)
else:
meth = getattr(self.controller, action)
except AttributeError:
if (not self.wsgi_actions or
action not in _ROUTES_METHODS + ['action']):
# Propagate the error
raise
else:
return meth, self.wsgi_extensions.get(action, [])
if action == 'action':
action_name = action_peek(body)
else:
action_name = action
# Look up the action method
return (self.wsgi_actions[action_name],
self.wsgi_action_extensions.get(action_name, []))
def dispatch(self, method, request, action_args):
"""Dispatch a call to the action-specific method."""
try:
return method(req=request, **action_args)
except exception.VersionNotFoundForAPIMethod:
# We deliberately don't return any message information
# about the exception to the user so it looks as if
# the method is simply not implemented.
return Fault(webob.exc.HTTPNotFound())
class ResourceV21(Resource):
support_api_request_version = True
def action(name):
"""Mark a function as an action.
The given name will be taken as the action key in the body.
This is also overloaded to allow extensions to provide
non-extending definitions of create and delete operations.
"""
def decorator(func):
func.wsgi_action = name
return func
return decorator
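# Hypothetical usage inside a controller; ControllerMetaclass collects the
# marker into ``wsgi_actions`` and Resource dispatches the single body key to
# the marked method:
#
#   @action('os-resetState')
#   def _reset_state(self, req, id, body):
#       ...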
def extends(*args, **kwargs):
"""Indicate a function extends an operation.
Can be used as either::
@extends
def index(...):
pass
or as::
@extends(action='resize')
def _action_resize(...):
pass
"""
def decorator(func):
# Store enough information to find what we're extending
func.wsgi_extends = (func.__name__, kwargs.get('action'))
return func
# If we have positional arguments, call the decorator
if args:
return decorator(*args)
# OK, return the decorator instead
return decorator
class ControllerMetaclass(type):
"""Controller metaclass.
This metaclass automates the task of assembling a dictionary
mapping action keys to method names.
"""
def __new__(mcs, name, bases, cls_dict):
"""Adds the wsgi_actions dictionary to the class."""
# Find all actions
actions = {}
extensions = []
versioned_methods = None
# start with wsgi actions from base classes
for base in bases:
actions.update(getattr(base, 'wsgi_actions', {}))
if base.__name__ == "Controller":
# NOTE(cyeoh): This resets the VER_METHOD_ATTR attribute
# between API controller class creations. This allows us
# to use a class decorator on the API methods that doesn't
# require naming explicitly what method is being versioned as
# it can be implicit based on the method decorated. It is a bit
# ugly.
if VER_METHOD_ATTR in base.__dict__:
versioned_methods = getattr(base, VER_METHOD_ATTR)
delattr(base, VER_METHOD_ATTR)
for key, value in cls_dict.items():
if not callable(value):
continue
if getattr(value, 'wsgi_action', None):
actions[value.wsgi_action] = key
elif getattr(value, 'wsgi_extends', None):
extensions.append(value.wsgi_extends)
# Add the actions and extensions to the class dict
cls_dict['wsgi_actions'] = actions
cls_dict['wsgi_extensions'] = extensions
if versioned_methods:
cls_dict[VER_METHOD_ATTR] = versioned_methods
return super(ControllerMetaclass, mcs).__new__(mcs, name, bases,
cls_dict)
@six.add_metaclass(ControllerMetaclass)
class Controller(object):
"""Default controller."""
_view_builder_class = None
def __init__(self, view_builder=None):
"""Initialize controller with a view builder instance."""
if view_builder:
self._view_builder = view_builder
elif self._view_builder_class:
self._view_builder = self._view_builder_class()
else:
self._view_builder = None
def __getattribute__(self, key):
def version_select(*args, **kwargs):
"""Look for the method which matches the name supplied and version
constraints and calls it with the supplied arguments.
@return: Returns the result of the method called
@raises: VersionNotFoundForAPIMethod if there is no method which
matches the name and version constraints
"""
# The first arg to all versioned methods is always the request
# object. The version for the request is attached to the
# request object
if len(args) == 0:
ver = kwargs['req'].api_version_request
else:
ver = args[0].api_version_request
func_list = self.versioned_methods[key]
for func in func_list:
if ver.matches(func.start_version, func.end_version):
# Update the version_select wrapper function so
# other decorator attributes like wsgi.response
# are still respected.
functools.update_wrapper(version_select, func.func)
return func.func(self, *args, **kwargs)
# No version match
raise exception.VersionNotFoundForAPIMethod(version=ver)
try:
version_meth_dict = object.__getattribute__(self, VER_METHOD_ATTR)
except AttributeError:
# No versioning on this class
return object.__getattribute__(self, key)
if version_meth_dict and \
key in object.__getattribute__(self, VER_METHOD_ATTR):
return version_select
return object.__getattribute__(self, key)
# NOTE(cyeoh): This decorator MUST appear first (the outermost
# decorator) on an API method for it to work correctly
@classmethod
def api_version(cls, min_ver, max_ver=None):
"""Decorator for versioning api methods.
Add the decorator to any method which takes a request object
as the first parameter and belongs to a class which inherits from
wsgi.Controller.
@min_ver: string representing minimum version
@max_ver: optional string representing maximum version
"""
def decorator(f):
obj_min_ver = api_version.APIVersionRequest(min_ver)
if max_ver:
obj_max_ver = api_version.APIVersionRequest(max_ver)
else:
obj_max_ver = api_version.APIVersionRequest()
# Add to list of versioned methods registered
func_name = f.__name__
new_func = versioned_method.VersionedMethod(
func_name, obj_min_ver, obj_max_ver, f)
func_dict = getattr(cls, VER_METHOD_ATTR, {})
if not func_dict:
setattr(cls, VER_METHOD_ATTR, func_dict)
func_list = func_dict.get(func_name, [])
if not func_list:
func_dict[func_name] = func_list
func_list.append(new_func)
# Ensure the list is sorted by minimum version (in reverse) so that,
# when we later walk the list in order, the first match is the method
# with the latest version that supports the requested version.
is_intersect = Controller.check_for_versions_intersection(
func_list)
if is_intersect:
raise exception.ApiVersionsIntersect(
name=new_func.name,
min_ver=new_func.start_version,
max_ver=new_func.end_version,
)
func_list.sort(key=lambda f: f.start_version, reverse=True)
return f
return decorator
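# Illustrative sketch of the intended usage (class and method names are
# examples, not from this module):
#
#   class ServersController(wsgi.Controller):
#
#       @wsgi.Controller.api_version("2.1", "2.52")
#       def show(self, req, id):
#           ...   # serves microversions 2.1 through 2.52
#
#       @wsgi.Controller.api_version("2.53")   # noqa
#       def show(self, req, id):
#           ...   # serves 2.53 and later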
@staticmethod
def is_valid_body(body, entity_name):
if not (body and entity_name in body):
return False
def is_dict(d):
try:
d.get(None)
return True
except AttributeError:
return False
return is_dict(body[entity_name])
@staticmethod
def check_for_versions_intersection(func_list):
"""Determines whether function list contains version intervals
intersections or not. General algorithm:
https://en.wikipedia.org/wiki/Intersection_algorithm
:param func_list: list of VersionedMethod objects
:return: boolean
"""
pairs = []
counter = 0
for f in func_list:
pairs.append((f.start_version, 1, f))
pairs.append((f.end_version, -1, f))
def compare(x):
return x[0]
pairs.sort(key=compare)
for p in pairs:
counter += p[1]
if counter > 1:
return True
return False
class Fault(webob.exc.HTTPException):
"""Wrap webob.exc.HTTPException to provide API friendly response."""
_fault_names = {
400: "badRequest",
401: "unauthorized",
403: "forbidden",
404: "itemNotFound",
405: "badMethod",
409: "conflictingRequest",
413: "overLimit",
415: "badMediaType",
429: "overLimit",
501: "notImplemented",
503: "serviceUnavailable"}
def __init__(self, exception):
"""Create a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
for key, value in list(self.wrapped_exc.headers.items()):
self.wrapped_exc.headers[key] = str(value)
self.status_int = exception.status_int
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
user_locale = req.best_match_language()
# Replace the body with fault details.
code = self.wrapped_exc.status_int
fault_name = self._fault_names.get(code, "computeFault")
explanation = self.wrapped_exc.explanation
LOG.debug("Returning %(code)s to user: %(explanation)s",
{'code': code, 'explanation': explanation})
explanation = i18n.translate(explanation, user_locale)
fault_data = {
fault_name: {
'code': code,
'message': explanation}}
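# For a 404, for example, the serialized body ends up as:
#   {"itemNotFound": {"code": 404, "message": "<translated explanation>"}}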
if code == 413 or code == 429:
retry = self.wrapped_exc.headers.get('Retry-After', None)
if retry:
fault_data[fault_name]['retryAfter'] = retry
if not req.api_version_request.is_null():
self.wrapped_exc.headers[API_VERSION_REQUEST_HEADER] = \
'compute ' + req.api_version_request.get_string()
self.wrapped_exc.headers[LEGACY_API_VERSION_REQUEST_HEADER] = \
req.api_version_request.get_string()
self.wrapped_exc.headers.add('Vary', API_VERSION_REQUEST_HEADER)
self.wrapped_exc.headers.add('Vary',
LEGACY_API_VERSION_REQUEST_HEADER)
self.wrapped_exc.content_type = 'application/json'
self.wrapped_exc.charset = 'UTF-8'
self.wrapped_exc.text = JSONDictSerializer().serialize(fault_data)
return self.wrapped_exc
def __str__(self):
return self.wrapped_exc.__str__()
|
|
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for python client."""
import collections
import multiprocessing.dummy as multithreading
import pickle
import time
from absl.testing import absltest
import numpy as np
import portpicker
from reverb import client
from reverb import errors
from reverb import item_selectors
from reverb import rate_limiters
from reverb import server
import tensorflow.compat.v1 as tf
import tree
TABLE_NAME = 'table'
NESTED_SIGNATURE_TABLE_NAME = 'nested_signature_table'
SIMPLE_QUEUE_NAME = 'simple_queue'
QUEUE_SIGNATURE = {
'a': tf.TensorSpec(dtype=tf.int64, shape=(3,)),
'b': tf.TensorSpec(dtype=tf.float32, shape=(3, 2, 2)),
}
class ClientTest(absltest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.tables = [
server.Table(
name=TABLE_NAME,
sampler=item_selectors.Prioritized(1),
remover=item_selectors.Fifo(),
max_size=1000,
rate_limiter=rate_limiters.MinSize(3),
signature=tf.TensorSpec(dtype=tf.int64, shape=[]),
),
server.Table.queue(
name=NESTED_SIGNATURE_TABLE_NAME,
max_size=10,
signature=QUEUE_SIGNATURE,
),
server.Table.queue(SIMPLE_QUEUE_NAME, 10),
]
cls.server = server.Server(tables=cls.tables)
cls.client = cls.server.localhost_client()
def tearDown(self):
self.client.reset(TABLE_NAME)
self.client.reset(NESTED_SIGNATURE_TABLE_NAME)
self.client.reset(SIMPLE_QUEUE_NAME)
super().tearDown()
@classmethod
def tearDownClass(cls):
cls.server.stop()
super().tearDownClass()
def wait_for_table_size(self, size):
for _ in range(100):
if self.tables[0].info.current_size == size:
break
time.sleep(0.01)
self.assertEqual(self.tables[0].info.current_size, size)
def _get_sample_frequency(self, n=10000):
keys = [sample[0].info.key for sample in self.client.sample(TABLE_NAME, n)]
counter = collections.Counter(keys)
return [count / n for _, count in counter.most_common()]
def test_sample_sets_table_size(self):
for i in range(1, 11):
self.client.insert(i, {TABLE_NAME: 1.0})
if i >= 3:
sample = next(self.client.sample(TABLE_NAME, 1))[0]
self.assertEqual(sample.info.table_size, i)
self.wait_for_table_size(10)
def test_sample_sets_probability(self):
for i in range(1, 11):
self.client.insert(i, {TABLE_NAME: 1.0})
if i >= 3:
sample = next(self.client.sample(TABLE_NAME, 1))[0]
self.assertAlmostEqual(sample.info.probability, 1.0 / i, delta=0.01)
def test_sample_sets_priority(self):
# Set the test context by manually mutating priorities to known ones.
for i in range(10):
self.client.insert(i, {TABLE_NAME: 1000.0})
self.wait_for_table_size(10)
def _sample_priorities(n=100):
return {
sample[0].info.key: sample[0].info.priority
for sample in self.client.sample(TABLE_NAME, n)
}
original_priorities = _sample_priorities(n=100)
self.assertNotEmpty(original_priorities)
self.assertSequenceAlmostEqual([1000.0] * len(original_priorities),
original_priorities.values())
expected_priorities = {
key: float(i) for i, key in enumerate(original_priorities)
}
self.client.mutate_priorities(TABLE_NAME, updates=expected_priorities)
# Resample and check priorities.
sampled_priorities = _sample_priorities(n=100)
self.assertNotEmpty(sampled_priorities)
for key, priority in sampled_priorities.items():
if key in expected_priorities:
self.assertAlmostEqual(expected_priorities[key], priority)
def test_insert_raises_if_priorities_empty(self):
with self.assertRaises(ValueError):
self.client.insert([1], {})
def test_insert(self):
self.client.insert(1, {TABLE_NAME: 1.0}) # This should be sampled often.
self.client.insert(2, {TABLE_NAME: 0.1}) # This should be sampled rarely.
self.client.insert(3, {TABLE_NAME: 0.0}) # This should never be sampled.
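# With a Prioritized(1) sampler, sampling probability is proportional to
# priority: 1.0 / 1.1 ~= 0.91 and 0.1 / 1.1 ~= 0.09, which is what the
# frequency assertions below check (within a delta of 0.05).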
self.wait_for_table_size(3)
freqs = self._get_sample_frequency()
self.assertLen(freqs, 2)
self.assertAlmostEqual(freqs[0], 0.9, delta=0.05)
self.assertAlmostEqual(freqs[1], 0.1, delta=0.05)
def test_writer_raises_if_max_sequence_length_lt_1(self):
with self.assertRaises(ValueError):
self.client.writer(0)
def test_writer_raises_if_chunk_length_lt_1(self):
self.client.writer(2, chunk_length=1) # Should be fine.
for chunk_length in [0, -1]:
with self.assertRaises(ValueError):
self.client.writer(2, chunk_length=chunk_length)
def test_writer_raises_if_chunk_length_gt_max_sequence_length(self):
self.client.writer(2, chunk_length=1) # lt should be fine.
self.client.writer(2, chunk_length=2) # eq should be fine.
with self.assertRaises(ValueError):
self.client.writer(2, chunk_length=3)
def test_writer_raises_if_max_in_flight_items_lt_1(self):
self.client.writer(1, max_in_flight_items=1)
self.client.writer(1, max_in_flight_items=2)
with self.assertRaises(ValueError):
self.client.writer(1, max_in_flight_items=-1)
def test_writer_works_with_no_retries(self):
# If the server responds correctly, the writer ignores the no retries arg.
writer = self.client.writer(2)
writer.append([0])
writer.create_item(TABLE_NAME, 1, 1.0)
writer.close(retry_on_unavailable=False)
def test_writer(self):
with self.client.writer(2) as writer:
writer.append([0])
writer.create_item(TABLE_NAME, 1, 1.0)
writer.append([1])
writer.create_item(TABLE_NAME, 2, 1.0)
writer.append([2])
writer.create_item(TABLE_NAME, 1, 1.0)
writer.append_sequence([np.array([3, 4])])
writer.create_item(TABLE_NAME, 2, 1.0)
freqs = self._get_sample_frequency()
self.assertLen(freqs, 4)
for freq in freqs:
self.assertAlmostEqual(freq, 0.25, delta=0.05)
def test_write_and_sample_different_shapes_and_dtypes(self):
trajectories = [
np.ones([], np.int64),
np.ones([2, 2], np.float32),
np.ones([3, 3], np.int32),
]
for trajectory in trajectories:
self.client.insert(trajectory, {SIMPLE_QUEUE_NAME: 1.0})
for i, [sample] in enumerate(self.client.sample(SIMPLE_QUEUE_NAME, 3)):
np.testing.assert_array_equal(trajectories[i], sample.data[0])
def test_mutate_priorities_update(self):
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
before = self._get_sample_frequency()
self.assertLen(before, 3)
for freq in before:
self.assertAlmostEqual(freq, 0.33, delta=0.05)
key = next(self.client.sample(TABLE_NAME, 1))[0].info.key
self.client.mutate_priorities(TABLE_NAME, updates={key: 0.5})
after = self._get_sample_frequency()
self.assertLen(after, 3)
self.assertAlmostEqual(after[0], 0.4, delta=0.05)
self.assertAlmostEqual(after[1], 0.4, delta=0.05)
self.assertAlmostEqual(after[2], 0.2, delta=0.05)
def test_mutate_priorities_delete(self):
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
before = self._get_sample_frequency()
self.assertLen(before, 4)
key = next(self.client.sample(TABLE_NAME, 1))[0].info.key
self.client.mutate_priorities(TABLE_NAME, deletes=[key])
after = self._get_sample_frequency()
self.assertLen(after, 3)
def test_reset(self):
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
keys_before = set(
sample[0].info.key for sample in self.client.sample(TABLE_NAME, 1000))
self.assertLen(keys_before, 3)
self.client.reset(TABLE_NAME)
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
keys_after = set(
sample[0].info.key for sample in self.client.sample(TABLE_NAME, 1000))
self.assertLen(keys_after, 3)
self.assertTrue(keys_after.isdisjoint(keys_before))
def test_server_info(self):
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
self.client.insert([0], {TABLE_NAME: 1.0})
list(self.client.sample(TABLE_NAME, 1))
server_info = self.client.server_info()
self.assertLen(server_info, 3)
self.assertIn(TABLE_NAME, server_info)
table = server_info[TABLE_NAME]
self.assertEqual(table.current_size, 3)
self.assertEqual(table.num_unique_samples, 1)
self.assertEqual(table.max_size, 1000)
self.assertEqual(table.sampler_options.prioritized.priority_exponent, 1)
self.assertTrue(table.remover_options.fifo)
self.assertEqual(table.signature, tf.TensorSpec(dtype=tf.int64, shape=[]))
self.assertIn(NESTED_SIGNATURE_TABLE_NAME, server_info)
queue = server_info[NESTED_SIGNATURE_TABLE_NAME]
self.assertEqual(queue.current_size, 0)
self.assertEqual(queue.num_unique_samples, 0)
self.assertEqual(queue.max_size, 10)
self.assertTrue(queue.sampler_options.fifo)
self.assertTrue(queue.remover_options.fifo)
self.assertEqual(queue.signature, QUEUE_SIGNATURE)
self.assertIn(SIMPLE_QUEUE_NAME, server_info)
info = server_info[SIMPLE_QUEUE_NAME]
self.assertEqual(info.current_size, 0)
self.assertEqual(info.num_unique_samples, 0)
self.assertEqual(info.max_size, 10)
self.assertTrue(info.sampler_options.fifo)
self.assertTrue(info.remover_options.fifo)
self.assertIsNone(info.signature)
def test_sample_trajectory_with_signature(self):
with self.client.trajectory_writer(3) as writer:
for _ in range(3):
writer.append({
'a': np.ones([], np.int64),
'b': np.ones([2, 2], np.float32),
})
writer.create_item(
table=NESTED_SIGNATURE_TABLE_NAME,
priority=1.0,
trajectory={
'a': writer.history['a'][:],
'b': writer.history['b'][:],
})
sample = next(self.client.sample(NESTED_SIGNATURE_TABLE_NAME,
emit_timesteps=False,
unpack_as_table_signature=True))
# The data should be unpacked as the structure of the table.
want = {
'a': np.ones([3], np.int64),
'b': np.ones([3, 2, 2], np.float32),
}
tree.map_structure(np.testing.assert_array_equal, sample.data, want)
# The info fields should all be scalars (i.e. not batched by time).
self.assertIsInstance(sample.info.key, int)
self.assertIsInstance(sample.info.probability, float)
self.assertIsInstance(sample.info.table_size, int)
self.assertIsInstance(sample.info.priority, float)
def test_sample_trajectory_without_signature(self):
with self.client.trajectory_writer(3) as writer:
for _ in range(3):
writer.append({
'a': np.ones([], np.int64),
'b': np.ones([2, 2], np.float32),
})
writer.create_item(
table=SIMPLE_QUEUE_NAME,
priority=1.0,
trajectory={
'a': writer.history['a'][:],
'b': writer.history['b'][:],
})
sample = next(self.client.sample(SIMPLE_QUEUE_NAME,
emit_timesteps=False,
unpack_as_table_signature=True))
# The data should be flat as the table has no signature. Each element within
# the flat data should represent the entire column (i.e. not just one step).
want = [np.ones([3], np.int64), np.ones([3, 2, 2], np.float32)]
tree.map_structure(np.testing.assert_array_equal, sample.data, want)
# The info fields should all be scalars (i.e. not batched by time).
self.assertIsInstance(sample.info.key, int)
self.assertIsInstance(sample.info.probability, float)
self.assertIsInstance(sample.info.table_size, int)
self.assertIsInstance(sample.info.priority, float)
def test_sample_trajectory_as_flat_data(self):
with self.client.trajectory_writer(3) as writer:
for _ in range(3):
writer.append({
'a': np.ones([], np.int64),
'b': np.ones([2, 2], np.float32),
})
writer.create_item(
table=NESTED_SIGNATURE_TABLE_NAME,
priority=1.0,
trajectory={
'a': writer.history['a'][:],
'b': writer.history['b'][:],
})
sample = next(self.client.sample(NESTED_SIGNATURE_TABLE_NAME,
emit_timesteps=False,
unpack_as_table_signature=False))
# The table has a signature but we requested the data to be flat.
want = [np.ones([3], np.int64), np.ones([3, 2, 2], np.float32)]
tree.map_structure(np.testing.assert_array_equal, sample.data, want)
# The info fields should all be scalars (i.e. not batched by time).
self.assertIsInstance(sample.info.key, int)
self.assertIsInstance(sample.info.probability, float)
self.assertIsInstance(sample.info.table_size, int)
self.assertIsInstance(sample.info.priority, float)
def test_sample_trajectory_written_with_insert(self):
self.client.insert(np.ones([3, 3], np.int32), {SIMPLE_QUEUE_NAME: 1.0})
sample = next(self.client.sample(SIMPLE_QUEUE_NAME,
emit_timesteps=False))
# An extra batch dimension should have been added to the inserted data as
# it is a trajectory of length 1.
want = [np.ones([1, 3, 3], np.int32)]
tree.map_structure(np.testing.assert_array_equal, sample.data, want)
# The info fields should all be scalars (i.e. not batched by time).
self.assertIsInstance(sample.info.key, int)
self.assertIsInstance(sample.info.probability, float)
self.assertIsInstance(sample.info.table_size, int)
self.assertIsInstance(sample.info.priority, float)
def test_sample_trajectory_written_with_legacy_writer(self):
with self.client.writer(3) as writer:
for i in range(3):
writer.append([i, np.ones([2, 2], np.float64)])
writer.create_item(SIMPLE_QUEUE_NAME, 3, 1.0)
sample = next(self.client.sample(SIMPLE_QUEUE_NAME,
emit_timesteps=False))
# The time dimension should have been added to all fields.
want = [np.array([0, 1, 2]), np.ones([3, 2, 2], np.float64)]
tree.map_structure(np.testing.assert_array_equal, sample.data, want)
# The info fields should all be scalars (i.e. not batched by time).
self.assertIsInstance(sample.info.key, int)
self.assertIsInstance(sample.info.probability, float)
self.assertIsInstance(sample.info.table_size, int)
self.assertIsInstance(sample.info.priority, float)
def test_server_info_timeout(self):
try:
# Setup a client that doesn't actually connect to anything.
dummy_port = portpicker.pick_unused_port()
dummy_client = client.Client(f'localhost:{dummy_port}')
with self.assertRaises(
errors.DeadlineExceededError,
msg='ServerInfo call did not complete within provided timeout of 1s'):
dummy_client.server_info(timeout=1)
finally:
portpicker.return_port(dummy_port)
def test_pickle(self):
loaded_client = pickle.loads(pickle.dumps(self.client))
self.assertEqual(loaded_client._server_address, self.client._server_address)
loaded_client.insert([0], {TABLE_NAME: 1.0})
self.wait_for_table_size(1)
def test_multithreaded_writer_using_flush(self):
# Ensure that we don't have any errors caused by multithreaded use of
# writers or clients.
pool = multithreading.Pool(64)
def _write(i):
with self.client.writer(1) as writer:
writer.append([i])
# Make sure that flush before create_item doesn't create trouble.
writer.flush()
writer.create_item(TABLE_NAME, 1, 1.0)
writer.flush()
for _ in range(5):
pool.map(_write, list(range(128)))
self.wait_for_table_size(640)
pool.close()
pool.join()
def test_multithreaded_writer_using_scope(self):
# Ensure that we don't have any errors caused by multithreaded use of
# writers or clients.
pool = multithreading.Pool(64)
def _write(i):
with self.client.writer(1) as writer:
writer.append([i])
writer.create_item(TABLE_NAME, 1, 1.0)
for _ in range(5):
pool.map(_write, list(range(256)))
info = self.client.server_info()[TABLE_NAME]
self.assertEqual(info.current_size, 1000)
pool.close()
pool.join()
def test_validates_trajectory_writer_config(self):
with self.assertRaises(ValueError):
self.client.trajectory_writer(0)
with self.assertRaises(ValueError):
self.client.trajectory_writer(-1)
if __name__ == '__main__':
absltest.main()
|
|
"""
Retrieve Pillar data by doing a SQL query
This module is not meant to be used directly as an ext_pillar.
It is a place to put code common to PEP 249 compliant SQL database adapters.
It exposes a python ABC that can be subclassed for new database providers.
:maturity: new
:platform: all
Theory of sql_base ext_pillar
=============================
Ok, here's the theory for how this works...
- First, any non-keyword args are processed in order.
- Then, remaining keywords are processed.
We do this so that it's backward compatible with older configs.
Keyword arguments are sorted before being appended, so that they're predictable,
but they will always be applied last so overall it's moot.
For each of those items we process, it depends on the object type:
- Strings are executed as is and the pillar depth is determined by the number
of fields returned.
- A list has the first entry used as the query, the second as the pillar depth.
- A mapping uses the keys "query" and "depth" as the tuple
You can retrieve as many fields as you like; how they get used depends on the
exact settings.
Configuring a sql_base ext_pillar
=================================
The sql_base ext_pillar cannot be used directly, but shares query configuration
with its implementations. These examples use a fake 'sql_base' adapter, which
should be replaced with the name of the adapter you are using.
A list of queries can be passed in
.. code-block:: yaml
ext_pillar:
- sql_base:
- "SELECT pillar,value FROM pillars WHERE minion_id = %s"
- "SELECT pillar,value FROM more_pillars WHERE minion_id = %s"
Or you can pass in a mapping
.. code-block:: yaml
ext_pillar:
- sql_base:
main: "SELECT pillar,value FROM pillars WHERE minion_id = %s"
extras: "SELECT pillar,value FROM more_pillars WHERE minion_id = %s"
The query can be provided as a string as we have just shown, but queries can
also be provided as lists
.. code-block:: yaml
ext_pillar:
- sql_base:
- "SELECT pillar,value FROM pillars WHERE minion_id = %s"
2
Or as a mapping
.. code-block:: yaml
ext_pillar:
- sql_base:
- query: "SELECT pillar,value FROM pillars WHERE minion_id = %s"
depth: 2
The depth defines how the dicts are constructed.
Essentially if you query for fields a,b,c,d for each row you'll get:
- With depth 1: {a: {"b": b, "c": c, "d": d}}
- With depth 2: {a: {b: {"c": c, "d": d}}}
- With depth 3: {a: {b: {c: d}}}
Depth greater than 3 wouldn't be different from 3 itself.
Depth of 0 translates to the largest depth needed, so 3 in this case.
(max depth == key count - 1)
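As a purely illustrative example, a single row ('web', 'nginx', 'port', 80)
returned for the fields a,b,c,d above would produce:

.. code-block:: python

    # depth 1
    {'web': {'b': 'nginx', 'c': 'port', 'd': 80}}
    # depth 2
    {'web': {'nginx': {'c': 'port', 'd': 80}}}
    # depth 3 (or depth 0, the maximum here)
    {'web': {'nginx': {'port': 80}}}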
Then they are merged in a similar way to plain pillar data, in the order
returned by the SQL database.
Thus subsequent results overwrite previous ones when they collide.
The ignore_null option can be used to change the overwrite behavior so that
only non-NULL values in subsequent results will overwrite. This can be used
to selectively overwrite default values.
.. code-block:: yaml
ext_pillar:
- sql_base:
- query: "SELECT pillar,value FROM pillars WHERE minion_id = 'default' and minion_id != %s"
depth: 2
- query: "SELECT pillar,value FROM pillars WHERE minion_id = %s"
depth: 2
ignore_null: True
If you specify `as_list: True` in the mapping expression it will convert
collisions to lists.
If you specify `with_lists: '...'` in the mapping expression it will
convert the specified depths to lists. The string provided is a
comma-separated sequence of numbers. The string '1,3' will result in::
a,b,c,d,e,1 # field 1 same, field 3 differs
a,b,c,f,g,2 # ^^^^
a,z,h,y,j,3 # field 1 same, field 3 same
a,z,h,y,k,4 # ^^^^
^ ^
These columns define list grouping
.. code-block:: python
{a: [
{c: [
{e: 1},
{g: 2}
]
},
{h: [
{j: 3, k: 4 }
]
}
]}
The range for with_lists is 1 to number_of_fields, inclusive.
Numbers outside this range are ignored.
If you specify `as_json: True` in the mapping expression and query only for a
single value, the returned data is treated as JSON and will be merged
directly.
.. code-block:: yaml
ext_pillar:
- sql_base:
- query: "SELECT json_pillar FROM pillars WHERE minion_id = %s"
as_json: True
The processed JSON entries are recursively merged in a single dictionary.
Additionally, if `as_list` is set to `True`, the lists will be merged in case of collision.
For instance the following rows:
{"a": {"b": [1, 2]}, "c": 3}
{"a": {"b": [1, 3]}, "d": 4}
will result in the following pillar with `as_list=False`
{"a": {"b": [1, 3]}, "c": 3, "d": 4}
and in the following with `as_list=True`
{"a": {"b": [1, 2, 3]}, "c": 3, "d": 4}
Finally, if you pass the queries in via a mapping, the key will be the
first level name, whereas passing them in as a list will place them in the
root. This isolates the query results into their own subtrees.
This may be a help or hindrance to your aims and can be used as such.
You can basically use any SELECT query that gets you the information; you
could even do joins or subqueries in case your minion_id is stored elsewhere.
It is capable of handling single rows or multiple rows per minion.
Configuration of the connection depends on the adapter in use.
.. versionadded:: 3005
The *as_json* parameter.
More complete example for MySQL (to also show configuration)
============================================================
.. code-block:: yaml
mysql:
user: 'salt'
pass: 'super_secret_password'
db: 'salt_db'
ext_pillar:
- mysql:
fromdb:
query: 'SELECT col1,col2,col3,col4,col5,col6,col7
FROM some_random_table
WHERE minion_pattern LIKE %s'
depth: 5
as_list: True
with_lists: [1,3]
"""
import abc # Added in python2.6 so always available
import logging
from salt.utils.dictupdate import update
from salt.utils.odict import OrderedDict
# Please don't strip redundant parentheses from this file.
# I have added some for clarity.
# tests/unit/pillar/mysql_test.py may help understand this code.
# Set up logging
log = logging.getLogger(__name__)
# This ext_pillar is abstract and cannot be used directly
def __virtual__():
return False
class SqlBaseExtPillar(metaclass=abc.ABCMeta):
"""
This class receives and processes the database rows in a database
agnostic way.
"""
result = None
focus = None
field_names = None
num_fields = 0
depth = 0
as_list = False
as_json = False
with_lists = None
ignore_null = False
def __init__(self):
self.result = self.focus = {}
@classmethod
@abc.abstractmethod
def _db_name(cls):
"""
Return a friendly name for the database, e.g. 'MySQL' or 'SQLite'.
Used in logging output.
"""
@abc.abstractmethod
def _get_cursor(self):
"""
Yield a PEP 249 compliant Cursor as a context manager.
"""
def extract_queries(self, args, kwargs):
"""
This function normalizes the config block into a set of queries we
can use. The return is a list of consistently laid out dicts.
"""
# Please note the function signature is NOT an error. Neither args, nor
# kwargs should have asterisks. We are passing in a list and dict,
# rather than receiving variable args. Adding asterisks WILL BREAK the
# function completely.
# First, this is the query buffer. Contains lists of [base,sql]
qbuffer = []
# Add on the non-keywords...
qbuffer.extend([[None, s] for s in args])
# And then the keywords...
        # They aren't in definition order, but they can't conflict with each other.
klist = list(kwargs.keys())
klist.sort()
qbuffer.extend([[k, kwargs[k]] for k in klist])
# Filter out values that don't have queries.
qbuffer = [
x
for x in qbuffer
if (
(isinstance(x[1], str) and len(x[1]))
or (isinstance(x[1], (list, tuple)) and (len(x[1]) > 0) and x[1][0])
or (isinstance(x[1], dict) and "query" in x[1] and len(x[1]["query"]))
)
]
# Next, turn the whole buffer into full dicts.
for qb in qbuffer:
defaults = {
"query": "",
"depth": 0,
"as_list": False,
"as_json": False,
"with_lists": None,
"ignore_null": False,
}
if isinstance(qb[1], str):
defaults["query"] = qb[1]
elif isinstance(qb[1], (list, tuple)):
defaults["query"] = qb[1][0]
if len(qb[1]) > 1:
defaults["depth"] = qb[1][1]
# May set 'as_list' from qb[1][2].
else:
defaults.update(qb[1])
if defaults["with_lists"] and isinstance(defaults["with_lists"], str):
defaults["with_lists"] = [
int(i) for i in defaults["with_lists"].split(",")
]
qb[1] = defaults
return qbuffer
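        # Illustrative sketch of the normalisation above (values are hypothetical):
        #
        #   args   = ["SELECT pillar,value FROM pillars WHERE minion_id = %s"]
        #   kwargs = {"extras": {"query": "SELECT a,b,c FROM t WHERE minion_id = %s",
        #                        "depth": 2, "with_lists": "1,3"}}
        #
        # would be returned as:
        #
        #   [[None, {"query": "SELECT pillar,value FROM pillars WHERE minion_id = %s",
        #            "depth": 0, "as_list": False, "as_json": False,
        #            "with_lists": None, "ignore_null": False}],
        #    ["extras", {"query": "SELECT a,b,c FROM t WHERE minion_id = %s",
        #                "depth": 2, "as_list": False, "as_json": False,
        #                "with_lists": [1, 3], "ignore_null": False}]]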
def enter_root(self, root):
"""
Set self.focus for kwarg queries
"""
# There is no collision protection on root name isolation
if root:
self.result[root] = self.focus = {}
else:
self.focus = self.result
def process_fields(self, field_names, depth):
"""
The primary purpose of this function is to store the sql field list
and the depth to which we process.
"""
# List of field names in correct order.
self.field_names = field_names
# number of fields.
self.num_fields = len(field_names)
# Constrain depth.
if (depth == 0) or (depth >= self.num_fields):
self.depth = self.num_fields - 1
else:
self.depth = depth
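        # For example (illustrative): four fields ('a', 'b', 'c', 'd') with a
        # configured depth of 0 (or any depth >= 4) end up with self.depth == 3,
        # the deepest nesting those fields can support.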
def process_results(self, rows):
"""
        This function takes a list of database results and iterates over them,
        merging them into dict form.
"""
listify = OrderedDict()
listify_dicts = OrderedDict()
for ret in rows:
# crd is the Current Return Data level, to make this non-recursive.
crd = self.focus
# We have just one field without any key, assume returned row is already a dict
# aka JSON storage
if self.as_json and self.num_fields == 1:
crd = update(crd, ret[0], merge_lists=self.as_list)
continue
# Walk and create dicts above the final layer
for i in range(0, self.depth - 1):
# At the end we'll use listify to find values to make a list of
if i + 1 in self.with_lists:
if id(crd) not in listify:
listify[id(crd)] = []
listify_dicts[id(crd)] = crd
if ret[i] not in listify[id(crd)]:
listify[id(crd)].append(ret[i])
if ret[i] not in crd:
# Key missing
crd[ret[i]] = {}
crd = crd[ret[i]]
else:
# Check type of collision
ty = type(crd[ret[i]])
if ty is list:
# Already made list
temp = {}
crd[ret[i]].append(temp)
crd = temp
elif ty is not dict:
# Not a list, not a dict
if self.as_list:
# Make list
temp = {}
crd[ret[i]] = [crd[ret[i]], temp]
crd = temp
else:
# Overwrite
crd[ret[i]] = {}
crd = crd[ret[i]]
else:
# dict, descend.
crd = crd[ret[i]]
# If this test is true, the penultimate field is the key
if self.depth == self.num_fields - 1:
nk = self.num_fields - 2 # Aka, self.depth-1
# Should we and will we have a list at the end?
if (self.as_list and (ret[nk] in crd)) or (nk + 1 in self.with_lists):
if ret[nk] in crd:
if not isinstance(crd[ret[nk]], list):
crd[ret[nk]] = [crd[ret[nk]]]
# if it's already a list, do nothing
else:
crd[ret[nk]] = []
crd[ret[nk]].append(ret[self.num_fields - 1])
else:
if not self.ignore_null or ret[self.num_fields - 1] is not None:
crd[ret[nk]] = ret[self.num_fields - 1]
else:
# Otherwise, the field name is the key but we have a spare.
# The spare results because of {c: d} vs {c: {"d": d, "e": e }}
# So, make that last dict
if ret[self.depth - 1] not in crd:
crd[ret[self.depth - 1]] = {}
# This bit doesn't escape listify
if self.depth in self.with_lists:
if id(crd) not in listify:
listify[id(crd)] = []
listify_dicts[id(crd)] = crd
if ret[self.depth - 1] not in listify[id(crd)]:
listify[id(crd)].append(ret[self.depth - 1])
crd = crd[ret[self.depth - 1]]
# Now for the remaining keys, we put them into the dict
for i in range(self.depth, self.num_fields):
nk = self.field_names[i]
# Listify
if i + 1 in self.with_lists:
if id(crd) not in listify:
listify[id(crd)] = []
listify_dicts[id(crd)] = crd
if nk not in listify[id(crd)]:
listify[id(crd)].append(nk)
# Collision detection
if self.as_list and (nk in crd):
# Same as before...
if isinstance(crd[nk], list):
crd[nk].append(ret[i])
else:
crd[nk] = [crd[nk], ret[i]]
else:
if not self.ignore_null or ret[i] is not None:
crd[nk] = ret[i]
# Get key list and work backwards. This is inner-out processing
ks = list(listify_dicts.keys())
ks.reverse()
for i in ks:
d = listify_dicts[i]
for k in listify[i]:
if isinstance(d[k], dict):
d[k] = list(d[k].values())
elif isinstance(d[k], list):
d[k] = [d[k]]
def fetch(self, minion_id, pillar, *args, **kwargs): # pylint: disable=W0613
"""
Execute queries, merge and return as a dict.
"""
db_name = self._db_name()
log.info("Querying %s for information for %s", db_name, minion_id)
#
# log.debug('ext_pillar %s args: %s', db_name, args)
# log.debug('ext_pillar %s kwargs: %s', db_name, kwargs)
#
# Most of the heavy lifting is in this class for ease of testing.
qbuffer = self.extract_queries(args, kwargs)
with self._get_cursor() as cursor:
for root, details in qbuffer:
# Run the query
cursor.execute(details["query"], (minion_id,))
# Extract the field names the db has returned and process them
self.process_fields(
[row[0] for row in cursor.description], details["depth"]
)
self.enter_root(root)
self.as_list = details["as_list"]
self.as_json = details["as_json"]
if details["with_lists"]:
self.with_lists = details["with_lists"]
else:
self.with_lists = []
self.ignore_null = details["ignore_null"]
self.process_results(cursor.fetchall())
log.debug("ext_pillar %s: Return data: %s", db_name, self)
return self.result
# To extend this module you must define a top level ext_pillar procedure
# See mysql.py for an example
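#
# A minimal, purely illustrative sketch of what an adapter could look like
# (the sqlite3 connection details below are hypothetical; real adapters such
# as mysql.py read their connection settings from the Salt configuration):
#
#     import sqlite3
#     from contextlib import contextmanager
#
#     class SQLite3ExtPillar(SqlBaseExtPillar):
#         @classmethod
#         def _db_name(cls):
#             return "SQLite3"
#
#         @contextmanager
#         def _get_cursor(self):
#             conn = sqlite3.connect("/path/to/pillar.db")
#             try:
#                 yield conn.cursor()
#             finally:
#                 conn.close()
#
#     def ext_pillar(minion_id, pillar, *args, **kwargs):
#         return SQLite3ExtPillar().fetch(minion_id, pillar, *args, **kwargs)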
|
|
#1
import pygame
from pygame.locals import*
import math
import random
import time
#2
shootInterval=15
shootTimer=0
badtimer=100
badtimer1=0
badguys=[]
healthvalue=194
acc=[0,0]
arrows=[]
keys=[False,False,False,False]
autoshoot=False
playerpos=[100,100]
pygame.init()
width,height=1000,750
screen=pygame.display.set_mode((width,height))
pygame.mixer.init()
#3
player=pygame.image.load("resources/images/dude.png")
grass=pygame.image.load("resources/images/grass.png")
castle=pygame.image.load("resources/images/castle.png")
arrow=pygame.image.load('resources/images/bullet.png')
badguyimg1 = pygame.image.load("resources/images/badguy.png")
healthbar = pygame.image.load("resources/images/healthbar.png")
health=pygame.image.load("resources/images/health.png")
badguyimg=badguyimg1
gameover = pygame.image.load("resources/images/gameover.png")
gameoverFull=pygame.transform.scale(gameover,(width,height))
youwin = pygame.image.load("resources/images/youwin.png")
youwinFull=pygame.transform.scale(youwin,(width,height))
#3.1
hit = pygame.mixer.Sound("resources/audio/explode.wav")
enemy = pygame.mixer.Sound("resources/audio/enemy.wav")
shoot = pygame.mixer.Sound("resources/audio/shoot.wav")
hit.set_volume(0.05)
enemy.set_volume(0.05)
shoot.set_volume(0.05)
pygame.mixer.music.load('resources/audio/moonlight.wav')
pygame.mixer.music.play(-1, 0.0)
pygame.mixer.music.set_volume(0.25)
#4
running = 1
exitcode = 0
while running:
time.sleep(0.01)
badtimer-=1
#5
screen.fill([0,255,0])
#6
'''
    for x in range(width//grass.get_width()+1):
        for y in range(height//grass.get_height()+1):
screen.blit(grass,(x*100,y*100))
'''
for x in range (1,5):
screen.blit(castle,(0,height/5*x-50))
#6.1
position=pygame.mouse.get_pos()
angle = math.atan2(position[1]-(playerpos[1]+32),position[0]-(playerpos[0]+26))
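    # 57.29 ~= 180/pi: convert the atan2 result from radians to the degrees rotate() expects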
playerrot = pygame.transform.rotate(player, 360-angle*57.29)
playerpos1 = (playerpos[0]-playerrot.get_rect().width/2, playerpos[1]-playerrot.get_rect().height/2)
screen.blit(playerrot, playerpos1)
#6.2
    for bullet in arrows:
        velx=math.cos(bullet[0])*5
        vely=math.sin(bullet[0])*5
        bullet[1]+=velx
        bullet[2]+=vely
    # Drop arrows that have left the window. The original 'del bullet' only
    # unbound the loop variable and never removed anything from the list.
    arrows=[bullet for bullet in arrows if -64<=bullet[1]<=width and -64<=bullet[2]<=height]
for projectile in arrows:
arrow1 = pygame.transform.rotate(arrow, 360-projectile[0]*57.29)
screen.blit(arrow1, (projectile[1], projectile[2]))
#6.3
if badtimer==0:
badguys.append([width, random.randint(50,height-50)])
badtimer=100-(badtimer1*2)
if badtimer1>=35:
badtimer1=35
else:
badtimer1+=5
index=0
for badguy in badguys:
if badguy[0]<-64:
badguys.pop(index)
badguy[0]-=10
badrect=pygame.Rect(badguyimg.get_rect())
badrect.top=badguy[1]
badrect.left=badguy[0]
if badrect.left<64:
healthvalue -= random.randint(5,20)
badguys.pop(index)
hit.play()
#6.3.2
index1=0
for bullet in arrows:
bullrect=pygame.Rect(arrow.get_rect())
bullrect.left=bullet[1]
bullrect.top=bullet[2]
if badrect.colliderect(bullrect):
acc[0]+=1
badguys.pop(index)
arrows.pop(index1)
enemy.play()
index1+=1
index+=1
for badguy in badguys:
screen.blit(badguyimg, badguy)
#6.4
font=pygame.font.Font(None,24)
    survivedtext = font.render(str((90000-pygame.time.get_ticks())//60000)+":"+str((90000-pygame.time.get_ticks())//1000%60).zfill(2), True, (0,0,0))
textRect = survivedtext.get_rect()
textRect.topright=[635,5]
screen.blit(survivedtext, textRect)
#6.5
screen.blit(healthbar, (5,5))
    for health1 in range(healthvalue):
screen.blit(health,(health1+8,8))
#7
pygame.display.flip()
#8
for event in pygame.event.get():
if event.type==pygame.MOUSEBUTTONDOWN:
autoshoot=True
if event.type==pygame.MOUSEBUTTONUP:
autoshoot=False
if event.type==pygame.QUIT:
pygame.quit()
exit(0)
if event.type == pygame.KEYDOWN:
if event.key==K_w:
keys[0]=True
elif event.key==K_a:
keys[1]=True
elif event.key==K_s:
keys[2]=True
elif event.key==K_d:
keys[3]=True
if event.type == pygame.KEYUP:
if event.key==pygame.K_w:
keys[0]=False
elif event.key==pygame.K_a:
keys[1]=False
elif event.key==pygame.K_s:
keys[2]=False
elif event.key==pygame.K_d:
keys[3]=False
if autoshoot and shootTimer<=0:
position=pygame.mouse.get_pos()
acc[1]+=1
arrows.append([math.atan2(position[1]-(playerpos1[1]+32),position[0]-(playerpos1[0]+26)),playerpos1[0]+32,playerpos1[1]+32])
shoot.play()
shootTimer=shootInterval
shootTimer-=1
#9
if keys[0]:
playerpos[1]-=5
elif keys[2]:
playerpos[1]+=5
if keys[1]:
playerpos[0]-=5
elif keys[3]:
playerpos[0]+=5
#10
if pygame.time.get_ticks()>=90000:
running=0
exitcode=1
if healthvalue<=0:
running=0
exitcode=0
if acc[1]!=0:
accuracy=acc[0]*1.0/acc[1]*100
else:
accuracy=0
#11
if exitcode==0:
pygame.font.init()
font = pygame.font.Font(None, 24)
text = font.render("Accuracy: "+str(accuracy)+"%", True, (255,0,0))
textRect = text.get_rect()
textRect.centerx = screen.get_rect().centerx
textRect.centery = screen.get_rect().centery+24
screen.blit(gameoverFull, (0,0))
screen.blit(text, textRect)
else:
pygame.font.init()
font = pygame.font.Font(None, 24)
text = font.render("Accuracy: "+str(accuracy)+"%", True, (0,255,0))
textRect = text.get_rect()
textRect.centerx = screen.get_rect().centerx
textRect.centery = screen.get_rect().centery+24
screen.blit(youwinFull, (0,0))
screen.blit(text, textRect)
while 1:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit(0)
pygame.display.flip()
|
|
import binascii
import logging
from rdflib import Literal
log = logging.getLogger('fedoralink.utils')
try:
from cis_django_modules.cis_util.czech import czech_sorting_key
except ImportError:
    czech_sorting_key = lambda x: x
def get_class( kls ):
parts = kls.split('.')
module = ".".join(parts[:-1])
m = __import__( module )
for comp in parts[1:]:
m = getattr(m, comp)
return m
def create_instance(class_name, constructor_args):
return get_class(class_name)(*constructor_args)
class StringLikeList(list):
def __str__(self):
if len(self) == 1:
return str(self[0])
return super(StringLikeList, self).__str__()
def __contains__(self, item):
for x in self:
if isinstance(item, Literal):
if x == item:
return True
elif isinstance(x, str):
if x == str(item):
return True
return False
def __eq__(self, other):
if isinstance(other, str):
return len(self) == 1 and other in self
if isinstance(other, list):
if len(self) != len(other):
return False
for x in other:
if x not in self:
return False
return True
else:
return False
class TypedStream:
def __init__(self, stream_or_filepath, mimetype=None, filename=None):
"""
Creates a new instance of stream with optional mimetype and filename
:param stream_or_filepath: istream or optionally path on local filesystem
:param mimetype: mimetype. If not provided will be guessed
(only if stream_or_filepath points to local file)
:param filename: filename in case stream_or_filepath is an input stream
:return:
"""
if isinstance(stream_or_filepath, str):
self.__stream = None
self.__filename = stream_or_filepath
if mimetype is None:
self.__mimetype = self.__guess_mimetype_from_filename(stream_or_filepath)
else:
self.__mimetype = mimetype
else:
self.__stream = stream_or_filepath
self.__filename = filename
if mimetype is None:
self.__mimetype = 'application/binary'
else:
self.__mimetype = mimetype
@property
def stream(self):
"""
Returns an input stream. Note that even though this method can be called more than once, the returned
stream is always the same and might be already read up.
:return: istream
"""
if not self.__stream and self.__filename is not None:
self.__stream = open(self.__filename, 'rb')
return self.__stream
@property
def mimetype(self):
"""
Return mimetype or application/binary if mimetype can not be estimated
"""
return self.__mimetype
@property
def filename(self):
"""
Return the filename if it was set
"""
return self.__filename
@staticmethod
def __guess_mimetype_from_filename(file_path):
try:
import magic
return magic.from_file(file_path, mime=True)
except Exception as e:
log.error(e)
return 'application/binary'
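    # Illustrative usage sketch (paths and names are hypothetical):
    #
    #   TypedStream('/tmp/report.pdf')                  # mimetype guessed from the file
    #   TypedStream(open('/tmp/report.pdf', 'rb'),
    #               mimetype='application/pdf',
    #               filename='report.pdf')              # explicit metadata for a stream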
class OrderableModelList(list):
def __init__(self, lst, model):
super(OrderableModelList, self).__init__(lst)
self._model = model
def order_by(self, *args):
lst = self[:]
class NegatedKey(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return self.obj > other
def __gt__(self, other):
return self.obj < other
def __eq__(self, other):
return self.obj == other
def __le__(self, other):
return self.obj >= other
def __ge__(self, other):
return self.obj <= other
def __ne__(self, other):
return self.obj != other
def key(item):
ret = []
for arg in args:
if '@' in arg:
arg, lang = arg.split('@')
else:
lang = None
if arg[0] in ('+', '-'):
asc, arg = arg[0], arg[1:]
else:
asc = '+'
arg = getattr(item, arg)
item = None
if lang is not None:
for a in arg:
if a.language == lang:
item = a.value
break
if item is None:
if len(arg):
item = arg[0].value
else:
item = None
                if item is None:
                    item = ''
                item = item.strip()
if lang == 'cs':
item = czech_sorting_key(item)
if asc == '-':
ret.append(NegatedKey(item))
else:
ret.append(item)
return ret
lst.sort(key=key)
return OrderableModelList(lst, self._model)
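        # Illustrative usage sketch (field names are hypothetical):
        #
        #   results.order_by('-title@cs', 'creator')
        #
        # A leading '-' reverses the ordering for that key and '@lang' picks the
        # literal with the matching language tag before comparison.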
def fullname(o, prefix_before_name=''):
return o.__module__ + "." + prefix_before_name + o.__name__
known_prefixes = {
'http://purl.org/dc/elements/1.1/': '1'
}
known_prefixes_reversed = { v:k for k, v in known_prefixes.items() }
def url2id(url):
ret = []
for p, val in known_prefixes.items():
if url.startswith(p):
ret.append('_' + known_prefixes[p])
url = url[len(p):]
break
url = url.encode('utf-8')
for c in url:
if ord('a') <= c <= ord('z') or ord('A') <= c <= ord('Z') or ord('0') <= c <= ord('9'):
ret.append(chr(c))
else:
ret.append('__')
ret.append(binascii.hexlify(bytes([c])).decode('utf-8'))
return ''.join(ret)
def id2url(id):
ret = []
tok = iter(id)
try:
while True:
c = next(tok)
if c != '_':
ret.append(c)
else:
c = next(tok)
if c != '_':
ret.append(known_prefixes_reversed[c])
else:
c1 = next(tok)
c2 = next(tok)
ret.append(binascii.unhexlify(''.join([c1,c2])).decode('utf-8'))
except StopIteration:
pass
    except Exception as e:
        raise Exception("Exception in id2url, id %s" % id) from e
return ''.join(ret)
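# Illustrative round trip (based on the known_prefixes table above):
#
#   url2id('http://purl.org/dc/elements/1.1/title')  ->  '_1title'
#   id2url('_1title')  ->  'http://purl.org/dc/elements/1.1/title'
#
# Characters outside [A-Za-z0-9] are escaped as '__' plus two hex digits,
# e.g. '.' becomes '__2e'.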
|
|
from datetime import datetime, timedelta, date, time
import nose
from pandas.compat import lrange, zip
import numpy as np
from pandas import Index, Series, DataFrame
from pandas.tseries.index import date_range, bdate_range
from pandas.tseries.offsets import DateOffset
from pandas.tseries.period import period_range, Period, PeriodIndex
from pandas.tseries.resample import DatetimeIndex
from pandas.util.testing import assert_series_equal, ensure_clean, slow
import pandas.util.testing as tm
from pandas.tests.plotting.common import (TestPlotBase,
_skip_if_no_scipy_gaussian_kde)
""" Test cases for time series specific (freq conversion, etc) """
@tm.mplskip
class TestTSPlot(TestPlotBase):
def setUp(self):
TestPlotBase.setUp(self)
freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q', 'A']
idx = [period_range('12/31/1999', freq=x, periods=100) for x in freq]
self.period_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.period_df = [DataFrame(np.random.randn(len(x), 3), index=x,
columns=['A', 'B', 'C'])
for x in idx]
freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q-DEC', 'A', '1B30Min']
idx = [date_range('12/31/1999', freq=x, periods=100) for x in freq]
self.datetime_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.datetime_df = [DataFrame(np.random.randn(len(x), 3), index=x,
columns=['A', 'B', 'C'])
for x in idx]
def tearDown(self):
tm.close()
@slow
def test_ts_plot_with_tz(self):
# GH2877
index = date_range('1/1/2011', periods=2, freq='H',
tz='Europe/Brussels')
ts = Series([188.5, 328.25], index=index)
_check_plot_works(ts.plot)
def test_fontsize_set_correctly(self):
# For issue #8765
import matplotlib.pyplot as plt # noqa
df = DataFrame(np.random.randn(10, 9), index=range(10))
ax = df.plot(fontsize=2)
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
self.assertEqual(label.get_fontsize(), 2)
@slow
def test_frame_inferred(self):
# inferred freq
import matplotlib.pyplot as plt # noqa
idx = date_range('1/1/1987', freq='MS', periods=100)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
# axes freq
idx = idx[0:40].union(idx[45:99])
df2 = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df2.plot)
# N > 1
idx = date_range('2008-1-1 00:15:00', freq='15T', periods=10)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
def test_is_error_nozeroindex(self):
# GH11858
i = np.array([1, 2, 3])
a = DataFrame(i, index=i)
_check_plot_works(a.plot, xerr=a)
_check_plot_works(a.plot, yerr=a)
def test_nonnumeric_exclude(self):
import matplotlib.pyplot as plt
idx = date_range('1/1/1987', freq='A', periods=3)
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]}, idx)
ax = df.plot() # it works
self.assertEqual(len(ax.get_lines()), 1) # B was plotted
plt.close(plt.gcf())
self.assertRaises(TypeError, df['A'].plot)
@slow
def test_tsplot(self):
from pandas.tseries.plotting import tsplot
import matplotlib.pyplot as plt
ax = plt.gca()
ts = tm.makeTimeSeries()
f = lambda *args, **kwds: tsplot(s, plt.Axes.plot, *args, **kwds)
for s in self.period_ser:
_check_plot_works(f, s.index.freq, ax=ax, series=s)
for s in self.datetime_ser:
_check_plot_works(f, s.index.freq.rule_code, ax=ax, series=s)
for s in self.period_ser:
_check_plot_works(s.plot, ax=ax)
for s in self.datetime_ser:
_check_plot_works(s.plot, ax=ax)
ax = ts.plot(style='k')
color = (0., 0., 0., 1) if self.mpl_ge_2_0_0 else (0., 0., 0.)
self.assertEqual(color, ax.get_lines()[0].get_color())
def test_both_style_and_color(self):
import matplotlib.pyplot as plt # noqa
ts = tm.makeTimeSeries()
self.assertRaises(ValueError, ts.plot, style='b-', color='#000099')
s = ts.reset_index(drop=True)
self.assertRaises(ValueError, s.plot, style='b-', color='#000099')
@slow
def test_high_freq(self):
freaks = ['ms', 'us']
for freq in freaks:
rng = date_range('1/1/2012', periods=100000, freq=freq)
ser = Series(np.random.randn(len(rng)), rng)
_check_plot_works(ser.plot)
def test_get_datevalue(self):
from pandas.tseries.converter import get_datevalue
self.assertIsNone(get_datevalue(None, 'D'))
self.assertEqual(get_datevalue(1987, 'A'), 1987)
self.assertEqual(get_datevalue(Period(1987, 'A'), 'M'),
Period('1987-12', 'M').ordinal)
self.assertEqual(get_datevalue('1/1/1987', 'D'),
Period('1987-1-1', 'D').ordinal)
@slow
def test_ts_plot_format_coord(self):
def check_format_of_first_point(ax, expected_string):
first_line = ax.get_lines()[0]
first_x = first_line.get_xdata()[0].ordinal
first_y = first_line.get_ydata()[0]
try:
self.assertEqual(expected_string,
ax.format_coord(first_x, first_y))
except (ValueError):
raise nose.SkipTest("skipping test because issue forming "
"test comparison GH7664")
annual = Series(1, index=date_range('2014-01-01', periods=3,
freq='A-DEC'))
check_format_of_first_point(annual.plot(), 't = 2014 y = 1.000000')
# note this is added to the annual plot already in existence, and
# changes its freq field
daily = Series(1, index=date_range('2014-01-01', periods=3, freq='D'))
check_format_of_first_point(daily.plot(),
't = 2014-01-01 y = 1.000000')
tm.close()
# tsplot
import matplotlib.pyplot as plt
from pandas.tseries.plotting import tsplot
tsplot(annual, plt.Axes.plot)
check_format_of_first_point(plt.gca(), 't = 2014 y = 1.000000')
tsplot(daily, plt.Axes.plot)
check_format_of_first_point(plt.gca(), 't = 2014-01-01 y = 1.000000')
@slow
def test_line_plot_period_series(self):
for s in self.period_ser:
_check_plot_works(s.plot, s.index.freq)
@slow
def test_line_plot_datetime_series(self):
for s in self.datetime_ser:
_check_plot_works(s.plot, s.index.freq.rule_code)
@slow
def test_line_plot_period_frame(self):
for df in self.period_df:
_check_plot_works(df.plot, df.index.freq)
@slow
def test_line_plot_datetime_frame(self):
for df in self.datetime_df:
freq = df.index.to_period(df.index.freq.rule_code).freq
_check_plot_works(df.plot, freq)
@slow
def test_line_plot_inferred_freq(self):
for ser in self.datetime_ser:
ser = Series(ser.values, Index(np.asarray(ser.index)))
_check_plot_works(ser.plot, ser.index.inferred_freq)
ser = ser[[0, 3, 5, 6]]
_check_plot_works(ser.plot)
def test_fake_inferred_business(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
fig.add_subplot(111)
rng = date_range('2001-1-1', '2001-1-10')
ts = Series(lrange(len(rng)), rng)
ts = ts[:3].append(ts[5:])
ax = ts.plot()
self.assertFalse(hasattr(ax, 'freq'))
@slow
def test_plot_offset_freq(self):
ser = tm.makeTimeSeries()
_check_plot_works(ser.plot)
dr = date_range(ser.index[0], freq='BQS', periods=10)
ser = Series(np.random.randn(len(dr)), dr)
_check_plot_works(ser.plot)
@slow
def test_plot_multiple_inferred_freq(self):
dr = Index([datetime(2000, 1, 1), datetime(2000, 1, 6), datetime(
2000, 1, 11)])
ser = Series(np.random.randn(len(dr)), dr)
_check_plot_works(ser.plot)
@slow
def test_uhf(self):
import pandas.tseries.converter as conv
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
fig.add_subplot(111)
idx = date_range('2012-6-22 21:59:51.960928', freq='L', periods=500)
df = DataFrame(np.random.randn(len(idx), 2), idx)
ax = df.plot()
axis = ax.get_xaxis()
tlocs = axis.get_ticklocs()
tlabels = axis.get_ticklabels()
for loc, label in zip(tlocs, tlabels):
xp = conv._from_ordinal(loc).strftime('%H:%M:%S.%f')
rs = str(label.get_text())
if len(rs):
self.assertEqual(xp, rs)
@slow
def test_irreg_hf(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
fig.add_subplot(111)
idx = date_range('2012-6-22 21:59:51', freq='S', periods=100)
df = DataFrame(np.random.randn(len(idx), 2), idx)
irreg = df.ix[[0, 1, 3, 4]]
ax = irreg.plot()
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
sec = 1. / 24 / 60 / 60
self.assertTrue((np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all(
))
plt.clf()
fig.add_subplot(111)
df2 = df.copy()
df2.index = df.index.asobject
ax = df2.plot()
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
self.assertTrue((np.fabs(diffs[1:] - sec) < 1e-8).all())
def test_irregular_datetime64_repr_bug(self):
import matplotlib.pyplot as plt
ser = tm.makeTimeSeries()
ser = ser[[0, 1, 2, 7]]
fig = plt.gcf()
plt.clf()
ax = fig.add_subplot(211)
ret = ser.plot()
self.assertIsNotNone(ret)
for rs, xp in zip(ax.get_lines()[0].get_xdata(), ser.index):
self.assertEqual(rs, xp)
def test_business_freq(self):
import matplotlib.pyplot as plt # noqa
bts = tm.makePeriodSeries()
ax = bts.plot()
self.assertEqual(ax.get_lines()[0].get_xydata()[0, 0],
bts.index[0].ordinal)
idx = ax.get_lines()[0].get_xdata()
self.assertEqual(PeriodIndex(data=idx).freqstr, 'B')
@slow
def test_business_freq_convert(self):
n = tm.N
tm.N = 300
bts = tm.makeTimeSeries().asfreq('BM')
tm.N = n
ts = bts.to_period('M')
ax = bts.plot()
self.assertEqual(ax.get_lines()[0].get_xydata()[0, 0],
ts.index[0].ordinal)
idx = ax.get_lines()[0].get_xdata()
self.assertEqual(PeriodIndex(data=idx).freqstr, 'M')
def test_nonzero_base(self):
# GH2571
idx = (date_range('2012-12-20', periods=24, freq='H') + timedelta(
minutes=30))
df = DataFrame(np.arange(24), index=idx)
ax = df.plot()
rs = ax.get_lines()[0].get_xdata()
self.assertFalse(Index(rs).is_normalized)
def test_dataframe(self):
bts = DataFrame({'a': tm.makeTimeSeries()})
ax = bts.plot()
idx = ax.get_lines()[0].get_xdata()
tm.assert_index_equal(bts.index.to_period(), PeriodIndex(idx))
@slow
def test_axis_limits(self):
import matplotlib.pyplot as plt
def _test(ax):
xlim = ax.get_xlim()
ax.set_xlim(xlim[0] - 5, xlim[1] + 10)
ax.get_figure().canvas.draw()
result = ax.get_xlim()
self.assertEqual(result[0], xlim[0] - 5)
self.assertEqual(result[1], xlim[1] + 10)
# string
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim('1/1/2000', '4/1/2000')
ax.get_figure().canvas.draw()
result = ax.get_xlim()
self.assertEqual(int(result[0]), expected[0].ordinal)
self.assertEqual(int(result[1]), expected[1].ordinal)
            # datetime
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim(datetime(2000, 1, 1), datetime(2000, 4, 1))
ax.get_figure().canvas.draw()
result = ax.get_xlim()
self.assertEqual(int(result[0]), expected[0].ordinal)
self.assertEqual(int(result[1]), expected[1].ordinal)
fig = ax.get_figure()
plt.close(fig)
ser = tm.makeTimeSeries()
ax = ser.plot()
_test(ax)
df = DataFrame({'a': ser, 'b': ser + 1})
ax = df.plot()
_test(ax)
df = DataFrame({'a': ser, 'b': ser + 1})
axes = df.plot(subplots=True)
for ax in axes:
_test(ax)
def test_get_finder(self):
import pandas.tseries.converter as conv
self.assertEqual(conv.get_finder('B'), conv._daily_finder)
self.assertEqual(conv.get_finder('D'), conv._daily_finder)
self.assertEqual(conv.get_finder('M'), conv._monthly_finder)
self.assertEqual(conv.get_finder('Q'), conv._quarterly_finder)
self.assertEqual(conv.get_finder('A'), conv._annual_finder)
self.assertEqual(conv.get_finder('W'), conv._daily_finder)
@slow
def test_finder_daily(self):
import matplotlib.pyplot as plt
xp = Period('1999-1-1', freq='B').ordinal
day_lst = [10, 40, 252, 400, 950, 2750, 10000]
for n in day_lst:
rng = bdate_range('1999-1-1', periods=n)
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
plt.close(ax.get_figure())
@slow
def test_finder_quarterly(self):
import matplotlib.pyplot as plt
xp = Period('1988Q1').ordinal
yrs = [3.5, 11]
for n in yrs:
rng = period_range('1987Q2', periods=int(n * 4), freq='Q')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(rs, xp)
(vmin, vmax) = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
plt.close(ax.get_figure())
@slow
def test_finder_monthly(self):
import matplotlib.pyplot as plt
xp = Period('Jan 1988').ordinal
yrs = [1.15, 2.5, 4, 11]
for n in yrs:
rng = period_range('1987Q2', periods=int(n * 12), freq='M')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(rs, xp)
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
plt.close(ax.get_figure())
def test_finder_monthly_long(self):
rng = period_range('1988Q1', periods=24 * 12, freq='M')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1989Q1', 'M').ordinal
self.assertEqual(rs, xp)
@slow
def test_finder_annual(self):
import matplotlib.pyplot as plt
xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170]
for i, nyears in enumerate([5, 10, 19, 49, 99, 199, 599, 1001]):
rng = period_range('1987', periods=nyears, freq='A')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(rs, Period(xp[i], freq='A').ordinal)
plt.close(ax.get_figure())
@slow
def test_finder_minutely(self):
nminutes = 50 * 24 * 60
rng = date_range('1/1/1999', freq='Min', periods=nminutes)
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1/1/1999', freq='Min').ordinal
self.assertEqual(rs, xp)
def test_finder_hourly(self):
nhours = 23
rng = date_range('1/1/1999', freq='H', periods=nhours)
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1/1/1999', freq='H').ordinal
self.assertEqual(rs, xp)
@slow
def test_gaps(self):
import matplotlib.pyplot as plt
ts = tm.makeTimeSeries()
ts[5:25] = np.nan
ax = ts.plot()
lines = ax.get_lines()
tm._skip_if_mpl_1_5()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[5:25, 1].all())
plt.close(ax.get_figure())
# irregular
ts = tm.makeTimeSeries()
ts = ts[[0, 1, 2, 5, 7, 9, 12, 15, 20]]
ts[2:5] = np.nan
ax = ts.plot()
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[2:5, 1].all())
plt.close(ax.get_figure())
# non-ts
idx = [0, 1, 2, 5, 7, 9, 12, 15, 20]
ser = Series(np.random.randn(len(idx)), idx)
ser[2:5] = np.nan
ax = ser.plot()
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[2:5, 1].all())
@slow
def test_gap_upsample(self):
low = tm.makeTimeSeries()
low[5:25] = np.nan
ax = low.plot()
idxh = date_range(low.index[0], low.index[-1], freq='12h')
s = Series(np.random.randn(len(idxh)), idxh)
s.plot(secondary_y=True)
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
self.assertEqual(len(ax.right_ax.get_lines()), 1)
l = lines[0]
data = l.get_xydata()
tm._skip_if_mpl_1_5()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[5:25, 1].all())
@slow
def test_secondary_y(self):
import matplotlib.pyplot as plt
ser = Series(np.random.randn(10))
ser2 = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True)
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
fig = ax.get_figure()
axes = fig.get_axes()
l = ax.get_lines()[0]
xp = Series(l.get_ydata(), l.get_xdata())
assert_series_equal(ser, xp)
self.assertEqual(ax.get_yaxis().get_ticks_position(), 'right')
self.assertFalse(axes[0].get_yaxis().get_visible())
plt.close(fig)
ax2 = ser2.plot()
self.assertEqual(ax2.get_yaxis().get_ticks_position(),
self.default_tick_position)
plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
self.assertTrue(ax.get_yaxis().get_visible())
self.assertFalse(hasattr(ax, 'left_ax'))
self.assertTrue(hasattr(ax, 'right_ax'))
self.assertTrue(hasattr(ax2, 'left_ax'))
self.assertFalse(hasattr(ax2, 'right_ax'))
@slow
def test_secondary_y_ts(self):
import matplotlib.pyplot as plt
idx = date_range('1/1/2000', periods=10)
ser = Series(np.random.randn(10), idx)
ser2 = Series(np.random.randn(10), idx)
ax = ser.plot(secondary_y=True)
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
fig = ax.get_figure()
axes = fig.get_axes()
l = ax.get_lines()[0]
xp = Series(l.get_ydata(), l.get_xdata()).to_timestamp()
assert_series_equal(ser, xp)
self.assertEqual(ax.get_yaxis().get_ticks_position(), 'right')
self.assertFalse(axes[0].get_yaxis().get_visible())
plt.close(fig)
ax2 = ser2.plot()
self.assertEqual(ax2.get_yaxis().get_ticks_position(),
self.default_tick_position)
plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
self.assertTrue(ax.get_yaxis().get_visible())
@slow
def test_secondary_kde(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
import matplotlib.pyplot as plt # noqa
ser = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True, kind='density')
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
fig = ax.get_figure()
axes = fig.get_axes()
self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'right')
@slow
def test_secondary_bar(self):
ser = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True, kind='bar')
fig = ax.get_figure()
axes = fig.get_axes()
self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'right')
@slow
def test_secondary_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
axes = df.plot(secondary_y=['a', 'c'], subplots=True)
self.assertEqual(axes[0].get_yaxis().get_ticks_position(), 'right')
self.assertEqual(axes[1].get_yaxis().get_ticks_position(),
self.default_tick_position)
self.assertEqual(axes[2].get_yaxis().get_ticks_position(), 'right')
@slow
def test_secondary_bar_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
axes = df.plot(kind='bar', secondary_y=['a', 'c'], subplots=True)
self.assertEqual(axes[0].get_yaxis().get_ticks_position(), 'right')
self.assertEqual(axes[1].get_yaxis().get_ticks_position(),
self.default_tick_position)
self.assertEqual(axes[2].get_yaxis().get_ticks_position(), 'right')
def test_mixed_freq_regular_first(self):
import matplotlib.pyplot as plt # noqa
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
# it works!
s1.plot()
ax2 = s2.plot(style='g')
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
self.assertTrue(idx1.equals(s1.index.to_period('B')))
self.assertTrue(idx2.equals(s2.index.to_period('B')))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
self.assertEqual(left, pidx[0].ordinal)
self.assertEqual(right, pidx[-1].ordinal)
@slow
def test_mixed_freq_irregular_first(self):
import matplotlib.pyplot as plt # noqa
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
s2.plot(style='g')
ax = s1.plot()
self.assertFalse(hasattr(ax, 'freq'))
lines = ax.get_lines()
x1 = lines[0].get_xdata()
tm.assert_numpy_array_equal(x1, s2.index.asobject.values)
x2 = lines[1].get_xdata()
tm.assert_numpy_array_equal(x2, s1.index.asobject.values)
def test_mixed_freq_regular_first_df(self):
# GH 9852
import matplotlib.pyplot as plt # noqa
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
ax = s1.plot()
ax2 = s2.plot(style='g', ax=ax)
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
self.assertTrue(idx1.equals(s1.index.to_period('B')))
self.assertTrue(idx2.equals(s2.index.to_period('B')))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
self.assertEqual(left, pidx[0].ordinal)
self.assertEqual(right, pidx[-1].ordinal)
@slow
def test_mixed_freq_irregular_first_df(self):
# GH 9852
import matplotlib.pyplot as plt # noqa
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
ax = s2.plot(style='g')
ax = s1.plot(ax=ax)
self.assertFalse(hasattr(ax, 'freq'))
lines = ax.get_lines()
x1 = lines[0].get_xdata()
tm.assert_numpy_array_equal(x1, s2.index.asobject.values)
x2 = lines[1].get_xdata()
tm.assert_numpy_array_equal(x2, s1.index.asobject.values)
def test_mixed_freq_hf_first(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
high.plot()
ax = low.plot()
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'D')
@slow
def test_mixed_freq_alignment(self):
ts_ind = date_range('2012-01-01 13:00', '2012-01-02', freq='H')
ts_data = np.random.randn(12)
ts = Series(ts_data, index=ts_ind)
ts2 = ts.asfreq('T').interpolate()
ax = ts.plot()
ts2.plot(style='r')
self.assertEqual(ax.lines[0].get_xdata()[0],
ax.lines[1].get_xdata()[0])
@slow
def test_mixed_freq_lf_first(self):
import matplotlib.pyplot as plt
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot(legend=True)
ax = high.plot(legend=True)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'D')
leg = ax.get_legend()
self.assertEqual(len(leg.texts), 2)
plt.close(ax.get_figure())
idxh = date_range('1/1/1999', periods=240, freq='T')
idxl = date_range('1/1/1999', periods=4, freq='H')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot()
ax = high.plot()
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'T')
def test_mixed_freq_irreg_period(self):
ts = tm.makeTimeSeries()
irreg = ts[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 29]]
rng = period_range('1/3/2000', periods=30, freq='B')
ps = Series(np.random.randn(len(rng)), rng)
irreg.plot()
ps.plot()
def test_mixed_freq_shared_ax(self):
# GH13341, using sharex=True
idx1 = date_range('2015-01-01', periods=3, freq='M')
idx2 = idx1[:1].union(idx1[2:])
s1 = Series(range(len(idx1)), idx1)
s2 = Series(range(len(idx2)), idx2)
fig, (ax1, ax2) = self.plt.subplots(nrows=2, sharex=True)
s1.plot(ax=ax1)
s2.plot(ax=ax2)
self.assertEqual(ax1.freq, 'M')
self.assertEqual(ax2.freq, 'M')
self.assertEqual(ax1.lines[0].get_xydata()[0, 0],
ax2.lines[0].get_xydata()[0, 0])
# using twinx
fig, ax1 = self.plt.subplots()
ax2 = ax1.twinx()
s1.plot(ax=ax1)
s2.plot(ax=ax2)
self.assertEqual(ax1.lines[0].get_xydata()[0, 0],
ax2.lines[0].get_xydata()[0, 0])
# TODO (GH14330, GH14322)
# plotting the irregular first does not yet work
# fig, ax1 = plt.subplots()
# ax2 = ax1.twinx()
# s2.plot(ax=ax1)
# s1.plot(ax=ax2)
# self.assertEqual(ax1.lines[0].get_xydata()[0, 0],
# ax2.lines[0].get_xydata()[0, 0])
@slow
def test_to_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
high.plot()
ax = low.plot()
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
# tsplot
from pandas.tseries.plotting import tsplot
import matplotlib.pyplot as plt
tsplot(high, plt.Axes.plot)
lines = tsplot(low, plt.Axes.plot)
for l in lines:
self.assertTrue(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
@slow
def test_from_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot()
ax = high.plot()
expected_h = idxh.to_period().asi8.astype(np.float64)
expected_l = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544,
1549, 1553, 1558, 1562], dtype=np.float64)
for l in ax.get_lines():
self.assertTrue(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
self.assert_numpy_array_equal(xdata, expected_l)
else:
self.assert_numpy_array_equal(xdata, expected_h)
tm.close()
# tsplot
from pandas.tseries.plotting import tsplot
import matplotlib.pyplot as plt
tsplot(low, plt.Axes.plot)
lines = tsplot(high, plt.Axes.plot)
for l in lines:
self.assertTrue(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
self.assert_numpy_array_equal(xdata, expected_l)
else:
self.assert_numpy_array_equal(xdata, expected_h)
@slow
def test_from_resampling_area_line_mixed(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = DataFrame(np.random.rand(len(idxh), 3),
index=idxh, columns=[0, 1, 2])
low = DataFrame(np.random.rand(len(idxl), 3),
index=idxl, columns=[0, 1, 2])
# low to high
for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
ax = low.plot(kind=kind1, stacked=True)
ax = high.plot(kind=kind2, stacked=True, ax=ax)
# check low dataframe result
expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540,
1544, 1549, 1553, 1558, 1562],
dtype=np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
l = ax.lines[i]
self.assertEqual(PeriodIndex(l.get_xdata()).freq, idxh.freq)
self.assert_numpy_array_equal(l.get_xdata(orig=False),
expected_x)
# check stacked values are correct
expected_y += low[i].values
self.assert_numpy_array_equal(
l.get_ydata(orig=False), expected_y)
# check high dataframe result
expected_x = idxh.to_period().asi8.astype(np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
l = ax.lines[3 + i]
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq,
idxh.freq)
self.assert_numpy_array_equal(l.get_xdata(orig=False),
expected_x)
expected_y += high[i].values
self.assert_numpy_array_equal(l.get_ydata(orig=False),
expected_y)
# high to low
for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
ax = high.plot(kind=kind1, stacked=True)
ax = low.plot(kind=kind2, stacked=True, ax=ax)
# check high dataframe result
expected_x = idxh.to_period().asi8.astype(np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
l = ax.lines[i]
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq,
idxh.freq)
self.assert_numpy_array_equal(
l.get_xdata(orig=False), expected_x)
expected_y += high[i].values
self.assert_numpy_array_equal(
l.get_ydata(orig=False), expected_y)
# check low dataframe result
expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540,
1544, 1549, 1553, 1558, 1562],
dtype=np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
l = ax.lines[3 + i]
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq,
idxh.freq)
self.assert_numpy_array_equal(l.get_xdata(orig=False),
expected_x)
expected_y += low[i].values
self.assert_numpy_array_equal(l.get_ydata(orig=False),
expected_y)
@slow
def test_mixed_freq_second_millisecond(self):
# GH 7772, GH 7760
idxh = date_range('2014-07-01 09:00', freq='S', periods=50)
idxl = date_range('2014-07-01 09:00', freq='100L', periods=500)
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
# high to low
high.plot()
ax = low.plot()
self.assertEqual(len(ax.get_lines()), 2)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'L')
tm.close()
# low to high
low.plot()
ax = high.plot()
self.assertEqual(len(ax.get_lines()), 2)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'L')
@slow
def test_irreg_dtypes(self):
# date
idx = [date(2000, 1, 1), date(2000, 1, 5), date(2000, 1, 20)]
df = DataFrame(np.random.randn(len(idx), 3), Index(idx, dtype=object))
_check_plot_works(df.plot)
# np.datetime64
idx = date_range('1/1/2000', periods=10)
idx = idx[[0, 2, 5, 9]].asobject
df = DataFrame(np.random.randn(len(idx), 3), idx)
_check_plot_works(df.plot)
@slow
def test_time(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas])
df = DataFrame({'a': np.random.randn(len(ts)),
'b': np.random.randn(len(ts))},
index=ts)
ax = df.plot()
# verify tick labels
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S')
self.assertEqual(xp, rs)
# change xlim
ax.set_xlim('1:30', '5:00')
# check tick labels again
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S')
self.assertEqual(xp, rs)
@slow
def test_time_musec(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(microseconds=int(x))).time()
for x in deltas])
df = DataFrame({'a': np.random.randn(len(ts)),
'b': np.random.randn(len(ts))},
index=ts)
ax = df.plot()
# verify tick labels
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
# TODO: unused?
# us = int((t - int(t)) * 1e6)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S.%f')
self.assertEqual(xp, rs)
@slow
def test_secondary_upsample(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot()
ax = high.plot(secondary_y=True)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(l.get_xdata()).freq, 'D')
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
for l in ax.left_ax.get_lines():
self.assertEqual(PeriodIndex(l.get_xdata()).freq, 'D')
@slow
def test_secondary_legend(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
ax = fig.add_subplot(211)
# ts
df = tm.makeTimeDataFrame()
ax = df.plot(secondary_y=['A', 'B'])
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertEqual(leg.get_texts()[0].get_text(), 'A (right)')
self.assertEqual(leg.get_texts()[1].get_text(), 'B (right)')
self.assertEqual(leg.get_texts()[2].get_text(), 'C')
self.assertEqual(leg.get_texts()[3].get_text(), 'D')
self.assertIsNone(ax.right_ax.get_legend())
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
self.assertEqual(len(colors), 4)
plt.clf()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['A', 'C'], mark_right=False)
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertEqual(leg.get_texts()[0].get_text(), 'A')
self.assertEqual(leg.get_texts()[1].get_text(), 'B')
self.assertEqual(leg.get_texts()[2].get_text(), 'C')
self.assertEqual(leg.get_texts()[3].get_text(), 'D')
plt.clf()
ax = df.plot(kind='bar', secondary_y=['A'])
leg = ax.get_legend()
self.assertEqual(leg.get_texts()[0].get_text(), 'A (right)')
self.assertEqual(leg.get_texts()[1].get_text(), 'B')
plt.clf()
ax = df.plot(kind='bar', secondary_y=['A'], mark_right=False)
leg = ax.get_legend()
self.assertEqual(leg.get_texts()[0].get_text(), 'A')
self.assertEqual(leg.get_texts()[1].get_text(), 'B')
plt.clf()
ax = fig.add_subplot(211)
df = tm.makeTimeDataFrame()
ax = df.plot(secondary_y=['C', 'D'])
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertIsNone(ax.right_ax.get_legend())
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
self.assertEqual(len(colors), 4)
# non-ts
df = tm.makeDataFrame()
plt.clf()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['A', 'B'])
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertIsNone(ax.right_ax.get_legend())
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
self.assertEqual(len(colors), 4)
plt.clf()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['C', 'D'])
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertIsNone(ax.right_ax.get_legend())
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
self.assertEqual(len(colors), 4)
def test_format_date_axis(self):
rng = date_range('1/1/2012', periods=12, freq='M')
df = DataFrame(np.random.randn(len(rng), 3), rng)
ax = df.plot()
xaxis = ax.get_xaxis()
for l in xaxis.get_ticklabels():
if len(l.get_text()) > 0:
self.assertEqual(l.get_rotation(), 30)
@slow
def test_ax_plot(self):
import matplotlib.pyplot as plt
x = DatetimeIndex(start='2012-01-02', periods=10, freq='D')
y = lrange(len(x))
fig = plt.figure()
ax = fig.add_subplot(111)
lines = ax.plot(x, y, label='Y')
tm.assert_index_equal(DatetimeIndex(lines[0].get_xdata()), x)
@slow
def test_mpl_nopandas(self):
import matplotlib.pyplot as plt
dates = [date(2008, 12, 31), date(2009, 1, 31)]
values1 = np.arange(10.0, 11.0, 0.5)
values2 = np.arange(11.0, 12.0, 0.5)
kw = dict(fmt='-', lw=4)
plt.close('all')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot_date([x.toordinal() for x in dates], values1, **kw)
ax.plot_date([x.toordinal() for x in dates], values2, **kw)
line1, line2 = ax.get_lines()
exp = np.array([x.toordinal() for x in dates], dtype=np.float64)
tm.assert_numpy_array_equal(line1.get_xydata()[:, 0], exp)
exp = np.array([x.toordinal() for x in dates], dtype=np.float64)
tm.assert_numpy_array_equal(line2.get_xydata()[:, 0], exp)
@slow
def test_irregular_ts_shared_ax_xlim(self):
# GH 2960
ts = tm.makeTimeSeries()[:20]
ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
# plot the left section of the irregular series, then the right section
ax = ts_irregular[:5].plot()
ts_irregular[5:].plot(ax=ax)
# check that axis limits are correct
left, right = ax.get_xlim()
self.assertEqual(left, ts_irregular.index.min().toordinal())
self.assertEqual(right, ts_irregular.index.max().toordinal())
@slow
def test_secondary_y_non_ts_xlim(self):
# GH 3490 - non-timeseries with secondary y
index_1 = [1, 2, 3, 4]
index_2 = [5, 6, 7, 8]
s1 = Series(1, index=index_1)
s2 = Series(2, index=index_2)
ax = s1.plot()
left_before, right_before = ax.get_xlim()
s2.plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
self.assertEqual(left_before, left_after)
self.assertTrue(right_before < right_after)
@slow
def test_secondary_y_regular_ts_xlim(self):
# GH 3490 - regular-timeseries with secondary y
index_1 = date_range(start='2000-01-01', periods=4, freq='D')
index_2 = date_range(start='2000-01-05', periods=4, freq='D')
s1 = Series(1, index=index_1)
s2 = Series(2, index=index_2)
ax = s1.plot()
left_before, right_before = ax.get_xlim()
s2.plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
self.assertEqual(left_before, left_after)
self.assertTrue(right_before < right_after)
@slow
def test_secondary_y_mixed_freq_ts_xlim(self):
# GH 3490 - mixed frequency timeseries with secondary y
rng = date_range('2000-01-01', periods=10000, freq='min')
ts = Series(1, index=rng)
ax = ts.plot()
left_before, right_before = ax.get_xlim()
ts.resample('D').mean().plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
# a downsample should not have changed either limit
self.assertEqual(left_before, left_after)
self.assertEqual(right_before, right_after)
@slow
def test_secondary_y_irregular_ts_xlim(self):
# GH 3490 - irregular-timeseries with secondary y
ts = tm.makeTimeSeries()[:20]
ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
ax = ts_irregular[:5].plot()
# plot higher-x values on secondary axis
ts_irregular[5:].plot(secondary_y=True, ax=ax)
# ensure secondary limits aren't overwritten by plot on primary
ts_irregular[:5].plot(ax=ax)
left, right = ax.get_xlim()
self.assertEqual(left, ts_irregular.index.min().toordinal())
self.assertEqual(right, ts_irregular.index.max().toordinal())
def test_plot_outofbounds_datetime(self):
# GH 2579 - checking this does not raise
values = [date(1677, 1, 1), date(1677, 1, 2)]
self.plt.plot(values)
values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)]
self.plt.plot(values)
def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
import matplotlib.pyplot as plt
fig = plt.gcf()
try:
plt.clf()
ax = fig.add_subplot(211)
orig_ax = kwargs.pop('ax', plt.gca())
orig_axfreq = getattr(orig_ax, 'freq', None)
ret = f(*args, **kwargs)
assert ret is not None # do something more intelligent
ax = kwargs.pop('ax', plt.gca())
if series is not None:
dfreq = series.index.freq
if isinstance(dfreq, DateOffset):
dfreq = dfreq.rule_code
if orig_axfreq is None:
assert ax.freq == dfreq
if freq is not None and orig_axfreq is None:
assert ax.freq == freq
ax = fig.add_subplot(212)
try:
kwargs['ax'] = ax
ret = f(*args, **kwargs)
assert ret is not None # do something more intelligent
except Exception:
pass
with ensure_clean(return_filelike=True) as path:
plt.savefig(path)
finally:
plt.close(fig)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
|
from __future__ import absolute_import, division
import copy
import functools
import heapq
import itertools
import logging
import random
import threading
# selectors in stdlib as of py3.4
try:
import selectors # pylint: disable=import-error
except ImportError:
# vendored backport module
from .vendor import selectors34 as selectors
import socket
import time
from kafka.vendor import six
from .cluster import ClusterMetadata
from .conn import BrokerConnection, ConnectionStates, collect_hosts, get_ip_port_afi
from . import errors as Errors
from .future import Future
from .metrics import AnonMeasurable
from .metrics.stats import Avg, Count, Rate
from .metrics.stats.rate import TimeUnit
from .protocol.metadata import MetadataRequest
from .protocol.produce import ProduceRequest
from .vendor import socketpair
from .version import __version__
if six.PY2:
ConnectionError = None
log = logging.getLogger('kafka.client')
class KafkaClient(object):
"""
A network client for asynchronous request/response network I/O.
This is an internal class used to implement the user-facing producer and
consumer clients.
This class is not thread-safe!
Attributes:
cluster (:any:`ClusterMetadata`): Local cache of cluster metadata, retrieved
via MetadataRequests during :meth:`.poll`.
Keyword Arguments:
bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
strings) that the consumer should contact to bootstrap initial
cluster metadata. This does not have to be the full node list.
It just needs to have at least one broker that will respond to a
Metadata API Request. Default port is 9092. If no servers are
specified, will default to localhost:9092.
client_id (str): a name for this client. This string is passed in
each request to servers and can be used to identify specific
server-side log entries that correspond to this client. Also
submitted to GroupCoordinator for logging with respect to
consumer group administration. Default: 'kafka-python-{version}'
reconnect_backoff_ms (int): The amount of time in milliseconds to
wait before attempting to reconnect to a given host.
Default: 50.
request_timeout_ms (int): Client request timeout in milliseconds.
Default: 40000.
retry_backoff_ms (int): Milliseconds to backoff when retrying on
errors. Default: 100.
max_in_flight_requests_per_connection (int): Requests are pipelined
to kafka brokers up to this number of maximum requests per
broker connection. Default: 5.
receive_buffer_bytes (int): The size of the TCP receive buffer
(SO_RCVBUF) to use when reading data. Default: None (relies on
system defaults). Java client defaults to 32768.
send_buffer_bytes (int): The size of the TCP send buffer
(SO_SNDBUF) to use when sending data. Default: None (relies on
system defaults). Java client defaults to 131072.
socket_options (list): List of tuple-arguments to socket.setsockopt
to apply to broker connection sockets. Default:
[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
metadata_max_age_ms (int): The period of time in milliseconds after
which we force a refresh of metadata even if we haven't seen any
partition leadership changes to proactively discover any new
brokers or partitions. Default: 300000
security_protocol (str): Protocol used to communicate with brokers.
Valid values are: PLAINTEXT, SSL. Default: PLAINTEXT.
ssl_context (ssl.SSLContext): pre-configured SSLContext for wrapping
socket connections. If provided, all other ssl_* configurations
will be ignored. Default: None.
ssl_check_hostname (bool): flag to configure whether ssl handshake
should verify that the certificate matches the broker's hostname.
default: true.
ssl_cafile (str): optional filename of ca file to use in certificate
verification. default: none.
ssl_certfile (str): optional filename of file in pem format containing
the client certificate, as well as any ca certificates needed to
establish the certificate's authenticity. default: none.
ssl_keyfile (str): optional filename containing the client private key.
default: none.
ssl_password (str): optional password to be used when loading the
certificate chain. default: none.
ssl_crlfile (str): optional filename containing the CRL to check for
certificate expiration. By default, no CRL check is done. When
providing a file, only the leaf certificate will be checked against
this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
default: none.
api_version (tuple): Specify which Kafka API version to use. If set
to None, KafkaClient will attempt to infer the broker version by
probing various APIs. For the full list of supported versions,
see KafkaClient.API_VERSIONS. Default: None
api_version_auto_timeout_ms (int): number of milliseconds to throw a
timeout exception from the constructor when checking the broker
api version. Only applies if api_version is None
selector (selectors.BaseSelector): Provide a specific selector
implementation to use for I/O multiplexing.
Default: selectors.DefaultSelector
metrics (kafka.metrics.Metrics): Optionally provide a metrics
instance for capturing network IO stats. Default: None.
metric_group_prefix (str): Prefix for metric names. Default: ''
sasl_mechanism (str): string picking sasl mechanism when security_protocol
is SASL_PLAINTEXT or SASL_SSL. Currently only PLAIN is supported.
Default: None
sasl_plain_username (str): username for sasl PLAIN authentication.
Default: None
sasl_plain_password (str): password for sasl PLAIN authentication.
Default: None
"""
DEFAULT_CONFIG = {
'bootstrap_servers': 'localhost',
'client_id': 'kafka-python-' + __version__,
'request_timeout_ms': 40000,
'reconnect_backoff_ms': 50,
'max_in_flight_requests_per_connection': 5,
'receive_buffer_bytes': None,
'send_buffer_bytes': None,
'socket_options': [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
'retry_backoff_ms': 100,
'metadata_max_age_ms': 300000,
'security_protocol': 'PLAINTEXT',
'ssl_context': None,
'ssl_check_hostname': True,
'ssl_cafile': None,
'ssl_certfile': None,
'ssl_keyfile': None,
'ssl_password': None,
'ssl_crlfile': None,
'api_version': None,
'api_version_auto_timeout_ms': 2000,
'selector': selectors.DefaultSelector,
'metrics': None,
'metric_group_prefix': '',
'sasl_mechanism': None,
'sasl_plain_username': None,
'sasl_plain_password': None,
}
API_VERSIONS = [
(0, 10, 1),
(0, 10, 0),
(0, 10),
(0, 9),
(0, 8, 2),
(0, 8, 1),
(0, 8, 0)
]
def __init__(self, **configs):
self.config = copy.copy(self.DEFAULT_CONFIG)
for key in self.config:
if key in configs:
self.config[key] = configs[key]
if self.config['api_version'] is not None:
assert self.config['api_version'] in self.API_VERSIONS, (
'api_version [{0}] must be one of: {1}'.format(
self.config['api_version'], str(self.API_VERSIONS)))
self.cluster = ClusterMetadata(**self.config)
self._topics = set() # empty set will fetch all topic metadata
self._metadata_refresh_in_progress = False
self._last_no_node_available_ms = 0
self._selector = self.config['selector']()
self._conns = {}
self._connecting = set()
self._refresh_on_disconnects = True
self._delayed_tasks = DelayedTaskQueue()
self._last_bootstrap = 0
self._bootstrap_fails = 0
self._wake_r, self._wake_w = socket.socketpair()
self._wake_r.setblocking(False)
self._wake_lock = threading.Lock()
self._selector.register(self._wake_r, selectors.EVENT_READ)
self._closed = False
self._sensors = None
if self.config['metrics']:
self._sensors = KafkaClientMetrics(self.config['metrics'],
self.config['metric_group_prefix'],
self._conns)
self._bootstrap(collect_hosts(self.config['bootstrap_servers']))
# Check Broker Version if not set explicitly
if self.config['api_version'] is None:
check_timeout = self.config['api_version_auto_timeout_ms'] / 1000
self.config['api_version'] = self.check_version(timeout=check_timeout)
def _bootstrap(self, hosts):
log.info('Bootstrapping cluster metadata from %s', hosts)
# Exponential backoff if bootstrap fails
backoff_ms = self.config['reconnect_backoff_ms'] * 2 ** self._bootstrap_fails
next_at = self._last_bootstrap + backoff_ms / 1000.0
self._refresh_on_disconnects = False
now = time.time()
if next_at > now:
log.debug("Sleeping %0.4f before bootstrapping again", next_at - now)
time.sleep(next_at - now)
self._last_bootstrap = time.time()
if self.config['api_version'] is None or self.config['api_version'] < (0, 10):
metadata_request = MetadataRequest[0]([])
else:
metadata_request = MetadataRequest[1](None)
for host, port, afi in hosts:
log.debug("Attempting to bootstrap via node at %s:%s", host, port)
cb = functools.partial(self._conn_state_change, 'bootstrap')
bootstrap = BrokerConnection(host, port, afi,
state_change_callback=cb,
node_id='bootstrap',
**self.config)
bootstrap.connect()
while bootstrap.connecting():
bootstrap.connect()
if not bootstrap.connected():
bootstrap.close()
continue
future = bootstrap.send(metadata_request)
while not future.is_done:
bootstrap.recv()
if future.failed():
bootstrap.close()
continue
self.cluster.update_metadata(future.value)
log.info('Bootstrap succeeded: found %d brokers and %d topics.',
len(self.cluster.brokers()), len(self.cluster.topics()))
# A cluster with no topics can return no broker metadata
# in that case, we should keep the bootstrap connection
if not len(self.cluster.brokers()):
self._conns['bootstrap'] = bootstrap
else:
bootstrap.close()
self._bootstrap_fails = 0
break
# No bootstrap found...
else:
log.error('Unable to bootstrap from %s', hosts)
# Max exponential backoff is 2^12 (x4096): 50ms -> ~200s
self._bootstrap_fails = min(self._bootstrap_fails + 1, 12)
self._refresh_on_disconnects = True
def _can_connect(self, node_id):
if node_id not in self._conns:
if self.cluster.broker_metadata(node_id):
return True
return False
conn = self._conns[node_id]
return conn.disconnected() and not conn.blacked_out()
def _conn_state_change(self, node_id, conn):
if conn.connecting():
# SSL connections can enter this state 2x (second during Handshake)
if node_id not in self._connecting:
self._connecting.add(node_id)
self._selector.register(conn._sock, selectors.EVENT_WRITE)
elif conn.connected():
log.debug("Node %s connected", node_id)
if node_id in self._connecting:
self._connecting.remove(node_id)
try:
self._selector.unregister(conn._sock)
except KeyError:
pass
self._selector.register(conn._sock, selectors.EVENT_READ, conn)
if self._sensors:
self._sensors.connection_created.record()
if 'bootstrap' in self._conns and node_id != 'bootstrap':
bootstrap = self._conns.pop('bootstrap')
# XXX: make conn.close() require error to cause refresh
self._refresh_on_disconnects = False
bootstrap.close()
self._refresh_on_disconnects = True
# Connection failures imply that our metadata is stale, so let's refresh
elif conn.state is ConnectionStates.DISCONNECTING:
if node_id in self._connecting:
self._connecting.remove(node_id)
try:
self._selector.unregister(conn._sock)
except KeyError:
pass
if self._sensors:
self._sensors.connection_closed.record()
if self._refresh_on_disconnects and not self._closed:
log.warning("Node %s connection failed -- refreshing metadata", node_id)
self.cluster.request_update()
def _maybe_connect(self, node_id):
"""Idempotent non-blocking connection attempt to the given node id."""
if node_id not in self._conns:
broker = self.cluster.broker_metadata(node_id)
assert broker, 'Broker id %s not in current metadata' % node_id
log.debug("Initiating connection to node %s at %s:%s",
node_id, broker.host, broker.port)
host, port, afi = get_ip_port_afi(broker.host)
cb = functools.partial(self._conn_state_change, node_id)
self._conns[node_id] = BrokerConnection(host, broker.port, afi,
state_change_callback=cb,
node_id=node_id,
**self.config)
conn = self._conns[node_id]
if conn.connected():
return True
conn.connect()
return conn.connected()
def ready(self, node_id, metadata_priority=True):
"""Check whether a node is connected and ok to send more requests.
Arguments:
node_id (int): the id of the node to check
metadata_priority (bool): Mark node as not-ready if a metadata
refresh is required. Default: True
Returns:
bool: True if we are ready to send to the given node
"""
self._maybe_connect(node_id)
return self.is_ready(node_id, metadata_priority=metadata_priority)
def connected(self, node_id):
"""Return True iff the node_id is connected."""
if node_id not in self._conns:
return False
return self._conns[node_id].connected()
def close(self, node_id=None):
"""Close one or all broker connections.
Arguments:
node_id (int, optional): the id of the node to close
"""
if node_id is None:
self._closed = True
for conn in self._conns.values():
conn.close()
self._wake_r.close()
self._wake_w.close()
self._selector.close()
elif node_id in self._conns:
self._conns[node_id].close()
else:
log.warning("Node %s not found in current connection list; skipping", node_id)
return
def is_disconnected(self, node_id):
"""Check whether the node connection has been disconnected or failed.
A disconnected node has either been closed or has failed. Connection
failures are usually transient and can be resumed in the next ready()
call, but there are cases where transient failures need to be caught
and re-acted upon.
Arguments:
node_id (int): the id of the node to check
Returns:
bool: True iff the node exists and is disconnected
"""
if node_id not in self._conns:
return False
return self._conns[node_id].disconnected()
def connection_delay(self, node_id):
"""
Return the number of milliseconds to wait, based on the connection
state, before attempting to send data. When disconnected, this respects
the reconnect backoff time. When connecting, returns 0 to allow
non-blocking connect to finish. When connected, returns a very large
number to handle slow/stalled connections.
Arguments:
node_id (int): The id of the node to check
Returns:
int: The number of milliseconds to wait.
"""
if node_id not in self._conns:
return 0
conn = self._conns[node_id]
# conn.last_attempt is in epoch seconds; convert the elapsed time to milliseconds
time_waited_ms = (time.time() - (conn.last_attempt or 0)) * 1000
if conn.disconnected():
return max(self.config['reconnect_backoff_ms'] - time_waited_ms, 0)
elif conn.connecting():
return 0
else:
return 999999999
def is_ready(self, node_id, metadata_priority=True):
"""Check whether a node is ready to send more requests.
In addition to connection-level checks, this method also is used to
block additional requests from being sent during a metadata refresh.
Arguments:
node_id (int): id of the node to check
metadata_priority (bool): Mark node as not-ready if a metadata
refresh is required. Default: True
Returns:
bool: True if the node is ready and metadata is not refreshing
"""
if not self._can_send_request(node_id):
return False
# if we need to update our metadata now declare all requests unready to
# make metadata requests first priority
if metadata_priority:
if self._metadata_refresh_in_progress:
return False
if self.cluster.ttl() == 0:
return False
return True
def _can_send_request(self, node_id):
if node_id not in self._conns:
return False
conn = self._conns[node_id]
return conn.connected() and conn.can_send_more()
def send(self, node_id, request):
"""Send a request to a specific node.
Arguments:
node_id (int): destination node
request (Struct): request object (not-encoded)
Raises:
AssertionError: if node_id is not in current cluster metadata
Returns:
Future: resolves to Response struct or Error
"""
if not self._maybe_connect(node_id):
return Future().failure(Errors.NodeNotReadyError(node_id))
return self._conns[node_id].send(request)
def poll(self, timeout_ms=None, future=None, sleep=True, delayed_tasks=True):
"""Try to read and write to sockets.
This method will also attempt to complete node connections, refresh
stale metadata, and run previously-scheduled tasks.
Arguments:
timeout_ms (int, optional): maximum amount of time to wait (in ms)
for at least one response. Must be non-negative. The actual
timeout will be the minimum of timeout, request timeout and
metadata timeout. Default: request_timeout_ms
future (Future, optional): if provided, blocks until future.is_done
sleep (bool): if True and there is nothing to do (no connections
or requests in flight), will sleep for duration timeout before
returning empty results. Default: True.
Returns:
list: responses received (can be empty)
"""
if timeout_ms is None:
timeout_ms = self.config['request_timeout_ms']
responses = []
# Loop for futures, break after first loop if None
while True:
# Attempt to complete pending connections
for node_id in list(self._connecting):
self._maybe_connect(node_id)
# Send a metadata request if needed
metadata_timeout_ms = self._maybe_refresh_metadata()
# Send scheduled tasks
if delayed_tasks:
for task, task_future in self._delayed_tasks.pop_ready():
try:
result = task()
except Exception as e:
log.error("Task %s failed: %s", task, e)
task_future.failure(e)
else:
task_future.success(result)
# If we got a future that is already done, don't block in _poll
if future and future.is_done:
timeout = 0
else:
timeout = min(
timeout_ms,
metadata_timeout_ms,
self._delayed_tasks.next_at() * 1000,
self.config['request_timeout_ms'])
timeout = max(0, timeout / 1000.0) # avoid negative timeouts
responses.extend(self._poll(timeout, sleep=sleep))
# If all we had was a timeout (future is None) - only do one poll
# If we do have a future, we keep looping until it is done
if not future or future.is_done:
break
return responses
def _poll(self, timeout, sleep=True):
# select on reads across all connected sockets, blocking up to timeout
assert self.in_flight_request_count() > 0 or self._connecting or sleep
responses = []
processed = set()
start_select = time.time()
ready = self._selector.select(timeout)
end_select = time.time()
if self._sensors:
self._sensors.select_time.record((end_select - start_select) * 1000000000)
for key, events in ready:
if key.fileobj is self._wake_r:
self._clear_wake_fd()
continue
elif not (events & selectors.EVENT_READ):
continue
conn = key.data
processed.add(conn)
if not conn.in_flight_requests:
# if we got an EVENT_READ but there were no in-flight requests, one of
# two things has happened:
#
# 1. The remote end closed the connection (because it died, or because
# a firewall timed out, or whatever)
# 2. The protocol is out of sync.
#
# either way, we can no longer safely use this connection
#
# Do a 1-byte read to check the protocol didn't get out of sync, and then close the conn
try:
unexpected_data = key.fileobj.recv(1)
if unexpected_data: # anything other than a 0-byte read means protocol issues
log.warning('Protocol out of sync on %r, closing', conn)
except socket.error:
pass
conn.close(Errors.ConnectionError('Socket EVENT_READ without in-flight-requests'))
continue
# Accumulate as many responses as the connection has pending
while conn.in_flight_requests:
response = conn.recv() # Note: conn.recv runs callbacks / errbacks
# Incomplete responses are buffered internally
# while conn.in_flight_requests retains the request
if not response:
break
responses.append(response)
# Check for additional pending SSL bytes
if self.config['security_protocol'] in ('SSL', 'SASL_SSL'):
# TODO: optimize
for conn in self._conns.values():
if conn not in processed and conn.connected() and conn._sock.pending():
response = conn.recv()
if response:
responses.append(response)
for conn in six.itervalues(self._conns):
if conn.requests_timed_out():
log.warning('%s timed out after %s ms. Closing connection.',
conn, conn.config['request_timeout_ms'])
conn.close(error=Errors.RequestTimedOutError(
'Request timed out after %s ms' %
conn.config['request_timeout_ms']))
if self._sensors:
self._sensors.io_time.record((time.time() - end_select) * 1000000000)
return responses
def in_flight_request_count(self, node_id=None):
"""Get the number of in-flight requests for a node or all nodes.
Arguments:
node_id (int, optional): a specific node to check. If unspecified,
return the total for all nodes
Returns:
int: pending in-flight requests for the node, or all nodes if None
"""
if node_id is not None:
if node_id not in self._conns:
return 0
return len(self._conns[node_id].in_flight_requests)
else:
return sum([len(conn.in_flight_requests) for conn in self._conns.values()])
def least_loaded_node(self):
"""Choose the node with fewest outstanding requests, with fallbacks.
This method will prefer a node with an existing connection, but will
potentially choose a node for which we don't yet have a connection if
all existing connections are in use. This method will never choose a
node that was disconnected within the reconnect backoff period.
If all else fails, the method will attempt to bootstrap again using the
bootstrap_servers list.
Returns:
node_id or None if no suitable node was found
"""
nodes = [broker.nodeId for broker in self.cluster.brokers()]
random.shuffle(nodes)
inflight = float('inf')
found = None
for node_id in nodes:
conn = self._conns.get(node_id)
connected = conn is not None and conn.connected()
blacked_out = conn is not None and conn.blacked_out()
curr_inflight = len(conn.in_flight_requests) if conn else 0
if connected and curr_inflight == 0:
# if we find an established connection
# with no in-flight requests, we can stop right away
return node_id
elif not blacked_out and curr_inflight < inflight:
# otherwise if this is the best we have found so far, record that
inflight = curr_inflight
found = node_id
if found is not None:
return found
# some broker versions return an empty list of broker metadata
# if there are no topics created yet. the bootstrap process
# should detect this and keep a 'bootstrap' node alive until
# a non-bootstrap node is connected and non-empty broker
# metadata is available
elif 'bootstrap' in self._conns:
return 'bootstrap'
# Last option: try to bootstrap again
# this should only happen if no prior bootstrap has been successful
log.error('No nodes found in metadata -- retrying bootstrap')
self._bootstrap(collect_hosts(self.config['bootstrap_servers']))
return None
def set_topics(self, topics):
"""Set specific topics to track for metadata.
Arguments:
topics (list of str): topics to check for metadata
Returns:
Future: resolves after metadata request/response
"""
if set(topics).difference(self._topics):
future = self.cluster.request_update()
else:
future = Future().success(set(topics))
self._topics = set(topics)
return future
def add_topic(self, topic):
"""Add a topic to the list of topics tracked via metadata.
Arguments:
topic (str): topic to track
Returns:
Future: resolves after metadata request/response
"""
if topic in self._topics:
return Future().success(set(self._topics))
self._topics.add(topic)
return self.cluster.request_update()
# request metadata update on disconnect and timeout
def _maybe_refresh_metadata(self):
"""Send a metadata request if needed.
Returns:
int: milliseconds until next refresh
"""
ttl = self.cluster.ttl()
next_reconnect_ms = self._last_no_node_available_ms + self.cluster.refresh_backoff()
next_reconnect_ms = max(next_reconnect_ms - time.time() * 1000, 0)
wait_for_in_progress_ms = 9999999999 if self._metadata_refresh_in_progress else 0
timeout = max(ttl, next_reconnect_ms, wait_for_in_progress_ms)
if timeout == 0:
node_id = self.least_loaded_node()
if node_id is None:
log.debug("Give up sending metadata request since no node is available")
# mark the timestamp for no node available to connect
self._last_no_node_available_ms = time.time() * 1000
return timeout
if self._can_send_request(node_id):
topics = list(self._topics)
if self.cluster.need_all_topic_metadata or not topics:
topics = [] if self.config['api_version'] < (0, 10) else None
api_version = 0 if self.config['api_version'] < (0, 10) else 1
request = MetadataRequest[api_version](topics)
log.debug("Sending metadata request %s to node %s", request, node_id)
future = self.send(node_id, request)
future.add_callback(self.cluster.update_metadata)
future.add_errback(self.cluster.failed_update)
self._metadata_refresh_in_progress = True
def refresh_done(val_or_error):
self._metadata_refresh_in_progress = False
future.add_callback(refresh_done)
future.add_errback(refresh_done)
elif self._can_connect(node_id):
log.debug("Initializing connection to node %s for metadata request", node_id)
self._maybe_connect(node_id)
# If _maybe_connect failed immediately, this node will be put into blackout and we
# should allow immediately retrying in case there is another candidate node. If it
# is still connecting, the worst case is that we end up setting a longer timeout
# on the next round and then wait for the response.
else:
# connected, but can't send more OR connecting
# In either case, we just need to wait for a network event to let us know the selected
# connection might be usable again.
self._last_no_node_available_ms = time.time() * 1000
return timeout
def schedule(self, task, at):
"""Schedule a new task to be executed at the given time.
This is "best-effort" scheduling and should only be used for coarse
synchronization. A task cannot be scheduled for multiple times
simultaneously; any previously scheduled instance of the same task
will be cancelled.
Arguments:
task (callable): task to be scheduled
at (float or int): epoch seconds when task should run
Returns:
Future: resolves to result of task call, or exception if raised
"""
return self._delayed_tasks.add(task, at)
def unschedule(self, task):
"""Unschedule a task.
This will remove all instances of the task from the task queue.
This is a no-op if the task is not scheduled.
Arguments:
task (callable): task to be unscheduled
"""
self._delayed_tasks.remove(task)
def check_version(self, node_id=None, timeout=2, strict=False):
"""Attempt to guess the version of a Kafka broker.
Note: It is possible that this method blocks longer than the
specified timeout. This can happen if the entire cluster
is down and the client enters a bootstrap backoff sleep.
This is only possible if node_id is None.
Returns: version tuple, i.e. (0, 10), (0, 9), (0, 8, 2), ...
Raises:
NodeNotReadyError (if node_id is provided)
NoBrokersAvailable (if node_id is None)
UnrecognizedBrokerVersion: please file bug if seen!
AssertionError (if strict=True): please file bug if seen!
"""
end = time.time() + timeout
while time.time() < end:
# It is possible that least_loaded_node falls back to bootstrap,
# which can block for an increasing backoff period
try_node = node_id or self.least_loaded_node()
if try_node is None:
raise Errors.NoBrokersAvailable()
self._maybe_connect(try_node)
conn = self._conns[try_node]
# We will intentionally cause socket failures
# These should not trigger metadata refresh
self._refresh_on_disconnects = False
try:
remaining = end - time.time()
version = conn.check_version(timeout=remaining, strict=strict)
return version
except Errors.NodeNotReadyError:
# Only raise to user if this is a node-specific request
if node_id is not None:
raise
finally:
self._refresh_on_disconnects = True
# Timeout
else:
raise Errors.NoBrokersAvailable()
def wakeup(self):
with self._wake_lock:
if self._wake_w.send(b'x') != 1:
log.warning('Unable to send to wakeup socket!')
def _clear_wake_fd(self):
# reading from wake socket should only happen in a single thread
while True:
try:
self._wake_r.recv(1024)
except socket.error:
break
class DelayedTaskQueue(object):
# see https://docs.python.org/2/library/heapq.html
def __init__(self):
self._tasks = [] # list of entries arranged in a heap
self._task_map = {} # mapping of tasks to entries
self._counter = itertools.count() # unique sequence count
def add(self, task, at):
"""Add a task to run at a later time.
Arguments:
task: can be anything, but generally a callable
at (float or int): epoch seconds to schedule task
Returns:
Future: a future that will be returned with the task when ready
"""
if task in self._task_map:
self.remove(task)
count = next(self._counter)
future = Future()
entry = [at, count, (task, future)]
self._task_map[task] = entry
heapq.heappush(self._tasks, entry)
return future
def remove(self, task):
"""Remove a previously scheduled task.
Raises:
KeyError: if task is not found
"""
entry = self._task_map.pop(task)
task, future = entry[-1]
future.failure(Errors.Cancelled)
entry[-1] = 'REMOVED'
def _drop_removed(self):
while self._tasks and self._tasks[0][-1] == 'REMOVED':
at, count, task = heapq.heappop(self._tasks)
def _pop_next(self):
self._drop_removed()
if not self._tasks:
raise KeyError('pop from an empty DelayedTaskQueue')
_, _, maybe_task = heapq.heappop(self._tasks)
if maybe_task == 'REMOVED':
raise ValueError('popped a removed task from queue - bug')
else:
task, future = maybe_task
del self._task_map[task]
return (task, future)
def next_at(self):
"""Number of seconds until next task is ready."""
self._drop_removed()
if not self._tasks:
return 9999999999
else:
return max(self._tasks[0][0] - time.time(), 0)
def pop_ready(self):
"""Pop and return a list of all ready (task, future) tuples"""
ready_tasks = []
while self._tasks and self._tasks[0][0] < time.time():
try:
task = self._pop_next()
except KeyError:
break
ready_tasks.append(task)
return ready_tasks
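# Illustrative sketch (not part of the library): how DelayedTaskQueue is
# meant to be driven; the callable and delay below are made-up values.
#
#   queue = DelayedTaskQueue()
#   queue.add(lambda: 'done', time.time() + 0.01)   # schedule ~10ms out
#   time.sleep(0.02)
#   for task, future in queue.pop_ready():          # ready (task, future) pairs
#       future.success(task())                      # resolve with the task result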
class KafkaClientMetrics(object):
def __init__(self, metrics, metric_group_prefix, conns):
self.metrics = metrics
self.metric_group_name = metric_group_prefix + '-metrics'
self.connection_closed = metrics.sensor('connections-closed')
self.connection_closed.add(metrics.metric_name(
'connection-close-rate', self.metric_group_name,
'Connections closed per second in the window.'), Rate())
self.connection_created = metrics.sensor('connections-created')
self.connection_created.add(metrics.metric_name(
'connection-creation-rate', self.metric_group_name,
'New connections established per second in the window.'), Rate())
self.select_time = metrics.sensor('select-time')
self.select_time.add(metrics.metric_name(
'select-rate', self.metric_group_name,
'Number of times the I/O layer checked for new I/O to perform per'
' second'), Rate(sampled_stat=Count()))
self.select_time.add(metrics.metric_name(
'io-wait-time-ns-avg', self.metric_group_name,
'The average length of time the I/O thread spent waiting for a'
' socket ready for reads or writes in nanoseconds.'), Avg())
self.select_time.add(metrics.metric_name(
'io-wait-ratio', self.metric_group_name,
'The fraction of time the I/O thread spent waiting.'),
Rate(time_unit=TimeUnit.NANOSECONDS))
self.io_time = metrics.sensor('io-time')
self.io_time.add(metrics.metric_name(
'io-time-ns-avg', self.metric_group_name,
'The average length of time for I/O per select call in nanoseconds.'),
Avg())
self.io_time.add(metrics.metric_name(
'io-ratio', self.metric_group_name,
'The fraction of time the I/O thread spent doing I/O'),
Rate(time_unit=TimeUnit.NANOSECONDS))
metrics.add_metric(metrics.metric_name(
'connection-count', self.metric_group_name,
'The current number of active connections.'), AnonMeasurable(
lambda config, now: len(conns)))
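# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the library API): driving the
# client directly against a broker. The bootstrap address and topic name are
# assumptions; an unreachable broker will surface as connection errors.
if __name__ == '__main__':
    client = KafkaClient(bootstrap_servers='localhost:9092',
                         request_timeout_ms=10000)
    # Track a topic and wait for the resulting metadata refresh to complete.
    future = client.add_topic('example-topic')
    client.poll(future=future)  # poll() keeps looping until the future resolves
    node_id = client.least_loaded_node()
    print('least-loaded node: %s, in-flight requests: %d'
          % (node_id, client.in_flight_request_count()))
    client.close()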
|
|
# -*- coding: utf-8 -*-
import unittest
from mongoengine import *
from mongoengine import signals
signal_output = []
class SignalTests(unittest.TestCase):
"""
Testing signals before/after saving and deleting.
"""
def get_signal_output(self, fn, *args, **kwargs):
# Flush any existing signal output
global signal_output
signal_output = []
fn(*args, **kwargs)
return signal_output
def setUp(self):
connect(db='mongoenginetest')
class Author(Document):
name = StringField()
def __unicode__(self):
return self.name
@classmethod
def pre_init(cls, sender, document, *args, **kwargs):
signal_output.append('pre_init signal, %s' % cls.__name__)
signal_output.append(str(kwargs['values']))
@classmethod
def post_init(cls, sender, document, **kwargs):
signal_output.append('post_init signal, %s' % document)
@classmethod
def pre_save(cls, sender, document, **kwargs):
signal_output.append('pre_save signal, %s' % document)
@classmethod
def post_save(cls, sender, document, **kwargs):
signal_output.append('post_save signal, %s' % document)
if 'created' in kwargs:
if kwargs['created']:
signal_output.append('Is created')
else:
signal_output.append('Is updated')
@classmethod
def pre_delete(cls, sender, document, **kwargs):
signal_output.append('pre_delete signal, %s' % document)
@classmethod
def post_delete(cls, sender, document, **kwargs):
signal_output.append('post_delete signal, %s' % document)
@classmethod
def pre_bulk_insert(cls, sender, documents, **kwargs):
signal_output.append('pre_bulk_insert signal, %s' % documents)
@classmethod
def post_bulk_insert(cls, sender, documents, **kwargs):
signal_output.append('post_bulk_insert signal, %s' % documents)
if kwargs.get('loaded', False):
signal_output.append('Is loaded')
else:
signal_output.append('Not loaded')
self.Author = Author
class Another(Document):
name = StringField()
def __unicode__(self):
return self.name
@classmethod
def pre_init(cls, sender, document, **kwargs):
signal_output.append('pre_init Another signal, %s' % cls.__name__)
signal_output.append(str(kwargs['values']))
@classmethod
def post_init(cls, sender, document, **kwargs):
signal_output.append('post_init Another signal, %s' % document)
@classmethod
def pre_save(cls, sender, document, **kwargs):
signal_output.append('pre_save Another signal, %s' % document)
@classmethod
def post_save(cls, sender, document, **kwargs):
signal_output.append('post_save Another signal, %s' % document)
if 'created' in kwargs:
if kwargs['created']:
signal_output.append('Is created')
else:
signal_output.append('Is updated')
@classmethod
def pre_delete(cls, sender, document, **kwargs):
signal_output.append('pre_delete Another signal, %s' % document)
@classmethod
def post_delete(cls, sender, document, **kwargs):
signal_output.append('post_delete Another signal, %s' % document)
self.Another = Another
# Save up the number of connected signals so that we can check at the end
# that all the signals we register get properly unregistered
self.pre_signals = (
len(signals.pre_init.receivers),
len(signals.post_init.receivers),
len(signals.pre_save.receivers),
len(signals.post_save.receivers),
len(signals.pre_delete.receivers),
len(signals.post_delete.receivers),
len(signals.pre_bulk_insert.receivers),
len(signals.post_bulk_insert.receivers),
)
signals.pre_init.connect(Author.pre_init, sender=Author)
signals.post_init.connect(Author.post_init, sender=Author)
signals.pre_save.connect(Author.pre_save, sender=Author)
signals.post_save.connect(Author.post_save, sender=Author)
signals.pre_delete.connect(Author.pre_delete, sender=Author)
signals.post_delete.connect(Author.post_delete, sender=Author)
signals.pre_bulk_insert.connect(Author.pre_bulk_insert, sender=Author)
signals.post_bulk_insert.connect(Author.post_bulk_insert, sender=Author)
signals.pre_init.connect(Another.pre_init, sender=Another)
signals.post_init.connect(Another.post_init, sender=Another)
signals.pre_save.connect(Another.pre_save, sender=Another)
signals.post_save.connect(Another.post_save, sender=Another)
signals.pre_delete.connect(Another.pre_delete, sender=Another)
signals.post_delete.connect(Another.post_delete, sender=Another)
def tearDown(self):
signals.pre_init.disconnect(self.Author.pre_init)
signals.post_init.disconnect(self.Author.post_init)
signals.post_delete.disconnect(self.Author.post_delete)
signals.pre_delete.disconnect(self.Author.pre_delete)
signals.post_save.disconnect(self.Author.post_save)
signals.pre_save.disconnect(self.Author.pre_save)
signals.pre_bulk_insert.disconnect(self.Author.pre_bulk_insert)
signals.post_bulk_insert.disconnect(self.Author.post_bulk_insert)
signals.pre_init.disconnect(self.Another.pre_init)
signals.post_init.disconnect(self.Another.post_init)
signals.post_delete.disconnect(self.Another.post_delete)
signals.pre_delete.disconnect(self.Another.pre_delete)
signals.post_save.disconnect(self.Another.post_save)
signals.pre_save.disconnect(self.Another.pre_save)
# Check that all our signals got disconnected properly.
post_signals = (
len(signals.pre_init.receivers),
len(signals.post_init.receivers),
len(signals.pre_save.receivers),
len(signals.post_save.receivers),
len(signals.pre_delete.receivers),
len(signals.post_delete.receivers),
len(signals.pre_bulk_insert.receivers),
len(signals.post_bulk_insert.receivers),
)
self.assertEqual(self.pre_signals, post_signals)
def test_model_signals(self):
""" Model saves should throw some signals. """
def create_author():
a1 = self.Author(name='Bill Shakespeare')
def bulk_create_author_with_load():
a1 = self.Author(name='Bill Shakespeare')
self.Author.objects.insert([a1], load_bulk=True)
def bulk_create_author_without_load():
a1 = self.Author(name='Bill Shakespeare')
self.Author.objects.insert([a1], load_bulk=False)
self.assertEqual(self.get_signal_output(create_author), [
"pre_init signal, Author",
"{'name': 'Bill Shakespeare'}",
"post_init signal, Bill Shakespeare",
])
a1 = self.Author(name='Bill Shakespeare')
self.assertEqual(self.get_signal_output(a1.save), [
"pre_save signal, Bill Shakespeare",
"post_save signal, Bill Shakespeare",
"Is created"
])
a1.reload()
a1.name='William Shakespeare'
self.assertEqual(self.get_signal_output(a1.save), [
"pre_save signal, William Shakespeare",
"post_save signal, William Shakespeare",
"Is updated"
])
self.assertEqual(self.get_signal_output(a1.delete), [
'pre_delete signal, William Shakespeare',
'post_delete signal, William Shakespeare',
])
signal_output = self.get_signal_output(bulk_create_author_with_load)
# The output of this signal is not entirely deterministic. The reloaded
# object will have an object ID. Hence, we only check part of the output
self.assertEqual(signal_output[3],
"pre_bulk_insert signal, [<Author: Bill Shakespeare>]")
self.assertEqual(signal_output[-2:],
["post_bulk_insert signal, [<Author: Bill Shakespeare>]",
"Is loaded",])
self.assertEqual(self.get_signal_output(bulk_create_author_without_load), [
"pre_init signal, Author",
"{'name': 'Bill Shakespeare'}",
"post_init signal, Bill Shakespeare",
"pre_bulk_insert signal, [<Author: Bill Shakespeare>]",
"post_bulk_insert signal, [<Author: Bill Shakespeare>]",
"Not loaded",
])
self.Author.objects.delete()
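# Standard unittest entry point so this test module can be run directly.
if __name__ == '__main__':
    unittest.main()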
|
|
"""
Sphinx plugins for Django documentation.
"""
import json
import os
import re
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx import __version__ as sphinx_ver, addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.util.compat import Directive
from sphinx.util.console import bold
from sphinx.util.nodes import set_source_info
from sphinx.writers.html import SmartyPantsHTMLTranslator
# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(
r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
def setup(app):
app.add_crossref_type(
directivename="setting",
rolename="setting",
indextemplate="pair: %s; setting",
)
app.add_crossref_type(
directivename="templatetag",
rolename="ttag",
indextemplate="pair: %s; template tag"
)
app.add_crossref_type(
directivename="templatefilter",
rolename="tfilter",
indextemplate="pair: %s; template filter"
)
app.add_crossref_type(
directivename="fieldlookup",
rolename="lookup",
indextemplate="pair: %s; field lookup type",
)
app.add_description_unit(
directivename="django-admin",
rolename="djadmin",
indextemplate="pair: %s; django-admin command",
parse_node=parse_django_admin_node,
)
app.add_description_unit(
directivename="django-admin-option",
rolename="djadminopt",
indextemplate="pair: %s; django-admin command-line option",
parse_node=parse_django_adminopt_node,
)
app.add_config_value('django_next_version', '0.0', True)
app.add_directive('versionadded', VersionDirective)
app.add_directive('versionchanged', VersionDirective)
app.add_builder(DjangoStandaloneHTMLBuilder)
# register the snippet directive
app.add_directive('snippet', SnippetWithFilename)
# register a node for snippet directive so that the xml parser
# knows how to handle the enter/exit parsing event
app.add_node(snippet_with_filename,
html=(visit_snippet, depart_snippet_literal),
latex=(visit_snippet_latex, depart_snippet_latex),
man=(visit_snippet_literal, depart_snippet_literal),
text=(visit_snippet_literal, depart_snippet_literal),
texinfo=(visit_snippet_literal, depart_snippet_literal))
class snippet_with_filename(nodes.literal_block):
"""
Subclass the literal_block to override the visit/depart event handlers
"""
pass
def visit_snippet_literal(self, node):
"""
default literal block handler
"""
self.visit_literal_block(node)
def depart_snippet_literal(self, node):
"""
default literal block handler
"""
self.depart_literal_block(node)
def visit_snippet(self, node):
"""
HTML document generator visit handler
"""
lang = self.highlightlang
linenos = node.rawsource.count('\n') >= self.highlightlinenothreshold - 1
fname = node['filename']
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
if 'linenos' in node:
linenos = node['linenos']
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.highlighter.highlight_block(node.rawsource, lang,
warn=warner,
linenos=linenos,
**highlight_args)
starttag = self.starttag(node, 'div', suffix='',
CLASS='highlight-%s' % lang)
self.body.append(starttag)
self.body.append('<div class="snippet-filename">%s</div>\n' % (fname,))
self.body.append(highlighted)
self.body.append('</div>\n')
raise nodes.SkipNode
def visit_snippet_latex(self, node):
"""
Latex document generator visit handler
"""
code = node.rawsource.rstrip('\n')
lang = self.hlsettingstack[-1][0]
linenos = code.count('\n') >= self.hlsettingstack[-1][1] - 1
fname = node['filename']
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
if 'linenos' in node:
linenos = node['linenos']
def warner(msg):
self.builder.warn(msg, (self.curfilestack[-1], node.line))
hlcode = self.highlighter.highlight_block(code, lang, warn=warner,
linenos=linenos,
**highlight_args)
self.body.append(
'\n{\\colorbox[rgb]{0.9,0.9,0.9}'
'{\\makebox[\\textwidth][l]'
'{\\small\\texttt{%s}}}}\n' % (
# Some filenames have '_', which is special in latex.
fname.replace('_', r'\_'),
)
)
if self.table:
hlcode = hlcode.replace('\\begin{Verbatim}',
'\\begin{OriginalVerbatim}')
self.table.has_problematic = True
self.table.has_verbatim = True
hlcode = hlcode.rstrip()[:-14] # strip \end{Verbatim}
hlcode = hlcode.rstrip() + '\n'
self.body.append('\n' + hlcode + '\\end{%sVerbatim}\n' %
(self.table and 'Original' or ''))
# Prevent rawsource from appearing in output a second time.
raise nodes.SkipNode
def depart_snippet_latex(self, node):
"""
Latex document generator depart handler.
"""
pass
class SnippetWithFilename(Directive):
"""
The 'snippet' directive that allows adding the filename (optional)
of a code snippet in the document. This is modeled after CodeBlock.
"""
has_content = True
optional_arguments = 1
option_spec = {'filename': directives.unchanged_required}
def run(self):
code = '\n'.join(self.content)
literal = snippet_with_filename(code, code)
if self.arguments:
literal['language'] = self.arguments[0]
literal['filename'] = self.options['filename']
set_source_info(self, literal)
return [literal]
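# Illustrative reST usage of the directive above (the filename is made up):
#
#   .. snippet:: python
#      :filename: myapp/views.py
#
#      def index(request):
#          ...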
class VersionDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
if len(self.arguments) > 1:
msg = """Only one argument accepted for directive '{directive_name}::'.
Comments should be provided as content,
not as an extra argument.""".format(directive_name=self.name)
raise self.error(msg)
env = self.state.document.settings.env
ret = []
node = addnodes.versionmodified()
ret.append(node)
if self.arguments[0] == env.config.django_next_version:
node['version'] = "Development version"
else:
node['version'] = self.arguments[0]
node['type'] = self.name
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
env.note_versionchange(node['type'], node['version'], node, self.lineno)
return ret
class DjangoHTMLTranslator(SmartyPantsHTMLTranslator):
"""
Django-specific reST to HTML tweaks.
"""
# Don't use border=1, which docutils does by default.
def visit_table(self, node):
self.context.append(self.compact_p)
self.compact_p = True
self._table_row_index = 0 # Needed by Sphinx
self.body.append(self.starttag(node, 'table', CLASS='docutils'))
def depart_table(self, node):
self.compact_p = self.context.pop()
self.body.append('</table>\n')
def visit_desc_parameterlist(self, node):
self.body.append('(') # by default sphinx puts <big> around the "("
self.first_param = 1
self.optional_param_level = 0
self.param_separator = node.child_text_separator
self.required_params_left = sum([isinstance(c, addnodes.desc_parameter)
for c in node.children])
def depart_desc_parameterlist(self, node):
self.body.append(')')
if sphinx_ver < '1.0.8':
#
# Don't apply smartypants to literal blocks
#
def visit_literal_block(self, node):
self.no_smarty += 1
SmartyPantsHTMLTranslator.visit_literal_block(self, node)
def depart_literal_block(self, node):
SmartyPantsHTMLTranslator.depart_literal_block(self, node)
self.no_smarty -= 1
#
# Turn the "new in version" stuff (versionadded/versionchanged) into a
# better callout -- the Sphinx default is just a little span,
# which is a bit less obvious than I'd like.
#
# FIXME: these messages are all hardcoded in English. We need to change
# that to accommodate other language docs, but I can't work out how to make
# that work.
#
version_text = {
'versionchanged': 'Changed in Django %s',
'versionadded': 'New in Django %s',
}
def visit_versionmodified(self, node):
self.body.append(
self.starttag(node, 'div', CLASS=node['type'])
)
version_text = self.version_text.get(node['type'])
if version_text:
title = "%s%s" % (
version_text % node['version'],
":" if len(node) else "."
)
self.body.append('<span class="title">%s</span> ' % title)
def depart_versionmodified(self, node):
self.body.append("</div>\n")
# Give each section a unique ID -- nice for custom CSS hooks
def visit_section(self, node):
old_ids = node.get('ids', [])
node['ids'] = ['s-' + i for i in old_ids]
node['ids'].extend(old_ids)
SmartyPantsHTMLTranslator.visit_section(self, node)
node['ids'] = old_ids
def parse_django_admin_node(env, sig, signode):
command = sig.split(' ')[0]
env._django_curr_admin_command = command
title = "django-admin %s" % sig
signode += addnodes.desc_name(title, title)
return sig
def parse_django_adminopt_node(env, sig, signode):
"""A copy of sphinx.directives.CmdoptionDesc.parse_signature()"""
from sphinx.domains.std import option_desc_re
count = 0
firstname = ''
for m in option_desc_re.finditer(sig):
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
count += 1
if not count:
for m in simple_option_desc_re.finditer(sig):
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
count += 1
if not firstname:
raise ValueError
return firstname
class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
"""
Subclass to add some extra things we need.
"""
name = 'djangohtml'
def finish(self):
super(DjangoStandaloneHTMLBuilder, self).finish()
self.info(bold("writing templatebuiltins.js..."))
xrefs = self.env.domaindata["std"]["objects"]
templatebuiltins = {
"ttags": [n for ((t, n), (l, a)) in xrefs.items()
if t == "templatetag" and l == "ref/templates/builtins"],
"tfilters": [n for ((t, n), (l, a)) in xrefs.items()
if t == "templatefilter" and l == "ref/templates/builtins"],
}
outfilename = os.path.join(self.outdir, "templatebuiltins.js")
with open(outfilename, 'w') as fp:
fp.write('var django_template_builtins = ')
json.dump(templatebuiltins, fp)
fp.write(';\n')
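# Illustrative only: enabling this extension in a Sphinx project's conf.py,
# assuming the module is importable as "djangodocs":
#
#   extensions = ['djangodocs']
#   django_next_version = '1.9'     # version value is an assumption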
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import eventlet
import inspect
from oslo_config import cfg
from oslo_context import context as oslo_context
from oslo_log import log as logging
from oslo_utils import timeutils
from osprofiler import profiler
from senlin.common import consts
from senlin.common import context
from senlin.common import exception as exc
from senlin.common.i18n import _
from senlin.common import schema
from senlin.common import utils
from senlin.drivers import base as driver_base
from senlin.engine import environment
from senlin.objects import credential as co
from senlin.objects import profile as po
LOG = logging.getLogger(__name__)
class Profile(object):
"""Base class for profiles."""
VERSIONS = {}
KEYS = (
TYPE, VERSION, PROPERTIES,
) = (
'type', 'version', 'properties',
)
spec_schema = {
TYPE: schema.String(
_('Name of the profile type.'),
required=True,
),
VERSION: schema.String(
_('Version number of the profile type.'),
required=True,
),
PROPERTIES: schema.Map(
_('Properties for the profile.'),
required=True,
)
}
properties_schema = {}
OPERATIONS = {}
def __new__(cls, name, spec, **kwargs):
"""Create a new profile of the appropriate class.
:param name: The name for the profile.
:param spec: A dictionary containing the spec for the profile.
:param kwargs: Keyword arguments for profile creation.
:returns: An instance of a specific sub-class of Profile.
"""
type_name, version = schema.get_spec_version(spec)
type_str = "-".join([type_name, version])
if cls != Profile:
ProfileClass = cls
else:
ProfileClass = environment.global_env().get_profile(type_str)
return super(Profile, cls).__new__(ProfileClass)
def __init__(self, name, spec, **kwargs):
"""Initialize a profile instance.
:param name: A string that specifies the name for the profile.
:param spec: A dictionary containing the detailed profile spec.
:param kwargs: Keyword arguments for initializing the profile.
:returns: An instance of a specific sub-class of Profile.
"""
type_name, version = schema.get_spec_version(spec)
self.type_name = type_name
self.version = version
type_str = "-".join([type_name, version])
self.name = name
self.spec = spec
self.id = kwargs.get('id', None)
self.type = kwargs.get('type', type_str)
self.user = kwargs.get('user')
self.project = kwargs.get('project')
self.domain = kwargs.get('domain')
self.metadata = kwargs.get('metadata', {})
self.created_at = kwargs.get('created_at', None)
self.updated_at = kwargs.get('updated_at', None)
self.spec_data = schema.Spec(self.spec_schema, self.spec)
self.properties = schema.Spec(
self.properties_schema,
self.spec.get(self.PROPERTIES, {}),
version)
if not self.id:
# new object needs a context dict
self.context = self._init_context()
else:
self.context = kwargs.get('context')
# initialize clients
self._computeclient = None
self._networkclient = None
self._orchestrationclient = None
self._workflowclient = None
self._block_storageclient = None
self._glanceclient = None
@classmethod
def _from_object(cls, profile):
"""Construct a profile from profile object.
:param profile: a profile object that contains all required fields.
"""
kwargs = {
'id': profile.id,
'type': profile.type,
'context': profile.context,
'user': profile.user,
'project': profile.project,
'domain': profile.domain,
'metadata': profile.metadata,
'created_at': profile.created_at,
'updated_at': profile.updated_at,
}
return cls(profile.name, profile.spec, **kwargs)
@classmethod
def load(cls, ctx, profile=None, profile_id=None, project_safe=True):
"""Retrieve a profile object from database."""
if profile is None:
profile = po.Profile.get(ctx, profile_id,
project_safe=project_safe)
if profile is None:
raise exc.ResourceNotFound(type='profile', id=profile_id)
return cls._from_object(profile)
@classmethod
def create(cls, ctx, name, spec, metadata=None):
"""Create a profile object and validate it.
:param ctx: The requesting context.
:param name: The name for the profile object.
:param spec: A dict containing the detailed spec.
:param metadata: An optional dictionary specifying key-value pairs to
be associated with the profile.
:returns: An instance of Profile.
"""
if metadata is None:
metadata = {}
try:
profile = cls(name, spec, metadata=metadata, user=ctx.user_id,
project=ctx.project_id)
profile.validate(True)
except (exc.ResourceNotFound, exc.ESchema) as ex:
error = _("Failed in creating profile %(name)s: %(error)s"
) % {"name": name, "error": str(ex)}
raise exc.InvalidSpec(message=error)
profile.store(ctx)
return profile
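# Illustrative only: the spec dict passed to create() mirrors spec_schema
# above; the type/version values here are assumptions:
#
#   {
#       'type': 'os.nova.server',
#       'version': '1.0',
#       'properties': {...},
#   }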
@classmethod
def delete(cls, ctx, profile_id):
po.Profile.delete(ctx, profile_id)
def store(self, ctx):
"""Store the profile into database and return its ID."""
timestamp = timeutils.utcnow(True)
values = {
'name': self.name,
'type': self.type,
'context': self.context,
'spec': self.spec,
'user': self.user,
'project': self.project,
'domain': self.domain,
'meta_data': self.metadata,
}
if self.id:
self.updated_at = timestamp
values['updated_at'] = timestamp
po.Profile.update(ctx, self.id, values)
else:
self.created_at = timestamp
values['created_at'] = timestamp
profile = po.Profile.create(ctx, values)
self.id = profile.id
return self.id
@classmethod
@profiler.trace('Profile.create_object', hide_args=False)
def create_object(cls, ctx, obj):
profile = cls.load(ctx, profile_id=obj.profile_id)
return profile.do_create(obj)
@classmethod
@profiler.trace('Profile.create_cluster_object', hide_args=False)
def create_cluster_object(cls, ctx, obj):
profile = cls.load(ctx, profile_id=obj.profile_id)
try:
ret = profile.do_cluster_create(obj)
except NotImplementedError:
return None
return ret
@classmethod
@profiler.trace('Profile.delete_object', hide_args=False)
def delete_object(cls, ctx, obj, **params):
profile = cls.load(ctx, profile_id=obj.profile_id)
return profile.do_delete(obj, **params)
@classmethod
@profiler.trace('Profile.delete_cluster_object', hide_args=False)
def delete_cluster_object(cls, ctx, obj, **params):
profile = cls.load(ctx, profile_id=obj.profile_id)
try:
ret = profile.do_cluster_delete(obj, **params)
except NotImplementedError:
return None
return ret
@classmethod
@profiler.trace('Profile.update_object', hide_args=False)
def update_object(cls, ctx, obj, new_profile_id=None, **params):
profile = cls.load(ctx, profile_id=obj.profile_id)
new_profile = None
if new_profile_id:
new_profile = cls.load(ctx, profile_id=new_profile_id)
return profile.do_update(obj, new_profile, **params)
@classmethod
@profiler.trace('Profile.get_details', hide_args=False)
def get_details(cls, ctx, obj):
profile = cls.load(ctx, profile_id=obj.profile_id)
return profile.do_get_details(obj)
@classmethod
@profiler.trace('Profile.adopt_node', hide_args=False)
def adopt_node(cls, ctx, obj, type_name, overrides=None, snapshot=False):
"""Adopt a node.
:param ctx: Request context.
:param obj: A temporary node object.
:param overrides: An optional parameter that specifies the set of
properties to be overridden.
:param snapshot: A boolean flag indicating whether a snapshot should
be created before adopting the node.
:returns: A dictionary containing the profile spec created from the
specific node, or a dictionary containing error message.
"""
parts = type_name.split("-")
tmpspec = {"type": parts[0], "version": parts[1]}
profile = cls("name", tmpspec)
return profile.do_adopt(obj, overrides=overrides, snapshot=snapshot)
@classmethod
@profiler.trace('Profile.join_cluster', hide_args=False)
def join_cluster(cls, ctx, obj, cluster_id):
profile = cls.load(ctx, profile_id=obj.profile_id)
return profile.do_join(obj, cluster_id)
@classmethod
@profiler.trace('Profile.leave_cluster', hide_args=False)
def leave_cluster(cls, ctx, obj):
profile = cls.load(ctx, profile_id=obj.profile_id)
return profile.do_leave(obj)
@classmethod
@profiler.trace('Profile.check_object', hide_args=False)
def check_object(cls, ctx, obj):
profile = cls.load(ctx, profile_id=obj.profile_id)
return profile.do_check(obj)
@classmethod
    @profiler.trace('Profile.healthcheck_object', hide_args=False)
def healthcheck_object(cls, ctx, obj, health_check_type):
profile = cls.load(ctx, profile_id=obj.profile_id)
return profile.do_healthcheck(obj, health_check_type)
@classmethod
@profiler.trace('Profile.recover_object', hide_args=False)
def recover_object(cls, ctx, obj, **options):
profile = cls.load(ctx, profile_id=obj.profile_id)
return profile.do_recover(obj, **options)
def validate(self, validate_props=False):
"""Validate the schema and the data provided."""
# general validation
self.spec_data.validate()
self.properties.validate()
ctx_dict = self.properties.get('context', {})
if ctx_dict:
argspec = inspect.getfullargspec(context.RequestContext.__init__)
valid_keys = argspec.args
bad_keys = [k for k in ctx_dict if k not in valid_keys]
if bad_keys:
msg = _("Some keys in 'context' are invalid: %s") % bad_keys
raise exc.ESchema(message=msg)
if validate_props:
self.do_validate(obj=self)
@classmethod
def get_schema(cls):
return dict((name, dict(schema))
for name, schema in cls.properties_schema.items())
@classmethod
def get_ops(cls):
return dict((name, dict(schema))
for name, schema in cls.OPERATIONS.items())
def _init_context(self):
profile_context = {}
if self.CONTEXT in self.properties:
profile_context = self.properties[self.CONTEXT] or {}
ctx_dict = context.get_service_credentials(**profile_context)
ctx_dict.pop('project_name', None)
ctx_dict.pop('project_domain_name', None)
return ctx_dict
def _build_conn_params(self, user, project):
"""Build connection params for specific user and project.
:param user: The ID of the user for which a trust will be used.
:param project: The ID of the project for which a trust will be used.
:returns: A dict containing the required parameters for connection
creation.
"""
cred = co.Credential.get(oslo_context.get_current(), user, project)
if cred is None:
raise exc.TrustNotFound(trustor=user)
trust_id = cred.cred['openstack']['trust']
# This is supposed to be trust-based authentication
params = copy.deepcopy(self.context)
params['trust_id'] = trust_id
return params
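    # The connection parameters are a copy of the profile's stored context
    # with the trust ID from the credential store injected, so the clients
    # built below authenticate via trust on behalf of the profile's owner
    # (user/project) rather than with the service credentials directly.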
def compute(self, obj):
"""Construct compute client based on object.
:param obj: Object for which the client is created. It is expected to
be None when retrieving an existing client. When creating
a client, it contains the user and project to be used.
"""
if self._computeclient is not None:
return self._computeclient
params = self._build_conn_params(obj.user, obj.project)
self._computeclient = driver_base.SenlinDriver().compute(params)
return self._computeclient
def glance(self, obj):
"""Construct glance client based on object.
:param obj: Object for which the client is created. It is expected to
be None when retrieving an existing client. When creating
a client, it contains the user and project to be used.
"""
if self._glanceclient is not None:
return self._glanceclient
params = self._build_conn_params(obj.user, obj.project)
self._glanceclient = driver_base.SenlinDriver().glance(params)
return self._glanceclient
def network(self, obj):
"""Construct network client based on object.
:param obj: Object for which the client is created. It is expected to
be None when retrieving an existing client. When creating
a client, it contains the user and project to be used.
"""
if self._networkclient is not None:
return self._networkclient
params = self._build_conn_params(obj.user, obj.project)
self._networkclient = driver_base.SenlinDriver().network(params)
return self._networkclient
def orchestration(self, obj):
"""Construct orchestration client based on object.
:param obj: Object for which the client is created. It is expected to
be None when retrieving an existing client. When creating
a client, it contains the user and project to be used.
"""
if self._orchestrationclient is not None:
return self._orchestrationclient
params = self._build_conn_params(obj.user, obj.project)
oc = driver_base.SenlinDriver().orchestration(params)
self._orchestrationclient = oc
return oc
def workflow(self, obj):
if self._workflowclient is not None:
return self._workflowclient
params = self._build_conn_params(obj.user, obj.project)
self._workflowclient = driver_base.SenlinDriver().workflow(params)
return self._workflowclient
def block_storage(self, obj):
"""Construct cinder client based on object.
:param obj: Object for which the client is created. It is expected to
be None when retrieving an existing client. When creating
a client, it contains the user and project to be used.
"""
if self._block_storageclient is not None:
return self._block_storageclient
params = self._build_conn_params(obj.user, obj.project)
self._block_storageclient = driver_base.SenlinDriver().block_storage(
params)
return self._block_storageclient
def do_create(self, obj):
"""For subclass to override."""
raise NotImplementedError
def do_cluster_create(self, obj):
"""For subclass to override."""
raise NotImplementedError
def do_delete(self, obj, **params):
"""For subclass to override."""
raise NotImplementedError
    def do_cluster_delete(self, obj, **params):
        """For subclass to override."""
        raise NotImplementedError
def do_update(self, obj, new_profile, **params):
"""For subclass to override."""
LOG.warning("Update operation not supported.")
return True
def do_check(self, obj):
"""For subclass to override."""
LOG.warning("Check operation not supported.")
return True
    def do_healthcheck(self, obj, health_check_type):
        """Default healthcheck operation.
        This is provided as a fallback if a specific profile type does not
        override this method.
        :param obj: The node object to operate on.
        :param health_check_type: The type of health check requested; the
            default implementation ignores it and simply delegates to
            ``do_check``.
        :return status: True indicates node is healthy, False indicates
            it is unhealthy.
        """
        return self.do_check(obj)
def do_get_details(self, obj):
"""For subclass to override."""
LOG.warning("Get_details operation not supported.")
return {}
def do_adopt(self, obj, overrides=None, snapshot=False):
"""For subclass to override."""
LOG.warning("Adopt operation not supported.")
return {}
def do_join(self, obj, cluster_id):
"""For subclass to override to perform extra operations."""
LOG.warning("Join operation not specialized.")
return True
def do_leave(self, obj):
"""For subclass to override to perform extra operations."""
LOG.warning("Leave operation not specialized.")
return True
def do_recover(self, obj, **options):
"""Default recover operation.
This is provided as a fallback if a specific profile type does not
override this method.
:param obj: The node object to operate on.
:param options: Keyword arguments for the recover operation.
:return id: New id of the recovered resource or None if recovery
failed.
:return status: True indicates successful recovery, False indicates
failure.
"""
operation = options.get('operation', None)
force_recreate = options.get('force_recreate', None)
delete_timeout = options.get('delete_timeout', None)
if operation.upper() != consts.RECOVER_RECREATE:
LOG.error("Recover operation not supported: %s", operation)
return None, False
extra_params = options.get('operation_params', None)
fence_compute = False
if extra_params:
fence_compute = extra_params.get('fence_compute', False)
try:
self.do_delete(obj, force=fence_compute, timeout=delete_timeout)
except exc.EResourceDeletion as ex:
if force_recreate:
# log error and continue on to creating the node
LOG.warning('Failed to delete node during recovery action: %s',
ex)
else:
raise exc.EResourceOperation(op='recovering', type='node',
id=obj.id,
message=str(ex))
# pause to allow deleted resource to get reclaimed by nova
# this is needed to avoid a problem when the compute resources are
# at their quota limit. The deleted resource has to become available
# so that the new node can be created.
eventlet.sleep(cfg.CONF.batch_interval)
res = None
try:
res = self.do_create(obj)
except exc.EResourceCreation as ex:
raise exc.EResourceOperation(op='recovering', type='node',
id=obj.id, message=str(ex),
resource_id=ex.resource_id)
return res, True
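    # Illustrative ``options`` accepted by this default recover path
    # (hypothetical values):
    #   {'operation': consts.RECOVER_RECREATE, 'force_recreate': True,
    #    'delete_timeout': 120, 'operation_params': {'fence_compute': True}}
    # Any operation other than RECREATE is rejected here and must be handled
    # by a profile-specific override.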
def do_validate(self, obj):
"""For subclass to override."""
LOG.warning("Validate operation not supported.")
return True
def to_dict(self):
pb_dict = {
'id': self.id,
'name': self.name,
'type': self.type,
'user': self.user,
'project': self.project,
'domain': self.domain,
'spec': self.spec,
'metadata': self.metadata,
'created_at': utils.isotime(self.created_at),
'updated_at': utils.isotime(self.updated_at),
}
return pb_dict
def validate_for_update(self, new_profile):
non_updatables = []
for (k, v) in new_profile.properties.items():
if self.properties.get(k, None) != v:
if not self.properties_schema[k].updatable:
non_updatables.append(k)
if not non_updatables:
return True
msg = ", ".join(non_updatables)
LOG.error("The following properties are not updatable: %s.", msg)
return False
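
# A minimal sketch (not part of the original module) of how a concrete
# profile type plugs into this base class: it declares its property schema
# and overrides the do_* hooks it supports, while everything else falls back
# to the defaults above.  The class name, property keys, and return values
# here are hypothetical, and registration with the global environment
# (used by ``Profile.__new__``) is omitted.
class DummyProfile(Profile):
    """Hypothetical profile type used only to illustrate the contract."""

    # Normally a mapping of property names to schema objects; empty here.
    properties_schema = {}
    OPERATIONS = {}

    def do_create(self, obj):
        # Concrete profiles create the physical resource and return its ID.
        return 'fake-physical-id'

    def do_delete(self, obj, **params):
        # Concrete profiles delete the physical resource and return True.
        return True

    def do_check(self, obj):
        # Concrete profiles report whether the physical resource is healthy.
        return True

# Such a type, once registered, is then driven through the classmethod
# wrappers above, e.g. ``Profile.create_object(ctx, node)`` loads the right
# subclass via ``load()`` and dispatches to its ``do_create()``.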
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import pytest
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, Rectangle
from matplotlib import rc_context
from .... import units as u
from ....io import fits
from ....wcs import WCS
from ....coordinates import SkyCoord
from ..patches import SphericalCircle
from .. import WCSAxes
from . import datasets
from ....tests.image_tests import IMAGE_REFERENCE_DIR
from ..frame import EllipticalFrame
class BaseImageTests:
@classmethod
def setup_class(cls):
cls._data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
msx_header = os.path.join(cls._data_dir, 'msx_header')
cls.msx_header = fits.Header.fromtextfile(msx_header)
rosat_header = os.path.join(cls._data_dir, 'rosat_header')
cls.rosat_header = fits.Header.fromtextfile(rosat_header)
twoMASS_k_header = os.path.join(cls._data_dir, '2MASS_k_header')
cls.twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
cube_header = os.path.join(cls._data_dir, 'cube_header')
cls.cube_header = fits.Header.fromtextfile(cube_header)
slice_header = os.path.join(cls._data_dir, 'slice_header')
cls.slice_header = fits.Header.fromtextfile(slice_header)
class TestBasic(BaseImageTests):
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='image_plot.png',
tolerance=0, style={})
def test_image_plot(self):
# Test for plotting image and also setting values of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect='equal')
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks([-0.30, 0., 0.20] * u.degree, size=5, width=1)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=1.5)
@pytest.mark.parametrize('axisbelow', [True, False, 'line'])
def test_axisbelow(self, axisbelow):
# Test that tick marks, labels, and gridlines are drawn with the
# correct zorder controlled by the axisbelow property.
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect='equal')
ax.set_axisbelow(axisbelow)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks([-0.30, 0., 0.20] * u.degree, size=5, width=1)
ax.grid()
# Add an image (default zorder=0).
ax.imshow(np.zeros((64, 64)))
# Add a patch (default zorder=1).
r = Rectangle((30., 50.), 60., 50., facecolor='green', edgecolor='red')
ax.add_patch(r)
# Add a line (default zorder=2).
ax.plot([32, 128], [32, 128], linewidth=10)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='contour_overlay.png',
tolerance=0, style={})
def test_contour_overlay(self):
# Test for overlaying contours on images
hdu_msx = datasets.fetch_msx_hdu()
wcs_msx = WCS(self.msx_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
# Overplot contour
ax.contour(hdu_msx.data, transform=ax.get_transform(wcs_msx),
colors='orange', levels=[2.5e-5, 5e-5, 1.e-4])
ax.coords[0].set_ticks(size=5, width=1)
ax.coords[1].set_ticks(size=5, width=1)
ax.set_xlim(0., 720.)
ax.set_ylim(0., 720.)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='overlay_features_image.png',
tolerance=0, style={})
def test_overlay_features_image(self):
# Test for overlaying grid, changing format of ticks, setting spacing
# and number of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.25, 0.25, 0.65, 0.65],
projection=WCS(self.msx_header), aspect='equal')
# Change the format of the ticks
ax.coords[0].set_major_formatter('dd:mm:ss')
ax.coords[1].set_major_formatter('dd:mm:ss.ssss')
# Overlay grid on image
ax.grid(color='red', alpha=1.0, lw=1, linestyle='dashed')
# Set the spacing of ticks on the 'glon' axis to 4 arcsec
ax.coords['glon'].set_ticks(spacing=4 * u.arcsec, size=5, width=1)
# Set the number of ticks on the 'glat' axis to 9
ax.coords['glat'].set_ticks(number=9, size=5, width=1)
# Set labels on axes
ax.coords['glon'].set_axislabel('Galactic Longitude', minpad=1.6)
ax.coords['glat'].set_axislabel('Galactic Latitude', minpad=-0.75)
# Change the frame linewidth and color
ax.coords.frame.set_color('red')
ax.coords.frame.set_linewidth(2)
assert ax.coords.frame.get_color() == 'red'
assert ax.coords.frame.get_linewidth() == 2
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='curvlinear_grid_patches_image.png',
tolerance=0, style={})
def test_curvilinear_grid_patches_image(self):
# Overlay curvilinear grid and patches on image
fig = plt.figure(figsize=(8, 8))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.rosat_header), aspect='equal')
ax.set_xlim(-0.5, 479.5)
ax.set_ylim(-0.5, 239.5)
ax.grid(color='black', alpha=1.0, lw=1, linestyle='dashed')
p = Circle((300, 100), radius=40, ec='yellow', fc='none')
ax.add_patch(p)
p = Circle((30., 20.), radius=20., ec='orange', fc='none',
transform=ax.get_transform('world'))
ax.add_patch(p)
p = Circle((60., 50.), radius=20., ec='red', fc='none',
transform=ax.get_transform('fk5'))
ax.add_patch(p)
p = Circle((40., 60.), radius=20., ec='green', fc='none',
transform=ax.get_transform('galactic'))
ax.add_patch(p)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='cube_slice_image.png',
tolerance=0, style={})
def test_cube_slice_image(self):
# Test for cube slicing
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, 'y', 'x'), aspect='equal')
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[2].set_axislabel('Velocity m/s')
ax.coords[1].set_ticks(spacing=0.2 * u.deg, width=1,
exclude_overlapping=True)
ax.coords[2].set_ticks(spacing=400 * u.m / u.s, width=1,
exclude_overlapping=True)
ax.coords[1].grid(grid_type='contours', color='red', linestyle='solid')
ax.coords[2].grid(grid_type='contours', color='red', linestyle='solid')
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='cube_slice_image_lonlat.png',
tolerance=0, style={})
def test_cube_slice_image_lonlat(self):
# Test for cube slicing. Here we test with longitude and latitude since
# there is some longitude-specific code in _update_grid_contour.
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=('x', 'y', 50), aspect='equal')
ax.set_xlim(-0.5, 106.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[0].grid(grid_type='contours', color='blue', linestyle='solid')
ax.coords[1].grid(grid_type='contours', color='red', linestyle='solid')
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_plot_coord(self):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(266 * u.deg, -29 * u.deg)
ax.plot_coord(c, 'o')
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_plot_line(self):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord([266, 266.8] * u.deg, [-29, -28.9] * u.deg)
ax.plot_coord(c)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='changed_axis_units.png',
tolerance=0, style={})
def test_changed_axis_units(self):
# Test to see if changing the units of axis works
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, 'y', 'x'), aspect='equal')
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[2].set_major_formatter('x.xx')
ax.coords[2].set_format_unit(u.km / u.s)
ax.coords[2].set_axislabel('Velocity km/s')
ax.coords[1].set_ticks(width=1, exclude_overlapping=True)
ax.coords[2].set_ticks(width=1, exclude_overlapping=True)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='minor_ticks_image.png',
tolerance=0, style={})
def test_minor_ticks(self):
# Test for drawing minor ticks
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, 'y', 'x'), aspect='equal')
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[2].set_ticks(exclude_overlapping=True)
ax.coords[1].set_ticks(exclude_overlapping=True)
ax.coords[2].display_minor_ticks(True)
ax.coords[1].display_minor_ticks(True)
ax.coords[2].set_minor_frequency(3)
ax.coords[1].set_minor_frequency(10)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='ticks_labels.png',
tolerance=0, style={})
def test_ticks_labels(self):
fig = plt.figure(figsize=(6, 6))
ax = WCSAxes(fig, [0.1, 0.1, 0.7, 0.7], wcs=None)
fig.add_axes(ax)
ax.set_xlim(-0.5, 2)
ax.set_ylim(-0.5, 2)
ax.coords[0].set_ticks(size=10, color='blue', alpha=0.2, width=1)
ax.coords[1].set_ticks(size=20, color='red', alpha=0.9, width=1)
ax.coords[0].set_ticks_position('all')
ax.coords[1].set_ticks_position('all')
ax.coords[0].set_axislabel('X-axis', size=20)
ax.coords[1].set_axislabel('Y-axis', color='green', size=25,
weight='regular', style='normal',
family='cmtt10')
ax.coords[0].set_axislabel_position('t')
ax.coords[1].set_axislabel_position('r')
ax.coords[0].set_ticklabel(color='purple', size=15, alpha=1,
weight='light', style='normal',
family='cmss10')
ax.coords[1].set_ticklabel(color='black', size=18, alpha=0.9,
weight='bold', family='cmr10')
ax.coords[0].set_ticklabel_position('all')
ax.coords[1].set_ticklabel_position('r')
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='rcparams.png',
tolerance=0, style={})
def test_rcparams(self):
# Test default style (matplotlib.rcParams) for ticks and gridlines
with rc_context({
'xtick.color': 'red',
'xtick.major.size': 20,
'xtick.major.width': 2,
'grid.color': 'blue',
'grid.linestyle': ':',
'grid.linewidth': 1,
'grid.alpha': 0.5}):
fig = plt.figure(figsize=(6, 6))
ax = WCSAxes(fig, [0.1, 0.1, 0.7, 0.7], wcs=None)
fig.add_axes(ax)
ax.set_xlim(-0.5, 2)
ax.set_ylim(-0.5, 2)
ax.grid()
ax.coords[0].set_ticks(exclude_overlapping=True)
ax.coords[1].set_ticks(exclude_overlapping=True)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='tick_angles.png',
tolerance=0, style={})
def test_tick_angles(self):
# Test that tick marks point in the correct direction, even when the
# axes limits extend only over a few FITS pixels. Addresses #45, #46.
w = WCS()
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crval = [90, 70]
w.wcs.cdelt = [16, 16]
w.wcs.crpix = [1, 1]
w.wcs.radesys = 'ICRS'
w.wcs.equinox = 2000.0
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w)
ax.set_xlim(1, -1)
ax.set_ylim(-1, 1)
ax.grid(color='gray', alpha=0.5, linestyle='solid')
ax.coords['ra'].set_ticks(color='red', size=20)
ax.coords['dec'].set_ticks(color='red', size=20)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='tick_angles_non_square_axes.png',
tolerance=0, style={})
def test_tick_angles_non_square_axes(self):
# Test that tick marks point in the correct direction, even when the
# axes limits extend only over a few FITS pixels, and the axes are
# non-square.
w = WCS()
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crval = [90, 70]
w.wcs.cdelt = [16, 16]
w.wcs.crpix = [1, 1]
w.wcs.radesys = 'ICRS'
w.wcs.equinox = 2000.0
fig = plt.figure(figsize=(6, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w)
ax.set_xlim(1, -1)
ax.set_ylim(-1, 1)
ax.grid(color='gray', alpha=0.5, linestyle='solid')
ax.coords['ra'].set_ticks(color='red', size=20)
ax.coords['dec'].set_ticks(color='red', size=20)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='set_coord_type.png',
tolerance=0, style={})
def test_set_coord_type(self):
# Test for setting coord_type
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6],
projection=WCS(self.msx_header),
aspect='equal')
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_coord_type('scalar')
ax.coords[1].set_coord_type('scalar')
ax.coords[0].set_major_formatter('x.xxx')
ax.coords[1].set_major_formatter('x.xxx')
ax.coords[0].set_ticks(exclude_overlapping=True)
ax.coords[1].set_ticks(exclude_overlapping=True)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='test_ticks_regression_1.png',
tolerance=0, style={})
def test_ticks_regression(self):
# Regression test for a bug that caused ticks aligned exactly with a
# sampled frame point to not appear. This also checks that tick labels
# don't get added more than once, and that no error occurs when e.g.
# the top part of the frame is all at the same coordinate as one of the
# potential ticks (which causes the tick angle calculation to return
# NaN).
wcs = WCS(self.slice_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5],
projection=wcs, aspect='auto')
limits = wcs.wcs_world2pix([0, 0], [35e3, 80e3], 0)[1]
ax.set_ylim(*limits)
ax.coords[0].set_ticks(spacing=0.002 * u.deg)
ax.coords[1].set_ticks(spacing=5 * u.km / u.s)
ax.coords[0].set_ticklabel(alpha=0.5) # to see multiple labels
ax.coords[1].set_ticklabel(alpha=0.5)
ax.coords[0].set_ticklabel_position('all')
ax.coords[1].set_ticklabel_position('all')
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='test_axislabels_regression.png',
savefig_kwargs={'bbox_inches': 'tight'},
tolerance=0, style={})
def test_axislabels_regression(self):
# Regression test for a bug that meant that if tick labels were made
# invisible with ``set_visible(False)``, they were still added to the
# list of bounding boxes for tick labels, but with default values of 0
# to 1, which caused issues.
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect='auto')
ax.coords[0].set_axislabel("Label 1")
ax.coords[1].set_axislabel("Label 2")
ax.coords[1].set_axislabel_visibility_rule('always')
ax.coords[1].ticklabels.set_visible(False)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
savefig_kwargs={'bbox_inches': 'tight'},
tolerance=0, style={})
def test_noncelestial_angular(self, tmpdir):
# Regression test for a bug that meant that when passing a WCS that had
# angular axes and using set_coord_type to set the coordinates to
# longitude/latitude, but where the WCS wasn't recognized as celestial,
# the WCS units are not converted to deg, so we can't assume that
# transform will always return degrees.
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['solar-x', 'solar-y']
wcs.wcs.cunit = ['arcsec', 'arcsec']
fig = plt.figure(figsize=(3, 3))
ax = fig.add_subplot(1, 1, 1, projection=wcs)
ax.imshow(np.zeros([1024, 1024]), origin='lower')
ax.coords[0].set_coord_type('longitude', coord_wrap=180)
ax.coords[1].set_coord_type('latitude')
ax.coords[0].set_major_formatter('s.s')
ax.coords[1].set_major_formatter('s.s')
ax.grid(color='white', ls='solid')
# Force drawing (needed for format_coord)
fig.savefig(tmpdir.join('nothing').strpath)
# TODO: the formatted string should show units
assert ax.format_coord(512, 512) == "513.0 513.0 (world)"
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
savefig_kwargs={'bbox_inches': 'tight'},
tolerance=0, style={})
def test_patches_distortion(self, tmpdir):
# Check how patches get distorted (and make sure that scatter markers
# and SphericalCircle don't)
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect='equal')
# Pixel coordinates
r = Rectangle((30., 50.), 60., 50., edgecolor='green', facecolor='none')
ax.add_patch(r)
# FK5 coordinates
r = Rectangle((266.4, -28.9), 0.3, 0.3, edgecolor='cyan', facecolor='none',
transform=ax.get_transform('fk5'))
ax.add_patch(r)
# FK5 coordinates
c = Circle((266.4, -29.1), 0.15, edgecolor='magenta', facecolor='none',
transform=ax.get_transform('fk5'))
ax.add_patch(c)
# Pixel coordinates
ax.scatter([40, 100, 130], [30, 130, 60], s=100, edgecolor='red', facecolor=(1, 0, 0, 0.5))
# World coordinates (should not be distorted)
ax.scatter(266.78238, -28.769255, transform=ax.get_transform('fk5'), s=300,
edgecolor='red', facecolor='none')
# World coordinates (should not be distorted)
r = SphericalCircle((266.4 * u.deg, -29.1 * u.deg), 0.15 * u.degree,
edgecolor='purple', facecolor='none',
transform=ax.get_transform('fk5'))
ax.add_patch(r)
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticklabel_visible(False)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_elliptical_frame(self):
# Regression test for a bug (astropy/astropy#6063) that caused labels to
# be incorrectly simplified.
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(5, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, frame_class=EllipticalFrame)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_hms_labels(self):
        # This tests the appearance of the hms superscripts in tick labels
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.3, 0.2, 0.65, 0.6],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.coords[0].set_ticks(spacing=0.2 * 15 * u.arcsec)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={'text.usetex': True})
def test_latex_labels(self):
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.3, 0.2, 0.65, 0.6],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.coords[0].set_ticks(spacing=0.2 * 15 * u.arcsec)
return fig
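
# The tests above are figure-comparison tests: each returns a Matplotlib
# figure that the pytest-mpl plugin compares against a reference image under
# IMAGE_REFERENCE_DIR, with ``tolerance`` bounding the allowed difference and
# ``style`` fixing the Matplotlib style the figure is rendered with.  An
# illustrative invocation (assuming pytest-mpl and pytest-remotedata are
# installed) looks something like:
#
#   pytest --mpl --remote-data=astropy astropy/visualization/wcsaxes/tests/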